grpc 1.8.7 → 1.9.0.pre1
Potentially problematic release: this version of grpc might be problematic.
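If this prerelease needs to be evaluated or avoided, the gem version can be pinned explicitly in a Gemfile. A minimal sketch; the version strings come from this page, everything else is illustrative:

    # Gemfile: pin grpc while evaluating the 1.9.0.pre1 prerelease
    source 'https://rubygems.org'

    # Opt into the prerelease explicitly (Bundler only resolves to a
    # prerelease when it is named exactly):
    gem 'grpc', '1.9.0.pre1'

    # ...or stay on the previous stable release until the prerelease is vetted:
    # gem 'grpc', '1.8.7'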
- checksums.yaml +4 -4
- data/Makefile +549 -325
- data/include/grpc/impl/codegen/grpc_types.h +1 -2
- data/include/grpc/impl/codegen/port_platform.h +46 -5
- data/include/grpc/impl/codegen/slice.h +1 -2
- data/include/grpc/module.modulemap +0 -2
- data/include/grpc/slice_buffer.h +1 -2
- data/include/grpc/support/log.h +4 -2
- data/include/grpc/support/thd.h +4 -1
- data/include/grpc/support/tls.h +6 -0
- data/include/grpc/support/tls_gcc.h +5 -40
- data/include/grpc/support/tls_msvc.h +9 -0
- data/include/grpc/support/tls_pthread.h +9 -0
- data/src/core/ext/filters/client_channel/backup_poller.cc +32 -29
- data/src/core/ext/filters/client_channel/backup_poller.h +2 -2
- data/src/core/ext/filters/client_channel/channel_connectivity.cc +26 -32
- data/src/core/ext/filters/client_channel/client_channel.cc +325 -356
- data/src/core/ext/filters/client_channel/client_channel.h +4 -12
- data/src/core/ext/filters/client_channel/client_channel_factory.cc +9 -14
- data/src/core/ext/filters/client_channel/client_channel_factory.h +7 -20
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +7 -10
- data/src/core/ext/filters/client_channel/connector.cc +6 -7
- data/src/core/ext/filters/client_channel/connector.h +6 -16
- data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +38 -50
- data/src/core/ext/filters/client_channel/http_connect_handshaker.h +0 -8
- data/src/core/ext/filters/client_channel/http_proxy.cc +9 -13
- data/src/core/ext/filters/client_channel/http_proxy.h +0 -8
- data/src/core/ext/filters/client_channel/lb_policy.cc +72 -94
- data/src/core/ext/filters/client_channel/lb_policy.h +83 -92
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +14 -19
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +0 -8
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +474 -591
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h +0 -8
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +2 -10
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +6 -6
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +0 -8
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +0 -9
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +0 -9
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +3 -4
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +9 -12
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +160 -182
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +182 -221
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +24 -35
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +9 -20
- data/src/core/ext/filters/client_channel/lb_policy_factory.cc +6 -9
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +4 -15
- data/src/core/ext/filters/client_channel/lb_policy_registry.cc +3 -3
- data/src/core/ext/filters/client_channel/lb_policy_registry.h +1 -9
- data/src/core/ext/filters/client_channel/parse_address.cc +1 -1
- data/src/core/ext/filters/client_channel/parse_address.h +0 -8
- data/src/core/ext/filters/client_channel/proxy_mapper.cc +6 -8
- data/src/core/ext/filters/client_channel/proxy_mapper.h +6 -16
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.cc +13 -17
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +2 -12
- data/src/core/ext/filters/client_channel/resolver.cc +11 -13
- data/src/core/ext/filters/client_channel/resolver.h +14 -25
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +57 -70
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +2 -12
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +23 -31
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +27 -45
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +5 -15
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +9 -11
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +53 -66
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +25 -33
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +1 -9
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +26 -35
- data/src/core/ext/filters/client_channel/resolver_factory.cc +2 -3
- data/src/core/ext/filters/client_channel/resolver_factory.h +2 -12
- data/src/core/ext/filters/client_channel/resolver_registry.cc +12 -15
- data/src/core/ext/filters/client_channel/resolver_registry.h +3 -12
- data/src/core/ext/filters/client_channel/retry_throttle.h +0 -8
- data/src/core/ext/filters/client_channel/subchannel.cc +289 -301
- data/src/core/ext/filters/client_channel/subchannel.h +57 -84
- data/src/core/ext/filters/client_channel/subchannel_index.cc +30 -33
- data/src/core/ext/filters/client_channel/subchannel_index.h +4 -16
- data/src/core/ext/filters/client_channel/uri_parser.cc +13 -17
- data/src/core/ext/filters/client_channel/uri_parser.h +1 -10
- data/src/core/ext/filters/deadline/deadline_filter.cc +49 -67
- data/src/core/ext/filters/deadline/deadline_filter.h +4 -14
- data/src/core/ext/filters/http/client/http_client_filter.cc +60 -77
- data/src/core/ext/filters/http/client/http_client_filter.h +0 -8
- data/src/core/ext/filters/http/http_filters_plugin.cc +4 -6
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +63 -79
- data/src/core/ext/filters/http/message_compress/message_compress_filter.h +0 -8
- data/src/core/ext/filters/http/server/http_server_filter.cc +57 -71
- data/src/core/ext/filters/http/server/http_server_filter.h +0 -8
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +19 -24
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.h +0 -8
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc +3 -3
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +0 -8
- data/src/core/ext/filters/max_age/max_age_filter.cc +49 -62
- data/src/core/ext/filters/max_age/max_age_filter.h +0 -8
- data/src/core/ext/filters/message_size/message_size_filter.cc +23 -29
- data/src/core/ext/filters/message_size/message_size_filter.h +0 -8
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +15 -18
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +0 -8
- data/src/core/ext/filters/workarounds/workaround_utils.h +0 -8
- data/src/core/ext/transport/chttp2/alpn/alpn.h +0 -8
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +33 -40
- data/src/core/ext/transport/chttp2/client/chttp2_connector.h +0 -8
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +15 -17
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +8 -8
- data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +23 -28
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +50 -57
- data/src/core/ext/transport/chttp2/server/chttp2_server.h +1 -10
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc +3 -3
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +7 -10
- data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +5 -6
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +7 -9
- data/src/core/ext/transport/chttp2/transport/bin_decoder.h +2 -11
- data/src/core/ext/transport/chttp2/transport/bin_encoder.h +1 -9
- data/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc +10 -2
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +516 -636
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +4 -11
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +29 -13
- data/src/core/ext/transport/chttp2/transport/flow_control.h +196 -53
- data/src/core/ext/transport/chttp2/transport/frame.h +0 -8
- data/src/core/ext/transport/chttp2/transport/frame_data.cc +31 -33
- data/src/core/ext/transport/chttp2/transport/frame_data.h +3 -12
- data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +2 -3
- data/src/core/ext/transport/chttp2/transport/frame_goaway.h +1 -10
- data/src/core/ext/transport/chttp2/transport/frame_ping.cc +5 -6
- data/src/core/ext/transport/chttp2/transport/frame_ping.h +1 -9
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +2 -3
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +1 -10
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +8 -3
- data/src/core/ext/transport/chttp2/transport/frame_settings.h +1 -10
- data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +8 -8
- data/src/core/ext/transport/chttp2/transport/frame_window_update.h +5 -11
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +63 -81
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +2 -12
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +230 -318
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +6 -19
- data/src/core/ext/transport/chttp2/transport/hpack_table.cc +14 -20
- data/src/core/ext/transport/chttp2/transport/hpack_table.h +5 -16
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +0 -7
- data/src/core/ext/transport/chttp2/transport/huffsyms.h +0 -8
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +8 -11
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +4 -13
- data/src/core/ext/transport/chttp2/transport/internal.h +51 -75
- data/src/core/ext/transport/chttp2/transport/parsing.cc +83 -109
- data/src/core/ext/transport/chttp2/transport/stream_lists.cc +2 -0
- data/src/core/ext/transport/chttp2/transport/stream_map.h +0 -8
- data/src/core/ext/transport/chttp2/transport/varint.h +0 -8
- data/src/core/ext/transport/chttp2/transport/writing.cc +61 -65
- data/src/core/ext/transport/inproc/inproc_plugin.cc +2 -4
- data/src/core/ext/transport/inproc/inproc_transport.cc +177 -188
- data/src/core/ext/transport/inproc/inproc_transport.h +0 -8
- data/src/core/lib/backoff/backoff.cc +39 -44
- data/src/core/lib/backoff/backoff.h +61 -57
- data/src/core/lib/channel/channel_args.cc +8 -10
- data/src/core/lib/channel/channel_args.h +4 -13
- data/src/core/lib/channel/channel_stack.cc +19 -27
- data/src/core/lib/channel/channel_stack.h +27 -47
- data/src/core/lib/channel/channel_stack_builder.cc +11 -14
- data/src/core/lib/channel/channel_stack_builder.h +4 -15
- data/src/core/lib/channel/connected_channel.cc +23 -36
- data/src/core/lib/channel/connected_channel.h +1 -10
- data/src/core/lib/channel/handshaker.cc +31 -40
- data/src/core/lib/channel/handshaker.h +14 -25
- data/src/core/lib/channel/handshaker_factory.cc +6 -6
- data/src/core/lib/channel/handshaker_factory.h +5 -15
- data/src/core/lib/channel/handshaker_registry.cc +9 -13
- data/src/core/lib/channel/handshaker_registry.h +2 -11
- data/src/core/lib/compression/algorithm_metadata.h +0 -8
- data/src/core/lib/compression/message_compress.cc +19 -23
- data/src/core/lib/compression/message_compress.h +2 -12
- data/src/core/lib/compression/stream_compression.cc +1 -1
- data/src/core/lib/compression/stream_compression.h +0 -8
- data/src/core/lib/compression/stream_compression_gzip.cc +12 -11
- data/src/core/lib/compression/stream_compression_gzip.h +0 -8
- data/src/core/lib/compression/stream_compression_identity.h +0 -8
- data/src/core/lib/debug/stats.cc +4 -4
- data/src/core/lib/debug/stats.h +9 -19
- data/src/core/lib/debug/stats_data.cc +85 -116
- data/src/core/lib/debug/stats_data.h +236 -312
- data/src/core/lib/debug/trace.cc +1 -1
- data/src/core/lib/debug/trace.h +0 -12
- data/src/core/lib/{support → gpr++}/abstract.h +8 -3
- data/src/core/lib/{support → gpr++}/atomic.h +5 -5
- data/src/core/lib/{support → gpr++}/atomic_with_atm.h +3 -3
- data/src/core/lib/{support → gpr++}/atomic_with_std.h +3 -3
- data/src/core/lib/gpr++/debug_location.h +52 -0
- data/src/core/lib/gpr++/inlined_vector.h +112 -0
- data/src/core/lib/{support → gpr++}/manual_constructor.h +2 -2
- data/src/core/lib/{support → gpr++}/memory.h +3 -3
- data/src/core/lib/gpr++/orphanable.h +171 -0
- data/src/core/lib/gpr++/ref_counted.h +133 -0
- data/src/core/lib/gpr++/ref_counted_ptr.h +99 -0
- data/src/core/lib/{support → gpr}/alloc.cc +0 -0
- data/src/core/lib/{support → gpr}/arena.cc +1 -1
- data/src/core/lib/{support → gpr}/arena.h +3 -11
- data/src/core/lib/{support → gpr}/atm.cc +0 -0
- data/src/core/lib/{support → gpr}/avl.cc +0 -0
- data/src/core/lib/{support → gpr}/cmdline.cc +1 -1
- data/src/core/lib/{support → gpr}/cpu_iphone.cc +0 -0
- data/src/core/lib/{support → gpr}/cpu_linux.cc +0 -0
- data/src/core/lib/{support → gpr}/cpu_posix.cc +0 -0
- data/src/core/lib/{support → gpr}/cpu_windows.cc +0 -0
- data/src/core/lib/{support → gpr}/env.h +3 -11
- data/src/core/lib/{support → gpr}/env_linux.cc +2 -2
- data/src/core/lib/{support → gpr}/env_posix.cc +4 -4
- data/src/core/lib/{support → gpr}/env_windows.cc +3 -3
- data/src/core/lib/{support → gpr}/fork.cc +3 -3
- data/src/core/lib/{support → gpr}/fork.h +3 -3
- data/src/core/lib/{support → gpr}/host_port.cc +1 -1
- data/src/core/lib/{support → gpr}/log.cc +3 -3
- data/src/core/lib/{support → gpr}/log_android.cc +3 -3
- data/src/core/lib/{support → gpr}/log_linux.cc +1 -1
- data/src/core/lib/{support → gpr}/log_posix.cc +5 -5
- data/src/core/lib/{support → gpr}/log_windows.cc +3 -3
- data/src/core/lib/{support → gpr}/mpscq.cc +1 -1
- data/src/core/lib/{support → gpr}/mpscq.h +3 -10
- data/src/core/lib/{support → gpr}/murmur_hash.cc +1 -1
- data/src/core/lib/{support → gpr}/murmur_hash.h +3 -11
- data/src/core/lib/{support → gpr}/spinlock.h +3 -3
- data/src/core/lib/{support → gpr}/string.cc +1 -1
- data/src/core/lib/{support → gpr}/string.h +3 -10
- data/src/core/lib/{support → gpr}/string_posix.cc +0 -0
- data/src/core/lib/{support → gpr}/string_util_windows.cc +2 -2
- data/src/core/lib/{support → gpr}/string_windows.cc +1 -1
- data/src/core/lib/{support → gpr}/string_windows.h +3 -11
- data/src/core/lib/{support → gpr}/subprocess_posix.cc +0 -0
- data/src/core/lib/{support → gpr}/subprocess_windows.cc +2 -2
- data/src/core/lib/{support → gpr}/sync.cc +0 -0
- data/src/core/lib/{support → gpr}/sync_posix.cc +10 -1
- data/src/core/lib/{support → gpr}/sync_windows.cc +0 -0
- data/src/core/lib/{support → gpr}/thd.cc +0 -0
- data/src/core/lib/{support → gpr}/thd_internal.h +3 -3
- data/src/core/lib/{support → gpr}/thd_posix.cc +18 -2
- data/src/core/lib/{support → gpr}/thd_windows.cc +2 -1
- data/src/core/lib/{support → gpr}/time.cc +0 -0
- data/src/core/lib/{support → gpr}/time_posix.cc +2 -4
- data/src/core/lib/{support → gpr}/time_precise.cc +1 -1
- data/src/core/lib/{support → gpr}/time_precise.h +3 -11
- data/src/core/lib/{support → gpr}/time_windows.cc +1 -3
- data/src/core/lib/{support → gpr}/tls_pthread.cc +0 -0
- data/src/core/lib/{support → gpr}/tmpfile.h +3 -11
- data/src/core/lib/{support → gpr}/tmpfile_msys.cc +2 -2
- data/src/core/lib/{support → gpr}/tmpfile_posix.cc +2 -2
- data/src/core/lib/{support → gpr}/tmpfile_windows.cc +2 -2
- data/src/core/lib/{support → gpr}/wrap_memcpy.cc +0 -0
- data/src/core/lib/http/format_request.cc +1 -1
- data/src/core/lib/http/format_request.h +0 -8
- data/src/core/lib/http/httpcli.cc +55 -74
- data/src/core/lib/http/httpcli.h +13 -22
- data/src/core/lib/http/httpcli_security_connector.cc +27 -33
- data/src/core/lib/http/parser.h +0 -8
- data/src/core/lib/iomgr/block_annotate.h +10 -17
- data/src/core/lib/iomgr/call_combiner.cc +14 -17
- data/src/core/lib/iomgr/call_combiner.h +16 -34
- data/src/core/lib/iomgr/closure.h +24 -37
- data/src/core/lib/iomgr/combiner.cc +62 -66
- data/src/core/lib/iomgr/combiner.h +6 -16
- data/src/core/lib/iomgr/endpoint.cc +15 -21
- data/src/core/lib/iomgr/endpoint.h +16 -33
- data/src/core/lib/iomgr/endpoint_pair.h +0 -8
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +4 -5
- data/src/core/lib/iomgr/endpoint_pair_windows.cc +4 -6
- data/src/core/lib/iomgr/error.cc +2 -6
- data/src/core/lib/iomgr/error.h +4 -9
- data/src/core/lib/iomgr/error_internal.h +0 -8
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +110 -117
- data/src/core/lib/iomgr/ev_epoll1_linux.h +0 -8
- data/src/core/lib/iomgr/ev_epollex_linux.cc +111 -141
- data/src/core/lib/iomgr/ev_epollex_linux.h +0 -8
- data/src/core/lib/iomgr/ev_epollsig_linux.cc +83 -109
- data/src/core/lib/iomgr/ev_epollsig_linux.h +2 -10
- data/src/core/lib/iomgr/ev_poll_posix.cc +103 -125
- data/src/core/lib/iomgr/ev_poll_posix.h +0 -8
- data/src/core/lib/iomgr/ev_posix.cc +35 -50
- data/src/core/lib/iomgr/ev_posix.h +27 -53
- data/src/core/lib/iomgr/exec_ctx.cc +46 -78
- data/src/core/lib/iomgr/exec_ctx.h +127 -60
- data/src/core/lib/iomgr/executor.cc +34 -38
- data/src/core/lib/iomgr/executor.h +3 -11
- data/src/core/lib/iomgr/fork_posix.cc +13 -12
- data/src/core/lib/iomgr/gethostname.h +0 -8
- data/src/core/lib/iomgr/gethostname_sysconf.cc +1 -1
- data/src/core/lib/iomgr/iocp_windows.cc +14 -16
- data/src/core/lib/iomgr/iocp_windows.h +1 -10
- data/src/core/lib/iomgr/iomgr.cc +60 -59
- data/src/core/lib/iomgr/iomgr.h +3 -12
- data/src/core/lib/iomgr/iomgr_internal.h +0 -8
- data/src/core/lib/iomgr/iomgr_uv.cc +2 -3
- data/src/core/lib/iomgr/iomgr_uv.h +0 -8
- data/src/core/lib/iomgr/is_epollexclusive_available.cc +1 -1
- data/src/core/lib/iomgr/load_file.cc +1 -1
- data/src/core/lib/iomgr/load_file.h +0 -8
- data/src/core/lib/iomgr/lockfree_event.cc +7 -8
- data/src/core/lib/iomgr/lockfree_event.h +3 -3
- data/src/core/lib/iomgr/polling_entity.cc +6 -10
- data/src/core/lib/iomgr/polling_entity.h +2 -11
- data/src/core/lib/iomgr/pollset.h +4 -13
- data/src/core/lib/iomgr/pollset_set.h +5 -18
- data/src/core/lib/iomgr/pollset_set_uv.cc +5 -10
- data/src/core/lib/iomgr/pollset_set_windows.cc +5 -10
- data/src/core/lib/iomgr/pollset_uv.cc +8 -9
- data/src/core/lib/iomgr/pollset_uv.h +0 -8
- data/src/core/lib/iomgr/pollset_windows.cc +14 -15
- data/src/core/lib/iomgr/pollset_windows.h +0 -8
- data/src/core/lib/iomgr/port.h +6 -1
- data/src/core/lib/iomgr/resolve_address.h +1 -10
- data/src/core/lib/iomgr/resolve_address_posix.cc +10 -12
- data/src/core/lib/iomgr/resolve_address_uv.cc +7 -8
- data/src/core/lib/iomgr/resolve_address_windows.cc +8 -9
- data/src/core/lib/iomgr/resource_quota.cc +77 -107
- data/src/core/lib/iomgr/resource_quota.h +8 -25
- data/src/core/lib/iomgr/sockaddr_utils.cc +1 -1
- data/src/core/lib/iomgr/sockaddr_utils.h +0 -8
- data/src/core/lib/iomgr/socket_factory_posix.cc +1 -1
- data/src/core/lib/iomgr/socket_factory_posix.h +0 -8
- data/src/core/lib/iomgr/socket_mutator.cc +1 -1
- data/src/core/lib/iomgr/socket_mutator.h +1 -9
- data/src/core/lib/iomgr/socket_utils.h +0 -8
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +1 -1
- data/src/core/lib/iomgr/socket_utils_posix.h +0 -8
- data/src/core/lib/iomgr/socket_windows.cc +8 -11
- data/src/core/lib/iomgr/socket_windows.h +3 -14
- data/src/core/lib/iomgr/tcp_client.h +1 -10
- data/src/core/lib/iomgr/tcp_client_posix.cc +94 -78
- data/src/core/lib/iomgr/tcp_client_posix.h +36 -8
- data/src/core/lib/iomgr/tcp_client_uv.cc +16 -23
- data/src/core/lib/iomgr/tcp_client_windows.cc +22 -25
- data/src/core/lib/iomgr/tcp_posix.cc +131 -153
- data/src/core/lib/iomgr/tcp_posix.h +3 -12
- data/src/core/lib/iomgr/tcp_server.h +6 -17
- data/src/core/lib/iomgr/tcp_server_posix.cc +31 -35
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +0 -8
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +1 -1
- data/src/core/lib/iomgr/tcp_server_uv.cc +23 -34
- data/src/core/lib/iomgr/tcp_server_windows.cc +24 -34
- data/src/core/lib/iomgr/tcp_uv.cc +42 -56
- data/src/core/lib/iomgr/tcp_uv.h +0 -8
- data/src/core/lib/iomgr/tcp_windows.cc +43 -50
- data/src/core/lib/iomgr/tcp_windows.h +1 -9
- data/src/core/lib/iomgr/time_averaged_stats.h +0 -8
- data/src/core/lib/iomgr/timer.h +6 -15
- data/src/core/lib/iomgr/timer_generic.cc +22 -27
- data/src/core/lib/iomgr/timer_heap.h +0 -8
- data/src/core/lib/iomgr/timer_manager.cc +17 -19
- data/src/core/lib/iomgr/timer_manager.h +0 -8
- data/src/core/lib/iomgr/timer_uv.cc +12 -14
- data/src/core/lib/iomgr/udp_server.cc +148 -54
- data/src/core/lib/iomgr/udp_server.h +16 -21
- data/src/core/lib/iomgr/unix_sockets_posix.h +0 -8
- data/src/core/lib/iomgr/wakeup_fd_cv.cc +4 -4
- data/src/core/lib/iomgr/wakeup_fd_cv.h +12 -20
- data/src/core/lib/iomgr/wakeup_fd_nospecial.cc +1 -1
- data/src/core/lib/iomgr/wakeup_fd_pipe.h +0 -8
- data/src/core/lib/iomgr/wakeup_fd_posix.h +0 -8
- data/src/core/lib/json/json.h +0 -8
- data/src/core/lib/json/json_reader.h +0 -8
- data/src/core/lib/json/json_writer.h +0 -8
- data/src/core/lib/profiling/basic_timers.cc +3 -2
- data/src/core/lib/profiling/timers.h +0 -8
- data/src/core/lib/security/context/security_context.cc +9 -10
- data/src/core/lib/security/context/security_context.h +0 -8
- data/src/core/lib/security/credentials/composite/composite_credentials.cc +23 -28
- data/src/core/lib/security/credentials/composite/composite_credentials.h +0 -8
- data/src/core/lib/security/credentials/credentials.cc +33 -42
- data/src/core/lib/security/credentials/credentials.h +24 -43
- data/src/core/lib/security/credentials/credentials_metadata.cc +2 -2
- data/src/core/lib/security/credentials/fake/fake_credentials.cc +16 -22
- data/src/core/lib/security/credentials/fake/fake_credentials.h +0 -8
- data/src/core/lib/security/credentials/google_default/credentials_generic.cc +3 -3
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +28 -34
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +0 -8
- data/src/core/lib/security/credentials/iam/iam_credentials.cc +9 -13
- data/src/core/lib/security/credentials/jwt/json_token.cc +1 -1
- data/src/core/lib/security/credentials/jwt/json_token.h +0 -8
- data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +14 -20
- data/src/core/lib/security/credentials/jwt/jwt_credentials.h +1 -10
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +56 -72
- data/src/core/lib/security/credentials/jwt/jwt_verifier.h +5 -17
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +47 -55
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +3 -12
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +23 -28
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +8 -13
- data/src/core/lib/security/credentials/ssl/ssl_credentials.h +0 -8
- data/src/core/lib/security/transport/auth_filters.h +0 -8
- data/src/core/lib/security/transport/client_auth_filter.cc +45 -54
- data/src/core/lib/security/transport/lb_targets_info.cc +2 -2
- data/src/core/lib/security/transport/lb_targets_info.h +0 -8
- data/src/core/lib/security/transport/secure_endpoint.cc +54 -68
- data/src/core/lib/security/transport/secure_endpoint.h +0 -8
- data/src/core/lib/security/transport/security_connector.cc +62 -86
- data/src/core/lib/security/transport/security_connector.h +22 -39
- data/src/core/lib/security/transport/security_handshaker.cc +83 -106
- data/src/core/lib/security/transport/security_handshaker.h +1 -10
- data/src/core/lib/security/transport/server_auth_filter.cc +31 -38
- data/src/core/lib/security/transport/tsi_error.h +0 -8
- data/src/core/lib/security/util/json_util.h +0 -8
- data/src/core/lib/slice/b64.cc +5 -6
- data/src/core/lib/slice/b64.h +3 -12
- data/src/core/lib/slice/percent_encoding.h +0 -8
- data/src/core/lib/slice/slice.cc +8 -9
- data/src/core/lib/slice/slice_buffer.cc +11 -16
- data/src/core/lib/slice/slice_hash_table.cc +5 -7
- data/src/core/lib/slice/slice_hash_table.h +2 -12
- data/src/core/lib/slice/slice_intern.cc +4 -5
- data/src/core/lib/slice/slice_internal.h +4 -15
- data/src/core/lib/slice/slice_string_helpers.cc +1 -1
- data/src/core/lib/slice/slice_string_helpers.h +1 -9
- data/src/core/lib/surface/alarm.cc +11 -14
- data/src/core/lib/surface/alarm_internal.h +0 -8
- data/src/core/lib/surface/byte_buffer.cc +2 -3
- data/src/core/lib/surface/byte_buffer_reader.cc +7 -9
- data/src/core/lib/surface/call.cc +198 -241
- data/src/core/lib/surface/call.h +9 -23
- data/src/core/lib/surface/call_details.cc +3 -4
- data/src/core/lib/surface/call_log_batch.cc +1 -1
- data/src/core/lib/surface/call_test_only.h +0 -8
- data/src/core/lib/surface/channel.cc +53 -64
- data/src/core/lib/surface/channel.h +12 -23
- data/src/core/lib/surface/channel_init.cc +2 -3
- data/src/core/lib/surface/channel_init.h +2 -12
- data/src/core/lib/surface/channel_ping.cc +7 -9
- data/src/core/lib/surface/channel_stack_type.h +0 -8
- data/src/core/lib/surface/completion_queue.cc +158 -176
- data/src/core/lib/surface/completion_queue.h +9 -20
- data/src/core/lib/surface/completion_queue_factory.h +0 -8
- data/src/core/lib/surface/event_string.cc +1 -1
- data/src/core/lib/surface/event_string.h +0 -8
- data/src/core/lib/surface/init.cc +27 -25
- data/src/core/lib/surface/init.h +0 -8
- data/src/core/lib/surface/init_secure.cc +2 -2
- data/src/core/lib/surface/lame_client.cc +30 -33
- data/src/core/lib/surface/lame_client.h +0 -8
- data/src/core/lib/surface/server.cc +151 -203
- data/src/core/lib/surface/server.h +7 -16
- data/src/core/lib/surface/validate_metadata.h +0 -8
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/bdp_estimator.cc +2 -2
- data/src/core/lib/transport/bdp_estimator.h +1 -1
- data/src/core/lib/transport/byte_stream.cc +24 -38
- data/src/core/lib/transport/byte_stream.h +10 -25
- data/src/core/lib/transport/connectivity_state.cc +9 -13
- data/src/core/lib/transport/connectivity_state.h +4 -14
- data/src/core/lib/transport/error_utils.cc +6 -6
- data/src/core/lib/transport/error_utils.h +2 -11
- data/src/core/lib/transport/metadata.cc +21 -23
- data/src/core/lib/transport/metadata.h +8 -20
- data/src/core/lib/transport/metadata_batch.cc +34 -45
- data/src/core/lib/transport/metadata_batch.h +18 -32
- data/src/core/lib/transport/service_config.cc +11 -15
- data/src/core/lib/transport/service_config.h +3 -13
- data/src/core/lib/transport/static_metadata.cc +1 -1
- data/src/core/lib/transport/static_metadata.h +1 -7
- data/src/core/lib/transport/status_conversion.cc +2 -3
- data/src/core/lib/transport/status_conversion.h +1 -10
- data/src/core/lib/transport/timeout_encoding.cc +1 -1
- data/src/core/lib/transport/timeout_encoding.h +1 -9
- data/src/core/lib/transport/transport.cc +36 -50
- data/src/core/lib/transport/transport.h +28 -30
- data/src/core/lib/transport/transport_impl.h +12 -23
- data/src/core/lib/transport/transport_op_string.cc +2 -2
- data/src/core/plugin_registry/grpc_plugin_registry.cc +34 -34
- data/src/core/tsi/fake_transport_security.cc +7 -10
- data/src/core/tsi/fake_transport_security.h +0 -8
- data/src/core/tsi/gts_transport_security.cc +2 -2
- data/src/core/tsi/gts_transport_security.h +0 -8
- data/src/core/tsi/ssl_transport_security.cc +3 -0
- data/src/core/tsi/ssl_transport_security.h +0 -8
- data/src/core/tsi/ssl_types.h +0 -8
- data/src/core/tsi/transport_security.h +1 -9
- data/src/core/tsi/transport_security_adapter.h +0 -8
- data/src/core/tsi/transport_security_grpc.cc +11 -18
- data/src/core/tsi/transport_security_grpc.h +9 -21
- data/src/core/tsi/transport_security_interface.h +0 -8
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +0 -30
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +2 -48
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/channel_connection_spec.rb +2 -1
- data/src/ruby/spec/client_auth_spec.rb +1 -1
- data/src/ruby/spec/client_server_spec.rb +2 -2
- data/src/ruby/spec/generic/active_call_spec.rb +1 -1
- data/src/ruby/spec/generic/client_stub_spec.rb +4 -4
- data/src/ruby/spec/generic/interceptor_registry_spec.rb +1 -1
- data/src/ruby/spec/generic/rpc_server_spec.rb +12 -12
- data/src/ruby/spec/google_rpc_status_utils_spec.rb +3 -2
- data/src/ruby/spec/pb/health/checker_spec.rb +1 -1
- data/src/ruby/spec/server_spec.rb +9 -9
- data/src/ruby/spec/support/helpers.rb +35 -1
- metadata +68 -66
- data/include/grpc/impl/codegen/exec_ctx_fwd.h +0 -26
- data/include/grpc/support/histogram.h +0 -64
- data/src/core/lib/support/histogram.cc +0 -227
@@ -25,16 +25,15 @@
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/profiling/timers.h"
 
-static grpc_error* init_channel_elem(
-    grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(
-    grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
-
+namespace {
+struct call_data {
   // Stats object to update.
   grpc_grpclb_client_stats* client_stats;
   // State for intercepting send_initial_metadata.
@@ -45,30 +44,27 @@ typedef struct {
   grpc_closure recv_initial_metadata_ready;
   grpc_closure* original_recv_initial_metadata_ready;
   bool recv_initial_metadata_succeeded;
-}
+};
+}  // namespace
 
-static void on_complete_for_send(
-    grpc_error* error) {
+static void on_complete_for_send(void* arg, grpc_error* error) {
   call_data* calld = (call_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->send_initial_metadata_succeeded = true;
   }
-  GRPC_CLOSURE_RUN(
-      GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(calld->original_on_complete_for_send, GRPC_ERROR_REF(error));
 }
 
-static void recv_initial_metadata_ready(
-    grpc_error* error) {
+static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   call_data* calld = (call_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->recv_initial_metadata_succeeded = true;
   }
-  GRPC_CLOSURE_RUN(
+  GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready,
                    GRPC_ERROR_REF(error));
 }
 
-static grpc_error* init_call_elem(
-    grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   // Get stats object from context and take a ref.
@@ -81,7 +77,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_call_elem(
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
@@ -96,8 +92,7 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
 }
 
 static void start_transport_stream_op_batch(
-
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
   // Intercept send_initial_metadata.
@@ -118,7 +113,7 @@ static void start_transport_stream_op_batch(
       &calld->recv_initial_metadata_ready;
   }
   // Chain to next filter.
-  grpc_call_next_op(
+  grpc_call_next_op(elem, batch);
   GPR_TIMER_END("clr_start_transport_stream_op_batch", 0);
 }
 
@@ -21,15 +21,7 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_client_load_reporting_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
         */
@@ -54,7 +54,7 @@
  * operations in progress over the old RR instance. This is done by
  * decreasing the reference count on the old policy. The moment no more
  * references are held on the old RR policy, it'll be destroyed and \a
- *
+ * on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
  * state. At this point we can transition to a new RR instance safely, which
  * is done once again via \a rr_handover_locked().
  *
@@ -106,6 +106,8 @@
 #include "src/core/lib/backoff/backoff.h"
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/gpr++/manual_constructor.h"
+#include "src/core/lib/gpr/string.h"
 #include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/sockaddr.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -113,13 +115,11 @@
 #include "src/core/lib/slice/slice_hash_table.h"
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/slice/slice_string_helpers.h"
-#include "src/core/lib/support/string.h"
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/surface/channel.h"
 #include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/static_metadata.h"
 
-#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
 #define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
 #define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
 #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
@@ -128,174 +128,48 @@
 
 grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  grpc_closure* wrapped_closure;
-
-  /* the pick's initial metadata, kept in order to append the LB token for the
-   * pick */
-  grpc_metadata_batch* initial_metadata;
-
-  /* the picked target, used to determine which LB token to add to the pick's
-   * initial metadata */
-  grpc_connected_subchannel** target;
-
-  /* the context to be populated for the subchannel call */
-  grpc_call_context_element* context;
-
-  /* Stats for client-side load reporting. Note that this holds a
-   * reference, which must be either passed on via context or unreffed. */
+struct glb_lb_policy;
+
+namespace {
+
+/// Linked list of pending pick requests. It stores all information needed to
+/// eventually call (Round Robin's) pick() on them. They mainly stay pending
+/// waiting for the RR policy to be created.
+///
+/// Note that when a pick is sent to the RR policy, we inject our own
+/// on_complete callback, so that we can intercept the result before
+/// invoking the original on_complete callback. This allows us to set the
+/// LB token metadata and add client_stats to the call context.
+/// See \a pending_pick_complete() for details.
+struct pending_pick {
+  // Our on_complete closure and the original one.
+  grpc_closure on_complete;
+  grpc_closure* original_on_complete;
+  // The original pick.
+  grpc_lb_policy_pick_state* pick;
+  // Stats for client-side load reporting. Note that this holds a
+  // reference, which must be either passed on via context or unreffed.
   grpc_grpclb_client_stats* client_stats;
-
-
+  // The LB token associated with the pick. This is set via user_data in
+  // the pick.
   grpc_mdelem lb_token;
-
-
-
-
-  /* The RR instance related to the closure */
-  grpc_lb_policy* rr_policy;
-
-  /* The grpclb instance that created the wrapping. This instance is not owned,
-   * reference counts are untouched. It's used only for logging purposes. */
-  grpc_lb_policy* glb_policy;
-
-  /* heap memory to be freed upon closure execution. */
-  void* free_when_done;
-} wrapped_rr_closure_arg;
-
-/* The \a on_complete closure passed as part of the pick requires keeping a
- * reference to its associated round robin instance. We wrap this closure in
- * order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error) {
-  wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
-
-  GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
-  GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
-
-  if (wc_arg->rr_policy != nullptr) {
-    /* if *target is NULL, no pick has been made by the RR policy (eg, all
-     * addresses failed to connect). There won't be any user_data/token
-     * available */
-    if (*wc_arg->target != nullptr) {
-      if (!GRPC_MDISNULL(wc_arg->lb_token)) {
-        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
-                                      wc_arg->lb_token_mdelem_storage,
-                                      GRPC_MDELEM_REF(wc_arg->lb_token));
-      } else {
-        gpr_log(
-            GPR_ERROR,
-            "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
-            "instance %p).",
-            wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
-        abort();
-      }
-      // Pass on client stats via context. Passes ownership of the reference.
-      GPR_ASSERT(wc_arg->client_stats != nullptr);
-      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
-      wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
-    } else {
-      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
-    }
-    if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
-              wc_arg->rr_policy);
-    }
-    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
-  }
-  GPR_ASSERT(wc_arg->free_when_done != nullptr);
-  gpr_free(wc_arg->free_when_done);
-}
-
-/* Linked list of pending pick requests. It stores all information needed to
- * eventually call (Round Robin's) pick() on them. They mainly stay pending
- * waiting for the RR policy to be created/updated.
- *
- * One particularity is the wrapping of the user-provided \a on_complete closure
- * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
- * order to correctly unref the RR policy instance upon completion of the pick.
- * See \a wrapped_rr_closure for details. */
-typedef struct pending_pick {
+  // The grpclb instance that created the wrapping. This instance is not owned,
+  // reference counts are untouched. It's used only for logging purposes.
+  glb_lb_policy* glb_policy;
+  // Next pending pick.
   struct pending_pick* next;
+};
 
-
-
-
-
-   * upon error. */
-  grpc_connected_subchannel** target;
-
-  /* args for wrapped_on_complete */
-  wrapped_rr_closure_arg wrapped_on_complete_arg;
-} pending_pick;
-
-static void add_pending_pick(pending_pick** root,
-                             const grpc_lb_policy_pick_args* pick_args,
-                             grpc_connected_subchannel** target,
-                             grpc_call_context_element* context,
-                             grpc_closure* on_complete) {
-  pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
-  pp->next = *root;
-  pp->pick_args = *pick_args;
-  pp->target = target;
-  pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
-  pp->wrapped_on_complete_arg.target = target;
-  pp->wrapped_on_complete_arg.context = context;
-  pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
-  pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
-      pick_args->lb_token_mdelem_storage;
-  pp->wrapped_on_complete_arg.free_when_done = pp;
-  GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
-                    wrapped_rr_closure, &pp->wrapped_on_complete_arg,
-                    grpc_schedule_on_exec_ctx);
-  *root = pp;
-}
-
-/* Same as the \a pending_pick struct but for ping operations */
-typedef struct pending_ping {
+/// A linked list of pending pings waiting for the RR policy to be created.
+struct pending_ping {
+  grpc_closure* on_initiate;
+  grpc_closure* on_ack;
   struct pending_ping* next;
+};
 
-
-  wrapped_rr_closure_arg wrapped_notify_arg;
-} pending_ping;
-
-static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
-  pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
-  pping->wrapped_notify_arg.wrapped_closure = notify;
-  pping->wrapped_notify_arg.free_when_done = pping;
-  pping->next = *root;
-  GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
-                    wrapped_rr_closure, &pping->wrapped_notify_arg,
-                    grpc_schedule_on_exec_ctx);
-  *root = pping;
-}
-
-/*
- * glb_lb_policy
- */
-typedef struct rr_connectivity_data rr_connectivity_data;
+}  // namespace
 
-
+struct glb_lb_policy {
   /** base policy: must be first */
   grpc_lb_policy base;
 
@@ -320,6 +194,9 @@ typedef struct glb_lb_policy {
   /** the RR policy to use of the backend servers returned by the LB server */
   grpc_lb_policy* rr_policy;
 
+  grpc_closure on_rr_connectivity_changed;
+  grpc_connectivity_state rr_connectivity_state;
+
   bool started_picking;
 
   /** our connectivity state tracker */
@@ -328,8 +205,8 @@ typedef struct glb_lb_policy {
   /** connectivity state of the LB channel */
   grpc_connectivity_state lb_channel_connectivity;
 
-  /** stores the deserialized response from the LB. May be
-   * response has arrived. */
+  /** stores the deserialized response from the LB. May be nullptr until one
+   * such response has arrived. */
   grpc_grpclb_serverlist* serverlist;
 
   /** Index into serverlist for next pick.
@@ -354,11 +231,11 @@ typedef struct glb_lb_policy {
   /** are we already watching the LB channel's connectivity? */
   bool watching_lb_channel;
 
-  /** is \a lb_call_retry_timer
-  bool
+  /** is the callback associated with \a lb_call_retry_timer pending? */
+  bool retry_timer_callback_pending;
 
-  /** is \a lb_fallback_timer
-  bool
+  /** is the callback associated with \a lb_fallback_timer pending? */
+  bool fallback_timer_callback_pending;
 
   /** called upon changes to the LB channel's connectivity. */
   grpc_closure lb_channel_on_connectivity_changed;
@@ -366,6 +243,9 @@ typedef struct glb_lb_policy {
   /************************************************************/
   /* client data associated with the LB server communication */
   /************************************************************/
+  /* Finished sending initial request. */
+  grpc_closure lb_on_sent_initial_request;
+
   /* Status from the LB server has been received. This signals the end of the LB
    * call. */
   grpc_closure lb_on_server_status_received;
@@ -397,7 +277,7 @@ typedef struct glb_lb_policy {
   grpc_slice lb_call_status_details;
 
   /** LB call retry backoff state */
-
+  grpc_core::ManualConstructor<grpc_core::BackOff> lb_call_backoff;
 
   /** LB call retry timer */
   grpc_timer lb_call_retry_timer;
@@ -405,6 +285,7 @@ typedef struct glb_lb_policy {
   /** LB fallback timer */
   grpc_timer lb_fallback_timer;
 
+  bool initial_request_sent;
   bool seen_initial_response;
 
   /* Stats for client-side load reporting. Should be unreffed and
@@ -413,22 +294,94 @@ typedef struct glb_lb_policy {
   /* Interval and timer for next client load report. */
   grpc_millis client_stats_report_interval;
   grpc_timer client_load_report_timer;
-  bool
+  bool client_load_report_timer_callback_pending;
   bool last_client_load_report_counters_were_zero;
   /* Closure used for either the load report timer or the callback for
    * completion of sending the load report. */
   grpc_closure client_load_report_closure;
   /* Client load report message payload. */
   grpc_byte_buffer* client_load_report_payload;
-} glb_lb_policy;
-
-/* Keeps track and reacts to changes in connectivity of the RR instance */
-struct rr_connectivity_data {
-  grpc_closure on_change;
-  grpc_connectivity_state state;
-  glb_lb_policy* glb_policy;
 };
 
+/* add lb_token of selected subchannel (address) to the call's initial
+ * metadata */
+static grpc_error* initial_metadata_add_lb_token(
+    grpc_metadata_batch* initial_metadata,
+    grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
+  GPR_ASSERT(lb_token_mdelem_storage != nullptr);
+  GPR_ASSERT(!GRPC_MDISNULL(lb_token));
+  return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
+                                      lb_token);
+}
+
+static void destroy_client_stats(void* arg) {
+  grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
+}
+
+static void pending_pick_set_metadata_and_context(pending_pick* pp) {
+  /* if connected_subchannel is nullptr, no pick has been made by the RR
+   * policy (e.g., all addresses failed to connect). There won't be any
+   * user_data/token available */
+  if (pp->pick->connected_subchannel != nullptr) {
+    if (!GRPC_MDISNULL(pp->lb_token)) {
+      initial_metadata_add_lb_token(pp->pick->initial_metadata,
+                                    &pp->pick->lb_token_mdelem_storage,
+                                    GRPC_MDELEM_REF(pp->lb_token));
+    } else {
+      gpr_log(GPR_ERROR,
+              "[grpclb %p] No LB token for connected subchannel pick %p",
+              pp->glb_policy, pp->pick);
+      abort();
+    }
+    // Pass on client stats via context. Passes ownership of the reference.
+    GPR_ASSERT(pp->client_stats != nullptr);
+    pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
+        pp->client_stats;
+    pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
+        destroy_client_stats;
+  } else {
+    if (pp->client_stats != nullptr) {
+      grpc_grpclb_client_stats_unref(pp->client_stats);
+    }
+  }
+}
+
+/* The \a on_complete closure passed as part of the pick requires keeping a
+ * reference to its associated round robin instance. We wrap this closure in
+ * order to unref the round robin instance upon its invocation */
+static void pending_pick_complete(void* arg, grpc_error* error) {
+  pending_pick* pp = (pending_pick*)arg;
+  pending_pick_set_metadata_and_context(pp);
+  GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
+  gpr_free(pp);
+}
+
+static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
+                                         grpc_lb_policy_pick_state* pick) {
+  pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
+  pp->pick = pick;
+  pp->glb_policy = glb_policy;
+  GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
+                    grpc_schedule_on_exec_ctx);
+  pp->original_on_complete = pick->on_complete;
+  pp->pick->on_complete = &pp->on_complete;
+  return pp;
+}
+
+static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
+  new_pp->next = *root;
+  *root = new_pp;
+}
+
+static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
+                             grpc_closure* on_ack) {
+  pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
+  pping->on_initiate = on_initiate;
+  pping->on_ack = on_ack;
+  pping->next = *root;
+  *root = pping;
+}
+
 static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
                             bool log) {
   if (server->drop) return false;
@@ -459,9 +412,9 @@ static void* lb_token_copy(void* token) {
|
|
459
412
|
? nullptr
|
460
413
|
: (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
|
461
414
|
}
|
462
|
-
static void lb_token_destroy(
|
415
|
+
static void lb_token_destroy(void* token) {
|
463
416
|
if (token != nullptr) {
|
464
|
-
GRPC_MDELEM_UNREF(
|
417
|
+
GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
|
465
418
|
}
|
466
419
|
}
|
467
420
|
static int lb_token_cmp(void* token1, void* token2) {
|
@@ -497,7 +450,7 @@ static void parse_server(const grpc_grpclb_server* server,
|
|
497
450
|
|
498
451
|
/* Returns addresses extracted from \a serverlist. */
|
499
452
|
static grpc_lb_addresses* process_serverlist_locked(
|
500
|
-
|
453
|
+
const grpc_grpclb_serverlist* serverlist) {
|
501
454
|
size_t num_valid = 0;
|
502
455
|
/* first pass: count how many are valid in order to allocate the necessary
|
503
456
|
* memory in a single block */
|
@@ -528,9 +481,9 @@ static grpc_lb_addresses* process_serverlist_locked(
|
|
528
481
|
strnlen(server->load_balance_token, lb_token_max_length);
|
529
482
|
grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
|
530
483
|
server->load_balance_token, lb_token_length);
|
531
|
-
user_data =
|
532
|
-
|
533
|
-
|
484
|
+
user_data =
|
485
|
+
(void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
|
486
|
+
.payload;
|
534
487
|
} else {
|
535
488
|
char* uri = grpc_sockaddr_to_uri(&addr);
|
536
489
|
gpr_log(GPR_INFO,
|
@@ -540,7 +493,6 @@ static grpc_lb_addresses* process_serverlist_locked(
|
|
540
493
|
gpr_free(uri);
|
541
494
|
user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
|
542
495
|
}
|
543
|
-
|
544
496
|
grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
|
545
497
|
false /* is_balancer */,
|
546
498
|
nullptr /* balancer_name */, user_data);
|
@@ -552,7 +504,7 @@ static grpc_lb_addresses* process_serverlist_locked(
|
|
552
504
|
|
553
505
|
/* Returns the backend addresses extracted from the given addresses */
|
554
506
|
static grpc_lb_addresses* extract_backend_addresses_locked(
|
555
|
-
|
507
|
+
const grpc_lb_addresses* addresses) {
|
556
508
|
/* first pass: count the number of backend addresses */
|
557
509
|
size_t num_backends = 0;
|
558
510
|
for (size_t i = 0; i < addresses->num_addresses; ++i) {
|
@@ -577,11 +529,10 @@ static grpc_lb_addresses* extract_backend_addresses_locked(
|
|
577
529
|
}
|
578
530
|
|
579
531
|
static void update_lb_connectivity_status_locked(
|
580
|
-
|
581
|
-
|
532
|
+
glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
|
533
|
+
grpc_error* rr_state_error) {
|
582
534
|
const grpc_connectivity_state curr_glb_state =
|
583
535
|
grpc_connectivity_state_check(&glb_policy->state_tracker);
|
584
|
-
|
585
536
|
/* The new connectivity status is a function of the previous one and the new
|
586
537
|
* input coming from the status of the RR policy.
|
587
538
|
*
|
@@ -611,7 +562,6 @@ static void update_lb_connectivity_status_locked(
|
|
611
562
|
*
|
612
563
|
* (*) This function mustn't be called during shutting down. */
|
613
564
|
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
|
614
|
-
|
615
565
|
switch (rr_state) {
|
616
566
|
case GRPC_CHANNEL_TRANSIENT_FAILURE:
|
617
567
|
case GRPC_CHANNEL_SHUTDOWN:
|
@@ -622,7 +572,6 @@ static void update_lb_connectivity_status_locked(
|
|
622
572
|
case GRPC_CHANNEL_READY:
|
623
573
|
GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
|
624
574
|
}
|
625
|
-
|
626
575
|
if (grpc_lb_glb_trace.enabled()) {
|
627
576
|
gpr_log(
|
628
577
|
GPR_INFO,
|
@@ -630,20 +579,18 @@ static void update_lb_connectivity_status_locked(
|
|
630
579
|
glb_policy, grpc_connectivity_state_name(rr_state),
|
631
580
|
glb_policy->rr_policy);
|
632
581
|
}
|
633
|
-
grpc_connectivity_state_set(
|
582
|
+
grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
|
634
583
|
rr_state_error,
|
635
584
|
"update_lb_connectivity_status_locked");
|
636
585
|
}
|
637
586
|
|
638
587
|
/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
|
639
588
|
* immediately (ignoring its completion callback), we need to perform the
|
640
|
-
* cleanups this callback would otherwise be
|
589
|
+
* cleanups this callback would otherwise be responsible for.
|
641
590
|
* If \a force_async is true, then we will manually schedule the
|
642
591
|
* completion callback even if the pick is available immediately. */
|
643
|
-
static bool pick_from_internal_rr_locked(
|
644
|
-
|
645
|
-
const grpc_lb_policy_pick_args* pick_args, bool force_async,
|
646
|
-
grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
|
592
|
+
static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
|
593
|
+
bool force_async, pending_pick* pp) {
|
647
594
|
// Check for drops if we are not using fallback backend addresses.
|
648
595
|
if (glb_policy->serverlist != nullptr) {
|
649
596
|
// Look at the index into the serverlist to see if we should drop this call.
|
@@ -653,57 +600,36 @@ static bool pick_from_internal_rr_locked(
       glb_policy->serverlist_index = 0;  // Wrap-around.
     }
     if (server->drop) {
-      // Not using the RR policy, so unref it.
-      if (grpc_lb_glb_trace.enabled()) {
-        gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
-                wc_arg->rr_policy);
-      }
-      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
       // Update client load reporting stats to indicate the number of
       // dropped calls. Note that we have to do this here instead of in
       // the client_load_reporting filter, because we do not create a
       // subchannel call (and therefore no client_load_reporting filter)
       // for dropped calls.
-      GPR_ASSERT(
+      GPR_ASSERT(glb_policy->client_stats != nullptr);
       grpc_grpclb_client_stats_add_call_dropped_locked(
-          server->load_balance_token,
-      grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+          server->load_balance_token, glb_policy->client_stats);
       if (force_async) {
-
-
-        gpr_free(wc_arg->free_when_done);
+        GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+        gpr_free(pp);
         return false;
       }
-      gpr_free(
+      gpr_free(pp);
       return true;
     }
   }
+  // Set client_stats and user_data.
+  pp->client_stats = grpc_grpclb_client_stats_ref(glb_policy->client_stats);
+  GPR_ASSERT(pp->pick->user_data == nullptr);
+  pp->pick->user_data = (void**)&pp->lb_token;
   // Pick via the RR policy.
-
-      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
-      (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+  bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
   if (pick_done) {
-
-    if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
-              wc_arg->rr_policy);
-    }
-    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
-    /* add the load reporting initial metadata */
-    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
-                                  pick_args->lb_token_mdelem_storage,
-                                  GRPC_MDELEM_REF(wc_arg->lb_token));
-    // Pass on client stats via context. Passes ownership of the reference.
-    GPR_ASSERT(wc_arg->client_stats != nullptr);
-    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
-    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
+    pending_pick_set_metadata_and_context(pp);
     if (force_async) {
-
-
-      gpr_free(wc_arg->free_when_done);
-      return false;
+      GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+      pick_done = false;
     }
-    gpr_free(
+    gpr_free(pp);
   }
   /* else, the pending pick will be registered and taken care of by the
    * pending pick list inside the RR policy (glb_policy->rr_policy).
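Note on the hunk above: the reworked pick path either completes synchronously or, when force_async is requested, schedules the caller's completion closure itself and reports the pick as still pending. A minimal, self-contained sketch of that convention, with hypothetical names rather than the gRPC types:

#include <functional>
#include <utility>

// Returns true when the caller may treat the result as available right now.
// Under force_async the callback is handed to the scheduler instead, so the
// caller never runs completion inline.
bool finish_pick(std::function<void()> on_complete, bool force_async,
                 const std::function<void(std::function<void()>)>& schedule) {
  if (force_async) {
    schedule(std::move(on_complete));  // completion runs later, off this stack
    return false;
  }
  return true;  // caller observes completion synchronously
}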
@@ -712,12 +638,11 @@ static bool pick_from_internal_rr_locked(
   return pick_done;
 }
 
-static grpc_lb_policy_args* lb_policy_args_create(
-    glb_lb_policy* glb_policy) {
+static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
   grpc_lb_addresses* addresses;
   if (glb_policy->serverlist != nullptr) {
     GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
-    addresses = process_serverlist_locked(
+    addresses = process_serverlist_locked(glb_policy->serverlist);
   } else {
     // If rr_handover_locked() is invoked when we haven't received any
     // serverlist from the balancer, we use the fallback backends returned by
@@ -737,24 +662,21 @@ static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
   args->args = grpc_channel_args_copy_and_add_and_remove(
       glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
       1);
-  grpc_lb_addresses_destroy(
+  grpc_lb_addresses_destroy(addresses);
   return args;
 }
 
-static void lb_policy_args_destroy(
-
-  grpc_channel_args_destroy(exec_ctx, args->args);
+static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
+  grpc_channel_args_destroy(args->args);
   gpr_free(args);
 }
 
-static void
-
-static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);
+static void create_rr_locked(glb_lb_policy* glb_policy,
                              grpc_lb_policy_args* args) {
   GPR_ASSERT(glb_policy->rr_policy == nullptr);
 
-  grpc_lb_policy* new_rr_policy =
-      grpc_lb_policy_create(exec_ctx, "round_robin", args);
+  grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
   if (new_rr_policy == nullptr) {
     gpr_log(GPR_ERROR,
             "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
@@ -766,125 +688,101 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
             glb_policy->rr_policy);
     return;
   }
+  grpc_lb_policy_set_reresolve_closure_locked(
+      new_rr_policy, glb_policy->base.request_reresolution);
+  glb_policy->base.request_reresolution = nullptr;
   glb_policy->rr_policy = new_rr_policy;
   grpc_error* rr_state_error = nullptr;
-
-
-      &rr_state_error);
+  glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
+      glb_policy->rr_policy, &rr_state_error);
   /* Connectivity state is a function of the RR policy updated/created */
-  update_lb_connectivity_status_locked(
-
+  update_lb_connectivity_status_locked(
+      glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
   /* Add the gRPC LB's interested_parties pollset_set to that of the newly
    * created RR policy. This will make the RR policy progress upon activity on
    * gRPC LB, which in turn is tied to the application's call */
-  grpc_pollset_set_add_pollset_set(
-      glb_policy->rr_policy->interested_parties,
+  grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
                                    glb_policy->base.interested_parties);
-
-
-   * It'll be deallocated in glb_rr_connectivity_changed() */
-  rr_connectivity_data* rr_connectivity =
-      (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
-  GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
-                    glb_rr_connectivity_changed_locked, rr_connectivity,
+  GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
+                    on_rr_connectivity_changed_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
-  rr_connectivity->glb_policy = glb_policy;
-  rr_connectivity->state = rr_state;
-
   /* Subscribe to changes to the connectivity of the new RR */
-
-  grpc_lb_policy_notify_on_state_change_locked(
-
-
-  grpc_lb_policy_exit_idle_locked(
-
-  /* Update picks and pings in wait */
+  GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
+  grpc_lb_policy_notify_on_state_change_locked(
+      glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
+      &glb_policy->on_rr_connectivity_changed);
+  grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
+  // Send pending picks to RR policy.
   pending_pick* pp;
   while ((pp = glb_policy->pending_picks)) {
     glb_policy->pending_picks = pp->next;
-    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
-    pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
-    pp->wrapped_on_complete_arg.client_stats =
-        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO,
               "[grpclb %p] Pending pick about to (async) PICK from RR %p",
              glb_policy, glb_policy->rr_policy);
     }
-    pick_from_internal_rr_locked(
-        true /* force_async */, pp->target,
-        &pp->wrapped_on_complete_arg);
+    pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
   }
-
+  // Send pending pings to RR policy.
   pending_ping* pping;
   while ((pping = glb_policy->pending_pings)) {
     glb_policy->pending_pings = pping->next;
-    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
-    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
              glb_policy, glb_policy->rr_policy);
     }
-    grpc_lb_policy_ping_one_locked(
-
+    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
+                                   pping->on_ack);
+    gpr_free(pping);
   }
 }
 
-/* glb_policy->rr_policy may be
-static void rr_handover_locked(
-    glb_lb_policy* glb_policy) {
+/* glb_policy->rr_policy may be nullptr (initial handover) */
+static void rr_handover_locked(glb_lb_policy* glb_policy) {
   if (glb_policy->shutting_down) return;
-  grpc_lb_policy_args* args = lb_policy_args_create(
+  grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
   GPR_ASSERT(args != nullptr);
   if (glb_policy->rr_policy != nullptr) {
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
               glb_policy->rr_policy);
     }
-    grpc_lb_policy_update_locked(
+    grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
   } else {
-    create_rr_locked(
+    create_rr_locked(glb_policy, args);
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
               glb_policy->rr_policy);
     }
   }
-  lb_policy_args_destroy(
+  lb_policy_args_destroy(args);
 }
 
-static void
-
-  rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
-  glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
+static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (glb_policy->shutting_down) {
-
-                         "glb_rr_connectivity_cb");
-    gpr_free(rr_connectivity);
+    GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
     return;
   }
-  if (
+  if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
    /* An RR policy that has transitioned into the SHUTDOWN connectivity state
     * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. .*/
-    GRPC_LB_POLICY_UNREF(
-                         "rr_connectivity_shutdown");
+    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
    glb_policy->rr_policy = nullptr;
-
-                         "glb_rr_connectivity_cb");
-    gpr_free(rr_connectivity);
+    GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
    return;
  }
   /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
   update_lb_connectivity_status_locked(
-
-  /* Resubscribe. Reuse the "glb_rr_connectivity_cb"
-  grpc_lb_policy_notify_on_state_change_locked(
-
-
+      glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
+  /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
+  grpc_lb_policy_notify_on_state_change_locked(
+      glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
+      &glb_policy->on_rr_connectivity_changed);
 }
 
-static void destroy_balancer_name(
-                                  void* balancer_name) {
+static void destroy_balancer_name(void* balancer_name) {
   gpr_free(balancer_name);
 }
 
@@ -911,7 +809,7 @@ static int balancer_name_cmp_fn(void* a, void* b) {
  * above the grpclb policy.
  * - \a args: other args inherited from the grpclb policy. */
 static grpc_channel_args* build_lb_channel_args(
-
+    const grpc_lb_addresses* addresses,
     grpc_fake_resolver_response_generator* response_generator,
     const grpc_channel_args* args) {
   size_t num_grpclb_addrs = 0;
@@ -954,7 +852,7 @@ static grpc_channel_args* build_lb_channel_args(
   gpr_free(targets_info_entries);
 
   grpc_channel_args* lb_channel_args =
-      grpc_lb_policy_grpclb_build_lb_channel_args(
+      grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
                                                   response_generator, args);
 
   grpc_arg lb_channel_addresses_arg =
@@ -962,65 +860,57 @@ static grpc_channel_args* build_lb_channel_args(
 
   grpc_channel_args* result = grpc_channel_args_copy_and_add(
       lb_channel_args, &lb_channel_addresses_arg, 1);
-  grpc_slice_hash_table_unref(
-  grpc_channel_args_destroy(
-  grpc_lb_addresses_destroy(
+  grpc_slice_hash_table_unref(targets_info);
+  grpc_channel_args_destroy(lb_channel_args);
+  grpc_lb_addresses_destroy(lb_addresses);
   return result;
 }
 
-static void glb_destroy(
+static void glb_destroy(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   GPR_ASSERT(glb_policy->pending_picks == nullptr);
   GPR_ASSERT(glb_policy->pending_pings == nullptr);
   gpr_free((void*)glb_policy->server_name);
-  grpc_channel_args_destroy(
+  grpc_channel_args_destroy(glb_policy->args);
   if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
   }
-  grpc_connectivity_state_destroy(
+  grpc_connectivity_state_destroy(&glb_policy->state_tracker);
   if (glb_policy->serverlist != nullptr) {
     grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
   }
   if (glb_policy->fallback_backend_addresses != nullptr) {
-    grpc_lb_addresses_destroy(
+    grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
   }
   grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
   grpc_subchannel_index_unref();
   gpr_free(glb_policy);
 }
 
-static void glb_shutdown_locked(
+static void glb_shutdown_locked(grpc_lb_policy* pol,
+                                grpc_lb_policy* new_policy) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   glb_policy->shutting_down = true;
-
-  /* We need a copy of the lb_call pointer because we can't cancell the call
-   * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
-   * the cancel, needs to acquire that same lock */
-  grpc_call* lb_call = glb_policy->lb_call;
-
   /* glb_policy->lb_call and this local lb_call must be consistent at this point
    * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
    * of query_for_backends_locked, which can only be invoked while
    * glb_policy->shutting_down is false. */
-  if (lb_call != nullptr) {
-    grpc_call_cancel(lb_call, nullptr);
+  if (glb_policy->lb_call != nullptr) {
+    grpc_call_cancel(glb_policy->lb_call, nullptr);
     /* lb_on_server_status_received will pick up the cancel and clean up */
   }
-  if (glb_policy->
-    grpc_timer_cancel(
-    glb_policy->retry_timer_active = false;
+  if (glb_policy->retry_timer_callback_pending) {
+    grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
   }
-  if (glb_policy->
-    grpc_timer_cancel(
-    glb_policy->fallback_timer_active = false;
+  if (glb_policy->fallback_timer_callback_pending) {
+    grpc_timer_cancel(&glb_policy->lb_fallback_timer);
   }
-
-  pending_pick* pp = glb_policy->pending_picks;
-  glb_policy->pending_picks = nullptr;
-  pending_ping* pping = glb_policy->pending_pings;
-  glb_policy->pending_pings = nullptr;
   if (glb_policy->rr_policy != nullptr) {
-
+    grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
+    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
+  } else {
+    grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
   }
   // We destroy the LB channel here because
   // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
@@ -1030,28 +920,41 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
     grpc_channel_destroy(glb_policy->lb_channel);
     glb_policy->lb_channel = nullptr;
   }
-  grpc_connectivity_state_set(
-
-
-
+  grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+                              GRPC_ERROR_REF(error), "glb_shutdown");
+  // Clear pending picks.
+  pending_pick* pp = glb_policy->pending_picks;
+  glb_policy->pending_picks = nullptr;
   while (pp != nullptr) {
     pending_pick* next = pp->next;
-
-
-
-
-
+    if (new_policy != nullptr) {
+      // Hand pick over to new policy.
+      if (pp->client_stats != nullptr) {
+        grpc_grpclb_client_stats_unref(pp->client_stats);
+      }
+      pp->pick->on_complete = pp->original_on_complete;
+      if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
+        // Synchronous return; schedule callback.
+        GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
+      }
+      gpr_free(pp);
+    } else {
+      pp->pick->connected_subchannel.reset();
+      GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
+    }
     pp = next;
   }
-
+  // Clear pending pings.
+  pending_ping* pping = glb_policy->pending_pings;
+  glb_policy->pending_pings = nullptr;
   while (pping != nullptr) {
     pending_ping* next = pping->next;
-    GRPC_CLOSURE_SCHED(
-
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+    GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
     gpr_free(pping);
     pping = next;
   }
+  GRPC_ERROR_UNREF(error);
 }
 
 // Cancel a specific pending pick.
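Note on the hunk above: the new shutdown path creates one error up front, gives every pending pick/ping closure its own GRPC_ERROR_REF, and drops the creator's reference at the very end. Assuming the headers and macros already pulled in by this file, that ownership rule reduces to a sketch like the following (hypothetical helper, not part of the change itself):

/* One error, one ref per consumer, creator unrefs last. */
static void fail_all_sketch(grpc_closure** closures, size_t n) {
  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
  for (size_t i = 0; i < n; ++i) {
    GRPC_CLOSURE_SCHED(closures[i], GRPC_ERROR_REF(error)); /* consumer's ref */
  }
  GRPC_ERROR_UNREF(error); /* creator's own ref */
}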
@@ -1063,18 +966,18 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
 // pick needs also be cancelled by the RR instance.
 // - Otherwise, without an RR instance, picks stay pending at this policy's
 // level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-// we invoke the completion closure and set *target to
-static void glb_cancel_pick_locked(
-
+// we invoke the completion closure and set *target to nullptr right here.
+static void glb_cancel_pick_locked(grpc_lb_policy* pol,
+                                   grpc_lb_policy_pick_state* pick,
                                    grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   pending_pick* pp = glb_policy->pending_picks;
   glb_policy->pending_picks = nullptr;
   while (pp != nullptr) {
     pending_pick* next = pp->next;
-    if (pp->
-
-      GRPC_CLOSURE_SCHED(
+    if (pp->pick == pick) {
+      pick->connected_subchannel.reset();
+      GRPC_CLOSURE_SCHED(&pp->on_complete,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
     } else {
@@ -1084,7 +987,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
     pp = next;
   }
   if (glb_policy->rr_policy != nullptr) {
-    grpc_lb_policy_cancel_pick_locked(
+    grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
                                       GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
@@ -1099,9 +1002,8 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
 // pick needs also be cancelled by the RR instance.
 // - Otherwise, without an RR instance, picks stay pending at this policy's
 // level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-// we invoke the completion closure and set *target to
-static void glb_cancel_picks_locked(
-                                    grpc_lb_policy* pol,
+// we invoke the completion closure and set *target to nullptr right here.
+static void glb_cancel_picks_locked(grpc_lb_policy* pol,
                                     uint32_t initial_metadata_flags_mask,
                                     uint32_t initial_metadata_flags_eq,
                                     grpc_error* error) {
@@ -1110,9 +1012,9 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
   glb_policy->pending_picks = nullptr;
   while (pp != nullptr) {
     pending_pick* next = pp->next;
-    if ((pp->
+    if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      GRPC_CLOSURE_SCHED(
+      GRPC_CLOSURE_SCHED(&pp->on_complete,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
     } else {
@@ -1123,66 +1025,54 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
   }
   if (glb_policy->rr_policy != nullptr) {
     grpc_lb_policy_cancel_picks_locked(
-
+        glb_policy->rr_policy, initial_metadata_flags_mask,
         initial_metadata_flags_eq, GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void lb_on_fallback_timer_locked(
-
-static void
-    glb_lb_policy* glb_policy);
-static void start_picking_locked(grpc_exec_ctx* exec_ctx,
-                                 glb_lb_policy* glb_policy) {
+static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
+static void query_for_backends_locked(glb_lb_policy* glb_policy);
+static void start_picking_locked(glb_lb_policy* glb_policy) {
   /* start a timer to fall back */
   if (glb_policy->lb_fallback_timeout_ms > 0 &&
-      glb_policy->serverlist == nullptr &&
+      glb_policy->serverlist == nullptr &&
+      !glb_policy->fallback_timer_callback_pending) {
     grpc_millis deadline =
-
-
+        grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
+    GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
     GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                       glb_policy,
                       grpc_combiner_scheduler(glb_policy->base.combiner));
-    glb_policy->
-    grpc_timer_init(
+    glb_policy->fallback_timer_callback_pending = true;
+    grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
                     &glb_policy->lb_on_fallback);
   }
 
   glb_policy->started_picking = true;
-
-  query_for_backends_locked(
+  glb_policy->lb_call_backoff->Reset();
+  query_for_backends_locked(glb_policy);
 }
 
-static void glb_exit_idle_locked(
+static void glb_exit_idle_locked(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   if (!glb_policy->started_picking) {
-    start_picking_locked(
+    start_picking_locked(glb_policy);
   }
 }
 
-static int glb_pick_locked(
-
-                           grpc_connected_subchannel** target,
-                           grpc_call_context_element* context, void** user_data,
-                           grpc_closure* on_complete) {
-  if (pick_args->lb_token_mdelem_storage == nullptr) {
-    *target = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
-                       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                           "No mdelem storage for the LB token. Load reporting "
-                           "won't work without it. Failing"));
-    return 0;
-  }
+static int glb_pick_locked(grpc_lb_policy* pol,
+                           grpc_lb_policy_pick_state* pick) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+  pending_pick* pp = pending_pick_create(glb_policy, pick);
   bool pick_done = false;
   if (glb_policy->rr_policy != nullptr) {
     const grpc_connectivity_state rr_connectivity_state =
-        grpc_lb_policy_check_connectivity_locked(
-
+        grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
+                                                 nullptr);
     // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
     // callback registered to capture this event
-    // (
+    // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
    // need to make sure we aren't trying to pick from a RR policy instance
    // that's in shutdown.
    if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
@@ -1192,33 +1082,16 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
               glb_policy, glb_policy->rr_policy,
               grpc_connectivity_state_name(rr_connectivity_state));
       }
-
-                       on_complete);
+      pending_pick_add(&glb_policy->pending_picks, pp);
       pick_done = false;
     } else {  // RR not in shutdown
       if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
                 glb_policy->rr_policy);
       }
-      GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
-      wrapped_rr_closure_arg* wc_arg =
-          (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
-      GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
-                        grpc_schedule_on_exec_ctx);
-      wc_arg->rr_policy = glb_policy->rr_policy;
-      wc_arg->target = target;
-      wc_arg->context = context;
       GPR_ASSERT(glb_policy->client_stats != nullptr);
-      wc_arg->client_stats =
-          grpc_grpclb_client_stats_ref(glb_policy->client_stats);
-      wc_arg->wrapped_closure = on_complete;
-      wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
-      wc_arg->initial_metadata = pick_args->initial_metadata;
-      wc_arg->free_when_done = wc_arg;
-      wc_arg->glb_policy = pol;
       pick_done =
-          pick_from_internal_rr_locked(
-          false /* force_async */, target, wc_arg);
+          pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
     }
   } else {  // glb_policy->rr_policy == NULL
     if (grpc_lb_glb_trace.enabled()) {
@@ -1226,10 +1099,9 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
               "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
               glb_policy);
     }
-
-                     on_complete);
+    pending_pick_add(&glb_policy->pending_picks, pp);
     if (!glb_policy->started_picking) {
-      start_picking_locked(
+      start_picking_locked(glb_policy);
    }
    pick_done = false;
  }
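Note on the hunk above: whether or not an RR policy exists, glb_pick_locked now funnels everything through one heap pending_pick that is either handed to the RR policy or pushed onto glb_policy->pending_picks via a pending_pick_add()-style helper. A tiny self-contained sketch of that intrusive push (hypothetical struct; the real pending_pick also carries pick state, stats, and closures):

typedef struct node {
  struct node* next;
} node;

/* Prepend n to the singly linked list rooted at *head. */
static void push(node** head, node* n) {
  n->next = *head;
  *head = n;
}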
@@ -1237,117 +1109,124 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
 }
 
 static grpc_connectivity_state glb_check_connectivity_locked(
-
-    grpc_error** connectivity_error) {
+    grpc_lb_policy* pol, grpc_error** connectivity_error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   return grpc_connectivity_state_get(&glb_policy->state_tracker,
                                      connectivity_error);
 }
 
-static void glb_ping_one_locked(
-                                grpc_closure*
+static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
+                                grpc_closure* on_ack) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   if (glb_policy->rr_policy) {
-    grpc_lb_policy_ping_one_locked(
+    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
   } else {
-
+    pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
     if (!glb_policy->started_picking) {
-      start_picking_locked(
+      start_picking_locked(glb_policy);
     }
   }
 }
 
-static void glb_notify_on_state_change_locked(
-                                              grpc_lb_policy* pol,
+static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
                                               grpc_connectivity_state* current,
                                               grpc_closure* notify) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
-  grpc_connectivity_state_notify_on_state_change(
-
+  grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
+                                                 current, notify);
 }
 
-static void lb_call_on_retry_timer_locked(
-                                          grpc_error* error) {
+static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
-  glb_policy->
+  glb_policy->retry_timer_callback_pending = false;
   if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
       error == GRPC_ERROR_NONE) {
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
     }
-    query_for_backends_locked(
+    query_for_backends_locked(glb_policy);
   }
-
+  GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
 }
 
-static void maybe_restart_lb_call(
-                                  glb_lb_policy* glb_policy) {
+static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
   if (glb_policy->started_picking && glb_policy->updating_lb_call) {
-    if (glb_policy->
-      grpc_timer_cancel(
+    if (glb_policy->retry_timer_callback_pending) {
+      grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
     }
-    if (!glb_policy->shutting_down) start_picking_locked(
+    if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
    glb_policy->updating_lb_call = false;
  } else if (!glb_policy->shutting_down) {
    /* if we aren't shutting down, restart the LB client call after some time */
-    grpc_millis next_try =
-        grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
-            .next_attempt_start_time;
+    grpc_millis next_try = glb_policy->lb_call_backoff->NextAttemptTime();
    if (grpc_lb_glb_trace.enabled()) {
      gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
              glb_policy);
-      grpc_millis timeout = next_try -
+      grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
      if (timeout > 0) {
        gpr_log(GPR_DEBUG,
-                "[grpclb %p] ...
+                "[grpclb %p] ... retry LB call after %" PRIuPTR "ms.",
                glb_policy, timeout);
      } else {
-        gpr_log(GPR_DEBUG, "[grpclb %p] ...
+        gpr_log(GPR_DEBUG, "[grpclb %p] ... retry LB call immediately.",
                glb_policy);
      }
    }
-
+    GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
                      lb_call_on_retry_timer_locked, glb_policy,
                      grpc_combiner_scheduler(glb_policy->base.combiner));
-    glb_policy->
-    grpc_timer_init(
+    glb_policy->retry_timer_callback_pending = true;
+    grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
                    &glb_policy->lb_on_call_retry);
  }
-
-
+  GRPC_LB_POLICY_UNREF(&glb_policy->base,
+                       "lb_on_server_status_received_locked");
 }
 
-static void send_client_load_report_locked(
-                                            grpc_error* error);
+static void send_client_load_report_locked(void* arg, grpc_error* error);
 
-static void schedule_next_client_load_report(
-                                             glb_lb_policy* glb_policy) {
+static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
   const grpc_millis next_client_load_report_time =
-
+      grpc_core::ExecCtx::Get()->Now() +
+      glb_policy->client_stats_report_interval;
   GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                     send_client_load_report_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
-  grpc_timer_init(
+  grpc_timer_init(&glb_policy->client_load_report_timer,
                   next_client_load_report_time,
                   &glb_policy->client_load_report_closure);
 }
 
-static void client_load_report_done_locked(
-                                           grpc_error* error) {
+static void client_load_report_done_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
   glb_policy->client_load_report_payload = nullptr;
   if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
-    glb_policy->
-
-                         "client_load_report");
+    glb_policy->client_load_report_timer_callback_pending = false;
+    GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
     if (glb_policy->lb_call == nullptr) {
-      maybe_restart_lb_call(
+      maybe_restart_lb_call(glb_policy);
    }
    return;
  }
-  schedule_next_client_load_report(
+  schedule_next_client_load_report(glb_policy);
+}
+
+static void do_send_client_load_report_locked(glb_lb_policy* glb_policy) {
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_SEND_MESSAGE;
+  op.data.send_message.send_message = glb_policy->client_load_report_payload;
+  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
+                    client_load_report_done_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner));
+  grpc_call_error call_error = grpc_call_start_batch_and_execute(
+      glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
+  if (call_error != GRPC_CALL_OK) {
+    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
+    GPR_ASSERT(GRPC_CALL_OK == call_error);
+  }
 }
 
 static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
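Note on the hunk above: every timer in this file now follows the same discipline — take a policy ref before arming the timer, set a *_callback_pending flag, and have the callback clear the flag and release exactly that ref. Using the fields that appear in the hunk, a hypothetical helper (sketch only, not part of the change; the ref tag is shortened) would look like:

/* Arm the retry timer while keeping the policy alive until the callback runs. */
static void arm_retry_timer_sketch(glb_lb_policy* glb_policy, grpc_millis when) {
  GRPC_LB_POLICY_REF(&glb_policy->base, "retry_timer_sketch"); /* released in cb */
  glb_policy->retry_timer_callback_pending = true;
  grpc_timer_init(&glb_policy->lb_call_retry_timer, when,
                  &glb_policy->lb_on_call_retry);
}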
@@ -1362,15 +1241,13 @@ static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
          (drop_entries == nullptr || drop_entries->num_entries == 0);
 }
 
-static void send_client_load_report_locked(
-                                            grpc_error* error) {
+static void send_client_load_report_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
-    glb_policy->
-
-                         "client_load_report");
+    glb_policy->client_load_report_timer_callback_pending = false;
+    GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
     if (glb_policy->lb_call == nullptr) {
-      maybe_restart_lb_call(
+      maybe_restart_lb_call(glb_policy);
     }
     return;
   }
@@ -1383,7 +1260,7 @@ static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
   if (load_report_counters_are_zero(request)) {
     if (glb_policy->last_client_load_report_counters_were_zero) {
       grpc_grpclb_request_destroy(request);
-      schedule_next_client_load_report(
+      schedule_next_client_load_report(glb_policy);
       return;
     }
     glb_policy->last_client_load_report_counters_were_zero = true;
@@ -1393,31 +1270,20 @@ static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
   grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
   glb_policy->client_load_report_payload =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
-  grpc_slice_unref_internal(
+  grpc_slice_unref_internal(request_payload_slice);
   grpc_grpclb_request_destroy(request);
-  //
-
-
-
-
-  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
-                    client_load_report_done_locked, glb_policy,
-                    grpc_combiner_scheduler(glb_policy->base.combiner));
-  grpc_call_error call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, &op, 1,
-      &glb_policy->client_load_report_closure);
-  if (call_error != GRPC_CALL_OK) {
-    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
-    GPR_ASSERT(GRPC_CALL_OK == call_error);
+  // If we've already sent the initial request, then we can go ahead and send
+  // the load report. Otherwise, we need to wait until the initial request has
+  // been sent to send this (see lb_on_sent_initial_request_locked() below).
+  if (glb_policy->initial_request_sent) {
+    do_send_client_load_report_locked(glb_policy);
   }
 }
 
-static void
-
-static void lb_on_response_received_locked(
-
-static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
-                                glb_lb_policy* glb_policy) {
+static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error);
+static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
+static void lb_on_response_received_locked(void* arg, grpc_error* error);
+static void lb_call_init_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->server_name != nullptr);
   GPR_ASSERT(glb_policy->server_name[0] != '\0');
   GPR_ASSERT(glb_policy->lb_call == nullptr);
@@ -1430,13 +1296,13 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
   grpc_millis deadline =
       glb_policy->lb_call_timeout_ms == 0
           ? GRPC_MILLIS_INF_FUTURE
-          :
+          : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
   glb_policy->lb_call = grpc_channel_create_pollset_set_call(
-
+      glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
       GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
       &host, deadline, nullptr);
-  grpc_slice_unref_internal(
+  grpc_slice_unref_internal(host);
 
   if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
@@ -1451,9 +1317,12 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
   grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
   glb_policy->lb_request_payload =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
-  grpc_slice_unref_internal(
+  grpc_slice_unref_internal(request_payload_slice);
   grpc_grpclb_request_destroy(request);
 
+  GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
+                    lb_on_sent_initial_request_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner));
   GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                     lb_on_server_status_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1461,19 +1330,21 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
                     lb_on_response_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
 
-
-
-
-
-
-
+  grpc_core::BackOff::Options backoff_options;
+  backoff_options
+      .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
+      .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
+      .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
+      .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
 
+  glb_policy->lb_call_backoff.Init(backoff_options);
+
+  glb_policy->initial_request_sent = false;
   glb_policy->seen_initial_response = false;
   glb_policy->last_client_load_report_counters_were_zero = false;
 }
 
-static void lb_call_destroy_locked(
-                                   glb_lb_policy* glb_policy) {
+static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->lb_call != nullptr);
   grpc_call_unref(glb_policy->lb_call);
   glb_policy->lb_call = nullptr;
@@ -1482,22 +1353,21 @@ static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
   grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
 
   grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
-  grpc_slice_unref_internal(
+  grpc_slice_unref_internal(glb_policy->lb_call_status_details);
 
-  if (glb_policy->
-    grpc_timer_cancel(
+  if (glb_policy->client_load_report_timer_callback_pending) {
+    grpc_timer_cancel(&glb_policy->client_load_report_timer);
   }
 }
 
 /*
  * Auxiliary functions and LB client callbacks.
  */
-static void query_for_backends_locked(
-                                      glb_lb_policy* glb_policy) {
+static void query_for_backends_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->lb_channel != nullptr);
   if (glb_policy->shutting_down) return;
 
-  lb_call_init_locked(
+  lb_call_init_locked(glb_policy);
 
   if (grpc_lb_glb_trace.enabled()) {
     gpr_log(GPR_INFO,
@@ -1528,8 +1398,11 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
   op->flags = 0;
   op->reserved = nullptr;
   op++;
+  /* take a ref to be released in lb_on_sent_initial_request_locked() */
+  GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_sent_initial_request_locked");
   call_error = grpc_call_start_batch_and_execute(
-
+      glb_policy->lb_call, ops, (size_t)(op - ops),
+      &glb_policy->lb_on_sent_initial_request);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 
   op = ops;
@@ -1542,12 +1415,10 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
   op->flags = 0;
   op->reserved = nullptr;
   op++;
-  /* take a
-
-  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
-                          "lb_on_server_status_received_locked");
+  /* take a ref to be released in lb_on_server_status_received_locked() */
+  GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_server_status_received_locked");
   call_error = grpc_call_start_batch_and_execute(
-
+      glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_server_status_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
 
@@ -1557,23 +1428,32 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
   op->flags = 0;
   op->reserved = nullptr;
   op++;
-  /* take
-
-  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
+  /* take a ref to be unref'd/reused in lb_on_response_received_locked() */
+  GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_response_received_locked");
   call_error = grpc_call_start_batch_and_execute(
-
+      glb_policy->lb_call, ops, (size_t)(op - ops),
      &glb_policy->lb_on_response_received);
  GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
 
-static void
-
+static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+  glb_policy->initial_request_sent = true;
+  // If we attempted to send a client load report before the initial request was
+  // sent, send the load report now.
+  if (glb_policy->client_load_report_payload != nullptr) {
+    do_send_client_load_report_locked(glb_policy);
+  }
+  GRPC_LB_POLICY_UNREF(&glb_policy->base, "lb_on_sent_initial_request_locked");
+}
+
+static void lb_on_response_received_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_op ops[2];
   memset(ops, 0, sizeof(ops));
   grpc_op* op = ops;
   if (glb_policy->lb_response_payload != nullptr) {
-
+    glb_policy->lb_call_backoff->Reset();
    /* Received data from the LB server. Look inside
     * glb_policy->lb_response_payload, for a serverlist. */
    grpc_byte_buffer_reader bbr;
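Note on the hunk above: lb_on_sent_initial_request_locked is the other half of the gate in send_client_load_report_locked — a load report produced before the initial LB request has gone out stays parked in client_load_report_payload and is flushed once the flag flips. Condensed into one hypothetical helper (sketch only, built from the same fields used in the hunks above):

/* Send the parked load report only after the initial LB request went out. */
static void maybe_flush_load_report_sketch(glb_lb_policy* glb_policy) {
  if (glb_policy->initial_request_sent &&
      glb_policy->client_load_report_payload != nullptr) {
    do_send_client_load_report_locked(glb_policy);
  }
}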
@@ -1596,12 +1476,10 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
                 "client load reporting interval = %" PRIdPTR " milliseconds",
                 glb_policy, glb_policy->client_stats_report_interval);
       }
-      /* take a
-
-
-      glb_policy
-      GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
-      schedule_next_client_load_report(exec_ctx, glb_policy);
+      /* take a ref to be unref'd in send_client_load_report_locked() */
+      glb_policy->client_load_report_timer_callback_pending = true;
+      GRPC_LB_POLICY_REF(&glb_policy->base, "client_load_report");
+      schedule_next_client_load_report(glb_policy);
     } else if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO,
               "[grpclb %p] Received initial LB response message; client load "
@@ -1646,12 +1524,10 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
           grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
         } else {
           /* or dispose of the fallback */
-          grpc_lb_addresses_destroy(
-              glb_policy->fallback_backend_addresses);
+          grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
           glb_policy->fallback_backend_addresses = nullptr;
-          if (glb_policy->
-            grpc_timer_cancel(
-            glb_policy->fallback_timer_active = false;
+          if (glb_policy->fallback_timer_callback_pending) {
+            grpc_timer_cancel(&glb_policy->lb_fallback_timer);
           }
         }
         /* and update the copy in the glb_lb_policy instance. This
@@ -1659,7 +1535,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
          * update or in glb_destroy() */
         glb_policy->serverlist = serverlist;
         glb_policy->serverlist_index = 0;
-        rr_handover_locked(
+        rr_handover_locked(glb_policy);
       }
     } else {
       if (grpc_lb_glb_trace.enabled()) {
@@ -1669,14 +1545,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
        }
        grpc_grpclb_destroy_serverlist(serverlist);
      }
-    } else { /* serverlist ==
+    } else { /* serverlist == nullptr */
      gpr_log(GPR_ERROR,
              "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
              glb_policy,
              grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
    }
  }
-    grpc_slice_unref_internal(
+    grpc_slice_unref_internal(response_slice);
    if (!glb_policy->shutting_down) {
      /* keep listening for serverlist updates */
      op->op = GRPC_OP_RECV_MESSAGE;
@@ -1684,28 +1560,27 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
      op->flags = 0;
      op->reserved = nullptr;
      op++;
-      /* reuse the "lb_on_response_received_locked"
+      /* reuse the "lb_on_response_received_locked" ref taken in
       * query_for_backends_locked() */
      const grpc_call_error call_error = grpc_call_start_batch_and_execute(
-
+          glb_policy->lb_call, ops, (size_t)(op - ops),
          &glb_policy->lb_on_response_received); /* loop */
      GPR_ASSERT(GRPC_CALL_OK == call_error);
    } else {
-
-
+      GRPC_LB_POLICY_UNREF(&glb_policy->base,
+                           "lb_on_response_received_locked_shutdown");
    }
  } else { /* empty payload: call cancelled. */
-    /* dispose of the "lb_on_response_received_locked"
+    /* dispose of the "lb_on_response_received_locked" ref taken in
     * query_for_backends_locked() and reused in every reception loop */
-
-
+    GRPC_LB_POLICY_UNREF(&glb_policy->base,
+                         "lb_on_response_received_locked_empty_payload");
  }
 }
 
-static void lb_on_fallback_timer_locked(
-                                        grpc_error* error) {
+static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
-  glb_policy->
+  glb_policy->fallback_timer_callback_pending = false;
   /* If we receive a serverlist after the timer fires but before this callback
    * actually runs, don't fall back. */
   if (glb_policy->serverlist == nullptr) {
@@ -1716,15 +1591,13 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
              glb_policy);
    }
    GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
-    rr_handover_locked(
+    rr_handover_locked(glb_policy);
    }
  }
-
-                       "grpclb_fallback_timer");
+  GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
 }
 
-static void lb_on_server_status_received_locked(
-    void* arg, grpc_error* error) {
+static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   GPR_ASSERT(glb_policy->lb_call != nullptr);
   if (grpc_lb_glb_trace.enabled()) {
@@ -1738,29 +1611,28 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
|
|
1738
1611
|
gpr_free(status_details);
|
1739
1612
|
}
|
1740
1613
|
/* We need to perform cleanups no matter what. */
|
1741
|
-
lb_call_destroy_locked(
|
1614
|
+
lb_call_destroy_locked(glb_policy);
|
1742
1615
|
// If the load report timer is still pending, we wait for it to be
|
1743
1616
|
// called before restarting the call. Otherwise, we restart the call
|
1744
1617
|
// here.
|
1745
|
-
if (!glb_policy->
|
1746
|
-
maybe_restart_lb_call(
|
1618
|
+
if (!glb_policy->client_load_report_timer_callback_pending) {
|
1619
|
+
maybe_restart_lb_call(glb_policy);
|
1747
1620
|
}
|
1748
1621
|
}
|
1749
1622
|
|
1750
|
-
static void fallback_update_locked(
|
1751
|
-
glb_lb_policy* glb_policy,
|
1623
|
+
static void fallback_update_locked(glb_lb_policy* glb_policy,
|
1752
1624
|
const grpc_lb_addresses* addresses) {
|
1753
1625
|
GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
|
1754
|
-
grpc_lb_addresses_destroy(
|
1626
|
+
grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
|
1755
1627
|
glb_policy->fallback_backend_addresses =
|
1756
|
-
extract_backend_addresses_locked(
|
1757
|
-
if (glb_policy->
|
1758
|
-
|
1759
|
-
rr_handover_locked(
|
1628
|
+
extract_backend_addresses_locked(addresses);
|
1629
|
+
if (glb_policy->lb_fallback_timeout_ms > 0 &&
|
1630
|
+
glb_policy->rr_policy != nullptr) {
|
1631
|
+
rr_handover_locked(glb_policy);
|
1760
1632
|
}
|
1761
1633
|
}
|
1762
1634
|
|
1763
|
-
static void glb_update_locked(
|
1635
|
+
static void glb_update_locked(grpc_lb_policy* policy,
|
1764
1636
|
const grpc_lb_policy_args* args) {
|
1765
1637
|
glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
|
1766
1638
|
const grpc_arg* arg =
|
@@ -1770,7 +1642,7 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
|
|
1770
1642
|
// If we don't have a current channel to the LB, go into TRANSIENT
|
1771
1643
|
// FAILURE.
|
1772
1644
|
grpc_connectivity_state_set(
|
1773
|
-
|
1645
|
+
&glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
|
1774
1646
|
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
|
1775
1647
|
"glb_update_missing");
|
1776
1648
|
} else {
|
@@ -1787,16 +1659,16 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
|
|
1787
1659
|
// If a non-empty serverlist hasn't been received from the balancer,
|
1788
1660
|
// propagate the update to fallback_backend_addresses.
|
1789
1661
|
if (glb_policy->serverlist == nullptr) {
|
1790
|
-
fallback_update_locked(
|
1662
|
+
fallback_update_locked(glb_policy, addresses);
|
1791
1663
|
}
|
1792
1664
|
GPR_ASSERT(glb_policy->lb_channel != nullptr);
|
1793
1665
|
// Propagate updates to the LB channel (pick_first) through the fake
|
1794
1666
|
// resolver.
|
1795
1667
|
grpc_channel_args* lb_channel_args = build_lb_channel_args(
|
1796
|
-
|
1668
|
+
addresses, glb_policy->response_generator, args->args);
|
1797
1669
|
grpc_fake_resolver_response_generator_set_response(
|
1798
|
-
|
1799
|
-
grpc_channel_args_destroy(
|
1670
|
+
glb_policy->response_generator, lb_channel_args);
|
1671
|
+
grpc_channel_args_destroy(lb_channel_args);
|
1800
1672
|
// Start watching the LB channel connectivity for connection, if not
|
1801
1673
|
// already doing so.
|
1802
1674
|
if (!glb_policy->watching_lb_channel) {
|
@@ -1806,9 +1678,9 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
|
|
1806
1678
|
grpc_channel_get_channel_stack(glb_policy->lb_channel));
|
1807
1679
|
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
|
1808
1680
|
glb_policy->watching_lb_channel = true;
|
1809
|
-
|
1681
|
+
GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
|
1810
1682
|
grpc_client_channel_watch_connectivity_state(
|
1811
|
-
|
1683
|
+
client_channel_elem,
|
1812
1684
|
grpc_polling_entity_create_from_pollset_set(
|
1813
1685
|
glb_policy->base.interested_parties),
|
1814
1686
|
&glb_policy->lb_channel_connectivity,
|
@@ -1819,8 +1691,7 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
 // Invoked as part of the update process. It continues watching the LB channel
 // until it shuts down or becomes READY. It's invoked even if the LB channel
 // stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
-                                                       void* arg,
+static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
                                                        grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (glb_policy->shutting_down) goto done;
@@ -1836,7 +1707,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
               grpc_channel_get_channel_stack(glb_policy->lb_channel));
       GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
       grpc_client_channel_watch_connectivity_state(
-          exec_ctx, client_channel_elem,
+          client_channel_elem,
           grpc_polling_entity_create_from_pollset_set(
               glb_policy->base.interested_parties),
           &glb_policy->lb_channel_connectivity,
@@ -1853,23 +1724,35 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
         grpc_call_cancel(glb_policy->lb_call, nullptr);
         // lb_on_server_status_received() will pick up the cancel and reinit
         // lb_call.
-      } else if (glb_policy->started_picking
-        if (glb_policy->retry_timer_active) {
-          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
-          glb_policy->retry_timer_active = false;
+      } else if (glb_policy->started_picking) {
+        if (glb_policy->retry_timer_callback_pending) {
+          grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
         }
-        start_picking_locked(exec_ctx, glb_policy);
+        start_picking_locked(glb_policy);
       }
       /* fallthrough */
     case GRPC_CHANNEL_SHUTDOWN:
     done:
       glb_policy->watching_lb_channel = false;
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                                "watch_lb_channel_connectivity_cb_shutdown");
+      GRPC_LB_POLICY_UNREF(&glb_policy->base,
+                           "watch_lb_channel_connectivity_cb_shutdown");
       break;
   }
 }
 
+static void glb_set_reresolve_closure_locked(
+    grpc_lb_policy* policy, grpc_closure* request_reresolution) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+  GPR_ASSERT(!glb_policy->shutting_down);
+  GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
+  if (glb_policy->rr_policy != nullptr) {
+    grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
+                                                request_reresolution);
+  } else {
+    glb_policy->base.request_reresolution = request_reresolution;
+  }
+}
+
 /* Code wiring the policy with the rest of the core */
 static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
     glb_destroy,
@@ -1881,10 +1764,10 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
     glb_exit_idle_locked,
     glb_check_connectivity_locked,
     glb_notify_on_state_change_locked,
-    glb_update_locked};
+    glb_update_locked,
+    glb_set_reresolve_closure_locked};
 
-static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
-                                  grpc_lb_policy_factory* factory,
+static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
                                   grpc_lb_policy_args* args) {
   /* Count the number of gRPC-LB addresses. There must be at least one. */
   const grpc_arg* arg =
@@ -1905,7 +1788,7 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
   arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
   GPR_ASSERT(arg != nullptr);
   GPR_ASSERT(arg->type == GRPC_ARG_STRING);
-  grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+  grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
   GPR_ASSERT(uri->path[0] != '\0');
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
@@ -1938,26 +1821,26 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
   /* Extract the backend addresses (may be empty) from the resolver for
    * fallback. */
   glb_policy->fallback_backend_addresses =
-      extract_backend_addresses_locked(exec_ctx, addresses);
+      extract_backend_addresses_locked(addresses);
 
   /* Create a client channel over them to communicate with a LB service */
   glb_policy->response_generator =
       grpc_fake_resolver_response_generator_create();
   grpc_channel_args* lb_channel_args = build_lb_channel_args(
-      exec_ctx, addresses, glb_policy->response_generator, args->args);
+      addresses, glb_policy->response_generator, args->args);
   char* uri_str;
   gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
   glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
-      exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
+      uri_str, args->client_channel_factory, lb_channel_args);
 
   /* Propagate initial resolution */
   grpc_fake_resolver_response_generator_set_response(
-      exec_ctx, glb_policy->response_generator, lb_channel_args);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+      glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(lb_channel_args);
   gpr_free(uri_str);
   if (glb_policy->lb_channel == nullptr) {
     gpr_free((void*)glb_policy->server_name);
-    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+    grpc_channel_args_destroy(glb_policy->args);
     gpr_free(glb_policy);
     return nullptr;
   }
@@ -1988,7 +1871,7 @@ grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
 
 // Only add client_load_reporting filter if the grpclb LB policy is used.
 static bool maybe_add_client_load_reporting_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   const grpc_arg* channel_arg =
@@ -2001,7 +1884,7 @@ static bool maybe_add_client_load_reporting_filter(
   return true;
 }
 
-extern "C" void grpc_lb_policy_grpclb_init() {
+void grpc_lb_policy_grpclb_init() {
   grpc_register_lb_policy(grpc_glb_lb_factory_create());
   grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
@@ -2009,4 +1892,4 @@ extern "C" void grpc_lb_policy_grpclb_init() {
                                    (void*)&grpc_client_load_reporting_filter);
 }
 
-extern "C" void grpc_lb_policy_grpclb_shutdown() {}
+void grpc_lb_policy_grpclb_shutdown() {}