grpc 1.7.3 → 1.8.0
Potentially problematic release. This version of grpc might be problematic.
- checksums.yaml +4 -4
- data/Makefile +2106 -2116
- data/include/grpc/census.h +1 -432
- data/include/grpc/compression.h +9 -9
- data/include/grpc/grpc.h +115 -98
- data/include/grpc/grpc_cronet.h +3 -3
- data/include/grpc/grpc_posix.h +4 -4
- data/include/grpc/grpc_security.h +160 -88
- data/include/grpc/grpc_security_constants.h +7 -0
- data/include/grpc/impl/codegen/atm.h +9 -1
- data/include/grpc/impl/codegen/atm_gcc_atomic.h +13 -4
- data/include/grpc/impl/codegen/atm_gcc_sync.h +6 -5
- data/include/grpc/impl/codegen/atm_windows.h +23 -22
- data/include/grpc/impl/codegen/byte_buffer.h +14 -14
- data/include/grpc/impl/codegen/byte_buffer_reader.h +2 -2
- data/include/grpc/impl/codegen/connectivity_state.h +0 -2
- data/include/grpc/impl/codegen/grpc_types.h +32 -28
- data/include/grpc/impl/codegen/port_platform.h +48 -0
- data/include/grpc/impl/codegen/slice.h +10 -10
- data/include/grpc/impl/codegen/sync_generic.h +9 -3
- data/include/grpc/slice.h +16 -17
- data/include/grpc/slice_buffer.h +22 -22
- data/include/grpc/support/alloc.h +11 -11
- data/include/grpc/support/avl.h +28 -20
- data/include/grpc/support/cmdline.h +13 -13
- data/include/grpc/support/histogram.h +17 -17
- data/include/grpc/support/host_port.h +2 -2
- data/include/grpc/support/log.h +9 -9
- data/include/grpc/support/log_windows.h +1 -1
- data/include/grpc/support/string_util.h +2 -2
- data/include/grpc/support/subprocess.h +5 -5
- data/include/grpc/support/sync.h +43 -27
- data/include/grpc/support/thd.h +6 -6
- data/include/grpc/support/tls_gcc.h +1 -1
- data/include/grpc/support/tls_pthread.h +1 -1
- data/src/core/ext/census/{grpc_context.c → grpc_context.cc} +5 -8
- data/src/core/ext/filters/client_channel/backup_poller.cc +165 -0
- data/src/core/ext/{census/grpc_filter.h → filters/client_channel/backup_poller.h} +12 -7
- data/src/core/ext/filters/client_channel/{channel_connectivity.c → channel_connectivity.cc} +45 -42
- data/src/core/ext/filters/client_channel/{client_channel.c → client_channel.cc} +452 -417
- data/src/core/ext/filters/client_channel/client_channel.h +16 -8
- data/src/core/ext/filters/client_channel/{client_channel_factory.c → client_channel_factory.cc} +0 -0
- data/src/core/ext/filters/client_channel/client_channel_factory.h +29 -21
- data/src/core/ext/filters/client_channel/{client_channel_plugin.c → client_channel_plugin.cc} +15 -19
- data/src/core/ext/filters/client_channel/{connector.c → connector.cc} +0 -0
- data/src/core/ext/filters/client_channel/connector.h +29 -21
- data/src/core/ext/filters/client_channel/{http_connect_handshaker.c → http_connect_handshaker.cc} +10 -10
- data/src/core/ext/filters/client_channel/http_connect_handshaker.h +8 -0
- data/src/core/ext/filters/client_channel/{http_proxy.c → http_proxy.cc} +14 -14
- data/src/core/ext/filters/client_channel/http_proxy.h +8 -0
- data/src/core/ext/filters/client_channel/{lb_policy.c → lb_policy.cc} +47 -48
- data/src/core/ext/filters/client_channel/lb_policy.h +76 -70
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/{client_load_reporting_filter.c → client_load_reporting_filter.cc} +28 -29
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +9 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/{grpclb.c → grpclb.cc} +554 -563
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h +9 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +17 -9
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/{grpclb_channel_secure.c → grpclb_channel_secure.cc} +17 -17
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/{grpclb_client_stats.c → grpclb_client_stats.cc} +3 -3
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +9 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/{load_balancer_api.c → load_balancer_api.cc} +64 -67
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +20 -21
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +599 -0
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +695 -0
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +270 -0
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +153 -0
- data/src/core/ext/filters/client_channel/{lb_policy_factory.c → lb_policy_factory.cc} +10 -10
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +48 -40
- data/src/core/ext/filters/client_channel/{lb_policy_registry.c → lb_policy_registry.cc} +9 -9
- data/src/core/ext/filters/client_channel/lb_policy_registry.h +11 -3
- data/src/core/ext/filters/client_channel/{parse_address.c → parse_address.cc} +24 -24
- data/src/core/ext/filters/client_channel/parse_address.h +14 -6
- data/src/core/ext/filters/client_channel/{proxy_mapper.c → proxy_mapper.cc} +0 -0
- data/src/core/ext/filters/client_channel/proxy_mapper.h +8 -0
- data/src/core/ext/filters/client_channel/{proxy_mapper_registry.c → proxy_mapper_registry.cc} +0 -0
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +8 -0
- data/src/core/ext/filters/client_channel/{resolver.c → resolver.cc} +21 -23
- data/src/core/ext/filters/client_channel/resolver.h +33 -27
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/{dns_resolver_ares.c → dns_resolver_ares.cc} +133 -133
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +18 -9
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/{grpc_ares_ev_driver_posix.c → grpc_ares_ev_driver_posix.cc} +58 -56
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/{grpc_ares_wrapper.c → grpc_ares_wrapper.cc} +118 -115
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +23 -15
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +60 -0
- data/src/core/ext/filters/client_channel/resolver/dns/native/{dns_resolver.c → dns_resolver.cc} +100 -94
- data/src/core/ext/filters/client_channel/resolver/fake/{fake_resolver.c → fake_resolver.cc} +14 -14
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +9 -1
- data/src/core/ext/filters/client_channel/resolver/sockaddr/{sockaddr_resolver.c → sockaddr_resolver.cc} +60 -60
- data/src/core/ext/filters/client_channel/{resolver_factory.c → resolver_factory.cc} +2 -2
- data/src/core/ext/filters/client_channel/resolver_factory.h +27 -19
- data/src/core/ext/filters/client_channel/{resolver_registry.c → resolver_registry.cc} +35 -35
- data/src/core/ext/filters/client_channel/resolver_registry.h +18 -10
- data/src/core/ext/filters/client_channel/{retry_throttle.c → retry_throttle.cc} +10 -10
- data/src/core/ext/filters/client_channel/retry_throttle.h +8 -0
- data/src/core/ext/filters/client_channel/{subchannel.c → subchannel.cc} +210 -213
- data/src/core/ext/filters/client_channel/subchannel.h +68 -60
- data/src/core/ext/filters/client_channel/{subchannel_index.c → subchannel_index.cc} +52 -52
- data/src/core/ext/filters/client_channel/subchannel_index.h +22 -14
- data/src/core/ext/filters/client_channel/{uri_parser.c → uri_parser.cc} +29 -27
- data/src/core/ext/filters/client_channel/uri_parser.h +18 -10
- data/src/core/ext/filters/deadline/{deadline_filter.c → deadline_filter.cc} +12 -15
- data/src/core/ext/filters/deadline/deadline_filter.h +11 -2
- data/src/core/ext/filters/http/client/{http_client_filter.c → http_client_filter.cc} +83 -83
- data/src/core/ext/filters/http/client/http_client_filter.h +8 -0
- data/src/core/ext/filters/http/{http_filters_plugin.c → http_filters_plugin.cc} +20 -21
- data/src/core/ext/filters/http/message_compress/{message_compress_filter.c → message_compress_filter.cc} +84 -83
- data/src/core/ext/filters/http/message_compress/message_compress_filter.h +9 -1
- data/src/core/ext/filters/http/server/{http_server_filter.c → http_server_filter.cc} +80 -78
- data/src/core/ext/filters/http/server/http_server_filter.h +8 -0
- data/src/core/ext/filters/load_reporting/{server_load_reporting_filter.c → server_load_reporting_filter.cc} +29 -29
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.h +9 -1
- data/src/core/ext/filters/load_reporting/{server_load_reporting_plugin.c → server_load_reporting_plugin.cc} +11 -11
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +13 -5
- data/src/core/ext/filters/max_age/{max_age_filter.c → max_age_filter.cc} +46 -56
- data/src/core/ext/filters/max_age/max_age_filter.h +8 -0
- data/src/core/ext/filters/message_size/{message_size_filter.c → message_size_filter.cc} +62 -40
- data/src/core/ext/filters/message_size/message_size_filter.h +8 -0
- data/src/core/ext/filters/workarounds/{workaround_cronet_compression_filter.c → workaround_cronet_compression_filter.cc} +11 -11
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +8 -0
- data/src/core/ext/filters/workarounds/{workaround_utils.c → workaround_utils.cc} +7 -7
- data/src/core/ext/filters/workarounds/workaround_utils.h +9 -1
- data/src/core/ext/transport/chttp2/alpn/{alpn.c → alpn.cc} +3 -3
- data/src/core/ext/transport/chttp2/alpn/alpn.h +10 -2
- data/src/core/ext/transport/chttp2/client/{chttp2_connector.c → chttp2_connector.cc} +49 -45
- data/src/core/ext/transport/chttp2/client/chttp2_connector.h +8 -0
- data/src/core/ext/transport/chttp2/client/insecure/{channel_create.c → channel_create.cc} +28 -27
- data/src/core/ext/transport/chttp2/client/insecure/{channel_create_posix.c → channel_create_posix.cc} +14 -13
- data/src/core/ext/transport/chttp2/client/secure/{secure_channel_create.c → secure_channel_create.cc} +68 -66
- data/src/core/ext/transport/chttp2/server/{chttp2_server.c → chttp2_server.cc} +76 -77
- data/src/core/ext/transport/chttp2/server/chttp2_server.h +11 -3
- data/src/core/ext/transport/chttp2/server/insecure/{server_chttp2.c → server_chttp2.cc} +3 -3
- data/src/core/ext/transport/chttp2/server/insecure/{server_chttp2_posix.c → server_chttp2_posix.cc} +13 -12
- data/src/core/ext/transport/chttp2/server/secure/{server_secure_chttp2.c → server_secure_chttp2.cc} +12 -10
- data/src/core/ext/transport/chttp2/transport/{bin_decoder.c → bin_decoder.cc} +7 -7
- data/src/core/ext/transport/chttp2/transport/bin_decoder.h +15 -7
- data/src/core/ext/transport/chttp2/transport/{bin_encoder.c → bin_encoder.cc} +11 -11
- data/src/core/ext/transport/chttp2/transport/bin_encoder.h +8 -0
- data/src/core/ext/transport/chttp2/transport/{chttp2_plugin.c → chttp2_plugin.cc} +2 -9
- data/src/core/ext/transport/chttp2/transport/{chttp2_transport.c → chttp2_transport.cc} +778 -778
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +14 -10
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +385 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.h +337 -0
- data/src/core/ext/transport/chttp2/transport/frame.h +8 -0
- data/src/core/ext/transport/chttp2/transport/{frame_data.c → frame_data.cc} +34 -34
- data/src/core/ext/transport/chttp2/transport/frame_data.h +25 -17
- data/src/core/ext/transport/chttp2/transport/{frame_goaway.c → frame_goaway.cc} +18 -18
- data/src/core/ext/transport/chttp2/transport/frame_goaway.h +18 -10
- data/src/core/ext/transport/chttp2/transport/{frame_ping.c → frame_ping.cc} +18 -19
- data/src/core/ext/transport/chttp2/transport/frame_ping.h +12 -4
- data/src/core/ext/transport/chttp2/transport/{frame_rst_stream.c → frame_rst_stream.cc} +16 -16
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +16 -8
- data/src/core/ext/transport/chttp2/transport/{frame_settings.c → frame_settings.cc} +23 -24
- data/src/core/ext/transport/chttp2/transport/frame_settings.h +17 -9
- data/src/core/ext/transport/chttp2/transport/{frame_window_update.c → frame_window_update.cc} +22 -24
- data/src/core/ext/transport/chttp2/transport/frame_window_update.h +14 -6
- data/src/core/ext/transport/chttp2/transport/{hpack_encoder.c → hpack_encoder.cc} +206 -161
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +23 -13
- data/src/core/ext/transport/chttp2/transport/{hpack_parser.c → hpack_parser.cc} +340 -334
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +30 -22
- data/src/core/ext/transport/chttp2/transport/{hpack_table.c → hpack_table.cc} +25 -25
- data/src/core/ext/transport/chttp2/transport/hpack_table.h +19 -11
- data/src/core/ext/transport/chttp2/transport/{http2_settings.c → http2_settings.cc} +1 -1
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +10 -2
- data/src/core/ext/transport/chttp2/transport/{huffsyms.c → huffsyms.cc} +0 -0
- data/src/core/ext/transport/chttp2/transport/huffsyms.h +8 -0
- data/src/core/ext/transport/chttp2/transport/{incoming_metadata.c → incoming_metadata.cc} +14 -13
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +18 -10
- data/src/core/ext/transport/chttp2/transport/internal.h +214 -340
- data/src/core/ext/transport/chttp2/transport/{parsing.c → parsing.cc} +152 -141
- data/src/core/ext/transport/chttp2/transport/{stream_lists.c → stream_lists.cc} +53 -53
- data/src/core/ext/transport/chttp2/transport/{stream_map.c → stream_map.cc} +35 -34
- data/src/core/ext/transport/chttp2/transport/stream_map.h +22 -14
- data/src/core/ext/transport/chttp2/transport/{varint.c → varint.cc} +0 -0
- data/src/core/ext/transport/chttp2/transport/varint.h +8 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +636 -0
- data/src/core/ext/transport/inproc/{inproc_plugin.c → inproc_plugin.cc} +5 -6
- data/src/core/ext/transport/inproc/{inproc_transport.c → inproc_transport.cc} +479 -544
- data/src/core/ext/transport/inproc/inproc_transport.h +4 -4
- data/src/core/lib/backoff/backoff.cc +80 -0
- data/src/core/lib/backoff/backoff.h +83 -0
- data/src/core/lib/channel/{channel_args.c → channel_args.cc} +92 -92
- data/src/core/lib/channel/channel_args.h +45 -37
- data/src/core/lib/channel/{channel_stack.c → channel_stack.cc} +67 -68
- data/src/core/lib/channel/channel_stack.h +76 -75
- data/src/core/lib/channel/{channel_stack_builder.c → channel_stack_builder.cc} +91 -91
- data/src/core/lib/channel/channel_stack_builder.h +45 -45
- data/src/core/lib/channel/{connected_channel.c → connected_channel.cc} +70 -70
- data/src/core/lib/channel/connected_channel.h +12 -4
- data/src/core/lib/channel/context.h +2 -2
- data/src/core/lib/channel/{handshaker.c → handshaker.cc} +7 -9
- data/src/core/lib/channel/handshaker.h +9 -1
- data/src/core/lib/channel/{handshaker_factory.c → handshaker_factory.cc} +7 -7
- data/src/core/lib/channel/handshaker_factory.h +18 -10
- data/src/core/lib/channel/{handshaker_registry.c → handshaker_registry.cc} +0 -0
- data/src/core/lib/channel/handshaker_registry.h +8 -0
- data/src/core/lib/compression/algorithm_metadata.h +8 -0
- data/src/core/lib/compression/{compression.c → compression.cc} +9 -9
- data/src/core/lib/compression/{message_compress.c → message_compress.cc} +0 -0
- data/src/core/lib/compression/message_compress.h +8 -0
- data/src/core/lib/compression/{stream_compression.c → stream_compression.cc} +12 -12
- data/src/core/lib/compression/stream_compression.h +27 -19
- data/src/core/lib/compression/{stream_compression_gzip.c → stream_compression_gzip.cc} +35 -36
- data/src/core/lib/compression/stream_compression_gzip.h +8 -0
- data/src/core/lib/compression/{stream_compression_identity.c → stream_compression_identity.cc} +19 -20
- data/src/core/lib/compression/stream_compression_identity.h +8 -0
- data/src/core/lib/debug/{stats.c → stats.cc} +19 -19
- data/src/core/lib/debug/stats.h +17 -9
- data/src/core/lib/debug/{stats_data.c → stats_data.cc} +45 -22
- data/src/core/lib/debug/stats_data.h +58 -19
- data/src/core/lib/debug/trace.cc +142 -0
- data/src/core/lib/debug/trace.h +74 -14
- data/src/core/lib/http/{format_request.c → format_request.cc} +10 -10
- data/src/core/lib/http/format_request.h +12 -4
- data/src/core/lib/http/{httpcli.c → httpcli.cc} +80 -80
- data/src/core/lib/http/httpcli.h +41 -33
- data/src/core/lib/http/{httpcli_security_connector.c → httpcli_security_connector.cc} +69 -55
- data/src/core/lib/http/{parser.c → parser.cc} +42 -42
- data/src/core/lib/http/parser.h +28 -20
- data/src/core/lib/{support → iomgr}/block_annotate.h +17 -8
- data/src/core/lib/iomgr/{call_combiner.c → call_combiner.cc} +29 -17
- data/src/core/lib/iomgr/call_combiner.h +9 -1
- data/src/core/lib/iomgr/closure.h +220 -62
- data/src/core/lib/iomgr/{combiner.c → combiner.cc} +63 -62
- data/src/core/lib/iomgr/combiner.h +16 -8
- data/src/core/lib/iomgr/{endpoint.c → endpoint.cc} +6 -0
- data/src/core/lib/iomgr/endpoint.h +47 -32
- data/src/core/lib/iomgr/endpoint_pair.h +12 -4
- data/src/core/lib/iomgr/{endpoint_pair_posix.c → endpoint_pair_posix.cc} +3 -3
- data/src/core/lib/iomgr/{endpoint_pair_uv.c → endpoint_pair_uv.cc} +2 -2
- data/src/core/lib/iomgr/{endpoint_pair_windows.c → endpoint_pair_windows.cc} +6 -6
- data/src/core/lib/iomgr/{error.c → error.cc} +125 -124
- data/src/core/lib/iomgr/error.h +32 -27
- data/src/core/lib/iomgr/error_internal.h +11 -2
- data/src/core/lib/iomgr/{ev_epoll1_linux.c → ev_epoll1_linux.cc} +214 -215
- data/src/core/lib/iomgr/ev_epoll1_linux.h +9 -1
- data/src/core/lib/iomgr/ev_epollex_linux.cc +1488 -0
- data/src/core/lib/iomgr/ev_epollex_linux.h +9 -1
- data/src/core/lib/iomgr/{ev_epollsig_linux.c → ev_epollsig_linux.cc} +304 -305
- data/src/core/lib/iomgr/ev_epollsig_linux.h +12 -4
- data/src/core/lib/iomgr/{ev_poll_posix.c → ev_poll_posix.cc} +272 -283
- data/src/core/lib/iomgr/ev_poll_posix.h +10 -2
- data/src/core/lib/iomgr/ev_posix.cc +288 -0
- data/src/core/lib/iomgr/ev_posix.h +75 -67
- data/src/core/lib/iomgr/{ev_windows.c → ev_windows.cc} +2 -2
- data/src/core/lib/iomgr/exec_ctx.cc +177 -0
- data/src/core/lib/iomgr/exec_ctx.h +35 -13
- data/src/core/lib/iomgr/{executor.c → executor.cc} +34 -35
- data/src/core/lib/iomgr/executor.h +12 -4
- data/src/core/lib/iomgr/{fork_posix.c → fork_posix.cc} +0 -0
- data/src/core/lib/iomgr/{fork_windows.c → fork_windows.cc} +0 -0
- data/src/core/lib/iomgr/gethostname.h +9 -1
- data/src/core/lib/iomgr/{gethostname_fallback.c → gethostname_fallback.cc} +2 -1
- data/src/core/lib/iomgr/{gethostname_host_name_max.c → gethostname_host_name_max.cc} +4 -3
- data/src/core/lib/iomgr/{gethostname_sysconf.c → gethostname_sysconf.cc} +3 -2
- data/src/core/lib/iomgr/{iocp_windows.c → iocp_windows.cc} +23 -25
- data/src/core/lib/iomgr/iocp_windows.h +17 -3
- data/src/core/lib/iomgr/{iomgr.c → iomgr.cc} +25 -19
- data/src/core/lib/iomgr/iomgr.h +11 -3
- data/src/core/lib/iomgr/iomgr_internal.h +13 -5
- data/src/core/lib/iomgr/{iomgr_posix.c → iomgr_posix.cc} +0 -1
- data/src/core/lib/iomgr/{iomgr_uv.c → iomgr_uv.cc} +1 -1
- data/src/core/lib/iomgr/iomgr_uv.h +8 -0
- data/src/core/lib/iomgr/{iomgr_windows.c → iomgr_windows.cc} +0 -0
- data/src/core/lib/iomgr/{is_epollexclusive_available.c → is_epollexclusive_available.cc} +1 -1
- data/src/core/lib/iomgr/is_epollexclusive_available.h +8 -0
- data/src/core/lib/iomgr/{load_file.c → load_file.cc} +12 -12
- data/src/core/lib/iomgr/load_file.h +2 -2
- data/src/core/lib/iomgr/{lockfree_event.c → lockfree_event.cc} +76 -68
- data/src/core/lib/iomgr/lockfree_event.h +30 -11
- data/src/core/lib/iomgr/{network_status_tracker.c → network_status_tracker.cc} +3 -2
- data/src/core/lib/iomgr/network_status_tracker.h +2 -2
- data/src/core/lib/iomgr/{polling_entity.c → polling_entity.cc} +18 -18
- data/src/core/lib/iomgr/polling_entity.h +21 -13
- data/src/core/lib/iomgr/pollset.h +17 -11
- data/src/core/lib/iomgr/pollset_set.h +23 -15
- data/src/core/lib/iomgr/{pollset_set_uv.c → pollset_set_uv.cc} +0 -0
- data/src/core/lib/iomgr/{pollset_set_windows.c → pollset_set_windows.cc} +0 -0
- data/src/core/lib/iomgr/{pollset_uv.c → pollset_uv.cc} +31 -29
- data/src/core/lib/iomgr/pollset_uv.h +8 -0
- data/src/core/lib/iomgr/{pollset_windows.c → pollset_windows.cc} +24 -24
- data/src/core/lib/iomgr/pollset_windows.h +17 -4
- data/src/core/lib/iomgr/port.h +10 -0
- data/src/core/lib/iomgr/resolve_address.h +18 -10
- data/src/core/lib/iomgr/{resolve_address_posix.c → resolve_address_posix.cc} +40 -40
- data/src/core/lib/iomgr/{resolve_address_uv.c → resolve_address_uv.cc} +61 -56
- data/src/core/lib/iomgr/{resolve_address_windows.c → resolve_address_windows.cc} +36 -34
- data/src/core/lib/iomgr/{resource_quota.c → resource_quota.cc} +209 -180
- data/src/core/lib/iomgr/resource_quota.h +45 -37
- data/src/core/lib/iomgr/{sockaddr_utils.c → sockaddr_utils.cc} +61 -61
- data/src/core/lib/iomgr/sockaddr_utils.h +23 -15
- data/src/core/lib/iomgr/sockaddr_windows.h +6 -0
- data/src/core/lib/iomgr/{socket_factory_posix.c → socket_factory_posix.cc} +20 -20
- data/src/core/lib/iomgr/socket_factory_posix.h +15 -15
- data/src/core/lib/iomgr/{socket_mutator.c → socket_mutator.cc} +18 -18
- data/src/core/lib/iomgr/socket_mutator.h +11 -11
- data/src/core/lib/iomgr/socket_utils.h +9 -1
- data/src/core/lib/iomgr/{socket_utils_common_posix.c → socket_utils_common_posix.cc} +28 -28
- data/src/core/lib/iomgr/{socket_utils_linux.c → socket_utils_linux.cc} +3 -3
- data/src/core/lib/iomgr/{socket_utils_posix.c → socket_utils_posix.cc} +3 -3
- data/src/core/lib/iomgr/socket_utils_posix.h +26 -18
- data/src/core/lib/iomgr/{socket_utils_uv.c → socket_utils_uv.cc} +1 -1
- data/src/core/lib/iomgr/{socket_utils_windows.c → socket_utils_windows.cc} +2 -2
- data/src/core/lib/iomgr/{socket_windows.c → socket_windows.cc} +18 -18
- data/src/core/lib/iomgr/socket_windows.h +26 -13
- data/src/core/lib/iomgr/tcp_client.h +14 -6
- data/src/core/lib/iomgr/{tcp_client_posix.c → tcp_client_posix.cc} +69 -70
- data/src/core/lib/iomgr/tcp_client_posix.h +11 -3
- data/src/core/lib/iomgr/{tcp_client_uv.c → tcp_client_uv.cc} +47 -48
- data/src/core/lib/iomgr/{tcp_client_windows.c → tcp_client_windows.cc} +46 -44
- data/src/core/lib/iomgr/{tcp_posix.c → tcp_posix.cc} +198 -175
- data/src/core/lib/iomgr/tcp_posix.h +15 -7
- data/src/core/lib/iomgr/tcp_server.h +31 -23
- data/src/core/lib/iomgr/{tcp_server_posix.c → tcp_server_posix.cc} +78 -77
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +27 -19
- data/src/core/lib/iomgr/{tcp_server_utils_posix_common.c → tcp_server_utils_posix_common.cc} +27 -27
- data/src/core/lib/iomgr/{tcp_server_utils_posix_ifaddrs.c → tcp_server_utils_posix_ifaddrs.cc} +25 -25
- data/src/core/lib/iomgr/{tcp_server_utils_posix_noifaddrs.c → tcp_server_utils_posix_noifaddrs.cc} +2 -2
- data/src/core/lib/iomgr/{tcp_server_uv.c → tcp_server_uv.cc} +133 -105
- data/src/core/lib/iomgr/{tcp_server_windows.c → tcp_server_windows.cc} +81 -77
- data/src/core/lib/iomgr/tcp_uv.cc +420 -0
- data/src/core/lib/iomgr/tcp_uv.h +18 -4
- data/src/core/lib/iomgr/{tcp_windows.c → tcp_windows.cc} +90 -79
- data/src/core/lib/iomgr/tcp_windows.h +17 -4
- data/src/core/lib/iomgr/{time_averaged_stats.c → time_averaged_stats.cc} +0 -0
- data/src/core/lib/iomgr/time_averaged_stats.h +8 -0
- data/src/core/lib/iomgr/timer.h +16 -9
- data/src/core/lib/iomgr/{timer_generic.c → timer_generic.cc} +130 -171
- data/src/core/lib/iomgr/timer_generic.h +4 -4
- data/src/core/lib/iomgr/{timer_heap.c → timer_heap.cc} +20 -21
- data/src/core/lib/iomgr/timer_heap.h +16 -8
- data/src/core/lib/iomgr/{timer_manager.c → timer_manager.cc} +54 -52
- data/src/core/lib/iomgr/timer_manager.h +8 -0
- data/src/core/lib/iomgr/{timer_uv.c → timer_uv.cc} +22 -24
- data/src/core/lib/iomgr/timer_uv.h +2 -2
- data/src/core/lib/iomgr/{udp_server.c → udp_server.cc} +75 -75
- data/src/core/lib/iomgr/udp_server.h +25 -17
- data/src/core/lib/iomgr/{unix_sockets_posix.c → unix_sockets_posix.cc} +22 -21
- data/src/core/lib/iomgr/unix_sockets_posix.h +14 -6
- data/src/core/lib/iomgr/{unix_sockets_posix_noop.c → unix_sockets_posix_noop.cc} +5 -5
- data/src/core/lib/iomgr/{wakeup_fd_cv.c → wakeup_fd_cv.cc} +2 -2
- data/src/core/lib/iomgr/wakeup_fd_cv.h +10 -0
- data/src/core/lib/iomgr/{wakeup_fd_eventfd.c → wakeup_fd_eventfd.cc} +0 -0
- data/src/core/lib/iomgr/{wakeup_fd_nospecial.c → wakeup_fd_nospecial.cc} +0 -0
- data/src/core/lib/iomgr/{wakeup_fd_pipe.c → wakeup_fd_pipe.cc} +1 -0
- data/src/core/lib/iomgr/wakeup_fd_pipe.h +9 -1
- data/src/core/lib/iomgr/{wakeup_fd_posix.c → wakeup_fd_posix.cc} +6 -7
- data/src/core/lib/iomgr/wakeup_fd_posix.h +8 -0
- data/src/core/lib/json/{json.c → json.cc} +0 -0
- data/src/core/lib/json/json.h +8 -0
- data/src/core/lib/json/{json_reader.c → json_reader.cc} +18 -18
- data/src/core/lib/json/json_reader.h +26 -18
- data/src/core/lib/json/{json_string.c → json_string.cc} +57 -57
- data/src/core/lib/json/{json_writer.c → json_writer.cc} +20 -20
- data/src/core/lib/json/json_writer.h +23 -15
- data/src/core/lib/profiling/{basic_timers.c → basic_timers.cc} +34 -34
- data/src/core/lib/profiling/{stap_timers.c → stap_timers.cc} +5 -5
- data/src/core/lib/profiling/timers.h +6 -6
- data/src/core/lib/security/context/{security_context.c → security_context.cc} +98 -95
- data/src/core/lib/security/context/security_context.h +27 -29
- data/src/core/lib/security/credentials/composite/{composite_credentials.c → composite_credentials.cc} +79 -73
- data/src/core/lib/security/credentials/composite/composite_credentials.h +17 -9
- data/src/core/lib/security/credentials/{credentials.c → credentials.cc} +97 -92
- data/src/core/lib/security/credentials/credentials.h +83 -75
- data/src/core/lib/security/credentials/{credentials_metadata.c → credentials_metadata.cc} +7 -6
- data/src/core/lib/security/credentials/fake/{fake_credentials.c → fake_credentials.cc} +39 -36
- data/src/core/lib/security/credentials/fake/fake_credentials.h +13 -5
- data/src/core/lib/security/credentials/google_default/{credentials_generic.c → credentials_generic.cc} +5 -5
- data/src/core/lib/security/credentials/google_default/{google_default_credentials.c → google_default_credentials.cc} +55 -55
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +9 -1
- data/src/core/lib/security/credentials/iam/{iam_credentials.c → iam_credentials.cc} +19 -18
- data/src/core/lib/security/credentials/jwt/{json_token.c → json_token.cc} +80 -75
- data/src/core/lib/security/credentials/jwt/json_token.h +23 -15
- data/src/core/lib/security/credentials/jwt/{jwt_credentials.c → jwt_credentials.cc} +45 -41
- data/src/core/lib/security/credentials/jwt/jwt_credentials.h +11 -3
- data/src/core/lib/security/credentials/jwt/{jwt_verifier.c → jwt_verifier.cc} +262 -252
- data/src/core/lib/security/credentials/jwt/jwt_verifier.h +38 -30
- data/src/core/lib/security/credentials/oauth2/{oauth2_credentials.c → oauth2_credentials.cc} +138 -141
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +30 -22
- data/src/core/lib/security/credentials/plugin/{plugin_credentials.c → plugin_credentials.cc} +52 -53
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +7 -7
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +344 -0
- data/src/core/lib/security/credentials/ssl/ssl_credentials.h +27 -0
- data/src/core/lib/security/transport/auth_filters.h +16 -0
- data/src/core/lib/security/transport/{client_auth_filter.c → client_auth_filter.cc} +127 -115
- data/src/core/lib/security/transport/{lb_targets_info.c → lb_targets_info.cc} +16 -13
- data/src/core/lib/security/transport/lb_targets_info.h +11 -3
- data/src/core/lib/security/transport/{secure_endpoint.c → secure_endpoint.cc} +84 -76
- data/src/core/lib/security/transport/secure_endpoint.h +13 -5
- data/src/core/lib/security/transport/security_connector.cc +1121 -0
- data/src/core/lib/security/transport/security_connector.h +97 -79
- data/src/core/lib/security/transport/{security_handshaker.c → security_handshaker.cc} +139 -132
- data/src/core/lib/security/transport/security_handshaker.h +11 -3
- data/src/core/lib/security/transport/{server_auth_filter.c → server_auth_filter.cc} +68 -68
- data/src/core/lib/security/transport/{tsi_error.c → tsi_error.cc} +1 -1
- data/src/core/lib/security/transport/tsi_error.h +9 -1
- data/src/core/lib/security/util/{json_util.c → json_util.cc} +11 -11
- data/src/core/lib/security/util/json_util.h +12 -4
- data/src/core/lib/slice/{b64.c → b64.cc} +15 -15
- data/src/core/lib/slice/b64.h +12 -4
- data/src/core/lib/slice/{percent_encoding.c → percent_encoding.cc} +15 -15
- data/src/core/lib/slice/percent_encoding.h +11 -3
- data/src/core/lib/slice/{slice.c → slice.cc} +64 -64
- data/src/core/lib/slice/{slice_buffer.c → slice_buffer.cc} +38 -38
- data/src/core/lib/slice/{slice_hash_table.c → slice_hash_table.cc} +7 -7
- data/src/core/lib/slice/slice_hash_table.h +19 -11
- data/src/core/lib/slice/{slice_intern.c → slice_intern.cc} +35 -34
- data/src/core/lib/slice/slice_internal.h +17 -6
- data/src/core/lib/slice/{slice_string_helpers.c → slice_string_helpers.cc} +9 -9
- data/src/core/lib/slice/slice_string_helpers.h +3 -3
- data/src/core/lib/support/abstract.h +29 -0
- data/src/core/lib/support/{alloc.c → alloc.cc} +22 -22
- data/src/core/lib/support/{arena.c → arena.cc} +12 -12
- data/src/core/lib/support/arena.h +11 -3
- data/src/core/lib/support/{atm.c → atm.cc} +1 -1
- data/src/core/lib/support/{avl.c → avl.cc} +71 -70
- data/src/core/lib/support/{cmdline.c → cmdline.cc} +62 -62
- data/src/core/lib/support/{cpu_iphone.c → cpu_iphone.cc} +2 -0
- data/src/core/lib/support/{cpu_linux.c → cpu_linux.cc} +10 -0
- data/src/core/lib/support/{cpu_posix.c → cpu_posix.cc} +27 -4
- data/src/core/lib/support/{cpu_windows.c → cpu_windows.cc} +1 -0
- data/src/core/lib/support/env.h +3 -3
- data/src/core/lib/support/{env_linux.c → env_linux.cc} +11 -11
- data/src/core/lib/support/{env_posix.c → env_posix.cc} +4 -4
- data/src/core/lib/support/{env_windows.c → env_windows.cc} +5 -5
- data/src/core/lib/support/{fork.c → fork.cc} +2 -2
- data/src/core/lib/support/{histogram.c → histogram.cc} +25 -26
- data/src/core/lib/support/{host_port.c → host_port.cc} +16 -16
- data/src/core/lib/support/{log.c → log.cc} +8 -8
- data/src/core/lib/support/{log_android.c → log_android.cc} +7 -7
- data/src/core/lib/support/{log_linux.c → log_linux.cc} +8 -8
- data/src/core/lib/support/{log_posix.c → log_posix.cc} +9 -10
- data/src/core/lib/support/{log_windows.c → log_windows.cc} +7 -7
- data/src/core/lib/support/manual_constructor.h +211 -0
- data/src/core/lib/support/memory.h +41 -0
- data/src/core/lib/support/mpscq.cc +114 -0
- data/src/core/lib/support/mpscq.h +45 -7
- data/src/core/lib/support/{murmur_hash.c → murmur_hash.cc} +9 -12
- data/src/core/lib/support/murmur_hash.h +9 -1
- data/src/core/lib/support/spinlock.h +8 -1
- data/src/core/lib/support/{string.c → string.cc} +56 -55
- data/src/core/lib/support/string.h +21 -21
- data/src/core/lib/support/{string_posix.c → string_posix.cc} +5 -4
- data/src/core/lib/support/{string_util_windows.c → string_util_windows.cc} +9 -6
- data/src/core/lib/support/{string_windows.c → string_windows.cc} +3 -2
- data/src/core/lib/support/string_windows.h +8 -0
- data/src/core/lib/support/{subprocess_posix.c → subprocess_posix.cc} +13 -13
- data/src/core/lib/support/{subprocess_windows.c → subprocess_windows.cc} +9 -9
- data/src/core/lib/support/{sync.c → sync.cc} +22 -22
- data/src/core/lib/support/{sync_posix.c → sync_posix.cc} +6 -2
- data/src/core/lib/support/{sync_windows.c → sync_windows.cc} +14 -14
- data/src/core/lib/support/{thd.c → thd.cc} +0 -0
- data/src/core/lib/support/{thd_posix.c → thd_posix.cc} +10 -10
- data/src/core/lib/support/{thd_windows.c → thd_windows.cc} +10 -10
- data/src/core/lib/support/{time.c → time.cc} +0 -0
- data/src/core/lib/support/{time_posix.c → time_posix.cc} +5 -6
- data/src/core/lib/support/{time_precise.c → time_precise.cc} +6 -4
- data/src/core/lib/support/time_precise.h +9 -1
- data/src/core/lib/support/{time_windows.c → time_windows.cc} +2 -3
- data/src/core/lib/support/{tls_pthread.c → tls_pthread.cc} +2 -2
- data/src/core/lib/support/tmpfile.h +1 -1
- data/src/core/lib/support/{tmpfile_msys.c → tmpfile_msys.cc} +2 -2
- data/src/core/lib/support/{tmpfile_posix.c → tmpfile_posix.cc} +7 -7
- data/src/core/lib/support/{tmpfile_windows.c → tmpfile_windows.cc} +2 -2
- data/src/core/lib/support/{wrap_memcpy.c → wrap_memcpy.cc} +4 -2
- data/src/core/lib/surface/{alarm.c → alarm.cc} +32 -31
- data/src/core/lib/surface/alarm_internal.h +10 -2
- data/src/core/lib/surface/{api_trace.c → api_trace.cc} +1 -1
- data/src/core/lib/surface/api_trace.h +2 -2
- data/src/core/lib/surface/{byte_buffer.c → byte_buffer.cc} +13 -13
- data/src/core/lib/surface/{byte_buffer_reader.c → byte_buffer_reader.cc} +9 -9
- data/src/core/lib/surface/{call.c → call.cc} +379 -372
- data/src/core/lib/surface/call.h +37 -38
- data/src/core/lib/surface/{call_details.c → call_details.cc} +0 -0
- data/src/core/lib/surface/{call_log_batch.c → call_log_batch.cc} +13 -11
- data/src/core/lib/surface/call_test_only.h +5 -5
- data/src/core/lib/surface/{channel.c → channel.cc} +94 -95
- data/src/core/lib/surface/channel.h +29 -21
- data/src/core/lib/surface/{channel_init.c → channel_init.cc} +13 -13
- data/src/core/lib/surface/channel_init.h +6 -6
- data/src/core/lib/surface/{channel_ping.c → channel_ping.cc} +12 -12
- data/src/core/lib/surface/{channel_stack_type.c → channel_stack_type.cc} +1 -1
- data/src/core/lib/surface/channel_stack_type.h +9 -1
- data/src/core/lib/surface/{completion_queue.c → completion_queue.cc} +416 -379
- data/src/core/lib/surface/completion_queue.h +29 -29
- data/src/core/lib/surface/{completion_queue_factory.c → completion_queue_factory.cc} +1 -1
- data/src/core/lib/surface/completion_queue_factory.h +8 -0
- data/src/core/lib/surface/{event_string.c → event_string.cc} +9 -9
- data/src/core/lib/surface/event_string.h +9 -1
- data/src/core/lib/surface/{init.c → init.cc} +16 -39
- data/src/core/lib/surface/init.h +8 -0
- data/src/core/lib/surface/{init_secure.c → init_secure.cc} +12 -25
- data/src/core/lib/surface/lame_client.cc +38 -40
- data/src/core/lib/surface/lame_client.h +8 -0
- data/src/core/lib/surface/{metadata_array.c → metadata_array.cc} +0 -0
- data/src/core/lib/surface/{server.c → server.cc} +340 -404
- data/src/core/lib/surface/server.h +22 -14
- data/src/core/lib/surface/{validate_metadata.c → validate_metadata.cc} +10 -9
- data/src/core/lib/surface/validate_metadata.h +10 -2
- data/src/core/lib/surface/{version.c → version.cc} +2 -2
- data/src/core/lib/transport/bdp_estimator.cc +84 -0
- data/src/core/lib/transport/bdp_estimator.h +67 -42
- data/src/core/lib/transport/{byte_stream.c → byte_stream.cc} +51 -51
- data/src/core/lib/transport/byte_stream.h +41 -33
- data/src/core/lib/transport/{connectivity_state.c → connectivity_state.cc} +36 -40
- data/src/core/lib/transport/connectivity_state.h +29 -21
- data/src/core/lib/transport/{error_utils.c → error_utils.cc} +26 -22
- data/src/core/lib/transport/error_utils.h +18 -6
- data/src/core/lib/transport/{metadata.c → metadata.cc} +92 -88
- data/src/core/lib/transport/metadata.h +22 -20
- data/src/core/lib/transport/{metadata_batch.c → metadata_batch.cc} +78 -79
- data/src/core/lib/transport/metadata_batch.h +46 -45
- data/src/core/lib/transport/pid_controller.cc +48 -0
- data/src/core/lib/transport/pid_controller.h +84 -32
- data/src/core/lib/transport/{service_config.c → service_config.cc} +66 -48
- data/src/core/lib/transport/service_config.h +11 -2
- data/src/core/lib/transport/{static_metadata.c → static_metadata.cc} +2 -2
- data/src/core/lib/transport/static_metadata.h +30 -23
- data/src/core/lib/transport/{status_conversion.c → status_conversion.cc} +4 -3
- data/src/core/lib/transport/status_conversion.h +12 -2
- data/src/core/lib/transport/{timeout_encoding.c → timeout_encoding.cc} +28 -61
- data/src/core/lib/transport/timeout_encoding.h +11 -2
- data/src/core/lib/transport/{transport.c → transport.cc} +79 -79
- data/src/core/lib/transport/transport.h +78 -80
- data/src/core/lib/transport/transport_impl.h +27 -19
- data/src/core/lib/transport/{transport_op_string.c → transport_op_string.cc} +32 -30
- data/src/core/plugin_registry/{grpc_plugin_registry.c → grpc_plugin_registry.cc} +34 -38
- data/src/core/tsi/{fake_transport_security.c → fake_transport_security.cc} +141 -132
- data/src/core/tsi/fake_transport_security.h +5 -5
- data/src/core/tsi/{gts_transport_security.c → gts_transport_security.cc} +4 -4
- data/src/core/tsi/gts_transport_security.h +11 -3
- data/src/core/tsi/{ssl_transport_security.c → ssl_transport_security.cc} +309 -300
- data/src/core/tsi/ssl_transport_security.h +25 -25
- data/src/core/tsi/ssl_types.h +8 -0
- data/src/core/tsi/{transport_security.c → transport_security.cc} +94 -87
- data/src/core/tsi/transport_security.h +55 -55
- data/src/core/tsi/{transport_security_adapter.c → transport_security_adapter.cc} +58 -55
- data/src/core/tsi/transport_security_adapter.h +2 -2
- data/src/core/tsi/{transport_security_grpc.c → transport_security_grpc.cc} +21 -21
- data/src/core/tsi/transport_security_grpc.h +19 -19
- data/src/core/tsi/transport_security_interface.h +41 -41
- data/src/ruby/ext/grpc/extconf.rb +4 -2
- data/src/ruby/ext/grpc/rb_byte_buffer.c +5 -5
- data/src/ruby/ext/grpc/rb_byte_buffer.h +2 -2
- data/src/ruby/ext/grpc/rb_call.c +41 -42
- data/src/ruby/ext/grpc/rb_call.h +6 -6
- data/src/ruby/ext/grpc/rb_call_credentials.c +30 -30
- data/src/ruby/ext/grpc/rb_channel.c +87 -87
- data/src/ruby/ext/grpc/rb_channel_credentials.c +23 -23
- data/src/ruby/ext/grpc/rb_completion_queue.c +11 -11
- data/src/ruby/ext/grpc/rb_completion_queue.h +3 -3
- data/src/ruby/ext/grpc/rb_compression_options.c +20 -20
- data/src/ruby/ext/grpc/rb_event_thread.c +14 -14
- data/src/ruby/ext/grpc/rb_event_thread.h +1 -1
- data/src/ruby/ext/grpc/rb_grpc.c +8 -8
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +16 -58
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +242 -306
- data/src/ruby/ext/grpc/rb_server.c +23 -23
- data/src/ruby/ext/grpc/rb_server_credentials.c +13 -13
- data/src/ruby/lib/grpc/generic/rpc_server.rb +25 -12
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/grpc/health/checker.rb +14 -0
- data/src/ruby/spec/pb/health/checker_spec.rb +29 -0
- data/third_party/cares/config_freebsd/ares_config.h +502 -0
- data/third_party/cares/config_openbsd/ares_config.h +502 -0
- metadata +302 -328
- data/src/core/ext/census/aggregation.h +0 -51
- data/src/core/ext/census/base_resources.c +0 -56
- data/src/core/ext/census/base_resources.h +0 -24
- data/src/core/ext/census/census_interface.h +0 -61
- data/src/core/ext/census/census_rpc_stats.h +0 -86
- data/src/core/ext/census/context.c +0 -496
- data/src/core/ext/census/gen/census.pb.c +0 -161
- data/src/core/ext/census/gen/census.pb.h +0 -280
- data/src/core/ext/census/gen/trace_context.pb.c +0 -39
- data/src/core/ext/census/gen/trace_context.pb.h +0 -78
- data/src/core/ext/census/grpc_filter.c +0 -196
- data/src/core/ext/census/grpc_plugin.c +0 -70
- data/src/core/ext/census/initialize.c +0 -51
- data/src/core/ext/census/intrusive_hash_map.c +0 -305
- data/src/core/ext/census/intrusive_hash_map.h +0 -152
- data/src/core/ext/census/intrusive_hash_map_internal.h +0 -48
- data/src/core/ext/census/mlog.c +0 -586
- data/src/core/ext/census/mlog.h +0 -80
- data/src/core/ext/census/operation.c +0 -48
- data/src/core/ext/census/placeholders.c +0 -49
- data/src/core/ext/census/resource.c +0 -303
- data/src/core/ext/census/resource.h +0 -48
- data/src/core/ext/census/rpc_metric_id.h +0 -36
- data/src/core/ext/census/trace_context.c +0 -71
- data/src/core/ext/census/trace_context.h +0 -56
- data/src/core/ext/census/trace_label.h +0 -46
- data/src/core/ext/census/trace_propagation.h +0 -48
- data/src/core/ext/census/trace_status.h +0 -30
- data/src/core/ext/census/trace_string.h +0 -35
- data/src/core/ext/census/tracing.c +0 -55
- data/src/core/ext/census/tracing.h +0 -109
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +0 -714
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +0 -924
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.c +0 -60
- data/src/core/ext/transport/chttp2/transport/flow_control.c +0 -502
- data/src/core/ext/transport/chttp2/transport/writing.c +0 -534
- data/src/core/lib/debug/trace.c +0 -146
- data/src/core/lib/iomgr/closure.c +0 -219
- data/src/core/lib/iomgr/ev_epollex_linux.c +0 -1461
- data/src/core/lib/iomgr/ev_posix.c +0 -266
- data/src/core/lib/iomgr/exec_ctx.c +0 -113
- data/src/core/lib/iomgr/tcp_uv.c +0 -381
- data/src/core/lib/security/credentials/ssl/ssl_credentials.c +0 -194
- data/src/core/lib/security/transport/security_connector.c +0 -914
- data/src/core/lib/support/backoff.c +0 -72
- data/src/core/lib/support/backoff.h +0 -56
- data/src/core/lib/support/mpscq.c +0 -79
- data/src/core/lib/support/stack_lockfree.c +0 -137
- data/src/core/lib/support/stack_lockfree.h +0 -38
- data/src/core/lib/transport/bdp_estimator.c +0 -110
- data/src/core/lib/transport/pid_controller.c +0 -63
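
The pattern running through this file list is gRPC core's migration from C to C++: nearly every `src/core` translation unit is renamed from `.c` to `.cc`, the census extension is removed, the backoff code moves from `src/core/lib/support` to `src/core/lib/backoff`, and public core headers gain `extern "C"` guards so existing C callers keep linking against unmangled symbols. A minimal sketch of the guard pattern the header hunks below add (the declaration is a made-up placeholder, not a real gRPC symbol):

```cpp
// Sketch of the extern "C" guard pattern added across core headers in 1.8.0.
// Compiled as C++, the declarations keep C linkage, so the now-C++ core and
// pre-existing C code resolve to the same unmangled symbols.
#ifdef __cplusplus
extern "C" {
#endif

void grpc_example_core_fn(void);  // placeholder declaration, not a real API

#ifdef __cplusplus
}
#endif
```

Excerpts from the rendered diff follow.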
data/src/core/ext/filters/client_channel/lb_policy.h:

```diff
@@ -23,81 +23,83 @@
 #include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/transport/connectivity_state.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /** A load balancing policy: specified by a vtable and a struct (which
     is expected to be extended to contain some parameters) */
 typedef struct grpc_lb_policy grpc_lb_policy;
 typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
 typedef struct grpc_lb_policy_args grpc_lb_policy_args;
 
-#ifndef NDEBUG
-extern grpc_tracer_flag grpc_trace_lb_policy_refcount;
-#endif
+extern grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
 
 struct grpc_lb_policy {
-  const grpc_lb_policy_vtable *vtable;
+  const grpc_lb_policy_vtable* vtable;
   gpr_atm ref_pair;
   /* owned pointer to interested parties in load balancing decisions */
-  grpc_pollset_set *interested_parties;
+  grpc_pollset_set* interested_parties;
   /* combiner under which lb_policy actions take place */
-  grpc_combiner *combiner;
+  grpc_combiner* combiner;
 };
 
 /** Extra arguments for an LB pick */
 typedef struct grpc_lb_policy_pick_args {
   /** Initial metadata associated with the picking call. */
-  grpc_metadata_batch *initial_metadata;
+  grpc_metadata_batch* initial_metadata;
   /** Bitmask used for selective cancelling. See \a
    * grpc_lb_policy_cancel_picks() and \a GRPC_INITIAL_METADATA_* in
    * grpc_types.h */
   uint32_t initial_metadata_flags;
   /** Storage for LB token in \a initial_metadata, or NULL if not used */
-  grpc_linked_mdelem *lb_token_mdelem_storage;
+  grpc_linked_mdelem* lb_token_mdelem_storage;
 } grpc_lb_policy_pick_args;
 
 struct grpc_lb_policy_vtable {
-  void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
-  void (*shutdown_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+  void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
 
   /** \see grpc_lb_policy_pick */
-  int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                     const grpc_lb_policy_pick_args *pick_args,
-                     grpc_connected_subchannel **target,
-                     grpc_call_context_element *context, void **user_data,
-                     grpc_closure *on_complete);
+  int (*pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                     const grpc_lb_policy_pick_args* pick_args,
+                     grpc_connected_subchannel** target,
+                     grpc_call_context_element* context, void** user_data,
+                     grpc_closure* on_complete);
 
   /** \see grpc_lb_policy_cancel_pick */
-  void (*cancel_pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                             grpc_connected_subchannel **target,
-                             grpc_error *error);
+  void (*cancel_pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                             grpc_connected_subchannel** target,
+                             grpc_error* error);
 
   /** \see grpc_lb_policy_cancel_picks */
-  void (*cancel_picks_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
+  void (*cancel_picks_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
                               uint32_t initial_metadata_flags_mask,
                               uint32_t initial_metadata_flags_eq,
-                              grpc_error *error);
+                              grpc_error* error);
 
   /** \see grpc_lb_policy_ping_one */
-  void (*ping_one_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                          grpc_closure *closure);
+  void (*ping_one_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                          grpc_closure* closure);
 
   /** Try to enter a READY connectivity state */
-  void (*exit_idle_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+  void (*exit_idle_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
 
   /** check the current connectivity of the lb_policy */
   grpc_connectivity_state (*check_connectivity_locked)(
-      grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-      grpc_error **connectivity_error);
+      grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+      grpc_error** connectivity_error);
 
   /** call notify when the connectivity state of a channel changes from *state.
       Updates *state with the new state of the policy. Calling with a NULL \a
      state cancels the subscription. */
-  void (*notify_on_state_change_locked)(grpc_exec_ctx *exec_ctx,
-                                        grpc_lb_policy *policy,
-                                        grpc_connectivity_state *state,
-                                        grpc_closure *closure);
+  void (*notify_on_state_change_locked)(grpc_exec_ctx* exec_ctx,
+                                        grpc_lb_policy* policy,
+                                        grpc_connectivity_state* state,
+                                        grpc_closure* closure);
 
-  void (*update_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                        const grpc_lb_policy_args *args);
+  void (*update_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                        const grpc_lb_policy_args* args);
 };
 
 #ifndef NDEBUG
@@ -115,29 +117,29 @@ struct grpc_lb_policy_vtable {
   grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
 #define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
   grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
-void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
-                        const char *reason);
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                          const char *file, int line, const char *reason);
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
-                             const char *reason);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                               const char *file, int line, const char *reason);
+void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
+                        const char* reason);
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                          const char* file, int line, const char* reason);
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
+                             const char* reason);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                               const char* file, int line, const char* reason);
 #else
 #define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
 #define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
 #define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
 #define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
-void grpc_lb_policy_ref(grpc_lb_policy *policy);
-void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
-void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
+void grpc_lb_policy_ref(grpc_lb_policy* policy);
+void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
+void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
 #endif
 
 /** called by concrete implementations to initialize the base struct */
-void grpc_lb_policy_init(grpc_lb_policy *policy,
-                         const grpc_lb_policy_vtable *vtable,
-                         grpc_combiner *combiner);
+void grpc_lb_policy_init(grpc_lb_policy* policy,
+                         const grpc_lb_policy_vtable* vtable,
+                         grpc_combiner* combiner);
 
 /** Finds an appropriate subchannel for a call, based on \a pick_args.
 
@@ -156,52 +158,56 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
 
    Any IO should be done under the \a interested_parties \a grpc_pollset_set
    in the \a grpc_lb_policy struct. */
-int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                               const grpc_lb_policy_pick_args *pick_args,
-                               grpc_connected_subchannel **target,
-                               grpc_call_context_element *context,
-                               void **user_data, grpc_closure *on_complete);
+int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                               const grpc_lb_policy_pick_args* pick_args,
+                               grpc_connected_subchannel** target,
+                               grpc_call_context_element* context,
+                               void** user_data, grpc_closure* on_complete);
 
 /** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
     against one of the connected subchannels managed by \a policy. */
-void grpc_lb_policy_ping_one_locked(grpc_exec_ctx *exec_ctx,
-                                    grpc_lb_policy *policy,
-                                    grpc_closure *closure);
+void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
+                                    grpc_lb_policy* policy,
+                                    grpc_closure* closure);
 
 /** Cancel picks for \a target.
     The \a on_complete callback of the pending picks will be invoked with \a
    *target set to NULL. */
-void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
-                                       grpc_lb_policy *policy,
-                                       grpc_connected_subchannel **target,
-                                       grpc_error *error);
+void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
+                                       grpc_lb_policy* policy,
+                                       grpc_connected_subchannel** target,
+                                       grpc_error* error);
 
 /** Cancel all pending picks for which their \a initial_metadata_flags (as given
    in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
   when AND'd with \a initial_metadata_flags_mask */
-void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
-                                        grpc_lb_policy *policy,
+void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
+                                        grpc_lb_policy* policy,
                                         uint32_t initial_metadata_flags_mask,
                                         uint32_t initial_metadata_flags_eq,
-                                        grpc_error *error);
+                                        grpc_error* error);
 
 /** Try to enter a READY connectivity state */
-void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx *exec_ctx,
-                                     grpc_lb_policy *policy);
+void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
+                                     grpc_lb_policy* policy);
 
 /* Call notify when the connectivity state of a channel changes from \a *state.
  * Updates \a *state with the new state of the policy */
 void grpc_lb_policy_notify_on_state_change_locked(
-    grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-    grpc_connectivity_state *state, grpc_closure *closure);
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+    grpc_connectivity_state* state, grpc_closure* closure);
 
 grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
-    grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-    grpc_error **connectivity_error);
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+    grpc_error** connectivity_error);
 
 /** Update \a policy with \a lb_policy_args. */
-void grpc_lb_policy_update_locked(grpc_exec_ctx *exec_ctx,
-                                  grpc_lb_policy *policy,
-                                  const grpc_lb_policy_args *lb_policy_args);
+void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
+                                  grpc_lb_policy* policy,
+                                  const grpc_lb_policy_args* lb_policy_args);
+
+#ifdef __cplusplus
+}
+#endif
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H */
```
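
Besides the pointer-style reformat, the substantive change in this header is that the `#ifndef NDEBUG`-guarded C `grpc_tracer_flag` becomes a `grpc_core::DebugOnlyTraceFlag`, a C++ object that compiles to a no-op in release builds, so the declaration no longer needs the preprocessor guard. A hedged sketch of a call site (assuming the `enabled()` accessor of the 1.8 trace-flag classes; `log_ref` and its arguments are illustrative, not code from this diff):

```cpp
// Sketch: gating a refcount log on the DebugOnlyTraceFlag declared above.
// In NDEBUG builds DebugOnlyTraceFlag reports enabled() == false, so the
// branch and the log disappear; that is why the #ifndef NDEBUG guard around
// the declaration could be dropped.
static void log_ref(grpc_lb_policy* policy, int old_refs, const char* reason) {
  if (grpc_trace_lb_policy_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "LB_POLICY: %p ref %d -> %d (%s)", (void*)policy,
            old_refs, old_refs + 1, reason);
  }
}
```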
data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc:

```diff
@@ -25,31 +25,31 @@
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/profiling/timers.h"
 
-static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
-                                     grpc_channel_element *elem,
-                                     grpc_channel_element_args *args) {
+static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
+                                     grpc_channel_element* elem,
+                                     grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
-                                 grpc_channel_element *elem) {}
+static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
+                                 grpc_channel_element* elem) {}
 
 typedef struct {
   // Stats object to update.
-  grpc_grpclb_client_stats *client_stats;
+  grpc_grpclb_client_stats* client_stats;
   // State for intercepting send_initial_metadata.
   grpc_closure on_complete_for_send;
-  grpc_closure *original_on_complete_for_send;
+  grpc_closure* original_on_complete_for_send;
   bool send_initial_metadata_succeeded;
   // State for intercepting recv_initial_metadata.
   grpc_closure recv_initial_metadata_ready;
-  grpc_closure *original_recv_initial_metadata_ready;
+  grpc_closure* original_recv_initial_metadata_ready;
   bool recv_initial_metadata_succeeded;
 } call_data;
 
-static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
-                                 grpc_error *error) {
-  call_data *calld = (call_data *)arg;
+static void on_complete_for_send(grpc_exec_ctx* exec_ctx, void* arg,
+                                 grpc_error* error) {
+  call_data* calld = (call_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->send_initial_metadata_succeeded = true;
   }
@@ -57,9 +57,9 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
                    GRPC_ERROR_REF(error));
 }
 
-static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
-                                        grpc_error *error) {
-  call_data *calld = (call_data *)arg;
+static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
+                                        grpc_error* error) {
+  call_data* calld = (call_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->recv_initial_metadata_succeeded = true;
   }
@@ -67,25 +67,24 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
                    GRPC_ERROR_REF(error));
 }
 
-static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
-                                  grpc_call_element *elem,
-                                  const grpc_call_element_args *args) {
-  call_data *calld = (call_data *)elem->call_data;
+static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
+                                  grpc_call_element* elem,
+                                  const grpc_call_element_args* args) {
+  call_data* calld = (call_data*)elem->call_data;
   // Get stats object from context and take a ref.
-  GPR_ASSERT(args->context != NULL);
-  GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
+  GPR_ASSERT(args->context != nullptr);
+  GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr);
   calld->client_stats = grpc_grpclb_client_stats_ref(
-      (grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
-          .value);
+      (grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
   // Record call started.
   grpc_grpclb_client_stats_add_call_started(calld->client_stats);
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-                              const grpc_call_final_info *final_info,
-                              grpc_closure *ignored) {
-  call_data *calld = (call_data *)elem->call_data;
+static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+                              const grpc_call_final_info* final_info,
+                              grpc_closure* ignored) {
+  call_data* calld = (call_data*)elem->call_data;
   // Record call finished, optionally setting client_failed_to_send and
   // received.
   grpc_grpclb_client_stats_add_call_finished(
@@ -97,9 +96,9 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
 }
 
 static void start_transport_stream_op_batch(
-    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-    grpc_transport_stream_op_batch *batch) {
-  call_data *calld = (call_data *)elem->call_data;
+    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+    grpc_transport_stream_op_batch* batch) {
+  call_data* calld = (call_data*)elem->call_data;
   GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
   // Intercept send_initial_metadata.
   if (batch->send_initial_metadata) {
```
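
The `call_data` fields above (`on_complete_for_send`, `original_on_complete_for_send`) implement closure interception: the filter swaps the batch's completion callback for its own so it can record whether send_initial_metadata succeeded before chaining to the original. The swap itself falls outside the rendered hunks; a sketch of the body of the `if (batch->send_initial_metadata)` block under the 1.8-era closure API (assuming `GRPC_CLOSURE_INIT` with the `grpc_schedule_on_exec_ctx` scheduler, which this excerpt does not show):

```cpp
// Sketch of the interception set-up implied by the hunks above: stash the
// original on_complete, point the batch at our closure, and let
// on_complete_for_send() record success before re-running the original.
if (batch->send_initial_metadata) {
  calld->original_on_complete_for_send = batch->on_complete;
  GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, calld,
                    grpc_schedule_on_exec_ctx);
  batch->on_complete = &calld->on_complete_for_send;
}
```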
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -21,7 +21,15 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 extern const grpc_channel_filter grpc_client_load_reporting_filter;
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
-          */
+        */
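Note: this header hunk wraps the filter's exported symbol in C-linkage guards so that, mid-migration, the same header can be consumed both from remaining C translation units and from the newly renamed .cc files. The general shape of the guard (a sketch; the declaration is the one from the hunk above):

    #ifdef __cplusplus
    extern "C" {
    #endif

    /* visible with C linkage from both C and C++ */
    extern const grpc_channel_filter grpc_client_load_reporting_filter;

    #ifdef __cplusplus
    }
    #endif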
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -80,6 +80,7 @@
    headers. Therefore, sockaddr.h must always be included first */
 #include "src/core/lib/iomgr/sockaddr.h"
 
+#include <inttypes.h>
 #include <limits.h>
 #include <string.h>
 
@@ -102,6 +103,7 @@
 #include "src/core/ext/filters/client_channel/parse_address.h"
 #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
 #include "src/core/ext/filters/client_channel/subchannel_index.h"
+#include "src/core/lib/backoff/backoff.h"
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/iomgr/combiner.h"
@@ -111,7 +113,6 @@
 #include "src/core/lib/slice/slice_hash_table.h"
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/slice/slice_string_helpers.h"
-#include "src/core/lib/support/backoff.h"
 #include "src/core/lib/support/string.h"
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/surface/channel.h"
@@ -125,21 +126,21 @@
 #define GRPC_GRPCLB_RECONNECT_JITTER 0.2
 #define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
 
-grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
+grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
 
 /* add lb_token of selected subchannel (address) to the call's initial
  * metadata */
-static grpc_error *initial_metadata_add_lb_token(
-    grpc_exec_ctx *exec_ctx, grpc_metadata_batch *initial_metadata,
-    grpc_linked_mdelem *lb_token_mdelem_storage, grpc_mdelem lb_token) {
-  GPR_ASSERT(lb_token_mdelem_storage != NULL);
+static grpc_error* initial_metadata_add_lb_token(
+    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
+    grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
+  GPR_ASSERT(lb_token_mdelem_storage != nullptr);
   GPR_ASSERT(!GRPC_MDISNULL(lb_token));
   return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
                                       lb_token_mdelem_storage, lb_token);
 }
 
-static void destroy_client_stats(void *arg) {
-  grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
+static void destroy_client_stats(void* arg) {
+  grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
 }
 
 typedef struct wrapped_rr_closure_arg {
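Note: 1.8.0 replaces the C `grpc_tracer_flag`/`GRPC_TRACER_ON` machinery with the C++ `grpc_core::TraceFlag` class, a change visible throughout the rest of this diff. The usage pattern as it appears in these hunks (a sketch of the internal API, not a public one):

    grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");  // registers the "glb" tracer

    if (grpc_lb_glb_trace.enabled()) {  // was: GRPC_TRACER_ON(grpc_lb_glb_trace)
      gpr_log(GPR_INFO, "[grpclb %p] ...", glb_policy);
    }

The log messages also gain a uniform "[grpclb %p]" prefix, which is why most gpr_log call sites below are rewritten even when the surrounding logic is unchanged.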
@@ -148,75 +149,81 @@ typedef struct wrapped_rr_closure_arg {
 
   /* the original closure. Usually a on_complete/notify cb for pick() and ping()
    * calls against the internal RR instance, respectively. */
-  grpc_closure *wrapped_closure;
+  grpc_closure* wrapped_closure;
 
   /* the pick's initial metadata, kept in order to append the LB token for the
    * pick */
-  grpc_metadata_batch *initial_metadata;
+  grpc_metadata_batch* initial_metadata;
 
   /* the picked target, used to determine which LB token to add to the pick's
    * initial metadata */
-  grpc_connected_subchannel **target;
+  grpc_connected_subchannel** target;
 
   /* the context to be populated for the subchannel call */
-  grpc_call_context_element *context;
+  grpc_call_context_element* context;
 
   /* Stats for client-side load reporting. Note that this holds a
    * reference, which must be either passed on via context or unreffed. */
-  grpc_grpclb_client_stats *client_stats;
+  grpc_grpclb_client_stats* client_stats;
 
   /* the LB token associated with the pick */
   grpc_mdelem lb_token;
 
   /* storage for the lb token initial metadata mdelem */
-  grpc_linked_mdelem *lb_token_mdelem_storage;
+  grpc_linked_mdelem* lb_token_mdelem_storage;
 
   /* The RR instance related to the closure */
-  grpc_lb_policy *rr_policy;
+  grpc_lb_policy* rr_policy;
+
+  /* The grpclb instance that created the wrapping. This instance is not owned,
+   * reference counts are untouched. It's used only for logging purposes. */
+  grpc_lb_policy* glb_policy;
 
   /* heap memory to be freed upon closure execution. */
-  void *free_when_done;
+  void* free_when_done;
 } wrapped_rr_closure_arg;
 
 /* The \a on_complete closure passed as part of the pick requires keeping a
  * reference to its associated round robin instance. We wrap this closure in
  * order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
-                               grpc_error *error) {
-  wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
+static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
+                               grpc_error* error) {
+  wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
 
-  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+  GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
   GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
 
-  if (wc_arg->rr_policy != NULL) {
+  if (wc_arg->rr_policy != nullptr) {
     /* if *target is NULL, no pick has been made by the RR policy (eg, all
      * addresses failed to connect). There won't be any user_data/token
      * available */
-    if (*wc_arg->target != NULL) {
+    if (*wc_arg->target != nullptr) {
       if (!GRPC_MDISNULL(wc_arg->lb_token)) {
         initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
                                       wc_arg->lb_token_mdelem_storage,
                                       GRPC_MDELEM_REF(wc_arg->lb_token));
       } else {
-        gpr_log(GPR_ERROR,
-                "No LB token for connected subchannel pick %p (from RR "
-                "instance %p).",
-                (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
+        gpr_log(
+            GPR_ERROR,
+            "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
+            "instance %p).",
+            wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
         abort();
       }
       // Pass on client stats via context. Passes ownership of the reference.
-      GPR_ASSERT(wc_arg->client_stats != NULL);
+      GPR_ASSERT(wc_arg->client_stats != nullptr);
       wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
       wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
     } else {
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
     }
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
+              wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
   }
-  GPR_ASSERT(wc_arg->free_when_done != NULL);
+  GPR_ASSERT(wc_arg->free_when_done != nullptr);
   gpr_free(wc_arg->free_when_done);
 }
 
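Note: wrapped_rr_closure is the closure-wrapping technique described in the comment above it: each pending pick/ping holds a ref on the round-robin child, and the wrapper both runs the original callback and releases that ref. A simplified, self-contained sketch of the idea (hypothetical names and signatures, not the gRPC API):

    #include <cstdlib>

    // Hypothetical stand-in for the ref-counted RR policy handle.
    struct rr_policy;
    void rr_policy_unref(rr_policy*);  // assumed to exist for the sketch

    struct wrapper_arg {
      void (*wrapped)(void* ctx, int error);  // the original closure
      void* ctx;
      rr_policy* rr;  // ref taken when the wrapper was created
    };

    // Run the original callback, then release the RR ref and the wrapper itself.
    void wrapper_run(wrapper_arg* w, int error) {
      w->wrapped(w->ctx, error);
      if (w->rr != nullptr) rr_policy_unref(w->rr);
      free(w);  // "heap memory freed upon closure execution"
    }

The new glb_policy field added in this hunk exists only so the wrapper can emit the "[grpclb %p]" log prefix; it deliberately takes no reference.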
@@ -229,25 +236,25 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
  * order to correctly unref the RR policy instance upon completion of the pick.
  * See \a wrapped_rr_closure for details. */
 typedef struct pending_pick {
-  struct pending_pick *next;
+  struct pending_pick* next;
 
   /* original pick()'s arguments */
   grpc_lb_policy_pick_args pick_args;
 
   /* output argument where to store the pick()ed connected subchannel, or NULL
    * upon error. */
-  grpc_connected_subchannel **target;
+  grpc_connected_subchannel** target;
 
   /* args for wrapped_on_complete */
   wrapped_rr_closure_arg wrapped_on_complete_arg;
 } pending_pick;
 
-static void add_pending_pick(pending_pick **root,
-                             const grpc_lb_policy_pick_args *pick_args,
-                             grpc_connected_subchannel **target,
-                             grpc_call_context_element *context,
-                             grpc_closure *on_complete) {
-  pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
+static void add_pending_pick(pending_pick** root,
+                             const grpc_lb_policy_pick_args* pick_args,
+                             grpc_connected_subchannel** target,
+                             grpc_call_context_element* context,
+                             grpc_closure* on_complete) {
+  pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
   pp->next = *root;
   pp->pick_args = *pick_args;
   pp->target = target;
@@ -266,14 +273,14 @@ static void add_pending_pick(pending_pick **root,
 
 /* Same as the \a pending_pick struct but for ping operations */
 typedef struct pending_ping {
-  struct pending_ping *next;
+  struct pending_ping* next;
 
   /* args for wrapped_notify */
   wrapped_rr_closure_arg wrapped_notify_arg;
 } pending_ping;
 
-static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
-  pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
+static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
+  pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
   pping->wrapped_notify_arg.wrapped_closure = notify;
   pping->wrapped_notify_arg.free_when_done = pping;
   pping->next = *root;
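Note: add_pending_pick and add_pending_ping push onto intrusive singly linked lists headed in the glb policy (pending_picks / pending_pings): each entry is zero-allocated and linked at the head. A reduced sketch of the push, using plain calloc in place of gpr_zalloc and eliding the payload fields:

    #include <stdlib.h>

    typedef struct pending_ping {
      struct pending_ping* next;
      /* payload fields elided */
    } pending_ping;

    /* Head insertion: O(1) and needs no tail pointer; order is reversed
     * relative to arrival, which is fine for a flush-on-handover list. */
    static void add_pending_ping(pending_ping** root) {
      pending_ping* pping = (pending_ping*)calloc(1, sizeof(*pping));
      pping->next = *root;
      *root = pping;
    }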
@@ -293,9 +300,9 @@ typedef struct glb_lb_policy {
   grpc_lb_policy base;
 
   /** who the client is trying to communicate with */
-  const char *server_name;
-  grpc_client_channel_factory *cc_factory;
-  grpc_channel_args *args;
+  const char* server_name;
+  grpc_client_channel_factory* cc_factory;
+  grpc_channel_args* args;
 
   /** timeout in milliseconds for the LB call. 0 means no deadline. */
   int lb_call_timeout_ms;
@@ -305,13 +312,13 @@ typedef struct glb_lb_policy {
   int lb_fallback_timeout_ms;
 
   /** for communicating with the LB server */
-  grpc_channel *lb_channel;
+  grpc_channel* lb_channel;
 
   /** response generator to inject address updates into \a lb_channel */
-  grpc_fake_resolver_response_generator *response_generator;
+  grpc_fake_resolver_response_generator* response_generator;
 
   /** the RR policy to use of the backend servers returned by the LB server */
-  grpc_lb_policy *rr_policy;
+  grpc_lb_policy* rr_policy;
 
   bool started_picking;
 
@@ -323,7 +330,7 @@ typedef struct glb_lb_policy {
 
   /** stores the deserialized response from the LB. May be NULL until one such
    * response has arrived. */
-  grpc_grpclb_serverlist *serverlist;
+  grpc_grpclb_serverlist* serverlist;
 
   /** Index into serverlist for next pick.
    * If the server at this index is a drop, we return a drop.
@@ -331,22 +338,19 @@ typedef struct glb_lb_policy {
   size_t serverlist_index;
 
   /** stores the backend addresses from the resolver */
-  grpc_lb_addresses *fallback_backend_addresses;
+  grpc_lb_addresses* fallback_backend_addresses;
 
   /** list of picks that are waiting on RR's policy connectivity */
-  pending_pick *pending_picks;
+  pending_pick* pending_picks;
 
   /** list of pings that are waiting on RR's policy connectivity */
-  pending_ping *pending_pings;
+  pending_ping* pending_pings;
 
   bool shutting_down;
 
   /** are we currently updating lb_call? */
   bool updating_lb_call;
 
-  /** are we currently updating lb_channel? */
-  bool updating_lb_channel;
-
   /** are we already watching the LB channel's connectivity? */
   bool watching_lb_channel;
 
@@ -359,9 +363,6 @@ typedef struct glb_lb_policy {
   /** called upon changes to the LB channel's connectivity. */
   grpc_closure lb_channel_on_connectivity_changed;
 
-  /** args from the latest update received while already updating, or NULL */
-  grpc_lb_policy_args *pending_update_args;
-
   /************************************************************/
   /* client data associated with the LB server communication */
   /************************************************************/
@@ -378,7 +379,7 @@ typedef struct glb_lb_policy {
   /* LB fallback timer callback. */
   grpc_closure lb_on_fallback;
 
-  grpc_call *lb_call; /* streaming call to the LB server, */
+  grpc_call* lb_call; /* streaming call to the LB server, */
 
   grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
   grpc_metadata_array
@@ -386,17 +387,17 @@ typedef struct glb_lb_policy {
 
   /* what's being sent to the LB server. Note that its value may vary if the LB
    * server indicates a redirect. */
-  grpc_byte_buffer *lb_request_payload;
+  grpc_byte_buffer* lb_request_payload;
 
   /* response the LB server, if any. Processed in lb_on_response_received() */
-  grpc_byte_buffer *lb_response_payload;
+  grpc_byte_buffer* lb_response_payload;
 
   /* call status code and details, set in lb_on_server_status_received() */
   grpc_status_code lb_call_status;
   grpc_slice lb_call_status_details;
 
   /** LB call retry backoff state */
-  gpr_backoff lb_call_backoff_state;
+  grpc_backoff lb_call_backoff_state;
 
   /** LB call retry timer */
   grpc_timer lb_call_retry_timer;
@@ -408,9 +409,9 @@ typedef struct glb_lb_policy {
 
   /* Stats for client-side load reporting. Should be unreffed and
    * recreated whenever lb_call is replaced. */
-  grpc_grpclb_client_stats *client_stats;
+  grpc_grpclb_client_stats* client_stats;
   /* Interval and timer for next client load report. */
-  gpr_timespec client_stats_report_interval;
+  grpc_millis client_stats_report_interval;
   grpc_timer client_load_report_timer;
   bool client_load_report_timer_pending;
   bool last_client_load_report_counters_were_zero;
@@ -418,20 +419,20 @@ typedef struct glb_lb_policy {
    * completion of sending the load report. */
   grpc_closure client_load_report_closure;
   /* Client load report message payload. */
-  grpc_byte_buffer *client_load_report_payload;
+  grpc_byte_buffer* client_load_report_payload;
 } glb_lb_policy;
 
 /* Keeps track and reacts to changes in connectivity of the RR instance */
 struct rr_connectivity_data {
   grpc_closure on_change;
   grpc_connectivity_state state;
-  glb_lb_policy *glb_policy;
+  glb_lb_policy* glb_policy;
 };
 
-static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
+static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
                             bool log) {
   if (server->drop) return false;
-  const grpc_grpclb_ip_address *ip = &server->ip_address;
+  const grpc_grpclb_ip_address* ip = &server->ip_address;
   if (server->port >> 16 != 0) {
     if (log) {
       gpr_log(GPR_ERROR,
@@ -453,17 +454,17 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
 }
 
 /* vtable for LB tokens in grpc_lb_addresses. */
-static void *lb_token_copy(void *token) {
-  return token == NULL
-             ? NULL
-             : (void *)GRPC_MDELEM_REF((grpc_mdelem){(uintptr_t)token}).payload;
+static void* lb_token_copy(void* token) {
+  return token == nullptr
+             ? nullptr
+             : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
 }
-static void lb_token_destroy(grpc_exec_ctx *exec_ctx, void *token) {
-  if (token != NULL) {
-    GRPC_MDELEM_UNREF(exec_ctx, (grpc_mdelem){(uintptr_t)token});
+static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
+  if (token != nullptr) {
+    GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
   }
 }
-static int lb_token_cmp(void *token1, void *token2) {
+static int lb_token_cmp(void* token1, void* token2) {
   if (token1 > token2) return 1;
   if (token1 < token2) return -1;
   return 0;
@@ -471,23 +472,23 @@ static int lb_token_cmp(void *token1, void *token2) {
 static const grpc_lb_user_data_vtable lb_token_vtable = {
     lb_token_copy, lb_token_destroy, lb_token_cmp};
 
-static void parse_server(const grpc_grpclb_server *server,
-                         grpc_resolved_address *addr) {
+static void parse_server(const grpc_grpclb_server* server,
+                         grpc_resolved_address* addr) {
   memset(addr, 0, sizeof(*addr));
   if (server->drop) return;
   const uint16_t netorder_port = htons((uint16_t)server->port);
   /* the addresses are given in binary format (a in(6)_addr struct) in
    * server->ip_address.bytes. */
-  const grpc_grpclb_ip_address *ip = &server->ip_address;
+  const grpc_grpclb_ip_address* ip = &server->ip_address;
   if (ip->size == 4) {
     addr->len = sizeof(struct sockaddr_in);
-    struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
+    struct sockaddr_in* addr4 = (struct sockaddr_in*)&addr->addr;
     addr4->sin_family = AF_INET;
     memcpy(&addr4->sin_addr, ip->bytes, ip->size);
     addr4->sin_port = netorder_port;
   } else if (ip->size == 16) {
     addr->len = sizeof(struct sockaddr_in6);
-    struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
+    struct sockaddr_in6* addr6 = (struct sockaddr_in6*)&addr->addr;
     addr6->sin6_family = AF_INET6;
     memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
     addr6->sin6_port = netorder_port;
@@ -495,15 +496,15 @@ static void parse_server(const grpc_grpclb_server *server,
 }
 
 /* Returns addresses extracted from \a serverlist. */
-static grpc_lb_addresses *process_serverlist_locked(
-    grpc_exec_ctx *exec_ctx, const grpc_grpclb_serverlist *serverlist) {
+static grpc_lb_addresses* process_serverlist_locked(
+    grpc_exec_ctx* exec_ctx, const grpc_grpclb_serverlist* serverlist) {
   size_t num_valid = 0;
   /* first pass: count how many are valid in order to allocate the necessary
    * memory in a single block */
   for (size_t i = 0; i < serverlist->num_servers; ++i) {
     if (is_server_valid(serverlist->servers[i], i, true)) ++num_valid;
   }
-  grpc_lb_addresses *lb_addresses =
+  grpc_lb_addresses* lb_addresses =
       grpc_lb_addresses_create(num_valid, &lb_token_vtable);
   /* second pass: actually populate the addresses and LB tokens (aka user data
    * to the outside world) to be read by the RR policy during its creation.
@@ -512,14 +513,14 @@ static grpc_lb_addresses *process_serverlist_locked(
    * incurr in an allocation due to the arbitrary number of server */
   size_t addr_idx = 0;
   for (size_t sl_idx = 0; sl_idx < serverlist->num_servers; ++sl_idx) {
-    const grpc_grpclb_server *server = serverlist->servers[sl_idx];
+    const grpc_grpclb_server* server = serverlist->servers[sl_idx];
     if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
     GPR_ASSERT(addr_idx < num_valid);
     /* address processing */
     grpc_resolved_address addr;
     parse_server(server, &addr);
     /* lb token processing */
-    void *user_data;
+    void* user_data;
     if (server->has_load_balance_token) {
       const size_t lb_token_max_length =
           GPR_ARRAY_SIZE(server->load_balance_token);
@@ -527,22 +528,22 @@ static grpc_lb_addresses *process_serverlist_locked(
           strnlen(server->load_balance_token, lb_token_max_length);
       grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
           server->load_balance_token, lb_token_length);
-      user_data = (void *)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
-                                                  lb_token_mdstr)
+      user_data = (void*)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
+                                                 lb_token_mdstr)
                       .payload;
     } else {
-      char *uri = grpc_sockaddr_to_uri(&addr);
+      char* uri = grpc_sockaddr_to_uri(&addr);
       gpr_log(GPR_INFO,
               "Missing LB token for backend address '%s'. The empty token will "
               "be used instead",
               uri);
       gpr_free(uri);
-      user_data = (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
+      user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
     }
 
     grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
                                   false /* is_balancer */,
-                                  NULL /* balancer_name */, user_data);
+                                  nullptr /* balancer_name */, user_data);
     ++addr_idx;
   }
   GPR_ASSERT(addr_idx == num_valid);
@@ -550,8 +551,8 @@ static grpc_lb_addresses *process_serverlist_locked(
 }
 
 /* Returns the backend addresses extracted from the given addresses */
-static grpc_lb_addresses *extract_backend_addresses_locked(
-    grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
+static grpc_lb_addresses* extract_backend_addresses_locked(
+    grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses) {
   /* first pass: count the number of backend addresses */
   size_t num_backends = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -560,24 +561,24 @@ static grpc_lb_addresses *extract_backend_addresses_locked(
     }
   }
   /* second pass: actually populate the addresses and (empty) LB tokens */
-  grpc_lb_addresses *backend_addresses =
+  grpc_lb_addresses* backend_addresses =
       grpc_lb_addresses_create(num_backends, &lb_token_vtable);
   size_t num_copied = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
     if (addresses->addresses[i].is_balancer) continue;
-    const grpc_resolved_address *addr = &addresses->addresses[i].address;
+    const grpc_resolved_address* addr = &addresses->addresses[i].address;
     grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
                                   addr->len, false /* is_balancer */,
-                                  NULL /* balancer_name */,
-                                  (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
+                                  nullptr /* balancer_name */,
+                                  (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
     ++num_copied;
   }
   return backend_addresses;
 }
 
 static void update_lb_connectivity_status_locked(
-    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
-    grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
+    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+    grpc_connectivity_state rr_state, grpc_error* rr_state_error) {
   const grpc_connectivity_state curr_glb_state =
       grpc_connectivity_state_check(&glb_policy->state_tracker);
 
@@ -616,17 +617,18 @@ static void update_lb_connectivity_status_locked(
     case GRPC_CHANNEL_SHUTDOWN:
       GPR_ASSERT(rr_state_error != GRPC_ERROR_NONE);
       break;
-    case GRPC_CHANNEL_INIT:
     case GRPC_CHANNEL_IDLE:
     case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_READY:
       GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
   }
 
-  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+  if (grpc_lb_glb_trace.enabled()) {
     gpr_log(
-        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
-        grpc_connectivity_state_name(rr_state), (void *)glb_policy->rr_policy);
+        GPR_INFO,
+        "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
+        glb_policy, grpc_connectivity_state_name(rr_state),
+        glb_policy->rr_policy);
   }
   grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                               rr_state_error,
@@ -639,22 +641,22 @@ static void update_lb_connectivity_status_locked(
  * If \a force_async is true, then we will manually schedule the
  * completion callback even if the pick is available immediately. */
 static bool pick_from_internal_rr_locked(
-    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
-    const grpc_lb_policy_pick_args *pick_args, bool force_async,
-    grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
+    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+    const grpc_lb_policy_pick_args* pick_args, bool force_async,
+    grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
   // Check for drops if we are not using fallback backend addresses.
-  if (glb_policy->serverlist != NULL) {
+  if (glb_policy->serverlist != nullptr) {
     // Look at the index into the serverlist to see if we should drop this call.
-    grpc_grpclb_server *server =
+    grpc_grpclb_server* server =
         glb_policy->serverlist->servers[glb_policy->serverlist_index++];
     if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
       glb_policy->serverlist_index = 0;  // Wrap-around.
     }
     if (server->drop) {
       // Not using the RR policy, so unref it.
-      if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-        gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
-                (intptr_t)wc_arg->rr_policy);
+      if (grpc_lb_glb_trace.enabled()) {
+        gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
+                wc_arg->rr_policy);
       }
       GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
       // Update client load reporting stats to indicate the number of
@@ -662,11 +664,12 @@ static bool pick_from_internal_rr_locked(
       // the client_load_reporting filter, because we do not create a
       // subchannel call (and therefore no client_load_reporting filter)
       // for dropped calls.
+      GPR_ASSERT(wc_arg->client_stats != nullptr);
       grpc_grpclb_client_stats_add_call_dropped_locked(
           server->load_balance_token, wc_arg->client_stats);
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
       if (force_async) {
-        GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+        GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
         GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
         gpr_free(wc_arg->free_when_done);
         return false;
@@ -678,12 +681,12 @@ static bool pick_from_internal_rr_locked(
   // Pick via the RR policy.
   const bool pick_done = grpc_lb_policy_pick_locked(
       exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
-      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+      (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
-              (intptr_t)wc_arg->rr_policy);
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
+              wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
     /* add the load reporting initial metadata */
@@ -691,11 +694,11 @@ static bool pick_from_internal_rr_locked(
                                   pick_args->lb_token_mdelem_storage,
                                   GRPC_MDELEM_REF(wc_arg->lb_token));
     // Pass on client stats via context. Passes ownership of the reference.
-    GPR_ASSERT(wc_arg->client_stats != NULL);
+    GPR_ASSERT(wc_arg->client_stats != nullptr);
     wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
     wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
     if (force_async) {
-      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+      GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
       GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
       gpr_free(wc_arg->free_when_done);
       return false;
@@ -709,10 +712,10 @@ static bool pick_from_internal_rr_locked(
   return pick_done;
 }
 
-static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
-                                                  glb_lb_policy *glb_policy) {
-  grpc_lb_addresses *addresses;
-  if (glb_policy->serverlist != NULL) {
+static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
+                                                  glb_lb_policy* glb_policy) {
+  grpc_lb_addresses* addresses;
+  if (glb_policy->serverlist != nullptr) {
     GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
     addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
   } else {
@@ -720,16 +723,16 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
     // serverlist from the balancer, we use the fallback backends returned by
     // the resolver. Note that the fallback backend list may be empty, in which
     // case the new round_robin policy will keep the requested picks pending.
-    GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+    GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
     addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
   }
-  GPR_ASSERT(addresses != NULL);
-  grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
+  GPR_ASSERT(addresses != nullptr);
+  grpc_lb_policy_args* args = (grpc_lb_policy_args*)gpr_zalloc(sizeof(*args));
   args->client_channel_factory = glb_policy->cc_factory;
   args->combiner = glb_policy->base.combiner;
   // Replace the LB addresses in the channel args that we pass down to
   // the subchannel.
-  static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
+  static const char* keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
   const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
   args->args = grpc_channel_args_copy_and_add_and_remove(
       glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
@@ -738,32 +741,33 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
   return args;
 }
 
-static void lb_policy_args_destroy(grpc_exec_ctx *exec_ctx,
-                                   grpc_lb_policy_args *args) {
+static void lb_policy_args_destroy(grpc_exec_ctx* exec_ctx,
+                                   grpc_lb_policy_args* args) {
   grpc_channel_args_destroy(exec_ctx, args->args);
   gpr_free(args);
 }
 
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
-                                               void *arg, grpc_error *error);
-static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
-                             grpc_lb_policy_args *args) {
-  GPR_ASSERT(glb_policy->rr_policy == NULL);
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
+                                               void* arg, grpc_error* error);
+static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+                             grpc_lb_policy_args* args) {
+  GPR_ASSERT(glb_policy->rr_policy == nullptr);
 
-  grpc_lb_policy *new_rr_policy =
+  grpc_lb_policy* new_rr_policy =
       grpc_lb_policy_create(exec_ctx, "round_robin", args);
-  if (new_rr_policy == NULL) {
+  if (new_rr_policy == nullptr) {
     gpr_log(GPR_ERROR,
-            "Failure creating a RoundRobin policy for serverlist update with "
-            "%" PRIuPTR
-            " entries. The previous RR instance (%p), if any, will continue "
+            "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
+            "update with %" PRIuPTR
+            " entries. The previous RR instance (%p), if any, will continue to "
+            "be used. Future updates from the LB will attempt to create new "
             "instances.",
-            glb_policy->serverlist->num_servers,
-            (void *)glb_policy->rr_policy);
+            glb_policy, glb_policy->serverlist->num_servers,
+            glb_policy->rr_policy);
     return;
   }
   glb_policy->rr_policy = new_rr_policy;
-  grpc_error *rr_state_error = NULL;
+  grpc_error* rr_state_error = nullptr;
   const grpc_connectivity_state rr_state =
       grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
                                                &rr_state_error);
@@ -779,8 +783,8 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
 
   /* Allocate the data for the tracking of the new RR policy's connectivity.
    * It'll be deallocated in glb_rr_connectivity_changed() */
-  rr_connectivity_data *rr_connectivity =
-      (rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
+  rr_connectivity_data* rr_connectivity =
+      (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
   GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
                     glb_rr_connectivity_changed_locked, rr_connectivity,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -795,30 +799,31 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
   grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
 
   /* Update picks and pings in wait */
-  pending_pick *pp;
+  pending_pick* pp;
   while ((pp = glb_policy->pending_picks)) {
     glb_policy->pending_picks = pp->next;
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
     pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
     pp->wrapped_on_complete_arg.client_stats =
         grpc_grpclb_client_stats_ref(glb_policy->client_stats);
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
-              (void *)glb_policy->rr_policy);
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO,
+              "[grpclb %p] Pending pick about to (async) PICK from RR %p",
+              glb_policy, glb_policy->rr_policy);
     }
     pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
                                  true /* force_async */, pp->target,
                                  &pp->wrapped_on_complete_arg);
   }
 
-  pending_ping *pping;
+  pending_ping* pping;
   while ((pping = glb_policy->pending_pings)) {
     glb_policy->pending_pings = pping->next;
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
     pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Pending ping about to PING from %p",
-              (void *)glb_policy->rr_policy);
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
              glb_policy, glb_policy->rr_policy);
     }
     grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                    &pping->wrapped_notify_arg.wrapper_closure);
@@ -826,31 +831,31 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
 }
 
 /* glb_policy->rr_policy may be NULL (initial handover) */
-static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
-                               glb_lb_policy *glb_policy) {
+static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
+                               glb_lb_policy* glb_policy) {
   if (glb_policy->shutting_down) return;
-  grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
-  GPR_ASSERT(args != NULL);
-  if (glb_policy->rr_policy != NULL) {
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_DEBUG, "Updating RR policy %p",
-              (void *)glb_policy->rr_policy);
+  grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
+  GPR_ASSERT(args != nullptr);
+  if (glb_policy->rr_policy != nullptr) {
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
+              glb_policy->rr_policy);
     }
     grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
   } else {
     create_rr_locked(exec_ctx, glb_policy, args);
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_DEBUG, "Created new RR policy %p",
-              (void *)glb_policy->rr_policy);
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
+              glb_policy->rr_policy);
     }
   }
   lb_policy_args_destroy(exec_ctx, args);
 }
 
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
-                                               void *arg, grpc_error *error) {
-  rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
-  glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
+static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
+                                               void* arg, grpc_error* error) {
+  rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
+  glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
   if (glb_policy->shutting_down) {
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "glb_rr_connectivity_cb");
@@ -863,7 +868,7 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
    * sink, policies can't transition back from it. .*/
     GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
                          "rr_connectivity_shutdown");
-    glb_policy->rr_policy = NULL;
+    glb_policy->rr_policy = nullptr;
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "glb_rr_connectivity_cb");
     gpr_free(rr_connectivity);
@@ -878,22 +883,22 @@ static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
                                  &rr_connectivity->on_change);
 }
 
-static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
-                                  void *balancer_name) {
+static void destroy_balancer_name(grpc_exec_ctx* exec_ctx,
+                                  void* balancer_name) {
   gpr_free(balancer_name);
 }
 
 static grpc_slice_hash_table_entry targets_info_entry_create(
-    const char *address, const char *balancer_name) {
+    const char* address, const char* balancer_name) {
   grpc_slice_hash_table_entry entry;
   entry.key = grpc_slice_from_copied_string(address);
   entry.value = gpr_strdup(balancer_name);
   return entry;
 }
 
-static int balancer_name_cmp_fn(void *a, void *b) {
-  const char *a_str = (const char *)a;
-  const char *b_str = (const char *)b;
+static int balancer_name_cmp_fn(void* a, void* b) {
+  const char* a_str = (const char*)a;
+  const char* b_str = (const char*)b;
   return strcmp(a_str, b_str);
 }
 
@@ -905,10 +910,10 @@ static int balancer_name_cmp_fn(void *a, void *b) {
  * - \a response_generator: in order to propagate updates from the resolver
  *   above the grpclb policy.
  * - \a args: other args inherited from the grpclb policy. */
-static grpc_channel_args *build_lb_channel_args(
-    grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses,
-    grpc_fake_resolver_response_generator *response_generator,
-    const grpc_channel_args *args) {
+static grpc_channel_args* build_lb_channel_args(
+    grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses,
+    grpc_fake_resolver_response_generator* response_generator,
+    const grpc_channel_args* args) {
   size_t num_grpclb_addrs = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
     if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
@@ -917,20 +922,20 @@ static grpc_channel_args *build_lb_channel_args(
    * It's the resolver's responsibility to make sure this policy is only
    * instantiated and used in that case. Otherwise, something has gone wrong. */
   GPR_ASSERT(num_grpclb_addrs > 0);
-  grpc_lb_addresses *lb_addresses =
-      grpc_lb_addresses_create(num_grpclb_addrs, NULL);
-  grpc_slice_hash_table_entry *targets_info_entries =
-      (grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
-                                                num_grpclb_addrs);
+  grpc_lb_addresses* lb_addresses =
+      grpc_lb_addresses_create(num_grpclb_addrs, nullptr);
+  grpc_slice_hash_table_entry* targets_info_entries =
+      (grpc_slice_hash_table_entry*)gpr_zalloc(sizeof(*targets_info_entries) *
+                                               num_grpclb_addrs);
 
   size_t lb_addresses_idx = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
     if (!addresses->addresses[i].is_balancer) continue;
-    if (addresses->addresses[i].user_data != NULL) {
+    if (addresses->addresses[i].user_data != nullptr) {
       gpr_log(GPR_ERROR,
               "This LB policy doesn't support user data. It will be ignored");
     }
-    char *addr_str;
+    char* addr_str;
     GPR_ASSERT(grpc_sockaddr_to_string(
                    &addr_str, &addresses->addresses[i].address, true) > 0);
     targets_info_entries[lb_addresses_idx] = targets_info_entry_create(
@@ -940,22 +945,22 @@ static grpc_channel_args *build_lb_channel_args(
     grpc_lb_addresses_set_address(
         lb_addresses, lb_addresses_idx++, addresses->addresses[i].address.addr,
         addresses->addresses[i].address.len, false /* is balancer */,
-        addresses->addresses[i].balancer_name, NULL /* user data */);
+        addresses->addresses[i].balancer_name, nullptr /* user data */);
   }
   GPR_ASSERT(num_grpclb_addrs == lb_addresses_idx);
-  grpc_slice_hash_table *targets_info =
+  grpc_slice_hash_table* targets_info =
       grpc_slice_hash_table_create(num_grpclb_addrs, targets_info_entries,
                                    destroy_balancer_name, balancer_name_cmp_fn);
   gpr_free(targets_info_entries);
 
-  grpc_channel_args *lb_channel_args =
+  grpc_channel_args* lb_channel_args =
       grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
                                                   response_generator, args);
 
   grpc_arg lb_channel_addresses_arg =
       grpc_lb_addresses_create_channel_arg(lb_addresses);
 
-  grpc_channel_args *result = grpc_channel_args_copy_and_add(
+  grpc_channel_args* result = grpc_channel_args_copy_and_add(
       lb_channel_args, &lb_channel_addresses_arg, 1);
   grpc_slice_hash_table_unref(exec_ctx, targets_info);
   grpc_channel_args_destroy(exec_ctx, lb_channel_args);
@@ -963,84 +968,88 @@ static grpc_channel_args *build_lb_channel_args(
   return result;
 }
 
-static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
-  GPR_ASSERT(glb_policy->pending_picks == NULL);
-  GPR_ASSERT(glb_policy->pending_pings == NULL);
-  gpr_free((void *)glb_policy->server_name);
+static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+  GPR_ASSERT(glb_policy->pending_picks == nullptr);
+  GPR_ASSERT(glb_policy->pending_pings == nullptr);
+  gpr_free((void*)glb_policy->server_name);
   grpc_channel_args_destroy(exec_ctx, glb_policy->args);
-  if (glb_policy->client_stats != NULL) {
+  if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
   }
   grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
-  if (glb_policy->serverlist != NULL) {
+  if (glb_policy->serverlist != nullptr) {
     grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
   }
-  if (glb_policy->fallback_backend_addresses != NULL) {
+  if (glb_policy->fallback_backend_addresses != nullptr) {
     grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
   }
   grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
   grpc_subchannel_index_unref();
-  if (glb_policy->pending_update_args != NULL) {
-    grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
-    gpr_free(glb_policy->pending_update_args);
-  }
   gpr_free(glb_policy);
 }
 
-static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   glb_policy->shutting_down = true;
 
   /* We need a copy of the lb_call pointer because we can't cancell the call
    * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
    * the cancel, needs to acquire that same lock */
-  grpc_call *lb_call = glb_policy->lb_call;
+  grpc_call* lb_call = glb_policy->lb_call;
 
   /* glb_policy->lb_call and this local lb_call must be consistent at this point
    * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
    * of query_for_backends_locked, which can only be invoked while
    * glb_policy->shutting_down is false. */
-  if (lb_call != NULL) {
-    grpc_call_cancel(lb_call, NULL);
+  if (lb_call != nullptr) {
+    grpc_call_cancel(lb_call, nullptr);
     /* lb_on_server_status_received will pick up the cancel and clean up */
   }
   if (glb_policy->retry_timer_active) {
     grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
     glb_policy->retry_timer_active = false;
   }
+  if (glb_policy->fallback_timer_active) {
+    grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+    glb_policy->fallback_timer_active = false;
+  }
 
-  pending_pick *pp = glb_policy->pending_picks;
-  glb_policy->pending_picks = NULL;
-  pending_ping *pping = glb_policy->pending_pings;
-  glb_policy->pending_pings = NULL;
-  if (glb_policy->rr_policy != NULL) {
+  pending_pick* pp = glb_policy->pending_picks;
+  glb_policy->pending_picks = nullptr;
+  pending_ping* pping = glb_policy->pending_pings;
+  glb_policy->pending_pings = nullptr;
+  if (glb_policy->rr_policy != nullptr) {
     GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
   }
   // We destroy the LB channel here because
   // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
   // instance. Destroying the lb channel in glb_destroy would likely result in
   // a callback invocation without a valid glb_policy arg.
-  if (glb_policy->lb_channel != NULL) {
+  if (glb_policy->lb_channel != nullptr) {
     grpc_channel_destroy(glb_policy->lb_channel);
-    glb_policy->lb_channel = NULL;
+    glb_policy->lb_channel = nullptr;
   }
   grpc_connectivity_state_set(
       exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
 
-  while (pp != NULL) {
-    pending_pick *next = pp->next;
-    *pp->target = NULL;
-    GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-                       GRPC_ERROR_NONE);
+  while (pp != nullptr) {
+    pending_pick* next = pp->next;
+    *pp->target = nullptr;
+    GRPC_CLOSURE_SCHED(
+        exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+    gpr_free(pp);
     pp = next;
   }
 
-  while (pping != NULL) {
-    pending_ping *next = pping->next;
-    GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
-                       GRPC_ERROR_NONE);
+  while (pping != nullptr) {
+    pending_ping* next = pping->next;
+    GRPC_CLOSURE_SCHED(
+        exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+    gpr_free(pping);
    pping = next;
  }
 }
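Note: two behavioral changes in glb_shutdown_locked, beyond the pointer-style cleanup. First, pending picks and pings are now failed with an explicit "Channel Shutdown" error instead of GRPC_ERROR_NONE, so callers can distinguish shutdown from success. Second, the fallback timer is now cancelled alongside the retry timer, using the cancel-if-armed pattern (taken directly from the hunk above):

    // Only cancel a timer that is actually armed, and clear the flag so the
    // timer callback can tell a cancellation from a natural firing.
    if (glb_policy->fallback_timer_active) {
      grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
      glb_policy->fallback_timer_active = false;
    }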
@@ -1055,16 +1064,16 @@ static void glb_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
|
|
1055
1064
|
// - Otherwise, without an RR instance, picks stay pending at this policy's
|
1056
1065
|
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
|
1057
1066
|
// we invoke the completion closure and set *target to NULL right here.
|
1058
|
-
static void glb_cancel_pick_locked(grpc_exec_ctx
|
1059
|
-
grpc_connected_subchannel
|
1060
|
-
grpc_error
|
1061
|
-
glb_lb_policy
|
1062
|
-
pending_pick
|
1063
|
-
glb_policy->pending_picks =
|
1064
|
-
while (pp !=
|
1065
|
-
pending_pick
|
1067
|
+
static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
|
1068
|
+
grpc_connected_subchannel** target,
|
1069
|
+
grpc_error* error) {
|
1070
|
+
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
|
1071
|
+
pending_pick* pp = glb_policy->pending_picks;
|
1072
|
+
glb_policy->pending_picks = nullptr;
|
1073
|
+
while (pp != nullptr) {
|
1074
|
+
pending_pick* next = pp->next;
|
1066
1075
|
if (pp->target == target) {
|
1067
|
-
*target =
|
1076
|
+
*target = nullptr;
|
1068
1077
|
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
|
1069
1078
|
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
|
1070
1079
|
"Pick Cancelled", &error, 1));
|
@@ -1074,7 +1083,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
|
|
1074
1083
|
}
|
1075
1084
|
pp = next;
|
1076
1085
|
}
|
1077
|
-
if (glb_policy->rr_policy !=
|
1086
|
+
if (glb_policy->rr_policy != nullptr) {
|
1078
1087
|
grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
|
1079
1088
|
GRPC_ERROR_REF(error));
|
1080
1089
|
}
|
@@ -1091,16 +1100,16 @@ static void glb_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
|
|
1091
1100
|
// - Otherwise, without an RR instance, picks stay pending at this policy's
|
1092
1101
|
// level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
|
1093
1102
|
// we invoke the completion closure and set *target to NULL right here.
|
1094
|
-
static void glb_cancel_picks_locked(grpc_exec_ctx
|
1095
|
-
grpc_lb_policy
|
1103
|
+
static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
|
1104
|
+
grpc_lb_policy* pol,
|
1096
1105
|
uint32_t initial_metadata_flags_mask,
|
1097
1106
|
uint32_t initial_metadata_flags_eq,
|
1098
|
-
grpc_error
|
1099
|
-
glb_lb_policy
|
1100
|
-
pending_pick
|
1101
|
-
glb_policy->pending_picks =
|
1102
|
-
while (pp !=
|
1103
|
-
pending_pick
|
1107
|
+
grpc_error* error) {
|
1108
|
+
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
|
1109
|
+
pending_pick* pp = glb_policy->pending_picks;
|
1110
|
+
glb_policy->pending_picks = nullptr;
|
1111
|
+
while (pp != nullptr) {
|
1112
|
+
pending_pick* next = pp->next;
|
1104
1113
|
if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
|
1105
1114
|
initial_metadata_flags_eq) {
|
1106
1115
|
GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
|
@@ -1112,7 +1121,7 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
|
|
1112
1121
|
}
|
1113
1122
|
pp = next;
|
1114
1123
|
}
|
1115
|
-
if (glb_policy->rr_policy !=
|
1124
|
+
if (glb_policy->rr_policy != nullptr) {
|
1116
1125
|
grpc_lb_policy_cancel_picks_locked(
|
1117
1126
|
exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
|
1118
1127
|
initial_metadata_flags_eq, GRPC_ERROR_REF(error));
|
@@ -1120,92 +1129,105 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
|
|
1120
1129
|
GRPC_ERROR_UNREF(error);
|
1121
1130
|
}
|
1122
1131
|
|
1123
|
-
static void lb_on_fallback_timer_locked(grpc_exec_ctx
|
1124
|
-
grpc_error
|
1125
|
-
static void query_for_backends_locked(grpc_exec_ctx
|
1126
|
-
glb_lb_policy
|
1127
|
-
static void start_picking_locked(grpc_exec_ctx
|
1128
|
-
glb_lb_policy
|
1132
|
+
static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
|
1133
|
+
grpc_error* error);
|
1134
|
+
static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
|
1135
|
+
glb_lb_policy* glb_policy);
|
1136
|
+
static void start_picking_locked(grpc_exec_ctx* exec_ctx,
|
1137
|
+
glb_lb_policy* glb_policy) {
|
1129
1138
|
/* start a timer to fall back */
|
1130
1139
|
if (glb_policy->lb_fallback_timeout_ms > 0 &&
|
1131
|
-
glb_policy->serverlist ==
|
1132
|
-
|
1133
|
-
|
1134
|
-
now,
|
1135
|
-
gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
|
1140
|
+
glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
|
1141
|
+
grpc_millis deadline =
|
1142
|
+
grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
|
1136
1143
|
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
|
1137
1144
|
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
|
1138
1145
|
glb_policy,
|
1139
1146
|
grpc_combiner_scheduler(glb_policy->base.combiner));
|
1140
1147
|
glb_policy->fallback_timer_active = true;
|
1141
1148
|
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
|
1142
|
-
&glb_policy->lb_on_fallback
|
1149
|
+
&glb_policy->lb_on_fallback);
|
1143
1150
|
}
|
1144
1151
|
|
1145
1152
|
glb_policy->started_picking = true;
|
1146
|
-
|
1153
|
+
grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
|
1147
1154
|
query_for_backends_locked(exec_ctx, glb_policy);
|
1148
1155
|
}
|
1149
1156
|
|
1150
|
-
static void glb_exit_idle_locked(grpc_exec_ctx
|
1151
|
-
glb_lb_policy
|
1157
|
+
static void glb_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
|
1158
|
+
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
|
1152
1159
|
if (!glb_policy->started_picking) {
|
1153
1160
|
start_picking_locked(exec_ctx, glb_policy);
|
1154
1161
|
}
|
1155
1162
|
}
|
1156
1163
|
|
1157
|
-
static int glb_pick_locked(grpc_exec_ctx
|
1158
|
-
const grpc_lb_policy_pick_args
|
1159
|
-
grpc_connected_subchannel
|
1160
|
-
grpc_call_context_element
|
1161
|
-
grpc_closure
|
1162
|
-
if (pick_args->lb_token_mdelem_storage ==
|
1163
|
-
*target =
|
1164
|
+
static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
|
1165
|
+
const grpc_lb_policy_pick_args* pick_args,
|
1166
|
+
grpc_connected_subchannel** target,
|
1167
|
+
grpc_call_context_element* context, void** user_data,
|
1168
|
+
+                           grpc_closure* on_complete) {
+  if (pick_args->lb_token_mdelem_storage == nullptr) {
+    *target = nullptr;
     GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                            "No mdelem storage for the LB token. Load reporting "
                            "won't work without it. Failing"));
     return 0;
   }
-
-
-
-
-
-
-
-
+  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+  bool pick_done = false;
+  if (glb_policy->rr_policy != nullptr) {
+    const grpc_connectivity_state rr_connectivity_state =
+        grpc_lb_policy_check_connectivity_locked(
+            exec_ctx, glb_policy->rr_policy, nullptr);
+    // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
+    // callback registered to capture this event
+    // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
+    // need to make sure we aren't trying to pick from a RR policy instance
+    // that's in shutdown.
+    if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+      if (grpc_lb_glb_trace.enabled()) {
+        gpr_log(GPR_INFO,
+                "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
+                glb_policy, glb_policy->rr_policy,
+                grpc_connectivity_state_name(rr_connectivity_state));
+      }
+      add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
+                       on_complete);
+      pick_done = false;
+    } else {  // RR not in shutdown
+      if (grpc_lb_glb_trace.enabled()) {
+        gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
+                glb_policy->rr_policy);
+      }
+      GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
+      wrapped_rr_closure_arg* wc_arg =
+          (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
+      GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
+                        grpc_schedule_on_exec_ctx);
+      wc_arg->rr_policy = glb_policy->rr_policy;
+      wc_arg->target = target;
+      wc_arg->context = context;
+      GPR_ASSERT(glb_policy->client_stats != nullptr);
+      wc_arg->client_stats =
+          grpc_grpclb_client_stats_ref(glb_policy->client_stats);
+      wc_arg->wrapped_closure = on_complete;
+      wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
+      wc_arg->initial_metadata = pick_args->initial_metadata;
+      wc_arg->free_when_done = wc_arg;
+      wc_arg->glb_policy = pol;
+      pick_done =
+          pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
+                                       false /* force_async */, target, wc_arg);
     }
-
-
-    wrapped_rr_closure_arg *wc_arg =
-        (wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
-
-    GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
-                      grpc_schedule_on_exec_ctx);
-    wc_arg->rr_policy = glb_policy->rr_policy;
-    wc_arg->target = target;
-    wc_arg->context = context;
-    GPR_ASSERT(glb_policy->client_stats != NULL);
-    wc_arg->client_stats =
-        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
-    wc_arg->wrapped_closure = on_complete;
-    wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
-    wc_arg->initial_metadata = pick_args->initial_metadata;
-    wc_arg->free_when_done = wc_arg;
-    pick_done =
-        pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
-                                     false /* force_async */, target, wc_arg);
-  } else {
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+  } else {  // glb_policy->rr_policy == NULL
+    if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG,
-              "No RR policy
-
-              (void *)(glb_policy));
+              "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
+              glb_policy);
     }
     add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                      on_complete);
-
     if (!glb_policy->started_picking) {
       start_picking_locked(exec_ctx, glb_policy);
     }
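The rewritten glb_pick_locked above closes a race: the RR child may already have reached SHUTDOWN even though glb_rr_connectivity_changed_locked has not run yet, so a pick is either forwarded to a healthy child or parked on pending_picks and replayed later. A self-contained C++ sketch of that queue-or-forward shape; all types and names here (ChildPolicy, PendingPick, GlbLike) are illustrative stand-ins, not the gRPC API:

// Standalone sketch of the queue-or-forward pick pattern above.
#include <functional>
#include <iostream>
#include <vector>

enum class ConnState { kIdle, kReady, kShutdown };

struct ChildPolicy {
  ConnState state = ConnState::kReady;
  int Pick() { return 42; }  // pretend this selects a backend index
};

struct PendingPick {
  std::function<void(int)> on_complete;
};

struct GlbLike {
  ChildPolicy* child = nullptr;      // null until a serverlist arrives
  std::vector<PendingPick> pending;  // picks parked until child is usable

  // Returns true if the pick completed synchronously, false if queued.
  bool PickLocked(std::function<void(int)> on_complete) {
    if (child == nullptr || child->state == ConnState::kShutdown) {
      pending.push_back({std::move(on_complete)});  // park the pick
      return false;
    }
    on_complete(child->Pick());  // forward to the healthy child policy
    return true;
  }

  // Called when a usable child appears: replay every parked pick.
  void FlushPendingPicks() {
    std::vector<PendingPick> picks = std::move(pending);
    pending.clear();
    for (PendingPick& p : picks) p.on_complete(child->Pick());
  }
};

int main() {
  GlbLike glb;
  glb.PickLocked([](int b) { std::cout << "pick -> backend " << b << "\n"; });
  ChildPolicy rr;  // serverlist arrived; an RR child now exists
  glb.child = &rr;
  glb.FlushPendingPicks();
}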
@@ -1215,16 +1237,16 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   }

 static grpc_connectivity_state glb_check_connectivity_locked(
-    grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
-    grpc_error **connectivity_error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+    grpc_error** connectivity_error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   return grpc_connectivity_state_get(&glb_policy->state_tracker,
                                      connectivity_error);
 }

-static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
-                                grpc_closure *closure) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+                                grpc_closure* closure) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   if (glb_policy->rr_policy) {
     grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
   } else {
@@ -1235,32 +1257,31 @@ static void glb_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   }
 }

-static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
-                                              grpc_lb_policy *pol,
-                                              grpc_connectivity_state *current,
-                                              grpc_closure *notify) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
+static void glb_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
+                                              grpc_lb_policy* pol,
+                                              grpc_connectivity_state* current,
+                                              grpc_closure* notify) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   grpc_connectivity_state_notify_on_state_change(
       exec_ctx, &glb_policy->state_tracker, current, notify);
 }

-static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                          grpc_error *error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                          grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   glb_policy->retry_timer_active = false;
-  if (!glb_policy->shutting_down &&
-
-
-
+  if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
+      error == GRPC_ERROR_NONE) {
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
     }
-    GPR_ASSERT(glb_policy->lb_call == NULL);
     query_for_backends_locked(exec_ctx, glb_policy);
   }
   GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
 }

-static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
-                                  glb_lb_policy *glb_policy) {
+static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
+                                  glb_lb_policy* glb_policy) {
   if (glb_policy->started_picking && glb_policy->updating_lb_call) {
     if (glb_policy->retry_timer_active) {
       grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
@@ -1269,19 +1290,20 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
     glb_policy->updating_lb_call = false;
   } else if (!glb_policy->shutting_down) {
     /* if we aren't shutting down, restart the LB client call after some time */
-
-
-
-    if (
-      gpr_log(GPR_DEBUG, "Connection to LB server lost
-
-
-      if (
+    grpc_millis next_try =
+        grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
+            .next_attempt_start_time;
+    if (grpc_lb_glb_trace.enabled()) {
+      gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
+              glb_policy);
+      grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+      if (timeout > 0) {
         gpr_log(GPR_DEBUG,
-                "... retry_timer_active in %"
-
+                "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
+                glb_policy, timeout);
       } else {
-        gpr_log(GPR_DEBUG, "... retry_timer_active immediately."
+        gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
+                glb_policy);
       }
     }
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
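maybe_restart_lb_call now takes the next retry time from grpc_backoff_step, i.e. truncated exponential backoff with jitter between attempts to reconnect to the balancer. A minimal standalone sketch of that computation, with made-up constants rather than the real GRPC_GRPCLB_* values:

// Truncated exponential backoff with jitter, analogous to grpc_backoff_step.
#include <algorithm>
#include <cstdio>
#include <random>

class Backoff {
 public:
  Backoff(double initial_ms, double multiplier, double jitter, double max_ms)
      : current_ms_(initial_ms),
        multiplier_(multiplier),
        jitter_(jitter),
        max_ms_(max_ms) {}

  // Milliseconds to wait before the next attempt may start.
  double Step() {
    std::uniform_real_distribution<double> dist(1.0 - jitter_, 1.0 + jitter_);
    double delay = current_ms_ * dist(rng_);                     // apply jitter
    current_ms_ = std::min(current_ms_ * multiplier_, max_ms_);  // grow, capped
    return delay;
  }

  // Analogous to grpc_backoff_reset after a successful response.
  void Reset(double initial_ms) { current_ms_ = initial_ms; }

 private:
  double current_ms_, multiplier_, jitter_, max_ms_;
  std::mt19937 rng_{std::random_device{}()};
};

int main() {
  Backoff b(/*initial_ms=*/1000, /*multiplier=*/1.6, /*jitter=*/0.2,
            /*max_ms=*/120000);
  for (int attempt = 1; attempt <= 5; ++attempt) {
    std::printf("attempt %d: wait ~%.0f ms\n", attempt, b.Step());
  }
}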
@@ -1290,69 +1312,71 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
                       grpc_combiner_scheduler(glb_policy->base.combiner));
     glb_policy->retry_timer_active = true;
     grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
-                    &glb_policy->lb_on_call_retry
+                    &glb_policy->lb_on_call_retry);
   }
   GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                             "lb_on_server_status_received_locked");
 }

-static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                           grpc_error *error);
+static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                           grpc_error* error);

-static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
-                                             glb_lb_policy *glb_policy) {
-  const
-
-      gpr_time_add(now, glb_policy->client_stats_report_interval);
+static void schedule_next_client_load_report(grpc_exec_ctx* exec_ctx,
+                                             glb_lb_policy* glb_policy) {
+  const grpc_millis next_client_load_report_time =
+      grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
   GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                     send_client_load_report_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
   grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
                   next_client_load_report_time,
-                  &glb_policy->client_load_report_closure
+                  &glb_policy->client_load_report_closure);
 }

-static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                           grpc_error *error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                           grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
-  glb_policy->client_load_report_payload = NULL;
-  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
+  glb_policy->client_load_report_payload = nullptr;
+  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "client_load_report");
+    if (glb_policy->lb_call == nullptr) {
+      maybe_restart_lb_call(exec_ctx, glb_policy);
+    }
     return;
   }
   schedule_next_client_load_report(exec_ctx, glb_policy);
 }

-static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
-  grpc_grpclb_dropped_call_counts *drop_entries =
-      (grpc_grpclb_dropped_call_counts *)
+static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
+  grpc_grpclb_dropped_call_counts* drop_entries =
+      (grpc_grpclb_dropped_call_counts*)
           request->client_stats.calls_finished_with_drop.arg;
   return request->client_stats.num_calls_started == 0 &&
          request->client_stats.num_calls_finished == 0 &&
          request->client_stats.num_calls_finished_with_client_failed_to_send ==
              0 &&
          request->client_stats.num_calls_finished_known_received == 0 &&
-         (drop_entries == NULL || drop_entries->num_entries == 0);
+         (drop_entries == nullptr || drop_entries->num_entries == 0);
 }

-static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                           grpc_error *error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
-  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
+static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                           grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
                               "client_load_report");
-    if (glb_policy->lb_call == NULL) {
+    if (glb_policy->lb_call == nullptr) {
       maybe_restart_lb_call(exec_ctx, glb_policy);
     }
     return;
   }
   // Construct message payload.
-  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
-  grpc_grpclb_request *request =
+  GPR_ASSERT(glb_policy->client_load_report_payload == nullptr);
+  grpc_grpclb_request* request =
       grpc_grpclb_load_report_request_create_locked(glb_policy->client_stats);
   // Skip client load report if the counters were all zero in the last
   // report and they are still zero in this one.
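The client load reporting path above is timer driven: each report is sent, the done callback reschedules the next one, and consecutive all-zero reports are elided via load_report_counters_are_zero. A compact sketch of the elision rule under illustrative types:

// Sketch of "skip a report only if it and the previous one are both zero".
#include <cstdint>
#include <cstdio>

struct LoadReport {
  int64_t calls_started = 0;
  int64_t calls_finished = 0;
  int64_t drops = 0;
  bool IsZero() const {
    return calls_started == 0 && calls_finished == 0 && drops == 0;
  }
};

struct Reporter {
  bool last_was_zero = false;
  bool ShouldSend(const LoadReport& r) {
    const bool zero = r.IsZero();
    const bool skip = zero && last_was_zero;  // two zero reports in a row
    last_was_zero = zero;
    return !skip;
  }
};

int main() {
  Reporter rep;
  LoadReport active;
  active.calls_started = 3;
  active.calls_finished = 2;
  LoadReport idle;
  const bool a = rep.ShouldSend(active);  // true: carries data
  const bool b = rep.ShouldSend(idle);    // true: first zero report still sent
  const bool c = rep.ShouldSend(idle);    // false: still zero, skipped
  std::printf("%d %d %d\n", a, b, c);
}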
@@ -1383,40 +1407,38 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
       exec_ctx, glb_policy->lb_call, &op, 1,
       &glb_policy->client_load_report_closure);
   if (call_error != GRPC_CALL_OK) {
-    gpr_log(GPR_ERROR, "call_error=%d", call_error);
+    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
     GPR_ASSERT(GRPC_CALL_OK == call_error);
   }
 }

-static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
-                                                void *arg, grpc_error *error);
-static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                           grpc_error *error);
-static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
-                                glb_lb_policy *glb_policy) {
-  GPR_ASSERT(glb_policy->server_name != NULL);
+static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
+                                                void* arg, grpc_error* error);
+static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                           grpc_error* error);
+static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
+                                glb_lb_policy* glb_policy) {
+  GPR_ASSERT(glb_policy->server_name != nullptr);
   GPR_ASSERT(glb_policy->server_name[0] != '\0');
-  GPR_ASSERT(glb_policy->lb_call == NULL);
+  GPR_ASSERT(glb_policy->lb_call == nullptr);
   GPR_ASSERT(!glb_policy->shutting_down);

   /* Note the following LB call progresses every time there's activity in \a
    * glb_policy->base.interested_parties, which is comprised of the polling
    * entities from \a client_channel. */
   grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
-
+  grpc_millis deadline =
       glb_policy->lb_call_timeout_ms == 0
-          ?
-          :
-                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
-                                              GPR_TIMESPAN));
+          ? GRPC_MILLIS_INF_FUTURE
+          : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
   glb_policy->lb_call = grpc_channel_create_pollset_set_call(
-      exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
+      exec_ctx, glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
       GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
-      &host, deadline, NULL);
+      &host, deadline, nullptr);
   grpc_slice_unref_internal(exec_ctx, host);

-  if (glb_policy->client_stats != NULL) {
+  if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
   }
   glb_policy->client_stats = grpc_grpclb_client_stats_create();
@@ -1424,7 +1446,7 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
   grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
   grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);

-  grpc_grpclb_request *request =
+  grpc_grpclb_request* request =
       grpc_grpclb_request_create(glb_policy->server_name);
   grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
   glb_policy->lb_request_payload =
@@ -1439,22 +1461,22 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                     lb_on_response_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));

-
-
-
-
-
-
+  grpc_backoff_init(&glb_policy->lb_call_backoff_state,
+                    GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
+                    GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
+                    GRPC_GRPCLB_RECONNECT_JITTER,
+                    GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
+                    GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);

   glb_policy->seen_initial_response = false;
   glb_policy->last_client_load_report_counters_were_zero = false;
 }

-static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
-                                   glb_lb_policy *glb_policy) {
-  GPR_ASSERT(glb_policy->lb_call != NULL);
+static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
+                                   glb_lb_policy* glb_policy) {
+  GPR_ASSERT(glb_policy->lb_call != nullptr);
   grpc_call_unref(glb_policy->lb_call);
-  glb_policy->lb_call = NULL;
+  glb_policy->lb_call = nullptr;

   grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
   grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
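lb_call_init_locked also moves the call deadline from gpr_timespec arithmetic to the new grpc_millis clock: a configured timeout of zero means no deadline, otherwise the deadline is now plus the timeout. A tiny illustration with std::chrono standing in for the exec_ctx clock; kInfFuture plays the role of GRPC_MILLIS_INF_FUTURE and all names are illustrative:

// Zero-means-infinite deadline rule, sketched with std::chrono.
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <limits>

using Millis = int64_t;
constexpr Millis kInfFuture = std::numeric_limits<Millis>::max();

static Millis NowMs() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(steady_clock::now().time_since_epoch())
      .count();
}

// Mirrors: deadline = timeout_ms == 0 ? INF_FUTURE : now + timeout_ms.
static Millis CallDeadline(Millis timeout_ms) {
  return timeout_ms == 0 ? kInfFuture : NowMs() + timeout_ms;
}

int main() {
  std::printf("no timeout -> %lld\n", (long long)CallDeadline(0));
  std::printf("5s timeout -> %lld\n", (long long)CallDeadline(5000));
}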
@@ -1470,45 +1492,44 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
 /*
  * Auxiliary functions and LB client callbacks.
  */
-static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
-                                      glb_lb_policy *glb_policy) {
-  GPR_ASSERT(glb_policy->lb_channel != NULL);
+static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
+                                      glb_lb_policy* glb_policy) {
+  GPR_ASSERT(glb_policy->lb_channel != nullptr);
   if (glb_policy->shutting_down) return;

   lb_call_init_locked(exec_ctx, glb_policy);

-  if (
+  if (grpc_lb_glb_trace.enabled()) {
     gpr_log(GPR_INFO,
-            "Query for backends (
-
-            (void *)glb_policy->lb_call);
+            "[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
+            glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
   }
-  GPR_ASSERT(glb_policy->lb_call != NULL);
+  GPR_ASSERT(glb_policy->lb_call != nullptr);

   grpc_call_error call_error;
   grpc_op ops[3];
   memset(ops, 0, sizeof(ops));

-  grpc_op *op = ops;
+  grpc_op* op = ops;
   op->op = GRPC_OP_SEND_INITIAL_METADATA;
   op->data.send_initial_metadata.count = 0;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
   op->op = GRPC_OP_RECV_INITIAL_METADATA;
   op->data.recv_initial_metadata.recv_initial_metadata =
       &glb_policy->lb_initial_metadata_recv;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
-  GPR_ASSERT(glb_policy->lb_request_payload != NULL);
+  GPR_ASSERT(glb_policy->lb_request_payload != nullptr);
   op->op = GRPC_OP_SEND_MESSAGE;
   op->data.send_message.send_message = glb_policy->lb_request_payload;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
-  call_error = grpc_call_start_batch_and_execute(
-
+  call_error = grpc_call_start_batch_and_execute(
+      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), nullptr);
   GPR_ASSERT(GRPC_CALL_OK == call_error);

   op = ops;
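query_for_backends_locked builds its batch with the usual grpc_op pointer-bump idiom: zero the array, fill entries in order, and submit exactly (size_t)(op - ops) of them in one call. A standalone sketch of the idiom; the Op type and op codes are illustrative stand-ins for grpc_op:

// Pointer-bump batch construction over a fixed-size ops array.
#include <cstddef>
#include <cstdio>
#include <cstring>

enum OpType { SEND_INITIAL_METADATA, SEND_MESSAGE, RECV_INITIAL_METADATA };

struct Op {
  OpType op;
  const void* payload;
};

static void StartBatch(const Op* ops, size_t nops) {
  for (size_t i = 0; i < nops; ++i) {
    std::printf("submitting op[%zu] = %d\n", i, (int)ops[i].op);
  }
}

int main() {
  Op ops[3];
  std::memset(ops, 0, sizeof(ops));
  Op* op = ops;  // bumped past each op as it is filled in
  op->op = SEND_INITIAL_METADATA;
  op++;
  op->op = SEND_MESSAGE;
  op->payload = "serialized LB request";
  op++;
  op->op = RECV_INITIAL_METADATA;
  op++;
  StartBatch(ops, (size_t)(op - ops));  // count = ops actually used
}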
@@ -1519,7 +1540,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   op->data.recv_status_on_client.status_details =
       &glb_policy->lb_call_status_details;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
   /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
    * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
@@ -1534,7 +1555,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   op->op = GRPC_OP_RECV_MESSAGE;
   op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
   op->flags = 0;
-  op->reserved = NULL;
+  op->reserved = nullptr;
   op++;
   /* take another weak ref to be unref'd/reused in
    * lb_on_response_received_locked */
@@ -1545,14 +1566,14 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }

-static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                           grpc_error *error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                           grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_op ops[2];
   memset(ops, 0, sizeof(ops));
-  grpc_op *op = ops;
-  if (glb_policy->lb_response_payload != NULL) {
-
+  grpc_op* op = ops;
+  if (glb_policy->lb_response_payload != nullptr) {
+    grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
     /* Received data from the LB server. Look inside
      * glb_policy->lb_response_payload, for a serverlist. */
     grpc_byte_buffer_reader bbr;
@@ -1561,21 +1582,19 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
     grpc_byte_buffer_reader_destroy(&bbr);
     grpc_byte_buffer_destroy(glb_policy->lb_response_payload);

-    grpc_grpclb_initial_response *response = NULL;
+    grpc_grpclb_initial_response* response = nullptr;
     if (!glb_policy->seen_initial_response &&
         (response = grpc_grpclb_initial_response_parse(response_slice)) !=
-            NULL) {
+            nullptr) {
       if (response->has_client_stats_report_interval) {
-        glb_policy->client_stats_report_interval =
-
-
-
-        if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+        glb_policy->client_stats_report_interval = GPR_MAX(
+            GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
+                                &response->client_stats_report_interval));
+        if (grpc_lb_glb_trace.enabled()) {
           gpr_log(GPR_INFO,
-                  "
-                  "client load reporting interval = %"
-                  glb_policy->client_stats_report_interval
-                  glb_policy->client_stats_report_interval.tv_nsec);
+                  "[grpclb %p] Received initial LB response message; "
+                  "client load reporting interval = %" PRIdPTR " milliseconds",
+                  glb_policy, glb_policy->client_stats_report_interval);
         }
         /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
          * strong ref count goes to zero) to be unref'd in
@@ -1583,27 +1602,30 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
         glb_policy->client_load_report_timer_pending = true;
         GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
         schedule_next_client_load_report(exec_ctx, glb_policy);
-      } else if (
+      } else if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
-                "
-                "
+                "[grpclb %p] Received initial LB response message; client load "
+                "reporting NOT enabled",
+                glb_policy);
       }
       grpc_grpclb_initial_response_destroy(response);
       glb_policy->seen_initial_response = true;
     } else {
-      grpc_grpclb_serverlist *serverlist =
+      grpc_grpclb_serverlist* serverlist =
           grpc_grpclb_response_parse_serverlist(response_slice);
-      if (serverlist != NULL) {
-        GPR_ASSERT(glb_policy->lb_call != NULL);
-        if (
-          gpr_log(GPR_INFO,
-
+      if (serverlist != nullptr) {
+        GPR_ASSERT(glb_policy->lb_call != nullptr);
+        if (grpc_lb_glb_trace.enabled()) {
+          gpr_log(GPR_INFO,
+                  "[grpclb %p] Serverlist with %" PRIuPTR " servers received",
+                  glb_policy, serverlist->num_servers);
           for (size_t i = 0; i < serverlist->num_servers; ++i) {
             grpc_resolved_address addr;
             parse_server(serverlist->servers[i], &addr);
-            char *ipport;
+            char* ipport;
             grpc_sockaddr_to_string(&ipport, &addr, false);
-            gpr_log(GPR_INFO, "Serverlist[%
+            gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
+                    glb_policy, i, ipport);
             gpr_free(ipport);
           }
         }
@@ -1611,20 +1633,22 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
         if (serverlist->num_servers > 0) {
           if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
                                             serverlist)) {
-            if (
+            if (grpc_lb_glb_trace.enabled()) {
               gpr_log(GPR_INFO,
-                      "Incoming server list identical to current,
+                      "[grpclb %p] Incoming server list identical to current, "
+                      "ignoring.",
+                      glb_policy);
             }
             grpc_grpclb_destroy_serverlist(serverlist);
           } else { /* new serverlist */
-            if (glb_policy->serverlist != NULL) {
+            if (glb_policy->serverlist != nullptr) {
              /* dispose of the old serverlist */
              grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
            } else {
              /* or dispose of the fallback */
              grpc_lb_addresses_destroy(exec_ctx,
                                        glb_policy->fallback_backend_addresses);
-              glb_policy->fallback_backend_addresses = NULL;
+              glb_policy->fallback_backend_addresses = nullptr;
              if (glb_policy->fallback_timer_active) {
                grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
                glb_policy->fallback_timer_active = false;
@@ -1638,13 +1662,17 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
           rr_handover_locked(exec_ctx, glb_policy);
         }
       } else {
-        if (
-          gpr_log(GPR_INFO,
+        if (grpc_lb_glb_trace.enabled()) {
+          gpr_log(GPR_INFO,
+                  "[grpclb %p] Received empty server list, ignoring.",
+                  glb_policy);
         }
         grpc_grpclb_destroy_serverlist(serverlist);
       }
     } else { /* serverlist == NULL */
-      gpr_log(GPR_ERROR,
+      gpr_log(GPR_ERROR,
+              "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
+              glb_policy,
               grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
     }
   }
@@ -1654,7 +1682,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
     op->op = GRPC_OP_RECV_MESSAGE;
     op->data.recv_message.recv_message = &glb_policy->lb_response_payload;
     op->flags = 0;
-    op->reserved = NULL;
+    op->reserved = nullptr;
     op++;
     /* reuse the "lb_on_response_received_locked" weak ref taken in
      * query_for_backends_locked() */
@@ -1674,20 +1702,20 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
   }
 }

-static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                        grpc_error *error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                        grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   glb_policy->fallback_timer_active = false;
   /* If we receive a serverlist after the timer fires but before this callback
    * actually runs, don't fall back. */
-  if (glb_policy->serverlist == NULL) {
+  if (glb_policy->serverlist == nullptr) {
     if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
-      if (
+      if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
-                "Falling back to use backends from resolver
-
+                "[grpclb %p] Falling back to use backends from resolver",
+                glb_policy);
       }
-      GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+      GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
       rr_handover_locked(exec_ctx, glb_policy);
     }
   }
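The fallback timer above fires only if no serverlist has arrived in time; in that case the policy hands the resolver-provided backend addresses to the RR child instead of waiting on the balancer. A small sketch of that guard, with illustrative names:

// Fallback-timer callback shape: fall back only while no serverlist exists.
#include <iostream>
#include <string>
#include <vector>

struct FallbackState {
  bool have_serverlist = false;  // set once the balancer responds
  bool shutting_down = false;
  std::vector<std::string> fallback_backends;  // from the resolver

  void OnFallbackTimer(bool cancelled) {
    // A serverlist may arrive after the timer fires but before this runs.
    if (have_serverlist || shutting_down || cancelled) return;
    std::cout << "falling back to " << fallback_backends.size()
              << " resolver-provided backend(s)\n";
  }
};

int main() {
  FallbackState st;
  st.fallback_backends = {"10.0.0.5:443"};
  st.OnFallbackTimer(/*cancelled=*/false);  // no serverlist yet -> fall back
  st.have_serverlist = true;
  st.OnFallbackTimer(false);  // serverlist arrived -> no-op
}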
@@ -1695,18 +1723,18 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
                             "grpclb_fallback_timer");
 }

-static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
-                                                void *arg, grpc_error *error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
-  GPR_ASSERT(glb_policy->lb_call != NULL);
-  if (
-    char *status_details =
+static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
+                                                void* arg, grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+  GPR_ASSERT(glb_policy->lb_call != nullptr);
+  if (grpc_lb_glb_trace.enabled()) {
+    char* status_details =
         grpc_slice_to_c_string(glb_policy->lb_call_status_details);
     gpr_log(GPR_INFO,
-            "Status from LB server received. Status = %d, Details
-            "(call: %p), error %
-            glb_policy->lb_call_status, status_details,
-
+            "[grpclb %p] Status from LB server received. Status = %d, Details "
+            "= '%s', (call: %p), error '%s'",
+            glb_policy, glb_policy->lb_call_status, status_details,
+            glb_policy->lb_call, grpc_error_string(error));
     gpr_free(status_details);
   }
   /* We need to perform cleanups no matter what. */
@@ -1719,26 +1747,26 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
   }
 }

-static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
-                                   glb_lb_policy *glb_policy,
-                                   const grpc_lb_addresses *addresses) {
-  GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
+                                   glb_lb_policy* glb_policy,
+                                   const grpc_lb_addresses* addresses) {
+  GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
   grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
   glb_policy->fallback_backend_addresses =
       extract_backend_addresses_locked(exec_ctx, addresses);
-  if (glb_policy->lb_fallback_timeout_ms > 0 &&
+  if (glb_policy->started_picking && glb_policy->lb_fallback_timeout_ms > 0 &&
       !glb_policy->fallback_timer_active) {
     rr_handover_locked(exec_ctx, glb_policy);
   }
 }

-static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
-                              const grpc_lb_policy_args *args) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
-  const grpc_arg *arg =
+static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+                              const grpc_lb_policy_args* args) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+  const grpc_arg* arg =
       grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
-  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
-    if (glb_policy->lb_channel == NULL) {
+  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+    if (glb_policy->lb_channel == nullptr) {
       // If we don't have a current channel to the LB, go into TRANSIENT
       // FAILURE.
       grpc_connectivity_state_set(
@@ -1747,57 +1775,34 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
           "glb_update_missing");
     } else {
       // otherwise, keep using the current LB channel (ignore this update).
-      gpr_log(
-
-
-
+      gpr_log(
+          GPR_ERROR,
+          "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
+          glb_policy);
     }
     return;
   }
-  const grpc_lb_addresses *addresses =
-      (const grpc_lb_addresses *)arg->value.pointer.p;
-
-
-
-  // propagate the update to fallback_backend_addresses.
+  const grpc_lb_addresses* addresses =
+      (const grpc_lb_addresses*)arg->value.pointer.p;
+  // If a non-empty serverlist hasn't been received from the balancer,
+  // propagate the update to fallback_backend_addresses.
+  if (glb_policy->serverlist == nullptr) {
     fallback_update_locked(exec_ctx, glb_policy, addresses);
-  } else if (glb_policy->updating_lb_channel) {
-    // If we have recieved serverlist from the balancer, we need to defer update
-    // when there is an in-progress one.
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO,
-              "Update already in progress for grpclb %p. Deferring update.",
-              (void *)glb_policy);
-    }
-    if (glb_policy->pending_update_args != NULL) {
-      grpc_channel_args_destroy(exec_ctx,
-                                glb_policy->pending_update_args->args);
-      gpr_free(glb_policy->pending_update_args);
-    }
-    glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
-        sizeof(*glb_policy->pending_update_args));
-    glb_policy->pending_update_args->client_channel_factory =
-        args->client_channel_factory;
-    glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
-    glb_policy->pending_update_args->combiner = args->combiner;
-    return;
   }
-
-
-
-  grpc_channel_args *lb_channel_args = build_lb_channel_args(
+  GPR_ASSERT(glb_policy->lb_channel != nullptr);
+  // Propagate updates to the LB channel (pick_first) through the fake
+  // resolver.
+  grpc_channel_args* lb_channel_args = build_lb_channel_args(
       exec_ctx, addresses, glb_policy->response_generator, args->args);
-  /* Propagate updates to the LB channel (pick first) through the fake resolver
-   */
   grpc_fake_resolver_response_generator_set_response(
       exec_ctx, glb_policy->response_generator, lb_channel_args);
   grpc_channel_args_destroy(exec_ctx, lb_channel_args);
-
+  // Start watching the LB channel connectivity for connection, if not
+  // already doing so.
   if (!glb_policy->watching_lb_channel) {
-    // Watch the LB channel connectivity for connection.
     glb_policy->lb_channel_connectivity = grpc_channel_check_connectivity_state(
         glb_policy->lb_channel, true /* try to connect */);
-    grpc_channel_element *client_channel_elem = grpc_channel_stack_last_element(
+    grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
         grpc_channel_get_channel_stack(glb_policy->lb_channel));
     GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
     glb_policy->watching_lb_channel = true;
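glb_update_locked now pushes new balancer addresses straight into the embedded LB channel through the fake resolver's response generator; the older pending_update_args deferral is deleted above. A rough standalone analogue of the generator pattern, with invented names rather than the real grpc_fake_resolver_* API:

// Push-style "resolution result" generator, loosely modeled on the fake
// resolver response generator used above.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

class ResponseGenerator {
 public:
  // The (fake) resolver inside the LB channel subscribes for pushed results.
  void Subscribe(std::function<void(std::vector<std::string>)> cb) {
    cb_ = std::move(cb);
  }
  // Whoever holds the generator can inject a new result at any time.
  void SetResponse(std::vector<std::string> balancers) {
    if (cb_) cb_(std::move(balancers));  // channel re-resolves immediately
  }

 private:
  std::function<void(std::vector<std::string>)> cb_;
};

int main() {
  ResponseGenerator gen;
  gen.Subscribe([](std::vector<std::string> addrs) {
    std::cout << "LB channel now targets " << addrs.size() << " balancer(s)\n";
  });
  // glb_update_locked's analogue: push the updated balancer addresses.
  gen.SetResponse({"lb1.example.com:443", "lb2.example.com:443"});
}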
@@ -1807,27 +1812,26 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
         grpc_polling_entity_create_from_pollset_set(
             glb_policy->base.interested_parties),
         &glb_policy->lb_channel_connectivity,
-        &glb_policy->lb_channel_on_connectivity_changed, NULL);
+        &glb_policy->lb_channel_on_connectivity_changed, nullptr);
   }
 }

 // Invoked as part of the update process. It continues watching the LB channel
 // until it shuts down or becomes READY. It's invoked even if the LB channel
 // stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
-                                                      void *arg,
-                                                      grpc_error *error) {
-  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
+                                                      void* arg,
+                                                      grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (glb_policy->shutting_down) goto done;
   // Re-initialize the lb_call. This should also take care of updating the
   // embedded RR policy. Note that the current RR policy, if any, will stay in
   // effect until an update from the new lb_call is received.
   switch (glb_policy->lb_channel_connectivity) {
-    case GRPC_CHANNEL_INIT:
     case GRPC_CHANNEL_CONNECTING:
     case GRPC_CHANNEL_TRANSIENT_FAILURE: {
       /* resub. */
-      grpc_channel_element *client_channel_elem =
+      grpc_channel_element* client_channel_elem =
           grpc_channel_stack_last_element(
               grpc_channel_get_channel_stack(glb_policy->lb_channel));
       GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
@@ -1836,28 +1840,19 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
           grpc_polling_entity_create_from_pollset_set(
               glb_policy->base.interested_parties),
           &glb_policy->lb_channel_connectivity,
-          &glb_policy->lb_channel_on_connectivity_changed, NULL);
+          &glb_policy->lb_channel_on_connectivity_changed, nullptr);
       break;
     }
     case GRPC_CHANNEL_IDLE:
-
-
-      GPR_ASSERT(glb_policy->lb_call == NULL);
+      // lb channel inactive (probably shutdown prior to update). Restart lb
+      // call to kick the lb channel into gear.
     /* fallthrough */
     case GRPC_CHANNEL_READY:
-      if (glb_policy->lb_call !=
-        glb_policy->updating_lb_channel = false;
+      if (glb_policy->lb_call != nullptr) {
         glb_policy->updating_lb_call = true;
-        grpc_call_cancel(glb_policy->lb_call,
-        // lb_on_server_status_received will pick up the cancel and reinit
+        grpc_call_cancel(glb_policy->lb_call, nullptr);
+        // lb_on_server_status_received() will pick up the cancel and reinit
         // lb_call.
-        if (glb_policy->pending_update_args != NULL) {
-          grpc_lb_policy_args *args = glb_policy->pending_update_args;
-          glb_policy->pending_update_args = NULL;
-          glb_update_locked(exec_ctx, &glb_policy->base, args);
-          grpc_channel_args_destroy(exec_ctx, args->args);
-          gpr_free(args);
-        }
       } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
         if (glb_policy->retry_timer_active) {
           grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
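The connectivity callback above is a small state machine: CONNECTING and TRANSIENT_FAILURE re-subscribe for the next change, while IDLE and READY restart the LB call, cancelling any call already in flight so its status callback re-creates it. The same switch shape, sketched with illustrative names:

// Watcher shape of glb_lb_channel_on_connectivity_changed_cb.
#include <iostream>

enum class Conn { kConnecting, kTransientFailure, kIdle, kReady, kShutdown };

void OnLbChannelStateChange(Conn state, bool lb_call_in_flight) {
  switch (state) {
    case Conn::kConnecting:
    case Conn::kTransientFailure:
      std::cout << "still settling: re-subscribe for the next change\n";
      break;
    case Conn::kIdle:  // channel inactive: fall through and restart the call
    case Conn::kReady:
      if (lb_call_in_flight) {
        std::cout << "cancel current LB call; status callback re-creates it\n";
      } else {
        std::cout << "start a fresh LB call\n";
      }
      break;
    case Conn::kShutdown:
      std::cout << "stop watching\n";
      break;
  }
}

int main() {
  OnLbChannelStateChange(Conn::kTransientFailure, false);
  OnLbChannelStateChange(Conn::kReady, true);
}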
@@ -1888,55 +1883,55 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
     glb_notify_on_state_change_locked,
     glb_update_locked};

-static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
-                                  grpc_lb_policy_factory *factory,
-                                  grpc_lb_policy_args *args) {
+static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
+                                  grpc_lb_policy_factory* factory,
+                                  grpc_lb_policy_args* args) {
   /* Count the number of gRPC-LB addresses. There must be at least one. */
-  const grpc_arg *arg =
+  const grpc_arg* arg =
       grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
-  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
-    return NULL;
+  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) {
+    return nullptr;
   }
-  grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+  grpc_lb_addresses* addresses = (grpc_lb_addresses*)arg->value.pointer.p;
   size_t num_grpclb_addrs = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
     if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
   }
-  if (num_grpclb_addrs == 0) return NULL;
+  if (num_grpclb_addrs == 0) return nullptr;

-  glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
+  glb_lb_policy* glb_policy = (glb_lb_policy*)gpr_zalloc(sizeof(*glb_policy));

   /* Get server name. */
   arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
-  GPR_ASSERT(arg != NULL);
+  GPR_ASSERT(arg != nullptr);
   GPR_ASSERT(arg->type == GRPC_ARG_STRING);
-  grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+  grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
   GPR_ASSERT(uri->path[0] != '\0');
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
-  if (
-    gpr_log(GPR_INFO,
-
+  if (grpc_lb_glb_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "[grpclb %p] Will use '%s' as the server name for LB request.",
+            glb_policy, glb_policy->server_name);
   }
   grpc_uri_destroy(uri);

   glb_policy->cc_factory = args->client_channel_factory;
-  GPR_ASSERT(glb_policy->cc_factory != NULL);
+  GPR_ASSERT(glb_policy->cc_factory != nullptr);

   arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
   glb_policy->lb_call_timeout_ms =
-      grpc_channel_arg_get_integer(arg,
+      grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});

   arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
   glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
-      arg,
-                            INT_MAX});
+      arg, {GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});

   // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
   // since we use this to trigger the client_load_reporting filter.
   grpc_arg new_arg = grpc_channel_arg_string_create(
-      (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
-  static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+      (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
+  static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
   glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
       args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
@@ -1948,9 +1943,9 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
   /* Create a client channel over them to communicate with a LB service */
   glb_policy->response_generator =
       grpc_fake_resolver_response_generator_create();
-  grpc_channel_args *lb_channel_args = build_lb_channel_args(
+  grpc_channel_args* lb_channel_args = build_lb_channel_args(
       exec_ctx, addresses, glb_policy->response_generator, args->args);
-  char *uri_str;
+  char* uri_str;
   gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
   glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
       exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
@@ -1960,11 +1955,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
       exec_ctx, glb_policy->response_generator, lb_channel_args);
   grpc_channel_args_destroy(exec_ctx, lb_channel_args);
   gpr_free(uri_str);
-  if (glb_policy->lb_channel == NULL) {
-    gpr_free((void *)glb_policy->server_name);
+  if (glb_policy->lb_channel == nullptr) {
+    gpr_free((void*)glb_policy->server_name);
     grpc_channel_args_destroy(exec_ctx, glb_policy->args);
     gpr_free(glb_policy);
-    return NULL;
+    return nullptr;
   }
   grpc_subchannel_index_ref();
   GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
@@ -1976,16 +1971,16 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
   return &glb_policy->base;
 }

-static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
+static void glb_factory_ref(grpc_lb_policy_factory* factory) {}

-static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
+static void glb_factory_unref(grpc_lb_policy_factory* factory) {}

 static const grpc_lb_policy_factory_vtable glb_factory_vtable = {
     glb_factory_ref, glb_factory_unref, glb_create, "grpclb"};

 static grpc_lb_policy_factory glb_lb_policy_factory = {&glb_factory_vtable};

-grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
+grpc_lb_policy_factory* grpc_glb_lb_factory_create() {
   return &glb_lb_policy_factory;
 }
@@ -1993,29 +1988,25 @@ grpc_lb_policy_factory *grpc_glb_lb_factory_create() {

 // Only add client_load_reporting filter if the grpclb LB policy is used.
 static bool maybe_add_client_load_reporting_filter(
-    grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
-  const grpc_channel_args *args =
+    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+  const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
-  const grpc_arg *channel_arg =
+  const grpc_arg* channel_arg =
       grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
-  if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
+  if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_STRING &&
       strcmp(channel_arg->value.string, "grpclb") == 0) {
     return grpc_channel_stack_builder_append_filter(
-        builder, (const grpc_channel_filter *)arg, NULL, NULL);
+        builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
   }
   return true;
 }

-void grpc_lb_policy_grpclb_init() {
+extern "C" void grpc_lb_policy_grpclb_init() {
   grpc_register_lb_policy(grpc_glb_lb_factory_create());
-  grpc_register_tracer(&grpc_lb_glb_trace);
-#ifndef NDEBUG
-  grpc_register_tracer(&grpc_trace_lb_policy_refcount);
-#endif
   grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                    maybe_add_client_load_reporting_filter,
-                                   (void *)&grpc_client_load_reporting_filter);
+                                   (void*)&grpc_client_load_reporting_filter);
 }

-void grpc_lb_policy_grpclb_shutdown() {}
+extern "C" void grpc_lb_policy_grpclb_shutdown() {}