grpc 1.6.7 → 1.7.0.pre1
Potentially problematic release: this version of grpc is a pre-release and might be problematic.
- checksums.yaml +4 -4
- data/Makefile +579 -77
- data/include/grpc/byte_buffer.h +1 -63
- data/include/grpc/compression.h +27 -5
- data/include/grpc/fork.h +24 -0
- data/include/grpc/grpc.h +12 -6
- data/include/grpc/grpc_security.h +28 -7
- data/include/grpc/impl/codegen/atm.h +1 -0
- data/include/grpc/impl/codegen/byte_buffer.h +86 -0
- data/include/grpc/impl/codegen/compression_types.h +63 -5
- data/include/grpc/impl/codegen/fork.h +48 -0
- data/include/grpc/impl/codegen/grpc_types.h +26 -9
- data/include/grpc/impl/codegen/port_platform.h +11 -4
- data/include/grpc/impl/codegen/slice.h +6 -1
- data/include/grpc/impl/codegen/sync.h +3 -1
- data/include/grpc/impl/codegen/sync_custom.h +36 -0
- data/include/grpc/module.modulemap +75 -3
- data/include/grpc/slice.h +1 -5
- data/include/grpc/support/sync_custom.h +24 -0
- data/src/core/ext/census/base_resources.c +14 -14
- data/src/core/ext/census/context.c +7 -5
- data/src/core/ext/census/grpc_filter.c +12 -14
- data/src/core/ext/census/mlog.c +2 -1
- data/src/core/ext/census/resource.c +13 -9
- data/src/core/ext/filters/client_channel/channel_connectivity.c +15 -8
- data/src/core/ext/filters/client_channel/client_channel.c +418 -439
- data/src/core/ext/filters/client_channel/client_channel_factory.c +4 -5
- data/src/core/ext/filters/client_channel/client_channel_plugin.c +2 -2
- data/src/core/ext/filters/client_channel/http_connect_handshaker.c +7 -5
- data/src/core/ext/filters/client_channel/http_proxy.c +17 -21
- data/src/core/ext/filters/client_channel/lb_policy.c +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +7 -7
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +371 -257
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +7 -5
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +25 -14
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +16 -16
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +33 -28
- data/src/core/ext/filters/client_channel/lb_policy_factory.c +10 -8
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.c +1 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +7 -6
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +62 -28
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +29 -23
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +25 -14
- data/src/core/ext/filters/client_channel/retry_throttle.c +9 -6
- data/src/core/ext/filters/client_channel/subchannel.c +30 -30
- data/src/core/ext/filters/client_channel/subchannel.h +1 -4
- data/src/core/ext/filters/client_channel/subchannel_index.c +31 -15
- data/src/core/ext/filters/client_channel/subchannel_index.h +7 -0
- data/src/core/ext/filters/client_channel/uri_parser.c +4 -3
- data/src/core/ext/filters/deadline/deadline_filter.c +78 -39
- data/src/core/ext/filters/deadline/deadline_filter.h +7 -1
- data/src/core/ext/filters/http/client/http_client_filter.c +14 -14
- data/src/core/ext/filters/http/http_filters_plugin.c +1 -1
- data/src/core/ext/filters/http/message_compress/message_compress_filter.c +240 -175
- data/src/core/ext/filters/http/server/http_server_filter.c +48 -36
- data/src/core/ext/filters/load_reporting/{load_reporting_filter.c → server_load_reporting_filter.c} +11 -12
- data/src/core/ext/filters/load_reporting/{load_reporting_filter.h → server_load_reporting_filter.h} +6 -5
- data/src/core/ext/filters/load_reporting/{load_reporting.c → server_load_reporting_plugin.c} +19 -13
- data/src/core/ext/filters/load_reporting/{load_reporting.h → server_load_reporting_plugin.h} +4 -3
- data/src/core/ext/filters/max_age/max_age_filter.c +2 -3
- data/src/core/ext/filters/message_size/message_size_filter.c +4 -2
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +0 -1
- data/src/core/ext/transport/chttp2/client/chttp2_connector.c +5 -5
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +20 -18
- data/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +1 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +493 -210
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +1 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.c +9 -8
- data/src/core/ext/transport/chttp2/transport/frame_data.c +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +5 -4
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +10 -9
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +9 -5
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +62 -41
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +52 -8
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +2 -2
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.c +3 -2
- data/src/core/ext/transport/chttp2/transport/internal.h +60 -30
- data/src/core/ext/transport/chttp2/transport/parsing.c +16 -5
- data/src/core/ext/transport/chttp2/transport/stream_lists.c +36 -16
- data/src/core/ext/transport/chttp2/transport/stream_map.c +6 -4
- data/src/core/ext/transport/chttp2/transport/writing.c +133 -105
- data/src/core/ext/transport/inproc/inproc_transport.c +61 -65
- data/src/core/lib/channel/channel_args.c +112 -12
- data/src/core/lib/channel/channel_args.h +31 -0
- data/src/core/lib/channel/channel_stack.c +1 -15
- data/src/core/lib/channel/channel_stack.h +3 -10
- data/src/core/lib/channel/channel_stack_builder.c +41 -10
- data/src/core/lib/channel/channel_stack_builder.h +10 -0
- data/src/core/lib/channel/connected_channel.c +94 -23
- data/src/core/lib/channel/handshaker.c +8 -6
- data/src/core/lib/channel/handshaker_registry.c +1 -1
- data/src/core/lib/compression/algorithm_metadata.h +14 -0
- data/src/core/lib/compression/compression.c +101 -1
- data/src/core/lib/compression/stream_compression.c +32 -146
- data/src/core/lib/compression/stream_compression.h +28 -4
- data/src/core/lib/compression/stream_compression_gzip.c +228 -0
- data/src/core/lib/{iomgr/ev_epoll_thread_pool_linux.h → compression/stream_compression_gzip.h} +5 -7
- data/src/core/lib/compression/stream_compression_identity.c +94 -0
- data/src/core/lib/{iomgr/ev_epoll_limited_pollers_linux.h → compression/stream_compression_identity.h} +7 -8
- data/src/core/lib/debug/stats.c +174 -0
- data/src/core/lib/debug/stats.h +61 -0
- data/src/core/lib/debug/stats_data.c +687 -0
- data/src/core/lib/debug/stats_data.h +470 -0
- data/src/core/lib/debug/trace.c +3 -3
- data/src/core/lib/debug/trace.h +1 -1
- data/src/core/lib/http/format_request.c +1 -1
- data/src/core/lib/http/httpcli.c +8 -7
- data/src/core/lib/http/httpcli_security_connector.c +2 -1
- data/src/core/lib/http/parser.c +4 -3
- data/src/core/lib/iomgr/call_combiner.c +202 -0
- data/src/core/lib/iomgr/call_combiner.h +121 -0
- data/src/core/lib/iomgr/closure.c +18 -4
- data/src/core/lib/iomgr/combiner.c +11 -4
- data/src/core/lib/iomgr/error.c +26 -24
- data/src/core/lib/iomgr/ev_epoll1_linux.c +395 -212
- data/src/core/lib/iomgr/ev_epollex_linux.c +141 -128
- data/src/core/lib/iomgr/ev_epollsig_linux.c +44 -41
- data/src/core/lib/iomgr/ev_poll_posix.c +99 -75
- data/src/core/lib/iomgr/ev_posix.c +5 -9
- data/src/core/lib/iomgr/ev_posix.h +1 -1
- data/src/core/lib/iomgr/exec_ctx.h +6 -1
- data/src/core/lib/iomgr/executor.c +142 -36
- data/src/core/lib/iomgr/executor.h +6 -1
- data/src/core/lib/iomgr/fork_posix.c +88 -0
- data/src/core/lib/iomgr/fork_windows.c +39 -0
- data/src/core/lib/iomgr/iocp_windows.c +2 -0
- data/src/core/lib/iomgr/iomgr.c +2 -8
- data/src/core/lib/iomgr/is_epollexclusive_available.c +6 -6
- data/src/core/lib/iomgr/load_file.c +2 -1
- data/src/core/lib/iomgr/polling_entity.c +9 -9
- data/src/core/lib/iomgr/polling_entity.h +7 -1
- data/src/core/lib/iomgr/pollset.h +1 -1
- data/src/core/lib/iomgr/pollset_uv.c +1 -1
- data/src/core/lib/iomgr/pollset_windows.c +3 -3
- data/src/core/lib/iomgr/port.h +4 -0
- data/src/core/lib/iomgr/resolve_address_posix.c +8 -7
- data/src/core/lib/iomgr/resolve_address_windows.c +1 -1
- data/src/core/lib/iomgr/resource_quota.c +24 -19
- data/src/core/lib/iomgr/socket_factory_posix.c +4 -4
- data/src/core/lib/iomgr/socket_mutator.c +4 -4
- data/src/core/lib/iomgr/socket_utils_windows.c +0 -4
- data/src/core/lib/iomgr/tcp_client_posix.c +5 -4
- data/src/core/lib/iomgr/tcp_posix.c +181 -20
- data/src/core/lib/iomgr/tcp_server_posix.c +8 -7
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.c +1 -1
- data/src/core/lib/iomgr/timer.h +4 -0
- data/src/core/lib/iomgr/timer_generic.c +138 -3
- data/src/core/lib/iomgr/timer_generic.h +3 -0
- data/src/core/lib/iomgr/timer_heap.c +4 -4
- data/src/core/lib/iomgr/timer_manager.c +2 -2
- data/src/core/lib/iomgr/timer_uv.c +2 -0
- data/src/core/lib/iomgr/udp_server.c +10 -8
- data/src/core/lib/iomgr/unix_sockets_posix.c +4 -2
- data/src/core/lib/iomgr/wakeup_fd_cv.c +9 -8
- data/src/core/lib/iomgr/wakeup_fd_cv.h +2 -2
- data/src/core/lib/json/json.c +1 -1
- data/src/core/lib/json/json_string.c +13 -13
- data/src/core/lib/profiling/timers.h +18 -8
- data/src/core/lib/security/credentials/composite/composite_credentials.c +4 -10
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +2 -1
- data/src/core/lib/security/credentials/jwt/jwt_verifier.c +11 -6
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +4 -4
- data/src/core/lib/security/credentials/plugin/plugin_credentials.c +132 -50
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -0
- data/src/core/lib/security/transport/client_auth_filter.c +68 -135
- data/src/core/lib/security/transport/secure_endpoint.c +110 -90
- data/src/core/lib/security/transport/secure_endpoint.h +8 -3
- data/src/core/lib/security/transport/security_connector.c +10 -12
- data/src/core/lib/security/transport/security_handshaker.c +45 -24
- data/src/core/lib/security/transport/server_auth_filter.c +71 -20
- data/src/core/lib/slice/b64.c +2 -2
- data/src/core/lib/slice/slice.c +16 -14
- data/src/core/lib/slice/slice_buffer.c +5 -4
- data/src/core/lib/slice/slice_hash_table.c +3 -2
- data/src/core/lib/slice/slice_intern.c +8 -5
- data/src/core/lib/support/block_annotate.h +22 -0
- data/src/core/lib/support/fork.c +62 -0
- data/src/core/lib/support/fork.h +35 -0
- data/src/core/lib/support/log_linux.c +1 -1
- data/src/core/lib/support/string.c +15 -1
- data/src/core/lib/support/string.h +3 -0
- data/src/core/lib/support/thd_internal.h +6 -0
- data/src/core/lib/support/thd_posix.c +56 -0
- data/src/core/lib/support/thd_windows.c +2 -0
- data/src/core/lib/surface/alarm.c +22 -15
- data/src/core/lib/surface/byte_buffer.c +4 -2
- data/src/core/lib/surface/call.c +442 -141
- data/src/core/lib/surface/call.h +6 -6
- data/src/core/lib/surface/call_log_batch.c +1 -1
- data/src/core/lib/surface/call_test_only.h +12 -0
- data/src/core/lib/surface/channel.c +39 -4
- data/src/core/lib/surface/channel_init.c +6 -6
- data/src/core/lib/surface/channel_ping.c +2 -2
- data/src/core/lib/surface/completion_queue.c +56 -57
- data/src/core/lib/surface/init.c +17 -3
- data/src/core/lib/surface/init_secure.c +5 -1
- data/src/core/lib/surface/lame_client.cc +9 -10
- data/src/core/lib/surface/server.c +81 -72
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/byte_stream.c +1 -0
- data/src/core/lib/transport/byte_stream.h +3 -1
- data/src/core/lib/transport/connectivity_state.c +2 -1
- data/src/core/lib/transport/metadata.c +7 -4
- data/src/core/lib/transport/metadata_batch.c +18 -16
- data/src/core/lib/transport/metadata_batch.h +1 -0
- data/src/core/lib/transport/service_config.c +5 -3
- data/src/core/lib/transport/static_metadata.c +395 -614
- data/src/core/lib/transport/static_metadata.h +165 -133
- data/src/core/lib/transport/status_conversion.c +1 -1
- data/src/core/lib/transport/transport.c +20 -20
- data/src/core/lib/transport/transport.h +8 -5
- data/src/core/lib/transport/transport_impl.h +0 -3
- data/src/core/lib/transport/transport_op_string.c +8 -1
- data/src/core/plugin_registry/grpc_plugin_registry.c +4 -4
- data/src/core/tsi/fake_transport_security.c +133 -2
- data/src/core/tsi/fake_transport_security.h +5 -0
- data/src/core/tsi/ssl_transport_security.c +105 -8
- data/src/core/tsi/ssl_transport_security.h +30 -7
- data/src/core/tsi/transport_security.h +8 -2
- data/src/core/tsi/transport_security_grpc.c +20 -13
- data/src/core/tsi/transport_security_grpc.h +13 -9
- data/src/ruby/ext/grpc/rb_call_credentials.c +6 -2
- data/src/ruby/ext/grpc/rb_grpc.c +1 -1
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +30 -20
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +50 -35
- data/src/ruby/lib/grpc.rb +1 -0
- data/src/ruby/lib/grpc/generic/active_call.rb +34 -9
- data/src/ruby/lib/grpc/generic/bidi_call.rb +19 -10
- data/src/ruby/lib/grpc/generic/client_stub.rb +95 -38
- data/src/ruby/lib/grpc/generic/interceptor_registry.rb +53 -0
- data/src/ruby/lib/grpc/generic/interceptors.rb +186 -0
- data/src/ruby/lib/grpc/generic/rpc_desc.rb +66 -20
- data/src/ruby/lib/grpc/generic/rpc_server.rb +15 -3
- data/src/ruby/lib/grpc/google_rpc_status_utils.rb +1 -2
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb +1 -0
- data/src/ruby/spec/channel_connection_spec.rb +1 -34
- data/src/ruby/spec/client_server_spec.rb +188 -82
- data/src/ruby/spec/generic/active_call_spec.rb +65 -11
- data/src/ruby/spec/generic/client_interceptors_spec.rb +153 -0
- data/src/ruby/spec/generic/interceptor_registry_spec.rb +65 -0
- data/src/ruby/spec/generic/rpc_desc_spec.rb +38 -0
- data/src/ruby/spec/generic/rpc_server_spec.rb +1 -34
- data/src/ruby/spec/generic/server_interceptors_spec.rb +218 -0
- data/src/ruby/spec/spec_helper.rb +4 -0
- data/src/ruby/spec/support/helpers.rb +73 -0
- data/src/ruby/spec/support/services.rb +147 -0
- data/third_party/cares/ares_build.h +21 -62
- data/third_party/cares/cares/ares.h +23 -1
- data/third_party/cares/cares/ares__close_sockets.c +2 -2
- data/third_party/cares/cares/ares_create_query.c +3 -3
- data/third_party/cares/cares/ares_expand_name.c +6 -2
- data/third_party/cares/cares/ares_expand_string.c +1 -1
- data/third_party/cares/cares/ares_getnameinfo.c +27 -7
- data/third_party/cares/cares/ares_init.c +407 -39
- data/third_party/cares/cares/ares_library_init.c +10 -0
- data/third_party/cares/cares/ares_library_init.h +2 -1
- data/third_party/cares/cares/ares_nowarn.c +6 -6
- data/third_party/cares/cares/ares_nowarn.h +2 -2
- data/third_party/cares/cares/ares_parse_naptr_reply.c +6 -1
- data/third_party/cares/cares/ares_private.h +11 -0
- data/third_party/cares/cares/ares_process.c +126 -37
- data/third_party/cares/cares/ares_version.h +2 -2
- data/third_party/cares/cares/ares_writev.c +2 -2
- data/third_party/cares/cares/config-win32.h +8 -34
- data/third_party/cares/cares/inet_net_pton.c +2 -2
- data/third_party/cares/cares/setup_once.h +5 -5
- data/third_party/cares/config_darwin/ares_config.h +98 -196
- data/third_party/cares/config_linux/ares_config.h +103 -203
- metadata +47 -20
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +0 -1957
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +0 -1182
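
Among the Ruby-side changes above, the new `src/ruby/lib/grpc/generic/interceptors.rb` and `interceptor_registry.rb` files introduce interceptor support in the Ruby API. Below is a minimal sketch of registering a client interceptor, assuming the `GRPC::ClientInterceptor` base class and the `interceptors:` stub option those files provide; `LoggingInterceptor` is an illustrative name, not part of the gem.

```ruby
require 'grpc'
require 'logger'

# Illustrative interceptor built on the GRPC::ClientInterceptor base class
# added by interceptors.rb in this release.
class LoggingInterceptor < GRPC::ClientInterceptor
  def initialize(logger = Logger.new($stdout))
    @logger = logger
    super()
  end

  # Runs around each unary RPC issued through a stub that registered this
  # interceptor; `yield` continues down the interceptor chain to the call.
  def request_response(request: nil, call: nil, method: nil, metadata: nil)
    @logger.info("-> #{method} (#{request.class})")
    yield
  end
end

# Generated service stubs subclass GRPC::ClientStub, whose constructor now
# accepts an interceptors: array (validated by the new InterceptorRegistry).
stub = GRPC::ClientStub.new('localhost:50051', :this_channel_is_insecure,
                            interceptors: [LoggingInterceptor.new])
```

Server-side interceptors follow the same pattern via `GRPC::ServerInterceptor` and the `interceptors:` option of `GRPC::RpcServer`, exercised by the new `server_interceptors_spec.rb`.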
data/src/core/ext/filters/client_channel/client_channel_factory.c:

@@ -43,14 +43,13 @@ grpc_channel* grpc_client_channel_factory_create_channel(
 }
 
 static void* factory_arg_copy(void* factory) {
-  grpc_client_channel_factory_ref(factory);
+  grpc_client_channel_factory_ref((grpc_client_channel_factory*)factory);
   return factory;
 }
 
 static void factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* factory) {
-
-
-  grpc_client_channel_factory_unref(exec_ctx, factory);
+  grpc_client_channel_factory_unref(exec_ctx,
+                                    (grpc_client_channel_factory*)factory);
 }
 
 static int factory_arg_cmp(void* factory1, void* factory2) {
@@ -64,6 +63,6 @@ static const grpc_arg_pointer_vtable factory_arg_vtable = {
 
 grpc_arg grpc_client_channel_factory_create_channel_arg(
     grpc_client_channel_factory* factory) {
-  return grpc_channel_arg_pointer_create(GRPC_ARG_CLIENT_CHANNEL_FACTORY,
+  return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY,
                                          factory, &factory_arg_vtable);
 }
data/src/core/ext/filters/client_channel/client_channel_plugin.c:

@@ -54,8 +54,8 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx,
   char *default_authority = grpc_get_default_authority(
       exec_ctx, grpc_channel_stack_builder_get_target(builder));
   if (default_authority != NULL) {
-    grpc_arg arg = grpc_channel_arg_string_create(
-
+    grpc_arg arg = grpc_channel_arg_string_create(
+        (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
     grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
     grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
                                                      new_args);
data/src/core/ext/filters/client_channel/http_connect_handshaker.c:

@@ -124,7 +124,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
 // Callback invoked when finished writing HTTP CONNECT request.
 static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
                           grpc_error* error) {
-  http_connect_handshaker* handshaker = arg;
+  http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
   gpr_mu_lock(&handshaker->mu);
   if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
     // If the write failed or we're shutting down, clean up and invoke the
@@ -145,7 +145,7 @@ static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
 // Callback invoked for reading HTTP CONNECT response.
 static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
                          grpc_error* error) {
-  http_connect_handshaker* handshaker = arg;
+  http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
   gpr_mu_lock(&handshaker->mu);
   if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
     // If the read failed or we're shutting down, clean up and invoke the
@@ -281,7 +281,8 @@ static void http_connect_handshaker_do_handshake(
     GPR_ASSERT(arg->type == GRPC_ARG_STRING);
     gpr_string_split(arg->value.string, "\n", &header_strings,
                      &num_header_strings);
-    headers = gpr_malloc(sizeof(grpc_http_header) *
+    headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) *
+                                            num_header_strings);
     for (size_t i = 0; i < num_header_strings; ++i) {
       char* sep = strchr(header_strings[i], ':');
       if (sep == NULL) {
@@ -308,7 +309,7 @@ static void http_connect_handshaker_do_handshake(
   grpc_httpcli_request request;
   memset(&request, 0, sizeof(request));
   request.host = server_name;
-  request.http.method = "CONNECT";
+  request.http.method = (char*)"CONNECT";
   request.http.path = server_name;
   request.http.hdrs = headers;
   request.http.hdr_count = num_headers;
@@ -333,7 +334,8 @@ static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
     http_connect_handshaker_do_handshake};
 
 static grpc_handshaker* grpc_http_connect_handshaker_create() {
-  http_connect_handshaker* handshaker =
+  http_connect_handshaker* handshaker =
+      (http_connect_handshaker*)gpr_malloc(sizeof(*handshaker));
   memset(handshaker, 0, sizeof(*handshaker));
   grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
   gpr_mu_init(&handshaker->mu);
data/src/core/ext/filters/client_channel/http_proxy.c:

@@ -44,6 +44,8 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
   GPR_ASSERT(user_cred != NULL);
   char* proxy_name = NULL;
   char* uri_str = gpr_getenv("http_proxy");
+  char** authority_strs = NULL;
+  size_t authority_nstrs;
   if (uri_str == NULL) return NULL;
   grpc_uri* uri =
       grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
@@ -56,8 +58,6 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
     goto done;
   }
   /* Split on '@' to separate user credentials from host */
-  char** authority_strs = NULL;
-  size_t authority_nstrs;
   gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs);
   GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */
   if (authority_nstrs == 1) {
@@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
   char* user_cred = NULL;
   *name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
   if (*name_to_resolve == NULL) return false;
+  char* no_proxy_str = NULL;
   grpc_uri* uri =
       grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
   if (uri == NULL || uri->path[0] == '\0') {
@@ -98,20 +99,14 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
             "'http_proxy' environment variable set, but cannot "
             "parse server URI '%s' -- not using proxy",
             server_uri);
-
-    gpr_free(user_cred);
-    grpc_uri_destroy(uri);
-    }
-    return false;
+    goto no_use_proxy;
   }
   if (strcmp(uri->scheme, "unix") == 0) {
     gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
             server_uri);
-
-    grpc_uri_destroy(uri);
-    return false;
+    goto no_use_proxy;
   }
-
+  no_proxy_str = gpr_getenv("no_proxy");
   if (no_proxy_str != NULL) {
     static const char* NO_PROXY_SEPARATOR = ",";
     bool use_proxy = true;
@@ -147,17 +142,12 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
     gpr_free(no_proxy_hosts);
     gpr_free(server_host);
     gpr_free(server_port);
-    if (!use_proxy)
-      grpc_uri_destroy(uri);
-      gpr_free(*name_to_resolve);
-      *name_to_resolve = NULL;
-      return false;
-    }
+    if (!use_proxy) goto no_use_proxy;
   }
   }
   grpc_arg args_to_add[2];
   args_to_add[0] = grpc_channel_arg_string_create(
-      GRPC_ARG_HTTP_CONNECT_SERVER,
+      (char*)GRPC_ARG_HTTP_CONNECT_SERVER,
       uri->path[0] == '/' ? uri->path + 1 : uri->path);
   if (user_cred != NULL) {
     /* Use base64 encoding for user credentials as stated in RFC 7617 */
@@ -166,16 +156,22 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
     char* header;
     gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
     gpr_free(encoded_user_cred);
-    args_to_add[1] =
-
+    args_to_add[1] = grpc_channel_arg_string_create(
+        (char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header);
     *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
     gpr_free(header);
   } else {
     *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
   }
-  gpr_free(user_cred);
   grpc_uri_destroy(uri);
+  gpr_free(user_cred);
   return true;
+no_use_proxy:
+  if (uri != NULL) grpc_uri_destroy(uri);
+  gpr_free(*name_to_resolve);
+  *name_to_resolve = NULL;
+  gpr_free(user_cred);
+  return false;
 }
 
 static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
data/src/core/ext/filters/client_channel/lb_policy.c:

@@ -67,7 +67,7 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
 
 static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
-  grpc_lb_policy *policy = arg;
+  grpc_lb_policy *policy = (grpc_lb_policy *)arg;
   policy->vtable->shutdown_locked(exec_ctx, policy);
   GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
 }
data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c:

@@ -49,7 +49,7 @@ typedef struct {
 
 static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
                                  grpc_error *error) {
-  call_data *calld = arg;
+  call_data *calld = (call_data *)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->send_initial_metadata_succeeded = true;
   }
@@ -59,7 +59,7 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
 
 static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
                                         grpc_error *error) {
-  call_data *calld = arg;
+  call_data *calld = (call_data *)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->recv_initial_metadata_succeeded = true;
   }
@@ -70,12 +70,13 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
 static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
                                   grpc_call_element *elem,
                                   const grpc_call_element_args *args) {
-  call_data *calld = elem->call_data;
+  call_data *calld = (call_data *)elem->call_data;
   // Get stats object from context and take a ref.
   GPR_ASSERT(args->context != NULL);
   GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
   calld->client_stats = grpc_grpclb_client_stats_ref(
-      args->context[GRPC_GRPCLB_CLIENT_STATS]
+      (grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
+          .value);
   // Record call started.
   grpc_grpclb_client_stats_add_call_started(calld->client_stats);
   return GRPC_ERROR_NONE;
@@ -84,7 +85,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
 static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                               const grpc_call_final_info *final_info,
                               grpc_closure *ignored) {
-  call_data *calld = elem->call_data;
+  call_data *calld = (call_data *)elem->call_data;
   // Record call finished, optionally setting client_failed_to_send and
   // received.
   grpc_grpclb_client_stats_add_call_finished(
@@ -98,7 +99,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
 static void start_transport_stream_op_batch(
     grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     grpc_transport_stream_op_batch *batch) {
-  call_data *calld = elem->call_data;
+  call_data *calld = (call_data *)elem->call_data;
   GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
   // Intercept send_initial_metadata.
   if (batch->send_initial_metadata) {
@@ -132,6 +133,5 @@ const grpc_channel_filter grpc_client_load_reporting_filter = {
     0,  // sizeof(channel_data)
     init_channel_elem,
     destroy_channel_elem,
-    grpc_call_next_get_peer,
     grpc_channel_next_get_info,
     "client_load_reporting"};
@@ -101,6 +101,7 @@
|
|
101
101
|
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
|
102
102
|
#include "src/core/ext/filters/client_channel/parse_address.h"
|
103
103
|
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
|
104
|
+
#include "src/core/ext/filters/client_channel/subchannel_index.h"
|
104
105
|
#include "src/core/lib/channel/channel_args.h"
|
105
106
|
#include "src/core/lib/channel/channel_stack.h"
|
106
107
|
#include "src/core/lib/iomgr/combiner.h"
|
@@ -122,6 +123,7 @@
|
|
122
123
|
#define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
|
123
124
|
#define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
|
124
125
|
#define GRPC_GRPCLB_RECONNECT_JITTER 0.2
|
126
|
+
#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000
|
125
127
|
|
126
128
|
grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb");
|
127
129
|
|
@@ -137,7 +139,7 @@ static grpc_error *initial_metadata_add_lb_token(
|
|
137
139
|
}
|
138
140
|
|
139
141
|
static void destroy_client_stats(void *arg) {
|
140
|
-
grpc_grpclb_client_stats_unref(arg);
|
142
|
+
grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats *)arg);
|
141
143
|
}
|
142
144
|
|
143
145
|
typedef struct wrapped_rr_closure_arg {
|
@@ -181,7 +183,7 @@ typedef struct wrapped_rr_closure_arg {
|
|
181
183
|
* order to unref the round robin instance upon its invocation */
|
182
184
|
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
|
183
185
|
grpc_error *error) {
|
184
|
-
wrapped_rr_closure_arg *wc_arg = arg;
|
186
|
+
wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
|
185
187
|
|
186
188
|
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
|
187
189
|
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
|
@@ -245,7 +247,7 @@ static void add_pending_pick(pending_pick **root,
|
|
245
247
|
grpc_connected_subchannel **target,
|
246
248
|
grpc_call_context_element *context,
|
247
249
|
grpc_closure *on_complete) {
|
248
|
-
pending_pick *pp = gpr_zalloc(sizeof(*pp));
|
250
|
+
pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
|
249
251
|
pp->next = *root;
|
250
252
|
pp->pick_args = *pick_args;
|
251
253
|
pp->target = target;
|
@@ -271,7 +273,7 @@ typedef struct pending_ping {
|
|
271
273
|
} pending_ping;
|
272
274
|
|
273
275
|
static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
|
274
|
-
pending_ping *pping = gpr_zalloc(sizeof(*pping));
|
276
|
+
pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
|
275
277
|
pping->wrapped_notify_arg.wrapped_closure = notify;
|
276
278
|
pping->wrapped_notify_arg.free_when_done = pping;
|
277
279
|
pping->next = *root;
|
@@ -285,7 +287,7 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
|
|
285
287
|
* glb_lb_policy
|
286
288
|
*/
|
287
289
|
typedef struct rr_connectivity_data rr_connectivity_data;
|
288
|
-
|
290
|
+
|
289
291
|
typedef struct glb_lb_policy {
|
290
292
|
/** base policy: must be first */
|
291
293
|
grpc_lb_policy base;
|
@@ -298,6 +300,10 @@ typedef struct glb_lb_policy {
|
|
298
300
|
/** timeout in milliseconds for the LB call. 0 means no deadline. */
|
299
301
|
int lb_call_timeout_ms;
|
300
302
|
|
303
|
+
/** timeout in milliseconds for before using fallback backend addresses.
|
304
|
+
* 0 means not using fallback. */
|
305
|
+
int lb_fallback_timeout_ms;
|
306
|
+
|
301
307
|
/** for communicating with the LB server */
|
302
308
|
grpc_channel *lb_channel;
|
303
309
|
|
@@ -324,6 +330,9 @@ typedef struct glb_lb_policy {
|
|
324
330
|
* Otherwise, we delegate to the RR policy. */
|
325
331
|
size_t serverlist_index;
|
326
332
|
|
333
|
+
/** stores the backend addresses from the resolver */
|
334
|
+
grpc_lb_addresses *fallback_backend_addresses;
|
335
|
+
|
327
336
|
/** list of picks that are waiting on RR's policy connectivity */
|
328
337
|
pending_pick *pending_picks;
|
329
338
|
|
@@ -344,6 +353,9 @@ typedef struct glb_lb_policy {
|
|
344
353
|
/** is \a lb_call_retry_timer active? */
|
345
354
|
bool retry_timer_active;
|
346
355
|
|
356
|
+
/** is \a lb_fallback_timer active? */
|
357
|
+
bool fallback_timer_active;
|
358
|
+
|
347
359
|
/** called upon changes to the LB channel's connectivity. */
|
348
360
|
grpc_closure lb_channel_on_connectivity_changed;
|
349
361
|
|
@@ -353,9 +365,6 @@ typedef struct glb_lb_policy {
|
|
353
365
|
/************************************************************/
|
354
366
|
/* client data associated with the LB server communication */
|
355
367
|
/************************************************************/
|
356
|
-
/* Finished sending initial request. */
|
357
|
-
grpc_closure lb_on_sent_initial_request;
|
358
|
-
|
359
368
|
/* Status from the LB server has been received. This signals the end of the LB
|
360
369
|
* call. */
|
361
370
|
grpc_closure lb_on_server_status_received;
|
@@ -366,6 +375,9 @@ typedef struct glb_lb_policy {
|
|
366
375
|
/* LB call retry timer callback. */
|
367
376
|
grpc_closure lb_on_call_retry;
|
368
377
|
|
378
|
+
/* LB fallback timer callback. */
|
379
|
+
grpc_closure lb_on_fallback;
|
380
|
+
|
369
381
|
grpc_call *lb_call; /* streaming call to the LB server, */
|
370
382
|
|
371
383
|
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
|
@@ -389,7 +401,9 @@ typedef struct glb_lb_policy {
|
|
389
401
|
/** LB call retry timer */
|
390
402
|
grpc_timer lb_call_retry_timer;
|
391
403
|
|
392
|
-
|
404
|
+
/** LB fallback timer */
|
405
|
+
grpc_timer lb_fallback_timer;
|
406
|
+
|
393
407
|
bool seen_initial_response;
|
394
408
|
|
395
409
|
/* Stats for client-side load reporting. Should be unreffed and
|
@@ -535,6 +549,32 @@ static grpc_lb_addresses *process_serverlist_locked(
|
|
535
549
|
return lb_addresses;
|
536
550
|
}
|
537
551
|
|
552
|
+
/* Returns the backend addresses extracted from the given addresses */
|
553
|
+
static grpc_lb_addresses *extract_backend_addresses_locked(
|
554
|
+
grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) {
|
555
|
+
/* first pass: count the number of backend addresses */
|
556
|
+
size_t num_backends = 0;
|
557
|
+
for (size_t i = 0; i < addresses->num_addresses; ++i) {
|
558
|
+
if (!addresses->addresses[i].is_balancer) {
|
559
|
+
++num_backends;
|
560
|
+
}
|
561
|
+
}
|
562
|
+
/* second pass: actually populate the addresses and (empty) LB tokens */
|
563
|
+
grpc_lb_addresses *backend_addresses =
|
564
|
+
grpc_lb_addresses_create(num_backends, &lb_token_vtable);
|
565
|
+
size_t num_copied = 0;
|
566
|
+
for (size_t i = 0; i < addresses->num_addresses; ++i) {
|
567
|
+
if (addresses->addresses[i].is_balancer) continue;
|
568
|
+
const grpc_resolved_address *addr = &addresses->addresses[i].address;
|
569
|
+
grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr,
|
570
|
+
addr->len, false /* is_balancer */,
|
571
|
+
NULL /* balancer_name */,
|
572
|
+
(void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload);
|
573
|
+
++num_copied;
|
574
|
+
}
|
575
|
+
return backend_addresses;
|
576
|
+
}
|
577
|
+
|
538
578
|
static void update_lb_connectivity_status_locked(
|
539
579
|
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
|
540
580
|
grpc_connectivity_state rr_state, grpc_error *rr_state_error) {
|
@@ -602,35 +642,38 @@ static bool pick_from_internal_rr_locked(
|
|
602
642
|
grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
|
603
643
|
const grpc_lb_policy_pick_args *pick_args, bool force_async,
|
604
644
|
grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
|
605
|
-
//
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
// Not using the RR policy, so unref it.
|
613
|
-
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
|
614
|
-
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
|
615
|
-
(intptr_t)wc_arg->rr_policy);
|
645
|
+
// Check for drops if we are not using fallback backend addresses.
|
646
|
+
if (glb_policy->serverlist != NULL) {
|
647
|
+
// Look at the index into the serverlist to see if we should drop this call.
|
648
|
+
grpc_grpclb_server *server =
|
649
|
+
glb_policy->serverlist->servers[glb_policy->serverlist_index++];
|
650
|
+
if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
|
651
|
+
glb_policy->serverlist_index = 0; // Wrap-around.
|
616
652
|
}
|
617
|
-
|
618
|
-
|
619
|
-
|
620
|
-
|
621
|
-
|
622
|
-
|
623
|
-
|
624
|
-
|
625
|
-
|
626
|
-
|
627
|
-
|
628
|
-
|
653
|
+
if (server->drop) {
|
654
|
+
// Not using the RR policy, so unref it.
|
655
|
+
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
|
656
|
+
gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
|
657
|
+
(intptr_t)wc_arg->rr_policy);
|
658
|
+
}
|
659
|
+
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
|
660
|
+
// Update client load reporting stats to indicate the number of
|
661
|
+
// dropped calls. Note that we have to do this here instead of in
|
662
|
+
// the client_load_reporting filter, because we do not create a
|
663
|
+
// subchannel call (and therefore no client_load_reporting filter)
|
664
|
+
// for dropped calls.
|
665
|
+
grpc_grpclb_client_stats_add_call_dropped_locked(
|
666
|
+
server->load_balance_token, wc_arg->client_stats);
|
667
|
+
grpc_grpclb_client_stats_unref(wc_arg->client_stats);
|
668
|
+
if (force_async) {
|
669
|
+
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
|
670
|
+
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
|
671
|
+
gpr_free(wc_arg->free_when_done);
|
672
|
+
return false;
|
673
|
+
}
|
629
674
|
gpr_free(wc_arg->free_when_done);
|
630
|
-
return
|
675
|
+
return true;
|
631
676
|
}
|
632
|
-
gpr_free(wc_arg->free_when_done);
|
633
|
-
return true;
|
634
677
|
}
|
635
678
|
// Pick via the RR policy.
|
636
679
|
const bool pick_done = grpc_lb_policy_pick_locked(
|
@@ -668,10 +711,20 @@ static bool pick_from_internal_rr_locked(
|
|
668
711
|
|
669
712
|
static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
|
670
713
|
glb_lb_policy *glb_policy) {
|
671
|
-
grpc_lb_addresses *addresses
|
672
|
-
|
714
|
+
grpc_lb_addresses *addresses;
|
715
|
+
if (glb_policy->serverlist != NULL) {
|
716
|
+
GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
|
717
|
+
addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
|
718
|
+
} else {
|
719
|
+
// If rr_handover_locked() is invoked when we haven't received any
|
720
|
+
// serverlist from the balancer, we use the fallback backends returned by
|
721
|
+
// the resolver. Note that the fallback backend list may be empty, in which
|
722
|
+
// case the new round_robin policy will keep the requested picks pending.
|
723
|
+
GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
|
724
|
+
addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses);
|
725
|
+
}
|
673
726
|
GPR_ASSERT(addresses != NULL);
|
674
|
-
grpc_lb_policy_args *args = gpr_zalloc(sizeof(*args));
|
727
|
+
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
|
675
728
|
args->client_channel_factory = glb_policy->cc_factory;
|
676
729
|
args->combiner = glb_policy->base.combiner;
|
677
730
|
// Replace the LB addresses in the channel args that we pass down to
|
@@ -727,7 +780,7 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
|
|
727
780
|
/* Allocate the data for the tracking of the new RR policy's connectivity.
|
728
781
|
* It'll be deallocated in glb_rr_connectivity_changed() */
|
729
782
|
rr_connectivity_data *rr_connectivity =
|
730
|
-
gpr_zalloc(sizeof(rr_connectivity_data));
|
783
|
+
(rr_connectivity_data *)gpr_zalloc(sizeof(rr_connectivity_data));
|
731
784
|
GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
|
732
785
|
glb_rr_connectivity_changed_locked, rr_connectivity,
|
733
786
|
grpc_combiner_scheduler(glb_policy->base.combiner));
|
@@ -775,8 +828,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
|
|
775
828
|
/* glb_policy->rr_policy may be NULL (initial handover) */
|
776
829
|
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
|
777
830
|
glb_lb_policy *glb_policy) {
|
778
|
-
GPR_ASSERT(glb_policy->serverlist != NULL &&
|
779
|
-
glb_policy->serverlist->num_servers > 0);
|
780
831
|
if (glb_policy->shutting_down) return;
|
781
832
|
grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy);
|
782
833
|
GPR_ASSERT(args != NULL);
|
@@ -798,7 +849,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
|
|
798
849
|
|
799
850
|
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
|
800
851
|
void *arg, grpc_error *error) {
|
801
|
-
rr_connectivity_data *rr_connectivity = arg;
|
852
|
+
rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
|
802
853
|
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
|
803
854
|
if (glb_policy->shutting_down) {
|
804
855
|
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
|
@@ -841,8 +892,8 @@ static grpc_slice_hash_table_entry targets_info_entry_create(
|
|
841
892
|
}
|
842
893
|
|
843
894
|
static int balancer_name_cmp_fn(void *a, void *b) {
|
844
|
-
const char *a_str = a;
|
845
|
-
const char *b_str = b;
|
895
|
+
const char *a_str = (const char *)a;
|
896
|
+
const char *b_str = (const char *)b;
|
846
897
|
return strcmp(a_str, b_str);
|
847
898
|
}
|
848
899
|
|
@@ -869,7 +920,8 @@ static grpc_channel_args *build_lb_channel_args(
|
|
869
920
|
grpc_lb_addresses *lb_addresses =
|
870
921
|
grpc_lb_addresses_create(num_grpclb_addrs, NULL);
|
871
922
|
grpc_slice_hash_table_entry *targets_info_entries =
|
872
|
-
gpr_zalloc(sizeof(*targets_info_entries) *
|
923
|
+
(grpc_slice_hash_table_entry *)gpr_zalloc(sizeof(*targets_info_entries) *
|
924
|
+
num_grpclb_addrs);
|
873
925
|
|
874
926
|
size_t lb_addresses_idx = 0;
|
875
927
|
for (size_t i = 0; i < addresses->num_addresses; ++i) {
|
@@ -911,92 +963,6 @@ static grpc_channel_args *build_lb_channel_args(
|
|
911
963
|
return result;
|
912
964
|
}
|
913
965
|
|
914
|
-
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
|
915
|
-
void *arg,
|
916
|
-
grpc_error *error);
|
917
|
-
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
|
918
|
-
grpc_lb_policy_factory *factory,
|
919
|
-
grpc_lb_policy_args *args) {
|
920
|
-
/* Count the number of gRPC-LB addresses. There must be at least one.
|
921
|
-
* TODO(roth): For now, we ignore non-balancer addresses, but in the
|
922
|
-
* future, we may change the behavior such that we fall back to using
|
923
|
-
* the non-balancer addresses if we cannot reach any balancers. In the
|
924
|
-
* fallback case, we should use the LB policy indicated by
|
925
|
-
* GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
|
926
|
-
* unset, we should default to pick_first). */
|
927
|
-
const grpc_arg *arg =
|
928
|
-
grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
|
929
|
-
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
|
930
|
-
return NULL;
|
931
|
-
}
|
932
|
-
grpc_lb_addresses *addresses = arg->value.pointer.p;
|
933
|
-
size_t num_grpclb_addrs = 0;
|
934
|
-
for (size_t i = 0; i < addresses->num_addresses; ++i) {
|
935
|
-
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
|
936
|
-
}
|
937
|
-
if (num_grpclb_addrs == 0) return NULL;
|
938
|
-
|
939
|
-
glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));
|
940
|
-
|
941
|
-
/* Get server name. */
|
942
|
-
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
|
943
|
-
GPR_ASSERT(arg != NULL);
|
944
|
-
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
|
945
|
-
grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
|
946
|
-
GPR_ASSERT(uri->path[0] != '\0');
|
947
|
-
glb_policy->server_name =
|
948
|
-
gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
|
949
|
-
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
|
950
|
-
gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
|
951
|
-
glb_policy->server_name);
|
952
|
-
}
|
953
|
-
grpc_uri_destroy(uri);
|
954
|
-
|
955
|
-
glb_policy->cc_factory = args->client_channel_factory;
|
956
|
-
GPR_ASSERT(glb_policy->cc_factory != NULL);
|
957
|
-
|
958
|
-
arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
|
959
|
-
glb_policy->lb_call_timeout_ms =
|
960
|
-
grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
|
961
|
-
|
962
|
-
// Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
|
963
|
-
// since we use this to trigger the client_load_reporting filter.
|
964
|
-
grpc_arg new_arg =
|
965
|
-
grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb");
|
966
|
-
static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
|
967
|
-
glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
|
968
|
-
args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
|
969
|
-
|
970
|
-
/* Create a client channel over them to communicate with a LB service */
|
971
|
-
glb_policy->response_generator =
|
972
|
-
grpc_fake_resolver_response_generator_create();
|
973
|
-
grpc_channel_args *lb_channel_args = build_lb_channel_args(
|
974
|
-
exec_ctx, addresses, glb_policy->response_generator, args->args);
|
975
|
-
char *uri_str;
|
976
|
-
gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
|
977
|
-
glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
|
978
|
-
exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
|
979
|
-
|
980
|
-
/* Propagate initial resolution */
|
981
|
-
grpc_fake_resolver_response_generator_set_response(
|
982
|
-
exec_ctx, glb_policy->response_generator, lb_channel_args);
|
983
|
-
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
|
984
|
-
gpr_free(uri_str);
|
985
|
-
if (glb_policy->lb_channel == NULL) {
|
986
|
-
gpr_free((void *)glb_policy->server_name);
|
987
|
-
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
|
988
|
-
gpr_free(glb_policy);
|
989
|
-
return NULL;
|
990
|
-
}
|
991
|
-
GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
|
992
|
-
glb_lb_channel_on_connectivity_changed_cb, glb_policy,
|
993
|
-
grpc_combiner_scheduler(args->combiner));
|
994
|
-
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
|
995
|
-
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
|
996
|
-
"grpclb");
|
997
|
-
return &glb_policy->base;
|
998
|
-
}
|
999
|
-
|
1000
966
|
static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
|
1001
967
|
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
|
1002
968
|
GPR_ASSERT(glb_policy->pending_picks == NULL);
|
@@ -1010,7 +976,11 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
|
|
1010
976
|
if (glb_policy->serverlist != NULL) {
|
1011
977
|
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
|
1012
978
|
}
|
979
|
+
if (glb_policy->fallback_backend_addresses != NULL) {
|
980
|
+
grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
|
981
|
+
}
|
1013
982
|
grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
|
983
|
+
grpc_subchannel_index_unref();
|
1014
984
|
if (glb_policy->pending_update_args != NULL) {
|
1015
985
|
grpc_channel_args_destroy(exec_ctx, glb_policy->pending_update_args->args);
|
1016
986
|
gpr_free(glb_policy->pending_update_args);
|
@@ -1150,10 +1120,28 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx,
|
|
1150
1120
|
GRPC_ERROR_UNREF(error);
|
1151
1121
|
}
|
1152
1122
|
|
1123
|
+
static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
1124
|
+
grpc_error *error);
|
1153
1125
|
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
|
1154
1126
|
glb_lb_policy *glb_policy);
|
1155
1127
|
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
|
1156
1128
|
glb_lb_policy *glb_policy) {
|
1129
|
+
/* start a timer to fall back */
|
1130
|
+
if (glb_policy->lb_fallback_timeout_ms > 0 &&
|
1131
|
+
glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) {
|
1132
|
+
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
|
1133
|
+
gpr_timespec deadline = gpr_time_add(
|
1134
|
+
now,
|
1135
|
+
gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN));
|
1136
|
+
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
|
1137
|
+
GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
|
1138
|
+
glb_policy,
|
1139
|
+
grpc_combiner_scheduler(glb_policy->base.combiner));
|
1140
|
+
glb_policy->fallback_timer_active = true;
|
1141
|
+
grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
|
1142
|
+
&glb_policy->lb_on_fallback, now);
|
1143
|
+
}
|
1144
|
+
|
1157
1145
|
glb_policy->started_picking = true;
|
1158
1146
|
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
|
1159
1147
|
query_for_backends_locked(exec_ctx, glb_policy);
|
@@ -1190,7 +1178,8 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
|
|
1190
1178
|
}
|
1191
1179
|
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
|
1192
1180
|
|
1193
|
-
wrapped_rr_closure_arg *wc_arg =
|
1181
|
+
wrapped_rr_closure_arg *wc_arg =
|
1182
|
+
(wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
|
1194
1183
|
|
1195
1184
|
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
|
1196
1185
|
grpc_schedule_on_exec_ctx);
|
@@ -1255,6 +1244,58 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
|
|
1255
1244
|
exec_ctx, &glb_policy->state_tracker, current, notify);
|
1256
1245
|
}
|
1257
1246
|
|
1247
|
+
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
1248
|
+
grpc_error *error) {
|
1249
|
+
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
|
1250
|
+
glb_policy->retry_timer_active = false;
|
1251
|
+
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
|
1252
|
+
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
|
1253
|
+
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
|
1254
|
+
(void *)glb_policy);
|
1255
|
+
}
|
1256
|
+
GPR_ASSERT(glb_policy->lb_call == NULL);
|
1257
|
+
query_for_backends_locked(exec_ctx, glb_policy);
|
1258
|
+
}
|
1259
|
+
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
|
1260
|
+
}
|
1261
|
+
|
1262
|
+
static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
|
1263
|
+
glb_lb_policy *glb_policy) {
|
1264
|
+
if (glb_policy->started_picking && glb_policy->updating_lb_call) {
|
1265
|
+
if (glb_policy->retry_timer_active) {
|
1266
|
+
grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
|
1267
|
+
}
|
1268
|
+
if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
|
1269
|
+
glb_policy->updating_lb_call = false;
|
1270
|
+
} else if (!glb_policy->shutting_down) {
|
1271
|
+
/* if we aren't shutting down, restart the LB client call after some time */
|
1272
|
+
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
|
1273
|
+
gpr_timespec next_try =
|
1274
|
+
gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
|
1275
|
+
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
|
1276
|
+
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
|
1277
|
+
(void *)glb_policy);
|
1278
|
+
gpr_timespec timeout = gpr_time_sub(next_try, now);
|
1279
|
+
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
|
1280
|
+
gpr_log(GPR_DEBUG,
|
1281
|
+
"... retry_timer_active in %" PRId64 ".%09d seconds.",
|
1282
|
+
timeout.tv_sec, timeout.tv_nsec);
|
1283
|
+
} else {
|
1284
|
+
gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
|
1285
|
+
}
|
1286
|
+
}
|
1287
|
+
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
|
1288
|
+
GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
|
1289
|
+
lb_call_on_retry_timer_locked, glb_policy,
|
1290
|
+
grpc_combiner_scheduler(glb_policy->base.combiner));
|
1291
|
+
glb_policy->retry_timer_active = true;
|
1292
|
+
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
|
1293
|
+
&glb_policy->lb_on_call_retry, now);
|
1294
|
+
}
|
1295
|
+
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
|
1296
|
+
"lb_on_server_status_received_locked");
|
1297
|
+
}
|
1298
|
+
|
1258
1299
|
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
1259
1300
|
grpc_error *error);
|
1260
1301
|
|
@@ -1273,7 +1314,7 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
|
|
1273
1314
|
|
1274
1315
|
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
1275
1316
|
grpc_error *error) {
|
1276
|
-
glb_lb_policy *glb_policy = arg;
|
1317
|
+
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
|
1277
1318
|
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
|
1278
1319
|
glb_policy->client_load_report_payload = NULL;
|
1279
1320
|
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
|
@@ -1285,24 +1326,10 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
|
1285
1326
|
schedule_next_client_load_report(exec_ctx, glb_policy);
|
1286
1327
|
}
|
1287
1328
|
|
1288
|
-
static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
|
1289
|
-
glb_lb_policy *glb_policy) {
|
1290
|
-
grpc_op op;
|
1291
|
-
memset(&op, 0, sizeof(op));
|
1292
|
-
op.op = GRPC_OP_SEND_MESSAGE;
|
1293
|
-
op.data.send_message.send_message = glb_policy->client_load_report_payload;
|
1294
|
-
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
|
1295
|
-
client_load_report_done_locked, glb_policy,
|
1296
|
-
grpc_combiner_scheduler(glb_policy->base.combiner));
|
1297
|
-
grpc_call_error call_error = grpc_call_start_batch_and_execute(
|
1298
|
-
exec_ctx, glb_policy->lb_call, &op, 1,
|
1299
|
-
&glb_policy->client_load_report_closure);
|
1300
|
-
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1301
|
-
}
|
1302
|
-
|
1303
1329
|
static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
|
1304
1330
|
grpc_grpclb_dropped_call_counts *drop_entries =
|
1305
|
-
|
1331
|
+
(grpc_grpclb_dropped_call_counts *)
|
1332
|
+
request->client_stats.calls_finished_with_drop.arg;
|
1306
1333
|
return request->client_stats.num_calls_started == 0 &&
|
1307
1334
|
request->client_stats.num_calls_finished == 0 &&
|
1308
1335
|
request->client_stats.num_calls_finished_with_client_failed_to_send ==
|
@@ -1313,11 +1340,14 @@ static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
|
|
1313
1340
|
|
1314
1341
|
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
1315
1342
|
grpc_error *error) {
|
1316
|
-
glb_lb_policy *glb_policy = arg;
|
1343
|
+
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
|
1317
1344
|
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
|
1318
1345
|
glb_policy->client_load_report_timer_pending = false;
|
1319
1346
|
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
|
1320
1347
|
"client_load_report");
|
1348
|
+
if (glb_policy->lb_call == NULL) {
|
1349
|
+
maybe_restart_lb_call(exec_ctx, glb_policy);
|
1350
|
+
}
|
1321
1351
|
return;
|
1322
1352
|
}
|
1323
1353
|
// Construct message payload.
|
@@ -1341,17 +1371,23 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
|
1341
1371
|
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
|
1342
1372
|
grpc_slice_unref_internal(exec_ctx, request_payload_slice);
|
1343
1373
|
grpc_grpclb_request_destroy(request);
|
1344
|
-
//
|
1345
|
-
|
1346
|
-
|
1347
|
-
|
1348
|
-
|
1349
|
-
|
1374
|
+
// Send load report message.
|
1375
|
+
grpc_op op;
|
1376
|
+
memset(&op, 0, sizeof(op));
|
1377
|
+
op.op = GRPC_OP_SEND_MESSAGE;
|
1378
|
+
op.data.send_message.send_message = glb_policy->client_load_report_payload;
|
1379
|
+
GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
|
1380
|
+
client_load_report_done_locked, glb_policy,
|
1381
|
+
grpc_combiner_scheduler(glb_policy->base.combiner));
|
1382
|
+
grpc_call_error call_error = grpc_call_start_batch_and_execute(
|
1383
|
+
exec_ctx, glb_policy->lb_call, &op, 1,
|
1384
|
+
&glb_policy->client_load_report_closure);
|
1385
|
+
if (call_error != GRPC_CALL_OK) {
|
1386
|
+
gpr_log(GPR_ERROR, "call_error=%d", call_error);
|
1387
|
+
GPR_ASSERT(GRPC_CALL_OK == call_error);
|
1350
1388
|
}
|
1351
1389
|
}
|
1352
1390
|
|
1353
|
-
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
|
1354
|
-
void *arg, grpc_error *error);
|
1355
1391
|
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
|
1356
1392
|
void *arg, grpc_error *error);
|
1357
1393
|
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
@@ -1396,9 +1432,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
   grpc_slice_unref_internal(exec_ctx, request_payload_slice);
   grpc_grpclb_request_destroy(request);
 
-  GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
-                    lb_on_sent_initial_request_locked, glb_policy,
-                    grpc_combiner_scheduler(glb_policy->base.combiner));
   GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                     lb_on_server_status_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1413,7 +1446,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
       GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
       GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
 
-  glb_policy->initial_request_sent = false;
   glb_policy->seen_initial_response = false;
   glb_policy->last_client_load_report_counters_were_zero = false;
 }
@@ -1430,7 +1462,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
   grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
   grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
 
-  if (!glb_policy->client_load_report_timer_pending) {
+  if (glb_policy->client_load_report_timer_pending) {
     grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
   }
 }
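
The corrected condition above cancels the client load report timer only when it is actually pending; cancelling a timer that was never armed can touch uninitialized state. The general pattern, sketched with a hypothetical wrapper struct around the internal iomgr timer API used throughout this file:

    #include <stdbool.h>

    #include "src/core/lib/iomgr/timer.h"

    typedef struct {
      grpc_timer timer;
      bool timer_pending; /* set when armed, cleared by the timer callback */
    } timed_state;

    void arm_timer(grpc_exec_ctx *exec_ctx, timed_state *st,
                   gpr_timespec deadline, grpc_closure *on_fire,
                   gpr_timespec now) {
      st->timer_pending = true;
      grpc_timer_init(exec_ctx, &st->timer, deadline, on_fire, now);
    }

    void teardown(grpc_exec_ctx *exec_ctx, timed_state *st) {
      if (st->timer_pending) { /* the corrected check */
        grpc_timer_cancel(exec_ctx, &st->timer);
      }
    }
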
@@ -1454,7 +1486,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(glb_policy->lb_call != NULL);
 
   grpc_call_error call_error;
-  grpc_op ops[4];
+  grpc_op ops[3];
   memset(ops, 0, sizeof(ops));
 
   grpc_op *op = ops;
@@ -1475,13 +1507,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   op->flags = 0;
   op->reserved = NULL;
   op++;
-
-
-  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
-                          "lb_on_sent_initial_request_locked");
-  call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
-      &glb_policy->lb_on_sent_initial_request);
+  call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call,
+                                                 ops, (size_t)(op - ops), NULL);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 
   op = ops;
@@ -1518,22 +1545,9 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
 
-static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
-                                              void *arg, grpc_error *error) {
-  glb_lb_policy *glb_policy = arg;
-  glb_policy->initial_request_sent = true;
-  // If we attempted to send a client load report before the initial
-  // request was sent, send the load report now.
-  if (glb_policy->client_load_report_payload != NULL) {
-    do_send_client_load_report_locked(exec_ctx, glb_policy);
-  }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                            "lb_on_sent_initial_request_locked");
-}
-
 static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                            grpc_error *error) {
-  glb_lb_policy *glb_policy = arg;
+  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
   grpc_op ops[2];
   memset(ops, 0, sizeof(ops));
   grpc_op *op = ops;
@@ -1544,6 +1558,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
     grpc_byte_buffer_reader bbr;
     grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
     grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
+    grpc_byte_buffer_reader_destroy(&bbr);
     grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
 
     grpc_grpclb_initial_response *response = NULL;
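
The added `grpc_byte_buffer_reader_destroy(&bbr);` is a leak fix: the reader may hold its own reference (for example, to a decompressed copy of the payload), so destroying the byte buffer alone is not enough. The read-then-destroy shape as a self-contained sketch:

    #include <grpc/byte_buffer.h>
    #include <grpc/byte_buffer_reader.h>

    /* Drain an entire byte buffer into one slice, releasing the reader's
     * internal references before returning. Caller unrefs the slice. */
    grpc_slice read_all(grpc_byte_buffer *payload) {
      grpc_byte_buffer_reader bbr;
      grpc_byte_buffer_reader_init(&bbr, payload);
      grpc_slice out = grpc_byte_buffer_reader_readall(&bbr);
      grpc_byte_buffer_reader_destroy(&bbr);
      return out;
    }
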
@@ -1605,6 +1620,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
         if (glb_policy->serverlist != NULL) {
           /* dispose of the old serverlist */
           grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
+        } else {
+          /* or dispose of the fallback */
+          grpc_lb_addresses_destroy(exec_ctx,
+                                    glb_policy->fallback_backend_addresses);
+          glb_policy->fallback_backend_addresses = NULL;
+          if (glb_policy->fallback_timer_active) {
+            grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+            glb_policy->fallback_timer_active = false;
+          }
         }
         /* and update the copy in the glb_lb_policy instance. This
          * serverlist instance will be destroyed either upon the next
@@ -1615,9 +1639,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
         }
       } else {
         if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-          gpr_log(GPR_INFO,
-                  "Received empty server list. Picks will stay pending until "
-                  "a response with > 0 servers is received");
+          gpr_log(GPR_INFO, "Received empty server list, ignoring.");
         }
         grpc_grpclb_destroy_serverlist(serverlist);
       }
@@ -1640,6 +1662,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
           exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
           &glb_policy->lb_on_response_received); /* loop */
       GPR_ASSERT(GRPC_CALL_OK == call_error);
+    } else {
+      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                                "lb_on_response_received_locked_shutdown");
     }
   } else { /* empty payload: call cancelled. */
     /* dispose of the "lb_on_response_received_locked" weak ref taken in
@@ -1649,24 +1674,30 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
   }
 }
 
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                          grpc_error *error) {
-  glb_lb_policy *glb_policy = arg;
-  glb_policy->retry_timer_active = false;
-  if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO, "Restarting call to LB server (grpclb %p)",
-              (void *)glb_policy);
+static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                        grpc_error *error) {
+  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
+  glb_policy->fallback_timer_active = false;
+  /* If we receive a serverlist after the timer fires but before this callback
+   * actually runs, don't fall back. */
+  if (glb_policy->serverlist == NULL) {
+    if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+      if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+        gpr_log(GPR_INFO,
+                "Falling back to use backends from resolver (grpclb %p)",
+                (void *)glb_policy);
+      }
+      GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+      rr_handover_locked(exec_ctx, glb_policy);
     }
-    GPR_ASSERT(glb_policy->lb_call == NULL);
-    query_for_backends_locked(exec_ctx, glb_policy);
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                            "grpclb_fallback_timer");
 }
 
 static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
                                                 void *arg, grpc_error *error) {
-  glb_lb_policy *glb_policy = arg;
+  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
   GPR_ASSERT(glb_policy->lb_call != NULL);
   if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     char *status_details =
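
`lb_on_fallback_timer_locked` is half of the new grpclb fallback feature: if no serverlist has arrived from the balancer when the fallback timer fires, the policy hands the resolver-provided backend addresses straight to the embedded round_robin policy via `rr_handover_locked`. The `serverlist == NULL` re-check matters because the timer can fire while a serverlist is in flight; both callbacks run under the policy's combiner, so the check is race-free. The arming side is outside this excerpt; a hedged sketch of how such a timer would be armed, assuming a `lb_on_fallback` closure field and the weak-ref name used above:

    /* Sketch only: arm the fallback timer under the policy combiner. */
    static void arm_fallback_timer(grpc_exec_ctx *exec_ctx,
                                   glb_lb_policy *glb_policy) {
      gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
      gpr_timespec deadline = gpr_time_add(
          now, gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms,
                                    GPR_TIMESPAN));
      GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
      GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback,
                        lb_on_fallback_timer_locked, glb_policy,
                        grpc_combiner_scheduler(glb_policy->base.combiner));
      glb_policy->fallback_timer_active = true;
      grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
                      &glb_policy->lb_on_fallback, now);
    }
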
@@ -1680,66 +1711,30 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
   }
   /* We need to perform cleanups no matter what. */
   lb_call_destroy_locked(exec_ctx, glb_policy);
-
-  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
-    if (glb_policy->retry_timer_active) {
-      grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
-    }
-    if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
-    glb_policy->updating_lb_call = false;
-  } else if (!glb_policy->shutting_down) {
-    /* if we aren't shutting down, restart the LB client call after some time */
-    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
-    gpr_timespec next_try =
-        gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
-              (void *)glb_policy);
-      gpr_timespec timeout = gpr_time_sub(next_try, now);
-      if (gpr_time_cmp(timeout, gpr_time_0(GPR_TIMESPAN)) > 0) {
-        gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
-                timeout.tv_sec, timeout.tv_nsec);
-      } else {
-        gpr_log(GPR_DEBUG, "... retrying immediately.");
-      }
-    }
-    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
-    GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
-                      lb_call_on_retry_timer_locked, glb_policy,
-                      grpc_combiner_scheduler(glb_policy->base.combiner));
-    glb_policy->retry_timer_active = true;
-    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
-                    &glb_policy->lb_on_call_retry, now);
-  }
+  // If the load report timer is still pending, we wait for it to be
+  // called before restarting the call. Otherwise, we restart the call
+  // here.
+  if (!glb_policy->client_load_report_timer_pending) {
+    maybe_restart_lb_call(exec_ctx, glb_policy);
+  }
+}
+
+static void fallback_update_locked(grpc_exec_ctx *exec_ctx,
+                                   glb_lb_policy *glb_policy,
+                                   const grpc_lb_addresses *addresses) {
+  GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
+  grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+  glb_policy->fallback_backend_addresses =
+      extract_backend_addresses_locked(exec_ctx, addresses);
+  if (glb_policy->lb_fallback_timeout_ms > 0 &&
+      !glb_policy->fallback_timer_active) {
+    rr_handover_locked(exec_ctx, glb_policy);
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                            "lb_on_server_status_received_locked");
 }
 
 static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                               const grpc_lb_policy_args *args) {
   glb_lb_policy *glb_policy = (glb_lb_policy *)policy;
-  if (glb_policy->updating_lb_channel) {
-    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
-      gpr_log(GPR_INFO,
-              "Update already in progress for grpclb %p. Deferring update.",
-              (void *)glb_policy);
-    }
-    if (glb_policy->pending_update_args != NULL) {
-      grpc_channel_args_destroy(exec_ctx,
-                                glb_policy->pending_update_args->args);
-      gpr_free(glb_policy->pending_update_args);
-    }
-    glb_policy->pending_update_args =
-        gpr_zalloc(sizeof(*glb_policy->pending_update_args));
-    glb_policy->pending_update_args->client_channel_factory =
-        args->client_channel_factory;
-    glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
-    glb_policy->pending_update_args->combiner = args->combiner;
-    return;
-  }
-
-  glb_policy->updating_lb_channel = true;
-  // Propagate update to lb_channel (pick first).
   const grpc_arg *arg =
       grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
   if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -1757,12 +1752,43 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
               "ignoring.",
               (void *)glb_policy);
     }
+    return;
+  }
+  const grpc_lb_addresses *addresses =
+      (const grpc_lb_addresses *)arg->value.pointer.p;
+
+  if (glb_policy->serverlist == NULL) {
+    // If a non-empty serverlist hasn't been received from the balancer,
+    // propagate the update to fallback_backend_addresses.
+    fallback_update_locked(exec_ctx, glb_policy, addresses);
+  } else if (glb_policy->updating_lb_channel) {
+    // If we have received a serverlist from the balancer, we need to defer
+    // the update when there is an in-progress one.
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+      gpr_log(GPR_INFO,
+              "Update already in progress for grpclb %p. Deferring update.",
+              (void *)glb_policy);
+    }
+    if (glb_policy->pending_update_args != NULL) {
+      grpc_channel_args_destroy(exec_ctx,
+                                glb_policy->pending_update_args->args);
+      gpr_free(glb_policy->pending_update_args);
+    }
+    glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
+        sizeof(*glb_policy->pending_update_args));
+    glb_policy->pending_update_args->client_channel_factory =
+        args->client_channel_factory;
+    glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
+    glb_policy->pending_update_args->combiner = args->combiner;
+    return;
   }
-
+
+  glb_policy->updating_lb_channel = true;
   GPR_ASSERT(glb_policy->lb_channel != NULL);
   grpc_channel_args *lb_channel_args = build_lb_channel_args(
       exec_ctx, addresses, glb_policy->response_generator, args->args);
-  /* Propagate updates to the LB channel through the fake resolver */
+  /* Propagate updates to the LB channel (pick first) through the fake resolver
+   */
   grpc_fake_resolver_response_generator_set_response(
       exec_ctx, glb_policy->response_generator, lb_channel_args);
   grpc_channel_args_destroy(exec_ctx, lb_channel_args);
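
After this change, `glb_update_locked` routes an update one of three ways: into `fallback_backend_addresses` while no serverlist has been received, into `pending_update_args` when an LB-channel update is already in flight, or straight through to the fake resolver. The deferral has to deep-copy the channel args because the caller's `args` do not outlive the call; factored out as a sketch (hypothetical helper name, same internal calls as the hunk above):

    /* Stash a deep copy of an update to replay after the in-flight
     * LB channel update completes. */
    static void defer_update(grpc_exec_ctx *exec_ctx,
                             glb_lb_policy *glb_policy,
                             const grpc_lb_policy_args *args) {
      if (glb_policy->pending_update_args != NULL) {
        grpc_channel_args_destroy(exec_ctx,
                                  glb_policy->pending_update_args->args);
        gpr_free(glb_policy->pending_update_args);
      }
      glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
          sizeof(*glb_policy->pending_update_args));
      glb_policy->pending_update_args->client_channel_factory =
          args->client_channel_factory;
      glb_policy->pending_update_args->args =
          grpc_channel_args_copy(args->args);
      glb_policy->pending_update_args->combiner = args->combiner;
    }
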
@@ -1791,7 +1817,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
 static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
                                                       void *arg,
                                                       grpc_error *error) {
-  glb_lb_policy *glb_policy = arg;
+  glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
   if (glb_policy->shutting_down) goto done;
   // Re-initialize the lb_call. This should also take care of updating the
   // embedded RR policy. Note that the current RR policy, if any, will stay in
@@ -1862,6 +1888,94 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
     glb_notify_on_state_change_locked,
     glb_update_locked};
 
+static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
+                                  grpc_lb_policy_factory *factory,
+                                  grpc_lb_policy_args *args) {
+  /* Count the number of gRPC-LB addresses. There must be at least one. */
+  const grpc_arg *arg =
+      grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
+  if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
+    return NULL;
+  }
+  grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
+  size_t num_grpclb_addrs = 0;
+  for (size_t i = 0; i < addresses->num_addresses; ++i) {
+    if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
+  }
+  if (num_grpclb_addrs == 0) return NULL;
+
+  glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
+
+  /* Get server name. */
+  arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
+  GPR_ASSERT(arg != NULL);
+  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
+  grpc_uri *uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+  GPR_ASSERT(uri->path[0] != '\0');
+  glb_policy->server_name =
+      gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+    gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
+            glb_policy->server_name);
+  }
+  grpc_uri_destroy(uri);
+
+  glb_policy->cc_factory = args->client_channel_factory;
+  GPR_ASSERT(glb_policy->cc_factory != NULL);
+
+  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
+  glb_policy->lb_call_timeout_ms =
+      grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
+
+  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
+  glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer(
+      arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0,
+                                  INT_MAX});
+
+  // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
+  // since we use this to trigger the client_load_reporting filter.
+  grpc_arg new_arg = grpc_channel_arg_string_create(
+      (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb");
+  static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+  glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
+      args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+
+  /* Extract the backend addresses (may be empty) from the resolver for
+   * fallback. */
+  glb_policy->fallback_backend_addresses =
+      extract_backend_addresses_locked(exec_ctx, addresses);
+
+  /* Create a client channel over them to communicate with a LB service */
+  glb_policy->response_generator =
+      grpc_fake_resolver_response_generator_create();
+  grpc_channel_args *lb_channel_args = build_lb_channel_args(
+      exec_ctx, addresses, glb_policy->response_generator, args->args);
+  char *uri_str;
+  gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
+  glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
+      exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
+
+  /* Propagate initial resolution */
+  grpc_fake_resolver_response_generator_set_response(
+      exec_ctx, glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+  gpr_free(uri_str);
+  if (glb_policy->lb_channel == NULL) {
+    gpr_free((void *)glb_policy->server_name);
+    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+    gpr_free(glb_policy);
+    return NULL;
+  }
+  grpc_subchannel_index_ref();
+  GRPC_CLOSURE_INIT(&glb_policy->lb_channel_on_connectivity_changed,
+                    glb_lb_channel_on_connectivity_changed_cb, glb_policy,
+                    grpc_combiner_scheduler(args->combiner));
+  grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable, args->combiner);
+  grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
+                               "grpclb");
+  return &glb_policy->base;
+}
+
 static void glb_factory_ref(grpc_lb_policy_factory *factory) {}
 
 static void glb_factory_unref(grpc_lb_policy_factory *factory) {}
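
The relocated `glb_create` also reads the new fallback knob, `GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS`, from the channel args. From application code this is an ordinary integer channel arg; a hedged example of setting it at channel-creation time (insecure channel shown for brevity):

    #include <grpc/grpc.h>

    /* Create a channel whose grpclb policy falls back to resolver-provided
     * backends after 10 seconds without a balancer serverlist. */
    grpc_channel *create_channel_with_fallback(const char *target) {
      grpc_arg arg;
      arg.type = GRPC_ARG_INTEGER;
      arg.key = (char *)GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS;
      arg.value.integer = 10000; /* milliseconds */
      grpc_channel_args args = {1, &arg};
      return grpc_insecure_channel_create(target, &args, NULL);
    }
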