grpc 1.3.4 → 1.4.0
Potentially problematic release: this version of grpc might be problematic.
- checksums.yaml +4 -4
- data/Makefile +581 -450
- data/include/grpc/census.h +49 -49
- data/include/grpc/grpc.h +16 -70
- data/include/grpc/grpc_security.h +59 -59
- data/include/grpc/grpc_security_constants.h +9 -9
- data/include/grpc/impl/codegen/atm.h +1 -1
- data/include/grpc/impl/codegen/atm_windows.h +4 -4
- data/include/grpc/impl/codegen/byte_buffer_reader.h +2 -2
- data/include/grpc/impl/codegen/compression_types.h +4 -5
- data/include/grpc/impl/codegen/gpr_slice.h +5 -5
- data/include/grpc/impl/codegen/gpr_types.h +6 -7
- data/include/grpc/impl/codegen/grpc_types.h +128 -59
- data/include/grpc/impl/codegen/port_platform.h +6 -0
- data/include/grpc/impl/codegen/propagation_bits.h +2 -2
- data/include/grpc/impl/codegen/slice.h +13 -12
- data/include/grpc/impl/codegen/status.h +23 -18
- data/include/grpc/impl/codegen/sync.h +1 -1
- data/include/grpc/load_reporting.h +6 -6
- data/include/grpc/slice.h +47 -25
- data/include/grpc/slice_buffer.h +18 -14
- data/include/grpc/support/alloc.h +7 -7
- data/include/grpc/support/cmdline.h +10 -10
- data/include/grpc/support/cpu.h +3 -3
- data/include/grpc/support/histogram.h +1 -1
- data/include/grpc/support/host_port.h +2 -2
- data/include/grpc/support/log.h +9 -9
- data/include/grpc/support/log_windows.h +1 -1
- data/include/grpc/support/string_util.h +3 -3
- data/include/grpc/support/subprocess.h +3 -3
- data/include/grpc/support/sync.h +31 -31
- data/include/grpc/support/thd.h +11 -11
- data/include/grpc/support/time.h +12 -12
- data/include/grpc/support/tls.h +1 -1
- data/include/grpc/support/tls_gcc.h +2 -2
- data/include/grpc/support/tls_msvc.h +1 -1
- data/include/grpc/support/tls_pthread.h +1 -1
- data/include/grpc/support/useful.h +2 -2
- data/include/grpc/support/workaround_list.h +46 -0
- data/src/core/ext/census/context.c +1 -1
- data/src/core/ext/census/intrusive_hash_map.c +319 -0
- data/src/core/ext/census/intrusive_hash_map.h +167 -0
- data/src/core/ext/census/intrusive_hash_map_internal.h +63 -0
- data/src/core/ext/census/resource.c +3 -1
- data/src/core/ext/filters/client_channel/channel_connectivity.c +1 -1
- data/src/core/ext/filters/client_channel/client_channel.c +173 -103
- data/src/core/ext/filters/client_channel/client_channel_plugin.c +3 -2
- data/src/core/ext/filters/client_channel/lb_policy.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy.h +8 -7
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +153 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +42 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +405 -102
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +133 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +65 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +90 -51
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +7 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +19 -8
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +63 -34
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +188 -294
- data/src/core/ext/filters/client_channel/lb_policy_factory.c +28 -5
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +18 -4
- data/src/core/ext/filters/client_channel/parse_address.c +90 -59
- data/src/core/ext/filters/client_channel/parse_address.h +17 -8
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +11 -7
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +59 -14
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +6 -0
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c +3 -3
- data/src/core/ext/filters/client_channel/subchannel.c +20 -17
- data/src/core/ext/filters/client_channel/subchannel.h +1 -0
- data/src/core/ext/filters/client_channel/subchannel_index.c +11 -1
- data/src/core/ext/filters/client_channel/uri_parser.c +36 -22
- data/src/core/ext/filters/client_channel/uri_parser.h +1 -1
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.c +42 -17
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.h +8 -9
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.c +19 -11
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.h +3 -6
- data/src/core/ext/filters/http/http_filters_plugin.c +104 -0
- data/src/core/{lib/channel/compress_filter.c → ext/filters/http/message_compress/message_compress_filter.c} +124 -23
- data/src/core/{lib/channel/compress_filter.h → ext/filters/http/message_compress/message_compress_filter.h} +5 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.c +4 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.h +3 -3
- data/src/core/ext/filters/load_reporting/load_reporting.c +2 -25
- data/src/core/ext/filters/load_reporting/load_reporting_filter.c +26 -1
- data/src/core/ext/filters/max_age/max_age_filter.c +14 -14
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.c +91 -47
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.h +3 -3
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +223 -0
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +40 -0
- data/src/core/ext/filters/workarounds/workaround_utils.c +65 -0
- data/src/core/ext/filters/workarounds/workaround_utils.h +52 -0
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +3 -2
- data/src/core/ext/transport/chttp2/transport/bin_decoder.c +2 -2
- data/src/core/ext/transport/chttp2/transport/bin_encoder.c +3 -3
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +319 -175
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -2
- data/src/core/ext/transport/chttp2/transport/frame_data.c +203 -164
- data/src/core/ext/transport/chttp2/transport/frame_data.h +8 -14
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +5 -5
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +1 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +4 -4
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +2 -4
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +4 -3
- data/src/core/ext/transport/chttp2/transport/internal.h +50 -33
- data/src/core/ext/transport/chttp2/transport/parsing.c +10 -11
- data/src/core/ext/transport/chttp2/transport/writing.c +32 -13
- data/src/core/lib/channel/channel_args.c +30 -9
- data/src/core/lib/channel/channel_args.h +5 -1
- data/src/core/lib/channel/channel_stack.c +1 -1
- data/src/core/lib/channel/channel_stack.h +2 -2
- data/src/core/lib/channel/channel_stack_builder.c +13 -1
- data/src/core/lib/channel/channel_stack_builder.h +5 -1
- data/src/core/lib/channel/connected_channel.c +3 -1
- data/src/core/lib/channel/context.h +2 -2
- data/src/core/lib/compression/message_compress.c +2 -2
- data/src/core/lib/debug/trace.c +13 -6
- data/src/core/lib/debug/trace.h +27 -1
- data/src/core/lib/http/httpcli.c +1 -1
- data/src/core/lib/http/httpcli_security_connector.c +9 -11
- data/src/core/lib/http/parser.c +2 -2
- data/src/core/lib/http/parser.h +2 -1
- data/src/core/lib/iomgr/combiner.c +6 -6
- data/src/core/lib/iomgr/combiner.h +2 -1
- data/src/core/lib/iomgr/error.c +12 -5
- data/src/core/lib/iomgr/error.h +13 -13
- data/src/core/lib/iomgr/ev_epoll1_linux.c +984 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.h +44 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +2146 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +1337 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epollex_linux.c +1511 -0
- data/src/core/lib/iomgr/ev_epollex_linux.h +43 -0
- data/src/core/lib/iomgr/{ev_epoll_linux.c → ev_epollsig_linux.c} +41 -33
- data/src/core/lib/iomgr/{ev_epoll_linux.h → ev_epollsig_linux.h} +4 -4
- data/src/core/lib/iomgr/ev_poll_posix.c +12 -27
- data/src/core/lib/iomgr/ev_poll_posix.h +2 -2
- data/src/core/lib/iomgr/ev_posix.c +22 -8
- data/src/core/lib/iomgr/ev_posix.h +4 -3
- data/src/core/lib/iomgr/ev_windows.c +43 -0
- data/src/core/lib/iomgr/exec_ctx.c +5 -0
- data/src/core/lib/iomgr/exec_ctx.h +2 -0
- data/src/core/lib/iomgr/iomgr.c +4 -0
- data/src/core/lib/iomgr/iomgr.h +3 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.c +116 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.h +41 -0
- data/src/core/lib/iomgr/lockfree_event.c +16 -0
- data/src/core/lib/iomgr/pollset.h +2 -5
- data/src/core/lib/iomgr/pollset_uv.c +1 -1
- data/src/core/lib/iomgr/pollset_windows.c +3 -3
- data/src/core/lib/iomgr/resource_quota.c +9 -8
- data/src/core/lib/iomgr/resource_quota.h +2 -1
- data/src/core/lib/iomgr/sockaddr_utils.h +1 -1
- data/src/core/lib/iomgr/socket_mutator.h +2 -0
- data/src/core/lib/iomgr/sys_epoll_wrapper.h +43 -0
- data/src/core/lib/iomgr/tcp_client_posix.c +6 -6
- data/src/core/lib/iomgr/tcp_client_uv.c +3 -3
- data/src/core/lib/iomgr/tcp_posix.c +7 -7
- data/src/core/lib/iomgr/tcp_posix.h +2 -1
- data/src/core/lib/iomgr/tcp_server_posix.c +1 -1
- data/src/core/lib/iomgr/tcp_uv.c +6 -6
- data/src/core/lib/iomgr/tcp_uv.h +2 -1
- data/src/core/lib/iomgr/tcp_windows.c +1 -1
- data/src/core/lib/iomgr/timer_generic.c +24 -25
- data/src/core/lib/iomgr/timer_manager.c +276 -0
- data/src/core/lib/iomgr/timer_manager.h +52 -0
- data/src/core/lib/iomgr/timer_uv.c +6 -0
- data/src/core/lib/iomgr/udp_server.c +42 -9
- data/src/core/lib/iomgr/udp_server.h +3 -1
- data/src/core/lib/security/credentials/credentials.c +0 -1
- data/src/core/lib/security/credentials/fake/fake_credentials.c +23 -0
- data/src/core/lib/security/credentials/fake/fake_credentials.h +12 -9
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_credentials.c +1 -1
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +1 -1
- data/src/core/lib/security/credentials/ssl/ssl_credentials.c +24 -53
- data/src/core/lib/security/transport/client_auth_filter.c +9 -3
- data/src/core/lib/security/transport/secure_endpoint.c +7 -7
- data/src/core/lib/security/transport/secure_endpoint.h +1 -1
- data/src/core/lib/security/transport/security_connector.c +45 -57
- data/src/core/lib/security/transport/security_connector.h +10 -14
- data/src/core/lib/security/transport/security_handshaker.c +123 -97
- data/src/core/lib/slice/b64.c +1 -1
- data/src/core/lib/slice/percent_encoding.c +3 -3
- data/src/core/lib/slice/slice.c +66 -33
- data/src/core/lib/slice/slice_buffer.c +25 -6
- data/src/core/lib/slice/slice_hash_table.c +33 -35
- data/src/core/lib/slice/slice_hash_table.h +7 -12
- data/src/core/lib/support/atomic.h +45 -0
- data/src/core/lib/support/atomic_with_atm.h +70 -0
- data/src/core/lib/support/atomic_with_std.h +48 -0
- data/src/core/lib/support/avl.c +14 -14
- data/src/core/lib/support/cmdline.c +3 -3
- data/src/core/lib/support/histogram.c +2 -2
- data/src/core/lib/support/host_port.c +1 -1
- data/src/core/lib/support/memory.h +74 -0
- data/src/core/lib/support/mpscq.c +36 -2
- data/src/core/lib/support/mpscq.h +28 -1
- data/src/core/lib/support/stack_lockfree.c +3 -36
- data/src/core/lib/support/string.c +12 -12
- data/src/core/lib/support/string_posix.c +1 -1
- data/src/core/lib/support/subprocess_posix.c +2 -2
- data/src/core/lib/support/thd_posix.c +1 -1
- data/src/core/lib/support/time_posix.c +8 -0
- data/src/core/lib/support/tmpfile_posix.c +10 -10
- data/src/core/lib/surface/alarm.c +3 -1
- data/src/core/lib/surface/api_trace.c +2 -1
- data/src/core/lib/surface/api_trace.h +2 -2
- data/src/core/lib/surface/byte_buffer_reader.c +1 -1
- data/src/core/lib/surface/call.c +65 -22
- data/src/core/lib/surface/call.h +4 -2
- data/src/core/lib/surface/channel_init.c +2 -19
- data/src/core/lib/surface/channel_stack_type.c +18 -0
- data/src/core/lib/surface/channel_stack_type.h +2 -0
- data/src/core/lib/surface/completion_queue.c +694 -247
- data/src/core/lib/surface/completion_queue.h +30 -13
- data/src/core/lib/surface/completion_queue_factory.c +24 -9
- data/src/core/lib/surface/init.c +1 -52
- data/src/core/lib/surface/{lame_client.c → lame_client.cc} +37 -26
- data/src/core/lib/surface/server.c +79 -110
- data/src/core/lib/surface/server.h +2 -1
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/bdp_estimator.c +25 -9
- data/src/core/lib/transport/bdp_estimator.h +7 -1
- data/src/core/lib/transport/byte_stream.c +23 -9
- data/src/core/lib/transport/byte_stream.h +15 -6
- data/src/core/lib/transport/connectivity_state.c +6 -6
- data/src/core/lib/transport/connectivity_state.h +2 -1
- data/src/core/lib/transport/service_config.c +6 -13
- data/src/core/lib/transport/service_config.h +2 -2
- data/src/core/lib/transport/static_metadata.c +403 -389
- data/src/core/lib/transport/static_metadata.h +127 -114
- data/src/core/plugin_registry/grpc_plugin_registry.c +16 -0
- data/src/core/tsi/fake_transport_security.c +5 -4
- data/src/core/tsi/ssl_transport_security.c +71 -82
- data/src/core/tsi/ssl_transport_security.h +39 -61
- data/src/core/tsi/transport_security.c +83 -2
- data/src/core/tsi/transport_security.h +27 -2
- data/src/core/tsi/transport_security_adapter.c +236 -0
- data/src/core/tsi/transport_security_adapter.h +62 -0
- data/src/core/tsi/transport_security_interface.h +179 -66
- data/src/ruby/ext/grpc/extconf.rb +2 -1
- data/src/ruby/ext/grpc/rb_byte_buffer.c +8 -6
- data/src/ruby/ext/grpc/rb_call.c +56 -48
- data/src/ruby/ext/grpc/rb_call.h +3 -4
- data/src/ruby/ext/grpc/rb_call_credentials.c +23 -22
- data/src/ruby/ext/grpc/rb_channel.c +2 -3
- data/src/ruby/ext/grpc/rb_channel_args.c +11 -9
- data/src/ruby/ext/grpc/rb_channel_credentials.c +16 -12
- data/src/ruby/ext/grpc/rb_completion_queue.c +7 -9
- data/src/ruby/ext/grpc/rb_compression_options.c +7 -6
- data/src/ruby/ext/grpc/rb_event_thread.c +10 -12
- data/src/ruby/ext/grpc/rb_event_thread.h +1 -2
- data/src/ruby/ext/grpc/rb_grpc.c +11 -15
- data/src/ruby/ext/grpc/rb_grpc.h +2 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +16 -6
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +25 -10
- data/src/ruby/ext/grpc/rb_server.c +26 -28
- data/src/ruby/lib/grpc/grpc.rb +1 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/third_party/cares/config_linux/ares_config.h +36 -2
- data/third_party/zlib/adler32.c +14 -7
- data/third_party/zlib/compress.c +24 -18
- data/third_party/zlib/crc32.c +29 -12
- data/third_party/zlib/deflate.c +499 -303
- data/third_party/zlib/deflate.h +19 -16
- data/third_party/zlib/gzguts.h +16 -7
- data/third_party/zlib/gzlib.c +17 -14
- data/third_party/zlib/gzread.c +108 -48
- data/third_party/zlib/gzwrite.c +210 -122
- data/third_party/zlib/infback.c +2 -2
- data/third_party/zlib/inffast.c +34 -51
- data/third_party/zlib/inflate.c +86 -37
- data/third_party/zlib/inflate.h +7 -4
- data/third_party/zlib/inftrees.c +12 -14
- data/third_party/zlib/trees.c +38 -61
- data/third_party/zlib/uncompr.c +66 -32
- data/third_party/zlib/zconf.h +32 -9
- data/third_party/zlib/zlib.h +298 -154
- data/third_party/zlib/zutil.c +25 -24
- data/third_party/zlib/zutil.h +35 -17
- metadata +63 -30
--- a/data/src/core/ext/filters/client_channel/client_channel_plugin.c
+++ b/data/src/core/ext/filters/client_channel/client_channel_plugin.c
@@ -91,8 +91,9 @@ void grpc_client_channel_init(void) {
   grpc_subchannel_index_init();
   grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MIN,
                                    set_default_host_if_unset, NULL);
-  grpc_channel_init_register_stage(
-
+  grpc_channel_init_register_stage(
+      GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
+      (void *)&grpc_client_channel_filter);
   grpc_http_connect_register_handshaker_factory();
 }
 
--- a/data/src/core/ext/filters/client_channel/lb_policy.c
+++ b/data/src/core/ext/filters/client_channel/lb_policy.c
@@ -119,9 +119,10 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
 int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                                const grpc_lb_policy_pick_args *pick_args,
                                grpc_connected_subchannel **target,
+                               grpc_call_context_element *context,
                                void **user_data, grpc_closure *on_complete) {
   return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
-                                     user_data, on_complete);
+                                     context, user_data, on_complete);
 }
 
 void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
--- a/data/src/core/ext/filters/client_channel/lb_policy.h
+++ b/data/src/core/ext/filters/client_channel/lb_policy.h
@@ -43,9 +43,6 @@
 typedef struct grpc_lb_policy grpc_lb_policy;
 typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;
 
-typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
-                                   grpc_status_code status, const char *errmsg);
-
 struct grpc_lb_policy {
   const grpc_lb_policy_vtable *vtable;
   gpr_atm ref_pair;
@@ -65,8 +62,6 @@ typedef struct grpc_lb_policy_pick_args {
   uint32_t initial_metadata_flags;
   /** Storage for LB token in \a initial_metadata, or NULL if not used */
   grpc_linked_mdelem *lb_token_mdelem_storage;
-  /** Deadline for the call to the LB server */
-  gpr_timespec deadline;
 } grpc_lb_policy_pick_args;
 
 struct grpc_lb_policy_vtable {
@@ -76,7 +71,8 @@ struct grpc_lb_policy_vtable {
   /** \see grpc_lb_policy_pick */
   int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                      const grpc_lb_policy_pick_args *pick_args,
-                     grpc_connected_subchannel **target,
+                     grpc_connected_subchannel **target,
+                     grpc_call_context_element *context, void **user_data,
                      grpc_closure *on_complete);
 
   /** \see grpc_lb_policy_cancel_pick */
@@ -153,9 +149,13 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
 
 /** Finds an appropriate subchannel for a call, based on \a pick_args.
 
-    \a target will be set to the selected subchannel, or NULL on failure
+    \a target will be set to the selected subchannel, or NULL on failure
+    or when the LB policy decides to drop the call.
+
     Upon success, \a user_data will be set to whatever opaque information
     may need to be propagated from the LB policy, or NULL if not needed.
+    \a context will be populated with context to pass to the subchannel
+    call, if needed.
 
     If the pick succeeds and a result is known immediately, a non-zero
     value will be returned. Otherwise, \a on_complete will be invoked
@@ -167,6 +167,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
 int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                                const grpc_lb_policy_pick_args *pick_args,
                                grpc_connected_subchannel **target,
+                               grpc_call_context_element *context,
                                void **user_data, grpc_closure *on_complete);
 
 /** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
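Note: the new `context` parameter threads a per-call context array through the pick path. A simplified stand-alone model of what an element carries (illustrative types only, not gRPC's actual API; the slot index and helper name are assumptions for the sketch):

#include <stddef.h>

/* Stand-in for grpc_call_context_element: an opaque value paired with
 * the destroy callback that runs when the call's context is torn down. */
typedef struct {
  void *value;
  void (*destroy)(void *value);
} example_context_element;

/* A policy that picked a subchannel can attach call-scoped state, as
 * the grpclb changes below do for their client-stats slot. */
static void attach_stats(example_context_element *context, size_t slot,
                         void *stats, void (*destroy)(void *)) {
  context[slot].value = stats;     /* ownership passes to the call */
  context[slot].destroy = destroy; /* invoked at call teardown */
}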
--- /dev/null
+++ b/data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
+
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/profiling/timers.h"
+
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+                                     grpc_channel_element *elem,
+                                     grpc_channel_element_args *args) {
+  return GRPC_ERROR_NONE;
+}
+
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+                                 grpc_channel_element *elem) {}
+
+typedef struct {
+  // Stats object to update.
+  grpc_grpclb_client_stats *client_stats;
+  // State for intercepting send_initial_metadata.
+  grpc_closure on_complete_for_send;
+  grpc_closure *original_on_complete_for_send;
+  bool send_initial_metadata_succeeded;
+  // State for intercepting recv_initial_metadata.
+  grpc_closure recv_initial_metadata_ready;
+  grpc_closure *original_recv_initial_metadata_ready;
+  bool recv_initial_metadata_succeeded;
+} call_data;
+
+static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
+                                 grpc_error *error) {
+  call_data *calld = arg;
+  if (error == GRPC_ERROR_NONE) {
+    calld->send_initial_metadata_succeeded = true;
+  }
+  grpc_closure_run(exec_ctx, calld->original_on_complete_for_send,
+                   GRPC_ERROR_REF(error));
+}
+
+static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
+                                        grpc_error *error) {
+  call_data *calld = arg;
+  if (error == GRPC_ERROR_NONE) {
+    calld->recv_initial_metadata_succeeded = true;
+  }
+  grpc_closure_run(exec_ctx, calld->original_recv_initial_metadata_ready,
+                   GRPC_ERROR_REF(error));
+}
+
+static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
+                                  grpc_call_element *elem,
+                                  const grpc_call_element_args *args) {
+  call_data *calld = elem->call_data;
+  // Get stats object from context and take a ref.
+  GPR_ASSERT(args->context != NULL);
+  GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
+  calld->client_stats = grpc_grpclb_client_stats_ref(
+      args->context[GRPC_GRPCLB_CLIENT_STATS].value);
+  // Record call started.
+  grpc_grpclb_client_stats_add_call_started(calld->client_stats);
+  return GRPC_ERROR_NONE;
+}
+
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+                              const grpc_call_final_info *final_info,
+                              grpc_closure *ignored) {
+  call_data *calld = elem->call_data;
+  // Record call finished, optionally setting client_failed_to_send and
+  // received.
+  grpc_grpclb_client_stats_add_call_finished(
+      false /* drop_for_rate_limiting */, false /* drop_for_load_balancing */,
+      !calld->send_initial_metadata_succeeded /* client_failed_to_send */,
+      calld->recv_initial_metadata_succeeded /* known_received */,
+      calld->client_stats);
+  // All done, so unref the stats object.
+  grpc_grpclb_client_stats_unref(calld->client_stats);
+}
+
+static void start_transport_stream_op_batch(
+    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+    grpc_transport_stream_op_batch *batch) {
+  call_data *calld = elem->call_data;
+  GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
+  // Intercept send_initial_metadata.
+  if (batch->send_initial_metadata) {
+    calld->original_on_complete_for_send = batch->on_complete;
+    grpc_closure_init(&calld->on_complete_for_send, on_complete_for_send, calld,
+                      grpc_schedule_on_exec_ctx);
+    batch->on_complete = &calld->on_complete_for_send;
+  }
+  // Intercept recv_initial_metadata.
+  if (batch->recv_initial_metadata) {
+    calld->original_recv_initial_metadata_ready =
+        batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
+    grpc_closure_init(&calld->recv_initial_metadata_ready,
+                      recv_initial_metadata_ready, calld,
+                      grpc_schedule_on_exec_ctx);
+    batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
+        &calld->recv_initial_metadata_ready;
+  }
+  // Chain to next filter.
+  grpc_call_next_op(exec_ctx, elem, batch);
+  GPR_TIMER_END("clr_start_transport_stream_op_batch", 0);
+}
+
+const grpc_channel_filter grpc_client_load_reporting_filter = {
+    start_transport_stream_op_batch,
+    grpc_channel_next_op,
+    sizeof(call_data),
+    init_call_elem,
+    grpc_call_stack_ignore_set_pollset_or_pollset_set,
+    destroy_call_elem,
+    0,  // sizeof(channel_data)
+    init_channel_elem,
+    destroy_channel_elem,
+    grpc_call_next_get_peer,
+    grpc_channel_next_get_info,
+    "client_load_reporting"};
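Note: the filter above works by swapping its own closures in front of the call's on_complete and recv_initial_metadata_ready callbacks. A minimal stand-alone sketch of that interception pattern (simplified types, not gRPC's):

#include <stdbool.h>

typedef void (*callback_fn)(void *arg, int error);

typedef struct {
  callback_fn original_cb; /* callback we displaced */
  void *original_arg;
  bool succeeded; /* outcome we want to observe */
} interceptor;

/* Record whether the operation succeeded, then chain to the original
 * callback, exactly as on_complete_for_send() does above. */
static void intercepting_cb(void *arg, int error) {
  interceptor *i = arg;
  if (error == 0) i->succeeded = true;
  i->original_cb(i->original_arg, error);
}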
--- /dev/null
+++ b/data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
+
+#include "src/core/lib/channel/channel_stack.h"
+
+extern const grpc_channel_filter grpc_client_load_reporting_filter;
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
+        */
--- a/data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
+++ b/data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -95,8 +95,7 @@
    headers. Therefore, sockaddr.h must always be included first */
 #include "src/core/lib/iomgr/sockaddr.h"
 
-#include <
-
+#include <limits.h>
 #include <string.h>
 
 #include <grpc/byte_buffer_reader.h>
@@ -108,13 +107,16 @@
 
 #include "src/core/ext/filters/client_channel/client_channel.h"
 #include "src/core/ext/filters/client_channel/client_channel_factory.h"
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
 #include "src/core/ext/filters/client_channel/lb_policy_factory.h"
 #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
 #include "src/core/ext/filters/client_channel/parse_address.h"
 #include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/sockaddr.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -126,6 +128,7 @@
 #include "src/core/lib/support/string.h"
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/surface/channel.h"
+#include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/static_metadata.h"
 
 #define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
@@ -134,7 +137,7 @@
 #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
 #define GRPC_GRPCLB_RECONNECT_JITTER 0.2
 
-
+grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false);
 
 /* add lb_token of selected subchannel (address) to the call's initial
  * metadata */
@@ -147,6 +150,10 @@ static grpc_error *initial_metadata_add_lb_token(
                                lb_token_mdelem_storage, lb_token);
 }
 
+static void destroy_client_stats(void *arg) {
+  grpc_grpclb_client_stats_unref(arg);
+}
+
 typedef struct wrapped_rr_closure_arg {
   /* the closure instance using this struct as argument */
   grpc_closure wrapper_closure;
@@ -163,6 +170,13 @@ typedef struct wrapped_rr_closure_arg {
    * initial metadata */
  grpc_connected_subchannel **target;
 
+  /* the context to be populated for the subchannel call */
+  grpc_call_context_element *context;
+
+  /* Stats for client-side load reporting. Note that this holds a
+   * reference, which must be either passed on via context or unreffed. */
+  grpc_grpclb_client_stats *client_stats;
+
   /* the LB token associated with the pick */
   grpc_mdelem lb_token;
 
@@ -202,8 +216,14 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
               (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
       abort();
     }
+    // Pass on client stats via context. Passes ownership of the reference.
+    GPR_ASSERT(wc_arg->client_stats != NULL);
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
+  } else {
+    grpc_grpclb_client_stats_unref(wc_arg->client_stats);
   }
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
   }
   GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
@@ -237,6 +257,7 @@ typedef struct pending_pick {
 static void add_pending_pick(pending_pick **root,
                              const grpc_lb_policy_pick_args *pick_args,
                              grpc_connected_subchannel **target,
+                             grpc_call_context_element *context,
                              grpc_closure *on_complete) {
   pending_pick *pp = gpr_zalloc(sizeof(*pp));
   pp->next = *root;
@@ -244,6 +265,7 @@ static void add_pending_pick(pending_pick **root,
   pp->target = target;
   pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
   pp->wrapped_on_complete_arg.target = target;
+  pp->wrapped_on_complete_arg.context = context;
   pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
   pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
       pick_args->lb_token_mdelem_storage;
@@ -287,8 +309,8 @@ typedef struct glb_lb_policy {
   grpc_client_channel_factory *cc_factory;
   grpc_channel_args *args;
 
-  /**
-
+  /** timeout in milliseconds for the LB call. 0 means no deadline. */
+  int lb_call_timeout_ms;
 
   /** for communicating with the LB server */
   grpc_channel *lb_channel;
@@ -305,6 +327,11 @@ typedef struct glb_lb_policy {
    * response has arrived. */
   grpc_grpclb_serverlist *serverlist;
 
+  /** Index into serverlist for next pick.
+   * If the server at this index is a drop, we return a drop.
+   * Otherwise, we delegate to the RR policy. */
+  size_t serverlist_index;
+
   /** list of picks that are waiting on RR's policy connectivity */
   pending_pick *pending_picks;
 
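Note: the new serverlist_index is a wrap-around cursor over the balancer's server list. A self-contained sketch of the same walk (plain C, not gRPC code):

#include <stddef.h>

typedef struct {
  size_t num_servers;
  size_t index; /* next slot to hand out */
} serverlist_cursor;

/* Return the current slot and advance, wrapping at the end, mirroring
 * how pick_from_internal_rr_locked() below advances serverlist_index. */
static size_t next_server_index(serverlist_cursor *c) {
  size_t i = c->index++;
  if (c->index == c->num_servers) c->index = 0; /* wrap-around */
  return i;
}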
@@ -316,6 +343,10 @@ typedef struct glb_lb_policy {
   /************************************************************/
   /* client data associated with the LB server communication */
   /************************************************************/
+
+  /* Finished sending initial request. */
+  grpc_closure lb_on_sent_initial_request;
+
   /* Status from the LB server has been received. This signals the end of the LB
    * call. */
   grpc_closure lb_on_server_status_received;
@@ -348,6 +379,23 @@ typedef struct glb_lb_policy {
 
   /** LB call retry timer */
   grpc_timer lb_call_retry_timer;
+
+  bool initial_request_sent;
+  bool seen_initial_response;
+
+  /* Stats for client-side load reporting. Should be unreffed and
+   * recreated whenever lb_call is replaced. */
+  grpc_grpclb_client_stats *client_stats;
+  /* Interval and timer for next client load report. */
+  gpr_timespec client_stats_report_interval;
+  grpc_timer client_load_report_timer;
+  bool client_load_report_timer_pending;
+  bool last_client_load_report_counters_were_zero;
+  /* Closure used for either the load report timer or the callback for
+   * completion of sending the load report. */
+  grpc_closure client_load_report_closure;
+  /* Client load report message payload. */
+  grpc_byte_buffer *client_load_report_payload;
 } glb_lb_policy;
 
 /* Keeps track and reacts to changes in connectivity of the RR instance */
@@ -359,6 +407,9 @@ struct rr_connectivity_data {
 
 static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
                             bool log) {
+  if (server->drop_for_rate_limiting || server->drop_for_load_balancing) {
+    return false;
+  }
   const grpc_grpclb_ip_address *ip = &server->ip_address;
   if (server->port >> 16 != 0) {
     if (log) {
@@ -368,7 +419,6 @@ static bool is_server_valid(const grpc_grpclb_server *server, size_t idx,
     }
     return false;
   }
-
   if (ip->size != 4 && ip->size != 16) {
     if (log) {
       gpr_log(GPR_ERROR,
@@ -402,11 +452,12 @@ static const grpc_lb_user_data_vtable lb_token_vtable = {
 
 static void parse_server(const grpc_grpclb_server *server,
                          grpc_resolved_address *addr) {
+  memset(addr, 0, sizeof(*addr));
+  if (server->drop_for_rate_limiting || server->drop_for_load_balancing) return;
   const uint16_t netorder_port = htons((uint16_t)server->port);
   /* the addresses are given in binary format (a in(6)_addr struct) in
    * server->ip_address.bytes. */
   const grpc_grpclb_ip_address *ip = &server->ip_address;
-  memset(addr, 0, sizeof(*addr));
   if (ip->size == 4) {
     addr->len = sizeof(struct sockaddr_in);
     struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
@@ -531,7 +582,7 @@ static bool update_lb_connectivity_status_locked(
     GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
   }
 
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO,
             "Setting grpclb's state to %s from new RR policy %p state.",
             grpc_connectivity_state_name(new_rr_state),
@@ -543,31 +594,74 @@ static bool update_lb_connectivity_status_locked(
   return true;
 }
 
-/*
- * (ignoring its completion callback) we need to perform the
- * callback would be
+/* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
+ * immediately (ignoring its completion callback), we need to perform the
+ * cleanups this callback would otherwise be resposible for.
+ * If \a force_async is true, then we will manually schedule the
+ * completion callback even if the pick is available immediately. */
 static bool pick_from_internal_rr_locked(
-    grpc_exec_ctx *exec_ctx,
-    const grpc_lb_policy_pick_args *pick_args,
+    grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy,
+    const grpc_lb_policy_pick_args *pick_args, bool force_async,
     grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
-
+  // Look at the index into the serverlist to see if we should drop this call.
+  grpc_grpclb_server *server =
+      glb_policy->serverlist->servers[glb_policy->serverlist_index++];
+  if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) {
+    glb_policy->serverlist_index = 0;  // Wrap-around.
+  }
+  if (server->drop_for_rate_limiting || server->drop_for_load_balancing) {
+    // Not using the RR policy, so unref it.
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+      gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
+              (intptr_t)wc_arg->rr_policy);
+    }
+    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+    // Update client load reporting stats to indicate the number of
+    // dropped calls.  Note that we have to do this here instead of in
+    // the client_load_reporting filter, because we do not create a
+    // subchannel call (and therefore no client_load_reporting filter)
+    // for dropped calls.
+    grpc_grpclb_client_stats_add_call_started(wc_arg->client_stats);
+    grpc_grpclb_client_stats_add_call_finished(
+        server->drop_for_rate_limiting, server->drop_for_load_balancing,
+        false /* failed_to_send */, false /* known_received */,
+        wc_arg->client_stats);
+    grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+    if (force_async) {
+      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+      grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+      gpr_free(wc_arg->free_when_done);
+      return false;
+    }
+    gpr_free(wc_arg->free_when_done);
+    return true;
+  }
+  // Pick via the RR policy.
   const bool pick_done = grpc_lb_policy_pick_locked(
-      exec_ctx, rr_policy, pick_args, target,
-      &wc_arg->wrapper_closure);
+      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
+      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
-
     /* add the load reporting initial metadata */
     initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
                                   pick_args->lb_token_mdelem_storage,
                                   GRPC_MDELEM_REF(wc_arg->lb_token));
-
-
+    // Pass on client stats via context. Passes ownership of the reference.
+    GPR_ASSERT(wc_arg->client_stats != NULL);
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
+    if (force_async) {
+      GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+      grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+      gpr_free(wc_arg->free_when_done);
+      return false;
+    }
+    gpr_free(wc_arg->free_when_done);
   }
   /* else, the pending pick will be registered and taken care of by the
    * pending pick list inside the RR policy (glb_policy->rr_policy).
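Note: the force_async flag added above changes only the completion convention. If a pick resolves synchronously but the caller asked for asynchronous behavior, the wrapped closure is scheduled manually and the function reports the pick as still pending. A reduced sketch of that contract (schedule() stands in for grpc_closure_sched):

#include <stdbool.h>

static bool finish_pick(bool completed_synchronously, bool force_async,
                        void (*schedule)(void)) {
  if (completed_synchronously && force_async) {
    schedule();   /* run the completion callback from the scheduler */
    return false; /* caller always takes the asynchronous path */
  }
  return completed_synchronously;
}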
@@ -637,7 +731,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
   if (!replace_old_rr) {
     /* dispose of the new RR policy that won't be used after all */
     GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
             "Keeping old RR policy (%p) despite new serverlist: new RR "
             "policy was in %s connectivity state.",
@@ -647,7 +741,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
     return;
   }
 
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
             (void *)new_rr_policy, (void *)glb_policy->rr_policy);
   }
@@ -690,12 +784,14 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
     glb_policy->pending_picks = pp->next;
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
     pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
-
+    pp->wrapped_on_complete_arg.client_stats =
+        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
     }
-    pick_from_internal_rr_locked(exec_ctx, glb_policy->
-
+    pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
+                                 true /* force_async */, pp->target,
                                  &pp->wrapped_on_complete_arg);
   }
 
@@ -704,7 +800,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
     glb_policy->pending_pings = pping->next;
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
     pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
              (intptr_t)glb_policy->rr_policy);
     }
@@ -750,18 +846,11 @@ static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
   gpr_free(balancer_name);
 }
 
-static void *copy_balancer_name(void *balancer_name) {
-  return gpr_strdup(balancer_name);
-}
-
 static grpc_slice_hash_table_entry targets_info_entry_create(
     const char *address, const char *balancer_name) {
-  static const grpc_slice_hash_table_vtable vtable = {destroy_balancer_name,
-                                                      copy_balancer_name};
   grpc_slice_hash_table_entry entry;
   entry.key = grpc_slice_from_copied_string(address);
-  entry.value = (
-  entry.vtable = &vtable;
+  entry.value = gpr_strdup(balancer_name);
   return entry;
 }
 
@@ -825,11 +914,8 @@ static char *get_lb_uri_target_addresses(grpc_exec_ctx *exec_ctx,
                    uri_path);
   gpr_free(uri_path);
 
-  *targets_info =
-
-  for (size_t i = 0; i < num_grpclb_addrs; i++) {
-    grpc_slice_unref_internal(exec_ctx, targets_info_entries[i].key);
-  }
+  *targets_info = grpc_slice_hash_table_create(
+      num_grpclb_addrs, targets_info_entries, destroy_balancer_name);
   gpr_free(targets_info_entries);
 
   return target_uri_str;
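Note: the two hunks above reflect an API change in grpc_slice_hash_table: instead of a per-entry vtable with copy/destroy functions, the table now takes a single value-destroy function at creation and assumes ownership of keys and values. A simplified model of the new shape (illustrative types only, not the real API):

#include <stddef.h>

typedef struct {
  void *key;
  void *value;
} example_entry;

typedef struct {
  size_t num_entries;
  example_entry *entries;
  void (*destroy_value)(void *value); /* one destroy fn for all values */
} example_table;

/* Creation takes ownership of the entries; teardown (not shown) would
 * call destroy_value once per entry, which is why the caller-side key
 * unref loop could be deleted above. */
static example_table make_table(size_t n, example_entry *entries,
                                void (*destroy_value)(void *)) {
  example_table t = {n, entries, destroy_value};
  return t;
}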
@@ -841,10 +927,10 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
   /* Count the number of gRPC-LB addresses. There must be at least one.
    * TODO(roth): For now, we ignore non-balancer addresses, but in the
    * future, we may change the behavior such that we fall back to using
-   * the non-balancer addresses if we cannot reach any balancers.
-   *
-   *
-   *
+   * the non-balancer addresses if we cannot reach any balancers.  In the
+   * fallback case, we should use the LB policy indicated by
+   * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
+   * unset, we should default to pick_first). */
   const grpc_arg *arg =
       grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
   if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -867,16 +953,29 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(uri->path[0] != '\0');
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
            glb_policy->server_name);
   }
   grpc_uri_destroy(uri);
 
   glb_policy->cc_factory = args->client_channel_factory;
-  glb_policy->args = grpc_channel_args_copy(args->args);
   GPR_ASSERT(glb_policy->cc_factory != NULL);
 
+  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
+  glb_policy->lb_call_timeout_ms =
+      grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
+
+  // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
+  // since we use this to trigger the client_load_reporting filter.
+  grpc_arg new_arg;
+  new_arg.key = GRPC_ARG_LB_POLICY_NAME;
+  new_arg.type = GRPC_ARG_STRING;
+  new_arg.value.string = "grpclb";
+  static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+  glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
+      args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+
   grpc_slice_hash_table *targets_info = NULL;
   /* Create a client channel over them to communicate with a LB service */
   char *lb_service_target_addresses =
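Note: a hedged usage sketch of the new channel argument read in glb_create() above. A client that wants the LB call capped at 5 seconds could set GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS when creating its channel (assuming the macro is exported via grpc_types.h, which this release also touches; the helper name here is illustrative):

#include <grpc/grpc.h>
#include <string.h>

static grpc_channel *channel_with_lb_call_timeout(const char *target) {
  grpc_arg arg;
  memset(&arg, 0, sizeof(arg));
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS; /* 0 (the default) means no deadline */
  arg.value.integer = 5000;
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, NULL);
}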
@@ -890,6 +989,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
|
|
890
989
|
grpc_channel_args_destroy(exec_ctx, lb_channel_args);
|
891
990
|
gpr_free(lb_service_target_addresses);
|
892
991
|
if (glb_policy->lb_channel == NULL) {
|
992
|
+
gpr_free((void *)glb_policy->server_name);
|
993
|
+
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
|
893
994
|
gpr_free(glb_policy);
|
894
995
|
return NULL;
|
895
996
|
}
|
@@ -905,6 +1006,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
|
|
905
1006
|
GPR_ASSERT(glb_policy->pending_pings == NULL);
|
906
1007
|
gpr_free((void *)glb_policy->server_name);
|
907
1008
|
grpc_channel_args_destroy(exec_ctx, glb_policy->args);
|
1009
|
+
if (glb_policy->client_stats != NULL) {
|
1010
|
+
grpc_grpclb_client_stats_unref(glb_policy->client_stats);
|
1011
|
+
}
|
908
1012
|
grpc_channel_destroy(glb_policy->lb_channel);
|
909
1013
|
glb_policy->lb_channel = NULL;
|
910
1014
|
grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
|
@@ -1021,7 +1125,8 @@ static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
|
|
1021
1125
|
|
1022
1126
|
static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
|
1023
1127
|
const grpc_lb_policy_pick_args *pick_args,
|
1024
|
-
grpc_connected_subchannel **target,
|
1128
|
+
grpc_connected_subchannel **target,
|
1129
|
+
grpc_call_context_element *context, void **user_data,
|
1025
1130
|
grpc_closure *on_complete) {
|
1026
1131
|
if (pick_args->lb_token_mdelem_storage == NULL) {
|
1027
1132
|
*target = NULL;
|
@@ -1033,11 +1138,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
|
|
1033
1138
|
}
|
1034
1139
|
|
1035
1140
|
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
|
1036
|
-
glb_policy->deadline = pick_args->deadline;
|
1037
1141
|
bool pick_done;
|
1038
1142
|
|
1039
1143
|
if (glb_policy->rr_policy != NULL) {
|
1040
|
-
if (grpc_lb_glb_trace) {
|
1144
|
+
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
|
1041
1145
|
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
|
1042
1146
|
(void *)glb_policy, (void *)glb_policy->rr_policy);
|
1043
1147
|
}
|
@@ -1049,20 +1153,25 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
|
|
1049
1153
|
grpc_schedule_on_exec_ctx);
|
1050
1154
|
wc_arg->rr_policy = glb_policy->rr_policy;
|
1051
1155
|
wc_arg->target = target;
|
1156
|
+
wc_arg->context = context;
|
1157
|
+
GPR_ASSERT(glb_policy->client_stats != NULL);
|
1158
|
+
wc_arg->client_stats =
|
1159
|
+
grpc_grpclb_client_stats_ref(glb_policy->client_stats);
|
1052
1160
|
wc_arg->wrapped_closure = on_complete;
|
1053
1161
|
wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
|
1054
1162
|
wc_arg->initial_metadata = pick_args->initial_metadata;
|
1055
1163
|
wc_arg->free_when_done = wc_arg;
|
1056
|
-
pick_done =
|
1057
|
-
|
1164
|
+
pick_done =
|
1165
|
+
pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
|
1166
|
+
false /* force_async */, target, wc_arg);
|
1058
1167
|
} else {
|
1059
|
-
if (grpc_lb_glb_trace) {
|
1168
|
+
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
|
1060
1169
|
gpr_log(GPR_DEBUG,
|
1061
1170
|
"No RR policy in grpclb instance %p. Adding to grpclb's pending "
|
1062
1171
|
"picks",
|
1063
1172
|
(void *)(glb_policy));
|
1064
1173
|
}
|
1065
|
-
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
|
1174
|
+
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
|
1066
1175
|
on_complete);
|
1067
1176
|
|
1068
1177
|
if (!glb_policy->started_picking) {
|
@@ -1103,6 +1212,104 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
       exec_ctx, &glb_policy->state_tracker, current, notify);
 }
 
+static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                           grpc_error *error);
+
+static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
+                                             glb_lb_policy *glb_policy) {
+  const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+  const gpr_timespec next_client_load_report_time =
+      gpr_time_add(now, glb_policy->client_stats_report_interval);
+  grpc_closure_init(&glb_policy->client_load_report_closure,
+                    send_client_load_report_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
+  grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
+                  next_client_load_report_time,
+                  &glb_policy->client_load_report_closure, now);
+}
+
+static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                           grpc_error *error) {
+  glb_lb_policy *glb_policy = arg;
+  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
+  glb_policy->client_load_report_payload = NULL;
+  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
+    glb_policy->client_load_report_timer_pending = false;
+    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                              "client_load_report");
+    return;
+  }
+  schedule_next_client_load_report(exec_ctx, glb_policy);
+}
+
+static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
+                                              glb_lb_policy *glb_policy) {
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_SEND_MESSAGE;
+  op.data.send_message.send_message = glb_policy->client_load_report_payload;
+  grpc_closure_init(&glb_policy->client_load_report_closure,
+                    client_load_report_done_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
+  grpc_call_error call_error = grpc_call_start_batch_and_execute(
+      exec_ctx, glb_policy->lb_call, &op, 1,
+      &glb_policy->client_load_report_closure);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
+}
+
+static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
+  return request->client_stats.num_calls_started == 0 &&
+         request->client_stats.num_calls_finished == 0 &&
+         request->client_stats.num_calls_finished_with_drop_for_rate_limiting ==
+             0 &&
+         request->client_stats
+                 .num_calls_finished_with_drop_for_load_balancing == 0 &&
+         request->client_stats.num_calls_finished_with_client_failed_to_send ==
+             0 &&
+         request->client_stats.num_calls_finished_known_received == 0;
+}
+
+static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                           grpc_error *error) {
+  glb_lb_policy *glb_policy = arg;
+  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
+    glb_policy->client_load_report_timer_pending = false;
+    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                              "client_load_report");
+    return;
+  }
+  // Construct message payload.
+  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
+  grpc_grpclb_request *request =
+      grpc_grpclb_load_report_request_create(glb_policy->client_stats);
+  // Skip client load report if the counters were all zero in the last
+  // report and they are still zero in this one.
+  if (load_report_counters_are_zero(request)) {
+    if (glb_policy->last_client_load_report_counters_were_zero) {
+      grpc_grpclb_request_destroy(request);
+      schedule_next_client_load_report(exec_ctx, glb_policy);
+      return;
+    }
+    glb_policy->last_client_load_report_counters_were_zero = true;
+  } else {
+    glb_policy->last_client_load_report_counters_were_zero = false;
+  }
+  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
+  glb_policy->client_load_report_payload =
+      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
+  grpc_grpclb_request_destroy(request);
+  // If we've already sent the initial request, then we can go ahead and
+  // sent the load report. Otherwise, we need to wait until the initial
+  // request has been sent to send this
+  // (see lb_on_sent_initial_request_locked() below).
+  if (glb_policy->initial_request_sent) {
+    do_send_client_load_report_locked(exec_ctx, glb_policy);
+  }
+}
+
+static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
+                                              void *arg, grpc_error *error);
 static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
                                                 void *arg, grpc_error *error);
 static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
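The hunk above introduces the client load-reporting loop: a timer fires send_client_load_report_locked, which encodes a grpc_grpclb_request from the current counters, sends it on the LB call, and the completion callback re-arms the timer. One subtlety is the zero-suppression rule: a report is skipped only when the counters are all zero now and were already all zero in the previous report, so the first all-zero interval is still sent. A minimal standalone C sketch of just that rule (fake_client_stats, counters_are_zero, and should_send_report are hypothetical stand-ins, not grpc-core API):

    /* Sketch of the zero-suppression rule in send_client_load_report_locked(). */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
      long num_calls_started;
      long num_calls_finished;
    } fake_client_stats; /* hypothetical stand-in for grpc_grpclb_client_stats */

    static bool counters_are_zero(const fake_client_stats *s) {
      return s->num_calls_started == 0 && s->num_calls_finished == 0;
    }

    /* Returns true if a report should be sent; updates *last_were_zero. */
    static bool should_send_report(const fake_client_stats *s,
                                   bool *last_were_zero) {
      if (counters_are_zero(s)) {
        if (*last_were_zero) return false; /* second all-zero interval: skip */
        *last_were_zero = true;            /* first all-zero interval: still send */
      } else {
        *last_were_zero = false;
      }
      return true;
    }

    int main(void) {
      bool last_were_zero = false;
      fake_client_stats busy = {3, 2}, idle = {0, 0};
      printf("%d\n", should_send_report(&busy, &last_were_zero)); /* 1: send */
      printf("%d\n", should_send_report(&idle, &last_were_zero)); /* 1: send */
      printf("%d\n", should_send_report(&idle, &last_were_zero)); /* 0: skip */
      return 0;
    }

Sending the first all-zero report lets the balancer observe the load dropping to zero before the client goes quiet.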
@@ -1117,11 +1324,23 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
    * glb_policy->base.interested_parties, which is comprised of the polling
    * entities from \a client_channel. */
   grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
+  gpr_timespec deadline =
+      glb_policy->lb_call_timeout_ms == 0
+          ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
+          : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
+                                              GPR_TIMESPAN));
   glb_policy->lb_call = grpc_channel_create_pollset_set_call(
       exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
       GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
-      &host,
+      &host, deadline, NULL);
+  grpc_slice_unref_internal(exec_ctx, host);
+
+  if (glb_policy->client_stats != NULL) {
+    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
+  }
+  glb_policy->client_stats = grpc_grpclb_client_stats_create();
 
   grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
   grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
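The deadline computation added above treats lb_call_timeout_ms == 0 as "no deadline" (gpr_inf_future) and otherwise resolves to now + timeout on the monotonic clock. A standalone sketch of the same rule in plain C, assuming POSIX clock_gettime is available; deadline_t and deadline_from_timeout_ms are hypothetical stand-ins for the gpr_timespec helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* A timeout of 0 means "no deadline"; the flag models gpr_inf_future(). */
    typedef struct {
      int is_infinite;
      struct timespec ts;
    } deadline_t;

    static deadline_t deadline_from_timeout_ms(int64_t timeout_ms) {
      deadline_t d = {0, {0, 0}};
      if (timeout_ms == 0) {
        d.is_infinite = 1; /* infinite future: the call never times out */
        return d;
      }
      clock_gettime(CLOCK_MONOTONIC, &d.ts); /* "now", like gpr_now() */
      d.ts.tv_sec += (time_t)(timeout_ms / 1000);
      d.ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
      if (d.ts.tv_nsec >= 1000000000L) { /* carry nanoseconds into seconds */
        d.ts.tv_sec += 1;
        d.ts.tv_nsec -= 1000000000L;
      }
      return d;
    }

    int main(void) {
      deadline_t d = deadline_from_timeout_ms(1500);
      if (!d.is_infinite) {
        printf("deadline at %lld.%09ld\n", (long long)d.ts.tv_sec, d.ts.tv_nsec);
      }
      return 0;
    }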
@@ -1134,6 +1353,9 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
   grpc_slice_unref_internal(exec_ctx, request_payload_slice);
   grpc_grpclb_request_destroy(request);
 
+  grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
+                    lb_on_sent_initial_request_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
   grpc_closure_init(&glb_policy->lb_on_server_status_received,
                     lb_on_server_status_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner, false));
@@ -1147,12 +1369,16 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
                    GRPC_GRPCLB_RECONNECT_JITTER,
                    GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
                    GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+
+  glb_policy->initial_request_sent = false;
+  glb_policy->seen_initial_response = false;
+  glb_policy->last_client_load_report_counters_were_zero = false;
 }
 
 static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
                                    glb_lb_policy *glb_policy) {
   GPR_ASSERT(glb_policy->lb_call != NULL);
-
+  grpc_call_unref(glb_policy->lb_call);
   glb_policy->lb_call = NULL;
 
   grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
@@ -1160,6 +1386,10 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
 
   grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
   grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
+
+  if (!glb_policy->client_load_report_timer_pending) {
+    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
+  }
 }
 
 /*
@@ -1172,7 +1402,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
 
   lb_call_init_locked(exec_ctx, glb_policy);
 
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
             (void *)glb_policy, (void *)glb_policy->lb_call);
   }
@@ -1188,21 +1418,27 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   op->flags = 0;
   op->reserved = NULL;
   op++;
-
   op->op = GRPC_OP_RECV_INITIAL_METADATA;
   op->data.recv_initial_metadata.recv_initial_metadata =
       &glb_policy->lb_initial_metadata_recv;
   op->flags = 0;
   op->reserved = NULL;
   op++;
-
   GPR_ASSERT(glb_policy->lb_request_payload != NULL);
   op->op = GRPC_OP_SEND_MESSAGE;
   op->data.send_message.send_message = glb_policy->lb_request_payload;
   op->flags = 0;
   op->reserved = NULL;
   op++;
+  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
+   * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
+  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
+  call_error = grpc_call_start_batch_and_execute(
+      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+      &glb_policy->lb_on_sent_initial_request);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
 
+  op = ops;
   op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
   op->data.recv_status_on_client.trailing_metadata =
       &glb_policy->lb_trailing_metadata_recv;
@@ -1234,6 +1470,19 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
 
+static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
+                                              void *arg, grpc_error *error) {
+  glb_lb_policy *glb_policy = arg;
+  glb_policy->initial_request_sent = true;
+  // If we attempted to send a client load report before the initial
+  // request was sent, send the load report now.
+  if (glb_policy->client_load_report_payload != NULL) {
+    do_send_client_load_report_locked(exec_ctx, glb_policy);
+  }
+  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                            "lb_on_response_received_locked");
+}
+
 static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                            grpc_error *error) {
   glb_lb_policy *glb_policy = arg;
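Note that the weak ref released here was taken in query_for_backends_locked (see the earlier hunk) before starting the batch that sends the initial request; the reason strings passed to GRPC_LB_POLICY_WEAK_REF/UNREF ("lb_on_server_status_received" vs. "lb_on_response_received_locked") are debug labels only and need not match for correctness. A simplified sketch of that take-before-batch, release-in-callback discipline (policy, weak_ref, and weak_unref are stand-ins, not the real grpc refcounting macros):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      atomic_int weak_refs;
    } policy; /* simplified stand-in for glb_lb_policy */

    static void weak_ref(policy *p, const char *reason) {
      atomic_fetch_add(&p->weak_refs, 1);
      printf("ref++ (%s)\n", reason);
    }

    static void weak_unref(policy *p, const char *reason) {
      printf("ref-- (%s)\n", reason);
      if (atomic_fetch_sub(&p->weak_refs, 1) == 1) {
        free(p); /* last weak ref dropped: safe to reclaim */
      }
    }

    /* completion callback for the "send initial request" batch */
    static void on_sent_initial_request(policy *p) {
      /* ... flush any pending load report here ... */
      weak_unref(p, "initial_request"); /* pairs with the ref taken below */
    }

    int main(void) {
      policy *p = malloc(sizeof(*p));
      atomic_init(&p->weak_refs, 0);
      weak_ref(p, "initial_request"); /* taken before starting the batch */
      on_sent_initial_request(p);     /* runs when the batch completes */
      return 0;
    }

Holding the ref across the batch guarantees the policy object outlives the asynchronous completion, even if the strong refs are dropped in the meantime.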
@@ -1249,57 +1498,91 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
     grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
     grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
     grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
-
-
-    if (
-
-
-    if (
-
-
-
-
-
-
-
-
-
+
+    grpc_grpclb_initial_response *response = NULL;
+    if (!glb_policy->seen_initial_response &&
+        (response = grpc_grpclb_initial_response_parse(response_slice)) !=
+            NULL) {
+      if (response->has_client_stats_report_interval) {
+        glb_policy->client_stats_report_interval =
+            gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
+                         grpc_grpclb_duration_to_timespec(
+                             &response->client_stats_report_interval));
+        if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+          gpr_log(GPR_INFO,
+                  "received initial LB response message; "
+                  "client load reporting interval = %" PRId64 ".%09d sec",
+                  glb_policy->client_stats_report_interval.tv_sec,
+                  glb_policy->client_stats_report_interval.tv_nsec);
         }
+        /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
+         * strong ref count goes to zero) to be unref'd in
+         * send_client_load_report() */
+        glb_policy->client_load_report_timer_pending = true;
+        GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
+        schedule_next_client_load_report(exec_ctx, glb_policy);
+      } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+        gpr_log(GPR_INFO,
+                "received initial LB response message; "
+                "client load reporting NOT enabled");
       }
+      grpc_grpclb_initial_response_destroy(response);
+      glb_policy->seen_initial_response = true;
+    } else {
+      grpc_grpclb_serverlist *serverlist =
+          grpc_grpclb_response_parse_serverlist(response_slice);
+      if (serverlist != NULL) {
+        GPR_ASSERT(glb_policy->lb_call != NULL);
+        if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+          gpr_log(GPR_INFO, "Serverlist with %lu servers received",
+                  (unsigned long)serverlist->num_servers);
+          for (size_t i = 0; i < serverlist->num_servers; ++i) {
+            grpc_resolved_address addr;
+            parse_server(serverlist->servers[i], &addr);
+            char *ipport;
+            grpc_sockaddr_to_string(&ipport, &addr, false);
+            gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
+            gpr_free(ipport);
+          }
+        }
 
-
-
-
-
+        /* update serverlist */
+        if (serverlist->num_servers > 0) {
+          if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
+                                            serverlist)) {
+            if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+              gpr_log(GPR_INFO,
+                      "Incoming server list identical to current, ignoring.");
+            }
+            grpc_grpclb_destroy_serverlist(serverlist);
+          } else { /* new serverlist */
+            if (glb_policy->serverlist != NULL) {
+              /* dispose of the old serverlist */
+              grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
+            }
+            /* and update the copy in the glb_lb_policy instance. This
+             * serverlist instance will be destroyed either upon the next
+             * update or in glb_destroy() */
+            glb_policy->serverlist = serverlist;
+            glb_policy->serverlist_index = 0;
+            rr_handover_locked(exec_ctx, glb_policy);
+          }
+        } else {
+          if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
             gpr_log(GPR_INFO,
-                    "
+                    "Received empty server list. Picks will stay pending until "
+                    "a response with > 0 servers is received");
           }
           grpc_grpclb_destroy_serverlist(serverlist);
-        } else { /* new serverlist */
-          if (glb_policy->serverlist != NULL) {
-            /* dispose of the old serverlist */
-            grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
-          }
-          /* and update the copy in the glb_lb_policy instance. This serverlist
-           * instance will be destroyed either upon the next update or in
-           * glb_destroy() */
-          glb_policy->serverlist = serverlist;
-
-          rr_handover_locked(exec_ctx, glb_policy);
-        }
-      } else {
-        if (grpc_lb_glb_trace) {
-          gpr_log(GPR_INFO,
-                  "Received empty server list. Picks will stay pending until a "
-                  "response with > 0 servers is received");
         }
+      } else { /* serverlist == NULL */
+        gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
+                grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
       }
-    } else { /* serverlist == NULL */
-      gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
-              grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
-      grpc_slice_unref_internal(exec_ctx, response_slice);
     }
 
+    grpc_slice_unref_internal(exec_ctx, response_slice);
+
     if (!glb_policy->shutting_down) {
       /* keep listening for serverlist updates */
       op->op = GRPC_OP_RECV_MESSAGE;
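In the response handler above, the first message on the stream may be an initial response carrying client_stats_report_interval; note the gpr_time_max clamp, which keeps the client from reporting more often than once per second no matter what interval the balancer requests. A standalone sketch of that clamp (clamp_report_interval_ms is a hypothetical stand-in operating in milliseconds rather than gpr_timespec):

    #include <stdint.h>
    #include <stdio.h>

    /* Floor the balancer-requested reporting interval at 1 second,
     * mirroring gpr_time_max(gpr_time_from_seconds(1, ...), interval). */
    static int64_t clamp_report_interval_ms(int64_t server_interval_ms) {
      const int64_t min_interval_ms = 1000; /* 1 second floor */
      return server_interval_ms > min_interval_ms ? server_interval_ms
                                                  : min_interval_ms;
    }

    int main(void) {
      printf("%lld\n", (long long)clamp_report_interval_ms(250));  /* 1000 */
      printf("%lld\n", (long long)clamp_report_interval_ms(5000)); /* 5000 */
      return 0;
    }

Every later message is parsed as a serverlist instead, with identical lists ignored and empty lists leaving picks pending.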
@@ -1327,7 +1610,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
   glb_lb_policy *glb_policy = arg;
 
   if (!glb_policy->shutting_down) {
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
               (void *)glb_policy);
     }
@@ -1344,7 +1627,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
 
   GPR_ASSERT(glb_policy->lb_call != NULL);
 
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     char *status_details =
         grpc_slice_to_c_string(glb_policy->lb_call_status_details);
     gpr_log(GPR_DEBUG,
@@ -1363,7 +1646,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
     gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     gpr_timespec next_try =
         gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
               (void *)glb_policy);
       gpr_timespec timeout = gpr_time_sub(next_try, now);
@@ -1411,9 +1694,29 @@ grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
 }
 
 /* Plugin registration */
+
+// Only add client_load_reporting filter if the grpclb LB policy is used.
+static bool maybe_add_client_load_reporting_filter(
+    grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
+  const grpc_channel_args *args =
+      grpc_channel_stack_builder_get_channel_arguments(builder);
+  const grpc_arg *channel_arg =
+      grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
+  if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
+      strcmp(channel_arg->value.string, "grpclb") == 0) {
+    return grpc_channel_stack_builder_append_filter(
+        builder, (const grpc_channel_filter *)arg, NULL, NULL);
+  }
+  return true;
+}
+
 void grpc_lb_policy_grpclb_init() {
   grpc_register_lb_policy(grpc_glb_lb_factory_create());
   grpc_register_tracer("glb", &grpc_lb_glb_trace);
+  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
+                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
+                                   maybe_add_client_load_reporting_filter,
+                                   (void *)&grpc_client_load_reporting_filter);
 }
 
 void grpc_lb_policy_grpclb_shutdown() {}
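The registration hunk above appends the new client_load_reporting filter to subchannel stacks only when the channel's configured LB policy is "grpclb"; returning true without appending lets stack construction continue for every other policy. A simplified sketch of that predicate shape (channel_args, append_filter, and maybe_add_filter are stand-ins, not the real grpc_channel_stack_builder API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct {
      const char *lb_policy_name; /* stand-in for the GRPC_ARG_LB_POLICY_NAME arg */
    } channel_args;

    static bool append_filter(const char *name) {
      printf("appended filter: %s\n", name);
      return true;
    }

    /* Returns true to continue building the channel stack; appends the
     * filter only when the policy name matches "grpclb". */
    static bool maybe_add_filter(const channel_args *args) {
      if (args->lb_policy_name != NULL &&
          strcmp(args->lb_policy_name, "grpclb") == 0) {
        return append_filter("client_load_reporting");
      }
      return true; /* not grpclb: skip the filter, keep building */
    }

    int main(void) {
      channel_args pick_first = {"pick_first"}, grpclb = {"grpclb"};
      maybe_add_filter(&pick_first); /* no filter added */
      maybe_add_filter(&grpclb);     /* filter added */
      return 0;
    }

Gating the filter on the channel argument keeps the per-call accounting cost out of channels that never talk to a grpclb balancer.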