wearefair-grpc 1.3.1.pre.c → 1.4.0.fair
- checksums.yaml +4 -4
- data/Makefile +418 -126
- data/include/grpc/grpc.h +15 -69
- data/include/grpc/grpc_security.h +1 -1
- data/include/grpc/impl/codegen/compression_types.h +3 -4
- data/include/grpc/impl/codegen/gpr_types.h +0 -1
- data/include/grpc/impl/codegen/grpc_types.h +69 -3
- data/include/grpc/impl/codegen/port_platform.h +6 -0
- data/include/grpc/impl/codegen/slice.h +2 -1
- data/include/grpc/load_reporting.h +6 -6
- data/include/grpc/slice.h +25 -3
- data/include/grpc/slice_buffer.h +4 -0
- data/src/core/ext/census/context.c +1 -1
- data/src/core/ext/census/resource.c +3 -1
- data/src/core/ext/filters/client_channel/channel_connectivity.c +1 -1
- data/src/core/ext/filters/client_channel/client_channel.c +158 -100
- data/src/core/ext/filters/client_channel/client_channel_plugin.c +3 -2
- data/src/core/ext/filters/client_channel/lb_policy.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy.h +5 -6
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +153 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +42 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +344 -88
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +133 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +65 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +47 -5
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +6 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +19 -8
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +63 -34
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +13 -12
- data/src/core/ext/filters/client_channel/lb_policy_factory.c +28 -5
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +18 -4
- data/src/core/ext/filters/client_channel/parse_address.c +37 -7
- data/src/core/ext/filters/client_channel/parse_address.h +11 -8
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c +3 -3
- data/src/core/ext/filters/client_channel/subchannel.c +19 -16
- data/src/core/ext/filters/client_channel/subchannel.h +1 -0
- data/src/core/ext/filters/client_channel/uri_parser.c +36 -22
- data/src/core/ext/filters/client_channel/uri_parser.h +1 -1
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.c +42 -17
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.h +8 -9
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.c +19 -11
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.h +3 -6
- data/src/core/ext/filters/http/http_filters_plugin.c +104 -0
- data/src/core/{lib/channel/compress_filter.c → ext/filters/http/message_compress/message_compress_filter.c} +124 -23
- data/src/core/{lib/channel/compress_filter.h → ext/filters/http/message_compress/message_compress_filter.h} +5 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.c +4 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.h +3 -3
- data/src/core/ext/filters/load_reporting/load_reporting.c +2 -25
- data/src/core/ext/filters/load_reporting/load_reporting_filter.c +26 -1
- data/src/core/ext/filters/max_age/max_age_filter.c +14 -14
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.c +91 -47
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.h +3 -3
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +2 -2
- data/src/core/ext/transport/chttp2/transport/bin_decoder.c +2 -2
- data/src/core/ext/transport/chttp2/transport/bin_encoder.c +3 -3
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +296 -172
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -2
- data/src/core/ext/transport/chttp2/transport/frame_data.c +203 -164
- data/src/core/ext/transport/chttp2/transport/frame_data.h +8 -14
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +5 -5
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +1 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +4 -4
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +2 -4
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +4 -3
- data/src/core/ext/transport/chttp2/transport/internal.h +50 -33
- data/src/core/ext/transport/chttp2/transport/parsing.c +10 -11
- data/src/core/ext/transport/chttp2/transport/writing.c +32 -13
- data/src/core/lib/channel/channel_args.c +28 -9
- data/src/core/lib/channel/channel_args.h +5 -1
- data/src/core/lib/channel/channel_stack.c +1 -1
- data/src/core/lib/channel/channel_stack.h +2 -2
- data/src/core/lib/channel/channel_stack_builder.c +13 -1
- data/src/core/lib/channel/channel_stack_builder.h +5 -1
- data/src/core/lib/channel/connected_channel.c +3 -1
- data/src/core/lib/channel/context.h +2 -2
- data/src/core/lib/compression/message_compress.c +2 -2
- data/src/core/lib/debug/trace.c +13 -6
- data/src/core/lib/debug/trace.h +27 -1
- data/src/core/lib/http/httpcli.c +1 -1
- data/src/core/lib/http/httpcli_security_connector.c +6 -10
- data/src/core/lib/http/parser.c +2 -2
- data/src/core/lib/http/parser.h +2 -1
- data/src/core/lib/iomgr/combiner.c +6 -6
- data/src/core/lib/iomgr/combiner.h +2 -1
- data/src/core/lib/iomgr/error.c +12 -5
- data/src/core/lib/iomgr/error.h +13 -13
- data/src/core/lib/iomgr/ev_epoll1_linux.c +984 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.h +44 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +2146 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +1337 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epollex_linux.c +1511 -0
- data/src/core/lib/iomgr/ev_epollex_linux.h +43 -0
- data/src/core/lib/iomgr/{ev_epoll_linux.c → ev_epollsig_linux.c} +24 -31
- data/src/core/lib/iomgr/{ev_epoll_linux.h → ev_epollsig_linux.h} +4 -4
- data/src/core/lib/iomgr/ev_poll_posix.c +12 -27
- data/src/core/lib/iomgr/ev_poll_posix.h +2 -2
- data/src/core/lib/iomgr/ev_posix.c +22 -8
- data/src/core/lib/iomgr/ev_posix.h +4 -3
- data/src/core/lib/iomgr/exec_ctx.c +5 -0
- data/src/core/lib/iomgr/exec_ctx.h +2 -0
- data/src/core/lib/iomgr/iomgr.c +4 -0
- data/src/core/lib/iomgr/iomgr.h +3 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.c +116 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.h +41 -0
- data/src/core/lib/iomgr/lockfree_event.c +16 -0
- data/src/core/lib/iomgr/pollset.h +2 -5
- data/src/core/lib/iomgr/pollset_uv.c +1 -1
- data/src/core/lib/iomgr/pollset_windows.c +3 -3
- data/src/core/lib/iomgr/resource_quota.c +9 -8
- data/src/core/lib/iomgr/resource_quota.h +2 -1
- data/src/core/lib/iomgr/sockaddr_utils.h +1 -1
- data/src/core/lib/iomgr/socket_mutator.h +2 -0
- data/src/core/lib/iomgr/sys_epoll_wrapper.h +43 -0
- data/src/core/lib/iomgr/tcp_client_posix.c +6 -6
- data/src/core/lib/iomgr/tcp_client_uv.c +3 -3
- data/src/core/lib/iomgr/tcp_posix.c +7 -7
- data/src/core/lib/iomgr/tcp_posix.h +2 -1
- data/src/core/lib/iomgr/tcp_server_posix.c +1 -1
- data/src/core/lib/iomgr/tcp_uv.c +6 -6
- data/src/core/lib/iomgr/tcp_uv.h +2 -1
- data/src/core/lib/iomgr/tcp_windows.c +1 -1
- data/src/core/lib/iomgr/timer_generic.c +24 -25
- data/src/core/lib/iomgr/timer_manager.c +276 -0
- data/src/core/lib/iomgr/timer_manager.h +52 -0
- data/src/core/lib/iomgr/timer_uv.c +6 -0
- data/src/core/lib/iomgr/udp_server.c +42 -9
- data/src/core/lib/iomgr/udp_server.h +3 -1
- data/src/core/lib/security/credentials/credentials.c +0 -1
- data/src/core/lib/security/credentials/fake/fake_credentials.c +23 -0
- data/src/core/lib/security/credentials/fake/fake_credentials.h +12 -9
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_credentials.c +1 -1
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +1 -1
- data/src/core/lib/security/credentials/ssl/ssl_credentials.c +24 -53
- data/src/core/lib/security/transport/client_auth_filter.c +9 -3
- data/src/core/lib/security/transport/secure_endpoint.c +7 -7
- data/src/core/lib/security/transport/secure_endpoint.h +1 -1
- data/src/core/lib/security/transport/security_connector.c +32 -51
- data/src/core/lib/security/transport/security_connector.h +10 -14
- data/src/core/lib/slice/b64.c +1 -1
- data/src/core/lib/slice/percent_encoding.c +3 -3
- data/src/core/lib/slice/slice.c +66 -33
- data/src/core/lib/slice/slice_buffer.c +25 -6
- data/src/core/lib/slice/slice_hash_table.c +33 -35
- data/src/core/lib/slice/slice_hash_table.h +7 -12
- data/src/core/lib/support/atomic.h +45 -0
- data/src/core/lib/support/atomic_with_atm.h +70 -0
- data/src/core/lib/support/atomic_with_std.h +48 -0
- data/src/core/lib/support/avl.c +14 -14
- data/src/core/lib/support/memory.h +74 -0
- data/src/core/lib/support/mpscq.c +12 -1
- data/src/core/lib/support/mpscq.h +4 -0
- data/src/core/lib/support/stack_lockfree.c +3 -36
- data/src/core/lib/support/time_posix.c +8 -0
- data/src/core/lib/support/tmpfile_posix.c +10 -10
- data/src/core/lib/surface/alarm.c +3 -1
- data/src/core/lib/surface/api_trace.c +2 -1
- data/src/core/lib/surface/api_trace.h +2 -2
- data/src/core/lib/surface/byte_buffer_reader.c +1 -1
- data/src/core/lib/surface/call.c +65 -22
- data/src/core/lib/surface/call.h +4 -2
- data/src/core/lib/surface/channel_init.c +2 -19
- data/src/core/lib/surface/channel_stack_type.c +18 -0
- data/src/core/lib/surface/channel_stack_type.h +2 -0
- data/src/core/lib/surface/completion_queue.c +249 -83
- data/src/core/lib/surface/completion_queue.h +18 -13
- data/src/core/lib/surface/completion_queue_factory.c +24 -9
- data/src/core/lib/surface/init.c +1 -52
- data/src/core/lib/surface/{lame_client.c → lame_client.cc} +37 -26
- data/src/core/lib/surface/server.c +50 -27
- data/src/core/lib/surface/server.h +2 -1
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/bdp_estimator.c +20 -9
- data/src/core/lib/transport/bdp_estimator.h +5 -1
- data/src/core/lib/transport/byte_stream.c +23 -9
- data/src/core/lib/transport/byte_stream.h +15 -6
- data/src/core/lib/transport/connectivity_state.c +6 -6
- data/src/core/lib/transport/connectivity_state.h +2 -1
- data/src/core/lib/transport/service_config.c +6 -13
- data/src/core/lib/transport/service_config.h +2 -2
- data/src/core/lib/transport/static_metadata.c +403 -389
- data/src/core/lib/transport/static_metadata.h +127 -114
- data/src/core/plugin_registry/grpc_plugin_registry.c +12 -0
- data/src/core/tsi/fake_transport_security.c +5 -4
- data/src/core/tsi/ssl_transport_security.c +71 -82
- data/src/core/tsi/ssl_transport_security.h +39 -61
- data/src/core/tsi/transport_security.c +83 -2
- data/src/core/tsi/transport_security.h +27 -2
- data/src/core/tsi/transport_security_adapter.c +236 -0
- data/src/core/tsi/transport_security_adapter.h +62 -0
- data/src/core/tsi/transport_security_interface.h +179 -66
- data/src/ruby/ext/grpc/extconf.rb +2 -1
- data/src/ruby/ext/grpc/rb_byte_buffer.c +8 -6
- data/src/ruby/ext/grpc/rb_call.c +56 -48
- data/src/ruby/ext/grpc/rb_call.h +3 -4
- data/src/ruby/ext/grpc/rb_call_credentials.c +23 -22
- data/src/ruby/ext/grpc/rb_channel.c +45 -29
- data/src/ruby/ext/grpc/rb_channel_args.c +11 -9
- data/src/ruby/ext/grpc/rb_channel_credentials.c +16 -12
- data/src/ruby/ext/grpc/rb_completion_queue.c +7 -9
- data/src/ruby/ext/grpc/rb_compression_options.c +7 -6
- data/src/ruby/ext/grpc/rb_event_thread.c +10 -12
- data/src/ruby/ext/grpc/rb_event_thread.h +1 -2
- data/src/ruby/ext/grpc/rb_grpc.c +11 -15
- data/src/ruby/ext/grpc/rb_grpc.h +2 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +14 -6
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +22 -10
- data/src/ruby/ext/grpc/rb_server.c +26 -28
- data/src/ruby/lib/grpc/version.rb +1 -1
- metadata +40 -18
- data/src/ruby/lib/grpc/grpc_c.bundle +0 -0
- data/src/ruby/lib/grpc/grpc_c.so +0 -0
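
For consumers of the gem, pulling in this release is the usual Bundler one-liner. The sketch below is illustrative rather than taken from the gem's own documentation: it assumes the package is fetched from rubygems.org and that, as with other forks of the upstream grpc gem, the library is still loaded with require 'grpc'.

# Gemfile — hypothetical pin to this release
gem 'wearefair-grpc', '1.4.0.fair', require: 'grpc'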
data/src/core/ext/filters/client_channel/client_channel_plugin.c
@@ -91,8 +91,9 @@ void grpc_client_channel_init(void) {
   grpc_subchannel_index_init();
   grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MIN,
                                    set_default_host_if_unset, NULL);
-  grpc_channel_init_register_stage(
-
+  grpc_channel_init_register_stage(
+      GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
+      (void *)&grpc_client_channel_filter);
   grpc_http_connect_register_handshaker_factory();
 }

data/src/core/ext/filters/client_channel/lb_policy.c
@@ -119,9 +119,10 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
 int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                                const grpc_lb_policy_pick_args *pick_args,
                                grpc_connected_subchannel **target,
+                               grpc_call_context_element *context,
                                void **user_data, grpc_closure *on_complete) {
   return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
-                                     user_data, on_complete);
+                                     context, user_data, on_complete);
 }

 void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx *exec_ctx,
data/src/core/ext/filters/client_channel/lb_policy.h
@@ -43,9 +43,6 @@
 typedef struct grpc_lb_policy grpc_lb_policy;
 typedef struct grpc_lb_policy_vtable grpc_lb_policy_vtable;

-typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
-                                   grpc_status_code status, const char *errmsg);
-
 struct grpc_lb_policy {
   const grpc_lb_policy_vtable *vtable;
   gpr_atm ref_pair;
@@ -65,8 +62,6 @@ typedef struct grpc_lb_policy_pick_args {
   uint32_t initial_metadata_flags;
   /** Storage for LB token in \a initial_metadata, or NULL if not used */
   grpc_linked_mdelem *lb_token_mdelem_storage;
-  /** Deadline for the call to the LB server */
-  gpr_timespec deadline;
 } grpc_lb_policy_pick_args;

 struct grpc_lb_policy_vtable {
@@ -76,7 +71,8 @@ struct grpc_lb_policy_vtable {
   /** \see grpc_lb_policy_pick */
   int (*pick_locked)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                      const grpc_lb_policy_pick_args *pick_args,
-                     grpc_connected_subchannel **target,
+                     grpc_connected_subchannel **target,
+                     grpc_call_context_element *context, void **user_data,
                      grpc_closure *on_complete);

   /** \see grpc_lb_policy_cancel_pick */
@@ -156,6 +152,8 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
    \a target will be set to the selected subchannel, or NULL on failure.
    Upon success, \a user_data will be set to whatever opaque information
    may need to be propagated from the LB policy, or NULL if not needed.
+   \a context will be populated with context to pass to the subchannel
+   call, if needed.

    If the pick succeeds and a result is known immediately, a non-zero
    value will be returned. Otherwise, \a on_complete will be invoked
@@ -167,6 +165,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
 int grpc_lb_policy_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
                                const grpc_lb_policy_pick_args *pick_args,
                                grpc_connected_subchannel **target,
+                               grpc_call_context_element *context,
                                void **user_data, grpc_closure *on_complete);

 /** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c (new file)
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
+
+#include <grpc/support/atm.h>
+#include <grpc/support/log.h>
+
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
+#include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/profiling/timers.h"
+
+static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
+                                     grpc_channel_element *elem,
+                                     grpc_channel_element_args *args) {
+  return GRPC_ERROR_NONE;
+}
+
+static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
+                                 grpc_channel_element *elem) {}
+
+typedef struct {
+  // Stats object to update.
+  grpc_grpclb_client_stats *client_stats;
+  // State for intercepting send_initial_metadata.
+  grpc_closure on_complete_for_send;
+  grpc_closure *original_on_complete_for_send;
+  bool send_initial_metadata_succeeded;
+  // State for intercepting recv_initial_metadata.
+  grpc_closure recv_initial_metadata_ready;
+  grpc_closure *original_recv_initial_metadata_ready;
+  bool recv_initial_metadata_succeeded;
+} call_data;
+
+static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
+                                 grpc_error *error) {
+  call_data *calld = arg;
+  if (error == GRPC_ERROR_NONE) {
+    calld->send_initial_metadata_succeeded = true;
+  }
+  grpc_closure_run(exec_ctx, calld->original_on_complete_for_send,
+                   GRPC_ERROR_REF(error));
+}
+
+static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
+                                        grpc_error *error) {
+  call_data *calld = arg;
+  if (error == GRPC_ERROR_NONE) {
+    calld->recv_initial_metadata_succeeded = true;
+  }
+  grpc_closure_run(exec_ctx, calld->original_recv_initial_metadata_ready,
+                   GRPC_ERROR_REF(error));
+}
+
+static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
+                                  grpc_call_element *elem,
+                                  const grpc_call_element_args *args) {
+  call_data *calld = elem->call_data;
+  // Get stats object from context and take a ref.
+  GPR_ASSERT(args->context != NULL);
+  GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
+  calld->client_stats = grpc_grpclb_client_stats_ref(
+      args->context[GRPC_GRPCLB_CLIENT_STATS].value);
+  // Record call started.
+  grpc_grpclb_client_stats_add_call_started(calld->client_stats);
+  return GRPC_ERROR_NONE;
+}
+
+static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+                              const grpc_call_final_info *final_info,
+                              grpc_closure *ignored) {
+  call_data *calld = elem->call_data;
+  // Record call finished, optionally setting client_failed_to_send and
+  // received.
+  grpc_grpclb_client_stats_add_call_finished(
+      false /* drop_for_rate_limiting */, false /* drop_for_load_balancing */,
+      !calld->send_initial_metadata_succeeded /* client_failed_to_send */,
+      calld->recv_initial_metadata_succeeded /* known_received */,
+      calld->client_stats);
+  // All done, so unref the stats object.
+  grpc_grpclb_client_stats_unref(calld->client_stats);
+}
+
+static void start_transport_stream_op_batch(
+    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+    grpc_transport_stream_op_batch *batch) {
+  call_data *calld = elem->call_data;
+  GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
+  // Intercept send_initial_metadata.
+  if (batch->send_initial_metadata) {
+    calld->original_on_complete_for_send = batch->on_complete;
+    grpc_closure_init(&calld->on_complete_for_send, on_complete_for_send, calld,
+                      grpc_schedule_on_exec_ctx);
+    batch->on_complete = &calld->on_complete_for_send;
+  }
+  // Intercept recv_initial_metadata.
+  if (batch->recv_initial_metadata) {
+    calld->original_recv_initial_metadata_ready =
+        batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
+    grpc_closure_init(&calld->recv_initial_metadata_ready,
+                      recv_initial_metadata_ready, calld,
+                      grpc_schedule_on_exec_ctx);
+    batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
+        &calld->recv_initial_metadata_ready;
+  }
+  // Chain to next filter.
+  grpc_call_next_op(exec_ctx, elem, batch);
+  GPR_TIMER_END("clr_start_transport_stream_op_batch", 0);
+}
+
+const grpc_channel_filter grpc_client_load_reporting_filter = {
+    start_transport_stream_op_batch,
+    grpc_channel_next_op,
+    sizeof(call_data),
+    init_call_elem,
+    grpc_call_stack_ignore_set_pollset_or_pollset_set,
+    destroy_call_elem,
+    0,  // sizeof(channel_data)
+    init_channel_elem,
+    destroy_channel_elem,
+    grpc_call_next_get_peer,
+    grpc_channel_next_get_info,
+    "client_load_reporting"};
data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h (new file)
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2017, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H
+
+#include "src/core/lib/channel/channel_stack.h"
+
+extern const grpc_channel_filter grpc_client_load_reporting_filter;
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
+        */
data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c
@@ -95,8 +95,7 @@
    headers. Therefore, sockaddr.h must always be included first */
 #include "src/core/lib/iomgr/sockaddr.h"

-#include <
-
+#include <limits.h>
 #include <string.h>

 #include <grpc/byte_buffer_reader.h>
@@ -108,13 +107,16 @@

 #include "src/core/ext/filters/client_channel/client_channel.h"
 #include "src/core/ext/filters/client_channel/client_channel_factory.h"
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h"
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h"
+#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h"
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
 #include "src/core/ext/filters/client_channel/lb_policy_factory.h"
 #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
 #include "src/core/ext/filters/client_channel/parse_address.h"
 #include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/sockaddr.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -126,6 +128,7 @@
 #include "src/core/lib/support/string.h"
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/surface/channel.h"
+#include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/static_metadata.h"

 #define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
@@ -134,7 +137,7 @@
 #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
 #define GRPC_GRPCLB_RECONNECT_JITTER 0.2

-
+grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false);

 /* add lb_token of selected subchannel (address) to the call's initial
  * metadata */
@@ -147,6 +150,10 @@ static grpc_error *initial_metadata_add_lb_token(
                                       lb_token_mdelem_storage, lb_token);
 }

+static void destroy_client_stats(void *arg) {
+  grpc_grpclb_client_stats_unref(arg);
+}
+
 typedef struct wrapped_rr_closure_arg {
   /* the closure instance using this struct as argument */
   grpc_closure wrapper_closure;
@@ -163,6 +170,13 @@ typedef struct wrapped_rr_closure_arg {
    * initial metadata */
   grpc_connected_subchannel **target;

+  /* the context to be populated for the subchannel call */
+  grpc_call_context_element *context;
+
+  /* Stats for client-side load reporting. Note that this holds a
+   * reference, which must be either passed on via context or unreffed. */
+  grpc_grpclb_client_stats *client_stats;
+
   /* the LB token associated with the pick */
   grpc_mdelem lb_token;

@@ -202,8 +216,14 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
               (void *)*wc_arg->target, (void *)wc_arg->rr_policy);
       abort();
     }
+    // Pass on client stats via context. Passes ownership of the reference.
+    GPR_ASSERT(wc_arg->client_stats != NULL);
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
+  } else {
+    grpc_grpclb_client_stats_unref(wc_arg->client_stats);
   }
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Unreffing RR %p", (void *)wc_arg->rr_policy);
   }
   GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
@@ -237,6 +257,7 @@ typedef struct pending_pick {
 static void add_pending_pick(pending_pick **root,
                              const grpc_lb_policy_pick_args *pick_args,
                              grpc_connected_subchannel **target,
+                             grpc_call_context_element *context,
                              grpc_closure *on_complete) {
   pending_pick *pp = gpr_zalloc(sizeof(*pp));
   pp->next = *root;
@@ -244,6 +265,7 @@ static void add_pending_pick(pending_pick **root,
   pp->target = target;
   pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
   pp->wrapped_on_complete_arg.target = target;
+  pp->wrapped_on_complete_arg.context = context;
   pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
   pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
       pick_args->lb_token_mdelem_storage;
@@ -287,8 +309,8 @@ typedef struct glb_lb_policy {
   grpc_client_channel_factory *cc_factory;
   grpc_channel_args *args;

-  /**
-
+  /** timeout in milliseconds for the LB call. 0 means no deadline. */
+  int lb_call_timeout_ms;

   /** for communicating with the LB server */
   grpc_channel *lb_channel;
@@ -316,6 +338,10 @@ typedef struct glb_lb_policy {
   /************************************************************/
   /* client data associated with the LB server communication */
   /************************************************************/
+
+  /* Finished sending initial request. */
+  grpc_closure lb_on_sent_initial_request;
+
   /* Status from the LB server has been received. This signals the end of the LB
    * call. */
   grpc_closure lb_on_server_status_received;
@@ -348,6 +374,23 @@ typedef struct glb_lb_policy {

   /** LB call retry timer */
   grpc_timer lb_call_retry_timer;
+
+  bool initial_request_sent;
+  bool seen_initial_response;
+
+  /* Stats for client-side load reporting. Should be unreffed and
+   * recreated whenever lb_call is replaced. */
+  grpc_grpclb_client_stats *client_stats;
+  /* Interval and timer for next client load report. */
+  gpr_timespec client_stats_report_interval;
+  grpc_timer client_load_report_timer;
+  bool client_load_report_timer_pending;
+  bool last_client_load_report_counters_were_zero;
+  /* Closure used for either the load report timer or the callback for
+   * completion of sending the load report. */
+  grpc_closure client_load_report_closure;
+  /* Client load report message payload. */
+  grpc_byte_buffer *client_load_report_payload;
 } glb_lb_policy;

 /* Keeps track and reacts to changes in connectivity of the RR instance */
@@ -531,7 +574,7 @@ static bool update_lb_connectivity_status_locked(
     GPR_ASSERT(new_rr_state_error == GRPC_ERROR_NONE);
   }

-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO,
             "Setting grpclb's state to %s from new RR policy %p state.",
             grpc_connectivity_state_name(new_rr_state),
@@ -552,11 +595,11 @@ static bool pick_from_internal_rr_locked(
     grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) {
   GPR_ASSERT(rr_policy != NULL);
   const bool pick_done = grpc_lb_policy_pick_locked(
-      exec_ctx, rr_policy, pick_args, target,
-      &wc_arg->wrapper_closure);
+      exec_ctx, rr_policy, pick_args, target, wc_arg->context,
+      (void **)&wc_arg->lb_token, &wc_arg->wrapper_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
               (intptr_t)wc_arg->rr_policy);
     }
@@ -567,7 +610,12 @@ static bool pick_from_internal_rr_locked(
                                   pick_args->lb_token_mdelem_storage,
                                   GRPC_MDELEM_REF(wc_arg->lb_token));

-
+    // Pass on client stats via context. Passes ownership of the reference.
+    GPR_ASSERT(wc_arg->client_stats != NULL);
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
+    wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
+
+    gpr_free(wc_arg->free_when_done);
   }
   /* else, the pending pick will be registered and taken care of by the
    * pending pick list inside the RR policy (glb_policy->rr_policy).
@@ -637,7 +685,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
   if (!replace_old_rr) {
     /* dispose of the new RR policy that won't be used after all */
     GRPC_LB_POLICY_UNREF(exec_ctx, new_rr_policy, "rr_handover_no_replace");
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO,
               "Keeping old RR policy (%p) despite new serverlist: new RR "
               "policy was in %s connectivity state.",
@@ -647,7 +695,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
     return;
   }

-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Created RR policy (%p) to replace old RR (%p)",
             (void *)new_rr_policy, (void *)glb_policy->rr_policy);
   }
@@ -690,7 +738,9 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
     glb_policy->pending_picks = pp->next;
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
     pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
-
+    pp->wrapped_on_complete_arg.client_stats =
+        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Pending pick about to PICK from 0x%" PRIxPTR "",
               (intptr_t)glb_policy->rr_policy);
     }
@@ -704,7 +754,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
     glb_policy->pending_pings = pping->next;
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
     pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
               (intptr_t)glb_policy->rr_policy);
     }
@@ -750,18 +800,11 @@ static void destroy_balancer_name(grpc_exec_ctx *exec_ctx,
   gpr_free(balancer_name);
 }

-static void *copy_balancer_name(void *balancer_name) {
-  return gpr_strdup(balancer_name);
-}
-
 static grpc_slice_hash_table_entry targets_info_entry_create(
     const char *address, const char *balancer_name) {
-  static const grpc_slice_hash_table_vtable vtable = {destroy_balancer_name,
-                                                      copy_balancer_name};
   grpc_slice_hash_table_entry entry;
   entry.key = grpc_slice_from_copied_string(address);
-  entry.value = (
-  entry.vtable = &vtable;
+  entry.value = gpr_strdup(balancer_name);
   return entry;
 }

@@ -825,11 +868,8 @@ static char *get_lb_uri_target_addresses(grpc_exec_ctx *exec_ctx,
                    uri_path);
   gpr_free(uri_path);

-  *targets_info =
-
-  for (size_t i = 0; i < num_grpclb_addrs; i++) {
-    grpc_slice_unref_internal(exec_ctx, targets_info_entries[i].key);
-  }
+  *targets_info = grpc_slice_hash_table_create(
+      num_grpclb_addrs, targets_info_entries, destroy_balancer_name);
   gpr_free(targets_info_entries);

   return target_uri_str;
@@ -841,10 +881,10 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
   /* Count the number of gRPC-LB addresses. There must be at least one.
    * TODO(roth): For now, we ignore non-balancer addresses, but in the
    * future, we may change the behavior such that we fall back to using
-   * the non-balancer addresses if we cannot reach any balancers.
-   *
-   *
-   *
+   * the non-balancer addresses if we cannot reach any balancers. In the
+   * fallback case, we should use the LB policy indicated by
+   * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is
+   * unset, we should default to pick_first). */
   const grpc_arg *arg =
       grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES);
   if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
@@ -867,16 +907,29 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(uri->path[0] != '\0');
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
             glb_policy->server_name);
   }
   grpc_uri_destroy(uri);

   glb_policy->cc_factory = args->client_channel_factory;
-  glb_policy->args = grpc_channel_args_copy(args->args);
   GPR_ASSERT(glb_policy->cc_factory != NULL);

+  arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
+  glb_policy->lb_call_timeout_ms =
+      grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX});
+
+  // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
+  // since we use this to trigger the client_load_reporting filter.
+  grpc_arg new_arg;
+  new_arg.key = GRPC_ARG_LB_POLICY_NAME;
+  new_arg.type = GRPC_ARG_STRING;
+  new_arg.value.string = "grpclb";
+  static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
+  glb_policy->args = grpc_channel_args_copy_and_add_and_remove(
+      args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
+
   grpc_slice_hash_table *targets_info = NULL;
   /* Create a client channel over them to communicate with a LB service */
   char *lb_service_target_addresses =
@@ -890,6 +943,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
   grpc_channel_args_destroy(exec_ctx, lb_channel_args);
   gpr_free(lb_service_target_addresses);
   if (glb_policy->lb_channel == NULL) {
+    gpr_free((void *)glb_policy->server_name);
+    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
     gpr_free(glb_policy);
     return NULL;
   }
@@ -905,6 +960,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   GPR_ASSERT(glb_policy->pending_pings == NULL);
   gpr_free((void *)glb_policy->server_name);
   grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+  if (glb_policy->client_stats != NULL) {
+    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
+  }
   grpc_channel_destroy(glb_policy->lb_channel);
   glb_policy->lb_channel = NULL;
   grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
@@ -1021,7 +1079,8 @@ static void glb_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {

 static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                            const grpc_lb_policy_pick_args *pick_args,
-                           grpc_connected_subchannel **target,
+                           grpc_connected_subchannel **target,
+                           grpc_call_context_element *context, void **user_data,
                            grpc_closure *on_complete) {
   if (pick_args->lb_token_mdelem_storage == NULL) {
     *target = NULL;
@@ -1033,11 +1092,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   }

   glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
-  glb_policy->deadline = pick_args->deadline;
   bool pick_done;

   if (glb_policy->rr_policy != NULL) {
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
               (void *)glb_policy, (void *)glb_policy->rr_policy);
     }
@@ -1049,6 +1107,10 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                       grpc_schedule_on_exec_ctx);
     wc_arg->rr_policy = glb_policy->rr_policy;
     wc_arg->target = target;
+    wc_arg->context = context;
+    GPR_ASSERT(glb_policy->client_stats != NULL);
+    wc_arg->client_stats =
+        grpc_grpclb_client_stats_ref(glb_policy->client_stats);
     wc_arg->wrapped_closure = on_complete;
     wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
     wc_arg->initial_metadata = pick_args->initial_metadata;
@@ -1056,13 +1118,13 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
     pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
                                              pick_args, target, wc_arg);
   } else {
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_DEBUG,
               "No RR policy in grpclb instance %p. Adding to grpclb's pending "
               "picks",
               (void *)(glb_policy));
     }
-    add_pending_pick(&glb_policy->pending_picks, pick_args, target,
+    add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                      on_complete);

     if (!glb_policy->started_picking) {
@@ -1103,6 +1165,104 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
       exec_ctx, &glb_policy->state_tracker, current, notify);
 }

+static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                           grpc_error *error);
+
+static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
+                                             glb_lb_policy *glb_policy) {
+  const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+  const gpr_timespec next_client_load_report_time =
+      gpr_time_add(now, glb_policy->client_stats_report_interval);
+  grpc_closure_init(&glb_policy->client_load_report_closure,
+                    send_client_load_report_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
+  grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
+                  next_client_load_report_time,
+                  &glb_policy->client_load_report_closure, now);
+}
+
+static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                           grpc_error *error) {
+  glb_lb_policy *glb_policy = arg;
+  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
+  glb_policy->client_load_report_payload = NULL;
+  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
+    glb_policy->client_load_report_timer_pending = false;
+    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                              "client_load_report");
+    return;
+  }
+  schedule_next_client_load_report(exec_ctx, glb_policy);
+}
+
+static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx,
+                                              glb_lb_policy *glb_policy) {
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_SEND_MESSAGE;
+  op.data.send_message.send_message = glb_policy->client_load_report_payload;
+  grpc_closure_init(&glb_policy->client_load_report_closure,
+                    client_load_report_done_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
+  grpc_call_error call_error = grpc_call_start_batch_and_execute(
+      exec_ctx, glb_policy->lb_call, &op, 1,
+      &glb_policy->client_load_report_closure);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
+}
+
+static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
+  return request->client_stats.num_calls_started == 0 &&
+         request->client_stats.num_calls_finished == 0 &&
+         request->client_stats.num_calls_finished_with_drop_for_rate_limiting ==
+             0 &&
+         request->client_stats
+                 .num_calls_finished_with_drop_for_load_balancing == 0 &&
+         request->client_stats.num_calls_finished_with_client_failed_to_send ==
+             0 &&
+         request->client_stats.num_calls_finished_known_received == 0;
+}
+
+static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                           grpc_error *error) {
+  glb_lb_policy *glb_policy = arg;
+  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
+    glb_policy->client_load_report_timer_pending = false;
+    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                              "client_load_report");
+    return;
+  }
+  // Construct message payload.
+  GPR_ASSERT(glb_policy->client_load_report_payload == NULL);
+  grpc_grpclb_request *request =
+      grpc_grpclb_load_report_request_create(glb_policy->client_stats);
+  // Skip client load report if the counters were all zero in the last
+  // report and they are still zero in this one.
+  if (load_report_counters_are_zero(request)) {
+    if (glb_policy->last_client_load_report_counters_were_zero) {
+      grpc_grpclb_request_destroy(request);
+      schedule_next_client_load_report(exec_ctx, glb_policy);
+      return;
+    }
+    glb_policy->last_client_load_report_counters_were_zero = true;
+  } else {
+    glb_policy->last_client_load_report_counters_were_zero = false;
+  }
+  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
+  glb_policy->client_load_report_payload =
+      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
+  grpc_grpclb_request_destroy(request);
+  // If we've already sent the initial request, then we can go ahead and
+  // sent the load report. Otherwise, we need to wait until the initial
+  // request has been sent to send this
+  // (see lb_on_sent_initial_request_locked() below).
+  if (glb_policy->initial_request_sent) {
+    do_send_client_load_report_locked(exec_ctx, glb_policy);
+  }
+}
+
+static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
+                                              void *arg, grpc_error *error);
 static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
                                                 void *arg, grpc_error *error);
 static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -1117,11 +1277,23 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
    * glb_policy->base.interested_parties, which is comprised of the polling
    * entities from \a client_channel. */
   grpc_slice host = grpc_slice_from_copied_string(glb_policy->server_name);
+  gpr_timespec deadline =
+      glb_policy->lb_call_timeout_ms == 0
+          ? gpr_inf_future(GPR_CLOCK_MONOTONIC)
+          : gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+                         gpr_time_from_millis(glb_policy->lb_call_timeout_ms,
+                                              GPR_TIMESPAN));
   glb_policy->lb_call = grpc_channel_create_pollset_set_call(
       exec_ctx, glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
       GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
-      &host,
+      &host, deadline, NULL);
+  grpc_slice_unref_internal(exec_ctx, host);
+
+  if (glb_policy->client_stats != NULL) {
+    grpc_grpclb_client_stats_unref(glb_policy->client_stats);
+  }
+  glb_policy->client_stats = grpc_grpclb_client_stats_create();

   grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
   grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
@@ -1134,6 +1306,9 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
   grpc_slice_unref_internal(exec_ctx, request_payload_slice);
   grpc_grpclb_request_destroy(request);

+  grpc_closure_init(&glb_policy->lb_on_sent_initial_request,
+                    lb_on_sent_initial_request_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner, false));
   grpc_closure_init(&glb_policy->lb_on_server_status_received,
                     lb_on_server_status_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner, false));
@@ -1147,12 +1322,16 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
       GRPC_GRPCLB_RECONNECT_JITTER,
       GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
       GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+
+  glb_policy->initial_request_sent = false;
+  glb_policy->seen_initial_response = false;
+  glb_policy->last_client_load_report_counters_were_zero = false;
 }

 static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,
                                    glb_lb_policy *glb_policy) {
   GPR_ASSERT(glb_policy->lb_call != NULL);
-
+  grpc_call_unref(glb_policy->lb_call);
   glb_policy->lb_call = NULL;

   grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
@@ -1160,6 +1339,10 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx,

   grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
   grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
+
+  if (!glb_policy->client_load_report_timer_pending) {
+    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
+  }
 }

 /*
@@ -1172,7 +1355,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,

   lb_call_init_locked(exec_ctx, glb_policy);

-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
             (void *)glb_policy, (void *)glb_policy->lb_call);
   }
@@ -1188,21 +1371,27 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   op->flags = 0;
   op->reserved = NULL;
   op++;
-
   op->op = GRPC_OP_RECV_INITIAL_METADATA;
   op->data.recv_initial_metadata.recv_initial_metadata =
       &glb_policy->lb_initial_metadata_recv;
   op->flags = 0;
   op->reserved = NULL;
   op++;
-
   GPR_ASSERT(glb_policy->lb_request_payload != NULL);
   op->op = GRPC_OP_SEND_MESSAGE;
   op->data.send_message.send_message = glb_policy->lb_request_payload;
   op->flags = 0;
   op->reserved = NULL;
   op++;
+  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
+   * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
+  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
+  call_error = grpc_call_start_batch_and_execute(
+      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+      &glb_policy->lb_on_sent_initial_request);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);

+  op = ops;
   op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
   op->data.recv_status_on_client.trailing_metadata =
       &glb_policy->lb_trailing_metadata_recv;
@@ -1234,6 +1423,19 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }

+static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
+                                              void *arg, grpc_error *error) {
+  glb_lb_policy *glb_policy = arg;
+  glb_policy->initial_request_sent = true;
+  // If we attempted to send a client load report before the initial
+  // request was sent, send the load report now.
+  if (glb_policy->client_load_report_payload != NULL) {
+    do_send_client_load_report_locked(exec_ctx, glb_policy);
+  }
+  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+                            "lb_on_response_received_locked");
+}
+
 static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                            grpc_error *error) {
   glb_lb_policy *glb_policy = arg;
1237
1439
|
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
|
1238
1440
|
grpc_error *error) {
|
1239
1441
|
glb_lb_policy *glb_policy = arg;
|
@@ -1249,57 +1451,91 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
     grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
     grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
     grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
-    grpc_grpclb_serverlist *serverlist =
-        grpc_grpclb_response_parse_serverlist(response_slice);
-    if (serverlist != NULL) {
-      GPR_ASSERT(glb_policy->lb_call != NULL);
-      grpc_slice_unref_internal(exec_ctx, response_slice);
-      if (grpc_lb_glb_trace) {
-        gpr_log(GPR_INFO, "Serverlist with %lu servers received",
-                (unsigned long)serverlist->num_servers);
-        for (size_t i = 0; i < serverlist->num_servers; ++i) {
-          grpc_resolved_address addr;
-          parse_server(serverlist->servers[i], &addr);
-          char *ipport;
-          grpc_sockaddr_to_string(&ipport, &addr, false);
-          gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
-          gpr_free(ipport);
+
+    grpc_grpclb_initial_response *response = NULL;
+    if (!glb_policy->seen_initial_response &&
+        (response = grpc_grpclb_initial_response_parse(response_slice)) !=
+            NULL) {
+      if (response->has_client_stats_report_interval) {
+        glb_policy->client_stats_report_interval =
+            gpr_time_max(gpr_time_from_seconds(1, GPR_TIMESPAN),
+                         grpc_grpclb_duration_to_timespec(
+                             &response->client_stats_report_interval));
+        if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+          gpr_log(GPR_INFO,
+                  "received initial LB response message; "
+                  "client load reporting interval = %" PRId64 ".%09d sec",
+                  glb_policy->client_stats_report_interval.tv_sec,
+                  glb_policy->client_stats_report_interval.tv_nsec);
         }
+        /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
+         * strong ref count goes to zero) to be unref'd in
+         * send_client_load_report() */
+        glb_policy->client_load_report_timer_pending = true;
+        GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
+        schedule_next_client_load_report(exec_ctx, glb_policy);
+      } else if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+        gpr_log(GPR_INFO,
+                "received initial LB response message; "
+                "client load reporting NOT enabled");
       }
+      grpc_grpclb_initial_response_destroy(response);
+      glb_policy->seen_initial_response = true;
+    } else {
+      grpc_grpclb_serverlist *serverlist =
+          grpc_grpclb_response_parse_serverlist(response_slice);
+      if (serverlist != NULL) {
+        GPR_ASSERT(glb_policy->lb_call != NULL);
+        if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+          gpr_log(GPR_INFO, "Serverlist with %lu servers received",
+                  (unsigned long)serverlist->num_servers);
+          for (size_t i = 0; i < serverlist->num_servers; ++i) {
+            grpc_resolved_address addr;
+            parse_server(serverlist->servers[i], &addr);
+            char *ipport;
+            grpc_sockaddr_to_string(&ipport, &addr, false);
+            gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
+            gpr_free(ipport);
+          }
+        }
 
-      /* update serverlist */
-      if (serverlist->num_servers > 0) {
-        if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
-          if (grpc_lb_glb_trace) {
+        /* update serverlist */
+        if (serverlist->num_servers > 0) {
+          if (grpc_grpclb_serverlist_equals(glb_policy->serverlist,
+                                            serverlist)) {
+            if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
+              gpr_log(GPR_INFO,
+                      "Incoming server list identical to current, ignoring.");
+            }
+            grpc_grpclb_destroy_serverlist(serverlist);
+          } else { /* new serverlist */
+            if (glb_policy->serverlist != NULL) {
+              /* dispose of the old serverlist */
+              grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
+            }
+            /* and update the copy in the glb_lb_policy instance. This
+             * serverlist instance will be destroyed either upon the next
+             * update or in glb_destroy() */
+            glb_policy->serverlist = serverlist;
+
+            rr_handover_locked(exec_ctx, glb_policy);
+          }
+        } else {
+          if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
             gpr_log(GPR_INFO,
-                    "Incoming server list identical to current, ignoring.");
+                    "Received empty server list. Picks will stay pending until "
+                    "a response with > 0 servers is received");
          }
          grpc_grpclb_destroy_serverlist(serverlist);
-        } else { /* new serverlist */
-          if (glb_policy->serverlist != NULL) {
-            /* dispose of the old serverlist */
-            grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
-          }
-          /* and update the copy in the glb_lb_policy instance. This serverlist
-           * instance will be destroyed either upon the next update or in
-           * glb_destroy() */
-          glb_policy->serverlist = serverlist;
-
-          rr_handover_locked(exec_ctx, glb_policy);
-        }
-      } else {
-        if (grpc_lb_glb_trace) {
-          gpr_log(GPR_INFO,
-                  "Received empty server list. Picks will stay pending until a "
-                  "response with > 0 servers is received");
        }
+      } else { /* serverlist == NULL */
+        gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
+                grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
      }
-    } else { /* serverlist == NULL */
-      gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
-              grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
-      grpc_slice_unref_internal(exec_ctx, response_slice);
    }
 
+    grpc_slice_unref_internal(exec_ctx, response_slice);
+
     if (!glb_policy->shutting_down) {
       /* keep listening for serverlist updates */
       op->op = GRPC_OP_RECV_MESSAGE;
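In the rewritten handler above, the first message on the LB call may be a `grpc_grpclb_initial_response` (optionally enabling client load reporting); every later message is parsed as a serverlist and is either ignored (empty, or identical to the current one) or adopted, which triggers a round_robin handover. A standalone model of that serverlist decision, using a hypothetical `serverlist` type in place of `grpc_grpclb_serverlist`:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  size_t num_servers;
  const char **servers;
} serverlist;

static bool lists_equal(const serverlist *a, const serverlist *b) {
  if (a == NULL || b == NULL || a->num_servers != b->num_servers) return false;
  for (size_t i = 0; i < a->num_servers; ++i)
    if (strcmp(a->servers[i], b->servers[i]) != 0) return false;
  return true;
}

/* Mirrors the update logic: empty lists are ignored, identical lists are
 * dropped, and a genuinely new list replaces the stored copy (where the real
 * code would then hand the addresses to the round_robin child policy). */
static void on_serverlist(serverlist **current, serverlist *incoming) {
  if (incoming->num_servers == 0) {
    printf("empty list: picks stay pending\n");
    free(incoming);
    return;
  }
  if (lists_equal(*current, incoming)) {
    printf("identical to current: ignoring\n");
    free(incoming);
    return;
  }
  free(*current); /* dispose of the old serverlist, if any */
  *current = incoming;
  printf("adopted new list with %zu servers\n", (*current)->num_servers);
}

int main(void) {
  static const char *addrs[] = {"10.0.0.1:443", "10.0.0.2:443"};
  serverlist *current = NULL;
  serverlist *first = malloc(sizeof(*first));
  first->num_servers = 2;
  first->servers = addrs;
  on_serverlist(&current, first); /* adopted */
  serverlist *empty = calloc(1, sizeof(*empty));
  on_serverlist(&current, empty); /* ignored */
  free(current);
  return 0;
}
```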
@@ -1327,7 +1563,7 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
   glb_lb_policy *glb_policy = arg;
 
   if (!glb_policy->shutting_down) {
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
               (void *)glb_policy);
     }
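This hunk and the two that follow replace direct reads of `grpc_lb_glb_trace` with the `GRPC_TRACER_ON()` macro (the rewritten response handler above does the same). A toy version of that pattern, assuming a struct-wrapped flag; the real `grpc_tracer_flag` layout may differ:

```c
#include <stdio.h>

/* Toy stand-ins for grpc_tracer_flag / GRPC_TRACER_ON: wrapping the flag in a
 * struct and reading it through a macro keeps call sites unchanged if the
 * representation changes (e.g. tracers compiled out in release builds). */
typedef struct { int value; } tracer_flag;
#define TRACER_ON(flag) ((flag).value != 0)

static tracer_flag glb_trace = {1};

int main(void) {
  if (TRACER_ON(glb_trace)) {
    printf("glb tracing enabled\n");
  }
  return 0;
}
```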
@@ -1344,7 +1580,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
 
   GPR_ASSERT(glb_policy->lb_call != NULL);
 
-  if (grpc_lb_glb_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
     char *status_details =
         grpc_slice_to_c_string(glb_policy->lb_call_status_details);
     gpr_log(GPR_DEBUG,
@@ -1363,7 +1599,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
     gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     gpr_timespec next_try =
         gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
-    if (grpc_lb_glb_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
       gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
               (void *)glb_policy);
       gpr_timespec timeout = gpr_time_sub(next_try, now);
@@ -1411,9 +1647,29 @@ grpc_lb_policy_factory *grpc_glb_lb_factory_create() {
 }
 
 /* Plugin registration */
+
+// Only add client_load_reporting filter if the grpclb LB policy is used.
+static bool maybe_add_client_load_reporting_filter(
+    grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
+  const grpc_channel_args *args =
+      grpc_channel_stack_builder_get_channel_arguments(builder);
+  const grpc_arg *channel_arg =
+      grpc_channel_args_find(args, GRPC_ARG_LB_POLICY_NAME);
+  if (channel_arg != NULL && channel_arg->type == GRPC_ARG_STRING &&
+      strcmp(channel_arg->value.string, "grpclb") == 0) {
+    return grpc_channel_stack_builder_append_filter(
+        builder, (const grpc_channel_filter *)arg, NULL, NULL);
+  }
+  return true;
+}
+
 void grpc_lb_policy_grpclb_init() {
   grpc_register_lb_policy(grpc_glb_lb_factory_create());
   grpc_register_tracer("glb", &grpc_lb_glb_trace);
+  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
+                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
+                                   maybe_add_client_load_reporting_filter,
+                                   (void *)&grpc_client_load_reporting_filter);
 }
 
 void grpc_lb_policy_grpclb_shutdown() {}
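The plugin-registration hunk above hooks `maybe_add_client_load_reporting_filter` into subchannel construction via `grpc_channel_init_register_stage`, so the `client_load_reporting` filter is appended only when the channel's `GRPC_ARG_LB_POLICY_NAME` argument is exactly "grpclb". A toy model of that gate (illustrative types, not the gRPC builder API):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the channel args and the builder callback. */
typedef struct { const char *lb_policy_name; } channel_args;

static bool maybe_add_filter(const channel_args *args, bool *filter_added) {
  if (args->lb_policy_name != NULL &&
      strcmp(args->lb_policy_name, "grpclb") == 0) {
    *filter_added = true; /* stands in for appending client_load_reporting */
  }
  return true; /* true lets channel-stack construction continue either way */
}

int main(void) {
  bool added = false;
  channel_args grpclb_args = {"grpclb"};
  channel_args pf_args = {"pick_first"};
  maybe_add_filter(&grpclb_args, &added);
  printf("grpclb channel: filter added = %d\n", added); /* prints 1 */
  added = false;
  maybe_add_filter(&pf_args, &added);
  printf("pick_first channel: filter added = %d\n", added); /* prints 0 */
  return 0;
}
```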