grpc 1.17.1 → 1.18.0.pre1
Note: this release has been flagged as potentially problematic.
- checksums.yaml +4 -4
- data/Makefile +1228 -988
- data/etc/roots.pem +242 -30
- data/include/grpc/grpc.h +2 -1
- data/include/grpc/grpc_security_constants.h +3 -3
- data/include/grpc/impl/codegen/atm_gcc_sync.h +2 -0
- data/include/grpc/impl/codegen/atm_windows.h +2 -0
- data/include/grpc/impl/codegen/compression_types.h +2 -1
- data/include/grpc/impl/codegen/grpc_types.h +1 -1
- data/include/grpc/impl/codegen/port_platform.h +9 -0
- data/src/core/ext/filters/client_channel/client_channel.cc +163 -882
- data/src/core/ext/filters/client_channel/health/health_check_client.cc +2 -4
- data/src/core/ext/filters/client_channel/health/health_check_client.h +2 -3
- data/src/core/ext/filters/client_channel/lb_policy.cc +1 -1
- data/src/core/ext/filters/client_channel/lb_policy.h +8 -17
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +176 -216
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +20 -23
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +49 -52
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +13 -35
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +31 -30
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +69 -225
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc +20 -23
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h +1 -1
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +2 -84
- data/src/core/ext/filters/client_channel/request_routing.cc +936 -0
- data/src/core/ext/filters/client_channel/request_routing.h +177 -0
- data/src/core/ext/filters/client_channel/resolver.cc +1 -1
- data/src/core/ext/filters/client_channel/resolver.h +1 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +37 -26
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +30 -18
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +119 -100
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +8 -5
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +5 -4
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc +2 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +12 -14
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +5 -9
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +2 -1
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +1 -2
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +17 -17
- data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +45 -52
- data/src/core/ext/filters/client_channel/resolver_result_parsing.h +13 -17
- data/src/core/ext/filters/client_channel/server_address.cc +103 -0
- data/src/core/ext/filters/client_channel/server_address.h +108 -0
- data/src/core/ext/filters/client_channel/subchannel.cc +10 -8
- data/src/core/ext/filters/client_channel/subchannel.h +9 -6
- data/src/core/ext/filters/client_channel/subchannel_index.cc +20 -27
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +3 -2
- data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +8 -9
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +1 -1
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +1 -1
- data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +8 -11
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +24 -54
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -1
- data/src/core/ext/transport/chttp2/transport/context_list.cc +67 -0
- data/src/core/ext/transport/chttp2/transport/context_list.h +53 -0
- data/src/core/ext/transport/chttp2/transport/internal.h +38 -11
- data/src/core/ext/transport/chttp2/transport/writing.cc +5 -0
- data/src/core/ext/transport/inproc/inproc_transport.cc +1 -1
- data/src/core/lib/channel/channelz.cc +19 -18
- data/src/core/lib/channel/channelz.h +7 -1
- data/src/core/lib/channel/channelz_registry.cc +3 -2
- data/src/core/lib/debug/trace.cc +3 -0
- data/src/core/lib/debug/trace.h +5 -3
- data/src/core/lib/gpr/sync_posix.cc +96 -4
- data/src/core/lib/gprpp/inlined_vector.h +25 -19
- data/src/core/lib/gprpp/memory.h +2 -11
- data/src/core/lib/gprpp/orphanable.h +18 -82
- data/src/core/lib/gprpp/ref_counted.h +75 -84
- data/src/core/lib/gprpp/ref_counted_ptr.h +22 -17
- data/src/core/lib/http/httpcli_security_connector.cc +101 -94
- data/src/core/lib/http/parser.h +5 -5
- data/src/core/lib/iomgr/buffer_list.cc +16 -5
- data/src/core/lib/iomgr/buffer_list.h +10 -3
- data/src/core/lib/iomgr/call_combiner.cc +50 -2
- data/src/core/lib/iomgr/call_combiner.h +29 -2
- data/src/core/lib/iomgr/dynamic_annotations.h +67 -0
- data/src/core/lib/iomgr/endpoint.cc +4 -0
- data/src/core/lib/iomgr/endpoint.h +3 -0
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +4 -0
- data/src/core/lib/iomgr/ev_epollex_linux.cc +4 -0
- data/src/core/lib/iomgr/ev_poll_posix.cc +4 -0
- data/src/core/lib/iomgr/ev_posix.cc +15 -7
- data/src/core/lib/iomgr/ev_posix.h +10 -0
- data/src/core/lib/iomgr/exec_ctx.cc +13 -0
- data/src/core/lib/iomgr/fork_posix.cc +1 -1
- data/src/core/lib/iomgr/internal_errqueue.cc +36 -3
- data/src/core/lib/iomgr/internal_errqueue.h +7 -1
- data/src/core/lib/iomgr/iomgr.cc +7 -0
- data/src/core/lib/iomgr/iomgr.h +4 -0
- data/src/core/lib/iomgr/iomgr_custom.cc +3 -1
- data/src/core/lib/iomgr/iomgr_internal.cc +4 -0
- data/src/core/lib/iomgr/iomgr_internal.h +4 -0
- data/src/core/lib/iomgr/iomgr_posix.cc +6 -1
- data/src/core/lib/iomgr/iomgr_windows.cc +4 -1
- data/src/core/lib/iomgr/port.h +1 -2
- data/src/core/lib/iomgr/resource_quota.cc +1 -0
- data/src/core/lib/iomgr/sockaddr_utils.cc +1 -0
- data/src/core/lib/iomgr/tcp_custom.cc +4 -1
- data/src/core/lib/iomgr/tcp_posix.cc +95 -35
- data/src/core/lib/iomgr/tcp_windows.cc +4 -1
- data/src/core/lib/iomgr/timer_manager.cc +6 -0
- data/src/core/lib/security/context/security_context.cc +75 -108
- data/src/core/lib/security/context/security_context.h +59 -35
- data/src/core/lib/security/credentials/alts/alts_credentials.cc +36 -48
- data/src/core/lib/security/credentials/alts/alts_credentials.h +37 -10
- data/src/core/lib/security/credentials/composite/composite_credentials.cc +97 -157
- data/src/core/lib/security/credentials/composite/composite_credentials.h +60 -24
- data/src/core/lib/security/credentials/credentials.cc +18 -142
- data/src/core/lib/security/credentials/credentials.h +119 -95
- data/src/core/lib/security/credentials/fake/fake_credentials.cc +46 -71
- data/src/core/lib/security/credentials/fake/fake_credentials.h +23 -5
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +144 -51
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +28 -5
- data/src/core/lib/security/credentials/iam/iam_credentials.cc +27 -35
- data/src/core/lib/security/credentials/iam/iam_credentials.h +18 -4
- data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +60 -69
- data/src/core/lib/security/credentials/jwt/jwt_credentials.h +29 -10
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -0
- data/src/core/lib/security/credentials/local/local_credentials.cc +19 -32
- data/src/core/lib/security/credentials/local/local_credentials.h +32 -11
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +130 -149
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +74 -29
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +59 -77
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +40 -17
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +66 -83
- data/src/core/lib/security/credentials/ssl/ssl_credentials.h +58 -15
- data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +152 -177
- data/src/core/lib/security/security_connector/alts/alts_security_connector.h +12 -10
- data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +210 -215
- data/src/core/lib/security/security_connector/fake/fake_security_connector.h +9 -6
- data/src/core/lib/security/security_connector/local/local_security_connector.cc +176 -169
- data/src/core/lib/security/security_connector/local/local_security_connector.h +10 -9
- data/src/core/lib/security/security_connector/security_connector.cc +41 -124
- data/src/core/lib/security/security_connector/security_connector.h +102 -105
- data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +348 -370
- data/src/core/lib/security/security_connector/ssl/ssl_security_connector.h +14 -12
- data/src/core/lib/security/security_connector/ssl_utils.cc +13 -9
- data/src/core/lib/security/security_connector/ssl_utils.h +3 -1
- data/src/core/lib/security/transport/client_auth_filter.cc +50 -50
- data/src/core/lib/security/transport/secure_endpoint.cc +7 -1
- data/src/core/lib/security/transport/security_handshaker.cc +82 -66
- data/src/core/lib/security/transport/server_auth_filter.cc +15 -13
- data/src/core/lib/surface/init.cc +1 -0
- data/src/core/lib/surface/server.cc +13 -11
- data/src/core/lib/surface/server.h +6 -6
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/metadata.cc +1 -0
- data/src/core/lib/transport/static_metadata.cc +228 -221
- data/src/core/lib/transport/static_metadata.h +75 -71
- data/src/core/lib/transport/transport.cc +2 -1
- data/src/core/lib/transport/transport.h +5 -1
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +9 -2
- data/src/core/tsi/ssl_transport_security.cc +35 -24
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +1 -1
- data/src/ruby/lib/grpc/generic/rpc_server.rb +61 -0
- data/src/ruby/lib/grpc/generic/service.rb +1 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/grpc/health/checker.rb +2 -3
- data/src/ruby/spec/generic/rpc_server_spec.rb +22 -0
- data/src/ruby/spec/support/services.rb +1 -0
- metadata +37 -32
- data/src/core/ext/filters/client_channel/lb_policy_factory.cc +0 -163
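
The hunks rendered below are a small excerpt of the full diff. The viewer dropped the per-file headers and cut several removed lines off mid-token; truncated lines are reproduced as they appear. From their contents, the excerpts come from the xds/grpclb balancer-channel sources, from the deleted lb_policy_factory.h, and from the new request_routing.cc (the final @@ -0,0 +1,936 @@ hunk matches the 936-line file listed above). Two themes dominate this release: the C-style grpc_lb_addresses API gives way to the C++ ServerAddressList (the new server_address.h/.cc), and the resolver/LB-policy plumbing moves out of client_channel.cc (882 lines removed there) into the new RequestRouter class.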
@@ -21,7 +21,7 @@
 
 #include <grpc/support/port_platform.h>
 
-#include
+#include <grpc/impl/codegen/grpc_types.h>
 
 /// Makes any necessary modifications to \a args for use in the xds
 /// balancer channel.
@@ -25,6 +25,7 @@
 #include <string.h>
 
 #include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/ext/filters/client_channel/server_address.h"
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -41,22 +42,23 @@ int BalancerNameCmp(const grpc_core::UniquePtr<char>& a,
 }
 
 RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable(
-
+    const ServerAddressList& addresses) {
   TargetAuthorityTable::Entry* target_authority_entries =
-      static_cast<TargetAuthorityTable::Entry*>(
-          sizeof(*target_authority_entries) * addresses
-  for (size_t i = 0; i < addresses
+      static_cast<TargetAuthorityTable::Entry*>(
+          gpr_zalloc(sizeof(*target_authority_entries) * addresses.size()));
+  for (size_t i = 0; i < addresses.size(); ++i) {
     char* addr_str;
-    GPR_ASSERT(
-
+    GPR_ASSERT(
+        grpc_sockaddr_to_string(&addr_str, &addresses[i].address(), true) > 0);
     target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str);
-    target_authority_entries[i].value.reset(
-        gpr_strdup(addresses->addresses[i].balancer_name));
     gpr_free(addr_str);
+    char* balancer_name = grpc_channel_arg_get_string(grpc_channel_args_find(
+        addresses[i].args(), GRPC_ARG_ADDRESS_BALANCER_NAME));
+    target_authority_entries[i].value.reset(gpr_strdup(balancer_name));
   }
   RefCountedPtr<TargetAuthorityTable> target_authority_table =
-      TargetAuthorityTable::Create(addresses
-
+      TargetAuthorityTable::Create(addresses.size(), target_authority_entries,
+                                   BalancerNameCmp);
   gpr_free(target_authority_entries);
   return target_authority_table;
 }
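This hunk shows the core of the migration: the balancer name used for secure naming no longer lives in a grpc_lb_address struct field; it is carried as a per-address channel arg under GRPC_ARG_ADDRESS_BALANCER_NAME and read back through the generic channel-arg helpers. A minimal sketch of that lookup pattern, using only calls that appear in the hunk (GetBalancerName is a hypothetical helper name, and the includes are assumed):

    // Assumed includes: server_address.h and channel_args.h from src/core.
    // Returns the balancer name attached to one resolved address, or nullptr
    // if the arg is absent or not a string.
    char* GetBalancerName(const grpc_core::ServerAddress& address) {
      const grpc_arg* arg =
          grpc_channel_args_find(address.args(), GRPC_ARG_ADDRESS_BALANCER_NAME);
      return grpc_channel_arg_get_string(arg);
    }

Because the name now rides on the address's own grpc_channel_args, any code that already has a ServerAddress can recover it without the dedicated grpc_lb_addresses accessors that this release deletes.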
@@ -71,13 +73,12 @@ grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args(
   grpc_arg args_to_add[2];
   size_t num_args_to_add = 0;
   // Add arg for targets info table.
-
-
-  GPR_ASSERT(
-  grpc_lb_addresses* addresses =
-      static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
+  grpc_core::ServerAddressList* addresses =
+      grpc_core::FindServerAddressListChannelArg(args);
+  GPR_ASSERT(addresses != nullptr);
   grpc_core::RefCountedPtr<grpc_core::TargetAuthorityTable>
-      target_authority_table =
+      target_authority_table =
+          grpc_core::CreateTargetAuthorityTable(*addresses);
   args_to_add[num_args_to_add++] =
       grpc_core::CreateTargetAuthorityTableChannelArg(
          target_authority_table.get());
@@ -86,22 +87,18 @@ grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args(
   // bearer token credentials.
   grpc_channel_credentials* channel_credentials =
       grpc_channel_credentials_find_in_args(args);
-  grpc_channel_credentials
+  grpc_core::RefCountedPtr<grpc_channel_credentials> creds_sans_call_creds;
   if (channel_credentials != nullptr) {
     creds_sans_call_creds =
-
-        channel_credentials);
+        channel_credentials->duplicate_without_call_credentials();
     GPR_ASSERT(creds_sans_call_creds != nullptr);
     args_to_remove[num_args_to_remove++] = GRPC_ARG_CHANNEL_CREDENTIALS;
     args_to_add[num_args_to_add++] =
-        grpc_channel_credentials_to_arg(creds_sans_call_creds);
+        grpc_channel_credentials_to_arg(creds_sans_call_creds.get());
   }
   grpc_channel_args* result = grpc_channel_args_copy_and_add_and_remove(
       args, args_to_remove, num_args_to_remove, args_to_add, num_args_to_add);
   // Clean up.
   grpc_channel_args_destroy(args);
-  if (creds_sans_call_creds != nullptr) {
-    grpc_channel_credentials_unref(creds_sans_call_creds);
-  }
   return result;
 }
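The credentials hunk above follows the ownership cleanup that runs through the whole security tree in this release (see the credentials/ and security_connector/ entries in the file list): creds_sans_call_creds changes from a raw grpc_channel_credentials* that had to be explicitly unref'd to a grpc_core::RefCountedPtr, which releases its ref when it goes out of scope; that is why the manual cleanup block at the end of the function could be deleted. A minimal sketch of the pattern, using only calls visible in the hunk:

    {
      grpc_core::RefCountedPtr<grpc_channel_credentials> creds_sans_call_creds =
          channel_credentials->duplicate_without_call_credentials();
      // Borrow a raw pointer where the C API expects grpc_channel_credentials*.
      grpc_arg arg =
          grpc_channel_credentials_to_arg(creds_sans_call_creds.get());
      // ... use arg while creds_sans_call_creds is alive ...
    }  // ref dropped here automatically; no grpc_channel_credentials_unref()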
@@ -25,7 +25,7 @@
 
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"
 #include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h"
-#include "src/core/
+#include "src/core/lib/iomgr/exec_ctx.h"
 
 #define XDS_SERVICE_NAME_MAX_LENGTH 128
 
@@ -21,91 +21,9 @@
 
 #include <grpc/support/port_platform.h>
 
-#include "src/core/lib/iomgr/resolve_address.h"
-
-#include "src/core/ext/filters/client_channel/client_channel_factory.h"
 #include "src/core/ext/filters/client_channel/lb_policy.h"
-#include "src/core/lib/
-
-//
-// representation of an LB address
-//
-
-// Channel arg key for grpc_lb_addresses.
-#define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses"
-
-/** A resolved address alongside any LB related information associated with it.
- * \a user_data, if not NULL, contains opaque data meant to be consumed by the
- * gRPC LB policy. Note that no all LB policies support \a user_data as input.
- * Those who don't will simply ignore it and will correspondingly return NULL in
- * their namesake pick() output argument. */
-// TODO(roth): Once we figure out a better way of handling user_data in
-// LB policies, convert these structs to C++ classes.
-typedef struct grpc_lb_address {
-  grpc_resolved_address address;
-  bool is_balancer;
-  char* balancer_name; /* For secure naming. */
-  void* user_data;
-} grpc_lb_address;
-
-typedef struct grpc_lb_user_data_vtable {
-  void* (*copy)(void*);
-  void (*destroy)(void*);
-  int (*cmp)(void*, void*);
-} grpc_lb_user_data_vtable;
-
-typedef struct grpc_lb_addresses {
-  size_t num_addresses;
-  grpc_lb_address* addresses;
-  const grpc_lb_user_data_vtable* user_data_vtable;
-} grpc_lb_addresses;
-
-/** Returns a grpc_addresses struct with enough space for
-    \a num_addresses addresses. The \a user_data_vtable argument may be
-    NULL if no user data will be added. */
-grpc_lb_addresses* grpc_lb_addresses_create(
-    size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable);
-
-/** Creates a copy of \a addresses. */
-grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses);
-
-/** Sets the value of the address at index \a index of \a addresses.
- * \a address is a socket address of length \a address_len. */
-void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index,
-                                   const void* address, size_t address_len,
-                                   bool is_balancer, const char* balancer_name,
-                                   void* user_data);
-
-/** Sets the value of the address at index \a index of \a addresses from \a uri.
- * Returns true upon success, false otherwise. */
-bool grpc_lb_addresses_set_address_from_uri(grpc_lb_addresses* addresses,
-                                            size_t index, const grpc_uri* uri,
-                                            bool is_balancer,
-                                            const char* balancer_name,
-                                            void* user_data);
-
-/** Compares \a addresses1 and \a addresses2. */
-int grpc_lb_addresses_cmp(const grpc_lb_addresses* addresses1,
-                          const grpc_lb_addresses* addresses2);
-
-/** Destroys \a addresses. */
-void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses);
-
-/** Returns a channel arg containing \a addresses. */
-grpc_arg grpc_lb_addresses_create_channel_arg(
-    const grpc_lb_addresses* addresses);
-
-/** Returns the \a grpc_lb_addresses instance in \a channel_args or NULL */
-grpc_lb_addresses* grpc_lb_addresses_find_channel_arg(
-    const grpc_channel_args* channel_args);
-
-// Returns true if addresses contains at least one balancer address.
-bool grpc_lb_addresses_contains_balancer_address(
-    const grpc_lb_addresses& addresses);
-
-//
-// LB policy factory
-//
+#include "src/core/lib/gprpp/abstract.h"
+#include "src/core/lib/gprpp/orphanable.h"
 
 namespace grpc_core {
 
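This hunk deletes the entire grpc_lb_addresses C API from lb_policy_factory.h: the struct, the user-data vtable, the grpc.lb_addresses channel arg, and the create/copy/set/cmp/destroy/find helpers. Its replacement is the ServerAddressList introduced in the new server_address.h (+108 lines). Based only on the accessors this diff itself uses (FindServerAddressListChannelArg, size(), operator[], address(), args()), consuming code now iterates roughly as in the sketch below; LogResolvedAddresses is a hypothetical helper, not code from the release:

    // Assumed includes: server_address.h, sockaddr_utils.h, <grpc/support/log.h>.
    void LogResolvedAddresses(const grpc_channel_args* channel_args) {
      const grpc_core::ServerAddressList* addresses =
          grpc_core::FindServerAddressListChannelArg(channel_args);
      if (addresses == nullptr) return;  // no address list attached
      for (size_t i = 0; i < addresses->size(); ++i) {
        char* addr_str;
        GPR_ASSERT(grpc_sockaddr_to_string(&addr_str, &(*addresses)[i].address(),
                                           true) > 0);
        gpr_log(GPR_INFO, "resolved address: %s", addr_str);
        gpr_free(addr_str);
      }
    }

Note that is_balancer and balancer_name disappear as struct fields; the earlier hunks on this page show balancer_name re-appearing as a channel arg attached to each individual address.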
@@ -0,0 +1,936 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/request_routing.h"
+
+#include <inttypes.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/string_util.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/ext/filters/client_channel/backup_poller.h"
+#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
+#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
+#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
+#include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/ext/filters/client_channel/retry_throttle.h"
+#include "src/core/ext/filters/client_channel/server_address.h"
+#include "src/core/ext/filters/client_channel/subchannel.h"
+#include "src/core/ext/filters/deadline/deadline_filter.h"
+#include "src/core/lib/backoff/backoff.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/channel/status_util.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
+#include "src/core/lib/gprpp/manual_constructor.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/iomgr.h"
+#include "src/core/lib/iomgr/polling_entity.h"
+#include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/transport/connectivity_state.h"
+#include "src/core/lib/transport/error_utils.h"
+#include "src/core/lib/transport/metadata.h"
+#include "src/core/lib/transport/metadata_batch.h"
+#include "src/core/lib/transport/service_config.h"
+#include "src/core/lib/transport/static_metadata.h"
+#include "src/core/lib/transport/status_metadata.h"
+
+namespace grpc_core {
+
+//
+// RequestRouter::Request::ResolverResultWaiter
+//
+
+// Handles waiting for a resolver result.
+// Used only for the first call on an idle channel.
+class RequestRouter::Request::ResolverResultWaiter {
+ public:
+  explicit ResolverResultWaiter(Request* request)
+      : request_router_(request->request_router_),
+        request_(request),
+        tracer_enabled_(request_router_->tracer_->enabled()) {
+    if (tracer_enabled_) {
+      gpr_log(GPR_INFO,
+              "request_router=%p request=%p: deferring pick pending resolver "
+              "result",
+              request_router_, request);
+    }
+    // Add closure to be run when a resolver result is available.
+    GRPC_CLOSURE_INIT(&done_closure_, &DoneLocked, this,
+                      grpc_combiner_scheduler(request_router_->combiner_));
+    AddToWaitingList();
+    // Set cancellation closure, so that we abort if the call is cancelled.
+    GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
+                      grpc_combiner_scheduler(request_router_->combiner_));
+    grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
+                                            &cancel_closure_);
+  }
+
+ private:
+  // Adds done_closure_ to
+  // request_router_->waiting_for_resolver_result_closures_.
+  void AddToWaitingList() {
+    grpc_closure_list_append(
+        &request_router_->waiting_for_resolver_result_closures_, &done_closure_,
+        GRPC_ERROR_NONE);
+  }
+
+  // Invoked when a resolver result is available.
+  static void DoneLocked(void* arg, grpc_error* error) {
+    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
+    RequestRouter* request_router = self->request_router_;
+    // If CancelLocked() has already run, delete ourselves without doing
+    // anything. Note that the call stack may have already been destroyed,
+    // so it's not safe to access anything in state_.
+    if (GPR_UNLIKELY(self->finished_)) {
+      if (self->tracer_enabled_) {
+        gpr_log(GPR_INFO,
+                "request_router=%p: call cancelled before resolver result",
+                request_router);
+      }
+      Delete(self);
+      return;
+    }
+    // Otherwise, process the resolver result.
+    Request* request = self->request_;
+    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
+      if (self->tracer_enabled_) {
+        gpr_log(GPR_INFO,
+                "request_router=%p request=%p: resolver failed to return data",
+                request_router, request);
+      }
+      GRPC_CLOSURE_RUN(request->on_route_done_, GRPC_ERROR_REF(error));
+    } else if (GPR_UNLIKELY(request_router->resolver_ == nullptr)) {
+      // Shutting down.
+      if (self->tracer_enabled_) {
+        gpr_log(GPR_INFO, "request_router=%p request=%p: resolver disconnected",
+                request_router, request);
+      }
+      GRPC_CLOSURE_RUN(request->on_route_done_,
+                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+    } else if (GPR_UNLIKELY(request_router->lb_policy_ == nullptr)) {
+      // Transient resolver failure.
+      // If call has wait_for_ready=true, try again; otherwise, fail.
+      if (*request->pick_.initial_metadata_flags &
+          GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
+        if (self->tracer_enabled_) {
+          gpr_log(GPR_INFO,
+                  "request_router=%p request=%p: resolver returned but no LB "
+                  "policy; wait_for_ready=true; trying again",
+                  request_router, request);
+        }
+        // Re-add ourselves to the waiting list.
+        self->AddToWaitingList();
+        // Return early so that we don't set finished_ to true below.
+        return;
+      } else {
+        if (self->tracer_enabled_) {
+          gpr_log(GPR_INFO,
+                  "request_router=%p request=%p: resolver returned but no LB "
+                  "policy; wait_for_ready=false; failing",
+                  request_router, request);
+        }
+        GRPC_CLOSURE_RUN(
+            request->on_route_done_,
+            grpc_error_set_int(
+                GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
+                GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
+      }
+    } else {
+      if (self->tracer_enabled_) {
+        gpr_log(GPR_INFO,
+                "request_router=%p request=%p: resolver returned, doing LB "
+                "pick",
+                request_router, request);
+      }
+      request->ProcessServiceConfigAndStartLbPickLocked();
+    }
+    self->finished_ = true;
+  }
+
+  // Invoked when the call is cancelled.
+  // Note: This runs under the client_channel combiner, but will NOT be
+  // holding the call combiner.
+  static void CancelLocked(void* arg, grpc_error* error) {
+    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
+    RequestRouter* request_router = self->request_router_;
+    // If DoneLocked() has already run, delete ourselves without doing anything.
+    if (self->finished_) {
+      Delete(self);
+      return;
+    }
+    Request* request = self->request_;
+    // If we are being cancelled, immediately invoke on_route_done_
+    // to propagate the error back to the caller.
+    if (error != GRPC_ERROR_NONE) {
+      if (self->tracer_enabled_) {
+        gpr_log(GPR_INFO,
+                "request_router=%p request=%p: cancelling call waiting for "
+                "name resolution",
+                request_router, request);
+      }
+      // Note: Although we are not in the call combiner here, we are
+      // basically stealing the call combiner from the pending pick, so
+      // it's safe to run on_route_done_ here -- we are essentially
+      // calling it here instead of calling it in DoneLocked().
+      GRPC_CLOSURE_RUN(request->on_route_done_,
+                       GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                           "Pick cancelled", &error, 1));
+    }
+    self->finished_ = true;
+  }
+
+  RequestRouter* request_router_;
+  Request* request_;
+  const bool tracer_enabled_;
+  grpc_closure done_closure_;
+  grpc_closure cancel_closure_;
+  bool finished_ = false;
+};
+
+//
+// RequestRouter::Request::AsyncPickCanceller
+//
+
+// Handles the call combiner cancellation callback for an async LB pick.
+class RequestRouter::Request::AsyncPickCanceller {
+ public:
+  explicit AsyncPickCanceller(Request* request)
+      : request_router_(request->request_router_),
+        request_(request),
+        tracer_enabled_(request_router_->tracer_->enabled()) {
+    GRPC_CALL_STACK_REF(request->owning_call_, "pick_callback_cancel");
+    // Set cancellation closure, so that we abort if the call is cancelled.
+    GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
+                      grpc_combiner_scheduler(request_router_->combiner_));
+    grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
+                                            &cancel_closure_);
+  }
+
+  void MarkFinishedLocked() {
+    finished_ = true;
+    GRPC_CALL_STACK_UNREF(request_->owning_call_, "pick_callback_cancel");
+  }
+
+ private:
+  // Invoked when the call is cancelled.
+  // Note: This runs under the client_channel combiner, but will NOT be
+  // holding the call combiner.
+  static void CancelLocked(void* arg, grpc_error* error) {
+    AsyncPickCanceller* self = static_cast<AsyncPickCanceller*>(arg);
+    Request* request = self->request_;
+    RequestRouter* request_router = self->request_router_;
+    if (!self->finished_) {
+      // Note: request_router->lb_policy_ may have changed since we started our
+      // pick, in which case we will be cancelling the pick on a policy other
+      // than the one we started it on. However, this will just be a no-op.
+      if (error != GRPC_ERROR_NONE && request_router->lb_policy_ != nullptr) {
+        if (self->tracer_enabled_) {
+          gpr_log(GPR_INFO,
+                  "request_router=%p request=%p: cancelling pick from LB "
+                  "policy %p",
+                  request_router, request, request_router->lb_policy_.get());
+        }
+        request_router->lb_policy_->CancelPickLocked(&request->pick_,
+                                                     GRPC_ERROR_REF(error));
+      }
+      request->pick_canceller_ = nullptr;
+      GRPC_CALL_STACK_UNREF(request->owning_call_, "pick_callback_cancel");
+    }
+    Delete(self);
+  }
+
+  RequestRouter* request_router_;
+  Request* request_;
+  const bool tracer_enabled_;
+  grpc_closure cancel_closure_;
+  bool finished_ = false;
+};
+
+//
+// RequestRouter::Request
+//
+
+RequestRouter::Request::Request(grpc_call_stack* owning_call,
+                                grpc_call_combiner* call_combiner,
+                                grpc_polling_entity* pollent,
+                                grpc_metadata_batch* send_initial_metadata,
+                                uint32_t* send_initial_metadata_flags,
+                                ApplyServiceConfigCallback apply_service_config,
+                                void* apply_service_config_user_data,
+                                grpc_closure* on_route_done)
+    : owning_call_(owning_call),
+      call_combiner_(call_combiner),
+      pollent_(pollent),
+      apply_service_config_(apply_service_config),
+      apply_service_config_user_data_(apply_service_config_user_data),
+      on_route_done_(on_route_done) {
+  pick_.initial_metadata = send_initial_metadata;
+  pick_.initial_metadata_flags = send_initial_metadata_flags;
+}
+
+RequestRouter::Request::~Request() {
+  if (pick_.connected_subchannel != nullptr) {
+    pick_.connected_subchannel.reset();
+  }
+  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
+    if (pick_.subchannel_call_context[i].destroy != nullptr) {
+      pick_.subchannel_call_context[i].destroy(
+          pick_.subchannel_call_context[i].value);
+    }
+  }
+}
+
+// Invoked once resolver results are available.
+void RequestRouter::Request::ProcessServiceConfigAndStartLbPickLocked() {
+  // Get service config data if needed.
+  if (!apply_service_config_(apply_service_config_user_data_)) return;
+  // Start LB pick.
+  StartLbPickLocked();
+}
+
+void RequestRouter::Request::MaybeAddCallToInterestedPartiesLocked() {
+  if (!pollent_added_to_interested_parties_) {
+    pollent_added_to_interested_parties_ = true;
+    grpc_polling_entity_add_to_pollset_set(
+        pollent_, request_router_->interested_parties_);
+  }
+}
+
+void RequestRouter::Request::MaybeRemoveCallFromInterestedPartiesLocked() {
+  if (pollent_added_to_interested_parties_) {
+    pollent_added_to_interested_parties_ = false;
+    grpc_polling_entity_del_from_pollset_set(
+        pollent_, request_router_->interested_parties_);
+  }
+}
+
+// Starts a pick on the LB policy.
+void RequestRouter::Request::StartLbPickLocked() {
+  if (request_router_->tracer_->enabled()) {
+    gpr_log(GPR_INFO,
+            "request_router=%p request=%p: starting pick on lb_policy=%p",
+            request_router_, this, request_router_->lb_policy_.get());
+  }
+  GRPC_CLOSURE_INIT(&on_pick_done_, &LbPickDoneLocked, this,
+                    grpc_combiner_scheduler(request_router_->combiner_));
+  pick_.on_complete = &on_pick_done_;
+  GRPC_CALL_STACK_REF(owning_call_, "pick_callback");
+  grpc_error* error = GRPC_ERROR_NONE;
+  const bool pick_done =
+      request_router_->lb_policy_->PickLocked(&pick_, &error);
+  if (pick_done) {
+    // Pick completed synchronously.
+    if (request_router_->tracer_->enabled()) {
+      gpr_log(GPR_INFO,
+              "request_router=%p request=%p: pick completed synchronously",
+              request_router_, this);
+    }
+    GRPC_CLOSURE_RUN(on_route_done_, error);
+    GRPC_CALL_STACK_UNREF(owning_call_, "pick_callback");
+  } else {
+    // Pick will be returned asynchronously.
+    // Add the request's polling entity to the request_router's
+    // interested_parties, so that the I/O of the LB policy can be done
+    // under it. It will be removed in LbPickDoneLocked().
+    MaybeAddCallToInterestedPartiesLocked();
+    // Request notification on call cancellation.
+    // We allocate a separate object to track cancellation, since the
+    // cancellation closure might still be pending when we need to reuse
+    // the memory in which this Request object is stored for a subsequent
+    // retry attempt.
+    pick_canceller_ = New<AsyncPickCanceller>(this);
+  }
+}
+
+// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
+// Unrefs the LB policy and invokes on_route_done_.
+void RequestRouter::Request::LbPickDoneLocked(void* arg, grpc_error* error) {
+  Request* self = static_cast<Request*>(arg);
+  RequestRouter* request_router = self->request_router_;
+  if (request_router->tracer_->enabled()) {
+    gpr_log(GPR_INFO,
+            "request_router=%p request=%p: pick completed asynchronously",
+            request_router, self);
+  }
+  self->MaybeRemoveCallFromInterestedPartiesLocked();
+  if (self->pick_canceller_ != nullptr) {
+    self->pick_canceller_->MarkFinishedLocked();
+  }
+  GRPC_CLOSURE_RUN(self->on_route_done_, GRPC_ERROR_REF(error));
+  GRPC_CALL_STACK_UNREF(self->owning_call_, "pick_callback");
+}
+
+//
+// RequestRouter::LbConnectivityWatcher
+//
+
+class RequestRouter::LbConnectivityWatcher {
+ public:
+  LbConnectivityWatcher(RequestRouter* request_router,
+                        grpc_connectivity_state state,
+                        LoadBalancingPolicy* lb_policy,
+                        grpc_channel_stack* owning_stack,
+                        grpc_combiner* combiner)
+      : request_router_(request_router),
+        state_(state),
+        lb_policy_(lb_policy),
+        owning_stack_(owning_stack) {
+    GRPC_CHANNEL_STACK_REF(owning_stack_, "LbConnectivityWatcher");
+    GRPC_CLOSURE_INIT(&on_changed_, &OnLbPolicyStateChangedLocked, this,
+                      grpc_combiner_scheduler(combiner));
+    lb_policy_->NotifyOnStateChangeLocked(&state_, &on_changed_);
+  }
+
+  ~LbConnectivityWatcher() {
+    GRPC_CHANNEL_STACK_UNREF(owning_stack_, "LbConnectivityWatcher");
+  }
+
+ private:
+  static void OnLbPolicyStateChangedLocked(void* arg, grpc_error* error) {
+    LbConnectivityWatcher* self = static_cast<LbConnectivityWatcher*>(arg);
+    // If the notification is not for the current policy, we're stale,
+    // so delete ourselves.
+    if (self->lb_policy_ != self->request_router_->lb_policy_.get()) {
+      Delete(self);
+      return;
+    }
+    // Otherwise, process notification.
+    if (self->request_router_->tracer_->enabled()) {
+      gpr_log(GPR_INFO, "request_router=%p: lb_policy=%p state changed to %s",
+              self->request_router_, self->lb_policy_,
+              grpc_connectivity_state_name(self->state_));
+    }
+    self->request_router_->SetConnectivityStateLocked(
+        self->state_, GRPC_ERROR_REF(error), "lb_changed");
+    // If shutting down, terminate watch.
+    if (self->state_ == GRPC_CHANNEL_SHUTDOWN) {
+      Delete(self);
+      return;
+    }
+    // Renew watch.
+    self->lb_policy_->NotifyOnStateChangeLocked(&self->state_,
+                                                &self->on_changed_);
+  }
+
+  RequestRouter* request_router_;
+  grpc_connectivity_state state_;
+  // LB policy address. No ref held, so not safe to dereference unless
+  // it happens to match request_router->lb_policy_.
+  LoadBalancingPolicy* lb_policy_;
+  grpc_channel_stack* owning_stack_;
+  grpc_closure on_changed_;
+};
+
+//
+// RequestRounter::ReresolutionRequestHandler
+//
+
+class RequestRouter::ReresolutionRequestHandler {
+ public:
+  ReresolutionRequestHandler(RequestRouter* request_router,
+                             LoadBalancingPolicy* lb_policy,
+                             grpc_channel_stack* owning_stack,
+                             grpc_combiner* combiner)
+      : request_router_(request_router),
+        lb_policy_(lb_policy),
+        owning_stack_(owning_stack) {
+    GRPC_CHANNEL_STACK_REF(owning_stack_, "ReresolutionRequestHandler");
+    GRPC_CLOSURE_INIT(&closure_, &OnRequestReresolutionLocked, this,
+                      grpc_combiner_scheduler(combiner));
+    lb_policy_->SetReresolutionClosureLocked(&closure_);
+  }
+
+ private:
+  static void OnRequestReresolutionLocked(void* arg, grpc_error* error) {
+    ReresolutionRequestHandler* self =
+        static_cast<ReresolutionRequestHandler*>(arg);
+    RequestRouter* request_router = self->request_router_;
+    // If this invocation is for a stale LB policy, treat it as an LB shutdown
+    // signal.
+    if (self->lb_policy_ != request_router->lb_policy_.get() ||
+        error != GRPC_ERROR_NONE || request_router->resolver_ == nullptr) {
+      GRPC_CHANNEL_STACK_UNREF(request_router->owning_stack_,
+                               "ReresolutionRequestHandler");
+      Delete(self);
+      return;
+    }
+    if (request_router->tracer_->enabled()) {
+      gpr_log(GPR_INFO, "request_router=%p: started name re-resolving",
+              request_router);
+    }
+    request_router->resolver_->RequestReresolutionLocked();
+    // Give back the closure to the LB policy.
+    self->lb_policy_->SetReresolutionClosureLocked(&self->closure_);
+  }
+
+  RequestRouter* request_router_;
+  // LB policy address. No ref held, so not safe to dereference unless
+  // it happens to match request_router->lb_policy_.
+  LoadBalancingPolicy* lb_policy_;
+  grpc_channel_stack* owning_stack_;
+  grpc_closure closure_;
+};
+
+//
+// RequestRouter
+//
+
+RequestRouter::RequestRouter(
+    grpc_channel_stack* owning_stack, grpc_combiner* combiner,
+    grpc_client_channel_factory* client_channel_factory,
+    grpc_pollset_set* interested_parties, TraceFlag* tracer,
+    ProcessResolverResultCallback process_resolver_result,
+    void* process_resolver_result_user_data, const char* target_uri,
+    const grpc_channel_args* args, grpc_error** error)
+    : owning_stack_(owning_stack),
+      combiner_(combiner),
+      client_channel_factory_(client_channel_factory),
+      interested_parties_(interested_parties),
+      tracer_(tracer),
+      process_resolver_result_(process_resolver_result),
+      process_resolver_result_user_data_(process_resolver_result_user_data) {
+  GRPC_CLOSURE_INIT(&on_resolver_result_changed_,
+                    &RequestRouter::OnResolverResultChangedLocked, this,
+                    grpc_combiner_scheduler(combiner));
+  grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
+                               "request_router");
+  grpc_channel_args* new_args = nullptr;
+  if (process_resolver_result == nullptr) {
+    grpc_arg arg = grpc_channel_arg_integer_create(
+        const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION), 0);
+    new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
+  }
+  resolver_ = ResolverRegistry::CreateResolver(
+      target_uri, (new_args == nullptr ? args : new_args), interested_parties_,
+      combiner_);
+  grpc_channel_args_destroy(new_args);
+  if (resolver_ == nullptr) {
+    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
+  }
+}
+
+RequestRouter::~RequestRouter() {
+  if (resolver_ != nullptr) {
+    // The only way we can get here is if we never started resolving,
+    // because we take a ref to the channel stack when we start
+    // resolving and do not release it until the resolver callback is
+    // invoked after the resolver shuts down.
+    resolver_.reset();
+  }
+  if (lb_policy_ != nullptr) {
+    grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
+                                     interested_parties_);
+    lb_policy_.reset();
+  }
+  if (client_channel_factory_ != nullptr) {
+    grpc_client_channel_factory_unref(client_channel_factory_);
+  }
+  grpc_connectivity_state_destroy(&state_tracker_);
+}
+
+namespace {
+
+const char* GetChannelConnectivityStateChangeString(
+    grpc_connectivity_state state) {
+  switch (state) {
+    case GRPC_CHANNEL_IDLE:
+      return "Channel state change to IDLE";
+    case GRPC_CHANNEL_CONNECTING:
+      return "Channel state change to CONNECTING";
+    case GRPC_CHANNEL_READY:
+      return "Channel state change to READY";
+    case GRPC_CHANNEL_TRANSIENT_FAILURE:
+      return "Channel state change to TRANSIENT_FAILURE";
+    case GRPC_CHANNEL_SHUTDOWN:
+      return "Channel state change to SHUTDOWN";
+  }
+  GPR_UNREACHABLE_CODE(return "UNKNOWN");
+}
+
+}  // namespace
+
+void RequestRouter::SetConnectivityStateLocked(grpc_connectivity_state state,
+                                               grpc_error* error,
+                                               const char* reason) {
+  if (lb_policy_ != nullptr) {
+    if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+      // Cancel picks with wait_for_ready=false.
+      lb_policy_->CancelMatchingPicksLocked(
+          /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
+          /* check= */ 0, GRPC_ERROR_REF(error));
+    } else if (state == GRPC_CHANNEL_SHUTDOWN) {
+      // Cancel all picks.
+      lb_policy_->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
+                                            GRPC_ERROR_REF(error));
+    }
+  }
+  if (tracer_->enabled()) {
+    gpr_log(GPR_INFO, "request_router=%p: setting connectivity state to %s",
+            this, grpc_connectivity_state_name(state));
+  }
+  if (channelz_node_ != nullptr) {
+    channelz_node_->AddTraceEvent(
+        channelz::ChannelTrace::Severity::Info,
+        grpc_slice_from_static_string(
+            GetChannelConnectivityStateChangeString(state)));
+  }
+  grpc_connectivity_state_set(&state_tracker_, state, error, reason);
+}
+
+void RequestRouter::StartResolvingLocked() {
+  if (tracer_->enabled()) {
+    gpr_log(GPR_INFO, "request_router=%p: starting name resolution", this);
+  }
+  GPR_ASSERT(!started_resolving_);
+  started_resolving_ = true;
+  GRPC_CHANNEL_STACK_REF(owning_stack_, "resolver");
+  resolver_->NextLocked(&resolver_result_, &on_resolver_result_changed_);
+}
+
+// Invoked from the resolver NextLocked() callback when the resolver
+// is shutting down.
+void RequestRouter::OnResolverShutdownLocked(grpc_error* error) {
+  if (tracer_->enabled()) {
+    gpr_log(GPR_INFO, "request_router=%p: shutting down", this);
+  }
+  if (lb_policy_ != nullptr) {
+    if (tracer_->enabled()) {
+      gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
+              lb_policy_.get());
+    }
+    grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
+                                     interested_parties_);
+    lb_policy_.reset();
+  }
+  if (resolver_ != nullptr) {
+    // This should never happen; it can only be triggered by a resolver
+    // implementation spotaneously deciding to report shutdown without
+    // being orphaned. This code is included just to be defensive.
+    if (tracer_->enabled()) {
+      gpr_log(GPR_INFO,
+              "request_router=%p: spontaneous shutdown from resolver %p", this,
+              resolver_.get());
+    }
+    resolver_.reset();
+    SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN,
+                               GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                                   "Resolver spontaneous shutdown", &error, 1),
+                               "resolver_spontaneous_shutdown");
+  }
+  grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
+                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                                 "Channel disconnected", &error, 1));
+  GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
+  GRPC_CHANNEL_STACK_UNREF(owning_stack_, "resolver");
+  grpc_channel_args_destroy(resolver_result_);
+  resolver_result_ = nullptr;
+  GRPC_ERROR_UNREF(error);
+}
+
+// Creates a new LB policy, replacing any previous one.
+// If the new policy is created successfully, sets *connectivity_state and
+// *connectivity_error to its initial connectivity state; otherwise,
+// leaves them unchanged.
+void RequestRouter::CreateNewLbPolicyLocked(
+    const char* lb_policy_name, grpc_json* lb_config,
+    grpc_connectivity_state* connectivity_state,
+    grpc_error** connectivity_error, TraceStringVector* trace_strings) {
+  LoadBalancingPolicy::Args lb_policy_args;
+  lb_policy_args.combiner = combiner_;
+  lb_policy_args.client_channel_factory = client_channel_factory_;
+  lb_policy_args.args = resolver_result_;
+  lb_policy_args.lb_config = lb_config;
+  OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
+      LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(lb_policy_name,
+                                                             lb_policy_args);
+  if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
+    gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
+    if (channelz_node_ != nullptr) {
+      char* str;
+      gpr_asprintf(&str, "Could not create LB policy \'%s\'", lb_policy_name);
+      trace_strings->push_back(str);
+    }
+  } else {
+    if (tracer_->enabled()) {
+      gpr_log(GPR_INFO, "request_router=%p: created new LB policy \"%s\" (%p)",
+              this, lb_policy_name, new_lb_policy.get());
+    }
+    if (channelz_node_ != nullptr) {
+      char* str;
+      gpr_asprintf(&str, "Created new LB policy \'%s\'", lb_policy_name);
+      trace_strings->push_back(str);
+    }
+    // Swap out the LB policy and update the fds in interested_parties_.
+    if (lb_policy_ != nullptr) {
+      if (tracer_->enabled()) {
+        gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
+                lb_policy_.get());
+      }
+      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
+                                       interested_parties_);
+      lb_policy_->HandOffPendingPicksLocked(new_lb_policy.get());
+    }
+    lb_policy_ = std::move(new_lb_policy);
+    grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
+                                     interested_parties_);
+    // Create re-resolution request handler for the new LB policy. It
+    // will delete itself when no longer needed.
+    New<ReresolutionRequestHandler>(this, lb_policy_.get(), owning_stack_,
+                                    combiner_);
+    // Get the new LB policy's initial connectivity state and start a
+    // connectivity watch.
+    GRPC_ERROR_UNREF(*connectivity_error);
+    *connectivity_state =
+        lb_policy_->CheckConnectivityLocked(connectivity_error);
+    if (exit_idle_when_lb_policy_arrives_) {
+      lb_policy_->ExitIdleLocked();
+      exit_idle_when_lb_policy_arrives_ = false;
+    }
+    // Create new watcher. It will delete itself when done.
+    New<LbConnectivityWatcher>(this, *connectivity_state, lb_policy_.get(),
+                               owning_stack_, combiner_);
+  }
+}
+
+void RequestRouter::MaybeAddTraceMessagesForAddressChangesLocked(
+    TraceStringVector* trace_strings) {
+  const ServerAddressList* addresses =
+      FindServerAddressListChannelArg(resolver_result_);
+  const bool resolution_contains_addresses =
+      addresses != nullptr && addresses->size() > 0;
+  if (!resolution_contains_addresses &&
+      previous_resolution_contained_addresses_) {
+    trace_strings->push_back(gpr_strdup("Address list became empty"));
+  } else if (resolution_contains_addresses &&
+             !previous_resolution_contained_addresses_) {
+    trace_strings->push_back(gpr_strdup("Address list became non-empty"));
+  }
+  previous_resolution_contained_addresses_ = resolution_contains_addresses;
+}
+
+void RequestRouter::ConcatenateAndAddChannelTraceLocked(
+    TraceStringVector* trace_strings) const {
+  if (!trace_strings->empty()) {
+    gpr_strvec v;
+    gpr_strvec_init(&v);
+    gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
+    bool is_first = 1;
+    for (size_t i = 0; i < trace_strings->size(); ++i) {
+      if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
+      is_first = false;
+      gpr_strvec_add(&v, (*trace_strings)[i]);
+    }
+    char* flat;
+    size_t flat_len = 0;
+    flat = gpr_strvec_flatten(&v, &flat_len);
+    channelz_node_->AddTraceEvent(
+        grpc_core::channelz::ChannelTrace::Severity::Info,
+        grpc_slice_new(flat, flat_len, gpr_free));
+    gpr_strvec_destroy(&v);
+  }
+}
+
+// Callback invoked when a resolver result is available.
+void RequestRouter::OnResolverResultChangedLocked(void* arg,
+                                                  grpc_error* error) {
+  RequestRouter* self = static_cast<RequestRouter*>(arg);
+  if (self->tracer_->enabled()) {
+    const char* disposition =
+        self->resolver_result_ != nullptr
+            ? ""
+            : (error == GRPC_ERROR_NONE ? " (transient error)"
+                                        : " (resolver shutdown)");
+    gpr_log(GPR_INFO,
+            "request_router=%p: got resolver result: resolver_result=%p "
+            "error=%s%s",
+            self, self->resolver_result_, grpc_error_string(error),
+            disposition);
+  }
+  // Handle shutdown.
+  if (error != GRPC_ERROR_NONE || self->resolver_ == nullptr) {
+    self->OnResolverShutdownLocked(GRPC_ERROR_REF(error));
+    return;
+  }
+  // Data used to set the channel's connectivity state.
+  bool set_connectivity_state = true;
+  // We only want to trace the address resolution in the follow cases:
+  // (a) Address resolution resulted in service config change.
+  // (b) Address resolution that causes number of backends to go from
+  //     zero to non-zero.
+  // (c) Address resolution that causes number of backends to go from
+  //     non-zero to zero.
+  // (d) Address resolution that causes a new LB policy to be created.
+  //
+  // we track a list of strings to eventually be concatenated and traced.
+  TraceStringVector trace_strings;
+  grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+  grpc_error* connectivity_error =
+      GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
+  // resolver_result_ will be null in the case of a transient
+  // resolution error. In that case, we don't have any new result to
+  // process, which means that we keep using the previous result (if any).
+  if (self->resolver_result_ == nullptr) {
+    if (self->tracer_->enabled()) {
+      gpr_log(GPR_INFO, "request_router=%p: resolver transient failure", self);
+    }
+    // Don't override connectivity state if we already have an LB policy.
+    if (self->lb_policy_ != nullptr) set_connectivity_state = false;
+  } else {
+    // Parse the resolver result.
+    const char* lb_policy_name = nullptr;
+    grpc_json* lb_policy_config = nullptr;
+    const bool service_config_changed = self->process_resolver_result_(
+        self->process_resolver_result_user_data_, *self->resolver_result_,
+        &lb_policy_name, &lb_policy_config);
+    GPR_ASSERT(lb_policy_name != nullptr);
+    // Check to see if we're already using the right LB policy.
+    const bool lb_policy_name_changed =
+        self->lb_policy_ == nullptr ||
+        strcmp(self->lb_policy_->name(), lb_policy_name) != 0;
+    if (self->lb_policy_ != nullptr && !lb_policy_name_changed) {
+      // Continue using the same LB policy. Update with new addresses.
+      if (self->tracer_->enabled()) {
+        gpr_log(GPR_INFO,
+                "request_router=%p: updating existing LB policy \"%s\" (%p)",
+                self, lb_policy_name, self->lb_policy_.get());
+      }
+      self->lb_policy_->UpdateLocked(*self->resolver_result_, lb_policy_config);
+      // No need to set the channel's connectivity state; the existing
+      // watch on the LB policy will take care of that.
+      set_connectivity_state = false;
+    } else {
+      // Instantiate new LB policy.
+      self->CreateNewLbPolicyLocked(lb_policy_name, lb_policy_config,
+                                    &connectivity_state, &connectivity_error,
+                                    &trace_strings);
+    }
+    // Add channel trace event.
+    if (self->channelz_node_ != nullptr) {
+      if (service_config_changed) {
+        // TODO(ncteisen): might be worth somehow including a snippet of the
+        // config in the trace, at the risk of bloating the trace logs.
+        trace_strings.push_back(gpr_strdup("Service config changed"));
+      }
+      self->MaybeAddTraceMessagesForAddressChangesLocked(&trace_strings);
+      self->ConcatenateAndAddChannelTraceLocked(&trace_strings);
+    }
+    // Clean up.
+    grpc_channel_args_destroy(self->resolver_result_);
+    self->resolver_result_ = nullptr;
+  }
+  // Set the channel's connectivity state if needed.
+  if (set_connectivity_state) {
+    self->SetConnectivityStateLocked(connectivity_state, connectivity_error,
+                                     "resolver_result");
+  } else {
+    GRPC_ERROR_UNREF(connectivity_error);
+  }
+  // Invoke closures that were waiting for results and renew the watch.
+  GRPC_CLOSURE_LIST_SCHED(&self->waiting_for_resolver_result_closures_);
+  self->resolver_->NextLocked(&self->resolver_result_,
+                              &self->on_resolver_result_changed_);
+}
+
+void RequestRouter::RouteCallLocked(Request* request) {
+  GPR_ASSERT(request->pick_.connected_subchannel == nullptr);
+  request->request_router_ = this;
+  if (lb_policy_ != nullptr) {
+    // We already have resolver results, so process the service config
+    // and start an LB pick.
+    request->ProcessServiceConfigAndStartLbPickLocked();
+  } else if (resolver_ == nullptr) {
+    GRPC_CLOSURE_RUN(request->on_route_done_,
+                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+  } else {
+    // We do not yet have an LB policy, so wait for a resolver result.
+    if (!started_resolving_) {
+      StartResolvingLocked();
+    }
+    // Create a new waiter, which will delete itself when done.
+    New<Request::ResolverResultWaiter>(request);
+    // Add the request's polling entity to the request_router's
+    // interested_parties, so that the I/O of the resolver can be done
+    // under it. It will be removed in LbPickDoneLocked().
+    request->MaybeAddCallToInterestedPartiesLocked();
+  }
+}
+
+void RequestRouter::ShutdownLocked(grpc_error* error) {
+  if (resolver_ != nullptr) {
+    SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+                               "disconnect");
+    resolver_.reset();
+    if (!started_resolving_) {
+      grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
+                                 GRPC_ERROR_REF(error));
+      GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
+    }
+    if (lb_policy_ != nullptr) {
+      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
+                                       interested_parties_);
+      lb_policy_.reset();
+    }
+  }
+  GRPC_ERROR_UNREF(error);
+}
+
+grpc_connectivity_state RequestRouter::GetConnectivityState() {
+  return grpc_connectivity_state_check(&state_tracker_);
+}
+
+void RequestRouter::NotifyOnConnectivityStateChange(
+    grpc_connectivity_state* state, grpc_closure* closure) {
+  grpc_connectivity_state_notify_on_state_change(&state_tracker_, state,
+                                                 closure);
+}
+
+void RequestRouter::ExitIdleLocked() {
+  if (lb_policy_ != nullptr) {
+    lb_policy_->ExitIdleLocked();
+  } else {
+    exit_idle_when_lb_policy_arrives_ = true;
+    if (!started_resolving_ && resolver_ != nullptr) {
+      StartResolvingLocked();
+    }
+  }
+}
+
+void RequestRouter::ResetConnectionBackoffLocked() {
+  if (resolver_ != nullptr) {
+    resolver_->ResetBackoffLocked();
+    resolver_->RequestReresolutionLocked();
+  }
+  if (lb_policy_ != nullptr) {
+    lb_policy_->ResetBackoffLocked();
+  }
+}
+
+}  // namespace grpc_core
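
request_routing.cc is the destination for the plumbing removed from client_channel.cc (882 lines deleted there): one RequestRouter owns the resolver and the current LB policy, keeps the channel's connectivity state, and exposes RouteCallLocked() plus a per-call Request object. From the constructor and RouteCallLocked() above, the calling convention looks roughly like the sketch below. It is illustrative glue, not code from the release: every argument is assumed to be supplied the way client_channel.cc supplies it, apply_service_config matches the bool(void*) callback shape invoked in ProcessServiceConfigAndStartLbPickLocked(), and everything must run under the channel combiner, as the Locked suffixes indicate.

    #include "src/core/ext/filters/client_channel/request_routing.h"

    // Hypothetical glue: route one call through an existing RequestRouter.
    // The caller owns the Request's storage until on_route_done runs; per the
    // AsyncPickCanceller comments above, client_channel embeds it in per-call
    // data. Must be invoked under the channel combiner.
    void RouteOneCall(grpc_core::RequestRouter* router, grpc_call_stack* call,
                      grpc_call_combiner* call_combiner,
                      grpc_polling_entity* pollent,
                      grpc_metadata_batch* send_initial_metadata,
                      uint32_t* send_initial_metadata_flags,
                      bool (*apply_service_config)(void* user_data),
                      void* user_data, grpc_closure* on_route_done) {
      // Request captures everything needed for one routing attempt; the
      // router starts resolution on first use, then performs the LB pick.
      grpc_core::RequestRouter::Request* request =
          grpc_core::New<grpc_core::RequestRouter::Request>(
              call, call_combiner, pollent, send_initial_metadata,
              send_initial_metadata_flags, apply_service_config, user_data,
              on_route_done);
      // on_route_done runs once the pick completes (or fails); the picked
      // connected subchannel is then available from the request's pick state.
      router->RouteCallLocked(request);
    }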