grpc 1.21.0 → 1.22.0.pre1

Sign up to get free protection for your applications and access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (141)
  1. checksums.yaml +4 -4
  2. data/Makefile +422 -62
  3. data/include/grpc/grpc_security.h +61 -5
  4. data/include/grpc/grpc_security_constants.h +1 -1
  5. data/include/grpc/impl/codegen/gpr_types.h +1 -1
  6. data/include/grpc/slice.h +2 -2
  7. data/src/core/ext/filters/client_channel/backup_poller.cc +2 -3
  8. data/src/core/ext/filters/client_channel/backup_poller.h +5 -2
  9. data/src/core/ext/filters/client_channel/client_channel.cc +260 -122
  10. data/src/core/ext/filters/client_channel/client_channel.h +0 -8
  11. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +3 -84
  12. data/src/core/ext/filters/client_channel/client_channel_channelz.h +2 -28
  13. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +2 -8
  14. data/src/core/ext/filters/client_channel/health/health_check_client.cc +5 -4
  15. data/src/core/ext/filters/client_channel/lb_policy.cc +16 -2
  16. data/src/core/ext/filters/client_channel/lb_policy.h +92 -98
  17. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +63 -87
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +6 -2
  19. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +35 -87
  20. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +18 -74
  21. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +167 -217
  22. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +216 -190
  23. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc +6 -2
  24. data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
  25. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +1 -1
  26. data/src/core/ext/filters/client_channel/lb_policy_registry.h +1 -1
  27. data/src/core/ext/filters/client_channel/resolver.h +1 -1
  28. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +6 -3
  29. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +0 -1
  30. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +2 -0
  31. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +8 -8
  32. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +7 -7
  33. data/src/core/ext/filters/client_channel/resolving_lb_policy.cc +28 -64
  34. data/src/core/ext/filters/client_channel/resolving_lb_policy.h +4 -12
  35. data/src/core/ext/filters/client_channel/server_address.cc +4 -6
  36. data/src/core/ext/filters/client_channel/server_address.h +1 -3
  37. data/src/core/ext/filters/client_channel/service_config.cc +20 -22
  38. data/src/core/ext/filters/client_channel/service_config.h +26 -28
  39. data/src/core/ext/filters/client_channel/subchannel.cc +261 -160
  40. data/src/core/ext/filters/client_channel/subchannel.h +97 -23
  41. data/src/core/ext/filters/client_channel/subchannel_interface.h +113 -0
  42. data/src/core/ext/filters/message_size/message_size_filter.cc +12 -12
  43. data/src/core/ext/filters/message_size/message_size_filter.h +2 -2
  44. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +50 -2
  45. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +2 -2
  46. data/src/core/ext/transport/chttp2/transport/frame_data.cc +31 -36
  47. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +3 -2
  48. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +71 -52
  49. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +18 -3
  50. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +5 -12
  51. data/src/core/ext/transport/chttp2/transport/hpack_table.h +10 -1
  52. data/src/core/ext/transport/chttp2/transport/internal.h +3 -3
  53. data/src/core/ext/transport/chttp2/transport/parsing.cc +39 -57
  54. data/src/core/lib/channel/channelz.cc +136 -19
  55. data/src/core/lib/channel/channelz.h +36 -40
  56. data/src/core/lib/channel/channelz_registry.cc +74 -106
  57. data/src/core/lib/channel/channelz_registry.h +10 -28
  58. data/src/core/lib/channel/context.h +1 -1
  59. data/src/core/lib/channel/handshaker.cc +6 -0
  60. data/src/core/lib/compression/compression.cc +13 -8
  61. data/src/core/lib/compression/compression_internal.cc +14 -10
  62. data/src/core/lib/compression/compression_internal.h +1 -1
  63. data/src/core/lib/compression/stream_compression.cc +3 -2
  64. data/src/core/lib/compression/stream_compression.h +2 -2
  65. data/src/core/lib/compression/stream_compression_gzip.cc +9 -9
  66. data/src/core/lib/gpr/env.h +1 -1
  67. data/src/core/lib/gpr/string.cc +8 -1
  68. data/src/core/lib/gpr/string.h +6 -1
  69. data/src/core/lib/gprpp/fork.cc +1 -1
  70. data/src/core/lib/gprpp/fork.h +5 -1
  71. data/src/core/lib/gprpp/global_config.h +9 -0
  72. data/src/core/lib/gprpp/global_config_custom.h +1 -1
  73. data/src/core/lib/gprpp/inlined_vector.h +8 -0
  74. data/src/core/lib/gprpp/map.h +38 -21
  75. data/src/core/lib/gprpp/memory.h +2 -2
  76. data/src/core/lib/gprpp/orphanable.h +1 -1
  77. data/src/core/lib/gprpp/ref_counted.h +9 -4
  78. data/src/core/lib/http/httpcli.cc +3 -3
  79. data/src/core/lib/iomgr/buffer_list.h +1 -1
  80. data/src/core/lib/iomgr/call_combiner.cc +1 -1
  81. data/src/core/lib/iomgr/call_combiner.h +1 -1
  82. data/src/core/lib/iomgr/cfstream_handle.cc +3 -2
  83. data/src/core/lib/iomgr/cfstream_handle.h +4 -0
  84. data/src/core/lib/iomgr/error.cc +3 -3
  85. data/src/core/lib/iomgr/error.h +9 -3
  86. data/src/core/lib/iomgr/error_internal.h +1 -1
  87. data/src/core/lib/iomgr/ev_epoll1_linux.cc +1 -1
  88. data/src/core/lib/iomgr/ev_posix.cc +3 -3
  89. data/src/core/lib/iomgr/ev_posix.h +3 -2
  90. data/src/core/lib/iomgr/ev_windows.cc +2 -2
  91. data/src/core/lib/iomgr/iomgr.cc +4 -4
  92. data/src/core/lib/iomgr/lockfree_event.cc +1 -1
  93. data/src/core/lib/iomgr/port.h +5 -1
  94. data/src/core/lib/iomgr/tcp_posix.cc +1 -3
  95. data/src/core/lib/iomgr/tcp_server.cc +5 -0
  96. data/src/core/lib/iomgr/tcp_server.h +24 -0
  97. data/src/core/lib/iomgr/tcp_server_custom.cc +11 -9
  98. data/src/core/lib/iomgr/tcp_server_posix.cc +72 -11
  99. data/src/core/lib/iomgr/tcp_server_utils_posix.h +3 -0
  100. data/src/core/lib/iomgr/tcp_server_windows.cc +11 -9
  101. data/src/core/lib/iomgr/tcp_uv.cc +5 -6
  102. data/src/core/lib/iomgr/timer.h +2 -1
  103. data/src/core/lib/iomgr/udp_server.cc +2 -2
  104. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +1 -1
  105. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +20 -2
  106. data/src/core/lib/security/credentials/ssl/ssl_credentials.h +2 -2
  107. data/src/core/lib/security/security_connector/security_connector.h +1 -1
  108. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +1 -1
  109. data/src/core/lib/security/transport/auth_filters.h +3 -0
  110. data/src/core/lib/security/transport/client_auth_filter.cc +13 -0
  111. data/src/core/lib/security/transport/security_handshaker.cc +7 -7
  112. data/src/core/lib/slice/b64.h +2 -2
  113. data/src/core/lib/slice/slice.cc +82 -10
  114. data/src/core/lib/slice/slice_buffer.cc +49 -21
  115. data/src/core/lib/slice/slice_hash_table.h +2 -2
  116. data/src/core/lib/slice/slice_intern.cc +15 -16
  117. data/src/core/lib/slice/slice_internal.h +52 -0
  118. data/src/core/lib/slice/slice_string_helpers.cc +10 -1
  119. data/src/core/lib/slice/slice_string_helpers.h +3 -1
  120. data/src/core/lib/slice/slice_utils.h +50 -0
  121. data/src/core/lib/slice/slice_weak_hash_table.h +2 -2
  122. data/src/core/lib/surface/call.cc +14 -8
  123. data/src/core/lib/surface/channel.cc +89 -97
  124. data/src/core/lib/surface/channel.h +60 -6
  125. data/src/core/lib/surface/completion_queue.cc +49 -36
  126. data/src/core/lib/surface/completion_queue.h +2 -1
  127. data/src/core/lib/surface/server.cc +8 -8
  128. data/src/core/lib/surface/validate_metadata.cc +14 -8
  129. data/src/core/lib/surface/validate_metadata.h +13 -2
  130. data/src/core/lib/surface/version.cc +1 -1
  131. data/src/core/lib/transport/metadata.cc +56 -26
  132. data/src/core/lib/transport/metadata.h +91 -75
  133. data/src/core/lib/transport/static_metadata.cc +262 -176
  134. data/src/core/lib/transport/static_metadata.h +272 -180
  135. data/src/core/lib/transport/transport.cc +1 -1
  136. data/src/core/lib/transport/transport.h +8 -2
  137. data/src/core/tsi/alts/handshaker/alts_shared_resource.h +1 -1
  138. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +2 -0
  139. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +5 -2
  140. data/src/ruby/lib/grpc/version.rb +1 -1
  141. metadata +37 -35
@@ -163,6 +163,28 @@ typedef struct {
163
163
  const char* cert_chain;
164
164
  } grpc_ssl_pem_key_cert_pair;
165
165
 
166
+ /** Deprecated in favor of grpc_ssl_verify_peer_options. It will be removed
167
+ after all of its call sites are migrated to grpc_ssl_verify_peer_options.
168
+ Object that holds additional peer-verification options on a secure
169
+ channel. */
170
+ typedef struct {
171
+ /** If non-NULL this callback will be invoked with the expected
172
+ target_name, the peer's certificate (in PEM format), and whatever
173
+ userdata pointer is set below. If a non-zero value is returned by this
174
+ callback then it is treated as a verification failure. Invocation of
175
+ the callback is blocking, so any implementation should be light-weight.
176
+ */
177
+ int (*verify_peer_callback)(const char* target_name, const char* peer_pem,
178
+ void* userdata);
179
+ /** Arbitrary userdata that will be passed as the last argument to
180
+ verify_peer_callback. */
181
+ void* verify_peer_callback_userdata;
182
+ /** A destruct callback that will be invoked when the channel is being
183
+ cleaned up. The userdata argument will be passed to it. The intent is
184
+ to perform any cleanup associated with that userdata. */
185
+ void (*verify_peer_destruct)(void* userdata);
186
+ } verify_peer_options;
187
+
166
188
  /** Object that holds additional peer-verification options on a secure
167
189
  channel. */
168
190
  typedef struct {
@@ -181,9 +203,11 @@ typedef struct {
181
203
  cleaned up. The userdata argument will be passed to it. The intent is
182
204
  to perform any cleanup associated with that userdata. */
183
205
  void (*verify_peer_destruct)(void* userdata);
184
- } verify_peer_options;
206
+ } grpc_ssl_verify_peer_options;
185
207
 
186
- /** Creates an SSL credentials object.
208
+ /** Deprecated in favor of grpc_ssl_server_credentials_create_ex. It will be
209
+ removed after all of its call sites are migrated to
210
+ grpc_ssl_server_credentials_create_ex. Creates an SSL credentials object.
187
211
  - pem_root_certs is the NULL-terminated string containing the PEM encoding
188
212
  of the server root certificates. If this parameter is NULL, the
189
213
  implementation will first try to dereference the file pointed by the
@@ -214,6 +238,37 @@ GRPCAPI grpc_channel_credentials* grpc_ssl_credentials_create(
214
238
  const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pair,
215
239
  const verify_peer_options* verify_options, void* reserved);
216
240
 
241
+ /* Creates an SSL credentials object.
242
+ - pem_root_certs is the NULL-terminated string containing the PEM encoding
243
+ of the server root certificates. If this parameter is NULL, the
244
+ implementation will first try to dereference the file pointed by the
245
+ GRPC_DEFAULT_SSL_ROOTS_FILE_PATH environment variable, and if that fails,
246
+ try to get the roots set by grpc_override_ssl_default_roots. Eventually,
247
+ if all these fail, it will try to get the roots from a well-known place on
248
+ disk (in the grpc install directory).
249
+
250
+ gRPC has implemented root cache if the underlying OpenSSL library supports
251
+ it. The gRPC root certificates cache is only applicable on the default
252
+ root certificates, which is used when this parameter is nullptr. If user
253
+ provides their own pem_root_certs, when creating an SSL credential object,
254
+ gRPC would not be able to cache it, and each subchannel will generate a
255
+ copy of the root store. So it is recommended to avoid providing large room
256
+ pem with pem_root_certs parameter to avoid excessive memory consumption,
257
+ particularly on mobile platforms such as iOS.
258
+ - pem_key_cert_pair is a pointer on the object containing client's private
259
+ key and certificate chain. This parameter can be NULL if the client does
260
+ not have such a key/cert pair.
261
+ - verify_options is an optional verify_peer_options object which holds
262
+ additional options controlling how peer certificates are verified. For
263
+ example, you can supply a callback which receives the peer's certificate
264
+ with which you can do additional verification. Can be NULL, in which
265
+ case verification will retain default behavior. Any settings in
266
+ verify_options are copied during this call, so the verify_options
267
+ object can be released afterwards. */
268
+ GRPCAPI grpc_channel_credentials* grpc_ssl_credentials_create_ex(
269
+ const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pair,
270
+ const grpc_ssl_verify_peer_options* verify_options, void* reserved);
271
+
217
272
  /** --- grpc_call_credentials object.
218
273
 
219
274
  A call credentials object represents a way to authenticate on a particular
@@ -435,7 +490,7 @@ GRPCAPI grpc_server_credentials* grpc_ssl_server_credentials_create(
435
490
  /** Deprecated in favor of grpc_ssl_server_credentials_create_with_options.
436
491
  Same as grpc_ssl_server_credentials_create method except uses
437
492
  grpc_ssl_client_certificate_request_type enum to support more ways to
438
- authenticate client cerificates.*/
493
+ authenticate client certificates.*/
439
494
  GRPCAPI grpc_server_credentials* grpc_ssl_server_credentials_create_ex(
440
495
  const char* pem_root_certs, grpc_ssl_pem_key_cert_pair* pem_key_cert_pairs,
441
496
  size_t num_key_cert_pairs,
@@ -641,7 +696,7 @@ typedef struct grpc_tls_credentials_options grpc_tls_credentials_options;
641
696
 
642
697
  /** Create an empty TLS credentials options. It is used for
643
698
  * experimental purpose for now and subject to change. */
644
- GRPCAPI grpc_tls_credentials_options* grpc_tls_credentials_options_create();
699
+ GRPCAPI grpc_tls_credentials_options* grpc_tls_credentials_options_create(void);
645
700
 
646
701
  /** Set grpc_ssl_client_certificate_request_type field in credentials options
647
702
  with the provided type. options should not be NULL.
@@ -683,7 +738,8 @@ GRPCAPI int grpc_tls_credentials_options_set_server_authorization_check_config(
683
738
 
684
739
  /** Create an empty grpc_tls_key_materials_config instance.
685
740
  * It is used for experimental purpose for now and subject to change. */
686
- GRPCAPI grpc_tls_key_materials_config* grpc_tls_key_materials_config_create();
741
+ GRPCAPI grpc_tls_key_materials_config* grpc_tls_key_materials_config_create(
742
+ void);
687
743
 
688
744
  /** Set grpc_tls_key_materials_config instance with provided a TLS certificate.
689
745
  config will take the ownership of pem_root_certs and pem_key_cert_pairs.
@@ -96,7 +96,7 @@ typedef enum {
96
96
  /** Server requests client certificate and enforces that the client presents a
97
97
  certificate.
98
98
 
99
- The cerificate presented by the client is verified by the gRPC framework.
99
+ The certificate presented by the client is verified by the gRPC framework.
100
100
  (For a successful connection the client needs to present a certificate that
101
101
  can be verified against the root certificate configured by the server)
102
102
 
@@ -48,7 +48,7 @@ typedef struct gpr_timespec {
48
48
  int64_t tv_sec;
49
49
  int32_t tv_nsec;
50
50
  /** Against which clock was this time measured? (or GPR_TIMESPAN if
51
- this is a relative time meaure) */
51
+ this is a relative time measure) */
52
52
  gpr_clock_type clock_type;
53
53
  } gpr_timespec;
54
54
 
@@ -107,7 +107,7 @@ GPRAPI grpc_slice grpc_slice_sub_no_ref(grpc_slice s, size_t begin, size_t end);
107
107
 
108
108
  /** Splits s into two: modifies s to be s[0:split], and returns a new slice,
109
109
  sharing a refcount with s, that contains s[split:s.length].
110
- Requires s intialized, split <= s.length */
110
+ Requires s initialized, split <= s.length */
111
111
  GPRAPI grpc_slice grpc_slice_split_tail(grpc_slice* s, size_t split);
112
112
 
113
113
  typedef enum {
@@ -124,7 +124,7 @@ GPRAPI grpc_slice grpc_slice_split_tail_maybe_ref(grpc_slice* s, size_t split,
124
124
 
125
125
  /** Splits s into two: modifies s to be s[split:s.length], and returns a new
126
126
  slice, sharing a refcount with s, that contains s[0:split].
127
- Requires s intialized, split <= s.length */
127
+ Requires s initialized, split <= s.length */
128
128
  GPRAPI grpc_slice grpc_slice_split_head(grpc_slice* s, size_t split);
129
129
 
130
130
  GPRAPI grpc_slice grpc_empty_slice(void);
@@ -65,8 +65,8 @@ GPR_GLOBAL_CONFIG_DEFINE_INT32(
65
65
  "idleness), so that the next RPC on this channel won't fail. Set to 0 to "
66
66
  "turn off the backup polls.");
67
67
 
68
- static void init_globals() {
69
- gpr_mu_init(&g_poller_mu);
68
+ void grpc_client_channel_global_init_backup_polling() {
69
+ gpr_once_init(&g_once, [] { gpr_mu_init(&g_poller_mu); });
70
70
  int32_t poll_interval_ms =
71
71
  GPR_GLOBAL_CONFIG_GET(grpc_client_channel_backup_poll_interval_ms);
72
72
  if (poll_interval_ms < 0) {
@@ -153,7 +153,6 @@ static void g_poller_init_locked() {
153
153
 
154
154
  void grpc_client_channel_start_backup_polling(
155
155
  grpc_pollset_set* interested_parties) {
156
- gpr_once_init(&g_once, init_globals);
157
156
  if (g_poll_interval_ms == 0) {
158
157
  return;
159
158
  }
@@ -27,11 +27,14 @@
27
27
 
28
28
  GPR_GLOBAL_CONFIG_DECLARE_INT32(grpc_client_channel_backup_poll_interval_ms);
29
29
 
30
- /* Start polling \a interested_parties periodically in the timer thread */
30
+ /* Initializes backup polling. */
31
+ void grpc_client_channel_global_init_backup_polling();
32
+
33
+ /* Starts polling \a interested_parties periodically in the timer thread. */
31
34
  void grpc_client_channel_start_backup_polling(
32
35
  grpc_pollset_set* interested_parties);
33
36
 
34
- /* Stop polling \a interested_parties */
37
+ /* Stops polling \a interested_parties. */
35
38
  void grpc_client_channel_stop_backup_polling(
36
39
  grpc_pollset_set* interested_parties);
37
40
 
@@ -51,6 +51,7 @@
51
51
  #include "src/core/lib/gpr/string.h"
52
52
  #include "src/core/lib/gprpp/inlined_vector.h"
53
53
  #include "src/core/lib/gprpp/manual_constructor.h"
54
+ #include "src/core/lib/gprpp/map.h"
54
55
  #include "src/core/lib/gprpp/sync.h"
55
56
  #include "src/core/lib/iomgr/combiner.h"
56
57
  #include "src/core/lib/iomgr/iomgr.h"
@@ -66,7 +67,7 @@
66
67
  #include "src/core/lib/transport/static_metadata.h"
67
68
  #include "src/core/lib/transport/status_metadata.h"
68
69
 
69
- using grpc_core::internal::ClientChannelMethodParsedObject;
70
+ using grpc_core::internal::ClientChannelMethodParsedConfig;
70
71
  using grpc_core::internal::ServerRetryThrottleData;
71
72
 
72
73
  //
@@ -105,7 +106,6 @@ namespace {
105
106
  class ChannelData {
106
107
  public:
107
108
  struct QueuedPick {
108
- LoadBalancingPolicy::PickArgs pick;
109
109
  grpc_call_element* elem;
110
110
  QueuedPick* next = nullptr;
111
111
  };
@@ -118,18 +118,6 @@ class ChannelData {
118
118
  static void GetChannelInfo(grpc_channel_element* elem,
119
119
  const grpc_channel_info* info);
120
120
 
121
- void set_channelz_node(channelz::ClientChannelNode* node) {
122
- channelz_node_ = node;
123
- resolving_lb_policy_->set_channelz_node(node->Ref());
124
- }
125
- void FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
126
- channelz::ChildRefsList* child_channels) {
127
- if (resolving_lb_policy_ != nullptr) {
128
- resolving_lb_policy_->FillChildRefsForChannelz(child_subchannels,
129
- child_channels);
130
- }
131
- }
132
-
133
121
  bool deadline_checking_enabled() const { return deadline_checking_enabled_; }
134
122
  bool enable_retries() const { return enable_retries_; }
135
123
  size_t per_rpc_retry_buffer_size() const {
@@ -175,6 +163,7 @@ class ChannelData {
175
163
  private:
176
164
  class ConnectivityStateAndPickerSetter;
177
165
  class ServiceConfigSetter;
166
+ class GrpcSubchannel;
178
167
  class ClientChannelControlHelper;
179
168
 
180
169
  class ExternalConnectivityWatcher {
@@ -223,7 +212,7 @@ class ChannelData {
223
212
 
224
213
  static bool ProcessResolverResultLocked(
225
214
  void* arg, const Resolver::Result& result, const char** lb_policy_name,
226
- RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config,
215
+ RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config,
227
216
  grpc_error** service_config_error);
228
217
 
229
218
  grpc_error* DoPingLocked(grpc_transport_op* op);
@@ -234,9 +223,9 @@ class ChannelData {
234
223
 
235
224
  void ProcessLbPolicy(
236
225
  const Resolver::Result& resolver_result,
237
- const internal::ClientChannelGlobalParsedObject* parsed_service_config,
226
+ const internal::ClientChannelGlobalParsedConfig* parsed_service_config,
238
227
  UniquePtr<char>* lb_policy_name,
239
- RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config);
228
+ RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config);
240
229
 
241
230
  //
242
231
  // Fields set at construction and never modified.
@@ -248,8 +237,7 @@ class ChannelData {
248
237
  ClientChannelFactory* client_channel_factory_;
249
238
  UniquePtr<char> server_name_;
250
239
  RefCountedPtr<ServiceConfig> default_service_config_;
251
- // Initialized shortly after construction.
252
- channelz::ClientChannelNode* channelz_node_ = nullptr;
240
+ channelz::ChannelNode* channelz_node_;
253
241
 
254
242
  //
255
243
  // Fields used in the data plane. Guarded by data_plane_combiner.
@@ -268,12 +256,13 @@ class ChannelData {
268
256
  grpc_combiner* combiner_;
269
257
  grpc_pollset_set* interested_parties_;
270
258
  RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;
271
- OrphanablePtr<LoadBalancingPolicy> resolving_lb_policy_;
259
+ OrphanablePtr<ResolvingLoadBalancingPolicy> resolving_lb_policy_;
272
260
  grpc_connectivity_state_tracker state_tracker_;
273
261
  ExternalConnectivityWatcher::WatcherList external_connectivity_watcher_list_;
274
262
  UniquePtr<char> health_check_service_name_;
275
263
  RefCountedPtr<ServiceConfig> saved_service_config_;
276
264
  bool received_first_resolver_result_ = false;
265
+ Map<Subchannel*, int> subchannel_refcount_map_;
277
266
 
278
267
  //
279
268
  // Fields accessed from both data plane and control plane combiners.
@@ -315,6 +304,16 @@ class CallData {
315
304
  private:
316
305
  class QueuedPickCanceller;
317
306
 
307
+ class LbCallState : public LoadBalancingPolicy::CallState {
308
+ public:
309
+ explicit LbCallState(CallData* calld) : calld_(calld) {}
310
+
311
+ void* Alloc(size_t size) override { return calld_->arena_->Alloc(size); }
312
+
313
+ private:
314
+ CallData* calld_;
315
+ };
316
+
318
317
  // State used for starting a retryable batch on a subchannel call.
319
318
  // This provides its own grpc_transport_stream_op_batch and other data
320
319
  // structures needed to populate the ops in the batch.
@@ -450,8 +449,9 @@ class CallData {
450
449
  grpc_call_element* elem, SubchannelCallBatchData* batch_data,
451
450
  SubchannelCallRetryState* retry_state);
452
451
 
453
- static void MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
454
- const LoadBalancingPolicy::PickArgs& pick,
452
+ static void RecvTrailingMetadataReadyForLoadBalancingPolicy(
453
+ void* arg, grpc_error* error);
454
+ void MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
455
455
  grpc_transport_stream_op_batch* batch);
456
456
 
457
457
  // Returns the index into pending_batches_ to be used for batch.
@@ -630,7 +630,7 @@ class CallData {
630
630
 
631
631
  RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
632
632
  ServiceConfig::CallData service_config_call_data_;
633
- const ClientChannelMethodParsedObject* method_params_ = nullptr;
633
+ const ClientChannelMethodParsedConfig* method_params_ = nullptr;
634
634
 
635
635
  RefCountedPtr<SubchannelCall> subchannel_call_;
636
636
 
@@ -641,8 +641,19 @@ class CallData {
641
641
  bool pick_queued_ = false;
642
642
  bool service_config_applied_ = false;
643
643
  QueuedPickCanceller* pick_canceller_ = nullptr;
644
+ LbCallState lb_call_state_;
645
+ RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
646
+ void (*lb_recv_trailing_metadata_ready_)(
647
+ void* user_data, grpc_metadata_batch* recv_trailing_metadata,
648
+ LoadBalancingPolicy::CallState* call_state) = nullptr;
649
+ void* lb_recv_trailing_metadata_ready_user_data_ = nullptr;
644
650
  grpc_closure pick_closure_;
645
651
 
652
+ // For intercepting recv_trailing_metadata_ready for the LB policy.
653
+ grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
654
+ grpc_closure recv_trailing_metadata_ready_;
655
+ grpc_closure* original_recv_trailing_metadata_ready_ = nullptr;
656
+
646
657
  grpc_polling_entity* pollent_ = nullptr;
647
658
 
648
659
  // Batches are added to this list when received from above.
@@ -712,6 +723,7 @@ class ChannelData::ConnectivityStateAndPickerSetter {
712
723
  // Update connectivity state here, while holding control plane combiner.
713
724
  grpc_connectivity_state_set(&chand->state_tracker_, state, reason);
714
725
  if (chand->channelz_node_ != nullptr) {
726
+ chand->channelz_node_->SetConnectivityState(state);
715
727
  chand->channelz_node_->AddTraceEvent(
716
728
  channelz::ChannelTrace::Severity::Info,
717
729
  grpc_slice_from_static_string(
@@ -773,7 +785,7 @@ class ChannelData::ServiceConfigSetter {
773
785
  public:
774
786
  ServiceConfigSetter(
775
787
  ChannelData* chand,
776
- Optional<internal::ClientChannelGlobalParsedObject::RetryThrottling>
788
+ Optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
777
789
  retry_throttle_data,
778
790
  RefCountedPtr<ServiceConfig> service_config)
779
791
  : chand_(chand),
@@ -812,7 +824,7 @@ class ChannelData::ServiceConfigSetter {
812
824
  }
813
825
 
814
826
  ChannelData* chand_;
815
- Optional<internal::ClientChannelGlobalParsedObject::RetryThrottling>
827
+ Optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
816
828
  retry_throttle_data_;
817
829
  RefCountedPtr<ServiceConfig> service_config_;
818
830
  grpc_closure closure_;
@@ -934,6 +946,89 @@ void ChannelData::ExternalConnectivityWatcher::WatchConnectivityStateLocked(
934
946
  &self->chand_->state_tracker_, self->state_, &self->my_closure_);
935
947
  }
936
948
 
949
+ //
950
+ // ChannelData::GrpcSubchannel
951
+ //
952
+
953
+ // This class is a wrapper for Subchannel that hides details of the
954
+ // channel's implementation (such as the health check service name) from
955
+ // the LB policy API.
956
+ //
957
+ // Note that no synchronization is needed here, because even if the
958
+ // underlying subchannel is shared between channels, this wrapper will only
959
+ // be used within one channel, so it will always be synchronized by the
960
+ // control plane combiner.
961
+ class ChannelData::GrpcSubchannel : public SubchannelInterface {
962
+ public:
963
+ GrpcSubchannel(ChannelData* chand, Subchannel* subchannel,
964
+ UniquePtr<char> health_check_service_name)
965
+ : chand_(chand),
966
+ subchannel_(subchannel),
967
+ health_check_service_name_(std::move(health_check_service_name)) {
968
+ GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "GrpcSubchannel");
969
+ auto* subchannel_node = subchannel_->channelz_node();
970
+ if (subchannel_node != nullptr) {
971
+ intptr_t subchannel_uuid = subchannel_node->uuid();
972
+ auto it = chand_->subchannel_refcount_map_.find(subchannel_);
973
+ if (it == chand_->subchannel_refcount_map_.end()) {
974
+ chand_->channelz_node_->AddChildSubchannel(subchannel_uuid);
975
+ it = chand_->subchannel_refcount_map_.emplace(subchannel_, 0).first;
976
+ }
977
+ ++it->second;
978
+ }
979
+ }
980
+
981
+ ~GrpcSubchannel() {
982
+ auto* subchannel_node = subchannel_->channelz_node();
983
+ if (subchannel_node != nullptr) {
984
+ intptr_t subchannel_uuid = subchannel_node->uuid();
985
+ auto it = chand_->subchannel_refcount_map_.find(subchannel_);
986
+ GPR_ASSERT(it != chand_->subchannel_refcount_map_.end());
987
+ --it->second;
988
+ if (it->second == 0) {
989
+ chand_->channelz_node_->RemoveChildSubchannel(subchannel_uuid);
990
+ chand_->subchannel_refcount_map_.erase(it);
991
+ }
992
+ }
993
+ GRPC_SUBCHANNEL_UNREF(subchannel_, "unref from LB");
994
+ GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "GrpcSubchannel");
995
+ }
996
+
997
+ grpc_connectivity_state CheckConnectivityState(
998
+ RefCountedPtr<ConnectedSubchannelInterface>* connected_subchannel)
999
+ override {
1000
+ RefCountedPtr<ConnectedSubchannel> tmp;
1001
+ auto retval = subchannel_->CheckConnectivityState(
1002
+ health_check_service_name_.get(), &tmp);
1003
+ *connected_subchannel = std::move(tmp);
1004
+ return retval;
1005
+ }
1006
+
1007
+ void WatchConnectivityState(
1008
+ grpc_connectivity_state initial_state,
1009
+ UniquePtr<ConnectivityStateWatcher> watcher) override {
1010
+ subchannel_->WatchConnectivityState(
1011
+ initial_state,
1012
+ UniquePtr<char>(gpr_strdup(health_check_service_name_.get())),
1013
+ std::move(watcher));
1014
+ }
1015
+
1016
+ void CancelConnectivityStateWatch(
1017
+ ConnectivityStateWatcher* watcher) override {
1018
+ subchannel_->CancelConnectivityStateWatch(health_check_service_name_.get(),
1019
+ watcher);
1020
+ }
1021
+
1022
+ void AttemptToConnect() override { subchannel_->AttemptToConnect(); }
1023
+
1024
+ void ResetBackoff() override { subchannel_->ResetBackoff(); }
1025
+
1026
+ private:
1027
+ ChannelData* chand_;
1028
+ Subchannel* subchannel_;
1029
+ UniquePtr<char> health_check_service_name_;
1030
+ };
1031
+
937
1032
  //
938
1033
  // ChannelData::ClientChannelControlHelper
939
1034
  //
@@ -950,23 +1045,29 @@ class ChannelData::ClientChannelControlHelper
950
1045
  "ClientChannelControlHelper");
951
1046
  }
952
1047
 
953
- Subchannel* CreateSubchannel(const grpc_channel_args& args) override {
954
- grpc_arg args_to_add[2];
955
- int num_args_to_add = 0;
956
- if (chand_->health_check_service_name_ != nullptr) {
957
- args_to_add[0] = grpc_channel_arg_string_create(
958
- const_cast<char*>("grpc.temp.health_check"),
959
- const_cast<char*>(chand_->health_check_service_name_.get()));
960
- num_args_to_add++;
1048
+ RefCountedPtr<SubchannelInterface> CreateSubchannel(
1049
+ const grpc_channel_args& args) override {
1050
+ bool inhibit_health_checking = grpc_channel_arg_get_bool(
1051
+ grpc_channel_args_find(&args, GRPC_ARG_INHIBIT_HEALTH_CHECKING), false);
1052
+ UniquePtr<char> health_check_service_name;
1053
+ if (!inhibit_health_checking) {
1054
+ health_check_service_name.reset(
1055
+ gpr_strdup(chand_->health_check_service_name_.get()));
961
1056
  }
962
- args_to_add[num_args_to_add++] = SubchannelPoolInterface::CreateChannelArg(
1057
+ static const char* args_to_remove[] = {
1058
+ GRPC_ARG_INHIBIT_HEALTH_CHECKING,
1059
+ GRPC_ARG_CHANNELZ_CHANNEL_NODE,
1060
+ };
1061
+ grpc_arg arg = SubchannelPoolInterface::CreateChannelArg(
963
1062
  chand_->subchannel_pool_.get());
964
- grpc_channel_args* new_args =
965
- grpc_channel_args_copy_and_add(&args, args_to_add, num_args_to_add);
1063
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
1064
+ &args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &arg, 1);
966
1065
  Subchannel* subchannel =
967
1066
  chand_->client_channel_factory_->CreateSubchannel(new_args);
968
1067
  grpc_channel_args_destroy(new_args);
969
- return subchannel;
1068
+ if (subchannel == nullptr) return nullptr;
1069
+ return MakeRefCounted<GrpcSubchannel>(chand_, subchannel,
1070
+ std::move(health_check_service_name));
970
1071
  }
971
1072
 
972
1073
  grpc_channel* CreateChannel(const char* target,
@@ -997,7 +1098,22 @@ class ChannelData::ClientChannelControlHelper
997
1098
  // No-op -- we should never get this from ResolvingLoadBalancingPolicy.
998
1099
  void RequestReresolution() override {}
999
1100
 
1101
+ void AddTraceEvent(TraceSeverity severity, const char* message) override {
1102
+ if (chand_->channelz_node_ != nullptr) {
1103
+ chand_->channelz_node_->AddTraceEvent(
1104
+ ConvertSeverityEnum(severity),
1105
+ grpc_slice_from_copied_string(message));
1106
+ }
1107
+ }
1108
+
1000
1109
  private:
1110
+ static channelz::ChannelTrace::Severity ConvertSeverityEnum(
1111
+ TraceSeverity severity) {
1112
+ if (severity == TRACE_INFO) return channelz::ChannelTrace::Info;
1113
+ if (severity == TRACE_WARNING) return channelz::ChannelTrace::Warning;
1114
+ return channelz::ChannelTrace::Error;
1115
+ }
1116
+
1001
1117
  ChannelData* chand_;
1002
1118
  };
1003
1119
 
@@ -1040,6 +1156,15 @@ RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
1040
1156
  return GlobalSubchannelPool::instance();
1041
1157
  }
1042
1158
 
1159
+ channelz::ChannelNode* GetChannelzNode(const grpc_channel_args* args) {
1160
+ const grpc_arg* arg =
1161
+ grpc_channel_args_find(args, GRPC_ARG_CHANNELZ_CHANNEL_NODE);
1162
+ if (arg != nullptr && arg->type == GRPC_ARG_POINTER) {
1163
+ return static_cast<channelz::ChannelNode*>(arg->value.pointer.p);
1164
+ }
1165
+ return nullptr;
1166
+ }
1167
+
1043
1168
  ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1044
1169
  : deadline_checking_enabled_(
1045
1170
  grpc_deadline_checking_enabled(args->channel_args)),
@@ -1049,11 +1174,16 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1049
1174
  owning_stack_(args->channel_stack),
1050
1175
  client_channel_factory_(
1051
1176
  ClientChannelFactory::GetFromChannelArgs(args->channel_args)),
1177
+ channelz_node_(GetChannelzNode(args->channel_args)),
1052
1178
  data_plane_combiner_(grpc_combiner_create()),
1053
1179
  combiner_(grpc_combiner_create()),
1054
1180
  interested_parties_(grpc_pollset_set_create()),
1055
1181
  subchannel_pool_(GetSubchannelPool(args->channel_args)),
1056
1182
  disconnect_error_(GRPC_ERROR_NONE) {
1183
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1184
+ gpr_log(GPR_INFO, "chand=%p: creating client_channel for channel stack %p",
1185
+ this, owning_stack_);
1186
+ }
1057
1187
  // Initialize data members.
1058
1188
  grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
1059
1189
  "client_channel");
@@ -1078,8 +1208,6 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1078
1208
  // Get default service config
1079
1209
  const char* service_config_json = grpc_channel_arg_get_string(
1080
1210
  grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG));
1081
- // TODO(yashkt): Make sure we set the channel in TRANSIENT_FAILURE on an
1082
- // invalid default service config
1083
1211
  if (service_config_json != nullptr) {
1084
1212
  *error = GRPC_ERROR_NONE;
1085
1213
  default_service_config_ = ServiceConfig::Create(service_config_json, error);
@@ -1135,6 +1263,9 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1135
1263
  }
1136
1264
 
1137
1265
  ChannelData::~ChannelData() {
1266
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1267
+ gpr_log(GPR_INFO, "chand=%p: destroying channel", this);
1268
+ }
1138
1269
  if (resolving_lb_policy_ != nullptr) {
1139
1270
  grpc_pollset_set_del_pollset_set(resolving_lb_policy_->interested_parties(),
1140
1271
  interested_parties_);
@@ -1152,9 +1283,9 @@ ChannelData::~ChannelData() {
1152
1283
 
1153
1284
  void ChannelData::ProcessLbPolicy(
1154
1285
  const Resolver::Result& resolver_result,
1155
- const internal::ClientChannelGlobalParsedObject* parsed_service_config,
1286
+ const internal::ClientChannelGlobalParsedConfig* parsed_service_config,
1156
1287
  UniquePtr<char>* lb_policy_name,
1157
- RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config) {
1288
+ RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config) {
1158
1289
  // Prefer the LB policy name found in the service config.
1159
1290
  if (parsed_service_config != nullptr &&
1160
1291
  parsed_service_config->parsed_lb_config() != nullptr) {
@@ -1202,7 +1333,7 @@ void ChannelData::ProcessLbPolicy(
1202
1333
  // resolver result update.
1203
1334
  bool ChannelData::ProcessResolverResultLocked(
1204
1335
  void* arg, const Resolver::Result& result, const char** lb_policy_name,
1205
- RefCountedPtr<ParsedLoadBalancingConfig>* lb_policy_config,
1336
+ RefCountedPtr<LoadBalancingPolicy::Config>* lb_policy_config,
1206
1337
  grpc_error** service_config_error) {
1207
1338
  ChannelData* chand = static_cast<ChannelData*>(arg);
1208
1339
  RefCountedPtr<ServiceConfig> service_config;
@@ -1247,16 +1378,17 @@ bool ChannelData::ProcessResolverResultLocked(
1247
1378
  result.service_config_error != GRPC_ERROR_NONE) {
1248
1379
  return false;
1249
1380
  }
1250
- UniquePtr<char> service_config_json;
1251
1381
  // Process service config.
1252
- const internal::ClientChannelGlobalParsedObject* parsed_service_config =
1382
+ UniquePtr<char> service_config_json;
1383
+ const internal::ClientChannelGlobalParsedConfig* parsed_service_config =
1253
1384
  nullptr;
1254
1385
  if (service_config != nullptr) {
1255
1386
  parsed_service_config =
1256
- static_cast<const internal::ClientChannelGlobalParsedObject*>(
1257
- service_config->GetParsedGlobalServiceConfigObject(
1387
+ static_cast<const internal::ClientChannelGlobalParsedConfig*>(
1388
+ service_config->GetGlobalParsedConfig(
1258
1389
  internal::ClientChannelServiceConfigParser::ParserIndex()));
1259
1390
  }
1391
+ // Check if the config has changed.
1260
1392
  const bool service_config_changed =
1261
1393
  ((service_config == nullptr) !=
1262
1394
  (chand->saved_service_config_ == nullptr)) ||
@@ -1272,20 +1404,22 @@ bool ChannelData::ProcessResolverResultLocked(
1272
1404
  "chand=%p: resolver returned updated service config: \"%s\"",
1273
1405
  chand, service_config_json.get());
1274
1406
  }
1275
- chand->saved_service_config_ = std::move(service_config);
1276
- if (parsed_service_config != nullptr) {
1407
+ // Save health check service name.
1408
+ if (service_config != nullptr) {
1277
1409
  chand->health_check_service_name_.reset(
1278
1410
  gpr_strdup(parsed_service_config->health_check_service_name()));
1279
1411
  } else {
1280
1412
  chand->health_check_service_name_.reset();
1281
1413
  }
1414
+ // Save service config.
1415
+ chand->saved_service_config_ = std::move(service_config);
1282
1416
  }
1283
1417
  // We want to set the service config at least once. This should not really be
1284
1418
  // needed, but we are doing it as a defensive approach. This can be removed,
1285
1419
  // if we feel it is unnecessary.
1286
1420
  if (service_config_changed || !chand->received_first_resolver_result_) {
1287
1421
  chand->received_first_resolver_result_ = true;
1288
- Optional<internal::ClientChannelGlobalParsedObject::RetryThrottling>
1422
+ Optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
1289
1423
  retry_throttle_data;
1290
1424
  if (parsed_service_config != nullptr) {
1291
1425
  retry_throttle_data = parsed_service_config->retry_throttling();
@@ -1315,19 +1449,19 @@ grpc_error* ChannelData::DoPingLocked(grpc_transport_op* op) {
1315
1449
  if (grpc_connectivity_state_check(&state_tracker_) != GRPC_CHANNEL_READY) {
1316
1450
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("channel not connected");
1317
1451
  }
1318
- LoadBalancingPolicy::PickArgs pick;
1319
- grpc_error* error = GRPC_ERROR_NONE;
1320
- picker_->Pick(&pick, &error);
1321
- if (pick.connected_subchannel != nullptr) {
1322
- pick.connected_subchannel->Ping(op->send_ping.on_initiate,
1323
- op->send_ping.on_ack);
1452
+ LoadBalancingPolicy::PickResult result =
1453
+ picker_->Pick(LoadBalancingPolicy::PickArgs());
1454
+ if (result.connected_subchannel != nullptr) {
1455
+ ConnectedSubchannel* connected_subchannel =
1456
+ static_cast<ConnectedSubchannel*>(result.connected_subchannel.get());
1457
+ connected_subchannel->Ping(op->send_ping.on_initiate, op->send_ping.on_ack);
1324
1458
  } else {
1325
- if (error == GRPC_ERROR_NONE) {
1326
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1459
+ if (result.error == GRPC_ERROR_NONE) {
1460
+ result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1327
1461
  "LB policy dropped call on ping");
1328
1462
  }
1329
1463
  }
1330
- return error;
1464
+ return result.error;
1331
1465
  }
1332
1466
 
1333
1467
  void ChannelData::StartTransportOpLocked(void* arg, grpc_error* ignored) {
@@ -1508,6 +1642,7 @@ CallData::CallData(grpc_call_element* elem, const ChannelData& chand,
1508
1642
  owning_call_(args.call_stack),
1509
1643
  call_combiner_(args.call_combiner),
1510
1644
  call_context_(args.context),
1645
+ lb_call_state_(this),
1511
1646
  pending_send_initial_metadata_(false),
1512
1647
  pending_send_message_(false),
1513
1648
  pending_send_trailing_metadata_(false),
@@ -1740,18 +1875,30 @@ void CallData::FreeCachedSendOpDataForCompletedBatch(
1740
1875
  // LB recv_trailing_metadata_ready handling
1741
1876
  //
1742
1877
 
1878
+ void CallData::RecvTrailingMetadataReadyForLoadBalancingPolicy(
1879
+ void* arg, grpc_error* error) {
1880
+ CallData* calld = static_cast<CallData*>(arg);
1881
+ // Invoke callback to LB policy.
1882
+ calld->lb_recv_trailing_metadata_ready_(
1883
+ calld->lb_recv_trailing_metadata_ready_user_data_,
1884
+ calld->recv_trailing_metadata_, &calld->lb_call_state_);
1885
+ // Chain to original callback.
1886
+ GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready_,
1887
+ GRPC_ERROR_REF(error));
1888
+ }
1889
+
1743
1890
  void CallData::MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
1744
- const LoadBalancingPolicy::PickArgs& pick,
1745
1891
  grpc_transport_stream_op_batch* batch) {
1746
- if (pick.recv_trailing_metadata_ready != nullptr) {
1747
- *pick.original_recv_trailing_metadata_ready =
1892
+ if (lb_recv_trailing_metadata_ready_ != nullptr) {
1893
+ recv_trailing_metadata_ =
1894
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata;
1895
+ original_recv_trailing_metadata_ready_ =
1748
1896
  batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
1897
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
1898
+ RecvTrailingMetadataReadyForLoadBalancingPolicy, this,
1899
+ grpc_schedule_on_exec_ctx);
1749
1900
  batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
1750
- pick.recv_trailing_metadata_ready;
1751
- if (pick.recv_trailing_metadata != nullptr) {
1752
- *pick.recv_trailing_metadata =
1753
- batch->payload->recv_trailing_metadata.recv_trailing_metadata;
1754
- }
1901
+ &recv_trailing_metadata_ready_;
1755
1902
  }
1756
1903
  }
1757
1904
 
@@ -1897,8 +2044,7 @@ void CallData::PendingBatchesFail(
1897
2044
  grpc_transport_stream_op_batch* batch = pending->batch;
1898
2045
  if (batch != nullptr) {
1899
2046
  if (batch->recv_trailing_metadata) {
1900
- MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(pick_.pick,
1901
- batch);
2047
+ MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
1902
2048
  }
1903
2049
  batch->handler_private.extra_arg = this;
1904
2050
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
@@ -1952,8 +2098,7 @@ void CallData::PendingBatchesResume(grpc_call_element* elem) {
1952
2098
  grpc_transport_stream_op_batch* batch = pending->batch;
1953
2099
  if (batch != nullptr) {
1954
2100
  if (batch->recv_trailing_metadata) {
1955
- MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(pick_.pick,
1956
- batch);
2101
+ MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
1957
2102
  }
1958
2103
  batch->handler_private.extra_arg = subchannel_call_.get();
1959
2104
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
@@ -2014,7 +2159,7 @@ void CallData::DoRetry(grpc_call_element* elem,
2014
2159
  GPR_ASSERT(retry_policy != nullptr);
2015
2160
  // Reset subchannel call and connected subchannel.
2016
2161
  subchannel_call_.reset();
2017
- pick_.pick.connected_subchannel.reset();
2162
+ connected_subchannel_.reset();
2018
2163
  // Compute backoff delay.
2019
2164
  grpc_millis next_attempt_time;
2020
2165
  if (server_pushback_ms >= 0) {
@@ -2871,7 +3016,7 @@ void CallData::AddRetriableRecvTrailingMetadataOp(
2871
3016
  .recv_trailing_metadata_ready =
2872
3017
  &retry_state->recv_trailing_metadata_ready;
2873
3018
  MaybeInjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
2874
- pick_.pick, &batch_data->batch);
3019
+ &batch_data->batch);
2875
3020
  }
2876
3021
 
2877
3022
  void CallData::StartInternalRecvTrailingMetadata(grpc_call_element* elem) {
@@ -3138,8 +3283,7 @@ void CallData::CreateSubchannelCall(grpc_call_element* elem) {
3138
3283
  // need to use a separate call context for each subchannel call.
3139
3284
  call_context_, call_combiner_, parent_data_size};
3140
3285
  grpc_error* error = GRPC_ERROR_NONE;
3141
- subchannel_call_ =
3142
- pick_.pick.connected_subchannel->CreateCall(call_args, &error);
3286
+ subchannel_call_ = connected_subchannel_->CreateCall(call_args, &error);
3143
3287
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
3144
3288
  gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
3145
3289
  chand, this, subchannel_call_.get(), grpc_error_string(error));
@@ -3248,10 +3392,10 @@ void CallData::ApplyServiceConfigToCallLocked(grpc_call_element* elem) {
3248
3392
  service_config_call_data_ =
3249
3393
  ServiceConfig::CallData(chand->service_config(), path_);
3250
3394
  if (service_config_call_data_.service_config() != nullptr) {
3251
- call_context_[GRPC_SERVICE_CONFIG_CALL_DATA].value =
3395
+ call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value =
3252
3396
  &service_config_call_data_;
3253
- method_params_ = static_cast<ClientChannelMethodParsedObject*>(
3254
- service_config_call_data_.GetMethodParsedObject(
3397
+ method_params_ = static_cast<ClientChannelMethodParsedConfig*>(
3398
+ service_config_call_data_.GetMethodParsedConfig(
3255
3399
  internal::ClientChannelServiceConfigParser::ParserIndex()));
3256
3400
  }
3257
3401
  retry_throttle_data_ = chand->retry_throttle_data();
@@ -3300,13 +3444,14 @@ void CallData::MaybeApplyServiceConfigToCallLocked(grpc_call_element* elem) {
3300
3444
  }
3301
3445
  }
3302
3446
 
3303
- const char* PickResultName(LoadBalancingPolicy::PickResult result) {
3304
- switch (result) {
3305
- case LoadBalancingPolicy::PICK_COMPLETE:
3447
+ const char* PickResultTypeName(
3448
+ LoadBalancingPolicy::PickResult::ResultType type) {
3449
+ switch (type) {
3450
+ case LoadBalancingPolicy::PickResult::PICK_COMPLETE:
3306
3451
  return "COMPLETE";
3307
- case LoadBalancingPolicy::PICK_QUEUE:
3452
+ case LoadBalancingPolicy::PickResult::PICK_QUEUE:
3308
3453
  return "QUEUE";
3309
- case LoadBalancingPolicy::PICK_TRANSIENT_FAILURE:
3454
+ case LoadBalancingPolicy::PickResult::PICK_TRANSIENT_FAILURE:
3310
3455
  return "TRANSIENT_FAILURE";
3311
3456
  }
3312
3457
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
@@ -3316,8 +3461,10 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
3316
3461
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
3317
3462
  CallData* calld = static_cast<CallData*>(elem->call_data);
3318
3463
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
3319
- GPR_ASSERT(calld->pick_.pick.connected_subchannel == nullptr);
3464
+ GPR_ASSERT(calld->connected_subchannel_ == nullptr);
3320
3465
  GPR_ASSERT(calld->subchannel_call_ == nullptr);
3466
+ // Apply service config to call if needed.
3467
+ calld->MaybeApplyServiceConfigToCallLocked(elem);
3321
3468
  // If this is a retry, use the send_initial_metadata payload that
3322
3469
  // we've cached; otherwise, use the pending batch. The
3323
3470
  // send_initial_metadata batch will be the first pending batch in the
@@ -3328,58 +3475,58 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
3328
3475
  // allocate the subchannel batch earlier so that we can give the
3329
3476
  // subchannel's copy of the metadata batch (which is copied for each
3330
3477
  // attempt) to the LB policy instead the one from the parent channel.
3331
- calld->pick_.pick.initial_metadata =
3478
+ LoadBalancingPolicy::PickArgs pick_args;
3479
+ pick_args.call_state = &calld->lb_call_state_;
3480
+ pick_args.initial_metadata =
3332
3481
  calld->seen_send_initial_metadata_
3333
3482
  ? &calld->send_initial_metadata_
3334
3483
  : calld->pending_batches_[0]
3335
3484
  .batch->payload->send_initial_metadata.send_initial_metadata;
3336
- uint32_t* send_initial_metadata_flags =
3485
+ // Grab initial metadata flags so that we can check later if the call has
3486
+ // wait_for_ready enabled.
3487
+ const uint32_t send_initial_metadata_flags =
3337
3488
  calld->seen_send_initial_metadata_
3338
- ? &calld->send_initial_metadata_flags_
3339
- : &calld->pending_batches_[0]
3340
- .batch->payload->send_initial_metadata
3341
- .send_initial_metadata_flags;
3342
- // Apply service config to call if needed.
3343
- calld->MaybeApplyServiceConfigToCallLocked(elem);
3489
+ ? calld->send_initial_metadata_flags_
3490
+ : calld->pending_batches_[0]
3491
+ .batch->payload->send_initial_metadata
3492
+ .send_initial_metadata_flags;
3344
3493
  // When done, we schedule this closure to leave the data plane combiner.
3345
3494
  GRPC_CLOSURE_INIT(&calld->pick_closure_, PickDone, elem,
3346
3495
  grpc_schedule_on_exec_ctx);
3347
3496
  // Attempt pick.
3348
- error = GRPC_ERROR_NONE;
3349
- auto pick_result = chand->picker()->Pick(&calld->pick_.pick, &error);
3497
+ auto result = chand->picker()->Pick(pick_args);
3350
3498
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
3351
3499
  gpr_log(GPR_INFO,
3352
3500
  "chand=%p calld=%p: LB pick returned %s (connected_subchannel=%p, "
3353
3501
  "error=%s)",
3354
- chand, calld, PickResultName(pick_result),
3355
- calld->pick_.pick.connected_subchannel.get(),
3356
- grpc_error_string(error));
3502
+ chand, calld, PickResultTypeName(result.type),
3503
+ result.connected_subchannel.get(), grpc_error_string(result.error));
3357
3504
  }
3358
- switch (pick_result) {
3359
- case LoadBalancingPolicy::PICK_TRANSIENT_FAILURE: {
3505
+ switch (result.type) {
3506
+ case LoadBalancingPolicy::PickResult::PICK_TRANSIENT_FAILURE: {
3360
3507
  // If we're shutting down, fail all RPCs.
3361
3508
  grpc_error* disconnect_error = chand->disconnect_error();
3362
3509
  if (disconnect_error != GRPC_ERROR_NONE) {
3363
- GRPC_ERROR_UNREF(error);
3510
+ GRPC_ERROR_UNREF(result.error);
3364
3511
  GRPC_CLOSURE_SCHED(&calld->pick_closure_,
3365
3512
  GRPC_ERROR_REF(disconnect_error));
3366
3513
  break;
3367
3514
  }
3368
3515
  // If wait_for_ready is false, then the error indicates the RPC
3369
3516
  // attempt's final status.
3370
- if ((*send_initial_metadata_flags &
3517
+ if ((send_initial_metadata_flags &
3371
3518
  GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
3372
3519
  // Retry if appropriate; otherwise, fail.
3373
3520
  grpc_status_code status = GRPC_STATUS_OK;
3374
- grpc_error_get_status(error, calld->deadline_, &status, nullptr,
3521
+ grpc_error_get_status(result.error, calld->deadline_, &status, nullptr,
3375
3522
  nullptr, nullptr);
3376
3523
  if (!calld->enable_retries_ ||
3377
3524
  !calld->MaybeRetry(elem, nullptr /* batch_data */, status,
3378
3525
  nullptr /* server_pushback_md */)) {
3379
3526
  grpc_error* new_error =
3380
3527
  GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
3381
- "Failed to pick subchannel", &error, 1);
3382
- GRPC_ERROR_UNREF(error);
3528
+ "Failed to pick subchannel", &result.error, 1);
3529
+ GRPC_ERROR_UNREF(result.error);
3383
3530
  GRPC_CLOSURE_SCHED(&calld->pick_closure_, new_error);
3384
3531
  }
3385
3532
  if (calld->pick_queued_) calld->RemoveCallFromQueuedPicksLocked(elem);
@@ -3387,19 +3534,24 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
3387
3534
  }
3388
3535
  // If wait_for_ready is true, then queue to retry when we get a new
3389
3536
  // picker.
3390
- GRPC_ERROR_UNREF(error);
3537
+ GRPC_ERROR_UNREF(result.error);
3391
3538
  }
3392
3539
  // Fallthrough
3393
- case LoadBalancingPolicy::PICK_QUEUE:
3540
+ case LoadBalancingPolicy::PickResult::PICK_QUEUE:
3394
3541
  if (!calld->pick_queued_) calld->AddCallToQueuedPicksLocked(elem);
3395
3542
  break;
3396
3543
  default: // PICK_COMPLETE
3397
3544
  // Handle drops.
3398
- if (GPR_UNLIKELY(calld->pick_.pick.connected_subchannel == nullptr)) {
3399
- error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
3545
+ if (GPR_UNLIKELY(result.connected_subchannel == nullptr)) {
3546
+ result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
3400
3547
  "Call dropped by load balancing policy");
3401
3548
  }
3402
- GRPC_CLOSURE_SCHED(&calld->pick_closure_, error);
3549
+ calld->connected_subchannel_ = std::move(result.connected_subchannel);
3550
+ calld->lb_recv_trailing_metadata_ready_ =
3551
+ result.recv_trailing_metadata_ready;
3552
+ calld->lb_recv_trailing_metadata_ready_user_data_ =
3553
+ result.recv_trailing_metadata_ready_user_data;
3554
+ GRPC_CLOSURE_SCHED(&calld->pick_closure_, result.error);
3403
3555
  if (calld->pick_queued_) calld->RemoveCallFromQueuedPicksLocked(elem);
3404
3556
  }
3405
3557
  }
@@ -3428,20 +3580,6 @@ const grpc_channel_filter grpc_client_channel_filter = {
3428
3580
  "client-channel",
3429
3581
  };
3430
3582
 
3431
- void grpc_client_channel_set_channelz_node(
3432
- grpc_channel_element* elem, grpc_core::channelz::ClientChannelNode* node) {
3433
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
3434
- chand->set_channelz_node(node);
3435
- }
3436
-
3437
- void grpc_client_channel_populate_child_refs(
3438
- grpc_channel_element* elem,
3439
- grpc_core::channelz::ChildRefsList* child_subchannels,
3440
- grpc_core::channelz::ChildRefsList* child_channels) {
3441
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
3442
- chand->FillChildRefsForChannelz(child_subchannels, child_channels);
3443
- }
3444
-
3445
3583
  grpc_connectivity_state grpc_client_channel_check_connectivity_state(
3446
3584
  grpc_channel_element* elem, int try_to_connect) {
3447
3585
  auto* chand = static_cast<ChannelData*>(elem->channel_data);