grpc 1.16.0 → 1.17.0.pre1

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (173)
  1. checksums.yaml +4 -4
  2. data/Makefile +299 -133
  3. data/include/grpc/grpc.h +11 -1
  4. data/include/grpc/grpc_posix.h +0 -8
  5. data/include/grpc/impl/codegen/grpc_types.h +3 -0
  6. data/src/core/ext/filters/client_channel/client_channel.cc +336 -345
  7. data/src/core/ext/filters/client_channel/client_channel.h +6 -2
  8. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +3 -1
  9. data/src/core/ext/filters/client_channel/client_channel_channelz.h +0 -7
  10. data/src/core/ext/filters/client_channel/health/health.pb.c +23 -0
  11. data/src/core/ext/filters/client_channel/health/health.pb.h +73 -0
  12. data/src/core/ext/filters/client_channel/health/health_check_client.cc +652 -0
  13. data/src/core/ext/filters/client_channel/health/health_check_client.h +173 -0
  14. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +2 -1
  15. data/src/core/ext/filters/client_channel/http_proxy.cc +1 -1
  16. data/src/core/ext/filters/client_channel/lb_policy.h +17 -14
  17. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +15 -11
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +21 -15
  19. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +18 -10
  20. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +12 -9
  21. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +19 -8
  22. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +1832 -0
  23. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.h +36 -0
  24. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h +36 -0
  25. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc +107 -0
  26. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc +85 -0
  27. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h +72 -0
  28. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc +307 -0
  29. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h +89 -0
  30. data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
  31. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +5 -0
  32. data/src/core/ext/filters/client_channel/lb_policy_registry.h +4 -0
  33. data/src/core/ext/filters/client_channel/parse_address.h +1 -1
  34. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +19 -22
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +41 -39
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +3 -2
  37. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +4 -1
  38. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +15 -2
  39. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +5 -1
  40. data/src/core/ext/filters/client_channel/resolver_factory.h +1 -1
  41. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +384 -0
  42. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +146 -0
  43. data/src/core/ext/filters/client_channel/subchannel.cc +361 -103
  44. data/src/core/ext/filters/client_channel/subchannel.h +14 -8
  45. data/src/core/ext/filters/deadline/deadline_filter.cc +19 -23
  46. data/src/core/ext/filters/deadline/deadline_filter.h +9 -13
  47. data/src/core/ext/filters/http/client/http_client_filter.cc +29 -19
  48. data/src/core/ext/filters/http/client_authority_filter.cc +2 -3
  49. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +28 -16
  50. data/src/core/ext/filters/http/server/http_server_filter.cc +31 -20
  51. data/src/core/ext/filters/message_size/message_size_filter.cc +50 -45
  52. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +13 -6
  53. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +1 -1
  54. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +58 -8
  55. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +1 -1
  56. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +175 -173
  57. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +2 -1
  58. data/src/core/ext/transport/chttp2/transport/frame_data.cc +4 -10
  59. data/src/core/ext/transport/chttp2/transport/frame_data.h +10 -12
  60. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +1 -1
  61. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +28 -25
  62. data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +0 -12
  63. data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +12 -9
  64. data/src/core/ext/transport/chttp2/transport/internal.h +109 -94
  65. data/src/core/ext/transport/chttp2/transport/parsing.cc +4 -2
  66. data/src/core/ext/transport/inproc/inproc_transport.cc +280 -300
  67. data/src/core/lib/channel/channel_stack.cc +5 -4
  68. data/src/core/lib/channel/channel_stack.h +4 -4
  69. data/src/core/lib/channel/channel_stack_builder.cc +14 -2
  70. data/src/core/lib/channel/channel_stack_builder.h +8 -0
  71. data/src/core/lib/channel/channel_trace.cc +6 -2
  72. data/src/core/lib/channel/channelz.cc +137 -5
  73. data/src/core/lib/channel/channelz.h +32 -6
  74. data/src/core/lib/channel/channelz_registry.cc +134 -28
  75. data/src/core/lib/channel/channelz_registry.h +25 -3
  76. data/src/core/lib/channel/context.h +4 -4
  77. data/src/core/lib/channel/handshaker.cc +7 -6
  78. data/src/core/lib/channel/handshaker.h +7 -8
  79. data/src/core/lib/channel/handshaker_factory.cc +3 -2
  80. data/src/core/lib/channel/handshaker_factory.h +2 -0
  81. data/src/core/lib/channel/handshaker_registry.cc +6 -2
  82. data/src/core/lib/channel/handshaker_registry.h +1 -0
  83. data/src/core/lib/gpr/arena.cc +84 -37
  84. data/src/core/lib/gpr/arena.h +2 -0
  85. data/src/core/lib/gpr/mpscq.h +4 -2
  86. data/src/core/lib/gprpp/inlined_vector.h +8 -0
  87. data/src/core/lib/gprpp/ref_counted.h +105 -18
  88. data/src/core/lib/gprpp/ref_counted_ptr.h +11 -0
  89. data/src/core/lib/http/httpcli_security_connector.cc +7 -4
  90. data/src/core/lib/iomgr/call_combiner.cc +2 -0
  91. data/src/core/lib/iomgr/call_combiner.h +2 -2
  92. data/src/core/lib/iomgr/closure.h +1 -0
  93. data/src/core/lib/iomgr/error.cc +16 -31
  94. data/src/core/lib/iomgr/error.h +29 -4
  95. data/src/core/lib/iomgr/error_internal.h +0 -2
  96. data/src/core/lib/iomgr/ev_epoll1_linux.cc +7 -3
  97. data/src/core/lib/iomgr/ev_posix.cc +0 -2
  98. data/src/core/lib/iomgr/polling_entity.h +4 -4
  99. data/src/core/lib/iomgr/resource_quota.cc +64 -10
  100. data/src/core/lib/iomgr/resource_quota.h +21 -6
  101. data/src/core/lib/iomgr/socket_utils_common_posix.cc +11 -5
  102. data/src/core/lib/iomgr/tcp_client_custom.cc +14 -3
  103. data/src/core/lib/iomgr/tcp_client_posix.cc +2 -0
  104. data/src/core/lib/iomgr/tcp_posix.cc +4 -2
  105. data/src/core/lib/iomgr/timer_manager.cc +1 -1
  106. data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +3 -4
  107. data/src/core/lib/security/context/security_context.cc +20 -13
  108. data/src/core/lib/security/context/security_context.h +27 -19
  109. data/src/core/lib/security/credentials/alts/alts_credentials.cc +1 -1
  110. data/src/core/lib/security/credentials/credentials.h +2 -2
  111. data/src/core/lib/security/credentials/fake/fake_credentials.cc +1 -0
  112. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +39 -54
  113. data/src/core/lib/security/credentials/google_default/google_default_credentials.h +3 -2
  114. data/src/core/lib/security/credentials/local/local_credentials.cc +1 -1
  115. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +1 -2
  116. data/src/core/lib/security/credentials/ssl/ssl_credentials.h +2 -0
  117. data/src/core/lib/security/security_connector/{alts_security_connector.cc → alts/alts_security_connector.cc} +10 -9
  118. data/src/core/lib/security/security_connector/{alts_security_connector.h → alts/alts_security_connector.h} +3 -3
  119. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +310 -0
  120. data/src/core/lib/security/security_connector/fake/fake_security_connector.h +42 -0
  121. data/src/core/lib/security/security_connector/{local_security_connector.cc → local/local_security_connector.cc} +4 -3
  122. data/src/core/lib/security/security_connector/{local_security_connector.h → local/local_security_connector.h} +3 -3
  123. data/src/core/lib/security/security_connector/security_connector.cc +4 -1039
  124. data/src/core/lib/security/security_connector/security_connector.h +6 -114
  125. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +474 -0
  126. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.h +77 -0
  127. data/src/core/lib/security/security_connector/ssl_utils.cc +345 -0
  128. data/src/core/lib/security/security_connector/ssl_utils.h +93 -0
  129. data/src/core/lib/security/transport/client_auth_filter.cc +28 -17
  130. data/src/core/lib/security/transport/secure_endpoint.cc +51 -41
  131. data/src/core/lib/security/transport/security_handshaker.cc +6 -7
  132. data/src/core/lib/security/transport/server_auth_filter.cc +39 -31
  133. data/src/core/lib/surface/call.cc +100 -80
  134. data/src/core/lib/surface/call.h +4 -0
  135. data/src/core/lib/surface/channel.cc +27 -13
  136. data/src/core/lib/surface/channel.h +4 -3
  137. data/src/core/lib/surface/completion_queue.cc +8 -1
  138. data/src/core/lib/surface/init.cc +1 -0
  139. data/src/core/lib/surface/server.cc +111 -46
  140. data/src/core/lib/surface/server.h +16 -2
  141. data/src/core/lib/surface/version.cc +2 -2
  142. data/src/core/lib/transport/error_utils.cc +4 -2
  143. data/src/core/lib/transport/metadata.cc +3 -2
  144. data/src/core/lib/transport/metadata.h +3 -2
  145. data/src/core/lib/transport/metadata_batch.cc +1 -0
  146. data/src/core/lib/transport/metadata_batch.h +4 -2
  147. data/src/core/lib/transport/static_metadata.cc +225 -221
  148. data/src/core/lib/transport/static_metadata.h +74 -71
  149. data/src/core/lib/transport/transport.h +44 -26
  150. data/src/core/{ext/filters/client_channel → lib/uri}/uri_parser.cc +1 -1
  151. data/src/core/{ext/filters/client_channel → lib/uri}/uri_parser.h +3 -3
  152. data/src/core/plugin_registry/grpc_plugin_registry.cc +4 -4
  153. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +356 -77
  154. data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +46 -36
  155. data/src/core/tsi/alts/handshaker/alts_shared_resource.cc +83 -0
  156. data/src/core/tsi/alts/handshaker/alts_shared_resource.h +73 -0
  157. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +122 -175
  158. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +33 -22
  159. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +38 -10
  160. data/src/core/tsi/transport_security.cc +18 -1
  161. data/src/core/tsi/transport_security.h +2 -1
  162. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +4 -2
  163. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +6 -3
  164. data/src/ruby/lib/grpc/version.rb +1 -1
  165. data/src/ruby/spec/pb/codegen/grpc/testing/package_options.proto +28 -0
  166. data/src/ruby/spec/pb/codegen/package_option_spec.rb +2 -3
  167. metadata +58 -40
  168. data/src/core/ext/filters/client_channel/method_params.cc +0 -178
  169. data/src/core/ext/filters/client_channel/method_params.h +0 -78
  170. data/src/core/tsi/alts/handshaker/alts_tsi_event.cc +0 -75
  171. data/src/core/tsi/alts/handshaker/alts_tsi_event.h +0 -93
  172. data/src/core/tsi/alts_transport_security.cc +0 -65
  173. data/src/core/tsi/alts_transport_security.h +0 -47
@@ -248,10 +248,13 @@ GRPCAPI void* grpc_call_arena_alloc(grpc_call* call, size_t size);
248
248
  appropriate to call grpc_completion_queue_next or
249
249
  grpc_completion_queue_pluck consequent to the failed grpc_call_start_batch
250
250
  call.
251
+ If a call to grpc_call_start_batch with an empty batch returns
252
+ GRPC_CALL_OK, the tag is put in the completion queue immediately.
251
253
  THREAD SAFETY: access to grpc_call_start_batch in multi-threaded environment
252
254
  needs to be synchronized. As an optimization, you may synchronize batches
253
255
  containing just send operations independently from batches containing just
254
- receive operations. */
256
+ receive operations. Access to grpc_call_start_batch with an empty batch is
257
+ thread-compatible. */
255
258
  GRPCAPI grpc_call_error grpc_call_start_batch(grpc_call* call,
256
259
  const grpc_op* ops, size_t nops,
257
260
  void* tag, void* reserved);
@@ -503,6 +506,13 @@ GRPCAPI char* grpc_channelz_get_top_channels(intptr_t start_channel_id);
503
506
  /* Gets all servers that exist in the process. */
504
507
  GRPCAPI char* grpc_channelz_get_servers(intptr_t start_server_id);
505
508
 
509
+ /* Returns a single Server, or else a NOT_FOUND code. */
510
+ GRPCAPI char* grpc_channelz_get_server(intptr_t server_id);
511
+
512
+ /* Gets all server sockets that exist in the server. */
513
+ GRPCAPI char* grpc_channelz_get_server_sockets(intptr_t server_id,
514
+ intptr_t start_socket_id);
515
+
506
516
  /* Returns a single Channel, or else a NOT_FOUND code. The returned string
507
517
  is allocated and must be freed by the application. */
508
518
  GRPCAPI char* grpc_channelz_get_channel(intptr_t channel_id);
@@ -52,14 +52,6 @@ GRPCAPI grpc_channel* grpc_insecure_channel_create_from_fd(
52
52
  GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
53
53
  void* reserved, int fd);
54
54
 
55
- /** GRPC Core POSIX library may internally use signals to optimize some work.
56
- The library uses (SIGRTMIN + 6) signal by default. Use this API to instruct
57
- the library to use a different signal i.e 'signum' instead.
58
- Note:
59
- - To prevent GRPC library from using any signals, pass a 'signum' of -1
60
- - This API is optional but if called, it MUST be called before grpc_init() */
61
- GRPCAPI void grpc_use_signal(int signum);
62
-
63
55
  #ifdef __cplusplus
64
56
  }
65
57
  #endif
@@ -347,6 +347,9 @@ typedef struct {
347
347
  /** If set to non zero, surfaces the user agent string to the server. User
348
348
  agent is surfaced by default. */
349
349
  #define GRPC_ARG_SURFACE_USER_AGENT "grpc.surface_user_agent"
350
+ /** If set, inhibits health checking (which may be enabled via the
351
+ * service config.) */
352
+ #define GRPC_ARG_INHIBIT_HEALTH_CHECKING "grpc.inhibit_health_checking"
350
353
  /** \} */
351
354
 
352
355
  /** Result of a grpc call. If the caller satisfies the prerequisites of a
@@ -34,9 +34,9 @@
34
34
  #include "src/core/ext/filters/client_channel/backup_poller.h"
35
35
  #include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
36
36
  #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
37
- #include "src/core/ext/filters/client_channel/method_params.h"
38
37
  #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
39
38
  #include "src/core/ext/filters/client_channel/resolver_registry.h"
39
+ #include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
40
40
  #include "src/core/ext/filters/client_channel/retry_throttle.h"
41
41
  #include "src/core/ext/filters/client_channel/subchannel.h"
42
42
  #include "src/core/ext/filters/deadline/deadline_filter.h"
@@ -63,6 +63,8 @@
63
63
  #include "src/core/lib/transport/status_metadata.h"
64
64
 
65
65
  using grpc_core::internal::ClientChannelMethodParams;
66
+ using grpc_core::internal::ClientChannelMethodParamsTable;
67
+ using grpc_core::internal::ProcessedResolverResult;
66
68
  using grpc_core::internal::ServerRetryThrottleData;
67
69
 
68
70
  /* Client channel implementation */
@@ -83,10 +85,6 @@ grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
83
85
 
84
86
  struct external_connectivity_watcher;
85
87
 
86
- typedef grpc_core::SliceHashTable<
87
- grpc_core::RefCountedPtr<ClientChannelMethodParams>>
88
- MethodParamsTable;
89
-
90
88
  typedef struct client_channel_channel_data {
91
89
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
92
90
  bool started_resolving;
@@ -102,7 +100,7 @@ typedef struct client_channel_channel_data {
102
100
  /** retry throttle data */
103
101
  grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
104
102
  /** maps method names to method_parameters structs */
105
- grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
103
+ grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table;
106
104
  /** incoming resolver result - set by resolver.next() */
107
105
  grpc_channel_args* resolver_result;
108
106
  /** a list of closures that are all waiting for resolver result to come in */
@@ -129,6 +127,10 @@ typedef struct client_channel_channel_data {
129
127
  grpc_core::UniquePtr<char> info_lb_policy_name;
130
128
  /** service config in JSON form */
131
129
  grpc_core::UniquePtr<char> info_service_config_json;
130
+ /* backpointer to grpc_channel's channelz node */
131
+ grpc_core::channelz::ClientChannelNode* channelz_channel;
132
+ /* caches if the last resolution event contained addresses */
133
+ bool previous_resolution_contained_addresses;
132
134
  } channel_data;
133
135
 
134
136
  typedef struct {
@@ -153,6 +155,23 @@ static void watch_lb_policy_locked(channel_data* chand,
153
155
  grpc_core::LoadBalancingPolicy* lb_policy,
154
156
  grpc_connectivity_state current_state);
155
157
 
158
+ static const char* channel_connectivity_state_change_string(
159
+ grpc_connectivity_state state) {
160
+ switch (state) {
161
+ case GRPC_CHANNEL_IDLE:
162
+ return "Channel state change to IDLE";
163
+ case GRPC_CHANNEL_CONNECTING:
164
+ return "Channel state change to CONNECTING";
165
+ case GRPC_CHANNEL_READY:
166
+ return "Channel state change to READY";
167
+ case GRPC_CHANNEL_TRANSIENT_FAILURE:
168
+ return "Channel state change to TRANSIENT_FAILURE";
169
+ case GRPC_CHANNEL_SHUTDOWN:
170
+ return "Channel state change to SHUTDOWN";
171
+ }
172
+ GPR_UNREACHABLE_CODE(return "UNKNOWN");
173
+ }
174
+
156
175
  static void set_channel_connectivity_state_locked(channel_data* chand,
157
176
  grpc_connectivity_state state,
158
177
  grpc_error* error,
@@ -177,6 +196,12 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
177
196
  gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
178
197
  grpc_connectivity_state_name(state));
179
198
  }
199
+ if (chand->channelz_channel != nullptr) {
200
+ chand->channelz_channel->AddTraceEvent(
201
+ grpc_core::channelz::ChannelTrace::Severity::Info,
202
+ grpc_slice_from_static_string(
203
+ channel_connectivity_state_change_string(state)));
204
+ }
180
205
  grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
181
206
  }
182
207
 
@@ -224,66 +249,6 @@ static void start_resolving_locked(channel_data* chand) {
224
249
  &chand->on_resolver_result_changed);
225
250
  }
226
251
 
227
- typedef struct {
228
- char* server_name;
229
- grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
230
- } service_config_parsing_state;
231
-
232
- static void parse_retry_throttle_params(
233
- const grpc_json* field, service_config_parsing_state* parsing_state) {
234
- if (strcmp(field->key, "retryThrottling") == 0) {
235
- if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
236
- if (field->type != GRPC_JSON_OBJECT) return;
237
- int max_milli_tokens = 0;
238
- int milli_token_ratio = 0;
239
- for (grpc_json* sub_field = field->child; sub_field != nullptr;
240
- sub_field = sub_field->next) {
241
- if (sub_field->key == nullptr) return;
242
- if (strcmp(sub_field->key, "maxTokens") == 0) {
243
- if (max_milli_tokens != 0) return; // Duplicate.
244
- if (sub_field->type != GRPC_JSON_NUMBER) return;
245
- max_milli_tokens = gpr_parse_nonnegative_int(sub_field->value);
246
- if (max_milli_tokens == -1) return;
247
- max_milli_tokens *= 1000;
248
- } else if (strcmp(sub_field->key, "tokenRatio") == 0) {
249
- if (milli_token_ratio != 0) return; // Duplicate.
250
- if (sub_field->type != GRPC_JSON_NUMBER) return;
251
- // We support up to 3 decimal digits.
252
- size_t whole_len = strlen(sub_field->value);
253
- uint32_t multiplier = 1;
254
- uint32_t decimal_value = 0;
255
- const char* decimal_point = strchr(sub_field->value, '.');
256
- if (decimal_point != nullptr) {
257
- whole_len = static_cast<size_t>(decimal_point - sub_field->value);
258
- multiplier = 1000;
259
- size_t decimal_len = strlen(decimal_point + 1);
260
- if (decimal_len > 3) decimal_len = 3;
261
- if (!gpr_parse_bytes_to_uint32(decimal_point + 1, decimal_len,
262
- &decimal_value)) {
263
- return;
264
- }
265
- uint32_t decimal_multiplier = 1;
266
- for (size_t i = 0; i < (3 - decimal_len); ++i) {
267
- decimal_multiplier *= 10;
268
- }
269
- decimal_value *= decimal_multiplier;
270
- }
271
- uint32_t whole_value;
272
- if (!gpr_parse_bytes_to_uint32(sub_field->value, whole_len,
273
- &whole_value)) {
274
- return;
275
- }
276
- milli_token_ratio =
277
- static_cast<int>((whole_value * multiplier) + decimal_value);
278
- if (milli_token_ratio <= 0) return;
279
- }
280
- }
281
- parsing_state->retry_throttle_data =
282
- grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
283
- parsing_state->server_name, max_milli_tokens, milli_token_ratio);
284
- }
285
- }
286
-
287
252
  // Invoked from the resolver NextLocked() callback when the resolver
288
253
  // is shutting down.
289
254
  static void on_resolver_shutdown_locked(channel_data* chand,
@@ -325,37 +290,6 @@ static void on_resolver_shutdown_locked(channel_data* chand,
325
290
  GRPC_ERROR_UNREF(error);
326
291
  }
327
292
 
328
- // Returns the LB policy name from the resolver result.
329
- static grpc_core::UniquePtr<char>
330
- get_lb_policy_name_from_resolver_result_locked(channel_data* chand) {
331
- // Find LB policy name in channel args.
332
- const grpc_arg* channel_arg =
333
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
334
- const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
335
- // Special case: If at least one balancer address is present, we use
336
- // the grpclb policy, regardless of what the resolver actually specified.
337
- channel_arg =
338
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
339
- if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
340
- grpc_lb_addresses* addresses =
341
- static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
342
- if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
343
- if (lb_policy_name != nullptr &&
344
- gpr_stricmp(lb_policy_name, "grpclb") != 0) {
345
- gpr_log(GPR_INFO,
346
- "resolver requested LB policy %s but provided at least one "
347
- "balancer address -- forcing use of grpclb LB policy",
348
- lb_policy_name);
349
- }
350
- lb_policy_name = "grpclb";
351
- }
352
- }
353
- // Use pick_first if nothing was specified and we didn't select grpclb
354
- // above.
355
- if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
356
- return grpc_core::UniquePtr<char>(gpr_strdup(lb_policy_name));
357
- }
358
-
359
293
  static void request_reresolution_locked(void* arg, grpc_error* error) {
360
294
  reresolution_request_args* args =
361
295
  static_cast<reresolution_request_args*>(arg);
@@ -376,28 +310,41 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
376
310
  chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
377
311
  }
378
312
 
313
+ using TraceStringVector = grpc_core::InlinedVector<char*, 3>;
314
+
379
315
  // Creates a new LB policy, replacing any previous one.
380
316
  // If the new policy is created successfully, sets *connectivity_state and
381
317
  // *connectivity_error to its initial connectivity state; otherwise,
382
318
  // leaves them unchanged.
383
319
  static void create_new_lb_policy_locked(
384
- channel_data* chand, char* lb_policy_name,
320
+ channel_data* chand, char* lb_policy_name, grpc_json* lb_config,
385
321
  grpc_connectivity_state* connectivity_state,
386
- grpc_error** connectivity_error) {
322
+ grpc_error** connectivity_error, TraceStringVector* trace_strings) {
387
323
  grpc_core::LoadBalancingPolicy::Args lb_policy_args;
388
324
  lb_policy_args.combiner = chand->combiner;
389
325
  lb_policy_args.client_channel_factory = chand->client_channel_factory;
390
326
  lb_policy_args.args = chand->resolver_result;
327
+ lb_policy_args.lb_config = lb_config;
391
328
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy =
392
329
  grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
393
330
  lb_policy_name, lb_policy_args);
394
331
  if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
395
332
  gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
333
+ if (chand->channelz_channel != nullptr) {
334
+ char* str;
335
+ gpr_asprintf(&str, "Could not create LB policy \'%s\'", lb_policy_name);
336
+ trace_strings->push_back(str);
337
+ }
396
338
  } else {
397
339
  if (grpc_client_channel_trace.enabled()) {
398
340
  gpr_log(GPR_INFO, "chand=%p: created new LB policy \"%s\" (%p)", chand,
399
341
  lb_policy_name, new_lb_policy.get());
400
342
  }
343
+ if (chand->channelz_channel != nullptr) {
344
+ char* str;
345
+ gpr_asprintf(&str, "Created new LB policy \'%s\'", lb_policy_name);
346
+ trace_strings->push_back(str);
347
+ }
401
348
  // Swap out the LB policy and update the fds in
402
349
  // chand->interested_parties.
403
350
  if (chand->lb_policy != nullptr) {
@@ -434,42 +381,49 @@ static void create_new_lb_policy_locked(
434
381
  }
435
382
  }
436
383
 
437
- // Returns the service config (as a JSON string) from the resolver result.
438
- // Also updates state in chand.
439
- static grpc_core::UniquePtr<char>
440
- get_service_config_from_resolver_result_locked(channel_data* chand) {
384
+ static void maybe_add_trace_message_for_address_changes_locked(
385
+ channel_data* chand, TraceStringVector* trace_strings) {
386
+ int resolution_contains_addresses = false;
441
387
  const grpc_arg* channel_arg =
442
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
443
- const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
444
- if (service_config_json != nullptr) {
445
- if (grpc_client_channel_trace.enabled()) {
446
- gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
447
- chand, service_config_json);
448
- }
449
- grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
450
- grpc_core::ServiceConfig::Create(service_config_json);
451
- if (service_config != nullptr) {
452
- if (chand->enable_retries) {
453
- channel_arg =
454
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
455
- const char* server_uri = grpc_channel_arg_get_string(channel_arg);
456
- GPR_ASSERT(server_uri != nullptr);
457
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
458
- GPR_ASSERT(uri->path[0] != '\0');
459
- service_config_parsing_state parsing_state;
460
- parsing_state.server_name =
461
- uri->path[0] == '/' ? uri->path + 1 : uri->path;
462
- service_config->ParseGlobalParams(parse_retry_throttle_params,
463
- &parsing_state);
464
- grpc_uri_destroy(uri);
465
- chand->retry_throttle_data =
466
- std::move(parsing_state.retry_throttle_data);
467
- }
468
- chand->method_params_table = service_config->CreateMethodConfigTable(
469
- ClientChannelMethodParams::CreateFromJson);
388
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
389
+ if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
390
+ grpc_lb_addresses* addresses =
391
+ static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
392
+ if (addresses->num_addresses > 0) {
393
+ resolution_contains_addresses = true;
394
+ }
395
+ }
396
+ if (!resolution_contains_addresses &&
397
+ chand->previous_resolution_contained_addresses) {
398
+ trace_strings->push_back(gpr_strdup("Address list became empty"));
399
+ } else if (resolution_contains_addresses &&
400
+ !chand->previous_resolution_contained_addresses) {
401
+ trace_strings->push_back(gpr_strdup("Address list became non-empty"));
402
+ }
403
+ chand->previous_resolution_contained_addresses =
404
+ resolution_contains_addresses;
405
+ }
406
+
407
+ static void concatenate_and_add_channel_trace_locked(
408
+ channel_data* chand, TraceStringVector* trace_strings) {
409
+ if (!trace_strings->empty()) {
410
+ gpr_strvec v;
411
+ gpr_strvec_init(&v);
412
+ gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
413
+ bool is_first = 1;
414
+ for (size_t i = 0; i < trace_strings->size(); ++i) {
415
+ if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
416
+ is_first = false;
417
+ gpr_strvec_add(&v, (*trace_strings)[i]);
470
418
  }
419
+ char* flat;
420
+ size_t flat_len = 0;
421
+ flat = gpr_strvec_flatten(&v, &flat_len);
422
+ chand->channelz_channel->AddTraceEvent(
423
+ grpc_core::channelz::ChannelTrace::Severity::Info,
424
+ grpc_slice_new(flat, flat_len, gpr_free));
425
+ gpr_strvec_destroy(&v);
471
426
  }
472
- return grpc_core::UniquePtr<char>(gpr_strdup(service_config_json));
473
427
  }
474
428
 
475
429
  // Callback invoked when a resolver result is available.
@@ -493,6 +447,16 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
493
447
  }
494
448
  // Data used to set the channel's connectivity state.
495
449
  bool set_connectivity_state = true;
450
+ // We only want to trace the address resolution in the follow cases:
451
+ // (a) Address resolution resulted in service config change.
452
+ // (b) Address resolution that causes number of backends to go from
453
+ // zero to non-zero.
454
+ // (c) Address resolution that causes number of backends to go from
455
+ // non-zero to zero.
456
+ // (d) Address resolution that causes a new LB policy to be created.
457
+ //
458
+ // we track a list of strings to eventually be concatenated and traced.
459
+ TraceStringVector trace_strings;
496
460
  grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
497
461
  grpc_error* connectivity_error =
498
462
  GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
@@ -503,9 +467,23 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
503
467
  if (grpc_client_channel_trace.enabled()) {
504
468
  gpr_log(GPR_INFO, "chand=%p: resolver transient failure", chand);
505
469
  }
470
+ // Don't override connectivity state if we already have an LB policy.
471
+ if (chand->lb_policy != nullptr) set_connectivity_state = false;
506
472
  } else {
473
+ // Parse the resolver result.
474
+ ProcessedResolverResult resolver_result(chand->resolver_result,
475
+ chand->enable_retries);
476
+ chand->retry_throttle_data = resolver_result.retry_throttle_data();
477
+ chand->method_params_table = resolver_result.method_params_table();
478
+ grpc_core::UniquePtr<char> service_config_json =
479
+ resolver_result.service_config_json();
480
+ if (service_config_json != nullptr && grpc_client_channel_trace.enabled()) {
481
+ gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
482
+ chand, service_config_json.get());
483
+ }
507
484
  grpc_core::UniquePtr<char> lb_policy_name =
508
- get_lb_policy_name_from_resolver_result_locked(chand);
485
+ resolver_result.lb_policy_name();
486
+ grpc_json* lb_policy_config = resolver_result.lb_policy_config();
509
487
  // Check to see if we're already using the right LB policy.
510
488
  // Note: It's safe to use chand->info_lb_policy_name here without
511
489
  // taking a lock on chand->info_mu, because this function is the
@@ -520,18 +498,33 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
520
498
  gpr_log(GPR_INFO, "chand=%p: updating existing LB policy \"%s\" (%p)",
521
499
  chand, lb_policy_name.get(), chand->lb_policy.get());
522
500
  }
523
- chand->lb_policy->UpdateLocked(*chand->resolver_result);
501
+ chand->lb_policy->UpdateLocked(*chand->resolver_result, lb_policy_config);
524
502
  // No need to set the channel's connectivity state; the existing
525
503
  // watch on the LB policy will take care of that.
526
504
  set_connectivity_state = false;
527
505
  } else {
528
506
  // Instantiate new LB policy.
529
- create_new_lb_policy_locked(chand, lb_policy_name.get(),
530
- &connectivity_state, &connectivity_error);
507
+ create_new_lb_policy_locked(chand, lb_policy_name.get(), lb_policy_config,
508
+ &connectivity_state, &connectivity_error,
509
+ &trace_strings);
510
+ }
511
+ // Note: It's safe to use chand->info_service_config_json here without
512
+ // taking a lock on chand->info_mu, because this function is the
513
+ // only thing that modifies its value, and it can only be invoked
514
+ // once at any given time.
515
+ if (chand->channelz_channel != nullptr) {
516
+ if (((service_config_json == nullptr) !=
517
+ (chand->info_service_config_json == nullptr)) ||
518
+ (service_config_json != nullptr &&
519
+ strcmp(service_config_json.get(),
520
+ chand->info_service_config_json.get()) != 0)) {
521
+ // TODO(ncteisen): might be worth somehow including a snippet of the
522
+ // config in the trace, at the risk of bloating the trace logs.
523
+ trace_strings.push_back(gpr_strdup("Service config changed"));
524
+ }
525
+ maybe_add_trace_message_for_address_changes_locked(chand, &trace_strings);
526
+ concatenate_and_add_channel_trace_locked(chand, &trace_strings);
531
527
  }
532
- // Find service config.
533
- grpc_core::UniquePtr<char> service_config_json =
534
- get_service_config_from_resolver_result_locked(chand);
535
528
  // Swap out the data used by cc_get_channel_info().
536
529
  gpr_mu_lock(&chand->info_mu);
537
530
  chand->info_lb_policy_name = std::move(lb_policy_name);
@@ -699,6 +692,8 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
699
692
  // Record enable_retries.
700
693
  arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
701
694
  chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
695
+ chand->channelz_channel = nullptr;
696
+ chand->previous_resolution_contained_addresses = false;
702
697
  // Record client channel factory.
703
698
  arg = grpc_channel_args_find(args->channel_args,
704
699
  GRPC_ARG_CLIENT_CHANNEL_FACTORY);
@@ -824,12 +819,26 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
824
819
  // (census filter is on top of this one)
825
820
  // - add census stats for retries
826
821
 
822
+ namespace {
823
+ struct call_data;
824
+
827
825
  // State used for starting a retryable batch on a subchannel call.
828
826
  // This provides its own grpc_transport_stream_op_batch and other data
829
827
  // structures needed to populate the ops in the batch.
830
828
  // We allocate one struct on the arena for each attempt at starting a
831
829
  // batch on a given subchannel call.
832
- typedef struct {
830
+ struct subchannel_batch_data {
831
+ subchannel_batch_data(grpc_call_element* elem, call_data* calld, int refcount,
832
+ bool set_on_complete);
833
+ // All dtor code must be added in `destroy`. This is because we may
834
+ // call closures in `subchannel_batch_data` after they are unrefed by
835
+ // `batch_data_unref`, and msan would complain about accessing this class
836
+ // after calling dtor. As a result we cannot call the `dtor` in
837
+ // `batch_data_unref`.
838
+ // TODO(soheil): We should try to call the dtor in `batch_data_unref`.
839
+ ~subchannel_batch_data() { destroy(); }
840
+ void destroy();
841
+
833
842
  gpr_refcount refs;
834
843
  grpc_call_element* elem;
835
844
  grpc_subchannel_call* subchannel_call; // Holds a ref.
@@ -838,11 +847,23 @@ typedef struct {
838
847
  grpc_transport_stream_op_batch batch;
839
848
  // For intercepting on_complete.
840
849
  grpc_closure on_complete;
841
- } subchannel_batch_data;
850
+ };
842
851
 
843
852
  // Retry state associated with a subchannel call.
844
853
  // Stored in the parent_data of the subchannel call object.
845
- typedef struct {
854
+ struct subchannel_call_retry_state {
855
+ explicit subchannel_call_retry_state(grpc_call_context_element* context)
856
+ : batch_payload(context),
857
+ started_send_initial_metadata(false),
858
+ completed_send_initial_metadata(false),
859
+ started_send_trailing_metadata(false),
860
+ completed_send_trailing_metadata(false),
861
+ started_recv_initial_metadata(false),
862
+ completed_recv_initial_metadata(false),
863
+ started_recv_trailing_metadata(false),
864
+ completed_recv_trailing_metadata(false),
865
+ retry_dispatched(false) {}
866
+
846
867
  // subchannel_batch_data.batch.payload points to this.
847
868
  grpc_transport_stream_op_batch_payload batch_payload;
848
869
  // For send_initial_metadata.
@@ -861,7 +882,7 @@ typedef struct {
861
882
  // For intercepting recv_initial_metadata.
862
883
  grpc_metadata_batch recv_initial_metadata;
863
884
  grpc_closure recv_initial_metadata_ready;
864
- bool trailing_metadata_available;
885
+ bool trailing_metadata_available = false;
865
886
  // For intercepting recv_message.
866
887
  grpc_closure recv_message_ready;
867
888
  grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_message;
@@ -871,10 +892,10 @@ typedef struct {
871
892
  grpc_closure recv_trailing_metadata_ready;
872
893
  // These fields indicate which ops have been started and completed on
873
894
  // this subchannel call.
874
- size_t started_send_message_count;
875
- size_t completed_send_message_count;
876
- size_t started_recv_message_count;
877
- size_t completed_recv_message_count;
895
+ size_t started_send_message_count = 0;
896
+ size_t completed_send_message_count = 0;
897
+ size_t started_recv_message_count = 0;
898
+ size_t completed_recv_message_count = 0;
878
899
  bool started_send_initial_metadata : 1;
879
900
  bool completed_send_initial_metadata : 1;
880
901
  bool started_send_trailing_metadata : 1;
@@ -883,14 +904,18 @@ typedef struct {
883
904
  bool completed_recv_initial_metadata : 1;
884
905
  bool started_recv_trailing_metadata : 1;
885
906
  bool completed_recv_trailing_metadata : 1;
907
+ subchannel_batch_data* recv_initial_metadata_ready_deferred_batch = nullptr;
908
+ grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
909
+ subchannel_batch_data* recv_message_ready_deferred_batch = nullptr;
910
+ grpc_error* recv_message_error = GRPC_ERROR_NONE;
911
+ subchannel_batch_data* recv_trailing_metadata_internal_batch = nullptr;
886
912
  // State for callback processing.
913
+ // NOTE: Do not move this next to the metadata bitfields above. That would
914
+ // save space but will also result in a data race because compiler will
915
+ // generate a 2 byte store which overwrites the meta-data fields upon
916
+ // setting this field.
887
917
  bool retry_dispatched : 1;
888
- subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
889
- grpc_error* recv_initial_metadata_error;
890
- subchannel_batch_data* recv_message_ready_deferred_batch;
891
- grpc_error* recv_message_error;
892
- subchannel_batch_data* recv_trailing_metadata_internal_batch;
893
- } subchannel_call_retry_state;
918
+ };
894
919
 
895
920
  // Pending batches stored in call data.
896
921
  typedef struct {
@@ -905,7 +930,44 @@ typedef struct {
905
930
  Handles queueing of stream ops until a call object is ready, waiting
906
931
  for initial metadata before trying to create a call object,
907
932
  and handling cancellation gracefully. */
908
- typedef struct client_channel_call_data {
933
+ struct call_data {
934
+ call_data(grpc_call_element* elem, const channel_data& chand,
935
+ const grpc_call_element_args& args)
936
+ : deadline_state(elem, args.call_stack, args.call_combiner,
937
+ GPR_LIKELY(chand.deadline_checking_enabled)
938
+ ? args.deadline
939
+ : GRPC_MILLIS_INF_FUTURE),
940
+ path(grpc_slice_ref_internal(args.path)),
941
+ call_start_time(args.start_time),
942
+ deadline(args.deadline),
943
+ arena(args.arena),
944
+ owning_call(args.call_stack),
945
+ call_combiner(args.call_combiner),
946
+ pending_send_initial_metadata(false),
947
+ pending_send_message(false),
948
+ pending_send_trailing_metadata(false),
949
+ enable_retries(chand.enable_retries),
950
+ retry_committed(false),
951
+ last_attempt_got_server_pushback(false) {}
952
+
953
+ ~call_data() {
954
+ if (GPR_LIKELY(subchannel_call != nullptr)) {
955
+ GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call,
956
+ "client_channel_destroy_call");
957
+ }
958
+ grpc_slice_unref_internal(path);
959
+ GRPC_ERROR_UNREF(cancel_error);
960
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
961
+ GPR_ASSERT(pending_batches[i].batch == nullptr);
962
+ }
963
+ for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
964
+ if (pick.subchannel_call_context[i].value != nullptr) {
965
+ pick.subchannel_call_context[i].destroy(
966
+ pick.subchannel_call_context[i].value);
967
+ }
968
+ }
969
+ }
970
+
909
971
  // State for handling deadlines.
910
972
  // The code in deadline_filter.c requires this to be the first field.
911
973
  // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
@@ -924,29 +986,24 @@ typedef struct client_channel_call_data {
924
986
  grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
925
987
  grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
926
988
 
927
- grpc_subchannel_call* subchannel_call;
989
+ grpc_subchannel_call* subchannel_call = nullptr;
928
990
 
929
991
  // Set when we get a cancel_stream op.
930
- grpc_error* cancel_error;
992
+ grpc_error* cancel_error = GRPC_ERROR_NONE;
931
993
 
932
994
  grpc_core::LoadBalancingPolicy::PickState pick;
933
995
  grpc_closure pick_closure;
934
996
  grpc_closure pick_cancel_closure;
935
997
 
936
- // state needed to support channelz interception of recv trailing metadata.
937
- grpc_closure recv_trailing_metadata_ready_channelz;
938
- grpc_closure* original_recv_trailing_metadata;
939
- grpc_metadata_batch* recv_trailing_metadata;
940
-
941
- grpc_polling_entity* pollent;
942
- bool pollent_added_to_interested_parties;
998
+ grpc_polling_entity* pollent = nullptr;
999
+ bool pollent_added_to_interested_parties = false;
943
1000
 
944
1001
  // Batches are added to this list when received from above.
945
1002
  // They are removed when we are done handling the batch (i.e., when
946
1003
  // either we have invoked all of the batch's callbacks or we have
947
1004
  // passed the batch down to the subchannel call and are not
948
1005
  // intercepting any of its callbacks).
949
- pending_batch pending_batches[MAX_PENDING_BATCHES];
1006
+ pending_batch pending_batches[MAX_PENDING_BATCHES] = {};
950
1007
  bool pending_send_initial_metadata : 1;
951
1008
  bool pending_send_message : 1;
952
1009
  bool pending_send_trailing_metadata : 1;
@@ -955,8 +1012,8 @@ typedef struct client_channel_call_data {
955
1012
  bool enable_retries : 1;
956
1013
  bool retry_committed : 1;
957
1014
  bool last_attempt_got_server_pushback : 1;
958
- int num_attempts_completed;
959
- size_t bytes_buffered_for_retry;
1015
+ int num_attempts_completed = 0;
1016
+ size_t bytes_buffered_for_retry = 0;
960
1017
  grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
961
1018
  grpc_timer retry_timer;
962
1019
 
@@ -967,12 +1024,12 @@ typedef struct client_channel_call_data {
967
1024
  // until all of these batches have completed.
968
1025
  // Note that we actually only need to track replay batches, but it's
969
1026
  // easier to track all batches with send ops.
970
- int num_pending_retriable_subchannel_send_batches;
1027
+ int num_pending_retriable_subchannel_send_batches = 0;
971
1028
 
972
1029
  // Cached data for retrying send ops.
973
1030
  // send_initial_metadata
974
- bool seen_send_initial_metadata;
975
- grpc_linked_mdelem* send_initial_metadata_storage;
1031
+ bool seen_send_initial_metadata = false;
1032
+ grpc_linked_mdelem* send_initial_metadata_storage = nullptr;
976
1033
  grpc_metadata_batch send_initial_metadata;
977
1034
  uint32_t send_initial_metadata_flags;
978
1035
  gpr_atm* peer_string;
@@ -983,14 +1040,13 @@ typedef struct client_channel_call_data {
983
1040
  // Note: We inline the cache for the first 3 send_message ops and use
984
1041
  // dynamic allocation after that. This number was essentially picked
985
1042
  // at random; it could be changed in the future to tune performance.
986
- grpc_core::ManualConstructor<
987
- grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3>>
988
- send_messages;
1043
+ grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
989
1044
  // send_trailing_metadata
990
- bool seen_send_trailing_metadata;
991
- grpc_linked_mdelem* send_trailing_metadata_storage;
1045
+ bool seen_send_trailing_metadata = false;
1046
+ grpc_linked_mdelem* send_trailing_metadata_storage = nullptr;
992
1047
  grpc_metadata_batch send_trailing_metadata;
993
- } call_data;
1048
+ };
1049
+ } // namespace
994
1050
 
995
1051
  // Forward declarations.
996
1052
  static void retry_commit(grpc_call_element* elem,
@@ -999,8 +1055,6 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
999
1055
  static void on_complete(void* arg, grpc_error* error);
1000
1056
  static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
1001
1057
  static void start_pick_locked(void* arg, grpc_error* ignored);
1002
- static void maybe_intercept_recv_trailing_metadata_for_channelz(
1003
- grpc_call_element* elem, grpc_transport_stream_op_batch* batch);
1004
1058
 
1005
1059
  //
1006
1060
  // send op data caching
@@ -1036,7 +1090,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
1036
1090
  gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
1037
1091
  new (cache) grpc_core::ByteStreamCache(
1038
1092
  std::move(batch->payload->send_message.send_message));
1039
- calld->send_messages->push_back(cache);
1093
+ calld->send_messages.push_back(cache);
1040
1094
  }
1041
1095
  // Save metadata batch for send_trailing_metadata ops.
1042
1096
  if (batch->send_trailing_metadata) {
@@ -1073,7 +1127,7 @@ static void free_cached_send_message(channel_data* chand, call_data* calld,
1073
1127
  "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
1074
1128
  chand, calld, idx);
1075
1129
  }
1076
- (*calld->send_messages)[idx]->Destroy();
1130
+ calld->send_messages[idx]->Destroy();
1077
1131
  }
1078
1132
 
1079
1133
  // Frees cached send_trailing_metadata.
@@ -1299,7 +1353,6 @@ static void pending_batches_resume(grpc_call_element* elem) {
1299
1353
  pending_batch* pending = &calld->pending_batches[i];
1300
1354
  grpc_transport_stream_op_batch* batch = pending->batch;
1301
1355
  if (batch != nullptr) {
1302
- maybe_intercept_recv_trailing_metadata_for_channelz(elem, batch);
1303
1356
  batch->handler_private.extra_arg = calld->subchannel_call;
1304
1357
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
1305
1358
  resume_pending_batch_in_call_combiner, batch,
@@ -1544,55 +1597,66 @@ static bool maybe_retry(grpc_call_element* elem,
1544
1597
  // subchannel_batch_data
1545
1598
  //
1546
1599
 
1547
- // Creates a subchannel_batch_data object on the call's arena with the
1548
- // specified refcount. If set_on_complete is true, the batch's
1549
- // on_complete callback will be set to point to on_complete();
1550
- // otherwise, the batch's on_complete callback will be null.
1551
- static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
1552
- int refcount,
1553
- bool set_on_complete) {
1554
- call_data* calld = static_cast<call_data*>(elem->call_data);
1600
+ namespace {
1601
+ subchannel_batch_data::subchannel_batch_data(grpc_call_element* elem,
1602
+ call_data* calld, int refcount,
1603
+ bool set_on_complete)
1604
+ : elem(elem),
1605
+ subchannel_call(GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call,
1606
+ "batch_data_create")) {
1555
1607
  subchannel_call_retry_state* retry_state =
1556
1608
  static_cast<subchannel_call_retry_state*>(
1557
1609
  grpc_connected_subchannel_call_get_parent_data(
1558
1610
  calld->subchannel_call));
1559
- subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
1560
- gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
1561
- batch_data->elem = elem;
1562
- batch_data->subchannel_call =
1563
- GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
1564
- batch_data->batch.payload = &retry_state->batch_payload;
1565
- gpr_ref_init(&batch_data->refs, refcount);
1611
+ batch.payload = &retry_state->batch_payload;
1612
+ gpr_ref_init(&refs, refcount);
1566
1613
  if (set_on_complete) {
1567
- GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
1614
+ GRPC_CLOSURE_INIT(&on_complete, ::on_complete, this,
1568
1615
  grpc_schedule_on_exec_ctx);
1569
- batch_data->batch.on_complete = &batch_data->on_complete;
1616
+ batch.on_complete = &on_complete;
1570
1617
  }
1571
1618
  GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
1619
+ }
1620
+
1621
+ void subchannel_batch_data::destroy() {
1622
+ subchannel_call_retry_state* retry_state =
1623
+ static_cast<subchannel_call_retry_state*>(
1624
+ grpc_connected_subchannel_call_get_parent_data(subchannel_call));
1625
+ if (batch.send_initial_metadata) {
1626
+ grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
1627
+ }
1628
+ if (batch.send_trailing_metadata) {
1629
+ grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
1630
+ }
1631
+ if (batch.recv_initial_metadata) {
1632
+ grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
1633
+ }
1634
+ if (batch.recv_trailing_metadata) {
1635
+ grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
1636
+ }
1637
+ GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "batch_data_unref");
1638
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1639
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
1640
+ }
1641
+ } // namespace
1642
+
1643
+ // Creates a subchannel_batch_data object on the call's arena with the
1644
+ // specified refcount. If set_on_complete is true, the batch's
1645
+ // on_complete callback will be set to point to on_complete();
1646
+ // otherwise, the batch's on_complete callback will be null.
1647
+ static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
1648
+ int refcount,
1649
+ bool set_on_complete) {
1650
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1651
+ subchannel_batch_data* batch_data =
1652
+ new (gpr_arena_alloc(calld->arena, sizeof(*batch_data)))
1653
+ subchannel_batch_data(elem, calld, refcount, set_on_complete);
1572
1654
  return batch_data;
1573
1655
  }
1574
1656
 
1575
1657
  static void batch_data_unref(subchannel_batch_data* batch_data) {
1576
1658
  if (gpr_unref(&batch_data->refs)) {
1577
- subchannel_call_retry_state* retry_state =
1578
- static_cast<subchannel_call_retry_state*>(
1579
- grpc_connected_subchannel_call_get_parent_data(
1580
- batch_data->subchannel_call));
1581
- if (batch_data->batch.send_initial_metadata) {
1582
- grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
1583
- }
1584
- if (batch_data->batch.send_trailing_metadata) {
1585
- grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
1586
- }
1587
- if (batch_data->batch.recv_initial_metadata) {
1588
- grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
1589
- }
1590
- if (batch_data->batch.recv_trailing_metadata) {
1591
- grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
1592
- }
1593
- GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
1594
- call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
1595
- GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
1659
+ batch_data->destroy();
1596
1660
  }
1597
1661
  }
1598
1662
 
@@ -1890,7 +1954,7 @@ static bool pending_batch_is_unstarted(
1890
1954
  return true;
1891
1955
  }
1892
1956
  if (pending->batch->send_message &&
1893
- retry_state->started_send_message_count < calld->send_messages->size()) {
1957
+ retry_state->started_send_message_count < calld->send_messages.size()) {
1894
1958
  return true;
1895
1959
  }
1896
1960
  if (pending->batch->send_trailing_metadata &&
@@ -1977,15 +2041,6 @@ static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
1977
2041
  batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata;
1978
2042
  get_call_status(elem, md_batch, GRPC_ERROR_REF(error), &status,
1979
2043
  &server_pushback_md);
1980
- grpc_core::channelz::SubchannelNode* channelz_subchannel =
1981
- calld->pick.connected_subchannel->channelz_subchannel();
1982
- if (channelz_subchannel != nullptr) {
1983
- if (status == GRPC_STATUS_OK) {
1984
- channelz_subchannel->RecordCallSucceeded();
1985
- } else {
1986
- channelz_subchannel->RecordCallFailed();
1987
- }
1988
- }
1989
2044
  if (grpc_client_channel_trace.enabled()) {
1990
2045
  gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
1991
2046
  calld, grpc_status_code_to_string(status));
@@ -2055,7 +2110,7 @@ static void add_closures_for_replay_or_pending_send_ops(
2055
2110
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2056
2111
  call_data* calld = static_cast<call_data*>(elem->call_data);
2057
2112
  bool have_pending_send_message_ops =
2058
- retry_state->started_send_message_count < calld->send_messages->size();
2113
+ retry_state->started_send_message_count < calld->send_messages.size();
2059
2114
  bool have_pending_send_trailing_metadata_op =
2060
2115
  calld->seen_send_trailing_metadata &&
2061
2116
  !retry_state->started_send_trailing_metadata;
@@ -2211,9 +2266,9 @@ static void add_retriable_send_initial_metadata_op(
2211
2266
  .grpc_previous_rpc_attempts);
2212
2267
  }
2213
2268
  if (GPR_UNLIKELY(calld->num_attempts_completed > 0)) {
2214
- grpc_mdelem retry_md = grpc_mdelem_from_slices(
2269
+ grpc_mdelem retry_md = grpc_mdelem_create(
2215
2270
  GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
2216
- *retry_count_strings[calld->num_attempts_completed - 1]);
2271
+ *retry_count_strings[calld->num_attempts_completed - 1], nullptr);
2217
2272
  grpc_error* error = grpc_metadata_batch_add_tail(
2218
2273
  &retry_state->send_initial_metadata,
2219
2274
  &retry_state->send_initial_metadata_storage[calld->send_initial_metadata
@@ -2247,7 +2302,7 @@ static void add_retriable_send_message_op(
2247
2302
  chand, calld, retry_state->started_send_message_count);
2248
2303
  }
2249
2304
  grpc_core::ByteStreamCache* cache =
2250
- (*calld->send_messages)[retry_state->started_send_message_count];
2305
+ calld->send_messages[retry_state->started_send_message_count];
2251
2306
  ++retry_state->started_send_message_count;
2252
2307
  retry_state->send_message.Init(cache);
2253
2308
  batch_data->batch.send_message = true;
@@ -2379,7 +2434,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2379
2434
  }
2380
2435
  // send_message.
2381
2436
  // Note that we can only have one send_message op in flight at a time.
2382
- if (retry_state->started_send_message_count < calld->send_messages->size() &&
2437
+ if (retry_state->started_send_message_count < calld->send_messages.size() &&
2383
2438
  retry_state->started_send_message_count ==
2384
2439
  retry_state->completed_send_message_count &&
2385
2440
  !calld->pending_send_message) {
@@ -2400,7 +2455,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2400
2455
  // to start, since we can't send down any more send_message ops after
2401
2456
  // send_trailing_metadata.
2402
2457
  if (calld->seen_send_trailing_metadata &&
2403
- retry_state->started_send_message_count == calld->send_messages->size() &&
2458
+ retry_state->started_send_message_count == calld->send_messages.size() &&
2404
2459
  !retry_state->started_send_trailing_metadata &&
2405
2460
  !calld->pending_send_trailing_metadata) {
2406
2461
  if (grpc_client_channel_trace.enabled()) {
@@ -2452,7 +2507,7 @@ static void add_subchannel_batches_for_pending_batches(
2452
2507
  // send_message ops after send_trailing_metadata.
2453
2508
  if (batch->send_trailing_metadata &&
2454
2509
  (retry_state->started_send_message_count + batch->send_message <
2455
- calld->send_messages->size() ||
2510
+ calld->send_messages.size() ||
2456
2511
  retry_state->started_send_trailing_metadata)) {
2457
2512
  continue;
2458
2513
  }
@@ -2589,69 +2644,6 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
2589
2644
  closures.RunClosures(calld->call_combiner);
2590
2645
  }
2591
2646
 
2592
- //
2593
- // Channelz
2594
- //
2595
-
2596
- static void recv_trailing_metadata_ready_channelz(void* arg,
2597
- grpc_error* error) {
2598
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2599
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2600
- call_data* calld = static_cast<call_data*>(elem->call_data);
2601
- if (grpc_client_channel_trace.enabled()) {
2602
- gpr_log(GPR_INFO,
2603
- "chand=%p calld=%p: got recv_trailing_metadata_ready_channelz, "
2604
- "error=%s",
2605
- chand, calld, grpc_error_string(error));
2606
- }
2607
- GPR_ASSERT(calld->recv_trailing_metadata != nullptr);
2608
- grpc_status_code status = GRPC_STATUS_OK;
2609
- grpc_metadata_batch* md_batch = calld->recv_trailing_metadata;
2610
- get_call_status(elem, md_batch, GRPC_ERROR_REF(error), &status, nullptr);
2611
- grpc_core::channelz::SubchannelNode* channelz_subchannel =
2612
- calld->pick.connected_subchannel->channelz_subchannel();
2613
- GPR_ASSERT(channelz_subchannel != nullptr);
2614
- if (status == GRPC_STATUS_OK) {
2615
- channelz_subchannel->RecordCallSucceeded();
2616
- } else {
2617
- channelz_subchannel->RecordCallFailed();
2618
- }
2619
- calld->recv_trailing_metadata = nullptr;
2620
- GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata, error);
2621
- }
2622
-
2623
- // If channelz is enabled, intercept recv_trailing so that we may check the
2624
- // status and associate it to a subchannel.
2625
- // Returns true if callback was intercepted, false otherwise.
2626
- static void maybe_intercept_recv_trailing_metadata_for_channelz(
2627
- grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
2628
- call_data* calld = static_cast<call_data*>(elem->call_data);
2629
- // only intercept payloads with recv trailing.
2630
- if (!batch->recv_trailing_metadata) {
2631
- return;
2632
- }
2633
- // only add interceptor is channelz is enabled.
2634
- if (calld->pick.connected_subchannel->channelz_subchannel() == nullptr) {
2635
- return;
2636
- }
2637
- if (grpc_client_channel_trace.enabled()) {
2638
- gpr_log(GPR_INFO,
2639
- "calld=%p batch=%p: intercepting recv trailing for channelz", calld,
2640
- batch);
2641
- }
2642
- GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready_channelz,
2643
- recv_trailing_metadata_ready_channelz, elem,
2644
- grpc_schedule_on_exec_ctx);
2645
- // save some state needed for the interception callback.
2646
- GPR_ASSERT(calld->recv_trailing_metadata == nullptr);
2647
- calld->recv_trailing_metadata =
2648
- batch->payload->recv_trailing_metadata.recv_trailing_metadata;
2649
- calld->original_recv_trailing_metadata =
2650
- batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
2651
- batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
2652
- &calld->recv_trailing_metadata_ready_channelz;
2653
- }
2654
-
2655
2647
  //
2656
2648
  // LB pick
2657
2649
  //
@@ -2681,17 +2673,10 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
2681
2673
  new_error = grpc_error_add_child(new_error, error);
2682
2674
  pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
2683
2675
  } else {
2684
- grpc_core::channelz::SubchannelNode* channelz_subchannel =
2685
- calld->pick.connected_subchannel->channelz_subchannel();
2686
- if (channelz_subchannel != nullptr) {
2687
- channelz_subchannel->RecordCallStarted();
2688
- }
2689
2676
  if (parent_data_size > 0) {
2690
- subchannel_call_retry_state* retry_state =
2691
- static_cast<subchannel_call_retry_state*>(
2692
- grpc_connected_subchannel_call_get_parent_data(
2693
- calld->subchannel_call));
2694
- retry_state->batch_payload.context = calld->pick.subchannel_call_context;
2677
+ new (grpc_connected_subchannel_call_get_parent_data(
2678
+ calld->subchannel_call))
2679
+ subchannel_call_retry_state(calld->pick.subchannel_call_context);
2695
2680
  }
2696
2681
  pending_batches_resume(elem);
2697
2682
  }
@@ -2917,6 +2902,27 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
2917
2902
  }
2918
2903
  }
2919
2904
 
2905
+ // If the channel is in TRANSIENT_FAILURE and the call is not
2906
+ // wait_for_ready=true, fails the call and returns true.
2907
+ static bool fail_call_if_in_transient_failure(grpc_call_element* elem) {
2908
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2909
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2910
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[0].batch;
2911
+ if (grpc_connectivity_state_check(&chand->state_tracker) ==
2912
+ GRPC_CHANNEL_TRANSIENT_FAILURE &&
2913
+ (batch->payload->send_initial_metadata.send_initial_metadata_flags &
2914
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
2915
+ pending_batches_fail(
2916
+ elem,
2917
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
2918
+ "channel is in state TRANSIENT_FAILURE"),
2919
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
2920
+ true /* yield_call_combiner */);
2921
+ return true;
2922
+ }
2923
+ return false;
2924
+ }
2925
+
2920
2926
  // Invoked once resolver results are available.
2921
2927
  static void process_service_config_and_start_lb_pick_locked(
2922
2928
  grpc_call_element* elem) {
@@ -2924,6 +2930,9 @@ static void process_service_config_and_start_lb_pick_locked(
2924
2930
  // Only get service config data on the first attempt.
2925
2931
  if (GPR_LIKELY(calld->num_attempts_completed == 0)) {
2926
2932
  apply_service_config_to_call_locked(elem);
2933
+ // Check this after applying service config, since it may have
2934
+ // affected the call's wait_for_ready value.
2935
+ if (fail_call_if_in_transient_failure(elem)) return;
2927
2936
  }
2928
2937
  // Start LB pick.
2929
2938
  grpc_core::LbPicker::StartLocked(elem);
@@ -3093,6 +3102,16 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
3093
3102
  // We do not yet have an LB policy, so wait for a resolver result.
3094
3103
  if (GPR_UNLIKELY(!chand->started_resolving)) {
3095
3104
  start_resolving_locked(chand);
3105
+ } else {
3106
+ // Normally, we want to do this check in
3107
+ // process_service_config_and_start_lb_pick_locked(), so that we
3108
+ // can honor the wait_for_ready setting in the service config.
3109
+ // However, if the channel is in TRANSIENT_FAILURE at this point, that
3110
+ // means that the resolver has returned a failure, so we're not going
3111
+ // to get a service config right away. In that case, we fail the
3112
+ // call now based on the wait_for_ready value passed in from the
3113
+ // application.
3114
+ if (fail_call_if_in_transient_failure(elem)) return;
3096
3115
  }
3097
3116
  // Create a new waiter, which will delete itself when done.
3098
3117
  grpc_core::New<grpc_core::ResolverResultWaiter>(elem);
@@ -3197,21 +3216,8 @@ static void cc_start_transport_stream_op_batch(
3197
3216
  /* Constructor for call_data */
3198
3217
  static grpc_error* cc_init_call_elem(grpc_call_element* elem,
3199
3218
  const grpc_call_element_args* args) {
3200
- call_data* calld = static_cast<call_data*>(elem->call_data);
3201
3219
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3202
- // Initialize data members.
3203
- calld->path = grpc_slice_ref_internal(args->path);
3204
- calld->call_start_time = args->start_time;
3205
- calld->deadline = args->deadline;
3206
- calld->arena = args->arena;
3207
- calld->owning_call = args->call_stack;
3208
- calld->call_combiner = args->call_combiner;
3209
- if (GPR_LIKELY(chand->deadline_checking_enabled)) {
3210
- grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
3211
- calld->deadline);
3212
- }
3213
- calld->enable_retries = chand->enable_retries;
3214
- calld->send_messages.Init();
3220
+ new (elem->call_data) call_data(elem, *chand, *args);
3215
3221
  return GRPC_ERROR_NONE;
3216
3222
  }
3217
3223
 
@@ -3220,34 +3226,12 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
3220
3226
  const grpc_call_final_info* final_info,
3221
3227
  grpc_closure* then_schedule_closure) {
3222
3228
  call_data* calld = static_cast<call_data*>(elem->call_data);
3223
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3224
- if (GPR_LIKELY(chand->deadline_checking_enabled)) {
3225
- grpc_deadline_state_destroy(elem);
3226
- }
3227
- grpc_slice_unref_internal(calld->path);
3228
- calld->retry_throttle_data.reset();
3229
- calld->method_params.reset();
3230
- GRPC_ERROR_UNREF(calld->cancel_error);
3231
3229
  if (GPR_LIKELY(calld->subchannel_call != nullptr)) {
3232
3230
  grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
3233
3231
  then_schedule_closure);
3234
3232
  then_schedule_closure = nullptr;
3235
- GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
3236
- "client_channel_destroy_call");
3237
3233
  }
3238
- for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
3239
- GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
3240
- }
3241
- if (GPR_LIKELY(calld->pick.connected_subchannel != nullptr)) {
3242
- calld->pick.connected_subchannel.reset();
3243
- }
3244
- for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
3245
- if (calld->pick.subchannel_call_context[i].value != nullptr) {
3246
- calld->pick.subchannel_call_context[i].destroy(
3247
- calld->pick.subchannel_call_context[i].value);
3248
- }
3249
- }
3250
- calld->send_messages.Destroy();
3234
+ calld->~call_data();
3251
3235
  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
3252
3236
  }
3253
3237
 
@@ -3288,9 +3272,16 @@ static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
3288
3272
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
3289
3273
  }
3290
3274
 
3275
+ void grpc_client_channel_set_channelz_node(
3276
+ grpc_channel_element* elem, grpc_core::channelz::ClientChannelNode* node) {
3277
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3278
+ chand->channelz_channel = node;
3279
+ }
3280
+
3291
3281
  void grpc_client_channel_populate_child_refs(
3292
- grpc_channel_element* elem, grpc_core::ChildRefsList* child_subchannels,
3293
- grpc_core::ChildRefsList* child_channels) {
3282
+ grpc_channel_element* elem,
3283
+ grpc_core::channelz::ChildRefsList* child_subchannels,
3284
+ grpc_core::channelz::ChildRefsList* child_channels) {
3294
3285
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3295
3286
  if (chand->lb_policy != nullptr) {
3296
3287
  chand->lb_policy->FillChildRefsForChannelz(child_subchannels,