grpc 1.13.0 → 1.14.0

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (213)
  1. checksums.yaml +4 -4
  2. data/Makefile +403 -153
  3. data/include/grpc/grpc.h +0 -8
  4. data/include/grpc/grpc_security.h +59 -2
  5. data/include/grpc/impl/codegen/grpc_types.h +8 -2
  6. data/include/grpc/impl/codegen/log.h +112 -0
  7. data/include/grpc/module.modulemap +2 -0
  8. data/include/grpc/support/log.h +2 -88
  9. data/include/grpc/support/string_util.h +2 -0
  10. data/src/boringssl/err_data.c +597 -593
  11. data/src/core/ext/filters/client_channel/client_channel.cc +715 -770
  12. data/src/core/ext/filters/client_channel/client_channel.h +5 -0
  13. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +111 -0
  14. data/src/core/ext/filters/client_channel/client_channel_channelz.h +69 -0
  15. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +9 -0
  16. data/src/core/ext/filters/client_channel/http_proxy.cc +22 -5
  17. data/src/core/ext/filters/client_channel/lb_policy.h +15 -0
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +3 -0
  19. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +3 -3
  20. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +3 -1
  21. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.c +19 -0
  22. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.h +54 -0
  23. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.c +19 -0
  24. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.h +54 -0
  25. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +4 -17
  26. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +37 -63
  27. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +79 -0
  28. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +5 -2
  29. data/src/core/ext/filters/client_channel/lb_policy_factory.cc +8 -0
  30. data/src/core/ext/filters/client_channel/lb_policy_factory.h +4 -0
  31. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +2 -2
  32. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +317 -0
  33. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +48 -9
  34. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +40 -293
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +106 -84
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +6 -2
  37. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +6 -5
  38. data/src/core/ext/filters/client_channel/subchannel.cc +36 -6
  39. data/src/core/ext/filters/client_channel/subchannel.h +4 -0
  40. data/src/core/ext/filters/deadline/deadline_filter.cc +18 -15
  41. data/src/core/ext/filters/deadline/deadline_filter.h +5 -5
  42. data/src/core/ext/filters/http/client/http_client_filter.cc +10 -9
  43. data/src/core/ext/filters/http/server/http_server_filter.h +1 -1
  44. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +1 -1
  45. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +3 -2
  46. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +33 -22
  47. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1 -1
  48. data/src/core/ext/transport/chttp2/transport/internal.h +10 -3
  49. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +17 -0
  50. data/src/core/ext/transport/chttp2/transport/writing.cc +21 -16
  51. data/src/core/ext/transport/inproc/inproc_transport.cc +46 -6
  52. data/src/core/lib/channel/channel_stack.cc +22 -24
  53. data/src/core/lib/channel/channel_trace.cc +28 -63
  54. data/src/core/lib/channel/channel_trace.h +13 -17
  55. data/src/core/lib/channel/channelz.cc +143 -0
  56. data/src/core/lib/channel/channelz.h +124 -0
  57. data/src/core/lib/channel/channelz_registry.cc +7 -24
  58. data/src/core/lib/channel/channelz_registry.h +12 -8
  59. data/src/core/lib/channel/connected_channel.cc +8 -1
  60. data/src/core/{ext/filters/load_reporting/server_load_reporting_filter.h → lib/gpr/alloc.h} +7 -9
  61. data/src/core/lib/gpr/arena.cc +8 -8
  62. data/src/core/lib/gpr/string.cc +28 -0
  63. data/src/core/lib/gpr/string.h +10 -0
  64. data/src/core/lib/gprpp/abstract.h +5 -2
  65. data/src/core/lib/gprpp/inlined_vector.h +57 -3
  66. data/src/core/lib/gprpp/memory.h +2 -2
  67. data/src/core/lib/gprpp/ref_counted_ptr.h +5 -0
  68. data/src/core/lib/gprpp/thd_posix.cc +1 -1
  69. data/src/core/lib/iomgr/call_combiner.h +80 -0
  70. data/src/core/lib/iomgr/closure.h +3 -2
  71. data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
  72. data/src/core/lib/iomgr/error.cc +12 -0
  73. data/src/core/lib/iomgr/error.h +5 -0
  74. data/src/core/lib/iomgr/ev_epoll1_linux.cc +36 -9
  75. data/src/core/lib/iomgr/ev_epollex_linux.cc +172 -46
  76. data/src/core/lib/iomgr/ev_epollsig_linux.cc +47 -21
  77. data/src/core/lib/iomgr/ev_poll_posix.cc +10 -4
  78. data/src/core/lib/iomgr/ev_posix.cc +17 -9
  79. data/src/core/lib/iomgr/ev_posix.h +20 -4
  80. data/src/core/lib/iomgr/executor.cc +196 -140
  81. data/src/core/lib/iomgr/executor.h +47 -14
  82. data/src/core/lib/iomgr/iomgr.cc +2 -0
  83. data/src/core/lib/iomgr/iomgr.h +5 -0
  84. data/src/core/lib/iomgr/is_epollexclusive_available.cc +1 -0
  85. data/src/core/lib/iomgr/socket_utils.h +9 -0
  86. data/src/core/lib/iomgr/socket_utils_common_posix.cc +4 -0
  87. data/src/core/lib/iomgr/socket_utils_uv.cc +4 -0
  88. data/src/core/lib/iomgr/socket_utils_windows.cc +4 -0
  89. data/src/core/lib/iomgr/tcp_client_posix.cc +3 -5
  90. data/src/core/lib/iomgr/tcp_posix.cc +6 -1
  91. data/src/core/lib/iomgr/tcp_server_posix.cc +3 -3
  92. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +1 -1
  93. data/src/core/lib/iomgr/timer_manager.cc +0 -1
  94. data/src/core/lib/iomgr/udp_server.cc +2 -3
  95. data/src/core/lib/json/json.cc +10 -0
  96. data/src/core/lib/json/json.h +5 -0
  97. data/src/core/lib/security/context/security_context.cc +8 -8
  98. data/src/core/lib/security/context/security_context.h +6 -2
  99. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +2 -1
  100. data/src/core/lib/security/credentials/local/local_credentials.cc +77 -0
  101. data/src/core/lib/security/credentials/local/local_credentials.h +40 -0
  102. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +17 -3
  103. data/src/core/lib/security/security_connector/local_security_connector.cc +245 -0
  104. data/src/core/lib/security/security_connector/local_security_connector.h +58 -0
  105. data/src/core/lib/security/security_connector/security_connector.cc +30 -5
  106. data/src/core/lib/security/security_connector/security_connector.h +1 -0
  107. data/src/core/lib/security/transport/client_auth_filter.cc +5 -1
  108. data/src/core/lib/security/transport/server_auth_filter.cc +4 -5
  109. data/src/core/lib/surface/call.cc +75 -32
  110. data/src/core/lib/surface/call.h +2 -0
  111. data/src/core/lib/surface/channel.cc +32 -13
  112. data/src/core/lib/surface/channel.h +4 -0
  113. data/src/core/lib/surface/version.cc +1 -1
  114. data/src/core/lib/transport/transport.cc +20 -9
  115. data/src/core/lib/transport/transport.h +12 -10
  116. data/src/core/lib/transport/transport_op_string.cc +0 -7
  117. data/src/core/plugin_registry/grpc_plugin_registry.cc +0 -4
  118. data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h +2 -2
  119. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +2 -1
  120. data/src/core/tsi/alts/handshaker/altscontext.pb.c +0 -1
  121. data/src/core/tsi/alts/handshaker/altscontext.pb.h +1 -2
  122. data/src/core/tsi/alts/handshaker/handshaker.pb.c +0 -1
  123. data/src/core/tsi/alts/handshaker/handshaker.pb.h +1 -2
  124. data/src/core/tsi/alts/handshaker/transport_security_common.pb.c +0 -1
  125. data/src/core/tsi/alts/handshaker/transport_security_common.pb.h +1 -1
  126. data/src/core/tsi/alts/handshaker/transport_security_common_api.h +2 -2
  127. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +47 -1
  128. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h +3 -1
  129. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +12 -11
  130. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h +7 -2
  131. data/src/core/tsi/local_transport_security.cc +209 -0
  132. data/src/core/tsi/local_transport_security.h +51 -0
  133. data/src/core/tsi/ssl_transport_security.cc +2 -3
  134. data/src/{core/ext → cpp/ext/filters}/census/grpc_context.cc +0 -0
  135. data/src/ruby/ext/grpc/rb_channel_credentials.c +3 -3
  136. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +18 -18
  137. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +29 -29
  138. data/src/ruby/lib/grpc/generic/active_call.rb +19 -23
  139. data/src/ruby/lib/grpc/version.rb +1 -1
  140. data/src/ruby/spec/call_credentials_spec.rb +1 -1
  141. data/src/ruby/spec/call_spec.rb +1 -1
  142. data/src/ruby/spec/channel_credentials_spec.rb +1 -1
  143. data/src/ruby/spec/channel_spec.rb +1 -1
  144. data/src/ruby/spec/client_auth_spec.rb +1 -12
  145. data/src/ruby/spec/client_server_spec.rb +1 -1
  146. data/src/ruby/spec/compression_options_spec.rb +1 -1
  147. data/src/ruby/spec/error_sanity_spec.rb +1 -1
  148. data/src/ruby/spec/generic/client_stub_spec.rb +13 -1
  149. data/src/ruby/spec/generic/rpc_desc_spec.rb +1 -1
  150. data/src/ruby/spec/generic/rpc_server_pool_spec.rb +1 -1
  151. data/src/ruby/spec/generic/service_spec.rb +1 -1
  152. data/src/ruby/spec/google_rpc_status_utils_spec.rb +1 -12
  153. data/src/ruby/spec/pb/duplicate/codegen_spec.rb +1 -0
  154. data/src/ruby/spec/pb/health/checker_spec.rb +1 -1
  155. data/src/ruby/spec/server_credentials_spec.rb +1 -1
  156. data/src/ruby/spec/server_spec.rb +1 -1
  157. data/src/ruby/spec/spec_helper.rb +1 -0
  158. data/src/ruby/spec/support/services.rb +1 -1
  159. data/src/ruby/spec/time_consts_spec.rb +1 -1
  160. data/third_party/boringssl/crypto/asn1/tasn_dec.c +40 -19
  161. data/third_party/boringssl/crypto/bytestring/cbs.c +1 -0
  162. data/third_party/boringssl/crypto/cipher_extra/e_aesccm.c +47 -15
  163. data/third_party/boringssl/crypto/ec_extra/ec_asn1.c +9 -10
  164. data/third_party/boringssl/crypto/ecdh/ecdh.c +4 -3
  165. data/third_party/boringssl/crypto/fipsmodule/bn/add.c +30 -54
  166. data/third_party/boringssl/crypto/fipsmodule/bn/bn.c +7 -1
  167. data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +8 -8
  168. data/third_party/boringssl/crypto/fipsmodule/bn/div.c +97 -11
  169. data/third_party/boringssl/crypto/fipsmodule/bn/gcd.c +274 -218
  170. data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +111 -34
  171. data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +2 -2
  172. data/third_party/boringssl/crypto/fipsmodule/bn/montgomery_inv.c +1 -1
  173. data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +24 -6
  174. data/third_party/boringssl/crypto/fipsmodule/bn/prime.c +324 -63
  175. data/third_party/boringssl/crypto/fipsmodule/bn/random.c +74 -21
  176. data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +128 -86
  177. data/third_party/boringssl/crypto/fipsmodule/bn/sqrt.c +1 -1
  178. data/third_party/boringssl/crypto/fipsmodule/ec/ec_key.c +67 -112
  179. data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +8 -1
  180. data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +5 -5
  181. data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +9 -17
  182. data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64-table.h +5378 -5418
  183. data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +32 -32
  184. data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +5 -11
  185. data/third_party/boringssl/crypto/fipsmodule/rsa/blinding.c +16 -40
  186. data/third_party/boringssl/crypto/fipsmodule/rsa/internal.h +1 -6
  187. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +41 -29
  188. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +63 -49
  189. data/third_party/boringssl/crypto/x509/vpm_int.h +1 -0
  190. data/third_party/boringssl/crypto/x509/x509_vfy.c +4 -0
  191. data/third_party/boringssl/crypto/x509/x509_vpm.c +44 -22
  192. data/third_party/boringssl/include/openssl/aead.h +8 -2
  193. data/third_party/boringssl/include/openssl/asn1.h +1 -0
  194. data/third_party/boringssl/include/openssl/base.h +4 -0
  195. data/third_party/boringssl/include/openssl/bn.h +13 -3
  196. data/third_party/boringssl/include/openssl/bytestring.h +4 -4
  197. data/third_party/boringssl/include/openssl/ec.h +10 -4
  198. data/third_party/boringssl/include/openssl/ec_key.h +0 -3
  199. data/third_party/boringssl/include/openssl/rsa.h +1 -0
  200. data/third_party/boringssl/include/openssl/ssl.h +8 -3
  201. data/third_party/boringssl/include/openssl/ssl3.h +0 -1
  202. data/third_party/boringssl/include/openssl/x509.h +1 -0
  203. data/third_party/boringssl/include/openssl/x509v3.h +1 -0
  204. data/third_party/boringssl/ssl/handshake_client.cc +36 -64
  205. data/third_party/boringssl/ssl/ssl_cipher.cc +4 -0
  206. data/third_party/boringssl/ssl/ssl_lib.cc +1 -1
  207. metadata +45 -38
  208. data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +0 -222
  209. data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc +0 -71
  210. data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +0 -61
  211. data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +0 -51
  212. data/src/ruby/spec/pb/package_with_underscore/data.proto +0 -23
  213. data/src/ruby/spec/pb/package_with_underscore/service.proto +0 -23
@@ -126,9 +126,9 @@ typedef struct client_channel_channel_data {
126
126
  /* the following properties are guarded by a mutex since APIs require them
127
127
  to be instantaneously available */
128
128
  gpr_mu info_mu;
129
- char* info_lb_policy_name;
129
+ grpc_core::UniquePtr<char> info_lb_policy_name;
130
130
  /** service config in JSON form */
131
- char* info_service_config_json;
131
+ grpc_core::UniquePtr<char> info_service_config_json;
132
132
  } channel_data;
133
133
 
134
134
  typedef struct {
@@ -284,6 +284,78 @@ static void parse_retry_throttle_params(
284
284
  }
285
285
  }
286
286
 
287
+ // Invoked from the resolver NextLocked() callback when the resolver
288
+ // is shutting down.
289
+ static void on_resolver_shutdown_locked(channel_data* chand,
290
+ grpc_error* error) {
291
+ if (grpc_client_channel_trace.enabled()) {
292
+ gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
293
+ }
294
+ if (chand->lb_policy != nullptr) {
295
+ if (grpc_client_channel_trace.enabled()) {
296
+ gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
297
+ chand->lb_policy.get());
298
+ }
299
+ grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
300
+ chand->interested_parties);
301
+ chand->lb_policy.reset();
302
+ }
303
+ if (chand->resolver != nullptr) {
304
+ // This should never happen; it can only be triggered by a resolver
305
+ // implementation spotaneously deciding to report shutdown without
306
+ // being orphaned. This code is included just to be defensive.
307
+ if (grpc_client_channel_trace.enabled()) {
308
+ gpr_log(GPR_INFO, "chand=%p: spontaneous shutdown from resolver %p",
309
+ chand, chand->resolver.get());
310
+ }
311
+ chand->resolver.reset();
312
+ set_channel_connectivity_state_locked(
313
+ chand, GRPC_CHANNEL_SHUTDOWN,
314
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
315
+ "Resolver spontaneous shutdown", &error, 1),
316
+ "resolver_spontaneous_shutdown");
317
+ }
318
+ grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
319
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
320
+ "Channel disconnected", &error, 1));
321
+ GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
322
+ GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
323
+ grpc_channel_args_destroy(chand->resolver_result);
324
+ chand->resolver_result = nullptr;
325
+ GRPC_ERROR_UNREF(error);
326
+ }
327
+
328
+ // Returns the LB policy name from the resolver result.
329
+ static grpc_core::UniquePtr<char>
330
+ get_lb_policy_name_from_resolver_result_locked(channel_data* chand) {
331
+ // Find LB policy name in channel args.
332
+ const grpc_arg* channel_arg =
333
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
334
+ const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
335
+ // Special case: If at least one balancer address is present, we use
336
+ // the grpclb policy, regardless of what the resolver actually specified.
337
+ channel_arg =
338
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
339
+ if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
340
+ grpc_lb_addresses* addresses =
341
+ static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
342
+ if (grpc_lb_addresses_contains_balancer_address(*addresses)) {
343
+ if (lb_policy_name != nullptr &&
344
+ gpr_stricmp(lb_policy_name, "grpclb") != 0) {
345
+ gpr_log(GPR_INFO,
346
+ "resolver requested LB policy %s but provided at least one "
347
+ "balancer address -- forcing use of grpclb LB policy",
348
+ lb_policy_name);
349
+ }
350
+ lb_policy_name = "grpclb";
351
+ }
352
+ }
353
+ // Use pick_first if nothing was specified and we didn't select grpclb
354
+ // above.
355
+ if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
356
+ return grpc_core::UniquePtr<char>(gpr_strdup(lb_policy_name));
357
+ }
358
+
287
359
  static void request_reresolution_locked(void* arg, grpc_error* error) {
288
360
  reresolution_request_args* args =
289
361
  static_cast<reresolution_request_args*>(arg);
@@ -304,234 +376,183 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
304
376
  chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
305
377
  }
306
378
 
307
- // TODO(roth): The logic in this function is very hard to follow. We
308
- // should refactor this so that it's easier to understand, perhaps as
309
- // part of changing the resolver API to more clearly differentiate
310
- // between transient failures and shutdown.
311
- static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
312
- channel_data* chand = static_cast<channel_data*>(arg);
313
- if (grpc_client_channel_trace.enabled()) {
314
- gpr_log(GPR_INFO,
315
- "chand=%p: got resolver result: resolver_result=%p error=%s", chand,
316
- chand->resolver_result, grpc_error_string(error));
317
- }
318
- // Extract the following fields from the resolver result, if non-nullptr.
319
- bool lb_policy_updated = false;
320
- bool lb_policy_created = false;
321
- char* lb_policy_name_dup = nullptr;
322
- bool lb_policy_name_changed = false;
323
- grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
324
- char* service_config_json = nullptr;
325
- grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
326
- grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
327
- if (chand->resolver_result != nullptr) {
328
- if (chand->resolver != nullptr) {
329
- // Find LB policy name.
330
- const grpc_arg* channel_arg = grpc_channel_args_find(
331
- chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
332
- const char* lb_policy_name = grpc_channel_arg_get_string(channel_arg);
333
- // Special case: If at least one balancer address is present, we use
334
- // the grpclb policy, regardless of what the resolver actually specified.
335
- channel_arg =
336
- grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
337
- if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
338
- grpc_lb_addresses* addresses =
339
- static_cast<grpc_lb_addresses*>(channel_arg->value.pointer.p);
340
- bool found_balancer_address = false;
341
- for (size_t i = 0; i < addresses->num_addresses; ++i) {
342
- if (addresses->addresses[i].is_balancer) {
343
- found_balancer_address = true;
344
- break;
345
- }
346
- }
347
- if (found_balancer_address) {
348
- if (lb_policy_name != nullptr &&
349
- strcmp(lb_policy_name, "grpclb") != 0) {
350
- gpr_log(GPR_INFO,
351
- "resolver requested LB policy %s but provided at least one "
352
- "balancer address -- forcing use of grpclb LB policy",
353
- lb_policy_name);
354
- }
355
- lb_policy_name = "grpclb";
356
- }
357
- }
358
- // Use pick_first if nothing was specified and we didn't select grpclb
359
- // above.
360
- if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
361
- // Check to see if we're already using the right LB policy.
362
- // Note: It's safe to use chand->info_lb_policy_name here without
363
- // taking a lock on chand->info_mu, because this function is the
364
- // only thing that modifies its value, and it can only be invoked
365
- // once at any given time.
366
- lb_policy_name_changed =
367
- chand->info_lb_policy_name == nullptr ||
368
- gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
369
- if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
370
- // Continue using the same LB policy. Update with new addresses.
371
- lb_policy_updated = true;
372
- chand->lb_policy->UpdateLocked(*chand->resolver_result);
373
- } else {
374
- // Instantiate new LB policy.
375
- grpc_core::LoadBalancingPolicy::Args lb_policy_args;
376
- lb_policy_args.combiner = chand->combiner;
377
- lb_policy_args.client_channel_factory = chand->client_channel_factory;
378
- lb_policy_args.args = chand->resolver_result;
379
- new_lb_policy =
380
- grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
381
- lb_policy_name, lb_policy_args);
382
- if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
383
- gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
384
- lb_policy_name);
385
- } else {
386
- lb_policy_created = true;
387
- reresolution_request_args* args =
388
- static_cast<reresolution_request_args*>(
389
- gpr_zalloc(sizeof(*args)));
390
- args->chand = chand;
391
- args->lb_policy = new_lb_policy.get();
392
- GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
393
- grpc_combiner_scheduler(chand->combiner));
394
- GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
395
- new_lb_policy->SetReresolutionClosureLocked(&args->closure);
396
- }
397
- }
398
- // Before we clean up, save a copy of lb_policy_name, since it might
399
- // be pointing to data inside chand->resolver_result.
400
- // The copy will be saved in chand->lb_policy_name below.
401
- lb_policy_name_dup = gpr_strdup(lb_policy_name);
402
- // Find service config.
403
- channel_arg = grpc_channel_args_find(chand->resolver_result,
404
- GRPC_ARG_SERVICE_CONFIG);
405
- service_config_json =
406
- gpr_strdup(grpc_channel_arg_get_string(channel_arg));
407
- if (service_config_json != nullptr) {
408
- grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
409
- grpc_core::ServiceConfig::Create(service_config_json);
410
- if (service_config != nullptr) {
411
- if (chand->enable_retries) {
412
- channel_arg = grpc_channel_args_find(chand->resolver_result,
413
- GRPC_ARG_SERVER_URI);
414
- const char* server_uri = grpc_channel_arg_get_string(channel_arg);
415
- GPR_ASSERT(server_uri != nullptr);
416
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
417
- GPR_ASSERT(uri->path[0] != '\0');
418
- service_config_parsing_state parsing_state;
419
- memset(&parsing_state, 0, sizeof(parsing_state));
420
- parsing_state.server_name =
421
- uri->path[0] == '/' ? uri->path + 1 : uri->path;
422
- service_config->ParseGlobalParams(parse_retry_throttle_params,
423
- &parsing_state);
424
- grpc_uri_destroy(uri);
425
- retry_throttle_data = std::move(parsing_state.retry_throttle_data);
426
- }
427
- method_params_table = service_config->CreateMethodConfigTable(
428
- ClientChannelMethodParams::CreateFromJson);
429
- }
430
- }
379
+ // Creates a new LB policy, replacing any previous one.
380
+ // If the new policy is created successfully, sets *connectivity_state and
381
+ // *connectivity_error to its initial connectivity state; otherwise,
382
+ // leaves them unchanged.
383
+ static void create_new_lb_policy_locked(
384
+ channel_data* chand, char* lb_policy_name,
385
+ grpc_connectivity_state* connectivity_state,
386
+ grpc_error** connectivity_error) {
387
+ grpc_core::LoadBalancingPolicy::Args lb_policy_args;
388
+ lb_policy_args.combiner = chand->combiner;
389
+ lb_policy_args.client_channel_factory = chand->client_channel_factory;
390
+ lb_policy_args.args = chand->resolver_result;
391
+ grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy =
392
+ grpc_core::LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
393
+ lb_policy_name, lb_policy_args);
394
+ if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
395
+ gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
396
+ } else {
397
+ if (grpc_client_channel_trace.enabled()) {
398
+ gpr_log(GPR_INFO, "chand=%p: created new LB policy \"%s\" (%p)", chand,
399
+ lb_policy_name, new_lb_policy.get());
431
400
  }
432
- }
433
- if (grpc_client_channel_trace.enabled()) {
434
- gpr_log(GPR_INFO,
435
- "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
436
- "service_config=\"%s\"",
437
- chand, lb_policy_name_dup,
438
- lb_policy_name_changed ? " (changed)" : "", service_config_json);
439
- }
440
- // Now swap out fields in chand. Note that the new values may still
441
- // be nullptr if (e.g.) the resolver failed to return results or the
442
- // results did not contain the necessary data.
443
- //
444
- // First, swap out the data used by cc_get_channel_info().
445
- gpr_mu_lock(&chand->info_mu);
446
- if (lb_policy_name_dup != nullptr) {
447
- gpr_free(chand->info_lb_policy_name);
448
- chand->info_lb_policy_name = lb_policy_name_dup;
449
- }
450
- if (service_config_json != nullptr) {
451
- gpr_free(chand->info_service_config_json);
452
- chand->info_service_config_json = service_config_json;
453
- }
454
- gpr_mu_unlock(&chand->info_mu);
455
- // Swap out the retry throttle data.
456
- chand->retry_throttle_data = std::move(retry_throttle_data);
457
- // Swap out the method params table.
458
- chand->method_params_table = std::move(method_params_table);
459
- // If we have a new LB policy or are shutting down (in which case
460
- // new_lb_policy will be nullptr), swap out the LB policy, unreffing the
461
- // old one and removing its fds from chand->interested_parties.
462
- // Note that we do NOT do this if either (a) we updated the existing
463
- // LB policy above or (b) we failed to create the new LB policy (in
464
- // which case we want to continue using the most recent one we had).
465
- if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
466
- chand->resolver == nullptr) {
401
+ // Swap out the LB policy and update the fds in
402
+ // chand->interested_parties.
467
403
  if (chand->lb_policy != nullptr) {
468
404
  if (grpc_client_channel_trace.enabled()) {
469
- gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand,
405
+ gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", chand,
470
406
  chand->lb_policy.get());
471
407
  }
472
408
  grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
473
409
  chand->interested_parties);
474
410
  chand->lb_policy->HandOffPendingPicksLocked(new_lb_policy.get());
475
- chand->lb_policy.reset();
476
411
  }
477
412
  chand->lb_policy = std::move(new_lb_policy);
413
+ grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
414
+ chand->interested_parties);
415
+ // Set up re-resolution callback.
416
+ reresolution_request_args* args =
417
+ static_cast<reresolution_request_args*>(gpr_zalloc(sizeof(*args)));
418
+ args->chand = chand;
419
+ args->lb_policy = chand->lb_policy.get();
420
+ GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
421
+ grpc_combiner_scheduler(chand->combiner));
422
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
423
+ chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
424
+ // Get the new LB policy's initial connectivity state and start a
425
+ // connectivity watch.
426
+ GRPC_ERROR_UNREF(*connectivity_error);
427
+ *connectivity_state =
428
+ chand->lb_policy->CheckConnectivityLocked(connectivity_error);
429
+ if (chand->exit_idle_when_lb_policy_arrives) {
430
+ chand->lb_policy->ExitIdleLocked();
431
+ chand->exit_idle_when_lb_policy_arrives = false;
432
+ }
433
+ watch_lb_policy_locked(chand, chand->lb_policy.get(), *connectivity_state);
434
+ }
435
+ }
436
+
437
+ // Returns the service config (as a JSON string) from the resolver result.
438
+ // Also updates state in chand.
439
+ static grpc_core::UniquePtr<char>
440
+ get_service_config_from_resolver_result_locked(channel_data* chand) {
441
+ const grpc_arg* channel_arg =
442
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
443
+ const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
444
+ if (service_config_json != nullptr) {
445
+ if (grpc_client_channel_trace.enabled()) {
446
+ gpr_log(GPR_INFO, "chand=%p: resolver returned service config: \"%s\"",
447
+ chand, service_config_json);
448
+ }
449
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
450
+ grpc_core::ServiceConfig::Create(service_config_json);
451
+ if (service_config != nullptr) {
452
+ if (chand->enable_retries) {
453
+ channel_arg =
454
+ grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
455
+ const char* server_uri = grpc_channel_arg_get_string(channel_arg);
456
+ GPR_ASSERT(server_uri != nullptr);
457
+ grpc_uri* uri = grpc_uri_parse(server_uri, true);
458
+ GPR_ASSERT(uri->path[0] != '\0');
459
+ service_config_parsing_state parsing_state;
460
+ memset(&parsing_state, 0, sizeof(parsing_state));
461
+ parsing_state.server_name =
462
+ uri->path[0] == '/' ? uri->path + 1 : uri->path;
463
+ service_config->ParseGlobalParams(parse_retry_throttle_params,
464
+ &parsing_state);
465
+ grpc_uri_destroy(uri);
466
+ chand->retry_throttle_data =
467
+ std::move(parsing_state.retry_throttle_data);
468
+ }
469
+ chand->method_params_table = service_config->CreateMethodConfigTable(
470
+ ClientChannelMethodParams::CreateFromJson);
471
+ }
478
472
  }
479
- // Now that we've swapped out the relevant fields of chand, check for
480
- // error or shutdown.
473
+ return grpc_core::UniquePtr<char>(gpr_strdup(service_config_json));
474
+ }
475
+
476
+ // Callback invoked when a resolver result is available.
477
+ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
478
+ channel_data* chand = static_cast<channel_data*>(arg);
479
+ if (grpc_client_channel_trace.enabled()) {
480
+ const char* disposition =
481
+ chand->resolver_result != nullptr
482
+ ? ""
483
+ : (error == GRPC_ERROR_NONE ? " (transient error)"
484
+ : " (resolver shutdown)");
485
+ gpr_log(GPR_INFO,
486
+ "chand=%p: got resolver result: resolver_result=%p error=%s%s",
487
+ chand, chand->resolver_result, grpc_error_string(error),
488
+ disposition);
489
+ }
490
+ // Handle shutdown.
481
491
  if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
492
+ on_resolver_shutdown_locked(chand, GRPC_ERROR_REF(error));
493
+ return;
494
+ }
495
+ // Data used to set the channel's connectivity state.
496
+ bool set_connectivity_state = true;
497
+ grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
498
+ grpc_error* connectivity_error =
499
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
500
+ // chand->resolver_result will be null in the case of a transient
501
+ // resolution error. In that case, we don't have any new result to
502
+ // process, which means that we keep using the previous result (if any).
503
+ if (chand->resolver_result == nullptr) {
482
504
  if (grpc_client_channel_trace.enabled()) {
483
- gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
505
+ gpr_log(GPR_INFO, "chand=%p: resolver transient failure", chand);
484
506
  }
485
- if (chand->resolver != nullptr) {
486
- if (grpc_client_channel_trace.enabled()) {
487
- gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand);
488
- }
489
- chand->resolver.reset();
490
- }
491
- set_channel_connectivity_state_locked(
492
- chand, GRPC_CHANNEL_SHUTDOWN,
493
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
494
- "Got resolver result after disconnection", &error, 1),
495
- "resolver_gone");
496
- grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
497
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
498
- "Channel disconnected", &error, 1));
499
- GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
500
- GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
501
- grpc_channel_args_destroy(chand->resolver_result);
502
- chand->resolver_result = nullptr;
503
- } else { // Not shutting down.
504
- grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
505
- grpc_error* state_error =
506
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
507
- if (lb_policy_created) {
507
+ } else {
508
+ grpc_core::UniquePtr<char> lb_policy_name =
509
+ get_lb_policy_name_from_resolver_result_locked(chand);
510
+ // Check to see if we're already using the right LB policy.
511
+ // Note: It's safe to use chand->info_lb_policy_name here without
512
+ // taking a lock on chand->info_mu, because this function is the
513
+ // only thing that modifies its value, and it can only be invoked
514
+ // once at any given time.
515
+ bool lb_policy_name_changed = chand->info_lb_policy_name == nullptr ||
516
+ gpr_stricmp(chand->info_lb_policy_name.get(),
517
+ lb_policy_name.get()) != 0;
518
+ if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
519
+ // Continue using the same LB policy. Update with new addresses.
508
520
  if (grpc_client_channel_trace.enabled()) {
509
- gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand);
510
- }
511
- GRPC_ERROR_UNREF(state_error);
512
- state = chand->lb_policy->CheckConnectivityLocked(&state_error);
513
- grpc_pollset_set_add_pollset_set(chand->lb_policy->interested_parties(),
514
- chand->interested_parties);
515
- GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
516
- if (chand->exit_idle_when_lb_policy_arrives) {
517
- chand->lb_policy->ExitIdleLocked();
518
- chand->exit_idle_when_lb_policy_arrives = false;
521
+ gpr_log(GPR_INFO, "chand=%p: updating existing LB policy \"%s\" (%p)",
522
+ chand, lb_policy_name.get(), chand->lb_policy.get());
519
523
  }
520
- watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
521
- } else if (chand->resolver_result == nullptr) {
522
- // Transient failure.
523
- GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
524
- }
525
- if (!lb_policy_updated) {
526
- set_channel_connectivity_state_locked(
527
- chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
528
- }
524
+ chand->lb_policy->UpdateLocked(*chand->resolver_result);
525
+ // No need to set the channel's connectivity state; the existing
526
+ // watch on the LB policy will take care of that.
527
+ set_connectivity_state = false;
528
+ } else {
529
+ // Instantiate new LB policy.
530
+ create_new_lb_policy_locked(chand, lb_policy_name.get(),
531
+ &connectivity_state, &connectivity_error);
532
+ }
533
+ // Find service config.
534
+ grpc_core::UniquePtr<char> service_config_json =
535
+ get_service_config_from_resolver_result_locked(chand);
536
+ // Swap out the data used by cc_get_channel_info().
537
+ gpr_mu_lock(&chand->info_mu);
538
+ chand->info_lb_policy_name = std::move(lb_policy_name);
539
+ chand->info_service_config_json = std::move(service_config_json);
540
+ gpr_mu_unlock(&chand->info_mu);
541
+ // Clean up.
529
542
  grpc_channel_args_destroy(chand->resolver_result);
530
543
  chand->resolver_result = nullptr;
531
- chand->resolver->NextLocked(&chand->resolver_result,
532
- &chand->on_resolver_result_changed);
533
- GRPC_ERROR_UNREF(state_error);
534
544
  }
545
+ // Set the channel's connectivity state if needed.
546
+ if (set_connectivity_state) {
547
+ set_channel_connectivity_state_locked(
548
+ chand, connectivity_state, connectivity_error, "resolver_result");
549
+ } else {
550
+ GRPC_ERROR_UNREF(connectivity_error);
551
+ }
552
+ // Invoke closures that were waiting for results and renew the watch.
553
+ GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
554
+ chand->resolver->NextLocked(&chand->resolver_result,
555
+ &chand->on_resolver_result_changed);
535
556
  }
536
557
 
537
558
  static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
@@ -611,15 +632,11 @@ static void cc_get_channel_info(grpc_channel_element* elem,
611
632
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
612
633
  gpr_mu_lock(&chand->info_mu);
613
634
  if (info->lb_policy_name != nullptr) {
614
- *info->lb_policy_name = chand->info_lb_policy_name == nullptr
615
- ? nullptr
616
- : gpr_strdup(chand->info_lb_policy_name);
635
+ *info->lb_policy_name = gpr_strdup(chand->info_lb_policy_name.get());
617
636
  }
618
637
  if (info->service_config_json != nullptr) {
619
638
  *info->service_config_json =
620
- chand->info_service_config_json == nullptr
621
- ? nullptr
622
- : gpr_strdup(chand->info_service_config_json);
639
+ gpr_strdup(chand->info_service_config_json.get());
623
640
  }
624
641
  gpr_mu_unlock(&chand->info_mu);
625
642
  }
@@ -699,19 +716,15 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
699
716
  return GRPC_ERROR_NONE;
700
717
  }
701
718
 
702
- static void shutdown_resolver_locked(void* arg, grpc_error* error) {
703
- grpc_core::Resolver* resolver = static_cast<grpc_core::Resolver*>(arg);
704
- resolver->Orphan();
705
- }
706
-
707
719
  /* Destructor for channel_data */
708
720
  static void cc_destroy_channel_elem(grpc_channel_element* elem) {
709
721
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
710
722
  if (chand->resolver != nullptr) {
711
- GRPC_CLOSURE_SCHED(
712
- GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver.release(),
713
- grpc_combiner_scheduler(chand->combiner)),
714
- GRPC_ERROR_NONE);
723
+ // The only way we can get here is if we never started resolving,
724
+ // because we take a ref to the channel stack when we start
725
+ // resolving and do not release it until the resolver callback is
726
+ // invoked after the resolver shuts down.
727
+ chand->resolver.reset();
715
728
  }
716
729
  if (chand->client_channel_factory != nullptr) {
717
730
  grpc_client_channel_factory_unref(chand->client_channel_factory);
@@ -721,8 +734,10 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
721
734
  chand->interested_parties);
722
735
  chand->lb_policy.reset();
723
736
  }
724
- gpr_free(chand->info_lb_policy_name);
725
- gpr_free(chand->info_service_config_json);
737
+ // TODO(roth): Once we convert the filter API to C++, there will no
738
+ // longer be any need to explicitly reset these smart pointer data members.
739
+ chand->info_lb_policy_name.reset();
740
+ chand->info_service_config_json.reset();
726
741
  chand->retry_throttle_data.reset();
727
742
  chand->method_params_table.reset();
728
743
  grpc_client_channel_stop_backup_polling(chand->interested_parties);
@@ -794,6 +809,15 @@ typedef struct {
794
809
  // The batch to use in the subchannel call.
795
810
  // Its payload field points to subchannel_call_retry_state.batch_payload.
796
811
  grpc_transport_stream_op_batch batch;
812
+ // For intercepting on_complete.
813
+ grpc_closure on_complete;
814
+ } subchannel_batch_data;
815
+
816
+ // Retry state associated with a subchannel call.
817
+ // Stored in the parent_data of the subchannel call object.
818
+ typedef struct {
819
+ // subchannel_batch_data.batch.payload points to this.
820
+ grpc_transport_stream_op_batch_payload batch_payload;
797
821
  // For send_initial_metadata.
798
822
  // Note that we need to make a copy of the initial metadata for each
799
823
  // subchannel call instead of just referring to the copy in call_data,
@@ -817,15 +841,7 @@ typedef struct {
817
841
  // For intercepting recv_trailing_metadata.
818
842
  grpc_metadata_batch recv_trailing_metadata;
819
843
  grpc_transport_stream_stats collect_stats;
820
- // For intercepting on_complete.
821
- grpc_closure on_complete;
822
- } subchannel_batch_data;
823
-
824
- // Retry state associated with a subchannel call.
825
- // Stored in the parent_data of the subchannel call object.
826
- typedef struct {
827
- // subchannel_batch_data.batch.payload points to this.
828
- grpc_transport_stream_op_batch_payload batch_payload;
844
+ grpc_closure recv_trailing_metadata_ready;
829
845
  // These fields indicate which ops have been started and completed on
830
846
  // this subchannel call.
831
847
  size_t started_send_message_count;
@@ -1192,35 +1208,24 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
1192
1208
  "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
1193
1209
  elem->channel_data, calld, num_batches, grpc_error_string(error));
1194
1210
  }
1195
- grpc_transport_stream_op_batch*
1196
- batches[GPR_ARRAY_SIZE(calld->pending_batches)];
1197
- size_t num_batches = 0;
1211
+ grpc_core::CallCombinerClosureList closures;
1198
1212
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1199
1213
  pending_batch* pending = &calld->pending_batches[i];
1200
1214
  grpc_transport_stream_op_batch* batch = pending->batch;
1201
1215
  if (batch != nullptr) {
1202
- batches[num_batches++] = batch;
1216
+ batch->handler_private.extra_arg = calld;
1217
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
1218
+ fail_pending_batch_in_call_combiner, batch,
1219
+ grpc_schedule_on_exec_ctx);
1220
+ closures.Add(&batch->handler_private.closure, GRPC_ERROR_REF(error),
1221
+ "pending_batches_fail");
1203
1222
  pending_batch_clear(calld, pending);
1204
1223
  }
1205
1224
  }
1206
- for (size_t i = yield_call_combiner ? 1 : 0; i < num_batches; ++i) {
1207
- grpc_transport_stream_op_batch* batch = batches[i];
1208
- batch->handler_private.extra_arg = calld;
1209
- GRPC_CLOSURE_INIT(&batch->handler_private.closure,
1210
- fail_pending_batch_in_call_combiner, batch,
1211
- grpc_schedule_on_exec_ctx);
1212
- GRPC_CALL_COMBINER_START(calld->call_combiner,
1213
- &batch->handler_private.closure,
1214
- GRPC_ERROR_REF(error), "pending_batches_fail");
1215
- }
1216
1225
  if (yield_call_combiner) {
1217
- if (num_batches > 0) {
1218
- // Note: This will release the call combiner.
1219
- grpc_transport_stream_op_batch_finish_with_failure(
1220
- batches[0], GRPC_ERROR_REF(error), calld->call_combiner);
1221
- } else {
1222
- GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail");
1223
- }
1226
+ closures.RunClosures(calld->call_combiner);
1227
+ } else {
1228
+ closures.RunClosuresWithoutYielding(calld->call_combiner);
1224
1229
  }
1225
1230
  GRPC_ERROR_UNREF(error);
1226
1231
  }
@@ -1255,30 +1260,22 @@ static void pending_batches_resume(grpc_call_element* elem) {
1255
1260
  " pending batches on subchannel_call=%p",
1256
1261
  chand, calld, num_batches, calld->subchannel_call);
1257
1262
  }
1258
- grpc_transport_stream_op_batch*
1259
- batches[GPR_ARRAY_SIZE(calld->pending_batches)];
1260
- size_t num_batches = 0;
1263
+ grpc_core::CallCombinerClosureList closures;
1261
1264
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1262
1265
  pending_batch* pending = &calld->pending_batches[i];
1263
1266
  grpc_transport_stream_op_batch* batch = pending->batch;
1264
1267
  if (batch != nullptr) {
1265
- batches[num_batches++] = batch;
1268
+ batch->handler_private.extra_arg = calld->subchannel_call;
1269
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
1270
+ resume_pending_batch_in_call_combiner, batch,
1271
+ grpc_schedule_on_exec_ctx);
1272
+ closures.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
1273
+ "pending_batches_resume");
1266
1274
  pending_batch_clear(calld, pending);
1267
1275
  }
1268
1276
  }
1269
- for (size_t i = 1; i < num_batches; ++i) {
1270
- grpc_transport_stream_op_batch* batch = batches[i];
1271
- batch->handler_private.extra_arg = calld->subchannel_call;
1272
- GRPC_CLOSURE_INIT(&batch->handler_private.closure,
1273
- resume_pending_batch_in_call_combiner, batch,
1274
- grpc_schedule_on_exec_ctx);
1275
- GRPC_CALL_COMBINER_START(calld->call_combiner,
1276
- &batch->handler_private.closure, GRPC_ERROR_NONE,
1277
- "pending_batches_resume");
1278
- }
1279
- GPR_ASSERT(num_batches > 0);
1280
1277
  // Note: This will release the call combiner.
1281
- grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
1278
+ closures.RunClosures(calld->call_combiner);
1282
1279
  }
1283
1280
 
1284
1281
  static void maybe_clear_pending_batch(grpc_call_element* elem,
@@ -1293,7 +1290,10 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
1293
1290
  batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
1294
1291
  nullptr) &&
1295
1292
  (!batch->recv_message ||
1296
- batch->payload->recv_message.recv_message_ready == nullptr)) {
1293
+ batch->payload->recv_message.recv_message_ready == nullptr) &&
1294
+ (!batch->recv_trailing_metadata ||
1295
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
1296
+ nullptr)) {
1297
1297
  if (grpc_client_channel_trace.enabled()) {
1298
1298
  gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
1299
1299
  calld);
@@ -1302,75 +1302,27 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
1302
1302
  }
1303
1303
  }
1304
1304
 
1305
- // Returns true if all ops in the pending batch have been completed.
1306
- static bool pending_batch_is_completed(
1307
- pending_batch* pending, call_data* calld,
1308
- subchannel_call_retry_state* retry_state) {
1309
- if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
1310
- return false;
1311
- }
1312
- if (pending->batch->send_initial_metadata &&
1313
- !retry_state->completed_send_initial_metadata) {
1314
- return false;
1315
- }
1316
- if (pending->batch->send_message &&
1317
- retry_state->completed_send_message_count <
1318
- calld->send_messages->size()) {
1319
- return false;
1320
- }
1321
- if (pending->batch->send_trailing_metadata &&
1322
- !retry_state->completed_send_trailing_metadata) {
1323
- return false;
1324
- }
1325
- if (pending->batch->recv_initial_metadata &&
1326
- !retry_state->completed_recv_initial_metadata) {
1327
- return false;
1328
- }
1329
- if (pending->batch->recv_message &&
1330
- retry_state->completed_recv_message_count <
1331
- retry_state->started_recv_message_count) {
1332
- return false;
1333
- }
1334
- if (pending->batch->recv_trailing_metadata &&
1335
- !retry_state->completed_recv_trailing_metadata) {
1336
- return false;
1337
- }
1338
- return true;
1339
- }
1340
-
1341
- // Returns true if any op in the batch was not yet started.
1342
- static bool pending_batch_is_unstarted(
1343
- pending_batch* pending, call_data* calld,
1344
- subchannel_call_retry_state* retry_state) {
1345
- if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
1346
- return false;
1347
- }
1348
- if (pending->batch->send_initial_metadata &&
1349
- !retry_state->started_send_initial_metadata) {
1350
- return true;
1351
- }
1352
- if (pending->batch->send_message &&
1353
- retry_state->started_send_message_count < calld->send_messages->size()) {
1354
- return true;
1355
- }
1356
- if (pending->batch->send_trailing_metadata &&
1357
- !retry_state->started_send_trailing_metadata) {
1358
- return true;
1359
- }
1360
- if (pending->batch->recv_initial_metadata &&
1361
- !retry_state->started_recv_initial_metadata) {
1362
- return true;
1363
- }
1364
- if (pending->batch->recv_message &&
1365
- retry_state->completed_recv_message_count ==
1366
- retry_state->started_recv_message_count) {
1367
- return true;
1368
- }
1369
- if (pending->batch->recv_trailing_metadata &&
1370
- !retry_state->started_recv_trailing_metadata) {
1371
- return true;
1305
+ // Returns a pointer to the first pending batch for which predicate(batch)
1306
+ // returns true, or null if not found.
1307
+ template <typename Predicate>
1308
+ static pending_batch* pending_batch_find(grpc_call_element* elem,
1309
+ const char* log_message,
1310
+ Predicate predicate) {
1311
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1312
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1313
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1314
+ pending_batch* pending = &calld->pending_batches[i];
1315
+ grpc_transport_stream_op_batch* batch = pending->batch;
1316
+ if (batch != nullptr && predicate(batch)) {
1317
+ if (grpc_client_channel_trace.enabled()) {
1318
+ gpr_log(GPR_INFO,
1319
+ "chand=%p calld=%p: %s pending batch at index %" PRIuPTR, chand,
1320
+ calld, log_message, i);
1321
+ }
1322
+ return pending;
1323
+ }
1372
1324
  }
1373
- return false;
1325
+ return nullptr;
1374
1326
  }
1375
1327
 
1376
1328
  //
@@ -1557,8 +1509,13 @@ static bool maybe_retry(grpc_call_element* elem,
1557
1509
  // subchannel_batch_data
1558
1510
  //
1559
1511
 
1512
+ // Creates a subchannel_batch_data object on the call's arena with the
1513
+ // specified refcount. If set_on_complete is true, the batch's
1514
+ // on_complete callback will be set to point to on_complete();
1515
+ // otherwise, the batch's on_complete callback will be null.
1560
1516
  static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
1561
- int refcount) {
1517
+ int refcount,
1518
+ bool set_on_complete) {
1562
1519
  call_data* calld = static_cast<call_data*>(elem->call_data);
1563
1520
  subchannel_call_retry_state* retry_state =
1564
1521
  static_cast<subchannel_call_retry_state*>(
@@ -1571,26 +1528,32 @@ static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
1571
1528
  GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
1572
1529
  batch_data->batch.payload = &retry_state->batch_payload;
1573
1530
  gpr_ref_init(&batch_data->refs, refcount);
1574
- GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
1575
- grpc_schedule_on_exec_ctx);
1576
- batch_data->batch.on_complete = &batch_data->on_complete;
1531
+ if (set_on_complete) {
1532
+ GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
1533
+ grpc_schedule_on_exec_ctx);
1534
+ batch_data->batch.on_complete = &batch_data->on_complete;
1535
+ }
1577
1536
  GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
1578
1537
  return batch_data;
1579
1538
  }
1580
1539
 
1581
1540
  static void batch_data_unref(subchannel_batch_data* batch_data) {
1582
1541
  if (gpr_unref(&batch_data->refs)) {
1583
- if (batch_data->send_initial_metadata_storage != nullptr) {
1584
- grpc_metadata_batch_destroy(&batch_data->send_initial_metadata);
1542
+ subchannel_call_retry_state* retry_state =
1543
+ static_cast<subchannel_call_retry_state*>(
1544
+ grpc_connected_subchannel_call_get_parent_data(
1545
+ batch_data->subchannel_call));
1546
+ if (batch_data->batch.send_initial_metadata) {
1547
+ grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
1585
1548
  }
1586
- if (batch_data->send_trailing_metadata_storage != nullptr) {
1587
- grpc_metadata_batch_destroy(&batch_data->send_trailing_metadata);
1549
+ if (batch_data->batch.send_trailing_metadata) {
1550
+ grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
1588
1551
  }
1589
1552
  if (batch_data->batch.recv_initial_metadata) {
1590
- grpc_metadata_batch_destroy(&batch_data->recv_initial_metadata);
1553
+ grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
1591
1554
  }
1592
1555
  if (batch_data->batch.recv_trailing_metadata) {
1593
- grpc_metadata_batch_destroy(&batch_data->recv_trailing_metadata);
1556
+ grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
1594
1557
  }
1595
1558
  GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
1596
1559
  call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
@@ -1606,30 +1569,22 @@ static void batch_data_unref(subchannel_batch_data* batch_data) {
1606
1569
  static void invoke_recv_initial_metadata_callback(void* arg,
1607
1570
  grpc_error* error) {
1608
1571
  subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
1609
- channel_data* chand =
1610
- static_cast<channel_data*>(batch_data->elem->channel_data);
1611
- call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
1612
1572
  // Find pending batch.
1613
- pending_batch* pending = nullptr;
1614
- for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1615
- grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
1616
- if (batch != nullptr && batch->recv_initial_metadata &&
1617
- batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
1618
- nullptr) {
1619
- if (grpc_client_channel_trace.enabled()) {
1620
- gpr_log(GPR_INFO,
1621
- "chand=%p calld=%p: invoking recv_initial_metadata_ready for "
1622
- "pending batch at index %" PRIuPTR,
1623
- chand, calld, i);
1624
- }
1625
- pending = &calld->pending_batches[i];
1626
- break;
1627
- }
1628
- }
1573
+ pending_batch* pending = pending_batch_find(
1574
+ batch_data->elem, "invoking recv_initial_metadata_ready for",
1575
+ [](grpc_transport_stream_op_batch* batch) {
1576
+ return batch->recv_initial_metadata &&
1577
+ batch->payload->recv_initial_metadata
1578
+ .recv_initial_metadata_ready != nullptr;
1579
+ });
1629
1580
  GPR_ASSERT(pending != nullptr);
1630
1581
  // Return metadata.
1582
+ subchannel_call_retry_state* retry_state =
1583
+ static_cast<subchannel_call_retry_state*>(
1584
+ grpc_connected_subchannel_call_get_parent_data(
1585
+ batch_data->subchannel_call));
1631
1586
  grpc_metadata_batch_move(
1632
- &batch_data->recv_initial_metadata,
1587
+ &retry_state->recv_initial_metadata,
1633
1588
  pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
1634
1589
  // Update bookkeeping.
1635
1590
  // Note: Need to do this before invoking the callback, since invoking
@@ -1661,11 +1616,20 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
1661
1616
  static_cast<subchannel_call_retry_state*>(
1662
1617
  grpc_connected_subchannel_call_get_parent_data(
1663
1618
  batch_data->subchannel_call));
1619
+ retry_state->completed_recv_initial_metadata = true;
1620
+ // If a retry was already dispatched, then we're not going to use the
1621
+ // result of this recv_initial_metadata op, so do nothing.
1622
+ if (retry_state->retry_dispatched) {
1623
+ GRPC_CALL_COMBINER_STOP(
1624
+ calld->call_combiner,
1625
+ "recv_initial_metadata_ready after retry dispatched");
1626
+ return;
1627
+ }
1664
1628
  // If we got an error or a Trailers-Only response and have not yet gotten
1665
- // the recv_trailing_metadata on_complete callback, then defer
1666
- // propagating this callback back to the surface. We can evaluate whether
1667
- // to retry when recv_trailing_metadata comes back.
1668
- if (GPR_UNLIKELY((batch_data->trailing_metadata_available ||
1629
+ // the recv_trailing_metadata_ready callback, then defer propagating this
1630
+ // callback back to the surface. We can evaluate whether to retry when
1631
+ // recv_trailing_metadata comes back.
1632
+ if (GPR_UNLIKELY((retry_state->trailing_metadata_available ||
1669
1633
  error != GRPC_ERROR_NONE) &&
1670
1634
  !retry_state->completed_recv_trailing_metadata)) {
1671
1635
  if (grpc_client_channel_trace.enabled()) {
@@ -1689,9 +1653,9 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
1689
1653
  }
1690
1654
  // Received valid initial metadata, so commit the call.
1691
1655
  retry_commit(elem, retry_state);
1656
+ // Invoke the callback to return the result to the surface.
1692
1657
  // Manually invoking a callback function; it does not take ownership of error.
1693
1658
  invoke_recv_initial_metadata_callback(batch_data, error);
1694
- GRPC_ERROR_UNREF(error);
1695
1659
  }
1696
1660
 
1697
1661
  //
@@ -1701,29 +1665,21 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
1701
1665
  // Invokes recv_message_ready for a subchannel batch.
1702
1666
  static void invoke_recv_message_callback(void* arg, grpc_error* error) {
1703
1667
  subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
1704
- channel_data* chand =
1705
- static_cast<channel_data*>(batch_data->elem->channel_data);
1706
- call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
1707
1668
  // Find pending op.
1708
- pending_batch* pending = nullptr;
1709
- for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1710
- grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
1711
- if (batch != nullptr && batch->recv_message &&
1712
- batch->payload->recv_message.recv_message_ready != nullptr) {
1713
- if (grpc_client_channel_trace.enabled()) {
1714
- gpr_log(GPR_INFO,
1715
- "chand=%p calld=%p: invoking recv_message_ready for "
1716
- "pending batch at index %" PRIuPTR,
1717
- chand, calld, i);
1718
- }
1719
- pending = &calld->pending_batches[i];
1720
- break;
1721
- }
1722
- }
1669
+ pending_batch* pending = pending_batch_find(
1670
+ batch_data->elem, "invoking recv_message_ready for",
1671
+ [](grpc_transport_stream_op_batch* batch) {
1672
+ return batch->recv_message &&
1673
+ batch->payload->recv_message.recv_message_ready != nullptr;
1674
+ });
1723
1675
  GPR_ASSERT(pending != nullptr);
1724
1676
  // Return payload.
1677
+ subchannel_call_retry_state* retry_state =
1678
+ static_cast<subchannel_call_retry_state*>(
1679
+ grpc_connected_subchannel_call_get_parent_data(
1680
+ batch_data->subchannel_call));
1725
1681
  *pending->batch->payload->recv_message.recv_message =
1726
- std::move(batch_data->recv_message);
1682
+ std::move(retry_state->recv_message);
1727
1683
  // Update bookkeeping.
1728
1684
  // Note: Need to do this before invoking the callback, since invoking
1729
1685
  // the callback will result in yielding the call combiner.
@@ -1751,12 +1707,20 @@ static void recv_message_ready(void* arg, grpc_error* error) {
1751
1707
  static_cast<subchannel_call_retry_state*>(
1752
1708
  grpc_connected_subchannel_call_get_parent_data(
1753
1709
  batch_data->subchannel_call));
1710
+ ++retry_state->completed_recv_message_count;
1711
+ // If a retry was already dispatched, then we're not going to use the
1712
+ // result of this recv_message op, so do nothing.
1713
+ if (retry_state->retry_dispatched) {
1714
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner,
1715
+ "recv_message_ready after retry dispatched");
1716
+ return;
1717
+ }
1754
1718
  // If we got an error or the payload was nullptr and we have not yet gotten
1755
- // the recv_trailing_metadata on_complete callback, then defer
1756
- // propagating this callback back to the surface. We can evaluate whether
1757
- // to retry when recv_trailing_metadata comes back.
1719
+ // the recv_trailing_metadata_ready callback, then defer propagating this
1720
+ // callback back to the surface. We can evaluate whether to retry when
1721
+ // recv_trailing_metadata comes back.
1758
1722
  if (GPR_UNLIKELY(
1759
- (batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
1723
+ (retry_state->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
1760
1724
  !retry_state->completed_recv_trailing_metadata)) {
1761
1725
  if (grpc_client_channel_trace.enabled()) {
1762
1726
  gpr_log(GPR_INFO,
@@ -1777,133 +1741,272 @@ static void recv_message_ready(void* arg, grpc_error* error) {
1777
1741
  }
1778
1742
  // Received a valid message, so commit the call.
1779
1743
  retry_commit(elem, retry_state);
1744
+ // Invoke the callback to return the result to the surface.
1780
1745
  // Manually invoking a callback function; it does not take ownership of error.
1781
1746
  invoke_recv_message_callback(batch_data, error);
1782
- GRPC_ERROR_UNREF(error);
1783
1747
  }
1784
1748
 
1785
1749
  //
1786
- // list of closures to execute in call combiner
1750
+ // recv_trailing_metadata handling
1787
1751
  //
1788
1752
 
1789
- // Represents a closure that needs to run in the call combiner as part of
1790
- // starting or completing a batch.
1791
- typedef struct {
1792
- grpc_closure* closure;
1793
- grpc_error* error;
1794
- const char* reason;
1795
- bool free_reason = false;
1796
- } closure_to_execute;
1797
-
1798
- static void execute_closures_in_call_combiner(grpc_call_element* elem,
1799
- const char* caller,
1800
- closure_to_execute* closures,
1801
- size_t num_closures) {
1802
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1753
+ // Sets *status and *server_pushback_md based on batch_data and error.
1754
+ static void get_call_status(subchannel_batch_data* batch_data,
1755
+ grpc_error* error, grpc_status_code* status,
1756
+ grpc_mdelem** server_pushback_md) {
1757
+ grpc_call_element* elem = batch_data->elem;
1803
1758
  call_data* calld = static_cast<call_data*>(elem->call_data);
1804
- // Note that the call combiner will be yielded for each closure that
1805
- // we schedule. We're already running in the call combiner, so one of
1806
- // the closures can be scheduled directly, but the others will
1807
- // have to re-enter the call combiner.
1808
- if (num_closures > 0) {
1809
- if (grpc_client_channel_trace.enabled()) {
1810
- gpr_log(GPR_INFO, "chand=%p calld=%p: %s starting closure: %s", chand,
1811
- calld, caller, closures[0].reason);
1812
- }
1813
- GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
1814
- if (closures[0].free_reason) {
1815
- gpr_free(const_cast<char*>(closures[0].reason));
1816
- }
1817
- for (size_t i = 1; i < num_closures; ++i) {
1818
- if (grpc_client_channel_trace.enabled()) {
1819
- gpr_log(GPR_INFO,
1820
- "chand=%p calld=%p: %s starting closure in call combiner: %s",
1821
- chand, calld, caller, closures[i].reason);
1822
- }
1823
- GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
1824
- closures[i].error, closures[i].reason);
1825
- if (closures[i].free_reason) {
1826
- gpr_free(const_cast<char*>(closures[i].reason));
1827
- }
1828
- }
1759
+ if (error != GRPC_ERROR_NONE) {
1760
+ grpc_error_get_status(error, calld->deadline, status, nullptr, nullptr,
1761
+ nullptr);
1829
1762
  } else {
1830
- if (grpc_client_channel_trace.enabled()) {
1831
- gpr_log(GPR_INFO, "chand=%p calld=%p: no closures to run for %s", chand,
1832
- calld, caller);
1763
+ grpc_metadata_batch* md_batch =
1764
+ batch_data->batch.payload->recv_trailing_metadata
1765
+ .recv_trailing_metadata;
1766
+ GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
1767
+ *status =
1768
+ grpc_get_status_code_from_metadata(md_batch->idx.named.grpc_status->md);
1769
+ if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
1770
+ *server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
1833
1771
  }
1834
- GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run");
1835
1772
  }
1773
+ GRPC_ERROR_UNREF(error);
1836
1774
  }
1837
1775
 
1838
- //
1839
- // on_complete callback handling
1840
- //
1841
-
1842
- // Updates retry_state to reflect the ops completed in batch_data.
1843
- static void update_retry_state_for_completed_batch(
1844
- subchannel_batch_data* batch_data,
1845
- subchannel_call_retry_state* retry_state) {
1846
- if (batch_data->batch.send_initial_metadata) {
1847
- retry_state->completed_send_initial_metadata = true;
1848
- }
1849
- if (batch_data->batch.send_message) {
1850
- ++retry_state->completed_send_message_count;
1851
- }
1852
- if (batch_data->batch.send_trailing_metadata) {
1853
- retry_state->completed_send_trailing_metadata = true;
1854
- }
1855
- if (batch_data->batch.recv_initial_metadata) {
1856
- retry_state->completed_recv_initial_metadata = true;
1857
- }
1858
- if (batch_data->batch.recv_message) {
1859
- ++retry_state->completed_recv_message_count;
1860
- }
1861
- if (batch_data->batch.recv_trailing_metadata) {
1862
- retry_state->completed_recv_trailing_metadata = true;
1776
+ // Adds recv_trailing_metadata_ready closure to closures.
1777
+ static void add_closure_for_recv_trailing_metadata_ready(
1778
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
1779
+ grpc_error* error, grpc_core::CallCombinerClosureList* closures) {
1780
+ // Find pending batch.
1781
+ pending_batch* pending = pending_batch_find(
1782
+ elem, "invoking recv_trailing_metadata for",
1783
+ [](grpc_transport_stream_op_batch* batch) {
1784
+ return batch->recv_trailing_metadata &&
1785
+ batch->payload->recv_trailing_metadata
1786
+ .recv_trailing_metadata_ready != nullptr;
1787
+ });
1788
+ // If we generated the recv_trailing_metadata op internally via
1789
+ // start_internal_recv_trailing_metadata(), then there will be no
1790
+ // pending batch.
1791
+ if (pending == nullptr) {
1792
+ GRPC_ERROR_UNREF(error);
1793
+ return;
1863
1794
  }
1795
+ // Return metadata.
1796
+ subchannel_call_retry_state* retry_state =
1797
+ static_cast<subchannel_call_retry_state*>(
1798
+ grpc_connected_subchannel_call_get_parent_data(
1799
+ batch_data->subchannel_call));
1800
+ grpc_metadata_batch_move(
1801
+ &retry_state->recv_trailing_metadata,
1802
+ pending->batch->payload->recv_trailing_metadata.recv_trailing_metadata);
1803
+ // Add closure.
1804
+ closures->Add(pending->batch->payload->recv_trailing_metadata
1805
+ .recv_trailing_metadata_ready,
1806
+ error, "recv_trailing_metadata_ready for pending batch");
1807
+ // Update bookkeeping.
1808
+ pending->batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
1809
+ nullptr;
1810
+ maybe_clear_pending_batch(elem, pending);
1864
1811
  }
1865
1812
 
1866
1813
  // Adds any necessary closures for deferred recv_initial_metadata and
1867
- // recv_message callbacks to closures, updating *num_closures as needed.
1814
+ // recv_message callbacks to closures.
1868
1815
  static void add_closures_for_deferred_recv_callbacks(
1869
1816
  subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
1870
- closure_to_execute* closures, size_t* num_closures) {
1817
+ grpc_core::CallCombinerClosureList* closures) {
1871
1818
  if (batch_data->batch.recv_trailing_metadata) {
1872
1819
  // Add closure for deferred recv_initial_metadata_ready.
1873
1820
  if (GPR_UNLIKELY(retry_state->recv_initial_metadata_ready_deferred_batch !=
1874
1821
  nullptr)) {
1875
- closure_to_execute* closure = &closures[(*num_closures)++];
1876
- closure->closure = GRPC_CLOSURE_INIT(
1877
- &batch_data->recv_initial_metadata_ready,
1878
- invoke_recv_initial_metadata_callback,
1879
- retry_state->recv_initial_metadata_ready_deferred_batch,
1880
- grpc_schedule_on_exec_ctx);
1881
- closure->error = retry_state->recv_initial_metadata_error;
1882
- closure->reason = "resuming recv_initial_metadata_ready";
1822
+ GRPC_CLOSURE_INIT(&retry_state->recv_initial_metadata_ready,
1823
+ invoke_recv_initial_metadata_callback,
1824
+ retry_state->recv_initial_metadata_ready_deferred_batch,
1825
+ grpc_schedule_on_exec_ctx);
1826
+ closures->Add(&retry_state->recv_initial_metadata_ready,
1827
+ retry_state->recv_initial_metadata_error,
1828
+ "resuming recv_initial_metadata_ready");
1883
1829
  retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
1884
1830
  }
1885
1831
  // Add closure for deferred recv_message_ready.
1886
1832
  if (GPR_UNLIKELY(retry_state->recv_message_ready_deferred_batch !=
1887
1833
  nullptr)) {
1888
- closure_to_execute* closure = &closures[(*num_closures)++];
1889
- closure->closure = GRPC_CLOSURE_INIT(
1890
- &batch_data->recv_message_ready, invoke_recv_message_callback,
1891
- retry_state->recv_message_ready_deferred_batch,
1892
- grpc_schedule_on_exec_ctx);
1893
- closure->error = retry_state->recv_message_error;
1894
- closure->reason = "resuming recv_message_ready";
1834
+ GRPC_CLOSURE_INIT(&retry_state->recv_message_ready,
1835
+ invoke_recv_message_callback,
1836
+ retry_state->recv_message_ready_deferred_batch,
1837
+ grpc_schedule_on_exec_ctx);
1838
+ closures->Add(&retry_state->recv_message_ready,
1839
+ retry_state->recv_message_error,
1840
+ "resuming recv_message_ready");
1895
1841
  retry_state->recv_message_ready_deferred_batch = nullptr;
1896
1842
  }
1897
1843
  }
1898
1844
  }
1899
1845
 
1846
+ // Returns true if any op in the batch was not yet started.
1847
+ // Only looks at send ops, since recv ops are always started immediately.
1848
+ static bool pending_batch_is_unstarted(
1849
+ pending_batch* pending, call_data* calld,
1850
+ subchannel_call_retry_state* retry_state) {
1851
+ if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
1852
+ return false;
1853
+ }
1854
+ if (pending->batch->send_initial_metadata &&
1855
+ !retry_state->started_send_initial_metadata) {
1856
+ return true;
1857
+ }
1858
+ if (pending->batch->send_message &&
1859
+ retry_state->started_send_message_count < calld->send_messages->size()) {
1860
+ return true;
1861
+ }
1862
+ if (pending->batch->send_trailing_metadata &&
1863
+ !retry_state->started_send_trailing_metadata) {
1864
+ return true;
1865
+ }
1866
+ return false;
1867
+ }
1868
+
1869
+ // For any pending batch containing an op that has not yet been started,
1870
+ // adds the pending batch's completion closures to closures.
1871
+ static void add_closures_to_fail_unstarted_pending_batches(
1872
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state,
1873
+ grpc_error* error, grpc_core::CallCombinerClosureList* closures) {
1874
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1875
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1876
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1877
+ pending_batch* pending = &calld->pending_batches[i];
1878
+ if (pending_batch_is_unstarted(pending, calld, retry_state)) {
1879
+ if (grpc_client_channel_trace.enabled()) {
1880
+ gpr_log(GPR_INFO,
1881
+ "chand=%p calld=%p: failing unstarted pending batch at index "
1882
+ "%" PRIuPTR,
1883
+ chand, calld, i);
1884
+ }
1885
+ closures->Add(pending->batch->on_complete, GRPC_ERROR_REF(error),
1886
+ "failing on_complete for pending batch");
1887
+ pending->batch->on_complete = nullptr;
1888
+ maybe_clear_pending_batch(elem, pending);
1889
+ }
1890
+ }
1891
+ GRPC_ERROR_UNREF(error);
1892
+ }
1893
+
1894
+ // Runs necessary closures upon completion of a call attempt.
1895
+ static void run_closures_for_completed_call(subchannel_batch_data* batch_data,
1896
+ grpc_error* error) {
1897
+ grpc_call_element* elem = batch_data->elem;
1898
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1899
+ subchannel_call_retry_state* retry_state =
1900
+ static_cast<subchannel_call_retry_state*>(
1901
+ grpc_connected_subchannel_call_get_parent_data(
1902
+ batch_data->subchannel_call));
1903
+ // Construct list of closures to execute.
1904
+ grpc_core::CallCombinerClosureList closures;
1905
+ // First, add closure for recv_trailing_metadata_ready.
1906
+ add_closure_for_recv_trailing_metadata_ready(
1907
+ elem, batch_data, GRPC_ERROR_REF(error), &closures);
1908
+ // If there are deferred recv_initial_metadata_ready or recv_message_ready
1909
+ // callbacks, add them to closures.
1910
+ add_closures_for_deferred_recv_callbacks(batch_data, retry_state, &closures);
1911
+ // Add closures to fail any pending batches that have not yet been started.
1912
+ add_closures_to_fail_unstarted_pending_batches(
1913
+ elem, retry_state, GRPC_ERROR_REF(error), &closures);
1914
+ // Don't need batch_data anymore.
1915
+ batch_data_unref(batch_data);
1916
+ // Schedule all of the closures identified above.
1917
+ // Note: This will release the call combiner.
1918
+ closures.RunClosures(calld->call_combiner);
1919
+ GRPC_ERROR_UNREF(error);
1920
+ }
1921
+
1922
+ // Intercepts recv_trailing_metadata_ready callback for retries.
1923
+ // Commits the call and returns the trailing metadata up the stack.
1924
+ static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
1925
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
1926
+ grpc_call_element* elem = batch_data->elem;
1927
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1928
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1929
+ if (grpc_client_channel_trace.enabled()) {
1930
+ gpr_log(GPR_INFO,
1931
+ "chand=%p calld=%p: got recv_trailing_metadata_ready, error=%s",
1932
+ chand, calld, grpc_error_string(error));
1933
+ }
1934
+ subchannel_call_retry_state* retry_state =
1935
+ static_cast<subchannel_call_retry_state*>(
1936
+ grpc_connected_subchannel_call_get_parent_data(
1937
+ batch_data->subchannel_call));
1938
+ retry_state->completed_recv_trailing_metadata = true;
1939
+ // Get the call's status and check for server pushback metadata.
1940
+ grpc_status_code status = GRPC_STATUS_OK;
1941
+ grpc_mdelem* server_pushback_md = nullptr;
1942
+ get_call_status(batch_data, GRPC_ERROR_REF(error), &status,
1943
+ &server_pushback_md);
1944
+ if (grpc_client_channel_trace.enabled()) {
1945
+ gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
1946
+ calld, grpc_status_code_to_string(status));
1947
+ }
1948
+ // Check if we should retry.
1949
+ if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
1950
+ // Unref batch_data for deferred recv_initial_metadata_ready or
1951
+ // recv_message_ready callbacks, if any.
1952
+ if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
1953
+ batch_data_unref(batch_data);
1954
+ GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
1955
+ }
1956
+ if (retry_state->recv_message_ready_deferred_batch != nullptr) {
1957
+ batch_data_unref(batch_data);
1958
+ GRPC_ERROR_UNREF(retry_state->recv_message_error);
1959
+ }
1960
+ batch_data_unref(batch_data);
1961
+ return;
1962
+ }
1963
+ // Not retrying, so commit the call.
1964
+ retry_commit(elem, retry_state);
1965
+ // Run any necessary closures.
1966
+ run_closures_for_completed_call(batch_data, GRPC_ERROR_REF(error));
1967
+ }
1968
+
1969
+ //
1970
+ // on_complete callback handling
1971
+ //
1972
+
1973
+ // Adds the on_complete closure for the pending batch completed in
1974
+ // batch_data to closures.
1975
+ static void add_closure_for_completed_pending_batch(
1976
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
1977
+ subchannel_call_retry_state* retry_state, grpc_error* error,
1978
+ grpc_core::CallCombinerClosureList* closures) {
1979
+ pending_batch* pending = pending_batch_find(
1980
+ elem, "completed", [batch_data](grpc_transport_stream_op_batch* batch) {
1981
+ // Match the pending batch with the same set of send ops as the
1982
+ // subchannel batch we've just completed.
1983
+ return batch->on_complete != nullptr &&
1984
+ batch_data->batch.send_initial_metadata ==
1985
+ batch->send_initial_metadata &&
1986
+ batch_data->batch.send_message == batch->send_message &&
1987
+ batch_data->batch.send_trailing_metadata ==
1988
+ batch->send_trailing_metadata;
1989
+ });
1990
+ // If batch_data is a replay batch, then there will be no pending
1991
+ // batch to complete.
1992
+ if (pending == nullptr) {
1993
+ GRPC_ERROR_UNREF(error);
1994
+ return;
1995
+ }
1996
+ // Add closure.
1997
+ closures->Add(pending->batch->on_complete, error,
1998
+ "on_complete for pending batch");
1999
+ pending->batch->on_complete = nullptr;
2000
+ maybe_clear_pending_batch(elem, pending);
2001
+ }
2002
+
1900
2003
  // If there are any cached ops to replay or pending ops to start on the
1901
2004
  // subchannel call, adds a closure to closures to invoke
1902
- // start_retriable_subchannel_batches(), updating *num_closures as needed.
2005
+ // start_retriable_subchannel_batches().
1903
2006
  static void add_closures_for_replay_or_pending_send_ops(
1904
2007
  grpc_call_element* elem, subchannel_batch_data* batch_data,
1905
- subchannel_call_retry_state* retry_state, closure_to_execute* closures,
1906
- size_t* num_closures) {
2008
+ subchannel_call_retry_state* retry_state,
2009
+ grpc_core::CallCombinerClosureList* closures) {
1907
2010
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1908
2011
  call_data* calld = static_cast<call_data*>(elem->call_data);
1909
2012
  bool have_pending_send_message_ops =
@@ -1929,93 +2032,12 @@ static void add_closures_for_replay_or_pending_send_ops(
1929
2032
  "chand=%p calld=%p: starting next batch for pending send op(s)",
1930
2033
  chand, calld);
1931
2034
  }
1932
- closure_to_execute* closure = &closures[(*num_closures)++];
1933
- closure->closure = GRPC_CLOSURE_INIT(
1934
- &batch_data->batch.handler_private.closure,
1935
- start_retriable_subchannel_batches, elem, grpc_schedule_on_exec_ctx);
1936
- closure->error = GRPC_ERROR_NONE;
1937
- closure->reason = "starting next batch for send_* op(s)";
1938
- }
1939
- }
1940
-
1941
- // For any pending batch completed in batch_data, adds the necessary
1942
- // completion closures to closures, updating *num_closures as needed.
1943
- static void add_closures_for_completed_pending_batches(
1944
- grpc_call_element* elem, subchannel_batch_data* batch_data,
1945
- subchannel_call_retry_state* retry_state, grpc_error* error,
1946
- closure_to_execute* closures, size_t* num_closures) {
1947
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1948
- call_data* calld = static_cast<call_data*>(elem->call_data);
1949
- for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1950
- pending_batch* pending = &calld->pending_batches[i];
1951
- if (pending_batch_is_completed(pending, calld, retry_state)) {
1952
- if (grpc_client_channel_trace.enabled()) {
1953
- gpr_log(GPR_INFO,
1954
- "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
1955
- chand, calld, i);
1956
- }
1957
- // Copy the trailing metadata to return it to the surface.
1958
- if (batch_data->batch.recv_trailing_metadata) {
1959
- grpc_metadata_batch_move(&batch_data->recv_trailing_metadata,
1960
- pending->batch->payload->recv_trailing_metadata
1961
- .recv_trailing_metadata);
1962
- }
1963
- closure_to_execute* closure = &closures[(*num_closures)++];
1964
- closure->closure = pending->batch->on_complete;
1965
- closure->error = GRPC_ERROR_REF(error);
1966
- closure->reason = "on_complete for pending batch";
1967
- pending->batch->on_complete = nullptr;
1968
- maybe_clear_pending_batch(elem, pending);
1969
- }
1970
- }
1971
- GRPC_ERROR_UNREF(error);
1972
- }
1973
-
1974
- // For any pending batch containing an op that has not yet been started,
1975
- // adds the pending batch's completion closures to closures, updating
1976
- // *num_closures as needed.
1977
- static void add_closures_to_fail_unstarted_pending_batches(
1978
- grpc_call_element* elem, subchannel_call_retry_state* retry_state,
1979
- grpc_error* error, closure_to_execute* closures, size_t* num_closures) {
1980
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1981
- call_data* calld = static_cast<call_data*>(elem->call_data);
1982
- for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1983
- pending_batch* pending = &calld->pending_batches[i];
1984
- if (pending_batch_is_unstarted(pending, calld, retry_state)) {
1985
- if (grpc_client_channel_trace.enabled()) {
1986
- gpr_log(GPR_INFO,
1987
- "chand=%p calld=%p: failing unstarted pending batch at index "
1988
- "%" PRIuPTR,
1989
- chand, calld, i);
1990
- }
1991
- if (pending->batch->recv_initial_metadata) {
1992
- closure_to_execute* closure = &closures[(*num_closures)++];
1993
- closure->closure = pending->batch->payload->recv_initial_metadata
1994
- .recv_initial_metadata_ready;
1995
- closure->error = GRPC_ERROR_REF(error);
1996
- closure->reason =
1997
- "failing recv_initial_metadata_ready for pending batch";
1998
- pending->batch->payload->recv_initial_metadata
1999
- .recv_initial_metadata_ready = nullptr;
2000
- }
2001
- if (pending->batch->recv_message) {
2002
- *pending->batch->payload->recv_message.recv_message = nullptr;
2003
- closure_to_execute* closure = &closures[(*num_closures)++];
2004
- closure->closure =
2005
- pending->batch->payload->recv_message.recv_message_ready;
2006
- closure->error = GRPC_ERROR_REF(error);
2007
- closure->reason = "failing recv_message_ready for pending batch";
2008
- pending->batch->payload->recv_message.recv_message_ready = nullptr;
2009
- }
2010
- closure_to_execute* closure = &closures[(*num_closures)++];
2011
- closure->closure = pending->batch->on_complete;
2012
- closure->error = GRPC_ERROR_REF(error);
2013
- closure->reason = "failing on_complete for pending batch";
2014
- pending->batch->on_complete = nullptr;
2015
- maybe_clear_pending_batch(elem, pending);
2016
- }
2035
+ GRPC_CLOSURE_INIT(&batch_data->batch.handler_private.closure,
2036
+ start_retriable_subchannel_batches, elem,
2037
+ grpc_schedule_on_exec_ctx);
2038
+ closures->Add(&batch_data->batch.handler_private.closure, GRPC_ERROR_NONE,
2039
+ "starting next batch for send_* op(s)");
2017
2040
  }
2018
- GRPC_ERROR_UNREF(error);
2019
2041
  }
2020
2042
 
2021
2043
  // Callback used to intercept on_complete from subchannel calls.
@@ -2035,136 +2057,49 @@ static void on_complete(void* arg, grpc_error* error) {
2035
2057
  static_cast<subchannel_call_retry_state*>(
2036
2058
  grpc_connected_subchannel_call_get_parent_data(
2037
2059
  batch_data->subchannel_call));
2038
- // If we have previously completed recv_trailing_metadata, then the
2039
- // call is finished.
2040
- bool call_finished = retry_state->completed_recv_trailing_metadata;
2041
- // Record whether we were already committed before receiving this callback.
2042
- const bool previously_committed = calld->retry_committed;
2043
2060
  // Update bookkeeping in retry_state.
2044
- update_retry_state_for_completed_batch(batch_data, retry_state);
2045
- if (call_finished) {
2046
- if (grpc_client_channel_trace.enabled()) {
2047
- gpr_log(GPR_INFO, "chand=%p calld=%p: call already finished", chand,
2048
- calld);
2049
- }
2050
- } else {
2051
- // Check if this batch finished the call, and if so, get its status.
2052
- // The call is finished if either (a) this callback was invoked with
2053
- // an error or (b) we receive status.
2054
- grpc_status_code status = GRPC_STATUS_OK;
2055
- grpc_mdelem* server_pushback_md = nullptr;
2056
- if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) { // Case (a).
2057
- call_finished = true;
2058
- grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
2059
- nullptr);
2060
- } else if (batch_data->batch.recv_trailing_metadata) { // Case (b).
2061
- call_finished = true;
2062
- grpc_metadata_batch* md_batch =
2063
- batch_data->batch.payload->recv_trailing_metadata
2064
- .recv_trailing_metadata;
2065
- GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
2066
- status = grpc_get_status_code_from_metadata(
2067
- md_batch->idx.named.grpc_status->md);
2068
- if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
2069
- server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
2070
- }
2071
- }
2072
- // If the call just finished, check if we should retry.
2073
- if (call_finished) {
2074
- if (grpc_client_channel_trace.enabled()) {
2075
- gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
2076
- calld, grpc_status_code_to_string(status));
2077
- }
2078
- if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
2079
- // Unref batch_data for deferred recv_initial_metadata_ready or
2080
- // recv_message_ready callbacks, if any.
2081
- if (batch_data->batch.recv_trailing_metadata &&
2082
- retry_state->recv_initial_metadata_ready_deferred_batch !=
2083
- nullptr) {
2084
- batch_data_unref(batch_data);
2085
- GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
2086
- }
2087
- if (batch_data->batch.recv_trailing_metadata &&
2088
- retry_state->recv_message_ready_deferred_batch != nullptr) {
2089
- batch_data_unref(batch_data);
2090
- GRPC_ERROR_UNREF(retry_state->recv_message_error);
2091
- }
2092
- // Track number of pending subchannel send batches and determine if
2093
- // this was the last one.
2094
- bool last_callback_complete = false;
2095
- if (batch_data->batch.send_initial_metadata ||
2096
- batch_data->batch.send_message ||
2097
- batch_data->batch.send_trailing_metadata) {
2098
- --calld->num_pending_retriable_subchannel_send_batches;
2099
- last_callback_complete =
2100
- calld->num_pending_retriable_subchannel_send_batches == 0;
2101
- }
2102
- batch_data_unref(batch_data);
2103
- // If we just completed the last subchannel send batch, unref the
2104
- // call stack.
2105
- if (last_callback_complete) {
2106
- GRPC_CALL_STACK_UNREF(calld->owning_call, "subchannel_send_batches");
2107
- }
2108
- return;
2109
- }
2110
- // Not retrying, so commit the call.
2111
- retry_commit(elem, retry_state);
2112
- }
2061
+ if (batch_data->batch.send_initial_metadata) {
2062
+ retry_state->completed_send_initial_metadata = true;
2113
2063
  }
2114
- // If we were already committed before receiving this callback, free
2115
- // cached data for send ops that we've just completed. (If the call has
2116
- // just now finished, the call to retry_commit() above will have freed all
2117
- // cached send ops, so we don't need to do it here.)
2118
- if (previously_committed) {
2064
+ if (batch_data->batch.send_message) {
2065
+ ++retry_state->completed_send_message_count;
2066
+ }
2067
+ if (batch_data->batch.send_trailing_metadata) {
2068
+ retry_state->completed_send_trailing_metadata = true;
2069
+ }
2070
+ // If the call is committed, free cached data for send ops that we've just
2071
+ // completed.
2072
+ if (calld->retry_committed) {
2119
2073
  free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
2120
2074
  }
2121
- // Call not being retried.
2122
2075
  // Construct list of closures to execute.
2123
- // Max number of closures is number of pending batches plus one for
2124
- // each of:
2125
- // - recv_initial_metadata_ready (either deferred or unstarted)
2126
- // - recv_message_ready (either deferred or unstarted)
2127
- // - starting a new batch for pending send ops
2128
- closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches) + 3];
2129
- size_t num_closures = 0;
2130
- // If there are deferred recv_initial_metadata_ready or recv_message_ready
2131
- // callbacks, add them to closures.
2132
- add_closures_for_deferred_recv_callbacks(batch_data, retry_state, closures,
2133
- &num_closures);
2134
- // Find pending batches whose ops are now complete and add their
2135
- // on_complete callbacks to closures.
2136
- add_closures_for_completed_pending_batches(elem, batch_data, retry_state,
2137
- GRPC_ERROR_REF(error), closures,
2138
- &num_closures);
2139
- // Add closures to handle any pending batches that have not yet been started.
2140
- // If the call is finished, we fail these batches; otherwise, we add a
2141
- // callback to start_retriable_subchannel_batches() to start them on
2142
- // the subchannel call.
2143
- if (call_finished) {
2144
- add_closures_to_fail_unstarted_pending_batches(
2145
- elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures);
2146
- } else {
2147
- add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
2148
- closures, &num_closures);
2076
+ grpc_core::CallCombinerClosureList closures;
2077
+ // If a retry was already dispatched, that means we saw
2078
+ // recv_trailing_metadata before this, so we do nothing here.
2079
+ // Otherwise, invoke the callback to return the result to the surface.
2080
+ if (!retry_state->retry_dispatched) {
2081
+ // Add closure for the completed pending batch, if any.
2082
+ add_closure_for_completed_pending_batch(elem, batch_data, retry_state,
2083
+ GRPC_ERROR_REF(error), &closures);
2084
+ // If needed, add a callback to start any replay or pending send ops on
2085
+ // the subchannel call.
2086
+ if (!retry_state->completed_recv_trailing_metadata) {
2087
+ add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
2088
+ &closures);
2089
+ }
2149
2090
  }
2150
2091
  // Track number of pending subchannel send batches and determine if this
2151
2092
  // was the last one.
2152
- bool last_callback_complete = false;
2153
- if (batch_data->batch.send_initial_metadata ||
2154
- batch_data->batch.send_message ||
2155
- batch_data->batch.send_trailing_metadata) {
2156
- --calld->num_pending_retriable_subchannel_send_batches;
2157
- last_callback_complete =
2158
- calld->num_pending_retriable_subchannel_send_batches == 0;
2159
- }
2093
+ --calld->num_pending_retriable_subchannel_send_batches;
2094
+ const bool last_send_batch_complete =
2095
+ calld->num_pending_retriable_subchannel_send_batches == 0;
2160
2096
  // Don't need batch_data anymore.
2161
2097
  batch_data_unref(batch_data);
2162
2098
  // Schedule all of the closures identified above.
2163
2099
  // Note: This yeilds the call combiner.
2164
- execute_closures_in_call_combiner(elem, "on_complete", closures,
2165
- num_closures);
2166
- // If we just completed the last subchannel send batch, unref the call stack.
2167
- if (last_callback_complete) {
2100
+ closures.RunClosures(calld->call_combiner);
2101
+ // If this was the last subchannel send batch, unref the call stack.
2102
+ if (last_send_batch_complete) {
2168
2103
  GRPC_CALL_STACK_UNREF(calld->owning_call, "subchannel_send_batches");
2169
2104
  }
2170
2105
  }
@@ -2185,27 +2120,22 @@ static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
2185
2120
 
2186
2121
  // Adds a closure to closures that will execute batch in the call combiner.
2187
2122
  static void add_closure_for_subchannel_batch(
2188
- call_data* calld, grpc_transport_stream_op_batch* batch,
2189
- closure_to_execute* closures, size_t* num_closures) {
2123
+ grpc_call_element* elem, grpc_transport_stream_op_batch* batch,
2124
+ grpc_core::CallCombinerClosureList* closures) {
2125
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2126
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2190
2127
  batch->handler_private.extra_arg = calld->subchannel_call;
2191
2128
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
2192
2129
  start_batch_in_call_combiner, batch,
2193
2130
  grpc_schedule_on_exec_ctx);
2194
- closure_to_execute* closure = &closures[(*num_closures)++];
2195
- closure->closure = &batch->handler_private.closure;
2196
- closure->error = GRPC_ERROR_NONE;
2197
- // If the tracer is enabled, we log a more detailed message, which
2198
- // requires dynamic allocation. This will be freed in
2199
- // start_retriable_subchannel_batches().
2200
2131
  if (grpc_client_channel_trace.enabled()) {
2201
2132
  char* batch_str = grpc_transport_stream_op_batch_string(batch);
2202
- gpr_asprintf(const_cast<char**>(&closure->reason),
2203
- "starting batch in call combiner: %s", batch_str);
2133
+ gpr_log(GPR_INFO, "chand=%p calld=%p: starting subchannel batch: %s", chand,
2134
+ calld, batch_str);
2204
2135
  gpr_free(batch_str);
2205
- closure->free_reason = true;
2206
- } else {
2207
- closure->reason = "start_subchannel_batch";
2208
2136
  }
2137
+ closures->Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
2138
+ "start_subchannel_batch");
2209
2139
  }
2210
2140
 
2211
2141
  // Adds retriable send_initial_metadata op to batch_data.
@@ -2221,28 +2151,28 @@ static void add_retriable_send_initial_metadata_op(
2221
2151
  //
2222
2152
  // If we've already completed one or more attempts, add the
2223
2153
  // grpc-retry-attempts header.
2224
- batch_data->send_initial_metadata_storage =
2154
+ retry_state->send_initial_metadata_storage =
2225
2155
  static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
2226
2156
  calld->arena, sizeof(grpc_linked_mdelem) *
2227
2157
  (calld->send_initial_metadata.list.count +
2228
2158
  (calld->num_attempts_completed > 0))));
2229
2159
  grpc_metadata_batch_copy(&calld->send_initial_metadata,
2230
- &batch_data->send_initial_metadata,
2231
- batch_data->send_initial_metadata_storage);
2232
- if (GPR_UNLIKELY(batch_data->send_initial_metadata.idx.named
2160
+ &retry_state->send_initial_metadata,
2161
+ retry_state->send_initial_metadata_storage);
2162
+ if (GPR_UNLIKELY(retry_state->send_initial_metadata.idx.named
2233
2163
  .grpc_previous_rpc_attempts != nullptr)) {
2234
- grpc_metadata_batch_remove(
2235
- &batch_data->send_initial_metadata,
2236
- batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
2164
+ grpc_metadata_batch_remove(&retry_state->send_initial_metadata,
2165
+ retry_state->send_initial_metadata.idx.named
2166
+ .grpc_previous_rpc_attempts);
2237
2167
  }
2238
2168
  if (GPR_UNLIKELY(calld->num_attempts_completed > 0)) {
2239
2169
  grpc_mdelem retry_md = grpc_mdelem_from_slices(
2240
2170
  GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
2241
2171
  *retry_count_strings[calld->num_attempts_completed - 1]);
2242
2172
  grpc_error* error = grpc_metadata_batch_add_tail(
2243
- &batch_data->send_initial_metadata,
2244
- &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
2245
- .list.count],
2173
+ &retry_state->send_initial_metadata,
2174
+ &retry_state->send_initial_metadata_storage[calld->send_initial_metadata
2175
+ .list.count],
2246
2176
  retry_md);
2247
2177
  if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
2248
2178
  gpr_log(GPR_ERROR, "error adding retry metadata: %s",
@@ -2253,7 +2183,7 @@ static void add_retriable_send_initial_metadata_op(
2253
2183
  retry_state->started_send_initial_metadata = true;
2254
2184
  batch_data->batch.send_initial_metadata = true;
2255
2185
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
2256
- &batch_data->send_initial_metadata;
2186
+ &retry_state->send_initial_metadata;
2257
2187
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
2258
2188
  calld->send_initial_metadata_flags;
2259
2189
  batch_data->batch.payload->send_initial_metadata.peer_string =
@@ -2274,10 +2204,10 @@ static void add_retriable_send_message_op(
2274
2204
  grpc_core::ByteStreamCache* cache =
2275
2205
  (*calld->send_messages)[retry_state->started_send_message_count];
2276
2206
  ++retry_state->started_send_message_count;
2277
- batch_data->send_message.Init(cache);
2207
+ retry_state->send_message.Init(cache);
2278
2208
  batch_data->batch.send_message = true;
2279
2209
  batch_data->batch.payload->send_message.send_message.reset(
2280
- batch_data->send_message.get());
2210
+ retry_state->send_message.get());
2281
2211
  }
2282
2212
 
2283
2213
  // Adds retriable send_trailing_metadata op to batch_data.
@@ -2287,17 +2217,17 @@ static void add_retriable_send_trailing_metadata_op(
2287
2217
  // We need to make a copy of the metadata batch for each attempt, since
2288
2218
  // the filters in the subchannel stack may modify this batch, and we don't
2289
2219
  // want those modifications to be passed forward to subsequent attempts.
2290
- batch_data->send_trailing_metadata_storage =
2220
+ retry_state->send_trailing_metadata_storage =
2291
2221
  static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
2292
2222
  calld->arena, sizeof(grpc_linked_mdelem) *
2293
2223
  calld->send_trailing_metadata.list.count));
2294
2224
  grpc_metadata_batch_copy(&calld->send_trailing_metadata,
2295
- &batch_data->send_trailing_metadata,
2296
- batch_data->send_trailing_metadata_storage);
2225
+ &retry_state->send_trailing_metadata,
2226
+ retry_state->send_trailing_metadata_storage);
2297
2227
  retry_state->started_send_trailing_metadata = true;
2298
2228
  batch_data->batch.send_trailing_metadata = true;
2299
2229
  batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
2300
- &batch_data->send_trailing_metadata;
2230
+ &retry_state->send_trailing_metadata;
2301
2231
  }
2302
2232
 
2303
2233
  // Adds retriable recv_initial_metadata op to batch_data.
@@ -2306,16 +2236,16 @@ static void add_retriable_recv_initial_metadata_op(
2306
2236
  subchannel_batch_data* batch_data) {
2307
2237
  retry_state->started_recv_initial_metadata = true;
2308
2238
  batch_data->batch.recv_initial_metadata = true;
2309
- grpc_metadata_batch_init(&batch_data->recv_initial_metadata);
2239
+ grpc_metadata_batch_init(&retry_state->recv_initial_metadata);
2310
2240
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
2311
- &batch_data->recv_initial_metadata;
2241
+ &retry_state->recv_initial_metadata;
2312
2242
  batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
2313
- &batch_data->trailing_metadata_available;
2314
- GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
2243
+ &retry_state->trailing_metadata_available;
2244
+ GRPC_CLOSURE_INIT(&retry_state->recv_initial_metadata_ready,
2315
2245
  recv_initial_metadata_ready, batch_data,
2316
2246
  grpc_schedule_on_exec_ctx);
2317
2247
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
2318
- &batch_data->recv_initial_metadata_ready;
2248
+ &retry_state->recv_initial_metadata_ready;
2319
2249
  }
2320
2250
 
2321
2251
  // Adds retriable recv_message op to batch_data.
@@ -2325,11 +2255,11 @@ static void add_retriable_recv_message_op(
2325
2255
  ++retry_state->started_recv_message_count;
2326
2256
  batch_data->batch.recv_message = true;
2327
2257
  batch_data->batch.payload->recv_message.recv_message =
2328
- &batch_data->recv_message;
2329
- GRPC_CLOSURE_INIT(&batch_data->recv_message_ready, recv_message_ready,
2258
+ &retry_state->recv_message;
2259
+ GRPC_CLOSURE_INIT(&retry_state->recv_message_ready, recv_message_ready,
2330
2260
  batch_data, grpc_schedule_on_exec_ctx);
2331
2261
  batch_data->batch.payload->recv_message.recv_message_ready =
2332
- &batch_data->recv_message_ready;
2262
+ &retry_state->recv_message_ready;
2333
2263
  }
2334
2264
 
2335
2265
  // Adds retriable recv_trailing_metadata op to batch_data.
@@ -2338,12 +2268,17 @@ static void add_retriable_recv_trailing_metadata_op(
2338
2268
  subchannel_batch_data* batch_data) {
2339
2269
  retry_state->started_recv_trailing_metadata = true;
2340
2270
  batch_data->batch.recv_trailing_metadata = true;
2341
- grpc_metadata_batch_init(&batch_data->recv_trailing_metadata);
2271
+ grpc_metadata_batch_init(&retry_state->recv_trailing_metadata);
2342
2272
  batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
2343
- &batch_data->recv_trailing_metadata;
2344
- batch_data->batch.collect_stats = true;
2345
- batch_data->batch.payload->collect_stats.collect_stats =
2346
- &batch_data->collect_stats;
2273
+ &retry_state->recv_trailing_metadata;
2274
+ batch_data->batch.payload->recv_trailing_metadata.collect_stats =
2275
+ &retry_state->collect_stats;
2276
+ GRPC_CLOSURE_INIT(&retry_state->recv_trailing_metadata_ready,
2277
+ recv_trailing_metadata_ready, batch_data,
2278
+ grpc_schedule_on_exec_ctx);
2279
+ batch_data->batch.payload->recv_trailing_metadata
2280
+ .recv_trailing_metadata_ready =
2281
+ &retry_state->recv_trailing_metadata_ready;
2347
2282
  }
2348
2283
 
2349
2284
  // Helper function used to start a recv_trailing_metadata batch. This
@@ -2364,9 +2299,11 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
2364
2299
  grpc_connected_subchannel_call_get_parent_data(
2365
2300
  calld->subchannel_call));
2366
2301
  // Create batch_data with 2 refs, since this batch will be unreffed twice:
2367
- // once when the subchannel batch returns, and again when we actually get
2368
- // a recv_trailing_metadata op from the surface.
2369
- subchannel_batch_data* batch_data = batch_data_create(elem, 2);
2302
+ // once for the recv_trailing_metadata_ready callback when the subchannel
2303
+ // batch returns, and again when we actually get a recv_trailing_metadata
2304
+ // op from the surface.
2305
+ subchannel_batch_data* batch_data =
2306
+ batch_data_create(elem, 2, false /* set_on_complete */);
2370
2307
  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
2371
2308
  retry_state->recv_trailing_metadata_internal_batch = batch_data;
2372
2309
  // Note: This will release the call combiner.
@@ -2391,7 +2328,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2391
2328
  "send_initial_metadata op",
2392
2329
  chand, calld);
2393
2330
  }
2394
- replay_batch_data = batch_data_create(elem, 1);
2331
+ replay_batch_data = batch_data_create(elem, 1, true /* set_on_complete */);
2395
2332
  add_retriable_send_initial_metadata_op(calld, retry_state,
2396
2333
  replay_batch_data);
2397
2334
  }
@@ -2408,7 +2345,8 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2408
2345
  chand, calld);
2409
2346
  }
2410
2347
  if (replay_batch_data == nullptr) {
2411
- replay_batch_data = batch_data_create(elem, 1);
2348
+ replay_batch_data =
2349
+ batch_data_create(elem, 1, true /* set_on_complete */);
2412
2350
  }
2413
2351
  add_retriable_send_message_op(elem, retry_state, replay_batch_data);
2414
2352
  }
@@ -2427,7 +2365,8 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2427
2365
  chand, calld);
2428
2366
  }
2429
2367
  if (replay_batch_data == nullptr) {
2430
- replay_batch_data = batch_data_create(elem, 1);
2368
+ replay_batch_data =
2369
+ batch_data_create(elem, 1, true /* set_on_complete */);
2431
2370
  }
2432
2371
  add_retriable_send_trailing_metadata_op(calld, retry_state,
2433
2372
  replay_batch_data);
@@ -2439,7 +2378,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2439
2378
  // *num_batches as needed.
2440
2379
  static void add_subchannel_batches_for_pending_batches(
2441
2380
  grpc_call_element* elem, subchannel_call_retry_state* retry_state,
2442
- closure_to_execute* closures, size_t* num_closures) {
2381
+ grpc_core::CallCombinerClosureList* closures) {
2443
2382
  call_data* calld = static_cast<call_data*>(elem->call_data);
2444
2383
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
2445
2384
  pending_batch* pending = &calld->pending_batches[i];
@@ -2493,15 +2432,11 @@ static void add_subchannel_batches_for_pending_batches(
2493
2432
  // started subchannel batch, since we'll propagate the
2494
2433
  // completion when it completes.
2495
2434
  if (retry_state->completed_recv_trailing_metadata) {
2496
- subchannel_batch_data* batch_data =
2497
- retry_state->recv_trailing_metadata_internal_batch;
2498
- closure_to_execute* closure = &closures[(*num_closures)++];
2499
- closure->closure = &batch_data->on_complete;
2500
2435
  // Batches containing recv_trailing_metadata always succeed.
2501
- closure->error = GRPC_ERROR_NONE;
2502
- closure->reason =
2503
- "re-executing on_complete for recv_trailing_metadata "
2504
- "to propagate internally triggered result";
2436
+ closures->Add(
2437
+ &retry_state->recv_trailing_metadata_ready, GRPC_ERROR_NONE,
2438
+ "re-executing recv_trailing_metadata_ready to propagate "
2439
+ "internally triggered result");
2505
2440
  } else {
2506
2441
  batch_data_unref(retry_state->recv_trailing_metadata_internal_batch);
2507
2442
  }
@@ -2513,14 +2448,19 @@ static void add_subchannel_batches_for_pending_batches(
2513
2448
  if (calld->method_params == nullptr ||
2514
2449
  calld->method_params->retry_policy() == nullptr ||
2515
2450
  calld->retry_committed) {
2516
- add_closure_for_subchannel_batch(calld, batch, closures, num_closures);
2451
+ add_closure_for_subchannel_batch(elem, batch, closures);
2517
2452
  pending_batch_clear(calld, pending);
2518
2453
  continue;
2519
2454
  }
2520
2455
  // Create batch with the right number of callbacks.
2521
- const int num_callbacks =
2522
- 1 + batch->recv_initial_metadata + batch->recv_message;
2523
- subchannel_batch_data* batch_data = batch_data_create(elem, num_callbacks);
2456
+ const bool has_send_ops = batch->send_initial_metadata ||
2457
+ batch->send_message ||
2458
+ batch->send_trailing_metadata;
2459
+ const int num_callbacks = has_send_ops + batch->recv_initial_metadata +
2460
+ batch->recv_message +
2461
+ batch->recv_trailing_metadata;
2462
+ subchannel_batch_data* batch_data = batch_data_create(
2463
+ elem, num_callbacks, has_send_ops /* set_on_complete */);
2524
2464
  // Cache send ops if needed.
2525
2465
  maybe_cache_send_ops_for_batch(calld, pending);
2526
2466
  // send_initial_metadata.
@@ -2547,11 +2487,9 @@ static void add_subchannel_batches_for_pending_batches(
2547
2487
  }
2548
2488
  // recv_trailing_metadata.
2549
2489
  if (batch->recv_trailing_metadata) {
2550
- GPR_ASSERT(batch->collect_stats);
2551
2490
  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
2552
2491
  }
2553
- add_closure_for_subchannel_batch(calld, &batch_data->batch, closures,
2554
- num_closures);
2492
+ add_closure_for_subchannel_batch(elem, &batch_data->batch, closures);
2555
2493
  // Track number of pending subchannel send batches.
2556
2494
  // If this is the first one, take a ref to the call stack.
2557
2495
  if (batch->send_initial_metadata || batch->send_message ||
@@ -2579,15 +2517,13 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
2579
2517
  grpc_connected_subchannel_call_get_parent_data(
2580
2518
  calld->subchannel_call));
2581
2519
  // Construct list of closures to execute, one for each pending batch.
2582
- // We can start up to 6 batches.
2583
- closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches)];
2584
- size_t num_closures = 0;
2520
+ grpc_core::CallCombinerClosureList closures;
2585
2521
  // Replay previously-returned send_* ops if needed.
2586
2522
  subchannel_batch_data* replay_batch_data =
2587
2523
  maybe_create_subchannel_batch_for_replay(elem, retry_state);
2588
2524
  if (replay_batch_data != nullptr) {
2589
- add_closure_for_subchannel_batch(calld, &replay_batch_data->batch, closures,
2590
- &num_closures);
2525
+ add_closure_for_subchannel_batch(elem, &replay_batch_data->batch,
2526
+ &closures);
2591
2527
  // Track number of pending subchannel send batches.
2592
2528
  // If this is the first one, take a ref to the call stack.
2593
2529
  if (calld->num_pending_retriable_subchannel_send_batches == 0) {
@@ -2596,17 +2532,16 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
2596
2532
  ++calld->num_pending_retriable_subchannel_send_batches;
2597
2533
  }
2598
2534
  // Now add pending batches.
2599
- add_subchannel_batches_for_pending_batches(elem, retry_state, closures,
2600
- &num_closures);
2535
+ add_subchannel_batches_for_pending_batches(elem, retry_state, &closures);
2601
2536
  // Start batches on subchannel call.
2602
2537
  if (grpc_client_channel_trace.enabled()) {
2603
2538
  gpr_log(GPR_INFO,
2604
2539
  "chand=%p calld=%p: starting %" PRIuPTR
2605
2540
  " retriable batches on subchannel_call=%p",
2606
- chand, calld, num_closures, calld->subchannel_call);
2541
+ chand, calld, closures.size(), calld->subchannel_call);
2607
2542
  }
2608
- execute_closures_in_call_combiner(elem, "start_retriable_subchannel_batches",
2609
- closures, num_closures);
2543
+ // Note: This will yield the call combiner.
2544
+ closures.RunClosures(calld->call_combiner);
2610
2545
  }
2611
2546
 
2612
2547
  //
@@ -3239,6 +3174,16 @@ static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
3239
3174
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
3240
3175
  }
3241
3176
 
3177
+ void grpc_client_channel_populate_child_refs(
3178
+ grpc_channel_element* elem, grpc_core::ChildRefsList* child_subchannels,
3179
+ grpc_core::ChildRefsList* child_channels) {
3180
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
3181
+ if (chand->lb_policy != nullptr) {
3182
+ chand->lb_policy->FillChildRefsForChannelz(child_subchannels,
3183
+ child_channels);
3184
+ }
3185
+ }
3186
+
3242
3187
  grpc_connectivity_state grpc_client_channel_check_connectivity_state(
3243
3188
  grpc_channel_element* elem, int try_to_connect) {
3244
3189
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);