grpc 1.11.1 → 1.12.0

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/Makefile +225 -87
  3. data/etc/roots.pem +0 -33
  4. data/include/grpc/grpc_security.h +70 -0
  5. data/include/grpc/impl/codegen/port_platform.h +11 -0
  6. data/include/grpc/support/log.h +9 -1
  7. data/src/core/ext/filters/client_channel/client_channel.cc +305 -210
  8. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +1 -1
  9. data/src/core/ext/filters/client_channel/lb_policy.cc +2 -2
  10. data/src/core/ext/filters/client_channel/lb_policy.h +4 -0
  11. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +12 -9
  12. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +168 -197
  13. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +368 -373
  14. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +498 -98
  15. data/src/core/ext/filters/client_channel/method_params.h +4 -0
  16. data/src/core/ext/filters/client_channel/resolver.h +4 -0
  17. data/src/core/ext/filters/client_channel/retry_throttle.h +4 -0
  18. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +2 -2
  19. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +40 -15
  20. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +3 -3
  21. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +2 -2
  22. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1 -1
  23. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +2 -2
  24. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +3 -3
  25. data/src/core/ext/transport/chttp2/transport/writing.cc +5 -5
  26. data/src/core/ext/transport/inproc/inproc_transport.cc +41 -43
  27. data/src/core/lib/channel/channel_args.cc +28 -0
  28. data/src/core/lib/channel/channel_args.h +4 -0
  29. data/src/core/lib/channel/handshaker.cc +47 -0
  30. data/src/core/lib/channel/handshaker.h +4 -0
  31. data/src/core/lib/debug/trace.cc +2 -1
  32. data/src/core/lib/debug/trace.h +10 -1
  33. data/src/core/lib/gpr/log.cc +8 -2
  34. data/src/core/lib/gpr/log_android.cc +4 -0
  35. data/src/core/lib/gpr/log_linux.cc +4 -0
  36. data/src/core/lib/gpr/log_posix.cc +4 -0
  37. data/src/core/lib/gpr/log_windows.cc +5 -0
  38. data/src/core/lib/gprpp/inlined_vector.h +30 -34
  39. data/src/core/lib/gprpp/orphanable.h +4 -4
  40. data/src/core/lib/gprpp/ref_counted.h +4 -4
  41. data/src/core/lib/iomgr/call_combiner.cc +13 -13
  42. data/src/core/lib/iomgr/closure.h +3 -3
  43. data/src/core/lib/iomgr/combiner.cc +11 -11
  44. data/src/core/lib/iomgr/ev_epoll1_linux.cc +24 -24
  45. data/src/core/lib/iomgr/ev_epollex_linux.cc +48 -29
  46. data/src/core/lib/iomgr/ev_epollsig_linux.cc +2 -2
  47. data/src/core/lib/iomgr/ev_poll_posix.cc +9 -3
  48. data/src/core/lib/iomgr/ev_posix.cc +3 -3
  49. data/src/core/lib/iomgr/executor.cc +6 -6
  50. data/src/core/lib/iomgr/resource_quota.cc +10 -11
  51. data/src/core/lib/iomgr/socket_utils_common_posix.cc +24 -0
  52. data/src/core/lib/iomgr/socket_utils_linux.cc +0 -1
  53. data/src/core/lib/iomgr/socket_utils_posix.cc +2 -3
  54. data/src/core/lib/iomgr/socket_utils_posix.h +3 -0
  55. data/src/core/lib/iomgr/tcp_client_custom.cc +2 -2
  56. data/src/core/lib/iomgr/tcp_client_posix.cc +4 -4
  57. data/src/core/lib/iomgr/tcp_custom.cc +10 -10
  58. data/src/core/lib/iomgr/tcp_posix.cc +25 -25
  59. data/src/core/lib/iomgr/tcp_server_custom.cc +5 -5
  60. data/src/core/lib/iomgr/tcp_server_posix.cc +4 -25
  61. data/src/core/lib/iomgr/tcp_server_windows.cc +1 -0
  62. data/src/core/lib/iomgr/tcp_uv.cc +3 -0
  63. data/src/core/lib/iomgr/tcp_windows.cc +16 -0
  64. data/src/core/lib/iomgr/timer_generic.cc +27 -17
  65. data/src/core/lib/iomgr/timer_manager.cc +11 -12
  66. data/src/core/lib/iomgr/timer_uv.cc +3 -0
  67. data/src/core/lib/iomgr/udp_server.cc +104 -49
  68. data/src/core/lib/iomgr/udp_server.h +8 -4
  69. data/src/core/lib/profiling/basic_timers.cc +1 -0
  70. data/src/core/lib/security/credentials/alts/alts_credentials.h +0 -20
  71. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +7 -7
  72. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h +1 -38
  73. data/src/core/lib/security/security_connector/security_connector.cc +19 -16
  74. data/src/core/lib/security/security_connector/security_connector.h +4 -3
  75. data/src/core/lib/security/transport/secure_endpoint.cc +2 -2
  76. data/src/core/lib/security/transport/security_handshaker.cc +6 -2
  77. data/src/core/lib/slice/slice.cc +6 -2
  78. data/src/core/lib/slice/slice_buffer.cc +12 -4
  79. data/src/core/lib/slice/slice_hash_table.h +4 -0
  80. data/src/core/lib/slice/slice_weak_hash_table.h +4 -0
  81. data/src/core/lib/surface/call.cc +6 -6
  82. data/src/core/lib/surface/server.cc +16 -0
  83. data/src/core/lib/surface/version.cc +1 -1
  84. data/src/core/lib/transport/bdp_estimator.cc +3 -3
  85. data/src/core/lib/transport/bdp_estimator.h +2 -2
  86. data/src/core/lib/transport/connectivity_state.cc +6 -7
  87. data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +4 -0
  88. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +14 -0
  89. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +21 -0
  90. data/src/ruby/lib/grpc/version.rb +1 -1
  91. data/src/ruby/pb/generate_proto_ruby.sh +7 -1
  92. data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +2 -5
  93. data/third_party/address_sorting/address_sorting.c +10 -9
  94. metadata +27 -28
  95. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +0 -253
data/etc/roots.pem

@@ -3525,39 +3525,6 @@ AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
  5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
  -----END CERTIFICATE-----

- # Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
- # Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5 O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş.
- # Label: "TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H5"
- # Serial: 156233699172481
- # MD5 Fingerprint: da:70:8e:f0:22:df:93:26:f6:5f:9f:d3:15:06:52:4e
- # SHA1 Fingerprint: c4:18:f6:4d:46:d1:df:00:3d:27:30:13:72:43:a9:12:11:c6:75:fb
- # SHA256 Fingerprint: 49:35:1b:90:34:44:c1:85:cc:dc:5c:69:3d:24:d8:55:5c:b2:08:d6:a8:14:13:07:69:9f:4a:f0:63:19:9d:78
- -----BEGIN CERTIFICATE-----
- MIIEJzCCAw+gAwIBAgIHAI4X/iQggTANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UE
- BhMCVFIxDzANBgNVBAcMBkFua2FyYTFNMEsGA1UECgxEVMOcUktUUlVTVCBCaWxn
- aSDEsGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkg
- QS7Fni4xQjBABgNVBAMMOVTDnFJLVFJVU1QgRWxla3Ryb25payBTZXJ0aWZpa2Eg
- SGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSBINTAeFw0xMzA0MzAwODA3MDFaFw0yMzA0
- MjgwODA3MDFaMIGxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMU0wSwYD
- VQQKDERUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8
- dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjFCMEAGA1UEAww5VMOcUktUUlVTVCBF
- bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIEg1MIIB
- IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApCUZ4WWe60ghUEoI5RHwWrom
- /4NZzkQqL/7hzmAD/I0Dpe3/a6i6zDQGn1k19uwsu537jVJp45wnEFPzpALFp/kR
- Gml1bsMdi9GYjZOHp3GXDSHHmflS0yxjXVW86B8BSLlg/kJK9siArs1mep5Fimh3
- 4khon6La8eHBEJ/rPCmBp+EyCNSgBbGM+42WAA4+Jd9ThiI7/PS98wl+d+yG6w8z
- 5UNP9FR1bSmZLmZaQ9/LXMrI5Tjxfjs1nQ/0xVqhzPMggCTTV+wVunUlm+hkS7M0
- hO8EuPbJbKoCPrZV4jI3X/xml1/N1p7HIL9Nxqw/dV8c7TKcfGkAaZHjIxhT6QID
- AQABo0IwQDAdBgNVHQ4EFgQUVpkHHtOsDGlktAxQR95DLL4gwPswDgYDVR0PAQH/
- BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ5FdnsX
- SDLyOIspve6WSk6BGLFRRyDN0GSxDsnZAdkJzsiZ3GglE9Rc8qPoBP5yCccLqh0l
- VX6Wmle3usURehnmp349hQ71+S4pL+f5bFgWV1Al9j4uPqrtd3GqqpmWRgqujuwq
- URawXs3qZwQcWDD1YIq9pr1N5Za0/EKJAWv2cMhQOQwt1WbZyNKzMrcbGW3LM/nf
- peYVhDfwwvJllpKQd/Ct9JDpEXjXk4nAPQu6KfTomZ1yju2dL+6SfaHx/126M2CF
- Yv4HAqGEVka+lgqaE9chTLd8B59OTj+RdPsnnRHM3eaxynFNExc5JsUpISuTKWqW
- +qtB4Uu2NQvAmxU=
- -----END CERTIFICATE-----
-
  # Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
  # Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
  # Label: "Certinomis - Root CA"
data/include/grpc/grpc_security.h

@@ -488,6 +488,76 @@ typedef struct {
  GRPCAPI void grpc_server_credentials_set_auth_metadata_processor(
  grpc_server_credentials* creds, grpc_auth_metadata_processor processor);

+ /** --- ALTS channel/server credentials --- **/
+
+ /**
+ * Main interface for ALTS credentials options. The options will contain
+ * information that will be passed from grpc to TSI layer such as RPC protocol
+ * versions. ALTS client (channel) and server credentials will have their own
+ * implementation of this interface. The APIs listed in this header are
+ * thread-compatible. It is used for experimental purpose for now and subject
+ * to change.
+ */
+ typedef struct grpc_alts_credentials_options grpc_alts_credentials_options;
+
+ /**
+ * This method creates a grpc ALTS credentials client options instance.
+ * It is used for experimental purpose for now and subject to change.
+ */
+ GRPCAPI grpc_alts_credentials_options*
+ grpc_alts_credentials_client_options_create();
+
+ /**
+ * This method creates a grpc ALTS credentials server options instance.
+ * It is used for experimental purpose for now and subject to change.
+ */
+ GRPCAPI grpc_alts_credentials_options*
+ grpc_alts_credentials_server_options_create();
+
+ /**
+ * This method adds a target service account to grpc client's ALTS credentials
+ * options instance. It is used for experimental purpose for now and subject
+ * to change.
+ *
+ * - options: grpc ALTS credentials options instance.
+ * - service_account: service account of target endpoint.
+ */
+ GRPCAPI void grpc_alts_credentials_client_options_add_target_service_account(
+ grpc_alts_credentials_options* options, const char* service_account);
+
+ /**
+ * This method destroys a grpc_alts_credentials_options instance by
+ * de-allocating all of its occupied memory. It is used for experimental purpose
+ * for now and subject to change.
+ *
+ * - options: a grpc_alts_credentials_options instance that needs to be
+ * destroyed.
+ */
+ GRPCAPI void grpc_alts_credentials_options_destroy(
+ grpc_alts_credentials_options* options);
+
+ /**
+ * This method creates an ALTS channel credential object. It is used for
+ * experimental purpose for now and subject to change.
+ *
+ * - options: grpc ALTS credentials options instance for client.
+ *
+ * It returns the created ALTS channel credential object.
+ */
+ GRPCAPI grpc_channel_credentials* grpc_alts_credentials_create(
+ const grpc_alts_credentials_options* options);
+
+ /**
+ * This method creates an ALTS server credential object. It is used for
+ * experimental purpose for now and subject to change.
+ *
+ * - options: grpc ALTS credentials options instance for server.
+ *
+ * It returns the created ALTS server credential object.
+ */
+ GRPCAPI grpc_server_credentials* grpc_alts_server_credentials_create(
+ const grpc_alts_credentials_options* options);
+
  #ifdef __cplusplus
  }
  #endif
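The block above is the entire ALTS surface added to the public C API in this release: an opaque options struct, client and server option constructors, a way to pin the expected server service account, and the two credential factories. A minimal client-side sketch of how the pieces compose (illustrative only; the target address and service account are placeholders, and it assumes the credentials object keeps its own copy of the options so they can be destroyed right after creation):

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>

grpc_channel* create_alts_channel(void) {
  grpc_alts_credentials_options* opts =
      grpc_alts_credentials_client_options_create();
  /* Optional: only accept a server running as this service account. */
  grpc_alts_credentials_client_options_add_target_service_account(
      opts, "expected-server@example.iam.gserviceaccount.com");
  grpc_channel_credentials* creds = grpc_alts_credentials_create(opts);
  grpc_alts_credentials_options_destroy(opts);
  grpc_channel* channel = grpc_secure_channel_create(
      creds, "greeter.example.com:443", /*args=*/NULL, /*reserved=*/NULL);
  grpc_channel_credentials_release(creds);
  return channel;
}

The server side is symmetric: grpc_alts_credentials_server_options_create() feeds grpc_alts_server_credentials_create(), and the resulting server credentials are passed to grpc_server_add_secure_http2_port().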
data/include/grpc/impl/codegen/port_platform.h

@@ -500,6 +500,17 @@ typedef unsigned __int64 uint64_t;
  #endif /* __GPR_WINDOWS */
  #endif /* GRPC_ALLOW_EXCEPTIONS */

+ /* Use GPR_LIKELY only in cases where you are sure that a certain outcome is the
+ * most likely. Ideally, also collect performance numbers to justify the claim.
+ */
+ #ifdef __GNUC__
+ #define GPR_LIKELY(x) __builtin_expect((x), 1)
+ #define GPR_UNLIKELY(x) __builtin_expect((x), 0)
+ #else /* __GNUC__ */
+ #define GPR_LIKELY(x) (x)
+ #define GPR_UNLIKELY(x) (x)
+ #endif /* __GNUC__ */
+
  #ifndef __STDC_FORMAT_MACROS
  #define __STDC_FORMAT_MACROS
  #endif
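GPR_LIKELY and GPR_UNLIKELY expand to __builtin_expect branch hints under GCC/Clang and to a plain (x) elsewhere, so they influence code layout but never semantics. A small illustration of the intended hot-path use (the function and its bounds check are made up for the example):

#include <stddef.h>

#include <grpc/impl/codegen/port_platform.h>

int copy_frame(char* dst, size_t cap, const char* src, size_t len) {
  /* The overflow branch should be rare, so hint the compiler to lay out
     the common path as the fall-through. */
  if (GPR_UNLIKELY(len > cap)) {
    return -1; /* caller must grow the buffer */
  }
  for (size_t i = 0; i < len; ++i) dst[i] = src[i];
  return 0;
}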
data/include/grpc/support/log.h

@@ -61,6 +61,8 @@ GPRAPI const char* gpr_log_severity_string(gpr_log_severity severity);
  GPRAPI void gpr_log(const char* file, int line, gpr_log_severity severity,
  const char* format, ...) GPR_PRINT_FORMAT_CHECK(4, 5);

+ GPRAPI int gpr_should_log(gpr_log_severity severity);
+
  GPRAPI void gpr_log_message(const char* file, int line,
  gpr_log_severity severity, const char* message);

@@ -91,12 +93,18 @@ GPRAPI void gpr_set_log_function(gpr_log_func func);
  an exception in a higher-level language, consider returning error code. */
  #define GPR_ASSERT(x) \
  do { \
- if (!(x)) { \
+ if (GPR_UNLIKELY(!(x))) { \
  gpr_log(GPR_ERROR, "assertion failed: %s", #x); \
  abort(); \
  } \
  } while (0)

+ #ifndef NDEBUG
+ #define GPR_DEBUG_ASSERT(x) GPR_ASSERT(x)
+ #else
+ #define GPR_DEBUG_ASSERT(x)
+ #endif
+
  #ifdef __cplusplus
  }
  #endif
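GPR_DEBUG_ASSERT is checked only in builds without NDEBUG, and the new gpr_should_log lets callers skip building expensive log messages when a severity is currently filtered out. A short sketch of both (describe_queue() is a hypothetical, heap-allocating helper standing in for anything costly to format):

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

extern char* describe_queue(void); /* hypothetical expensive formatter */

void note_dequeue(int remaining, int capacity) {
  /* Compiled out entirely when NDEBUG is defined. */
  GPR_DEBUG_ASSERT(remaining <= capacity);
  /* Only pay for the expensive description when debug logging is enabled. */
  if (gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
    char* desc = describe_queue();
    gpr_log(GPR_DEBUG, "dequeued, %d/%d left: %s", remaining, capacity, desc);
    gpr_free(desc);
  }
}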
data/src/core/ext/filters/client_channel/client_channel.cc

@@ -174,7 +174,7 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
  }
  }
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
+ gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
  grpc_connectivity_state_name(state));
  }
  grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);

@@ -186,7 +186,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
  /* check if the notification is for the latest policy */
  if (w->lb_policy == w->chand->lb_policy.get()) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
+ gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand,
  w->lb_policy, grpc_connectivity_state_name(w->state));
  }
  set_channel_connectivity_state_locked(w->chand, w->state,

@@ -215,7 +215,7 @@ static void watch_lb_policy_locked(channel_data* chand,

  static void start_resolving_locked(channel_data* chand) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
+ gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand);
  }
  GPR_ASSERT(!chand->started_resolving);
  chand->started_resolving = true;

@@ -297,7 +297,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
  return;
  }
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
+ gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand);
  }
  chand->resolver->RequestReresolutionLocked();
  // Give back the closure to the LB policy.

@@ -311,7 +311,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
  static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(arg);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p: got resolver result: resolver_result=%p error=%s", chand,
  chand->resolver_result, grpc_error_string(error));
  }

@@ -431,7 +431,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
  }
  }
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
  "service_config=\"%s\"",
  chand, lb_policy_name_dup,

@@ -466,7 +466,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
  chand->resolver == nullptr) {
  if (chand->lb_policy != nullptr) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
+ gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand,
  chand->lb_policy.get());
  }
  grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),

@@ -480,11 +480,11 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
  // error or shutdown.
  if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand);
+ gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
  }
  if (chand->resolver != nullptr) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
+ gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand);
  }
  chand->resolver.reset();
  }

@@ -506,7 +506,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
  GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
  if (lb_policy_created) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
+ gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand);
  }
  GRPC_ERROR_UNREF(state_error);
  state = chand->lb_policy->CheckConnectivityLocked(&state_error);

@@ -842,10 +842,11 @@ typedef struct {
  bool completed_recv_trailing_metadata : 1;
  // State for callback processing.
  bool retry_dispatched : 1;
- bool recv_initial_metadata_ready_deferred : 1;
- bool recv_message_ready_deferred : 1;
+ subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
  grpc_error* recv_initial_metadata_error;
+ subchannel_batch_data* recv_message_ready_deferred_batch;
  grpc_error* recv_message_error;
+ subchannel_batch_data* recv_trailing_metadata_internal_batch;
  } subchannel_call_retry_state;

  // Pending batches stored in call data.

@@ -924,7 +925,9 @@ typedef struct client_channel_call_data {
  // Note: We inline the cache for the first 3 send_message ops and use
  // dynamic allocation after that. This number was essentially picked
  // at random; it could be changed in the future to tune performance.
- grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
+ grpc_core::ManualConstructor<
+ grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3>>
+ send_messages;
  // send_trailing_metadata
  bool seen_send_trailing_metadata;
  grpc_linked_mdelem* send_trailing_metadata_storage;
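Wrapping send_messages in grpc_core::ManualConstructor is what drives the switch from dot access to arrow access in the hunks that follow: the call data lives in an arena, so the vector's constructor and destructor have to be run explicitly rather than implicitly by the compiler. A simplified model of the pattern (not gRPC's actual helper, just the idea):

#include <new>
#include <utility>

template <typename T>
class ManuallyConstructed {
 public:
  /* Storage always exists; the T inside only exists between Init() and
     Destroy(). */
  template <typename... Args>
  void Init(Args&&... args) {
    new (&storage_) T(std::forward<Args>(args)...);
  }
  void Destroy() { get()->~T(); }
  T* get() { return reinterpret_cast<T*>(&storage_); }
  T* operator->() { return get(); }

 private:
  alignas(T) unsigned char storage_[sizeof(T)];
};

With this shape, calld->send_messages->push_back(...) goes through operator->, and the element's init/destroy path (not shown in this diff) decides when the vector is constructed and torn down.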
@@ -974,7 +977,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
  gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
  new (cache) grpc_core::ByteStreamCache(
  std::move(batch->payload->send_message.send_message));
- calld->send_messages.push_back(cache);
+ calld->send_messages->push_back(cache);
  }
  // Save metadata batch for send_trailing_metadata ops.
  if (batch->send_trailing_metadata) {

@@ -992,6 +995,39 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
  }
  }

+ // Frees cached send_initial_metadata.
+ static void free_cached_send_initial_metadata(channel_data* chand,
+ call_data* calld) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: destroying calld->send_initial_metadata", chand,
+ calld);
+ }
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ }
+
+ // Frees cached send_message at index idx.
+ static void free_cached_send_message(channel_data* chand, call_data* calld,
+ size_t idx) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
+ chand, calld, idx);
+ }
+ (*calld->send_messages)[idx]->Destroy();
+ }
+
+ // Frees cached send_trailing_metadata.
+ static void free_cached_send_trailing_metadata(channel_data* chand,
+ call_data* calld) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: destroying calld->send_trailing_metadata",
+ chand, calld);
+ }
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ }
+
  // Frees cached send ops that have already been completed after
  // committing the call.
  static void free_cached_send_op_data_after_commit(

@@ -999,19 +1035,13 @@ static void free_cached_send_op_data_after_commit(
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (retry_state->completed_send_initial_metadata) {
- grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ free_cached_send_initial_metadata(chand, calld);
  }
  for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
- if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
- "]",
- chand, calld, i);
- }
- calld->send_messages[i]->Destroy();
+ free_cached_send_message(chand, calld, i);
  }
  if (retry_state->completed_send_trailing_metadata) {
- grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ free_cached_send_trailing_metadata(chand, calld);
  }
  }

@@ -1023,20 +1053,14 @@ static void free_cached_send_op_data_for_completed_batch(
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (batch_data->batch.send_initial_metadata) {
- grpc_metadata_batch_destroy(&calld->send_initial_metadata);
+ free_cached_send_initial_metadata(chand, calld);
  }
  if (batch_data->batch.send_message) {
- if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
- "]",
- chand, calld, retry_state->completed_send_message_count - 1);
- }
- calld->send_messages[retry_state->completed_send_message_count - 1]
- ->Destroy();
+ free_cached_send_message(chand, calld,
+ retry_state->completed_send_message_count - 1);
  }
  if (batch_data->batch.send_trailing_metadata) {
- grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
+ free_cached_send_trailing_metadata(chand, calld);
  }
  }

@@ -1064,7 +1088,7 @@ static void pending_batches_add(grpc_call_element* elem,
  call_data* calld = static_cast<call_data*>(elem->call_data);
  const size_t idx = get_batch_index(batch);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
  calld, idx);
  }

@@ -1092,7 +1116,7 @@ static void pending_batches_add(grpc_call_element* elem,
  }
  }
  if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: exceeded retry buffer size, committing",
  chand, calld);

@@ -1107,7 +1131,7 @@ static void pending_batches_add(grpc_call_element* elem,
  // retries are disabled so that we don't bother with retry overhead.
  if (calld->num_attempts_completed == 0) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: disabling retries before first attempt",
  chand, calld);
  }

@@ -1154,7 +1178,7 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
  if (calld->pending_batches[i].batch != nullptr) ++num_batches;
  }
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
  elem->channel_data, calld, num_batches, grpc_error_string(error));
  }

@@ -1216,7 +1240,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
  if (calld->pending_batches[i].batch != nullptr) ++num_batches;
  }
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: starting %" PRIuPTR
  " pending batches on subchannel_call=%p",
  chand, calld, num_batches, calld->subchannel_call);

@@ -1261,7 +1285,7 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
  (!batch->recv_message ||
  batch->payload->recv_message.recv_message_ready == nullptr)) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
  calld);
  }
  pending_batch_clear(calld, pending);

@@ -1280,7 +1304,8 @@ static bool pending_batch_is_completed(
  return false;
  }
  if (pending->batch->send_message &&
- retry_state->completed_send_message_count < calld->send_messages.size()) {
+ retry_state->completed_send_message_count <
+ calld->send_messages->size()) {
  return false;
  }
  if (pending->batch->send_trailing_metadata &&

@@ -1315,7 +1340,7 @@ static bool pending_batch_is_unstarted(
  return true;
  }
  if (pending->batch->send_message &&
- retry_state->started_send_message_count < calld->send_messages.size()) {
+ retry_state->started_send_message_count < calld->send_messages->size()) {
  return true;
  }
  if (pending->batch->send_trailing_metadata &&

@@ -1350,7 +1375,7 @@ static void retry_commit(grpc_call_element* elem,
  if (calld->retry_committed) return;
  calld->retry_committed = true;
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld);
  }
  if (retry_state != nullptr) {
  free_cached_send_op_data_after_commit(elem, retry_state);
@@ -1395,7 +1420,7 @@ static void do_retry(grpc_call_element* elem,
  next_attempt_time = calld->retry_backoff->NextAttemptTime();
  }
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
  calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
  }

@@ -1429,7 +1454,7 @@ static bool maybe_retry(grpc_call_element* elem,
  batch_data->subchannel_call));
  if (retry_state->retry_dispatched) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
  calld);
  }
  return true;

@@ -1441,14 +1466,14 @@ static bool maybe_retry(grpc_call_element* elem,
  calld->retry_throttle_data->RecordSuccess();
  }
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld);
  }
  return false;
  }
  // Status is not OK. Check whether the status is retryable.
  if (!retry_policy->retryable_status_codes.Contains(status)) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: status %s not configured as retryable", chand,
  calld, grpc_status_code_to_string(status));
  }

@@ -1464,14 +1489,14 @@ static bool maybe_retry(grpc_call_element* elem,
  if (calld->retry_throttle_data != nullptr &&
  !calld->retry_throttle_data->RecordFailure()) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld);
  }
  return false;
  }
  // Check whether the call is committed.
  if (calld->retry_committed) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand,
  calld);
  }
  return false;

@@ -1480,7 +1505,7 @@ static bool maybe_retry(grpc_call_element* elem,
  ++calld->num_attempts_completed;
  if (calld->num_attempts_completed >= retry_policy->max_attempts) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand,
  calld, retry_policy->max_attempts);
  }
  return false;

@@ -1488,7 +1513,7 @@ static bool maybe_retry(grpc_call_element* elem,
  // If the call was cancelled from the surface, don't retry.
  if (calld->cancel_error != GRPC_ERROR_NONE) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: call cancelled from surface, not retrying",
  chand, calld);
  }

@@ -1501,16 +1526,15 @@ static bool maybe_retry(grpc_call_element* elem,
  uint32_t ms;
  if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: not retrying due to server push-back",
  chand, calld);
  }
  return false;
  } else {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: server push-back: retry in %u ms", chand,
- calld, ms);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
+ chand, calld, ms);
  }
  server_pushback_ms = (grpc_millis)ms;
  }

@@ -1583,7 +1607,7 @@ static void invoke_recv_initial_metadata_callback(void* arg,
  batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
  nullptr) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: invoking recv_initial_metadata_ready for "
  "pending batch at index %" PRIuPTR,
  chand, calld, i);

@@ -1619,7 +1643,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
  chand, calld, grpc_error_string(error));
  }

@@ -1634,12 +1658,12 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
  if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
  !retry_state->completed_recv_trailing_metadata) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: deferring recv_initial_metadata_ready "
  "(Trailers-Only)",
  chand, calld);
  }
- retry_state->recv_initial_metadata_ready_deferred = true;
+ retry_state->recv_initial_metadata_ready_deferred_batch = batch_data;
  retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
  if (!retry_state->started_recv_trailing_metadata) {
  // recv_trailing_metadata not yet started by application; start it

@@ -1676,7 +1700,7 @@ static void invoke_recv_message_callback(void* arg, grpc_error* error) {
  if (batch != nullptr && batch->recv_message &&
  batch->payload->recv_message.recv_message_ready != nullptr) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: invoking recv_message_ready for "
  "pending batch at index %" PRIuPTR,
  chand, calld, i);

@@ -1709,7 +1733,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s",
  chand, calld, grpc_error_string(error));
  }
  subchannel_call_retry_state* retry_state =

@@ -1723,12 +1747,12 @@ static void recv_message_ready(void* arg, grpc_error* error) {
  if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
  !retry_state->completed_recv_trailing_metadata) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: deferring recv_message_ready (nullptr "
  "message and recv_trailing_metadata pending)",
  chand, calld);
  }
- retry_state->recv_message_ready_deferred = true;
+ retry_state->recv_message_ready_deferred_batch = batch_data;
  retry_state->recv_message_error = GRPC_ERROR_REF(error);
  if (!retry_state->started_recv_trailing_metadata) {
  // recv_trailing_metadata not yet started by application; start it

@@ -1746,6 +1770,59 @@ static void recv_message_ready(void* arg, grpc_error* error) {
  GRPC_ERROR_UNREF(error);
  }

+ //
+ // list of closures to execute in call combiner
+ //
+
+ // Represents a closure that needs to run in the call combiner as part of
+ // starting or completing a batch.
+ typedef struct {
+ grpc_closure* closure;
+ grpc_error* error;
+ const char* reason;
+ bool free_reason = false;
+ } closure_to_execute;
+
+ static void execute_closures_in_call_combiner(grpc_call_element* elem,
+ const char* caller,
+ closure_to_execute* closures,
+ size_t num_closures) {
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+ call_data* calld = static_cast<call_data*>(elem->call_data);
+ // Note that the call combiner will be yielded for each closure that
+ // we schedule. We're already running in the call combiner, so one of
+ // the closures can be scheduled directly, but the others will
+ // have to re-enter the call combiner.
+ if (num_closures > 0) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p calld=%p: %s starting closure: %s", chand,
+ calld, caller, closures[0].reason);
+ }
+ GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
+ if (closures[0].free_reason) {
+ gpr_free(const_cast<char*>(closures[0].reason));
+ }
+ for (size_t i = 1; i < num_closures; ++i) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO,
+ "chand=%p calld=%p: %s starting closure in call combiner: %s",
+ chand, calld, caller, closures[i].reason);
+ }
+ GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
+ closures[i].error, closures[i].reason);
+ if (closures[i].free_reason) {
+ gpr_free(const_cast<char*>(closures[i].reason));
+ }
+ }
+ } else {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p calld=%p: no closures to run for %s", chand,
+ calld, caller);
+ }
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run");
+ }
+ }
+
  //
  // on_complete callback handling
  //
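The helper above centralizes a pattern used throughout this file: the caller already holds the call combiner, so the first closure is scheduled directly while every additional closure is handed back to the combiner and re-acquires it before running. A deliberately simplified, self-contained model of that dispatch rule (this is not gRPC's combiner API):

#include <cstddef>
#include <deque>
#include <functional>

struct FakeCombiner {
  std::deque<std::function<void()>> waiting; /* closures queued for the lock */
};

/* The caller is assumed to already hold 'combiner', mirroring
   execute_closures_in_call_combiner() above. */
void dispatch_closures(FakeCombiner* combiner,
                       std::function<void()>* closures, size_t n) {
  if (n == 0) return; /* nothing to run; gRPC just yields the combiner here */
  for (size_t i = 1; i < n; ++i) {
    combiner->waiting.push_back(closures[i]); /* will re-enter the combiner */
  }
  closures[0](); /* runs inline under the lock we already hold */
}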
@@ -1774,36 +1851,35 @@ static void update_retry_state_for_completed_batch(
  }
  }

- // Represents a closure that needs to run as a result of a completed batch.
- typedef struct {
- grpc_closure* closure;
- grpc_error* error;
- const char* reason;
- } closure_to_execute;
-
  // Adds any necessary closures for deferred recv_initial_metadata and
  // recv_message callbacks to closures, updating *num_closures as needed.
  static void add_closures_for_deferred_recv_callbacks(
  subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
  closure_to_execute* closures, size_t* num_closures) {
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_initial_metadata_ready_deferred) {
- closure_to_execute* closure = &closures[(*num_closures)++];
- closure->closure =
- GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
- invoke_recv_initial_metadata_callback, batch_data,
- grpc_schedule_on_exec_ctx);
- closure->error = retry_state->recv_initial_metadata_error;
- closure->reason = "resuming recv_initial_metadata_ready";
- }
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_message_ready_deferred) {
- closure_to_execute* closure = &closures[(*num_closures)++];
- closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
- invoke_recv_message_callback,
- batch_data, grpc_schedule_on_exec_ctx);
- closure->error = retry_state->recv_message_error;
- closure->reason = "resuming recv_message_ready";
+ if (batch_data->batch.recv_trailing_metadata) {
+ // Add closure for deferred recv_initial_metadata_ready.
+ if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(
+ &batch_data->recv_initial_metadata_ready,
+ invoke_recv_initial_metadata_callback,
+ retry_state->recv_initial_metadata_ready_deferred_batch,
+ grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_initial_metadata_error;
+ closure->reason = "resuming recv_initial_metadata_ready";
+ retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
+ }
+ // Add closure for deferred recv_message_ready.
+ if (retry_state->recv_message_ready_deferred_batch != nullptr) {
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = GRPC_CLOSURE_INIT(
+ &batch_data->recv_message_ready, invoke_recv_message_callback,
+ retry_state->recv_message_ready_deferred_batch,
+ grpc_schedule_on_exec_ctx);
+ closure->error = retry_state->recv_message_error;
+ closure->reason = "resuming recv_message_ready";
+ retry_state->recv_message_ready_deferred_batch = nullptr;
+ }
  }
  }

@@ -1817,7 +1893,7 @@ static void add_closures_for_replay_or_pending_send_ops(
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  bool have_pending_send_message_ops =
- retry_state->started_send_message_count < calld->send_messages.size();
+ retry_state->started_send_message_count < calld->send_messages->size();
  bool have_pending_send_trailing_metadata_op =
  calld->seen_send_trailing_metadata &&
  !retry_state->started_send_trailing_metadata;

@@ -1835,7 +1911,7 @@ static void add_closures_for_replay_or_pending_send_ops(
  }
  if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: starting next batch for pending send op(s)",
  chand, calld);
  }

@@ -1860,7 +1936,7 @@ static void add_closures_for_completed_pending_batches(
  pending_batch* pending = &calld->pending_batches[i];
  if (pending_batch_is_completed(pending, calld, retry_state)) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
  chand, calld, i);
  }

@@ -1893,7 +1969,7 @@ static void add_closures_to_fail_unstarted_pending_batches(
  pending_batch* pending = &calld->pending_batches[i];
  if (pending_batch_is_unstarted(pending, calld, retry_state)) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: failing unstarted pending batch at index "
  "%" PRIuPTR,
  chand, calld, i);

@@ -1937,7 +2013,7 @@ static void on_complete(void* arg, grpc_error* error) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
  char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
  chand, calld, grpc_error_string(error), batch_str);
  gpr_free(batch_str);
  }

@@ -1948,11 +2024,13 @@ static void on_complete(void* arg, grpc_error* error) {
  // If we have previously completed recv_trailing_metadata, then the
  // call is finished.
  bool call_finished = retry_state->completed_recv_trailing_metadata;
+ // Record whether we were already committed before receiving this callback.
+ const bool previously_committed = calld->retry_committed;
  // Update bookkeeping in retry_state.
  update_retry_state_for_completed_batch(batch_data, retry_state);
  if (call_finished) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: call already finished", chand,
  calld);
  }
  } else {

@@ -1976,35 +2054,39 @@ static void on_complete(void* arg, grpc_error* error) {
  if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
  server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
  }
- } else if (retry_state->completed_recv_trailing_metadata) {
- call_finished = true;
  }
- if (call_finished && grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
- calld, grpc_status_code_to_string(status));
- }
- // If the call is finished, check if we should retry.
- if (call_finished &&
- maybe_retry(elem, batch_data, status, server_pushback_md)) {
- // Unref batch_data for deferred recv_initial_metadata_ready or
- // recv_message_ready callbacks, if any.
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_initial_metadata_ready_deferred) {
- batch_data_unref(batch_data);
- GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
+ // If the call just finished, check if we should retry.
+ if (call_finished) {
+ if (grpc_client_channel_trace.enabled()) {
+ gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
+ calld, grpc_status_code_to_string(status));
  }
- if (batch_data->batch.recv_trailing_metadata &&
- retry_state->recv_message_ready_deferred) {
+ if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
+ // Unref batch_data for deferred recv_initial_metadata_ready or
+ // recv_message_ready callbacks, if any.
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_initial_metadata_ready_deferred_batch !=
+ nullptr) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
+ }
+ if (batch_data->batch.recv_trailing_metadata &&
+ retry_state->recv_message_ready_deferred_batch != nullptr) {
+ batch_data_unref(batch_data);
+ GRPC_ERROR_UNREF(retry_state->recv_message_error);
+ }
  batch_data_unref(batch_data);
- GRPC_ERROR_UNREF(retry_state->recv_message_error);
+ return;
  }
- batch_data_unref(batch_data);
- return;
+ // Not retrying, so commit the call.
+ retry_commit(elem, retry_state);
  }
  }
- // If the call is finished or retries are committed, free cached data for
- // send ops that we've just completed.
- if (call_finished || calld->retry_committed) {
+ // If we were already committed before receiving this callback, free
+ // cached data for send ops that we've just completed. (If the call has
+ // just now finished, the call to retry_commit() above will have freed all
+ // cached send ops, so we don't need to do it here.)
+ if (previously_committed) {
  free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
  }
  // Call not being retried.
@@ -2039,20 +2121,8 @@ static void on_complete(void* arg, grpc_error* error) {
2039
2121
  // Don't need batch_data anymore.
2040
2122
  batch_data_unref(batch_data);
2041
2123
  // Schedule all of the closures identified above.
2042
- // Note that the call combiner will be yielded for each closure that
2043
- // we schedule. We're already running in the call combiner, so one of
2044
- // the closures can be scheduled directly, but the others will
2045
- // have to re-enter the call combiner.
2046
- if (num_closures > 0) {
2047
- GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
2048
- for (size_t i = 1; i < num_closures; ++i) {
2049
- GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
2050
- closures[i].error, closures[i].reason);
2051
- }
2052
- } else {
2053
- GRPC_CALL_COMBINER_STOP(calld->call_combiner,
2054
- "no closures to run for on_complete");
2055
- }
2124
+ execute_closures_in_call_combiner(elem, "on_complete", closures,
2125
+ num_closures);
2056
2126
  }
2057
2127
 
2058
2128
  //
@@ -2069,6 +2139,31 @@ static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
2069
2139
  grpc_subchannel_call_process_op(subchannel_call, batch);
2070
2140
  }
2071
2141
 
2142
+ // Adds a closure to closures that will execute batch in the call combiner.
2143
+ static void add_closure_for_subchannel_batch(
2144
+ call_data* calld, grpc_transport_stream_op_batch* batch,
2145
+ closure_to_execute* closures, size_t* num_closures) {
2146
+ batch->handler_private.extra_arg = calld->subchannel_call;
2147
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
2148
+ start_batch_in_call_combiner, batch,
2149
+ grpc_schedule_on_exec_ctx);
2150
+ closure_to_execute* closure = &closures[(*num_closures)++];
2151
+ closure->closure = &batch->handler_private.closure;
2152
+ closure->error = GRPC_ERROR_NONE;
2153
+ // If the tracer is enabled, we log a more detailed message, which
2154
+ // requires dynamic allocation. This will be freed in
2155
+ // start_retriable_subchannel_batches().
2156
+ if (grpc_client_channel_trace.enabled()) {
2157
+ char* batch_str = grpc_transport_stream_op_batch_string(batch);
2158
+ gpr_asprintf(const_cast<char**>(&closure->reason),
2159
+ "starting batch in call combiner: %s", batch_str);
2160
+ gpr_free(batch_str);
2161
+ closure->free_reason = true;
2162
+ } else {
2163
+ closure->reason = "start_subchannel_batch";
2164
+ }
2165
+ }
2166
+
2072
2167
  // Adds retriable send_initial_metadata op to batch_data.
2073
2168
  static void add_retriable_send_initial_metadata_op(
2074
2169
  call_data* calld, subchannel_call_retry_state* retry_state,
@@ -2128,12 +2223,12 @@ static void add_retriable_send_message_op(
2128
2223
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2129
2224
  call_data* calld = static_cast<call_data*>(elem->call_data);
2130
2225
  if (grpc_client_channel_trace.enabled()) {
2131
- gpr_log(GPR_DEBUG,
2226
+ gpr_log(GPR_INFO,
2132
2227
  "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
2133
2228
  chand, calld, retry_state->started_send_message_count);
2134
2229
  }
2135
2230
  grpc_core::ByteStreamCache* cache =
2136
- calld->send_messages[retry_state->started_send_message_count];
2231
+ (*calld->send_messages)[retry_state->started_send_message_count];
2137
2232
  ++retry_state->started_send_message_count;
2138
2233
  batch_data->send_message.Init(cache);
2139
2234
  batch_data->batch.send_message = true;
@@ -2215,7 +2310,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
2215
2310
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2216
2311
  call_data* calld = static_cast<call_data*>(elem->call_data);
2217
2312
  if (grpc_client_channel_trace.enabled()) {
2218
- gpr_log(GPR_DEBUG,
2313
+ gpr_log(GPR_INFO,
2219
2314
  "chand=%p calld=%p: call failed but recv_trailing_metadata not "
2220
2315
  "started; starting it internally",
2221
2316
  chand, calld);
@@ -2224,8 +2319,12 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
2224
2319
  static_cast<subchannel_call_retry_state*>(
2225
2320
  grpc_connected_subchannel_call_get_parent_data(
2226
2321
  calld->subchannel_call));
2227
- subchannel_batch_data* batch_data = batch_data_create(elem, 1);
2322
+ // Create batch_data with 2 refs, since this batch will be unreffed twice:
2323
+ // once when the subchannel batch returns, and again when we actually get
2324
+ // a recv_trailing_metadata op from the surface.
2325
+ subchannel_batch_data* batch_data = batch_data_create(elem, 2);
2228
2326
  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
2327
+ retry_state->recv_trailing_metadata_internal_batch = batch_data;
2229
2328
  // Note: This will release the call combiner.
2230
2329
  grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
2231
2330
  }
@@ -2243,7 +2342,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2243
2342
  !retry_state->started_send_initial_metadata &&
2244
2343
  !calld->pending_send_initial_metadata) {
2245
2344
  if (grpc_client_channel_trace.enabled()) {
2246
- gpr_log(GPR_DEBUG,
2345
+ gpr_log(GPR_INFO,
2247
2346
  "chand=%p calld=%p: replaying previously completed "
2248
2347
  "send_initial_metadata op",
2249
2348
  chand, calld);
@@ -2254,12 +2353,12 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2254
2353
  }
2255
2354
  // send_message.
2256
2355
  // Note that we can only have one send_message op in flight at a time.
2257
- if (retry_state->started_send_message_count < calld->send_messages.size() &&
2356
+ if (retry_state->started_send_message_count < calld->send_messages->size() &&
2258
2357
  retry_state->started_send_message_count ==
2259
2358
  retry_state->completed_send_message_count &&
2260
2359
  !calld->pending_send_message) {
2261
2360
  if (grpc_client_channel_trace.enabled()) {
2262
- gpr_log(GPR_DEBUG,
2361
+ gpr_log(GPR_INFO,
2263
2362
  "chand=%p calld=%p: replaying previously completed "
2264
2363
  "send_message op",
2265
2364
  chand, calld);
@@ -2274,11 +2373,11 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
2274
2373
  // to start, since we can't send down any more send_message ops after
2275
2374
  // send_trailing_metadata.
2276
2375
  if (calld->seen_send_trailing_metadata &&
2277
- retry_state->started_send_message_count == calld->send_messages.size() &&
2376
+ retry_state->started_send_message_count == calld->send_messages->size() &&
2278
2377
  !retry_state->started_send_trailing_metadata &&
2279
2378
  !calld->pending_send_trailing_metadata) {
2280
2379
  if (grpc_client_channel_trace.enabled()) {
2281
- gpr_log(GPR_DEBUG,
2380
+ gpr_log(GPR_INFO,
2282
2381
  "chand=%p calld=%p: replaying previously completed "
2283
2382
  "send_trailing_metadata op",
2284
2383
  chand, calld);
@@ -2296,7 +2395,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
  // *num_batches as needed.
  static void add_subchannel_batches_for_pending_batches(
  grpc_call_element* elem, subchannel_call_retry_state* retry_state,
- grpc_transport_stream_op_batch** batches, size_t* num_batches) {
+ closure_to_execute* closures, size_t* num_closures) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
  pending_batch* pending = &calld->pending_batches[i];
@@ -2325,7 +2424,7 @@ static void add_subchannel_batches_for_pending_batches(
  // send_message ops after send_trailing_metadata.
  if (batch->send_trailing_metadata &&
  (retry_state->started_send_message_count + batch->send_message <
- calld->send_messages.size() ||
+ calld->send_messages->size() ||
  retry_state->started_send_trailing_metadata)) {
  continue;
  }
@@ -2339,13 +2438,37 @@ static void add_subchannel_batches_for_pending_batches(
  }
  if (batch->recv_trailing_metadata &&
  retry_state->started_recv_trailing_metadata) {
+ // If we previously completed a recv_trailing_metadata op
+ // initiated by start_internal_recv_trailing_metadata(), use the
+ // result of that instead of trying to re-start this op.
+ if (retry_state->recv_trailing_metadata_internal_batch != nullptr) {
+ // If the batch completed, then trigger the completion callback
+ // directly, so that we return the previously returned results to
+ // the application. Otherwise, just unref the internally
+ // started subchannel batch, since we'll propagate the
+ // completion when it completes.
+ if (retry_state->completed_recv_trailing_metadata) {
+ subchannel_batch_data* batch_data =
+ retry_state->recv_trailing_metadata_internal_batch;
+ closure_to_execute* closure = &closures[(*num_closures)++];
+ closure->closure = &batch_data->on_complete;
+ // Batches containing recv_trailing_metadata always succeed.
+ closure->error = GRPC_ERROR_NONE;
+ closure->reason =
+ "re-executing on_complete for recv_trailing_metadata "
+ "to propagate internally triggered result";
+ } else {
+ batch_data_unref(retry_state->recv_trailing_metadata_internal_batch);
+ }
+ retry_state->recv_trailing_metadata_internal_batch = nullptr;
+ }
  continue;
  }
  // If we're not retrying, just send the batch as-is.
  if (calld->method_params == nullptr ||
  calld->method_params->retry_policy() == nullptr ||
  calld->retry_committed) {
- batches[(*num_batches)++] = batch;
+ add_closure_for_subchannel_batch(calld, batch, closures, num_closures);
  pending_batch_clear(calld, pending);
  continue;
  }
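The closure_to_execute record and the add_closure_for_subchannel_batch helper used above are defined earlier in client_channel.cc and are not part of these hunks. Based only on the fields populated above (closure, error, reason) and on the batch-starting code removed further down in this file, the following is a minimal sketch of their plausible shape, not the actual definitions:

// Sketch only; field and helper names are taken from the usages in this
// diff, and the details of the real definitions may differ.
typedef struct {
  grpc_closure* closure;  // callback to run
  grpc_error* error;      // error to pass to the callback
  const char* reason;     // label used when re-entering the call combiner
} closure_to_execute;

// Assumed helper: wraps a subchannel batch in a closure that starts it on
// the subchannel call, then appends that closure to the caller's list.
static void add_closure_for_subchannel_batch(
    call_data* calld, grpc_transport_stream_op_batch* batch,
    closure_to_execute* closures, size_t* num_closures) {
  batch->handler_private.extra_arg = calld->subchannel_call;
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
                    start_batch_in_call_combiner, batch,
                    grpc_schedule_on_exec_ctx);
  closure_to_execute* closure = &closures[(*num_closures)++];
  closure->closure = &batch->handler_private.closure;
  closure->error = GRPC_ERROR_NONE;
  closure->reason = "start_subchannel_batch";
}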
@@ -2382,7 +2505,8 @@ static void add_subchannel_batches_for_pending_batches(
  GPR_ASSERT(batch->collect_stats);
  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
  }
- batches[(*num_batches)++] = &batch_data->batch;
+ add_closure_for_subchannel_batch(calld, &batch_data->batch, closures,
+ num_closures);
  }
  }

@@ -2393,69 +2517,36 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches",
  chand, calld);
  }
  subchannel_call_retry_state* retry_state =
  static_cast<subchannel_call_retry_state*>(
  grpc_connected_subchannel_call_get_parent_data(
  calld->subchannel_call));
+ // Construct list of closures to execute, one for each pending batch.
  // We can start up to 6 batches.
- grpc_transport_stream_op_batch*
- batches[GPR_ARRAY_SIZE(calld->pending_batches)];
- size_t num_batches = 0;
+ closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches)];
+ size_t num_closures = 0;
  // Replay previously-returned send_* ops if needed.
  subchannel_batch_data* replay_batch_data =
  maybe_create_subchannel_batch_for_replay(elem, retry_state);
  if (replay_batch_data != nullptr) {
- batches[num_batches++] = &replay_batch_data->batch;
+ add_closure_for_subchannel_batch(calld, &replay_batch_data->batch, closures,
+ &num_closures);
  }
  // Now add pending batches.
- add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
- &num_batches);
+ add_subchannel_batches_for_pending_batches(elem, retry_state, closures,
+ &num_closures);
  // Start batches on subchannel call.
- // Note that the call combiner will be yielded for each batch that we
- // send down. We're already running in the call combiner, so one of
- // the batches can be started directly, but the others will have to
- // re-enter the call combiner.
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: starting %" PRIuPTR
  " retriable batches on subchannel_call=%p",
- chand, calld, num_batches, calld->subchannel_call);
- }
- if (num_batches == 0) {
- // This should be fairly rare, but it can happen when (e.g.) an
- // attempt completes before it has finished replaying all
- // previously sent messages.
- GRPC_CALL_COMBINER_STOP(calld->call_combiner,
- "no retriable subchannel batches to start");
- } else {
- for (size_t i = 1; i < num_batches; ++i) {
- if (grpc_client_channel_trace.enabled()) {
- char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
- gpr_log(GPR_DEBUG,
- "chand=%p calld=%p: starting batch in call combiner: %s", chand,
- calld, batch_str);
- gpr_free(batch_str);
- }
- batches[i]->handler_private.extra_arg = calld->subchannel_call;
- GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
- start_batch_in_call_combiner, batches[i],
- grpc_schedule_on_exec_ctx);
- GRPC_CALL_COMBINER_START(calld->call_combiner,
- &batches[i]->handler_private.closure,
- GRPC_ERROR_NONE, "start_subchannel_batch");
- }
- if (grpc_client_channel_trace.enabled()) {
- char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
- batch_str);
- gpr_free(batch_str);
- }
- // Note: This will release the call combiner.
- grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
+ chand, calld, num_closures, calld->subchannel_call);
  }
+ execute_closures_in_call_combiner(elem, "start_retriable_subchannel_batches",
+ closures, num_closures);
  }

  //
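The inline loop removed above, which started one batch directly and re-entered the call combiner for the rest, is folded into execute_closures_in_call_combiner, whose definition sits outside these hunks. Assuming it keeps the call-combiner discipline described by the deleted comments (one closure runs on the current combiner pass, the rest re-enter it, and the combiner is yielded when there is nothing to start), a hedged sketch, with tracing omitted:

// Assumed shape of the helper called above; the real definition is earlier
// in client_channel.cc and may differ in details such as logging.
static void execute_closures_in_call_combiner(grpc_call_element* elem,
                                              const char* caller,
                                              closure_to_execute* closures,
                                              size_t num_closures) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (num_closures == 0) {
    // Nothing to start (e.g., an attempt completed before finishing its
    // replay), so just yield the call combiner.
    GRPC_CALL_COMBINER_STOP(calld->call_combiner, caller);
    return;
  }
  // We are already running in the call combiner, so the first closure can
  // be scheduled directly; the others must re-enter the combiner.
  GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
  for (size_t i = 1; i < num_closures; ++i) {
    GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
                             closures[i].error, closures[i].reason);
  }
}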
@@ -2480,7 +2571,7 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
  grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
  call_args, &calld->subchannel_call);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
  chand, calld, calld->subchannel_call, grpc_error_string(new_error));
  }
  if (new_error != GRPC_ERROR_NONE) {
@@ -2521,7 +2612,7 @@ static void pick_done(void* arg, grpc_error* error) {
  : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
  "Failed to create subchannel", &error, 1);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: failed to create subchannel: error=%s",
  chand, calld, grpc_error_string(new_error));
  }
@@ -2565,7 +2656,7 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
  // the one we started it on. However, this will just be a no-op.
  if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling pick from LB policy %p",
  chand, calld, chand->lb_policy.get());
  }
  chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
@@ -2580,8 +2671,8 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
- chand, calld);
+ gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously", chand,
+ calld);
  }
  async_pick_done_locked(elem, GRPC_ERROR_REF(error));
  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@@ -2593,7 +2684,7 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
  chand, calld);
  }
  if (chand->retry_throttle_data != nullptr) {
@@ -2631,8 +2722,8 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
- chand, calld, chand->lb_policy.get());
+ gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p", chand,
+ calld, chand->lb_policy.get());
  }
  // Only get service config data on the first attempt.
  if (calld->num_attempts_completed == 0) {
@@ -2679,7 +2770,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
  if (pick_done) {
  // Pick completed synchronously.
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
  chand, calld);
  }
  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@@ -2723,7 +2814,7 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: cancelling pick waiting for resolver result",
  chand, calld);
  }
@@ -2743,7 +2834,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
  if (args->finished) {
  /* cancelled, do nothing */
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "call cancelled before resolver result");
+ gpr_log(GPR_INFO, "call cancelled before resolver result");
  }
  gpr_free(args);
  return;
@@ -2754,14 +2845,14 @@ static void pick_after_resolver_result_done_locked(void* arg,
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (error != GRPC_ERROR_NONE) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
  chand, calld);
  }
  async_pick_done_locked(elem, GRPC_ERROR_REF(error));
  } else if (chand->resolver == nullptr) {
  // Shutting down.
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
  calld);
  }
  async_pick_done_locked(
@@ -2777,7 +2868,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
  .send_initial_metadata_flags;
  if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: resolver returned but no LB policy; "
  "wait_for_ready=true; trying again",
  chand, calld);
@@ -2785,7 +2876,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
  pick_after_resolver_result_start_locked(elem);
  } else {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: resolver returned but no LB policy; "
  "wait_for_ready=false; failing",
  chand, calld);
@@ -2798,7 +2889,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
  }
  } else {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing pick",
  chand, calld);
  }
  if (pick_callback_start_locked(elem)) {
@@ -2816,7 +2907,7 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: deferring pick pending resolver result", chand,
  calld);
  }
@@ -2883,7 +2974,7 @@ static void cc_start_transport_stream_op_batch(
  // If we've previously been cancelled, immediately fail any new batches.
  if (calld->cancel_error != GRPC_ERROR_NONE) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
  chand, calld, grpc_error_string(calld->cancel_error));
  }
  // Note: This will release the call combiner.
@@ -2902,7 +2993,7 @@ static void cc_start_transport_stream_op_batch(
  calld->cancel_error =
  GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+ gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
  calld, grpc_error_string(calld->cancel_error));
  }
  // If we do not have a subchannel call (i.e., a pick has not yet
@@ -2928,7 +3019,7 @@ static void cc_start_transport_stream_op_batch(
  // streaming calls).
  if (calld->subchannel_call != nullptr) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
  calld, calld->subchannel_call);
  }
@@ -2940,7 +3031,7 @@ static void cc_start_transport_stream_op_batch(
  // combiner to start a pick.
  if (batch->send_initial_metadata) {
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner",
+ gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
  chand, calld);
  }
  GRPC_CLOSURE_SCHED(
@@ -2950,7 +3041,7 @@ static void cc_start_transport_stream_op_batch(
  } else {
  // For all other batches, release the call combiner.
  if (grpc_client_channel_trace.enabled()) {
- gpr_log(GPR_DEBUG,
+ gpr_log(GPR_INFO,
  "chand=%p calld=%p: saved batch, yeilding call combiner", chand,
  calld);
  }
@@ -2976,6 +3067,7 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
  calld->deadline);
  }
  calld->enable_retries = chand->enable_retries;
+ calld->send_messages.Init();
  return GRPC_ERROR_NONE;
  }

@@ -3011,6 +3103,7 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
  calld->pick.subchannel_call_context[i].value);
  }
  }
+ calld->send_messages.Destroy();
  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
  }

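The new Init()/Destroy() calls here, together with the earlier change from calld->send_messages.size() to calld->send_messages->size(), indicate that send_messages is now manually constructed (for example via grpc_core::ManualConstructor), so that its constructor and destructor run only while the call element is in use. The field's actual declaration is not part of this diff; the sketch below only illustrates the pattern, and the example_* names, the void* element type, and the include paths are assumptions:

// Illustration only.  The element type and the exact headers are
// assumptions; the real send_messages field in client_channel.cc may differ.
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"

struct example_call_data {
  // Storage lives inline in the struct, but the vector's constructor does
  // not run until Init() and its destructor does not run until Destroy().
  grpc_core::ManualConstructor<grpc_core::InlinedVector<void*, 3>>
      send_messages;
};

static void example_init(example_call_data* calld) {
  calld->send_messages.Init();  // placement-new the vector
}

static void example_use(example_call_data* calld, void* msg) {
  calld->send_messages->push_back(msg);  // members accessed via ->
}

static void example_destroy(example_call_data* calld) {
  calld->send_messages.Destroy();  // run the destructor explicitly
}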
@@ -3159,6 +3252,8 @@ static void watch_connectivity_state_locked(void* arg,
  external_connectivity_watcher* found = nullptr;
  if (w->state != nullptr) {
  external_connectivity_watcher_list_append(w->chand, w);
+ // An assumption is being made that the closure is scheduled on the exec ctx
+ // scheduler and that GRPC_CLOSURE_RUN would run the closure immediately.
  GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
  GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
  grpc_combiner_scheduler(w->chand->combiner));