grpc 1.13.0 → 1.14.0

Potentially problematic release: this version of grpc might be problematic.

Files changed (213)
  1. checksums.yaml +4 -4
  2. data/Makefile +403 -153
  3. data/include/grpc/grpc.h +0 -8
  4. data/include/grpc/grpc_security.h +59 -2
  5. data/include/grpc/impl/codegen/grpc_types.h +8 -2
  6. data/include/grpc/impl/codegen/log.h +112 -0
  7. data/include/grpc/module.modulemap +2 -0
  8. data/include/grpc/support/log.h +2 -88
  9. data/include/grpc/support/string_util.h +2 -0
  10. data/src/boringssl/err_data.c +597 -593
  11. data/src/core/ext/filters/client_channel/client_channel.cc +715 -770
  12. data/src/core/ext/filters/client_channel/client_channel.h +5 -0
  13. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +111 -0
  14. data/src/core/ext/filters/client_channel/client_channel_channelz.h +69 -0
  15. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +9 -0
  16. data/src/core/ext/filters/client_channel/http_proxy.cc +22 -5
  17. data/src/core/ext/filters/client_channel/lb_policy.h +15 -0
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +3 -0
  19. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +3 -3
  20. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +3 -1
  21. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.c +19 -0
  22. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.h +54 -0
  23. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.c +19 -0
  24. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.h +54 -0
  25. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +4 -17
  26. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +37 -63
  27. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +79 -0
  28. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +5 -2
  29. data/src/core/ext/filters/client_channel/lb_policy_factory.cc +8 -0
  30. data/src/core/ext/filters/client_channel/lb_policy_factory.h +4 -0
  31. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +2 -2
  32. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +317 -0
  33. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +48 -9
  34. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +40 -293
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +106 -84
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +6 -2
  37. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +6 -5
  38. data/src/core/ext/filters/client_channel/subchannel.cc +36 -6
  39. data/src/core/ext/filters/client_channel/subchannel.h +4 -0
  40. data/src/core/ext/filters/deadline/deadline_filter.cc +18 -15
  41. data/src/core/ext/filters/deadline/deadline_filter.h +5 -5
  42. data/src/core/ext/filters/http/client/http_client_filter.cc +10 -9
  43. data/src/core/ext/filters/http/server/http_server_filter.h +1 -1
  44. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +1 -1
  45. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +3 -2
  46. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +33 -22
  47. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1 -1
  48. data/src/core/ext/transport/chttp2/transport/internal.h +10 -3
  49. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +17 -0
  50. data/src/core/ext/transport/chttp2/transport/writing.cc +21 -16
  51. data/src/core/ext/transport/inproc/inproc_transport.cc +46 -6
  52. data/src/core/lib/channel/channel_stack.cc +22 -24
  53. data/src/core/lib/channel/channel_trace.cc +28 -63
  54. data/src/core/lib/channel/channel_trace.h +13 -17
  55. data/src/core/lib/channel/channelz.cc +143 -0
  56. data/src/core/lib/channel/channelz.h +124 -0
  57. data/src/core/lib/channel/channelz_registry.cc +7 -24
  58. data/src/core/lib/channel/channelz_registry.h +12 -8
  59. data/src/core/lib/channel/connected_channel.cc +8 -1
  60. data/src/core/{ext/filters/load_reporting/server_load_reporting_filter.h → lib/gpr/alloc.h} +7 -9
  61. data/src/core/lib/gpr/arena.cc +8 -8
  62. data/src/core/lib/gpr/string.cc +28 -0
  63. data/src/core/lib/gpr/string.h +10 -0
  64. data/src/core/lib/gprpp/abstract.h +5 -2
  65. data/src/core/lib/gprpp/inlined_vector.h +57 -3
  66. data/src/core/lib/gprpp/memory.h +2 -2
  67. data/src/core/lib/gprpp/ref_counted_ptr.h +5 -0
  68. data/src/core/lib/gprpp/thd_posix.cc +1 -1
  69. data/src/core/lib/iomgr/call_combiner.h +80 -0
  70. data/src/core/lib/iomgr/closure.h +3 -2
  71. data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
  72. data/src/core/lib/iomgr/error.cc +12 -0
  73. data/src/core/lib/iomgr/error.h +5 -0
  74. data/src/core/lib/iomgr/ev_epoll1_linux.cc +36 -9
  75. data/src/core/lib/iomgr/ev_epollex_linux.cc +172 -46
  76. data/src/core/lib/iomgr/ev_epollsig_linux.cc +47 -21
  77. data/src/core/lib/iomgr/ev_poll_posix.cc +10 -4
  78. data/src/core/lib/iomgr/ev_posix.cc +17 -9
  79. data/src/core/lib/iomgr/ev_posix.h +20 -4
  80. data/src/core/lib/iomgr/executor.cc +196 -140
  81. data/src/core/lib/iomgr/executor.h +47 -14
  82. data/src/core/lib/iomgr/iomgr.cc +2 -0
  83. data/src/core/lib/iomgr/iomgr.h +5 -0
  84. data/src/core/lib/iomgr/is_epollexclusive_available.cc +1 -0
  85. data/src/core/lib/iomgr/socket_utils.h +9 -0
  86. data/src/core/lib/iomgr/socket_utils_common_posix.cc +4 -0
  87. data/src/core/lib/iomgr/socket_utils_uv.cc +4 -0
  88. data/src/core/lib/iomgr/socket_utils_windows.cc +4 -0
  89. data/src/core/lib/iomgr/tcp_client_posix.cc +3 -5
  90. data/src/core/lib/iomgr/tcp_posix.cc +6 -1
  91. data/src/core/lib/iomgr/tcp_server_posix.cc +3 -3
  92. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +1 -1
  93. data/src/core/lib/iomgr/timer_manager.cc +0 -1
  94. data/src/core/lib/iomgr/udp_server.cc +2 -3
  95. data/src/core/lib/json/json.cc +10 -0
  96. data/src/core/lib/json/json.h +5 -0
  97. data/src/core/lib/security/context/security_context.cc +8 -8
  98. data/src/core/lib/security/context/security_context.h +6 -2
  99. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +2 -1
  100. data/src/core/lib/security/credentials/local/local_credentials.cc +77 -0
  101. data/src/core/lib/security/credentials/local/local_credentials.h +40 -0
  102. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +17 -3
  103. data/src/core/lib/security/security_connector/local_security_connector.cc +245 -0
  104. data/src/core/lib/security/security_connector/local_security_connector.h +58 -0
  105. data/src/core/lib/security/security_connector/security_connector.cc +30 -5
  106. data/src/core/lib/security/security_connector/security_connector.h +1 -0
  107. data/src/core/lib/security/transport/client_auth_filter.cc +5 -1
  108. data/src/core/lib/security/transport/server_auth_filter.cc +4 -5
  109. data/src/core/lib/surface/call.cc +75 -32
  110. data/src/core/lib/surface/call.h +2 -0
  111. data/src/core/lib/surface/channel.cc +32 -13
  112. data/src/core/lib/surface/channel.h +4 -0
  113. data/src/core/lib/surface/version.cc +1 -1
  114. data/src/core/lib/transport/transport.cc +20 -9
  115. data/src/core/lib/transport/transport.h +12 -10
  116. data/src/core/lib/transport/transport_op_string.cc +0 -7
  117. data/src/core/plugin_registry/grpc_plugin_registry.cc +0 -4
  118. data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h +2 -2
  119. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +2 -1
  120. data/src/core/tsi/alts/handshaker/altscontext.pb.c +0 -1
  121. data/src/core/tsi/alts/handshaker/altscontext.pb.h +1 -2
  122. data/src/core/tsi/alts/handshaker/handshaker.pb.c +0 -1
  123. data/src/core/tsi/alts/handshaker/handshaker.pb.h +1 -2
  124. data/src/core/tsi/alts/handshaker/transport_security_common.pb.c +0 -1
  125. data/src/core/tsi/alts/handshaker/transport_security_common.pb.h +1 -1
  126. data/src/core/tsi/alts/handshaker/transport_security_common_api.h +2 -2
  127. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +47 -1
  128. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h +3 -1
  129. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +12 -11
  130. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h +7 -2
  131. data/src/core/tsi/local_transport_security.cc +209 -0
  132. data/src/core/tsi/local_transport_security.h +51 -0
  133. data/src/core/tsi/ssl_transport_security.cc +2 -3
  134. data/src/{core/ext → cpp/ext/filters}/census/grpc_context.cc +0 -0
  135. data/src/ruby/ext/grpc/rb_channel_credentials.c +3 -3
  136. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +18 -18
  137. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +29 -29
  138. data/src/ruby/lib/grpc/generic/active_call.rb +19 -23
  139. data/src/ruby/lib/grpc/version.rb +1 -1
  140. data/src/ruby/spec/call_credentials_spec.rb +1 -1
  141. data/src/ruby/spec/call_spec.rb +1 -1
  142. data/src/ruby/spec/channel_credentials_spec.rb +1 -1
  143. data/src/ruby/spec/channel_spec.rb +1 -1
  144. data/src/ruby/spec/client_auth_spec.rb +1 -12
  145. data/src/ruby/spec/client_server_spec.rb +1 -1
  146. data/src/ruby/spec/compression_options_spec.rb +1 -1
  147. data/src/ruby/spec/error_sanity_spec.rb +1 -1
  148. data/src/ruby/spec/generic/client_stub_spec.rb +13 -1
  149. data/src/ruby/spec/generic/rpc_desc_spec.rb +1 -1
  150. data/src/ruby/spec/generic/rpc_server_pool_spec.rb +1 -1
  151. data/src/ruby/spec/generic/service_spec.rb +1 -1
  152. data/src/ruby/spec/google_rpc_status_utils_spec.rb +1 -12
  153. data/src/ruby/spec/pb/duplicate/codegen_spec.rb +1 -0
  154. data/src/ruby/spec/pb/health/checker_spec.rb +1 -1
  155. data/src/ruby/spec/server_credentials_spec.rb +1 -1
  156. data/src/ruby/spec/server_spec.rb +1 -1
  157. data/src/ruby/spec/spec_helper.rb +1 -0
  158. data/src/ruby/spec/support/services.rb +1 -1
  159. data/src/ruby/spec/time_consts_spec.rb +1 -1
  160. data/third_party/boringssl/crypto/asn1/tasn_dec.c +40 -19
  161. data/third_party/boringssl/crypto/bytestring/cbs.c +1 -0
  162. data/third_party/boringssl/crypto/cipher_extra/e_aesccm.c +47 -15
  163. data/third_party/boringssl/crypto/ec_extra/ec_asn1.c +9 -10
  164. data/third_party/boringssl/crypto/ecdh/ecdh.c +4 -3
  165. data/third_party/boringssl/crypto/fipsmodule/bn/add.c +30 -54
  166. data/third_party/boringssl/crypto/fipsmodule/bn/bn.c +7 -1
  167. data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +8 -8
  168. data/third_party/boringssl/crypto/fipsmodule/bn/div.c +97 -11
  169. data/third_party/boringssl/crypto/fipsmodule/bn/gcd.c +274 -218
  170. data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +111 -34
  171. data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +2 -2
  172. data/third_party/boringssl/crypto/fipsmodule/bn/montgomery_inv.c +1 -1
  173. data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +24 -6
  174. data/third_party/boringssl/crypto/fipsmodule/bn/prime.c +324 -63
  175. data/third_party/boringssl/crypto/fipsmodule/bn/random.c +74 -21
  176. data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +128 -86
  177. data/third_party/boringssl/crypto/fipsmodule/bn/sqrt.c +1 -1
  178. data/third_party/boringssl/crypto/fipsmodule/ec/ec_key.c +67 -112
  179. data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +8 -1
  180. data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +5 -5
  181. data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +9 -17
  182. data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64-table.h +5378 -5418
  183. data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +32 -32
  184. data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +5 -11
  185. data/third_party/boringssl/crypto/fipsmodule/rsa/blinding.c +16 -40
  186. data/third_party/boringssl/crypto/fipsmodule/rsa/internal.h +1 -6
  187. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +41 -29
  188. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +63 -49
  189. data/third_party/boringssl/crypto/x509/vpm_int.h +1 -0
  190. data/third_party/boringssl/crypto/x509/x509_vfy.c +4 -0
  191. data/third_party/boringssl/crypto/x509/x509_vpm.c +44 -22
  192. data/third_party/boringssl/include/openssl/aead.h +8 -2
  193. data/third_party/boringssl/include/openssl/asn1.h +1 -0
  194. data/third_party/boringssl/include/openssl/base.h +4 -0
  195. data/third_party/boringssl/include/openssl/bn.h +13 -3
  196. data/third_party/boringssl/include/openssl/bytestring.h +4 -4
  197. data/third_party/boringssl/include/openssl/ec.h +10 -4
  198. data/third_party/boringssl/include/openssl/ec_key.h +0 -3
  199. data/third_party/boringssl/include/openssl/rsa.h +1 -0
  200. data/third_party/boringssl/include/openssl/ssl.h +8 -3
  201. data/third_party/boringssl/include/openssl/ssl3.h +0 -1
  202. data/third_party/boringssl/include/openssl/x509.h +1 -0
  203. data/third_party/boringssl/include/openssl/x509v3.h +1 -0
  204. data/third_party/boringssl/ssl/handshake_client.cc +36 -64
  205. data/third_party/boringssl/ssl/ssl_cipher.cc +4 -0
  206. data/third_party/boringssl/ssl/ssl_lib.cc +1 -1
  207. metadata +45 -38
  208. data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +0 -222
  209. data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc +0 -71
  210. data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +0 -61
  211. data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +0 -51
  212. data/src/ruby/spec/pb/package_with_underscore/data.proto +0 -23
  213. data/src/ruby/spec/pb/package_with_underscore/service.proto +0 -23
data/src/core/ext/filters/client_channel/subchannel.h
@@ -21,6 +21,7 @@
 
 #include <grpc/support/port_platform.h>
 
+#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
 #include "src/core/ext/filters/client_channel/connector.h"
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/gpr/arena.h"
@@ -115,6 +116,9 @@ grpc_subchannel_call* grpc_subchannel_call_ref(
 void grpc_subchannel_call_unref(
     grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
 
+grpc_core::channelz::SubchannelNode* grpc_subchannel_get_channelz_node(
+    grpc_subchannel* subchannel);
+
 /** Returns a pointer to the parent data associated with \a subchannel_call.
     The data will be of the size specified in \a parent_data_size
     field of the args passed to \a grpc_connected_subchannel_create_call(). */
data/src/core/ext/filters/deadline/deadline_filter.cc
@@ -128,21 +128,25 @@ static void cancel_timer_if_needed(grpc_deadline_state* deadline_state) {
   }
 }
 
-// Callback run when the call is complete.
-static void on_complete(void* arg, grpc_error* error) {
+// Callback run when we receive trailing metadata.
+static void recv_trailing_metadata_ready(void* arg, grpc_error* error) {
   grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
   cancel_timer_if_needed(deadline_state);
-  // Invoke the next callback.
-  GRPC_CLOSURE_RUN(deadline_state->next_on_complete, GRPC_ERROR_REF(error));
+  // Invoke the original callback.
+  GRPC_CLOSURE_RUN(deadline_state->original_recv_trailing_metadata_ready,
+                   GRPC_ERROR_REF(error));
 }
 
-// Inject our own on_complete callback into op.
-static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
-                                  grpc_transport_stream_op_batch* op) {
-  deadline_state->next_on_complete = op->on_complete;
-  GRPC_CLOSURE_INIT(&deadline_state->on_complete, on_complete, deadline_state,
+// Inject our own recv_trailing_metadata_ready callback into op.
+static void inject_recv_trailing_metadata_ready(
+    grpc_deadline_state* deadline_state, grpc_transport_stream_op_batch* op) {
+  deadline_state->original_recv_trailing_metadata_ready =
+      op->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+  GRPC_CLOSURE_INIT(&deadline_state->recv_trailing_metadata_ready,
+                    recv_trailing_metadata_ready, deadline_state,
                     grpc_schedule_on_exec_ctx);
-  op->on_complete = &deadline_state->on_complete;
+  op->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+      &deadline_state->recv_trailing_metadata_ready;
 }
 
 // Callback and associated state for starting the timer after call stack
@@ -226,7 +230,7 @@ void grpc_deadline_state_client_start_transport_stream_op_batch(
     // Make sure we know when the call is complete, so that we can cancel
     // the timer.
     if (op->recv_trailing_metadata) {
-      inject_on_complete_cb(deadline_state, op);
+      inject_recv_trailing_metadata_ready(deadline_state, op);
     }
   }
 }
@@ -289,11 +293,10 @@ static void client_start_transport_stream_op_batch(
 static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
   server_call_data* calld = static_cast<server_call_data*>(elem->call_data);
-  // Get deadline from metadata and start the timer if needed.
   start_timer_if_needed(elem, calld->recv_initial_metadata->deadline);
   // Invoke the next callback.
-  calld->next_recv_initial_metadata_ready->cb(
-      calld->next_recv_initial_metadata_ready->cb_arg, error);
+  GRPC_CLOSURE_RUN(calld->next_recv_initial_metadata_ready,
+                   GRPC_ERROR_REF(error));
 }
 
 // Method for starting a call op for server filter.
@@ -323,7 +326,7 @@ static void server_start_transport_stream_op_batch(
     // the client never sends trailing metadata, because this is the
     // hook that tells us when the call is complete on the server side.
     if (op->recv_trailing_metadata) {
-      inject_on_complete_cb(&calld->base.deadline_state, op);
+      inject_recv_trailing_metadata_ready(&calld->base.deadline_state, op);
     }
   }
   // Chain to next filter.

data/src/core/ext/filters/deadline/deadline_filter.h
@@ -37,12 +37,12 @@ typedef struct grpc_deadline_state {
   grpc_deadline_timer_state timer_state;
   grpc_timer timer;
   grpc_closure timer_callback;
-  // Closure to invoke when the call is complete.
+  // Closure to invoke when we receive trailing metadata.
   // We use this to cancel the timer.
-  grpc_closure on_complete;
-  // The original on_complete closure, which we chain to after our own
-  // closure is invoked.
-  grpc_closure* next_on_complete;
+  grpc_closure recv_trailing_metadata_ready;
+  // The original recv_trailing_metadata_ready closure, which we chain to
+  // after our own closure is invoked.
+  grpc_closure* original_recv_trailing_metadata_ready;
 } grpc_deadline_state;
 
 //
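The deadline filter change above swaps interception of the batch-level on_complete for interception of the dedicated recv_trailing_metadata_ready hook, but the underlying pattern is unchanged: remember the closure currently installed in the batch, substitute your own, and chain to the original after doing your bookkeeping. Below is a minimal, self-contained C++ sketch of that intercept-and-chain pattern; Closure, Batch, DeadlineState and the function names are simplified stand-ins for illustration, not the real gRPC core API.

    // Minimal sketch of the "intercept and chain" pattern used by the
    // deadline filter. All types here are simplified stand-ins.
    #include <cstdio>
    #include <functional>

    struct Closure {
      std::function<void(int /*error*/)> cb;
      void Run(int error) { cb(error); }
    };

    struct Batch {
      Closure* recv_trailing_metadata_ready = nullptr;  // completion hook
    };

    struct DeadlineState {
      Closure wrapper;              // our injected closure
      Closure* original = nullptr;  // the closure we displaced
      bool timer_cancelled = false;
    };

    // Swap our own closure into the batch, remembering the original so we
    // can chain to it after doing our bookkeeping (cancelling the timer).
    void InjectRecvTrailingMetadataReady(DeadlineState* state, Batch* batch) {
      state->original = batch->recv_trailing_metadata_ready;
      state->wrapper.cb = [state](int error) {
        state->timer_cancelled = true;  // stands in for cancel_timer_if_needed()
        state->original->Run(error);    // chain to the displaced closure
      };
      batch->recv_trailing_metadata_ready = &state->wrapper;
    }

    int main() {
      Closure user_cb{[](int error) { std::printf("user callback, error=%d\n", error); }};
      Batch batch{&user_cb};
      DeadlineState state;
      InjectRecvTrailingMetadataReady(&state, &batch);
      batch.recv_trailing_metadata_ready->Run(0);  // transport "delivers" trailing metadata
      std::printf("timer_cancelled=%d\n", state.timer_cancelled);
    }

The same convention appears again in http_client_filter.cc below; each filter keeps one closure of storage plus one pointer per intercepted hook, so interception costs no allocation.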
data/src/core/ext/filters/http/client/http_client_filter.cc
@@ -55,8 +55,8 @@ struct call_data {
   grpc_closure recv_initial_metadata_ready;
   // State for handling recv_trailing_metadata ops.
   grpc_metadata_batch* recv_trailing_metadata;
-  grpc_closure* original_recv_trailing_metadata_on_complete;
-  grpc_closure recv_trailing_metadata_on_complete;
+  grpc_closure* original_recv_trailing_metadata_ready;
+  grpc_closure recv_trailing_metadata_ready;
   // State for handling send_message ops.
   grpc_transport_stream_op_batch* send_message_batch;
   size_t send_message_bytes_read;
@@ -153,8 +153,7 @@ static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
   GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready, error);
 }
 
-static void recv_trailing_metadata_on_complete(void* user_data,
-                                               grpc_error* error) {
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (error == GRPC_ERROR_NONE) {
@@ -163,7 +162,7 @@ static void recv_trailing_metadata_on_complete(void* user_data,
   } else {
     GRPC_ERROR_REF(error);
   }
-  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_on_complete, error);
+  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, error);
 }
 
 static void send_message_on_complete(void* arg, grpc_error* error) {
@@ -312,8 +311,10 @@ static void hc_start_transport_stream_op_batch(
     /* substitute our callback for the higher callback */
     calld->recv_trailing_metadata =
         batch->payload->recv_trailing_metadata.recv_trailing_metadata;
-    calld->original_recv_trailing_metadata_on_complete = batch->on_complete;
-    batch->on_complete = &calld->recv_trailing_metadata_on_complete;
+    calld->original_recv_trailing_metadata_ready =
+        batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+    batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+        &calld->recv_trailing_metadata_ready;
   }
 
   grpc_error* error = GRPC_ERROR_NONE;
@@ -420,8 +421,8 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
   GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
                     recv_initial_metadata_ready, elem,
                     grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_on_complete,
-                    recv_trailing_metadata_on_complete, elem,
+  GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
+                    recv_trailing_metadata_ready, elem,
                     grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
                     elem, grpc_schedule_on_exec_ctx);
data/src/core/ext/filters/http/server/http_server_filter.h
@@ -23,7 +23,7 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-/* Processes metadata on the client side for HTTP2 transports */
+/* Processes metadata on the server side for HTTP2 transports */
 extern const grpc_channel_filter grpc_http_server_filter;
 
 #endif /* GRPC_CORE_EXT_FILTERS_HTTP_SERVER_HTTP_SERVER_FILTER_H */
data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
@@ -50,7 +50,7 @@ grpc_channel* grpc_insecure_channel_create_from_fd(
   GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
 
   grpc_endpoint* client = grpc_tcp_client_create_from_fd(
-      grpc_fd_create(fd, "client"), args, "fd-client");
+      grpc_fd_create(fd, "client", false), args, "fd-client");
 
   grpc_transport* transport =
       grpc_create_chttp2_transport(final_args, client, true);

data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
@@ -43,8 +43,9 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
   char* name;
   gpr_asprintf(&name, "fd:%d", fd);
 
-  grpc_endpoint* server_endpoint = grpc_tcp_create(
-      grpc_fd_create(fd, name), grpc_server_get_channel_args(server), name);
+  grpc_endpoint* server_endpoint =
+      grpc_tcp_create(grpc_fd_create(fd, name, false),
+                      grpc_server_get_channel_args(server), name);
 
   gpr_free(name);
 
data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -813,7 +813,11 @@ static void set_write_state(grpc_chttp2_transport* t,
                             write_state_name(st), reason));
   t->write_state = st;
   if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
-    GRPC_CLOSURE_LIST_SCHED(&t->run_after_write);
+    grpc_chttp2_stream* s;
+    while (grpc_chttp2_list_pop_waiting_for_write_stream(t, &s)) {
+      GRPC_CLOSURE_LIST_SCHED(&s->run_after_write);
+      GRPC_CHTTP2_STREAM_UNREF(s, "chttp2:write_closure_sched");
+    }
     if (t->close_transport_on_writes_finished != nullptr) {
       grpc_error* err = t->close_transport_on_writes_finished;
       t->close_transport_on_writes_finished = nullptr;
@@ -1149,12 +1153,10 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
   }
 }
 
-/* Flag that this closure barrier wants stats to be updated before finishing */
-#define CLOSURE_BARRIER_STATS_BIT (1 << 0)
 /* Flag that this closure barrier may be covering a write in a pollset, and so
    we should not complete this closure until we can prove that the write got
    scheduled */
-#define CLOSURE_BARRIER_MAY_COVER_WRITE (1 << 1)
+#define CLOSURE_BARRIER_MAY_COVER_WRITE (1 << 0)
/* First bit of the reference count, stored in the high order bits (with the low
   bits being used for flags defined above) */
 #define CLOSURE_BARRIER_FIRST_REF_BIT (1 << 16)
@@ -1206,15 +1208,14 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
     grpc_error_add_child(closure->error_data.error, error);
   }
   if (closure->next_data.scratch < CLOSURE_BARRIER_FIRST_REF_BIT) {
-    if (closure->next_data.scratch & CLOSURE_BARRIER_STATS_BIT) {
-      grpc_transport_move_stats(&s->stats, s->collecting_stats);
-      s->collecting_stats = nullptr;
-    }
-    if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) ||
+    if (s->seen_error || (t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) ||
        !(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) {
      GRPC_CLOSURE_RUN(closure, closure->error_data.error);
    } else {
-      grpc_closure_list_append(&t->run_after_write, closure,
+      if (grpc_chttp2_list_add_waiting_for_write_stream(t, s)) {
+        GRPC_CHTTP2_STREAM_REF(s, "chttp2:pending_write_closure");
+      }
+      grpc_closure_list_append(&s->run_after_write, closure,
                               closure->error_data.error);
    }
  }
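The CLOSURE_BARRIER comments and the hunk above describe the trick that the TODO(roth) note further down also references: the batch's on_complete closure doubles as a barrier, with a reference count kept in the high bits of its scratch word and flags in the low bits, and the closure runs only when the last sub-op drops its reference. A toy version of that counting scheme, under invented names rather than gRPC's actual closure layout, could look like this:

    // Self-contained sketch of a closure barrier: a scratch word holds a
    // reference count (high bits) plus flags (low bits); the callback fires
    // only when every sub-op in the batch has completed.
    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t kMayCoverWrite = 1u << 0;  // low bits: flags (unused here)
    constexpr uint64_t kFirstRef = 1u << 16;      // high bits: reference count

    struct BarrierClosure {
      uint64_t scratch = 0;
      void (*cb)(void*) = nullptr;
      void* arg = nullptr;
    };

    // Each sub-op in the batch takes one reference before it starts.
    void AddBarrier(BarrierClosure* c) { c->scratch += kFirstRef; }

    // Each sub-op drops its reference when it finishes; the last one fires cb.
    void CompleteBarrier(BarrierClosure* c) {
      c->scratch -= kFirstRef;
      if (c->scratch < kFirstRef) {  // no refs left; only flag bits remain
        c->cb(c->arg);
      }
    }

    int main() {
      BarrierClosure done{kFirstRef /* initial ref held by the batch itself */,
                          [](void*) { std::puts("batch complete"); }, nullptr};
      AddBarrier(&done);       // e.g. send_initial_metadata
      AddBarrier(&done);       // e.g. send_message
      CompleteBarrier(&done);  // send_initial_metadata finished
      CompleteBarrier(&done);  // send_message finished
      CompleteBarrier(&done);  // batch's own ref released -> "batch complete"
    }

With recv_trailing_metadata now carrying its own recv_trailing_metadata_ready closure, the stats flag bit is no longer needed, which is why CLOSURE_BARRIER_STATS_BIT is deleted and MAY_COVER_WRITE shifts down to bit 0.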
data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc (continued)
@@ -1351,9 +1352,14 @@ static void perform_stream_op_locked(void* stream_op,
   }
 
   grpc_closure* on_complete = op->on_complete;
+  // TODO(roth): This is a hack needed because we use data inside of the
+  // closure itself to do the barrier calculation (i.e., to ensure that
+  // we don't schedule the closure until all ops in the batch have been
+  // completed). This can go away once we move to a new C++ closure API
+  // that provides the ability to create a barrier closure.
   if (on_complete == nullptr) {
-    on_complete =
-        GRPC_CLOSURE_CREATE(do_nothing, nullptr, grpc_schedule_on_exec_ctx);
+    on_complete = GRPC_CLOSURE_INIT(&op->handler_private.closure, do_nothing,
+                                    nullptr, grpc_schedule_on_exec_ctx);
   }
 
   /* use final_data as a barrier until enqueue time; the inital counter is
@@ -1361,12 +1367,6 @@ static void perform_stream_op_locked(void* stream_op,
   on_complete->next_data.scratch = CLOSURE_BARRIER_FIRST_REF_BIT;
   on_complete->error_data.error = GRPC_ERROR_NONE;
 
-  if (op->collect_stats) {
-    GPR_ASSERT(s->collecting_stats == nullptr);
-    s->collecting_stats = op_payload->collect_stats.collect_stats;
-    on_complete->next_data.scratch |= CLOSURE_BARRIER_STATS_BIT;
-  }
-
   if (op->cancel_stream) {
     GRPC_STATS_INC_HTTP2_OP_CANCEL();
     grpc_chttp2_cancel_stream(t, s, op_payload->cancel_stream.cancel_error);
@@ -1600,8 +1600,11 @@ static void perform_stream_op_locked(void* stream_op,
 
   if (op->recv_trailing_metadata) {
     GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA();
+    GPR_ASSERT(s->collecting_stats == nullptr);
+    s->collecting_stats = op_payload->recv_trailing_metadata.collect_stats;
     GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr);
-    s->recv_trailing_metadata_finished = add_closure_barrier(on_complete);
+    s->recv_trailing_metadata_finished =
+        op_payload->recv_trailing_metadata.recv_trailing_metadata_ready;
     s->recv_trailing_metadata =
         op_payload->recv_trailing_metadata.recv_trailing_metadata;
     s->final_metadata_requested = true;
@@ -1960,11 +1963,12 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
   }
   if (s->read_closed && s->frame_storage.length == 0 && !pending_data &&
       s->recv_trailing_metadata_finished != nullptr) {
+    grpc_transport_move_stats(&s->stats, s->collecting_stats);
+    s->collecting_stats = nullptr;
     grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[1],
                                                  s->recv_trailing_metadata);
-    grpc_chttp2_complete_closure_step(
-        t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE,
-        "recv_trailing_metadata_finished");
+    null_then_run_closure(&s->recv_trailing_metadata_finished,
+                          GRPC_ERROR_NONE);
     }
   }
 }
@@ -2012,6 +2016,10 @@ static void remove_stream(grpc_chttp2_transport* t, uint32_t id,
 
 void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
                                grpc_error* due_to_error) {
+  GRPC_CLOSURE_LIST_SCHED(&s->run_after_write);
+  if (grpc_chttp2_list_remove_waiting_for_write_stream(t, s)) {
+    GRPC_CHTTP2_STREAM_UNREF(s, "chttp2:pending_write_closure");
+  }
   if (!t->is_client && !s->sent_trailing_metadata &&
       grpc_error_has_clear_grpc_status(due_to_error)) {
     close_from_api(t, s, due_to_error);
@@ -2666,6 +2674,9 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
 
 static void start_keepalive_ping_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
+  if (error != GRPC_ERROR_NONE) {
+    return;
+  }
   GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
   grpc_timer_init(&t->keepalive_watchdog_timer,
                   grpc_core::ExecCtx::Get()->Now() + t->keepalive_timeout,
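The new early return in start_keepalive_ping_locked guards against arming the keepalive watchdog when the closure fires with a non-OK error, for example when the keepalive timer is cancelled during shutdown. A generic sketch of that guard for timer-style callbacks, with invustrative invented names:

    // Sketch of the guard pattern above: a timer callback that receives a
    // non-OK status (e.g. because the timer was cancelled at shutdown) must
    // return early instead of taking refs or arming follow-up work.
    #include <cstdio>

    enum class Status { kOk, kCancelled };

    struct Transport {
      bool watchdog_armed = false;
    };

    void StartKeepalivePing(Transport* t, Status status) {
      if (status != Status::kOk) {
        return;  // cancelled: do not ref the transport or arm the watchdog
      }
      // Stands in for GRPC_CHTTP2_REF_TRANSPORT + grpc_timer_init above.
      t->watchdog_armed = true;
    }

    int main() {
      Transport t;
      StartKeepalivePing(&t, Status::kCancelled);
      std::printf("armed after cancel: %d\n", t.watchdog_armed);  // 0
      StartKeepalivePing(&t, Status::kOk);
      std::printf("armed after ok: %d\n", t.watchdog_armed);      // 1
    }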
data/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -1622,7 +1622,7 @@ grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
                                             grpc_chttp2_transport* t,
                                             grpc_chttp2_stream* s,
                                             grpc_slice slice, int is_last) {
-  GPR_TIMER_SCOPE("grpc_chttp2_hpack_parser_parse", 0);
+  GPR_TIMER_SCOPE("grpc_chttp2_header_parser_parse", 0);
   grpc_chttp2_hpack_parser* parser =
       static_cast<grpc_chttp2_hpack_parser*>(hpack_parser);
   if (s != nullptr) {
data/src/core/ext/transport/chttp2/transport/internal.h
@@ -54,6 +54,8 @@ typedef enum {
   /** streams that are waiting to start because there are too many concurrent
       streams on the connection */
   GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY,
+  /** streams with closures waiting to be run on a write **/
+  GRPC_CHTTP2_LIST_WAITING_FOR_WRITE,
   STREAM_LIST_COUNT /* must be last */
 } grpc_chttp2_stream_list_id;
 
@@ -431,9 +433,6 @@ struct grpc_chttp2_transport {
    */
   grpc_error* close_transport_on_writes_finished;
 
-  /* a list of closures to run after writes are finished */
-  grpc_closure_list run_after_write;
-
   /* buffer pool state */
   /** have we scheduled a benign cleanup? */
   bool benign_reclaimer_registered;
@@ -584,6 +583,7 @@ struct grpc_chttp2_stream {
 
   grpc_slice_buffer flow_controlled_buffer;
 
+  grpc_closure_list run_after_write;
   grpc_chttp2_write_cb* on_flow_controlled_cbs;
   grpc_chttp2_write_cb* on_write_finished_cbs;
   grpc_chttp2_write_cb* finish_after_write;
@@ -686,6 +686,13 @@ bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport* t,
 bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport* t,
                                                grpc_chttp2_stream* s);
 
+bool grpc_chttp2_list_add_waiting_for_write_stream(grpc_chttp2_transport* t,
+                                                   grpc_chttp2_stream* s);
+bool grpc_chttp2_list_pop_waiting_for_write_stream(grpc_chttp2_transport* t,
+                                                   grpc_chttp2_stream** s);
+bool grpc_chttp2_list_remove_waiting_for_write_stream(grpc_chttp2_transport* t,
+                                                      grpc_chttp2_stream* s);
+
 /********* Flow Control ***************/
 
 // Takes in a flow control action and performs all the needed operations.
data/src/core/ext/transport/chttp2/transport/stream_lists.cc
@@ -35,6 +35,8 @@ static const char* stream_list_id_string(grpc_chttp2_stream_list_id id) {
       return "stalled_by_stream";
     case GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY:
       return "waiting_for_concurrency";
+    case GRPC_CHTTP2_LIST_WAITING_FOR_WRITE:
+      return "waiting_for_write";
     case STREAM_LIST_COUNT:
       GPR_UNREACHABLE_CODE(return "unknown");
   }
@@ -214,3 +216,18 @@ bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport* t,
                                                grpc_chttp2_stream* s) {
   return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM);
 }
+
+bool grpc_chttp2_list_add_waiting_for_write_stream(grpc_chttp2_transport* t,
+                                                   grpc_chttp2_stream* s) {
+  return stream_list_add(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_WRITE);
+}
+
+bool grpc_chttp2_list_pop_waiting_for_write_stream(grpc_chttp2_transport* t,
+                                                   grpc_chttp2_stream** s) {
+  return stream_list_pop(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_WRITE);
+}
+
+bool grpc_chttp2_list_remove_waiting_for_write_stream(grpc_chttp2_transport* t,
+                                                      grpc_chttp2_stream* s) {
+  return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_WAITING_FOR_WRITE);
+}
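Taken together with the set_write_state and grpc_chttp2_cancel_stream hunks above, these helpers move run_after_write from the transport to the stream: a stream holds one extra ref while it sits on the new waiting_for_write list (add returns true only on first insertion), and the ref is dropped when the stream is popped at write-idle time or removed on cancellation. A toy model of that queued-while-referenced convention, using invented types in place of the intrusive stream lists:

    // Sketch of the ownership convention for the waiting_for_write list:
    // one ref is held per stream while it is queued, no matter how many
    // closures it accumulates.
    #include <cstdio>
    #include <set>

    struct Stream {
      int refs = 1;
    };

    struct Transport {
      std::set<Stream*> waiting_for_write;  // stands in for the intrusive list
    };

    bool AddWaitingForWrite(Transport* t, Stream* s) {
      return t->waiting_for_write.insert(s).second;  // true only on first insert
    }

    bool PopWaitingForWrite(Transport* t, Stream** s) {
      if (t->waiting_for_write.empty()) return false;
      *s = *t->waiting_for_write.begin();
      t->waiting_for_write.erase(t->waiting_for_write.begin());
      return true;
    }

    int main() {
      Transport t;
      Stream s;
      if (AddWaitingForWrite(&t, &s)) s.refs++;  // ref held while queued
      if (AddWaitingForWrite(&t, &s)) s.refs++;  // no-op: already queued
      Stream* popped;
      while (PopWaitingForWrite(&t, &popped)) {
        std::puts("scheduling run_after_write closures");
        popped->refs--;  // drop the list's ref, as the UNREF above does
      }
      std::printf("refs=%d\n", s.refs);  // back to 1
    }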
data/src/core/ext/transport/chttp2/transport/writing.cc
@@ -139,22 +139,27 @@ static bool update_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
 
 static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
                          const char* staller) {
-  gpr_log(
-      GPR_DEBUG,
-      "%s:%p stream %d stalled by %s [fc:pending=%" PRIdPTR
-      ":pending-compressed=%" PRIdPTR ":flowed=%" PRId64
-      ":peer_initwin=%d:t_win=%" PRId64 ":s_win=%d:s_delta=%" PRId64 "]",
-      t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
-      s->compressed_data_buffer.length, s->flow_controlled_bytes_flowed,
-      t->settings[GRPC_ACKED_SETTINGS]
-                 [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
-      t->flow_control->remote_window(),
-      static_cast<uint32_t> GPR_MAX(
-          0,
-          s->flow_control->remote_window_delta() +
-              (int64_t)t->settings[GRPC_PEER_SETTINGS]
-                                  [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
-      s->flow_control->remote_window_delta());
+  if (grpc_flowctl_trace.enabled()) {
+    gpr_log(
+        GPR_DEBUG,
+        "%s:%p stream %d moved to stalled list by %s. This is FULLY expected "
+        "to happen in a healthy program that is not seeing flow control stalls."
+        " However, if you know that there are unwanted stalls, here is some "
+        "helpful data: [fc:pending=%" PRIdPTR ":pending-compressed=%" PRIdPTR
+        ":flowed=%" PRId64 ":peer_initwin=%d:t_win=%" PRId64
+        ":s_win=%d:s_delta=%" PRId64 "]",
+        t->peer_string, t, s->id, staller, s->flow_controlled_buffer.length,
+        s->compressed_data_buffer.length, s->flow_controlled_bytes_flowed,
+        t->settings[GRPC_ACKED_SETTINGS]
+                   [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        t->flow_control->remote_window(),
+        static_cast<uint32_t> GPR_MAX(
+            0,
+            s->flow_control->remote_window_delta() +
+                (int64_t)t->settings[GRPC_PEER_SETTINGS]
+                                    [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
+        s->flow_control->remote_window_delta());
+  }
 }
 
 static bool stream_ref_if_not_destroyed(gpr_refcount* r) {
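Besides softening the message, the report_stall hunk gates the expensive gpr_log formatting behind grpc_flowctl_trace.enabled(), so the many format arguments are only evaluated when someone has opted into the tracer. A generic sketch of that gating pattern, with a simplified TraceFlag type and invented names:

    // Guard expensive log formatting behind a cheap boolean check so the
    // cost is only paid when the trace flag is actually enabled.
    #include <atomic>
    #include <cstdio>

    class TraceFlag {
     public:
      explicit TraceFlag(bool on) : enabled_(on) {}
      bool enabled() const { return enabled_.load(std::memory_order_relaxed); }
      void set(bool on) { enabled_.store(on, std::memory_order_relaxed); }
     private:
      std::atomic<bool> enabled_;
    };

    TraceFlag flowctl_trace(false);  // off by default, like most gRPC tracers

    void report_stall(int stream_id, const char* staller) {
      if (flowctl_trace.enabled()) {
        // Formatting happens only when tracing was requested (in gRPC this
        // is typically switched on via the GRPC_TRACE environment variable).
        std::fprintf(stderr, "stream %d moved to stalled list by %s\n",
                     stream_id, staller);
      }
    }

    int main() {
      report_stall(1, "transport");  // silent: tracer disabled
      flowctl_trace.set(true);
      report_stall(1, "transport");  // now logged
    }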
data/src/core/ext/transport/inproc/inproc_transport.cc
@@ -120,7 +120,6 @@ typedef struct inproc_stream {
   struct inproc_stream* stream_list_next;
 } inproc_stream;
 
-static grpc_closure do_nothing_closure;
 static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
 static void op_state_machine(void* arg, grpc_error* error);
 
@@ -373,6 +372,10 @@ static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
                                          const char* msg) {
   int is_sm = static_cast<int>(op == s->send_message_op);
   int is_stm = static_cast<int>(op == s->send_trailing_md_op);
+  // TODO(vjpai): We should not consider the recv ops here, since they
+  // have their own callbacks. We should invoke a batch's on_complete
+  // as soon as all of the batch's send ops are complete, even if there
+  // are still recv ops pending.
   int is_rim = static_cast<int>(op == s->recv_initial_md_op);
   int is_rm = static_cast<int>(op == s->recv_message_op);
   int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
@@ -496,6 +499,11 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
     s->send_trailing_md_op = nullptr;
   }
   if (s->recv_trailing_md_op) {
+    INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-metadata-ready %p",
+               s, error);
+    GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->payload->recv_trailing_metadata
+                           .recv_trailing_metadata_ready,
+                       GRPC_ERROR_REF(error));
     INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %p",
                s, error);
     complete_if_batch_end_locked(
@@ -638,6 +646,12 @@ static void op_state_machine(void* arg, grpc_error* error) {
     }
     s->trailing_md_sent = true;
     if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
+      INPROC_LOG(GPR_INFO,
+                 "op_state_machine %p scheduling trailing-metadata-ready", s);
+      GRPC_CLOSURE_SCHED(
+          s->recv_trailing_md_op->payload->recv_trailing_metadata
+              .recv_trailing_metadata_ready,
+          GRPC_ERROR_NONE);
       INPROC_LOG(GPR_INFO,
                  "op_state_machine %p scheduling trailing-md-on-complete", s);
       GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
@@ -711,6 +725,12 @@ static void op_state_machine(void* arg, grpc_error* error) {
   }
   if (s->recv_trailing_md_op && s->t->is_client && other &&
       other->send_message_op) {
+    INPROC_LOG(GPR_INFO,
+               "op_state_machine %p scheduling trailing-metadata-ready %p", s,
+               GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->payload->recv_trailing_metadata
+                           .recv_trailing_metadata_ready,
+                       GRPC_ERROR_NONE);
     maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
   }
   if (s->to_read_trailing_md_filled) {
@@ -766,6 +786,10 @@ static void op_state_machine(void* arg, grpc_error* error) {
       INPROC_LOG(GPR_INFO,
                  "op_state_machine %p scheduling trailing-md-on-complete %p",
                  s, new_err);
+      GRPC_CLOSURE_SCHED(
+          s->recv_trailing_md_op->payload->recv_trailing_metadata
+              .recv_trailing_metadata_ready,
+          GRPC_ERROR_REF(new_err));
       GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
                          GRPC_ERROR_REF(new_err));
       s->recv_trailing_md_op = nullptr;
@@ -859,6 +883,9 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
     // couldn't complete that because we hadn't yet sent out trailing
     // md, now's the chance
     if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
+      GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->payload->recv_trailing_metadata
+                             .recv_trailing_metadata_ready,
+                         GRPC_ERROR_REF(s->cancel_self_error));
       complete_if_batch_end_locked(
           s, s->cancel_self_error, s->recv_trailing_md_op,
           "cancel_stream scheduling trailing-md-on-complete");
@@ -873,6 +900,8 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
   return ret;
 }
 
+static void do_nothing(void* arg, grpc_error* error) {}
+
 static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
                               grpc_transport_stream_op_batch* op) {
   INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
@@ -892,8 +921,14 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
   }
   grpc_error* error = GRPC_ERROR_NONE;
   grpc_closure* on_complete = op->on_complete;
+  // TODO(roth): This is a hack needed because we use data inside of the
+  // closure itself to do the barrier calculation (i.e., to ensure that
+  // we don't schedule the closure until all ops in the batch have been
+  // completed). This can go away once we move to a new C++ closure API
+  // that provides the ability to create a barrier closure.
   if (on_complete == nullptr) {
-    on_complete = &do_nothing_closure;
+    on_complete = GRPC_CLOSURE_INIT(&op->handler_private.closure, do_nothing,
+                                    nullptr, grpc_schedule_on_exec_ctx);
   }
 
   if (op->cancel_stream) {
@@ -1026,6 +1061,15 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
       GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready,
                          GRPC_ERROR_REF(error));
     }
+    if (op->recv_trailing_metadata) {
+      INPROC_LOG(
+          GPR_INFO,
+          "perform_stream_op error %p scheduling trailing-metadata-ready %p",
+          s, error);
+      GRPC_CLOSURE_SCHED(
+          op->payload->recv_trailing_metadata.recv_trailing_metadata_ready,
+          GRPC_ERROR_REF(error));
+    }
   }
   INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %p", s,
              error);
@@ -1129,12 +1173,8 @@ static grpc_endpoint* get_endpoint(grpc_transport* t) { return nullptr; }
 /*******************************************************************************
  * GLOBAL INIT AND DESTROY
  */
-static void do_nothing(void* arg, grpc_error* error) {}
-
 void grpc_inproc_transport_init(void) {
   grpc_core::ExecCtx exec_ctx;
-  GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, nullptr,
-                    grpc_schedule_on_exec_ctx);
   g_empty_slice = grpc_slice_from_static_buffer(nullptr, 0);
 
   grpc_slice key_tmp = grpc_slice_from_static_string(":path");
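Both transports also stop sharing one do_nothing closure (a static in inproc, a heap-allocated GRPC_CLOSURE_CREATE in chttp2) and instead GRPC_CLOSURE_INIT the batch's own handler_private.closure storage. That matters here because the barrier hack stores per-batch state (the scratch refcount and error) inside the closure itself, so a closure shared across concurrent batches would be corrupted. A sketch contrasting the two allocation styles, with simplified stand-ins for the real macros:

    // CREATE heap-allocates a closure; INIT reuses caller-provided storage,
    // here per-batch scratch space analogous to op->handler_private.closure,
    // avoiding an allocation and any shared mutable static state.
    #include <cstdio>

    using Callback = void (*)(void*, int);

    struct Closure {
      Callback cb;
      void* arg;
    };

    struct Batch {
      Closure handler_private_closure;  // scratch storage owned by the batch
    };

    Closure* ClosureCreate(Callback cb, void* arg) {
      return new Closure{cb, arg};  // caller must eventually delete
    }

    Closure* ClosureInit(Closure* storage, Callback cb, void* arg) {
      *storage = Closure{cb, arg};  // no allocation; lifetime = storage's
      return storage;
    }

    static void do_nothing(void*, int) {}

    int main() {
      Batch batch;
      // Per-batch storage: safe even with many batches in flight at once.
      Closure* c = ClosureInit(&batch.handler_private_closure, do_nothing, nullptr);
      c->cb(c->arg, 0);
      // Heap variant, as the old chttp2 code path did:
      Closure* heap = ClosureCreate(do_nothing, nullptr);
      heap->cb(heap->arg, 0);
      delete heap;
      std::puts("ok");
    }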