wearefair-grpc 1.3.1.pre.c → 1.4.0.fair

Files changed (219)
  1. checksums.yaml +4 -4
  2. data/Makefile +418 -126
  3. data/include/grpc/grpc.h +15 -69
  4. data/include/grpc/grpc_security.h +1 -1
  5. data/include/grpc/impl/codegen/compression_types.h +3 -4
  6. data/include/grpc/impl/codegen/gpr_types.h +0 -1
  7. data/include/grpc/impl/codegen/grpc_types.h +69 -3
  8. data/include/grpc/impl/codegen/port_platform.h +6 -0
  9. data/include/grpc/impl/codegen/slice.h +2 -1
  10. data/include/grpc/load_reporting.h +6 -6
  11. data/include/grpc/slice.h +25 -3
  12. data/include/grpc/slice_buffer.h +4 -0
  13. data/src/core/ext/census/context.c +1 -1
  14. data/src/core/ext/census/resource.c +3 -1
  15. data/src/core/ext/filters/client_channel/channel_connectivity.c +1 -1
  16. data/src/core/ext/filters/client_channel/client_channel.c +158 -100
  17. data/src/core/ext/filters/client_channel/client_channel_plugin.c +3 -2
  18. data/src/core/ext/filters/client_channel/lb_policy.c +2 -1
  19. data/src/core/ext/filters/client_channel/lb_policy.h +5 -6
  20. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +153 -0
  21. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +42 -0
  22. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +344 -88
  23. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +133 -0
  24. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +65 -0
  25. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +47 -5
  26. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +6 -0
  27. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +19 -8
  28. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +63 -34
  29. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +2 -1
  30. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +13 -12
  31. data/src/core/ext/filters/client_channel/lb_policy_factory.c +28 -5
  32. data/src/core/ext/filters/client_channel/lb_policy_factory.h +18 -4
  33. data/src/core/ext/filters/client_channel/parse_address.c +37 -7
  34. data/src/core/ext/filters/client_channel/parse_address.h +11 -8
  35. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c +3 -3
  36. data/src/core/ext/filters/client_channel/subchannel.c +19 -16
  37. data/src/core/ext/filters/client_channel/subchannel.h +1 -0
  38. data/src/core/ext/filters/client_channel/uri_parser.c +36 -22
  39. data/src/core/ext/filters/client_channel/uri_parser.h +1 -1
  40. data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.c +42 -17
  41. data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.h +8 -9
  42. data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.c +19 -11
  43. data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.h +3 -6
  44. data/src/core/ext/filters/http/http_filters_plugin.c +104 -0
  45. data/src/core/{lib/channel/compress_filter.c → ext/filters/http/message_compress/message_compress_filter.c} +124 -23
  46. data/src/core/{lib/channel/compress_filter.h → ext/filters/http/message_compress/message_compress_filter.h} +5 -6
  47. data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.c +4 -6
  48. data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.h +3 -3
  49. data/src/core/ext/filters/load_reporting/load_reporting.c +2 -25
  50. data/src/core/ext/filters/load_reporting/load_reporting_filter.c +26 -1
  51. data/src/core/ext/filters/max_age/max_age_filter.c +14 -14
  52. data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.c +91 -47
  53. data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.h +3 -3
  54. data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
  55. data/src/core/ext/transport/chttp2/server/chttp2_server.c +2 -2
  56. data/src/core/ext/transport/chttp2/transport/bin_decoder.c +2 -2
  57. data/src/core/ext/transport/chttp2/transport/bin_encoder.c +3 -3
  58. data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +296 -172
  59. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -2
  60. data/src/core/ext/transport/chttp2/transport/frame_data.c +203 -164
  61. data/src/core/ext/transport/chttp2/transport/frame_data.h +8 -14
  62. data/src/core/ext/transport/chttp2/transport/frame_goaway.c +1 -1
  63. data/src/core/ext/transport/chttp2/transport/frame_ping.c +1 -1
  64. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
  65. data/src/core/ext/transport/chttp2/transport/frame_settings.c +5 -5
  66. data/src/core/ext/transport/chttp2/transport/frame_window_update.c +1 -1
  67. data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +4 -4
  68. data/src/core/ext/transport/chttp2/transport/hpack_parser.c +2 -4
  69. data/src/core/ext/transport/chttp2/transport/hpack_table.c +4 -3
  70. data/src/core/ext/transport/chttp2/transport/internal.h +50 -33
  71. data/src/core/ext/transport/chttp2/transport/parsing.c +10 -11
  72. data/src/core/ext/transport/chttp2/transport/writing.c +32 -13
  73. data/src/core/lib/channel/channel_args.c +28 -9
  74. data/src/core/lib/channel/channel_args.h +5 -1
  75. data/src/core/lib/channel/channel_stack.c +1 -1
  76. data/src/core/lib/channel/channel_stack.h +2 -2
  77. data/src/core/lib/channel/channel_stack_builder.c +13 -1
  78. data/src/core/lib/channel/channel_stack_builder.h +5 -1
  79. data/src/core/lib/channel/connected_channel.c +3 -1
  80. data/src/core/lib/channel/context.h +2 -2
  81. data/src/core/lib/compression/message_compress.c +2 -2
  82. data/src/core/lib/debug/trace.c +13 -6
  83. data/src/core/lib/debug/trace.h +27 -1
  84. data/src/core/lib/http/httpcli.c +1 -1
  85. data/src/core/lib/http/httpcli_security_connector.c +6 -10
  86. data/src/core/lib/http/parser.c +2 -2
  87. data/src/core/lib/http/parser.h +2 -1
  88. data/src/core/lib/iomgr/combiner.c +6 -6
  89. data/src/core/lib/iomgr/combiner.h +2 -1
  90. data/src/core/lib/iomgr/error.c +12 -5
  91. data/src/core/lib/iomgr/error.h +13 -13
  92. data/src/core/lib/iomgr/ev_epoll1_linux.c +984 -0
  93. data/src/core/lib/iomgr/ev_epoll1_linux.h +44 -0
  94. data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +2146 -0
  95. data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h +43 -0
  96. data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +1337 -0
  97. data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h +43 -0
  98. data/src/core/lib/iomgr/ev_epollex_linux.c +1511 -0
  99. data/src/core/lib/iomgr/ev_epollex_linux.h +43 -0
  100. data/src/core/lib/iomgr/{ev_epoll_linux.c → ev_epollsig_linux.c} +24 -31
  101. data/src/core/lib/iomgr/{ev_epoll_linux.h → ev_epollsig_linux.h} +4 -4
  102. data/src/core/lib/iomgr/ev_poll_posix.c +12 -27
  103. data/src/core/lib/iomgr/ev_poll_posix.h +2 -2
  104. data/src/core/lib/iomgr/ev_posix.c +22 -8
  105. data/src/core/lib/iomgr/ev_posix.h +4 -3
  106. data/src/core/lib/iomgr/exec_ctx.c +5 -0
  107. data/src/core/lib/iomgr/exec_ctx.h +2 -0
  108. data/src/core/lib/iomgr/iomgr.c +4 -0
  109. data/src/core/lib/iomgr/iomgr.h +3 -0
  110. data/src/core/lib/iomgr/is_epollexclusive_available.c +116 -0
  111. data/src/core/lib/iomgr/is_epollexclusive_available.h +41 -0
  112. data/src/core/lib/iomgr/lockfree_event.c +16 -0
  113. data/src/core/lib/iomgr/pollset.h +2 -5
  114. data/src/core/lib/iomgr/pollset_uv.c +1 -1
  115. data/src/core/lib/iomgr/pollset_windows.c +3 -3
  116. data/src/core/lib/iomgr/resource_quota.c +9 -8
  117. data/src/core/lib/iomgr/resource_quota.h +2 -1
  118. data/src/core/lib/iomgr/sockaddr_utils.h +1 -1
  119. data/src/core/lib/iomgr/socket_mutator.h +2 -0
  120. data/src/core/lib/iomgr/sys_epoll_wrapper.h +43 -0
  121. data/src/core/lib/iomgr/tcp_client_posix.c +6 -6
  122. data/src/core/lib/iomgr/tcp_client_uv.c +3 -3
  123. data/src/core/lib/iomgr/tcp_posix.c +7 -7
  124. data/src/core/lib/iomgr/tcp_posix.h +2 -1
  125. data/src/core/lib/iomgr/tcp_server_posix.c +1 -1
  126. data/src/core/lib/iomgr/tcp_uv.c +6 -6
  127. data/src/core/lib/iomgr/tcp_uv.h +2 -1
  128. data/src/core/lib/iomgr/tcp_windows.c +1 -1
  129. data/src/core/lib/iomgr/timer_generic.c +24 -25
  130. data/src/core/lib/iomgr/timer_manager.c +276 -0
  131. data/src/core/lib/iomgr/timer_manager.h +52 -0
  132. data/src/core/lib/iomgr/timer_uv.c +6 -0
  133. data/src/core/lib/iomgr/udp_server.c +42 -9
  134. data/src/core/lib/iomgr/udp_server.h +3 -1
  135. data/src/core/lib/security/credentials/credentials.c +0 -1
  136. data/src/core/lib/security/credentials/fake/fake_credentials.c +23 -0
  137. data/src/core/lib/security/credentials/fake/fake_credentials.h +12 -9
  138. data/src/core/lib/security/credentials/google_default/google_default_credentials.c +1 -1
  139. data/src/core/lib/security/credentials/jwt/jwt_credentials.c +1 -1
  140. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +1 -1
  141. data/src/core/lib/security/credentials/ssl/ssl_credentials.c +24 -53
  142. data/src/core/lib/security/transport/client_auth_filter.c +9 -3
  143. data/src/core/lib/security/transport/secure_endpoint.c +7 -7
  144. data/src/core/lib/security/transport/secure_endpoint.h +1 -1
  145. data/src/core/lib/security/transport/security_connector.c +32 -51
  146. data/src/core/lib/security/transport/security_connector.h +10 -14
  147. data/src/core/lib/slice/b64.c +1 -1
  148. data/src/core/lib/slice/percent_encoding.c +3 -3
  149. data/src/core/lib/slice/slice.c +66 -33
  150. data/src/core/lib/slice/slice_buffer.c +25 -6
  151. data/src/core/lib/slice/slice_hash_table.c +33 -35
  152. data/src/core/lib/slice/slice_hash_table.h +7 -12
  153. data/src/core/lib/support/atomic.h +45 -0
  154. data/src/core/lib/support/atomic_with_atm.h +70 -0
  155. data/src/core/lib/support/atomic_with_std.h +48 -0
  156. data/src/core/lib/support/avl.c +14 -14
  157. data/src/core/lib/support/memory.h +74 -0
  158. data/src/core/lib/support/mpscq.c +12 -1
  159. data/src/core/lib/support/mpscq.h +4 -0
  160. data/src/core/lib/support/stack_lockfree.c +3 -36
  161. data/src/core/lib/support/time_posix.c +8 -0
  162. data/src/core/lib/support/tmpfile_posix.c +10 -10
  163. data/src/core/lib/surface/alarm.c +3 -1
  164. data/src/core/lib/surface/api_trace.c +2 -1
  165. data/src/core/lib/surface/api_trace.h +2 -2
  166. data/src/core/lib/surface/byte_buffer_reader.c +1 -1
  167. data/src/core/lib/surface/call.c +65 -22
  168. data/src/core/lib/surface/call.h +4 -2
  169. data/src/core/lib/surface/channel_init.c +2 -19
  170. data/src/core/lib/surface/channel_stack_type.c +18 -0
  171. data/src/core/lib/surface/channel_stack_type.h +2 -0
  172. data/src/core/lib/surface/completion_queue.c +249 -83
  173. data/src/core/lib/surface/completion_queue.h +18 -13
  174. data/src/core/lib/surface/completion_queue_factory.c +24 -9
  175. data/src/core/lib/surface/init.c +1 -52
  176. data/src/core/lib/surface/{lame_client.c → lame_client.cc} +37 -26
  177. data/src/core/lib/surface/server.c +50 -27
  178. data/src/core/lib/surface/server.h +2 -1
  179. data/src/core/lib/surface/version.c +2 -2
  180. data/src/core/lib/transport/bdp_estimator.c +20 -9
  181. data/src/core/lib/transport/bdp_estimator.h +5 -1
  182. data/src/core/lib/transport/byte_stream.c +23 -9
  183. data/src/core/lib/transport/byte_stream.h +15 -6
  184. data/src/core/lib/transport/connectivity_state.c +6 -6
  185. data/src/core/lib/transport/connectivity_state.h +2 -1
  186. data/src/core/lib/transport/service_config.c +6 -13
  187. data/src/core/lib/transport/service_config.h +2 -2
  188. data/src/core/lib/transport/static_metadata.c +403 -389
  189. data/src/core/lib/transport/static_metadata.h +127 -114
  190. data/src/core/plugin_registry/grpc_plugin_registry.c +12 -0
  191. data/src/core/tsi/fake_transport_security.c +5 -4
  192. data/src/core/tsi/ssl_transport_security.c +71 -82
  193. data/src/core/tsi/ssl_transport_security.h +39 -61
  194. data/src/core/tsi/transport_security.c +83 -2
  195. data/src/core/tsi/transport_security.h +27 -2
  196. data/src/core/tsi/transport_security_adapter.c +236 -0
  197. data/src/core/tsi/transport_security_adapter.h +62 -0
  198. data/src/core/tsi/transport_security_interface.h +179 -66
  199. data/src/ruby/ext/grpc/extconf.rb +2 -1
  200. data/src/ruby/ext/grpc/rb_byte_buffer.c +8 -6
  201. data/src/ruby/ext/grpc/rb_call.c +56 -48
  202. data/src/ruby/ext/grpc/rb_call.h +3 -4
  203. data/src/ruby/ext/grpc/rb_call_credentials.c +23 -22
  204. data/src/ruby/ext/grpc/rb_channel.c +45 -29
  205. data/src/ruby/ext/grpc/rb_channel_args.c +11 -9
  206. data/src/ruby/ext/grpc/rb_channel_credentials.c +16 -12
  207. data/src/ruby/ext/grpc/rb_completion_queue.c +7 -9
  208. data/src/ruby/ext/grpc/rb_compression_options.c +7 -6
  209. data/src/ruby/ext/grpc/rb_event_thread.c +10 -12
  210. data/src/ruby/ext/grpc/rb_event_thread.h +1 -2
  211. data/src/ruby/ext/grpc/rb_grpc.c +11 -15
  212. data/src/ruby/ext/grpc/rb_grpc.h +2 -2
  213. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +14 -6
  214. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +22 -10
  215. data/src/ruby/ext/grpc/rb_server.c +26 -28
  216. data/src/ruby/lib/grpc/version.rb +1 -1
  217. metadata +40 -18
  218. data/src/ruby/lib/grpc/grpc_c.bundle +0 -0
  219. data/src/ruby/lib/grpc/grpc_c.so +0 -0
data/src/core/ext/filters/client_channel/client_channel.c
@@ -49,9 +49,9 @@
  #include "src/core/ext/filters/client_channel/resolver_registry.h"
  #include "src/core/ext/filters/client_channel/retry_throttle.h"
  #include "src/core/ext/filters/client_channel/subchannel.h"
+ #include "src/core/ext/filters/deadline/deadline_filter.h"
  #include "src/core/lib/channel/channel_args.h"
  #include "src/core/lib/channel/connected_channel.h"
- #include "src/core/lib/channel/deadline_filter.h"
  #include "src/core/lib/iomgr/combiner.h"
  #include "src/core/lib/iomgr/iomgr.h"
  #include "src/core/lib/iomgr/polling_entity.h"
@@ -96,17 +96,10 @@ static void method_parameters_unref(method_parameters *method_params) {
  }
  }

- static void *method_parameters_copy(void *value) {
- return method_parameters_ref(value);
- }
-
  static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
  method_parameters_unref(value);
  }

- static const grpc_slice_hash_table_vtable method_parameters_vtable = {
- method_parameters_free, method_parameters_copy};
-
  static bool parse_wait_for_ready(grpc_json *field,
  wait_for_ready_value *wait_for_ready) {
  if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
@@ -183,6 +176,8 @@ typedef struct client_channel_channel_data {
  grpc_resolver *resolver;
  /** have we started resolving this channel */
  bool started_resolving;
+ /** is deadline checking enabled? */
+ bool deadline_checking_enabled;
  /** client channel factory */
  grpc_client_channel_factory *client_channel_factory;

@@ -236,14 +231,23 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
  grpc_connectivity_state state,
  grpc_error *error,
  const char *reason) {
- if ((state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
- state == GRPC_CHANNEL_SHUTDOWN) &&
- chand->lb_policy != NULL) {
- /* cancel picks with wait_for_ready=false */
- grpc_lb_policy_cancel_picks_locked(
- exec_ctx, chand->lb_policy,
- /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
- /* check= */ 0, GRPC_ERROR_REF(error));
+ /* TODO: Improve failure handling:
+ * - Make it possible for policies to return GRPC_CHANNEL_TRANSIENT_FAILURE.
+ * - Hand over pending picks from old policies during the switch that happens
+ * when resolver provides an update. */
+ if (chand->lb_policy != NULL) {
+ if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ /* cancel picks with wait_for_ready=false */
+ grpc_lb_policy_cancel_picks_locked(
+ exec_ctx, chand->lb_policy,
+ /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
+ /* check= */ 0, GRPC_ERROR_REF(error));
+ } else if (state == GRPC_CHANNEL_SHUTDOWN) {
+ /* cancel all picks */
+ grpc_lb_policy_cancel_picks_locked(exec_ctx, chand->lb_policy,
+ /* mask= */ 0, /* check= */ 0,
+ GRPC_ERROR_REF(error));
+ }
  }
  grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
  reason);
@@ -346,6 +350,33 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
  }
  }

+ // Wrap a closure associated with \a lb_policy. The associated callback (\a
+ // wrapped_on_pick_closure_cb) is responsible for unref'ing \a lb_policy after
+ // scheduling \a wrapped_closure.
+ typedef struct wrapped_on_pick_closure_arg {
+ /* the closure instance using this struct as argument */
+ grpc_closure wrapper_closure;
+
+ /* the original closure. Usually a on_complete/notify cb for pick() and ping()
+ * calls against the internal RR instance, respectively. */
+ grpc_closure *wrapped_closure;
+
+ /* The policy instance related to the closure */
+ grpc_lb_policy *lb_policy;
+ } wrapped_on_pick_closure_arg;
+
+ // Invoke \a arg->wrapped_closure, unref \a arg->lb_policy and free \a arg.
+ static void wrapped_on_pick_closure_cb(grpc_exec_ctx *exec_ctx, void *arg,
+ grpc_error *error) {
+ wrapped_on_pick_closure_arg *wc_arg = arg;
+ GPR_ASSERT(wc_arg != NULL);
+ GPR_ASSERT(wc_arg->wrapped_closure != NULL);
+ GPR_ASSERT(wc_arg->lb_policy != NULL);
+ grpc_closure_run(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
+ GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->lb_policy, "pick_subchannel_wrapping");
+ gpr_free(wc_arg);
+ }
+
  static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
  void *arg, grpc_error *error) {
  channel_data *chand = arg;
@@ -369,26 +400,24 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
  GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
  lb_policy_name = channel_arg->value.string;
  }
- // Special case: If all of the addresses are balancer addresses,
- // assume that we should use the grpclb policy, regardless of what the
- // resolver actually specified.
+ // Special case: If at least one balancer address is present, we use
+ // the grpclb policy, regardless of what the resolver actually specified.
  channel_arg =
  grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
  if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
  grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
- bool found_backend_address = false;
+ bool found_balancer_address = false;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
- if (!addresses->addresses[i].is_balancer) {
- found_backend_address = true;
+ if (addresses->addresses[i].is_balancer) {
+ found_balancer_address = true;
  break;
  }
  }
- if (!found_backend_address) {
+ if (found_balancer_address) {
  if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
  gpr_log(GPR_INFO,
- "resolver requested LB policy %s but provided only balancer "
- "addresses, no backend addresses -- forcing use of grpclb LB "
- "policy",
+ "resolver requested LB policy %s but provided at least one "
+ "balancer address -- forcing use of grpclb LB policy",
  lb_policy_name);
  }
  lb_policy_name = "grpclb";
@@ -434,7 +463,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
  grpc_uri_destroy(uri);
  method_params_table = grpc_service_config_create_method_config_table(
  exec_ctx, service_config, method_parameters_create_from_json,
- &method_parameters_vtable);
+ method_parameters_free);
  grpc_service_config_destroy(service_config);
  }
  }
@@ -676,6 +705,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
  if (chand->resolver == NULL) {
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
  }
+ chand->deadline_checking_enabled =
+ grpc_deadline_checking_enabled(args->channel_args);
  return GRPC_ERROR_NONE;
  }

@@ -729,12 +760,6 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,

  #define CANCELLED_CALL ((grpc_subchannel_call *)1)

- typedef enum {
- /* zero so that it can be default-initialized */
- GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING = 0,
- GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
- } subchannel_creation_phase;
-
  /** Call data. Holds a pointer to grpc_subchannel_call and the
  associated machinery to create such a pointer.
  Handles queueing of stream ops until a call object is ready, waiting
@@ -762,8 +787,9 @@ typedef struct client_channel_call_data {
  gpr_atm subchannel_call;
  gpr_arena *arena;

- subchannel_creation_phase creation_phase;
+ bool pick_pending;
  grpc_connected_subchannel *connected_subchannel;
+ grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
  grpc_polling_entity *pollent;

  grpc_transport_stream_op_batch **waiting_ops;
@@ -864,12 +890,14 @@ static void apply_final_configuration_locked(grpc_exec_ctx *exec_ctx,
  /* apply service-config level configuration to the call (now that we're
  * certain it exists) */
  call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
  gpr_timespec per_method_deadline;
  if (set_call_method_params_from_service_config_locked(exec_ctx, elem,
  &per_method_deadline)) {
  // If the deadline from the service config is shorter than the one
  // from the client API, reset the deadline timer.
- if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
+ if (chand->deadline_checking_enabled &&
+ gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
  calld->deadline = per_method_deadline;
  grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
  }
@@ -881,11 +909,10 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
  grpc_call_element *elem = arg;
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
- GPR_ASSERT(calld->creation_phase ==
- GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
+ GPR_ASSERT(calld->pick_pending);
+ calld->pick_pending = false;
  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
  chand->interested_parties);
- calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (calld->connected_subchannel == NULL) {
  gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
  fail_locked(exec_ctx, calld,
@@ -911,7 +938,8 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
  .path = calld->path,
  .start_time = calld->call_start_time,
  .deadline = calld->deadline,
- .arena = calld->arena};
+ .arena = calld->arena,
+ .context = calld->subchannel_call_context};
  grpc_error *new_error = grpc_connected_subchannel_create_call(
  exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
  gpr_atm_rel_store(&calld->subchannel_call,
@@ -940,6 +968,7 @@ typedef struct {
  grpc_metadata_batch *initial_metadata;
  uint32_t initial_metadata_flags;
  grpc_connected_subchannel **connected_subchannel;
+ grpc_call_context_element *subchannel_call_context;
  grpc_closure *on_ready;
  grpc_call_element *elem;
  grpc_closure closure;
@@ -951,8 +980,8 @@ typedef struct {
  static bool pick_subchannel_locked(
  grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
  grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
- grpc_error *error);
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_call_context_element *subchannel_call_context, grpc_closure *on_ready);

  static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
  grpc_error *error) {
@@ -964,49 +993,49 @@ static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
  } else {
  if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata,
  cpa->initial_metadata_flags,
- cpa->connected_subchannel, cpa->on_ready,
- GRPC_ERROR_NONE)) {
+ cpa->connected_subchannel,
+ cpa->subchannel_call_context, cpa->on_ready)) {
  grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
  }
  }
  gpr_free(cpa);
  }

+ static void cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+ grpc_error *error) {
+ channel_data *chand = elem->channel_data;
+ call_data *calld = elem->call_data;
+ if (chand->lb_policy != NULL) {
+ grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
+ &calld->connected_subchannel,
+ GRPC_ERROR_REF(error));
+ }
+ for (grpc_closure *closure = chand->waiting_for_config_closures.head;
+ closure != NULL; closure = closure->next_data.next) {
+ continue_picking_args *cpa = closure->cb_arg;
+ if (cpa->connected_subchannel == &calld->connected_subchannel) {
+ cpa->connected_subchannel = NULL;
+ grpc_closure_sched(exec_ctx, cpa->on_ready,
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Pick cancelled", &error, 1));
+ }
+ }
+ GRPC_ERROR_UNREF(error);
+ }
+
  static bool pick_subchannel_locked(
  grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
  grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
- grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
- grpc_error *error) {
+ grpc_connected_subchannel **connected_subchannel,
+ grpc_call_context_element *subchannel_call_context,
+ grpc_closure *on_ready) {
  GPR_TIMER_BEGIN("pick_subchannel", 0);

  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
- continue_picking_args *cpa;
- grpc_closure *closure;

  GPR_ASSERT(connected_subchannel);

- if (initial_metadata == NULL) {
- if (chand->lb_policy != NULL) {
- grpc_lb_policy_cancel_pick_locked(exec_ctx, chand->lb_policy,
- connected_subchannel,
- GRPC_ERROR_REF(error));
- }
- for (closure = chand->waiting_for_config_closures.head; closure != NULL;
- closure = closure->next_data.next) {
- cpa = closure->cb_arg;
- if (cpa->connected_subchannel == connected_subchannel) {
- cpa->connected_subchannel = NULL;
- grpc_closure_sched(exec_ctx, cpa->on_ready,
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Pick cancelled", &error, 1));
- }
- }
- GPR_TIMER_END("pick_subchannel", 0);
- GRPC_ERROR_UNREF(error);
- return true;
- }
- GPR_ASSERT(error == GRPC_ERROR_NONE);
  if (chand->lb_policy != NULL) {
  apply_final_configuration_locked(exec_ctx, elem);
  grpc_lb_policy *lb_policy = chand->lb_policy;
@@ -1029,13 +1058,30 @@ static bool pick_subchannel_locked(
  }
  }
  const grpc_lb_policy_pick_args inputs = {
- initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
- gpr_inf_future(GPR_CLOCK_MONOTONIC)};
- const bool result = grpc_lb_policy_pick_locked(
- exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
+ initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem};
+
+ // Wrap the user-provided callback in order to hold a strong reference to
+ // the LB policy for the duration of the pick.
+ wrapped_on_pick_closure_arg *w_on_pick_arg =
+ gpr_zalloc(sizeof(*w_on_pick_arg));
+ grpc_closure_init(&w_on_pick_arg->wrapper_closure,
+ wrapped_on_pick_closure_cb, w_on_pick_arg,
+ grpc_schedule_on_exec_ctx);
+ w_on_pick_arg->wrapped_closure = on_ready;
+ GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel_wrapping");
+ w_on_pick_arg->lb_policy = lb_policy;
+ const bool pick_done = grpc_lb_policy_pick_locked(
+ exec_ctx, lb_policy, &inputs, connected_subchannel,
+ subchannel_call_context, NULL, &w_on_pick_arg->wrapper_closure);
+ if (pick_done) {
+ /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
+ GRPC_LB_POLICY_UNREF(exec_ctx, w_on_pick_arg->lb_policy,
+ "pick_subchannel_wrapping");
+ gpr_free(w_on_pick_arg);
+ }
  GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
  GPR_TIMER_END("pick_subchannel", 0);
- return result;
+ return pick_done;
  }
  if (chand->resolver != NULL && !chand->started_resolving) {
  chand->started_resolving = true;
@@ -1045,10 +1091,11 @@ static bool pick_subchannel_locked(
  &chand->on_resolver_result_changed);
  }
  if (chand->resolver != NULL) {
- cpa = gpr_malloc(sizeof(*cpa));
+ continue_picking_args *cpa = gpr_malloc(sizeof(*cpa));
  cpa->initial_metadata = initial_metadata;
  cpa->initial_metadata_flags = initial_metadata_flags;
  cpa->connected_subchannel = connected_subchannel;
+ cpa->subchannel_call_context = subchannel_call_context;
  cpa->on_ready = on_ready;
  cpa->elem = elem;
  grpc_closure_init(&cpa->closure, continue_picking_locked, cpa,
@@ -1100,16 +1147,13 @@ static void start_transport_stream_op_batch_locked_inner(
  error to the caller when the first op does get passed down. */
  calld->cancel_error =
  GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error);
- switch (calld->creation_phase) {
- case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
- fail_locked(exec_ctx, calld,
- GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
- break;
- case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
- pick_subchannel_locked(
- exec_ctx, elem, NULL, 0, &calld->connected_subchannel, NULL,
- GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
- break;
+ if (calld->pick_pending) {
+ cancel_pick_locked(
+ exec_ctx, elem,
+ GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
+ } else {
+ fail_locked(exec_ctx, calld,
+ GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
  }
  grpc_transport_stream_op_batch_finish_with_failure(
  exec_ctx, op,
@@ -1119,9 +1163,9 @@ static void start_transport_stream_op_batch_locked_inner(
  }
  }
  /* if we don't have a subchannel, try to get one */
- if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
- calld->connected_subchannel == NULL && op->send_initial_metadata) {
- calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
+ if (!calld->pick_pending && calld->connected_subchannel == NULL &&
+ op->send_initial_metadata) {
+ calld->pick_pending = true;
  grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem,
  grpc_combiner_scheduler(chand->combiner, true));
  GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
@@ -1132,8 +1176,9 @@ static void start_transport_stream_op_batch_locked_inner(
  exec_ctx, elem,
  op->payload->send_initial_metadata.send_initial_metadata,
  op->payload->send_initial_metadata.send_initial_metadata_flags,
- &calld->connected_subchannel, &calld->next_step, GRPC_ERROR_NONE)) {
- calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+ &calld->connected_subchannel, calld->subchannel_call_context,
+ &calld->next_step)) {
+ calld->pick_pending = false;
  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
  } else {
  grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
@@ -1141,15 +1186,15 @@ static void start_transport_stream_op_batch_locked_inner(
  }
  }
  /* if we've got a subchannel, then let's ask it to create a call */
- if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
- calld->connected_subchannel != NULL) {
+ if (!calld->pick_pending && calld->connected_subchannel != NULL) {
  grpc_subchannel_call *subchannel_call = NULL;
  const grpc_connected_subchannel_call_args call_args = {
  .pollent = calld->pollent,
  .path = calld->path,
  .start_time = calld->call_start_time,
  .deadline = calld->deadline,
- .arena = calld->arena};
+ .arena = calld->arena,
+ .context = calld->subchannel_call_context};
  grpc_error *error = grpc_connected_subchannel_create_call(
  exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
  gpr_atm_rel_store(&calld->subchannel_call,
@@ -1227,8 +1272,10 @@ static void cc_start_transport_stream_op_batch(
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
- grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
- op);
+ if (chand->deadline_checking_enabled) {
+ grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
+ op);
+ }
  /* try to (atomically) get the call */
  grpc_subchannel_call *call = GET_CALL(calld);
  GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
@@ -1262,14 +1309,16 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
  grpc_call_element *elem,
  const grpc_call_element_args *args) {
  call_data *calld = elem->call_data;
+ channel_data *chand = elem->channel_data;
  // Initialize data members.
- grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  calld->path = grpc_slice_ref_internal(args->path);
  calld->call_start_time = args->start_time;
  calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
  calld->owning_call = args->call_stack;
  calld->arena = args->arena;
- grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
+ if (chand->deadline_checking_enabled) {
+ grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline);
+ }
  return GRPC_ERROR_NONE;
  }

@@ -1279,7 +1328,10 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
  const grpc_call_final_info *final_info,
  grpc_closure *then_schedule_closure) {
  call_data *calld = elem->call_data;
- grpc_deadline_state_destroy(exec_ctx, elem);
+ channel_data *chand = elem->channel_data;
+ if (chand->deadline_checking_enabled) {
+ grpc_deadline_state_destroy(exec_ctx, elem);
+ }
  grpc_slice_unref_internal(exec_ctx, calld->path);
  if (calld->method_params != NULL) {
  method_parameters_unref(calld->method_params);
@@ -1291,12 +1343,18 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
  then_schedule_closure = NULL;
  GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
  }
- GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
+ GPR_ASSERT(!calld->pick_pending);
  GPR_ASSERT(calld->waiting_ops_count == 0);
  if (calld->connected_subchannel != NULL) {
  GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
  "picked");
  }
+ for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
+ if (calld->subchannel_call_context[i].value != NULL) {
+ calld->subchannel_call_context[i].destroy(
+ calld->subchannel_call_context[i].value);
+ }
+ }
  gpr_free(calld->waiting_ops);
  grpc_closure_sched(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
  }
@@ -1392,12 +1450,12 @@ static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,

  void grpc_client_channel_watch_connectivity_state(
  grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
- grpc_connectivity_state *state, grpc_closure *on_complete) {
+ grpc_connectivity_state *state, grpc_closure *closure) {
  channel_data *chand = elem->channel_data;
  external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
  w->chand = chand;
  w->pollset = pollset;
- w->on_complete = on_complete;
+ w->on_complete = closure;
  w->state = state;
  grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
  GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,