grpc 1.20.0 → 1.21.0

Potentially problematic release.

Files changed (209)
  1. checksums.yaml +4 -4
  2. data/Makefile +500 -29
  3. data/etc/roots.pem +146 -0
  4. data/include/grpc/grpc_security.h +1 -1
  5. data/include/grpc/impl/codegen/grpc_types.h +10 -7
  6. data/include/grpc/impl/codegen/port_platform.h +11 -1
  7. data/include/grpc/impl/codegen/slice.h +1 -21
  8. data/include/grpc/impl/codegen/status.h +2 -1
  9. data/include/grpc/slice.h +1 -1
  10. data/src/core/ext/filters/client_channel/backup_poller.cc +19 -13
  11. data/src/core/ext/filters/client_channel/backup_poller.h +3 -0
  12. data/src/core/ext/filters/client_channel/channel_connectivity.cc +1 -1
  13. data/src/core/ext/filters/client_channel/client_channel.cc +2084 -1673
  14. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +2 -3
  15. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +4 -0
  16. data/src/core/ext/filters/client_channel/health/health_check_client.cc +54 -49
  17. data/src/core/ext/filters/client_channel/health/health_check_client.h +20 -9
  18. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +1 -2
  19. data/src/core/ext/filters/client_channel/http_connect_handshaker.h +1 -1
  20. data/src/core/ext/filters/client_channel/lb_policy.cc +3 -30
  21. data/src/core/ext/filters/client_channel/lb_policy.h +16 -25
  22. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +106 -81
  23. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +6 -2
  24. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +8 -12
  25. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +2 -2
  26. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +1 -1
  27. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +57 -49
  28. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +47 -41
  29. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +24 -20
  30. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +989 -284
  31. data/src/core/ext/filters/client_channel/lb_policy_factory.h +4 -1
  32. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +105 -2
  33. data/src/core/ext/filters/client_channel/lb_policy_registry.h +9 -2
  34. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +79 -36
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +84 -2
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +3 -0
  37. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc +179 -0
  38. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +15 -3
  39. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +80 -4
  40. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +7 -13
  41. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +2 -2
  42. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_libuv.cc +39 -0
  43. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc +0 -6
  44. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +2 -64
  45. data/src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc +28 -0
  46. data/src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.h +29 -0
  47. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +4 -4
  48. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +367 -232
  49. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +55 -76
  50. data/src/core/ext/filters/client_channel/resolving_lb_policy.cc +50 -39
  51. data/src/core/ext/filters/client_channel/resolving_lb_policy.h +18 -12
  52. data/src/core/ext/filters/client_channel/service_config.cc +247 -27
  53. data/src/core/ext/filters/client_channel/service_config.h +119 -166
  54. data/src/core/ext/filters/client_channel/subchannel.cc +46 -84
  55. data/src/core/ext/filters/client_channel/subchannel.h +7 -7
  56. data/src/core/ext/filters/deadline/deadline_filter.cc +3 -4
  57. data/src/core/ext/filters/deadline/deadline_filter.h +3 -2
  58. data/src/core/ext/filters/http/client/http_client_filter.cc +7 -5
  59. data/src/core/ext/filters/http/client/http_client_filter.h +1 -1
  60. data/src/core/ext/filters/http/client_authority_filter.cc +1 -1
  61. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +4 -3
  62. data/src/core/ext/filters/http/server/http_server_filter.cc +18 -12
  63. data/src/core/ext/filters/message_size/message_size_filter.cc +118 -76
  64. data/src/core/ext/filters/message_size/message_size_filter.h +33 -0
  65. data/src/core/ext/transport/chttp2/alpn/alpn.h +1 -1
  66. data/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc +9 -7
  67. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +93 -60
  68. data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -1
  69. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +4 -3
  70. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +3 -3
  71. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +8 -2
  72. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +2 -2
  73. data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +1 -1
  74. data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +3 -2
  75. data/src/core/ext/transport/chttp2/transport/internal.h +35 -23
  76. data/src/core/ext/transport/chttp2/transport/parsing.cc +4 -4
  77. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +3 -3
  78. data/src/core/ext/transport/chttp2/transport/writing.cc +61 -27
  79. data/src/core/ext/transport/inproc/inproc_transport.cc +18 -18
  80. data/src/core/lib/channel/channel_args.cc +0 -101
  81. data/src/core/lib/channel/channel_args.h +0 -37
  82. data/src/core/lib/channel/channel_stack.h +9 -5
  83. data/src/core/lib/channel/channelz_registry.cc +1 -1
  84. data/src/core/lib/channel/connected_channel.cc +2 -2
  85. data/src/core/lib/channel/context.h +3 -0
  86. data/src/core/lib/channel/handshaker.cc +4 -4
  87. data/src/core/lib/channel/handshaker.h +1 -1
  88. data/src/core/lib/compression/compression_args.cc +127 -0
  89. data/src/core/lib/compression/compression_args.h +55 -0
  90. data/src/core/lib/debug/trace.cc +13 -7
  91. data/src/core/lib/debug/trace.h +12 -0
  92. data/src/core/lib/gpr/arena.h +13 -9
  93. data/src/core/lib/gpr/env.h +2 -5
  94. data/src/core/lib/gpr/env_linux.cc +6 -1
  95. data/src/core/lib/gpr/env_posix.cc +5 -0
  96. data/src/core/lib/gpr/env_windows.cc +7 -5
  97. data/src/core/lib/gpr/log.cc +9 -13
  98. data/src/core/lib/gpr/string.cc +12 -6
  99. data/src/core/lib/gpr/string.h +4 -2
  100. data/src/core/lib/gpr/time_posix.cc +13 -0
  101. data/src/core/lib/gprpp/arena.cc +103 -0
  102. data/src/core/lib/gprpp/arena.h +121 -0
  103. data/src/core/lib/gprpp/fork.cc +12 -29
  104. data/src/core/lib/gprpp/global_config.h +87 -0
  105. data/src/core/lib/gprpp/global_config_custom.h +29 -0
  106. data/src/core/lib/gprpp/global_config_env.cc +135 -0
  107. data/src/core/lib/gprpp/global_config_env.h +131 -0
  108. data/src/core/lib/gprpp/global_config_generic.h +44 -0
  109. data/src/core/lib/gprpp/map.h +419 -0
  110. data/src/core/lib/gprpp/optional.h +1 -0
  111. data/src/core/lib/gprpp/orphanable.h +2 -2
  112. data/src/core/lib/gprpp/{mutex_lock.h → pair.h} +15 -19
  113. data/src/core/lib/gprpp/ref_counted.h +18 -2
  114. data/src/core/lib/gprpp/sync.h +126 -0
  115. data/src/core/lib/http/parser.cc +1 -1
  116. data/src/core/lib/iomgr/call_combiner.cc +84 -90
  117. data/src/core/lib/iomgr/call_combiner.h +75 -82
  118. data/src/core/lib/iomgr/cfstream_handle.cc +202 -0
  119. data/src/core/lib/iomgr/cfstream_handle.h +82 -0
  120. data/src/core/lib/iomgr/combiner.h +1 -1
  121. data/src/core/lib/iomgr/endpoint_cfstream.cc +375 -0
  122. data/src/core/lib/iomgr/endpoint_cfstream.h +49 -0
  123. data/src/core/lib/iomgr/endpoint_pair_windows.cc +2 -2
  124. data/src/core/lib/iomgr/error.h +23 -0
  125. data/src/core/lib/iomgr/error_cfstream.cc +52 -0
  126. data/src/core/lib/iomgr/error_cfstream.h +31 -0
  127. data/src/core/lib/iomgr/ev_epoll1_linux.cc +34 -27
  128. data/src/core/lib/iomgr/ev_epollex_linux.cc +33 -33
  129. data/src/core/lib/iomgr/ev_poll_posix.cc +7 -7
  130. data/src/core/lib/iomgr/ev_posix.cc +15 -13
  131. data/src/core/lib/iomgr/ev_posix.h +4 -1
  132. data/src/core/lib/iomgr/executor.cc +13 -9
  133. data/src/core/lib/iomgr/fork_posix.cc +0 -1
  134. data/src/core/lib/iomgr/internal_errqueue.cc +1 -1
  135. data/src/core/lib/iomgr/iomgr.cc +6 -5
  136. data/src/core/lib/iomgr/iomgr_custom.cc +3 -0
  137. data/src/core/lib/iomgr/iomgr_custom.h +2 -0
  138. data/src/core/lib/iomgr/iomgr_posix_cfstream.cc +93 -0
  139. data/src/core/lib/iomgr/iomgr_windows.cc +1 -0
  140. data/src/core/lib/iomgr/lockfree_event.cc +3 -3
  141. data/src/core/lib/iomgr/port.h +11 -0
  142. data/src/core/lib/iomgr/resource_quota.cc +40 -37
  143. data/src/core/lib/iomgr/socket_utils_common_posix.cc +6 -2
  144. data/src/core/lib/iomgr/socket_windows.cc +19 -0
  145. data/src/core/lib/iomgr/socket_windows.h +8 -0
  146. data/src/core/lib/iomgr/tcp_client_cfstream.cc +216 -0
  147. data/src/core/lib/iomgr/tcp_client_custom.cc +2 -2
  148. data/src/core/lib/iomgr/tcp_client_posix.cc +3 -3
  149. data/src/core/lib/iomgr/tcp_client_windows.cc +1 -1
  150. data/src/core/lib/iomgr/tcp_custom.cc +9 -9
  151. data/src/core/lib/iomgr/tcp_posix.cc +41 -41
  152. data/src/core/lib/iomgr/tcp_server_custom.cc +3 -3
  153. data/src/core/lib/iomgr/tcp_server_posix.cc +14 -1
  154. data/src/core/lib/iomgr/tcp_server_windows.cc +2 -2
  155. data/src/core/lib/iomgr/tcp_windows.cc +7 -9
  156. data/src/core/lib/iomgr/timer_generic.cc +16 -16
  157. data/src/core/lib/iomgr/timer_manager.cc +12 -11
  158. data/src/core/lib/profiling/basic_timers.cc +10 -4
  159. data/src/core/lib/security/context/security_context.cc +6 -7
  160. data/src/core/lib/security/context/security_context.h +3 -4
  161. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +1 -1
  162. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -3
  163. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +1 -1
  164. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +7 -7
  165. data/src/core/lib/security/security_connector/load_system_roots_linux.cc +7 -5
  166. data/src/core/lib/security/security_connector/security_connector.cc +0 -1
  167. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +3 -2
  168. data/src/core/lib/security/security_connector/ssl_utils.cc +30 -26
  169. data/src/core/lib/security/security_connector/ssl_utils.h +5 -1
  170. data/src/core/lib/security/transport/client_auth_filter.cc +7 -11
  171. data/src/core/lib/security/transport/secure_endpoint.cc +4 -4
  172. data/src/core/lib/security/transport/server_auth_filter.cc +2 -3
  173. data/src/core/lib/slice/slice.cc +99 -116
  174. data/src/core/lib/slice/slice_buffer.cc +5 -0
  175. data/src/core/lib/slice/slice_intern.cc +38 -95
  176. data/src/core/lib/slice/slice_internal.h +200 -2
  177. data/src/core/lib/surface/api_trace.h +1 -1
  178. data/src/core/lib/surface/call.cc +41 -35
  179. data/src/core/lib/surface/call.h +7 -2
  180. data/src/core/lib/surface/call_details.cc +0 -1
  181. data/src/core/lib/surface/completion_queue.cc +36 -27
  182. data/src/core/lib/surface/init.cc +3 -4
  183. data/src/core/lib/surface/lame_client.cc +1 -1
  184. data/src/core/lib/surface/server.cc +18 -25
  185. data/src/core/lib/surface/version.cc +1 -1
  186. data/src/core/lib/transport/bdp_estimator.cc +3 -3
  187. data/src/core/lib/transport/bdp_estimator.h +2 -2
  188. data/src/core/lib/transport/connectivity_state.cc +10 -40
  189. data/src/core/lib/transport/connectivity_state.h +0 -8
  190. data/src/core/lib/transport/error_utils.cc +12 -0
  191. data/src/core/lib/transport/metadata.cc +206 -278
  192. data/src/core/lib/transport/metadata.h +205 -10
  193. data/src/core/lib/transport/static_metadata.cc +108 -116
  194. data/src/core/lib/transport/static_metadata.h +1 -2
  195. data/src/core/lib/transport/status_metadata.cc +3 -3
  196. data/src/core/lib/transport/transport.cc +29 -66
  197. data/src/core/lib/transport/transport.h +36 -8
  198. data/src/core/lib/transport/transport_impl.h +1 -1
  199. data/src/core/tsi/fake_transport_security.cc +4 -4
  200. data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +1 -1
  201. data/src/core/tsi/ssl_transport_security.cc +1 -1
  202. data/src/ruby/ext/grpc/rb_grpc.c +1 -1
  203. data/src/ruby/lib/grpc/errors.rb +22 -3
  204. data/src/ruby/lib/grpc/generic/bidi_call.rb +1 -1
  205. data/src/ruby/lib/grpc/generic/rpc_server.rb +1 -1
  206. data/src/ruby/lib/grpc/version.rb +1 -1
  207. data/src/ruby/spec/errors_spec.rb +141 -0
  208. metadata +57 -33
  209. data/src/core/lib/gpr/arena.cc +0 -192
data/src/core/ext/filters/client_channel/client_channel_channelz.cc

@@ -49,8 +49,8 @@ ClientChannelNode::ClientChannelNode(grpc_channel* channel,
     : ChannelNode(channel, channel_tracer_max_nodes, is_top_level_channel) {
   client_channel_ =
       grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
-  grpc_client_channel_set_channelz_node(client_channel_, this);
   GPR_ASSERT(client_channel_->filter == &grpc_client_channel_filter);
+  grpc_client_channel_set_channelz_node(client_channel_, this);
 }
 
 void ClientChannelNode::PopulateConnectivityState(grpc_json* json) {
@@ -127,8 +127,7 @@ void SubchannelNode::PopulateConnectivityState(grpc_json* json) {
   if (subchannel_ == nullptr) {
     state = GRPC_CHANNEL_SHUTDOWN;
   } else {
-    state = subchannel_->CheckConnectivity(nullptr,
-                                           true /* inhibit_health_checking */);
+    state = subchannel_->CheckConnectivity(true /* inhibit_health_checking */);
   }
   json = grpc_json_create_child(nullptr, json, "state", nullptr,
                                 GRPC_JSON_OBJECT, false);

data/src/core/ext/filters/client_channel/client_channel_plugin.cc

@@ -32,6 +32,7 @@
 #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
 #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
 #include "src/core/ext/filters/client_channel/resolver_registry.h"
+#include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
 #include "src/core/ext/filters/client_channel/retry_throttle.h"
 #include "src/core/lib/surface/channel_init.h"
 
@@ -49,6 +50,8 @@ static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
 }
 
 void grpc_client_channel_init(void) {
+  grpc_core::ServiceConfig::Init();
+  grpc_core::internal::ClientChannelServiceConfigParser::Register();
   grpc_core::LoadBalancingPolicyRegistry::Builder::InitRegistry();
   grpc_core::ResolverRegistry::Builder::InitRegistry();
   grpc_core::internal::ServerRetryThrottleMap::Init();
@@ -68,4 +71,5 @@ void grpc_client_channel_shutdown(void) {
   grpc_core::internal::ServerRetryThrottleMap::Shutdown();
   grpc_core::ResolverRegistry::Builder::ShutdownRegistry();
   grpc_core::LoadBalancingPolicyRegistry::Builder::ShutdownRegistry();
+  grpc_core::ServiceConfig::Shutdown();
 }
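
The two plugin hunks above are symmetric: grpc_client_channel_init() gains ServiceConfig::Init() and the parser registration at the top, while grpc_client_channel_shutdown() gains ServiceConfig::Shutdown() at the bottom, preserving teardown in the reverse of initialization order. A minimal sketch of that LIFO pairing (illustrative only; InitSubsystem/ShutdownAll are hypothetical, not gRPC APIs):

    #include <functional>
    #include <vector>

    namespace sketch {

    std::vector<std::function<void()>> g_shutdown_hooks;

    // Pairs each init with its shutdown; shutdowns replay in reverse order.
    void InitSubsystem(std::function<void()> init,
                       std::function<void()> shutdown) {
      init();  // bring the subsystem up
      g_shutdown_hooks.push_back(std::move(shutdown));
    }

    void ShutdownAll() {
      // Tear down last-initialized first, mirroring the hunks above.
      for (auto it = g_shutdown_hooks.rbegin(); it != g_shutdown_hooks.rend();
           ++it) {
        (*it)();
      }
      g_shutdown_hooks.clear();
    }

    }  // namespace sketch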
data/src/core/ext/filters/client_channel/health/health_check_client.cc

@@ -27,7 +27,7 @@
 #include "pb_encode.h"
 #include "src/core/ext/filters/client_channel/health/health.pb.h"
 #include "src/core/lib/debug/trace.h"
-#include "src/core/lib/gprpp/mutex_lock.h"
+#include "src/core/lib/gprpp/sync.h"
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/transport/error_utils.h"
 #include "src/core/lib/transport/status_metadata.h"
@@ -37,11 +37,10 @@
 #define HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS 120
 #define HEALTH_CHECK_RECONNECT_JITTER 0.2
 
-grpc_core::TraceFlag grpc_health_check_client_trace(false,
-                                                    "health_check_client");
-
 namespace grpc_core {
 
+TraceFlag grpc_health_check_client_trace(false, "health_check_client");
+
 //
 // HealthCheckClient
 //
@@ -50,7 +49,7 @@ HealthCheckClient::HealthCheckClient(
     const char* service_name,
     RefCountedPtr<ConnectedSubchannel> connected_subchannel,
     grpc_pollset_set* interested_parties,
-    grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode> channelz_node)
+    RefCountedPtr<channelz::SubchannelNode> channelz_node)
     : InternallyRefCounted<HealthCheckClient>(&grpc_health_check_client_trace),
       service_name_(service_name),
       connected_subchannel_(std::move(connected_subchannel)),
@@ -64,21 +63,19 @@ HealthCheckClient::HealthCheckClient(
           .set_jitter(HEALTH_CHECK_RECONNECT_JITTER)
           .set_max_backoff(HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS *
                            1000)) {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
    gpr_log(GPR_INFO, "created HealthCheckClient %p", this);
   }
   GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this,
                     grpc_schedule_on_exec_ctx);
-  gpr_mu_init(&mu_);
   StartCall();
 }
 
 HealthCheckClient::~HealthCheckClient() {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this);
   }
   GRPC_ERROR_UNREF(error_);
-  gpr_mu_destroy(&mu_);
 }
 
 void HealthCheckClient::NotifyOnHealthChange(grpc_connectivity_state* state,
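
Throughout this file, direct grpc_health_check_client_trace.enabled() calls are replaced by the GRPC_TRACE_FLAG_ENABLED(...) macro (the new trace.h hooks are in this release's debug/trace.h diff). A rough self-contained analogue of the pattern; the class and macro below are stand-ins, not the gRPC definitions:

    #include <atomic>
    #include <cstdio>

    // Stand-in for grpc_core::TraceFlag: a named boolean toggle.
    class TraceFlag {
     public:
      TraceFlag(bool default_enabled, const char* name)
          : enabled_(default_enabled), name_(name) {}
      bool enabled() const { return enabled_.load(std::memory_order_relaxed); }
      const char* name() const { return name_; }

     private:
      std::atomic<bool> enabled_;
      const char* name_;
    };

    // Routing the check through a macro keeps call sites uniform and gives a
    // single place to compile tracing out of a build entirely.
    #ifdef DISABLE_TRACERS
    #define TRACE_FLAG_ENABLED(flag) false
    #else
    #define TRACE_FLAG_ENABLED(flag) ((flag).enabled())
    #endif

    TraceFlag demo_trace(false, "demo");

    void DoWork() {
      if (TRACE_FLAG_ENABLED(demo_trace)) {
        std::fprintf(stderr, "demo: DoWork() invoked\n");
      }
    }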
@@ -102,7 +99,7 @@ void HealthCheckClient::SetHealthStatus(grpc_connectivity_state state,
 
 void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state,
                                               grpc_error* error) {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: setting state=%d error=%s", this,
             state, grpc_error_string(error));
   }
@@ -118,7 +115,7 @@ void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state,
 }
 
 void HealthCheckClient::Orphan() {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: shutting down", this);
   }
   {
@@ -148,7 +145,7 @@ void HealthCheckClient::StartCallLocked() {
   GPR_ASSERT(call_state_ == nullptr);
   SetHealthStatusLocked(GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE);
   call_state_ = MakeOrphanable<CallState>(Ref(), interested_parties_);
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: created CallState %p", this,
             call_state_.get());
   }
@@ -162,7 +159,7 @@ void HealthCheckClient::StartRetryTimer() {
                         GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                             "health check call failed; will retry after backoff"));
   grpc_millis next_try = retry_backoff_.NextAttemptTime();
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: health check call lost...", this);
     grpc_millis timeout = next_try - ExecCtx::Get()->Now();
     if (timeout > 0) {
@@ -187,7 +184,7 @@ void HealthCheckClient::OnRetryTimer(void* arg, grpc_error* error) {
   self->retry_timer_callback_pending_ = false;
   if (!self->shutting_down_ && error == GRPC_ERROR_NONE &&
       self->call_state_ == nullptr) {
-    if (grpc_health_check_client_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
       gpr_log(GPR_INFO, "HealthCheckClient %p: restarting health check call",
               self);
     }
@@ -280,24 +277,17 @@ bool DecodeResponse(grpc_slice_buffer* slice_buffer, grpc_error** error) {
 HealthCheckClient::CallState::CallState(
     RefCountedPtr<HealthCheckClient> health_check_client,
     grpc_pollset_set* interested_parties)
-    : InternallyRefCounted<CallState>(&grpc_health_check_client_trace),
-      health_check_client_(std::move(health_check_client)),
+    : health_check_client_(std::move(health_check_client)),
       pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)),
-      arena_(gpr_arena_create(health_check_client_->connected_subchannel_
-                                  ->GetInitialCallSizeEstimate(0))),
-      payload_(context_) {
-  grpc_call_combiner_init(&call_combiner_);
-  gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(0));
-}
+      arena_(Arena::Create(health_check_client_->connected_subchannel_
+                               ->GetInitialCallSizeEstimate(0))),
+      payload_(context_) {}
 
 HealthCheckClient::CallState::~CallState() {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: destroying CallState %p",
             health_check_client_.get(), this);
   }
-  // The subchannel call is in the arena, so reset the pointer before we destroy
-  // the arena.
-  call_.reset();
   for (size_t i = 0; i < GRPC_CONTEXT_COUNT; i++) {
     if (context_[i].destroy != nullptr) {
       context_[i].destroy(context_[i].value);
@@ -309,14 +299,13 @@ HealthCheckClient::CallState::~CallState() {
   // holding to the call stack. Also flush the closures on exec_ctx so that
   // filters that schedule cancel notification closures on exec_ctx do not
   // need to take a ref of the call stack to guarantee closure liveness.
-  grpc_call_combiner_set_notify_on_cancel(&call_combiner_, nullptr);
-  grpc_core::ExecCtx::Get()->Flush();
-  grpc_call_combiner_destroy(&call_combiner_);
-  gpr_arena_destroy(arena_);
+  call_combiner_.SetNotifyOnCancel(nullptr);
+  ExecCtx::Get()->Flush();
+  arena_->Destroy();
 }
 
 void HealthCheckClient::CallState::Orphan() {
-  grpc_call_combiner_cancel(&call_combiner_, GRPC_ERROR_CANCELLED);
+  call_combiner_.Cancel(GRPC_ERROR_CANCELLED);
   Cancel();
 }
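
The CallState constructor above moves from gpr_arena_create() to the new C++ Arena::Create() (gprpp/arena.h, added in this release), and the destructor now calls arena_->Destroy(). A toy bump allocator illustrating that lifecycle; this is a hypothetical sketch, not the grpc_core::Arena implementation:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    // Everything carved out of the arena must be finished with before
    // Destroy() releases the backing block in one shot.
    class Arena {
     public:
      static Arena* Create(size_t initial_size) {
        void* mem = std::malloc(sizeof(Arena) + initial_size);
        return new (mem) Arena(initial_size);
      }

      void* Alloc(size_t size) {
        size = (size + 15u) & ~size_t{15};             // keep 16-byte alignment
        if (used_ + size > capacity_) return nullptr;  // real arenas grow instead
        void* p = reinterpret_cast<char*>(this + 1) + used_;
        used_ += size;
        return p;
      }

      void Destroy() {
        this->~Arena();
        std::free(this);  // frees the header and every allocation at once
      }

     private:
      explicit Arena(size_t capacity) : capacity_(capacity) {}
      size_t capacity_;
      size_t used_ = 0;
    };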
@@ -332,7 +321,8 @@ void HealthCheckClient::CallState::StartCall() {
       0,  // parent_data_size
   };
   grpc_error* error = GRPC_ERROR_NONE;
-  call_ = health_check_client_->connected_subchannel_->CreateCall(args, &error);
+  call_ = health_check_client_->connected_subchannel_->CreateCall(args, &error)
+              .release();
   if (error != GRPC_ERROR_NONE) {
     gpr_log(GPR_ERROR,
             "HealthCheckClient %p CallState %p: error creating health "
@@ -341,18 +331,22 @@ void HealthCheckClient::CallState::StartCall() {
     GRPC_ERROR_UNREF(error);
     // Schedule instead of running directly, since we must not be
     // holding health_check_client_->mu_ when CallEnded() is called.
-    Ref(DEBUG_LOCATION, "call_end_closure").release();
+    call_->Ref(DEBUG_LOCATION, "call_end_closure").release();
     GRPC_CLOSURE_SCHED(
         GRPC_CLOSURE_INIT(&batch_.handler_private.closure, CallEndedRetry, this,
                           grpc_schedule_on_exec_ctx),
         GRPC_ERROR_NONE);
     return;
   }
+  // Register after-destruction callback.
+  GRPC_CLOSURE_INIT(&after_call_stack_destruction_, AfterCallStackDestruction,
+                    this, grpc_schedule_on_exec_ctx);
+  call_->SetAfterCallStackDestroy(&after_call_stack_destruction_);
   // Initialize payload and batch.
   payload_.context = context_;
   batch_.payload = &payload_;
   // on_complete callback takes ref, handled manually.
-  Ref(DEBUG_LOCATION, "on_complete").release();
+  call_->Ref(DEBUG_LOCATION, "on_complete").release();
   batch_.on_complete = GRPC_CLOSURE_INIT(&on_complete_, OnComplete, this,
                                          grpc_schedule_on_exec_ctx);
   // Add send_initial_metadata op.
@@ -385,7 +379,7 @@ void HealthCheckClient::CallState::StartCall() {
   payload_.recv_initial_metadata.trailing_metadata_available = nullptr;
   payload_.recv_initial_metadata.peer_string = nullptr;
   // recv_initial_metadata_ready callback takes ref, handled manually.
-  Ref(DEBUG_LOCATION, "recv_initial_metadata_ready").release();
+  call_->Ref(DEBUG_LOCATION, "recv_initial_metadata_ready").release();
   payload_.recv_initial_metadata.recv_initial_metadata_ready =
       GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_, RecvInitialMetadataReady,
                         this, grpc_schedule_on_exec_ctx);
@@ -393,7 +387,7 @@ void HealthCheckClient::CallState::StartCall() {
   // Add recv_message op.
   payload_.recv_message.recv_message = &recv_message_;
   // recv_message callback takes ref, handled manually.
-  Ref(DEBUG_LOCATION, "recv_message_ready").release();
+  call_->Ref(DEBUG_LOCATION, "recv_message_ready").release();
   payload_.recv_message.recv_message_ready = GRPC_CLOSURE_INIT(
       &recv_message_ready_, RecvMessageReady, this, grpc_schedule_on_exec_ctx);
   batch_.recv_message = true;
@@ -429,19 +423,26 @@ void HealthCheckClient::CallState::StartBatchInCallCombiner(void* arg,
 
 void HealthCheckClient::CallState::StartBatch(
     grpc_transport_stream_op_batch* batch) {
-  batch->handler_private.extra_arg = call_.get();
+  batch->handler_private.extra_arg = call_;
   GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
                     batch, grpc_schedule_on_exec_ctx);
   GRPC_CALL_COMBINER_START(&call_combiner_, &batch->handler_private.closure,
                            GRPC_ERROR_NONE, "start_subchannel_batch");
 }
 
+void HealthCheckClient::CallState::AfterCallStackDestruction(
+    void* arg, grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  Delete(self);
+}
+
 void HealthCheckClient::CallState::OnCancelComplete(void* arg,
                                                     grpc_error* error) {
   HealthCheckClient::CallState* self =
       static_cast<HealthCheckClient::CallState*>(arg);
   GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "health_cancel");
-  self->Unref(DEBUG_LOCATION, "cancel");
+  self->call_->Unref(DEBUG_LOCATION, "cancel");
 }
 
 void HealthCheckClient::CallState::StartCancel(void* arg, grpc_error* error) {
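
Taken together, these hunks change who owns CallState's lifetime: it no longer inherits InternallyRefCounted, every Ref/Unref is forwarded to the raw SubchannelCall*, and the new AfterCallStackDestruction closure (registered via SetAfterCallStackDestroy) deletes the CallState once the call stack itself is gone. A self-contained analogue of that scheme, with all names hypothetical:

    #include <functional>

    // The refcounted object runs an after-destroy hook, so its owner can free
    // itself without carrying a refcount of its own.
    class CallStack {
     public:
      void Ref() { ++refs_; }
      void Unref() {
        if (--refs_ == 0) {
          std::function<void()> hook = std::move(after_destroy_);
          delete this;       // destroy the call stack first...
          if (hook) hook();  // ...then let the owner clean itself up
        }
      }
      void SetAfterDestroy(std::function<void()> hook) {
        after_destroy_ = std::move(hook);
      }

     private:
      int refs_ = 1;  // single-threaded toy; the real code tracks refs atomically
      std::function<void()> after_destroy_;
    };

    class Owner {
     public:
      Owner() : call_(new CallStack()) {
        call_->SetAfterDestroy([this] { delete this; });
      }
      void Done() { call_->Unref(); }  // dropping the last ref frees Owner too

     private:
      CallStack* call_;
    };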
@@ -455,8 +456,10 @@ void HealthCheckClient::CallState::StartCancel(void* arg, grpc_error* error) {
 }
 
 void HealthCheckClient::CallState::Cancel() {
-  if (call_ != nullptr) {
-    Ref(DEBUG_LOCATION, "cancel").release();
+  bool expected = false;
+  if (cancelled_.CompareExchangeStrong(&expected, true, MemoryOrder::ACQ_REL,
+                                       MemoryOrder::ACQUIRE)) {
+    call_->Ref(DEBUG_LOCATION, "cancel").release();
     GRPC_CALL_COMBINER_START(
         &call_combiner_,
         GRPC_CLOSURE_CREATE(StartCancel, this, grpc_schedule_on_exec_ctx),
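
The rewritten Cancel() drops the call_ != nullptr check (call_ is always non-null once StartCall() succeeds) and instead guards against double cancellation with a compare-and-swap on the new cancelled_ flag: only the caller that flips it from false to true starts the cancel batch. The same idiom expressed with std::atomic:

    #include <atomic>

    std::atomic<bool> cancelled{false};

    void Cancel() {
      bool expected = false;
      // Succeeds for exactly one caller; concurrent or repeated calls observe
      // expected == true and skip the cancel work.
      if (cancelled.compare_exchange_strong(expected, true,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire)) {
        // ...start the cancel_stream batch here (exactly once)...
      }
    }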
@@ -470,7 +473,7 @@ void HealthCheckClient::CallState::OnComplete(void* arg, grpc_error* error) {
   GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "on_complete");
   grpc_metadata_batch_destroy(&self->send_initial_metadata_);
   grpc_metadata_batch_destroy(&self->send_trailing_metadata_);
-  self->Unref(DEBUG_LOCATION, "on_complete");
+  self->call_->Unref(DEBUG_LOCATION, "on_complete");
 }
 
 void HealthCheckClient::CallState::RecvInitialMetadataReady(void* arg,
@@ -479,7 +482,7 @@ void HealthCheckClient::CallState::RecvInitialMetadataReady(void* arg,
       static_cast<HealthCheckClient::CallState*>(arg);
   GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "recv_initial_metadata_ready");
   grpc_metadata_batch_destroy(&self->recv_initial_metadata_);
-  self->Unref(DEBUG_LOCATION, "recv_initial_metadata_ready");
+  self->call_->Unref(DEBUG_LOCATION, "recv_initial_metadata_ready");
 }
 
 void HealthCheckClient::CallState::DoneReadingRecvMessage(grpc_error* error) {
@@ -488,7 +491,7 @@ void HealthCheckClient::CallState::DoneReadingRecvMessage(grpc_error* error) {
     GRPC_ERROR_UNREF(error);
     Cancel();
     grpc_slice_buffer_destroy_internal(&recv_message_buffer_);
-    Unref(DEBUG_LOCATION, "recv_message_ready");
+    call_->Unref(DEBUG_LOCATION, "recv_message_ready");
     return;
   }
   const bool healthy = DecodeResponse(&recv_message_buffer_, &error);
@@ -498,7 +501,7 @@ void HealthCheckClient::CallState::DoneReadingRecvMessage(grpc_error* error) {
     error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("backend unhealthy");
   }
   health_check_client_->SetHealthStatus(state, error);
-  gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(1));
+  seen_response_.Store(true, MemoryOrder::RELEASE);
   grpc_slice_buffer_destroy_internal(&recv_message_buffer_);
   // Start another recv_message batch.
   // This re-uses the ref we're holding.
@@ -561,7 +564,7 @@ void HealthCheckClient::CallState::RecvMessageReady(void* arg,
       static_cast<HealthCheckClient::CallState*>(arg);
   GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "recv_message_ready");
   if (self->recv_message_ == nullptr) {
-    self->Unref(DEBUG_LOCATION, "recv_message_ready");
+    self->call_->Unref(DEBUG_LOCATION, "recv_message_ready");
     return;
   }
   grpc_slice_buffer_init(&self->recv_message_buffer_);
@@ -587,7 +590,7 @@ void HealthCheckClient::CallState::RecvTrailingMetadataReady(
     status = grpc_get_status_code_from_metadata(
         self->recv_trailing_metadata_.idx.named.grpc_status->md);
   }
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO,
             "HealthCheckClient %p CallState %p: health watch failed with "
             "status %d",
@@ -619,7 +622,7 @@ void HealthCheckClient::CallState::CallEndedRetry(void* arg,
   HealthCheckClient::CallState* self =
       static_cast<HealthCheckClient::CallState*>(arg);
   self->CallEnded(true /* retry */);
-  self->Unref(DEBUG_LOCATION, "call_end_closure");
+  self->call_->Unref(DEBUG_LOCATION, "call_end_closure");
 }
 
 void HealthCheckClient::CallState::CallEnded(bool retry) {
@@ -631,7 +634,7 @@ void HealthCheckClient::CallState::CallEnded(bool retry) {
     health_check_client_->call_state_.reset();
     if (retry) {
       GPR_ASSERT(!health_check_client_->shutting_down_);
-      if (static_cast<bool>(gpr_atm_acq_load(&seen_response_))) {
+      if (seen_response_.Load(MemoryOrder::ACQUIRE)) {
        // If the call fails after we've gotten a successful response, reset
         // the backoff and restart the call immediately.
         health_check_client_->retry_backoff_.Reset();
@@ -642,7 +645,9 @@ void HealthCheckClient::CallState::CallEnded(bool retry) {
       }
     }
   }
-  Unref(DEBUG_LOCATION, "call_ended");
+  // When the last ref to the call stack goes away, the CallState object
+  // will be automatically destroyed.
+  call_->Unref(DEBUG_LOCATION, "call_ended");
 }
 
 }  // namespace grpc_core

data/src/core/ext/filters/client_channel/health/health_check_client.h

@@ -22,15 +22,16 @@
 #include <grpc/support/port_platform.h>
 
 #include <grpc/grpc.h>
-#include <grpc/support/atm.h>
 #include <grpc/support/sync.h>
 
 #include "src/core/ext/filters/client_channel/client_channel_channelz.h"
 #include "src/core/ext/filters/client_channel/subchannel.h"
 #include "src/core/lib/backoff/backoff.h"
-#include "src/core/lib/gpr/arena.h"
+#include "src/core/lib/gprpp/arena.h"
+#include "src/core/lib/gprpp/atomic.h"
 #include "src/core/lib/gprpp/orphanable.h"
 #include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/gprpp/sync.h"
 #include "src/core/lib/iomgr/call_combiner.h"
 #include "src/core/lib/iomgr/closure.h"
 #include "src/core/lib/iomgr/polling_entity.h"
@@ -60,7 +61,7 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
 
  private:
  // Contains a call to the backend and all the data related to the call.
-  class CallState : public InternallyRefCounted<CallState> {
+  class CallState : public Orphanable {
   public:
    CallState(RefCountedPtr<HealthCheckClient> health_check_client,
              grpc_pollset_set* interested_parties_);
@@ -91,15 +92,19 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
    grpc_error* PullSliceFromRecvMessage();
    void DoneReadingRecvMessage(grpc_error* error);
 
+    static void AfterCallStackDestruction(void* arg, grpc_error* error);
+
    RefCountedPtr<HealthCheckClient> health_check_client_;
    grpc_polling_entity pollent_;
 
-    gpr_arena* arena_;
-    grpc_call_combiner call_combiner_;
+    Arena* arena_;
+    grpc_core::CallCombiner call_combiner_;
    grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
 
-    // The streaming call to the backend. Always non-NULL.
-    RefCountedPtr<SubchannelCall> call_;
+    // The streaming call to the backend. Always non-null.
+    // Refs are tracked manually; when the last ref is released, the
+    // CallState object will be automatically destroyed.
+    SubchannelCall* call_;
 
    grpc_transport_stream_op_batch_payload payload_;
    grpc_transport_stream_op_batch batch_;
@@ -126,12 +131,18 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
    OrphanablePtr<ByteStream> recv_message_;
    grpc_closure recv_message_ready_;
    grpc_slice_buffer recv_message_buffer_;
-    gpr_atm seen_response_;
+    Atomic<bool> seen_response_{false};
 
    // recv_trailing_metadata
    grpc_metadata_batch recv_trailing_metadata_;
    grpc_transport_stream_stats collect_stats_;
    grpc_closure recv_trailing_metadata_ready_;
+
+    // True if the cancel_stream batch has been started.
+    Atomic<bool> cancelled_{false};
+
+    // Closure for call stack destruction.
+    grpc_closure after_call_stack_destruction_;
  };
 
  void StartCall();
@@ -149,7 +160,7 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
  grpc_pollset_set* interested_parties_;  // Do not own.
  RefCountedPtr<channelz::SubchannelNode> channelz_node_;
 
-  gpr_mu mu_;
+  Mutex mu_;
  grpc_connectivity_state state_ = GRPC_CHANNEL_CONNECTING;
  grpc_error* error_ = GRPC_ERROR_NONE;
  grpc_connectivity_state* notify_state_ = nullptr;
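
Swapping gpr_mu for grpc_core::Mutex (from the new gprpp/sync.h) is why the explicit gpr_mu_init()/gpr_mu_destroy() calls disappeared from the constructor and destructor earlier in this diff: the C++ type ties lock setup and teardown to object lifetime. A minimal RAII sketch, with std::mutex standing in for grpc_core::Mutex:

    #include <mutex>

    class HealthWatcher {
     public:
      void SetState(int state) {
        std::lock_guard<std::mutex> lock(mu_);  // cf. grpc_core::MutexLock
        state_ = state;
      }  // unlocks automatically, even on early return or exception

     private:
      std::mutex mu_;  // initialized by construction; no init/destroy calls
      int state_ = 0;
    };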
data/src/core/ext/filters/client_channel/http_connect_handshaker.cc

@@ -31,9 +31,8 @@
 #include "src/core/ext/filters/client_channel/resolver_registry.h"
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/channel/handshaker_registry.h"
-#include "src/core/lib/gpr/env.h"
 #include "src/core/lib/gpr/string.h"
-#include "src/core/lib/gprpp/mutex_lock.h"
+#include "src/core/lib/gprpp/sync.h"
 #include "src/core/lib/http/format_request.h"
 #include "src/core/lib/http/parser.h"
 #include "src/core/lib/slice/slice_internal.h"

data/src/core/ext/filters/client_channel/http_connect_handshaker.h

@@ -25,7 +25,7 @@
 
 /// Channel arg indicating HTTP CONNECT headers (string).
 /// Multiple headers are separated by newlines. Key/value pairs are
-/// seperated by colons.
+/// separated by colons.
 #define GRPC_ARG_HTTP_CONNECT_HEADERS "grpc.http_connect_headers"
 
 /// Registers handshaker factory.
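
The corrected comment documents the wire format for GRPC_ARG_HTTP_CONNECT_HEADERS: newline-separated lines, each a colon-separated "key:value" pair. A hedged example of building such a channel arg with the public C API (the header names and values below are made up for illustration):

    #include <grpc/grpc.h>

    // Carries two proxy headers in the documented format:
    // one "key:value" pair per line, lines separated by '\n'.
    grpc_arg MakeHttpConnectHeadersArg() {
      grpc_arg arg;
      arg.type = GRPC_ARG_STRING;
      arg.key = const_cast<char*>("grpc.http_connect_headers");
      arg.value.string = const_cast<char*>(
          "Proxy-Authorization:Basic Zm9vOmJhcg==\n"
          "X-Custom-Header:demo");
      return arg;
    }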