grpc 1.16.0 → 1.17.0.pre1


Potentially problematic release: this version of grpc might be problematic.

Files changed (173)
  1. checksums.yaml +4 -4
  2. data/Makefile +299 -133
  3. data/include/grpc/grpc.h +11 -1
  4. data/include/grpc/grpc_posix.h +0 -8
  5. data/include/grpc/impl/codegen/grpc_types.h +3 -0
  6. data/src/core/ext/filters/client_channel/client_channel.cc +336 -345
  7. data/src/core/ext/filters/client_channel/client_channel.h +6 -2
  8. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +3 -1
  9. data/src/core/ext/filters/client_channel/client_channel_channelz.h +0 -7
  10. data/src/core/ext/filters/client_channel/health/health.pb.c +23 -0
  11. data/src/core/ext/filters/client_channel/health/health.pb.h +73 -0
  12. data/src/core/ext/filters/client_channel/health/health_check_client.cc +652 -0
  13. data/src/core/ext/filters/client_channel/health/health_check_client.h +173 -0
  14. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +2 -1
  15. data/src/core/ext/filters/client_channel/http_proxy.cc +1 -1
  16. data/src/core/ext/filters/client_channel/lb_policy.h +17 -14
  17. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +15 -11
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +21 -15
  19. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +18 -10
  20. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +12 -9
  21. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +19 -8
  22. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +1832 -0
  23. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.h +36 -0
  24. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h +36 -0
  25. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc +107 -0
  26. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc +85 -0
  27. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h +72 -0
  28. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc +307 -0
  29. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h +89 -0
  30. data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
  31. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +5 -0
  32. data/src/core/ext/filters/client_channel/lb_policy_registry.h +4 -0
  33. data/src/core/ext/filters/client_channel/parse_address.h +1 -1
  34. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +19 -22
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +41 -39
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +3 -2
  37. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +4 -1
  38. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +15 -2
  39. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +5 -1
  40. data/src/core/ext/filters/client_channel/resolver_factory.h +1 -1
  41. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +384 -0
  42. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +146 -0
  43. data/src/core/ext/filters/client_channel/subchannel.cc +361 -103
  44. data/src/core/ext/filters/client_channel/subchannel.h +14 -8
  45. data/src/core/ext/filters/deadline/deadline_filter.cc +19 -23
  46. data/src/core/ext/filters/deadline/deadline_filter.h +9 -13
  47. data/src/core/ext/filters/http/client/http_client_filter.cc +29 -19
  48. data/src/core/ext/filters/http/client_authority_filter.cc +2 -3
  49. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +28 -16
  50. data/src/core/ext/filters/http/server/http_server_filter.cc +31 -20
  51. data/src/core/ext/filters/message_size/message_size_filter.cc +50 -45
  52. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +13 -6
  53. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +1 -1
  54. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +58 -8
  55. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +1 -1
  56. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +175 -173
  57. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +2 -1
  58. data/src/core/ext/transport/chttp2/transport/frame_data.cc +4 -10
  59. data/src/core/ext/transport/chttp2/transport/frame_data.h +10 -12
  60. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +1 -1
  61. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +28 -25
  62. data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +0 -12
  63. data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +12 -9
  64. data/src/core/ext/transport/chttp2/transport/internal.h +109 -94
  65. data/src/core/ext/transport/chttp2/transport/parsing.cc +4 -2
  66. data/src/core/ext/transport/inproc/inproc_transport.cc +280 -300
  67. data/src/core/lib/channel/channel_stack.cc +5 -4
  68. data/src/core/lib/channel/channel_stack.h +4 -4
  69. data/src/core/lib/channel/channel_stack_builder.cc +14 -2
  70. data/src/core/lib/channel/channel_stack_builder.h +8 -0
  71. data/src/core/lib/channel/channel_trace.cc +6 -2
  72. data/src/core/lib/channel/channelz.cc +137 -5
  73. data/src/core/lib/channel/channelz.h +32 -6
  74. data/src/core/lib/channel/channelz_registry.cc +134 -28
  75. data/src/core/lib/channel/channelz_registry.h +25 -3
  76. data/src/core/lib/channel/context.h +4 -4
  77. data/src/core/lib/channel/handshaker.cc +7 -6
  78. data/src/core/lib/channel/handshaker.h +7 -8
  79. data/src/core/lib/channel/handshaker_factory.cc +3 -2
  80. data/src/core/lib/channel/handshaker_factory.h +2 -0
  81. data/src/core/lib/channel/handshaker_registry.cc +6 -2
  82. data/src/core/lib/channel/handshaker_registry.h +1 -0
  83. data/src/core/lib/gpr/arena.cc +84 -37
  84. data/src/core/lib/gpr/arena.h +2 -0
  85. data/src/core/lib/gpr/mpscq.h +4 -2
  86. data/src/core/lib/gprpp/inlined_vector.h +8 -0
  87. data/src/core/lib/gprpp/ref_counted.h +105 -18
  88. data/src/core/lib/gprpp/ref_counted_ptr.h +11 -0
  89. data/src/core/lib/http/httpcli_security_connector.cc +7 -4
  90. data/src/core/lib/iomgr/call_combiner.cc +2 -0
  91. data/src/core/lib/iomgr/call_combiner.h +2 -2
  92. data/src/core/lib/iomgr/closure.h +1 -0
  93. data/src/core/lib/iomgr/error.cc +16 -31
  94. data/src/core/lib/iomgr/error.h +29 -4
  95. data/src/core/lib/iomgr/error_internal.h +0 -2
  96. data/src/core/lib/iomgr/ev_epoll1_linux.cc +7 -3
  97. data/src/core/lib/iomgr/ev_posix.cc +0 -2
  98. data/src/core/lib/iomgr/polling_entity.h +4 -4
  99. data/src/core/lib/iomgr/resource_quota.cc +64 -10
  100. data/src/core/lib/iomgr/resource_quota.h +21 -6
  101. data/src/core/lib/iomgr/socket_utils_common_posix.cc +11 -5
  102. data/src/core/lib/iomgr/tcp_client_custom.cc +14 -3
  103. data/src/core/lib/iomgr/tcp_client_posix.cc +2 -0
  104. data/src/core/lib/iomgr/tcp_posix.cc +4 -2
  105. data/src/core/lib/iomgr/timer_manager.cc +1 -1
  106. data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +3 -4
  107. data/src/core/lib/security/context/security_context.cc +20 -13
  108. data/src/core/lib/security/context/security_context.h +27 -19
  109. data/src/core/lib/security/credentials/alts/alts_credentials.cc +1 -1
  110. data/src/core/lib/security/credentials/credentials.h +2 -2
  111. data/src/core/lib/security/credentials/fake/fake_credentials.cc +1 -0
  112. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +39 -54
  113. data/src/core/lib/security/credentials/google_default/google_default_credentials.h +3 -2
  114. data/src/core/lib/security/credentials/local/local_credentials.cc +1 -1
  115. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +1 -2
  116. data/src/core/lib/security/credentials/ssl/ssl_credentials.h +2 -0
  117. data/src/core/lib/security/security_connector/{alts_security_connector.cc → alts/alts_security_connector.cc} +10 -9
  118. data/src/core/lib/security/security_connector/{alts_security_connector.h → alts/alts_security_connector.h} +3 -3
  119. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +310 -0
  120. data/src/core/lib/security/security_connector/fake/fake_security_connector.h +42 -0
  121. data/src/core/lib/security/security_connector/{local_security_connector.cc → local/local_security_connector.cc} +4 -3
  122. data/src/core/lib/security/security_connector/{local_security_connector.h → local/local_security_connector.h} +3 -3
  123. data/src/core/lib/security/security_connector/security_connector.cc +4 -1039
  124. data/src/core/lib/security/security_connector/security_connector.h +6 -114
  125. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +474 -0
  126. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.h +77 -0
  127. data/src/core/lib/security/security_connector/ssl_utils.cc +345 -0
  128. data/src/core/lib/security/security_connector/ssl_utils.h +93 -0
  129. data/src/core/lib/security/transport/client_auth_filter.cc +28 -17
  130. data/src/core/lib/security/transport/secure_endpoint.cc +51 -41
  131. data/src/core/lib/security/transport/security_handshaker.cc +6 -7
  132. data/src/core/lib/security/transport/server_auth_filter.cc +39 -31
  133. data/src/core/lib/surface/call.cc +100 -80
  134. data/src/core/lib/surface/call.h +4 -0
  135. data/src/core/lib/surface/channel.cc +27 -13
  136. data/src/core/lib/surface/channel.h +4 -3
  137. data/src/core/lib/surface/completion_queue.cc +8 -1
  138. data/src/core/lib/surface/init.cc +1 -0
  139. data/src/core/lib/surface/server.cc +111 -46
  140. data/src/core/lib/surface/server.h +16 -2
  141. data/src/core/lib/surface/version.cc +2 -2
  142. data/src/core/lib/transport/error_utils.cc +4 -2
  143. data/src/core/lib/transport/metadata.cc +3 -2
  144. data/src/core/lib/transport/metadata.h +3 -2
  145. data/src/core/lib/transport/metadata_batch.cc +1 -0
  146. data/src/core/lib/transport/metadata_batch.h +4 -2
  147. data/src/core/lib/transport/static_metadata.cc +225 -221
  148. data/src/core/lib/transport/static_metadata.h +74 -71
  149. data/src/core/lib/transport/transport.h +44 -26
  150. data/src/core/{ext/filters/client_channel → lib/uri}/uri_parser.cc +1 -1
  151. data/src/core/{ext/filters/client_channel → lib/uri}/uri_parser.h +3 -3
  152. data/src/core/plugin_registry/grpc_plugin_registry.cc +4 -4
  153. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +356 -77
  154. data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +46 -36
  155. data/src/core/tsi/alts/handshaker/alts_shared_resource.cc +83 -0
  156. data/src/core/tsi/alts/handshaker/alts_shared_resource.h +73 -0
  157. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +122 -175
  158. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +33 -22
  159. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +38 -10
  160. data/src/core/tsi/transport_security.cc +18 -1
  161. data/src/core/tsi/transport_security.h +2 -1
  162. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +4 -2
  163. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +6 -3
  164. data/src/ruby/lib/grpc/version.rb +1 -1
  165. data/src/ruby/spec/pb/codegen/grpc/testing/package_options.proto +28 -0
  166. data/src/ruby/spec/pb/codegen/package_option_spec.rb +2 -3
  167. metadata +58 -40
  168. data/src/core/ext/filters/client_channel/method_params.cc +0 -178
  169. data/src/core/ext/filters/client_channel/method_params.h +0 -78
  170. data/src/core/tsi/alts/handshaker/alts_tsi_event.cc +0 -75
  171. data/src/core/tsi/alts/handshaker/alts_tsi_event.h +0 -93
  172. data/src/core/tsi/alts_transport_security.cc +0 -65
  173. data/src/core/tsi/alts_transport_security.h +0 -47

data/src/core/ext/filters/client_channel/client_channel.h

@@ -40,9 +40,13 @@ extern grpc_core::TraceFlag grpc_client_channel_trace;
 
 extern const grpc_channel_filter grpc_client_channel_filter;
 
+void grpc_client_channel_set_channelz_node(
+    grpc_channel_element* elem, grpc_core::channelz::ClientChannelNode* node);
+
 void grpc_client_channel_populate_child_refs(
-    grpc_channel_element* elem, grpc_core::ChildRefsList* child_subchannels,
-    grpc_core::ChildRefsList* child_channels);
+    grpc_channel_element* elem,
+    grpc_core::channelz::ChildRefsList* child_subchannels,
+    grpc_core::channelz::ChildRefsList* child_channels);
 
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
     grpc_channel_element* elem, int try_to_connect);

data/src/core/ext/filters/client_channel/client_channel_channelz.cc

@@ -49,6 +49,7 @@ ClientChannelNode::ClientChannelNode(grpc_channel* channel,
     : ChannelNode(channel, channel_tracer_max_nodes, is_top_level_channel) {
   client_channel_ =
       grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
+  grpc_client_channel_set_channelz_node(client_channel_, this);
   GPR_ASSERT(client_channel_->filter == &grpc_client_channel_filter);
 }
 
@@ -127,7 +128,8 @@ void SubchannelNode::PopulateConnectivityState(grpc_json* json) {
   if (subchannel_ == nullptr) {
     state = GRPC_CHANNEL_SHUTDOWN;
   } else {
-    state = grpc_subchannel_check_connectivity(subchannel_, nullptr);
+    state = grpc_subchannel_check_connectivity(
+        subchannel_, nullptr, true /* inhibit_health_checking */);
   }
   json = grpc_json_create_child(nullptr, json, "state", nullptr,
                                 GRPC_JSON_OBJECT, false);

data/src/core/ext/filters/client_channel/client_channel_channelz.h

@@ -25,17 +25,10 @@
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/channel/channel_trace.h"
 #include "src/core/lib/channel/channelz.h"
-#include "src/core/lib/gprpp/inlined_vector.h"
 
 typedef struct grpc_subchannel grpc_subchannel;
 
 namespace grpc_core {
-
-// TODO(ncteisen), this only contains the uuids of the children for now,
-// since that is all that is strictly needed. In a future enhancement we will
-// add human readable names as in the channelz.proto
-typedef InlinedVector<intptr_t, 10> ChildRefsList;
-
 namespace channelz {
 
 // Subtype of ChannelNode that overrides and provides client_channel specific

data/src/core/ext/filters/client_channel/health/health.pb.c (new file)

@@ -0,0 +1,23 @@
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.3.7-dev */
+
+#include "src/core/ext/filters/client_channel/health/health.pb.h"
+/* @@protoc_insertion_point(includes) */
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+
+
+const pb_field_t grpc_health_v1_HealthCheckRequest_fields[2] = {
+    PB_FIELD(  1, STRING  , OPTIONAL, STATIC  , FIRST, grpc_health_v1_HealthCheckRequest, service, service, 0),
+    PB_LAST_FIELD
+};
+
+const pb_field_t grpc_health_v1_HealthCheckResponse_fields[2] = {
+    PB_FIELD(  1, UENUM   , OPTIONAL, STATIC  , FIRST, grpc_health_v1_HealthCheckResponse, status, status, 0),
+    PB_LAST_FIELD
+};
+
+
+/* @@protoc_insertion_point(eof) */

data/src/core/ext/filters/client_channel/health/health.pb.h (new file)

@@ -0,0 +1,73 @@
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.3.7-dev */
+
+#ifndef PB_GRPC_HEALTH_V1_HEALTH_PB_H_INCLUDED
+#define PB_GRPC_HEALTH_V1_HEALTH_PB_H_INCLUDED
+#include "pb.h"
+/* @@protoc_insertion_point(includes) */
+#if PB_PROTO_HEADER_VERSION != 30
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Enum definitions */
+typedef enum _grpc_health_v1_HealthCheckResponse_ServingStatus {
+    grpc_health_v1_HealthCheckResponse_ServingStatus_UNKNOWN = 0,
+    grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING = 1,
+    grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING = 2,
+    grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN = 3
+} grpc_health_v1_HealthCheckResponse_ServingStatus;
+#define _grpc_health_v1_HealthCheckResponse_ServingStatus_MIN grpc_health_v1_HealthCheckResponse_ServingStatus_UNKNOWN
+#define _grpc_health_v1_HealthCheckResponse_ServingStatus_MAX grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN
+#define _grpc_health_v1_HealthCheckResponse_ServingStatus_ARRAYSIZE ((grpc_health_v1_HealthCheckResponse_ServingStatus)(grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN+1))
+
+/* Struct definitions */
+typedef struct _grpc_health_v1_HealthCheckRequest {
+    bool has_service;
+    char service[200];
+/* @@protoc_insertion_point(struct:grpc_health_v1_HealthCheckRequest) */
+} grpc_health_v1_HealthCheckRequest;
+
+typedef struct _grpc_health_v1_HealthCheckResponse {
+    bool has_status;
+    grpc_health_v1_HealthCheckResponse_ServingStatus status;
+/* @@protoc_insertion_point(struct:grpc_health_v1_HealthCheckResponse) */
+} grpc_health_v1_HealthCheckResponse;
+
+/* Default values for struct fields */
+
+/* Initializer values for message structs */
+#define grpc_health_v1_HealthCheckRequest_init_default {false, ""}
+#define grpc_health_v1_HealthCheckResponse_init_default {false, (grpc_health_v1_HealthCheckResponse_ServingStatus)0}
+#define grpc_health_v1_HealthCheckRequest_init_zero {false, ""}
+#define grpc_health_v1_HealthCheckResponse_init_zero {false, (grpc_health_v1_HealthCheckResponse_ServingStatus)0}
+
+/* Field tags (for use in manual encoding/decoding) */
+#define grpc_health_v1_HealthCheckRequest_service_tag 1
+#define grpc_health_v1_HealthCheckResponse_status_tag 1
+
+/* Struct field encoding specification for nanopb */
+extern const pb_field_t grpc_health_v1_HealthCheckRequest_fields[2];
+extern const pb_field_t grpc_health_v1_HealthCheckResponse_fields[2];
+
+/* Maximum encoded size of messages (where known) */
+#define grpc_health_v1_HealthCheckRequest_size 203
+#define grpc_health_v1_HealthCheckResponse_size 2
+
+/* Message IDs (where set with "msgid" option) */
+#ifdef PB_MSGID
+
+#define HEALTH_MESSAGES \
+
+
+#endif
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+/* @@protoc_insertion_point(eof) */
+
+#endif
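
Aside (not part of the diff): a minimal, self-contained sketch of how these generated nanopb descriptors are driven, mirroring what EncodeRequest/DecodeResponse in health_check_client.cc below do. The service name is a placeholder, and the buffer is sized from the generated grpc_health_v1_HealthCheckRequest_size constant.

#include <stdio.h>
#include <string.h>
#include "pb_decode.h"
#include "pb_encode.h"
#include "src/core/ext/filters/client_channel/health/health.pb.h"

bool roundtrip_health_request(void) {
  /* Fill in the request; optional fields need their has_ flag set. */
  grpc_health_v1_HealthCheckRequest request =
      grpc_health_v1_HealthCheckRequest_init_zero;
  request.has_service = true;
  snprintf(request.service, sizeof(request.service), "%s",
           "my.package.MyService");
  /* Encode into a stack buffer sized from the generated max-size constant. */
  uint8_t buffer[grpc_health_v1_HealthCheckRequest_size];
  pb_ostream_t ostream = pb_ostream_from_buffer(buffer, sizeof(buffer));
  if (!pb_encode(&ostream, grpc_health_v1_HealthCheckRequest_fields,
                 &request)) {
    return false;
  }
  /* Decode it back out and verify the service name survived. */
  grpc_health_v1_HealthCheckRequest decoded =
      grpc_health_v1_HealthCheckRequest_init_zero;
  pb_istream_t istream = pb_istream_from_buffer(buffer, ostream.bytes_written);
  if (!pb_decode(&istream, grpc_health_v1_HealthCheckRequest_fields,
                 &decoded)) {
    return false;
  }
  return decoded.has_service && strcmp(decoded.service, request.service) == 0;
}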

data/src/core/ext/filters/client_channel/health/health_check_client.cc (new file)

@@ -0,0 +1,652 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <grpc/support/port_platform.h>
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include "src/core/ext/filters/client_channel/health/health_check_client.h"
+
+#include "pb_decode.h"
+#include "pb_encode.h"
+#include "src/core/ext/filters/client_channel/health/health.pb.h"
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/gprpp/mutex_lock.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/transport/error_utils.h"
+#include "src/core/lib/transport/status_metadata.h"
+
+#define HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS 1
+#define HEALTH_CHECK_RECONNECT_BACKOFF_MULTIPLIER 1.6
+#define HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS 120
+#define HEALTH_CHECK_RECONNECT_JITTER 0.2
+
+grpc_core::TraceFlag grpc_health_check_client_trace(false,
+                                                    "health_check_client");
+
+namespace grpc_core {
+
+//
+// HealthCheckClient
+//
+
+HealthCheckClient::HealthCheckClient(
+    const char* service_name,
+    RefCountedPtr<ConnectedSubchannel> connected_subchannel,
+    grpc_pollset_set* interested_parties,
+    grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode> channelz_node)
+    : InternallyRefCountedWithTracing<HealthCheckClient>(
+          &grpc_health_check_client_trace),
+      service_name_(service_name),
+      connected_subchannel_(std::move(connected_subchannel)),
+      interested_parties_(interested_parties),
+      channelz_node_(std::move(channelz_node)),
+      retry_backoff_(
+          BackOff::Options()
+              .set_initial_backoff(
+                  HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
+              .set_multiplier(HEALTH_CHECK_RECONNECT_BACKOFF_MULTIPLIER)
+              .set_jitter(HEALTH_CHECK_RECONNECT_JITTER)
+              .set_max_backoff(HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS *
+                               1000)) {
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO, "created HealthCheckClient %p", this);
+  }
+  GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this,
+                    grpc_schedule_on_exec_ctx);
+  gpr_mu_init(&mu_);
+  StartCall();
+}
+
+HealthCheckClient::~HealthCheckClient() {
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this);
+  }
+  GRPC_ERROR_UNREF(error_);
+  gpr_mu_destroy(&mu_);
+}
+
+void HealthCheckClient::NotifyOnHealthChange(grpc_connectivity_state* state,
+                                             grpc_closure* closure) {
+  MutexLock lock(&mu_);
+  GPR_ASSERT(notify_state_ == nullptr);
+  if (*state != state_) {
+    *state = state_;
+    GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_REF(error_));
+    return;
+  }
+  notify_state_ = state;
+  on_health_changed_ = closure;
+}
+
+void HealthCheckClient::SetHealthStatus(grpc_connectivity_state state,
+                                        grpc_error* error) {
+  MutexLock lock(&mu_);
+  SetHealthStatusLocked(state, error);
+}
+
+void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state,
+                                              grpc_error* error) {
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO, "HealthCheckClient %p: setting state=%d error=%s", this,
+            state, grpc_error_string(error));
+  }
+  if (notify_state_ != nullptr && *notify_state_ != state) {
+    *notify_state_ = state;
+    notify_state_ = nullptr;
+    GRPC_CLOSURE_SCHED(on_health_changed_, GRPC_ERROR_REF(error));
+    on_health_changed_ = nullptr;
+  }
+  state_ = state;
+  GRPC_ERROR_UNREF(error_);
+  error_ = error;
+}
+
+void HealthCheckClient::Orphan() {
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO, "HealthCheckClient %p: shutting down", this);
+  }
+  {
+    MutexLock lock(&mu_);
+    if (on_health_changed_ != nullptr) {
+      *notify_state_ = GRPC_CHANNEL_SHUTDOWN;
+      notify_state_ = nullptr;
+      GRPC_CLOSURE_SCHED(on_health_changed_, GRPC_ERROR_NONE);
+      on_health_changed_ = nullptr;
+    }
+    shutting_down_ = true;
+    call_state_.reset();
+    if (retry_timer_callback_pending_) {
+      grpc_timer_cancel(&retry_timer_);
+    }
+  }
+  Unref(DEBUG_LOCATION, "orphan");
+}
+
+void HealthCheckClient::StartCall() {
+  MutexLock lock(&mu_);
+  StartCallLocked();
+}
+
+void HealthCheckClient::StartCallLocked() {
+  if (shutting_down_) return;
+  GPR_ASSERT(call_state_ == nullptr);
+  SetHealthStatusLocked(GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE);
+  call_state_ = MakeOrphanable<CallState>(Ref(), interested_parties_);
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO, "HealthCheckClient %p: created CallState %p", this,
+            call_state_.get());
+  }
+  call_state_->StartCall();
+}
+
+void HealthCheckClient::StartRetryTimer() {
+  MutexLock lock(&mu_);
+  SetHealthStatusLocked(
+      GRPC_CHANNEL_TRANSIENT_FAILURE,
+      GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "health check call failed; will retry after backoff"));
+  grpc_millis next_try = retry_backoff_.NextAttemptTime();
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO, "HealthCheckClient %p: health check call lost...", this);
+    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
+    if (timeout > 0) {
+      gpr_log(GPR_INFO,
+              "HealthCheckClient %p: ... will retry in %" PRId64 "ms.", this,
+              timeout);
+    } else {
+      gpr_log(GPR_INFO, "HealthCheckClient %p: ... retrying immediately.",
+              this);
+    }
+  }
+  // Ref for callback, tracked manually.
+  Ref(DEBUG_LOCATION, "health_retry_timer").release();
+  retry_timer_callback_pending_ = true;
+  grpc_timer_init(&retry_timer_, next_try, &retry_timer_callback_);
+}
+
+void HealthCheckClient::OnRetryTimer(void* arg, grpc_error* error) {
+  HealthCheckClient* self = static_cast<HealthCheckClient*>(arg);
+  {
+    MutexLock lock(&self->mu_);
+    self->retry_timer_callback_pending_ = false;
+    if (!self->shutting_down_ && error == GRPC_ERROR_NONE &&
+        self->call_state_ == nullptr) {
+      if (grpc_health_check_client_trace.enabled()) {
+        gpr_log(GPR_INFO, "HealthCheckClient %p: restarting health check call",
+                self);
+      }
+      self->StartCallLocked();
+    }
+  }
+  self->Unref(DEBUG_LOCATION, "health_retry_timer");
+}
+
+//
+// protobuf helpers
+//
+
+namespace {
+
+void EncodeRequest(const char* service_name,
+                   ManualConstructor<SliceBufferByteStream>* send_message) {
+  grpc_health_v1_HealthCheckRequest request_struct;
+  request_struct.has_service = true;
+  snprintf(request_struct.service, sizeof(request_struct.service), "%s",
+           service_name);
+  pb_ostream_t ostream;
+  memset(&ostream, 0, sizeof(ostream));
+  pb_encode(&ostream, grpc_health_v1_HealthCheckRequest_fields,
+            &request_struct);
+  grpc_slice request_slice = GRPC_SLICE_MALLOC(ostream.bytes_written);
+  ostream = pb_ostream_from_buffer(GRPC_SLICE_START_PTR(request_slice),
+                                   GRPC_SLICE_LENGTH(request_slice));
+  GPR_ASSERT(pb_encode(&ostream, grpc_health_v1_HealthCheckRequest_fields,
+                       &request_struct) != 0);
+  grpc_slice_buffer slice_buffer;
+  grpc_slice_buffer_init(&slice_buffer);
+  grpc_slice_buffer_add(&slice_buffer, request_slice);
+  send_message->Init(&slice_buffer, 0);
+  grpc_slice_buffer_destroy_internal(&slice_buffer);
+}
+
+// Returns true if healthy.
+// If there was an error parsing the response, sets *error and returns false.
+bool DecodeResponse(grpc_slice_buffer* slice_buffer, grpc_error** error) {
+  // If message is empty, assume unhealthy.
+  if (slice_buffer->length == 0) {
+    *error =
+        GRPC_ERROR_CREATE_FROM_STATIC_STRING("health check response was empty");
+    return false;
+  }
+  // Concatenate the slices to form a single string.
+  UniquePtr<uint8_t> recv_message_deleter;
+  uint8_t* recv_message;
+  if (slice_buffer->count == 1) {
+    recv_message = GRPC_SLICE_START_PTR(slice_buffer->slices[0]);
+  } else {
+    recv_message = static_cast<uint8_t*>(gpr_malloc(slice_buffer->length));
+    recv_message_deleter.reset(recv_message);
+    size_t offset = 0;
+    for (size_t i = 0; i < slice_buffer->count; ++i) {
+      memcpy(recv_message + offset,
+             GRPC_SLICE_START_PTR(slice_buffer->slices[i]),
+             GRPC_SLICE_LENGTH(slice_buffer->slices[i]));
+      offset += GRPC_SLICE_LENGTH(slice_buffer->slices[i]);
+    }
+  }
+  // Deserialize message.
+  grpc_health_v1_HealthCheckResponse response_struct;
+  pb_istream_t istream =
+      pb_istream_from_buffer(recv_message, slice_buffer->length);
+  if (!pb_decode(&istream, grpc_health_v1_HealthCheckResponse_fields,
+                 &response_struct)) {
+    // Can't parse message; assume unhealthy.
+    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+        "cannot parse health check response");
+    return false;
+  }
+  if (!response_struct.has_status) {
+    // Field not present; assume unhealthy.
+    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+        "status field not present in health check response");
+    return false;
+  }
+  return response_struct.status ==
+         grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING;
+}
+
+}  // namespace
+
+//
+// HealthCheckClient::CallState
+//
+
+HealthCheckClient::CallState::CallState(
+    RefCountedPtr<HealthCheckClient> health_check_client,
+    grpc_pollset_set* interested_parties)
+    : InternallyRefCountedWithTracing<CallState>(
+          &grpc_health_check_client_trace),
+      health_check_client_(std::move(health_check_client)),
+      pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)),
+      arena_(gpr_arena_create(health_check_client_->connected_subchannel_
+                                  ->GetInitialCallSizeEstimate(0))),
+      payload_(context_) {
+  grpc_call_combiner_init(&call_combiner_);
+  gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(0));
+}
+
+HealthCheckClient::CallState::~CallState() {
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO, "HealthCheckClient %p: destroying CallState %p",
+            health_check_client_.get(), this);
+  }
+  if (call_ != nullptr) GRPC_SUBCHANNEL_CALL_UNREF(call_, "call_ended");
+  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; i++) {
+    if (context_[i].destroy != nullptr) {
+      context_[i].destroy(context_[i].value);
+    }
+  }
+  // Unset the call combiner cancellation closure. This has the
+  // effect of scheduling the previously set cancellation closure, if
+  // any, so that it can release any internal references it may be
+  // holding to the call stack. Also flush the closures on exec_ctx so that
+  // filters that schedule cancel notification closures on exec_ctx do not
+  // need to take a ref of the call stack to guarantee closure liveness.
+  grpc_call_combiner_set_notify_on_cancel(&call_combiner_, nullptr);
+  grpc_core::ExecCtx::Get()->Flush();
+  grpc_call_combiner_destroy(&call_combiner_);
+  gpr_arena_destroy(arena_);
+}
+
+void HealthCheckClient::CallState::Orphan() {
+  grpc_call_combiner_cancel(&call_combiner_, GRPC_ERROR_CANCELLED);
+  Cancel();
+}
+
+void HealthCheckClient::CallState::StartCall() {
+  ConnectedSubchannel::CallArgs args = {
+      &pollent_,
+      GRPC_MDSTR_SLASH_GRPC_DOT_HEALTH_DOT_V1_DOT_HEALTH_SLASH_WATCH,
+      gpr_now(GPR_CLOCK_MONOTONIC),  // start_time
+      GRPC_MILLIS_INF_FUTURE,        // deadline
+      arena_,
+      context_,
+      &call_combiner_,
+      0,  // parent_data_size
+  };
+  grpc_error* error =
+      health_check_client_->connected_subchannel_->CreateCall(args, &call_);
+  if (error != GRPC_ERROR_NONE) {
+    gpr_log(GPR_ERROR,
+            "HealthCheckClient %p CallState %p: error creating health "
+            "checking call on subchannel (%s); will retry",
+            health_check_client_.get(), this, grpc_error_string(error));
+    GRPC_ERROR_UNREF(error);
+    // Schedule instead of running directly, since we must not be
+    // holding health_check_client_->mu_ when CallEnded() is called.
+    Ref(DEBUG_LOCATION, "call_end_closure").release();
+    GRPC_CLOSURE_SCHED(
+        GRPC_CLOSURE_INIT(&batch_.handler_private.closure, CallEndedRetry, this,
+                          grpc_schedule_on_exec_ctx),
+        GRPC_ERROR_NONE);
+    return;
+  }
+  // Initialize payload and batch.
+  memset(&batch_, 0, sizeof(batch_));
+  payload_.context = context_;
+  batch_.payload = &payload_;
+  // on_complete callback takes ref, handled manually.
+  Ref(DEBUG_LOCATION, "on_complete").release();
+  batch_.on_complete = GRPC_CLOSURE_INIT(&on_complete_, OnComplete, this,
+                                         grpc_schedule_on_exec_ctx);
+  // Add send_initial_metadata op.
+  grpc_metadata_batch_init(&send_initial_metadata_);
+  error = grpc_metadata_batch_add_head(
+      &send_initial_metadata_, &path_metadata_storage_,
+      grpc_mdelem_from_slices(
+          GRPC_MDSTR_PATH,
+          GRPC_MDSTR_SLASH_GRPC_DOT_HEALTH_DOT_V1_DOT_HEALTH_SLASH_WATCH));
+  GPR_ASSERT(error == GRPC_ERROR_NONE);
+  payload_.send_initial_metadata.send_initial_metadata =
+      &send_initial_metadata_;
+  payload_.send_initial_metadata.send_initial_metadata_flags = 0;
+  payload_.send_initial_metadata.peer_string = nullptr;
+  batch_.send_initial_metadata = true;
+  // Add send_message op.
+  EncodeRequest(health_check_client_->service_name_, &send_message_);
+  payload_.send_message.send_message.reset(send_message_.get());
+  batch_.send_message = true;
+  // Add send_trailing_metadata op.
+  grpc_metadata_batch_init(&send_trailing_metadata_);
+  payload_.send_trailing_metadata.send_trailing_metadata =
+      &send_trailing_metadata_;
+  batch_.send_trailing_metadata = true;
+  // Add recv_initial_metadata op.
+  grpc_metadata_batch_init(&recv_initial_metadata_);
+  payload_.recv_initial_metadata.recv_initial_metadata =
+      &recv_initial_metadata_;
+  payload_.recv_initial_metadata.recv_flags = nullptr;
+  payload_.recv_initial_metadata.trailing_metadata_available = nullptr;
+  payload_.recv_initial_metadata.peer_string = nullptr;
+  // recv_initial_metadata_ready callback takes ref, handled manually.
+  Ref(DEBUG_LOCATION, "recv_initial_metadata_ready").release();
+  payload_.recv_initial_metadata.recv_initial_metadata_ready =
+      GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_, RecvInitialMetadataReady,
+                        this, grpc_schedule_on_exec_ctx);
+  batch_.recv_initial_metadata = true;
+  // Add recv_message op.
+  payload_.recv_message.recv_message = &recv_message_;
+  // recv_message callback takes ref, handled manually.
+  Ref(DEBUG_LOCATION, "recv_message_ready").release();
+  payload_.recv_message.recv_message_ready = GRPC_CLOSURE_INIT(
+      &recv_message_ready_, RecvMessageReady, this, grpc_schedule_on_exec_ctx);
+  batch_.recv_message = true;
+  // Start batch.
+  StartBatch(&batch_);
+  // Initialize recv_trailing_metadata batch.
+  memset(&recv_trailing_metadata_batch_, 0,
+         sizeof(recv_trailing_metadata_batch_));
+  recv_trailing_metadata_batch_.payload = &payload_;
+  // Add recv_trailing_metadata op.
+  grpc_metadata_batch_init(&recv_trailing_metadata_);
+  payload_.recv_trailing_metadata.recv_trailing_metadata =
+      &recv_trailing_metadata_;
+  payload_.recv_trailing_metadata.collect_stats = &collect_stats_;
+  // This callback signals the end of the call, so it relies on the
+  // initial ref instead of taking a new ref. When it's invoked, the
+  // initial ref is released.
+  payload_.recv_trailing_metadata.recv_trailing_metadata_ready =
+      GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
+                        RecvTrailingMetadataReady, this,
+                        grpc_schedule_on_exec_ctx);
+  recv_trailing_metadata_batch_.recv_trailing_metadata = true;
+  // Start recv_trailing_metadata batch.
+  StartBatch(&recv_trailing_metadata_batch_);
+}
+
+void HealthCheckClient::CallState::StartBatchInCallCombiner(void* arg,
+                                                            grpc_error* error) {
+  grpc_transport_stream_op_batch* batch =
+      static_cast<grpc_transport_stream_op_batch*>(arg);
+  grpc_subchannel_call* call =
+      static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
+  grpc_subchannel_call_process_op(call, batch);
+}
+
+void HealthCheckClient::CallState::StartBatch(
+    grpc_transport_stream_op_batch* batch) {
+  batch->handler_private.extra_arg = call_;
+  GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
+                    batch, grpc_schedule_on_exec_ctx);
+  GRPC_CALL_COMBINER_START(&call_combiner_, &batch->handler_private.closure,
+                           GRPC_ERROR_NONE, "start_subchannel_batch");
+}
+
+void HealthCheckClient::CallState::OnCancelComplete(void* arg,
+                                                    grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "health_cancel");
+  self->Unref(DEBUG_LOCATION, "cancel");
+}
+
+void HealthCheckClient::CallState::StartCancel(void* arg, grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  auto* batch = grpc_make_transport_stream_op(
+      GRPC_CLOSURE_CREATE(OnCancelComplete, self, grpc_schedule_on_exec_ctx));
+  batch->cancel_stream = true;
+  batch->payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
+  grpc_subchannel_call_process_op(self->call_, batch);
+}
+
+void HealthCheckClient::CallState::Cancel() {
+  if (call_ != nullptr) {
+    Ref(DEBUG_LOCATION, "cancel").release();
+    GRPC_CALL_COMBINER_START(
+        &call_combiner_,
+        GRPC_CLOSURE_CREATE(StartCancel, this, grpc_schedule_on_exec_ctx),
+        GRPC_ERROR_NONE, "health_cancel");
+  }
+}
+
+void HealthCheckClient::CallState::OnComplete(void* arg, grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "on_complete");
+  grpc_metadata_batch_destroy(&self->send_initial_metadata_);
+  grpc_metadata_batch_destroy(&self->send_trailing_metadata_);
+  self->Unref(DEBUG_LOCATION, "on_complete");
+}
+
+void HealthCheckClient::CallState::RecvInitialMetadataReady(void* arg,
+                                                            grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "recv_initial_metadata_ready");
+  grpc_metadata_batch_destroy(&self->recv_initial_metadata_);
+  self->Unref(DEBUG_LOCATION, "recv_initial_metadata_ready");
+}
+
+void HealthCheckClient::CallState::DoneReadingRecvMessage(grpc_error* error) {
+  recv_message_.reset();
+  if (error != GRPC_ERROR_NONE) {
+    GRPC_ERROR_UNREF(error);
+    Cancel();
+    grpc_slice_buffer_destroy_internal(&recv_message_buffer_);
+    Unref(DEBUG_LOCATION, "recv_message_ready");
+    return;
+  }
+  const bool healthy = DecodeResponse(&recv_message_buffer_, &error);
+  const grpc_connectivity_state state =
+      healthy ? GRPC_CHANNEL_READY : GRPC_CHANNEL_TRANSIENT_FAILURE;
+  if (error == GRPC_ERROR_NONE && !healthy) {
+    error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("backend unhealthy");
+  }
+  health_check_client_->SetHealthStatus(state, error);
+  gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(1));
+  grpc_slice_buffer_destroy_internal(&recv_message_buffer_);
+  // Start another recv_message batch.
+  // This re-uses the ref we're holding.
+  // Note: Can't just reuse batch_ here, since we don't know that all
+  // callbacks from the original batch have completed yet.
+  memset(&recv_message_batch_, 0, sizeof(recv_message_batch_));
+  recv_message_batch_.payload = &payload_;
+  payload_.recv_message.recv_message = &recv_message_;
+  payload_.recv_message.recv_message_ready = GRPC_CLOSURE_INIT(
+      &recv_message_ready_, RecvMessageReady, this, grpc_schedule_on_exec_ctx);
+  recv_message_batch_.recv_message = true;
+  StartBatch(&recv_message_batch_);
+}
+
+grpc_error* HealthCheckClient::CallState::PullSliceFromRecvMessage() {
+  grpc_slice slice;
+  grpc_error* error = recv_message_->Pull(&slice);
+  if (error == GRPC_ERROR_NONE) {
+    grpc_slice_buffer_add(&recv_message_buffer_, slice);
+  }
+  return error;
+}
+
+void HealthCheckClient::CallState::ContinueReadingRecvMessage() {
+  while (recv_message_->Next(SIZE_MAX, &recv_message_ready_)) {
+    grpc_error* error = PullSliceFromRecvMessage();
+    if (error != GRPC_ERROR_NONE) {
+      DoneReadingRecvMessage(error);
+      return;
+    }
+    if (recv_message_buffer_.length == recv_message_->length()) {
+      DoneReadingRecvMessage(GRPC_ERROR_NONE);
+      break;
+    }
+  }
+}
+
+void HealthCheckClient::CallState::OnByteStreamNext(void* arg,
+                                                    grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  if (error != GRPC_ERROR_NONE) {
+    self->DoneReadingRecvMessage(GRPC_ERROR_REF(error));
+    return;
+  }
+  error = self->PullSliceFromRecvMessage();
+  if (error != GRPC_ERROR_NONE) {
+    self->DoneReadingRecvMessage(error);
+    return;
+  }
+  if (self->recv_message_buffer_.length == self->recv_message_->length()) {
+    self->DoneReadingRecvMessage(GRPC_ERROR_NONE);
+  } else {
+    self->ContinueReadingRecvMessage();
+  }
+}
+
+void HealthCheckClient::CallState::RecvMessageReady(void* arg,
+                                                    grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "recv_message_ready");
+  if (self->recv_message_ == nullptr) {
+    self->Unref(DEBUG_LOCATION, "recv_message_ready");
+    return;
+  }
+  grpc_slice_buffer_init(&self->recv_message_buffer_);
+  GRPC_CLOSURE_INIT(&self->recv_message_ready_, OnByteStreamNext, self,
+                    grpc_schedule_on_exec_ctx);
+  self->ContinueReadingRecvMessage();
+  // Ref will continue to be held until we finish draining the byte stream.
+}
+
+void HealthCheckClient::CallState::RecvTrailingMetadataReady(
+    void* arg, grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  GRPC_CALL_COMBINER_STOP(&self->call_combiner_,
+                          "recv_trailing_metadata_ready");
+  // Get call status.
+  grpc_status_code status = GRPC_STATUS_UNKNOWN;
+  if (error != GRPC_ERROR_NONE) {
+    grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, &status,
+                          nullptr /* slice */, nullptr /* http_error */,
+                          nullptr /* error_string */);
+  } else if (self->recv_trailing_metadata_.idx.named.grpc_status != nullptr) {
+    status = grpc_get_status_code_from_metadata(
+        self->recv_trailing_metadata_.idx.named.grpc_status->md);
+  }
+  if (grpc_health_check_client_trace.enabled()) {
+    gpr_log(GPR_INFO,
+            "HealthCheckClient %p CallState %p: health watch failed with "
+            "status %d",
+            self->health_check_client_.get(), self, status);
+  }
+  // Clean up.
+  grpc_metadata_batch_destroy(&self->recv_trailing_metadata_);
+  // For status UNIMPLEMENTED, give up and assume always healthy.
+  bool retry = true;
+  if (status == GRPC_STATUS_UNIMPLEMENTED) {
+    static const char kErrorMessage[] =
+        "health checking Watch method returned UNIMPLEMENTED; "
+        "disabling health checks but assuming server is healthy";
+    gpr_log(GPR_ERROR, kErrorMessage);
+    if (self->health_check_client_->channelz_node_ != nullptr) {
+      self->health_check_client_->channelz_node_->AddTraceEvent(
+          channelz::ChannelTrace::Error,
+          grpc_slice_from_static_string(kErrorMessage));
+    }
+    self->health_check_client_->SetHealthStatus(GRPC_CHANNEL_READY,
+                                                GRPC_ERROR_NONE);
+    retry = false;
+  }
+  self->CallEnded(retry);
+}
+
+void HealthCheckClient::CallState::CallEndedRetry(void* arg,
+                                                  grpc_error* error) {
+  HealthCheckClient::CallState* self =
+      static_cast<HealthCheckClient::CallState*>(arg);
+  self->CallEnded(true /* retry */);
+  self->Unref(DEBUG_LOCATION, "call_end_closure");
+}
+
+void HealthCheckClient::CallState::CallEnded(bool retry) {
+  // If this CallState is still in use, this call ended because of a failure,
+  // so we need to stop using it and optionally create a new one.
+  // Otherwise, we have deliberately ended this call, and no further action
+  // is required.
+  if (this == health_check_client_->call_state_.get()) {
+    health_check_client_->call_state_.reset();
+    if (retry) {
+      GPR_ASSERT(!health_check_client_->shutting_down_);
+      if (static_cast<bool>(gpr_atm_acq_load(&seen_response_))) {
+        // If the call fails after we've gotten a successful response, reset
+        // the backoff and restart the call immediately.
+        health_check_client_->retry_backoff_.Reset();
+        health_check_client_->StartCall();
+      } else {
+        // If the call failed without receiving any messages, retry later.
+        health_check_client_->StartRetryTimer();
+      }
+    }
+  }
+  Unref(DEBUG_LOCATION, "call_ended");
+}
+
+}  // namespace grpc_core
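
Aside (not part of the diff): the hunks above wire the health checker into the subchannel, but do not show how an application turns it on. Per the client-side health checking design (gRFC A17), a channel opts in through a "healthCheckConfig" entry in its service config, which the new resolver_result_parsing.cc is responsible for parsing. A hypothetical sketch of exercising this through the C core API, using the pre-existing GRPC_ARG_SERVICE_CONFIG channel arg; the target, service name, and JSON shape are assumptions based on that design, not taken from this diff:

#include <grpc/grpc.h>

/* Sketch: create an insecure channel whose service config enables
 * client-side health checking for the named service. The health checker
 * then calls /grpc.health.v1.Health/Watch on each READY subchannel and
 * gates the subchannel's reported connectivity state on the responses. */
grpc_channel* create_health_checked_channel(const char* target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = (char*)GRPC_ARG_SERVICE_CONFIG;
  arg.value.string =
      (char*)"{\"healthCheckConfig\": {\"serviceName\": \"my.package.MyService\"}}";
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, nullptr);
}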