grpc 1.36.0 → 1.37.0.pre1

Potentially problematic release: this version of grpc has been flagged as potentially problematic.

Files changed (221)
  1. checksums.yaml +4 -4
  2. data/Makefile +65 -37
  3. data/include/grpc/grpc.h +15 -1
  4. data/include/grpc/impl/codegen/port_platform.h +2 -0
  5. data/src/core/ext/filters/client_channel/client_channel.cc +327 -305
  6. data/src/core/ext/filters/client_channel/client_channel_factory.h +2 -1
  7. data/src/core/ext/filters/client_channel/config_selector.h +8 -0
  8. data/src/core/ext/filters/client_channel/dynamic_filters.cc +9 -4
  9. data/src/core/ext/filters/client_channel/global_subchannel_pool.cc +24 -142
  10. data/src/core/ext/filters/client_channel/global_subchannel_pool.h +15 -10
  11. data/src/core/ext/filters/client_channel/lb_policy.cc +3 -0
  12. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +23 -0
  13. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.h +27 -0
  14. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +7 -22
  15. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +1 -1
  16. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +2 -2
  17. data/src/core/ext/filters/client_channel/local_subchannel_pool.cc +27 -67
  18. data/src/core/ext/filters/client_channel/local_subchannel_pool.h +10 -9
  19. data/src/core/ext/filters/client_channel/resolver.cc +3 -0
  20. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +2 -2
  21. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +3 -1
  22. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +5 -9
  23. data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +18 -3
  24. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +295 -91
  25. data/src/core/ext/filters/client_channel/server_address.cc +3 -0
  26. data/src/core/ext/filters/client_channel/subchannel.cc +69 -146
  27. data/src/core/ext/filters/client_channel/subchannel.h +63 -95
  28. data/src/core/ext/filters/client_channel/subchannel_pool_interface.cc +16 -2
  29. data/src/core/ext/filters/client_channel/subchannel_pool_interface.h +10 -8
  30. data/src/core/ext/filters/client_idle/client_idle_filter.cc +1 -1
  31. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +495 -0
  32. data/src/core/ext/filters/fault_injection/fault_injection_filter.h +39 -0
  33. data/src/core/ext/filters/fault_injection/service_config_parser.cc +189 -0
  34. data/src/core/ext/filters/fault_injection/service_config_parser.h +85 -0
  35. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +1 -1
  36. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +1 -1
  37. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +3 -2
  38. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +1 -1
  39. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +3 -2
  40. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +457 -170
  41. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +1 -1
  42. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +39 -7
  43. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +12 -1
  44. data/src/core/ext/transport/chttp2/transport/frame_data.cc +5 -1
  45. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +1 -1
  46. data/src/core/ext/transport/chttp2/transport/internal.h +1 -0
  47. data/src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c +406 -0
  48. data/src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.h +1459 -0
  49. data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c +350 -0
  50. data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.h +1348 -0
  51. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c +6 -0
  52. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.h +25 -0
  53. data/src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c +144 -0
  54. data/src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.h +488 -0
  55. data/src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c +141 -0
  56. data/src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.h +452 -0
  57. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c +15 -0
  58. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.h +44 -0
  59. data/src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c +79 -0
  60. data/src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.h +268 -0
  61. data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c +78 -0
  62. data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.h +281 -0
  63. data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c +41 -0
  64. data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.h +113 -0
  65. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c +6 -5
  66. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +13 -9
  67. data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c +93 -0
  68. data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.h +323 -0
  69. data/src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c +36 -0
  70. data/src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.h +90 -0
  71. data/src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c +46 -0
  72. data/src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.h +124 -0
  73. data/src/core/ext/upb-generated/udpa/type/v1/typed_struct.upb.c +33 -0
  74. data/src/core/ext/upb-generated/udpa/type/v1/typed_struct.upb.h +77 -0
  75. data/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c +354 -0
  76. data/src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.h +140 -0
  77. data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +383 -0
  78. data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.h +115 -0
  79. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c +10 -7
  80. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.h +5 -0
  81. data/src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c +141 -0
  82. data/src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.h +70 -0
  83. data/src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c +141 -0
  84. data/src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.h +70 -0
  85. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c +13 -7
  86. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.h +5 -0
  87. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c +102 -0
  88. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.h +55 -0
  89. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c +120 -0
  90. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.h +45 -0
  91. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c +76 -0
  92. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.h +35 -0
  93. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +21 -20
  94. data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c +130 -0
  95. data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.h +50 -0
  96. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c +56 -0
  97. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.h +35 -0
  98. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c +63 -0
  99. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.h +40 -0
  100. data/src/core/ext/upbdefs-generated/udpa/type/v1/typed_struct.upbdefs.c +44 -0
  101. data/src/core/ext/upbdefs-generated/udpa/type/v1/typed_struct.upbdefs.h +35 -0
  102. data/src/core/ext/xds/xds_api.cc +1591 -279
  103. data/src/core/ext/xds/xds_api.h +279 -39
  104. data/src/core/ext/xds/xds_bootstrap.cc +21 -5
  105. data/src/core/ext/xds/xds_bootstrap.h +5 -1
  106. data/src/core/ext/xds/xds_client.cc +168 -23
  107. data/src/core/ext/xds/xds_client.h +26 -0
  108. data/src/core/ext/xds/xds_client_stats.h +2 -2
  109. data/src/core/ext/xds/xds_http_fault_filter.cc +226 -0
  110. data/src/core/ext/xds/xds_http_fault_filter.h +63 -0
  111. data/src/core/ext/xds/xds_http_filters.cc +114 -0
  112. data/src/core/ext/xds/xds_http_filters.h +130 -0
  113. data/src/core/ext/xds/xds_server_config_fetcher.cc +391 -126
  114. data/src/core/lib/channel/channel_stack.cc +12 -0
  115. data/src/core/lib/channel/channel_stack.h +7 -0
  116. data/src/core/lib/channel/channelz.cc +92 -4
  117. data/src/core/lib/channel/channelz.h +30 -1
  118. data/src/core/lib/channel/channelz_registry.cc +14 -0
  119. data/src/core/lib/channel/handshaker.cc +0 -39
  120. data/src/core/lib/channel/handshaker.h +0 -17
  121. data/src/core/lib/channel/status_util.cc +12 -2
  122. data/src/core/lib/channel/status_util.h +5 -0
  123. data/src/core/lib/gpr/sync_abseil.cc +3 -6
  124. data/src/core/lib/gpr/sync_windows.cc +2 -2
  125. data/src/core/lib/gprpp/atomic.h +3 -3
  126. data/src/core/lib/gprpp/dual_ref_counted.h +3 -3
  127. data/src/core/lib/gprpp/ref_counted_ptr.h +2 -0
  128. data/src/core/lib/gprpp/thd.h +1 -1
  129. data/src/core/lib/iomgr/buffer_list.h +1 -1
  130. data/src/core/lib/iomgr/cfstream_handle.cc +2 -2
  131. data/src/core/lib/iomgr/error.h +1 -1
  132. data/src/core/lib/iomgr/ev_apple.cc +1 -1
  133. data/src/core/lib/iomgr/ev_epoll1_linux.cc +3 -3
  134. data/src/core/lib/iomgr/ev_posix.cc +3 -3
  135. data/src/core/lib/iomgr/exec_ctx.cc +6 -2
  136. data/src/core/lib/iomgr/resource_quota.cc +1 -1
  137. data/src/core/lib/iomgr/sockaddr_utils.cc +120 -0
  138. data/src/core/lib/iomgr/sockaddr_utils.h +25 -0
  139. data/src/core/lib/iomgr/tcp_posix.cc +1 -4
  140. data/src/core/lib/iomgr/tcp_uv.cc +2 -2
  141. data/src/core/lib/iomgr/timer_generic.cc +2 -2
  142. data/src/core/lib/iomgr/timer_manager.cc +1 -1
  143. data/src/core/lib/iomgr/wakeup_fd_nospecial.cc +1 -1
  144. data/src/core/lib/{security/authorization → matchers}/matchers.cc +8 -8
  145. data/src/core/lib/{security/authorization → matchers}/matchers.h +14 -12
  146. data/src/core/lib/security/security_connector/ssl_utils.cc +6 -4
  147. data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +6 -0
  148. data/src/core/lib/security/transport/security_handshaker.cc +32 -2
  149. data/src/core/lib/slice/slice_intern.cc +6 -7
  150. data/src/core/lib/surface/channel.h +3 -3
  151. data/src/core/lib/surface/completion_queue.cc +1 -1
  152. data/src/core/lib/surface/lame_client.cc +38 -19
  153. data/src/core/lib/surface/lame_client.h +4 -3
  154. data/src/core/lib/surface/server.cc +40 -33
  155. data/src/core/lib/surface/server.h +74 -15
  156. data/src/core/lib/surface/version.cc +1 -1
  157. data/src/core/lib/transport/metadata_batch.cc +27 -0
  158. data/src/core/lib/transport/metadata_batch.h +14 -0
  159. data/src/core/plugin_registry/grpc_plugin_registry.cc +6 -0
  160. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +1 -4
  161. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +1 -1
  162. data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +1 -3
  163. data/src/core/tsi/fake_transport_security.cc +10 -1
  164. data/src/ruby/ext/grpc/extconf.rb +9 -1
  165. data/src/ruby/ext/grpc/rb_channel.c +10 -1
  166. data/src/ruby/ext/grpc/rb_channel_credentials.c +11 -1
  167. data/src/ruby/ext/grpc/rb_channel_credentials.h +4 -0
  168. data/src/ruby/ext/grpc/rb_compression_options.c +1 -1
  169. data/src/ruby/ext/grpc/rb_enable_cpp.cc +1 -1
  170. data/src/ruby/ext/grpc/rb_grpc.c +4 -0
  171. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +2 -0
  172. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +4 -1
  173. data/src/ruby/ext/grpc/rb_server.c +13 -1
  174. data/src/ruby/ext/grpc/rb_server_credentials.c +19 -3
  175. data/src/ruby/ext/grpc/rb_server_credentials.h +4 -0
  176. data/src/ruby/ext/grpc/rb_xds_channel_credentials.c +215 -0
  177. data/src/ruby/ext/grpc/rb_xds_channel_credentials.h +35 -0
  178. data/src/ruby/ext/grpc/rb_xds_server_credentials.c +169 -0
  179. data/src/ruby/ext/grpc/rb_xds_server_credentials.h +35 -0
  180. data/src/ruby/lib/grpc/generic/client_stub.rb +4 -2
  181. data/src/ruby/lib/grpc/version.rb +1 -1
  182. data/src/ruby/spec/call_spec.rb +1 -1
  183. data/src/ruby/spec/channel_credentials_spec.rb +32 -0
  184. data/src/ruby/spec/channel_spec.rb +17 -6
  185. data/src/ruby/spec/client_auth_spec.rb +27 -1
  186. data/src/ruby/spec/errors_spec.rb +1 -1
  187. data/src/ruby/spec/generic/active_call_spec.rb +2 -2
  188. data/src/ruby/spec/generic/client_stub_spec.rb +4 -4
  189. data/src/ruby/spec/generic/rpc_server_spec.rb +1 -1
  190. data/src/ruby/spec/server_credentials_spec.rb +25 -0
  191. data/src/ruby/spec/server_spec.rb +22 -0
  192. data/third_party/boringssl-with-bazel/err_data.c +255 -255
  193. data/third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c +11 -2
  194. data/third_party/boringssl-with-bazel/src/crypto/cpu-arm.c +3 -3
  195. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/cipher/cipher.c +21 -13
  196. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/rand.c +7 -5
  197. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c +0 -28
  198. data/third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c +22 -17
  199. data/third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c +3 -1
  200. data/third_party/boringssl-with-bazel/src/include/openssl/cipher.h +4 -0
  201. data/third_party/boringssl-with-bazel/src/include/openssl/cpu.h +22 -32
  202. data/third_party/boringssl-with-bazel/src/include/openssl/ssl.h +25 -9
  203. data/third_party/boringssl-with-bazel/src/include/openssl/x509.h +0 -1
  204. data/third_party/boringssl-with-bazel/src/ssl/t1_lib.cc +33 -19
  205. data/third_party/xxhash/xxhash.h +5443 -0
  206. metadata +93 -49
  207. data/src/core/lib/security/authorization/authorization_engine.cc +0 -177
  208. data/src/core/lib/security/authorization/authorization_engine.h +0 -84
  209. data/src/core/lib/security/authorization/evaluate_args.cc +0 -148
  210. data/src/core/lib/security/authorization/evaluate_args.h +0 -59
  211. data/src/core/lib/security/authorization/mock_cel/activation.h +0 -57
  212. data/src/core/lib/security/authorization/mock_cel/cel_expr_builder_factory.h +0 -44
  213. data/src/core/lib/security/authorization/mock_cel/cel_expression.h +0 -69
  214. data/src/core/lib/security/authorization/mock_cel/cel_value.h +0 -99
  215. data/src/core/lib/security/authorization/mock_cel/evaluator_core.h +0 -67
  216. data/src/core/lib/security/authorization/mock_cel/flat_expr_builder.h +0 -57
  217. data/third_party/abseil-cpp/absl/container/flat_hash_set.h +0 -504
  218. data/third_party/upb/upb/json_decode.c +0 -1443
  219. data/third_party/upb/upb/json_decode.h +0 -23
  220. data/third_party/upb/upb/json_encode.c +0 -713
  221. data/third_party/upb/upb/json_encode.h +0 -36
data/src/core/ext/filters/client_channel/client_channel.cc (diff excerpt):

@@ -121,18 +121,11 @@ namespace {
  // ChannelData definition
  //

- class LoadBalancedCall;
-
  class ChannelData {
  public:
- struct ResolverQueuedCall {
- grpc_call_element* elem;
- ResolverQueuedCall* next = nullptr;
- };
- struct LbQueuedCall {
- LoadBalancedCall* lb_call;
- LbQueuedCall* next = nullptr;
- };
+ class CallData;
+ class RetryingCall;
+ class LoadBalancedCall;

  static grpc_error* Init(grpc_channel_element* elem,
  grpc_channel_element_args* args);
@@ -142,51 +135,6 @@ class ChannelData {
  static void GetChannelInfo(grpc_channel_element* elem,
  const grpc_channel_info* info);

- bool deadline_checking_enabled() const { return deadline_checking_enabled_; }
- bool enable_retries() const { return enable_retries_; }
- size_t per_rpc_retry_buffer_size() const {
- return per_rpc_retry_buffer_size_;
- }
- grpc_channel_stack* owning_stack() const { return owning_stack_; }
-
- // Note: Does NOT return a new ref.
- grpc_error* disconnect_error() const {
- return disconnect_error_.Load(MemoryOrder::ACQUIRE);
- }
-
- Mutex* resolution_mu() const { return &resolution_mu_; }
- // These methods all require holding resolution_mu_.
- void AddResolverQueuedCall(ResolverQueuedCall* call,
- grpc_polling_entity* pollent);
- void RemoveResolverQueuedCall(ResolverQueuedCall* to_remove,
- grpc_polling_entity* pollent);
- bool received_service_config_data() const {
- return received_service_config_data_;
- }
- grpc_error* resolver_transient_failure_error() const {
- return resolver_transient_failure_error_;
- }
- RefCountedPtr<ServiceConfig> service_config() const {
- return service_config_;
- }
- ConfigSelector* config_selector() const { return config_selector_.get(); }
- RefCountedPtr<DynamicFilters> dynamic_filters() const {
- return dynamic_filters_;
- }
-
- Mutex* data_plane_mu() const { return &data_plane_mu_; }
- // These methods all require holding data_plane_mu_.
- LoadBalancingPolicy::SubchannelPicker* picker() const {
- return picker_.get();
- }
- void AddLbQueuedCall(LbQueuedCall* call, grpc_polling_entity* pollent);
- void RemoveLbQueuedCall(LbQueuedCall* to_remove,
- grpc_polling_entity* pollent);
- RefCountedPtr<ConnectedSubchannel> GetConnectedSubchannelInDataPlane(
- SubchannelInterface* subchannel) const;
-
- WorkSerializer* work_serializer() const { return work_serializer_.get(); }
-
  grpc_connectivity_state CheckConnectivityState(bool try_to_connect);

  void AddExternalConnectivityWatcher(grpc_polling_entity pollent,
@@ -215,6 +163,7 @@ class ChannelData {
  AsyncConnectivityStateWatcherInterface* watcher);

  private:
+ class DynamicTerminationFilterChannelData;
  class SubchannelWrapper;
  class ClientChannelControlHelper;
  class ConnectivityWatcherAdder;
@@ -281,9 +230,23 @@ class ChannelData {
  ChannelData* chand_;
  };

+ struct ResolverQueuedCall {
+ grpc_call_element* elem;
+ ResolverQueuedCall* next = nullptr;
+ };
+ struct LbQueuedCall {
+ LoadBalancedCall* lb_call;
+ LbQueuedCall* next = nullptr;
+ };
+
  ChannelData(grpc_channel_element_args* args, grpc_error** error);
  ~ChannelData();

+ // Note: Does NOT return a new ref.
+ grpc_error* disconnect_error() const {
+ return disconnect_error_.Load(MemoryOrder::ACQUIRE);
+ }
+
  // Note: All methods with "Locked" suffix must be invoked from within
  // work_serializer_.

@@ -318,6 +281,23 @@ class ChannelData {

  void TryToConnectLocked();

+ // These methods all require holding resolution_mu_.
+ void AddResolverQueuedCall(ResolverQueuedCall* call,
+ grpc_polling_entity* pollent)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(resolution_mu_);
+ void RemoveResolverQueuedCall(ResolverQueuedCall* to_remove,
+ grpc_polling_entity* pollent)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(resolution_mu_);
+
+ // These methods all require holding data_plane_mu_.
+ void AddLbQueuedCall(LbQueuedCall* call, grpc_polling_entity* pollent)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(data_plane_mu_);
+ void RemoveLbQueuedCall(LbQueuedCall* to_remove, grpc_polling_entity* pollent)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(data_plane_mu_);
+ RefCountedPtr<ConnectedSubchannel> GetConnectedSubchannelInDataPlane(
+ SubchannelInterface* subchannel) const
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(data_plane_mu_);
+
  //
  // Fields set at construction and never modified.
  //
@@ -337,21 +317,26 @@ class ChannelData {
  //
  mutable Mutex resolution_mu_;
  // Linked list of calls queued waiting for resolver result.
- ResolverQueuedCall* resolver_queued_calls_ = nullptr;
+ ResolverQueuedCall* resolver_queued_calls_ ABSL_GUARDED_BY(resolution_mu_) =
+ nullptr;
  // Data from service config.
- grpc_error* resolver_transient_failure_error_ = GRPC_ERROR_NONE;
- bool received_service_config_data_ = false;
- RefCountedPtr<ServiceConfig> service_config_;
- RefCountedPtr<ConfigSelector> config_selector_;
- RefCountedPtr<DynamicFilters> dynamic_filters_;
+ grpc_error* resolver_transient_failure_error_
+ ABSL_GUARDED_BY(resolution_mu_) = GRPC_ERROR_NONE;
+ bool received_service_config_data_ ABSL_GUARDED_BY(resolution_mu_) = false;
+ RefCountedPtr<ServiceConfig> service_config_ ABSL_GUARDED_BY(resolution_mu_);
+ RefCountedPtr<ConfigSelector> config_selector_
+ ABSL_GUARDED_BY(resolution_mu_);
+ RefCountedPtr<DynamicFilters> dynamic_filters_
+ ABSL_GUARDED_BY(resolution_mu_);

  //
  // Fields used in the data plane. Guarded by data_plane_mu_.
  //
  mutable Mutex data_plane_mu_;
- std::unique_ptr<LoadBalancingPolicy::SubchannelPicker> picker_;
+ std::unique_ptr<LoadBalancingPolicy::SubchannelPicker> picker_
+ ABSL_GUARDED_BY(data_plane_mu_);
  // Linked list of calls queued waiting for LB pick.
- LbQueuedCall* lb_queued_calls_ = nullptr;
+ LbQueuedCall* lb_queued_calls_ ABSL_GUARDED_BY(data_plane_mu_) = nullptr;

  //
  // Fields used in the control plane. Guarded by work_serializer.
@@ -390,8 +375,8 @@ class ChannelData {
  // synchronously via get_channel_info().
  //
  Mutex info_mu_;
- UniquePtr<char> info_lb_policy_name_;
- UniquePtr<char> info_service_config_json_;
+ UniquePtr<char> info_lb_policy_name_ ABSL_GUARDED_BY(info_mu_);
+ UniquePtr<char> info_service_config_json_ ABSL_GUARDED_BY(info_mu_);

  //
  // Fields guarded by a mutex, since they need to be accessed
@@ -399,14 +384,14 @@ class ChannelData {
  //
  mutable Mutex external_watchers_mu_;
  std::map<grpc_closure*, RefCountedPtr<ExternalConnectivityWatcher>>
- external_watchers_;
+ external_watchers_ ABSL_GUARDED_BY(external_watchers_mu_);
  };

  //
- // CallData definition
+ // ChannelData::CallData definition
  //

- class CallData {
+ class ChannelData::CallData {
  public:
  static grpc_error* Init(grpc_call_element* elem,
  const grpc_call_element_args* args);
@@ -424,7 +409,8 @@ class CallData {
  // Returns true if the service config has been applied to the call, in which
  // case the caller must invoke ResolutionDone() or AsyncResolutionDone()
  // with the returned error.
- bool CheckResolutionLocked(grpc_call_element* elem, grpc_error** error);
+ bool CheckResolutionLocked(grpc_call_element* elem, grpc_error** error)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);
  // Schedules a callback to continue processing the call once
  // resolution is complete. The callback will not run until after this
  // method returns.
@@ -470,16 +456,19 @@ class CallData {
  // If an error is returned, the error indicates the status with which
  // the call should be failed.
  grpc_error* ApplyServiceConfigToCallLocked(
- grpc_call_element* elem, grpc_metadata_batch* initial_metadata);
+ grpc_call_element* elem, grpc_metadata_batch* initial_metadata)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);
  // Invoked when the resolver result is applied to the caller, on both
  // success or failure.
  static void ResolutionDone(void* arg, grpc_error* error);
  // Removes the call (if present) from the channel's list of calls queued
  // for name resolution.
- void MaybeRemoveCallFromResolverQueuedCallsLocked(grpc_call_element* elem);
+ void MaybeRemoveCallFromResolverQueuedCallsLocked(grpc_call_element* elem)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);
  // Adds the call (if not already present) to the channel's list of
  // calls queued for name resolution.
- void MaybeAddCallToResolverQueuedCallsLocked(grpc_call_element* elem);
+ void MaybeAddCallToResolverQueuedCallsLocked(grpc_call_element* elem)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);

  static void RecvInitialMetadataReadyForConfigSelectorCommitCallback(
  void* arg, grpc_error* error);
@@ -534,10 +523,10 @@ class CallData {
  };

  //
- // RetryingCall definition
+ // ChannelData::RetryingCall definition
  //

- class RetryingCall {
+ class ChannelData::RetryingCall {
  public:
  RetryingCall(
  ChannelData* chand, const grpc_call_element_args& args,
@@ -581,7 +570,7 @@ class RetryingCall {
  gpr_refcount refs;
  grpc_call_element* elem;
  RetryingCall* call;
- RefCountedPtr<LoadBalancedCall> lb_call;
+ RefCountedPtr<ChannelData::LoadBalancedCall> lb_call;
  // The batch to use in the subchannel call.
  // Its payload field points to SubchannelCallRetryState::batch_payload.
  grpc_transport_stream_op_batch batch;
@@ -840,7 +829,7 @@ class RetryingCall {

  grpc_closure retry_closure_;

- RefCountedPtr<LoadBalancedCall> lb_call_;
+ RefCountedPtr<ChannelData::LoadBalancedCall> lb_call_;

  // Batches are added to this list when received from above.
  // They are removed when we are done handling the batch (i.e., when
@@ -897,14 +886,14 @@ class RetryingCall {
  };

  //
- // LoadBalancedCall definition
+ // ChannelData::LoadBalancedCall definition
  //

  // This object is ref-counted, but it cannot inherit from RefCounted<>,
  // because it is allocated on the arena and can't free its memory when
  // its refcount goes to zero. So instead, it manually implements the
  // same API as RefCounted<>, so that it can be used with RefCountedPtr<>.
- class LoadBalancedCall {
+ class ChannelData::LoadBalancedCall {
  public:
  static RefCountedPtr<LoadBalancedCall> Create(
  ChannelData* chand, const grpc_call_element_args& args,
@@ -932,7 +921,8 @@ class LoadBalancedCall {
  // Helper function for performing an LB pick while holding the data plane
  // mutex. Returns true if the pick is complete, in which case the caller
  // must invoke PickDone() or AsyncPickDone() with the returned error.
- bool PickSubchannelLocked(grpc_error** error);
+ bool PickSubchannelLocked(grpc_error** error)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::data_plane_mu_);
  // Schedules a callback to process the completed pick. The callback
  // will not run until after this method returns.
  void AsyncPickDone(grpc_error* error);
@@ -990,9 +980,11 @@ class LoadBalancedCall {
  // Invoked when a pick is completed, on both success or failure.
  static void PickDone(void* arg, grpc_error* error);
  // Removes the call from the channel's list of queued picks if present.
- void MaybeRemoveCallFromLbQueuedCallsLocked();
+ void MaybeRemoveCallFromLbQueuedCallsLocked()
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::data_plane_mu_);
  // Adds the call to the channel's list of queued picks if not already present.
- void MaybeAddCallToLbQueuedCallsLocked();
+ void MaybeAddCallToLbQueuedCallsLocked()
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::data_plane_mu_);

  RefCount refs_;

@@ -1067,10 +1059,20 @@ const grpc_arg_pointer_vtable kRetryThrottleDataArgPointerVtable = {
  RetryThrottleDataArgCopy, RetryThrottleDataArgDestroy,
  RetryThrottleDataArgCmp};

- class DynamicTerminationFilterChannelData {
+ class ChannelData::DynamicTerminationFilterChannelData {
  public:
+ class DynamicTerminationFilterCallData;
+
+ static const grpc_channel_filter kDynamicTerminationFilterVtable;
+
  static grpc_error* Init(grpc_channel_element* elem,
- grpc_channel_element_args* args);
+ grpc_channel_element_args* args) {
+ GPR_ASSERT(args->is_last);
+ GPR_ASSERT(elem->filter == &kDynamicTerminationFilterVtable);
+ new (elem->channel_data)
+ DynamicTerminationFilterChannelData(args->channel_args);
+ return GRPC_ERROR_NONE;
+ }

  static void Destroy(grpc_channel_element* elem) {
  auto* chand =
@@ -1084,11 +1086,6 @@ class DynamicTerminationFilterChannelData {
  static void GetChannelInfo(grpc_channel_element* /*elem*/,
  const grpc_channel_info* /*info*/) {}

- ChannelData* chand() const { return chand_; }
- RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() const {
- return retry_throttle_data_;
- }
-
  private:
  static RefCountedPtr<ServerRetryThrottleData> GetRetryThrottleDataFromArgs(
  const grpc_channel_args* args) {
@@ -1108,7 +1105,8 @@ class DynamicTerminationFilterChannelData {
  RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
  };

- class DynamicTerminationFilterCallData {
+ class ChannelData::DynamicTerminationFilterChannelData::
+ DynamicTerminationFilterCallData {
  public:
  static grpc_error* Init(grpc_call_element* elem,
  const grpc_call_element_args* args) {
@@ -1124,7 +1122,7 @@ class DynamicTerminationFilterCallData {
  auto* chand =
  static_cast<DynamicTerminationFilterChannelData*>(elem->channel_data);
  RefCountedPtr<SubchannelCall> subchannel_call;
- if (chand->chand()->enable_retries()) {
+ if (chand->chand_->enable_retries_) {
  if (GPR_LIKELY(calld->retrying_call_ != nullptr)) {
  subchannel_call = calld->retrying_call_->subchannel_call();
  calld->retrying_call_->~RetryingCall();
@@ -1149,7 +1147,7 @@ class DynamicTerminationFilterCallData {
  static_cast<DynamicTerminationFilterCallData*>(elem->call_data);
  auto* chand =
  static_cast<DynamicTerminationFilterChannelData*>(elem->channel_data);
- if (chand->chand()->enable_retries()) {
+ if (chand->chand_->enable_retries_) {
  calld->retrying_call_->StartTransportStreamOpBatch(batch);
  } else {
  calld->lb_call_->StartTransportStreamOpBatch(batch);
@@ -1162,13 +1160,13 @@ class DynamicTerminationFilterCallData {
  static_cast<DynamicTerminationFilterCallData*>(elem->call_data);
  auto* chand =
  static_cast<DynamicTerminationFilterChannelData*>(elem->channel_data);
- ChannelData* client_channel = chand->chand();
+ ChannelData* client_channel = chand->chand_;
  grpc_call_element_args args = {
  calld->owning_call_, nullptr,
  calld->call_context_, calld->path_,
  calld->call_start_time_, calld->deadline_,
  calld->arena_, calld->call_combiner_};
- if (client_channel->enable_retries()) {
+ if (client_channel->enable_retries_) {
  // Get retry settings from service config.
  auto* svc_cfg_call_data = static_cast<ServiceConfigCallData*>(
  calld->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
@@ -1177,8 +1175,8 @@ class DynamicTerminationFilterCallData {
  svc_cfg_call_data->GetMethodParsedConfig(
  ClientChannelServiceConfigParser::ParserIndex()));
  // Create retrying call.
- calld->retrying_call_ = calld->arena_->New<RetryingCall>(
- client_channel, args, pollent, chand->retry_throttle_data(),
+ calld->retrying_call_ = calld->arena_->New<ChannelData::RetryingCall>(
+ client_channel, args, pollent, chand->retry_throttle_data_,
  method_config == nullptr ? nullptr : method_config->retry_policy());
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
  gpr_log(
@@ -1187,8 +1185,8 @@ class DynamicTerminationFilterCallData {
  client_channel, calld, calld->retrying_call_);
  }
  } else {
- calld->lb_call_ =
- LoadBalancedCall::Create(client_channel, args, pollent, 0);
+ calld->lb_call_ = ChannelData::LoadBalancedCall::Create(client_channel,
+ args, pollent, 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
  gpr_log(GPR_INFO,
  "chand=%p dynamic_termination_calld=%p: create lb_call=%p",
@@ -1217,33 +1215,30 @@ class DynamicTerminationFilterCallData {
  CallCombiner* call_combiner_;
  grpc_call_context_element* call_context_;

- RetryingCall* retrying_call_ = nullptr;
+ ChannelData::RetryingCall* retrying_call_ = nullptr;
  RefCountedPtr<LoadBalancedCall> lb_call_;
  };

- const grpc_channel_filter kDynamicTerminationFilterVtable = {
- DynamicTerminationFilterCallData::StartTransportStreamOpBatch,
- DynamicTerminationFilterChannelData::StartTransportOp,
- sizeof(DynamicTerminationFilterCallData),
- DynamicTerminationFilterCallData::Init,
- DynamicTerminationFilterCallData::SetPollent,
- DynamicTerminationFilterCallData::Destroy,
- sizeof(DynamicTerminationFilterChannelData),
- DynamicTerminationFilterChannelData::Init,
- DynamicTerminationFilterChannelData::Destroy,
- DynamicTerminationFilterChannelData::GetChannelInfo,
- "dynamic_filter_termination",
+ const grpc_channel_filter ChannelData::DynamicTerminationFilterChannelData::
+ kDynamicTerminationFilterVtable = {
+ ChannelData::DynamicTerminationFilterChannelData::
+ DynamicTerminationFilterCallData::StartTransportStreamOpBatch,
+ ChannelData::DynamicTerminationFilterChannelData::StartTransportOp,
+ sizeof(ChannelData::DynamicTerminationFilterChannelData::
+ DynamicTerminationFilterCallData),
+ ChannelData::DynamicTerminationFilterChannelData::
+ DynamicTerminationFilterCallData::Init,
+ ChannelData::DynamicTerminationFilterChannelData::
+ DynamicTerminationFilterCallData::SetPollent,
+ ChannelData::DynamicTerminationFilterChannelData::
+ DynamicTerminationFilterCallData::Destroy,
+ sizeof(ChannelData::DynamicTerminationFilterChannelData),
+ ChannelData::DynamicTerminationFilterChannelData::Init,
+ ChannelData::DynamicTerminationFilterChannelData::Destroy,
+ ChannelData::DynamicTerminationFilterChannelData::GetChannelInfo,
+ "dynamic_filter_termination",
  };

- grpc_error* DynamicTerminationFilterChannelData::Init(
- grpc_channel_element* elem, grpc_channel_element_args* args) {
- GPR_ASSERT(args->is_last);
- GPR_ASSERT(elem->filter == &kDynamicTerminationFilterVtable);
- new (elem->channel_data)
- DynamicTerminationFilterChannelData(args->channel_args);
- return GRPC_ERROR_NONE;
- }
-
  //
  // ChannelData::SubchannelWrapper
  //
@@ -1258,27 +1253,28 @@ grpc_error* DynamicTerminationFilterChannelData::Init(
  // control plane work_serializer.
  class ChannelData::SubchannelWrapper : public SubchannelInterface {
  public:
- SubchannelWrapper(ChannelData* chand, Subchannel* subchannel,
+ SubchannelWrapper(ChannelData* chand, RefCountedPtr<Subchannel> subchannel,
  absl::optional<std::string> health_check_service_name)
  : SubchannelInterface(
  GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
  ? "SubchannelWrapper"
  : nullptr),
  chand_(chand),
- subchannel_(subchannel),
+ subchannel_(std::move(subchannel)),
  health_check_service_name_(std::move(health_check_service_name)) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
  gpr_log(GPR_INFO,
  "chand=%p: creating subchannel wrapper %p for subchannel %p",
- chand, this, subchannel_);
+ chand, this, subchannel_.get());
  }
  GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "SubchannelWrapper");
  auto* subchannel_node = subchannel_->channelz_node();
  if (subchannel_node != nullptr) {
- auto it = chand_->subchannel_refcount_map_.find(subchannel_);
+ auto it = chand_->subchannel_refcount_map_.find(subchannel_.get());
  if (it == chand_->subchannel_refcount_map_.end()) {
  chand_->channelz_node_->AddChildSubchannel(subchannel_node->uuid());
- it = chand_->subchannel_refcount_map_.emplace(subchannel_, 0).first;
+ it = chand_->subchannel_refcount_map_.emplace(subchannel_.get(), 0)
+ .first;
  }
  ++it->second;
  }
@@ -1289,12 +1285,12 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
  gpr_log(GPR_INFO,
  "chand=%p: destroying subchannel wrapper %p for subchannel %p",
- chand_, this, subchannel_);
+ chand_, this, subchannel_.get());
  }
  chand_->subchannel_wrappers_.erase(this);
  auto* subchannel_node = subchannel_->channelz_node();
  if (subchannel_node != nullptr) {
- auto it = chand_->subchannel_refcount_map_.find(subchannel_);
+ auto it = chand_->subchannel_refcount_map_.find(subchannel_.get());
  GPR_ASSERT(it != chand_->subchannel_refcount_map_.end());
  --it->second;
  if (it->second == 0) {
@@ -1302,7 +1298,6 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
  chand_->subchannel_refcount_map_.erase(it);
  }
  }
- GRPC_SUBCHANNEL_UNREF(subchannel_, "unref from LB");
  GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "SubchannelWrapper");
  }

@@ -1436,7 +1431,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
  gpr_log(GPR_INFO,
  "chand=%p: connectivity change for subchannel wrapper %p "
  "subchannel %p; hopping into work_serializer",
- parent_->chand_, parent_.get(), parent_->subchannel_);
+ parent_->chand_, parent_.get(), parent_->subchannel_.get());
  }
  Ref().release(); // ref owned by lambda
  parent_->chand_->work_serializer_->Run(
@@ -1470,7 +1465,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
  "chand=%p: processing connectivity change in work serializer "
  "for subchannel wrapper %p subchannel %p "
  "watcher=%p",
- parent_->chand_, parent_.get(), parent_->subchannel_,
+ parent_->chand_, parent_.get(), parent_->subchannel_.get(),
  watcher_.get());
  }
  ConnectivityStateChange state_change = PopConnectivityStateChange();
@@ -1539,7 +1534,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
  }

  ChannelData* chand_;
- Subchannel* subchannel_;
+ RefCountedPtr<Subchannel> subchannel_;
  absl::optional<std::string> health_check_service_name_;
  // Maps from the address of the watcher passed to us by the LB policy
  // to the address of the WrapperWatcher that we passed to the underlying
@@ -1758,7 +1753,7 @@ class ChannelData::ClientChannelControlHelper
  args_to_add.data(), args_to_add.size());
  gpr_free(args_to_add[0].value.string);
  // Create subchannel.
- Subchannel* subchannel =
+ RefCountedPtr<Subchannel> subchannel =
  chand_->client_channel_factory_->CreateSubchannel(new_args);
  grpc_channel_args_destroy(new_args);
  if (subchannel == nullptr) return nullptr;
@@ -1766,7 +1761,7 @@ class ChannelData::ClientChannelControlHelper
  subchannel->ThrottleKeepaliveTime(chand_->keepalive_time_);
  // Create and return wrapper for the subchannel.
  return MakeRefCounted<SubchannelWrapper>(
- chand_, subchannel, std::move(health_check_service_name));
+ chand_, std::move(subchannel), std::move(health_check_service_name));
  }

  void UpdateState(
@@ -2297,10 +2292,11 @@ void ChannelData::UpdateServiceConfigInDataPlaneLocked() {
  server_name_, retry_throttle_config.value().max_milli_tokens,
  retry_throttle_config.value().milli_token_ratio);
  }
- // Construct per-LB filter stack.
+ // Construct dynamic filter stack.
  std::vector<const grpc_channel_filter*> filters =
  config_selector->GetFilters();
- filters.push_back(&kDynamicTerminationFilterVtable);
+ filters.push_back(
+ &DynamicTerminationFilterChannelData::kDynamicTerminationFilterVtable);
  absl::InlinedVector<grpc_arg, 2> args_to_add;
  args_to_add.push_back(grpc_channel_arg_pointer_create(
  const_cast<char*>(GRPC_ARG_CLIENT_CHANNEL_DATA), this,
@@ -2312,6 +2308,7 @@ void ChannelData::UpdateServiceConfigInDataPlaneLocked() {
  }
  grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
  channel_args_, args_to_add.data(), args_to_add.size());
+ new_args = config_selector->ModifyChannelArgs(new_args);
  RefCountedPtr<DynamicFilters> dynamic_filters =
  DynamicFilters::Create(new_args, std::move(filters));
  GPR_ASSERT(dynamic_filters != nullptr);
@@ -2461,8 +2458,11 @@ grpc_error* ChannelData::DoPingLocked(grpc_transport_op* op) {
  if (state_tracker_.state() != GRPC_CHANNEL_READY) {
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("channel not connected");
  }
- LoadBalancingPolicy::PickResult result =
- picker_->Pick(LoadBalancingPolicy::PickArgs());
+ LoadBalancingPolicy::PickResult result;
+ {
+ MutexLock lock(&data_plane_mu_);
+ result = picker_->Pick(LoadBalancingPolicy::PickArgs());
+ }
  ConnectedSubchannel* connected_subchannel = nullptr;
  if (result.subchannel != nullptr) {
  SubchannelWrapper* subchannel =
@@ -2635,10 +2635,11 @@ void ChannelData::RemoveConnectivityWatcher(
  // CallData implementation
  //

- CallData::CallData(grpc_call_element* elem, const ChannelData& chand,
- const grpc_call_element_args& args)
+ ChannelData::CallData::CallData(grpc_call_element* elem,
+ const ChannelData& chand,
+ const grpc_call_element_args& args)
  : deadline_state_(elem, args,
- GPR_LIKELY(chand.deadline_checking_enabled())
+ GPR_LIKELY(chand.deadline_checking_enabled_)
  ? args.deadline
  : GRPC_MILLIS_INF_FUTURE),
  path_(grpc_slice_ref_internal(args.path)),
@@ -2653,7 +2654,7 @@ CallData::CallData(grpc_call_element* elem, const ChannelData& chand,
  }
  }

- CallData::~CallData() {
+ ChannelData::CallData::~CallData() {
  grpc_slice_unref_internal(path_);
  GRPC_ERROR_UNREF(cancel_error_);
  // Make sure there are no remaining pending batches.
@@ -2662,16 +2663,16 @@ CallData::~CallData() {
  }
  }

- grpc_error* CallData::Init(grpc_call_element* elem,
- const grpc_call_element_args* args) {
+ grpc_error* ChannelData::CallData::Init(grpc_call_element* elem,
+ const grpc_call_element_args* args) {
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
  new (elem->call_data) CallData(elem, *chand, *args);
  return GRPC_ERROR_NONE;
  }

- void CallData::Destroy(grpc_call_element* elem,
- const grpc_call_final_info* /*final_info*/,
- grpc_closure* then_schedule_closure) {
+ void ChannelData::CallData::Destroy(grpc_call_element* elem,
+ const grpc_call_final_info* /*final_info*/,
+ grpc_closure* then_schedule_closure) {
  CallData* calld = static_cast<CallData*>(elem->call_data);
  RefCountedPtr<DynamicFilters::Call> dynamic_call =
  std::move(calld->dynamic_call_);
@@ -2684,12 +2685,12 @@ void CallData::Destroy(grpc_call_element* elem,
  }
  }

- void CallData::StartTransportStreamOpBatch(
+ void ChannelData::CallData::StartTransportStreamOpBatch(
  grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
  CallData* calld = static_cast<CallData*>(elem->call_data);
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
- if (GPR_LIKELY(chand->deadline_checking_enabled())) {
+ if (GPR_LIKELY(chand->deadline_checking_enabled_)) {
  grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
  }
  // Intercept recv_initial_metadata for config selector on-committed callback.
@@ -2773,8 +2774,8 @@ void CallData::StartTransportStreamOpBatch(
  }
  }

- void CallData::SetPollent(grpc_call_element* elem,
- grpc_polling_entity* pollent) {
+ void ChannelData::CallData::SetPollent(grpc_call_element* elem,
+ grpc_polling_entity* pollent) {
  CallData* calld = static_cast<CallData*>(elem->call_data);
  calld->pollent_ = pollent;
  }
@@ -2783,7 +2784,8 @@ void CallData::SetPollent(grpc_call_element* elem,
  // pending_batches management
  //

- size_t CallData::GetBatchIndex(grpc_transport_stream_op_batch* batch) {
+ size_t ChannelData::CallData::GetBatchIndex(
+ grpc_transport_stream_op_batch* batch) {
  // Note: It is important the send_initial_metadata be the first entry
  // here, since the code in pick_subchannel_locked() assumes it will be.
  if (batch->send_initial_metadata) return 0;
@@ -2796,8 +2798,8 @@ size_t CallData::GetBatchIndex(grpc_transport_stream_op_batch* batch) {
  }

  // This is called via the call combiner, so access to calld is synchronized.
- void CallData::PendingBatchesAdd(grpc_call_element* elem,
- grpc_transport_stream_op_batch* batch) {
+ void ChannelData::CallData::PendingBatchesAdd(
+ grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
  const size_t idx = GetBatchIndex(batch);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -2811,7 +2813,8 @@ void CallData::PendingBatchesAdd(grpc_call_element* elem,
  }

  // This is called via the call combiner, so access to calld is synchronized.
- void CallData::FailPendingBatchInCallCombiner(void* arg, grpc_error* error) {
+ void ChannelData::CallData::FailPendingBatchInCallCombiner(void* arg,
+ grpc_error* error) {
  grpc_transport_stream_op_batch* batch =
  static_cast<grpc_transport_stream_op_batch*>(arg);
  CallData* calld = static_cast<CallData*>(batch->handler_private.extra_arg);
@@ -2821,7 +2824,7 @@ void CallData::FailPendingBatchInCallCombiner(void* arg, grpc_error* error) {
  }

  // This is called via the call combiner, so access to calld is synchronized.
- void CallData::PendingBatchesFail(
+ void ChannelData::CallData::PendingBatchesFail(
  grpc_call_element* elem, grpc_error* error,
  YieldCallCombinerPredicate yield_call_combiner_predicate) {
  GPR_ASSERT(error != GRPC_ERROR_NONE);
@@ -2856,8 +2859,8 @@ void CallData::PendingBatchesFail(
  }

  // This is called via the call combiner, so access to calld is synchronized.
- void CallData::ResumePendingBatchInCallCombiner(void* arg,
- grpc_error* /*ignored*/) {
+ void ChannelData::CallData::ResumePendingBatchInCallCombiner(
+ void* arg, grpc_error* /*ignored*/) {
  grpc_transport_stream_op_batch* batch =
  static_cast<grpc_transport_stream_op_batch*>(arg);
  auto* elem =
@@ -2868,7 +2871,7 @@ void CallData::ResumePendingBatchInCallCombiner(void* arg,
  }

  // This is called via the call combiner, so access to calld is synchronized.
- void CallData::PendingBatchesResume(grpc_call_element* elem) {
+ void ChannelData::CallData::PendingBatchesResume(grpc_call_element* elem) {
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
  // Retries not enabled; send down batches as-is.
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -2903,7 +2906,7 @@ void CallData::PendingBatchesResume(grpc_call_element* elem) {

  // A class to handle the call combiner cancellation callback for a
  // queued pick.
- class CallData::ResolverQueuedCallCanceller {
+ class ChannelData::CallData::ResolverQueuedCallCanceller {
  public:
  explicit ResolverQueuedCallCanceller(grpc_call_element* elem) : elem_(elem) {
  auto* calld = static_cast<CallData*>(elem->call_data);
@@ -2919,7 +2922,7 @@ class CallData::ResolverQueuedCallCanceller {
  auto* chand = static_cast<ChannelData*>(self->elem_->channel_data);
  auto* calld = static_cast<CallData*>(self->elem_->call_data);
  {
- MutexLock lock(chand->resolution_mu());
+ MutexLock lock(&chand->resolution_mu_);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
  gpr_log(GPR_INFO,
  "chand=%p calld=%p: cancelling resolver queued pick: "
@@ -2943,7 +2946,7 @@ class CallData::ResolverQueuedCallCanceller {
  grpc_closure closure_;
  };

- void CallData::MaybeRemoveCallFromResolverQueuedCallsLocked(
+ void ChannelData::CallData::MaybeRemoveCallFromResolverQueuedCallsLocked(
  grpc_call_element* elem) {
  if (!queued_pending_resolver_result_) return;
  auto* chand = static_cast<ChannelData*>(elem->channel_data);
@@ -2958,7 +2961,7 @@ void CallData::MaybeRemoveCallFromResolverQueuedCallsLocked(
  resolver_call_canceller_ = nullptr;
  }

- void CallData::MaybeAddCallToResolverQueuedCallsLocked(
+ void ChannelData::CallData::MaybeAddCallToResolverQueuedCallsLocked(
  grpc_call_element* elem) {
  if (queued_pending_resolver_result_) return;
  auto* chand = static_cast<ChannelData*>(elem->channel_data);
@@ -2973,14 +2976,14 @@ void CallData::MaybeAddCallToResolverQueuedCallsLocked(
  resolver_call_canceller_ = new ResolverQueuedCallCanceller(elem);
  }

- grpc_error* CallData::ApplyServiceConfigToCallLocked(
+ grpc_error* ChannelData::CallData::ApplyServiceConfigToCallLocked(
  grpc_call_element* elem, grpc_metadata_batch* initial_metadata) {
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
  gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
  chand, this);
  }
- ConfigSelector* config_selector = chand->config_selector();
+ ConfigSelector* config_selector = chand->config_selector_.get();
  if (config_selector != nullptr) {
  // Use the ConfigSelector to determine the config for the call.
  ConfigSelector::CallConfig call_config =
@@ -3002,7 +3005,7 @@ grpc_error* CallData::ApplyServiceConfigToCallLocked(
  if (method_params != nullptr) {
  // If the deadline from the service config is shorter than the one
  // from the client API, reset the deadline timer.
- if (chand->deadline_checking_enabled() && method_params->timeout() != 0) {
+ if (chand->deadline_checking_enabled_ && method_params->timeout() != 0) {
  const grpc_millis per_method_deadline =
  grpc_cycle_counter_to_millis_round_up(call_start_time_) +
  method_params->timeout();
@@ -3027,13 +3030,14 @@ grpc_error* CallData::ApplyServiceConfigToCallLocked(
  }
  }
  // Set the dynamic filter stack.
- dynamic_filters_ = chand->dynamic_filters();
+ dynamic_filters_ = chand->dynamic_filters_;
  }
  return GRPC_ERROR_NONE;
  }

- void CallData::RecvInitialMetadataReadyForConfigSelectorCommitCallback(
- void* arg, grpc_error* error) {
+ void ChannelData::CallData::
+ RecvInitialMetadataReadyForConfigSelectorCommitCallback(void* arg,
+ grpc_error* error) {
  auto* self = static_cast<CallData*>(arg);
  if (self->on_call_committed_ != nullptr) {
  self->on_call_committed_();
@@ -3046,8 +3050,9 @@ void CallData::RecvInitialMetadataReadyForConfigSelectorCommitCallback(

  // TODO(roth): Consider not intercepting this callback unless we
  // actually need to, if this causes a performance problem.
- void CallData::InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
- grpc_transport_stream_op_batch* batch) {
+ void ChannelData::CallData::
+ InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
+ grpc_transport_stream_op_batch* batch) {
  original_recv_initial_metadata_ready_ =
  batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
  GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_,
@@ -3057,12 +3062,13 @@ void CallData::InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
  &recv_initial_metadata_ready_;
  }

- void CallData::AsyncResolutionDone(grpc_call_element* elem, grpc_error* error) {
+ void ChannelData::CallData::AsyncResolutionDone(grpc_call_element* elem,
+ grpc_error* error) {
  GRPC_CLOSURE_INIT(&pick_closure_, ResolutionDone, elem, nullptr);
  ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
  }

- void CallData::ResolutionDone(void* arg, grpc_error* error) {
+ void ChannelData::CallData::ResolutionDone(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
  CallData* calld = static_cast<CallData*>(elem->call_data);
@@ -3078,13 +3084,13 @@ void CallData::ResolutionDone(void* arg, grpc_error* error) {
  calld->CreateDynamicCall(elem);
  }

- void CallData::CheckResolution(void* arg, grpc_error* error) {
+ void ChannelData::CallData::CheckResolution(void* arg, grpc_error* error) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  CallData* calld = static_cast<CallData*>(elem->call_data);
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
  bool resolution_complete;
  {
- MutexLock lock(chand->resolution_mu());
+ MutexLock lock(&chand->resolution_mu_);
  resolution_complete = calld->CheckResolutionLocked(elem, &error);
  }
  if (resolution_complete) {
@@ -3093,8 +3099,8 @@ void CallData::CheckResolution(void* arg, grpc_error* error) {
  }
  }

- bool CallData::CheckResolutionLocked(grpc_call_element* elem,
- grpc_error** error) {
+ bool ChannelData::CallData::CheckResolutionLocked(grpc_call_element* elem,
+ grpc_error** error) {
  ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
  // If we're still in IDLE, we need to start resolving.
  if (GPR_UNLIKELY(chand->CheckConnectivityState(false) == GRPC_CHANNEL_IDLE)) {
@@ -3102,16 +3108,16 @@ bool CallData::CheckResolutionLocked(grpc_call_element* elem,
  // in case we are still in IDLE state. Since we are holding on to the
  // resolution mutex here, we offload it on the ExecCtx so that we don't
  // deadlock with ourselves.
- GRPC_CHANNEL_STACK_REF(chand->owning_stack(), "CheckResolutionLocked");
+ GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "CheckResolutionLocked");
  ExecCtx::Run(
  DEBUG_LOCATION,
  GRPC_CLOSURE_CREATE(
  [](void* arg, grpc_error* /*error*/) {
  auto* chand = static_cast<ChannelData*>(arg);
- chand->work_serializer()->Run(
+ chand->work_serializer_->Run(
  [chand]() {
  chand->CheckConnectivityState(/*try_to_connect=*/true);
- GRPC_CHANNEL_STACK_UNREF(chand->owning_stack(),
+ GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_,
  "CheckResolutionLocked");
  },
  DEBUG_LOCATION);
@@ -3128,10 +3134,10 @@ bool CallData::CheckResolutionLocked(grpc_call_element* elem,
  send_initial_metadata.send_initial_metadata_flags;
  // If we don't yet have a resolver result, we need to queue the call
  // until we get one.
- if (GPR_UNLIKELY(!chand->received_service_config_data())) {
+ if (GPR_UNLIKELY(!chand->received_service_config_data_)) {
  // If the resolver returned transient failure before returning the
  // first service config, fail any non-wait_for_ready calls.
- grpc_error* resolver_error = chand->resolver_transient_failure_error();
+ grpc_error* resolver_error = chand->resolver_transient_failure_error_;
  if (resolver_error != GRPC_ERROR_NONE &&
  (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) ==
  0) {
@@ -3154,7 +3160,7 @@ bool CallData::CheckResolutionLocked(grpc_call_element* elem,
  return true;
  }

- void CallData::CreateDynamicCall(grpc_call_element* elem) {
+ void ChannelData::CallData::CreateDynamicCall(grpc_call_element* elem) {
  auto* chand = static_cast<ChannelData*>(elem->channel_data);
  DynamicFilters::Call::Args args = {std::move(dynamic_filters_),
  pollent_,
@@ -3224,7 +3230,7 @@ void CallData::CreateDynamicCall(grpc_call_element* elem) {
  // (census filter is on top of this one)
  // - add census stats for retries

- RetryingCall::RetryingCall(
+ ChannelData::RetryingCall::RetryingCall(
3228
3234
  ChannelData* chand, const grpc_call_element_args& args,
3229
3235
  grpc_polling_entity* pollent,
3230
3236
  RefCountedPtr<ServerRetryThrottleData> retry_throttle_data,
@@ -3257,7 +3263,7 @@ RetryingCall::RetryingCall(
3257
3263
  retry_committed_(false),
3258
3264
  last_attempt_got_server_pushback_(false) {}
3259
3265
 
3260
- RetryingCall::~RetryingCall() {
3266
+ ChannelData::RetryingCall::~RetryingCall() {
3261
3267
  grpc_slice_unref_internal(path_);
3262
3268
  GRPC_ERROR_UNREF(cancel_error_);
3263
3269
  // Make sure there are no remaining pending batches.
@@ -3266,7 +3272,7 @@ RetryingCall::~RetryingCall() {
3266
3272
  }
3267
3273
  }
3268
3274
 
3269
- void RetryingCall::StartTransportStreamOpBatch(
3275
+ void ChannelData::RetryingCall::StartTransportStreamOpBatch(
3270
3276
  grpc_transport_stream_op_batch* batch) {
3271
3277
  // If we've previously been cancelled, immediately fail any new batches.
3272
3278
  if (GPR_UNLIKELY(cancel_error_ != GRPC_ERROR_NONE)) {
@@ -3332,7 +3338,8 @@ void RetryingCall::StartTransportStreamOpBatch(
3332
3338
  PendingBatchesResume();
3333
3339
  }
3334
3340
 
3335
- RefCountedPtr<SubchannelCall> RetryingCall::subchannel_call() const {
3341
+ RefCountedPtr<SubchannelCall> ChannelData::RetryingCall::subchannel_call()
3342
+ const {
3336
3343
  if (lb_call_ == nullptr) return nullptr;
3337
3344
  return lb_call_->subchannel_call();
3338
3345
  }
@@ -3341,7 +3348,8 @@ RefCountedPtr<SubchannelCall> RetryingCall::subchannel_call() const {
3341
3348
  // send op data caching
3342
3349
  //
3343
3350
 
3344
- void RetryingCall::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
3351
+ void ChannelData::RetryingCall::MaybeCacheSendOpsForBatch(
3352
+ PendingBatch* pending) {
3345
3353
  if (pending->send_ops_cached) return;
3346
3354
  pending->send_ops_cached = true;
3347
3355
  grpc_transport_stream_op_batch* batch = pending->batch;
@@ -3380,7 +3388,7 @@ void RetryingCall::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
3380
3388
  }
3381
3389
  }
3382
3390
 
3383
- void RetryingCall::FreeCachedSendInitialMetadata() {
3391
+ void ChannelData::RetryingCall::FreeCachedSendInitialMetadata() {
3384
3392
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3385
3393
  gpr_log(GPR_INFO,
3386
3394
  "chand=%p retrying_call=%p: destroying send_initial_metadata",
@@ -3389,7 +3397,7 @@ void RetryingCall::FreeCachedSendInitialMetadata() {
3389
3397
  grpc_metadata_batch_destroy(&send_initial_metadata_);
3390
3398
  }
3391
3399
 
3392
- void RetryingCall::FreeCachedSendMessage(size_t idx) {
3400
+ void ChannelData::RetryingCall::FreeCachedSendMessage(size_t idx) {
3393
3401
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3394
3402
  gpr_log(GPR_INFO,
3395
3403
  "chand=%p retrying_call=%p: destroying send_messages[%" PRIuPTR "]",
@@ -3398,7 +3406,7 @@ void RetryingCall::FreeCachedSendMessage(size_t idx) {
3398
3406
  send_messages_[idx]->Destroy();
3399
3407
  }
3400
3408
 
3401
- void RetryingCall::FreeCachedSendTrailingMetadata() {
3409
+ void ChannelData::RetryingCall::FreeCachedSendTrailingMetadata() {
3402
3410
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3403
3411
  gpr_log(GPR_INFO,
3404
3412
  "chand_=%p retrying_call=%p: destroying send_trailing_metadata",
@@ -3407,7 +3415,7 @@ void RetryingCall::FreeCachedSendTrailingMetadata() {
3407
3415
  grpc_metadata_batch_destroy(&send_trailing_metadata_);
3408
3416
  }
3409
3417
 
3410
- void RetryingCall::FreeCachedSendOpDataAfterCommit(
3418
+ void ChannelData::RetryingCall::FreeCachedSendOpDataAfterCommit(
3411
3419
  SubchannelCallRetryState* retry_state) {
3412
3420
  if (retry_state->completed_send_initial_metadata) {
3413
3421
  FreeCachedSendInitialMetadata();
@@ -3420,7 +3428,7 @@ void RetryingCall::FreeCachedSendOpDataAfterCommit(
3420
3428
  }
3421
3429
  }
3422
3430
 
3423
- void RetryingCall::FreeCachedSendOpDataForCompletedBatch(
3431
+ void ChannelData::RetryingCall::FreeCachedSendOpDataForCompletedBatch(
3424
3432
  SubchannelCallBatchData* batch_data,
3425
3433
  SubchannelCallRetryState* retry_state) {
3426
3434
  if (batch_data->batch.send_initial_metadata) {
@@ -3438,7 +3446,8 @@ void RetryingCall::FreeCachedSendOpDataForCompletedBatch(
3438
3446
  // pending_batches management
3439
3447
  //
3440
3448
 
3441
- size_t RetryingCall::GetBatchIndex(grpc_transport_stream_op_batch* batch) {
3449
+ size_t ChannelData::RetryingCall::GetBatchIndex(
3450
+ grpc_transport_stream_op_batch* batch) {
3442
3451
  // Note: It is important the send_initial_metadata be the first entry
3443
3452
  // here, since the code in pick_subchannel_locked() assumes it will be.
3444
3453
  if (batch->send_initial_metadata) return 0;
@@ -3451,7 +3460,8 @@ size_t RetryingCall::GetBatchIndex(grpc_transport_stream_op_batch* batch) {
3451
3460
  }
3452
3461
 
3453
3462
  // This is called via the call combiner, so access to calld is synchronized.
3454
- void RetryingCall::PendingBatchesAdd(grpc_transport_stream_op_batch* batch) {
3463
+ void ChannelData::RetryingCall::PendingBatchesAdd(
3464
+ grpc_transport_stream_op_batch* batch) {
3455
3465
  const size_t idx = GetBatchIndex(batch);
3456
3466
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3457
3467
  gpr_log(
@@ -3482,7 +3492,7 @@ void RetryingCall::PendingBatchesAdd(grpc_transport_stream_op_batch* batch) {
3482
3492
  pending_send_trailing_metadata_ = true;
3483
3493
  }
3484
3494
  if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
3485
- chand_->per_rpc_retry_buffer_size())) {
3495
+ chand_->per_rpc_retry_buffer_size_)) {
3486
3496
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3487
3497
  gpr_log(GPR_INFO,
3488
3498
  "chand=%p retrying_call=%p: exceeded retry buffer size, "
@@ -3510,7 +3520,7 @@ void RetryingCall::PendingBatchesAdd(grpc_transport_stream_op_batch* batch) {
3510
3520
  }
3511
3521
  }
3512
3522
 
3513
- void RetryingCall::PendingBatchClear(PendingBatch* pending) {
3523
+ void ChannelData::RetryingCall::PendingBatchClear(PendingBatch* pending) {
3514
3524
  if (enable_retries_) {
3515
3525
  if (pending->batch->send_initial_metadata) {
3516
3526
  pending_send_initial_metadata_ = false;
@@ -3525,7 +3535,7 @@ void RetryingCall::PendingBatchClear(PendingBatch* pending) {
3525
3535
  pending->batch = nullptr;
3526
3536
  }
3527
3537
 
3528
- void RetryingCall::MaybeClearPendingBatch(PendingBatch* pending) {
3538
+ void ChannelData::RetryingCall::MaybeClearPendingBatch(PendingBatch* pending) {
3529
3539
  grpc_transport_stream_op_batch* batch = pending->batch;
3530
3540
  // We clear the pending batch if all of its callbacks have been
3531
3541
  // scheduled and reset to nullptr.
@@ -3547,8 +3557,8 @@ void RetryingCall::MaybeClearPendingBatch(PendingBatch* pending) {
3547
3557
  }
3548
3558
 
3549
3559
  // This is called via the call combiner, so access to calld is synchronized.
3550
- void RetryingCall::FailPendingBatchInCallCombiner(void* arg,
3551
- grpc_error* error) {
3560
+ void ChannelData::RetryingCall::FailPendingBatchInCallCombiner(
3561
+ void* arg, grpc_error* error) {
3552
3562
  grpc_transport_stream_op_batch* batch =
3553
3563
  static_cast<grpc_transport_stream_op_batch*>(arg);
3554
3564
  RetryingCall* call =
@@ -3559,7 +3569,7 @@ void RetryingCall::FailPendingBatchInCallCombiner(void* arg,
3559
3569
  }
3560
3570
 
3561
3571
  // This is called via the call combiner, so access to calld is synchronized.
3562
- void RetryingCall::PendingBatchesFail(
3572
+ void ChannelData::RetryingCall::PendingBatchesFail(
3563
3573
  grpc_error* error,
3564
3574
  YieldCallCombinerPredicate yield_call_combiner_predicate) {
3565
3575
  GPR_ASSERT(error != GRPC_ERROR_NONE);
@@ -3596,18 +3606,18 @@ void RetryingCall::PendingBatchesFail(
3596
3606
  }
3597
3607
 
3598
3608
  // This is called via the call combiner, so access to calld is synchronized.
3599
- void RetryingCall::ResumePendingBatchInCallCombiner(void* arg,
3600
- grpc_error* /*ignored*/) {
3609
+ void ChannelData::RetryingCall::ResumePendingBatchInCallCombiner(
3610
+ void* arg, grpc_error* /*ignored*/) {
3601
3611
  grpc_transport_stream_op_batch* batch =
3602
3612
  static_cast<grpc_transport_stream_op_batch*>(arg);
3603
- auto* lb_call =
3604
- static_cast<LoadBalancedCall*>(batch->handler_private.extra_arg);
3613
+ auto* lb_call = static_cast<ChannelData::LoadBalancedCall*>(
3614
+ batch->handler_private.extra_arg);
3605
3615
  // Note: This will release the call combiner.
3606
3616
  lb_call->StartTransportStreamOpBatch(batch);
3607
3617
  }
3608
3618
 
3609
3619
  // This is called via the call combiner, so access to calld is synchronized.
3610
- void RetryingCall::PendingBatchesResume() {
3620
+ void ChannelData::RetryingCall::PendingBatchesResume() {
3611
3621
  if (enable_retries_) {
3612
3622
  StartRetriableSubchannelBatches(this, GRPC_ERROR_NONE);
3613
3623
  return;
@@ -3641,8 +3651,9 @@ void RetryingCall::PendingBatchesResume() {
3641
3651
  }
3642
3652
 
3643
3653
  template <typename Predicate>
3644
- RetryingCall::PendingBatch* RetryingCall::PendingBatchFind(
3645
- const char* log_message, Predicate predicate) {
3654
+ ChannelData::RetryingCall::PendingBatch*
3655
+ ChannelData::RetryingCall::PendingBatchFind(const char* log_message,
3656
+ Predicate predicate) {
3646
3657
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
3647
3658
  PendingBatch* pending = &pending_batches_[i];
3648
3659
  grpc_transport_stream_op_batch* batch = pending->batch;
@@ -3663,7 +3674,8 @@ RetryingCall::PendingBatch* RetryingCall::PendingBatchFind(
3663
3674
  // retry code
3664
3675
  //
3665
3676
 
3666
- void RetryingCall::RetryCommit(SubchannelCallRetryState* retry_state) {
3677
+ void ChannelData::RetryingCall::RetryCommit(
3678
+ SubchannelCallRetryState* retry_state) {
3667
3679
  if (retry_committed_) return;
3668
3680
  retry_committed_ = true;
3669
3681
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -3675,8 +3687,8 @@ void RetryingCall::RetryCommit(SubchannelCallRetryState* retry_state) {
3675
3687
  }
3676
3688
  }
3677
3689
 
3678
- void RetryingCall::DoRetry(SubchannelCallRetryState* retry_state,
3679
- grpc_millis server_pushback_ms) {
3690
+ void ChannelData::RetryingCall::DoRetry(SubchannelCallRetryState* retry_state,
3691
+ grpc_millis server_pushback_ms) {
3680
3692
  GPR_ASSERT(retry_policy_ != nullptr);
3681
3693
  // Reset LB call.
3682
3694
  lb_call_.reset();
@@ -3703,9 +3715,9 @@ void RetryingCall::DoRetry(SubchannelCallRetryState* retry_state,
3703
3715
  if (retry_state != nullptr) retry_state->retry_dispatched = true;
3704
3716
  }
3705
3717
 
3706
- bool RetryingCall::MaybeRetry(SubchannelCallBatchData* batch_data,
3707
- grpc_status_code status,
3708
- grpc_mdelem* server_pushback_md) {
3718
+ bool ChannelData::RetryingCall::MaybeRetry(SubchannelCallBatchData* batch_data,
3719
+ grpc_status_code status,
3720
+ grpc_mdelem* server_pushback_md) {
3709
3721
  // Get retry policy.
3710
3722
  if (retry_policy_ == nullptr) return false;
3711
3723
  // If we've already dispatched a retry from this call, return true.
@@ -3813,17 +3825,17 @@ bool RetryingCall::MaybeRetry(SubchannelCallBatchData* batch_data,
3813
3825
  }
3814
3826
 
3815
3827
  //
3816
- // RetryingCall::SubchannelCallBatchData
3828
+ // ChannelData::RetryingCall::SubchannelCallBatchData
3817
3829
  //
3818
3830
 
3819
- RetryingCall::SubchannelCallBatchData*
3820
- RetryingCall::SubchannelCallBatchData::Create(RetryingCall* call, int refcount,
3821
- bool set_on_complete) {
3831
+ ChannelData::RetryingCall::SubchannelCallBatchData*
3832
+ ChannelData::RetryingCall::SubchannelCallBatchData::Create(
3833
+ RetryingCall* call, int refcount, bool set_on_complete) {
3822
3834
  return call->arena_->New<SubchannelCallBatchData>(call, refcount,
3823
3835
  set_on_complete);
3824
3836
  }
3825
3837
 
3826
- RetryingCall::SubchannelCallBatchData::SubchannelCallBatchData(
3838
+ ChannelData::RetryingCall::SubchannelCallBatchData::SubchannelCallBatchData(
3827
3839
  RetryingCall* call, int refcount, bool set_on_complete)
3828
3840
  : call(call), lb_call(call->lb_call_) {
3829
3841
  SubchannelCallRetryState* retry_state =
@@ -3831,14 +3843,14 @@ RetryingCall::SubchannelCallBatchData::SubchannelCallBatchData(
3831
3843
  batch.payload = &retry_state->batch_payload;
3832
3844
  gpr_ref_init(&refs, refcount);
3833
3845
  if (set_on_complete) {
3834
- GRPC_CLOSURE_INIT(&on_complete, RetryingCall::OnComplete, this,
3846
+ GRPC_CLOSURE_INIT(&on_complete, ChannelData::RetryingCall::OnComplete, this,
3835
3847
  grpc_schedule_on_exec_ctx);
3836
3848
  batch.on_complete = &on_complete;
3837
3849
  }
3838
3850
  GRPC_CALL_STACK_REF(call->owning_call_, "batch_data");
3839
3851
  }
3840
3852
 
3841
- void RetryingCall::SubchannelCallBatchData::Destroy() {
3853
+ void ChannelData::RetryingCall::SubchannelCallBatchData::Destroy() {
3842
3854
  SubchannelCallRetryState* retry_state =
3843
3855
  static_cast<SubchannelCallRetryState*>(lb_call->GetParentData());
3844
3856
  if (batch.send_initial_metadata) {
@@ -3861,8 +3873,8 @@ void RetryingCall::SubchannelCallBatchData::Destroy() {
3861
3873
  // recv_initial_metadata callback handling
3862
3874
  //
3863
3875
 
3864
- void RetryingCall::InvokeRecvInitialMetadataCallback(void* arg,
3865
- grpc_error* error) {
3876
+ void ChannelData::RetryingCall::InvokeRecvInitialMetadataCallback(
3877
+ void* arg, grpc_error* error) {
3866
3878
  SubchannelCallBatchData* batch_data =
3867
3879
  static_cast<SubchannelCallBatchData*>(arg);
3868
3880
  // Find pending batch.
@@ -3896,7 +3908,8 @@ void RetryingCall::InvokeRecvInitialMetadataCallback(void* arg,
3896
3908
  GRPC_ERROR_REF(error));
3897
3909
  }
3898
3910
 
3899
- void RetryingCall::RecvInitialMetadataReady(void* arg, grpc_error* error) {
3911
+ void ChannelData::RetryingCall::RecvInitialMetadataReady(void* arg,
3912
+ grpc_error* error) {
3900
3913
  SubchannelCallBatchData* batch_data =
3901
3914
  static_cast<SubchannelCallBatchData*>(arg);
3902
3915
  RetryingCall* call = batch_data->call;
@@ -3956,7 +3969,8 @@ void RetryingCall::RecvInitialMetadataReady(void* arg, grpc_error* error) {
3956
3969
  // recv_message callback handling
3957
3970
  //
3958
3971
 
3959
- void RetryingCall::InvokeRecvMessageCallback(void* arg, grpc_error* error) {
3972
+ void ChannelData::RetryingCall::InvokeRecvMessageCallback(void* arg,
3973
+ grpc_error* error) {
3960
3974
  SubchannelCallBatchData* batch_data =
3961
3975
  static_cast<SubchannelCallBatchData*>(arg);
3962
3976
  RetryingCall* call = batch_data->call;
@@ -3986,7 +4000,7 @@ void RetryingCall::InvokeRecvMessageCallback(void* arg, grpc_error* error) {
3986
4000
  Closure::Run(DEBUG_LOCATION, recv_message_ready, GRPC_ERROR_REF(error));
3987
4001
  }
3988
4002
 
3989
- void RetryingCall::RecvMessageReady(void* arg, grpc_error* error) {
4003
+ void ChannelData::RetryingCall::RecvMessageReady(void* arg, grpc_error* error) {
3990
4004
  SubchannelCallBatchData* batch_data =
3991
4005
  static_cast<SubchannelCallBatchData*>(arg);
3992
4006
  RetryingCall* call = batch_data->call;
@@ -4042,9 +4056,9 @@ void RetryingCall::RecvMessageReady(void* arg, grpc_error* error) {
4042
4056
  // recv_trailing_metadata handling
4043
4057
  //
4044
4058
 
4045
- void RetryingCall::GetCallStatus(grpc_metadata_batch* md_batch,
4046
- grpc_error* error, grpc_status_code* status,
4047
- grpc_mdelem** server_pushback_md) {
4059
+ void ChannelData::RetryingCall::GetCallStatus(
4060
+ grpc_metadata_batch* md_batch, grpc_error* error, grpc_status_code* status,
4061
+ grpc_mdelem** server_pushback_md) {
4048
4062
  if (error != GRPC_ERROR_NONE) {
4049
4063
  grpc_error_get_status(error, deadline_, status, nullptr, nullptr, nullptr);
4050
4064
  } else {
@@ -4059,7 +4073,7 @@ void RetryingCall::GetCallStatus(grpc_metadata_batch* md_batch,
4059
4073
  GRPC_ERROR_UNREF(error);
4060
4074
  }
4061
4075
 
4062
- void RetryingCall::AddClosureForRecvTrailingMetadataReady(
4076
+ void ChannelData::RetryingCall::AddClosureForRecvTrailingMetadataReady(
4063
4077
  SubchannelCallBatchData* batch_data, grpc_error* error,
4064
4078
  CallCombinerClosureList* closures) {
4065
4079
  // Find pending batch.
@@ -4093,7 +4107,7 @@ void RetryingCall::AddClosureForRecvTrailingMetadataReady(
4093
4107
  MaybeClearPendingBatch(pending);
4094
4108
  }
4095
4109
 
4096
- void RetryingCall::AddClosuresForDeferredRecvCallbacks(
4110
+ void ChannelData::RetryingCall::AddClosuresForDeferredRecvCallbacks(
4097
4111
  SubchannelCallBatchData* batch_data, SubchannelCallRetryState* retry_state,
4098
4112
  CallCombinerClosureList* closures) {
4099
4113
  if (batch_data->batch.recv_trailing_metadata) {
@@ -4124,7 +4138,7 @@ void RetryingCall::AddClosuresForDeferredRecvCallbacks(
4124
4138
  }
4125
4139
  }
4126
4140
 
4127
- bool RetryingCall::PendingBatchIsUnstarted(
4141
+ bool ChannelData::RetryingCall::PendingBatchIsUnstarted(
4128
4142
  PendingBatch* pending, SubchannelCallRetryState* retry_state) {
4129
4143
  if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
4130
4144
  return false;
@@ -4144,7 +4158,7 @@ bool RetryingCall::PendingBatchIsUnstarted(
4144
4158
  return false;
4145
4159
  }
4146
4160
 
4147
- void RetryingCall::AddClosuresToFailUnstartedPendingBatches(
4161
+ void ChannelData::RetryingCall::AddClosuresToFailUnstartedPendingBatches(
4148
4162
  SubchannelCallRetryState* retry_state, grpc_error* error,
4149
4163
  CallCombinerClosureList* closures) {
4150
4164
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@@ -4166,7 +4180,7 @@ void RetryingCall::AddClosuresToFailUnstartedPendingBatches(
4166
4180
  GRPC_ERROR_UNREF(error);
4167
4181
  }
4168
4182
 
4169
- void RetryingCall::RunClosuresForCompletedCall(
4183
+ void ChannelData::RetryingCall::RunClosuresForCompletedCall(
4170
4184
  SubchannelCallBatchData* batch_data, grpc_error* error) {
4171
4185
  SubchannelCallRetryState* retry_state =
4172
4186
  static_cast<SubchannelCallRetryState*>(
@@ -4190,7 +4204,8 @@ void RetryingCall::RunClosuresForCompletedCall(
4190
4204
  GRPC_ERROR_UNREF(error);
4191
4205
  }
4192
4206
 
4193
- void RetryingCall::RecvTrailingMetadataReady(void* arg, grpc_error* error) {
4207
+ void ChannelData::RetryingCall::RecvTrailingMetadataReady(void* arg,
4208
+ grpc_error* error) {
4194
4209
  SubchannelCallBatchData* batch_data =
4195
4210
  static_cast<SubchannelCallBatchData*>(arg);
4196
4211
  RetryingCall* call = batch_data->call;
@@ -4240,7 +4255,7 @@ void RetryingCall::RecvTrailingMetadataReady(void* arg, grpc_error* error) {
4240
4255
  // on_complete callback handling
4241
4256
  //
4242
4257
 
4243
- void RetryingCall::AddClosuresForCompletedPendingBatch(
4258
+ void ChannelData::RetryingCall::AddClosuresForCompletedPendingBatch(
4244
4259
  SubchannelCallBatchData* batch_data, grpc_error* error,
4245
4260
  CallCombinerClosureList* closures) {
4246
4261
  PendingBatch* pending = PendingBatchFind(
@@ -4267,7 +4282,7 @@ void RetryingCall::AddClosuresForCompletedPendingBatch(
4267
4282
  MaybeClearPendingBatch(pending);
4268
4283
  }
4269
4284
 
4270
- void RetryingCall::AddClosuresForReplayOrPendingSendOps(
4285
+ void ChannelData::RetryingCall::AddClosuresForReplayOrPendingSendOps(
4271
4286
  SubchannelCallBatchData* batch_data, SubchannelCallRetryState* retry_state,
4272
4287
  CallCombinerClosureList* closures) {
4273
4288
  bool have_pending_send_message_ops =
@@ -4302,7 +4317,7 @@ void RetryingCall::AddClosuresForReplayOrPendingSendOps(
4302
4317
  }
4303
4318
  }
4304
4319
 
4305
- void RetryingCall::OnComplete(void* arg, grpc_error* error) {
4320
+ void ChannelData::RetryingCall::OnComplete(void* arg, grpc_error* error) {
4306
4321
  SubchannelCallBatchData* batch_data =
4307
4322
  static_cast<SubchannelCallBatchData*>(arg);
4308
4323
  RetryingCall* call = batch_data->call;
@@ -4366,17 +4381,17 @@ void RetryingCall::OnComplete(void* arg, grpc_error* error) {
4366
4381
  // subchannel batch construction
4367
4382
  //
4368
4383
 
4369
- void RetryingCall::StartBatchInCallCombiner(void* arg,
4370
- grpc_error* /*ignored*/) {
4384
+ void ChannelData::RetryingCall::StartBatchInCallCombiner(
4385
+ void* arg, grpc_error* /*ignored*/) {
4371
4386
  grpc_transport_stream_op_batch* batch =
4372
4387
  static_cast<grpc_transport_stream_op_batch*>(arg);
4373
- auto* lb_call =
4374
- static_cast<LoadBalancedCall*>(batch->handler_private.extra_arg);
4388
+ auto* lb_call = static_cast<ChannelData::LoadBalancedCall*>(
4389
+ batch->handler_private.extra_arg);
4375
4390
  // Note: This will release the call combiner.
4376
4391
  lb_call->StartTransportStreamOpBatch(batch);
4377
4392
  }
4378
4393
 
4379
- void RetryingCall::AddClosureForSubchannelBatch(
4394
+ void ChannelData::RetryingCall::AddClosureForSubchannelBatch(
4380
4395
  grpc_transport_stream_op_batch* batch, CallCombinerClosureList* closures) {
4381
4396
  batch->handler_private.extra_arg = lb_call_.get();
4382
4397
  GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
@@ -4390,7 +4405,7 @@ void RetryingCall::AddClosureForSubchannelBatch(
4390
4405
  "start_subchannel_batch");
4391
4406
  }
4392
4407
 
4393
- void RetryingCall::AddRetriableSendInitialMetadataOp(
4408
+ void ChannelData::RetryingCall::AddRetriableSendInitialMetadataOp(
4394
4409
  SubchannelCallRetryState* retry_state,
4395
4410
  SubchannelCallBatchData* batch_data) {
4396
4411
  // Maps the number of retries to the corresponding metadata value slice.
@@ -4438,7 +4453,7 @@ void RetryingCall::AddRetriableSendInitialMetadataOp(
4438
4453
  batch_data->batch.payload->send_initial_metadata.peer_string = peer_string_;
4439
4454
  }
4440
4455
 
4441
- void RetryingCall::AddRetriableSendMessageOp(
4456
+ void ChannelData::RetryingCall::AddRetriableSendMessageOp(
4442
4457
  SubchannelCallRetryState* retry_state,
4443
4458
  SubchannelCallBatchData* batch_data) {
4444
4459
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -4456,7 +4471,7 @@ void RetryingCall::AddRetriableSendMessageOp(
4456
4471
  retry_state->send_message.get());
4457
4472
  }
4458
4473
 
4459
- void RetryingCall::AddRetriableSendTrailingMetadataOp(
4474
+ void ChannelData::RetryingCall::AddRetriableSendTrailingMetadataOp(
4460
4475
  SubchannelCallRetryState* retry_state,
4461
4476
  SubchannelCallBatchData* batch_data) {
4462
4477
  // We need to make a copy of the metadata batch for each attempt, since
@@ -4474,7 +4489,7 @@ void RetryingCall::AddRetriableSendTrailingMetadataOp(
4474
4489
  &retry_state->send_trailing_metadata;
4475
4490
  }
4476
4491
 
4477
- void RetryingCall::AddRetriableRecvInitialMetadataOp(
4492
+ void ChannelData::RetryingCall::AddRetriableRecvInitialMetadataOp(
4478
4493
  SubchannelCallRetryState* retry_state,
4479
4494
  SubchannelCallBatchData* batch_data) {
4480
4495
  retry_state->started_recv_initial_metadata = true;
@@ -4491,7 +4506,7 @@ void RetryingCall::AddRetriableRecvInitialMetadataOp(
4491
4506
  &retry_state->recv_initial_metadata_ready;
4492
4507
  }
4493
4508
 
4494
- void RetryingCall::AddRetriableRecvMessageOp(
4509
+ void ChannelData::RetryingCall::AddRetriableRecvMessageOp(
4495
4510
  SubchannelCallRetryState* retry_state,
4496
4511
  SubchannelCallBatchData* batch_data) {
4497
4512
  ++retry_state->started_recv_message_count;
@@ -4504,7 +4519,7 @@ void RetryingCall::AddRetriableRecvMessageOp(
4504
4519
  &retry_state->recv_message_ready;
4505
4520
  }
4506
4521
 
4507
- void RetryingCall::AddRetriableRecvTrailingMetadataOp(
4522
+ void ChannelData::RetryingCall::AddRetriableRecvTrailingMetadataOp(
4508
4523
  SubchannelCallRetryState* retry_state,
4509
4524
  SubchannelCallBatchData* batch_data) {
4510
4525
  retry_state->started_recv_trailing_metadata = true;
@@ -4522,7 +4537,7 @@ void RetryingCall::AddRetriableRecvTrailingMetadataOp(
4522
4537
  &retry_state->recv_trailing_metadata_ready;
4523
4538
  }
4524
4539
 
4525
- void RetryingCall::StartInternalRecvTrailingMetadata() {
4540
+ void ChannelData::RetryingCall::StartInternalRecvTrailingMetadata() {
4526
4541
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4527
4542
  gpr_log(
4528
4543
  GPR_INFO,
@@ -4547,8 +4562,8 @@ void RetryingCall::StartInternalRecvTrailingMetadata() {
4547
4562
  // If there are any cached send ops that need to be replayed on the
4548
4563
  // current subchannel call, creates and returns a new subchannel batch
4549
4564
  // to replay those ops. Otherwise, returns nullptr.
4550
- RetryingCall::SubchannelCallBatchData*
4551
- RetryingCall::MaybeCreateSubchannelBatchForReplay(
4565
+ ChannelData::RetryingCall::SubchannelCallBatchData*
4566
+ ChannelData::RetryingCall::MaybeCreateSubchannelBatchForReplay(
4552
4567
  SubchannelCallRetryState* retry_state) {
4553
4568
  SubchannelCallBatchData* replay_batch_data = nullptr;
4554
4569
  // send_initial_metadata.
@@ -4606,7 +4621,7 @@ RetryingCall::MaybeCreateSubchannelBatchForReplay(
4606
4621
  return replay_batch_data;
4607
4622
  }
4608
4623
 
4609
- void RetryingCall::AddSubchannelBatchesForPendingBatches(
4624
+ void ChannelData::RetryingCall::AddSubchannelBatchesForPendingBatches(
4610
4625
  SubchannelCallRetryState* retry_state, CallCombinerClosureList* closures) {
4611
4626
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
4612
4627
  PendingBatch* pending = &pending_batches_[i];
@@ -4730,8 +4745,8 @@ void RetryingCall::AddSubchannelBatchesForPendingBatches(
4730
4745
  }
4731
4746
  }
4732
4747
 
4733
- void RetryingCall::StartRetriableSubchannelBatches(void* arg,
4734
- grpc_error* /*ignored*/) {
4748
+ void ChannelData::RetryingCall::StartRetriableSubchannelBatches(
4749
+ void* arg, grpc_error* /*ignored*/) {
4735
4750
  RetryingCall* call = static_cast<RetryingCall*>(arg);
4736
4751
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4737
4752
  gpr_log(GPR_INFO,
@@ -4767,7 +4782,7 @@ void RetryingCall::StartRetriableSubchannelBatches(void* arg,
4767
4782
  closures.RunClosures(call->call_combiner_);
4768
4783
  }
4769
4784
 
4770
- void RetryingCall::CreateLbCall(void* arg, grpc_error* /*error*/) {
4785
+ void ChannelData::RetryingCall::CreateLbCall(void* arg, grpc_error* /*error*/) {
4771
4786
  auto* call = static_cast<RetryingCall*>(arg);
4772
4787
  const size_t parent_data_size =
4773
4788
  call->enable_retries_ ? sizeof(SubchannelCallRetryState) : 0;
@@ -4775,8 +4790,8 @@ void RetryingCall::CreateLbCall(void* arg, grpc_error* /*error*/) {
4775
4790
  call->call_context_, call->path_,
4776
4791
  call->call_start_time_, call->deadline_,
4777
4792
  call->arena_, call->call_combiner_};
4778
- call->lb_call_ = LoadBalancedCall::Create(call->chand_, args, call->pollent_,
4779
- parent_data_size);
4793
+ call->lb_call_ = ChannelData::LoadBalancedCall::Create(
4794
+ call->chand_, args, call->pollent_, parent_data_size);
4780
4795
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
4781
4796
  gpr_log(GPR_INFO, "chand=%p retrying_call=%p: create lb_call=%p",
4782
4797
  call->chand_, call, call->lb_call_.get());
@@ -4789,10 +4804,10 @@ void RetryingCall::CreateLbCall(void* arg, grpc_error* /*error*/) {
4789
4804
  }
4790
4805
 
4791
4806
  //
4792
- // LoadBalancedCall::Metadata
4807
+ // ChannelData::LoadBalancedCall::Metadata
4793
4808
  //
4794
4809
 
4795
- class LoadBalancedCall::Metadata
4810
+ class ChannelData::LoadBalancedCall::Metadata
4796
4811
  : public LoadBalancingPolicy::MetadataInterface {
4797
4812
  public:
4798
4813
  Metadata(LoadBalancedCall* lb_call, grpc_metadata_batch* batch)
@@ -4855,10 +4870,11 @@ class LoadBalancedCall::Metadata
4855
4870
  };
4856
4871
 
4857
4872
  //
4858
- // LoadBalancedCall::LbCallState
4873
+ // ChannelData::LoadBalancedCall::LbCallState
4859
4874
  //
4860
4875
 
4861
- class LoadBalancedCall::LbCallState : public LoadBalancingPolicy::CallState {
4876
+ class ChannelData::LoadBalancedCall::LbCallState
4877
+ : public LoadBalancingPolicy::CallState {
4862
4878
  public:
4863
4879
  explicit LbCallState(LoadBalancedCall* lb_call) : lb_call_(lb_call) {}
4864
4880
 
@@ -4894,9 +4910,11 @@ class LoadBalancedCall::LbCallState : public LoadBalancingPolicy::CallState {
4894
4910
  // LoadBalancedCall
4895
4911
  //
4896
4912
 
4897
- RefCountedPtr<LoadBalancedCall> LoadBalancedCall::Create(
4898
- ChannelData* chand, const grpc_call_element_args& args,
4899
- grpc_polling_entity* pollent, size_t parent_data_size) {
4913
+ RefCountedPtr<ChannelData::LoadBalancedCall>
4914
+ ChannelData::LoadBalancedCall::Create(ChannelData* chand,
4915
+ const grpc_call_element_args& args,
4916
+ grpc_polling_entity* pollent,
4917
+ size_t parent_data_size) {
4900
4918
  const size_t alloc_size =
4901
4919
  parent_data_size > 0
4902
4920
  ? (GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(LoadBalancedCall)) +
@@ -4907,9 +4925,9 @@ RefCountedPtr<LoadBalancedCall> LoadBalancedCall::Create(
4907
4925
  return lb_call;
4908
4926
  }
4909
4927
 
4910
- LoadBalancedCall::LoadBalancedCall(ChannelData* chand,
4911
- const grpc_call_element_args& args,
4912
- grpc_polling_entity* pollent)
4928
+ ChannelData::LoadBalancedCall::LoadBalancedCall(
4929
+ ChannelData* chand, const grpc_call_element_args& args,
4930
+ grpc_polling_entity* pollent)
4913
4931
  : refs_(1, GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
4914
4932
  ? "LoadBalancedCall"
4915
4933
  : nullptr),
@@ -4923,7 +4941,7 @@ LoadBalancedCall::LoadBalancedCall(ChannelData* chand,
4923
4941
  call_context_(args.context),
4924
4942
  pollent_(pollent) {}
4925
4943
 
4926
- LoadBalancedCall::~LoadBalancedCall() {
4944
+ ChannelData::LoadBalancedCall::~LoadBalancedCall() {
4927
4945
  grpc_slice_unref_internal(path_);
4928
4946
  GRPC_ERROR_UNREF(cancel_error_);
4929
4947
  if (backend_metric_data_ != nullptr) {
@@ -4936,43 +4954,45 @@ LoadBalancedCall::~LoadBalancedCall() {
4936
4954
  }
4937
4955
  }
4938
4956
 
4939
- RefCountedPtr<LoadBalancedCall> LoadBalancedCall::Ref() {
4957
+ RefCountedPtr<ChannelData::LoadBalancedCall>
4958
+ ChannelData::LoadBalancedCall::Ref() {
4940
4959
  IncrementRefCount();
4941
4960
  return RefCountedPtr<LoadBalancedCall>(this);
4942
4961
  }
4943
4962
 
4944
- RefCountedPtr<LoadBalancedCall> LoadBalancedCall::Ref(
4963
+ RefCountedPtr<ChannelData::LoadBalancedCall> ChannelData::LoadBalancedCall::Ref(
4945
4964
  const DebugLocation& location, const char* reason) {
4946
4965
  IncrementRefCount(location, reason);
4947
4966
  return RefCountedPtr<LoadBalancedCall>(this);
4948
4967
  }
4949
4968
 
4950
- void LoadBalancedCall::Unref() {
4969
+ void ChannelData::LoadBalancedCall::Unref() {
4951
4970
  if (GPR_UNLIKELY(refs_.Unref())) {
4952
4971
  this->~LoadBalancedCall();
4953
4972
  }
4954
4973
  }
4955
4974
 
4956
- void LoadBalancedCall::Unref(const DebugLocation& location,
4957
- const char* reason) {
4975
+ void ChannelData::LoadBalancedCall::Unref(const DebugLocation& location,
4976
+ const char* reason) {
4958
4977
  if (GPR_UNLIKELY(refs_.Unref(location, reason))) {
4959
4978
  this->~LoadBalancedCall();
4960
4979
  }
4961
4980
  }
4962
4981
 
4963
- void LoadBalancedCall::IncrementRefCount() { refs_.Ref(); }
4982
+ void ChannelData::LoadBalancedCall::IncrementRefCount() { refs_.Ref(); }
4964
4983
 
4965
- void LoadBalancedCall::IncrementRefCount(const DebugLocation& location,
4966
- const char* reason) {
4984
+ void ChannelData::LoadBalancedCall::IncrementRefCount(
4985
+ const DebugLocation& location, const char* reason) {
4967
4986
  refs_.Ref(location, reason);
4968
4987
  }
4969
4988
 
4970
- void* LoadBalancedCall::GetParentData() {
4989
+ void* ChannelData::LoadBalancedCall::GetParentData() {
4971
4990
  return reinterpret_cast<char*>(this) +
4972
4991
  GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(LoadBalancedCall));
4973
4992
  }
4974
4993
 
4975
- size_t LoadBalancedCall::GetBatchIndex(grpc_transport_stream_op_batch* batch) {
4994
+ size_t ChannelData::LoadBalancedCall::GetBatchIndex(
4995
+ grpc_transport_stream_op_batch* batch) {
4976
4996
  // Note: It is important the send_initial_metadata be the first entry
4977
4997
  // here, since the code in pick_subchannel_locked() assumes it will be.
4978
4998
  if (batch->send_initial_metadata) return 0;
@@ -4985,7 +5005,7 @@ size_t LoadBalancedCall::GetBatchIndex(grpc_transport_stream_op_batch* batch) {
4985
5005
  }
4986
5006
 
4987
5007
  // This is called via the call combiner, so access to calld is synchronized.
4988
- void LoadBalancedCall::PendingBatchesAdd(
5008
+ void ChannelData::LoadBalancedCall::PendingBatchesAdd(
4989
5009
  grpc_transport_stream_op_batch* batch) {
4990
5010
  const size_t idx = GetBatchIndex(batch);
4991
5011
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -4998,8 +5018,8 @@ void LoadBalancedCall::PendingBatchesAdd(
4998
5018
  }
4999
5019
 
5000
5020
  // This is called via the call combiner, so access to calld is synchronized.
5001
- void LoadBalancedCall::FailPendingBatchInCallCombiner(void* arg,
5002
- grpc_error* error) {
5021
+ void ChannelData::LoadBalancedCall::FailPendingBatchInCallCombiner(
5022
+ void* arg, grpc_error* error) {
5003
5023
  grpc_transport_stream_op_batch* batch =
5004
5024
  static_cast<grpc_transport_stream_op_batch*>(arg);
5005
5025
  auto* self = static_cast<LoadBalancedCall*>(batch->handler_private.extra_arg);
@@ -5009,7 +5029,7 @@ void LoadBalancedCall::FailPendingBatchInCallCombiner(void* arg,
5009
5029
  }
5010
5030
 
5011
5031
  // This is called via the call combiner, so access to calld is synchronized.
5012
- void LoadBalancedCall::PendingBatchesFail(
5032
+ void ChannelData::LoadBalancedCall::PendingBatchesFail(
5013
5033
  grpc_error* error,
5014
5034
  YieldCallCombinerPredicate yield_call_combiner_predicate) {
5015
5035
  GPR_ASSERT(error != GRPC_ERROR_NONE);
@@ -5044,7 +5064,7 @@ void LoadBalancedCall::PendingBatchesFail(
5044
5064
  }
5045
5065
 
5046
5066
  // This is called via the call combiner, so access to calld is synchronized.
5047
- void LoadBalancedCall::ResumePendingBatchInCallCombiner(
5067
+ void ChannelData::LoadBalancedCall::ResumePendingBatchInCallCombiner(
5048
5068
  void* arg, grpc_error* /*ignored*/) {
5049
5069
  grpc_transport_stream_op_batch* batch =
5050
5070
  static_cast<grpc_transport_stream_op_batch*>(arg);
@@ -5055,7 +5075,7 @@ void LoadBalancedCall::ResumePendingBatchInCallCombiner(
5055
5075
  }
5056
5076
 
5057
5077
  // This is called via the call combiner, so access to calld is synchronized.
5058
- void LoadBalancedCall::PendingBatchesResume() {
5078
+ void ChannelData::LoadBalancedCall::PendingBatchesResume() {
5059
5079
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
5060
5080
  size_t num_batches = 0;
5061
5081
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@@ -5083,7 +5103,7 @@ void LoadBalancedCall::PendingBatchesResume() {
5083
5103
  closures.RunClosures(call_combiner_);
5084
5104
  }
5085
5105
 
5086
- void LoadBalancedCall::StartTransportStreamOpBatch(
5106
+ void ChannelData::LoadBalancedCall::StartTransportStreamOpBatch(
5087
5107
  grpc_transport_stream_op_batch* batch) {
5088
5108
  // Intercept recv_trailing_metadata_ready for LB callback.
5089
5109
  if (batch->recv_trailing_metadata) {
@@ -5164,8 +5184,9 @@ void LoadBalancedCall::StartTransportStreamOpBatch(
5164
5184
  }
5165
5185
  }
5166
5186
 
5167
- void LoadBalancedCall::RecvTrailingMetadataReadyForLoadBalancingPolicy(
5168
- void* arg, grpc_error* error) {
5187
+ void ChannelData::LoadBalancedCall::
5188
+ RecvTrailingMetadataReadyForLoadBalancingPolicy(void* arg,
5189
+ grpc_error* error) {
5169
5190
  auto* self = static_cast<LoadBalancedCall*>(arg);
5170
5191
  if (self->lb_recv_trailing_metadata_ready_ != nullptr) {
5171
5192
  // Set error if call did not succeed.
@@ -5203,8 +5224,9 @@ void LoadBalancedCall::RecvTrailingMetadataReadyForLoadBalancingPolicy(
5203
5224
 
5204
5225
  // TODO(roth): Consider not intercepting this callback unless we
5205
5226
  // actually need to, if this causes a performance problem.
5206
- void LoadBalancedCall::InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
5207
- grpc_transport_stream_op_batch* batch) {
5227
+ void ChannelData::LoadBalancedCall::
5228
+ InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
5229
+ grpc_transport_stream_op_batch* batch) {
5208
5230
  recv_trailing_metadata_ =
5209
5231
  batch->payload->recv_trailing_metadata.recv_trailing_metadata;
5210
5232
  original_recv_trailing_metadata_ready_ =
@@ -5216,7 +5238,7 @@ void LoadBalancedCall::InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
5216
5238
  &recv_trailing_metadata_ready_;
5217
5239
  }
5218
5240
 
5219
- void LoadBalancedCall::CreateSubchannelCall() {
5241
+ void ChannelData::LoadBalancedCall::CreateSubchannelCall() {
5220
5242
  SubchannelCall::Args call_args = {
5221
5243
  std::move(connected_subchannel_), pollent_, path_, call_start_time_,
5222
5244
  deadline_, arena_,
@@ -5244,7 +5266,7 @@ void LoadBalancedCall::CreateSubchannelCall() {
5244
5266
  // because there may be multiple LB picks happening in parallel.
5245
5267
  // Instead, we will probably need to maintain a list in the CallData
5246
5268
  // object of pending LB picks to be cancelled when the closure runs.
5247
- class LoadBalancedCall::LbQueuedCallCanceller {
5269
+ class ChannelData::LoadBalancedCall::LbQueuedCallCanceller {
5248
5270
  public:
5249
5271
  explicit LbQueuedCallCanceller(RefCountedPtr<LoadBalancedCall> lb_call)
5250
5272
  : lb_call_(std::move(lb_call)) {
@@ -5259,7 +5281,7 @@ class LoadBalancedCall::LbQueuedCallCanceller {
5259
5281
  auto* lb_call = self->lb_call_.get();
5260
5282
  auto* chand = lb_call->chand_;
5261
5283
  {
5262
- MutexLock lock(chand->data_plane_mu());
5284
+ MutexLock lock(&chand->data_plane_mu_);
5263
5285
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5264
5286
  gpr_log(GPR_INFO,
5265
5287
  "chand=%p lb_call=%p: cancelling queued pick: "
@@ -5283,7 +5305,7 @@ class LoadBalancedCall::LbQueuedCallCanceller {
5283
5305
  grpc_closure closure_;
5284
5306
  };
5285
5307
 
5286
- void LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
5308
+ void ChannelData::LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
5287
5309
  if (!queued_pending_lb_pick_) return;
5288
5310
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5289
5311
  gpr_log(GPR_INFO, "chand=%p lb_call=%p: removing from queued picks list",
@@ -5295,7 +5317,7 @@ void LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
5295
5317
  lb_call_canceller_ = nullptr;
5296
5318
  }
5297
5319
 
5298
- void LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
5320
+ void ChannelData::LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
5299
5321
  if (queued_pending_lb_pick_) return;
5300
5322
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5301
5323
  gpr_log(GPR_INFO, "chand=%p lb_call=%p: adding to queued picks list",
@@ -5308,12 +5330,12 @@ void LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
5308
5330
  lb_call_canceller_ = new LbQueuedCallCanceller(Ref());
5309
5331
  }
5310
5332
 
5311
- void LoadBalancedCall::AsyncPickDone(grpc_error* error) {
5333
+ void ChannelData::LoadBalancedCall::AsyncPickDone(grpc_error* error) {
5312
5334
  GRPC_CLOSURE_INIT(&pick_closure_, PickDone, this, grpc_schedule_on_exec_ctx);
5313
5335
  ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
5314
5336
  }
5315
5337
 
5316
- void LoadBalancedCall::PickDone(void* arg, grpc_error* error) {
5338
+ void ChannelData::LoadBalancedCall::PickDone(void* arg, grpc_error* error) {
5317
5339
  auto* self = static_cast<LoadBalancedCall*>(arg);
5318
5340
  if (error != GRPC_ERROR_NONE) {
5319
5341
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
@@ -5340,11 +5362,12 @@ const char* PickResultTypeName(
5340
5362
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
5341
5363
  }
5342
5364
 
5343
- void LoadBalancedCall::PickSubchannel(void* arg, grpc_error* error) {
5365
+ void ChannelData::LoadBalancedCall::PickSubchannel(void* arg,
5366
+ grpc_error* error) {
5344
5367
  auto* self = static_cast<LoadBalancedCall*>(arg);
5345
5368
  bool pick_complete;
5346
5369
  {
5347
- MutexLock lock(self->chand_->data_plane_mu());
5370
+ MutexLock lock(&self->chand_->data_plane_mu_);
5348
5371
  pick_complete = self->PickSubchannelLocked(&error);
5349
5372
  }
5350
5373
  if (pick_complete) {
@@ -5353,7 +5376,7 @@ void LoadBalancedCall::PickSubchannel(void* arg, grpc_error* error) {
5353
5376
  }
5354
5377
  }
5355
5378
 
5356
- bool LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5379
+ bool ChannelData::LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5357
5380
  GPR_ASSERT(connected_subchannel_ == nullptr);
5358
5381
  GPR_ASSERT(subchannel_call_ == nullptr);
5359
5382
  // Grab initial metadata.
@@ -5370,7 +5393,7 @@ bool LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5370
5393
  pick_args.call_state = &lb_call_state;
5371
5394
  Metadata initial_metadata(this, initial_metadata_batch);
5372
5395
  pick_args.initial_metadata = &initial_metadata;
5373
- auto result = chand_->picker()->Pick(pick_args);
5396
+ auto result = chand_->picker_->Pick(pick_args);
5374
5397
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5375
5398
  gpr_log(
5376
5399
  GPR_INFO,
@@ -5436,16 +5459,15 @@ bool LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5436
5459
  * EXPORTED SYMBOLS
5437
5460
  */
5438
5461
 
5439
- using grpc_core::CallData;
5440
5462
  using grpc_core::ChannelData;
5441
5463
 
5442
5464
  const grpc_channel_filter grpc_client_channel_filter = {
5443
- CallData::StartTransportStreamOpBatch,
5465
+ ChannelData::CallData::StartTransportStreamOpBatch,
5444
5466
  ChannelData::StartTransportOp,
5445
- sizeof(CallData),
5446
- CallData::Init,
5447
- CallData::SetPollent,
5448
- CallData::Destroy,
5467
+ sizeof(ChannelData::CallData),
5468
+ ChannelData::CallData::Init,
5469
+ ChannelData::CallData::SetPollent,
5470
+ ChannelData::CallData::Destroy,
5449
5471
  sizeof(ChannelData),
5450
5472
  ChannelData::Init,
5451
5473
  ChannelData::Destroy,