grpc 1.37.0.pre1 → 1.39.0.pre1

Sign up to get free protection for your applications and access to all of the features.

Potentially problematic release.


This version of grpc has been flagged as potentially problematic; see the release's advisory page for more details.

Files changed (636)
  1. checksums.yaml +4 -4
  2. data/Makefile +96 -59
  3. data/include/grpc/event_engine/README.md +38 -0
  4. data/include/grpc/event_engine/endpoint_config.h +48 -0
  5. data/include/grpc/event_engine/event_engine.h +334 -0
  6. data/include/grpc/event_engine/port.h +41 -0
  7. data/include/grpc/event_engine/slice_allocator.h +91 -0
  8. data/include/grpc/grpc.h +11 -4
  9. data/include/grpc/grpc_security.h +32 -0
  10. data/include/grpc/grpc_security_constants.h +15 -0
  11. data/include/grpc/impl/codegen/grpc_types.h +28 -13
  12. data/include/grpc/impl/codegen/port_platform.h +22 -0
  13. data/include/grpc/module.modulemap +14 -14
  14. data/src/core/ext/filters/client_channel/backup_poller.cc +3 -3
  15. data/src/core/ext/filters/client_channel/channel_connectivity.cc +177 -202
  16. data/src/core/ext/filters/client_channel/client_channel.cc +630 -3103
  17. data/src/core/ext/filters/client_channel/client_channel.h +489 -55
  18. data/src/core/ext/filters/client_channel/client_channel_channelz.h +1 -1
  19. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +4 -1
  20. data/src/core/ext/filters/client_channel/config_selector.h +1 -1
  21. data/src/core/ext/filters/client_channel/connector.h +1 -1
  22. data/src/core/ext/filters/client_channel/dynamic_filters.cc +9 -10
  23. data/src/core/ext/filters/client_channel/dynamic_filters.h +3 -3
  24. data/src/core/ext/filters/client_channel/health/health_check_client.cc +28 -27
  25. data/src/core/ext/filters/client_channel/health/health_check_client.h +30 -29
  26. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +24 -21
  27. data/src/core/ext/filters/client_channel/http_proxy.cc +16 -1
  28. data/src/core/ext/filters/client_channel/lb_policy.cc +1 -1
  29. data/src/core/ext/filters/client_channel/lb_policy.h +4 -4
  30. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +6 -6
  31. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +46 -43
  32. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +1 -1
  33. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +2 -1
  34. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +5 -5
  35. data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +14 -12
  36. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +755 -0
  37. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.h +10 -0
  38. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +4 -4
  39. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +1 -1
  40. data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +15 -15
  41. data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +46 -54
  42. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +23 -23
  43. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +31 -46
  44. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +146 -155
  45. data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
  46. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +4 -4
  47. data/src/core/ext/filters/client_channel/lb_policy_registry.h +1 -1
  48. data/src/core/ext/filters/client_channel/resolver.h +2 -2
  49. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +24 -18
  50. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +1 -1
  51. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc +31 -0
  52. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc +3 -3
  53. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +2 -2
  54. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +14 -14
  55. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +33 -24
  56. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +1 -1
  57. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc +28 -0
  58. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_libuv.cc +1 -1
  59. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +1 -1
  60. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +18 -12
  61. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +20 -28
  62. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +7 -5
  63. data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +20 -13
  64. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +1 -1
  65. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +60 -32
  66. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +32 -239
  67. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +20 -49
  68. data/src/core/ext/filters/client_channel/retry_filter.cc +2449 -0
  69. data/src/core/ext/filters/client_channel/retry_filter.h +30 -0
  70. data/src/core/ext/filters/client_channel/retry_service_config.cc +306 -0
  71. data/src/core/ext/filters/client_channel/retry_service_config.h +96 -0
  72. data/src/core/ext/filters/client_channel/server_address.cc +1 -1
  73. data/src/core/ext/filters/client_channel/service_config.cc +15 -14
  74. data/src/core/ext/filters/client_channel/service_config.h +7 -6
  75. data/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc +5 -4
  76. data/src/core/ext/filters/client_channel/service_config_parser.cc +6 -6
  77. data/src/core/ext/filters/client_channel/service_config_parser.h +7 -4
  78. data/src/core/ext/filters/client_channel/subchannel.cc +17 -16
  79. data/src/core/ext/filters/client_channel/subchannel.h +7 -6
  80. data/src/core/ext/filters/client_idle/client_idle_filter.cc +17 -16
  81. data/src/core/ext/filters/deadline/deadline_filter.cc +10 -10
  82. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +46 -34
  83. data/src/core/ext/filters/fault_injection/service_config_parser.cc +5 -5
  84. data/src/core/ext/filters/fault_injection/service_config_parser.h +1 -1
  85. data/src/core/ext/filters/http/client/http_client_filter.cc +28 -21
  86. data/src/core/ext/filters/http/client_authority_filter.cc +3 -3
  87. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +23 -22
  88. data/src/core/ext/filters/http/message_compress/message_decompress_filter.cc +21 -21
  89. data/src/core/ext/filters/http/server/http_server_filter.cc +27 -23
  90. data/src/core/ext/filters/max_age/max_age_filter.cc +12 -10
  91. data/src/core/ext/filters/message_size/message_size_filter.cc +14 -11
  92. data/src/core/ext/filters/message_size/message_size_filter.h +1 -1
  93. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +4 -3
  94. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +7 -7
  95. data/src/core/ext/transport/chttp2/client/chttp2_connector.h +7 -7
  96. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +2 -2
  97. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +3 -2
  98. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +3 -3
  99. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +44 -45
  100. data/src/core/ext/transport/chttp2/server/chttp2_server.h +2 -2
  101. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc +3 -4
  102. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +5 -4
  103. data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +3 -4
  104. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +139 -120
  105. data/src/core/ext/transport/chttp2/transport/context_list.cc +4 -5
  106. data/src/core/ext/transport/chttp2/transport/context_list.h +4 -4
  107. data/src/core/ext/transport/chttp2/transport/flow_control.cc +3 -3
  108. data/src/core/ext/transport/chttp2/transport/flow_control.h +8 -8
  109. data/src/core/ext/transport/chttp2/transport/frame_data.cc +8 -8
  110. data/src/core/ext/transport/chttp2/transport/frame_data.h +10 -10
  111. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +7 -8
  112. data/src/core/ext/transport/chttp2/transport/frame_goaway.h +6 -6
  113. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +7 -8
  114. data/src/core/ext/transport/chttp2/transport/frame_ping.h +7 -6
  115. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +7 -7
  116. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +6 -6
  117. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +6 -5
  118. data/src/core/ext/transport/chttp2/transport/frame_settings.h +6 -6
  119. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +4 -6
  120. data/src/core/ext/transport/chttp2/transport/frame_window_update.h +4 -6
  121. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +237 -208
  122. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +10 -10
  123. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +4 -3
  124. data/src/core/ext/transport/chttp2/transport/hpack_table.h +4 -4
  125. data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +2 -2
  126. data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +2 -2
  127. data/src/core/ext/transport/chttp2/transport/internal.h +32 -27
  128. data/src/core/ext/transport/chttp2/transport/parsing.cc +65 -58
  129. data/src/core/ext/transport/chttp2/transport/writing.cc +7 -3
  130. data/src/core/ext/transport/inproc/inproc_transport.cc +72 -60
  131. data/src/core/ext/xds/certificate_provider_factory.h +1 -1
  132. data/src/core/ext/xds/certificate_provider_store.h +3 -3
  133. data/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +3 -3
  134. data/src/core/ext/xds/file_watcher_certificate_provider_factory.h +2 -2
  135. data/src/core/ext/xds/xds_api.cc +349 -200
  136. data/src/core/ext/xds/xds_api.h +21 -12
  137. data/src/core/ext/xds/xds_bootstrap.cc +97 -159
  138. data/src/core/ext/xds/xds_bootstrap.h +19 -24
  139. data/src/core/ext/xds/xds_certificate_provider.cc +4 -4
  140. data/src/core/ext/xds/xds_certificate_provider.h +4 -4
  141. data/src/core/ext/xds/xds_channel_args.h +5 -2
  142. data/src/core/ext/xds/xds_client.cc +310 -178
  143. data/src/core/ext/xds/xds_client.h +41 -27
  144. data/src/core/ext/xds/xds_client_stats.h +3 -2
  145. data/src/core/ext/xds/xds_server_config_fetcher.cc +34 -20
  146. data/src/core/lib/{iomgr → address_utils}/parse_address.cc +17 -17
  147. data/src/core/lib/{iomgr → address_utils}/parse_address.h +7 -7
  148. data/src/core/lib/{iomgr → address_utils}/sockaddr_utils.cc +16 -20
  149. data/src/core/lib/{iomgr → address_utils}/sockaddr_utils.h +16 -11
  150. data/src/core/lib/channel/channel_stack.cc +10 -9
  151. data/src/core/lib/channel/channel_stack.h +10 -9
  152. data/src/core/lib/channel/channel_stack_builder.cc +2 -2
  153. data/src/core/lib/channel/channel_stack_builder.h +1 -1
  154. data/src/core/lib/channel/channelz.cc +21 -13
  155. data/src/core/lib/channel/channelz.h +3 -0
  156. data/src/core/lib/channel/connected_channel.cc +4 -4
  157. data/src/core/lib/channel/handshaker.cc +7 -6
  158. data/src/core/lib/channel/handshaker.h +5 -5
  159. data/src/core/lib/event_engine/endpoint_config.cc +46 -0
  160. data/src/core/lib/event_engine/endpoint_config_internal.h +42 -0
  161. data/src/core/lib/event_engine/event_engine.cc +50 -0
  162. data/src/core/lib/event_engine/slice_allocator.cc +89 -0
  163. data/src/core/lib/event_engine/sockaddr.cc +40 -0
  164. data/src/core/lib/event_engine/sockaddr.h +44 -0
  165. data/src/core/lib/gpr/wrap_memcpy.cc +2 -1
  166. data/src/core/lib/gprpp/ref_counted.h +28 -14
  167. data/src/core/lib/gprpp/status_helper.cc +407 -0
  168. data/src/core/lib/gprpp/status_helper.h +183 -0
  169. data/src/core/lib/http/httpcli.cc +11 -11
  170. data/src/core/lib/http/httpcli_security_connector.cc +11 -7
  171. data/src/core/lib/http/parser.cc +16 -16
  172. data/src/core/lib/http/parser.h +4 -4
  173. data/src/core/lib/iomgr/buffer_list.cc +7 -9
  174. data/src/core/lib/iomgr/buffer_list.h +4 -5
  175. data/src/core/lib/iomgr/call_combiner.cc +15 -12
  176. data/src/core/lib/iomgr/call_combiner.h +12 -14
  177. data/src/core/lib/iomgr/cfstream_handle.cc +3 -3
  178. data/src/core/lib/iomgr/cfstream_handle.h +1 -1
  179. data/src/core/lib/iomgr/closure.h +7 -6
  180. data/src/core/lib/iomgr/combiner.cc +14 -12
  181. data/src/core/lib/iomgr/combiner.h +2 -2
  182. data/src/core/lib/iomgr/endpoint.cc +1 -1
  183. data/src/core/lib/iomgr/endpoint.h +2 -2
  184. data/src/core/lib/iomgr/endpoint_cfstream.cc +11 -13
  185. data/src/core/lib/iomgr/endpoint_pair_event_engine.cc +33 -0
  186. data/src/core/lib/iomgr/endpoint_pair_windows.cc +1 -1
  187. data/src/core/lib/iomgr/error.cc +168 -61
  188. data/src/core/lib/iomgr/error.h +217 -106
  189. data/src/core/lib/iomgr/error_cfstream.cc +3 -2
  190. data/src/core/lib/iomgr/error_cfstream.h +2 -2
  191. data/src/core/lib/iomgr/error_internal.h +5 -1
  192. data/src/core/lib/iomgr/ev_apple.cc +5 -5
  193. data/src/core/lib/iomgr/ev_epoll1_linux.cc +19 -19
  194. data/src/core/lib/iomgr/ev_epollex_linux.cc +48 -45
  195. data/src/core/lib/iomgr/ev_poll_posix.cc +26 -23
  196. data/src/core/lib/iomgr/ev_posix.cc +9 -8
  197. data/src/core/lib/iomgr/ev_posix.h +9 -9
  198. data/src/core/lib/iomgr/event_engine/closure.cc +54 -0
  199. data/src/core/lib/iomgr/event_engine/closure.h +33 -0
  200. data/src/core/lib/iomgr/event_engine/endpoint.cc +194 -0
  201. data/src/core/lib/iomgr/event_engine/endpoint.h +53 -0
  202. data/src/core/lib/iomgr/event_engine/iomgr.cc +105 -0
  203. data/src/core/lib/iomgr/event_engine/iomgr.h +24 -0
  204. data/src/core/lib/iomgr/event_engine/pollset.cc +87 -0
  205. data/src/core/lib/iomgr/event_engine/pollset.h +25 -0
  206. data/src/core/lib/iomgr/event_engine/promise.h +51 -0
  207. data/src/core/lib/iomgr/event_engine/resolved_address_internal.cc +41 -0
  208. data/src/core/lib/iomgr/event_engine/resolved_address_internal.h +35 -0
  209. data/src/core/lib/iomgr/event_engine/resolver.cc +110 -0
  210. data/src/core/lib/iomgr/event_engine/tcp.cc +243 -0
  211. data/src/core/lib/iomgr/event_engine/timer.cc +57 -0
  212. data/src/core/lib/iomgr/exec_ctx.cc +12 -4
  213. data/src/core/lib/iomgr/exec_ctx.h +4 -5
  214. data/src/core/lib/iomgr/executor.cc +8 -8
  215. data/src/core/lib/iomgr/executor.h +2 -2
  216. data/src/core/lib/iomgr/executor/threadpool.cc +2 -3
  217. data/src/core/lib/iomgr/executor/threadpool.h +2 -2
  218. data/src/core/lib/iomgr/iomgr.cc +2 -2
  219. data/src/core/lib/iomgr/iomgr.h +1 -1
  220. data/src/core/lib/iomgr/iomgr_custom.cc +1 -1
  221. data/src/core/lib/iomgr/iomgr_internal.cc +2 -2
  222. data/src/core/lib/iomgr/iomgr_internal.h +3 -3
  223. data/src/core/lib/iomgr/iomgr_posix.cc +3 -1
  224. data/src/core/lib/iomgr/iomgr_posix_cfstream.cc +42 -12
  225. data/src/core/lib/iomgr/iomgr_windows.cc +1 -1
  226. data/src/core/lib/iomgr/load_file.cc +4 -4
  227. data/src/core/lib/iomgr/load_file.h +2 -2
  228. data/src/core/lib/iomgr/lockfree_event.cc +5 -5
  229. data/src/core/lib/iomgr/lockfree_event.h +1 -1
  230. data/src/core/lib/iomgr/pollset.cc +5 -5
  231. data/src/core/lib/iomgr/pollset.h +9 -9
  232. data/src/core/lib/iomgr/pollset_custom.cc +7 -7
  233. data/src/core/lib/iomgr/pollset_custom.h +3 -1
  234. data/src/core/lib/iomgr/pollset_uv.cc +3 -1
  235. data/src/core/lib/iomgr/pollset_uv.h +5 -1
  236. data/src/core/lib/iomgr/pollset_windows.cc +5 -5
  237. data/src/core/lib/iomgr/port.h +7 -5
  238. data/src/core/lib/iomgr/python_util.h +1 -1
  239. data/src/core/lib/iomgr/resolve_address.cc +8 -4
  240. data/src/core/lib/iomgr/resolve_address.h +12 -6
  241. data/src/core/lib/iomgr/resolve_address_custom.cc +10 -9
  242. data/src/core/lib/iomgr/resolve_address_custom.h +3 -3
  243. data/src/core/lib/iomgr/resolve_address_posix.cc +3 -3
  244. data/src/core/lib/iomgr/resolve_address_windows.cc +4 -4
  245. data/src/core/lib/iomgr/resource_quota.cc +11 -10
  246. data/src/core/lib/iomgr/sockaddr.h +1 -0
  247. data/src/core/lib/iomgr/socket_mutator.cc +15 -2
  248. data/src/core/lib/iomgr/socket_mutator.h +26 -2
  249. data/src/core/lib/iomgr/socket_utils_common_posix.cc +24 -22
  250. data/src/core/lib/iomgr/socket_utils_posix.h +20 -20
  251. data/src/core/lib/iomgr/tcp_client_cfstream.cc +4 -4
  252. data/src/core/lib/iomgr/tcp_client_custom.cc +5 -6
  253. data/src/core/lib/iomgr/tcp_client_posix.cc +22 -19
  254. data/src/core/lib/iomgr/tcp_client_posix.h +3 -4
  255. data/src/core/lib/iomgr/tcp_client_windows.cc +5 -5
  256. data/src/core/lib/iomgr/tcp_custom.cc +14 -16
  257. data/src/core/lib/iomgr/tcp_custom.h +13 -12
  258. data/src/core/lib/iomgr/tcp_posix.cc +78 -73
  259. data/src/core/lib/iomgr/tcp_posix.h +8 -0
  260. data/src/core/lib/iomgr/tcp_server.cc +6 -6
  261. data/src/core/lib/iomgr/tcp_server.h +12 -11
  262. data/src/core/lib/iomgr/tcp_server_custom.cc +26 -25
  263. data/src/core/lib/iomgr/tcp_server_posix.cc +28 -21
  264. data/src/core/lib/iomgr/tcp_server_utils_posix.h +13 -12
  265. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +21 -18
  266. data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +9 -9
  267. data/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc +4 -4
  268. data/src/core/lib/iomgr/tcp_server_windows.cc +26 -25
  269. data/src/core/lib/iomgr/tcp_uv.cc +25 -23
  270. data/src/core/lib/iomgr/tcp_windows.cc +13 -13
  271. data/src/core/lib/iomgr/tcp_windows.h +2 -2
  272. data/src/core/lib/iomgr/timer.h +6 -1
  273. data/src/core/lib/iomgr/timer_custom.cc +2 -1
  274. data/src/core/lib/iomgr/timer_custom.h +1 -1
  275. data/src/core/lib/iomgr/timer_generic.cc +6 -6
  276. data/src/core/lib/iomgr/udp_server.cc +21 -20
  277. data/src/core/lib/iomgr/unix_sockets_posix.cc +3 -3
  278. data/src/core/lib/iomgr/unix_sockets_posix.h +2 -2
  279. data/src/core/lib/iomgr/unix_sockets_posix_noop.cc +10 -7
  280. data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +3 -3
  281. data/src/core/lib/iomgr/wakeup_fd_pipe.cc +4 -4
  282. data/src/core/lib/iomgr/wakeup_fd_posix.cc +3 -3
  283. data/src/core/lib/iomgr/wakeup_fd_posix.h +8 -6
  284. data/src/core/lib/iomgr/work_serializer.h +17 -1
  285. data/src/core/lib/json/json.h +1 -1
  286. data/src/core/lib/json/json_reader.cc +4 -4
  287. data/src/core/lib/matchers/matchers.cc +39 -39
  288. data/src/core/lib/matchers/matchers.h +28 -28
  289. data/src/core/lib/security/authorization/authorization_engine.h +44 -0
  290. data/src/core/lib/security/authorization/authorization_policy_provider.h +32 -0
  291. data/src/core/lib/security/authorization/authorization_policy_provider_vtable.cc +46 -0
  292. data/src/core/lib/security/authorization/evaluate_args.cc +209 -0
  293. data/src/core/lib/security/authorization/evaluate_args.h +91 -0
  294. data/src/core/lib/security/credentials/composite/composite_credentials.cc +4 -4
  295. data/src/core/lib/security/credentials/composite/composite_credentials.h +2 -2
  296. data/src/core/lib/security/credentials/credentials.h +2 -2
  297. data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +17 -13
  298. data/src/core/lib/security/credentials/external/aws_external_account_credentials.h +13 -11
  299. data/src/core/lib/security/credentials/external/aws_request_signer.cc +2 -1
  300. data/src/core/lib/security/credentials/external/aws_request_signer.h +1 -1
  301. data/src/core/lib/security/credentials/external/external_account_credentials.cc +15 -12
  302. data/src/core/lib/security/credentials/external/external_account_credentials.h +9 -8
  303. data/src/core/lib/security/credentials/external/file_external_account_credentials.cc +5 -4
  304. data/src/core/lib/security/credentials/external/file_external_account_credentials.h +4 -3
  305. data/src/core/lib/security/credentials/external/url_external_account_credentials.cc +8 -8
  306. data/src/core/lib/security/credentials/external/url_external_account_credentials.h +9 -7
  307. data/src/core/lib/security/credentials/fake/fake_credentials.cc +2 -2
  308. data/src/core/lib/security/credentials/fake/fake_credentials.h +2 -2
  309. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +12 -10
  310. data/src/core/lib/security/credentials/iam/iam_credentials.cc +2 -2
  311. data/src/core/lib/security/credentials/iam/iam_credentials.h +2 -2
  312. data/src/core/lib/security/credentials/jwt/json_token.cc +2 -2
  313. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +3 -3
  314. data/src/core/lib/security/credentials/jwt/jwt_credentials.h +2 -2
  315. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +7 -5
  316. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +21 -19
  317. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +5 -5
  318. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +5 -5
  319. data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -2
  320. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc +8 -7
  321. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.h +9 -9
  322. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +19 -13
  323. data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc +4 -0
  324. data/src/core/lib/security/credentials/tls/tls_utils.cc +32 -0
  325. data/src/core/lib/security/credentials/tls/tls_utils.h +13 -0
  326. data/src/core/lib/security/credentials/xds/xds_credentials.cc +3 -3
  327. data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +13 -3
  328. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +13 -3
  329. data/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc +2 -2
  330. data/src/core/lib/security/security_connector/insecure/insecure_security_connector.h +12 -2
  331. data/src/core/lib/security/security_connector/load_system_roots_linux.cc +1 -1
  332. data/src/core/lib/security/security_connector/local/local_security_connector.cc +22 -9
  333. data/src/core/lib/security/security_connector/security_connector.h +9 -4
  334. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +16 -6
  335. data/src/core/lib/security/security_connector/ssl_utils.cc +27 -4
  336. data/src/core/lib/security/security_connector/ssl_utils.h +4 -4
  337. data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +56 -60
  338. data/src/core/lib/security/security_connector/tls/tls_security_connector.h +66 -48
  339. data/src/core/lib/security/transport/client_auth_filter.cc +18 -10
  340. data/src/core/lib/security/transport/secure_endpoint.cc +4 -4
  341. data/src/core/lib/security/transport/security_handshaker.cc +33 -32
  342. data/src/core/lib/security/transport/server_auth_filter.cc +24 -11
  343. data/src/core/lib/security/transport/tsi_error.cc +2 -1
  344. data/src/core/lib/security/transport/tsi_error.h +2 -1
  345. data/src/core/lib/security/util/json_util.cc +2 -2
  346. data/src/core/lib/security/util/json_util.h +1 -1
  347. data/src/core/lib/surface/call.cc +67 -46
  348. data/src/core/lib/surface/call.h +13 -2
  349. data/src/core/lib/surface/channel.cc +6 -6
  350. data/src/core/lib/surface/channel.h +3 -2
  351. data/src/core/lib/surface/channel_ping.cc +1 -1
  352. data/src/core/lib/surface/completion_queue.cc +68 -69
  353. data/src/core/lib/surface/completion_queue.h +3 -2
  354. data/src/core/lib/surface/completion_queue_factory.cc +1 -2
  355. data/src/core/lib/surface/init.cc +1 -3
  356. data/src/core/lib/surface/init.h +10 -1
  357. data/src/core/lib/surface/lame_client.cc +11 -11
  358. data/src/core/lib/surface/lame_client.h +1 -1
  359. data/src/core/lib/surface/server.cc +28 -22
  360. data/src/core/lib/surface/server.h +16 -15
  361. data/src/core/lib/surface/validate_metadata.cc +7 -7
  362. data/src/core/lib/surface/validate_metadata.h +3 -2
  363. data/src/core/lib/surface/version.cc +4 -2
  364. data/src/core/lib/transport/byte_stream.cc +5 -5
  365. data/src/core/lib/transport/byte_stream.h +8 -8
  366. data/src/core/lib/transport/connectivity_state.cc +1 -1
  367. data/src/core/lib/transport/error_utils.cc +21 -10
  368. data/src/core/lib/transport/error_utils.h +11 -5
  369. data/src/core/lib/transport/metadata_batch.cc +37 -37
  370. data/src/core/lib/transport/metadata_batch.h +19 -18
  371. data/src/core/lib/transport/transport.cc +4 -3
  372. data/src/core/lib/transport/transport.h +6 -4
  373. data/src/core/lib/transport/transport_op_string.cc +6 -6
  374. data/src/core/plugin_registry/grpc_plugin_registry.cc +4 -0
  375. data/src/core/tsi/alts/crypt/gsec.h +6 -0
  376. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +5 -4
  377. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +7 -6
  378. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +2 -1
  379. data/src/core/tsi/ssl_transport_security.cc +32 -14
  380. data/src/core/tsi/ssl_transport_security.h +3 -4
  381. data/src/ruby/bin/math_services_pb.rb +1 -1
  382. data/src/ruby/ext/grpc/extconf.rb +2 -0
  383. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +6 -0
  384. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +11 -2
  385. data/src/ruby/lib/grpc/version.rb +1 -1
  386. data/src/ruby/pb/grpc/health/v1/health_services_pb.rb +1 -1
  387. data/src/ruby/pb/src/proto/grpc/testing/test_services_pb.rb +6 -6
  388. data/third_party/abseil-cpp/absl/algorithm/container.h +3 -3
  389. data/third_party/abseil-cpp/absl/base/attributes.h +24 -4
  390. data/third_party/abseil-cpp/absl/base/call_once.h +2 -9
  391. data/third_party/abseil-cpp/absl/base/config.h +37 -9
  392. data/third_party/abseil-cpp/absl/base/dynamic_annotations.h +24 -10
  393. data/third_party/abseil-cpp/absl/base/internal/direct_mmap.h +4 -1
  394. data/third_party/abseil-cpp/absl/base/internal/endian.h +61 -0
  395. data/third_party/abseil-cpp/absl/base/internal/low_level_scheduling.h +2 -3
  396. data/third_party/abseil-cpp/absl/base/internal/raw_logging.cc +34 -32
  397. data/third_party/abseil-cpp/absl/base/internal/raw_logging.h +16 -6
  398. data/third_party/abseil-cpp/absl/base/internal/spinlock.cc +11 -2
  399. data/third_party/abseil-cpp/absl/base/internal/spinlock.h +14 -5
  400. data/third_party/abseil-cpp/absl/base/internal/spinlock_akaros.inc +2 -2
  401. data/third_party/abseil-cpp/absl/base/internal/spinlock_linux.inc +3 -3
  402. data/third_party/abseil-cpp/absl/base/internal/spinlock_posix.inc +2 -2
  403. data/third_party/abseil-cpp/absl/base/internal/spinlock_wait.h +11 -11
  404. data/third_party/abseil-cpp/absl/base/internal/spinlock_win32.inc +5 -5
  405. data/third_party/abseil-cpp/absl/base/internal/sysinfo.cc +1 -1
  406. data/third_party/abseil-cpp/absl/base/internal/thread_identity.cc +5 -2
  407. data/third_party/abseil-cpp/absl/base/internal/thread_identity.h +43 -42
  408. data/third_party/abseil-cpp/absl/base/internal/throw_delegate.cc +111 -7
  409. data/third_party/abseil-cpp/absl/base/internal/unaligned_access.h +0 -76
  410. data/third_party/abseil-cpp/absl/base/internal/unscaledcycleclock.cc +1 -3
  411. data/third_party/abseil-cpp/absl/base/log_severity.h +4 -4
  412. data/third_party/abseil-cpp/absl/base/macros.h +11 -0
  413. data/third_party/abseil-cpp/absl/base/optimization.h +10 -7
  414. data/third_party/abseil-cpp/absl/base/options.h +1 -1
  415. data/third_party/abseil-cpp/absl/base/port.h +0 -1
  416. data/third_party/abseil-cpp/absl/base/thread_annotations.h +1 -1
  417. data/third_party/abseil-cpp/absl/container/fixed_array.h +2 -2
  418. data/third_party/abseil-cpp/absl/container/inlined_vector.h +5 -3
  419. data/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h +1 -1
  420. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc +5 -1
  421. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h +2 -1
  422. data/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc +2 -1
  423. data/third_party/abseil-cpp/absl/container/internal/inlined_vector.h +141 -66
  424. data/third_party/abseil-cpp/absl/container/internal/layout.h +4 -4
  425. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc +14 -1
  426. data/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h +136 -136
  427. data/third_party/abseil-cpp/absl/debugging/internal/demangle.cc +16 -12
  428. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_aarch64-inl.inc +5 -2
  429. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_config.h +3 -12
  430. data/third_party/abseil-cpp/absl/debugging/internal/stacktrace_powerpc-inl.inc +6 -1
  431. data/third_party/abseil-cpp/absl/debugging/internal/symbolize.h +3 -5
  432. data/third_party/abseil-cpp/absl/debugging/symbolize_darwin.inc +2 -2
  433. data/third_party/abseil-cpp/absl/debugging/symbolize_elf.inc +2 -2
  434. data/third_party/abseil-cpp/absl/hash/internal/city.cc +15 -12
  435. data/third_party/abseil-cpp/absl/hash/internal/city.h +1 -19
  436. data/third_party/abseil-cpp/absl/hash/internal/hash.cc +25 -10
  437. data/third_party/abseil-cpp/absl/hash/internal/hash.h +86 -37
  438. data/third_party/abseil-cpp/absl/hash/internal/wyhash.cc +111 -0
  439. data/third_party/abseil-cpp/absl/hash/internal/wyhash.h +48 -0
  440. data/third_party/abseil-cpp/absl/meta/type_traits.h +16 -2
  441. data/third_party/abseil-cpp/absl/numeric/bits.h +177 -0
  442. data/third_party/abseil-cpp/absl/numeric/int128.cc +3 -3
  443. data/third_party/abseil-cpp/absl/numeric/internal/bits.h +358 -0
  444. data/third_party/abseil-cpp/absl/numeric/internal/representation.h +55 -0
  445. data/third_party/abseil-cpp/absl/status/internal/status_internal.h +18 -0
  446. data/third_party/abseil-cpp/absl/status/internal/statusor_internal.h +4 -7
  447. data/third_party/abseil-cpp/absl/status/status.cc +29 -22
  448. data/third_party/abseil-cpp/absl/status/status.h +81 -20
  449. data/third_party/abseil-cpp/absl/status/statusor.h +3 -3
  450. data/third_party/abseil-cpp/absl/strings/charconv.cc +5 -5
  451. data/third_party/abseil-cpp/absl/strings/cord.cc +326 -371
  452. data/third_party/abseil-cpp/absl/strings/cord.h +182 -64
  453. data/third_party/abseil-cpp/absl/strings/escaping.cc +4 -4
  454. data/third_party/abseil-cpp/absl/strings/internal/charconv_parse.cc +6 -6
  455. data/third_party/abseil-cpp/absl/strings/internal/cord_internal.cc +83 -0
  456. data/third_party/abseil-cpp/absl/strings/internal/cord_internal.h +387 -17
  457. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_flat.h +146 -0
  458. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.cc +897 -0
  459. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring.h +589 -0
  460. data/third_party/abseil-cpp/absl/strings/internal/cord_rep_ring_reader.h +114 -0
  461. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.cc +14 -0
  462. data/third_party/abseil-cpp/absl/strings/internal/str_format/arg.h +14 -0
  463. data/third_party/abseil-cpp/absl/strings/internal/str_format/bind.cc +15 -1
  464. data/third_party/abseil-cpp/absl/strings/internal/str_format/bind.h +19 -4
  465. data/third_party/abseil-cpp/absl/strings/internal/str_format/checker.h +14 -0
  466. data/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.cc +36 -18
  467. data/third_party/abseil-cpp/absl/strings/internal/str_format/float_conversion.h +14 -0
  468. data/third_party/abseil-cpp/absl/strings/internal/str_format/parser.cc +14 -0
  469. data/third_party/abseil-cpp/absl/strings/internal/str_format/parser.h +14 -0
  470. data/third_party/abseil-cpp/absl/strings/internal/str_split_internal.h +15 -40
  471. data/third_party/abseil-cpp/absl/strings/internal/string_constant.h +64 -0
  472. data/third_party/abseil-cpp/absl/strings/match.cc +6 -3
  473. data/third_party/abseil-cpp/absl/strings/match.h +16 -6
  474. data/third_party/abseil-cpp/absl/strings/numbers.cc +132 -4
  475. data/third_party/abseil-cpp/absl/strings/numbers.h +10 -10
  476. data/third_party/abseil-cpp/absl/strings/str_join.h +1 -1
  477. data/third_party/abseil-cpp/absl/strings/str_split.h +38 -4
  478. data/third_party/abseil-cpp/absl/synchronization/internal/futex.h +154 -0
  479. data/third_party/abseil-cpp/absl/synchronization/internal/kernel_timeout.h +2 -1
  480. data/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.cc +2 -2
  481. data/third_party/abseil-cpp/absl/synchronization/internal/per_thread_sem.h +4 -4
  482. data/third_party/abseil-cpp/absl/synchronization/internal/waiter.cc +1 -65
  483. data/third_party/abseil-cpp/absl/synchronization/internal/waiter.h +2 -6
  484. data/third_party/abseil-cpp/absl/synchronization/mutex.cc +71 -59
  485. data/third_party/abseil-cpp/absl/synchronization/mutex.h +79 -62
  486. data/third_party/abseil-cpp/absl/time/clock.cc +146 -130
  487. data/third_party/abseil-cpp/absl/time/clock.h +2 -2
  488. data/third_party/abseil-cpp/absl/time/duration.cc +3 -2
  489. data/third_party/abseil-cpp/absl/time/internal/cctz/include/cctz/civil_time_detail.h +7 -11
  490. data/third_party/abseil-cpp/absl/time/internal/cctz/src/time_zone_libc.cc +7 -1
  491. data/third_party/abseil-cpp/absl/time/internal/cctz/src/tzfile.h +4 -4
  492. data/third_party/abseil-cpp/absl/time/time.cc +4 -3
  493. data/third_party/abseil-cpp/absl/time/time.h +26 -24
  494. data/third_party/abseil-cpp/absl/types/internal/variant.h +1 -1
  495. data/third_party/abseil-cpp/absl/types/variant.h +9 -4
  496. data/third_party/boringssl-with-bazel/err_data.c +483 -461
  497. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c +1 -1
  498. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c +9 -7
  499. data/third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c +18 -8
  500. data/third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c +1 -2
  501. data/third_party/boringssl-with-bazel/src/crypto/asn1/asn1_locl.h +5 -0
  502. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c +1 -1
  503. data/third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c +1 -1
  504. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c +4 -0
  505. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c +1 -88
  506. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/internal.h +14 -3
  507. data/third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c +119 -273
  508. data/third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c +1 -1
  509. data/third_party/boringssl-with-bazel/src/crypto/curve25519/internal.h +1 -1
  510. data/third_party/boringssl-with-bazel/src/crypto/err/err.c +87 -80
  511. data/third_party/boringssl-with-bazel/src/crypto/evp/evp.c +9 -0
  512. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c +1 -0
  513. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/internal.h +1 -1
  514. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/bn/prime.c +0 -4
  515. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/cipher/cipher.c +11 -3
  516. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/cipher/e_aes.c +25 -2
  517. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/digest/digest.c +7 -0
  518. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/digest/digests.c +10 -2
  519. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/digest/md32_common.h +87 -160
  520. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/ec.c +4 -0
  521. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/ec_key.c +0 -1
  522. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ec/internal.h +0 -4
  523. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ecdsa/ecdsa.c +104 -93
  524. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/ecdsa/internal.h +39 -0
  525. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/md4/md4.c +52 -65
  526. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/md5/md5.c +52 -66
  527. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/modes/cbc.c +33 -22
  528. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/modes/cfb.c +9 -8
  529. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/modes/ctr.c +9 -8
  530. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/modes/gcm.c +17 -13
  531. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/modes/internal.h +1 -22
  532. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/modes/ofb.c +2 -1
  533. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/internal.h +1 -4
  534. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/rand.c +0 -13
  535. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rand/urandom.c +26 -7
  536. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/rsa.c +26 -24
  537. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/rsa/rsa_impl.c +10 -7
  538. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/self_check/fips.c +79 -0
  539. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/self_check/self_check.c +14 -9
  540. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/sha1.c +61 -75
  541. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/sha256.c +80 -103
  542. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/sha/sha512.c +40 -49
  543. data/third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c +367 -315
  544. data/third_party/boringssl-with-bazel/src/crypto/internal.h +65 -0
  545. data/third_party/boringssl-with-bazel/src/crypto/mem.c +14 -0
  546. data/third_party/boringssl-with-bazel/src/crypto/obj/obj.c +3 -3
  547. data/third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c +5 -3
  548. data/third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c +95 -48
  549. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c +2 -2
  550. data/third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c +1 -1
  551. data/third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c +1 -2
  552. data/third_party/boringssl-with-bazel/src/crypto/thread_pthread.c +0 -28
  553. data/third_party/boringssl-with-bazel/src/crypto/x509/internal.h +120 -11
  554. data/third_party/boringssl-with-bazel/src/crypto/x509/t_req.c +2 -0
  555. data/third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c +3 -0
  556. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c +19 -25
  557. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c +3 -2
  558. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c +42 -89
  559. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c +9 -16
  560. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c +2 -0
  561. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c +14 -15
  562. data/third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c +53 -73
  563. data/third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c +31 -0
  564. data/third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c +3 -0
  565. data/third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c +21 -17
  566. data/third_party/boringssl-with-bazel/src/crypto/x509/x_all.c +3 -0
  567. data/third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c +7 -25
  568. data/third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c +5 -0
  569. data/third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c +25 -22
  570. data/third_party/boringssl-with-bazel/src/crypto/x509/x_req.c +5 -8
  571. data/third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c +5 -0
  572. data/third_party/boringssl-with-bazel/src/crypto/x509/x_val.c +2 -0
  573. data/third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c +3 -0
  574. data/third_party/boringssl-with-bazel/src/crypto/x509v3/internal.h +7 -0
  575. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c +2 -4
  576. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c +1 -1
  577. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c +1 -0
  578. data/third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c +5 -8
  579. data/third_party/boringssl-with-bazel/src/include/openssl/aead.h +1 -4
  580. data/third_party/boringssl-with-bazel/src/include/openssl/arm_arch.h +66 -1
  581. data/third_party/boringssl-with-bazel/src/include/openssl/asn1.h +120 -41
  582. data/third_party/boringssl-with-bazel/src/include/openssl/base.h +47 -7
  583. data/third_party/boringssl-with-bazel/src/include/openssl/bytestring.h +1 -0
  584. data/third_party/boringssl-with-bazel/src/include/openssl/chacha.h +1 -1
  585. data/third_party/boringssl-with-bazel/src/include/openssl/cipher.h +0 -8
  586. data/third_party/boringssl-with-bazel/src/include/openssl/crypto.h +24 -4
  587. data/third_party/boringssl-with-bazel/src/include/openssl/digest.h +6 -2
  588. data/third_party/boringssl-with-bazel/src/include/openssl/ec.h +5 -2
  589. data/third_party/boringssl-with-bazel/src/include/openssl/ecdsa.h +33 -0
  590. data/third_party/boringssl-with-bazel/src/include/openssl/err.h +3 -2
  591. data/third_party/boringssl-with-bazel/src/include/openssl/evp.h +20 -49
  592. data/third_party/boringssl-with-bazel/src/{crypto/x509/x509_r2x.c → include/openssl/evp_errors.h} +41 -58
  593. data/third_party/boringssl-with-bazel/src/include/openssl/hpke.h +325 -0
  594. data/third_party/boringssl-with-bazel/src/include/openssl/obj.h +24 -5
  595. data/third_party/boringssl-with-bazel/src/include/openssl/pkcs7.h +25 -7
  596. data/third_party/boringssl-with-bazel/src/include/openssl/pkcs8.h +9 -1
  597. data/third_party/boringssl-with-bazel/src/include/openssl/rand.h +2 -2
  598. data/third_party/boringssl-with-bazel/src/include/openssl/rsa.h +99 -63
  599. data/third_party/boringssl-with-bazel/src/include/openssl/ssl.h +283 -85
  600. data/third_party/boringssl-with-bazel/src/include/openssl/tls1.h +13 -19
  601. data/third_party/boringssl-with-bazel/src/include/openssl/x509.h +445 -152
  602. data/third_party/boringssl-with-bazel/src/include/openssl/x509_vfy.h +451 -435
  603. data/third_party/boringssl-with-bazel/src/include/openssl/x509v3.h +2 -1
  604. data/third_party/boringssl-with-bazel/src/ssl/d1_both.cc +7 -2
  605. data/third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc +1 -1
  606. data/third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc +1133 -0
  607. data/third_party/boringssl-with-bazel/src/ssl/handoff.cc +298 -22
  608. data/third_party/boringssl-with-bazel/src/ssl/handshake.cc +66 -30
  609. data/third_party/boringssl-with-bazel/src/ssl/handshake_client.cc +189 -86
  610. data/third_party/boringssl-with-bazel/src/ssl/handshake_server.cc +154 -24
  611. data/third_party/boringssl-with-bazel/src/ssl/internal.h +414 -135
  612. data/third_party/boringssl-with-bazel/src/ssl/s3_both.cc +9 -3
  613. data/third_party/boringssl-with-bazel/src/ssl/s3_lib.cc +2 -2
  614. data/third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc +14 -19
  615. data/third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc +4 -6
  616. data/third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc +23 -26
  617. data/third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc +51 -60
  618. data/third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc +2 -0
  619. data/third_party/boringssl-with-bazel/src/ssl/ssl_session.cc +8 -31
  620. data/third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc +3 -0
  621. data/third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc +4 -3
  622. data/third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc +7 -3
  623. data/third_party/boringssl-with-bazel/src/ssl/t1_lib.cc +664 -702
  624. data/third_party/boringssl-with-bazel/src/ssl/tls13_both.cc +65 -7
  625. data/third_party/boringssl-with-bazel/src/ssl/tls13_client.cc +98 -39
  626. data/third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc +141 -94
  627. data/third_party/boringssl-with-bazel/src/ssl/tls13_server.cc +213 -118
  628. data/third_party/boringssl-with-bazel/src/ssl/tls_method.cc +4 -2
  629. metadata +93 -45
  630. data/src/core/lib/iomgr/poller/eventmanager_libuv.cc +0 -88
  631. data/src/core/lib/iomgr/poller/eventmanager_libuv.h +0 -88
  632. data/third_party/abseil-cpp/absl/base/internal/bits.h +0 -219
  633. data/third_party/abseil-cpp/absl/synchronization/internal/mutex_nonprod.inc +0 -249
  634. data/third_party/boringssl-with-bazel/src/crypto/fipsmodule/is_fips.c +0 -29
  635. data/third_party/boringssl-with-bazel/src/crypto/hpke/internal.h +0 -246
  636. data/third_party/boringssl-with-bazel/src/crypto/x509/vpm_int.h +0 -71
@@ -51,7 +51,7 @@
51
51
  #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
52
52
  #include "src/core/ext/filters/client_channel/resolver_registry.h"
53
53
  #include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
54
- #include "src/core/ext/filters/client_channel/retry_throttle.h"
54
+ #include "src/core/ext/filters/client_channel/retry_filter.h"
55
55
  #include "src/core/ext/filters/client_channel/service_config.h"
56
56
  #include "src/core/ext/filters/client_channel/service_config_call_data.h"
57
57
  #include "src/core/ext/filters/client_channel/subchannel.h"
@@ -61,7 +61,6 @@
61
61
  #include "src/core/lib/channel/connected_channel.h"
62
62
  #include "src/core/lib/channel/status_util.h"
63
63
  #include "src/core/lib/gpr/string.h"
64
- #include "src/core/lib/gprpp/manual_constructor.h"
65
64
  #include "src/core/lib/gprpp/sync.h"
66
65
  #include "src/core/lib/iomgr/iomgr.h"
67
66
  #include "src/core/lib/iomgr/polling_entity.h"
@@ -81,320 +80,23 @@
81
80
  // Client channel filter
82
81
  //
83
82
 
84
- // By default, we buffer 256 KiB per RPC for retries.
85
- // TODO(roth): Do we have any data to suggest a better value?
86
- #define DEFAULT_PER_RPC_RETRY_BUFFER_SIZE (256 << 10)
87
-
88
- // This value was picked arbitrarily. It can be changed if there is
89
- // any even moderately compelling reason to do so.
90
- #define RETRY_BACKOFF_JITTER 0.2
91
-
92
- // Max number of batches that can be pending on a call at any given
93
- // time. This includes one batch for each of the following ops:
94
- // recv_initial_metadata
95
- // send_initial_metadata
96
- // recv_message
97
- // send_message
98
- // recv_trailing_metadata
99
- // send_trailing_metadata
100
- #define MAX_PENDING_BATCHES 6
101
-
102
- // Channel arg containing a pointer to the ChannelData object.
103
- #define GRPC_ARG_CLIENT_CHANNEL_DATA "grpc.internal.client_channel_data"
104
-
105
- // Channel arg containing a pointer to the RetryThrottleData object.
106
- #define GRPC_ARG_RETRY_THROTTLE_DATA "grpc.internal.retry_throttle_data"
107
-
108
83
  namespace grpc_core {
109
84
 
110
85
  using internal::ClientChannelGlobalParsedConfig;
111
86
  using internal::ClientChannelMethodParsedConfig;
112
87
  using internal::ClientChannelServiceConfigParser;
113
- using internal::ServerRetryThrottleData;
114
88
 
115
89
  TraceFlag grpc_client_channel_call_trace(false, "client_channel_call");
116
90
  TraceFlag grpc_client_channel_routing_trace(false, "client_channel_routing");
117
91
 
118
- namespace {
119
-
120
- //
121
- // ChannelData definition
122
- //
123
-
124
- class ChannelData {
125
- public:
126
- class CallData;
127
- class RetryingCall;
128
- class LoadBalancedCall;
129
-
130
- static grpc_error* Init(grpc_channel_element* elem,
131
- grpc_channel_element_args* args);
132
- static void Destroy(grpc_channel_element* elem);
133
- static void StartTransportOp(grpc_channel_element* elem,
134
- grpc_transport_op* op);
135
- static void GetChannelInfo(grpc_channel_element* elem,
136
- const grpc_channel_info* info);
137
-
138
- grpc_connectivity_state CheckConnectivityState(bool try_to_connect);
139
-
140
- void AddExternalConnectivityWatcher(grpc_polling_entity pollent,
141
- grpc_connectivity_state* state,
142
- grpc_closure* on_complete,
143
- grpc_closure* watcher_timer_init) {
144
- new ExternalConnectivityWatcher(this, pollent, state, on_complete,
145
- watcher_timer_init);
146
- }
147
-
148
- void RemoveExternalConnectivityWatcher(grpc_closure* on_complete,
149
- bool cancel) {
150
- ExternalConnectivityWatcher::RemoveWatcherFromExternalWatchersMap(
151
- this, on_complete, cancel);
152
- }
153
-
154
- int NumExternalConnectivityWatchers() const {
155
- MutexLock lock(&external_watchers_mu_);
156
- return static_cast<int>(external_watchers_.size());
157
- }
158
-
159
- void AddConnectivityWatcher(
160
- grpc_connectivity_state initial_state,
161
- OrphanablePtr<AsyncConnectivityStateWatcherInterface> watcher);
162
- void RemoveConnectivityWatcher(
163
- AsyncConnectivityStateWatcherInterface* watcher);
164
-
165
- private:
166
- class DynamicTerminationFilterChannelData;
167
- class SubchannelWrapper;
168
- class ClientChannelControlHelper;
169
- class ConnectivityWatcherAdder;
170
- class ConnectivityWatcherRemover;
171
-
172
- // Represents a pending connectivity callback from an external caller
173
- // via grpc_client_channel_watch_connectivity_state().
174
- class ExternalConnectivityWatcher : public ConnectivityStateWatcherInterface {
175
- public:
176
- ExternalConnectivityWatcher(ChannelData* chand, grpc_polling_entity pollent,
177
- grpc_connectivity_state* state,
178
- grpc_closure* on_complete,
179
- grpc_closure* watcher_timer_init);
180
-
181
- ~ExternalConnectivityWatcher() override;
182
-
183
- // Removes the watcher from the external_watchers_ map.
184
- static void RemoveWatcherFromExternalWatchersMap(ChannelData* chand,
185
- grpc_closure* on_complete,
186
- bool cancel);
187
-
188
- void Notify(grpc_connectivity_state state,
189
- const absl::Status& /* status */) override;
190
-
191
- void Cancel();
192
-
193
- private:
194
- // Adds the watcher to state_tracker_. Consumes the ref that is passed to it
195
- // from Start().
196
- void AddWatcherLocked();
197
- void RemoveWatcherLocked();
198
-
199
- ChannelData* chand_;
200
- grpc_polling_entity pollent_;
201
- grpc_connectivity_state initial_state_;
202
- grpc_connectivity_state* state_;
203
- grpc_closure* on_complete_;
204
- grpc_closure* watcher_timer_init_;
205
- Atomic<bool> done_{false};
206
- };
207
-
208
- class ResolverResultHandler : public Resolver::ResultHandler {
209
- public:
210
- explicit ResolverResultHandler(ChannelData* chand) : chand_(chand) {
211
- GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ResolverResultHandler");
212
- }
213
-
214
- ~ResolverResultHandler() override {
215
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
216
- gpr_log(GPR_INFO, "chand=%p: resolver shutdown complete", chand_);
217
- }
218
- GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "ResolverResultHandler");
219
- }
220
-
221
- void ReturnResult(Resolver::Result result) override {
222
- chand_->OnResolverResultChangedLocked(std::move(result));
223
- }
224
-
225
- void ReturnError(grpc_error* error) override {
226
- chand_->OnResolverErrorLocked(error);
227
- }
228
-
229
- private:
230
- ChannelData* chand_;
231
- };
232
-
233
- struct ResolverQueuedCall {
234
- grpc_call_element* elem;
235
- ResolverQueuedCall* next = nullptr;
236
- };
237
- struct LbQueuedCall {
238
- LoadBalancedCall* lb_call;
239
- LbQueuedCall* next = nullptr;
240
- };
241
-
242
- ChannelData(grpc_channel_element_args* args, grpc_error** error);
243
- ~ChannelData();
244
-
245
- // Note: Does NOT return a new ref.
246
- grpc_error* disconnect_error() const {
247
- return disconnect_error_.Load(MemoryOrder::ACQUIRE);
248
- }
249
-
250
- // Note: All methods with "Locked" suffix must be invoked from within
251
- // work_serializer_.
252
-
253
- void OnResolverResultChangedLocked(Resolver::Result result);
254
- void OnResolverErrorLocked(grpc_error* error);
255
-
256
- void CreateOrUpdateLbPolicyLocked(
257
- RefCountedPtr<LoadBalancingPolicy::Config> lb_policy_config,
258
- Resolver::Result result);
259
- OrphanablePtr<LoadBalancingPolicy> CreateLbPolicyLocked(
260
- const grpc_channel_args& args);
261
-
262
- void UpdateStateAndPickerLocked(
263
- grpc_connectivity_state state, const absl::Status& status,
264
- const char* reason,
265
- std::unique_ptr<LoadBalancingPolicy::SubchannelPicker> picker);
266
-
267
- void UpdateServiceConfigInControlPlaneLocked(
268
- RefCountedPtr<ServiceConfig> service_config,
269
- RefCountedPtr<ConfigSelector> config_selector,
270
- const internal::ClientChannelGlobalParsedConfig* parsed_service_config,
271
- const char* lb_policy_name);
272
-
273
- void UpdateServiceConfigInDataPlaneLocked();
274
-
275
- void CreateResolverLocked();
276
- void DestroyResolverAndLbPolicyLocked();
277
-
278
- grpc_error* DoPingLocked(grpc_transport_op* op);
279
-
280
- void StartTransportOpLocked(grpc_transport_op* op);
281
-
282
- void TryToConnectLocked();
283
-
284
- // These methods all require holding resolution_mu_.
285
- void AddResolverQueuedCall(ResolverQueuedCall* call,
286
- grpc_polling_entity* pollent)
287
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(resolution_mu_);
288
- void RemoveResolverQueuedCall(ResolverQueuedCall* to_remove,
289
- grpc_polling_entity* pollent)
290
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(resolution_mu_);
291
-
292
- // These methods all require holding data_plane_mu_.
293
- void AddLbQueuedCall(LbQueuedCall* call, grpc_polling_entity* pollent)
294
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(data_plane_mu_);
295
- void RemoveLbQueuedCall(LbQueuedCall* to_remove, grpc_polling_entity* pollent)
296
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(data_plane_mu_);
297
- RefCountedPtr<ConnectedSubchannel> GetConnectedSubchannelInDataPlane(
298
- SubchannelInterface* subchannel) const
299
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(data_plane_mu_);
300
-
301
- //
302
- // Fields set at construction and never modified.
303
- //
304
- const bool deadline_checking_enabled_;
305
- const bool enable_retries_;
306
- const size_t per_rpc_retry_buffer_size_;
307
- grpc_channel_stack* owning_stack_;
308
- ClientChannelFactory* client_channel_factory_;
309
- const grpc_channel_args* channel_args_;
310
- RefCountedPtr<ServiceConfig> default_service_config_;
311
- std::string server_name_;
312
- UniquePtr<char> target_uri_;
313
- channelz::ChannelNode* channelz_node_;
314
-
315
- //
316
- // Fields related to name resolution. Guarded by resolution_mu_.
317
- //
318
- mutable Mutex resolution_mu_;
319
- // Linked list of calls queued waiting for resolver result.
320
- ResolverQueuedCall* resolver_queued_calls_ ABSL_GUARDED_BY(resolution_mu_) =
321
- nullptr;
322
- // Data from service config.
323
- grpc_error* resolver_transient_failure_error_
324
- ABSL_GUARDED_BY(resolution_mu_) = GRPC_ERROR_NONE;
325
- bool received_service_config_data_ ABSL_GUARDED_BY(resolution_mu_) = false;
326
- RefCountedPtr<ServiceConfig> service_config_ ABSL_GUARDED_BY(resolution_mu_);
327
- RefCountedPtr<ConfigSelector> config_selector_
328
- ABSL_GUARDED_BY(resolution_mu_);
329
- RefCountedPtr<DynamicFilters> dynamic_filters_
330
- ABSL_GUARDED_BY(resolution_mu_);
331
-
332
- //
333
- // Fields used in the data plane. Guarded by data_plane_mu_.
334
- //
335
- mutable Mutex data_plane_mu_;
336
- std::unique_ptr<LoadBalancingPolicy::SubchannelPicker> picker_
337
- ABSL_GUARDED_BY(data_plane_mu_);
338
- // Linked list of calls queued waiting for LB pick.
339
- LbQueuedCall* lb_queued_calls_ ABSL_GUARDED_BY(data_plane_mu_) = nullptr;
340
-
341
- //
342
- // Fields used in the control plane. Guarded by work_serializer.
343
- //
344
- std::shared_ptr<WorkSerializer> work_serializer_;
345
- grpc_pollset_set* interested_parties_;
346
- ConnectivityStateTracker state_tracker_;
347
- OrphanablePtr<Resolver> resolver_;
348
- bool previous_resolution_contained_addresses_ = false;
349
- RefCountedPtr<ServiceConfig> saved_service_config_;
350
- RefCountedPtr<ConfigSelector> saved_config_selector_;
351
- absl::optional<std::string> health_check_service_name_;
352
- OrphanablePtr<LoadBalancingPolicy> lb_policy_;
353
- RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;
354
- // The number of SubchannelWrapper instances referencing a given Subchannel.
355
- std::map<Subchannel*, int> subchannel_refcount_map_;
356
- // The set of SubchannelWrappers that currently exist.
357
- // No need to hold a ref, since the map is updated in the control-plane
358
- // work_serializer when the SubchannelWrappers are created and destroyed.
359
- std::set<SubchannelWrapper*> subchannel_wrappers_;
360
- // Pending ConnectedSubchannel updates for each SubchannelWrapper.
361
- // Updates are queued here in the control plane work_serializer and then
362
- // applied in the data plane mutex when the picker is updated.
363
- std::map<RefCountedPtr<SubchannelWrapper>, RefCountedPtr<ConnectedSubchannel>>
364
- pending_subchannel_updates_;
365
- int keepalive_time_ = -1;
366
-
367
- //
368
- // Fields accessed from both data plane mutex and control plane
369
- // work_serializer.
370
- //
371
- Atomic<grpc_error*> disconnect_error_;
372
-
373
- //
374
- // Fields guarded by a mutex, since they need to be accessed
375
- // synchronously via get_channel_info().
376
- //
377
- Mutex info_mu_;
378
- UniquePtr<char> info_lb_policy_name_ ABSL_GUARDED_BY(info_mu_);
379
- UniquePtr<char> info_service_config_json_ ABSL_GUARDED_BY(info_mu_);
380
-
381
- //
382
- // Fields guarded by a mutex, since they need to be accessed
383
- // synchronously via grpc_channel_num_external_connectivity_watchers().
384
- //
385
- mutable Mutex external_watchers_mu_;
386
- std::map<grpc_closure*, RefCountedPtr<ExternalConnectivityWatcher>>
387
- external_watchers_ ABSL_GUARDED_BY(external_watchers_mu_);
388
- };
389
-
390
92
  //
391
- // ChannelData::CallData definition
93
+ // ClientChannel::CallData definition
392
94
  //
393
95
 
394
- class ChannelData::CallData {
96
+ class ClientChannel::CallData {
395
97
  public:
396
- static grpc_error* Init(grpc_call_element* elem,
397
- const grpc_call_element_args* args);
98
+ static grpc_error_handle Init(grpc_call_element* elem,
99
+ const grpc_call_element_args* args);
398
100
  static void Destroy(grpc_call_element* elem,
399
101
  const grpc_call_final_info* final_info,
400
102
  grpc_closure* then_schedule_closure);
@@ -403,23 +105,23 @@ class ChannelData::CallData {
403
105
  static void SetPollent(grpc_call_element* elem, grpc_polling_entity* pollent);
404
106
 
405
107
  // Invoked by channel for queued calls when name resolution is completed.
406
- static void CheckResolution(void* arg, grpc_error* error);
108
+ static void CheckResolution(void* arg, grpc_error_handle error);
407
109
  // Helper function for applying the service config to a call while
408
- // holding ChannelData::resolution_mu_.
110
+ // holding ClientChannel::resolution_mu_.
409
111
  // Returns true if the service config has been applied to the call, in which
410
112
  // case the caller must invoke ResolutionDone() or AsyncResolutionDone()
411
113
  // with the returned error.
412
- bool CheckResolutionLocked(grpc_call_element* elem, grpc_error** error)
413
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);
114
+ bool CheckResolutionLocked(grpc_call_element* elem, grpc_error_handle* error)
115
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::resolution_mu_);
414
116
  // Schedules a callback to continue processing the call once
415
117
  // resolution is complete. The callback will not run until after this
416
118
  // method returns.
417
- void AsyncResolutionDone(grpc_call_element* elem, grpc_error* error);
119
+ void AsyncResolutionDone(grpc_call_element* elem, grpc_error_handle error);
418
120
 
419
121
  private:
420
122
  class ResolverQueuedCallCanceller;
421
123
 
422
- CallData(grpc_call_element* elem, const ChannelData& chand,
124
+ CallData(grpc_call_element* elem, const ClientChannel& chand,
423
125
  const grpc_call_element_args& args);
424
126
  ~CallData();
425
127
 
@@ -427,7 +129,8 @@ class ChannelData::CallData {
427
129
  static size_t GetBatchIndex(grpc_transport_stream_op_batch* batch);
428
130
  void PendingBatchesAdd(grpc_call_element* elem,
429
131
  grpc_transport_stream_op_batch* batch);
430
- static void FailPendingBatchInCallCombiner(void* arg, grpc_error* error);
132
+ static void FailPendingBatchInCallCombiner(void* arg,
133
+ grpc_error_handle error);
431
134
  // A predicate type and some useful implementations for PendingBatchesFail().
432
135
  typedef bool (*YieldCallCombinerPredicate)(
433
136
  const CallCombinerClosureList& closures);
@@ -445,9 +148,10 @@ class ChannelData::CallData {
445
148
  // If yield_call_combiner_predicate returns true, assumes responsibility for
446
149
  // yielding the call combiner.
447
150
  void PendingBatchesFail(
448
- grpc_call_element* elem, grpc_error* error,
151
+ grpc_call_element* elem, grpc_error_handle error,
449
152
  YieldCallCombinerPredicate yield_call_combiner_predicate);
450
- static void ResumePendingBatchInCallCombiner(void* arg, grpc_error* ignored);
153
+ static void ResumePendingBatchInCallCombiner(void* arg,
154
+ grpc_error_handle ignored);
451
155
  // Resumes all pending batches on lb_call_.
452
156
  void PendingBatchesResume(grpc_call_element* elem);
453
157
 
@@ -455,23 +159,23 @@ class ChannelData::CallData {
455
159
  // that the resolver has returned results to the channel.
456
160
  // If an error is returned, the error indicates the status with which
457
161
  // the call should be failed.
458
- grpc_error* ApplyServiceConfigToCallLocked(
162
+ grpc_error_handle ApplyServiceConfigToCallLocked(
459
163
  grpc_call_element* elem, grpc_metadata_batch* initial_metadata)
460
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);
164
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::resolution_mu_);
461
165
  // Invoked when the resolver result is applied to the caller, on both
462
166
  // success or failure.
463
- static void ResolutionDone(void* arg, grpc_error* error);
167
+ static void ResolutionDone(void* arg, grpc_error_handle error);
464
168
  // Removes the call (if present) from the channel's list of calls queued
465
169
  // for name resolution.
466
170
  void MaybeRemoveCallFromResolverQueuedCallsLocked(grpc_call_element* elem)
467
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);
171
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::resolution_mu_);
468
172
  // Adds the call (if not already present) to the channel's list of
469
173
  // calls queued for name resolution.
470
174
  void MaybeAddCallToResolverQueuedCallsLocked(grpc_call_element* elem)
471
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::resolution_mu_);
175
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::resolution_mu_);
472
176
 
473
177
  static void RecvInitialMetadataReadyForConfigSelectorCommitCallback(
474
- void* arg, grpc_error* error);
178
+ void* arg, grpc_error_handle error);
475
179
  void InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
476
180
  grpc_transport_stream_op_batch* batch);
477
181
 
@@ -497,11 +201,15 @@ class ChannelData::CallData {
497
201
 
498
202
  grpc_closure pick_closure_;
499
203
 
500
- // Accessed while holding ChannelData::resolution_mu_.
501
- bool service_config_applied_ = false;
502
- bool queued_pending_resolver_result_ = false;
503
- ChannelData::ResolverQueuedCall resolver_queued_call_;
504
- ResolverQueuedCallCanceller* resolver_call_canceller_ = nullptr;
204
+ // Accessed while holding ClientChannel::resolution_mu_.
205
+ bool service_config_applied_ ABSL_GUARDED_BY(&ClientChannel::resolution_mu_) =
206
+ false;
207
+ bool queued_pending_resolver_result_
208
+ ABSL_GUARDED_BY(&ClientChannel::resolution_mu_) = false;
209
+ ClientChannel::ResolverQueuedCall resolver_queued_call_
210
+ ABSL_GUARDED_BY(&ClientChannel::resolution_mu_);
211
+ ResolverQueuedCallCanceller* resolver_call_canceller_
212
+ ABSL_GUARDED_BY(&ClientChannel::resolution_mu_) = nullptr;
505
213
 
506
214
  std::function<void()> on_call_committed_;
507
215
 
@@ -519,565 +227,72 @@ class ChannelData::CallData {
519
227
  grpc_transport_stream_op_batch* pending_batches_[MAX_PENDING_BATCHES] = {};
520
228
 
521
229
  // Set when we get a cancel_stream op.
522
- grpc_error* cancel_error_ = GRPC_ERROR_NONE;
523
- };
524
-
525
- //
526
- // ChannelData::RetryingCall definition
527
- //
528
-
529
- class ChannelData::RetryingCall {
530
- public:
531
- RetryingCall(
532
- ChannelData* chand, const grpc_call_element_args& args,
533
- grpc_polling_entity* pollent,
534
- RefCountedPtr<ServerRetryThrottleData> retry_throttle_data,
535
- const ClientChannelMethodParsedConfig::RetryPolicy* retry_policy);
536
- ~RetryingCall();
537
-
538
- void StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch);
539
-
540
- RefCountedPtr<SubchannelCall> subchannel_call() const;
541
-
542
- private:
543
- // State used for starting a retryable batch on a subchannel call.
544
- // This provides its own grpc_transport_stream_op_batch and other data
545
- // structures needed to populate the ops in the batch.
546
- // We allocate one struct on the arena for each attempt at starting a
547
- // batch on a given subchannel call.
548
- struct SubchannelCallBatchData {
549
- // Creates a SubchannelCallBatchData object on the call's arena with the
550
- // specified refcount. If set_on_complete is true, the batch's
551
- // on_complete callback will be set to point to on_complete();
552
- // otherwise, the batch's on_complete callback will be null.
553
- static SubchannelCallBatchData* Create(RetryingCall* call, int refcount,
554
- bool set_on_complete);
555
-
556
- void Unref() {
557
- if (gpr_unref(&refs)) Destroy();
558
- }
559
-
560
- SubchannelCallBatchData(RetryingCall* call, int refcount,
561
- bool set_on_complete);
562
- // All dtor code must be added in `Destroy()`. This is because we may
563
- // call closures in `SubchannelCallBatchData` after they are unrefed by
564
- // `Unref()`, and msan would complain about accessing this class
565
- // after calling dtor. As a result we cannot call the `dtor` in `Unref()`.
566
- // TODO(soheil): We should try to call the dtor in `Unref()`.
567
- ~SubchannelCallBatchData() { Destroy(); }
568
- void Destroy();
569
-
570
- gpr_refcount refs;
571
- grpc_call_element* elem;
572
- RetryingCall* call;
573
- RefCountedPtr<ChannelData::LoadBalancedCall> lb_call;
574
- // The batch to use in the subchannel call.
575
- // Its payload field points to SubchannelCallRetryState::batch_payload.
576
- grpc_transport_stream_op_batch batch;
577
- // For intercepting on_complete.
578
- grpc_closure on_complete;
579
- };
580
-
581
- // Retry state associated with a subchannel call.
582
- // Stored in the parent_data of the subchannel call object.
583
- struct SubchannelCallRetryState {
584
- explicit SubchannelCallRetryState(grpc_call_context_element* context)
585
- : batch_payload(context),
586
- started_send_initial_metadata(false),
587
- completed_send_initial_metadata(false),
588
- started_send_trailing_metadata(false),
589
- completed_send_trailing_metadata(false),
590
- started_recv_initial_metadata(false),
591
- completed_recv_initial_metadata(false),
592
- started_recv_trailing_metadata(false),
593
- completed_recv_trailing_metadata(false),
594
- retry_dispatched(false) {}
595
-
596
- // SubchannelCallBatchData.batch.payload points to this.
597
- grpc_transport_stream_op_batch_payload batch_payload;
598
- // For send_initial_metadata.
599
- // Note that we need to make a copy of the initial metadata for each
600
- // subchannel call instead of just referring to the copy in call_data,
601
- // because filters in the subchannel stack will probably add entries,
602
- // so we need to start in a pristine state for each attempt of the call.
603
- grpc_linked_mdelem* send_initial_metadata_storage;
604
- grpc_metadata_batch send_initial_metadata;
605
- // For send_message.
606
- // TODO(roth): Restructure this to eliminate use of ManualConstructor.
607
- ManualConstructor<ByteStreamCache::CachingByteStream> send_message;
608
- // For send_trailing_metadata.
609
- grpc_linked_mdelem* send_trailing_metadata_storage;
610
- grpc_metadata_batch send_trailing_metadata;
611
- // For intercepting recv_initial_metadata.
612
- grpc_metadata_batch recv_initial_metadata;
613
- grpc_closure recv_initial_metadata_ready;
614
- bool trailing_metadata_available = false;
615
- // For intercepting recv_message.
616
- grpc_closure recv_message_ready;
617
- OrphanablePtr<ByteStream> recv_message;
618
- // For intercepting recv_trailing_metadata.
619
- grpc_metadata_batch recv_trailing_metadata;
620
- grpc_transport_stream_stats collect_stats;
621
- grpc_closure recv_trailing_metadata_ready;
622
- // These fields indicate which ops have been started and completed on
623
- // this subchannel call.
624
- size_t started_send_message_count = 0;
625
- size_t completed_send_message_count = 0;
626
- size_t started_recv_message_count = 0;
627
- size_t completed_recv_message_count = 0;
628
- bool started_send_initial_metadata : 1;
629
- bool completed_send_initial_metadata : 1;
630
- bool started_send_trailing_metadata : 1;
631
- bool completed_send_trailing_metadata : 1;
632
- bool started_recv_initial_metadata : 1;
633
- bool completed_recv_initial_metadata : 1;
634
- bool started_recv_trailing_metadata : 1;
635
- bool completed_recv_trailing_metadata : 1;
636
- // State for callback processing.
637
- SubchannelCallBatchData* recv_initial_metadata_ready_deferred_batch =
638
- nullptr;
639
- grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
640
- SubchannelCallBatchData* recv_message_ready_deferred_batch = nullptr;
641
- grpc_error* recv_message_error = GRPC_ERROR_NONE;
642
- SubchannelCallBatchData* recv_trailing_metadata_internal_batch = nullptr;
643
- // NOTE: Do not move this next to the metadata bitfields above. That would
644
- // save space but will also result in a data race because compiler
645
- // will generate a 2 byte store which overwrites the meta-data
646
- // fields upon setting this field.
647
- bool retry_dispatched : 1;
648
- };
649
-
650
- // Pending batches stored in call data.
651
- struct PendingBatch {
652
- // The pending batch. If nullptr, this slot is empty.
653
- grpc_transport_stream_op_batch* batch = nullptr;
654
- // Indicates whether payload for send ops has been cached in CallData.
655
- bool send_ops_cached = false;
656
- };
657
-
658
- // Caches data for send ops so that it can be retried later, if not
659
- // already cached.
660
- void MaybeCacheSendOpsForBatch(PendingBatch* pending);
661
- void FreeCachedSendInitialMetadata();
662
- // Frees cached send_message at index idx.
663
- void FreeCachedSendMessage(size_t idx);
664
- void FreeCachedSendTrailingMetadata();
665
- // Frees cached send ops that have already been completed after
666
- // committing the call.
667
- void FreeCachedSendOpDataAfterCommit(SubchannelCallRetryState* retry_state);
668
- // Frees cached send ops that were completed by the completed batch in
669
- // batch_data. Used when batches are completed after the call is committed.
670
- void FreeCachedSendOpDataForCompletedBatch(
671
- SubchannelCallBatchData* batch_data,
672
- SubchannelCallRetryState* retry_state);
673
-
674
- // Returns the index into pending_batches_ to be used for batch.
675
- static size_t GetBatchIndex(grpc_transport_stream_op_batch* batch);
676
- void PendingBatchesAdd(grpc_transport_stream_op_batch* batch);
677
- void PendingBatchClear(PendingBatch* pending);
678
- void MaybeClearPendingBatch(PendingBatch* pending);
679
- static void FailPendingBatchInCallCombiner(void* arg, grpc_error* error);
680
- // A predicate type and some useful implementations for PendingBatchesFail().
681
- typedef bool (*YieldCallCombinerPredicate)(
682
- const CallCombinerClosureList& closures);
683
- static bool YieldCallCombiner(const CallCombinerClosureList& /*closures*/) {
684
- return true;
685
- }
686
- static bool NoYieldCallCombiner(const CallCombinerClosureList& /*closures*/) {
687
- return false;
688
- }
689
- static bool YieldCallCombinerIfPendingBatchesFound(
690
- const CallCombinerClosureList& closures) {
691
- return closures.size() > 0;
692
- }
693
- // Fails all pending batches.
694
- // If yield_call_combiner_predicate returns true, assumes responsibility for
695
- // yielding the call combiner.
696
- void PendingBatchesFail(
697
- grpc_error* error,
698
- YieldCallCombinerPredicate yield_call_combiner_predicate);
699
- static void ResumePendingBatchInCallCombiner(void* arg, grpc_error* ignored);
700
- // Resumes all pending batches on lb_call_.
701
- void PendingBatchesResume();
702
- // Returns a pointer to the first pending batch for which predicate(batch)
703
- // returns true, or null if not found.
704
- template <typename Predicate>
705
- PendingBatch* PendingBatchFind(const char* log_message, Predicate predicate);
706
-
707
- // Commits the call so that no further retry attempts will be performed.
708
- void RetryCommit(SubchannelCallRetryState* retry_state);
709
- // Starts a retry after appropriate back-off.
710
- void DoRetry(SubchannelCallRetryState* retry_state,
711
- grpc_millis server_pushback_ms);
712
- // Returns true if the call is being retried.
713
- bool MaybeRetry(SubchannelCallBatchData* batch_data, grpc_status_code status,
714
- grpc_mdelem* server_pushback_md);
715
-
716
- // Invokes recv_initial_metadata_ready for a subchannel batch.
717
- static void InvokeRecvInitialMetadataCallback(void* arg, grpc_error* error);
718
- // Intercepts recv_initial_metadata_ready callback for retries.
719
- // Commits the call and returns the initial metadata up the stack.
720
- static void RecvInitialMetadataReady(void* arg, grpc_error* error);
721
-
722
- // Invokes recv_message_ready for a subchannel batch.
723
- static void InvokeRecvMessageCallback(void* arg, grpc_error* error);
724
- // Intercepts recv_message_ready callback for retries.
725
- // Commits the call and returns the message up the stack.
726
- static void RecvMessageReady(void* arg, grpc_error* error);
727
-
728
- // Sets *status and *server_pushback_md based on md_batch and error.
729
- // Only sets *server_pushback_md if server_pushback_md != nullptr.
730
- void GetCallStatus(grpc_metadata_batch* md_batch, grpc_error* error,
731
- grpc_status_code* status,
732
- grpc_mdelem** server_pushback_md);
733
- // Adds recv_trailing_metadata_ready closure to closures.
734
- void AddClosureForRecvTrailingMetadataReady(
735
- SubchannelCallBatchData* batch_data, grpc_error* error,
736
- CallCombinerClosureList* closures);
737
- // Adds any necessary closures for deferred recv_initial_metadata and
738
- // recv_message callbacks to closures.
739
- static void AddClosuresForDeferredRecvCallbacks(
740
- SubchannelCallBatchData* batch_data,
741
- SubchannelCallRetryState* retry_state, CallCombinerClosureList* closures);
742
- // Returns true if any op in the batch was not yet started.
743
- // Only looks at send ops, since recv ops are always started immediately.
744
- bool PendingBatchIsUnstarted(PendingBatch* pending,
745
- SubchannelCallRetryState* retry_state);
746
- // For any pending batch containing an op that has not yet been started,
747
- // adds the pending batch's completion closures to closures.
748
- void AddClosuresToFailUnstartedPendingBatches(
749
- SubchannelCallRetryState* retry_state, grpc_error* error,
750
- CallCombinerClosureList* closures);
751
- // Runs necessary closures upon completion of a call attempt.
752
- void RunClosuresForCompletedCall(SubchannelCallBatchData* batch_data,
753
- grpc_error* error);
754
- // Intercepts recv_trailing_metadata_ready callback for retries.
755
- // Commits the call and returns the trailing metadata up the stack.
756
- static void RecvTrailingMetadataReady(void* arg, grpc_error* error);
757
-
758
- // Adds the on_complete closure for the pending batch completed in
759
- // batch_data to closures.
760
- void AddClosuresForCompletedPendingBatch(SubchannelCallBatchData* batch_data,
761
- grpc_error* error,
762
- CallCombinerClosureList* closures);
763
-
764
- // If there are any cached ops to replay or pending ops to start on the
765
- // subchannel call, adds a closure to closures to invoke
766
- // StartRetriableSubchannelBatches().
767
- void AddClosuresForReplayOrPendingSendOps(
768
- SubchannelCallBatchData* batch_data,
769
- SubchannelCallRetryState* retry_state, CallCombinerClosureList* closures);
770
-
771
- // Callback used to intercept on_complete from subchannel calls.
772
- // Called only when retries are enabled.
773
- static void OnComplete(void* arg, grpc_error* error);
774
-
775
- static void StartBatchInCallCombiner(void* arg, grpc_error* ignored);
776
- // Adds a closure to closures that will execute batch in the call combiner.
777
- void AddClosureForSubchannelBatch(grpc_transport_stream_op_batch* batch,
778
- CallCombinerClosureList* closures);
779
- // Adds retriable send_initial_metadata op to batch_data.
780
- void AddRetriableSendInitialMetadataOp(SubchannelCallRetryState* retry_state,
781
- SubchannelCallBatchData* batch_data);
782
- // Adds retriable send_message op to batch_data.
783
- void AddRetriableSendMessageOp(SubchannelCallRetryState* retry_state,
784
- SubchannelCallBatchData* batch_data);
785
- // Adds retriable send_trailing_metadata op to batch_data.
786
- void AddRetriableSendTrailingMetadataOp(SubchannelCallRetryState* retry_state,
787
- SubchannelCallBatchData* batch_data);
788
- // Adds retriable recv_initial_metadata op to batch_data.
789
- void AddRetriableRecvInitialMetadataOp(SubchannelCallRetryState* retry_state,
790
- SubchannelCallBatchData* batch_data);
791
- // Adds retriable recv_message op to batch_data.
792
- void AddRetriableRecvMessageOp(SubchannelCallRetryState* retry_state,
793
- SubchannelCallBatchData* batch_data);
794
- // Adds retriable recv_trailing_metadata op to batch_data.
795
- void AddRetriableRecvTrailingMetadataOp(SubchannelCallRetryState* retry_state,
796
- SubchannelCallBatchData* batch_data);
797
- // Helper function used to start a recv_trailing_metadata batch. This
798
- // is used in the case where a recv_initial_metadata or recv_message
799
- // op fails in a way that we know the call is over but when the application
800
- // has not yet started its own recv_trailing_metadata op.
801
- void StartInternalRecvTrailingMetadata();
802
- // If there are any cached send ops that need to be replayed on the
803
- // current subchannel call, creates and returns a new subchannel batch
804
- // to replay those ops. Otherwise, returns nullptr.
805
- SubchannelCallBatchData* MaybeCreateSubchannelBatchForReplay(
806
- SubchannelCallRetryState* retry_state);
807
- // Adds subchannel batches for pending batches to closures.
808
- void AddSubchannelBatchesForPendingBatches(
809
- SubchannelCallRetryState* retry_state, CallCombinerClosureList* closures);
810
- // Constructs and starts whatever subchannel batches are needed on the
811
- // subchannel call.
812
- static void StartRetriableSubchannelBatches(void* arg, grpc_error* ignored);
813
-
814
- static void CreateLbCall(void* arg, grpc_error* error);
815
-
816
- ChannelData* chand_;
817
- grpc_polling_entity* pollent_;
818
- RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
819
- const ClientChannelMethodParsedConfig::RetryPolicy* retry_policy_ = nullptr;
820
- BackOff retry_backoff_;
821
-
822
- grpc_slice path_; // Request path.
823
- gpr_cycle_counter call_start_time_;
824
- grpc_millis deadline_;
825
- Arena* arena_;
826
- grpc_call_stack* owning_call_;
827
- CallCombiner* call_combiner_;
828
- grpc_call_context_element* call_context_;
829
-
830
- grpc_closure retry_closure_;
831
-
832
- RefCountedPtr<ChannelData::LoadBalancedCall> lb_call_;
833
-
834
- // Batches are added to this list when received from above.
835
- // They are removed when we are done handling the batch (i.e., when
836
- // either we have invoked all of the batch's callbacks or we have
837
- // passed the batch down to the LB call and are not intercepting any of
838
- // its callbacks).
839
- // TODO(roth): Now that the retry code is split out into its own call
840
- // object, revamp this to work in a cleaner way, since we no longer need
841
- // for batches to ever wait for name resolution or LB picks.
842
- PendingBatch pending_batches_[MAX_PENDING_BATCHES];
843
- bool pending_send_initial_metadata_ : 1;
844
- bool pending_send_message_ : 1;
845
- bool pending_send_trailing_metadata_ : 1;
846
-
847
- // Set when we get a cancel_stream op.
848
- grpc_error* cancel_error_ = GRPC_ERROR_NONE;
849
-
850
- // Retry state.
851
- bool enable_retries_ : 1;
852
- bool retry_committed_ : 1;
853
- bool last_attempt_got_server_pushback_ : 1;
854
- int num_attempts_completed_ = 0;
855
- size_t bytes_buffered_for_retry_ = 0;
856
- grpc_timer retry_timer_;
857
-
858
- // The number of pending retriable subchannel batches containing send ops.
859
- // We hold a ref to the call stack while this is non-zero, since replay
860
- // batches may not complete until after all callbacks have been returned
861
- // to the surface, and we need to make sure that the call is not destroyed
862
- // until all of these batches have completed.
863
- // Note that we actually only need to track replay batches, but it's
864
- // easier to track all batches with send ops.
865
- int num_pending_retriable_subchannel_send_batches_ = 0;
866
-
867
- // Cached data for retrying send ops.
868
- // send_initial_metadata
869
- bool seen_send_initial_metadata_ = false;
870
- grpc_linked_mdelem* send_initial_metadata_storage_ = nullptr;
871
- grpc_metadata_batch send_initial_metadata_;
872
- uint32_t send_initial_metadata_flags_;
873
- gpr_atm* peer_string_;
874
- // send_message
875
- // When we get a send_message op, we replace the original byte stream
876
- // with a CachingByteStream that caches the slices to a local buffer for
877
- // use in retries.
878
- // Note: We inline the cache for the first 3 send_message ops and use
879
- // dynamic allocation after that. This number was essentially picked
880
- // at random; it could be changed in the future to tune performance.
881
- absl::InlinedVector<ByteStreamCache*, 3> send_messages_;
882
- // send_trailing_metadata
883
- bool seen_send_trailing_metadata_ = false;
884
- grpc_linked_mdelem* send_trailing_metadata_storage_ = nullptr;
885
- grpc_metadata_batch send_trailing_metadata_;
230
+ grpc_error_handle cancel_error_ = GRPC_ERROR_NONE;
886
231
  };
887
232
 
888
233
  //
889
- // ChannelData::LoadBalancedCall definition
234
+ // Filter vtable
890
235
  //
891
236
 
892
- // This object is ref-counted, but it cannot inherit from RefCounted<>,
893
- // because it is allocated on the arena and can't free its memory when
894
- // its refcount goes to zero. So instead, it manually implements the
895
- // same API as RefCounted<>, so that it can be used with RefCountedPtr<>.
896
- class ChannelData::LoadBalancedCall {
897
- public:
898
- static RefCountedPtr<LoadBalancedCall> Create(
899
- ChannelData* chand, const grpc_call_element_args& args,
900
- grpc_polling_entity* pollent, size_t parent_data_size);
901
-
902
- LoadBalancedCall(ChannelData* chand, const grpc_call_element_args& args,
903
- grpc_polling_entity* pollent);
904
- ~LoadBalancedCall();
905
-
906
- // Interface of RefCounted<>.
907
- RefCountedPtr<LoadBalancedCall> Ref() GRPC_MUST_USE_RESULT;
908
- RefCountedPtr<LoadBalancedCall> Ref(const DebugLocation& location,
909
- const char* reason) GRPC_MUST_USE_RESULT;
910
- // When refcount drops to 0, destroys itself and the associated call stack,
911
- // but does NOT free the memory because it's in the call arena.
912
- void Unref();
913
- void Unref(const DebugLocation& location, const char* reason);
914
-
915
- void* GetParentData();
916
-
917
- void StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch);
918
-
919
- // Invoked by channel for queued LB picks when the picker is updated.
920
- static void PickSubchannel(void* arg, grpc_error* error);
921
- // Helper function for performing an LB pick while holding the data plane
922
- // mutex. Returns true if the pick is complete, in which case the caller
923
- // must invoke PickDone() or AsyncPickDone() with the returned error.
924
- bool PickSubchannelLocked(grpc_error** error)
925
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::data_plane_mu_);
926
- // Schedules a callback to process the completed pick. The callback
927
- // will not run until after this method returns.
928
- void AsyncPickDone(grpc_error* error);
929
-
930
- RefCountedPtr<SubchannelCall> subchannel_call() const {
931
- return subchannel_call_;
932
- }
933
-
934
- private:
935
- // Allow RefCountedPtr<> to access IncrementRefCount().
936
- template <typename T>
937
- friend class ::grpc_core::RefCountedPtr;
938
-
939
- class LbQueuedCallCanceller;
940
- class Metadata;
941
- class LbCallState;
942
-
943
- // Interface of RefCounted<>.
944
- void IncrementRefCount();
945
- void IncrementRefCount(const DebugLocation& location, const char* reason);
946
-
947
- // Returns the index into pending_batches_ to be used for batch.
948
- static size_t GetBatchIndex(grpc_transport_stream_op_batch* batch);
949
- void PendingBatchesAdd(grpc_transport_stream_op_batch* batch);
950
- static void FailPendingBatchInCallCombiner(void* arg, grpc_error* error);
951
- // A predicate type and some useful implementations for PendingBatchesFail().
952
- typedef bool (*YieldCallCombinerPredicate)(
953
- const CallCombinerClosureList& closures);
954
- static bool YieldCallCombiner(const CallCombinerClosureList& /*closures*/) {
955
- return true;
956
- }
957
- static bool NoYieldCallCombiner(const CallCombinerClosureList& /*closures*/) {
958
- return false;
959
- }
960
- static bool YieldCallCombinerIfPendingBatchesFound(
961
- const CallCombinerClosureList& closures) {
962
- return closures.size() > 0;
963
- }
964
- // Fails all pending batches.
965
- // If yield_call_combiner_predicate returns true, assumes responsibility for
966
- // yielding the call combiner.
967
- void PendingBatchesFail(
968
- grpc_error* error,
969
- YieldCallCombinerPredicate yield_call_combiner_predicate);
970
- static void ResumePendingBatchInCallCombiner(void* arg, grpc_error* ignored);
971
- // Resumes all pending batches on subchannel_call_.
972
- void PendingBatchesResume();
973
-
974
- static void RecvTrailingMetadataReadyForLoadBalancingPolicy(
975
- void* arg, grpc_error* error);
976
- void InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
977
- grpc_transport_stream_op_batch* batch);
978
-
979
- void CreateSubchannelCall();
980
- // Invoked when a pick is completed, on both success or failure.
981
- static void PickDone(void* arg, grpc_error* error);
982
- // Removes the call from the channel's list of queued picks if present.
983
- void MaybeRemoveCallFromLbQueuedCallsLocked()
984
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::data_plane_mu_);
985
- // Adds the call to the channel's list of queued picks if not already present.
986
- void MaybeAddCallToLbQueuedCallsLocked()
987
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ChannelData::data_plane_mu_);
988
-
989
- RefCount refs_;
990
-
991
- ChannelData* chand_;
992
-
993
- // TODO(roth): Instead of duplicating these fields in every filter
994
- // that uses any one of them, we should store them in the call
995
- // context. This will save per-call memory overhead.
996
- grpc_slice path_; // Request path.
997
- gpr_cycle_counter call_start_time_;
998
- grpc_millis deadline_;
999
- Arena* arena_;
1000
- grpc_call_stack* owning_call_;
1001
- CallCombiner* call_combiner_;
1002
- grpc_call_context_element* call_context_;
1003
-
1004
- // Set when we get a cancel_stream op.
1005
- grpc_error* cancel_error_ = GRPC_ERROR_NONE;
1006
-
1007
- grpc_polling_entity* pollent_ = nullptr;
1008
-
1009
- grpc_closure pick_closure_;
1010
-
1011
- // Accessed while holding ChannelData::data_plane_mu_.
1012
- ChannelData::LbQueuedCall queued_call_;
1013
- bool queued_pending_lb_pick_ = false;
1014
- const LoadBalancingPolicy::BackendMetricData* backend_metric_data_ = nullptr;
1015
- RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
1016
- std::function<void(grpc_error*, LoadBalancingPolicy::MetadataInterface*,
1017
- LoadBalancingPolicy::CallState*)>
1018
- lb_recv_trailing_metadata_ready_;
1019
- LbQueuedCallCanceller* lb_call_canceller_ = nullptr;
1020
-
1021
- RefCountedPtr<SubchannelCall> subchannel_call_;
1022
-
1023
- // For intercepting recv_trailing_metadata_ready for the LB policy.
1024
- grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
1025
- grpc_closure recv_trailing_metadata_ready_;
1026
- grpc_closure* original_recv_trailing_metadata_ready_ = nullptr;
1027
-
1028
- // Batches are added to this list when received from above.
1029
- // They are removed when we are done handling the batch (i.e., when
1030
- // either we have invoked all of the batch's callbacks or we have
1031
- // passed the batch down to the subchannel call and are not
1032
- // intercepting any of its callbacks).
1033
- grpc_transport_stream_op_batch* pending_batches_[MAX_PENDING_BATCHES] = {};
237
+ const grpc_channel_filter ClientChannel::kFilterVtable = {
238
+ ClientChannel::CallData::StartTransportStreamOpBatch,
239
+ ClientChannel::StartTransportOp,
240
+ sizeof(ClientChannel::CallData),
241
+ ClientChannel::CallData::Init,
242
+ ClientChannel::CallData::SetPollent,
243
+ ClientChannel::CallData::Destroy,
244
+ sizeof(ClientChannel),
245
+ ClientChannel::Init,
246
+ ClientChannel::Destroy,
247
+ ClientChannel::GetChannelInfo,
248
+ "client-channel",
1034
249
  };
1035
250
 
1036
251
  //
1037
252
  // dynamic termination filter
1038
253
  //
1039
254
 
1040
- // Channel arg pointer vtable for GRPC_ARG_CLIENT_CHANNEL_DATA.
1041
- void* ChannelDataArgCopy(void* p) { return p; }
1042
- void ChannelDataArgDestroy(void* /*p*/) {}
1043
- int ChannelDataArgCmp(void* p, void* q) { return GPR_ICMP(p, q); }
1044
- const grpc_arg_pointer_vtable kChannelDataArgPointerVtable = {
1045
- ChannelDataArgCopy, ChannelDataArgDestroy, ChannelDataArgCmp};
1046
-
1047
- // Channel arg pointer vtable for GRPC_ARG_RETRY_THROTTLE_DATA.
1048
- void* RetryThrottleDataArgCopy(void* p) {
1049
- auto* retry_throttle_data = static_cast<ServerRetryThrottleData*>(p);
1050
- retry_throttle_data->Ref().release();
255
+ namespace {
256
+
257
+ // Channel arg pointer vtable for GRPC_ARG_CLIENT_CHANNEL.
258
+ void* ClientChannelArgCopy(void* p) { return p; }
259
+ void ClientChannelArgDestroy(void* /*p*/) {}
260
+ int ClientChannelArgCmp(void* p, void* q) { return GPR_ICMP(p, q); }
261
+ const grpc_arg_pointer_vtable kClientChannelArgPointerVtable = {
262
+ ClientChannelArgCopy, ClientChannelArgDestroy, ClientChannelArgCmp};
263
+
264
+ // Channel arg pointer vtable for GRPC_ARG_SERVICE_CONFIG_OBJ.
265
+ void* ServiceConfigObjArgCopy(void* p) {
266
+ auto* service_config = static_cast<ServiceConfig*>(p);
267
+ service_config->Ref().release();
1051
268
  return p;
1052
269
  }
1053
- void RetryThrottleDataArgDestroy(void* p) {
1054
- auto* retry_throttle_data = static_cast<ServerRetryThrottleData*>(p);
1055
- retry_throttle_data->Unref();
270
+ void ServiceConfigObjArgDestroy(void* p) {
271
+ auto* service_config = static_cast<ServiceConfig*>(p);
272
+ service_config->Unref();
1056
273
  }
1057
- int RetryThrottleDataArgCmp(void* p, void* q) { return GPR_ICMP(p, q); }
1058
- const grpc_arg_pointer_vtable kRetryThrottleDataArgPointerVtable = {
1059
- RetryThrottleDataArgCopy, RetryThrottleDataArgDestroy,
1060
- RetryThrottleDataArgCmp};
274
+ int ServiceConfigObjArgCmp(void* p, void* q) { return GPR_ICMP(p, q); }
275
+ const grpc_arg_pointer_vtable kServiceConfigObjArgPointerVtable = {
276
+ ServiceConfigObjArgCopy, ServiceConfigObjArgDestroy,
277
+ ServiceConfigObjArgCmp};
1061
278
 
1062
- class ChannelData::DynamicTerminationFilterChannelData {
279
+ class DynamicTerminationFilter {
1063
280
  public:
1064
- class DynamicTerminationFilterCallData;
281
+ class CallData;
1065
282
 
1066
- static const grpc_channel_filter kDynamicTerminationFilterVtable;
283
+ static const grpc_channel_filter kFilterVtable;
1067
284
 
1068
- static grpc_error* Init(grpc_channel_element* elem,
1069
- grpc_channel_element_args* args) {
285
+ static grpc_error_handle Init(grpc_channel_element* elem,
286
+ grpc_channel_element_args* args) {
1070
287
  GPR_ASSERT(args->is_last);
1071
- GPR_ASSERT(elem->filter == &kDynamicTerminationFilterVtable);
1072
- new (elem->channel_data)
1073
- DynamicTerminationFilterChannelData(args->channel_args);
288
+ GPR_ASSERT(elem->filter == &kFilterVtable);
289
+ new (elem->channel_data) DynamicTerminationFilter(args->channel_args);
1074
290
  return GRPC_ERROR_NONE;
1075
291
  }
1076
292
 
1077
293
  static void Destroy(grpc_channel_element* elem) {
1078
- auto* chand =
1079
- static_cast<DynamicTerminationFilterChannelData*>(elem->channel_data);
1080
- chand->~DynamicTerminationFilterChannelData();
294
+ auto* chand = static_cast<DynamicTerminationFilter*>(elem->channel_data);
295
+ chand->~DynamicTerminationFilter();
1081
296
  }
1082
297
 
1083
298
  // Will never be called.
@@ -1087,52 +302,30 @@ class ChannelData::DynamicTerminationFilterChannelData {
1087
302
  const grpc_channel_info* /*info*/) {}
1088
303
 
1089
304
  private:
1090
- static RefCountedPtr<ServerRetryThrottleData> GetRetryThrottleDataFromArgs(
1091
- const grpc_channel_args* args) {
1092
- auto* retry_throttle_data =
1093
- grpc_channel_args_find_pointer<ServerRetryThrottleData>(
1094
- args, GRPC_ARG_RETRY_THROTTLE_DATA);
1095
- if (retry_throttle_data == nullptr) return nullptr;
1096
- return retry_throttle_data->Ref();
1097
- }
1098
-
1099
- explicit DynamicTerminationFilterChannelData(const grpc_channel_args* args)
1100
- : chand_(grpc_channel_args_find_pointer<ChannelData>(
1101
- args, GRPC_ARG_CLIENT_CHANNEL_DATA)),
1102
- retry_throttle_data_(GetRetryThrottleDataFromArgs(args)) {}
1103
-
1104
- ChannelData* chand_;
1105
- RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
305
+ explicit DynamicTerminationFilter(const grpc_channel_args* args)
306
+ : chand_(grpc_channel_args_find_pointer<ClientChannel>(
307
+ args, GRPC_ARG_CLIENT_CHANNEL)) {}
308
+
309
+ ClientChannel* chand_;
1106
310
  };
1107
311
 
1108
- class ChannelData::DynamicTerminationFilterChannelData::
1109
- DynamicTerminationFilterCallData {
312
+ class DynamicTerminationFilter::CallData {
1110
313
  public:
1111
- static grpc_error* Init(grpc_call_element* elem,
1112
- const grpc_call_element_args* args) {
1113
- new (elem->call_data) DynamicTerminationFilterCallData(*args);
314
+ static grpc_error_handle Init(grpc_call_element* elem,
315
+ const grpc_call_element_args* args) {
316
+ new (elem->call_data) CallData(*args);
1114
317
  return GRPC_ERROR_NONE;
1115
318
  }
1116
319
 
1117
320
  static void Destroy(grpc_call_element* elem,
1118
321
  const grpc_call_final_info* /*final_info*/,
1119
322
  grpc_closure* then_schedule_closure) {
1120
- auto* calld =
1121
- static_cast<DynamicTerminationFilterCallData*>(elem->call_data);
1122
- auto* chand =
1123
- static_cast<DynamicTerminationFilterChannelData*>(elem->channel_data);
323
+ auto* calld = static_cast<CallData*>(elem->call_data);
1124
324
  RefCountedPtr<SubchannelCall> subchannel_call;
1125
- if (chand->chand_->enable_retries_) {
1126
- if (GPR_LIKELY(calld->retrying_call_ != nullptr)) {
1127
- subchannel_call = calld->retrying_call_->subchannel_call();
1128
- calld->retrying_call_->~RetryingCall();
1129
- }
1130
- } else {
1131
- if (GPR_LIKELY(calld->lb_call_ != nullptr)) {
1132
- subchannel_call = calld->lb_call_->subchannel_call();
1133
- }
325
+ if (GPR_LIKELY(calld->lb_call_ != nullptr)) {
326
+ subchannel_call = calld->lb_call_->subchannel_call();
1134
327
  }
1135
- calld->~DynamicTerminationFilterCallData();
328
+ calld->~CallData();
1136
329
  if (GPR_LIKELY(subchannel_call != nullptr)) {
1137
330
  subchannel_call->SetAfterCallStackDestroy(then_schedule_closure);
1138
331
  } else {
@@ -1143,60 +336,31 @@ class ChannelData::DynamicTerminationFilterChannelData::
1143
336
 
1144
337
  static void StartTransportStreamOpBatch(
1145
338
  grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
1146
- auto* calld =
1147
- static_cast<DynamicTerminationFilterCallData*>(elem->call_data);
1148
- auto* chand =
1149
- static_cast<DynamicTerminationFilterChannelData*>(elem->channel_data);
1150
- if (chand->chand_->enable_retries_) {
1151
- calld->retrying_call_->StartTransportStreamOpBatch(batch);
1152
- } else {
1153
- calld->lb_call_->StartTransportStreamOpBatch(batch);
1154
- }
339
+ auto* calld = static_cast<CallData*>(elem->call_data);
340
+ calld->lb_call_->StartTransportStreamOpBatch(batch);
1155
341
  }
1156
342
 
1157
343
  static void SetPollent(grpc_call_element* elem,
1158
344
  grpc_polling_entity* pollent) {
1159
- auto* calld =
1160
- static_cast<DynamicTerminationFilterCallData*>(elem->call_data);
1161
- auto* chand =
1162
- static_cast<DynamicTerminationFilterChannelData*>(elem->channel_data);
1163
- ChannelData* client_channel = chand->chand_;
345
+ auto* calld = static_cast<CallData*>(elem->call_data);
346
+ auto* chand = static_cast<DynamicTerminationFilter*>(elem->channel_data);
347
+ ClientChannel* client_channel = chand->chand_;
1164
348
  grpc_call_element_args args = {
1165
349
  calld->owning_call_, nullptr,
1166
350
  calld->call_context_, calld->path_,
1167
351
  calld->call_start_time_, calld->deadline_,
1168
352
  calld->arena_, calld->call_combiner_};
1169
- if (client_channel->enable_retries_) {
1170
- // Get retry settings from service config.
1171
- auto* svc_cfg_call_data = static_cast<ServiceConfigCallData*>(
1172
- calld->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
1173
- GPR_ASSERT(svc_cfg_call_data != nullptr);
1174
- auto* method_config = static_cast<const ClientChannelMethodParsedConfig*>(
1175
- svc_cfg_call_data->GetMethodParsedConfig(
1176
- ClientChannelServiceConfigParser::ParserIndex()));
1177
- // Create retrying call.
1178
- calld->retrying_call_ = calld->arena_->New<ChannelData::RetryingCall>(
1179
- client_channel, args, pollent, chand->retry_throttle_data_,
1180
- method_config == nullptr ? nullptr : method_config->retry_policy());
1181
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1182
- gpr_log(
1183
- GPR_INFO,
1184
- "chand=%p dymamic_termination_calld=%p: create retrying_call=%p",
1185
- client_channel, calld, calld->retrying_call_);
1186
- }
1187
- } else {
1188
- calld->lb_call_ = ChannelData::LoadBalancedCall::Create(client_channel,
1189
- args, pollent, 0);
1190
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1191
- gpr_log(GPR_INFO,
1192
- "chand=%p dynamic_termination_calld=%p: create lb_call=%p",
1193
- chand, client_channel, calld->lb_call_.get());
1194
- }
353
+ calld->lb_call_ =
354
+ client_channel->CreateLoadBalancedCall(args, pollent, nullptr);
355
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
356
+ gpr_log(GPR_INFO,
357
+ "chand=%p dynamic_termination_calld=%p: create lb_call=%p", chand,
358
+ client_channel, calld->lb_call_.get());
1195
359
  }
1196
360
  }
1197
361
 
1198
362
  private:
1199
- explicit DynamicTerminationFilterCallData(const grpc_call_element_args& args)
363
+ explicit CallData(const grpc_call_element_args& args)
1200
364
  : path_(grpc_slice_ref_internal(args.path)),
1201
365
  call_start_time_(args.start_time),
1202
366
  deadline_(args.deadline),
@@ -1205,7 +369,7 @@ class ChannelData::DynamicTerminationFilterChannelData::
1205
369
  call_combiner_(args.call_combiner),
1206
370
  call_context_(args.context) {}
1207
371
 
1208
- ~DynamicTerminationFilterCallData() { grpc_slice_unref_internal(path_); }
372
+ ~CallData() { grpc_slice_unref_internal(path_); }
1209
373
 
1210
374
  grpc_slice path_; // Request path.
1211
375
  gpr_cycle_counter call_start_time_;
@@ -1215,32 +379,58 @@ class ChannelData::DynamicTerminationFilterChannelData::
1215
379
  CallCombiner* call_combiner_;
1216
380
  grpc_call_context_element* call_context_;
1217
381
 
1218
- ChannelData::RetryingCall* retrying_call_ = nullptr;
1219
- RefCountedPtr<LoadBalancedCall> lb_call_;
382
+ RefCountedPtr<ClientChannel::LoadBalancedCall> lb_call_;
1220
383
  };
1221
384
 
1222
- const grpc_channel_filter ChannelData::DynamicTerminationFilterChannelData::
1223
- kDynamicTerminationFilterVtable = {
1224
- ChannelData::DynamicTerminationFilterChannelData::
1225
- DynamicTerminationFilterCallData::StartTransportStreamOpBatch,
1226
- ChannelData::DynamicTerminationFilterChannelData::StartTransportOp,
1227
- sizeof(ChannelData::DynamicTerminationFilterChannelData::
1228
- DynamicTerminationFilterCallData),
1229
- ChannelData::DynamicTerminationFilterChannelData::
1230
- DynamicTerminationFilterCallData::Init,
1231
- ChannelData::DynamicTerminationFilterChannelData::
1232
- DynamicTerminationFilterCallData::SetPollent,
1233
- ChannelData::DynamicTerminationFilterChannelData::
1234
- DynamicTerminationFilterCallData::Destroy,
1235
- sizeof(ChannelData::DynamicTerminationFilterChannelData),
1236
- ChannelData::DynamicTerminationFilterChannelData::Init,
1237
- ChannelData::DynamicTerminationFilterChannelData::Destroy,
1238
- ChannelData::DynamicTerminationFilterChannelData::GetChannelInfo,
1239
- "dynamic_filter_termination",
385
+ const grpc_channel_filter DynamicTerminationFilter::kFilterVtable = {
386
+ DynamicTerminationFilter::CallData::StartTransportStreamOpBatch,
387
+ DynamicTerminationFilter::StartTransportOp,
388
+ sizeof(DynamicTerminationFilter::CallData),
389
+ DynamicTerminationFilter::CallData::Init,
390
+ DynamicTerminationFilter::CallData::SetPollent,
391
+ DynamicTerminationFilter::CallData::Destroy,
392
+ sizeof(DynamicTerminationFilter),
393
+ DynamicTerminationFilter::Init,
394
+ DynamicTerminationFilter::Destroy,
395
+ DynamicTerminationFilter::GetChannelInfo,
396
+ "dynamic_filter_termination",
397
+ };
398
+
399
+ } // namespace
400
+
401
+ //
402
+ // ClientChannel::ResolverResultHandler
403
+ //
404
+
405
+ class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
406
+ public:
407
+ explicit ResolverResultHandler(ClientChannel* chand) : chand_(chand) {
408
+ GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ResolverResultHandler");
409
+ }
410
+
411
+ ~ResolverResultHandler() override {
412
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
413
+ gpr_log(GPR_INFO, "chand=%p: resolver shutdown complete", chand_);
414
+ }
415
+ GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "ResolverResultHandler");
416
+ }
417
+
418
+ void ReturnResult(Resolver::Result result) override
419
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
420
+ chand_->OnResolverResultChangedLocked(std::move(result));
421
+ }
422
+
423
+ void ReturnError(grpc_error_handle error) override
424
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
425
+ chand_->OnResolverErrorLocked(error);
426
+ }
427
+
428
+ private:
429
+ ClientChannel* chand_;
1240
430
  };
1241
431
 
1242
432
  //
1243
- // ChannelData::SubchannelWrapper
433
+ // ClientChannel::SubchannelWrapper
1244
434
  //
1245
435
 
1246
436
  // This class is a wrapper for Subchannel that hides details of the
@@ -1251,9 +441,9 @@ const grpc_channel_filter ChannelData::DynamicTerminationFilterChannelData::
1251
441
  // underlying subchannel is shared between channels, this wrapper will only
1252
442
  // be used within one channel, so it will always be synchronized by the
1253
443
  // control plane work_serializer.
1254
- class ChannelData::SubchannelWrapper : public SubchannelInterface {
444
+ class ClientChannel::SubchannelWrapper : public SubchannelInterface {
1255
445
  public:
1256
- SubchannelWrapper(ChannelData* chand, RefCountedPtr<Subchannel> subchannel,
446
+ SubchannelWrapper(ClientChannel* chand, RefCountedPtr<Subchannel> subchannel,
1257
447
  absl::optional<std::string> health_check_service_name)
1258
448
  : SubchannelInterface(
1259
449
  GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
@@ -1301,7 +491,8 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1301
491
  GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "SubchannelWrapper");
1302
492
  }
1303
493
 
1304
- grpc_connectivity_state CheckConnectivityState() override {
494
+ grpc_connectivity_state CheckConnectivityState() override
495
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1305
496
  RefCountedPtr<ConnectedSubchannel> connected_subchannel;
1306
497
  grpc_connectivity_state connectivity_state =
1307
498
  subchannel_->CheckConnectivityState(health_check_service_name_,
@@ -1380,16 +571,19 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1380
571
  }
1381
572
 
1382
573
  // Caller must be holding the control-plane work_serializer.
1383
- ConnectedSubchannel* connected_subchannel() const {
574
+ ConnectedSubchannel* connected_subchannel() const
575
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::work_serializer_) {
1384
576
  return connected_subchannel_.get();
1385
577
  }
1386
578
 
1387
579
  // Caller must be holding the data-plane mutex.
1388
- ConnectedSubchannel* connected_subchannel_in_data_plane() const {
580
+ ConnectedSubchannel* connected_subchannel_in_data_plane() const
581
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
1389
582
  return connected_subchannel_in_data_plane_.get();
1390
583
  }
1391
584
  void set_connected_subchannel_in_data_plane(
1392
- RefCountedPtr<ConnectedSubchannel> connected_subchannel) {
585
+ RefCountedPtr<ConnectedSubchannel> connected_subchannel)
586
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
1393
587
  connected_subchannel_in_data_plane_ = std::move(connected_subchannel);
1394
588
  }
1395
589
 
@@ -1422,7 +616,10 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1422
616
  ~WatcherWrapper() override {
1423
617
  auto* parent = parent_.release(); // ref owned by lambda
1424
618
  parent->chand_->work_serializer_->Run(
1425
- [parent]() { parent->Unref(DEBUG_LOCATION, "WatcherWrapper"); },
619
+ [parent]()
620
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_->chand_->work_serializer_) {
621
+ parent->Unref(DEBUG_LOCATION, "WatcherWrapper");
622
+ },
1426
623
  DEBUG_LOCATION);
1427
624
  }
1428
625
 
@@ -1435,10 +632,11 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1435
632
  }
1436
633
  Ref().release(); // ref owned by lambda
1437
634
  parent_->chand_->work_serializer_->Run(
1438
- [this]() {
1439
- ApplyUpdateInControlPlaneWorkSerializer();
1440
- Unref();
1441
- },
635
+ [this]()
636
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_->chand_->work_serializer_) {
637
+ ApplyUpdateInControlPlaneWorkSerializer();
638
+ Unref();
639
+ },
1442
640
  DEBUG_LOCATION);
1443
641
  }
1444
642
 
@@ -1459,7 +657,8 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1459
657
  grpc_connectivity_state last_seen_state() const { return last_seen_state_; }
1460
658
 
1461
659
  private:
1462
- void ApplyUpdateInControlPlaneWorkSerializer() {
660
+ void ApplyUpdateInControlPlaneWorkSerializer()
661
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_->chand_->work_serializer_) {
1463
662
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1464
663
  gpr_log(GPR_INFO,
1465
664
  "chand=%p: processing connectivity change in work serializer "
@@ -1513,7 +712,8 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1513
712
  };
1514
713
 
1515
714
  void MaybeUpdateConnectedSubchannel(
1516
- RefCountedPtr<ConnectedSubchannel> connected_subchannel) {
715
+ RefCountedPtr<ConnectedSubchannel> connected_subchannel)
716
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::work_serializer_) {
1517
717
  // Update the connected subchannel only if the channel is not shutting
1518
718
  // down. This is because once the channel is shutting down, we
1519
719
  // ignore picker updates from the LB policy, which means that
@@ -1521,7 +721,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1521
721
  // in chand_->pending_subchannel_updates_. So we don't want to add
1522
722
  // entries there that will never be processed, since that would
1523
723
  // leave dangling refs to the channel and prevent its destruction.
1524
- grpc_error* disconnect_error = chand_->disconnect_error();
724
+ grpc_error_handle disconnect_error = chand_->disconnect_error();
1525
725
  if (disconnect_error != GRPC_ERROR_NONE) return;
1526
726
  // Not shutting down, so do the update.
1527
727
  if (connected_subchannel_ != connected_subchannel) {
@@ -1533,7 +733,7 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1533
733
  }
1534
734
  }
1535
735
 
1536
- ChannelData* chand_;
736
+ ClientChannel* chand_;
1537
737
  RefCountedPtr<Subchannel> subchannel_;
1538
738
  absl::optional<std::string> health_check_service_name_;
1539
739
  // Maps from the address of the watcher passed to us by the LB policy
@@ -1543,17 +743,19 @@ class ChannelData::SubchannelWrapper : public SubchannelInterface {
1543
743
  // corresponding WrapperWatcher to cancel on the underlying subchannel.
1544
744
  std::map<ConnectivityStateWatcherInterface*, WatcherWrapper*> watcher_map_;
1545
745
  // To be accessed only in the control plane work_serializer.
1546
- RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
746
+ RefCountedPtr<ConnectedSubchannel> connected_subchannel_
747
+ ABSL_GUARDED_BY(&ClientChannel::work_serializer_);
1547
748
  // To be accessed only in the data plane mutex.
1548
- RefCountedPtr<ConnectedSubchannel> connected_subchannel_in_data_plane_;
749
+ RefCountedPtr<ConnectedSubchannel> connected_subchannel_in_data_plane_
750
+ ABSL_GUARDED_BY(&ClientChannel::data_plane_mu_);
1549
751
  };
1550
752
 
1551
753
  //
1552
- // ChannelData::ExternalConnectivityWatcher
754
+ // ClientChannel::ExternalConnectivityWatcher
1553
755
  //
1554
756
 
1555
- ChannelData::ExternalConnectivityWatcher::ExternalConnectivityWatcher(
1556
- ChannelData* chand, grpc_polling_entity pollent,
757
+ ClientChannel::ExternalConnectivityWatcher::ExternalConnectivityWatcher(
758
+ ClientChannel* chand, grpc_polling_entity pollent,
1557
759
  grpc_connectivity_state* state, grpc_closure* on_complete,
1558
760
  grpc_closure* watcher_timer_init)
1559
761
  : chand_(chand),
@@ -1575,22 +777,22 @@ ChannelData::ExternalConnectivityWatcher::ExternalConnectivityWatcher(
1575
777
  }
1576
778
  // Pass the ref from creating the object to Start().
1577
779
  chand_->work_serializer_->Run(
1578
- [this]() {
780
+ [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1579
781
  // The ref is passed to AddWatcherLocked().
1580
782
  AddWatcherLocked();
1581
783
  },
1582
784
  DEBUG_LOCATION);
1583
785
  }
1584
786
 
1585
- ChannelData::ExternalConnectivityWatcher::~ExternalConnectivityWatcher() {
787
+ ClientChannel::ExternalConnectivityWatcher::~ExternalConnectivityWatcher() {
1586
788
  grpc_polling_entity_del_from_pollset_set(&pollent_,
1587
789
  chand_->interested_parties_);
1588
790
  GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_,
1589
791
  "ExternalConnectivityWatcher");
1590
792
  }
1591
793
 
1592
- void ChannelData::ExternalConnectivityWatcher::
1593
- RemoveWatcherFromExternalWatchersMap(ChannelData* chand,
794
+ void ClientChannel::ExternalConnectivityWatcher::
795
+ RemoveWatcherFromExternalWatchersMap(ClientChannel* chand,
1594
796
  grpc_closure* on_complete,
1595
797
  bool cancel) {
1596
798
  RefCountedPtr<ExternalConnectivityWatcher> watcher;
@@ -1607,7 +809,7 @@ void ChannelData::ExternalConnectivityWatcher::
1607
809
  if (watcher != nullptr && cancel) watcher->Cancel();
1608
810
  }
1609
811
 
1610
- void ChannelData::ExternalConnectivityWatcher::Notify(
812
+ void ClientChannel::ExternalConnectivityWatcher::Notify(
1611
813
  grpc_connectivity_state state, const absl::Status& /* status */) {
1612
814
  bool done = false;
1613
815
  if (!done_.CompareExchangeStrong(&done, true, MemoryOrder::RELAXED,
@@ -1615,7 +817,8 @@ void ChannelData::ExternalConnectivityWatcher::Notify(
1615
817
  return; // Already done.
1616
818
  }
1617
819
  // Remove external watcher.
1618
- chand_->RemoveExternalConnectivityWatcher(on_complete_, /*cancel=*/false);
820
+ ExternalConnectivityWatcher::RemoveWatcherFromExternalWatchersMap(
821
+ chand_, on_complete_, /*cancel=*/false);
1619
822
  // Report new state to the user.
1620
823
  *state_ = state;
1621
824
  ExecCtx::Run(DEBUG_LOCATION, on_complete_, GRPC_ERROR_NONE);
@@ -1623,12 +826,15 @@ void ChannelData::ExternalConnectivityWatcher::Notify(
1623
826
  // Not needed in state SHUTDOWN, because the tracker will
1624
827
  // automatically remove all watchers in that case.
1625
828
  if (state != GRPC_CHANNEL_SHUTDOWN) {
1626
- chand_->work_serializer_->Run([this]() { RemoveWatcherLocked(); },
1627
- DEBUG_LOCATION);
829
+ chand_->work_serializer_->Run(
830
+ [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
831
+ RemoveWatcherLocked();
832
+ },
833
+ DEBUG_LOCATION);
1628
834
  }
1629
835
  }
1630
836
 
1631
- void ChannelData::ExternalConnectivityWatcher::Cancel() {
837
+ void ClientChannel::ExternalConnectivityWatcher::Cancel() {
1632
838
  bool done = false;
1633
839
  if (!done_.CompareExchangeStrong(&done, true, MemoryOrder::RELAXED,
1634
840
  MemoryOrder::RELAXED)) {
@@ -1636,84 +842,95 @@ void ChannelData::ExternalConnectivityWatcher::Cancel() {
1636
842
  }
1637
843
  ExecCtx::Run(DEBUG_LOCATION, on_complete_, GRPC_ERROR_CANCELLED);
1638
844
  // Hop back into the work_serializer to clean up.
1639
- chand_->work_serializer_->Run([this]() { RemoveWatcherLocked(); },
1640
- DEBUG_LOCATION);
845
+ chand_->work_serializer_->Run(
846
+ [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
847
+ RemoveWatcherLocked();
848
+ },
849
+ DEBUG_LOCATION);
1641
850
  }
1642
851
 
1643
- void ChannelData::ExternalConnectivityWatcher::AddWatcherLocked() {
852
+ void ClientChannel::ExternalConnectivityWatcher::AddWatcherLocked() {
1644
853
  Closure::Run(DEBUG_LOCATION, watcher_timer_init_, GRPC_ERROR_NONE);
1645
854
  // Add new watcher. Pass the ref of the object from creation to OrphanablePtr.
1646
855
  chand_->state_tracker_.AddWatcher(
1647
856
  initial_state_, OrphanablePtr<ConnectivityStateWatcherInterface>(this));
1648
857
  }
1649
858
 
1650
- void ChannelData::ExternalConnectivityWatcher::RemoveWatcherLocked() {
859
+ void ClientChannel::ExternalConnectivityWatcher::RemoveWatcherLocked() {
1651
860
  chand_->state_tracker_.RemoveWatcher(this);
1652
861
  }
1653
862
 
1654
863
  //
1655
- // ChannelData::ConnectivityWatcherAdder
864
+ // ClientChannel::ConnectivityWatcherAdder
1656
865
  //
1657
866
 
1658
- class ChannelData::ConnectivityWatcherAdder {
867
+ class ClientChannel::ConnectivityWatcherAdder {
1659
868
  public:
1660
869
  ConnectivityWatcherAdder(
1661
- ChannelData* chand, grpc_connectivity_state initial_state,
870
+ ClientChannel* chand, grpc_connectivity_state initial_state,
1662
871
  OrphanablePtr<AsyncConnectivityStateWatcherInterface> watcher)
1663
872
  : chand_(chand),
1664
873
  initial_state_(initial_state),
1665
874
  watcher_(std::move(watcher)) {
1666
875
  GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ConnectivityWatcherAdder");
1667
- chand_->work_serializer_->Run([this]() { AddWatcherLocked(); },
1668
- DEBUG_LOCATION);
876
+ chand_->work_serializer_->Run(
877
+ [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
878
+ AddWatcherLocked();
879
+ },
880
+ DEBUG_LOCATION);
1669
881
  }
1670
882
 
1671
883
  private:
1672
- void AddWatcherLocked() {
884
+ void AddWatcherLocked()
885
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1673
886
  chand_->state_tracker_.AddWatcher(initial_state_, std::move(watcher_));
1674
887
  GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "ConnectivityWatcherAdder");
1675
888
  delete this;
1676
889
  }
1677
890
 
1678
- ChannelData* chand_;
891
+ ClientChannel* chand_;
1679
892
  grpc_connectivity_state initial_state_;
1680
893
  OrphanablePtr<AsyncConnectivityStateWatcherInterface> watcher_;
1681
894
  };
1682
895
 
1683
896
  //
1684
- // ChannelData::ConnectivityWatcherRemover
897
+ // ClientChannel::ConnectivityWatcherRemover
1685
898
  //
1686
899
 
1687
- class ChannelData::ConnectivityWatcherRemover {
900
+ class ClientChannel::ConnectivityWatcherRemover {
1688
901
  public:
1689
- ConnectivityWatcherRemover(ChannelData* chand,
902
+ ConnectivityWatcherRemover(ClientChannel* chand,
1690
903
  AsyncConnectivityStateWatcherInterface* watcher)
1691
904
  : chand_(chand), watcher_(watcher) {
1692
905
  GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ConnectivityWatcherRemover");
1693
- chand_->work_serializer_->Run([this]() { RemoveWatcherLocked(); },
1694
- DEBUG_LOCATION);
906
+ chand_->work_serializer_->Run(
907
+ [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
908
+ RemoveWatcherLocked();
909
+ },
910
+ DEBUG_LOCATION);
1695
911
  }
1696
912
 
1697
913
  private:
1698
- void RemoveWatcherLocked() {
914
+ void RemoveWatcherLocked()
915
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1699
916
  chand_->state_tracker_.RemoveWatcher(watcher_);
1700
917
  GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_,
1701
918
  "ConnectivityWatcherRemover");
1702
919
  delete this;
1703
920
  }
1704
921
 
1705
- ChannelData* chand_;
922
+ ClientChannel* chand_;
1706
923
  AsyncConnectivityStateWatcherInterface* watcher_;
1707
924
  };
1708
925
 
1709
926
  //
1710
- // ChannelData::ClientChannelControlHelper
927
+ // ClientChannel::ClientChannelControlHelper
1711
928
  //
1712
929
 
1713
- class ChannelData::ClientChannelControlHelper
930
+ class ClientChannel::ClientChannelControlHelper
1714
931
  : public LoadBalancingPolicy::ChannelControlHelper {
1715
932
  public:
1716
- explicit ClientChannelControlHelper(ChannelData* chand) : chand_(chand) {
933
+ explicit ClientChannelControlHelper(ClientChannel* chand) : chand_(chand) {
1717
934
  GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "ClientChannelControlHelper");
1718
935
  }
1719
936
 
@@ -1723,11 +940,12 @@ class ChannelData::ClientChannelControlHelper
1723
940
  }
1724
941
 
1725
942
  RefCountedPtr<SubchannelInterface> CreateSubchannel(
1726
- ServerAddress address, const grpc_channel_args& args) override {
943
+ ServerAddress address, const grpc_channel_args& args) override
944
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1727
945
  if (chand_->resolver_ == nullptr) return nullptr; // Shutting down.
1728
946
  // Determine health check service name.
1729
- bool inhibit_health_checking = grpc_channel_arg_get_bool(
1730
- grpc_channel_args_find(&args, GRPC_ARG_INHIBIT_HEALTH_CHECKING), false);
947
+ bool inhibit_health_checking = grpc_channel_args_find_bool(
948
+ &args, GRPC_ARG_INHIBIT_HEALTH_CHECKING, false);
1731
949
  absl::optional<std::string> health_check_service_name;
1732
950
  if (!inhibit_health_checking) {
1733
951
  health_check_service_name = chand_->health_check_service_name_;
@@ -1766,9 +984,10 @@ class ChannelData::ClientChannelControlHelper
1766
984
 
1767
985
  void UpdateState(
1768
986
  grpc_connectivity_state state, const absl::Status& status,
1769
- std::unique_ptr<LoadBalancingPolicy::SubchannelPicker> picker) override {
987
+ std::unique_ptr<LoadBalancingPolicy::SubchannelPicker> picker) override
988
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1770
989
  if (chand_->resolver_ == nullptr) return; // Shutting down.
1771
- grpc_error* disconnect_error = chand_->disconnect_error();
990
+ grpc_error_handle disconnect_error = chand_->disconnect_error();
1772
991
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1773
992
  const char* extra = disconnect_error == GRPC_ERROR_NONE
1774
993
  ? ""
@@ -1784,7 +1003,8 @@ class ChannelData::ClientChannelControlHelper
1784
1003
  }
1785
1004
  }
1786
1005
 
1787
- void RequestReresolution() override {
1006
+ void RequestReresolution() override
1007
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1788
1008
  if (chand_->resolver_ == nullptr) return; // Shutting down.
1789
1009
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1790
1010
  gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand_);
@@ -1792,8 +1012,8 @@ class ChannelData::ClientChannelControlHelper
1792
1012
  chand_->resolver_->RequestReresolutionLocked();
1793
1013
  }
1794
1014
 
1795
- void AddTraceEvent(TraceSeverity severity,
1796
- absl::string_view message) override {
1015
+ void AddTraceEvent(TraceSeverity severity, absl::string_view message) override
1016
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
1797
1017
  if (chand_->resolver_ == nullptr) return; // Shutting down.
1798
1018
  if (chand_->channelz_node_ != nullptr) {
1799
1019
  chand_->channelz_node_->AddTraceEvent(
@@ -1810,42 +1030,44 @@ class ChannelData::ClientChannelControlHelper
1810
1030
  return channelz::ChannelTrace::Error;
1811
1031
  }
1812
1032
 
1813
- ChannelData* chand_;
1033
+ ClientChannel* chand_;
1814
1034
  };
1815
1035
 
1816
1036
  //
1817
- // ChannelData implementation
1037
+ // ClientChannel implementation
1818
1038
  //
1819
1039
 
1820
- grpc_error* ChannelData::Init(grpc_channel_element* elem,
1821
- grpc_channel_element_args* args) {
1040
+ ClientChannel* ClientChannel::GetFromChannel(grpc_channel* channel) {
1041
+ grpc_channel_element* elem =
1042
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
1043
+ if (elem->filter != &kFilterVtable) return nullptr;
1044
+ return static_cast<ClientChannel*>(elem->channel_data);
1045
+ }
1046
+
1047
+ grpc_error_handle ClientChannel::Init(grpc_channel_element* elem,
1048
+ grpc_channel_element_args* args) {
1822
1049
  GPR_ASSERT(args->is_last);
1823
- GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
1824
- grpc_error* error = GRPC_ERROR_NONE;
1825
- new (elem->channel_data) ChannelData(args, &error);
1050
+ GPR_ASSERT(elem->filter == &kFilterVtable);
1051
+ grpc_error_handle error = GRPC_ERROR_NONE;
1052
+ new (elem->channel_data) ClientChannel(args, &error);
1826
1053
  return error;
1827
1054
  }
1828
1055
 
1829
- void ChannelData::Destroy(grpc_channel_element* elem) {
1830
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
1831
- chand->~ChannelData();
1056
+ void ClientChannel::Destroy(grpc_channel_element* elem) {
1057
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
1058
+ chand->~ClientChannel();
1832
1059
  }
1833
1060
 
1834
- bool GetEnableRetries(const grpc_channel_args* args) {
1835
- return grpc_channel_arg_get_bool(
1836
- grpc_channel_args_find(args, GRPC_ARG_ENABLE_RETRIES), true);
1837
- }
1061
+ namespace {
1838
1062
 
1839
- size_t GetMaxPerRpcRetryBufferSize(const grpc_channel_args* args) {
1840
- return static_cast<size_t>(grpc_channel_arg_get_integer(
1841
- grpc_channel_args_find(args, GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE),
1842
- {DEFAULT_PER_RPC_RETRY_BUFFER_SIZE, 0, INT_MAX}));
1063
+ bool GetEnableRetries(const grpc_channel_args* args) {
1064
+ return grpc_channel_args_find_bool(args, GRPC_ARG_ENABLE_RETRIES, false);
1843
1065
  }
1844
1066
 
1845
1067
  RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
1846
1068
  const grpc_channel_args* args) {
1847
- const bool use_local_subchannel_pool = grpc_channel_arg_get_bool(
1848
- grpc_channel_args_find(args, GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL), false);
1069
+ const bool use_local_subchannel_pool = grpc_channel_args_find_bool(
1070
+ args, GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, false);
1849
1071
  if (use_local_subchannel_pool) {
1850
1072
  return MakeRefCounted<LocalSubchannelPool>();
1851
1073
  }
@@ -1853,26 +1075,23 @@ RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
1853
1075
  }
1854
1076
 
1855
1077
  channelz::ChannelNode* GetChannelzNode(const grpc_channel_args* args) {
1856
- const grpc_arg* arg =
1857
- grpc_channel_args_find(args, GRPC_ARG_CHANNELZ_CHANNEL_NODE);
1858
- if (arg != nullptr && arg->type == GRPC_ARG_POINTER) {
1859
- return static_cast<channelz::ChannelNode*>(arg->value.pointer.p);
1860
- }
1861
- return nullptr;
1078
+ return grpc_channel_args_find_pointer<channelz::ChannelNode>(
1079
+ args, GRPC_ARG_CHANNELZ_CHANNEL_NODE);
1862
1080
  }
1863
1081
 
1864
- ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1082
+ } // namespace
1083
+
1084
+ ClientChannel::ClientChannel(grpc_channel_element_args* args,
1085
+ grpc_error_handle* error)
1865
1086
  : deadline_checking_enabled_(
1866
1087
  grpc_deadline_checking_enabled(args->channel_args)),
1867
1088
  enable_retries_(GetEnableRetries(args->channel_args)),
1868
- per_rpc_retry_buffer_size_(
1869
- GetMaxPerRpcRetryBufferSize(args->channel_args)),
1870
1089
  owning_stack_(args->channel_stack),
1871
1090
  client_channel_factory_(
1872
1091
  ClientChannelFactory::GetFromChannelArgs(args->channel_args)),
1873
1092
  channelz_node_(GetChannelzNode(args->channel_args)),
1874
- work_serializer_(std::make_shared<WorkSerializer>()),
1875
1093
  interested_parties_(grpc_pollset_set_create()),
1094
+ work_serializer_(std::make_shared<WorkSerializer>()),
1876
1095
  state_tracker_("client_channel", GRPC_CHANNEL_IDLE),
1877
1096
  subchannel_pool_(GetSubchannelPool(args->channel_args)),
1878
1097
  disconnect_error_(GRPC_ERROR_NONE) {
@@ -1889,8 +1108,8 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1889
1108
  return;
1890
1109
  }
1891
1110
  // Get server name to resolve, using proxy mapper if needed.
1892
- const char* server_uri = grpc_channel_arg_get_string(
1893
- grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI));
1111
+ const char* server_uri =
1112
+ grpc_channel_args_find_string(args->channel_args, GRPC_ARG_SERVER_URI);
1894
1113
  if (server_uri == nullptr) {
1895
1114
  *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1896
1115
  "server URI channel arg missing or wrong type in client channel "
@@ -1899,8 +1118,8 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1899
1118
  }
1900
1119
  // Get default service config. If none is specified via the client API,
1901
1120
  // we use an empty config.
1902
- const char* service_config_json = grpc_channel_arg_get_string(
1903
- grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVICE_CONFIG));
1121
+ const char* service_config_json = grpc_channel_args_find_string(
1122
+ args->channel_args, GRPC_ARG_SERVICE_CONFIG);
1904
1123
  if (service_config_json == nullptr) service_config_json = "{}";
1905
1124
  *error = GRPC_ERROR_NONE;
1906
1125
  default_service_config_ =
@@ -1937,7 +1156,7 @@ ChannelData::ChannelData(grpc_channel_element_args* args, grpc_error** error)
1937
1156
  *error = GRPC_ERROR_NONE;
1938
1157
  }
1939
1158
 
1940
- ChannelData::~ChannelData() {
1159
+ ClientChannel::~ClientChannel() {
1941
1160
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
1942
1161
  gpr_log(GPR_INFO, "chand=%p: destroying channel", this);
1943
1162
  }
@@ -1950,6 +1169,16 @@ ChannelData::~ChannelData() {
1950
1169
  GRPC_ERROR_UNREF(disconnect_error_.Load(MemoryOrder::RELAXED));
1951
1170
  }
1952
1171
 
1172
+ RefCountedPtr<ClientChannel::LoadBalancedCall>
1173
+ ClientChannel::CreateLoadBalancedCall(
1174
+ const grpc_call_element_args& args, grpc_polling_entity* pollent,
1175
+ grpc_closure* on_call_destruction_complete) {
1176
+ return args.arena->New<LoadBalancedCall>(this, args, pollent,
1177
+ on_call_destruction_complete);
1178
+ }
1179
+
1180
+ namespace {
1181
+
1953
1182
  RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
1954
1183
  const Resolver::Result& resolver_result,
1955
1184
  const internal::ClientChannelGlobalParsedConfig* parsed_service_config) {
@@ -1963,9 +1192,8 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
1963
1192
  if (!parsed_service_config->parsed_deprecated_lb_policy().empty()) {
1964
1193
  policy_name = parsed_service_config->parsed_deprecated_lb_policy().c_str();
1965
1194
  } else {
1966
- const grpc_arg* channel_arg =
1967
- grpc_channel_args_find(resolver_result.args, GRPC_ARG_LB_POLICY_NAME);
1968
- policy_name = grpc_channel_arg_get_string(channel_arg);
1195
+ policy_name = grpc_channel_args_find_string(resolver_result.args,
1196
+ GRPC_ARG_LB_POLICY_NAME);
1969
1197
  }
1970
1198
  // Use pick_first if nothing was specified and we didn't select grpclb
1971
1199
  // above.
@@ -1974,7 +1202,7 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
1974
1202
  Json config_json = Json::Array{Json::Object{
1975
1203
  {policy_name, Json::Object{}},
1976
1204
  }};
1977
- grpc_error* parse_error = GRPC_ERROR_NONE;
1205
+ grpc_error_handle parse_error = GRPC_ERROR_NONE;
1978
1206
  auto lb_policy_config = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(
1979
1207
  config_json, &parse_error);
1980
1208
  // The policy name came from one of three places:
@@ -1994,7 +1222,9 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
1994
1222
  return lb_policy_config;
1995
1223
  }
1996
1224
 
1997
- void ChannelData::OnResolverResultChangedLocked(Resolver::Result result) {
1225
+ } // namespace
1226
+
1227
+ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
1998
1228
  // Handle race conditions.
1999
1229
  if (resolver_ == nullptr) return;
2000
1230
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
@@ -2017,21 +1247,19 @@ void ChannelData::OnResolverResultChangedLocked(Resolver::Result result) {
2017
1247
  trace_strings.push_back("Address list became non-empty");
2018
1248
  }
2019
1249
  previous_resolution_contained_addresses_ = !result.addresses.empty();
2020
- // The result of grpc_error_string() is owned by the error itself.
2021
- // We're storing that string in trace_strings, so we need to make sure
2022
- // that the error lives until we're done with the string.
2023
- grpc_error* service_config_error =
2024
- GRPC_ERROR_REF(result.service_config_error);
2025
- if (service_config_error != GRPC_ERROR_NONE) {
2026
- trace_strings.push_back(grpc_error_string(service_config_error));
1250
+ std::string service_config_error_string_storage;
1251
+ if (result.service_config_error != GRPC_ERROR_NONE) {
1252
+ service_config_error_string_storage =
1253
+ grpc_error_std_string(result.service_config_error);
1254
+ trace_strings.push_back(service_config_error_string_storage.c_str());
2027
1255
  }
2028
1256
  // Choose the service config.
2029
1257
  RefCountedPtr<ServiceConfig> service_config;
2030
1258
  RefCountedPtr<ConfigSelector> config_selector;
2031
- if (service_config_error != GRPC_ERROR_NONE) {
1259
+ if (result.service_config_error != GRPC_ERROR_NONE) {
2032
1260
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
2033
1261
  gpr_log(GPR_INFO, "chand=%p: resolver returned service config error: %s",
2034
- this, grpc_error_string(service_config_error));
1262
+ this, grpc_error_std_string(result.service_config_error).c_str());
2035
1263
  }
2036
1264
  // If the service config was invalid, then fallback to the
2037
1265
  // previously returned service config.
@@ -2048,7 +1276,7 @@ void ChannelData::OnResolverResultChangedLocked(Resolver::Result result) {
2048
1276
  // We received an invalid service config and we don't have a
2049
1277
  // previous service config to fall back to. Put the channel into
2050
1278
  // TRANSIENT_FAILURE.
2051
- OnResolverErrorLocked(GRPC_ERROR_REF(service_config_error));
1279
+ OnResolverErrorLocked(GRPC_ERROR_REF(result.service_config_error));
2052
1280
  trace_strings.push_back("no valid service config");
2053
1281
  }
2054
1282
  } else if (result.service_config == nullptr) {
@@ -2113,24 +1341,24 @@ void ChannelData::OnResolverResultChangedLocked(Resolver::Result result) {
2113
1341
  grpc_slice_from_cpp_string(message));
2114
1342
  }
2115
1343
  }
2116
- GRPC_ERROR_UNREF(service_config_error);
2117
1344
  }
2118
1345
 
2119
- void ChannelData::OnResolverErrorLocked(grpc_error* error) {
1346
+ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
2120
1347
  if (resolver_ == nullptr) {
2121
1348
  GRPC_ERROR_UNREF(error);
2122
1349
  return;
2123
1350
  }
2124
1351
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
2125
1352
  gpr_log(GPR_INFO, "chand=%p: resolver transient failure: %s", this,
2126
- grpc_error_string(error));
1353
+ grpc_error_std_string(error).c_str());
2127
1354
  }
2128
1355
  // If we already have an LB policy from a previous resolution
2129
1356
  // result, then we continue to let it set the connectivity state.
2130
1357
  // Otherwise, we go into TRANSIENT_FAILURE.
2131
1358
  if (lb_policy_ == nullptr) {
2132
- grpc_error* state_error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
2133
- "Resolver transient failure", &error, 1);
1359
+ grpc_error_handle state_error =
1360
+ GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1361
+ "Resolver transient failure", &error, 1);
2134
1362
  {
2135
1363
  MutexLock lock(&resolution_mu_);
2136
1364
  // Update resolver transient failure.
@@ -2141,7 +1369,7 @@ void ChannelData::OnResolverErrorLocked(grpc_error* error) {
2141
1369
  call = call->next) {
2142
1370
  grpc_call_element* elem = call->elem;
2143
1371
  CallData* calld = static_cast<CallData*>(elem->call_data);
2144
- grpc_error* error = GRPC_ERROR_NONE;
1372
+ grpc_error_handle error = GRPC_ERROR_NONE;
2145
1373
  if (calld->CheckResolutionLocked(elem, &error)) {
2146
1374
  calld->AsyncResolutionDone(elem, error);
2147
1375
  }
@@ -2157,7 +1385,7 @@ void ChannelData::OnResolverErrorLocked(grpc_error* error) {
2157
1385
  GRPC_ERROR_UNREF(error);
2158
1386
  }
2159
1387
 
2160
- void ChannelData::CreateOrUpdateLbPolicyLocked(
1388
+ void ClientChannel::CreateOrUpdateLbPolicyLocked(
2161
1389
  RefCountedPtr<LoadBalancingPolicy::Config> lb_policy_config,
2162
1390
  Resolver::Result result) {
2163
1391
  // Construct update.
@@ -2183,7 +1411,7 @@ void ChannelData::CreateOrUpdateLbPolicyLocked(
2183
1411
  }
2184
1412
 
2185
1413
  // Creates a new LB policy.
2186
- OrphanablePtr<LoadBalancingPolicy> ChannelData::CreateLbPolicyLocked(
1414
+ OrphanablePtr<LoadBalancingPolicy> ClientChannel::CreateLbPolicyLocked(
2187
1415
  const grpc_channel_args& args) {
2188
1416
  LoadBalancingPolicy::Args lb_policy_args;
2189
1417
  lb_policy_args.work_serializer = work_serializer_;
@@ -2202,8 +1430,8 @@ OrphanablePtr<LoadBalancingPolicy> ChannelData::CreateLbPolicyLocked(
2202
1430
  return lb_policy;
2203
1431
  }
2204
1432
 
2205
- void ChannelData::AddResolverQueuedCall(ResolverQueuedCall* call,
2206
- grpc_polling_entity* pollent) {
1433
+ void ClientChannel::AddResolverQueuedCall(ResolverQueuedCall* call,
1434
+ grpc_polling_entity* pollent) {
2207
1435
  // Add call to queued calls list.
2208
1436
  call->next = resolver_queued_calls_;
2209
1437
  resolver_queued_calls_ = call;
@@ -2212,8 +1440,8 @@ void ChannelData::AddResolverQueuedCall(ResolverQueuedCall* call,
2212
1440
  grpc_polling_entity_add_to_pollset_set(pollent, interested_parties_);
2213
1441
  }
2214
1442
 
2215
- void ChannelData::RemoveResolverQueuedCall(ResolverQueuedCall* to_remove,
2216
- grpc_polling_entity* pollent) {
1443
+ void ClientChannel::RemoveResolverQueuedCall(ResolverQueuedCall* to_remove,
1444
+ grpc_polling_entity* pollent) {
2217
1445
  // Remove call's pollent from channel's interested_parties.
2218
1446
  grpc_polling_entity_del_from_pollset_set(pollent, interested_parties_);
2219
1447
  // Remove from queued calls list.
@@ -2226,7 +1454,7 @@ void ChannelData::RemoveResolverQueuedCall(ResolverQueuedCall* to_remove,
2226
1454
  }
2227
1455
  }
2228
1456
 
2229
- void ChannelData::UpdateServiceConfigInControlPlaneLocked(
1457
+ void ClientChannel::UpdateServiceConfigInControlPlaneLocked(
2230
1458
  RefCountedPtr<ServiceConfig> service_config,
2231
1459
  RefCountedPtr<ConfigSelector> config_selector,
2232
1460
  const internal::ClientChannelGlobalParsedConfig* parsed_service_config,
@@ -2266,7 +1494,7 @@ void ChannelData::UpdateServiceConfigInControlPlaneLocked(
2266
1494
  }
2267
1495
  }
2268
1496
 
2269
- void ChannelData::UpdateServiceConfigInDataPlaneLocked() {
1497
+ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
2270
1498
  // Grab ref to service config.
2271
1499
  RefCountedPtr<ServiceConfig> service_config = saved_service_config_;
2272
1500
  // Grab ref to config selector. Use default if resolver didn't supply one.
@@ -2279,33 +1507,22 @@ void ChannelData::UpdateServiceConfigInDataPlaneLocked() {
2279
1507
  config_selector =
2280
1508
  MakeRefCounted<DefaultConfigSelector>(saved_service_config_);
2281
1509
  }
2282
- // Get retry throttle data from service config.
2283
- const internal::ClientChannelGlobalParsedConfig* parsed_service_config =
2284
- static_cast<const internal::ClientChannelGlobalParsedConfig*>(
2285
- saved_service_config_->GetGlobalParsedConfig(
2286
- internal::ClientChannelServiceConfigParser::ParserIndex()));
2287
- absl::optional<internal::ClientChannelGlobalParsedConfig::RetryThrottling>
2288
- retry_throttle_config = parsed_service_config->retry_throttling();
2289
- RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
2290
- if (retry_throttle_config.has_value()) {
2291
- retry_throttle_data = internal::ServerRetryThrottleMap::GetDataForServer(
2292
- server_name_, retry_throttle_config.value().max_milli_tokens,
2293
- retry_throttle_config.value().milli_token_ratio);
2294
- }
2295
1510
  // Construct dynamic filter stack.
2296
1511
  std::vector<const grpc_channel_filter*> filters =
2297
1512
  config_selector->GetFilters();
2298
- filters.push_back(
2299
- &DynamicTerminationFilterChannelData::kDynamicTerminationFilterVtable);
2300
- absl::InlinedVector<grpc_arg, 2> args_to_add;
2301
- args_to_add.push_back(grpc_channel_arg_pointer_create(
2302
- const_cast<char*>(GRPC_ARG_CLIENT_CHANNEL_DATA), this,
2303
- &kChannelDataArgPointerVtable));
2304
- if (retry_throttle_data != nullptr) {
2305
- args_to_add.push_back(grpc_channel_arg_pointer_create(
2306
- const_cast<char*>(GRPC_ARG_RETRY_THROTTLE_DATA),
2307
- retry_throttle_data.get(), &kRetryThrottleDataArgPointerVtable));
2308
- }
1513
+ if (enable_retries_) {
1514
+ filters.push_back(&kRetryFilterVtable);
1515
+ } else {
1516
+ filters.push_back(&DynamicTerminationFilter::kFilterVtable);
1517
+ }
1518
+ absl::InlinedVector<grpc_arg, 2> args_to_add = {
1519
+ grpc_channel_arg_pointer_create(
1520
+ const_cast<char*>(GRPC_ARG_CLIENT_CHANNEL), this,
1521
+ &kClientChannelArgPointerVtable),
1522
+ grpc_channel_arg_pointer_create(
1523
+ const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_OBJ), service_config.get(),
1524
+ &kServiceConfigObjArgPointerVtable),
1525
+ };
2309
1526
  grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
2310
1527
  channel_args_, args_to_add.data(), args_to_add.size());
2311
1528
  new_args = config_selector->ModifyChannelArgs(new_args);
@@ -2333,7 +1550,7 @@ void ChannelData::UpdateServiceConfigInDataPlaneLocked() {
2333
1550
  call = call->next) {
2334
1551
  grpc_call_element* elem = call->elem;
2335
1552
  CallData* calld = static_cast<CallData*>(elem->call_data);
2336
- grpc_error* error = GRPC_ERROR_NONE;
1553
+ grpc_error_handle error = GRPC_ERROR_NONE;
2337
1554
  if (calld->CheckResolutionLocked(elem, &error)) {
2338
1555
  calld->AsyncResolutionDone(elem, error);
2339
1556
  }
@@ -2343,7 +1560,7 @@ void ChannelData::UpdateServiceConfigInDataPlaneLocked() {
2343
1560
  // of scope.
2344
1561
  }
2345
1562
 
2346
- void ChannelData::CreateResolverLocked() {
1563
+ void ClientChannel::CreateResolverLocked() {
2347
1564
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
2348
1565
  gpr_log(GPR_INFO, "chand=%p: starting name resolution", this);
2349
1566
  }
@@ -2362,7 +1579,7 @@ void ChannelData::CreateResolverLocked() {
2362
1579
  }
2363
1580
  }
2364
1581
 
2365
- void ChannelData::DestroyResolverAndLbPolicyLocked() {
1582
+ void ClientChannel::DestroyResolverAndLbPolicyLocked() {
2366
1583
  if (resolver_ != nullptr) {
2367
1584
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
2368
1585
  gpr_log(GPR_INFO, "chand=%p: shutting down resolver=%p", this,
@@ -2381,7 +1598,7 @@ void ChannelData::DestroyResolverAndLbPolicyLocked() {
2381
1598
  }
2382
1599
  }
2383
1600
 
2384
- void ChannelData::UpdateStateAndPickerLocked(
1601
+ void ClientChannel::UpdateStateAndPickerLocked(
2385
1602
  grpc_connectivity_state state, const absl::Status& status,
2386
1603
  const char* reason,
2387
1604
  std::unique_ptr<LoadBalancingPolicy::SubchannelPicker> picker) {
@@ -2443,7 +1660,7 @@ void ChannelData::UpdateStateAndPickerLocked(
2443
1660
  // Re-process queued picks.
2444
1661
  for (LbQueuedCall* call = lb_queued_calls_; call != nullptr;
2445
1662
  call = call->next) {
2446
- grpc_error* error = GRPC_ERROR_NONE;
1663
+ grpc_error_handle error = GRPC_ERROR_NONE;
2447
1664
  if (call->lb_call->PickSubchannelLocked(&error)) {
2448
1665
  call->lb_call->AsyncPickDone(error);
2449
1666
  }
@@ -2454,7 +1671,7 @@ void ChannelData::UpdateStateAndPickerLocked(
2454
1671
  pending_subchannel_updates_.clear();
2455
1672
  }
2456
1673
 
2457
- grpc_error* ChannelData::DoPingLocked(grpc_transport_op* op) {
1674
+ grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
2458
1675
  if (state_tracker_.state() != GRPC_CHANNEL_READY) {
2459
1676
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("channel not connected");
2460
1677
  }
@@ -2480,7 +1697,7 @@ grpc_error* ChannelData::DoPingLocked(grpc_transport_op* op) {
2480
1697
  return result.error;
2481
1698
  }
2482
1699
 
2483
- void ChannelData::StartTransportOpLocked(grpc_transport_op* op) {
1700
+ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
2484
1701
  // Connectivity watch.
2485
1702
  if (op->start_connectivity_watch != nullptr) {
2486
1703
  state_tracker_.AddWatcher(op->start_connectivity_watch_state,
@@ -2491,7 +1708,7 @@ void ChannelData::StartTransportOpLocked(grpc_transport_op* op) {
2491
1708
  }
2492
1709
  // Ping.
2493
1710
  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
2494
- grpc_error* error = DoPingLocked(op);
1711
+ grpc_error_handle error = DoPingLocked(op);
2495
1712
  if (error != GRPC_ERROR_NONE) {
2496
1713
  ExecCtx::Run(DEBUG_LOCATION, op->send_ping.on_initiate,
2497
1714
  GRPC_ERROR_REF(error));
@@ -2511,7 +1728,7 @@ void ChannelData::StartTransportOpLocked(grpc_transport_op* op) {
2511
1728
  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
2512
1729
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
2513
1730
  gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", this,
2514
- grpc_error_string(op->disconnect_with_error));
1731
+ grpc_error_std_string(op->disconnect_with_error).c_str());
2515
1732
  }
2516
1733
  DestroyResolverAndLbPolicyLocked();
2517
1734
  intptr_t value;
@@ -2539,9 +1756,9 @@ void ChannelData::StartTransportOpLocked(grpc_transport_op* op) {
2539
1756
  ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, GRPC_ERROR_NONE);
2540
1757
  }
2541
1758
 
2542
- void ChannelData::StartTransportOp(grpc_channel_element* elem,
2543
- grpc_transport_op* op) {
2544
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
1759
+ void ClientChannel::StartTransportOp(grpc_channel_element* elem,
1760
+ grpc_transport_op* op) {
1761
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
2545
1762
  GPR_ASSERT(op->set_accept_stream == false);
2546
1763
  // Handle bind_pollset.
2547
1764
  if (op->bind_pollset != nullptr) {
@@ -2550,12 +1767,15 @@ void ChannelData::StartTransportOp(grpc_channel_element* elem,
2550
1767
  // Pop into control plane work_serializer for remaining ops.
2551
1768
  GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "start_transport_op");
2552
1769
  chand->work_serializer_->Run(
2553
- [chand, op]() { chand->StartTransportOpLocked(op); }, DEBUG_LOCATION);
1770
+ [chand, op]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand->work_serializer_) {
1771
+ chand->StartTransportOpLocked(op);
1772
+ },
1773
+ DEBUG_LOCATION);
2554
1774
  }
2555
1775
 
2556
- void ChannelData::GetChannelInfo(grpc_channel_element* elem,
2557
- const grpc_channel_info* info) {
2558
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
1776
+ void ClientChannel::GetChannelInfo(grpc_channel_element* elem,
1777
+ const grpc_channel_info* info) {
1778
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
2559
1779
  MutexLock lock(&chand->info_mu_);
2560
1780
  if (info->lb_policy_name != nullptr) {
2561
1781
  *info->lb_policy_name = gpr_strdup(chand->info_lb_policy_name_.get());
@@ -2566,8 +1786,8 @@ void ChannelData::GetChannelInfo(grpc_channel_element* elem,
2566
1786
  }
2567
1787
  }
2568
1788
 
2569
- void ChannelData::AddLbQueuedCall(LbQueuedCall* call,
2570
- grpc_polling_entity* pollent) {
1789
+ void ClientChannel::AddLbQueuedCall(LbQueuedCall* call,
1790
+ grpc_polling_entity* pollent) {
2571
1791
  // Add call to queued picks list.
2572
1792
  call->next = lb_queued_calls_;
2573
1793
  lb_queued_calls_ = call;
@@ -2576,8 +1796,8 @@ void ChannelData::AddLbQueuedCall(LbQueuedCall* call,
2576
1796
  grpc_polling_entity_add_to_pollset_set(pollent, interested_parties_);
2577
1797
  }
2578
1798
 
2579
- void ChannelData::RemoveLbQueuedCall(LbQueuedCall* to_remove,
2580
- grpc_polling_entity* pollent) {
1799
+ void ClientChannel::RemoveLbQueuedCall(LbQueuedCall* to_remove,
1800
+ grpc_polling_entity* pollent) {
2581
1801
  // Remove call's pollent from channel's interested_parties.
2582
1802
  grpc_polling_entity_del_from_pollset_set(pollent, interested_parties_);
2583
1803
  // Remove from queued picks list.
@@ -2591,7 +1811,7 @@ void ChannelData::RemoveLbQueuedCall(LbQueuedCall* to_remove,
2591
1811
  }
2592
1812
 
2593
1813
  RefCountedPtr<ConnectedSubchannel>
2594
- ChannelData::GetConnectedSubchannelInDataPlane(
1814
+ ClientChannel::GetConnectedSubchannelInDataPlane(
2595
1815
  SubchannelInterface* subchannel) const {
2596
1816
  SubchannelWrapper* subchannel_wrapper =
2597
1817
  static_cast<SubchannelWrapper*>(subchannel);
@@ -2601,7 +1821,7 @@ ChannelData::GetConnectedSubchannelInDataPlane(
2601
1821
  return connected_subchannel->Ref();
2602
1822
  }
2603
1823
 
2604
- void ChannelData::TryToConnectLocked() {
1824
+ void ClientChannel::TryToConnectLocked() {
2605
1825
  if (lb_policy_ != nullptr) {
2606
1826
  lb_policy_->ExitIdleLocked();
2607
1827
  } else if (resolver_ == nullptr) {
@@ -2610,23 +1830,29 @@ void ChannelData::TryToConnectLocked() {
2610
1830
  GRPC_CHANNEL_STACK_UNREF(owning_stack_, "TryToConnect");
2611
1831
  }
2612
1832
 
2613
- grpc_connectivity_state ChannelData::CheckConnectivityState(
1833
+ grpc_connectivity_state ClientChannel::CheckConnectivityState(
2614
1834
  bool try_to_connect) {
2615
- grpc_connectivity_state out = state_tracker_.state();
1835
+ // state_tracker_ is guarded by work_serializer_, which we're not
1836
+ // holding here. But the one method of state_tracker_ that *is*
1837
+ // thread-safe to call without external synchronization is the state()
1838
+ // method, so we can disable thread-safety analysis for this one read.
1839
+ grpc_connectivity_state out = ABSL_TS_UNCHECKED_READ(state_tracker_).state();
2616
1840
  if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
2617
1841
  GRPC_CHANNEL_STACK_REF(owning_stack_, "TryToConnect");
2618
- work_serializer_->Run([this]() { TryToConnectLocked(); }, DEBUG_LOCATION);
1842
+ work_serializer_->Run([this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
1843
+ work_serializer_) { TryToConnectLocked(); },
1844
+ DEBUG_LOCATION);
2619
1845
  }
2620
1846
  return out;
2621
1847
  }
2622
1848
 
2623
- void ChannelData::AddConnectivityWatcher(
1849
+ void ClientChannel::AddConnectivityWatcher(
2624
1850
  grpc_connectivity_state initial_state,
2625
1851
  OrphanablePtr<AsyncConnectivityStateWatcherInterface> watcher) {
2626
1852
  new ConnectivityWatcherAdder(this, initial_state, std::move(watcher));
2627
1853
  }
2628
1854
 
2629
- void ChannelData::RemoveConnectivityWatcher(
1855
+ void ClientChannel::RemoveConnectivityWatcher(
2630
1856
  AsyncConnectivityStateWatcherInterface* watcher) {
2631
1857
  new ConnectivityWatcherRemover(this, watcher);
2632
1858
  }
@@ -2635,9 +1861,9 @@ void ChannelData::RemoveConnectivityWatcher(
2635
1861
  // CallData implementation
2636
1862
  //
2637
1863
 
2638
- ChannelData::CallData::CallData(grpc_call_element* elem,
2639
- const ChannelData& chand,
2640
- const grpc_call_element_args& args)
1864
+ ClientChannel::CallData::CallData(grpc_call_element* elem,
1865
+ const ClientChannel& chand,
1866
+ const grpc_call_element_args& args)
2641
1867
  : deadline_state_(elem, args,
2642
1868
  GPR_LIKELY(chand.deadline_checking_enabled_)
2643
1869
  ? args.deadline
@@ -2654,7 +1880,7 @@ ChannelData::CallData::CallData(grpc_call_element* elem,
2654
1880
  }
2655
1881
  }
2656
1882
 
2657
- ChannelData::CallData::~CallData() {
1883
+ ClientChannel::CallData::~CallData() {
2658
1884
  grpc_slice_unref_internal(path_);
2659
1885
  GRPC_ERROR_UNREF(cancel_error_);
2660
1886
  // Make sure there are no remaining pending batches.
@@ -2663,16 +1889,16 @@ ChannelData::CallData::~CallData() {
2663
1889
  }
2664
1890
  }
2665
1891
 
2666
- grpc_error* ChannelData::CallData::Init(grpc_call_element* elem,
2667
- const grpc_call_element_args* args) {
2668
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
1892
+ grpc_error_handle ClientChannel::CallData::Init(
1893
+ grpc_call_element* elem, const grpc_call_element_args* args) {
1894
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
2669
1895
  new (elem->call_data) CallData(elem, *chand, *args);
2670
1896
  return GRPC_ERROR_NONE;
2671
1897
  }
2672
1898
 
2673
- void ChannelData::CallData::Destroy(grpc_call_element* elem,
2674
- const grpc_call_final_info* /*final_info*/,
2675
- grpc_closure* then_schedule_closure) {
1899
+ void ClientChannel::CallData::Destroy(
1900
+ grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
1901
+ grpc_closure* then_schedule_closure) {
2676
1902
  CallData* calld = static_cast<CallData*>(elem->call_data);
2677
1903
  RefCountedPtr<DynamicFilters::Call> dynamic_call =
2678
1904
  std::move(calld->dynamic_call_);
@@ -2685,11 +1911,11 @@ void ChannelData::CallData::Destroy(grpc_call_element* elem,
2685
1911
  }
2686
1912
  }
2687
1913
 
2688
- void ChannelData::CallData::StartTransportStreamOpBatch(
1914
+ void ClientChannel::CallData::StartTransportStreamOpBatch(
2689
1915
  grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
2690
1916
  GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
2691
1917
  CallData* calld = static_cast<CallData*>(elem->call_data);
2692
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
1918
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
2693
1919
  if (GPR_LIKELY(chand->deadline_checking_enabled_)) {
2694
1920
  grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
2695
1921
  }
@@ -2701,7 +1927,8 @@ void ChannelData::CallData::StartTransportStreamOpBatch(
2701
1927
  if (GPR_UNLIKELY(calld->cancel_error_ != GRPC_ERROR_NONE)) {
2702
1928
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
2703
1929
  gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
2704
- chand, calld, grpc_error_string(calld->cancel_error_));
1930
+ chand, calld,
1931
+ grpc_error_std_string(calld->cancel_error_).c_str());
2705
1932
  }
2706
1933
  // Note: This will release the call combiner.
2707
1934
  grpc_transport_stream_op_batch_finish_with_failure(
@@ -2720,7 +1947,7 @@ void ChannelData::CallData::StartTransportStreamOpBatch(
2720
1947
  GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
2721
1948
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
2722
1949
  gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
2723
- calld, grpc_error_string(calld->cancel_error_));
1950
+ calld, grpc_error_std_string(calld->cancel_error_).c_str());
2724
1951
  }
2725
1952
  // If we do not have a dynamic call (i.e., name resolution has not
2726
1953
  // yet completed), fail all pending batches. Otherwise, send the
@@ -2774,8 +2001,8 @@ void ChannelData::CallData::StartTransportStreamOpBatch(
2774
2001
  }
2775
2002
  }
2776
2003
 
2777
- void ChannelData::CallData::SetPollent(grpc_call_element* elem,
2778
- grpc_polling_entity* pollent) {
2004
+ void ClientChannel::CallData::SetPollent(grpc_call_element* elem,
2005
+ grpc_polling_entity* pollent) {
2779
2006
  CallData* calld = static_cast<CallData*>(elem->call_data);
2780
2007
  calld->pollent_ = pollent;
2781
2008
  }
@@ -2784,10 +2011,11 @@ void ChannelData::CallData::SetPollent(grpc_call_element* elem,
2784
2011
  // pending_batches management
2785
2012
  //
2786
2013
 
2787
- size_t ChannelData::CallData::GetBatchIndex(
2014
+ size_t ClientChannel::CallData::GetBatchIndex(
2788
2015
  grpc_transport_stream_op_batch* batch) {
2789
2016
  // Note: It is important the send_initial_metadata be the first entry
2790
- // here, since the code in pick_subchannel_locked() assumes it will be.
2017
+ // here, since the code in ApplyServiceConfigToCallLocked() and
2018
+ // CheckResolutionLocked() assumes it will be.
2791
2019
  if (batch->send_initial_metadata) return 0;
2792
2020
  if (batch->send_message) return 1;
2793
2021
  if (batch->send_trailing_metadata) return 2;
@@ -2798,9 +2026,9 @@ size_t ChannelData::CallData::GetBatchIndex(
2798
2026
  }
2799
2027
 
2800
2028
  // This is called via the call combiner, so access to calld is synchronized.
2801
- void ChannelData::CallData::PendingBatchesAdd(
2029
+ void ClientChannel::CallData::PendingBatchesAdd(
2802
2030
  grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
2803
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
2031
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
2804
2032
  const size_t idx = GetBatchIndex(batch);
2805
2033
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
2806
2034
  gpr_log(GPR_INFO,
@@ -2813,8 +2041,8 @@ void ChannelData::CallData::PendingBatchesAdd(
2813
2041
  }
2814
2042
 
2815
2043
  // This is called via the call combiner, so access to calld is synchronized.
2816
- void ChannelData::CallData::FailPendingBatchInCallCombiner(void* arg,
2817
- grpc_error* error) {
2044
+ void ClientChannel::CallData::FailPendingBatchInCallCombiner(
2045
+ void* arg, grpc_error_handle error) {
2818
2046
  grpc_transport_stream_op_batch* batch =
2819
2047
  static_cast<grpc_transport_stream_op_batch*>(arg);
2820
2048
  CallData* calld = static_cast<CallData*>(batch->handler_private.extra_arg);
@@ -2824,8 +2052,8 @@ void ChannelData::CallData::FailPendingBatchInCallCombiner(void* arg,
2824
2052
  }
2825
2053
 
2826
2054
  // This is called via the call combiner, so access to calld is synchronized.
2827
- void ChannelData::CallData::PendingBatchesFail(
2828
- grpc_call_element* elem, grpc_error* error,
2055
+ void ClientChannel::CallData::PendingBatchesFail(
2056
+ grpc_call_element* elem, grpc_error_handle error,
2829
2057
  YieldCallCombinerPredicate yield_call_combiner_predicate) {
2830
2058
  GPR_ASSERT(error != GRPC_ERROR_NONE);
2831
2059
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -2835,7 +2063,8 @@ void ChannelData::CallData::PendingBatchesFail(
2835
2063
  }
2836
2064
  gpr_log(GPR_INFO,
2837
2065
  "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
2838
- elem->channel_data, this, num_batches, grpc_error_string(error));
2066
+ elem->channel_data, this, num_batches,
2067
+ grpc_error_std_string(error).c_str());
2839
2068
  }
2840
2069
  CallCombinerClosureList closures;
2841
2070
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@@ -2859,8 +2088,8 @@ void ChannelData::CallData::PendingBatchesFail(
2859
2088
  }
2860
2089
 
2861
2090
  // This is called via the call combiner, so access to calld is synchronized.
2862
- void ChannelData::CallData::ResumePendingBatchInCallCombiner(
2863
- void* arg, grpc_error* /*ignored*/) {
2091
+ void ClientChannel::CallData::ResumePendingBatchInCallCombiner(
2092
+ void* arg, grpc_error_handle /*ignored*/) {
2864
2093
  grpc_transport_stream_op_batch* batch =
2865
2094
  static_cast<grpc_transport_stream_op_batch*>(arg);
2866
2095
  auto* elem =
@@ -2871,8 +2100,8 @@ void ChannelData::CallData::ResumePendingBatchInCallCombiner(
2871
2100
  }
2872
2101
 
2873
2102
  // This is called via the call combiner, so access to calld is synchronized.
2874
- void ChannelData::CallData::PendingBatchesResume(grpc_call_element* elem) {
2875
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
2103
+ void ClientChannel::CallData::PendingBatchesResume(grpc_call_element* elem) {
2104
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
2876
2105
  // Retries not enabled; send down batches as-is.
2877
2106
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
2878
2107
  size_t num_batches = 0;
@@ -2892,7 +2121,7 @@ void ChannelData::CallData::PendingBatchesResume(grpc_call_element* elem) {
2892
2121
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
2893
2122
  ResumePendingBatchInCallCombiner, batch, nullptr);
2894
2123
  closures.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
2895
- "PendingBatchesResume");
2124
+ "resuming pending batch from client channel call");
2896
2125
  batch = nullptr;
2897
2126
  }
2898
2127
  }
@@ -2906,7 +2135,7 @@ void ChannelData::CallData::PendingBatchesResume(grpc_call_element* elem) {
2906
2135
 
2907
2136
  // A class to handle the call combiner cancellation callback for a
2908
2137
  // queued pick.
2909
- class ChannelData::CallData::ResolverQueuedCallCanceller {
2138
+ class ClientChannel::CallData::ResolverQueuedCallCanceller {
2910
2139
  public:
2911
2140
  explicit ResolverQueuedCallCanceller(grpc_call_element* elem) : elem_(elem) {
2912
2141
  auto* calld = static_cast<CallData*>(elem->call_data);
@@ -2917,9 +2146,9 @@ class ChannelData::CallData::ResolverQueuedCallCanceller {
2917
2146
  }
2918
2147
 
2919
2148
  private:
2920
- static void CancelLocked(void* arg, grpc_error* error) {
2149
+ static void CancelLocked(void* arg, grpc_error_handle error) {
2921
2150
  auto* self = static_cast<ResolverQueuedCallCanceller*>(arg);
2922
- auto* chand = static_cast<ChannelData*>(self->elem_->channel_data);
2151
+ auto* chand = static_cast<ClientChannel*>(self->elem_->channel_data);
2923
2152
  auto* calld = static_cast<CallData*>(self->elem_->call_data);
2924
2153
  {
2925
2154
  MutexLock lock(&chand->resolution_mu_);
@@ -2927,7 +2156,7 @@ class ChannelData::CallData::ResolverQueuedCallCanceller {
2927
2156
  gpr_log(GPR_INFO,
2928
2157
  "chand=%p calld=%p: cancelling resolver queued pick: "
2929
2158
  "error=%s self=%p calld->resolver_pick_canceller=%p",
2930
- chand, calld, grpc_error_string(error), self,
2159
+ chand, calld, grpc_error_std_string(error).c_str(), self,
2931
2160
  calld->resolver_call_canceller_);
2932
2161
  }
2933
2162
  if (calld->resolver_call_canceller_ == self && error != GRPC_ERROR_NONE) {
@@ -2946,10 +2175,10 @@ class ChannelData::CallData::ResolverQueuedCallCanceller {
2946
2175
  grpc_closure closure_;
2947
2176
  };
2948
2177
 
2949
- void ChannelData::CallData::MaybeRemoveCallFromResolverQueuedCallsLocked(
2178
+ void ClientChannel::CallData::MaybeRemoveCallFromResolverQueuedCallsLocked(
2950
2179
  grpc_call_element* elem) {
2951
2180
  if (!queued_pending_resolver_result_) return;
2952
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
2181
+ auto* chand = static_cast<ClientChannel*>(elem->channel_data);
2953
2182
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
2954
2183
  gpr_log(GPR_INFO,
2955
2184
  "chand=%p calld=%p: removing from resolver queued picks list",
@@ -2961,10 +2190,10 @@ void ChannelData::CallData::MaybeRemoveCallFromResolverQueuedCallsLocked(
2961
2190
  resolver_call_canceller_ = nullptr;
2962
2191
  }
2963
2192
 
2964
- void ChannelData::CallData::MaybeAddCallToResolverQueuedCallsLocked(
2193
+ void ClientChannel::CallData::MaybeAddCallToResolverQueuedCallsLocked(
2965
2194
  grpc_call_element* elem) {
2966
2195
  if (queued_pending_resolver_result_) return;
2967
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
2196
+ auto* chand = static_cast<ClientChannel*>(elem->channel_data);
2968
2197
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
2969
2198
  gpr_log(GPR_INFO, "chand=%p calld=%p: adding to resolver queued picks list",
2970
2199
  chand, this);
@@ -2976,9 +2205,9 @@ void ChannelData::CallData::MaybeAddCallToResolverQueuedCallsLocked(
2976
2205
  resolver_call_canceller_ = new ResolverQueuedCallCanceller(elem);
2977
2206
  }
2978
2207
 
2979
- grpc_error* ChannelData::CallData::ApplyServiceConfigToCallLocked(
2208
+ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
2980
2209
  grpc_call_element* elem, grpc_metadata_batch* initial_metadata) {
2981
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
2210
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
2982
2211
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
2983
2212
  gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
2984
2213
  chand, this);
@@ -3035,9 +2264,9 @@ grpc_error* ChannelData::CallData::ApplyServiceConfigToCallLocked(
3035
2264
  return GRPC_ERROR_NONE;
3036
2265
  }
3037
2266
 
3038
- void ChannelData::CallData::
3039
- RecvInitialMetadataReadyForConfigSelectorCommitCallback(void* arg,
3040
- grpc_error* error) {
2267
+ void ClientChannel::CallData::
2268
+ RecvInitialMetadataReadyForConfigSelectorCommitCallback(
2269
+ void* arg, grpc_error_handle error) {
3041
2270
  auto* self = static_cast<CallData*>(arg);
3042
2271
  if (self->on_call_committed_ != nullptr) {
3043
2272
  self->on_call_committed_();
@@ -3050,7 +2279,7 @@ void ChannelData::CallData::
3050
2279
 
3051
2280
  // TODO(roth): Consider not intercepting this callback unless we
3052
2281
  // actually need to, if this causes a performance problem.
3053
- void ChannelData::CallData::
2282
+ void ClientChannel::CallData::
3054
2283
  InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
3055
2284
  grpc_transport_stream_op_batch* batch) {
3056
2285
  original_recv_initial_metadata_ready_ =
@@ -3062,21 +2291,22 @@ void ChannelData::CallData::
3062
2291
  &recv_initial_metadata_ready_;
3063
2292
  }
3064
2293
 
3065
- void ChannelData::CallData::AsyncResolutionDone(grpc_call_element* elem,
3066
- grpc_error* error) {
2294
+ void ClientChannel::CallData::AsyncResolutionDone(grpc_call_element* elem,
2295
+ grpc_error_handle error) {
3067
2296
  GRPC_CLOSURE_INIT(&pick_closure_, ResolutionDone, elem, nullptr);
3068
2297
  ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
3069
2298
  }
3070
2299
 
3071
- void ChannelData::CallData::ResolutionDone(void* arg, grpc_error* error) {
2300
+ void ClientChannel::CallData::ResolutionDone(void* arg,
2301
+ grpc_error_handle error) {
3072
2302
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
3073
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
2303
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
3074
2304
  CallData* calld = static_cast<CallData*>(elem->call_data);
3075
2305
  if (error != GRPC_ERROR_NONE) {
3076
2306
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
3077
2307
  gpr_log(GPR_INFO,
3078
2308
  "chand=%p calld=%p: error applying config to call: error=%s",
3079
- chand, calld, grpc_error_string(error));
2309
+ chand, calld, grpc_error_std_string(error).c_str());
3080
2310
  }
3081
2311
  calld->PendingBatchesFail(elem, GRPC_ERROR_REF(error), YieldCallCombiner);
3082
2312
  return;
@@ -3084,10 +2314,11 @@ void ChannelData::CallData::ResolutionDone(void* arg, grpc_error* error) {
3084
2314
  calld->CreateDynamicCall(elem);
3085
2315
  }
3086
2316
 
3087
- void ChannelData::CallData::CheckResolution(void* arg, grpc_error* error) {
2317
+ void ClientChannel::CallData::CheckResolution(void* arg,
2318
+ grpc_error_handle error) {
3088
2319
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
3089
2320
  CallData* calld = static_cast<CallData*>(elem->call_data);
3090
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
2321
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
3091
2322
  bool resolution_complete;
3092
2323
  {
3093
2324
  MutexLock lock(&chand->resolution_mu_);
@@ -3099,9 +2330,9 @@ void ChannelData::CallData::CheckResolution(void* arg, grpc_error* error) {
3099
2330
  }
3100
2331
  }
3101
2332
 
3102
- bool ChannelData::CallData::CheckResolutionLocked(grpc_call_element* elem,
3103
- grpc_error** error) {
3104
- ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
2333
+ bool ClientChannel::CallData::CheckResolutionLocked(grpc_call_element* elem,
2334
+ grpc_error_handle* error) {
2335
+ ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
3105
2336
  // If we're still in IDLE, we need to start resolving.
3106
2337
  if (GPR_UNLIKELY(chand->CheckConnectivityState(false) == GRPC_CHANNEL_IDLE)) {
3107
2338
  // Bounce into the control plane work serializer to start resolving,
@@ -3112,14 +2343,15 @@ bool ChannelData::CallData::CheckResolutionLocked(grpc_call_element* elem,
3112
2343
  ExecCtx::Run(
3113
2344
  DEBUG_LOCATION,
3114
2345
  GRPC_CLOSURE_CREATE(
3115
- [](void* arg, grpc_error* /*error*/) {
3116
- auto* chand = static_cast<ChannelData*>(arg);
2346
+ [](void* arg, grpc_error_handle /*error*/) {
2347
+ auto* chand = static_cast<ClientChannel*>(arg);
3117
2348
  chand->work_serializer_->Run(
3118
- [chand]() {
3119
- chand->CheckConnectivityState(/*try_to_connect=*/true);
3120
- GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_,
3121
- "CheckResolutionLocked");
3122
- },
2349
+ [chand]()
2350
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand->work_serializer_) {
2351
+ chand->CheckConnectivityState(/*try_to_connect=*/true);
2352
+ GRPC_CHANNEL_STACK_UNREF(chand->owning_stack_,
2353
+ "CheckResolutionLocked");
2354
+ },
3123
2355
  DEBUG_LOCATION);
3124
2356
  },
3125
2357
  chand, nullptr),
@@ -3137,7 +2369,7 @@ bool ChannelData::CallData::CheckResolutionLocked(grpc_call_element* elem,
3137
2369
  if (GPR_UNLIKELY(!chand->received_service_config_data_)) {
3138
2370
  // If the resolver returned transient failure before returning the
3139
2371
  // first service config, fail any non-wait_for_ready calls.
3140
- grpc_error* resolver_error = chand->resolver_transient_failure_error_;
2372
+ grpc_error_handle resolver_error = chand->resolver_transient_failure_error_;
3141
2373
  if (resolver_error != GRPC_ERROR_NONE &&
3142
2374
  (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) ==
3143
2375
  0) {
@@ -3160,8 +2392,8 @@ bool ChannelData::CallData::CheckResolutionLocked(grpc_call_element* elem,
3160
2392
  return true;
3161
2393
  }
3162
2394
 
3163
- void ChannelData::CallData::CreateDynamicCall(grpc_call_element* elem) {
3164
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
2395
+ void ClientChannel::CallData::CreateDynamicCall(grpc_call_element* elem) {
2396
+ auto* chand = static_cast<ClientChannel*>(elem->channel_data);
3165
2397
  DynamicFilters::Call::Args args = {std::move(dynamic_filters_),
3166
2398
  pollent_,
3167
2399
  path_,
@@ -3170,7 +2402,7 @@ void ChannelData::CallData::CreateDynamicCall(grpc_call_element* elem) {
3170
2402
  arena_,
3171
2403
  call_context_,
3172
2404
  call_combiner_};
3173
- grpc_error* error = GRPC_ERROR_NONE;
2405
+ grpc_error_handle error = GRPC_ERROR_NONE;
3174
2406
  DynamicFilters* channel_stack = args.channel_stack.get();
3175
2407
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
3176
2408
  gpr_log(
@@ -3183,7 +2415,7 @@ void ChannelData::CallData::CreateDynamicCall(grpc_call_element* elem) {
3183
2415
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
3184
2416
  gpr_log(GPR_INFO,
3185
2417
  "chand=%p calld=%p: failed to create dynamic call: error=%s",
3186
- chand, this, grpc_error_string(error));
2418
+ chand, this, grpc_error_std_string(error).c_str());
3187
2419
  }
3188
2420
  PendingBatchesFail(elem, error, YieldCallCombiner);
3189
2421
  return;
@@ -3192,264 +2424,151 @@ void ChannelData::CallData::CreateDynamicCall(grpc_call_element* elem) {
3192
2424
  }
3193
2425
 
3194
2426
  //
3195
- // RetryingCall implementation
2427
+ // ClientChannel::LoadBalancedCall::Metadata
3196
2428
  //
3197
2429
 
3198
- // Retry support:
3199
- //
3200
- // In order to support retries, we act as a proxy for stream op batches.
3201
- // When we get a batch from the surface, we add it to our list of pending
3202
- // batches, and we then use those batches to construct separate "child"
3203
- // batches to be started on the subchannel call. When the child batches
3204
- // return, we then decide which pending batches have been completed and
3205
- // schedule their callbacks accordingly. If a subchannel call fails and
3206
- // we want to retry it, we do a new pick and start again, constructing
3207
- // new "child" batches for the new subchannel call.
3208
- //
3209
- // Note that retries are committed when receiving data from the server
3210
- // (except for Trailers-Only responses). However, there may be many
3211
- // send ops started before receiving any data, so we may have already
3212
- // completed some number of send ops (and returned the completions up to
3213
- // the surface) by the time we realize that we need to retry. To deal
3214
- // with this, we cache data for send ops, so that we can replay them on a
3215
- // different subchannel call even after we have completed the original
3216
- // batches.
3217
- //
3218
- // There are two sets of data to maintain:
3219
- // - In call_data (in the parent channel), we maintain a list of pending
3220
- // ops and cached data for send ops.
3221
- // - In the subchannel call, we maintain state to indicate what ops have
3222
- // already been sent down to that call.
3223
- //
3224
- // When constructing the "child" batches, we compare those two sets of
3225
- // data to see which batches need to be sent to the subchannel call.
3226
-
3227
- // TODO(roth): In subsequent PRs:
3228
- // - add support for transparent retries (including initial metadata)
3229
- // - figure out how to record stats in census for retries
3230
- // (census filter is on top of this one)
3231
- // - add census stats for retries
3232
-
3233
- ChannelData::RetryingCall::RetryingCall(
3234
- ChannelData* chand, const grpc_call_element_args& args,
3235
- grpc_polling_entity* pollent,
3236
- RefCountedPtr<ServerRetryThrottleData> retry_throttle_data,
3237
- const ClientChannelMethodParsedConfig::RetryPolicy* retry_policy)
3238
- : chand_(chand),
3239
- pollent_(pollent),
3240
- retry_throttle_data_(std::move(retry_throttle_data)),
3241
- retry_policy_(retry_policy),
3242
- retry_backoff_(
3243
- BackOff::Options()
3244
- .set_initial_backoff(
3245
- retry_policy_ == nullptr ? 0 : retry_policy_->initial_backoff)
3246
- .set_multiplier(retry_policy_ == nullptr
3247
- ? 0
3248
- : retry_policy_->backoff_multiplier)
3249
- .set_jitter(RETRY_BACKOFF_JITTER)
3250
- .set_max_backoff(
3251
- retry_policy_ == nullptr ? 0 : retry_policy_->max_backoff)),
3252
- path_(grpc_slice_ref_internal(args.path)),
3253
- call_start_time_(args.start_time),
3254
- deadline_(args.deadline),
3255
- arena_(args.arena),
3256
- owning_call_(args.call_stack),
3257
- call_combiner_(args.call_combiner),
3258
- call_context_(args.context),
3259
- pending_send_initial_metadata_(false),
3260
- pending_send_message_(false),
3261
- pending_send_trailing_metadata_(false),
3262
- enable_retries_(true),
3263
- retry_committed_(false),
3264
- last_attempt_got_server_pushback_(false) {}
3265
-
3266
- ChannelData::RetryingCall::~RetryingCall() {
3267
- grpc_slice_unref_internal(path_);
3268
- GRPC_ERROR_UNREF(cancel_error_);
3269
- // Make sure there are no remaining pending batches.
3270
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
3271
- GPR_ASSERT(pending_batches_[i].batch == nullptr);
2430
+ class ClientChannel::LoadBalancedCall::Metadata
2431
+ : public LoadBalancingPolicy::MetadataInterface {
2432
+ public:
2433
+ Metadata(LoadBalancedCall* lb_call, grpc_metadata_batch* batch)
2434
+ : lb_call_(lb_call), batch_(batch) {}
2435
+
2436
+ void Add(absl::string_view key, absl::string_view value) override {
2437
+ grpc_linked_mdelem* linked_mdelem = static_cast<grpc_linked_mdelem*>(
2438
+ lb_call_->arena_->Alloc(sizeof(grpc_linked_mdelem)));
2439
+ linked_mdelem->md = grpc_mdelem_from_slices(
2440
+ ExternallyManagedSlice(key.data(), key.size()),
2441
+ ExternallyManagedSlice(value.data(), value.size()));
2442
+ GPR_ASSERT(grpc_metadata_batch_link_tail(batch_, linked_mdelem) ==
2443
+ GRPC_ERROR_NONE);
3272
2444
  }
3273
- }
3274
2445
 
3275
- void ChannelData::RetryingCall::StartTransportStreamOpBatch(
3276
- grpc_transport_stream_op_batch* batch) {
3277
- // If we've previously been cancelled, immediately fail any new batches.
3278
- if (GPR_UNLIKELY(cancel_error_ != GRPC_ERROR_NONE)) {
3279
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3280
- gpr_log(GPR_INFO,
3281
- "chand=%p retrying_call=%p: failing batch with error: %s", chand_,
3282
- this, grpc_error_string(cancel_error_));
3283
- }
3284
- // Note: This will release the call combiner.
3285
- grpc_transport_stream_op_batch_finish_with_failure(
3286
- batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
3287
- return;
2446
+ iterator begin() const override {
2447
+ static_assert(sizeof(grpc_linked_mdelem*) <= sizeof(intptr_t),
2448
+ "iterator size too large");
2449
+ return iterator(
2450
+ this, reinterpret_cast<intptr_t>(MaybeSkipEntry(batch_->list.head)));
3288
2451
  }
3289
- // Handle cancellation.
3290
- if (GPR_UNLIKELY(batch->cancel_stream)) {
3291
- // Stash a copy of cancel_error in our call data, so that we can use
3292
- // it for subsequent operations. This ensures that if the call is
3293
- // cancelled before any batches are passed down (e.g., if the deadline
3294
- // is in the past when the call starts), we can return the right
3295
- // error to the caller when the first batch does get passed down.
3296
- GRPC_ERROR_UNREF(cancel_error_);
3297
- cancel_error_ = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
3298
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3299
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: recording cancel_error=%s",
3300
- chand_, this, grpc_error_string(cancel_error_));
3301
- }
3302
- // If we do not have an LB call (i.e., a pick has not yet been started),
3303
- // fail all pending batches. Otherwise, send the cancellation down to the
3304
- // LB call.
3305
- if (lb_call_ == nullptr) {
3306
- // TODO(roth): If there is a pending retry callback, do we need to
3307
- // cancel it here?
3308
- PendingBatchesFail(GRPC_ERROR_REF(cancel_error_), NoYieldCallCombiner);
3309
- // Note: This will release the call combiner.
3310
- grpc_transport_stream_op_batch_finish_with_failure(
3311
- batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
3312
- } else {
3313
- // Note: This will release the call combiner.
3314
- lb_call_->StartTransportStreamOpBatch(batch);
3315
- }
3316
- return;
2452
+ iterator end() const override {
2453
+ static_assert(sizeof(grpc_linked_mdelem*) <= sizeof(intptr_t),
2454
+ "iterator size too large");
2455
+ return iterator(this, 0);
3317
2456
  }
3318
- // Add the batch to the pending list.
3319
- PendingBatchesAdd(batch);
3320
- // Create LB call if needed.
3321
- // TODO(roth): If we get a new batch from the surface after the
3322
- // initial retry attempt has failed, while the retry timer is pending,
3323
- // we should queue the batch and not try to send it immediately.
3324
- if (lb_call_ == nullptr) {
3325
- // We do not yet have an LB call, so create one.
3326
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3327
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: creating LB call", chand_,
3328
- this);
2457
+
2458
+ iterator erase(iterator it) override {
2459
+ grpc_linked_mdelem* linked_mdelem =
2460
+ reinterpret_cast<grpc_linked_mdelem*>(GetIteratorHandle(it));
2461
+ intptr_t handle = reinterpret_cast<intptr_t>(linked_mdelem->next);
2462
+ grpc_metadata_batch_remove(batch_, linked_mdelem);
2463
+ return iterator(this, handle);
2464
+ }
2465
+
2466
+ private:
2467
+ grpc_linked_mdelem* MaybeSkipEntry(grpc_linked_mdelem* entry) const {
2468
+ if (entry != nullptr && batch_->idx.named.path == entry) {
2469
+ return entry->next;
3329
2470
  }
3330
- CreateLbCall(this, GRPC_ERROR_NONE);
3331
- return;
2471
+ return entry;
3332
2472
  }
3333
- // Send batches to LB call.
3334
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3335
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: starting batch on lb_call=%p",
3336
- chand_, this, lb_call_.get());
2473
+
2474
+ intptr_t IteratorHandleNext(intptr_t handle) const override {
2475
+ grpc_linked_mdelem* linked_mdelem =
2476
+ reinterpret_cast<grpc_linked_mdelem*>(handle);
2477
+ return reinterpret_cast<intptr_t>(MaybeSkipEntry(linked_mdelem->next));
3337
2478
  }
3338
- PendingBatchesResume();
3339
- }
3340
2479
 
3341
- RefCountedPtr<SubchannelCall> ChannelData::RetryingCall::subchannel_call()
3342
- const {
3343
- if (lb_call_ == nullptr) return nullptr;
3344
- return lb_call_->subchannel_call();
3345
- }
2480
+ std::pair<absl::string_view, absl::string_view> IteratorHandleGet(
2481
+ intptr_t handle) const override {
2482
+ grpc_linked_mdelem* linked_mdelem =
2483
+ reinterpret_cast<grpc_linked_mdelem*>(handle);
2484
+ return std::make_pair(StringViewFromSlice(GRPC_MDKEY(linked_mdelem->md)),
2485
+ StringViewFromSlice(GRPC_MDVALUE(linked_mdelem->md)));
2486
+ }
2487
+
2488
+ LoadBalancedCall* lb_call_;
2489
+ grpc_metadata_batch* batch_;
2490
+ };
3346
2491
 
3347
2492
  //
3348
- // send op data caching
2493
+ // ClientChannel::LoadBalancedCall::LbCallState
3349
2494
  //
3350
2495
 
3351
- void ChannelData::RetryingCall::MaybeCacheSendOpsForBatch(
3352
- PendingBatch* pending) {
3353
- if (pending->send_ops_cached) return;
3354
- pending->send_ops_cached = true;
3355
- grpc_transport_stream_op_batch* batch = pending->batch;
3356
- // Save a copy of metadata for send_initial_metadata ops.
3357
- if (batch->send_initial_metadata) {
3358
- seen_send_initial_metadata_ = true;
3359
- GPR_ASSERT(send_initial_metadata_storage_ == nullptr);
3360
- grpc_metadata_batch* send_initial_metadata =
3361
- batch->payload->send_initial_metadata.send_initial_metadata;
3362
- send_initial_metadata_storage_ =
3363
- static_cast<grpc_linked_mdelem*>(arena_->Alloc(
3364
- sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count));
3365
- grpc_metadata_batch_copy(send_initial_metadata, &send_initial_metadata_,
3366
- send_initial_metadata_storage_);
3367
- send_initial_metadata_flags_ =
3368
- batch->payload->send_initial_metadata.send_initial_metadata_flags;
3369
- peer_string_ = batch->payload->send_initial_metadata.peer_string;
3370
- }
3371
- // Set up cache for send_message ops.
3372
- if (batch->send_message) {
3373
- ByteStreamCache* cache = arena_->New<ByteStreamCache>(
3374
- std::move(batch->payload->send_message.send_message));
3375
- send_messages_.push_back(cache);
3376
- }
3377
- // Save metadata batch for send_trailing_metadata ops.
3378
- if (batch->send_trailing_metadata) {
3379
- seen_send_trailing_metadata_ = true;
3380
- GPR_ASSERT(send_trailing_metadata_storage_ == nullptr);
3381
- grpc_metadata_batch* send_trailing_metadata =
3382
- batch->payload->send_trailing_metadata.send_trailing_metadata;
3383
- send_trailing_metadata_storage_ =
3384
- static_cast<grpc_linked_mdelem*>(arena_->Alloc(
3385
- sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count));
3386
- grpc_metadata_batch_copy(send_trailing_metadata, &send_trailing_metadata_,
3387
- send_trailing_metadata_storage_);
3388
- }
3389
- }
2496
+ class ClientChannel::LoadBalancedCall::LbCallState
2497
+ : public LoadBalancingPolicy::CallState {
2498
+ public:
2499
+ explicit LbCallState(LoadBalancedCall* lb_call) : lb_call_(lb_call) {}
3390
2500
 
3391
- void ChannelData::RetryingCall::FreeCachedSendInitialMetadata() {
3392
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3393
- gpr_log(GPR_INFO,
3394
- "chand=%p retrying_call=%p: destroying send_initial_metadata",
3395
- chand_, this);
3396
- }
3397
- grpc_metadata_batch_destroy(&send_initial_metadata_);
3398
- }
2501
+ void* Alloc(size_t size) override { return lb_call_->arena_->Alloc(size); }
3399
2502
 
3400
- void ChannelData::RetryingCall::FreeCachedSendMessage(size_t idx) {
3401
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3402
- gpr_log(GPR_INFO,
3403
- "chand=%p retrying_call=%p: destroying send_messages[%" PRIuPTR "]",
3404
- chand_, this, idx);
2503
+ const LoadBalancingPolicy::BackendMetricData* GetBackendMetricData()
2504
+ override {
2505
+ if (lb_call_->backend_metric_data_ == nullptr) {
2506
+ grpc_linked_mdelem* md = lb_call_->recv_trailing_metadata_->idx.named
2507
+ .x_endpoint_load_metrics_bin;
2508
+ if (md != nullptr) {
2509
+ lb_call_->backend_metric_data_ =
2510
+ ParseBackendMetricData(GRPC_MDVALUE(md->md), lb_call_->arena_);
2511
+ }
2512
+ }
2513
+ return lb_call_->backend_metric_data_;
3405
2514
  }
3406
- send_messages_[idx]->Destroy();
3407
- }
3408
2515
 
3409
- void ChannelData::RetryingCall::FreeCachedSendTrailingMetadata() {
3410
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3411
- gpr_log(GPR_INFO,
3412
- "chand_=%p retrying_call=%p: destroying send_trailing_metadata",
3413
- chand_, this);
2516
+ absl::string_view ExperimentalGetCallAttribute(const char* key) override {
2517
+ auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
2518
+ lb_call_->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
2519
+ auto& call_attributes = service_config_call_data->call_attributes();
2520
+ auto it = call_attributes.find(key);
2521
+ if (it == call_attributes.end()) return absl::string_view();
2522
+ return it->second;
3414
2523
  }
3415
- grpc_metadata_batch_destroy(&send_trailing_metadata_);
3416
- }
3417
2524
 
3418
- void ChannelData::RetryingCall::FreeCachedSendOpDataAfterCommit(
3419
- SubchannelCallRetryState* retry_state) {
3420
- if (retry_state->completed_send_initial_metadata) {
3421
- FreeCachedSendInitialMetadata();
3422
- }
3423
- for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
3424
- FreeCachedSendMessage(i);
3425
- }
3426
- if (retry_state->completed_send_trailing_metadata) {
3427
- FreeCachedSendTrailingMetadata();
3428
- }
3429
- }
2525
+ private:
2526
+ LoadBalancedCall* lb_call_;
2527
+ };
2528
+
2529
+ //
2530
+ // LoadBalancedCall
2531
+ //
2532
+
2533
+ ClientChannel::LoadBalancedCall::LoadBalancedCall(
2534
+ ClientChannel* chand, const grpc_call_element_args& args,
2535
+ grpc_polling_entity* pollent, grpc_closure* on_call_destruction_complete)
2536
+ : RefCounted(GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
2537
+ ? "LoadBalancedCall"
2538
+ : nullptr),
2539
+ chand_(chand),
2540
+ path_(grpc_slice_ref_internal(args.path)),
2541
+ call_start_time_(args.start_time),
2542
+ deadline_(args.deadline),
2543
+ arena_(args.arena),
2544
+ owning_call_(args.call_stack),
2545
+ call_combiner_(args.call_combiner),
2546
+ call_context_(args.context),
2547
+ pollent_(pollent),
2548
+ on_call_destruction_complete_(on_call_destruction_complete) {}
3430
2549
 
3431
- void ChannelData::RetryingCall::FreeCachedSendOpDataForCompletedBatch(
3432
- SubchannelCallBatchData* batch_data,
3433
- SubchannelCallRetryState* retry_state) {
3434
- if (batch_data->batch.send_initial_metadata) {
3435
- FreeCachedSendInitialMetadata();
2550
+ ClientChannel::LoadBalancedCall::~LoadBalancedCall() {
2551
+ grpc_slice_unref_internal(path_);
2552
+ GRPC_ERROR_UNREF(cancel_error_);
2553
+ GRPC_ERROR_UNREF(failure_error_);
2554
+ if (backend_metric_data_ != nullptr) {
2555
+ backend_metric_data_
2556
+ ->LoadBalancingPolicy::BackendMetricData::~BackendMetricData();
3436
2557
  }
3437
- if (batch_data->batch.send_message) {
3438
- FreeCachedSendMessage(retry_state->completed_send_message_count - 1);
2558
+ // Make sure there are no remaining pending batches.
2559
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
2560
+ GPR_ASSERT(pending_batches_[i] == nullptr);
3439
2561
  }
3440
- if (batch_data->batch.send_trailing_metadata) {
3441
- FreeCachedSendTrailingMetadata();
2562
+ if (on_call_destruction_complete_ != nullptr) {
2563
+ ExecCtx::Run(DEBUG_LOCATION, on_call_destruction_complete_,
2564
+ GRPC_ERROR_NONE);
3442
2565
  }
3443
2566
  }
3444
2567
 
3445
- //
3446
- // pending_batches management
3447
- //
3448
-
3449
- size_t ChannelData::RetryingCall::GetBatchIndex(
2568
+ size_t ClientChannel::LoadBalancedCall::GetBatchIndex(
3450
2569
  grpc_transport_stream_op_batch* batch) {
3451
2570
  // Note: It is important the send_initial_metadata be the first entry
3452
- // here, since the code in pick_subchannel_locked() assumes it will be.
2571
+ // here, since the code in PickSubchannelLocked() assumes it will be.
3453
2572
  if (batch->send_initial_metadata) return 0;
3454
2573
  if (batch->send_message) return 1;
3455
2574
  if (batch->send_trailing_metadata) return 2;
@@ -3460,133 +2579,48 @@ size_t ChannelData::RetryingCall::GetBatchIndex(
3460
2579
  }
3461
2580
 
3462
2581
  // This is called via the call combiner, so access to calld is synchronized.
3463
- void ChannelData::RetryingCall::PendingBatchesAdd(
2582
+ void ClientChannel::LoadBalancedCall::PendingBatchesAdd(
3464
2583
  grpc_transport_stream_op_batch* batch) {
3465
2584
  const size_t idx = GetBatchIndex(batch);
3466
2585
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3467
- gpr_log(
3468
- GPR_INFO,
3469
- "chand_=%p retrying_call=%p: adding pending batch at index %" PRIuPTR,
3470
- chand_, this, idx);
3471
- }
3472
- PendingBatch* pending = &pending_batches_[idx];
3473
- GPR_ASSERT(pending->batch == nullptr);
3474
- pending->batch = batch;
3475
- pending->send_ops_cached = false;
3476
- if (enable_retries_) {
3477
- // Update state in calld about pending batches.
3478
- // Also check if the batch takes us over the retry buffer limit.
3479
- // Note: We don't check the size of trailing metadata here, because
3480
- // gRPC clients do not send trailing metadata.
3481
- if (batch->send_initial_metadata) {
3482
- pending_send_initial_metadata_ = true;
3483
- bytes_buffered_for_retry_ += grpc_metadata_batch_size(
3484
- batch->payload->send_initial_metadata.send_initial_metadata);
3485
- }
3486
- if (batch->send_message) {
3487
- pending_send_message_ = true;
3488
- bytes_buffered_for_retry_ +=
3489
- batch->payload->send_message.send_message->length();
3490
- }
3491
- if (batch->send_trailing_metadata) {
3492
- pending_send_trailing_metadata_ = true;
3493
- }
3494
- if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
3495
- chand_->per_rpc_retry_buffer_size_)) {
3496
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3497
- gpr_log(GPR_INFO,
3498
- "chand=%p retrying_call=%p: exceeded retry buffer size, "
3499
- "committing",
3500
- chand_, this);
3501
- }
3502
- SubchannelCallRetryState* retry_state =
3503
- lb_call_ == nullptr ? nullptr
3504
- : static_cast<SubchannelCallRetryState*>(
3505
- lb_call_->GetParentData());
3506
- RetryCommit(retry_state);
3507
- // If we are not going to retry and have not yet started, pretend
3508
- // retries are disabled so that we don't bother with retry overhead.
3509
- if (num_attempts_completed_ == 0) {
3510
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3511
- gpr_log(GPR_INFO,
3512
- "chand=%p retrying_call=%p: disabling retries before first "
3513
- "attempt",
3514
- chand_, this);
3515
- }
3516
- // TODO(roth): Treat this as a commit?
3517
- enable_retries_ = false;
3518
- }
3519
- }
3520
- }
3521
- }
3522
-
3523
- void ChannelData::RetryingCall::PendingBatchClear(PendingBatch* pending) {
3524
- if (enable_retries_) {
3525
- if (pending->batch->send_initial_metadata) {
3526
- pending_send_initial_metadata_ = false;
3527
- }
3528
- if (pending->batch->send_message) {
3529
- pending_send_message_ = false;
3530
- }
3531
- if (pending->batch->send_trailing_metadata) {
3532
- pending_send_trailing_metadata_ = false;
3533
- }
3534
- }
3535
- pending->batch = nullptr;
3536
- }
3537
-
3538
- void ChannelData::RetryingCall::MaybeClearPendingBatch(PendingBatch* pending) {
3539
- grpc_transport_stream_op_batch* batch = pending->batch;
3540
- // We clear the pending batch if all of its callbacks have been
3541
- // scheduled and reset to nullptr.
3542
- if (batch->on_complete == nullptr &&
3543
- (!batch->recv_initial_metadata ||
3544
- batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
3545
- nullptr) &&
3546
- (!batch->recv_message ||
3547
- batch->payload->recv_message.recv_message_ready == nullptr) &&
3548
- (!batch->recv_trailing_metadata ||
3549
- batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
3550
- nullptr)) {
3551
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3552
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: clearing pending batch",
3553
- chand_, this);
3554
- }
3555
- PendingBatchClear(pending);
2586
+ gpr_log(GPR_INFO,
2587
+ "chand=%p lb_call=%p: adding pending batch at index %" PRIuPTR,
2588
+ chand_, this, idx);
3556
2589
  }
2590
+ GPR_ASSERT(pending_batches_[idx] == nullptr);
2591
+ pending_batches_[idx] = batch;
3557
2592
  }
3558
2593
 
3559
2594
  // This is called via the call combiner, so access to calld is synchronized.
3560
- void ChannelData::RetryingCall::FailPendingBatchInCallCombiner(
3561
- void* arg, grpc_error* error) {
2595
+ void ClientChannel::LoadBalancedCall::FailPendingBatchInCallCombiner(
2596
+ void* arg, grpc_error_handle error) {
3562
2597
  grpc_transport_stream_op_batch* batch =
3563
2598
  static_cast<grpc_transport_stream_op_batch*>(arg);
3564
- RetryingCall* call =
3565
- static_cast<RetryingCall*>(batch->handler_private.extra_arg);
2599
+ auto* self = static_cast<LoadBalancedCall*>(batch->handler_private.extra_arg);
3566
2600
  // Note: This will release the call combiner.
3567
2601
  grpc_transport_stream_op_batch_finish_with_failure(
3568
- batch, GRPC_ERROR_REF(error), call->call_combiner_);
2602
+ batch, GRPC_ERROR_REF(error), self->call_combiner_);
3569
2603
  }
3570
2604
 
3571
2605
  // This is called via the call combiner, so access to calld is synchronized.
3572
- void ChannelData::RetryingCall::PendingBatchesFail(
3573
- grpc_error* error,
2606
+ void ClientChannel::LoadBalancedCall::PendingBatchesFail(
2607
+ grpc_error_handle error,
3574
2608
  YieldCallCombinerPredicate yield_call_combiner_predicate) {
3575
2609
  GPR_ASSERT(error != GRPC_ERROR_NONE);
2610
+ GRPC_ERROR_UNREF(failure_error_);
2611
+ failure_error_ = error;
3576
2612
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3577
2613
  size_t num_batches = 0;
3578
2614
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
3579
- if (pending_batches_[i].batch != nullptr) ++num_batches;
2615
+ if (pending_batches_[i] != nullptr) ++num_batches;
3580
2616
  }
3581
2617
  gpr_log(GPR_INFO,
3582
- "chand=%p retrying_call=%p: failing %" PRIuPTR
3583
- " pending batches: %s",
3584
- chand_, this, num_batches, grpc_error_string(error));
2618
+ "chand=%p lb_call=%p: failing %" PRIuPTR " pending batches: %s",
2619
+ chand_, this, num_batches, grpc_error_std_string(error).c_str());
3585
2620
  }
3586
2621
  CallCombinerClosureList closures;
3587
2622
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
3588
- PendingBatch* pending = &pending_batches_[i];
3589
- grpc_transport_stream_op_batch* batch = pending->batch;
2623
+ grpc_transport_stream_op_batch*& batch = pending_batches_[i];
3590
2624
  if (batch != nullptr) {
3591
2625
  batch->handler_private.extra_arg = this;
3592
2626
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
@@ -3594,7 +2628,7 @@ void ChannelData::RetryingCall::PendingBatchesFail(
3594
2628
  grpc_schedule_on_exec_ctx);
3595
2629
  closures.Add(&batch->handler_private.closure, GRPC_ERROR_REF(error),
3596
2630
  "PendingBatchesFail");
3597
- PendingBatchClear(pending);
2631
+ batch = nullptr;
3598
2632
  }
3599
2633
  }
3600
2634
  if (yield_call_combiner_predicate(closures)) {
@@ -3602,1518 +2636,59 @@ void ChannelData::RetryingCall::PendingBatchesFail(
3602
2636
  } else {
3603
2637
  closures.RunClosuresWithoutYielding(call_combiner_);
3604
2638
  }
3605
- GRPC_ERROR_UNREF(error);
3606
2639
  }
3607
2640
 
3608
2641
  // This is called via the call combiner, so access to calld is synchronized.
3609
- void ChannelData::RetryingCall::ResumePendingBatchInCallCombiner(
3610
- void* arg, grpc_error* /*ignored*/) {
2642
+ void ClientChannel::LoadBalancedCall::ResumePendingBatchInCallCombiner(
2643
+ void* arg, grpc_error_handle /*ignored*/) {
3611
2644
  grpc_transport_stream_op_batch* batch =
3612
2645
  static_cast<grpc_transport_stream_op_batch*>(arg);
3613
- auto* lb_call = static_cast<ChannelData::LoadBalancedCall*>(
3614
- batch->handler_private.extra_arg);
2646
+ SubchannelCall* subchannel_call =
2647
+ static_cast<SubchannelCall*>(batch->handler_private.extra_arg);
3615
2648
  // Note: This will release the call combiner.
3616
- lb_call->StartTransportStreamOpBatch(batch);
2649
+ subchannel_call->StartTransportStreamOpBatch(batch);
3617
2650
  }
3618
2651
 
3619
2652
  // This is called via the call combiner, so access to calld is synchronized.
3620
- void ChannelData::RetryingCall::PendingBatchesResume() {
3621
- if (enable_retries_) {
3622
- StartRetriableSubchannelBatches(this, GRPC_ERROR_NONE);
3623
- return;
3624
- }
3625
- // Retries not enabled; send down batches as-is.
2653
+ void ClientChannel::LoadBalancedCall::PendingBatchesResume() {
3626
2654
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3627
2655
  size_t num_batches = 0;
3628
2656
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
3629
- if (pending_batches_[i].batch != nullptr) ++num_batches;
2657
+ if (pending_batches_[i] != nullptr) ++num_batches;
3630
2658
  }
3631
2659
  gpr_log(GPR_INFO,
3632
- "chand=%p retrying_call=%p: starting %" PRIuPTR
3633
- " pending batches on lb_call=%p",
3634
- chand_, this, num_batches, lb_call_.get());
2660
+ "chand=%p lb_call=%p: starting %" PRIuPTR
2661
+ " pending batches on subchannel_call=%p",
2662
+ chand_, this, num_batches, subchannel_call_.get());
3635
2663
  }
3636
2664
  CallCombinerClosureList closures;
3637
2665
  for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
3638
- PendingBatch* pending = &pending_batches_[i];
3639
- grpc_transport_stream_op_batch* batch = pending->batch;
2666
+ grpc_transport_stream_op_batch*& batch = pending_batches_[i];
3640
2667
  if (batch != nullptr) {
3641
- batch->handler_private.extra_arg = lb_call_.get();
2668
+ batch->handler_private.extra_arg = subchannel_call_.get();
3642
2669
  GRPC_CLOSURE_INIT(&batch->handler_private.closure,
3643
- ResumePendingBatchInCallCombiner, batch, nullptr);
2670
+ ResumePendingBatchInCallCombiner, batch,
2671
+ grpc_schedule_on_exec_ctx);
3644
2672
  closures.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
3645
- "PendingBatchesResume");
3646
- PendingBatchClear(pending);
2673
+ "resuming pending batch from LB call");
2674
+ batch = nullptr;
3647
2675
  }
3648
2676
  }
3649
2677
  // Note: This will release the call combiner.
3650
2678
  closures.RunClosures(call_combiner_);
3651
2679
  }
3652
2680
 
3653
- template <typename Predicate>
3654
- ChannelData::RetryingCall::PendingBatch*
3655
- ChannelData::RetryingCall::PendingBatchFind(const char* log_message,
3656
- Predicate predicate) {
3657
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
3658
- PendingBatch* pending = &pending_batches_[i];
3659
- grpc_transport_stream_op_batch* batch = pending->batch;
3660
- if (batch != nullptr && predicate(batch)) {
3661
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3662
- gpr_log(
3663
- GPR_INFO,
3664
- "chand=%p retrying_call=%p: %s pending batch at index %" PRIuPTR,
3665
- chand_, this, log_message, i);
3666
- }
3667
- return pending;
3668
- }
3669
- }
3670
- return nullptr;
3671
- }
3672
-
3673
- //
3674
- // retry code
3675
- //
3676
-
3677
- void ChannelData::RetryingCall::RetryCommit(
3678
- SubchannelCallRetryState* retry_state) {
3679
- if (retry_committed_) return;
3680
- retry_committed_ = true;
3681
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3682
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: committing retries", chand_,
3683
- this);
3684
- }
3685
- if (retry_state != nullptr) {
3686
- FreeCachedSendOpDataAfterCommit(retry_state);
3687
- }
3688
- }
3689
-
3690
- void ChannelData::RetryingCall::DoRetry(SubchannelCallRetryState* retry_state,
3691
- grpc_millis server_pushback_ms) {
3692
- GPR_ASSERT(retry_policy_ != nullptr);
3693
- // Reset LB call.
3694
- lb_call_.reset();
3695
- // Compute backoff delay.
3696
- grpc_millis next_attempt_time;
3697
- if (server_pushback_ms >= 0) {
3698
- next_attempt_time = ExecCtx::Get()->Now() + server_pushback_ms;
3699
- last_attempt_got_server_pushback_ = true;
3700
- } else {
3701
- if (num_attempts_completed_ == 1 || last_attempt_got_server_pushback_) {
3702
- last_attempt_got_server_pushback_ = false;
3703
- }
3704
- next_attempt_time = retry_backoff_.NextAttemptTime();
3705
- }
3706
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3707
- gpr_log(GPR_INFO,
3708
- "chand=%p retrying_call=%p: retrying failed call in %" PRId64 " ms",
3709
- chand_, this, next_attempt_time - ExecCtx::Get()->Now());
3710
- }
3711
- // Schedule retry after computed delay.
3712
- GRPC_CLOSURE_INIT(&retry_closure_, CreateLbCall, this, nullptr);
3713
- grpc_timer_init(&retry_timer_, next_attempt_time, &retry_closure_);
3714
- // Update bookkeeping.
3715
- if (retry_state != nullptr) retry_state->retry_dispatched = true;
3716
- }
3717
-
3718
- bool ChannelData::RetryingCall::MaybeRetry(SubchannelCallBatchData* batch_data,
3719
- grpc_status_code status,
3720
- grpc_mdelem* server_pushback_md) {
3721
- // Get retry policy.
3722
- if (retry_policy_ == nullptr) return false;
3723
- // If we've already dispatched a retry from this call, return true.
3724
- // This catches the case where the batch has multiple callbacks
3725
- // (i.e., it includes either recv_message or recv_initial_metadata).
3726
- SubchannelCallRetryState* retry_state = nullptr;
3727
- if (batch_data != nullptr) {
3728
- retry_state = static_cast<SubchannelCallRetryState*>(
3729
- batch_data->lb_call->GetParentData());
3730
- if (retry_state->retry_dispatched) {
3731
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3732
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: retry already dispatched",
3733
- chand_, this);
3734
- }
3735
- return true;
3736
- }
3737
- }
3738
- // Check status.
3739
- if (GPR_LIKELY(status == GRPC_STATUS_OK)) {
3740
- if (retry_throttle_data_ != nullptr) {
3741
- retry_throttle_data_->RecordSuccess();
3742
- }
3743
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3744
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: call succeeded", chand_,
3745
- this);
3746
- }
3747
- return false;
3748
- }
3749
- // Status is not OK. Check whether the status is retryable.
3750
- if (!retry_policy_->retryable_status_codes.Contains(status)) {
3751
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3752
- gpr_log(
3753
- GPR_INFO,
3754
- "chand=%p retrying_call=%p: status %s not configured as retryable",
3755
- chand_, this, grpc_status_code_to_string(status));
3756
- }
3757
- return false;
3758
- }
3759
- // Record the failure and check whether retries are throttled.
3760
- // Note that it's important for this check to come after the status
3761
- // code check above, since we should only record failures whose statuses
3762
- // match the configured retryable status codes, so that we don't count
3763
- // things like failures due to malformed requests (INVALID_ARGUMENT).
3764
- // Conversely, it's important for this to come before the remaining
3765
- // checks, so that we don't fail to record failures due to other factors.
3766
- if (retry_throttle_data_ != nullptr &&
3767
- !retry_throttle_data_->RecordFailure()) {
3768
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3769
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: retries throttled", chand_,
3770
- this);
3771
- }
3772
- return false;
3773
- }
3774
- // Check whether the call is committed.
3775
- if (retry_committed_) {
3776
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3777
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: retries already committed",
3778
- chand_, this);
3779
- }
3780
- return false;
3781
- }
3782
- // Check whether we have retries remaining.
3783
- ++num_attempts_completed_;
3784
- if (num_attempts_completed_ >= retry_policy_->max_attempts) {
3785
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3786
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: exceeded %d retry attempts",
3787
- chand_, this, retry_policy_->max_attempts);
3788
- }
3789
- return false;
3790
- }
3791
- // If the call was cancelled from the surface, don't retry.
3792
- if (cancel_error_ != GRPC_ERROR_NONE) {
3793
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3794
- gpr_log(GPR_INFO,
3795
- "chand=%p retrying_call=%p: call cancelled from surface, not "
3796
- "retrying",
3797
- chand_, this);
3798
- }
3799
- return false;
3800
- }
3801
- // Check server push-back.
3802
- grpc_millis server_pushback_ms = -1;
3803
- if (server_pushback_md != nullptr) {
3804
- // If the value is "-1" or any other unparseable string, we do not retry.
3805
- uint32_t ms;
3806
- if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
3807
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3808
- gpr_log(
3809
- GPR_INFO,
3810
- "chand=%p retrying_call=%p: not retrying due to server push-back",
3811
- chand_, this);
3812
- }
3813
- return false;
3814
- } else {
3815
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3816
- gpr_log(GPR_INFO,
3817
- "chand=%p retrying_call=%p: server push-back: retry in %u ms",
3818
- chand_, this, ms);
3819
- }
3820
- server_pushback_ms = static_cast<grpc_millis>(ms);
3821
- }
3822
- }
3823
- DoRetry(retry_state, server_pushback_ms);
3824
- return true;
3825
- }
3826
-
3827
- //
3828
- // ChannelData::RetryingCall::SubchannelCallBatchData
3829
- //
3830
-
3831
- ChannelData::RetryingCall::SubchannelCallBatchData*
3832
- ChannelData::RetryingCall::SubchannelCallBatchData::Create(
3833
- RetryingCall* call, int refcount, bool set_on_complete) {
3834
- return call->arena_->New<SubchannelCallBatchData>(call, refcount,
3835
- set_on_complete);
3836
- }
3837
-
3838
- ChannelData::RetryingCall::SubchannelCallBatchData::SubchannelCallBatchData(
3839
- RetryingCall* call, int refcount, bool set_on_complete)
3840
- : call(call), lb_call(call->lb_call_) {
3841
- SubchannelCallRetryState* retry_state =
3842
- static_cast<SubchannelCallRetryState*>(lb_call->GetParentData());
3843
- batch.payload = &retry_state->batch_payload;
3844
- gpr_ref_init(&refs, refcount);
3845
- if (set_on_complete) {
3846
- GRPC_CLOSURE_INIT(&on_complete, ChannelData::RetryingCall::OnComplete, this,
3847
- grpc_schedule_on_exec_ctx);
3848
- batch.on_complete = &on_complete;
3849
- }
3850
- GRPC_CALL_STACK_REF(call->owning_call_, "batch_data");
3851
- }
3852
-
3853
- void ChannelData::RetryingCall::SubchannelCallBatchData::Destroy() {
3854
- SubchannelCallRetryState* retry_state =
3855
- static_cast<SubchannelCallRetryState*>(lb_call->GetParentData());
3856
- if (batch.send_initial_metadata) {
3857
- grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
3858
- }
3859
- if (batch.send_trailing_metadata) {
3860
- grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
3861
- }
3862
- if (batch.recv_initial_metadata) {
3863
- grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
3864
- }
3865
- if (batch.recv_trailing_metadata) {
3866
- grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
3867
- }
3868
- lb_call.reset();
3869
- GRPC_CALL_STACK_UNREF(call->owning_call_, "batch_data");
3870
- }
3871
-
3872
- //
3873
- // recv_initial_metadata callback handling
3874
- //
3875
-
3876
- void ChannelData::RetryingCall::InvokeRecvInitialMetadataCallback(
3877
- void* arg, grpc_error* error) {
3878
- SubchannelCallBatchData* batch_data =
3879
- static_cast<SubchannelCallBatchData*>(arg);
3880
- // Find pending batch.
3881
- PendingBatch* pending = batch_data->call->PendingBatchFind(
3882
- "invoking recv_initial_metadata_ready for",
3883
- [](grpc_transport_stream_op_batch* batch) {
3884
- return batch->recv_initial_metadata &&
3885
- batch->payload->recv_initial_metadata
3886
- .recv_initial_metadata_ready != nullptr;
3887
- });
3888
- GPR_ASSERT(pending != nullptr);
3889
- // Return metadata.
3890
- SubchannelCallRetryState* retry_state =
3891
- static_cast<SubchannelCallRetryState*>(
3892
- batch_data->lb_call->GetParentData());
3893
- grpc_metadata_batch_move(
3894
- &retry_state->recv_initial_metadata,
3895
- pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
3896
- // Update bookkeeping.
3897
- // Note: Need to do this before invoking the callback, since invoking
3898
- // the callback will result in yielding the call combiner.
3899
- grpc_closure* recv_initial_metadata_ready =
3900
- pending->batch->payload->recv_initial_metadata
3901
- .recv_initial_metadata_ready;
3902
- pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
3903
- nullptr;
3904
- batch_data->call->MaybeClearPendingBatch(pending);
3905
- batch_data->Unref();
3906
- // Invoke callback.
3907
- Closure::Run(DEBUG_LOCATION, recv_initial_metadata_ready,
3908
- GRPC_ERROR_REF(error));
3909
- }
3910
-
3911
- void ChannelData::RetryingCall::RecvInitialMetadataReady(void* arg,
3912
- grpc_error* error) {
3913
- SubchannelCallBatchData* batch_data =
3914
- static_cast<SubchannelCallBatchData*>(arg);
3915
- RetryingCall* call = batch_data->call;
3916
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3917
- gpr_log(
3918
- GPR_INFO,
3919
- "chand=%p retrying_call=%p: got recv_initial_metadata_ready, error=%s",
3920
- call->chand_, call, grpc_error_string(error));
3921
- }
3922
- SubchannelCallRetryState* retry_state =
3923
- static_cast<SubchannelCallRetryState*>(
3924
- batch_data->lb_call->GetParentData());
3925
- retry_state->completed_recv_initial_metadata = true;
3926
- // If a retry was already dispatched, then we're not going to use the
3927
- // result of this recv_initial_metadata op, so do nothing.
3928
- if (retry_state->retry_dispatched) {
3929
- GRPC_CALL_COMBINER_STOP(
3930
- call->call_combiner_,
3931
- "recv_initial_metadata_ready after retry dispatched");
3932
- return;
3933
- }
3934
- // If we got an error or a Trailers-Only response and have not yet gotten
3935
- // the recv_trailing_metadata_ready callback, then defer propagating this
3936
- // callback back to the surface. We can evaluate whether to retry when
3937
- // recv_trailing_metadata comes back.
3938
- if (GPR_UNLIKELY((retry_state->trailing_metadata_available ||
3939
- error != GRPC_ERROR_NONE) &&
3940
- !retry_state->completed_recv_trailing_metadata)) {
3941
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
3942
- gpr_log(
3943
- GPR_INFO,
3944
- "chand=%p retrying_call=%p: deferring recv_initial_metadata_ready "
3945
- "(Trailers-Only)",
3946
- call->chand_, call);
3947
- }
3948
- retry_state->recv_initial_metadata_ready_deferred_batch = batch_data;
3949
- retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
3950
- if (!retry_state->started_recv_trailing_metadata) {
3951
- // recv_trailing_metadata not yet started by application; start it
3952
- // ourselves to get status.
3953
- call->StartInternalRecvTrailingMetadata();
3954
- } else {
3955
- GRPC_CALL_COMBINER_STOP(
3956
- call->call_combiner_,
3957
- "recv_initial_metadata_ready trailers-only or error");
3958
- }
3959
- return;
3960
- }
3961
- // Received valid initial metadata, so commit the call.
3962
- call->RetryCommit(retry_state);
3963
- // Invoke the callback to return the result to the surface.
3964
- // Manually invoking a callback function; it does not take ownership of error.
3965
- call->InvokeRecvInitialMetadataCallback(batch_data, error);
3966
- }
3967
-
3968
- //
3969
- // recv_message callback handling
3970
- //
3971
-
3972
- void ChannelData::RetryingCall::InvokeRecvMessageCallback(void* arg,
3973
- grpc_error* error) {
3974
- SubchannelCallBatchData* batch_data =
3975
- static_cast<SubchannelCallBatchData*>(arg);
3976
- RetryingCall* call = batch_data->call;
3977
- // Find pending op.
3978
- PendingBatch* pending = call->PendingBatchFind(
3979
- "invoking recv_message_ready for",
3980
- [](grpc_transport_stream_op_batch* batch) {
3981
- return batch->recv_message &&
3982
- batch->payload->recv_message.recv_message_ready != nullptr;
3983
- });
3984
- GPR_ASSERT(pending != nullptr);
3985
- // Return payload.
3986
- SubchannelCallRetryState* retry_state =
3987
- static_cast<SubchannelCallRetryState*>(
3988
- batch_data->lb_call->GetParentData());
3989
- *pending->batch->payload->recv_message.recv_message =
3990
- std::move(retry_state->recv_message);
3991
- // Update bookkeeping.
3992
- // Note: Need to do this before invoking the callback, since invoking
3993
- // the callback will result in yielding the call combiner.
3994
- grpc_closure* recv_message_ready =
3995
- pending->batch->payload->recv_message.recv_message_ready;
3996
- pending->batch->payload->recv_message.recv_message_ready = nullptr;
3997
- call->MaybeClearPendingBatch(pending);
3998
- batch_data->Unref();
3999
- // Invoke callback.
4000
- Closure::Run(DEBUG_LOCATION, recv_message_ready, GRPC_ERROR_REF(error));
4001
- }
4002
-
4003
- void ChannelData::RetryingCall::RecvMessageReady(void* arg, grpc_error* error) {
4004
- SubchannelCallBatchData* batch_data =
4005
- static_cast<SubchannelCallBatchData*>(arg);
4006
- RetryingCall* call = batch_data->call;
4007
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4008
- gpr_log(GPR_INFO,
4009
- "chand=%p retrying_call=%p: got recv_message_ready, error=%s",
4010
- call->chand_, call, grpc_error_string(error));
4011
- }
4012
- SubchannelCallRetryState* retry_state =
4013
- static_cast<SubchannelCallRetryState*>(
4014
- batch_data->lb_call->GetParentData());
4015
- ++retry_state->completed_recv_message_count;
4016
- // If a retry was already dispatched, then we're not going to use the
4017
- // result of this recv_message op, so do nothing.
4018
- if (retry_state->retry_dispatched) {
4019
- GRPC_CALL_COMBINER_STOP(call->call_combiner_,
4020
- "recv_message_ready after retry dispatched");
4021
- return;
4022
- }
4023
- // If we got an error or the payload was nullptr and we have not yet gotten
4024
- // the recv_trailing_metadata_ready callback, then defer propagating this
4025
- // callback back to the surface. We can evaluate whether to retry when
4026
- // recv_trailing_metadata comes back.
4027
- if (GPR_UNLIKELY(
4028
- (retry_state->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
4029
- !retry_state->completed_recv_trailing_metadata)) {
4030
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4031
- gpr_log(
4032
- GPR_INFO,
4033
- "chand=%p retrying_call=%p: deferring recv_message_ready (nullptr "
4034
- "message and recv_trailing_metadata pending)",
4035
- call->chand_, call);
4036
- }
4037
- retry_state->recv_message_ready_deferred_batch = batch_data;
4038
- retry_state->recv_message_error = GRPC_ERROR_REF(error);
4039
- if (!retry_state->started_recv_trailing_metadata) {
4040
- // recv_trailing_metadata not yet started by application; start it
4041
- // ourselves to get status.
4042
- call->StartInternalRecvTrailingMetadata();
4043
- } else {
4044
- GRPC_CALL_COMBINER_STOP(call->call_combiner_, "recv_message_ready null");
4045
- }
4046
- return;
4047
- }
4048
- // Received a valid message, so commit the call.
4049
- call->RetryCommit(retry_state);
4050
- // Invoke the callback to return the result to the surface.
4051
- // Manually invoking a callback function; it does not take ownership of error.
4052
- call->InvokeRecvMessageCallback(batch_data, error);
4053
- }
4054
-
4055
- //
4056
- // recv_trailing_metadata handling
4057
- //
4058
-
4059
- void ChannelData::RetryingCall::GetCallStatus(
4060
- grpc_metadata_batch* md_batch, grpc_error* error, grpc_status_code* status,
4061
- grpc_mdelem** server_pushback_md) {
4062
- if (error != GRPC_ERROR_NONE) {
4063
- grpc_error_get_status(error, deadline_, status, nullptr, nullptr, nullptr);
4064
- } else {
4065
- GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
4066
- *status =
4067
- grpc_get_status_code_from_metadata(md_batch->idx.named.grpc_status->md);
4068
- if (server_pushback_md != nullptr &&
4069
- md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
4070
- *server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
4071
- }
4072
- }
4073
- GRPC_ERROR_UNREF(error);
4074
- }
4075
-
4076
- void ChannelData::RetryingCall::AddClosureForRecvTrailingMetadataReady(
4077
- SubchannelCallBatchData* batch_data, grpc_error* error,
4078
- CallCombinerClosureList* closures) {
4079
- // Find pending batch.
4080
- PendingBatch* pending = PendingBatchFind(
4081
- "invoking recv_trailing_metadata for",
4082
- [](grpc_transport_stream_op_batch* batch) {
4083
- return batch->recv_trailing_metadata &&
4084
- batch->payload->recv_trailing_metadata
4085
- .recv_trailing_metadata_ready != nullptr;
4086
- });
4087
- // If we generated the recv_trailing_metadata op internally via
4088
- // StartInternalRecvTrailingMetadata(), then there will be no pending batch.
4089
- if (pending == nullptr) {
4090
- GRPC_ERROR_UNREF(error);
4091
- return;
4092
- }
4093
- // Return metadata.
4094
- SubchannelCallRetryState* retry_state =
4095
- static_cast<SubchannelCallRetryState*>(
4096
- batch_data->lb_call->GetParentData());
4097
- grpc_metadata_batch_move(
4098
- &retry_state->recv_trailing_metadata,
4099
- pending->batch->payload->recv_trailing_metadata.recv_trailing_metadata);
4100
- // Add closure.
4101
- closures->Add(pending->batch->payload->recv_trailing_metadata
4102
- .recv_trailing_metadata_ready,
4103
- error, "recv_trailing_metadata_ready for pending batch");
4104
- // Update bookkeeping.
4105
- pending->batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
4106
- nullptr;
4107
- MaybeClearPendingBatch(pending);
4108
- }
4109
-
4110
- void ChannelData::RetryingCall::AddClosuresForDeferredRecvCallbacks(
4111
- SubchannelCallBatchData* batch_data, SubchannelCallRetryState* retry_state,
4112
- CallCombinerClosureList* closures) {
4113
- if (batch_data->batch.recv_trailing_metadata) {
4114
- // Add closure for deferred recv_initial_metadata_ready.
4115
- if (GPR_UNLIKELY(retry_state->recv_initial_metadata_ready_deferred_batch !=
4116
- nullptr)) {
4117
- GRPC_CLOSURE_INIT(&retry_state->recv_initial_metadata_ready,
4118
- InvokeRecvInitialMetadataCallback,
4119
- retry_state->recv_initial_metadata_ready_deferred_batch,
4120
- grpc_schedule_on_exec_ctx);
4121
- closures->Add(&retry_state->recv_initial_metadata_ready,
4122
- retry_state->recv_initial_metadata_error,
4123
- "resuming recv_initial_metadata_ready");
4124
- retry_state->recv_initial_metadata_ready_deferred_batch = nullptr;
4125
- }
4126
- // Add closure for deferred recv_message_ready.
4127
- if (GPR_UNLIKELY(retry_state->recv_message_ready_deferred_batch !=
4128
- nullptr)) {
4129
- GRPC_CLOSURE_INIT(&retry_state->recv_message_ready,
4130
- InvokeRecvMessageCallback,
4131
- retry_state->recv_message_ready_deferred_batch,
4132
- grpc_schedule_on_exec_ctx);
4133
- closures->Add(&retry_state->recv_message_ready,
4134
- retry_state->recv_message_error,
4135
- "resuming recv_message_ready");
4136
- retry_state->recv_message_ready_deferred_batch = nullptr;
4137
- }
4138
- }
4139
- }
4140
-
4141
- bool ChannelData::RetryingCall::PendingBatchIsUnstarted(
4142
- PendingBatch* pending, SubchannelCallRetryState* retry_state) {
4143
- if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
4144
- return false;
4145
- }
4146
- if (pending->batch->send_initial_metadata &&
4147
- !retry_state->started_send_initial_metadata) {
4148
- return true;
4149
- }
4150
- if (pending->batch->send_message &&
4151
- retry_state->started_send_message_count < send_messages_.size()) {
4152
- return true;
4153
- }
4154
- if (pending->batch->send_trailing_metadata &&
4155
- !retry_state->started_send_trailing_metadata) {
4156
- return true;
4157
- }
4158
- return false;
4159
- }
4160
-
4161
- void ChannelData::RetryingCall::AddClosuresToFailUnstartedPendingBatches(
4162
- SubchannelCallRetryState* retry_state, grpc_error* error,
4163
- CallCombinerClosureList* closures) {
4164
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
4165
- PendingBatch* pending = &pending_batches_[i];
4166
- if (PendingBatchIsUnstarted(pending, retry_state)) {
4167
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4168
- gpr_log(GPR_INFO,
4169
- "chand=%p retrying_call=%p: failing unstarted pending batch at "
4170
- "index "
4171
- "%" PRIuPTR,
4172
- chand_, this, i);
4173
- }
4174
- closures->Add(pending->batch->on_complete, GRPC_ERROR_REF(error),
4175
- "failing on_complete for pending batch");
4176
- pending->batch->on_complete = nullptr;
4177
- MaybeClearPendingBatch(pending);
4178
- }
4179
- }
4180
- GRPC_ERROR_UNREF(error);
4181
- }
4182
-
4183
- void ChannelData::RetryingCall::RunClosuresForCompletedCall(
4184
- SubchannelCallBatchData* batch_data, grpc_error* error) {
4185
- SubchannelCallRetryState* retry_state =
4186
- static_cast<SubchannelCallRetryState*>(
4187
- batch_data->lb_call->GetParentData());
4188
- // Construct list of closures to execute.
4189
- CallCombinerClosureList closures;
4190
- // First, add closure for recv_trailing_metadata_ready.
4191
- AddClosureForRecvTrailingMetadataReady(batch_data, GRPC_ERROR_REF(error),
4192
- &closures);
4193
- // If there are deferred recv_initial_metadata_ready or recv_message_ready
4194
- // callbacks, add them to closures.
4195
- AddClosuresForDeferredRecvCallbacks(batch_data, retry_state, &closures);
4196
- // Add closures to fail any pending batches that have not yet been started.
4197
- AddClosuresToFailUnstartedPendingBatches(retry_state, GRPC_ERROR_REF(error),
4198
- &closures);
4199
- // Don't need batch_data anymore.
4200
- batch_data->Unref();
4201
- // Schedule all of the closures identified above.
4202
- // Note: This will release the call combiner.
4203
- closures.RunClosures(call_combiner_);
4204
- GRPC_ERROR_UNREF(error);
4205
- }
4206
-
4207
- void ChannelData::RetryingCall::RecvTrailingMetadataReady(void* arg,
4208
- grpc_error* error) {
4209
- SubchannelCallBatchData* batch_data =
4210
- static_cast<SubchannelCallBatchData*>(arg);
4211
- RetryingCall* call = batch_data->call;
4212
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4213
- gpr_log(
4214
- GPR_INFO,
4215
- "chand=%p retrying_call=%p: got recv_trailing_metadata_ready, error=%s",
4216
- call->chand_, call, grpc_error_string(error));
4217
- }
4218
- SubchannelCallRetryState* retry_state =
4219
- static_cast<SubchannelCallRetryState*>(
4220
- batch_data->lb_call->GetParentData());
4221
- retry_state->completed_recv_trailing_metadata = true;
4222
- // Get the call's status and check for server pushback metadata.
4223
- grpc_status_code status = GRPC_STATUS_OK;
4224
- grpc_mdelem* server_pushback_md = nullptr;
4225
- grpc_metadata_batch* md_batch =
4226
- batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata;
4227
- call->GetCallStatus(md_batch, GRPC_ERROR_REF(error), &status,
4228
- &server_pushback_md);
4229
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4230
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: call finished, status=%s",
4231
- call->chand_, call, grpc_status_code_to_string(status));
4232
- }
4233
- // Check if we should retry.
4234
- if (call->MaybeRetry(batch_data, status, server_pushback_md)) {
4235
- // Unref batch_data for deferred recv_initial_metadata_ready or
4236
- // recv_message_ready callbacks, if any.
4237
- if (retry_state->recv_initial_metadata_ready_deferred_batch != nullptr) {
4238
- batch_data->Unref();
4239
- GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
4240
- }
4241
- if (retry_state->recv_message_ready_deferred_batch != nullptr) {
4242
- batch_data->Unref();
4243
- GRPC_ERROR_UNREF(retry_state->recv_message_error);
4244
- }
4245
- batch_data->Unref();
4246
- return;
4247
- }
4248
- // Not retrying, so commit the call.
4249
- call->RetryCommit(retry_state);
4250
- // Run any necessary closures.
4251
- call->RunClosuresForCompletedCall(batch_data, GRPC_ERROR_REF(error));
4252
- }
4253
-
4254
- //
4255
- // on_complete callback handling
4256
- //
4257
-
4258
- void ChannelData::RetryingCall::AddClosuresForCompletedPendingBatch(
4259
- SubchannelCallBatchData* batch_data, grpc_error* error,
4260
- CallCombinerClosureList* closures) {
4261
- PendingBatch* pending = PendingBatchFind(
4262
- "completed", [batch_data](grpc_transport_stream_op_batch* batch) {
4263
- // Match the pending batch with the same set of send ops as the
4264
- // subchannel batch we've just completed.
4265
- return batch->on_complete != nullptr &&
4266
- batch_data->batch.send_initial_metadata ==
4267
- batch->send_initial_metadata &&
4268
- batch_data->batch.send_message == batch->send_message &&
4269
- batch_data->batch.send_trailing_metadata ==
4270
- batch->send_trailing_metadata;
4271
- });
4272
- // If batch_data is a replay batch, then there will be no pending
4273
- // batch to complete.
4274
- if (pending == nullptr) {
4275
- GRPC_ERROR_UNREF(error);
4276
- return;
4277
- }
4278
- // Add closure.
4279
- closures->Add(pending->batch->on_complete, error,
4280
- "on_complete for pending batch");
4281
- pending->batch->on_complete = nullptr;
4282
- MaybeClearPendingBatch(pending);
4283
- }
4284
-
4285
- void ChannelData::RetryingCall::AddClosuresForReplayOrPendingSendOps(
4286
- SubchannelCallBatchData* batch_data, SubchannelCallRetryState* retry_state,
4287
- CallCombinerClosureList* closures) {
4288
- bool have_pending_send_message_ops =
4289
- retry_state->started_send_message_count < send_messages_.size();
4290
- bool have_pending_send_trailing_metadata_op =
4291
- seen_send_trailing_metadata_ &&
4292
- !retry_state->started_send_trailing_metadata;
4293
- if (!have_pending_send_message_ops &&
4294
- !have_pending_send_trailing_metadata_op) {
4295
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
4296
- PendingBatch* pending = &pending_batches_[i];
4297
- grpc_transport_stream_op_batch* batch = pending->batch;
4298
- if (batch == nullptr || pending->send_ops_cached) continue;
4299
- if (batch->send_message) have_pending_send_message_ops = true;
4300
- if (batch->send_trailing_metadata) {
4301
- have_pending_send_trailing_metadata_op = true;
4302
- }
4303
- }
4304
- }
4305
- if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
4306
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4307
- gpr_log(GPR_INFO,
4308
- "chand=%p retrying_call=%p: starting next batch for pending send "
4309
- "op(s)",
4310
- chand_, this);
4311
- }
4312
- GRPC_CLOSURE_INIT(&batch_data->batch.handler_private.closure,
4313
- StartRetriableSubchannelBatches, this,
4314
- grpc_schedule_on_exec_ctx);
4315
- closures->Add(&batch_data->batch.handler_private.closure, GRPC_ERROR_NONE,
4316
- "starting next batch for send_* op(s)");
4317
- }
4318
- }
4319
-
4320
- void ChannelData::RetryingCall::OnComplete(void* arg, grpc_error* error) {
4321
- SubchannelCallBatchData* batch_data =
4322
- static_cast<SubchannelCallBatchData*>(arg);
4323
- RetryingCall* call = batch_data->call;
4324
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4325
- gpr_log(GPR_INFO,
4326
- "chand=%p retrying_call=%p: got on_complete, error=%s, batch=%s",
4327
- call->chand_, call, grpc_error_string(error),
4328
- grpc_transport_stream_op_batch_string(&batch_data->batch).c_str());
4329
- }
4330
- SubchannelCallRetryState* retry_state =
4331
- static_cast<SubchannelCallRetryState*>(
4332
- batch_data->lb_call->GetParentData());
4333
- // Update bookkeeping in retry_state.
4334
- if (batch_data->batch.send_initial_metadata) {
4335
- retry_state->completed_send_initial_metadata = true;
4336
- }
4337
- if (batch_data->batch.send_message) {
4338
- ++retry_state->completed_send_message_count;
4339
- }
4340
- if (batch_data->batch.send_trailing_metadata) {
4341
- retry_state->completed_send_trailing_metadata = true;
4342
- }
4343
- // If the call is committed, free cached data for send ops that we've just
4344
- // completed.
4345
- if (call->retry_committed_) {
4346
- call->FreeCachedSendOpDataForCompletedBatch(batch_data, retry_state);
4347
- }
4348
- // Construct list of closures to execute.
4349
- CallCombinerClosureList closures;
4350
- // If a retry was already dispatched, that means we saw
4351
- // recv_trailing_metadata before this, so we do nothing here.
4352
- // Otherwise, invoke the callback to return the result to the surface.
4353
- if (!retry_state->retry_dispatched) {
4354
- // Add closure for the completed pending batch, if any.
4355
- call->AddClosuresForCompletedPendingBatch(batch_data, GRPC_ERROR_REF(error),
4356
- &closures);
4357
- // If needed, add a callback to start any replay or pending send ops on
4358
- // the subchannel call.
4359
- if (!retry_state->completed_recv_trailing_metadata) {
4360
- call->AddClosuresForReplayOrPendingSendOps(batch_data, retry_state,
4361
- &closures);
4362
- }
4363
- }
4364
- // Track number of pending subchannel send batches and determine if this
4365
- // was the last one.
4366
- --call->num_pending_retriable_subchannel_send_batches_;
4367
- const bool last_send_batch_complete =
4368
- call->num_pending_retriable_subchannel_send_batches_ == 0;
4369
- // Don't need batch_data anymore.
4370
- batch_data->Unref();
4371
- // Schedule all of the closures identified above.
4372
- // Note: This yeilds the call combiner.
4373
- closures.RunClosures(call->call_combiner_);
4374
- // If this was the last subchannel send batch, unref the call stack.
4375
- if (last_send_batch_complete) {
4376
- GRPC_CALL_STACK_UNREF(call->owning_call_, "subchannel_send_batches");
4377
- }
4378
- }
4379
-
4380
- //
4381
- // subchannel batch construction
4382
- //
4383
-
4384
- void ChannelData::RetryingCall::StartBatchInCallCombiner(
4385
- void* arg, grpc_error* /*ignored*/) {
4386
- grpc_transport_stream_op_batch* batch =
4387
- static_cast<grpc_transport_stream_op_batch*>(arg);
4388
- auto* lb_call = static_cast<ChannelData::LoadBalancedCall*>(
4389
- batch->handler_private.extra_arg);
4390
- // Note: This will release the call combiner.
4391
- lb_call->StartTransportStreamOpBatch(batch);
4392
- }
4393
-
4394
- void ChannelData::RetryingCall::AddClosureForSubchannelBatch(
4395
- grpc_transport_stream_op_batch* batch, CallCombinerClosureList* closures) {
4396
- batch->handler_private.extra_arg = lb_call_.get();
4397
- GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
4398
- batch, grpc_schedule_on_exec_ctx);
4399
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4400
- gpr_log(GPR_INFO,
4401
- "chand=%p retrying_call=%p: starting subchannel batch: %s", chand_,
4402
- this, grpc_transport_stream_op_batch_string(batch).c_str());
4403
- }
4404
- closures->Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
4405
- "start_subchannel_batch");
4406
- }
4407
-
4408
- void ChannelData::RetryingCall::AddRetriableSendInitialMetadataOp(
4409
- SubchannelCallRetryState* retry_state,
4410
- SubchannelCallBatchData* batch_data) {
4411
- // Maps the number of retries to the corresponding metadata value slice.
4412
- const grpc_slice* retry_count_strings[] = {&GRPC_MDSTR_1, &GRPC_MDSTR_2,
4413
- &GRPC_MDSTR_3, &GRPC_MDSTR_4};
4414
- // We need to make a copy of the metadata batch for each attempt, since
4415
- // the filters in the subchannel stack may modify this batch, and we don't
4416
- // want those modifications to be passed forward to subsequent attempts.
4417
- //
4418
- // If we've already completed one or more attempts, add the
4419
- // grpc-retry-attempts header.
4420
- retry_state->send_initial_metadata_storage =
4421
- static_cast<grpc_linked_mdelem*>(arena_->Alloc(
4422
- sizeof(grpc_linked_mdelem) *
4423
- (send_initial_metadata_.list.count + (num_attempts_completed_ > 0))));
4424
- grpc_metadata_batch_copy(&send_initial_metadata_,
4425
- &retry_state->send_initial_metadata,
4426
- retry_state->send_initial_metadata_storage);
4427
- if (GPR_UNLIKELY(retry_state->send_initial_metadata.idx.named
4428
- .grpc_previous_rpc_attempts != nullptr)) {
4429
- grpc_metadata_batch_remove(&retry_state->send_initial_metadata,
4430
- GRPC_BATCH_GRPC_PREVIOUS_RPC_ATTEMPTS);
4431
- }
4432
- if (GPR_UNLIKELY(num_attempts_completed_ > 0)) {
4433
- grpc_mdelem retry_md = grpc_mdelem_create(
4434
- GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
4435
- *retry_count_strings[num_attempts_completed_ - 1], nullptr);
4436
- grpc_error* error = grpc_metadata_batch_add_tail(
4437
- &retry_state->send_initial_metadata,
4438
- &retry_state
4439
- ->send_initial_metadata_storage[send_initial_metadata_.list.count],
4440
- retry_md, GRPC_BATCH_GRPC_PREVIOUS_RPC_ATTEMPTS);
4441
- if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
4442
- gpr_log(GPR_ERROR, "error adding retry metadata: %s",
4443
- grpc_error_string(error));
4444
- GPR_ASSERT(false);
4445
- }
4446
- }
4447
- retry_state->started_send_initial_metadata = true;
4448
- batch_data->batch.send_initial_metadata = true;
4449
- batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
4450
- &retry_state->send_initial_metadata;
4451
- batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
4452
- send_initial_metadata_flags_;
4453
- batch_data->batch.payload->send_initial_metadata.peer_string = peer_string_;
4454
- }
4455
-
4456
- void ChannelData::RetryingCall::AddRetriableSendMessageOp(
4457
- SubchannelCallRetryState* retry_state,
4458
- SubchannelCallBatchData* batch_data) {
4459
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4460
- gpr_log(GPR_INFO,
4461
- "chand=%p retrying_call=%p: starting calld->send_messages[%" PRIuPTR
4462
- "]",
4463
- chand_, this, retry_state->started_send_message_count);
4464
- }
4465
- ByteStreamCache* cache =
4466
- send_messages_[retry_state->started_send_message_count];
4467
- ++retry_state->started_send_message_count;
4468
- retry_state->send_message.Init(cache);
4469
- batch_data->batch.send_message = true;
4470
- batch_data->batch.payload->send_message.send_message.reset(
4471
- retry_state->send_message.get());
4472
- }
4473
-
4474
- void ChannelData::RetryingCall::AddRetriableSendTrailingMetadataOp(
4475
- SubchannelCallRetryState* retry_state,
4476
- SubchannelCallBatchData* batch_data) {
4477
- // We need to make a copy of the metadata batch for each attempt, since
4478
- // the filters in the subchannel stack may modify this batch, and we don't
4479
- // want those modifications to be passed forward to subsequent attempts.
4480
- retry_state->send_trailing_metadata_storage =
4481
- static_cast<grpc_linked_mdelem*>(arena_->Alloc(
4482
- sizeof(grpc_linked_mdelem) * send_trailing_metadata_.list.count));
4483
- grpc_metadata_batch_copy(&send_trailing_metadata_,
4484
- &retry_state->send_trailing_metadata,
4485
- retry_state->send_trailing_metadata_storage);
4486
- retry_state->started_send_trailing_metadata = true;
4487
- batch_data->batch.send_trailing_metadata = true;
4488
- batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
4489
- &retry_state->send_trailing_metadata;
4490
- }
4491
-
4492
- void ChannelData::RetryingCall::AddRetriableRecvInitialMetadataOp(
4493
- SubchannelCallRetryState* retry_state,
4494
- SubchannelCallBatchData* batch_data) {
4495
- retry_state->started_recv_initial_metadata = true;
4496
- batch_data->batch.recv_initial_metadata = true;
4497
- grpc_metadata_batch_init(&retry_state->recv_initial_metadata);
4498
- batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
4499
- &retry_state->recv_initial_metadata;
4500
- batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
4501
- &retry_state->trailing_metadata_available;
4502
- GRPC_CLOSURE_INIT(&retry_state->recv_initial_metadata_ready,
4503
- RecvInitialMetadataReady, batch_data,
4504
- grpc_schedule_on_exec_ctx);
4505
- batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
4506
- &retry_state->recv_initial_metadata_ready;
4507
- }
4508
-
4509
- void ChannelData::RetryingCall::AddRetriableRecvMessageOp(
4510
- SubchannelCallRetryState* retry_state,
4511
- SubchannelCallBatchData* batch_data) {
4512
- ++retry_state->started_recv_message_count;
4513
- batch_data->batch.recv_message = true;
4514
- batch_data->batch.payload->recv_message.recv_message =
4515
- &retry_state->recv_message;
4516
- GRPC_CLOSURE_INIT(&retry_state->recv_message_ready, RecvMessageReady,
4517
- batch_data, grpc_schedule_on_exec_ctx);
4518
- batch_data->batch.payload->recv_message.recv_message_ready =
4519
- &retry_state->recv_message_ready;
4520
- }
4521
-
4522
- void ChannelData::RetryingCall::AddRetriableRecvTrailingMetadataOp(
4523
- SubchannelCallRetryState* retry_state,
4524
- SubchannelCallBatchData* batch_data) {
4525
- retry_state->started_recv_trailing_metadata = true;
4526
- batch_data->batch.recv_trailing_metadata = true;
4527
- grpc_metadata_batch_init(&retry_state->recv_trailing_metadata);
4528
- batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
4529
- &retry_state->recv_trailing_metadata;
4530
- batch_data->batch.payload->recv_trailing_metadata.collect_stats =
4531
- &retry_state->collect_stats;
4532
- GRPC_CLOSURE_INIT(&retry_state->recv_trailing_metadata_ready,
4533
- RecvTrailingMetadataReady, batch_data,
4534
- grpc_schedule_on_exec_ctx);
4535
- batch_data->batch.payload->recv_trailing_metadata
4536
- .recv_trailing_metadata_ready =
4537
- &retry_state->recv_trailing_metadata_ready;
4538
- }
4539
-
4540
- void ChannelData::RetryingCall::StartInternalRecvTrailingMetadata() {
4541
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4542
- gpr_log(
4543
- GPR_INFO,
4544
- "chand=%p retrying_call=%p: call failed but recv_trailing_metadata not "
4545
- "started; starting it internally",
4546
- chand_, this);
4547
- }
4548
- SubchannelCallRetryState* retry_state =
4549
- static_cast<SubchannelCallRetryState*>(lb_call_->GetParentData());
4550
- // Create batch_data with 2 refs, since this batch will be unreffed twice:
4551
- // once for the recv_trailing_metadata_ready callback when the subchannel
4552
- // batch returns, and again when we actually get a recv_trailing_metadata
4553
- // op from the surface.
4554
- SubchannelCallBatchData* batch_data =
4555
- SubchannelCallBatchData::Create(this, 2, false /* set_on_complete */);
4556
- AddRetriableRecvTrailingMetadataOp(retry_state, batch_data);
4557
- retry_state->recv_trailing_metadata_internal_batch = batch_data;
4558
- // Note: This will release the call combiner.
4559
- lb_call_->StartTransportStreamOpBatch(&batch_data->batch);
4560
- }
4561
-
4562
- // If there are any cached send ops that need to be replayed on the
4563
- // current subchannel call, creates and returns a new subchannel batch
4564
- // to replay those ops. Otherwise, returns nullptr.
4565
- ChannelData::RetryingCall::SubchannelCallBatchData*
4566
- ChannelData::RetryingCall::MaybeCreateSubchannelBatchForReplay(
4567
- SubchannelCallRetryState* retry_state) {
4568
- SubchannelCallBatchData* replay_batch_data = nullptr;
4569
- // send_initial_metadata.
4570
- if (seen_send_initial_metadata_ &&
4571
- !retry_state->started_send_initial_metadata &&
4572
- !pending_send_initial_metadata_) {
4573
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4574
- gpr_log(GPR_INFO,
4575
- "chand=%p retrying_call=%p: replaying previously completed "
4576
- "send_initial_metadata op",
4577
- chand_, this);
4578
- }
4579
- replay_batch_data =
4580
- SubchannelCallBatchData::Create(this, 1, true /* set_on_complete */);
4581
- AddRetriableSendInitialMetadataOp(retry_state, replay_batch_data);
4582
- }
4583
- // send_message.
4584
- // Note that we can only have one send_message op in flight at a time.
4585
- if (retry_state->started_send_message_count < send_messages_.size() &&
4586
- retry_state->started_send_message_count ==
4587
- retry_state->completed_send_message_count &&
4588
- !pending_send_message_) {
4589
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4590
- gpr_log(GPR_INFO,
4591
- "chand=%p retrying_call=%p: replaying previously completed "
4592
- "send_message op",
4593
- chand_, this);
4594
- }
4595
- if (replay_batch_data == nullptr) {
4596
- replay_batch_data =
4597
- SubchannelCallBatchData::Create(this, 1, true /* set_on_complete */);
4598
- }
4599
- AddRetriableSendMessageOp(retry_state, replay_batch_data);
4600
- }
4601
- // send_trailing_metadata.
4602
- // Note that we only add this op if we have no more send_message ops
4603
- // to start, since we can't send down any more send_message ops after
4604
- // send_trailing_metadata.
4605
- if (seen_send_trailing_metadata_ &&
4606
- retry_state->started_send_message_count == send_messages_.size() &&
4607
- !retry_state->started_send_trailing_metadata &&
4608
- !pending_send_trailing_metadata_) {
4609
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4610
- gpr_log(GPR_INFO,
4611
- "chand=%p retrying_call=%p: replaying previously completed "
4612
- "send_trailing_metadata op",
4613
- chand_, this);
4614
- }
4615
- if (replay_batch_data == nullptr) {
4616
- replay_batch_data =
4617
- SubchannelCallBatchData::Create(this, 1, true /* set_on_complete */);
4618
- }
4619
- AddRetriableSendTrailingMetadataOp(retry_state, replay_batch_data);
4620
- }
4621
- return replay_batch_data;
4622
- }
4623
-
4624
- void ChannelData::RetryingCall::AddSubchannelBatchesForPendingBatches(
4625
- SubchannelCallRetryState* retry_state, CallCombinerClosureList* closures) {
4626
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
4627
- PendingBatch* pending = &pending_batches_[i];
4628
- grpc_transport_stream_op_batch* batch = pending->batch;
4629
- if (batch == nullptr) continue;
4630
- // Skip any batch that either (a) has already been started on this
4631
- // subchannel call or (b) we can't start yet because we're still
4632
- // replaying send ops that need to be completed first.
4633
- // TODO(roth): Note that if any one op in the batch can't be sent
4634
- // yet due to ops that we're replaying, we don't start any of the ops
4635
- // in the batch. This is probably okay, but it could conceivably
4636
- // lead to increased latency in some cases -- e.g., we could delay
4637
- // starting a recv op due to it being in the same batch with a send
4638
- // op. If/when we revamp the callback protocol in
4639
- // transport_stream_op_batch, we may be able to fix this.
4640
- if (batch->send_initial_metadata &&
4641
- retry_state->started_send_initial_metadata) {
4642
- continue;
4643
- }
4644
- if (batch->send_message && retry_state->completed_send_message_count <
4645
- retry_state->started_send_message_count) {
4646
- continue;
4647
- }
4648
- // Note that we only start send_trailing_metadata if we have no more
4649
- // send_message ops to start, since we can't send down any more
4650
- // send_message ops after send_trailing_metadata.
4651
- if (batch->send_trailing_metadata &&
4652
- (retry_state->started_send_message_count + batch->send_message <
4653
- send_messages_.size() ||
4654
- retry_state->started_send_trailing_metadata)) {
4655
- continue;
4656
- }
4657
- if (batch->recv_initial_metadata &&
4658
- retry_state->started_recv_initial_metadata) {
4659
- continue;
4660
- }
4661
- if (batch->recv_message && retry_state->completed_recv_message_count <
4662
- retry_state->started_recv_message_count) {
4663
- continue;
4664
- }
4665
- if (batch->recv_trailing_metadata &&
4666
- retry_state->started_recv_trailing_metadata) {
4667
- // If we previously completed a recv_trailing_metadata op
4668
- // initiated by StartInternalRecvTrailingMetadata(), use the
4669
- // result of that instead of trying to re-start this op.
4670
- if (GPR_UNLIKELY((retry_state->recv_trailing_metadata_internal_batch !=
4671
- nullptr))) {
4672
- // If the batch completed, then trigger the completion callback
4673
- // directly, so that we return the previously returned results to
4674
- // the application. Otherwise, just unref the internally
4675
- // started subchannel batch, since we'll propagate the
4676
- // completion when it completes.
4677
- if (retry_state->completed_recv_trailing_metadata) {
4678
- // Batches containing recv_trailing_metadata always succeed.
4679
- closures->Add(
4680
- &retry_state->recv_trailing_metadata_ready, GRPC_ERROR_NONE,
4681
- "re-executing recv_trailing_metadata_ready to propagate "
4682
- "internally triggered result");
4683
- } else {
4684
- retry_state->recv_trailing_metadata_internal_batch->Unref();
4685
- }
4686
- retry_state->recv_trailing_metadata_internal_batch = nullptr;
4687
- }
4688
- continue;
4689
- }
4690
- // If we're not retrying, just send the batch as-is.
4691
- // TODO(roth): This condition doesn't seem exactly right -- maybe need a
4692
- // notion of "draining" once we've committed and are done replaying?
4693
- if (retry_policy_ == nullptr || retry_committed_) {
4694
- AddClosureForSubchannelBatch(batch, closures);
4695
- PendingBatchClear(pending);
4696
- continue;
4697
- }
4698
- // Create batch with the right number of callbacks.
4699
- const bool has_send_ops = batch->send_initial_metadata ||
4700
- batch->send_message ||
4701
- batch->send_trailing_metadata;
4702
- const int num_callbacks = has_send_ops + batch->recv_initial_metadata +
4703
- batch->recv_message +
4704
- batch->recv_trailing_metadata;
4705
- SubchannelCallBatchData* batch_data = SubchannelCallBatchData::Create(
4706
- this, num_callbacks, has_send_ops /* set_on_complete */);
4707
- // Cache send ops if needed.
4708
- MaybeCacheSendOpsForBatch(pending);
4709
- // send_initial_metadata.
4710
- if (batch->send_initial_metadata) {
4711
- AddRetriableSendInitialMetadataOp(retry_state, batch_data);
4712
- }
4713
- // send_message.
4714
- if (batch->send_message) {
4715
- AddRetriableSendMessageOp(retry_state, batch_data);
4716
- }
4717
- // send_trailing_metadata.
4718
- if (batch->send_trailing_metadata) {
4719
- AddRetriableSendTrailingMetadataOp(retry_state, batch_data);
4720
- }
4721
- // recv_initial_metadata.
4722
- if (batch->recv_initial_metadata) {
4723
- // recv_flags is only used on the server side.
4724
- GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr);
4725
- AddRetriableRecvInitialMetadataOp(retry_state, batch_data);
4726
- }
4727
- // recv_message.
4728
- if (batch->recv_message) {
4729
- AddRetriableRecvMessageOp(retry_state, batch_data);
4730
- }
4731
- // recv_trailing_metadata.
4732
- if (batch->recv_trailing_metadata) {
4733
- AddRetriableRecvTrailingMetadataOp(retry_state, batch_data);
4734
- }
4735
- AddClosureForSubchannelBatch(&batch_data->batch, closures);
4736
- // Track number of pending subchannel send batches.
4737
- // If this is the first one, take a ref to the call stack.
4738
- if (batch->send_initial_metadata || batch->send_message ||
4739
- batch->send_trailing_metadata) {
4740
- if (num_pending_retriable_subchannel_send_batches_ == 0) {
4741
- GRPC_CALL_STACK_REF(owning_call_, "subchannel_send_batches");
4742
- }
4743
- ++num_pending_retriable_subchannel_send_batches_;
4744
- }
4745
- }
4746
- }
4747
-
4748
- void ChannelData::RetryingCall::StartRetriableSubchannelBatches(
4749
- void* arg, grpc_error* /*ignored*/) {
4750
- RetryingCall* call = static_cast<RetryingCall*>(arg);
4751
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4752
- gpr_log(GPR_INFO,
4753
- "chand=%p retrying_call=%p: constructing retriable batches",
4754
- call->chand_, call);
4755
- }
4756
- SubchannelCallRetryState* retry_state =
4757
- static_cast<SubchannelCallRetryState*>(call->lb_call_->GetParentData());
4758
- // Construct list of closures to execute, one for each pending batch.
4759
- CallCombinerClosureList closures;
4760
- // Replay previously-returned send_* ops if needed.
4761
- SubchannelCallBatchData* replay_batch_data =
4762
- call->MaybeCreateSubchannelBatchForReplay(retry_state);
4763
- if (replay_batch_data != nullptr) {
4764
- call->AddClosureForSubchannelBatch(&replay_batch_data->batch, &closures);
4765
- // Track number of pending subchannel send batches.
4766
- // If this is the first one, take a ref to the call stack.
4767
- if (call->num_pending_retriable_subchannel_send_batches_ == 0) {
4768
- GRPC_CALL_STACK_REF(call->owning_call_, "subchannel_send_batches");
4769
- }
4770
- ++call->num_pending_retriable_subchannel_send_batches_;
4771
- }
4772
- // Now add pending batches.
4773
- call->AddSubchannelBatchesForPendingBatches(retry_state, &closures);
4774
- // Start batches on subchannel call.
4775
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
4776
- gpr_log(GPR_INFO,
4777
- "chand=%p retrying_call=%p: starting %" PRIuPTR
4778
- " retriable batches on lb_call=%p",
4779
- call->chand_, call, closures.size(), call->lb_call_.get());
4780
- }
4781
- // Note: This will yield the call combiner.
4782
- closures.RunClosures(call->call_combiner_);
4783
- }
4784
-
4785
- void ChannelData::RetryingCall::CreateLbCall(void* arg, grpc_error* /*error*/) {
4786
- auto* call = static_cast<RetryingCall*>(arg);
4787
- const size_t parent_data_size =
4788
- call->enable_retries_ ? sizeof(SubchannelCallRetryState) : 0;
4789
- grpc_call_element_args args = {call->owning_call_, nullptr,
4790
- call->call_context_, call->path_,
4791
- call->call_start_time_, call->deadline_,
4792
- call->arena_, call->call_combiner_};
4793
- call->lb_call_ = ChannelData::LoadBalancedCall::Create(
4794
- call->chand_, args, call->pollent_, parent_data_size);
4795
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
4796
- gpr_log(GPR_INFO, "chand=%p retrying_call=%p: create lb_call=%p",
4797
- call->chand_, call, call->lb_call_.get());
4798
- }
4799
- if (parent_data_size > 0) {
4800
- new (call->lb_call_->GetParentData())
4801
- SubchannelCallRetryState(call->call_context_);
4802
- }
4803
- call->PendingBatchesResume();
4804
- }
4805
-
4806
- //
4807
- // ChannelData::LoadBalancedCall::Metadata
4808
- //
4809
-
4810
- class ChannelData::LoadBalancedCall::Metadata
4811
- : public LoadBalancingPolicy::MetadataInterface {
4812
- public:
4813
- Metadata(LoadBalancedCall* lb_call, grpc_metadata_batch* batch)
4814
- : lb_call_(lb_call), batch_(batch) {}
4815
-
4816
- void Add(absl::string_view key, absl::string_view value) override {
4817
- grpc_linked_mdelem* linked_mdelem = static_cast<grpc_linked_mdelem*>(
4818
- lb_call_->arena_->Alloc(sizeof(grpc_linked_mdelem)));
4819
- linked_mdelem->md = grpc_mdelem_from_slices(
4820
- ExternallyManagedSlice(key.data(), key.size()),
4821
- ExternallyManagedSlice(value.data(), value.size()));
4822
- GPR_ASSERT(grpc_metadata_batch_link_tail(batch_, linked_mdelem) ==
4823
- GRPC_ERROR_NONE);
4824
- }
4825
-
4826
- iterator begin() const override {
4827
- static_assert(sizeof(grpc_linked_mdelem*) <= sizeof(intptr_t),
4828
- "iterator size too large");
4829
- return iterator(
4830
- this, reinterpret_cast<intptr_t>(MaybeSkipEntry(batch_->list.head)));
4831
- }
4832
- iterator end() const override {
4833
- static_assert(sizeof(grpc_linked_mdelem*) <= sizeof(intptr_t),
4834
- "iterator size too large");
4835
- return iterator(this, 0);
4836
- }
4837
-
4838
- iterator erase(iterator it) override {
4839
- grpc_linked_mdelem* linked_mdelem =
4840
- reinterpret_cast<grpc_linked_mdelem*>(GetIteratorHandle(it));
4841
- intptr_t handle = reinterpret_cast<intptr_t>(linked_mdelem->next);
4842
- grpc_metadata_batch_remove(batch_, linked_mdelem);
4843
- return iterator(this, handle);
4844
- }
4845
-
4846
- private:
4847
- grpc_linked_mdelem* MaybeSkipEntry(grpc_linked_mdelem* entry) const {
4848
- if (entry != nullptr && batch_->idx.named.path == entry) {
4849
- return entry->next;
4850
- }
4851
- return entry;
4852
- }
4853
-
4854
- intptr_t IteratorHandleNext(intptr_t handle) const override {
4855
- grpc_linked_mdelem* linked_mdelem =
4856
- reinterpret_cast<grpc_linked_mdelem*>(handle);
4857
- return reinterpret_cast<intptr_t>(MaybeSkipEntry(linked_mdelem->next));
4858
- }
4859
-
4860
- std::pair<absl::string_view, absl::string_view> IteratorHandleGet(
4861
- intptr_t handle) const override {
4862
- grpc_linked_mdelem* linked_mdelem =
4863
- reinterpret_cast<grpc_linked_mdelem*>(handle);
4864
- return std::make_pair(StringViewFromSlice(GRPC_MDKEY(linked_mdelem->md)),
4865
- StringViewFromSlice(GRPC_MDVALUE(linked_mdelem->md)));
4866
- }
4867
-
4868
- LoadBalancedCall* lb_call_;
4869
- grpc_metadata_batch* batch_;
4870
- };
4871
-
4872
- //
4873
- // ChannelData::LoadBalancedCall::LbCallState
4874
- //
4875
-
4876
- class ChannelData::LoadBalancedCall::LbCallState
4877
- : public LoadBalancingPolicy::CallState {
4878
- public:
4879
- explicit LbCallState(LoadBalancedCall* lb_call) : lb_call_(lb_call) {}
4880
-
4881
- void* Alloc(size_t size) override { return lb_call_->arena_->Alloc(size); }
4882
-
4883
- const LoadBalancingPolicy::BackendMetricData* GetBackendMetricData()
4884
- override {
4885
- if (lb_call_->backend_metric_data_ == nullptr) {
4886
- grpc_linked_mdelem* md = lb_call_->recv_trailing_metadata_->idx.named
4887
- .x_endpoint_load_metrics_bin;
4888
- if (md != nullptr) {
4889
- lb_call_->backend_metric_data_ =
4890
- ParseBackendMetricData(GRPC_MDVALUE(md->md), lb_call_->arena_);
4891
- }
4892
- }
4893
- return lb_call_->backend_metric_data_;
4894
- }
4895
-
4896
- absl::string_view ExperimentalGetCallAttribute(const char* key) override {
4897
- auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
4898
- lb_call_->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
4899
- auto& call_attributes = service_config_call_data->call_attributes();
4900
- auto it = call_attributes.find(key);
4901
- if (it == call_attributes.end()) return absl::string_view();
4902
- return it->second;
4903
- }
4904
-
4905
- private:
4906
- LoadBalancedCall* lb_call_;
4907
- };
4908
-
4909
- //
4910
- // LoadBalancedCall
4911
- //
4912
-
4913
- RefCountedPtr<ChannelData::LoadBalancedCall>
4914
- ChannelData::LoadBalancedCall::Create(ChannelData* chand,
4915
- const grpc_call_element_args& args,
4916
- grpc_polling_entity* pollent,
4917
- size_t parent_data_size) {
4918
- const size_t alloc_size =
4919
- parent_data_size > 0
4920
- ? (GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(LoadBalancedCall)) +
4921
- parent_data_size)
4922
- : sizeof(LoadBalancedCall);
4923
- auto* lb_call = static_cast<LoadBalancedCall*>(args.arena->Alloc(alloc_size));
4924
- new (lb_call) LoadBalancedCall(chand, args, pollent);
4925
- return lb_call;
4926
- }
4927
-
4928
- ChannelData::LoadBalancedCall::LoadBalancedCall(
4929
- ChannelData* chand, const grpc_call_element_args& args,
4930
- grpc_polling_entity* pollent)
4931
- : refs_(1, GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
4932
- ? "LoadBalancedCall"
4933
- : nullptr),
4934
- chand_(chand),
4935
- path_(grpc_slice_ref_internal(args.path)),
4936
- call_start_time_(args.start_time),
4937
- deadline_(args.deadline),
4938
- arena_(args.arena),
4939
- owning_call_(args.call_stack),
4940
- call_combiner_(args.call_combiner),
4941
- call_context_(args.context),
4942
- pollent_(pollent) {}
4943
-
4944
- ChannelData::LoadBalancedCall::~LoadBalancedCall() {
4945
- grpc_slice_unref_internal(path_);
4946
- GRPC_ERROR_UNREF(cancel_error_);
4947
- if (backend_metric_data_ != nullptr) {
4948
- backend_metric_data_
4949
- ->LoadBalancingPolicy::BackendMetricData::~BackendMetricData();
4950
- }
4951
- // Make sure there are no remaining pending batches.
4952
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
4953
- GPR_ASSERT(pending_batches_[i] == nullptr);
4954
- }
4955
- }
4956
-
4957
- RefCountedPtr<ChannelData::LoadBalancedCall>
4958
- ChannelData::LoadBalancedCall::Ref() {
4959
- IncrementRefCount();
4960
- return RefCountedPtr<LoadBalancedCall>(this);
4961
- }
4962
-
4963
- RefCountedPtr<ChannelData::LoadBalancedCall> ChannelData::LoadBalancedCall::Ref(
4964
- const DebugLocation& location, const char* reason) {
4965
- IncrementRefCount(location, reason);
4966
- return RefCountedPtr<LoadBalancedCall>(this);
4967
- }
4968
-
4969
- void ChannelData::LoadBalancedCall::Unref() {
4970
- if (GPR_UNLIKELY(refs_.Unref())) {
4971
- this->~LoadBalancedCall();
4972
- }
4973
- }
4974
-
4975
- void ChannelData::LoadBalancedCall::Unref(const DebugLocation& location,
4976
- const char* reason) {
4977
- if (GPR_UNLIKELY(refs_.Unref(location, reason))) {
4978
- this->~LoadBalancedCall();
4979
- }
4980
- }
4981
-
4982
- void ChannelData::LoadBalancedCall::IncrementRefCount() { refs_.Ref(); }
4983
-
4984
- void ChannelData::LoadBalancedCall::IncrementRefCount(
4985
- const DebugLocation& location, const char* reason) {
4986
- refs_.Ref(location, reason);
4987
- }
4988
-
4989
- void* ChannelData::LoadBalancedCall::GetParentData() {
4990
- return reinterpret_cast<char*>(this) +
4991
- GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(LoadBalancedCall));
4992
- }
4993
-
4994
- size_t ChannelData::LoadBalancedCall::GetBatchIndex(
4995
- grpc_transport_stream_op_batch* batch) {
4996
- // Note: It is important the send_initial_metadata be the first entry
4997
- // here, since the code in pick_subchannel_locked() assumes it will be.
4998
- if (batch->send_initial_metadata) return 0;
4999
- if (batch->send_message) return 1;
5000
- if (batch->send_trailing_metadata) return 2;
5001
- if (batch->recv_initial_metadata) return 3;
5002
- if (batch->recv_message) return 4;
5003
- if (batch->recv_trailing_metadata) return 5;
5004
- GPR_UNREACHABLE_CODE(return (size_t)-1);
5005
- }
5006
-
5007
- // This is called via the call combiner, so access to calld is synchronized.
5008
- void ChannelData::LoadBalancedCall::PendingBatchesAdd(
5009
- grpc_transport_stream_op_batch* batch) {
5010
- const size_t idx = GetBatchIndex(batch);
5011
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
5012
- gpr_log(GPR_INFO,
5013
- "chand=%p lb_call=%p: adding pending batch at index %" PRIuPTR,
5014
- chand_, this, idx);
5015
- }
5016
- GPR_ASSERT(pending_batches_[idx] == nullptr);
5017
- pending_batches_[idx] = batch;
5018
- }
5019
-
5020
- // This is called via the call combiner, so access to calld is synchronized.
5021
- void ChannelData::LoadBalancedCall::FailPendingBatchInCallCombiner(
5022
- void* arg, grpc_error* error) {
5023
- grpc_transport_stream_op_batch* batch =
5024
- static_cast<grpc_transport_stream_op_batch*>(arg);
5025
- auto* self = static_cast<LoadBalancedCall*>(batch->handler_private.extra_arg);
5026
- // Note: This will release the call combiner.
5027
- grpc_transport_stream_op_batch_finish_with_failure(
5028
- batch, GRPC_ERROR_REF(error), self->call_combiner_);
5029
- }
5030
-
5031
- // This is called via the call combiner, so access to calld is synchronized.
5032
- void ChannelData::LoadBalancedCall::PendingBatchesFail(
5033
- grpc_error* error,
5034
- YieldCallCombinerPredicate yield_call_combiner_predicate) {
5035
- GPR_ASSERT(error != GRPC_ERROR_NONE);
5036
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
5037
- size_t num_batches = 0;
5038
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
5039
- if (pending_batches_[i] != nullptr) ++num_batches;
5040
- }
5041
- gpr_log(GPR_INFO,
5042
- "chand=%p lb_call=%p: failing %" PRIuPTR " pending batches: %s",
5043
- chand_, this, num_batches, grpc_error_string(error));
5044
- }
5045
- CallCombinerClosureList closures;
5046
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
5047
- grpc_transport_stream_op_batch*& batch = pending_batches_[i];
5048
- if (batch != nullptr) {
5049
- batch->handler_private.extra_arg = this;
5050
- GRPC_CLOSURE_INIT(&batch->handler_private.closure,
5051
- FailPendingBatchInCallCombiner, batch,
5052
- grpc_schedule_on_exec_ctx);
5053
- closures.Add(&batch->handler_private.closure, GRPC_ERROR_REF(error),
5054
- "PendingBatchesFail");
5055
- batch = nullptr;
5056
- }
5057
- }
5058
- if (yield_call_combiner_predicate(closures)) {
5059
- closures.RunClosures(call_combiner_);
5060
- } else {
5061
- closures.RunClosuresWithoutYielding(call_combiner_);
5062
- }
5063
- GRPC_ERROR_UNREF(error);
5064
- }
5065
-
5066
- // This is called via the call combiner, so access to calld is synchronized.
5067
- void ChannelData::LoadBalancedCall::ResumePendingBatchInCallCombiner(
5068
- void* arg, grpc_error* /*ignored*/) {
5069
- grpc_transport_stream_op_batch* batch =
5070
- static_cast<grpc_transport_stream_op_batch*>(arg);
5071
- SubchannelCall* subchannel_call =
5072
- static_cast<SubchannelCall*>(batch->handler_private.extra_arg);
5073
- // Note: This will release the call combiner.
5074
- subchannel_call->StartTransportStreamOpBatch(batch);
5075
- }
5076
-
5077
- // This is called via the call combiner, so access to calld is synchronized.
5078
- void ChannelData::LoadBalancedCall::PendingBatchesResume() {
5079
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
5080
- size_t num_batches = 0;
5081
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
5082
- if (pending_batches_[i] != nullptr) ++num_batches;
5083
- }
5084
- gpr_log(GPR_INFO,
5085
- "chand=%p lb_call=%p: starting %" PRIuPTR
5086
- " pending batches on subchannel_call=%p",
5087
- chand_, this, num_batches, subchannel_call_.get());
5088
- }
5089
- CallCombinerClosureList closures;
5090
- for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
5091
- grpc_transport_stream_op_batch*& batch = pending_batches_[i];
5092
- if (batch != nullptr) {
5093
- batch->handler_private.extra_arg = subchannel_call_.get();
5094
- GRPC_CLOSURE_INIT(&batch->handler_private.closure,
5095
- ResumePendingBatchInCallCombiner, batch,
5096
- grpc_schedule_on_exec_ctx);
5097
- closures.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
5098
- "PendingBatchesResume");
5099
- batch = nullptr;
5100
- }
5101
- }
5102
- // Note: This will release the call combiner.
5103
- closures.RunClosures(call_combiner_);
5104
- }
5105
-
5106
- void ChannelData::LoadBalancedCall::StartTransportStreamOpBatch(
5107
- grpc_transport_stream_op_batch* batch) {
5108
- // Intercept recv_trailing_metadata_ready for LB callback.
5109
- if (batch->recv_trailing_metadata) {
5110
- InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
2681
+ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
2682
+ grpc_transport_stream_op_batch* batch) {
2683
+ // Intercept recv_trailing_metadata_ready for LB callback.
2684
+ if (batch->recv_trailing_metadata) {
2685
+ InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
5111
2686
  }
5112
2687
  // If we've previously been cancelled, immediately fail any new batches.
5113
2688
  if (GPR_UNLIKELY(cancel_error_ != GRPC_ERROR_NONE)) {
5114
2689
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
5115
2690
  gpr_log(GPR_INFO, "chand=%p lb_call=%p: failing batch with error: %s",
5116
- chand_, this, grpc_error_string(cancel_error_));
2691
+ chand_, this, grpc_error_std_string(cancel_error_).c_str());
5117
2692
  }
5118
2693
  // Note: This will release the call combiner.
5119
2694
  grpc_transport_stream_op_batch_finish_with_failure(
@@ -5131,7 +2706,7 @@ void ChannelData::LoadBalancedCall::StartTransportStreamOpBatch(
5131
2706
  cancel_error_ = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
5132
2707
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
5133
2708
  gpr_log(GPR_INFO, "chand=%p lb_call=%p: recording cancel_error=%s",
5134
- chand_, this, grpc_error_string(cancel_error_));
2709
+ chand_, this, grpc_error_std_string(cancel_error_).c_str());
5135
2710
  }
5136
2711
  // If we do not have a subchannel call (i.e., a pick has not yet
5137
2712
  // been started), fail all pending batches. Otherwise, send the
@@ -5184,13 +2759,13 @@ void ChannelData::LoadBalancedCall::StartTransportStreamOpBatch(
5184
2759
  }
5185
2760
  }
5186
2761
 
5187
- void ChannelData::LoadBalancedCall::
2762
+ void ClientChannel::LoadBalancedCall::
5188
2763
  RecvTrailingMetadataReadyForLoadBalancingPolicy(void* arg,
5189
- grpc_error* error) {
2764
+ grpc_error_handle error) {
5190
2765
  auto* self = static_cast<LoadBalancedCall*>(arg);
5191
2766
  if (self->lb_recv_trailing_metadata_ready_ != nullptr) {
5192
2767
  // Set error if call did not succeed.
5193
- grpc_error* error_for_lb = GRPC_ERROR_NONE;
2768
+ grpc_error_handle error_for_lb = GRPC_ERROR_NONE;
5194
2769
  if (error != GRPC_ERROR_NONE) {
5195
2770
  error_for_lb = error;
5196
2771
  } else {
@@ -5218,13 +2793,17 @@ void ChannelData::LoadBalancedCall::
5218
2793
  if (error == GRPC_ERROR_NONE) GRPC_ERROR_UNREF(error_for_lb);
5219
2794
  }
5220
2795
  // Chain to original callback.
2796
+ if (self->failure_error_ != GRPC_ERROR_NONE) {
2797
+ error = self->failure_error_;
2798
+ self->failure_error_ = GRPC_ERROR_NONE;
2799
+ } else {
2800
+ error = GRPC_ERROR_REF(error);
2801
+ }
5221
2802
  Closure::Run(DEBUG_LOCATION, self->original_recv_trailing_metadata_ready_,
5222
- GRPC_ERROR_REF(error));
2803
+ error);
5223
2804
  }
5224
2805
 
5225
- // TODO(roth): Consider not intercepting this callback unless we
5226
- // actually need to, if this causes a performance problem.
5227
- void ChannelData::LoadBalancedCall::
2806
+ void ClientChannel::LoadBalancedCall::
5228
2807
  InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
5229
2808
  grpc_transport_stream_op_batch* batch) {
5230
2809
  recv_trailing_metadata_ =
@@ -5238,19 +2817,23 @@ void ChannelData::LoadBalancedCall::
5238
2817
  &recv_trailing_metadata_ready_;
5239
2818
  }
5240
2819
 
5241
- void ChannelData::LoadBalancedCall::CreateSubchannelCall() {
2820
+ void ClientChannel::LoadBalancedCall::CreateSubchannelCall() {
5242
2821
  SubchannelCall::Args call_args = {
5243
2822
  std::move(connected_subchannel_), pollent_, path_, call_start_time_,
5244
2823
  deadline_, arena_,
5245
2824
  // TODO(roth): When we implement hedging support, we will probably
5246
2825
  // need to use a separate call context for each subchannel call.
5247
2826
  call_context_, call_combiner_};
5248
- grpc_error* error = GRPC_ERROR_NONE;
2827
+ grpc_error_handle error = GRPC_ERROR_NONE;
5249
2828
  subchannel_call_ = SubchannelCall::Create(std::move(call_args), &error);
5250
2829
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5251
2830
  gpr_log(GPR_INFO,
5252
2831
  "chand=%p lb_call=%p: create subchannel_call=%p: error=%s", chand_,
5253
- this, subchannel_call_.get(), grpc_error_string(error));
2832
+ this, subchannel_call_.get(), grpc_error_std_string(error).c_str());
2833
+ }
2834
+ if (on_call_destruction_complete_ != nullptr) {
2835
+ subchannel_call_->SetAfterCallStackDestroy(on_call_destruction_complete_);
2836
+ on_call_destruction_complete_ = nullptr;
5254
2837
  }
5255
2838
  if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
5256
2839
  PendingBatchesFail(error, YieldCallCombiner);
@@ -5266,7 +2849,7 @@ void ChannelData::LoadBalancedCall::CreateSubchannelCall() {
5266
2849
  // because there may be multiple LB picks happening in parallel.
5267
2850
  // Instead, we will probably need to maintain a list in the CallData
5268
2851
  // object of pending LB picks to be cancelled when the closure runs.
5269
- class ChannelData::LoadBalancedCall::LbQueuedCallCanceller {
2852
+ class ClientChannel::LoadBalancedCall::LbQueuedCallCanceller {
5270
2853
  public:
5271
2854
  explicit LbQueuedCallCanceller(RefCountedPtr<LoadBalancedCall> lb_call)
5272
2855
  : lb_call_(std::move(lb_call)) {
@@ -5276,7 +2859,7 @@ class ChannelData::LoadBalancedCall::LbQueuedCallCanceller {
5276
2859
  }
5277
2860
 
5278
2861
  private:
5279
- static void CancelLocked(void* arg, grpc_error* error) {
2862
+ static void CancelLocked(void* arg, grpc_error_handle error) {
5280
2863
  auto* self = static_cast<LbQueuedCallCanceller*>(arg);
5281
2864
  auto* lb_call = self->lb_call_.get();
5282
2865
  auto* chand = lb_call->chand_;
@@ -5286,7 +2869,7 @@ class ChannelData::LoadBalancedCall::LbQueuedCallCanceller {
5286
2869
  gpr_log(GPR_INFO,
5287
2870
  "chand=%p lb_call=%p: cancelling queued pick: "
5288
2871
  "error=%s self=%p calld->pick_canceller=%p",
5289
- chand, lb_call, grpc_error_string(error), self,
2872
+ chand, lb_call, grpc_error_std_string(error).c_str(), self,
5290
2873
  lb_call->lb_call_canceller_);
5291
2874
  }
5292
2875
  if (lb_call->lb_call_canceller_ == self && error != GRPC_ERROR_NONE) {
@@ -5305,7 +2888,7 @@ class ChannelData::LoadBalancedCall::LbQueuedCallCanceller {
5305
2888
  grpc_closure closure_;
5306
2889
  };
5307
2890
 
5308
- void ChannelData::LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
2891
+ void ClientChannel::LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
5309
2892
  if (!queued_pending_lb_pick_) return;
5310
2893
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5311
2894
  gpr_log(GPR_INFO, "chand=%p lb_call=%p: removing from queued picks list",
@@ -5317,7 +2900,7 @@ void ChannelData::LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
5317
2900
  lb_call_canceller_ = nullptr;
5318
2901
  }
5319
2902
 
5320
- void ChannelData::LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
2903
+ void ClientChannel::LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
5321
2904
  if (queued_pending_lb_pick_) return;
5322
2905
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5323
2906
  gpr_log(GPR_INFO, "chand=%p lb_call=%p: adding to queued picks list",
@@ -5330,18 +2913,19 @@ void ChannelData::LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
5330
2913
  lb_call_canceller_ = new LbQueuedCallCanceller(Ref());
5331
2914
  }
5332
2915
 
5333
- void ChannelData::LoadBalancedCall::AsyncPickDone(grpc_error* error) {
2916
+ void ClientChannel::LoadBalancedCall::AsyncPickDone(grpc_error_handle error) {
5334
2917
  GRPC_CLOSURE_INIT(&pick_closure_, PickDone, this, grpc_schedule_on_exec_ctx);
5335
2918
  ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
5336
2919
  }
5337
2920
 
5338
- void ChannelData::LoadBalancedCall::PickDone(void* arg, grpc_error* error) {
2921
+ void ClientChannel::LoadBalancedCall::PickDone(void* arg,
2922
+ grpc_error_handle error) {
5339
2923
  auto* self = static_cast<LoadBalancedCall*>(arg);
5340
2924
  if (error != GRPC_ERROR_NONE) {
5341
2925
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
5342
2926
  gpr_log(GPR_INFO,
5343
2927
  "chand=%p lb_call=%p: failed to pick subchannel: error=%s",
5344
- self->chand_, self, grpc_error_string(error));
2928
+ self->chand_, self, grpc_error_std_string(error).c_str());
5345
2929
  }
5346
2930
  self->PendingBatchesFail(GRPC_ERROR_REF(error), YieldCallCombiner);
5347
2931
  return;
@@ -5349,6 +2933,8 @@ void ChannelData::LoadBalancedCall::PickDone(void* arg, grpc_error* error) {
5349
2933
  self->CreateSubchannelCall();
5350
2934
  }
5351
2935
 
2936
+ namespace {
2937
+
5352
2938
  const char* PickResultTypeName(
5353
2939
  LoadBalancingPolicy::PickResult::ResultType type) {
5354
2940
  switch (type) {
@@ -5362,8 +2948,10 @@ const char* PickResultTypeName(
5362
2948
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
5363
2949
  }
5364
2950
 
5365
- void ChannelData::LoadBalancedCall::PickSubchannel(void* arg,
5366
- grpc_error* error) {
2951
+ } // namespace
2952
+
2953
+ void ClientChannel::LoadBalancedCall::PickSubchannel(void* arg,
2954
+ grpc_error_handle error) {
5367
2955
  auto* self = static_cast<LoadBalancedCall*>(arg);
5368
2956
  bool pick_complete;
5369
2957
  {
@@ -5376,7 +2964,8 @@ void ChannelData::LoadBalancedCall::PickSubchannel(void* arg,
5376
2964
  }
5377
2965
  }
5378
2966
 
5379
- bool ChannelData::LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
2967
+ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
2968
+ grpc_error_handle* error) {
5380
2969
  GPR_ASSERT(connected_subchannel_ == nullptr);
5381
2970
  GPR_ASSERT(subchannel_call_ == nullptr);
5382
2971
  // Grab initial metadata.
@@ -5399,12 +2988,12 @@ bool ChannelData::LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5399
2988
  GPR_INFO,
5400
2989
  "chand=%p lb_call=%p: LB pick returned %s (subchannel=%p, error=%s)",
5401
2990
  chand_, this, PickResultTypeName(result.type), result.subchannel.get(),
5402
- grpc_error_string(result.error));
2991
+ grpc_error_std_string(result.error).c_str());
5403
2992
  }
5404
2993
  switch (result.type) {
5405
2994
  case LoadBalancingPolicy::PickResult::PICK_FAILED: {
5406
2995
  // If we're shutting down, fail all RPCs.
5407
- grpc_error* disconnect_error = chand_->disconnect_error();
2996
+ grpc_error_handle disconnect_error = chand_->disconnect_error();
5408
2997
  if (disconnect_error != GRPC_ERROR_NONE) {
5409
2998
  GRPC_ERROR_UNREF(result.error);
5410
2999
  MaybeRemoveCallFromLbQueuedCallsLocked();
@@ -5415,7 +3004,7 @@ bool ChannelData::LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5415
3004
  // attempt's final status.
5416
3005
  if ((send_initial_metadata_flags &
5417
3006
  GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
5418
- grpc_error* new_error =
3007
+ grpc_error_handle new_error =
5419
3008
  GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
5420
3009
  "Failed to pick subchannel", &result.error, 1);
5421
3010
  GRPC_ERROR_UNREF(result.error);
@@ -5436,9 +3025,11 @@ bool ChannelData::LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5436
3025
  // Handle drops.
5437
3026
  if (GPR_UNLIKELY(result.subchannel == nullptr)) {
5438
3027
  result.error = grpc_error_set_int(
5439
- GRPC_ERROR_CREATE_FROM_STATIC_STRING(
5440
- "Call dropped by load balancing policy"),
5441
- GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
3028
+ grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
3029
+ "Call dropped by load balancing policy"),
3030
+ GRPC_ERROR_INT_GRPC_STATUS,
3031
+ GRPC_STATUS_UNAVAILABLE),
3032
+ GRPC_ERROR_INT_LB_POLICY_DROP, 1);
5442
3033
  } else {
5443
3034
  // Grab a ref to the connected subchannel while we're still
5444
3035
  // holding the data plane mutex.
@@ -5452,68 +3043,4 @@ bool ChannelData::LoadBalancedCall::PickSubchannelLocked(grpc_error** error) {
5452
3043
  }
5453
3044
  }
5454
3045
 
5455
- } // namespace
5456
3046
  } // namespace grpc_core
5457
-
5458
- /*************************************************************************
5459
- * EXPORTED SYMBOLS
5460
- */
5461
-
5462
- using grpc_core::ChannelData;
5463
-
5464
- const grpc_channel_filter grpc_client_channel_filter = {
5465
- ChannelData::CallData::StartTransportStreamOpBatch,
5466
- ChannelData::StartTransportOp,
5467
- sizeof(ChannelData::CallData),
5468
- ChannelData::CallData::Init,
5469
- ChannelData::CallData::SetPollent,
5470
- ChannelData::CallData::Destroy,
5471
- sizeof(ChannelData),
5472
- ChannelData::Init,
5473
- ChannelData::Destroy,
5474
- ChannelData::GetChannelInfo,
5475
- "client-channel",
5476
- };
5477
-
5478
- grpc_connectivity_state grpc_client_channel_check_connectivity_state(
5479
- grpc_channel_element* elem, int try_to_connect) {
5480
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
5481
- return chand->CheckConnectivityState(try_to_connect);
5482
- }
5483
-
5484
- int grpc_client_channel_num_external_connectivity_watchers(
5485
- grpc_channel_element* elem) {
5486
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
5487
- return chand->NumExternalConnectivityWatchers();
5488
- }
5489
-
5490
- void grpc_client_channel_watch_connectivity_state(
5491
- grpc_channel_element* elem, grpc_polling_entity pollent,
5492
- grpc_connectivity_state* state, grpc_closure* on_complete,
5493
- grpc_closure* watcher_timer_init) {
5494
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
5495
- if (state == nullptr) {
5496
- // Handle cancellation.
5497
- GPR_ASSERT(watcher_timer_init == nullptr);
5498
- chand->RemoveExternalConnectivityWatcher(on_complete, /*cancel=*/true);
5499
- return;
5500
- }
5501
- // Handle addition.
5502
- return chand->AddExternalConnectivityWatcher(pollent, state, on_complete,
5503
- watcher_timer_init);
5504
- }
5505
-
5506
- void grpc_client_channel_start_connectivity_watch(
5507
- grpc_channel_element* elem, grpc_connectivity_state initial_state,
5508
- grpc_core::OrphanablePtr<grpc_core::AsyncConnectivityStateWatcherInterface>
5509
- watcher) {
5510
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
5511
- chand->AddConnectivityWatcher(initial_state, std::move(watcher));
5512
- }
5513
-
5514
- void grpc_client_channel_stop_connectivity_watch(
5515
- grpc_channel_element* elem,
5516
- grpc_core::AsyncConnectivityStateWatcherInterface* watcher) {
5517
- auto* chand = static_cast<ChannelData*>(elem->channel_data);
5518
- chand->RemoveConnectivityWatcher(watcher);
5519
- }