grpc 1.10.0 → 1.11.0.pre2

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of grpc might be problematic. Click here for more details.

Files changed (762):
  1. checksums.yaml +4 -4
  2. data/Makefile +2098 -501
  3. data/include/grpc/byte_buffer.h +2 -0
  4. data/include/grpc/byte_buffer_reader.h +2 -0
  5. data/include/grpc/census.h +2 -0
  6. data/include/grpc/fork.h +2 -0
  7. data/include/grpc/grpc.h +10 -0
  8. data/include/grpc/grpc_cronet.h +2 -0
  9. data/include/grpc/grpc_posix.h +2 -1
  10. data/include/grpc/grpc_security.h +21 -0
  11. data/include/grpc/grpc_security_constants.h +1 -0
  12. data/include/grpc/impl/codegen/byte_buffer.h +2 -0
  13. data/include/grpc/impl/codegen/grpc_types.h +24 -0
  14. data/include/grpc/impl/codegen/slice.h +1 -1
  15. data/include/grpc/impl/codegen/sync.h +1 -0
  16. data/include/grpc/impl/codegen/sync_custom.h +2 -0
  17. data/include/grpc/impl/codegen/sync_generic.h +2 -0
  18. data/include/grpc/impl/codegen/sync_posix.h +2 -0
  19. data/include/grpc/impl/codegen/sync_windows.h +2 -0
  20. data/include/grpc/slice.h +2 -0
  21. data/include/grpc/slice_buffer.h +2 -0
  22. data/include/grpc/status.h +2 -0
  23. data/include/grpc/support/alloc.h +2 -2
  24. data/include/grpc/support/atm.h +2 -0
  25. data/include/grpc/support/atm_gcc_atomic.h +2 -0
  26. data/include/grpc/support/atm_gcc_sync.h +2 -0
  27. data/include/grpc/support/atm_windows.h +2 -0
  28. data/include/grpc/support/log.h +1 -1
  29. data/include/grpc/support/sync.h +2 -0
  30. data/include/grpc/support/sync_custom.h +2 -0
  31. data/include/grpc/support/sync_generic.h +2 -0
  32. data/include/grpc/support/sync_posix.h +2 -0
  33. data/include/grpc/support/sync_windows.h +2 -0
  34. data/include/grpc/support/time.h +2 -0
  35. data/src/boringssl/err_data.c +444 -438
  36. data/src/core/ext/census/grpc_context.cc +2 -0
  37. data/src/core/ext/filters/client_channel/backup_poller.cc +13 -8
  38. data/src/core/ext/filters/client_channel/backup_poller.h +3 -2
  39. data/src/core/ext/filters/client_channel/channel_connectivity.cc +2 -0
  40. data/src/core/ext/filters/client_channel/client_channel.cc +1988 -433
  41. data/src/core/ext/filters/client_channel/client_channel.h +2 -0
  42. data/src/core/ext/filters/client_channel/client_channel_factory.cc +2 -0
  43. data/src/core/ext/filters/client_channel/client_channel_factory.h +2 -0
  44. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +2 -27
  45. data/src/core/ext/filters/client_channel/connector.cc +2 -0
  46. data/src/core/ext/filters/client_channel/connector.h +2 -0
  47. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +2 -0
  48. data/src/core/ext/filters/client_channel/http_proxy.cc +2 -0
  49. data/src/core/ext/filters/client_channel/lb_policy.cc +2 -0
  50. data/src/core/ext/filters/client_channel/lb_policy.h +2 -0
  51. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +2 -0
  52. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +2 -0
  53. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +96 -78
  54. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +9 -17
  55. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +70 -62
  56. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +2 -0
  57. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +2 -0
  58. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +2 -0
  59. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +2 -0
  60. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +4 -2
  61. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +4 -2
  62. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +2 -0
  63. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +2 -0
  64. data/src/core/ext/filters/client_channel/lb_policy_factory.cc +3 -1
  65. data/src/core/ext/filters/client_channel/lb_policy_factory.h +2 -1
  66. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +2 -0
  67. data/src/core/ext/filters/client_channel/lb_policy_registry.h +2 -1
  68. data/src/core/ext/filters/client_channel/method_params.cc +178 -0
  69. data/src/core/ext/filters/client_channel/method_params.h +74 -0
  70. data/src/core/ext/filters/client_channel/parse_address.cc +17 -13
  71. data/src/core/ext/filters/client_channel/parse_address.h +2 -0
  72. data/src/core/ext/filters/client_channel/proxy_mapper.cc +2 -0
  73. data/src/core/ext/filters/client_channel/proxy_mapper.h +2 -0
  74. data/src/core/ext/filters/client_channel/proxy_mapper_registry.cc +2 -0
  75. data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +2 -0
  76. data/src/core/ext/filters/client_channel/resolver.cc +2 -0
  77. data/src/core/ext/filters/client_channel/resolver.h +6 -0
  78. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +24 -5
  79. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +2 -1
  80. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +1 -0
  81. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +55 -1
  82. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +8 -1
  83. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +1 -0
  84. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +30 -3
  85. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +7 -0
  86. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +2 -1
  87. data/src/core/ext/filters/client_channel/resolver_factory.h +2 -0
  88. data/src/core/ext/filters/client_channel/resolver_registry.cc +2 -0
  89. data/src/core/ext/filters/client_channel/resolver_registry.h +2 -0
  90. data/src/core/ext/filters/client_channel/retry_throttle.cc +102 -120
  91. data/src/core/ext/filters/client_channel/retry_throttle.h +52 -25
  92. data/src/core/ext/filters/client_channel/subchannel.cc +14 -4
  93. data/src/core/ext/filters/client_channel/subchannel.h +10 -1
  94. data/src/core/ext/filters/client_channel/subchannel_index.cc +2 -0
  95. data/src/core/ext/filters/client_channel/subchannel_index.h +2 -0
  96. data/src/core/ext/filters/client_channel/uri_parser.cc +2 -1
  97. data/src/core/ext/filters/client_channel/uri_parser.h +2 -1
  98. data/src/core/ext/filters/deadline/deadline_filter.cc +2 -1
  99. data/src/core/ext/filters/deadline/deadline_filter.h +2 -0
  100. data/src/core/ext/filters/http/client/http_client_filter.cc +27 -25
  101. data/src/core/ext/filters/http/client/http_client_filter.h +2 -0
  102. data/src/core/ext/filters/http/client_authority_filter.cc +156 -0
  103. data/src/core/ext/filters/http/client_authority_filter.h +34 -0
  104. data/src/core/ext/filters/http/http_filters_plugin.cc +2 -0
  105. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +21 -22
  106. data/src/core/ext/filters/http/message_compress/message_compress_filter.h +2 -0
  107. data/src/core/ext/filters/http/server/http_server_filter.cc +11 -8
  108. data/src/core/ext/filters/http/server/http_server_filter.h +2 -0
  109. data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +2 -0
  110. data/src/core/ext/filters/load_reporting/server_load_reporting_filter.h +2 -0
  111. data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +2 -0
  112. data/src/core/ext/filters/max_age/max_age_filter.cc +2 -0
  113. data/src/core/ext/filters/max_age/max_age_filter.h +2 -0
  114. data/src/core/ext/filters/message_size/message_size_filter.cc +52 -49
  115. data/src/core/ext/filters/message_size/message_size_filter.h +2 -0
  116. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +5 -1
  117. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +2 -0
  118. data/src/core/ext/filters/workarounds/workaround_utils.cc +2 -0
  119. data/src/core/ext/filters/workarounds/workaround_utils.h +2 -0
  120. data/src/core/ext/transport/chttp2/alpn/alpn.cc +3 -1
  121. data/src/core/ext/transport/chttp2/alpn/alpn.h +2 -0
  122. data/src/core/ext/transport/chttp2/client/authority.cc +42 -0
  123. data/src/core/ext/transport/chttp2/client/authority.h +36 -0
  124. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +2 -0
  125. data/src/core/ext/transport/chttp2/client/chttp2_connector.h +2 -0
  126. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +10 -3
  127. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +2 -2
  128. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +37 -25
  129. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +2 -0
  130. data/src/core/ext/transport/chttp2/server/chttp2_server.h +3 -1
  131. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc +2 -1
  132. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +2 -1
  133. data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +2 -0
  134. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +3 -1
  135. data/src/core/ext/transport/chttp2/transport/bin_decoder.h +2 -0
  136. data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +2 -0
  137. data/src/core/ext/transport/chttp2/transport/bin_encoder.h +2 -0
  138. data/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc +2 -0
  139. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +152 -182
  140. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +2 -0
  141. data/src/core/ext/transport/chttp2/transport/flow_control.cc +2 -0
  142. data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -0
  143. data/src/core/ext/transport/chttp2/transport/frame.h +2 -1
  144. data/src/core/ext/transport/chttp2/transport/frame_data.cc +15 -19
  145. data/src/core/ext/transport/chttp2/transport/frame_data.h +7 -5
  146. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +2 -0
  147. data/src/core/ext/transport/chttp2/transport/frame_goaway.h +2 -2
  148. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +2 -0
  149. data/src/core/ext/transport/chttp2/transport/frame_ping.h +2 -1
  150. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +2 -0
  151. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +2 -1
  152. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +2 -0
  153. data/src/core/ext/transport/chttp2/transport/frame_settings.h +2 -2
  154. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +2 -0
  155. data/src/core/ext/transport/chttp2/transport/frame_window_update.h +2 -1
  156. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +2 -0
  157. data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +2 -1
  158. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +2 -1
  159. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +2 -2
  160. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +2 -0
  161. data/src/core/ext/transport/chttp2/transport/hpack_table.h +2 -1
  162. data/src/core/ext/transport/chttp2/transport/http2_settings.cc +2 -0
  163. data/src/core/ext/transport/chttp2/transport/http2_settings.h +2 -0
  164. data/src/core/ext/transport/chttp2/transport/huffsyms.cc +2 -0
  165. data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +3 -2
  166. data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +2 -0
  167. data/src/core/ext/transport/chttp2/transport/internal.h +60 -24
  168. data/src/core/ext/transport/chttp2/transport/parsing.cc +2 -4
  169. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +2 -0
  170. data/src/core/ext/transport/chttp2/transport/stream_map.cc +2 -0
  171. data/src/core/ext/transport/chttp2/transport/varint.cc +2 -0
  172. data/src/core/ext/transport/chttp2/transport/writing.cc +10 -6
  173. data/src/core/ext/transport/inproc/inproc_plugin.cc +2 -0
  174. data/src/core/ext/transport/inproc/inproc_transport.cc +20 -23
  175. data/src/core/ext/transport/inproc/inproc_transport.h +2 -0
  176. data/src/core/lib/avl/avl.cc +2 -0
  177. data/src/core/lib/avl/avl.h +2 -0
  178. data/src/core/lib/backoff/backoff.cc +2 -0
  179. data/src/core/lib/backoff/backoff.h +2 -0
  180. data/src/core/lib/channel/channel_args.h +2 -0
  181. data/src/core/lib/channel/channel_stack.cc +3 -1
  182. data/src/core/lib/channel/channel_stack.h +2 -0
  183. data/src/core/lib/channel/channel_stack_builder.cc +2 -0
  184. data/src/core/lib/channel/channel_stack_builder.h +2 -0
  185. data/src/core/lib/channel/channel_trace.cc +239 -0
  186. data/src/core/lib/channel/channel_trace.h +133 -0
  187. data/src/core/lib/channel/channel_trace_registry.cc +80 -0
  188. data/src/core/lib/channel/channel_trace_registry.h +43 -0
  189. data/src/core/lib/channel/connected_channel.cc +2 -0
  190. data/src/core/lib/channel/connected_channel.h +2 -0
  191. data/src/core/lib/channel/handshaker.cc +2 -0
  192. data/src/core/lib/channel/handshaker.h +2 -0
  193. data/src/core/lib/channel/handshaker_factory.cc +2 -0
  194. data/src/core/lib/channel/handshaker_factory.h +2 -1
  195. data/src/core/lib/channel/handshaker_registry.cc +2 -0
  196. data/src/core/lib/channel/handshaker_registry.h +2 -1
  197. data/src/core/lib/channel/status_util.cc +100 -0
  198. data/src/core/lib/channel/status_util.h +58 -0
  199. data/src/core/lib/compression/algorithm_metadata.h +2 -0
  200. data/src/core/lib/compression/compression.cc +2 -0
  201. data/src/core/lib/compression/compression_internal.cc +2 -0
  202. data/src/core/lib/compression/compression_internal.h +2 -0
  203. data/src/core/lib/compression/message_compress.cc +2 -0
  204. data/src/core/lib/compression/message_compress.h +2 -0
  205. data/src/core/lib/compression/stream_compression.cc +2 -0
  206. data/src/core/lib/compression/stream_compression.h +2 -0
  207. data/src/core/lib/compression/stream_compression_gzip.cc +2 -0
  208. data/src/core/lib/compression/stream_compression_gzip.h +2 -0
  209. data/src/core/lib/compression/stream_compression_identity.cc +2 -1
  210. data/src/core/lib/compression/stream_compression_identity.h +2 -0
  211. data/src/core/lib/debug/stats.cc +2 -0
  212. data/src/core/lib/debug/stats.h +2 -0
  213. data/src/core/lib/debug/stats_data.cc +3 -1
  214. data/src/core/lib/debug/stats_data.h +2 -0
  215. data/src/core/lib/debug/trace.cc +2 -0
  216. data/src/core/lib/debug/trace.h +2 -1
  217. data/src/core/lib/gpr/alloc.cc +2 -1
  218. data/src/core/lib/gpr/arena.cc +47 -0
  219. data/src/core/lib/gpr/arena.h +2 -0
  220. data/src/core/lib/gpr/atm.cc +2 -0
  221. data/src/core/lib/gpr/cpu_linux.cc +5 -1
  222. data/src/core/lib/gpr/cpu_posix.cc +1 -1
  223. data/src/core/lib/gpr/env.h +2 -0
  224. data/src/core/lib/gpr/fork.cc +2 -0
  225. data/src/core/lib/gpr/host_port.cc +2 -0
  226. data/src/core/lib/gpr/log.cc +2 -1
  227. data/src/core/lib/gpr/log_linux.cc +1 -0
  228. data/src/core/lib/gpr/mpscq.cc +2 -0
  229. data/src/core/lib/gpr/mpscq.h +2 -0
  230. data/src/core/lib/gpr/murmur_hash.cc +2 -0
  231. data/src/core/lib/gpr/spinlock.h +2 -0
  232. data/src/core/lib/gpr/string.cc +2 -1
  233. data/src/core/lib/gpr/string.h +2 -2
  234. data/src/core/lib/gpr/sync.cc +2 -0
  235. data/src/core/lib/gpr/time.cc +2 -0
  236. data/src/core/lib/gpr/time_posix.cc +1 -0
  237. data/src/core/lib/gpr/time_precise.cc +2 -0
  238. data/src/core/lib/gpr/time_precise.h +2 -0
  239. data/src/core/lib/gpr/tls_gcc.h +2 -0
  240. data/src/core/lib/gpr/tls_msvc.h +2 -0
  241. data/src/core/lib/gpr/tls_pthread.h +2 -0
  242. data/src/core/lib/gpr/tmpfile.h +2 -0
  243. data/src/core/lib/gprpp/atomic_with_atm.h +2 -0
  244. data/src/core/lib/gprpp/atomic_with_std.h +2 -0
  245. data/src/core/lib/gprpp/inlined_vector.h +2 -0
  246. data/src/core/lib/gprpp/manual_constructor.h +3 -1
  247. data/src/core/lib/gprpp/memory.h +5 -3
  248. data/src/core/lib/gprpp/orphanable.h +3 -0
  249. data/src/core/lib/gprpp/ref_counted.h +4 -0
  250. data/src/core/lib/gprpp/ref_counted_ptr.h +3 -0
  251. data/src/core/lib/gprpp/thd.h +135 -0
  252. data/src/core/lib/gprpp/thd_posix.cc +209 -0
  253. data/src/core/lib/gprpp/thd_windows.cc +162 -0
  254. data/src/core/lib/http/format_request.cc +2 -0
  255. data/src/core/lib/http/format_request.h +2 -0
  256. data/src/core/lib/http/httpcli.cc +2 -0
  257. data/src/core/lib/http/httpcli.h +2 -0
  258. data/src/core/lib/http/httpcli_security_connector.cc +16 -7
  259. data/src/core/lib/http/parser.cc +2 -0
  260. data/src/core/lib/http/parser.h +2 -1
  261. data/src/core/lib/iomgr/call_combiner.cc +2 -0
  262. data/src/core/lib/iomgr/call_combiner.h +2 -1
  263. data/src/core/lib/iomgr/combiner.cc +2 -0
  264. data/src/core/lib/iomgr/combiner.h +2 -0
  265. data/src/core/lib/iomgr/endpoint.cc +4 -0
  266. data/src/core/lib/iomgr/endpoint.h +2 -0
  267. data/src/core/lib/iomgr/endpoint_pair.h +2 -0
  268. data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -0
  269. data/src/core/lib/iomgr/endpoint_pair_uv.cc +2 -0
  270. data/src/core/lib/iomgr/endpoint_pair_windows.cc +7 -4
  271. data/src/core/lib/iomgr/error.h +2 -0
  272. data/src/core/lib/iomgr/error_internal.h +2 -0
  273. data/src/core/lib/iomgr/ev_epoll1_linux.cc +2 -0
  274. data/src/core/lib/iomgr/ev_epoll1_linux.h +2 -0
  275. data/src/core/lib/iomgr/ev_epollex_linux.cc +4 -18
  276. data/src/core/lib/iomgr/ev_epollex_linux.h +2 -0
  277. data/src/core/lib/iomgr/ev_epollsig_linux.cc +2 -0
  278. data/src/core/lib/iomgr/ev_epollsig_linux.h +2 -0
  279. data/src/core/lib/iomgr/ev_poll_posix.cc +61 -31
  280. data/src/core/lib/iomgr/ev_poll_posix.h +2 -0
  281. data/src/core/lib/iomgr/ev_posix.cc +35 -19
  282. data/src/core/lib/iomgr/ev_posix.h +2 -0
  283. data/src/core/lib/iomgr/ev_windows.cc +2 -0
  284. data/src/core/lib/iomgr/exec_ctx.cc +3 -1
  285. data/src/core/lib/iomgr/exec_ctx.h +21 -9
  286. data/src/core/lib/iomgr/executor.cc +13 -11
  287. data/src/core/lib/iomgr/executor.h +2 -0
  288. data/src/core/lib/iomgr/fork_posix.cc +4 -2
  289. data/src/core/lib/iomgr/fork_windows.cc +2 -0
  290. data/src/core/lib/iomgr/gethostname_fallback.cc +2 -0
  291. data/src/core/lib/iomgr/gethostname_host_name_max.cc +2 -0
  292. data/src/core/lib/iomgr/gethostname_sysconf.cc +2 -0
  293. data/src/core/lib/iomgr/iocp_windows.cc +3 -1
  294. data/src/core/lib/iomgr/iocp_windows.h +3 -0
  295. data/src/core/lib/iomgr/iomgr.cc +2 -1
  296. data/src/core/lib/iomgr/iomgr.h +2 -0
  297. data/src/core/lib/iomgr/iomgr_custom.cc +63 -0
  298. data/src/core/lib/iomgr/iomgr_custom.h +47 -0
  299. data/src/core/lib/iomgr/iomgr_internal.cc +43 -0
  300. data/src/core/lib/iomgr/iomgr_internal.h +14 -0
  301. data/src/core/lib/iomgr/iomgr_posix.cc +30 -3
  302. data/src/core/lib/iomgr/iomgr_posix.h +2 -0
  303. data/src/core/lib/iomgr/iomgr_uv.cc +17 -20
  304. data/src/core/lib/iomgr/iomgr_windows.cc +29 -3
  305. data/src/core/lib/iomgr/is_epollexclusive_available.cc +2 -0
  306. data/src/core/lib/iomgr/is_epollexclusive_available.h +2 -0
  307. data/src/core/lib/iomgr/load_file.cc +2 -0
  308. data/src/core/lib/iomgr/load_file.h +2 -0
  309. data/src/core/lib/iomgr/lockfree_event.cc +2 -0
  310. data/src/core/lib/iomgr/lockfree_event.h +14 -1
  311. data/src/core/lib/iomgr/nameser.h +2 -0
  312. data/src/core/lib/iomgr/network_status_tracker.cc +3 -1
  313. data/src/core/lib/iomgr/network_status_tracker.h +2 -0
  314. data/src/core/lib/iomgr/polling_entity.cc +2 -0
  315. data/src/core/lib/iomgr/polling_entity.h +2 -0
  316. data/src/core/lib/iomgr/pollset.cc +56 -0
  317. data/src/core/lib/iomgr/pollset.h +19 -0
  318. data/src/core/lib/iomgr/pollset_custom.cc +106 -0
  319. data/src/core/lib/iomgr/{timer_generic.h → pollset_custom.h} +15 -17
  320. data/src/core/lib/iomgr/pollset_set.cc +55 -0
  321. data/src/core/lib/iomgr/pollset_set.h +13 -0
  322. data/src/core/lib/iomgr/pollset_set_custom.cc +48 -0
  323. data/src/core/lib/iomgr/{pollset_uv.h → pollset_set_custom.h} +6 -7
  324. data/src/core/lib/iomgr/pollset_set_windows.cc +17 -10
  325. data/src/core/lib/iomgr/pollset_set_windows.h +2 -0
  326. data/src/core/lib/iomgr/pollset_uv.cc +42 -105
  327. data/src/core/lib/iomgr/pollset_windows.cc +20 -12
  328. data/src/core/lib/iomgr/pollset_windows.h +2 -0
  329. data/src/core/lib/iomgr/port.h +10 -19
  330. data/src/core/lib/iomgr/resolve_address.cc +50 -0
  331. data/src/core/lib/iomgr/resolve_address.h +39 -10
  332. data/src/core/lib/iomgr/resolve_address_custom.cc +187 -0
  333. data/src/core/lib/iomgr/resolve_address_custom.h +43 -0
  334. data/src/core/lib/iomgr/resolve_address_posix.cc +10 -22
  335. data/src/core/lib/iomgr/resolve_address_windows.cc +10 -22
  336. data/src/core/lib/iomgr/resource_quota.cc +2 -0
  337. data/src/core/lib/iomgr/resource_quota.h +3 -5
  338. data/src/core/lib/iomgr/sockaddr.h +3 -11
  339. data/src/core/lib/iomgr/sockaddr_custom.h +54 -0
  340. data/src/core/lib/iomgr/sockaddr_posix.h +26 -0
  341. data/src/core/lib/iomgr/sockaddr_utils.cc +91 -71
  342. data/src/core/lib/iomgr/sockaddr_utils.h +4 -0
  343. data/src/core/lib/iomgr/sockaddr_windows.h +21 -0
  344. data/src/core/lib/iomgr/socket_factory_posix.cc +2 -0
  345. data/src/core/lib/iomgr/socket_factory_posix.h +2 -0
  346. data/src/core/lib/iomgr/socket_mutator.cc +2 -0
  347. data/src/core/lib/iomgr/socket_mutator.h +2 -0
  348. data/src/core/lib/iomgr/socket_utils.h +11 -0
  349. data/src/core/lib/iomgr/socket_utils_common_posix.cc +15 -6
  350. data/src/core/lib/iomgr/socket_utils_linux.cc +4 -4
  351. data/src/core/lib/iomgr/socket_utils_posix.cc +3 -2
  352. data/src/core/lib/iomgr/socket_utils_posix.h +2 -0
  353. data/src/core/lib/iomgr/socket_utils_uv.cc +13 -2
  354. data/src/core/lib/iomgr/socket_utils_windows.cc +10 -0
  355. data/src/core/lib/iomgr/socket_windows.cc +2 -0
  356. data/src/core/lib/iomgr/socket_windows.h +2 -1
  357. data/src/core/lib/iomgr/sys_epoll_wrapper.h +2 -0
  358. data/src/core/lib/iomgr/tcp_client.cc +36 -0
  359. data/src/core/lib/iomgr/tcp_client.h +13 -0
  360. data/src/core/lib/iomgr/tcp_client_custom.cc +151 -0
  361. data/src/core/lib/iomgr/tcp_client_posix.cc +11 -24
  362. data/src/core/lib/iomgr/tcp_client_posix.h +2 -0
  363. data/src/core/lib/iomgr/tcp_client_windows.cc +10 -23
  364. data/src/core/lib/iomgr/tcp_custom.cc +365 -0
  365. data/src/core/lib/iomgr/tcp_custom.h +81 -0
  366. data/src/core/lib/iomgr/tcp_posix.cc +3 -1
  367. data/src/core/lib/iomgr/tcp_posix.h +2 -0
  368. data/src/core/lib/iomgr/tcp_server.cc +73 -0
  369. data/src/core/lib/iomgr/tcp_server.h +24 -0
  370. data/src/core/lib/iomgr/tcp_server_custom.cc +472 -0
  371. data/src/core/lib/iomgr/tcp_server_posix.cc +41 -23
  372. data/src/core/lib/iomgr/tcp_server_utils_posix.h +2 -0
  373. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +7 -7
  374. data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +8 -6
  375. data/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc +2 -0
  376. data/src/core/lib/iomgr/tcp_server_windows.cc +43 -21
  377. data/src/core/lib/iomgr/tcp_uv.cc +308 -314
  378. data/src/core/lib/iomgr/tcp_windows.cc +3 -1
  379. data/src/core/lib/iomgr/tcp_windows.h +2 -0
  380. data/src/core/lib/iomgr/time_averaged_stats.cc +2 -0
  381. data/src/core/lib/iomgr/timer.cc +45 -0
  382. data/src/core/lib/iomgr/timer.h +36 -15
  383. data/src/core/lib/iomgr/timer_custom.cc +93 -0
  384. data/src/core/lib/iomgr/timer_custom.h +43 -0
  385. data/src/core/lib/iomgr/timer_generic.cc +12 -10
  386. data/src/core/lib/iomgr/timer_heap.cc +2 -4
  387. data/src/core/lib/iomgr/timer_heap.h +2 -0
  388. data/src/core/lib/iomgr/timer_manager.cc +12 -20
  389. data/src/core/lib/iomgr/timer_manager.h +2 -0
  390. data/src/core/lib/iomgr/timer_uv.cc +15 -49
  391. data/src/core/lib/iomgr/udp_server.cc +271 -230
  392. data/src/core/lib/iomgr/udp_server.h +44 -20
  393. data/src/core/lib/iomgr/unix_sockets_posix.cc +10 -7
  394. data/src/core/lib/iomgr/unix_sockets_posix.h +2 -0
  395. data/src/core/lib/iomgr/unix_sockets_posix_noop.cc +2 -0
  396. data/src/core/lib/iomgr/wakeup_fd_cv.cc +3 -1
  397. data/src/core/lib/iomgr/wakeup_fd_cv.h +2 -0
  398. data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +2 -0
  399. data/src/core/lib/iomgr/wakeup_fd_nospecial.cc +2 -0
  400. data/src/core/lib/iomgr/wakeup_fd_pipe.cc +2 -0
  401. data/src/core/lib/iomgr/wakeup_fd_pipe.h +2 -0
  402. data/src/core/lib/iomgr/wakeup_fd_posix.cc +2 -0
  403. data/src/core/lib/iomgr/wakeup_fd_posix.h +2 -0
  404. data/src/core/lib/json/json.cc +38 -0
  405. data/src/core/lib/json/json.h +22 -1
  406. data/src/core/lib/json/json_reader.cc +2 -2
  407. data/src/core/lib/json/json_reader.h +1 -0
  408. data/src/core/lib/json/json_string.cc +2 -0
  409. data/src/core/lib/json/json_writer.cc +2 -2
  410. data/src/core/lib/json/json_writer.h +2 -0
  411. data/src/core/lib/profiling/basic_timers.cc +11 -9
  412. data/src/core/lib/profiling/timers.h +6 -3
  413. data/src/core/lib/security/context/security_context.cc +2 -0
  414. data/src/core/lib/security/context/security_context.h +2 -0
  415. data/src/core/lib/security/credentials/alts/alts_credentials.cc +119 -0
  416. data/src/core/lib/security/credentials/alts/alts_credentials.h +102 -0
  417. data/src/core/lib/security/credentials/alts/check_gcp_environment.cc +72 -0
  418. data/src/core/lib/security/credentials/alts/check_gcp_environment.h +57 -0
  419. data/src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc +67 -0
  420. data/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc +33 -0
  421. data/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc +114 -0
  422. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc +126 -0
  423. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc +46 -0
  424. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_options.h +112 -0
  425. data/src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc +58 -0
  426. data/src/core/lib/security/credentials/composite/composite_credentials.cc +2 -0
  427. data/src/core/lib/security/credentials/composite/composite_credentials.h +2 -0
  428. data/src/core/lib/security/credentials/credentials.cc +2 -0
  429. data/src/core/lib/security/credentials/credentials.h +2 -0
  430. data/src/core/lib/security/credentials/credentials_metadata.cc +2 -0
  431. data/src/core/lib/security/credentials/fake/fake_credentials.cc +2 -3
  432. data/src/core/lib/security/credentials/fake/fake_credentials.h +5 -0
  433. data/src/core/lib/security/credentials/google_default/credentials_generic.cc +2 -0
  434. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +2 -0
  435. data/src/core/lib/security/credentials/iam/iam_credentials.cc +2 -0
  436. data/src/core/lib/security/credentials/iam/iam_credentials.h +2 -0
  437. data/src/core/lib/security/credentials/jwt/json_token.cc +2 -0
  438. data/src/core/lib/security/credentials/jwt/json_token.h +2 -0
  439. data/src/core/lib/security/credentials/jwt/jwt_credentials.h +2 -0
  440. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -0
  441. data/src/core/lib/security/credentials/jwt/jwt_verifier.h +2 -0
  442. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +2 -0
  443. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +2 -0
  444. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +2 -0
  445. data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -0
  446. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +11 -2
  447. data/src/core/lib/security/credentials/ssl/ssl_credentials.h +2 -0
  448. data/src/core/lib/security/security_connector/alts_security_connector.cc +287 -0
  449. data/src/core/lib/security/security_connector/alts_security_connector.h +69 -0
  450. data/src/core/lib/security/security_connector/security_connector.cc +174 -74
  451. data/src/core/lib/security/security_connector/security_connector.h +41 -7
  452. data/src/core/lib/security/transport/auth_filters.h +2 -0
  453. data/src/core/lib/security/transport/client_auth_filter.cc +14 -28
  454. data/src/core/lib/security/transport/secure_endpoint.cc +2 -0
  455. data/src/core/lib/security/transport/secure_endpoint.h +2 -0
  456. data/src/core/lib/security/transport/security_handshaker.cc +2 -0
  457. data/src/core/lib/security/transport/security_handshaker.h +2 -1
  458. data/src/core/lib/security/transport/server_auth_filter.cc +2 -0
  459. data/src/core/lib/security/transport/target_authority_table.cc +75 -0
  460. data/src/core/lib/security/transport/{lb_targets_info.h → target_authority_table.h} +16 -8
  461. data/src/core/lib/security/transport/tsi_error.cc +2 -0
  462. data/src/core/lib/security/transport/tsi_error.h +2 -0
  463. data/src/core/lib/security/util/json_util.cc +2 -0
  464. data/src/core/lib/security/util/json_util.h +2 -0
  465. data/src/core/lib/slice/b64.cc +2 -0
  466. data/src/core/lib/slice/b64.h +2 -0
  467. data/src/core/lib/slice/percent_encoding.cc +2 -0
  468. data/src/core/lib/slice/percent_encoding.h +2 -0
  469. data/src/core/lib/slice/slice.cc +2 -0
  470. data/src/core/lib/slice/slice_buffer.cc +3 -1
  471. data/src/core/lib/slice/slice_hash_table.h +178 -45
  472. data/src/core/lib/slice/slice_intern.cc +2 -0
  473. data/src/core/lib/slice/slice_internal.h +2 -2
  474. data/src/core/lib/slice/slice_string_helpers.cc +2 -0
  475. data/src/core/lib/slice/slice_string_helpers.h +2 -1
  476. data/src/core/lib/slice/slice_weak_hash_table.h +105 -0
  477. data/src/core/lib/surface/api_trace.cc +3 -1
  478. data/src/core/lib/surface/api_trace.h +2 -0
  479. data/src/core/lib/surface/byte_buffer.cc +3 -0
  480. data/src/core/lib/surface/byte_buffer_reader.cc +3 -0
  481. data/src/core/lib/surface/call.cc +46 -80
  482. data/src/core/lib/surface/call.h +2 -0
  483. data/src/core/lib/surface/call_details.cc +2 -0
  484. data/src/core/lib/surface/call_log_batch.cc +2 -0
  485. data/src/core/lib/surface/call_test_only.h +2 -0
  486. data/src/core/lib/surface/channel.cc +72 -41
  487. data/src/core/lib/surface/channel.h +2 -0
  488. data/src/core/lib/surface/channel_init.cc +2 -0
  489. data/src/core/lib/surface/channel_init.h +2 -0
  490. data/src/core/lib/surface/channel_ping.cc +2 -0
  491. data/src/core/lib/surface/channel_stack_type.cc +3 -2
  492. data/src/core/lib/surface/channel_stack_type.h +2 -0
  493. data/src/core/lib/surface/completion_queue.h +2 -0
  494. data/src/core/lib/surface/completion_queue_factory.cc +3 -1
  495. data/src/core/lib/surface/completion_queue_factory.h +2 -0
  496. data/src/core/lib/surface/event_string.cc +2 -0
  497. data/src/core/lib/surface/event_string.h +2 -0
  498. data/src/core/lib/surface/init.cc +5 -2
  499. data/src/core/lib/surface/init_secure.cc +5 -2
  500. data/src/core/lib/surface/lame_client.cc +7 -5
  501. data/src/core/lib/surface/lame_client.h +2 -0
  502. data/src/core/lib/surface/metadata_array.cc +2 -0
  503. data/src/core/lib/surface/server.cc +2 -0
  504. data/src/core/lib/surface/server.h +2 -0
  505. data/src/core/lib/surface/validate_metadata.cc +2 -1
  506. data/src/core/lib/surface/validate_metadata.h +2 -0
  507. data/src/core/lib/surface/version.cc +4 -2
  508. data/src/core/lib/transport/bdp_estimator.cc +2 -0
  509. data/src/core/lib/transport/byte_stream.cc +94 -116
  510. data/src/core/lib/transport/byte_stream.h +111 -78
  511. data/src/core/lib/transport/connectivity_state.cc +2 -0
  512. data/src/core/lib/transport/connectivity_state.h +3 -1
  513. data/src/core/lib/transport/error_utils.cc +2 -0
  514. data/src/core/lib/transport/error_utils.h +2 -0
  515. data/src/core/lib/transport/metadata.cc +2 -0
  516. data/src/core/lib/transport/metadata.h +3 -1
  517. data/src/core/lib/transport/metadata_batch.cc +26 -0
  518. data/src/core/lib/transport/metadata_batch.h +12 -1
  519. data/src/core/lib/transport/pid_controller.cc +2 -0
  520. data/src/core/lib/transport/pid_controller.h +2 -0
  521. data/src/core/lib/transport/service_config.cc +21 -175
  522. data/src/core/lib/transport/service_config.h +223 -35
  523. data/src/core/lib/transport/static_metadata.cc +310 -294
  524. data/src/core/lib/transport/static_metadata.h +96 -82
  525. data/src/core/lib/transport/status_conversion.cc +2 -0
  526. data/src/core/lib/transport/status_conversion.h +3 -0
  527. data/src/core/lib/transport/status_metadata.cc +54 -0
  528. data/src/core/lib/{iomgr/timer_uv.h → transport/status_metadata.h} +10 -12
  529. data/src/core/lib/transport/timeout_encoding.cc +2 -1
  530. data/src/core/lib/transport/timeout_encoding.h +2 -0
  531. data/src/core/lib/transport/transport.cc +3 -1
  532. data/src/core/lib/transport/transport.h +33 -7
  533. data/src/core/lib/transport/transport_impl.h +2 -0
  534. data/src/core/lib/transport/transport_op_string.cc +10 -3
  535. data/src/core/plugin_registry/grpc_plugin_registry.cc +10 -4
  536. data/src/core/tsi/alts/crypt/aes_gcm.cc +687 -0
  537. data/src/core/tsi/alts/crypt/gsec.cc +189 -0
  538. data/src/core/tsi/alts/crypt/gsec.h +454 -0
  539. data/src/core/tsi/alts/frame_protector/alts_counter.cc +118 -0
  540. data/src/core/tsi/alts/frame_protector/alts_counter.h +98 -0
  541. data/src/core/tsi/alts/frame_protector/alts_crypter.cc +66 -0
  542. data/src/core/tsi/alts/frame_protector/alts_crypter.h +255 -0
  543. data/src/core/tsi/alts/frame_protector/alts_frame_protector.cc +407 -0
  544. data/src/core/tsi/alts/frame_protector/alts_frame_protector.h +55 -0
  545. data/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc +114 -0
  546. data/src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.h +114 -0
  547. data/src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc +105 -0
  548. data/src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc +103 -0
  549. data/src/core/tsi/alts/frame_protector/frame_handler.cc +218 -0
  550. data/src/core/tsi/alts/frame_protector/frame_handler.h +236 -0
  551. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +316 -0
  552. data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +137 -0
  553. data/src/core/tsi/alts/handshaker/alts_handshaker_service_api.cc +520 -0
  554. data/src/core/tsi/alts/handshaker/alts_handshaker_service_api.h +323 -0
  555. data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.cc +143 -0
  556. data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h +149 -0
  557. data/src/core/tsi/alts/handshaker/alts_tsi_event.cc +73 -0
  558. data/src/core/tsi/alts/handshaker/alts_tsi_event.h +93 -0
  559. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +483 -0
  560. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +83 -0
  561. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +52 -0
  562. data/src/core/tsi/alts/handshaker/alts_tsi_utils.cc +58 -0
  563. data/src/core/tsi/alts/handshaker/alts_tsi_utils.h +52 -0
  564. data/src/core/tsi/alts/handshaker/altscontext.pb.c +48 -0
  565. data/src/core/tsi/alts/handshaker/altscontext.pb.h +64 -0
  566. data/src/core/tsi/alts/handshaker/handshaker.pb.c +123 -0
  567. data/src/core/tsi/alts/handshaker/handshaker.pb.h +255 -0
  568. data/src/core/tsi/alts/handshaker/transport_security_common.pb.c +50 -0
  569. data/src/core/tsi/alts/handshaker/transport_security_common.pb.h +78 -0
  570. data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +196 -0
  571. data/src/core/tsi/alts/handshaker/transport_security_common_api.h +163 -0
  572. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +180 -0
  573. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h +52 -0
  574. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc +144 -0
  575. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.h +49 -0
  576. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h +91 -0
  577. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc +174 -0
  578. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h +100 -0
  579. data/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc +476 -0
  580. data/src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.h +199 -0
  581. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +296 -0
  582. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h +52 -0
  583. data/src/core/tsi/alts_transport_security.cc +3 -1
  584. data/src/core/tsi/alts_transport_security.h +4 -2
  585. data/src/core/tsi/fake_transport_security.cc +2 -1
  586. data/src/core/tsi/fake_transport_security.h +2 -0
  587. data/src/core/tsi/ssl/session_cache/ssl_session.h +73 -0
  588. data/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc +58 -0
  589. data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +211 -0
  590. data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +93 -0
  591. data/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc +76 -0
  592. data/src/core/tsi/ssl_transport_security.cc +266 -62
  593. data/src/core/tsi/ssl_transport_security.h +128 -6
  594. data/src/core/tsi/ssl_types.h +2 -0
  595. data/src/core/tsi/transport_security.cc +2 -0
  596. data/src/core/tsi/transport_security.h +2 -0
  597. data/src/core/tsi/transport_security_adapter.cc +2 -0
  598. data/src/core/tsi/transport_security_adapter.h +2 -0
  599. data/src/core/tsi/transport_security_grpc.cc +2 -0
  600. data/src/core/tsi/transport_security_grpc.h +2 -0
  601. data/src/core/tsi/transport_security_interface.h +2 -0
  602. data/src/ruby/ext/grpc/extconf.rb +1 -2
  603. data/src/ruby/ext/grpc/rb_call.c +1 -13
  604. data/src/ruby/ext/grpc/rb_channel.c +6 -6
  605. data/src/ruby/ext/grpc/rb_compression_options.c +1 -1
  606. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +10 -0
  607. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +15 -0
  608. data/src/ruby/lib/grpc/core/time_consts.rb +1 -1
  609. data/src/ruby/lib/grpc/generic/bidi_call.rb +19 -8
  610. data/src/ruby/lib/grpc/generic/client_stub.rb +6 -10
  611. data/src/ruby/lib/grpc/generic/interceptors.rb +1 -1
  612. data/src/ruby/lib/grpc/generic/rpc_server.rb +2 -2
  613. data/src/ruby/lib/grpc/version.rb +1 -1
  614. data/src/ruby/spec/generic/client_stub_spec.rb +133 -0
  615. data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +54 -0
  616. data/src/ruby/spec/pb/package_with_underscore/data.proto +23 -0
  617. data/src/ruby/spec/pb/package_with_underscore/service.proto +23 -0
  618. data/third_party/address_sorting/address_sorting.c +369 -0
  619. data/third_party/address_sorting/address_sorting_internal.h +70 -0
  620. data/third_party/address_sorting/address_sorting_posix.c +97 -0
  621. data/third_party/address_sorting/address_sorting_windows.c +55 -0
  622. data/third_party/address_sorting/include/address_sorting/address_sorting.h +110 -0
  623. data/third_party/boringssl/crypto/asn1/a_enum.c +20 -9
  624. data/third_party/boringssl/crypto/asn1/a_i2d_fp.c +3 -0
  625. data/third_party/boringssl/crypto/asn1/a_int.c +19 -8
  626. data/third_party/boringssl/crypto/asn1/a_object.c +0 -128
  627. data/third_party/boringssl/crypto/asn1/asn1_locl.h +3 -0
  628. data/third_party/boringssl/crypto/asn1/tasn_fre.c +2 -4
  629. data/third_party/boringssl/crypto/asn1/tasn_new.c +3 -2
  630. data/third_party/boringssl/crypto/bn_extra/bn_asn1.c +0 -16
  631. data/third_party/boringssl/crypto/buf/buf.c +14 -0
  632. data/third_party/boringssl/crypto/bytestring/cbb.c +93 -0
  633. data/third_party/boringssl/crypto/conf/conf.c +2 -2
  634. data/third_party/boringssl/crypto/cpu-intel.c +17 -17
  635. data/third_party/boringssl/crypto/crypto.c +16 -4
  636. data/third_party/boringssl/crypto/curve25519/spake25519.c +11 -11
  637. data/third_party/boringssl/crypto/curve25519/x25519-x86_64.c +1 -1
  638. data/third_party/boringssl/crypto/dsa/dsa.c +9 -21
  639. data/third_party/boringssl/crypto/ec_extra/ec_asn1.c +2 -2
  640. data/third_party/boringssl/crypto/ecdsa_extra/ecdsa_asn1.c +1 -8
  641. data/third_party/boringssl/crypto/evp/p_rsa_asn1.c +2 -23
  642. data/third_party/boringssl/crypto/ex_data.c +0 -1
  643. data/third_party/boringssl/crypto/fipsmodule/bn/add.c +7 -11
  644. data/third_party/boringssl/crypto/fipsmodule/bn/asm/x86_64-gcc.c +19 -16
  645. data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +15 -0
  646. data/third_party/boringssl/crypto/fipsmodule/bn/div.c +53 -46
  647. data/third_party/boringssl/crypto/fipsmodule/bn/exponentiation.c +242 -85
  648. data/third_party/boringssl/crypto/fipsmodule/bn/generic.c +42 -47
  649. data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +176 -34
  650. data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +118 -65
  651. data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +94 -61
  652. data/third_party/boringssl/crypto/fipsmodule/bn/random.c +79 -63
  653. data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +26 -28
  654. data/third_party/boringssl/crypto/fipsmodule/cipher/cipher.c +2 -0
  655. data/third_party/boringssl/crypto/fipsmodule/ec/ec.c +250 -149
  656. data/third_party/boringssl/crypto/fipsmodule/ec/ec_montgomery.c +0 -27
  657. data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +54 -20
  658. data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +3 -3
  659. data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +7 -41
  660. data/third_party/boringssl/crypto/fipsmodule/ec/p256-64.c +6 -40
  661. data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64.c +17 -122
  662. data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +3 -64
  663. data/third_party/boringssl/crypto/fipsmodule/ec/wnaf.c +27 -9
  664. data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +203 -205
  665. data/third_party/boringssl/crypto/fipsmodule/modes/cbc.c +14 -15
  666. data/third_party/boringssl/crypto/fipsmodule/modes/cfb.c +12 -8
  667. data/third_party/boringssl/crypto/fipsmodule/modes/ctr.c +4 -3
  668. data/third_party/boringssl/crypto/fipsmodule/modes/gcm.c +25 -36
  669. data/third_party/boringssl/crypto/fipsmodule/modes/internal.h +10 -0
  670. data/third_party/boringssl/crypto/fipsmodule/rsa/internal.h +0 -4
  671. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +2 -0
  672. data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +9 -19
  673. data/third_party/boringssl/crypto/lhash/lhash.c +19 -0
  674. data/third_party/boringssl/crypto/obj/obj.c +29 -69
  675. data/third_party/boringssl/crypto/pem/pem_lib.c +2 -2
  676. data/third_party/boringssl/crypto/poly1305/poly1305_vec.c +4 -55
  677. data/third_party/boringssl/crypto/rsa_extra/rsa_asn1.c +3 -22
  678. data/third_party/boringssl/crypto/x509/by_dir.c +1 -3
  679. data/third_party/boringssl/crypto/x509/by_file.c +0 -1
  680. data/third_party/boringssl/crypto/x509/x509_lu.c +0 -1
  681. data/third_party/boringssl/crypto/x509/x509_obj.c +1 -3
  682. data/third_party/boringssl/crypto/x509/x509_txt.c +0 -6
  683. data/third_party/boringssl/crypto/x509/x509_vfy.c +0 -1
  684. data/third_party/boringssl/crypto/x509/x509_vpm.c +0 -1
  685. data/third_party/boringssl/crypto/x509/x_algor.c +2 -2
  686. data/third_party/boringssl/crypto/x509v3/v3_alt.c +3 -4
  687. data/third_party/boringssl/crypto/x509v3/v3_genn.c +1 -0
  688. data/third_party/boringssl/crypto/x509v3/v3_info.c +1 -2
  689. data/third_party/boringssl/crypto/x509v3/v3_lib.c +15 -7
  690. data/third_party/boringssl/crypto/x509v3/v3_utl.c +41 -2
  691. data/third_party/boringssl/include/openssl/asn1.h +0 -1
  692. data/third_party/boringssl/include/openssl/base.h +1 -1
  693. data/third_party/boringssl/include/openssl/bio.h +5 -2
  694. data/third_party/boringssl/include/openssl/bn.h +2 -17
  695. data/third_party/boringssl/include/openssl/buf.h +4 -0
  696. data/third_party/boringssl/include/openssl/bytestring.h +11 -0
  697. data/third_party/boringssl/include/openssl/chacha.h +5 -1
  698. data/third_party/boringssl/include/openssl/cipher.h +10 -0
  699. data/third_party/boringssl/include/openssl/conf.h +4 -8
  700. data/third_party/boringssl/include/openssl/dsa.h +2 -18
  701. data/third_party/boringssl/include/openssl/ec.h +5 -5
  702. data/third_party/boringssl/include/openssl/ecdsa.h +10 -28
  703. data/third_party/boringssl/include/openssl/evp.h +0 -4
  704. data/third_party/boringssl/include/openssl/lhash.h +1 -18
  705. data/third_party/boringssl/include/openssl/obj.h +1 -0
  706. data/third_party/boringssl/include/openssl/rsa.h +3 -4
  707. data/third_party/boringssl/include/openssl/ssl.h +35 -54
  708. data/third_party/boringssl/include/openssl/ssl3.h +2 -0
  709. data/third_party/boringssl/include/openssl/stack.h +1 -1
  710. data/third_party/boringssl/include/openssl/tls1.h +1 -16
  711. data/third_party/boringssl/include/openssl/x509.h +3 -2
  712. data/third_party/boringssl/include/openssl/x509_vfy.h +0 -2
  713. data/third_party/boringssl/include/openssl/x509v3.h +1 -0
  714. data/third_party/boringssl/ssl/custom_extensions.cc +1 -1
  715. data/third_party/boringssl/ssl/d1_both.cc +120 -129
  716. data/third_party/boringssl/ssl/d1_lib.cc +23 -21
  717. data/third_party/boringssl/ssl/d1_pkt.cc +39 -143
  718. data/third_party/boringssl/ssl/dtls_method.cc +16 -23
  719. data/third_party/boringssl/ssl/dtls_record.cc +11 -4
  720. data/third_party/boringssl/ssl/handshake.cc +109 -40
  721. data/third_party/boringssl/ssl/handshake_client.cc +104 -96
  722. data/third_party/boringssl/ssl/handshake_server.cc +62 -72
  723. data/third_party/boringssl/ssl/internal.h +397 -318
  724. data/third_party/boringssl/ssl/s3_both.cc +173 -191
  725. data/third_party/boringssl/ssl/s3_lib.cc +26 -34
  726. data/third_party/boringssl/ssl/s3_pkt.cc +105 -247
  727. data/third_party/boringssl/ssl/ssl_asn1.cc +22 -22
  728. data/third_party/boringssl/ssl/ssl_buffer.cc +98 -108
  729. data/third_party/boringssl/ssl/ssl_cert.cc +12 -1
  730. data/third_party/boringssl/ssl/ssl_cipher.cc +23 -28
  731. data/third_party/boringssl/ssl/ssl_key_share.cc +11 -6
  732. data/third_party/boringssl/ssl/ssl_lib.cc +190 -113
  733. data/third_party/boringssl/ssl/ssl_privkey.cc +76 -106
  734. data/third_party/boringssl/ssl/ssl_session.cc +3 -3
  735. data/third_party/boringssl/ssl/ssl_stat.cc +3 -3
  736. data/third_party/boringssl/ssl/ssl_transcript.cc +38 -22
  737. data/third_party/boringssl/ssl/ssl_versions.cc +64 -31
  738. data/third_party/boringssl/ssl/t1_enc.cc +137 -154
  739. data/third_party/boringssl/ssl/t1_lib.cc +463 -478
  740. data/third_party/boringssl/ssl/tls13_both.cc +57 -58
  741. data/third_party/boringssl/ssl/tls13_client.cc +256 -121
  742. data/third_party/boringssl/ssl/tls13_enc.cc +187 -72
  743. data/third_party/boringssl/ssl/tls13_server.cc +187 -86
  744. data/third_party/boringssl/ssl/tls_method.cc +20 -30
  745. data/third_party/boringssl/ssl/tls_record.cc +77 -40
  746. data/third_party/boringssl/third_party/fiat/curve25519.c +5062 -0
  747. data/third_party/boringssl/{crypto/curve25519 → third_party/fiat}/internal.h +40 -27
  748. data/third_party/nanopb/pb.h +1 -1
  749. metadata +147 -45
  750. data/src/core/lib/gpr/thd.cc +0 -49
  751. data/src/core/lib/gpr/thd.h +0 -71
  752. data/src/core/lib/gpr/thd_posix.cc +0 -154
  753. data/src/core/lib/gpr/thd_windows.cc +0 -107
  754. data/src/core/lib/iomgr/iomgr_uv.h +0 -37
  755. data/src/core/lib/iomgr/pollset_set_uv.cc +0 -43
  756. data/src/core/lib/iomgr/resolve_address_uv.cc +0 -284
  757. data/src/core/lib/iomgr/tcp_client_uv.cc +0 -175
  758. data/src/core/lib/iomgr/tcp_server_uv.cc +0 -471
  759. data/src/core/lib/iomgr/tcp_uv.h +0 -51
  760. data/src/core/lib/security/transport/lb_targets_info.cc +0 -59
  761. data/src/core/lib/slice/slice_hash_table.cc +0 -145
  762. data/third_party/boringssl/crypto/curve25519/curve25519.c +0 -4938
@@ -16,6 +16,8 @@
16
16
  *
17
17
  */
18
18
 
19
+ #include <grpc/support/port_platform.h>
20
+
19
21
  #include <grpc/census.h>
20
22
  #include <grpc/grpc.h>
21
23
  #include "src/core/lib/surface/api_trace.h"
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  *
3
- * Copyright 2015 gRPC authors.
3
+ * Copyright 2017 gRPC authors.
4
4
  *
5
5
  * Licensed under the Apache License, Version 2.0 (the "License");
6
6
  * you may not use this file except in compliance with the License.
@@ -16,6 +16,8 @@
16
16
  *
17
17
  */
18
18
 
19
+ #include <grpc/support/port_platform.h>
20
+
19
21
  #include "src/core/ext/filters/client_channel/backup_poller.h"
20
22
 
21
23
  #include <grpc/grpc.h>
@@ -125,13 +127,7 @@ static void run_poller(void* arg, grpc_error* error) {
125
127
  &p->run_poller_closure);
126
128
  }
127
129
 
128
- void grpc_client_channel_start_backup_polling(
129
- grpc_pollset_set* interested_parties) {
130
- gpr_once_init(&g_once, init_globals);
131
- if (g_poll_interval_ms == 0) {
132
- return;
133
- }
134
- gpr_mu_lock(&g_poller_mu);
130
+ static void g_poller_init_locked() {
135
131
  if (g_poller == nullptr) {
136
132
  g_poller = static_cast<backup_poller*>(gpr_zalloc(sizeof(backup_poller)));
137
133
  g_poller->pollset =
@@ -147,7 +143,16 @@ void grpc_client_channel_start_backup_polling(
147
143
  grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
148
144
  &g_poller->run_poller_closure);
149
145
  }
146
+ }
150
147
 
148
+ void grpc_client_channel_start_backup_polling(
149
+ grpc_pollset_set* interested_parties) {
150
+ gpr_once_init(&g_once, init_globals);
151
+ if (g_poll_interval_ms == 0) {
152
+ return;
153
+ }
154
+ gpr_mu_lock(&g_poller_mu);
155
+ g_poller_init_locked();
151
156
  gpr_ref(&g_poller->refs);
152
157
  /* Get a reference to g_poller->pollset before releasing g_poller_mu to make
153
158
  * TSAN happy. Otherwise, reading from g_poller (i.e g_poller->pollset) after
@@ -1,6 +1,6 @@
1
1
  /*
2
2
  *
3
- * Copyright 2015 gRPC authors.
3
+ * Copyright 2017 gRPC authors.
4
4
  *
5
5
  * Licensed under the Apache License, Version 2.0 (the "License");
6
6
  * you may not use this file except in compliance with the License.
@@ -19,9 +19,10 @@
19
19
  #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
20
20
  #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H
21
21
 
22
+ #include <grpc/support/port_platform.h>
23
+
22
24
  #include <grpc/grpc.h>
23
25
  #include "src/core/lib/channel/channel_stack.h"
24
- #include "src/core/lib/iomgr/exec_ctx.h"
25
26
 
26
27
  /* Start polling \a interested_parties periodically in the timer thread */
27
28
  void grpc_client_channel_start_backup_polling(
@@ -16,6 +16,8 @@
16
16
  *
17
17
  */
18
18
 
19
+ #include <grpc/support/port_platform.h>
20
+
19
21
  #include "src/core/lib/surface/channel.h"
20
22
 
21
23
  #include <inttypes.h>
@@ -21,6 +21,7 @@
21
21
  #include "src/core/ext/filters/client_channel/client_channel.h"
22
22
 
23
23
  #include <inttypes.h>
24
+ #include <limits.h>
24
25
  #include <stdbool.h>
25
26
  #include <stdio.h>
26
27
  #include <string.h>
@@ -33,153 +34,75 @@
33
34
  #include "src/core/ext/filters/client_channel/backup_poller.h"
34
35
  #include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
35
36
  #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
37
+ #include "src/core/ext/filters/client_channel/method_params.h"
36
38
  #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
37
39
  #include "src/core/ext/filters/client_channel/resolver_registry.h"
38
40
  #include "src/core/ext/filters/client_channel/retry_throttle.h"
39
41
  #include "src/core/ext/filters/client_channel/subchannel.h"
40
42
  #include "src/core/ext/filters/deadline/deadline_filter.h"
43
+ #include "src/core/lib/backoff/backoff.h"
41
44
  #include "src/core/lib/channel/channel_args.h"
42
45
  #include "src/core/lib/channel/connected_channel.h"
46
+ #include "src/core/lib/channel/status_util.h"
43
47
  #include "src/core/lib/gpr/string.h"
48
+ #include "src/core/lib/gprpp/inlined_vector.h"
49
+ #include "src/core/lib/gprpp/manual_constructor.h"
44
50
  #include "src/core/lib/iomgr/combiner.h"
45
51
  #include "src/core/lib/iomgr/iomgr.h"
46
52
  #include "src/core/lib/iomgr/polling_entity.h"
47
53
  #include "src/core/lib/profiling/timers.h"
48
54
  #include "src/core/lib/slice/slice_internal.h"
55
+ #include "src/core/lib/slice/slice_string_helpers.h"
49
56
  #include "src/core/lib/surface/channel.h"
50
57
  #include "src/core/lib/transport/connectivity_state.h"
58
+ #include "src/core/lib/transport/error_utils.h"
51
59
  #include "src/core/lib/transport/metadata.h"
52
60
  #include "src/core/lib/transport/metadata_batch.h"
53
61
  #include "src/core/lib/transport/service_config.h"
54
62
  #include "src/core/lib/transport/static_metadata.h"
63
+ #include "src/core/lib/transport/status_metadata.h"
64
+
65
+ using grpc_core::internal::ClientChannelMethodParams;
66
+ using grpc_core::internal::ServerRetryThrottleData;
55
67
 
56
68
  /* Client channel implementation */
57
69
 
70
+ // By default, we buffer 256 KiB per RPC for retries.
71
+ // TODO(roth): Do we have any data to suggest a better value?
72
+ #define DEFAULT_PER_RPC_RETRY_BUFFER_SIZE (256 << 10)
73
+
74
+ // This value was picked arbitrarily. It can be changed if there is
75
+ // any even moderately compelling reason to do so.
76
+ #define RETRY_BACKOFF_JITTER 0.2
77
+
58
78
  grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
59
79
 
60
80
  /*************************************************************************
61
- * METHOD-CONFIG TABLE
81
+ * CHANNEL-WIDE FUNCTIONS
62
82
  */
63
83
 
64
- typedef enum {
65
- /* zero so it can be default initialized */
66
- WAIT_FOR_READY_UNSET = 0,
67
- WAIT_FOR_READY_FALSE,
68
- WAIT_FOR_READY_TRUE
69
- } wait_for_ready_value;
70
-
71
- typedef struct {
72
- gpr_refcount refs;
73
- grpc_millis timeout;
74
- wait_for_ready_value wait_for_ready;
75
- } method_parameters;
76
-
77
- static method_parameters* method_parameters_ref(
78
- method_parameters* method_params) {
79
- gpr_ref(&method_params->refs);
80
- return method_params;
81
- }
82
-
83
- static void method_parameters_unref(method_parameters* method_params) {
84
- if (gpr_unref(&method_params->refs)) {
85
- gpr_free(method_params);
86
- }
87
- }
88
-
89
- // Wrappers to pass to grpc_service_config_create_method_config_table().
90
- static void* method_parameters_ref_wrapper(void* value) {
91
- return method_parameters_ref(static_cast<method_parameters*>(value));
92
- }
93
- static void method_parameters_unref_wrapper(void* value) {
94
- method_parameters_unref(static_cast<method_parameters*>(value));
95
- }
96
-
97
- static bool parse_wait_for_ready(grpc_json* field,
98
- wait_for_ready_value* wait_for_ready) {
99
- if (field->type != GRPC_JSON_TRUE && field->type != GRPC_JSON_FALSE) {
100
- return false;
101
- }
102
- *wait_for_ready = field->type == GRPC_JSON_TRUE ? WAIT_FOR_READY_TRUE
103
- : WAIT_FOR_READY_FALSE;
104
- return true;
105
- }
106
-
107
- static bool parse_timeout(grpc_json* field, grpc_millis* timeout) {
108
- if (field->type != GRPC_JSON_STRING) return false;
109
- size_t len = strlen(field->value);
110
- if (field->value[len - 1] != 's') return false;
111
- char* buf = gpr_strdup(field->value);
112
- buf[len - 1] = '\0'; // Remove trailing 's'.
113
- char* decimal_point = strchr(buf, '.');
114
- int nanos = 0;
115
- if (decimal_point != nullptr) {
116
- *decimal_point = '\0';
117
- nanos = gpr_parse_nonnegative_int(decimal_point + 1);
118
- if (nanos == -1) {
119
- gpr_free(buf);
120
- return false;
121
- }
122
- int num_digits = static_cast<int>(strlen(decimal_point + 1));
123
- if (num_digits > 9) { // We don't accept greater precision than nanos.
124
- gpr_free(buf);
125
- return false;
126
- }
127
- for (int i = 0; i < (9 - num_digits); ++i) {
128
- nanos *= 10;
129
- }
130
- }
131
- int seconds = decimal_point == buf ? 0 : gpr_parse_nonnegative_int(buf);
132
- gpr_free(buf);
133
- if (seconds == -1) return false;
134
- *timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
135
- return true;
136
- }
137
-
138
- static void* method_parameters_create_from_json(const grpc_json* json) {
139
- wait_for_ready_value wait_for_ready = WAIT_FOR_READY_UNSET;
140
- grpc_millis timeout = 0;
141
- for (grpc_json* field = json->child; field != nullptr; field = field->next) {
142
- if (field->key == nullptr) continue;
143
- if (strcmp(field->key, "waitForReady") == 0) {
144
- if (wait_for_ready != WAIT_FOR_READY_UNSET) return nullptr; // Duplicate.
145
- if (!parse_wait_for_ready(field, &wait_for_ready)) return nullptr;
146
- } else if (strcmp(field->key, "timeout") == 0) {
147
- if (timeout > 0) return nullptr; // Duplicate.
148
- if (!parse_timeout(field, &timeout)) return nullptr;
149
- }
150
- }
151
- method_parameters* value =
152
- static_cast<method_parameters*>(gpr_malloc(sizeof(method_parameters)));
153
- gpr_ref_init(&value->refs, 1);
154
- value->timeout = timeout;
155
- value->wait_for_ready = wait_for_ready;
156
- return value;
157
- }
158
-
159
84
  struct external_connectivity_watcher;
160
85
 
161
- /*************************************************************************
162
- * CHANNEL-WIDE FUNCTIONS
163
- */
86
+ typedef grpc_core::SliceHashTable<
87
+ grpc_core::RefCountedPtr<ClientChannelMethodParams>>
88
+ MethodParamsTable;
164
89
 
165
90
  typedef struct client_channel_channel_data {
166
- /** resolver for this channel */
167
91
  grpc_core::OrphanablePtr<grpc_core::Resolver> resolver;
168
- /** have we started resolving this channel */
169
92
  bool started_resolving;
170
- /** is deadline checking enabled? */
171
93
  bool deadline_checking_enabled;
172
- /** client channel factory */
173
94
  grpc_client_channel_factory* client_channel_factory;
95
+ bool enable_retries;
96
+ size_t per_rpc_retry_buffer_size;
174
97
 
175
98
  /** combiner protecting all variables below in this data structure */
176
99
  grpc_combiner* combiner;
177
100
  /** currently active load balancer */
178
101
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> lb_policy;
179
102
  /** retry throttle data */
180
- grpc_server_retry_throttle_data* retry_throttle_data;
103
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
181
104
  /** maps method names to method_parameters structs */
182
- grpc_slice_hash_table* method_params_table;
105
+ grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
183
106
  /** incoming resolver result - set by resolver.next() */
184
107
  grpc_channel_args* resolver_result;
185
108
  /** a list of closures that are all waiting for resolver result to come in */
@@ -200,7 +123,7 @@ typedef struct client_channel_channel_data {
200
123
  gpr_mu external_connectivity_watcher_list_mu;
201
124
  struct external_connectivity_watcher* external_connectivity_watcher_list_head;
202
125
 
203
- /* the following properties are guarded by a mutex since API's require them
126
+ /* the following properties are guarded by a mutex since APIs require them
204
127
  to be instantaneously available */
205
128
  gpr_mu info_mu;
206
129
  char* info_lb_policy_name;
@@ -303,12 +226,11 @@ static void start_resolving_locked(channel_data* chand) {
303
226
 
304
227
  typedef struct {
305
228
  char* server_name;
306
- grpc_server_retry_throttle_data* retry_throttle_data;
229
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
307
230
  } service_config_parsing_state;
308
231
 
309
- static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
310
- service_config_parsing_state* parsing_state =
311
- static_cast<service_config_parsing_state*>(arg);
232
+ static void parse_retry_throttle_params(
233
+ const grpc_json* field, service_config_parsing_state* parsing_state) {
312
234
  if (strcmp(field->key, "retryThrottling") == 0) {
313
235
  if (parsing_state->retry_throttle_data != nullptr) return; // Duplicate.
314
236
  if (field->type != GRPC_JSON_OBJECT) return;
@@ -357,7 +279,7 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
357
279
  }
358
280
  }
359
281
  parsing_state->retry_throttle_data =
360
- grpc_retry_throttle_map_get_data_for_server(
282
+ grpc_core::internal::ServerRetryThrottleMap::GetDataForServer(
361
283
  parsing_state->server_name, max_milli_tokens, milli_token_ratio);
362
284
  }
363
285
  }
@@ -382,21 +304,26 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
382
304
  chand->lb_policy->SetReresolutionClosureLocked(&args->closure);
383
305
  }
384
306
 
307
+ // TODO(roth): The logic in this function is very hard to follow. We
308
+ // should refactor this so that it's easier to understand, perhaps as
309
+ // part of changing the resolver API to more clearly differentiate
310
+ // between transient failures and shutdown.
385
311
  static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
386
312
  channel_data* chand = static_cast<channel_data*>(arg);
387
313
  if (grpc_client_channel_trace.enabled()) {
388
- gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
389
- grpc_error_string(error));
314
+ gpr_log(GPR_DEBUG,
315
+ "chand=%p: got resolver result: resolver_result=%p error=%s", chand,
316
+ chand->resolver_result, grpc_error_string(error));
390
317
  }
391
- // Extract the following fields from the resolver result, if non-NULL.
318
+ // Extract the following fields from the resolver result, if non-nullptr.
392
319
  bool lb_policy_updated = false;
393
320
  bool lb_policy_created = false;
394
321
  char* lb_policy_name_dup = nullptr;
395
322
  bool lb_policy_name_changed = false;
396
323
  grpc_core::OrphanablePtr<grpc_core::LoadBalancingPolicy> new_lb_policy;
397
324
  char* service_config_json = nullptr;
398
- grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
399
- grpc_slice_hash_table* method_params_table = nullptr;
325
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
326
+ grpc_core::RefCountedPtr<MethodParamsTable> method_params_table;
400
327
  if (chand->resolver_result != nullptr) {
401
328
  if (chand->resolver != nullptr) {
402
329
  // Find LB policy name.
@@ -431,7 +358,6 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
431
358
  // Use pick_first if nothing was specified and we didn't select grpclb
432
359
  // above.
433
360
  if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
434
-
435
361
  // Check to see if we're already using the right LB policy.
436
362
  // Note: It's safe to use chand->info_lb_policy_name here without
437
363
  // taking a lock on chand->info_mu, because this function is the
@@ -469,42 +395,40 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
469
395
  new_lb_policy->SetReresolutionClosureLocked(&args->closure);
470
396
  }
471
397
  }
398
+ // Before we clean up, save a copy of lb_policy_name, since it might
399
+ // be pointing to data inside chand->resolver_result.
400
+ // The copy will be saved in chand->lb_policy_name below.
401
+ lb_policy_name_dup = gpr_strdup(lb_policy_name);
472
402
  // Find service config.
473
403
  channel_arg = grpc_channel_args_find(chand->resolver_result,
474
404
  GRPC_ARG_SERVICE_CONFIG);
475
405
  service_config_json =
476
406
  gpr_strdup(grpc_channel_arg_get_string(channel_arg));
477
407
  if (service_config_json != nullptr) {
478
- grpc_service_config* service_config =
479
- grpc_service_config_create(service_config_json);
408
+ grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config =
409
+ grpc_core::ServiceConfig::Create(service_config_json);
480
410
  if (service_config != nullptr) {
481
- channel_arg = grpc_channel_args_find(chand->resolver_result,
482
- GRPC_ARG_SERVER_URI);
483
- const char* server_uri = grpc_channel_arg_get_string(channel_arg);
484
- GPR_ASSERT(server_uri != nullptr);
485
- grpc_uri* uri = grpc_uri_parse(server_uri, true);
486
- GPR_ASSERT(uri->path[0] != '\0');
487
- service_config_parsing_state parsing_state;
488
- memset(&parsing_state, 0, sizeof(parsing_state));
489
- parsing_state.server_name =
490
- uri->path[0] == '/' ? uri->path + 1 : uri->path;
491
- grpc_service_config_parse_global_params(
492
- service_config, parse_retry_throttle_params, &parsing_state);
493
- grpc_uri_destroy(uri);
494
- retry_throttle_data = parsing_state.retry_throttle_data;
495
- method_params_table = grpc_service_config_create_method_config_table(
496
- service_config, method_parameters_create_from_json,
497
- method_parameters_ref_wrapper, method_parameters_unref_wrapper);
498
- grpc_service_config_destroy(service_config);
411
+ if (chand->enable_retries) {
412
+ channel_arg = grpc_channel_args_find(chand->resolver_result,
413
+ GRPC_ARG_SERVER_URI);
414
+ const char* server_uri = grpc_channel_arg_get_string(channel_arg);
415
+ GPR_ASSERT(server_uri != nullptr);
416
+ grpc_uri* uri = grpc_uri_parse(server_uri, true);
417
+ GPR_ASSERT(uri->path[0] != '\0');
418
+ service_config_parsing_state parsing_state;
419
+ memset(&parsing_state, 0, sizeof(parsing_state));
420
+ parsing_state.server_name =
421
+ uri->path[0] == '/' ? uri->path + 1 : uri->path;
422
+ service_config->ParseGlobalParams(parse_retry_throttle_params,
423
+ &parsing_state);
424
+ grpc_uri_destroy(uri);
425
+ retry_throttle_data = std::move(parsing_state.retry_throttle_data);
426
+ }
427
+ method_params_table = service_config->CreateMethodConfigTable(
428
+ ClientChannelMethodParams::CreateFromJson);
499
429
  }
500
430
  }
501
- // Before we clean up, save a copy of lb_policy_name, since it might
502
- // be pointing to data inside chand->resolver_result.
503
- // The copy will be saved in chand->lb_policy_name below.
504
- lb_policy_name_dup = gpr_strdup(lb_policy_name);
505
431
  }
506
- grpc_channel_args_destroy(chand->resolver_result);
507
- chand->resolver_result = nullptr;
508
432
  }
509
433
  if (grpc_client_channel_trace.enabled()) {
510
434
  gpr_log(GPR_DEBUG,
@@ -514,7 +438,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
514
438
  lb_policy_name_changed ? " (changed)" : "", service_config_json);
515
439
  }
516
440
  // Now swap out fields in chand. Note that the new values may still
517
- // be NULL if (e.g.) the resolver failed to return results or the
441
+ // be nullptr if (e.g.) the resolver failed to return results or the
518
442
  // results did not contain the necessary data.
519
443
  //
520
444
  // First, swap out the data used by cc_get_channel_info().
@@ -529,21 +453,15 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
529
453
  }
530
454
  gpr_mu_unlock(&chand->info_mu);
531
455
  // Swap out the retry throttle data.
532
- if (chand->retry_throttle_data != nullptr) {
533
- grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
534
- }
535
- chand->retry_throttle_data = retry_throttle_data;
456
+ chand->retry_throttle_data = std::move(retry_throttle_data);
536
457
  // Swap out the method params table.
537
- if (chand->method_params_table != nullptr) {
538
- grpc_slice_hash_table_unref(chand->method_params_table);
539
- }
540
- chand->method_params_table = method_params_table;
458
+ chand->method_params_table = std::move(method_params_table);
541
459
  // If we have a new LB policy or are shutting down (in which case
542
- // new_lb_policy will be NULL), swap out the LB policy, unreffing the old one
543
- // and removing its fds from chand->interested_parties. Note that we do NOT do
544
- // this if either (a) we updated the existing LB policy above or (b) we failed
545
- // to create the new LB policy (in which case we want to continue using the
546
- // most recent one we had).
460
+ // new_lb_policy will be nullptr), swap out the LB policy, unreffing the
461
+ // old one and removing its fds from chand->interested_parties.
462
+ // Note that we do NOT do this if either (a) we updated the existing
463
+ // LB policy above or (b) we failed to create the new LB policy (in
464
+ // which case we want to continue using the most recent one we had).
547
465
  if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
548
466
  chand->resolver == nullptr) {
549
467
  if (chand->lb_policy != nullptr) {
@@ -580,6 +498,8 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
580
498
  "Channel disconnected", &error, 1));
581
499
  GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
582
500
  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
501
+ grpc_channel_args_destroy(chand->resolver_result);
502
+ chand->resolver_result = nullptr;
583
503
  } else { // Not shutting down.
584
504
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
585
505
  grpc_error* state_error =
@@ -598,11 +518,16 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
598
518
  chand->exit_idle_when_lb_policy_arrives = false;
599
519
  }
600
520
  watch_lb_policy_locked(chand, chand->lb_policy.get(), state);
521
+ } else if (chand->resolver_result == nullptr) {
522
+ // Transient failure.
523
+ GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
601
524
  }
602
525
  if (!lb_policy_updated) {
603
526
  set_channel_connectivity_state_locked(
604
527
  chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
605
528
  }
529
+ grpc_channel_args_destroy(chand->resolver_result);
530
+ chand->resolver_result = nullptr;
606
531
  chand->resolver->NextLocked(&chand->resolver_result,
607
532
  &chand->on_resolver_result_changed);
608
533
  GRPC_ERROR_UNREF(state_error);
@@ -722,9 +647,17 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
722
647
  grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
723
648
  "client_channel");
724
649
  grpc_client_channel_start_backup_polling(chand->interested_parties);
650
+ // Record max per-RPC retry buffer size.
651
+ const grpc_arg* arg = grpc_channel_args_find(
652
+ args->channel_args, GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE);
653
+ chand->per_rpc_retry_buffer_size = (size_t)grpc_channel_arg_get_integer(
654
+ arg, {DEFAULT_PER_RPC_RETRY_BUFFER_SIZE, 0, INT_MAX});
655
+ // Record enable_retries.
656
+ arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_ENABLE_RETRIES);
657
+ chand->enable_retries = grpc_channel_arg_get_bool(arg, true);
725
658
  // Record client channel factory.
726
- const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
727
- GRPC_ARG_CLIENT_CHANNEL_FACTORY);
659
+ arg = grpc_channel_args_find(args->channel_args,
660
+ GRPC_ARG_CLIENT_CHANNEL_FACTORY);
728
661
  if (arg == nullptr) {
729
662
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
730
663
  "Missing client channel factory in args for client channel filter");
@@ -790,12 +723,8 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
790
723
  }
791
724
  gpr_free(chand->info_lb_policy_name);
792
725
  gpr_free(chand->info_service_config_json);
793
- if (chand->retry_throttle_data != nullptr) {
794
- grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
795
- }
796
- if (chand->method_params_table != nullptr) {
797
- grpc_slice_hash_table_unref(chand->method_params_table);
798
- }
726
+ chand->retry_throttle_data.reset();
727
+ chand->method_params_table.reset();
799
728
  grpc_client_channel_stop_backup_polling(chand->interested_parties);
800
729
  grpc_connectivity_state_destroy(&chand->state_tracker);
801
730
  grpc_pollset_set_destroy(chand->interested_parties);
@@ -809,15 +738,123 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
809
738
  */
810
739
 
811
740
  // Max number of batches that can be pending on a call at any given
812
- // time. This includes:
741
+ // time. This includes one batch for each of the following ops:
813
742
  // recv_initial_metadata
814
743
  // send_initial_metadata
815
744
  // recv_message
816
745
  // send_message
817
746
  // recv_trailing_metadata
818
747
  // send_trailing_metadata
819
- // We also add room for a single cancel_stream batch.
820
- #define MAX_WAITING_BATCHES 7
748
+ #define MAX_PENDING_BATCHES 6
749
+
750
+ // Retry support:
751
+ //
752
+ // In order to support retries, we act as a proxy for stream op batches.
753
+ // When we get a batch from the surface, we add it to our list of pending
754
+ // batches, and we then use those batches to construct separate "child"
755
+ // batches to be started on the subchannel call. When the child batches
756
+ // return, we then decide which pending batches have been completed and
757
+ // schedule their callbacks accordingly. If a subchannel call fails and
758
+ // we want to retry it, we do a new pick and start again, constructing
759
+ // new "child" batches for the new subchannel call.
760
+ //
761
+ // Note that retries are committed when receiving data from the server
762
+ // (except for Trailers-Only responses). However, there may be many
763
+ // send ops started before receiving any data, so we may have already
764
+ // completed some number of send ops (and returned the completions up to
765
+ // the surface) by the time we realize that we need to retry. To deal
766
+ // with this, we cache data for send ops, so that we can replay them on a
767
+ // different subchannel call even after we have completed the original
768
+ // batches.
769
+ //
770
+ // There are two sets of data to maintain:
771
+ // - In call_data (in the parent channel), we maintain a list of pending
772
+ // ops and cached data for send ops.
773
+ // - In the subchannel call, we maintain state to indicate what ops have
774
+ // already been sent down to that call.
775
+ //
776
+ // When constructing the "child" batches, we compare those two sets of
777
+ // data to see which batches need to be sent to the subchannel call.
778
+
779
+ // TODO(roth): In subsequent PRs:
780
+ // - add support for transparent retries (including initial metadata)
781
+ // - figure out how to record stats in census for retries
782
+ // (census filter is on top of this one)
783
+ // - add census stats for retries
784
+
785
+ // State used for starting a retryable batch on a subchannel call.
786
+ // This provides its own grpc_transport_stream_op_batch and other data
787
+ // structures needed to populate the ops in the batch.
788
+ // We allocate one struct on the arena for each attempt at starting a
789
+ // batch on a given subchannel call.
790
+ typedef struct {
791
+ gpr_refcount refs;
792
+ grpc_call_element* elem;
793
+ grpc_subchannel_call* subchannel_call; // Holds a ref.
794
+ // The batch to use in the subchannel call.
795
+ // Its payload field points to subchannel_call_retry_state.batch_payload.
796
+ grpc_transport_stream_op_batch batch;
797
+ // For send_initial_metadata.
798
+ // Note that we need to make a copy of the initial metadata for each
799
+ // subchannel call instead of just referring to the copy in call_data,
800
+ // because filters in the subchannel stack will probably add entries,
801
+ // so we need to start in a pristine state for each attempt of the call.
802
+ grpc_linked_mdelem* send_initial_metadata_storage;
803
+ grpc_metadata_batch send_initial_metadata;
804
+ // For send_message.
805
+ grpc_core::ManualConstructor<grpc_core::ByteStreamCache::CachingByteStream>
806
+ send_message;
807
+ // For send_trailing_metadata.
808
+ grpc_linked_mdelem* send_trailing_metadata_storage;
809
+ grpc_metadata_batch send_trailing_metadata;
810
+ // For intercepting recv_initial_metadata.
811
+ grpc_metadata_batch recv_initial_metadata;
812
+ grpc_closure recv_initial_metadata_ready;
813
+ bool trailing_metadata_available;
814
+ // For intercepting recv_message.
815
+ grpc_closure recv_message_ready;
816
+ grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_message;
817
+ // For intercepting recv_trailing_metadata.
818
+ grpc_metadata_batch recv_trailing_metadata;
819
+ grpc_transport_stream_stats collect_stats;
820
+ // For intercepting on_complete.
821
+ grpc_closure on_complete;
822
+ } subchannel_batch_data;
823
+
824
+ // Retry state associated with a subchannel call.
825
+ // Stored in the parent_data of the subchannel call object.
826
+ typedef struct {
827
+ // subchannel_batch_data.batch.payload points to this.
828
+ grpc_transport_stream_op_batch_payload batch_payload;
829
+ // These fields indicate which ops have been started and completed on
830
+ // this subchannel call.
831
+ size_t started_send_message_count;
832
+ size_t completed_send_message_count;
833
+ size_t started_recv_message_count;
834
+ size_t completed_recv_message_count;
835
+ bool started_send_initial_metadata : 1;
836
+ bool completed_send_initial_metadata : 1;
837
+ bool started_send_trailing_metadata : 1;
838
+ bool completed_send_trailing_metadata : 1;
839
+ bool started_recv_initial_metadata : 1;
840
+ bool completed_recv_initial_metadata : 1;
841
+ bool started_recv_trailing_metadata : 1;
842
+ bool completed_recv_trailing_metadata : 1;
843
+ // State for callback processing.
844
+ bool retry_dispatched : 1;
845
+ bool recv_initial_metadata_ready_deferred : 1;
846
+ bool recv_message_ready_deferred : 1;
847
+ grpc_error* recv_initial_metadata_error;
848
+ grpc_error* recv_message_error;
849
+ } subchannel_call_retry_state;
850
+
851
+ // Pending batches stored in call data.
852
+ typedef struct {
853
+ // The pending batch. If nullptr, this slot is empty.
854
+ grpc_transport_stream_op_batch* batch;
855
+ // Indicates whether payload for send ops has been cached in call data.
856
+ bool send_ops_cached;
857
+ } pending_batch;
821
858
 
822
859
  /** Call data. Holds a pointer to grpc_subchannel_call and the
823
860
  associated machinery to create such a pointer.
@@ -840,254 +877,1754 @@ typedef struct client_channel_call_data {
840
877
  grpc_call_stack* owning_call;
841
878
  grpc_call_combiner* call_combiner;
842
879
 
843
- grpc_server_retry_throttle_data* retry_throttle_data;
844
- method_parameters* method_params;
880
+ grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
881
+ grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
845
882
 
846
883
  grpc_subchannel_call* subchannel_call;
847
- grpc_error* error;
884
+
885
+ // Set when we get a cancel_stream op.
886
+ grpc_error* cancel_error;
848
887
 
849
888
  grpc_core::LoadBalancingPolicy::PickState pick;
850
- grpc_closure lb_pick_closure;
851
- grpc_closure lb_pick_cancel_closure;
889
+ grpc_closure pick_closure;
890
+ grpc_closure pick_cancel_closure;
852
891
 
853
892
  grpc_polling_entity* pollent;
854
893
 
855
- grpc_transport_stream_op_batch* waiting_for_pick_batches[MAX_WAITING_BATCHES];
856
- size_t waiting_for_pick_batches_count;
857
- grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
894
+ // Batches are added to this list when received from above.
895
+ // They are removed when we are done handling the batch (i.e., when
896
+ // either we have invoked all of the batch's callbacks or we have
897
+ // passed the batch down to the subchannel call and are not
898
+ // intercepting any of its callbacks).
899
+ pending_batch pending_batches[MAX_PENDING_BATCHES];
900
+ bool pending_send_initial_metadata : 1;
901
+ bool pending_send_message : 1;
902
+ bool pending_send_trailing_metadata : 1;
903
+
904
+ // Retry state.
905
+ bool enable_retries : 1;
906
+ bool retry_committed : 1;
907
+ bool last_attempt_got_server_pushback : 1;
908
+ int num_attempts_completed;
909
+ size_t bytes_buffered_for_retry;
910
+ grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
911
+ grpc_timer retry_timer;
912
+
913
+ // Cached data for retrying send ops.
914
+ // send_initial_metadata
915
+ bool seen_send_initial_metadata;
916
+ grpc_linked_mdelem* send_initial_metadata_storage;
917
+ grpc_metadata_batch send_initial_metadata;
918
+ uint32_t send_initial_metadata_flags;
919
+ gpr_atm* peer_string;
920
+ // send_message
921
+ // When we get a send_message op, we replace the original byte stream
922
+ // with a CachingByteStream that caches the slices to a local buffer for
923
+ // use in retries.
924
+ // Note: We inline the cache for the first 3 send_message ops and use
925
+ // dynamic allocation after that. This number was essentially picked
926
+ // at random; it could be changed in the future to tune performance.
927
+ grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
928
+ // send_trailing_metadata
929
+ bool seen_send_trailing_metadata;
930
+ grpc_linked_mdelem* send_trailing_metadata_storage;
931
+ grpc_metadata_batch send_trailing_metadata;
932
+ } call_data;
858
933
 
859
- grpc_transport_stream_op_batch* initial_metadata_batch;
934
+ // Forward declarations.
935
+ static void retry_commit(grpc_call_element* elem,
936
+ subchannel_call_retry_state* retry_state);
937
+ static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
938
+ static void on_complete(void* arg, grpc_error* error);
939
+ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
940
+ static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
941
+ static void start_pick_locked(void* arg, grpc_error* ignored);
942
+
943
+ //
944
+ // send op data caching
945
+ //
946
+
947
+ // Caches data for send ops so that it can be retried later, if not
948
+ // already cached.
949
+ static void maybe_cache_send_ops_for_batch(call_data* calld,
950
+ pending_batch* pending) {
951
+ if (pending->send_ops_cached) return;
952
+ pending->send_ops_cached = true;
953
+ grpc_transport_stream_op_batch* batch = pending->batch;
954
+ // Save a copy of metadata for send_initial_metadata ops.
955
+ if (batch->send_initial_metadata) {
956
+ calld->seen_send_initial_metadata = true;
957
+ GPR_ASSERT(calld->send_initial_metadata_storage == nullptr);
958
+ grpc_metadata_batch* send_initial_metadata =
959
+ batch->payload->send_initial_metadata.send_initial_metadata;
960
+ calld->send_initial_metadata_storage = (grpc_linked_mdelem*)gpr_arena_alloc(
961
+ calld->arena,
962
+ sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
963
+ grpc_metadata_batch_copy(send_initial_metadata,
964
+ &calld->send_initial_metadata,
965
+ calld->send_initial_metadata_storage);
966
+ calld->send_initial_metadata_flags =
967
+ batch->payload->send_initial_metadata.send_initial_metadata_flags;
968
+ calld->peer_string = batch->payload->send_initial_metadata.peer_string;
969
+ }
970
+ // Set up cache for send_message ops.
971
+ if (batch->send_message) {
972
+ grpc_core::ByteStreamCache* cache =
973
+ static_cast<grpc_core::ByteStreamCache*>(
974
+ gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
975
+ new (cache) grpc_core::ByteStreamCache(
976
+ std::move(batch->payload->send_message.send_message));
977
+ calld->send_messages.push_back(cache);
978
+ }
979
+ // Save metadata batch for send_trailing_metadata ops.
980
+ if (batch->send_trailing_metadata) {
981
+ calld->seen_send_trailing_metadata = true;
982
+ GPR_ASSERT(calld->send_trailing_metadata_storage == nullptr);
983
+ grpc_metadata_batch* send_trailing_metadata =
984
+ batch->payload->send_trailing_metadata.send_trailing_metadata;
985
+ calld->send_trailing_metadata_storage =
986
+ (grpc_linked_mdelem*)gpr_arena_alloc(
987
+ calld->arena,
988
+ sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
989
+ grpc_metadata_batch_copy(send_trailing_metadata,
990
+ &calld->send_trailing_metadata,
991
+ calld->send_trailing_metadata_storage);
992
+ }
993
+ }
860
994
 
861
- grpc_closure on_complete;
862
- grpc_closure* original_on_complete;
863
- } call_data;
995
+ // Frees cached send ops that have already been completed after
996
+ // committing the call.
997
+ static void free_cached_send_op_data_after_commit(
998
+ grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
999
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1000
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1001
+ if (retry_state->completed_send_initial_metadata) {
1002
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
1003
+ }
1004
+ for (size_t i = 0; i < retry_state->completed_send_message_count; ++i) {
1005
+ if (grpc_client_channel_trace.enabled()) {
1006
+ gpr_log(GPR_DEBUG,
1007
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
1008
+ "]",
1009
+ chand, calld, i);
1010
+ }
1011
+ calld->send_messages[i]->Destroy();
1012
+ }
1013
+ if (retry_state->completed_send_trailing_metadata) {
1014
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
1015
+ }
1016
+ }
864
1017
 
865
- grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
866
- grpc_call_element* elem) {
1018
+ // Frees cached send ops that were completed by the completed batch in
1019
+ // batch_data. Used when batches are completed after the call is committed.
1020
+ static void free_cached_send_op_data_for_completed_batch(
1021
+ grpc_call_element* elem, subchannel_batch_data* batch_data,
1022
+ subchannel_call_retry_state* retry_state) {
1023
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
867
1024
  call_data* calld = static_cast<call_data*>(elem->call_data);
868
- return calld->subchannel_call;
1025
+ if (batch_data->batch.send_initial_metadata) {
1026
+ grpc_metadata_batch_destroy(&calld->send_initial_metadata);
1027
+ }
1028
+ if (batch_data->batch.send_message) {
1029
+ if (grpc_client_channel_trace.enabled()) {
1030
+ gpr_log(GPR_DEBUG,
1031
+ "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR
1032
+ "]",
1033
+ chand, calld, retry_state->completed_send_message_count - 1);
1034
+ }
1035
+ calld->send_messages[retry_state->completed_send_message_count - 1]
1036
+ ->Destroy();
1037
+ }
1038
+ if (batch_data->batch.send_trailing_metadata) {
1039
+ grpc_metadata_batch_destroy(&calld->send_trailing_metadata);
1040
+ }
1041
+ }
1042
+
1043
+ //
1044
+ // pending_batches management
1045
+ //
1046
+
1047
+ // Returns the index into calld->pending_batches to be used for batch.
1048
+ static size_t get_batch_index(grpc_transport_stream_op_batch* batch) {
1049
+ // Note: It is important the send_initial_metadata be the first entry
1050
+ // here, since the code in pick_subchannel_locked() assumes it will be.
1051
+ if (batch->send_initial_metadata) return 0;
1052
+ if (batch->send_message) return 1;
1053
+ if (batch->send_trailing_metadata) return 2;
1054
+ if (batch->recv_initial_metadata) return 3;
1055
+ if (batch->recv_message) return 4;
1056
+ if (batch->recv_trailing_metadata) return 5;
1057
+ GPR_UNREACHABLE_CODE(return (size_t)-1);
869
1058
  }
870
1059
 
871
1060
  // This is called via the call combiner, so access to calld is synchronized.
872
- static void waiting_for_pick_batches_add(
873
- call_data* calld, grpc_transport_stream_op_batch* batch) {
874
- if (batch->send_initial_metadata) {
875
- GPR_ASSERT(calld->initial_metadata_batch == nullptr);
876
- calld->initial_metadata_batch = batch;
877
- } else {
878
- GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
879
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
880
- batch;
1061
+ static void pending_batches_add(grpc_call_element* elem,
1062
+ grpc_transport_stream_op_batch* batch) {
1063
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1064
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1065
+ const size_t idx = get_batch_index(batch);
1066
+ if (grpc_client_channel_trace.enabled()) {
1067
+ gpr_log(GPR_DEBUG,
1068
+ "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
1069
+ calld, idx);
1070
+ }
1071
+ pending_batch* pending = &calld->pending_batches[idx];
1072
+ GPR_ASSERT(pending->batch == nullptr);
1073
+ pending->batch = batch;
1074
+ pending->send_ops_cached = false;
1075
+ if (calld->enable_retries) {
1076
+ // Update state in calld about pending batches.
1077
+ // Also check if the batch takes us over the retry buffer limit.
1078
+ // Note: We don't check the size of trailing metadata here, because
1079
+ // gRPC clients do not send trailing metadata.
1080
+ if (batch->send_initial_metadata) {
1081
+ calld->pending_send_initial_metadata = true;
1082
+ calld->bytes_buffered_for_retry += grpc_metadata_batch_size(
1083
+ batch->payload->send_initial_metadata.send_initial_metadata);
1084
+ }
1085
+ if (batch->send_message) {
1086
+ calld->pending_send_message = true;
1087
+ calld->bytes_buffered_for_retry +=
1088
+ batch->payload->send_message.send_message->length();
1089
+ }
1090
+ if (batch->send_trailing_metadata) {
1091
+ calld->pending_send_trailing_metadata = true;
1092
+ }
1093
+ if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
1094
+ if (grpc_client_channel_trace.enabled()) {
1095
+ gpr_log(GPR_DEBUG,
1096
+ "chand=%p calld=%p: exceeded retry buffer size, committing",
1097
+ chand, calld);
1098
+ }
1099
+ subchannel_call_retry_state* retry_state =
1100
+ calld->subchannel_call == nullptr
1101
+ ? nullptr
1102
+ : static_cast<subchannel_call_retry_state*>(
1103
+ grpc_connected_subchannel_call_get_parent_data(
1104
+ calld->subchannel_call));
1105
+ retry_commit(elem, retry_state);
1106
+ // If we are not going to retry and have not yet started, pretend
1107
+ // retries are disabled so that we don't bother with retry overhead.
1108
+ if (calld->num_attempts_completed == 0) {
1109
+ if (grpc_client_channel_trace.enabled()) {
1110
+ gpr_log(GPR_DEBUG,
1111
+ "chand=%p calld=%p: disabling retries before first attempt",
1112
+ chand, calld);
1113
+ }
1114
+ calld->enable_retries = false;
1115
+ }
1116
+ }
1117
+ }
1118
+ }
1119
+
1120
+ static void pending_batch_clear(call_data* calld, pending_batch* pending) {
1121
+ if (calld->enable_retries) {
1122
+ if (pending->batch->send_initial_metadata) {
1123
+ calld->pending_send_initial_metadata = false;
1124
+ }
1125
+ if (pending->batch->send_message) {
1126
+ calld->pending_send_message = false;
1127
+ }
1128
+ if (pending->batch->send_trailing_metadata) {
1129
+ calld->pending_send_trailing_metadata = false;
1130
+ }
881
1131
  }
1132
+ pending->batch = nullptr;
882
1133
  }
883
1134
 
884
1135
  // This is called via the call combiner, so access to calld is synchronized.
885
1136
  static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
886
- call_data* calld = static_cast<call_data*>(arg);
887
- if (calld->waiting_for_pick_batches_count > 0) {
888
- --calld->waiting_for_pick_batches_count;
889
- grpc_transport_stream_op_batch_finish_with_failure(
890
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
891
- GRPC_ERROR_REF(error), calld->call_combiner);
892
- }
1137
+ grpc_transport_stream_op_batch* batch =
1138
+ static_cast<grpc_transport_stream_op_batch*>(arg);
1139
+ call_data* calld = static_cast<call_data*>(batch->handler_private.extra_arg);
1140
+ // Note: This will release the call combiner.
1141
+ grpc_transport_stream_op_batch_finish_with_failure(
1142
+ batch, GRPC_ERROR_REF(error), calld->call_combiner);
893
1143
  }
894
1144
 
895
1145
  // This is called via the call combiner, so access to calld is synchronized.
896
- static void waiting_for_pick_batches_fail(grpc_call_element* elem,
897
- grpc_error* error) {
1146
+ // If yield_call_combiner is true, assumes responsibility for yielding
1147
+ // the call combiner.
1148
+ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
1149
+ bool yield_call_combiner) {
1150
+ GPR_ASSERT(error != GRPC_ERROR_NONE);
898
1151
  call_data* calld = static_cast<call_data*>(elem->call_data);
899
1152
  if (grpc_client_channel_trace.enabled()) {
1153
+ size_t num_batches = 0;
1154
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1155
+ if (calld->pending_batches[i].batch != nullptr) ++num_batches;
1156
+ }
900
1157
  gpr_log(GPR_DEBUG,
901
1158
  "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
902
- elem->channel_data, calld, calld->waiting_for_pick_batches_count,
903
- grpc_error_string(error));
1159
+ elem->channel_data, calld, num_batches, grpc_error_string(error));
1160
+ }
1161
+ grpc_transport_stream_op_batch*
1162
+ batches[GPR_ARRAY_SIZE(calld->pending_batches)];
1163
+ size_t num_batches = 0;
1164
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1165
+ pending_batch* pending = &calld->pending_batches[i];
1166
+ grpc_transport_stream_op_batch* batch = pending->batch;
1167
+ if (batch != nullptr) {
1168
+ batches[num_batches++] = batch;
1169
+ pending_batch_clear(calld, pending);
1170
+ }
904
1171
  }
905
- for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
906
- GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
907
- fail_pending_batch_in_call_combiner, calld,
1172
+ for (size_t i = yield_call_combiner ? 1 : 0; i < num_batches; ++i) {
1173
+ grpc_transport_stream_op_batch* batch = batches[i];
1174
+ batch->handler_private.extra_arg = calld;
1175
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
1176
+ fail_pending_batch_in_call_combiner, batch,
908
1177
  grpc_schedule_on_exec_ctx);
909
- GRPC_CALL_COMBINER_START(
910
- calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
911
- GRPC_ERROR_REF(error), "waiting_for_pick_batches_fail");
912
- }
913
- if (calld->initial_metadata_batch != nullptr) {
914
- grpc_transport_stream_op_batch_finish_with_failure(
915
- calld->initial_metadata_batch, GRPC_ERROR_REF(error),
916
- calld->call_combiner);
917
- } else {
918
- GRPC_CALL_COMBINER_STOP(calld->call_combiner,
919
- "waiting_for_pick_batches_fail");
1178
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
1179
+ &batch->handler_private.closure,
1180
+ GRPC_ERROR_REF(error), "pending_batches_fail");
1181
+ }
1182
+ if (yield_call_combiner) {
1183
+ if (num_batches > 0) {
1184
+ // Note: This will release the call combiner.
1185
+ grpc_transport_stream_op_batch_finish_with_failure(
1186
+ batches[0], GRPC_ERROR_REF(error), calld->call_combiner);
1187
+ } else {
1188
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "pending_batches_fail");
1189
+ }
920
1190
  }
921
1191
  GRPC_ERROR_UNREF(error);
922
1192
  }
923
1193
 
924
1194
  // This is called via the call combiner, so access to calld is synchronized.
925
- static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
926
- call_data* calld = static_cast<call_data*>(arg);
927
- if (calld->waiting_for_pick_batches_count > 0) {
928
- --calld->waiting_for_pick_batches_count;
929
- grpc_subchannel_call_process_op(
930
- calld->subchannel_call,
931
- calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
932
- }
1195
+ static void resume_pending_batch_in_call_combiner(void* arg,
1196
+ grpc_error* ignored) {
1197
+ grpc_transport_stream_op_batch* batch =
1198
+ static_cast<grpc_transport_stream_op_batch*>(arg);
1199
+ grpc_subchannel_call* subchannel_call =
1200
+ static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
1201
+ // Note: This will release the call combiner.
1202
+ grpc_subchannel_call_process_op(subchannel_call, batch);
933
1203
  }
934
1204
 
935
1205
  // This is called via the call combiner, so access to calld is synchronized.
936
- static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
1206
+ static void pending_batches_resume(grpc_call_element* elem) {
937
1207
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
938
1208
  call_data* calld = static_cast<call_data*>(elem->call_data);
1209
+ if (calld->enable_retries) {
1210
+ start_retriable_subchannel_batches(elem, GRPC_ERROR_NONE);
1211
+ return;
1212
+ }
1213
+ // Retries not enabled; send down batches as-is.
939
1214
  if (grpc_client_channel_trace.enabled()) {
1215
+ size_t num_batches = 0;
1216
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1217
+ if (calld->pending_batches[i].batch != nullptr) ++num_batches;
1218
+ }
940
1219
  gpr_log(GPR_DEBUG,
941
- "chand=%p calld=%p: sending %" PRIuPTR
942
- " pending batches to subchannel_call=%p",
943
- chand, calld, calld->waiting_for_pick_batches_count,
944
- calld->subchannel_call);
945
- }
946
- for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
947
- GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
948
- run_pending_batch_in_call_combiner, calld,
1220
+ "chand=%p calld=%p: starting %" PRIuPTR
1221
+ " pending batches on subchannel_call=%p",
1222
+ chand, calld, num_batches, calld->subchannel_call);
1223
+ }
1224
+ grpc_transport_stream_op_batch*
1225
+ batches[GPR_ARRAY_SIZE(calld->pending_batches)];
1226
+ size_t num_batches = 0;
1227
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1228
+ pending_batch* pending = &calld->pending_batches[i];
1229
+ grpc_transport_stream_op_batch* batch = pending->batch;
1230
+ if (batch != nullptr) {
1231
+ batches[num_batches++] = batch;
1232
+ pending_batch_clear(calld, pending);
1233
+ }
1234
+ }
1235
+ for (size_t i = 1; i < num_batches; ++i) {
1236
+ grpc_transport_stream_op_batch* batch = batches[i];
1237
+ batch->handler_private.extra_arg = calld->subchannel_call;
1238
+ GRPC_CLOSURE_INIT(&batch->handler_private.closure,
1239
+ resume_pending_batch_in_call_combiner, batch,
949
1240
  grpc_schedule_on_exec_ctx);
950
- GRPC_CALL_COMBINER_START(
951
- calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
952
- GRPC_ERROR_NONE, "waiting_for_pick_batches_resume");
1241
+ GRPC_CALL_COMBINER_START(calld->call_combiner,
1242
+ &batch->handler_private.closure, GRPC_ERROR_NONE,
1243
+ "pending_batches_resume");
953
1244
  }
954
- GPR_ASSERT(calld->initial_metadata_batch != nullptr);
955
- grpc_subchannel_call_process_op(calld->subchannel_call,
956
- calld->initial_metadata_batch);
1245
+ GPR_ASSERT(num_batches > 0);
1246
+ // Note: This will release the call combiner.
1247
+ grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
957
1248
  }
958
1249
 
959
- // Applies service config to the call. Must be invoked once we know
960
- // that the resolver has returned results to the channel.
961
- static void apply_service_config_to_call_locked(grpc_call_element* elem) {
1250
+ static void maybe_clear_pending_batch(grpc_call_element* elem,
1251
+ pending_batch* pending) {
962
1252
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
963
1253
  call_data* calld = static_cast<call_data*>(elem->call_data);
964
- if (grpc_client_channel_trace.enabled()) {
965
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
966
- chand, calld);
967
- }
968
- if (chand->retry_throttle_data != nullptr) {
969
- calld->retry_throttle_data =
970
- grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
971
- }
972
- if (chand->method_params_table != nullptr) {
973
- calld->method_params = static_cast<method_parameters*>(
974
- grpc_method_config_table_get(chand->method_params_table, calld->path));
975
- if (calld->method_params != nullptr) {
976
- method_parameters_ref(calld->method_params);
977
- // If the deadline from the service config is shorter than the one
978
- // from the client API, reset the deadline timer.
979
- if (chand->deadline_checking_enabled &&
980
- calld->method_params->timeout != 0) {
981
- const grpc_millis per_method_deadline =
982
- grpc_timespec_to_millis_round_up(calld->call_start_time) +
983
- calld->method_params->timeout;
984
- if (per_method_deadline < calld->deadline) {
985
- calld->deadline = per_method_deadline;
986
- grpc_deadline_state_reset(elem, calld->deadline);
987
- }
988
- }
1254
+ grpc_transport_stream_op_batch* batch = pending->batch;
1255
+ // We clear the pending batch if all of its callbacks have been
1256
+ // scheduled and reset to nullptr.
1257
+ if (batch->on_complete == nullptr &&
1258
+ (!batch->recv_initial_metadata ||
1259
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready ==
1260
+ nullptr) &&
1261
+ (!batch->recv_message ||
1262
+ batch->payload->recv_message.recv_message_ready == nullptr)) {
1263
+ if (grpc_client_channel_trace.enabled()) {
1264
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand,
1265
+ calld);
989
1266
  }
1267
+ pending_batch_clear(calld, pending);
990
1268
  }
991
1269
  }
992
1270
 
993
- static void create_subchannel_call_locked(grpc_call_element* elem,
994
- grpc_error* error) {
995
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
996
- call_data* calld = static_cast<call_data*>(elem->call_data);
997
- const grpc_core::ConnectedSubchannel::CallArgs call_args = {
998
- calld->pollent, // pollent
999
- calld->path, // path
1000
- calld->call_start_time, // start_time
1001
- calld->deadline, // deadline
1002
- calld->arena, // arena
1003
- calld->pick.subchannel_call_context, // context
1004
- calld->call_combiner // call_combiner
1005
- };
1006
- grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
1007
- call_args, &calld->subchannel_call);
1008
- if (grpc_client_channel_trace.enabled()) {
1009
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
1010
- chand, calld, calld->subchannel_call, grpc_error_string(new_error));
1271
+ // Returns true if all ops in the pending batch have been completed.
1272
+ static bool pending_batch_is_completed(
1273
+ pending_batch* pending, call_data* calld,
1274
+ subchannel_call_retry_state* retry_state) {
1275
+ if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
1276
+ return false;
1011
1277
  }
1012
- if (new_error != GRPC_ERROR_NONE) {
1013
- new_error = grpc_error_add_child(new_error, error);
1014
- waiting_for_pick_batches_fail(elem, new_error);
1015
- } else {
1016
- waiting_for_pick_batches_resume(elem);
1278
+ if (pending->batch->send_initial_metadata &&
1279
+ !retry_state->completed_send_initial_metadata) {
1280
+ return false;
1017
1281
  }
1018
- GRPC_ERROR_UNREF(error);
1282
+ if (pending->batch->send_message &&
1283
+ retry_state->completed_send_message_count < calld->send_messages.size()) {
1284
+ return false;
1285
+ }
1286
+ if (pending->batch->send_trailing_metadata &&
1287
+ !retry_state->completed_send_trailing_metadata) {
1288
+ return false;
1289
+ }
1290
+ if (pending->batch->recv_initial_metadata &&
1291
+ !retry_state->completed_recv_initial_metadata) {
1292
+ return false;
1293
+ }
1294
+ if (pending->batch->recv_message &&
1295
+ retry_state->completed_recv_message_count <
1296
+ retry_state->started_recv_message_count) {
1297
+ return false;
1298
+ }
1299
+ if (pending->batch->recv_trailing_metadata &&
1300
+ !retry_state->completed_recv_trailing_metadata) {
1301
+ return false;
1302
+ }
1303
+ return true;
1019
1304
  }
1020
1305
 
1021
- // Invoked when a pick is completed, on both success or failure.
1022
- static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
1023
- call_data* calld = static_cast<call_data*>(elem->call_data);
1024
- channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1025
- if (calld->pick.connected_subchannel == nullptr) {
1026
- // Failed to create subchannel.
1027
- GRPC_ERROR_UNREF(calld->error);
1028
- calld->error = error == GRPC_ERROR_NONE
1029
- ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1030
- "Call dropped by load balancing policy")
1031
- : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1032
- "Failed to create subchannel", &error, 1);
1033
- if (grpc_client_channel_trace.enabled()) {
1034
- gpr_log(GPR_DEBUG,
1035
- "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
1036
- calld, grpc_error_string(calld->error));
1037
- }
1038
- waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
1039
- } else {
1040
- /* Create call on subchannel. */
1041
- create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
1306
+ // Returns true if any op in the batch was not yet started.
1307
+ static bool pending_batch_is_unstarted(
1308
+ pending_batch* pending, call_data* calld,
1309
+ subchannel_call_retry_state* retry_state) {
1310
+ if (pending->batch == nullptr || pending->batch->on_complete == nullptr) {
1311
+ return false;
1042
1312
  }
1043
- GRPC_ERROR_UNREF(error);
1313
+ if (pending->batch->send_initial_metadata &&
1314
+ !retry_state->started_send_initial_metadata) {
1315
+ return true;
1316
+ }
1317
+ if (pending->batch->send_message &&
1318
+ retry_state->started_send_message_count < calld->send_messages.size()) {
1319
+ return true;
1320
+ }
1321
+ if (pending->batch->send_trailing_metadata &&
1322
+ !retry_state->started_send_trailing_metadata) {
1323
+ return true;
1324
+ }
1325
+ if (pending->batch->recv_initial_metadata &&
1326
+ !retry_state->started_recv_initial_metadata) {
1327
+ return true;
1328
+ }
1329
+ if (pending->batch->recv_message &&
1330
+ retry_state->completed_recv_message_count ==
1331
+ retry_state->started_recv_message_count) {
1332
+ return true;
1333
+ }
1334
+ if (pending->batch->recv_trailing_metadata &&
1335
+ !retry_state->started_recv_trailing_metadata) {
1336
+ return true;
1337
+ }
1338
+ return false;
1044
1339
  }
1045
1340
 
1046
- // A wrapper around pick_done_locked() that is used in cases where
1047
- // either (a) the pick was deferred pending a resolver result or (b) the
1048
- // pick was done asynchronously. Removes the call's polling entity from
1049
- // chand->interested_parties before invoking pick_done_locked().
1050
- static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
1341
+ //
1342
+ // retry code
1343
+ //
1344
+
1345
+ // Commits the call so that no further retry attempts will be performed.
1346
+ static void retry_commit(grpc_call_element* elem,
1347
+ subchannel_call_retry_state* retry_state) {
1051
1348
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1052
1349
  call_data* calld = static_cast<call_data*>(elem->call_data);
1053
- grpc_polling_entity_del_from_pollset_set(calld->pollent,
1054
- chand->interested_parties);
1055
- pick_done_locked(elem, error);
1350
+ if (calld->retry_committed) return;
1351
+ calld->retry_committed = true;
1352
+ if (grpc_client_channel_trace.enabled()) {
1353
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld);
1354
+ }
1355
+ if (retry_state != nullptr) {
1356
+ free_cached_send_op_data_after_commit(elem, retry_state);
1357
+ }
1056
1358
  }
1057
1359
 
1058
- // Note: This runs under the client_channel combiner, but will NOT be
1059
- // holding the call combiner.
1060
- static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
1061
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
1360
+ // Starts a retry after appropriate back-off.
1361
+ static void do_retry(grpc_call_element* elem,
1362
+ subchannel_call_retry_state* retry_state,
1363
+ grpc_millis server_pushback_ms) {
1062
1364
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1063
1365
  call_data* calld = static_cast<call_data*>(elem->call_data);
1064
- // Note: chand->lb_policy may have changed since we started our pick,
1065
- // in which case we will be cancelling the pick on a policy other than
1066
- // the one we started it on. However, this will just be a no-op.
1067
- if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
1068
- if (grpc_client_channel_trace.enabled()) {
1069
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
1070
- chand, calld, chand->lb_policy.get());
1366
+ GPR_ASSERT(calld->method_params != nullptr);
1367
+ const ClientChannelMethodParams::RetryPolicy* retry_policy =
1368
+ calld->method_params->retry_policy();
1369
+ GPR_ASSERT(retry_policy != nullptr);
1370
+ // Reset subchannel call and connected subchannel.
1371
+ if (calld->subchannel_call != nullptr) {
1372
+ GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
1373
+ "client_channel_call_retry");
1374
+ calld->subchannel_call = nullptr;
1375
+ }
1376
+ if (calld->pick.connected_subchannel != nullptr) {
1377
+ calld->pick.connected_subchannel.reset();
1378
+ }
1379
+ // Compute backoff delay.
1380
+ grpc_millis next_attempt_time;
1381
+ if (server_pushback_ms >= 0) {
1382
+ next_attempt_time = grpc_core::ExecCtx::Get()->Now() + server_pushback_ms;
1383
+ calld->last_attempt_got_server_pushback = true;
1384
+ } else {
1385
+ if (calld->num_attempts_completed == 1 ||
1386
+ calld->last_attempt_got_server_pushback) {
1387
+ calld->retry_backoff.Init(
1388
+ grpc_core::BackOff::Options()
1389
+ .set_initial_backoff(retry_policy->initial_backoff)
1390
+ .set_multiplier(retry_policy->backoff_multiplier)
1391
+ .set_jitter(RETRY_BACKOFF_JITTER)
1392
+ .set_max_backoff(retry_policy->max_backoff));
1393
+ calld->last_attempt_got_server_pushback = false;
1071
1394
  }
1072
- chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
1395
+ next_attempt_time = calld->retry_backoff->NextAttemptTime();
1073
1396
  }
1074
- GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
1397
+ if (grpc_client_channel_trace.enabled()) {
1398
+ gpr_log(GPR_DEBUG,
1399
+ "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
1400
+ calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
1401
+ }
1402
+ // Schedule retry after computed delay.
1403
+ GRPC_CLOSURE_INIT(&calld->pick_closure, start_pick_locked, elem,
1404
+ grpc_combiner_scheduler(chand->combiner));
1405
+ grpc_timer_init(&calld->retry_timer, next_attempt_time, &calld->pick_closure);
1406
+ // Update bookkeeping.
1407
+ if (retry_state != nullptr) retry_state->retry_dispatched = true;
1075
1408
  }
1076
1409
 
1077
- // Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
1078
- // Unrefs the LB policy and invokes async_pick_done_locked().
1079
- static void pick_callback_done_locked(void* arg, grpc_error* error) {
1080
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
1410
+ // Returns true if the call is being retried.
1411
+ static bool maybe_retry(grpc_call_element* elem,
1412
+ subchannel_batch_data* batch_data,
1413
+ grpc_status_code status,
1414
+ grpc_mdelem* server_pushback_md) {
1081
1415
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1082
1416
  call_data* calld = static_cast<call_data*>(elem->call_data);
1083
- if (grpc_client_channel_trace.enabled()) {
1084
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
1085
- chand, calld);
1417
+ // Get retry policy.
1418
+ if (calld->method_params == nullptr) return false;
1419
+ const ClientChannelMethodParams::RetryPolicy* retry_policy =
1420
+ calld->method_params->retry_policy();
1421
+ if (retry_policy == nullptr) return false;
1422
+ // If we've already dispatched a retry from this call, return true.
1423
+ // This catches the case where the batch has multiple callbacks
1424
+ // (i.e., it includes either recv_message or recv_initial_metadata).
1425
+ subchannel_call_retry_state* retry_state = nullptr;
1426
+ if (batch_data != nullptr) {
1427
+ retry_state = static_cast<subchannel_call_retry_state*>(
1428
+ grpc_connected_subchannel_call_get_parent_data(
1429
+ batch_data->subchannel_call));
1430
+ if (retry_state->retry_dispatched) {
1431
+ if (grpc_client_channel_trace.enabled()) {
1432
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand,
1433
+ calld);
1434
+ }
1435
+ return true;
1436
+ }
1437
+ }
1438
+ // Check status.
1439
+ if (status == GRPC_STATUS_OK) {
1440
+ if (calld->retry_throttle_data != nullptr) {
1441
+ calld->retry_throttle_data->RecordSuccess();
1442
+ }
1443
+ if (grpc_client_channel_trace.enabled()) {
1444
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld);
1445
+ }
1446
+ return false;
1447
+ }
1448
+ // Status is not OK. Check whether the status is retryable.
1449
+ if (!retry_policy->retryable_status_codes.Contains(status)) {
1450
+ if (grpc_client_channel_trace.enabled()) {
1451
+ gpr_log(GPR_DEBUG,
1452
+ "chand=%p calld=%p: status %s not configured as retryable", chand,
1453
+ calld, grpc_status_code_to_string(status));
1454
+ }
1455
+ return false;
1456
+ }
1457
+ // Record the failure and check whether retries are throttled.
1458
+ // Note that it's important for this check to come after the status
1459
+ // code check above, since we should only record failures whose statuses
1460
+ // match the configured retryable status codes, so that we don't count
1461
+ // things like failures due to malformed requests (INVALID_ARGUMENT).
1462
+ // Conversely, it's important for this to come before the remaining
1463
+ // checks, so that we don't fail to record failures due to other factors.
1464
+ if (calld->retry_throttle_data != nullptr &&
1465
+ !calld->retry_throttle_data->RecordFailure()) {
1466
+ if (grpc_client_channel_trace.enabled()) {
1467
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld);
1468
+ }
1469
+ return false;
1470
+ }
1471
+ // Check whether the call is committed.
1472
+ if (calld->retry_committed) {
1473
+ if (grpc_client_channel_trace.enabled()) {
1474
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand,
1475
+ calld);
1476
+ }
1477
+ return false;
1478
+ }
1479
+ // Check whether we have retries remaining.
1480
+ ++calld->num_attempts_completed;
1481
+ if (calld->num_attempts_completed >= retry_policy->max_attempts) {
1482
+ if (grpc_client_channel_trace.enabled()) {
1483
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand,
1484
+ calld, retry_policy->max_attempts);
1485
+ }
1486
+ return false;
1487
+ }
1488
+ // If the call was cancelled from the surface, don't retry.
1489
+ if (calld->cancel_error != GRPC_ERROR_NONE) {
1490
+ if (grpc_client_channel_trace.enabled()) {
1491
+ gpr_log(GPR_DEBUG,
1492
+ "chand=%p calld=%p: call cancelled from surface, not retrying",
1493
+ chand, calld);
1494
+ }
1495
+ return false;
1496
+ }
1497
+ // Check server push-back.
1498
+ grpc_millis server_pushback_ms = -1;
1499
+ if (server_pushback_md != nullptr) {
1500
+ // If the value is "-1" or any other unparseable string, we do not retry.
1501
+ uint32_t ms;
1502
+ if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
1503
+ if (grpc_client_channel_trace.enabled()) {
1504
+ gpr_log(GPR_DEBUG,
1505
+ "chand=%p calld=%p: not retrying due to server push-back",
1506
+ chand, calld);
1507
+ }
1508
+ return false;
1509
+ } else {
1510
+ if (grpc_client_channel_trace.enabled()) {
1511
+ gpr_log(GPR_DEBUG,
1512
+ "chand=%p calld=%p: server push-back: retry in %u ms", chand,
1513
+ calld, ms);
1514
+ }
1515
+ server_pushback_ms = (grpc_millis)ms;
1516
+ }
1517
+ }
1518
+ do_retry(elem, retry_state, server_pushback_ms);
1519
+ return true;
1520
+ }
1521
+
1522
+ //
1523
+ // subchannel_batch_data
1524
+ //
1525
+
1526
+ static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
1527
+ int refcount) {
1528
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1529
+ subchannel_call_retry_state* retry_state =
1530
+ static_cast<subchannel_call_retry_state*>(
1531
+ grpc_connected_subchannel_call_get_parent_data(
1532
+ calld->subchannel_call));
1533
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
1534
+ gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
1535
+ batch_data->elem = elem;
1536
+ batch_data->subchannel_call =
1537
+ GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
1538
+ batch_data->batch.payload = &retry_state->batch_payload;
1539
+ gpr_ref_init(&batch_data->refs, refcount);
1540
+ GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
1541
+ grpc_schedule_on_exec_ctx);
1542
+ batch_data->batch.on_complete = &batch_data->on_complete;
1543
+ GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
1544
+ return batch_data;
1545
+ }
1546
+
1547
+ static void batch_data_unref(subchannel_batch_data* batch_data) {
1548
+ if (gpr_unref(&batch_data->refs)) {
1549
+ if (batch_data->send_initial_metadata_storage != nullptr) {
1550
+ grpc_metadata_batch_destroy(&batch_data->send_initial_metadata);
1551
+ }
1552
+ if (batch_data->send_trailing_metadata_storage != nullptr) {
1553
+ grpc_metadata_batch_destroy(&batch_data->send_trailing_metadata);
1554
+ }
1555
+ if (batch_data->batch.recv_initial_metadata) {
1556
+ grpc_metadata_batch_destroy(&batch_data->recv_initial_metadata);
1557
+ }
1558
+ if (batch_data->batch.recv_trailing_metadata) {
1559
+ grpc_metadata_batch_destroy(&batch_data->recv_trailing_metadata);
1560
+ }
1561
+ GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
1562
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
1563
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
1564
+ }
1565
+ }
1566
+
1567
+ //
1568
+ // recv_initial_metadata callback handling
1569
+ //
1570
+
1571
+ // Invokes recv_initial_metadata_ready for a subchannel batch.
1572
+ static void invoke_recv_initial_metadata_callback(void* arg,
1573
+ grpc_error* error) {
1574
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
1575
+ channel_data* chand =
1576
+ static_cast<channel_data*>(batch_data->elem->channel_data);
1577
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
1578
+ // Find pending batch.
1579
+ pending_batch* pending = nullptr;
1580
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1581
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
1582
+ if (batch != nullptr && batch->recv_initial_metadata &&
1583
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
1584
+ nullptr) {
1585
+ if (grpc_client_channel_trace.enabled()) {
1586
+ gpr_log(GPR_DEBUG,
1587
+ "chand=%p calld=%p: invoking recv_initial_metadata_ready for "
1588
+ "pending batch at index %" PRIuPTR,
1589
+ chand, calld, i);
1590
+ }
1591
+ pending = &calld->pending_batches[i];
1592
+ break;
1593
+ }
1594
+ }
1595
+ GPR_ASSERT(pending != nullptr);
1596
+ // Return metadata.
1597
+ grpc_metadata_batch_move(
1598
+ &batch_data->recv_initial_metadata,
1599
+ pending->batch->payload->recv_initial_metadata.recv_initial_metadata);
1600
+ // Update bookkeeping.
1601
+ // Note: Need to do this before invoking the callback, since invoking
1602
+ // the callback will result in yielding the call combiner.
1603
+ grpc_closure* recv_initial_metadata_ready =
1604
+ pending->batch->payload->recv_initial_metadata
1605
+ .recv_initial_metadata_ready;
1606
+ pending->batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
1607
+ nullptr;
1608
+ maybe_clear_pending_batch(batch_data->elem, pending);
1609
+ batch_data_unref(batch_data);
1610
+ // Invoke callback.
1611
+ GRPC_CLOSURE_RUN(recv_initial_metadata_ready, GRPC_ERROR_REF(error));
1612
+ }
1613
+
1614
+ // Intercepts recv_initial_metadata_ready callback for retries.
1615
+ // Commits the call and returns the initial metadata up the stack.
1616
+ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
1617
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
1618
+ grpc_call_element* elem = batch_data->elem;
1619
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1620
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1621
+ if (grpc_client_channel_trace.enabled()) {
1622
+ gpr_log(GPR_DEBUG,
1623
+ "chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
1624
+ chand, calld, grpc_error_string(error));
1625
+ }
1626
+ subchannel_call_retry_state* retry_state =
1627
+ static_cast<subchannel_call_retry_state*>(
1628
+ grpc_connected_subchannel_call_get_parent_data(
1629
+ batch_data->subchannel_call));
1630
+ // If we got an error or a Trailers-Only response and have not yet gotten
1631
+ // the recv_trailing_metadata on_complete callback, then defer
1632
+ // propagating this callback back to the surface. We can evaluate whether
1633
+ // to retry when recv_trailing_metadata comes back.
1634
+ if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
1635
+ !retry_state->completed_recv_trailing_metadata) {
1636
+ if (grpc_client_channel_trace.enabled()) {
1637
+ gpr_log(GPR_DEBUG,
1638
+ "chand=%p calld=%p: deferring recv_initial_metadata_ready "
1639
+ "(Trailers-Only)",
1640
+ chand, calld);
1641
+ }
1642
+ retry_state->recv_initial_metadata_ready_deferred = true;
1643
+ retry_state->recv_initial_metadata_error = GRPC_ERROR_REF(error);
1644
+ if (!retry_state->started_recv_trailing_metadata) {
1645
+ // recv_trailing_metadata not yet started by application; start it
1646
+ // ourselves to get status.
1647
+ start_internal_recv_trailing_metadata(elem);
1648
+ } else {
1649
+ GRPC_CALL_COMBINER_STOP(
1650
+ calld->call_combiner,
1651
+ "recv_initial_metadata_ready trailers-only or error");
1652
+ }
1653
+ return;
1654
+ }
1655
+ // Received valid initial metadata, so commit the call.
1656
+ retry_commit(elem, retry_state);
1657
+ // Manually invoking a callback function; it does not take ownership of error.
1658
+ invoke_recv_initial_metadata_callback(batch_data, error);
1659
+ GRPC_ERROR_UNREF(error);
1660
+ }
1661
+
1662
+ //
1663
+ // recv_message callback handling
1664
+ //
1665
+
1666
+ // Invokes recv_message_ready for a subchannel batch.
1667
+ static void invoke_recv_message_callback(void* arg, grpc_error* error) {
1668
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
1669
+ channel_data* chand =
1670
+ static_cast<channel_data*>(batch_data->elem->channel_data);
1671
+ call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
1672
+ // Find pending op.
1673
+ pending_batch* pending = nullptr;
1674
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
1675
+ grpc_transport_stream_op_batch* batch = calld->pending_batches[i].batch;
1676
+ if (batch != nullptr && batch->recv_message &&
1677
+ batch->payload->recv_message.recv_message_ready != nullptr) {
1678
+ if (grpc_client_channel_trace.enabled()) {
1679
+ gpr_log(GPR_DEBUG,
1680
+ "chand=%p calld=%p: invoking recv_message_ready for "
1681
+ "pending batch at index %" PRIuPTR,
1682
+ chand, calld, i);
1683
+ }
1684
+ pending = &calld->pending_batches[i];
1685
+ break;
1686
+ }
1687
+ }
1688
+ GPR_ASSERT(pending != nullptr);
1689
+ // Return payload.
1690
+ *pending->batch->payload->recv_message.recv_message =
1691
+ std::move(batch_data->recv_message);
1692
+ // Update bookkeeping.
1693
+ // Note: Need to do this before invoking the callback, since invoking
1694
+ // the callback will result in yielding the call combiner.
1695
+ grpc_closure* recv_message_ready =
1696
+ pending->batch->payload->recv_message.recv_message_ready;
1697
+ pending->batch->payload->recv_message.recv_message_ready = nullptr;
1698
+ maybe_clear_pending_batch(batch_data->elem, pending);
1699
+ batch_data_unref(batch_data);
1700
+ // Invoke callback.
1701
+ GRPC_CLOSURE_RUN(recv_message_ready, GRPC_ERROR_REF(error));
1702
+ }
1703
+
1704
+ // Intercepts recv_message_ready callback for retries.
1705
+ // Commits the call and returns the message up the stack.
1706
+ static void recv_message_ready(void* arg, grpc_error* error) {
1707
+ subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
1708
+ grpc_call_element* elem = batch_data->elem;
1709
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1710
+ call_data* calld = static_cast<call_data*>(elem->call_data);
1711
+ if (grpc_client_channel_trace.enabled()) {
1712
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s",
1713
+ chand, calld, grpc_error_string(error));
1714
+ }
1715
+ subchannel_call_retry_state* retry_state =
1716
+ static_cast<subchannel_call_retry_state*>(
1717
+ grpc_connected_subchannel_call_get_parent_data(
1718
+ batch_data->subchannel_call));
1719
+ // If we got an error or the payload was nullptr and we have not yet gotten
1720
+ // the recv_trailing_metadata on_complete callback, then defer
1721
+ // propagating this callback back to the surface. We can evaluate whether
1722
+ // to retry when recv_trailing_metadata comes back.
1723
+ if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
1724
+ !retry_state->completed_recv_trailing_metadata) {
1725
+ if (grpc_client_channel_trace.enabled()) {
1726
+ gpr_log(GPR_DEBUG,
1727
+ "chand=%p calld=%p: deferring recv_message_ready (nullptr "
1728
+ "message and recv_trailing_metadata pending)",
1729
+ chand, calld);
1730
+ }
1731
+ retry_state->recv_message_ready_deferred = true;
1732
+ retry_state->recv_message_error = GRPC_ERROR_REF(error);
1733
+ if (!retry_state->started_recv_trailing_metadata) {
1734
+ // recv_trailing_metadata not yet started by application; start it
1735
+ // ourselves to get status.
1736
+ start_internal_recv_trailing_metadata(elem);
1737
+ } else {
1738
+ GRPC_CALL_COMBINER_STOP(calld->call_combiner, "recv_message_ready null");
1739
+ }
1740
+ return;
1741
+ }
1742
+ // Received a valid message, so commit the call.
1743
+ retry_commit(elem, retry_state);
1744
+ // Manually invoking a callback function; it does not take ownership of error.
1745
+ invoke_recv_message_callback(batch_data, error);
1746
+ GRPC_ERROR_UNREF(error);
1747
+ }
1748
+
1749
+ //
1750
+ // on_complete callback handling
1751
+ //
1752
+
1753
+ // Updates retry_state to reflect the ops completed in batch_data.
1754
+ static void update_retry_state_for_completed_batch(
1755
+ subchannel_batch_data* batch_data,
1756
+ subchannel_call_retry_state* retry_state) {
1757
+ if (batch_data->batch.send_initial_metadata) {
1758
+ retry_state->completed_send_initial_metadata = true;
1759
+ }
1760
+ if (batch_data->batch.send_message) {
1761
+ ++retry_state->completed_send_message_count;
1762
+ }
1763
+ if (batch_data->batch.send_trailing_metadata) {
1764
+ retry_state->completed_send_trailing_metadata = true;
1765
+ }
1766
+ if (batch_data->batch.recv_initial_metadata) {
1767
+ retry_state->completed_recv_initial_metadata = true;
1768
+ }
1769
+ if (batch_data->batch.recv_message) {
1770
+ ++retry_state->completed_recv_message_count;
1771
+ }
1772
+ if (batch_data->batch.recv_trailing_metadata) {
1773
+ retry_state->completed_recv_trailing_metadata = true;
1774
+ }
1775
+ }
1776
+
1777
+ // Represents a closure that needs to run as a result of a completed batch.
1778
+ typedef struct {
1779
+ grpc_closure* closure;
1780
+ grpc_error* error;
1781
+ const char* reason;
1782
+ } closure_to_execute;
1783
+
1784
// Adds any necessary closures for deferred recv_initial_metadata and
// recv_message callbacks to closures, updating *num_closures as needed.
// These callbacks were deferred when the recv op failed before
// recv_trailing_metadata completed; now that trailing metadata has been
// received (and the call is not being retried), they must be delivered
// to the surface.
static void add_closures_for_deferred_recv_callbacks(
    subchannel_batch_data* batch_data, subchannel_call_retry_state* retry_state,
    closure_to_execute* closures, size_t* num_closures) {
  if (batch_data->batch.recv_trailing_metadata &&
      retry_state->recv_initial_metadata_ready_deferred) {
    closure_to_execute* closure = &closures[(*num_closures)++];
    closure->closure =
        GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
                          invoke_recv_initial_metadata_callback, batch_data,
                          grpc_schedule_on_exec_ctx);
    // Transfers the error reference held in retry_state to the closure.
    closure->error = retry_state->recv_initial_metadata_error;
    closure->reason = "resuming recv_initial_metadata_ready";
  }
  if (batch_data->batch.recv_trailing_metadata &&
      retry_state->recv_message_ready_deferred) {
    closure_to_execute* closure = &closures[(*num_closures)++];
    closure->closure = GRPC_CLOSURE_INIT(&batch_data->recv_message_ready,
                                         invoke_recv_message_callback,
                                         batch_data, grpc_schedule_on_exec_ctx);
    // Transfers the error reference held in retry_state to the closure.
    closure->error = retry_state->recv_message_error;
    closure->reason = "resuming recv_message_ready";
  }
}
1809
+
1810
// If there are any cached ops to replay or pending ops to start on the
// subchannel call, adds a closure to closures to invoke
// start_retriable_subchannel_batches(), updating *num_closures as needed.
static void add_closures_for_replay_or_pending_send_ops(
    grpc_call_element* elem, subchannel_batch_data* batch_data,
    subchannel_call_retry_state* retry_state, closure_to_execute* closures,
    size_t* num_closures) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  // Cached send ops not yet started on this subchannel call.
  bool have_pending_send_message_ops =
      retry_state->started_send_message_count < calld->send_messages.size();
  bool have_pending_send_trailing_metadata_op =
      calld->seen_send_trailing_metadata &&
      !retry_state->started_send_trailing_metadata;
  // If no cached ops need replaying, check the pending batches for send
  // ops that have not yet been cached (send_ops_cached == false).
  if (!have_pending_send_message_ops &&
      !have_pending_send_trailing_metadata_op) {
    for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
      pending_batch* pending = &calld->pending_batches[i];
      grpc_transport_stream_op_batch* batch = pending->batch;
      if (batch == nullptr || pending->send_ops_cached) continue;
      if (batch->send_message) have_pending_send_message_ops = true;
      if (batch->send_trailing_metadata) {
        have_pending_send_trailing_metadata_op = true;
      }
    }
  }
  if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: starting next batch for pending send op(s)",
              chand, calld);
    }
    // Reuse the batch's handler_private closure storage, since the batch
    // itself is no longer in flight.
    closure_to_execute* closure = &closures[(*num_closures)++];
    closure->closure = GRPC_CLOSURE_INIT(
        &batch_data->batch.handler_private.closure,
        start_retriable_subchannel_batches, elem, grpc_schedule_on_exec_ctx);
    closure->error = GRPC_ERROR_NONE;
    closure->reason = "starting next batch for send_* op(s)";
  }
}
1850
+
1851
// For any pending batch completed in batch_data, adds the necessary
// completion closures to closures, updating *num_closures as needed.
// Takes ownership of error; each closure added gets its own ref, and
// the original ref is unreffed before returning.
static void add_closures_for_completed_pending_batches(
    grpc_call_element* elem, subchannel_batch_data* batch_data,
    subchannel_call_retry_state* retry_state, grpc_error* error,
    closure_to_execute* closures, size_t* num_closures) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    if (pending_batch_is_completed(pending, calld, retry_state)) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
                chand, calld, i);
      }
      // Copy the trailing metadata to return it to the surface.
      if (batch_data->batch.recv_trailing_metadata) {
        grpc_metadata_batch_move(&batch_data->recv_trailing_metadata,
                                 pending->batch->payload->recv_trailing_metadata
                                     .recv_trailing_metadata);
      }
      closure_to_execute* closure = &closures[(*num_closures)++];
      closure->closure = pending->batch->on_complete;
      closure->error = GRPC_ERROR_REF(error);
      closure->reason = "on_complete for pending batch";
      // Null out on_complete so the batch is not completed twice.
      pending->batch->on_complete = nullptr;
      maybe_clear_pending_batch(elem, pending);
    }
  }
  GRPC_ERROR_UNREF(error);
}
1883
+
1884
// For any pending batch containing an op that has not yet been started,
// adds the pending batch's completion closures to closures, updating
// *num_closures as needed.  Used when the call has finished, so unstarted
// batches can never run and must be failed back to the surface.
// Takes ownership of error; each closure added gets its own ref.
static void add_closures_to_fail_unstarted_pending_batches(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    grpc_error* error, closure_to_execute* closures, size_t* num_closures) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    if (pending_batch_is_unstarted(pending, calld, retry_state)) {
      if (grpc_client_channel_trace.enabled()) {
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: failing unstarted pending batch at index "
                "%" PRIuPTR,
                chand, calld, i);
      }
      // Fail the recv_initial_metadata_ready callback, if present.
      if (pending->batch->recv_initial_metadata) {
        closure_to_execute* closure = &closures[(*num_closures)++];
        closure->closure = pending->batch->payload->recv_initial_metadata
                               .recv_initial_metadata_ready;
        closure->error = GRPC_ERROR_REF(error);
        closure->reason =
            "failing recv_initial_metadata_ready for pending batch";
        pending->batch->payload->recv_initial_metadata
            .recv_initial_metadata_ready = nullptr;
      }
      // Fail the recv_message_ready callback, if present, delivering a
      // null message to the surface.
      if (pending->batch->recv_message) {
        *pending->batch->payload->recv_message.recv_message = nullptr;
        closure_to_execute* closure = &closures[(*num_closures)++];
        closure->closure =
            pending->batch->payload->recv_message.recv_message_ready;
        closure->error = GRPC_ERROR_REF(error);
        closure->reason = "failing recv_message_ready for pending batch";
        pending->batch->payload->recv_message.recv_message_ready = nullptr;
      }
      // Fail the batch's on_complete callback.
      closure_to_execute* closure = &closures[(*num_closures)++];
      closure->closure = pending->batch->on_complete;
      closure->error = GRPC_ERROR_REF(error);
      closure->reason = "failing on_complete for pending batch";
      pending->batch->on_complete = nullptr;
      maybe_clear_pending_batch(elem, pending);
    }
  }
  GRPC_ERROR_UNREF(error);
}
1930
+
1931
// Callback used to intercept on_complete from subchannel calls.
// Called only when retries are enabled.
//
// Determines whether the batch (and possibly the call) has finished,
// decides whether to retry, and otherwise builds and schedules the set
// of surface-facing closures that result from this batch's completion.
// Runs inside the call combiner; every exit path either schedules a
// closure (which yields the combiner) or stops the combiner explicitly.
static void on_complete(void* arg, grpc_error* error) {
  subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(arg);
  grpc_call_element* elem = batch_data->elem;
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
            chand, calld, grpc_error_string(error), batch_str);
    gpr_free(batch_str);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              batch_data->subchannel_call));
  // If we have previously completed recv_trailing_metadata, then the
  // call is finished.
  bool call_finished = retry_state->completed_recv_trailing_metadata;
  // Update bookkeeping in retry_state.
  update_retry_state_for_completed_batch(batch_data, retry_state);
  if (call_finished) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand,
              calld);
    }
  } else {
    // Check if this batch finished the call, and if so, get its status.
    // The call is finished if either (a) this callback was invoked with
    // an error or (b) we receive status.
    grpc_status_code status = GRPC_STATUS_OK;
    grpc_mdelem* server_pushback_md = nullptr;
    if (error != GRPC_ERROR_NONE) {  // Case (a).
      call_finished = true;
      grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
                            nullptr);
    } else if (batch_data->batch.recv_trailing_metadata) {  // Case (b).
      call_finished = true;
      grpc_metadata_batch* md_batch =
          batch_data->batch.payload->recv_trailing_metadata
              .recv_trailing_metadata;
      GPR_ASSERT(md_batch->idx.named.grpc_status != nullptr);
      status = grpc_get_status_code_from_metadata(
          md_batch->idx.named.grpc_status->md);
      // The server may push back a retry delay via grpc-retry-pushback-ms.
      if (md_batch->idx.named.grpc_retry_pushback_ms != nullptr) {
        server_pushback_md = &md_batch->idx.named.grpc_retry_pushback_ms->md;
      }
    } else if (retry_state->completed_recv_trailing_metadata) {
      call_finished = true;
    }
    if (call_finished && grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand,
              calld, grpc_status_code_to_string(status));
    }
    // If the call is finished, check if we should retry.
    if (call_finished &&
        maybe_retry(elem, batch_data, status, server_pushback_md)) {
      // Retrying: drop this attempt's state instead of surfacing it.
      // Unref batch_data for deferred recv_initial_metadata_ready or
      // recv_message_ready callbacks, if any, releasing the error refs
      // that were parked in retry_state along with them.
      if (batch_data->batch.recv_trailing_metadata &&
          retry_state->recv_initial_metadata_ready_deferred) {
        batch_data_unref(batch_data);
        GRPC_ERROR_UNREF(retry_state->recv_initial_metadata_error);
      }
      if (batch_data->batch.recv_trailing_metadata &&
          retry_state->recv_message_ready_deferred) {
        batch_data_unref(batch_data);
        GRPC_ERROR_UNREF(retry_state->recv_message_error);
      }
      batch_data_unref(batch_data);
      return;
    }
  }
  // If the call is finished or retries are committed, free cached data for
  // send ops that we've just completed.
  if (call_finished || calld->retry_committed) {
    free_cached_send_op_data_for_completed_batch(elem, batch_data, retry_state);
  }
  // Call not being retried.
  // Construct list of closures to execute.
  // Max number of closures is number of pending batches plus one for
  // each of:
  // - recv_initial_metadata_ready (either deferred or unstarted)
  // - recv_message_ready (either deferred or unstarted)
  // - starting a new batch for pending send ops
  closure_to_execute closures[GPR_ARRAY_SIZE(calld->pending_batches) + 3];
  size_t num_closures = 0;
  // If there are deferred recv_initial_metadata_ready or recv_message_ready
  // callbacks, add them to closures.
  add_closures_for_deferred_recv_callbacks(batch_data, retry_state, closures,
                                           &num_closures);
  // Find pending batches whose ops are now complete and add their
  // on_complete callbacks to closures.
  add_closures_for_completed_pending_batches(elem, batch_data, retry_state,
                                             GRPC_ERROR_REF(error), closures,
                                             &num_closures);
  // Add closures to handle any pending batches that have not yet been started.
  // If the call is finished, we fail these batches; otherwise, we add a
  // callback to start_retriable_subchannel_batches() to start them on
  // the subchannel call.
  if (call_finished) {
    add_closures_to_fail_unstarted_pending_batches(
        elem, retry_state, GRPC_ERROR_REF(error), closures, &num_closures);
  } else {
    add_closures_for_replay_or_pending_send_ops(elem, batch_data, retry_state,
                                                closures, &num_closures);
  }
  // Don't need batch_data anymore.
  batch_data_unref(batch_data);
  // Schedule all of the closures identified above.
  // Note that the call combiner will be yielded for each closure that
  // we schedule. We're already running in the call combiner, so one of
  // the closures can be scheduled directly, but the others will
  // have to re-enter the call combiner.
  if (num_closures > 0) {
    GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
    for (size_t i = 1; i < num_closures; ++i) {
      GRPC_CALL_COMBINER_START(calld->call_combiner, closures[i].closure,
                               closures[i].error, closures[i].reason);
    }
  } else {
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "no closures to run for on_complete");
  }
}
2057
+
2058
+ //
2059
+ // subchannel batch construction
2060
+ //
2061
+
2062
+ // Helper function used to start a subchannel batch in the call combiner.
2063
+ static void start_batch_in_call_combiner(void* arg, grpc_error* ignored) {
2064
+ grpc_transport_stream_op_batch* batch =
2065
+ static_cast<grpc_transport_stream_op_batch*>(arg);
2066
+ grpc_subchannel_call* subchannel_call =
2067
+ static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg);
2068
+ // Note: This will release the call combiner.
2069
+ grpc_subchannel_call_process_op(subchannel_call, batch);
2070
+ }
2071
+
2072
// Adds retriable send_initial_metadata op to batch_data.
// Copies the cached initial metadata into per-attempt storage and, for
// retry attempts after the first, attaches the grpc-previous-rpc-attempts
// header with the number of prior attempts.
static void add_retriable_send_initial_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  // Maps the number of retries to the corresponding metadata value slice.
  // NOTE(review): only four entries — presumably max_retry_attempts is
  // capped elsewhere so num_attempts_completed never exceeds 4; confirm.
  static const grpc_slice* retry_count_strings[] = {
      &GRPC_MDSTR_1, &GRPC_MDSTR_2, &GRPC_MDSTR_3, &GRPC_MDSTR_4};
  // We need to make a copy of the metadata batch for each attempt, since
  // the filters in the subchannel stack may modify this batch, and we don't
  // want those modifications to be passed forward to subsequent attempts.
  //
  // If we've already completed one or more attempts, add the
  // grpc-retry-attempts header.
  batch_data->send_initial_metadata_storage =
      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
          calld->arena, sizeof(grpc_linked_mdelem) *
                            (calld->send_initial_metadata.list.count +
                             (calld->num_attempts_completed > 0))));
  grpc_metadata_batch_copy(&calld->send_initial_metadata,
                           &batch_data->send_initial_metadata,
                           batch_data->send_initial_metadata_storage);
  // Remove any stale grpc-previous-rpc-attempts entry from the copy
  // before (possibly) re-adding it with the current attempt count.
  if (batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts !=
      nullptr) {
    grpc_metadata_batch_remove(
        &batch_data->send_initial_metadata,
        batch_data->send_initial_metadata.idx.named.grpc_previous_rpc_attempts);
  }
  if (calld->num_attempts_completed > 0) {
    grpc_mdelem retry_md = grpc_mdelem_from_slices(
        GRPC_MDSTR_GRPC_PREVIOUS_RPC_ATTEMPTS,
        *retry_count_strings[calld->num_attempts_completed - 1]);
    grpc_error* error = grpc_metadata_batch_add_tail(
        &batch_data->send_initial_metadata,
        &batch_data->send_initial_metadata_storage[calld->send_initial_metadata
                                                       .list.count],
        retry_md);
    if (error != GRPC_ERROR_NONE) {
      // Adding to a freshly-copied batch should never fail; treat any
      // failure as a fatal internal error.
      gpr_log(GPR_ERROR, "error adding retry metadata: %s",
              grpc_error_string(error));
      GPR_ASSERT(false);
    }
  }
  retry_state->started_send_initial_metadata = true;
  batch_data->batch.send_initial_metadata = true;
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata =
      &batch_data->send_initial_metadata;
  batch_data->batch.payload->send_initial_metadata.send_initial_metadata_flags =
      calld->send_initial_metadata_flags;
  batch_data->batch.payload->send_initial_metadata.peer_string =
      calld->peer_string;
}
2123
+
2124
// Adds retriable send_message op to batch_data.
// Replays the next cached message (indexed by started_send_message_count)
// via the ByteStreamCache, so the same payload can be re-sent on each
// retry attempt.
static void add_retriable_send_message_op(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
            chand, calld, retry_state->started_send_message_count);
  }
  grpc_core::ByteStreamCache* cache =
      calld->send_messages[retry_state->started_send_message_count];
  ++retry_state->started_send_message_count;
  batch_data->send_message.Init(cache);
  batch_data->batch.send_message = true;
  batch_data->batch.payload->send_message.send_message.reset(
      batch_data->send_message.get());
}
2143
+
2144
// Adds retriable send_trailing_metadata op to batch_data.
static void add_retriable_send_trailing_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  // We need to make a copy of the metadata batch for each attempt, since
  // the filters in the subchannel stack may modify this batch, and we don't
  // want those modifications to be passed forward to subsequent attempts.
  // Storage for the copied elements comes from the call arena, so it is
  // freed with the call.
  batch_data->send_trailing_metadata_storage =
      static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
          calld->arena, sizeof(grpc_linked_mdelem) *
                            calld->send_trailing_metadata.list.count));
  grpc_metadata_batch_copy(&calld->send_trailing_metadata,
                           &batch_data->send_trailing_metadata,
                           batch_data->send_trailing_metadata_storage);
  retry_state->started_send_trailing_metadata = true;
  batch_data->batch.send_trailing_metadata = true;
  batch_data->batch.payload->send_trailing_metadata.send_trailing_metadata =
      &batch_data->send_trailing_metadata;
}
2163
+
2164
// Adds retriable recv_initial_metadata op to batch_data, pointing the
// op's payload at per-attempt storage in batch_data and intercepting the
// recv_initial_metadata_ready callback so retries can be decided before
// results reach the surface.
static void add_retriable_recv_initial_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  retry_state->started_recv_initial_metadata = true;
  batch_data->batch.recv_initial_metadata = true;
  grpc_metadata_batch_init(&batch_data->recv_initial_metadata);
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata =
      &batch_data->recv_initial_metadata;
  batch_data->batch.payload->recv_initial_metadata.trailing_metadata_available =
      &batch_data->trailing_metadata_available;
  GRPC_CLOSURE_INIT(&batch_data->recv_initial_metadata_ready,
                    recv_initial_metadata_ready, batch_data,
                    grpc_schedule_on_exec_ctx);
  batch_data->batch.payload->recv_initial_metadata.recv_initial_metadata_ready =
      &batch_data->recv_initial_metadata_ready;
}
2181
+
2182
// Adds retriable recv_message op to batch_data, intercepting the
// recv_message_ready callback so retries can be decided before the
// message reaches the surface.
static void add_retriable_recv_message_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  ++retry_state->started_recv_message_count;
  batch_data->batch.recv_message = true;
  batch_data->batch.payload->recv_message.recv_message =
      &batch_data->recv_message;
  GRPC_CLOSURE_INIT(&batch_data->recv_message_ready, recv_message_ready,
                    batch_data, grpc_schedule_on_exec_ctx);
  batch_data->batch.payload->recv_message.recv_message_ready =
      &batch_data->recv_message_ready;
}
2195
+
2196
// Adds retriable recv_trailing_metadata op to batch_data, pointing the
// op's payload at per-attempt storage in batch_data.  Also enables stats
// collection, which the transport requires alongside trailing metadata.
static void add_retriable_recv_trailing_metadata_op(
    call_data* calld, subchannel_call_retry_state* retry_state,
    subchannel_batch_data* batch_data) {
  retry_state->started_recv_trailing_metadata = true;
  batch_data->batch.recv_trailing_metadata = true;
  grpc_metadata_batch_init(&batch_data->recv_trailing_metadata);
  batch_data->batch.payload->recv_trailing_metadata.recv_trailing_metadata =
      &batch_data->recv_trailing_metadata;
  batch_data->batch.collect_stats = true;
  batch_data->batch.payload->collect_stats.collect_stats =
      &batch_data->collect_stats;
}
2209
+
2210
// Helper function used to start a recv_trailing_metadata batch.  This
// is used in the case where a recv_initial_metadata or recv_message
// op fails in a way that we know the call is over but when the application
// has not yet started its own recv_trailing_metadata op.  Starting it
// internally lets us obtain the status needed for the retry decision.
static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: call failed but recv_trailing_metadata not "
            "started; starting it internally",
            chand, calld);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
  // One ref: only the on_complete callback will fire for this batch.
  subchannel_batch_data* batch_data = batch_data_create(elem, 1);
  add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
  // Note: This will release the call combiner.
  grpc_subchannel_call_process_op(calld->subchannel_call, &batch_data->batch);
}
2232
+
2233
// If there are any cached send ops that need to be replayed on the
// current subchannel call, creates and returns a new subchannel batch
// to replay those ops.  Otherwise, returns nullptr.
// An op is replayed only if it completed on a previous attempt, has not
// been started on this attempt, and is not about to be started from a
// still-pending surface batch (the pending_send_* flags).
static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state) {
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  subchannel_batch_data* replay_batch_data = nullptr;
  // send_initial_metadata.
  if (calld->seen_send_initial_metadata &&
      !retry_state->started_send_initial_metadata &&
      !calld->pending_send_initial_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_initial_metadata op",
              chand, calld);
    }
    replay_batch_data = batch_data_create(elem, 1);
    add_retriable_send_initial_metadata_op(calld, retry_state,
                                           replay_batch_data);
  }
  // send_message.
  // Note that we can only have one send_message op in flight at a time.
  if (retry_state->started_send_message_count < calld->send_messages.size() &&
      retry_state->started_send_message_count ==
          retry_state->completed_send_message_count &&
      !calld->pending_send_message) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_message op",
              chand, calld);
    }
    if (replay_batch_data == nullptr) {
      replay_batch_data = batch_data_create(elem, 1);
    }
    add_retriable_send_message_op(elem, retry_state, replay_batch_data);
  }
  // send_trailing_metadata.
  // Note that we only add this op if we have no more send_message ops
  // to start, since we can't send down any more send_message ops after
  // send_trailing_metadata.
  if (calld->seen_send_trailing_metadata &&
      retry_state->started_send_message_count == calld->send_messages.size() &&
      !retry_state->started_send_trailing_metadata &&
      !calld->pending_send_trailing_metadata) {
    if (grpc_client_channel_trace.enabled()) {
      gpr_log(GPR_DEBUG,
              "chand=%p calld=%p: replaying previously completed "
              "send_trailing_metadata op",
              chand, calld);
    }
    if (replay_batch_data == nullptr) {
      replay_batch_data = batch_data_create(elem, 1);
    }
    add_retriable_send_trailing_metadata_op(calld, retry_state,
                                            replay_batch_data);
  }
  return replay_batch_data;
}
2294
+
2295
// Adds subchannel batches for pending batches to batches, updating
// *num_batches as needed.  Skips batches whose ops are already in flight
// on this attempt; when retries no longer apply, passes the surface batch
// through unchanged; otherwise wraps the pending batch in a retriable
// subchannel_batch_data with interception callbacks installed.
static void add_subchannel_batches_for_pending_batches(
    grpc_call_element* elem, subchannel_call_retry_state* retry_state,
    grpc_transport_stream_op_batch** batches, size_t* num_batches) {
  call_data* calld = static_cast<call_data*>(elem->call_data);
  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
    pending_batch* pending = &calld->pending_batches[i];
    grpc_transport_stream_op_batch* batch = pending->batch;
    if (batch == nullptr) continue;
    // Skip any batch that either (a) has already been started on this
    // subchannel call or (b) we can't start yet because we're still
    // replaying send ops that need to be completed first.
    // TODO(roth): Note that if any one op in the batch can't be sent
    // yet due to ops that we're replaying, we don't start any of the ops
    // in the batch. This is probably okay, but it could conceivably
    // lead to increased latency in some cases -- e.g., we could delay
    // starting a recv op due to it being in the same batch with a send
    // op. If/when we revamp the callback protocol in
    // transport_stream_op_batch, we may be able to fix this.
    if (batch->send_initial_metadata &&
        retry_state->started_send_initial_metadata) {
      continue;
    }
    if (batch->send_message && retry_state->completed_send_message_count <
                                   retry_state->started_send_message_count) {
      continue;
    }
    // Note that we only start send_trailing_metadata if we have no more
    // send_message ops to start, since we can't send down any more
    // send_message ops after send_trailing_metadata.
    // (batch->send_message is a bool counted as 0/1 in the arithmetic.)
    if (batch->send_trailing_metadata &&
        (retry_state->started_send_message_count + batch->send_message <
             calld->send_messages.size() ||
         retry_state->started_send_trailing_metadata)) {
      continue;
    }
    if (batch->recv_initial_metadata &&
        retry_state->started_recv_initial_metadata) {
      continue;
    }
    if (batch->recv_message && retry_state->completed_recv_message_count <
                                   retry_state->started_recv_message_count) {
      continue;
    }
    if (batch->recv_trailing_metadata &&
        retry_state->started_recv_trailing_metadata) {
      continue;
    }
    // If we're not retrying, just send the batch as-is.
    if (calld->method_params == nullptr ||
        calld->method_params->retry_policy() == nullptr ||
        calld->retry_committed) {
      batches[(*num_batches)++] = batch;
      pending_batch_clear(calld, pending);
      continue;
    }
    // Create batch with the right number of callbacks: one for
    // on_complete plus one per intercepted recv callback.
    const int num_callbacks =
        1 + batch->recv_initial_metadata + batch->recv_message;
    subchannel_batch_data* batch_data = batch_data_create(elem, num_callbacks);
    // Cache send ops if needed.
    maybe_cache_send_ops_for_batch(calld, pending);
    // send_initial_metadata.
    if (batch->send_initial_metadata) {
      add_retriable_send_initial_metadata_op(calld, retry_state, batch_data);
    }
    // send_message.
    if (batch->send_message) {
      add_retriable_send_message_op(elem, retry_state, batch_data);
    }
    // send_trailing_metadata.
    if (batch->send_trailing_metadata) {
      add_retriable_send_trailing_metadata_op(calld, retry_state, batch_data);
    }
    // recv_initial_metadata.
    if (batch->recv_initial_metadata) {
      // recv_flags is only used on the server side.
      GPR_ASSERT(batch->payload->recv_initial_metadata.recv_flags == nullptr);
      add_retriable_recv_initial_metadata_op(calld, retry_state, batch_data);
    }
    // recv_message.
    if (batch->recv_message) {
      add_retriable_recv_message_op(calld, retry_state, batch_data);
    }
    // recv_trailing_metadata.
    if (batch->recv_trailing_metadata) {
      GPR_ASSERT(batch->collect_stats);
      add_retriable_recv_trailing_metadata_op(calld, retry_state, batch_data);
    }
    batches[(*num_batches)++] = &batch_data->batch;
  }
}
2388
+
2389
// Constructs and starts whatever subchannel batches are needed on the
// subchannel call: first a replay batch for previously-completed send
// ops (if any), then batches for the surface's pending ops.  Runs inside
// the call combiner; the first batch is started directly and the rest
// are re-queued through the combiner.
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
  grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
  call_data* calld = static_cast<call_data*>(elem->call_data);
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches",
            chand, calld);
  }
  subchannel_call_retry_state* retry_state =
      static_cast<subchannel_call_retry_state*>(
          grpc_connected_subchannel_call_get_parent_data(
              calld->subchannel_call));
  // We can start up to 6 batches.
  grpc_transport_stream_op_batch*
      batches[GPR_ARRAY_SIZE(calld->pending_batches)];
  size_t num_batches = 0;
  // Replay previously-returned send_* ops if needed.
  subchannel_batch_data* replay_batch_data =
      maybe_create_subchannel_batch_for_replay(elem, retry_state);
  if (replay_batch_data != nullptr) {
    batches[num_batches++] = &replay_batch_data->batch;
  }
  // Now add pending batches.
  add_subchannel_batches_for_pending_batches(elem, retry_state, batches,
                                             &num_batches);
  // Start batches on subchannel call.
  // Note that the call combiner will be yielded for each batch that we
  // send down. We're already running in the call combiner, so one of
  // the batches can be started directly, but the others will have to
  // re-enter the call combiner.
  if (grpc_client_channel_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "chand=%p calld=%p: starting %" PRIuPTR
            " retriable batches on subchannel_call=%p",
            chand, calld, num_batches, calld->subchannel_call);
  }
  if (num_batches == 0) {
    // This should be fairly rare, but it can happen when (e.g.) an
    // attempt completes before it has finished replaying all
    // previously sent messages.
    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                            "no retriable subchannel batches to start");
  } else {
    // Queue batches[1..] through the call combiner first, then start
    // batches[0] directly (which releases the combiner for this closure).
    for (size_t i = 1; i < num_batches; ++i) {
      if (grpc_client_channel_trace.enabled()) {
        char* batch_str = grpc_transport_stream_op_batch_string(batches[i]);
        gpr_log(GPR_DEBUG,
                "chand=%p calld=%p: starting batch in call combiner: %s", chand,
                calld, batch_str);
        gpr_free(batch_str);
      }
      batches[i]->handler_private.extra_arg = calld->subchannel_call;
      GRPC_CLOSURE_INIT(&batches[i]->handler_private.closure,
                        start_batch_in_call_combiner, batches[i],
                        grpc_schedule_on_exec_ctx);
      GRPC_CALL_COMBINER_START(calld->call_combiner,
                               &batches[i]->handler_private.closure,
                               GRPC_ERROR_NONE, "start_subchannel_batch");
    }
    if (grpc_client_channel_trace.enabled()) {
      char* batch_str = grpc_transport_stream_op_batch_string(batches[0]);
      gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting batch: %s", chand, calld,
              batch_str);
      gpr_free(batch_str);
    }
    // Note: This will release the call combiner.
    grpc_subchannel_call_process_op(calld->subchannel_call, batches[0]);
  }
}
2460
+
2461
+ //
2462
+ // LB pick
2463
+ //
2464
+
2465
+ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
2466
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2467
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2468
+ const size_t parent_data_size =
2469
+ calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
2470
+ const grpc_core::ConnectedSubchannel::CallArgs call_args = {
2471
+ calld->pollent, // pollent
2472
+ calld->path, // path
2473
+ calld->call_start_time, // start_time
2474
+ calld->deadline, // deadline
2475
+ calld->arena, // arena
2476
+ calld->pick.subchannel_call_context, // context
2477
+ calld->call_combiner, // call_combiner
2478
+ parent_data_size // parent_data_size
2479
+ };
2480
+ grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
2481
+ call_args, &calld->subchannel_call);
2482
+ if (grpc_client_channel_trace.enabled()) {
2483
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
2484
+ chand, calld, calld->subchannel_call, grpc_error_string(new_error));
2485
+ }
2486
+ if (new_error != GRPC_ERROR_NONE) {
2487
+ new_error = grpc_error_add_child(new_error, error);
2488
+ pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
2489
+ } else {
2490
+ if (parent_data_size > 0) {
2491
+ subchannel_call_retry_state* retry_state =
2492
+ static_cast<subchannel_call_retry_state*>(
2493
+ grpc_connected_subchannel_call_get_parent_data(
2494
+ calld->subchannel_call));
2495
+ retry_state->batch_payload.context = calld->pick.subchannel_call_context;
2496
+ }
2497
+ pending_batches_resume(elem);
2498
+ }
2499
+ GRPC_ERROR_UNREF(error);
2500
+ }
2501
+
2502
+ // Invoked when a pick is completed, on both success or failure.
2503
+ static void pick_done(void* arg, grpc_error* error) {
2504
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2505
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2506
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2507
+ if (calld->pick.connected_subchannel == nullptr) {
2508
+ // Failed to create subchannel.
2509
+ // If there was no error, this is an LB policy drop, in which case
2510
+ // we return an error; otherwise, we may retry.
2511
+ grpc_status_code status = GRPC_STATUS_OK;
2512
+ grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
2513
+ nullptr);
2514
+ if (error == GRPC_ERROR_NONE || !calld->enable_retries ||
2515
+ !maybe_retry(elem, nullptr /* batch_data */, status,
2516
+ nullptr /* server_pushback_md */)) {
2517
+ grpc_error* new_error =
2518
+ error == GRPC_ERROR_NONE
2519
+ ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
2520
+ "Call dropped by load balancing policy")
2521
+ : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
2522
+ "Failed to create subchannel", &error, 1);
2523
+ if (grpc_client_channel_trace.enabled()) {
2524
+ gpr_log(GPR_DEBUG,
2525
+ "chand=%p calld=%p: failed to create subchannel: error=%s",
2526
+ chand, calld, grpc_error_string(new_error));
2527
+ }
2528
+ pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
2529
+ }
2530
+ } else {
2531
+ /* Create call on subchannel. */
2532
+ create_subchannel_call(elem, GRPC_ERROR_REF(error));
2533
+ }
2534
+ }
2535
+
2536
+ // Invoked when a pick is completed to leave the client_channel combiner
2537
+ // and continue processing in the call combiner.
2538
+ static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
2539
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2540
+ GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
2541
+ grpc_schedule_on_exec_ctx);
2542
+ GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
2543
+ }
2544
+
2545
+ // A wrapper around pick_done_locked() that is used in cases where
2546
+ // either (a) the pick was deferred pending a resolver result or (b) the
2547
+ // pick was done asynchronously. Removes the call's polling entity from
2548
+ // chand->interested_parties before invoking pick_done_locked().
2549
+ static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
2550
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2551
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2552
+ grpc_polling_entity_del_from_pollset_set(calld->pollent,
2553
+ chand->interested_parties);
2554
+ pick_done_locked(elem, error);
2555
+ }
2556
+
2557
+ // Note: This runs under the client_channel combiner, but will NOT be
2558
+ // holding the call combiner.
2559
+ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
2560
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2561
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2562
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2563
+ // Note: chand->lb_policy may have changed since we started our pick,
2564
+ // in which case we will be cancelling the pick on a policy other than
2565
+ // the one we started it on. However, this will just be a no-op.
2566
+ if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
2567
+ if (grpc_client_channel_trace.enabled()) {
2568
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
2569
+ chand, calld, chand->lb_policy.get());
2570
+ }
2571
+ chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
2572
+ }
2573
+ GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
2574
+ }
2575
+
2576
+ // Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
2577
+ // Unrefs the LB policy and invokes async_pick_done_locked().
2578
+ static void pick_callback_done_locked(void* arg, grpc_error* error) {
2579
+ grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
2580
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2581
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2582
+ if (grpc_client_channel_trace.enabled()) {
2583
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
2584
+ chand, calld);
1086
2585
  }
1087
2586
  async_pick_done_locked(elem, GRPC_ERROR_REF(error));
1088
2587
  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
1089
2588
  }
1090
2589
 
2590
+ // Applies service config to the call. Must be invoked once we know
2591
+ // that the resolver has returned results to the channel.
2592
+ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
2593
+ channel_data* chand = static_cast<channel_data*>(elem->channel_data);
2594
+ call_data* calld = static_cast<call_data*>(elem->call_data);
2595
+ if (grpc_client_channel_trace.enabled()) {
2596
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
2597
+ chand, calld);
2598
+ }
2599
+ if (chand->retry_throttle_data != nullptr) {
2600
+ calld->retry_throttle_data = chand->retry_throttle_data->Ref();
2601
+ }
2602
+ if (chand->method_params_table != nullptr) {
2603
+ calld->method_params = grpc_core::ServiceConfig::MethodConfigTableLookup(
2604
+ *chand->method_params_table, calld->path);
2605
+ if (calld->method_params != nullptr) {
2606
+ // If the deadline from the service config is shorter than the one
2607
+ // from the client API, reset the deadline timer.
2608
+ if (chand->deadline_checking_enabled &&
2609
+ calld->method_params->timeout() != 0) {
2610
+ const grpc_millis per_method_deadline =
2611
+ grpc_timespec_to_millis_round_up(calld->call_start_time) +
2612
+ calld->method_params->timeout();
2613
+ if (per_method_deadline < calld->deadline) {
2614
+ calld->deadline = per_method_deadline;
2615
+ grpc_deadline_state_reset(elem, calld->deadline);
2616
+ }
2617
+ }
2618
+ }
2619
+ }
2620
+ // If no retry policy, disable retries.
2621
+ // TODO(roth): Remove this when adding support for transparent retries.
2622
+ if (calld->method_params == nullptr ||
2623
+ calld->method_params->retry_policy() == nullptr) {
2624
+ calld->enable_retries = false;
2625
+ }
2626
+ }
2627
+
1091
2628
  // Starts a pick on chand->lb_policy.
1092
2629
  // Returns true if pick is completed synchronously.
1093
2630
  static bool pick_callback_start_locked(grpc_call_element* elem) {
@@ -1097,33 +2634,46 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
1097
2634
  gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
1098
2635
  chand, calld, chand->lb_policy.get());
1099
2636
  }
1100
- apply_service_config_to_call_locked(elem);
2637
+ // Only get service config data on the first attempt.
2638
+ if (calld->num_attempts_completed == 0) {
2639
+ apply_service_config_to_call_locked(elem);
2640
+ }
1101
2641
  // If the application explicitly set wait_for_ready, use that.
1102
2642
  // Otherwise, if the service config specified a value for this
1103
2643
  // method, use that.
1104
- uint32_t initial_metadata_flags =
1105
- calld->initial_metadata_batch->payload->send_initial_metadata
1106
- .send_initial_metadata_flags;
2644
+ //
2645
+ // The send_initial_metadata batch will be the first one in the list,
2646
+ // as set by get_batch_index() above.
2647
+ calld->pick.initial_metadata =
2648
+ calld->seen_send_initial_metadata
2649
+ ? &calld->send_initial_metadata
2650
+ : calld->pending_batches[0]
2651
+ .batch->payload->send_initial_metadata.send_initial_metadata;
2652
+ uint32_t send_initial_metadata_flags =
2653
+ calld->seen_send_initial_metadata
2654
+ ? calld->send_initial_metadata_flags
2655
+ : calld->pending_batches[0]
2656
+ .batch->payload->send_initial_metadata
2657
+ .send_initial_metadata_flags;
1107
2658
  const bool wait_for_ready_set_from_api =
1108
- initial_metadata_flags &
2659
+ send_initial_metadata_flags &
1109
2660
  GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
1110
2661
  const bool wait_for_ready_set_from_service_config =
1111
2662
  calld->method_params != nullptr &&
1112
- calld->method_params->wait_for_ready != WAIT_FOR_READY_UNSET;
2663
+ calld->method_params->wait_for_ready() !=
2664
+ ClientChannelMethodParams::WAIT_FOR_READY_UNSET;
1113
2665
  if (!wait_for_ready_set_from_api && wait_for_ready_set_from_service_config) {
1114
- if (calld->method_params->wait_for_ready == WAIT_FOR_READY_TRUE) {
1115
- initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
2666
+ if (calld->method_params->wait_for_ready() ==
2667
+ ClientChannelMethodParams::WAIT_FOR_READY_TRUE) {
2668
+ send_initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
1116
2669
  } else {
1117
- initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
2670
+ send_initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
1118
2671
  }
1119
2672
  }
1120
- calld->pick.initial_metadata =
1121
- calld->initial_metadata_batch->payload->send_initial_metadata
1122
- .send_initial_metadata;
1123
- calld->pick.initial_metadata_flags = initial_metadata_flags;
1124
- GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
2673
+ calld->pick.initial_metadata_flags = send_initial_metadata_flags;
2674
+ GRPC_CLOSURE_INIT(&calld->pick_closure, pick_callback_done_locked, elem,
1125
2675
  grpc_combiner_scheduler(chand->combiner));
1126
- calld->pick.on_complete = &calld->lb_pick_closure;
2676
+ calld->pick.on_complete = &calld->pick_closure;
1127
2677
  GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback");
1128
2678
  const bool pick_done = chand->lb_policy->PickLocked(&calld->pick);
1129
2679
  if (pick_done) {
@@ -1137,7 +2687,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
1137
2687
  GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
1138
2688
  grpc_call_combiner_set_notify_on_cancel(
1139
2689
  calld->call_combiner,
1140
- GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
2690
+ GRPC_CLOSURE_INIT(&calld->pick_cancel_closure,
1141
2691
  pick_callback_cancel_locked, elem,
1142
2692
  grpc_combiner_scheduler(chand->combiner)));
1143
2693
  }
@@ -1186,8 +2736,6 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
1186
2736
  "Pick cancelled", &error, 1));
1187
2737
  }
1188
2738
 
1189
- static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
1190
-
1191
2739
  static void pick_after_resolver_result_done_locked(void* arg,
1192
2740
  grpc_error* error) {
1193
2741
  pick_after_resolver_result_args* args =
@@ -1210,7 +2758,45 @@ static void pick_after_resolver_result_done_locked(void* arg,
1210
2758
  chand, calld);
1211
2759
  }
1212
2760
  async_pick_done_locked(elem, GRPC_ERROR_REF(error));
1213
- } else if (chand->lb_policy != nullptr) {
2761
+ } else if (chand->resolver == nullptr) {
2762
+ // Shutting down.
2763
+ if (grpc_client_channel_trace.enabled()) {
2764
+ gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
2765
+ calld);
2766
+ }
2767
+ async_pick_done_locked(
2768
+ elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
2769
+ } else if (chand->lb_policy == nullptr) {
2770
+ // Transient resolver failure.
2771
+ // If call has wait_for_ready=true, try again; otherwise, fail.
2772
+ uint32_t send_initial_metadata_flags =
2773
+ calld->seen_send_initial_metadata
2774
+ ? calld->send_initial_metadata_flags
2775
+ : calld->pending_batches[0]
2776
+ .batch->payload->send_initial_metadata
2777
+ .send_initial_metadata_flags;
2778
+ if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
2779
+ if (grpc_client_channel_trace.enabled()) {
2780
+ gpr_log(GPR_DEBUG,
2781
+ "chand=%p calld=%p: resolver returned but no LB policy; "
2782
+ "wait_for_ready=true; trying again",
2783
+ chand, calld);
2784
+ }
2785
+ pick_after_resolver_result_start_locked(elem);
2786
+ } else {
2787
+ if (grpc_client_channel_trace.enabled()) {
2788
+ gpr_log(GPR_DEBUG,
2789
+ "chand=%p calld=%p: resolver returned but no LB policy; "
2790
+ "wait_for_ready=false; failing",
2791
+ chand, calld);
2792
+ }
2793
+ async_pick_done_locked(
2794
+ elem,
2795
+ grpc_error_set_int(
2796
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
2797
+ GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
2798
+ }
2799
+ } else {
1214
2800
  if (grpc_client_channel_trace.enabled()) {
1215
2801
  gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
1216
2802
  chand, calld);
@@ -1224,30 +2810,6 @@ static void pick_after_resolver_result_done_locked(void* arg,
1224
2810
  async_pick_done_locked(elem, GRPC_ERROR_NONE);
1225
2811
  }
1226
2812
  }
1227
- // TODO(roth): It should be impossible for chand->lb_policy to be NULL
1228
- // here, so the rest of this code should never actually be executed.
1229
- // However, we have reports of a crash on iOS that triggers this case,
1230
- // so we are temporarily adding this to restore branches that were
1231
- // removed in https://github.com/grpc/grpc/pull/12297. Need to figure
1232
- // out what is actually causing this to occur and then figure out the
1233
- // right way to deal with it.
1234
- else if (chand->resolver != nullptr) {
1235
- // No LB policy, so try again.
1236
- if (grpc_client_channel_trace.enabled()) {
1237
- gpr_log(GPR_DEBUG,
1238
- "chand=%p calld=%p: resolver returned but no LB policy, "
1239
- "trying again",
1240
- chand, calld);
1241
- }
1242
- pick_after_resolver_result_start_locked(elem);
1243
- } else {
1244
- if (grpc_client_channel_trace.enabled()) {
1245
- gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
1246
- calld);
1247
- }
1248
- async_pick_done_locked(
1249
- elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
1250
- }
1251
2813
  }
1252
2814
 
1253
2815
  static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
@@ -1277,6 +2839,7 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
1277
2839
  call_data* calld = static_cast<call_data*>(elem->call_data);
1278
2840
  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
1279
2841
  GPR_ASSERT(calld->pick.connected_subchannel == nullptr);
2842
+ GPR_ASSERT(calld->subchannel_call == nullptr);
1280
2843
  if (chand->lb_policy != nullptr) {
1281
2844
  // We already have an LB policy, so ask it for a pick.
1282
2845
  if (pick_callback_start_locked(elem)) {
@@ -1305,24 +2868,9 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
1305
2868
  chand->interested_parties);
1306
2869
  }
1307
2870
 
1308
- static void on_complete(void* arg, grpc_error* error) {
1309
- grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
1310
- call_data* calld = static_cast<call_data*>(elem->call_data);
1311
- if (calld->retry_throttle_data != nullptr) {
1312
- if (error == GRPC_ERROR_NONE) {
1313
- grpc_server_retry_throttle_data_record_success(
1314
- calld->retry_throttle_data);
1315
- } else {
1316
- // TODO(roth): In a subsequent PR, check the return value here and
1317
- // decide whether or not to retry. Note that we should only
1318
- // record failures whose statuses match the configured retryable
1319
- // or non-fatal status codes.
1320
- grpc_server_retry_throttle_data_record_failure(
1321
- calld->retry_throttle_data);
1322
- }
1323
- }
1324
- GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error));
1325
- }
2871
+ //
2872
+ // filter call vtable functions
2873
+ //
1326
2874
 
1327
2875
  static void cc_start_transport_stream_op_batch(
1328
2876
  grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
@@ -1333,46 +2881,47 @@ static void cc_start_transport_stream_op_batch(
1333
2881
  grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
1334
2882
  }
1335
2883
  // If we've previously been cancelled, immediately fail any new batches.
1336
- if (calld->error != GRPC_ERROR_NONE) {
2884
+ if (calld->cancel_error != GRPC_ERROR_NONE) {
1337
2885
  if (grpc_client_channel_trace.enabled()) {
1338
2886
  gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
1339
- chand, calld, grpc_error_string(calld->error));
2887
+ chand, calld, grpc_error_string(calld->cancel_error));
1340
2888
  }
2889
+ // Note: This will release the call combiner.
1341
2890
  grpc_transport_stream_op_batch_finish_with_failure(
1342
- batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
2891
+ batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
1343
2892
  return;
1344
2893
  }
2894
+ // Handle cancellation.
1345
2895
  if (batch->cancel_stream) {
1346
2896
  // Stash a copy of cancel_error in our call data, so that we can use
1347
2897
  // it for subsequent operations. This ensures that if the call is
1348
2898
  // cancelled before any batches are passed down (e.g., if the deadline
1349
2899
  // is in the past when the call starts), we can return the right
1350
2900
  // error to the caller when the first batch does get passed down.
1351
- GRPC_ERROR_UNREF(calld->error);
1352
- calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
2901
+ GRPC_ERROR_UNREF(calld->cancel_error);
2902
+ calld->cancel_error =
2903
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
1353
2904
  if (grpc_client_channel_trace.enabled()) {
1354
2905
  gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
1355
- calld, grpc_error_string(calld->error));
2906
+ calld, grpc_error_string(calld->cancel_error));
1356
2907
  }
1357
- // If we have a subchannel call, send the cancellation batch down.
1358
- // Otherwise, fail all pending batches.
1359
- if (calld->subchannel_call != nullptr) {
1360
- grpc_subchannel_call_process_op(calld->subchannel_call, batch);
2908
+ // If we do not have a subchannel call (i.e., a pick has not yet
2909
+ // been started), fail all pending batches. Otherwise, send the
2910
+ // cancellation down to the subchannel call.
2911
+ if (calld->subchannel_call == nullptr) {
2912
+ pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error),
2913
+ false /* yield_call_combiner */);
2914
+ // Note: This will release the call combiner.
2915
+ grpc_transport_stream_op_batch_finish_with_failure(
2916
+ batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
1361
2917
  } else {
1362
- waiting_for_pick_batches_add(calld, batch);
1363
- waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
2918
+ // Note: This will release the call combiner.
2919
+ grpc_subchannel_call_process_op(calld->subchannel_call, batch);
1364
2920
  }
1365
2921
  return;
1366
2922
  }
1367
- // Intercept on_complete for recv_trailing_metadata so that we can
1368
- // check retry throttle status.
1369
- if (batch->recv_trailing_metadata) {
1370
- GPR_ASSERT(batch->on_complete != nullptr);
1371
- calld->original_on_complete = batch->on_complete;
1372
- GRPC_CLOSURE_INIT(&calld->on_complete, on_complete, elem,
1373
- grpc_schedule_on_exec_ctx);
1374
- batch->on_complete = &calld->on_complete;
1375
- }
2923
+ // Add the batch to the pending list.
2924
+ pending_batches_add(elem, batch);
1376
2925
  // Check if we've already gotten a subchannel call.
1377
2926
  // Note that once we have completed the pick, we do not need to enter
1378
2927
  // the channel combiner, which is more efficient (especially for
@@ -1380,15 +2929,13 @@ static void cc_start_transport_stream_op_batch(
1380
2929
  if (calld->subchannel_call != nullptr) {
1381
2930
  if (grpc_client_channel_trace.enabled()) {
1382
2931
  gpr_log(GPR_DEBUG,
1383
- "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
2932
+ "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
1384
2933
  calld, calld->subchannel_call);
1385
2934
  }
1386
- grpc_subchannel_call_process_op(calld->subchannel_call, batch);
2935
+ pending_batches_resume(elem);
1387
2936
  return;
1388
2937
  }
1389
2938
  // We do not yet have a subchannel call.
1390
- // Add the batch to the waiting-for-pick list.
1391
- waiting_for_pick_batches_add(calld, batch);
1392
2939
  // For batches containing a send_initial_metadata op, enter the channel
1393
2940
  // combiner to start a pick.
1394
2941
  if (batch->send_initial_metadata) {
@@ -1428,6 +2975,7 @@ static grpc_error* cc_init_call_elem(grpc_call_element* elem,
1428
2975
  grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
1429
2976
  calld->deadline);
1430
2977
  }
2978
+ calld->enable_retries = chand->enable_retries;
1431
2979
  return GRPC_ERROR_NONE;
1432
2980
  }
1433
2981
 
@@ -1441,10 +2989,9 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
1441
2989
  grpc_deadline_state_destroy(elem);
1442
2990
  }
1443
2991
  grpc_slice_unref_internal(calld->path);
1444
- if (calld->method_params != nullptr) {
1445
- method_parameters_unref(calld->method_params);
1446
- }
1447
- GRPC_ERROR_UNREF(calld->error);
2992
+ calld->retry_throttle_data.reset();
2993
+ calld->method_params.reset();
2994
+ GRPC_ERROR_UNREF(calld->cancel_error);
1448
2995
  if (calld->subchannel_call != nullptr) {
1449
2996
  grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
1450
2997
  then_schedule_closure);
@@ -1452,7 +2999,9 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
1452
2999
  GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
1453
3000
  "client_channel_destroy_call");
1454
3001
  }
1455
- GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
3002
+ for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
3003
+ GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
3004
+ }
1456
3005
  if (calld->pick.connected_subchannel != nullptr) {
1457
3006
  calld->pick.connected_subchannel.reset();
1458
3007
  }
@@ -1652,3 +3201,9 @@ void grpc_client_channel_watch_connectivity_state(
1652
3201
  grpc_combiner_scheduler(chand->combiner)),
1653
3202
  GRPC_ERROR_NONE);
1654
3203
  }
3204
+
3205
+ grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
3206
+ grpc_call_element* elem) {
3207
+ call_data* calld = static_cast<call_data*>(elem->call_data);
3208
+ return calld->subchannel_call;
3209
+ }