grpc 1.50.0 → 1.51.0
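
For applications that consume the Ruby gem, picking up this release is a one-line Gemfile change. This is a minimal illustrative sketch, not taken from the diff below; the pessimistic version-constraint style is an assumption:

    # Gemfile -- pin to the 1.51.x series of the grpc gem (illustrative)
    gem 'grpc', '~> 1.51.0'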

Potentially problematic release: this version of grpc might be problematic.

Files changed (459)
  1. checksums.yaml +4 -4
  2. data/Makefile +131 -42
  3. data/include/grpc/event_engine/event_engine.h +10 -3
  4. data/include/grpc/event_engine/slice_buffer.h +17 -0
  5. data/include/grpc/grpc.h +0 -10
  6. data/include/grpc/impl/codegen/grpc_types.h +1 -5
  7. data/include/grpc/impl/codegen/port_platform.h +0 -3
  8. data/src/core/ext/filters/channel_idle/channel_idle_filter.cc +19 -13
  9. data/src/core/ext/filters/channel_idle/channel_idle_filter.h +1 -0
  10. data/src/core/ext/filters/client_channel/backup_poller.cc +3 -3
  11. data/src/core/ext/filters/client_channel/channel_connectivity.cc +7 -5
  12. data/src/core/ext/filters/client_channel/client_channel.cc +120 -140
  13. data/src/core/ext/filters/client_channel/client_channel.h +3 -4
  14. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +0 -2
  15. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +1 -1
  16. data/src/core/ext/filters/client_channel/client_channel_service_config.cc +153 -0
  17. data/src/core/ext/filters/client_channel/{resolver_result_parsing.h → client_channel_service_config.h} +26 -23
  18. data/src/core/ext/filters/client_channel/connector.h +1 -1
  19. data/src/core/ext/filters/client_channel/dynamic_filters.cc +20 -47
  20. data/src/core/ext/filters/client_channel/dynamic_filters.h +7 -8
  21. data/src/core/ext/filters/client_channel/health/health_check_client.cc +3 -4
  22. data/src/core/ext/filters/client_channel/http_proxy.cc +0 -1
  23. data/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc +3 -4
  24. data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc +5 -0
  25. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +8 -7
  26. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +35 -44
  27. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc +0 -1
  28. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +1 -3
  29. data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc +3 -4
  30. data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.h +1 -1
  31. data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc +41 -29
  32. data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.h +2 -2
  33. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +9 -11
  34. data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +15 -12
  35. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +8 -10
  36. data/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +26 -27
  37. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +7 -9
  38. data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +44 -26
  39. data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +17 -27
  40. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_attributes.cc +42 -0
  41. data/src/core/ext/filters/client_channel/lb_policy/xds/{xds.h → xds_attributes.h} +15 -17
  42. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +13 -7
  43. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +48 -47
  44. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +40 -126
  45. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_wrr_locality.cc +364 -0
  46. data/src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc +9 -9
  47. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +23 -32
  48. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +1 -2
  49. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +22 -23
  50. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +50 -52
  51. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +1 -1
  52. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +2 -4
  53. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +1 -3
  54. data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +34 -26
  55. data/src/core/ext/filters/client_channel/resolver/polling_resolver.cc +3 -4
  56. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +4 -7
  57. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +63 -46
  58. data/src/core/ext/filters/client_channel/retry_filter.cc +80 -102
  59. data/src/core/ext/filters/client_channel/retry_service_config.cc +192 -234
  60. data/src/core/ext/filters/client_channel/retry_service_config.h +20 -23
  61. data/src/core/ext/filters/client_channel/retry_throttle.cc +8 -8
  62. data/src/core/ext/filters/client_channel/retry_throttle.h +8 -7
  63. data/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc +2 -2
  64. data/src/core/ext/filters/client_channel/subchannel.cc +21 -25
  65. data/src/core/ext/filters/client_channel/subchannel.h +2 -2
  66. data/src/core/ext/filters/client_channel/subchannel_stream_client.cc +11 -12
  67. data/src/core/ext/filters/deadline/deadline_filter.cc +13 -14
  68. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +1 -1
  69. data/src/core/ext/filters/fault_injection/fault_injection_filter.h +0 -4
  70. data/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.cc +118 -0
  71. data/src/core/ext/filters/fault_injection/{service_config_parser.h → fault_injection_service_config_parser.h} +20 -12
  72. data/src/core/ext/filters/http/client/http_client_filter.cc +16 -16
  73. data/src/core/ext/filters/http/client_authority_filter.cc +1 -1
  74. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +13 -13
  75. data/src/core/ext/filters/http/message_compress/message_decompress_filter.cc +34 -34
  76. data/src/core/ext/filters/http/server/http_server_filter.cc +26 -25
  77. data/src/core/ext/filters/message_size/message_size_filter.cc +86 -117
  78. data/src/core/ext/filters/message_size/message_size_filter.h +22 -15
  79. data/src/core/ext/filters/rbac/rbac_filter.cc +12 -12
  80. data/src/core/ext/filters/rbac/rbac_service_config_parser.cc +728 -530
  81. data/src/core/ext/filters/rbac/rbac_service_config_parser.h +4 -3
  82. data/src/core/ext/filters/server_config_selector/server_config_selector.h +1 -1
  83. data/src/core/ext/filters/server_config_selector/server_config_selector_filter.cc +6 -7
  84. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +17 -21
  85. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +57 -72
  86. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +5 -5
  87. data/src/core/ext/transport/chttp2/transport/bin_encoder.h +1 -1
  88. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +212 -253
  89. data/src/core/ext/transport/chttp2/transport/flow_control.cc +42 -11
  90. data/src/core/ext/transport/chttp2/transport/flow_control.h +4 -3
  91. data/src/core/ext/transport/chttp2/transport/frame_data.cc +16 -15
  92. data/src/core/ext/transport/chttp2/transport/frame_data.h +1 -1
  93. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +13 -13
  94. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +4 -3
  95. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +10 -7
  96. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +15 -17
  97. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +5 -4
  98. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +5 -6
  99. data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +1 -1
  100. data/src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc +2 -1
  101. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +31 -39
  102. data/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc +7 -6
  103. data/src/core/ext/transport/chttp2/transport/internal.h +24 -8
  104. data/src/core/ext/transport/chttp2/transport/parsing.cc +51 -52
  105. data/src/core/ext/transport/chttp2/transport/varint.cc +2 -3
  106. data/src/core/ext/transport/chttp2/transport/varint.h +11 -8
  107. data/src/core/ext/transport/chttp2/transport/writing.cc +16 -16
  108. data/src/core/ext/transport/inproc/inproc_transport.cc +97 -115
  109. data/src/core/ext/xds/certificate_provider_store.cc +4 -4
  110. data/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +4 -7
  111. data/src/core/ext/xds/xds_api.cc +15 -68
  112. data/src/core/ext/xds/xds_api.h +3 -7
  113. data/src/core/ext/xds/xds_bootstrap.h +0 -1
  114. data/src/core/ext/xds/xds_bootstrap_grpc.cc +3 -12
  115. data/src/core/ext/xds/xds_bootstrap_grpc.h +16 -1
  116. data/src/core/ext/xds/xds_certificate_provider.cc +22 -25
  117. data/src/core/ext/xds/xds_channel_stack_modifier.cc +0 -1
  118. data/src/core/ext/xds/xds_client.cc +122 -90
  119. data/src/core/ext/xds/xds_client.h +7 -2
  120. data/src/core/ext/xds/xds_client_grpc.cc +5 -24
  121. data/src/core/ext/xds/xds_cluster.cc +291 -183
  122. data/src/core/ext/xds/xds_cluster.h +11 -15
  123. data/src/core/ext/xds/xds_cluster_specifier_plugin.cc +32 -29
  124. data/src/core/ext/xds/xds_cluster_specifier_plugin.h +35 -16
  125. data/src/core/ext/xds/xds_common_types.cc +208 -141
  126. data/src/core/ext/xds/xds_common_types.h +19 -13
  127. data/src/core/ext/xds/xds_endpoint.cc +214 -129
  128. data/src/core/ext/xds/xds_endpoint.h +4 -7
  129. data/src/core/ext/xds/xds_http_fault_filter.cc +56 -43
  130. data/src/core/ext/xds/xds_http_fault_filter.h +13 -21
  131. data/src/core/ext/xds/xds_http_filters.cc +60 -73
  132. data/src/core/ext/xds/xds_http_filters.h +67 -19
  133. data/src/core/ext/xds/xds_http_rbac_filter.cc +152 -207
  134. data/src/core/ext/xds/xds_http_rbac_filter.h +12 -15
  135. data/src/core/ext/xds/xds_lb_policy_registry.cc +122 -169
  136. data/src/core/ext/xds/xds_lb_policy_registry.h +10 -11
  137. data/src/core/ext/xds/xds_listener.cc +459 -417
  138. data/src/core/ext/xds/xds_listener.h +43 -47
  139. data/src/core/ext/xds/xds_resource_type.h +3 -11
  140. data/src/core/ext/xds/xds_resource_type_impl.h +8 -13
  141. data/src/core/ext/xds/xds_route_config.cc +94 -80
  142. data/src/core/ext/xds/xds_route_config.h +10 -10
  143. data/src/core/ext/xds/xds_routing.cc +2 -1
  144. data/src/core/ext/xds/xds_routing.h +2 -0
  145. data/src/core/ext/xds/xds_server_config_fetcher.cc +109 -94
  146. data/src/core/ext/xds/xds_transport_grpc.cc +4 -5
  147. data/src/core/lib/address_utils/parse_address.cc +11 -10
  148. data/src/core/lib/channel/channel_args.h +16 -1
  149. data/src/core/lib/channel/channel_stack.cc +23 -20
  150. data/src/core/lib/channel/channel_stack.h +17 -4
  151. data/src/core/lib/channel/channel_stack_builder.cc +4 -7
  152. data/src/core/lib/channel/channel_stack_builder.h +14 -6
  153. data/src/core/lib/channel/channel_stack_builder_impl.cc +25 -7
  154. data/src/core/lib/channel/channel_stack_builder_impl.h +2 -0
  155. data/src/core/lib/channel/channel_trace.cc +4 -5
  156. data/src/core/lib/channel/channelz.cc +1 -1
  157. data/src/core/lib/channel/connected_channel.cc +695 -35
  158. data/src/core/lib/channel/connected_channel.h +0 -4
  159. data/src/core/lib/channel/promise_based_filter.cc +1004 -140
  160. data/src/core/lib/channel/promise_based_filter.h +364 -87
  161. data/src/core/lib/compression/message_compress.cc +5 -5
  162. data/src/core/lib/debug/event_log.cc +88 -0
  163. data/src/core/lib/debug/event_log.h +81 -0
  164. data/src/core/lib/debug/histogram_view.cc +69 -0
  165. data/src/core/lib/{slice/slice_refcount.cc → debug/histogram_view.h} +15 -13
  166. data/src/core/lib/debug/stats.cc +22 -119
  167. data/src/core/lib/debug/stats.h +29 -35
  168. data/src/core/lib/debug/stats_data.cc +224 -73
  169. data/src/core/lib/debug/stats_data.h +263 -122
  170. data/src/core/lib/event_engine/common_closures.h +71 -0
  171. data/src/core/lib/event_engine/default_event_engine.cc +38 -15
  172. data/src/core/lib/event_engine/default_event_engine.h +15 -3
  173. data/src/core/lib/event_engine/default_event_engine_factory.cc +2 -4
  174. data/src/core/lib/event_engine/memory_allocator.cc +1 -1
  175. data/src/core/lib/event_engine/poller.h +10 -4
  176. data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc +618 -0
  177. data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.h +129 -0
  178. data/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc +901 -0
  179. data/src/core/lib/event_engine/posix_engine/ev_poll_posix.h +97 -0
  180. data/src/core/lib/event_engine/posix_engine/event_poller.h +111 -0
  181. data/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc +74 -0
  182. data/src/core/lib/event_engine/{executor/threaded_executor.cc → posix_engine/event_poller_posix_default.h} +13 -16
  183. data/src/core/lib/event_engine/posix_engine/internal_errqueue.cc +77 -0
  184. data/src/core/lib/event_engine/posix_engine/internal_errqueue.h +179 -0
  185. data/src/core/lib/event_engine/posix_engine/lockfree_event.cc +267 -0
  186. data/src/core/lib/event_engine/posix_engine/lockfree_event.h +73 -0
  187. data/src/core/lib/event_engine/posix_engine/posix_endpoint.cc +1270 -0
  188. data/src/core/lib/event_engine/posix_engine/posix_endpoint.h +682 -0
  189. data/src/core/lib/event_engine/posix_engine/posix_engine.cc +453 -18
  190. data/src/core/lib/event_engine/posix_engine/posix_engine.h +148 -24
  191. data/src/core/lib/event_engine/posix_engine/posix_engine_closure.h +80 -0
  192. data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc +1081 -0
  193. data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.h +361 -0
  194. data/src/core/lib/event_engine/posix_engine/timer.h +9 -8
  195. data/src/core/lib/event_engine/posix_engine/timer_manager.cc +57 -194
  196. data/src/core/lib/event_engine/posix_engine/timer_manager.h +21 -49
  197. data/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc +301 -0
  198. data/src/core/lib/event_engine/posix_engine/traced_buffer_list.h +179 -0
  199. data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc +126 -0
  200. data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.h +45 -0
  201. data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc +151 -0
  202. data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.h +45 -0
  203. data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix.h +76 -0
  204. data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc +67 -0
  205. data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.h +37 -0
  206. data/src/core/lib/event_engine/slice.cc +7 -6
  207. data/src/core/lib/event_engine/slice_buffer.cc +2 -2
  208. data/src/core/lib/event_engine/thread_pool.cc +106 -25
  209. data/src/core/lib/event_engine/thread_pool.h +32 -9
  210. data/src/core/lib/event_engine/windows/win_socket.cc +7 -7
  211. data/src/core/lib/event_engine/windows/windows_engine.cc +18 -12
  212. data/src/core/lib/event_engine/windows/windows_engine.h +8 -4
  213. data/src/core/lib/experiments/config.cc +1 -1
  214. data/src/core/lib/experiments/experiments.cc +13 -2
  215. data/src/core/lib/experiments/experiments.h +8 -1
  216. data/src/core/lib/gpr/cpu_linux.cc +6 -2
  217. data/src/core/lib/gpr/log_linux.cc +3 -4
  218. data/src/core/lib/gpr/string.h +1 -1
  219. data/src/core/lib/gpr/tmpfile_posix.cc +3 -2
  220. data/src/core/lib/gprpp/load_file.cc +75 -0
  221. data/src/core/lib/gprpp/load_file.h +33 -0
  222. data/src/core/lib/gprpp/per_cpu.h +46 -0
  223. data/src/core/lib/gprpp/stat_posix.cc +5 -4
  224. data/src/core/lib/gprpp/stat_windows.cc +3 -2
  225. data/src/core/lib/gprpp/status_helper.h +1 -3
  226. data/src/core/lib/gprpp/strerror.cc +41 -0
  227. data/src/core/{ext/xds/xds_resource_type.cc → lib/gprpp/strerror.h} +9 -13
  228. data/src/core/lib/gprpp/thd_windows.cc +1 -2
  229. data/src/core/lib/gprpp/time.cc +3 -4
  230. data/src/core/lib/gprpp/time.h +13 -2
  231. data/src/core/lib/gprpp/validation_errors.h +18 -1
  232. data/src/core/lib/http/httpcli.cc +40 -44
  233. data/src/core/lib/http/httpcli.h +6 -5
  234. data/src/core/lib/http/httpcli_security_connector.cc +4 -6
  235. data/src/core/lib/http/parser.cc +54 -65
  236. data/src/core/lib/iomgr/buffer_list.cc +105 -116
  237. data/src/core/lib/iomgr/buffer_list.h +60 -44
  238. data/src/core/lib/iomgr/call_combiner.cc +11 -10
  239. data/src/core/lib/iomgr/call_combiner.h +3 -4
  240. data/src/core/lib/iomgr/cfstream_handle.cc +13 -16
  241. data/src/core/lib/iomgr/closure.h +49 -5
  242. data/src/core/lib/iomgr/combiner.cc +2 -2
  243. data/src/core/lib/iomgr/endpoint.h +1 -1
  244. data/src/core/lib/iomgr/endpoint_cfstream.cc +26 -25
  245. data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
  246. data/src/core/lib/iomgr/error.cc +27 -42
  247. data/src/core/lib/iomgr/error.h +22 -152
  248. data/src/core/lib/iomgr/ev_apple.cc +4 -4
  249. data/src/core/lib/iomgr/ev_epoll1_linux.cc +26 -25
  250. data/src/core/lib/iomgr/ev_poll_posix.cc +27 -31
  251. data/src/core/lib/iomgr/exec_ctx.cc +3 -4
  252. data/src/core/lib/iomgr/exec_ctx.h +2 -3
  253. data/src/core/lib/iomgr/executor.cc +1 -2
  254. data/src/core/lib/iomgr/internal_errqueue.cc +3 -1
  255. data/src/core/lib/iomgr/iocp_windows.cc +1 -0
  256. data/src/core/lib/iomgr/iomgr_posix.cc +2 -2
  257. data/src/core/lib/iomgr/iomgr_posix_cfstream.cc +2 -1
  258. data/src/core/lib/iomgr/iomgr_windows.cc +2 -1
  259. data/src/core/lib/iomgr/load_file.cc +5 -9
  260. data/src/core/lib/iomgr/lockfree_event.cc +10 -10
  261. data/src/core/lib/iomgr/pollset_windows.cc +4 -4
  262. data/src/core/lib/iomgr/python_util.h +2 -2
  263. data/src/core/lib/iomgr/resolve_address.cc +8 -3
  264. data/src/core/lib/iomgr/resolve_address.h +3 -4
  265. data/src/core/lib/iomgr/resolve_address_impl.h +1 -1
  266. data/src/core/lib/iomgr/resolve_address_posix.cc +14 -25
  267. data/src/core/lib/iomgr/resolve_address_posix.h +1 -2
  268. data/src/core/lib/iomgr/resolve_address_windows.cc +14 -17
  269. data/src/core/lib/iomgr/resolve_address_windows.h +1 -2
  270. data/src/core/lib/iomgr/socket_utils_common_posix.cc +30 -29
  271. data/src/core/lib/iomgr/socket_utils_posix.cc +1 -0
  272. data/src/core/lib/iomgr/socket_utils_posix.h +2 -2
  273. data/src/core/lib/iomgr/socket_windows.cc +2 -2
  274. data/src/core/lib/iomgr/tcp_client_cfstream.cc +6 -10
  275. data/src/core/lib/iomgr/tcp_client_posix.cc +31 -35
  276. data/src/core/lib/iomgr/tcp_client_windows.cc +8 -12
  277. data/src/core/lib/iomgr/tcp_posix.cc +92 -108
  278. data/src/core/lib/iomgr/tcp_server_posix.cc +34 -34
  279. data/src/core/lib/iomgr/tcp_server_utils_posix.h +1 -1
  280. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +18 -21
  281. data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +12 -13
  282. data/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc +1 -1
  283. data/src/core/lib/iomgr/tcp_server_windows.cc +26 -29
  284. data/src/core/lib/iomgr/tcp_windows.cc +27 -34
  285. data/src/core/lib/iomgr/timer.h +8 -8
  286. data/src/core/lib/iomgr/timer_generic.cc +9 -15
  287. data/src/core/lib/iomgr/unix_sockets_posix.cc +2 -4
  288. data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +4 -3
  289. data/src/core/lib/iomgr/wakeup_fd_pipe.cc +10 -8
  290. data/src/core/lib/json/json_channel_args.h +42 -0
  291. data/src/core/lib/json/json_object_loader.cc +7 -2
  292. data/src/core/lib/json/json_object_loader.h +22 -0
  293. data/src/core/lib/json/json_util.cc +5 -5
  294. data/src/core/lib/json/json_util.h +4 -4
  295. data/src/core/lib/load_balancing/lb_policy.cc +1 -1
  296. data/src/core/lib/load_balancing/lb_policy.h +4 -0
  297. data/src/core/lib/load_balancing/subchannel_interface.h +0 -7
  298. data/src/core/lib/matchers/matchers.cc +3 -4
  299. data/src/core/lib/promise/activity.cc +16 -2
  300. data/src/core/lib/promise/activity.h +38 -15
  301. data/src/core/lib/promise/arena_promise.h +80 -51
  302. data/src/core/lib/promise/context.h +13 -6
  303. data/src/core/lib/promise/detail/basic_seq.h +9 -28
  304. data/src/core/lib/promise/detail/promise_factory.h +58 -10
  305. data/src/core/lib/promise/detail/status.h +28 -0
  306. data/src/core/lib/promise/detail/switch.h +1455 -0
  307. data/src/core/lib/promise/exec_ctx_wakeup_scheduler.h +3 -1
  308. data/src/core/lib/promise/for_each.h +129 -0
  309. data/src/core/lib/promise/loop.h +7 -5
  310. data/src/core/lib/promise/map_pipe.h +87 -0
  311. data/src/core/lib/promise/pipe.cc +19 -0
  312. data/src/core/lib/promise/pipe.h +505 -0
  313. data/src/core/lib/promise/poll.h +13 -0
  314. data/src/core/lib/promise/seq.h +3 -5
  315. data/src/core/lib/promise/sleep.cc +5 -4
  316. data/src/core/lib/promise/sleep.h +1 -2
  317. data/src/core/lib/promise/try_concurrently.h +341 -0
  318. data/src/core/lib/promise/try_seq.h +10 -13
  319. data/src/core/lib/resolver/server_address.cc +1 -0
  320. data/src/core/lib/resolver/server_address.h +1 -3
  321. data/src/core/lib/resource_quota/api.cc +0 -1
  322. data/src/core/lib/resource_quota/arena.cc +19 -0
  323. data/src/core/lib/resource_quota/arena.h +89 -0
  324. data/src/core/lib/resource_quota/memory_quota.cc +1 -0
  325. data/src/core/lib/security/authorization/grpc_authorization_engine.cc +1 -3
  326. data/src/core/lib/security/authorization/grpc_server_authz_filter.cc +4 -2
  327. data/src/core/lib/security/authorization/matchers.cc +25 -22
  328. data/src/core/lib/security/authorization/rbac_policy.cc +2 -3
  329. data/src/core/lib/security/context/security_context.h +10 -0
  330. data/src/core/lib/security/credentials/channel_creds_registry_init.cc +3 -4
  331. data/src/core/lib/security/credentials/composite/composite_credentials.cc +1 -1
  332. data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +77 -55
  333. data/src/core/lib/security/credentials/external/aws_request_signer.cc +4 -3
  334. data/src/core/lib/security/credentials/external/external_account_credentials.cc +40 -51
  335. data/src/core/lib/security/credentials/external/file_external_account_credentials.cc +17 -21
  336. data/src/core/lib/security/credentials/external/url_external_account_credentials.cc +21 -25
  337. data/src/core/lib/security/credentials/fake/fake_credentials.cc +1 -0
  338. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +27 -24
  339. data/src/core/lib/security/credentials/iam/iam_credentials.cc +1 -0
  340. data/src/core/lib/security/credentials/jwt/json_token.cc +1 -2
  341. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +1 -1
  342. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +5 -5
  343. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +24 -30
  344. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +6 -5
  345. data/src/core/lib/security/credentials/plugin/plugin_credentials.h +3 -3
  346. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc +19 -27
  347. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.h +4 -11
  348. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +29 -41
  349. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc +1 -1
  350. data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +6 -11
  351. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +8 -15
  352. data/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc +2 -2
  353. data/src/core/lib/security/security_connector/insecure/insecure_security_connector.h +2 -6
  354. data/src/core/lib/security/security_connector/load_system_roots_supported.cc +1 -4
  355. data/src/core/lib/security/security_connector/local/local_security_connector.cc +7 -11
  356. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +9 -14
  357. data/src/core/lib/security/security_connector/ssl_utils.cc +5 -7
  358. data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +21 -27
  359. data/src/core/lib/security/transport/client_auth_filter.cc +1 -1
  360. data/src/core/lib/security/transport/secure_endpoint.cc +26 -28
  361. data/src/core/lib/security/transport/security_handshaker.cc +53 -53
  362. data/src/core/lib/security/transport/server_auth_filter.cc +21 -21
  363. data/src/core/lib/security/transport/tsi_error.cc +6 -3
  364. data/src/core/lib/security/util/json_util.cc +4 -5
  365. data/src/core/lib/service_config/service_config.h +1 -1
  366. data/src/core/lib/service_config/service_config_impl.cc +111 -158
  367. data/src/core/lib/service_config/service_config_impl.h +14 -17
  368. data/src/core/lib/service_config/service_config_parser.cc +14 -31
  369. data/src/core/lib/service_config/service_config_parser.h +14 -10
  370. data/src/core/lib/slice/b64.cc +2 -2
  371. data/src/core/lib/slice/slice.cc +7 -1
  372. data/src/core/lib/slice/slice.h +19 -6
  373. data/src/core/lib/slice/slice_buffer.cc +13 -14
  374. data/src/core/lib/slice/slice_internal.h +13 -21
  375. data/src/core/lib/slice/slice_refcount.h +34 -19
  376. data/src/core/lib/surface/byte_buffer.cc +3 -4
  377. data/src/core/lib/surface/byte_buffer_reader.cc +4 -4
  378. data/src/core/lib/surface/call.cc +1366 -239
  379. data/src/core/lib/surface/call.h +44 -0
  380. data/src/core/lib/surface/call_details.cc +3 -3
  381. data/src/core/lib/surface/call_trace.cc +113 -0
  382. data/src/core/lib/surface/call_trace.h +30 -0
  383. data/src/core/lib/surface/channel.cc +44 -49
  384. data/src/core/lib/surface/channel.h +9 -1
  385. data/src/core/lib/surface/channel_ping.cc +1 -1
  386. data/src/core/lib/surface/channel_stack_type.cc +4 -0
  387. data/src/core/lib/surface/channel_stack_type.h +2 -0
  388. data/src/core/lib/surface/completion_queue.cc +38 -52
  389. data/src/core/lib/surface/init.cc +8 -39
  390. data/src/core/lib/surface/init_internally.h +8 -0
  391. data/src/core/lib/surface/lame_client.cc +10 -8
  392. data/src/core/lib/surface/server.cc +48 -70
  393. data/src/core/lib/surface/server.h +3 -4
  394. data/src/core/lib/surface/validate_metadata.cc +11 -12
  395. data/src/core/lib/surface/version.cc +2 -2
  396. data/src/core/lib/transport/connectivity_state.cc +2 -2
  397. data/src/core/lib/transport/error_utils.cc +34 -28
  398. data/src/core/lib/transport/error_utils.h +3 -3
  399. data/src/core/lib/transport/handshaker.cc +14 -14
  400. data/src/core/lib/transport/handshaker.h +1 -1
  401. data/src/core/lib/transport/handshaker_factory.h +26 -0
  402. data/src/core/lib/transport/handshaker_registry.cc +8 -2
  403. data/src/core/lib/transport/handshaker_registry.h +3 -4
  404. data/src/core/lib/transport/http_connect_handshaker.cc +23 -24
  405. data/src/core/lib/transport/metadata_batch.h +17 -1
  406. data/src/core/lib/transport/parsed_metadata.cc +2 -6
  407. data/src/core/lib/transport/tcp_connect_handshaker.cc +15 -20
  408. data/src/core/lib/transport/transport.cc +63 -17
  409. data/src/core/lib/transport/transport.h +64 -68
  410. data/src/core/lib/transport/transport_impl.h +1 -1
  411. data/src/core/lib/transport/transport_op_string.cc +7 -6
  412. data/src/core/plugin_registry/grpc_plugin_registry.cc +6 -10
  413. data/src/core/plugin_registry/grpc_plugin_registry_extra.cc +2 -14
  414. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +10 -10
  415. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +8 -8
  416. data/src/core/tsi/alts/handshaker/alts_tsi_utils.cc +2 -1
  417. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +7 -7
  418. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc +7 -6
  419. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc +1 -1
  420. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +5 -5
  421. data/src/core/tsi/fake_transport_security.cc +3 -3
  422. data/src/core/tsi/ssl/key_logging/ssl_key_logging.cc +7 -3
  423. data/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc +1 -1
  424. data/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc +6 -2
  425. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +0 -2
  426. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +0 -3
  427. data/src/ruby/lib/grpc/version.rb +1 -1
  428. data/src/ruby/spec/channel_spec.rb +0 -43
  429. data/src/ruby/spec/generic/active_call_spec.rb +12 -3
  430. data/third_party/abseil-cpp/absl/cleanup/cleanup.h +140 -0
  431. data/third_party/abseil-cpp/absl/cleanup/internal/cleanup.h +100 -0
  432. data/third_party/zlib/compress.c +3 -3
  433. data/third_party/zlib/crc32.c +21 -12
  434. data/third_party/zlib/deflate.c +112 -106
  435. data/third_party/zlib/deflate.h +2 -2
  436. data/third_party/zlib/gzlib.c +1 -1
  437. data/third_party/zlib/gzread.c +3 -5
  438. data/third_party/zlib/gzwrite.c +1 -1
  439. data/third_party/zlib/infback.c +10 -7
  440. data/third_party/zlib/inflate.c +5 -2
  441. data/third_party/zlib/inftrees.c +2 -2
  442. data/third_party/zlib/inftrees.h +1 -1
  443. data/third_party/zlib/trees.c +61 -62
  444. data/third_party/zlib/uncompr.c +2 -2
  445. data/third_party/zlib/zconf.h +16 -3
  446. data/third_party/zlib/zlib.h +10 -10
  447. data/third_party/zlib/zutil.c +9 -7
  448. data/third_party/zlib/zutil.h +1 -0
  449. metadata +55 -18
  450. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +0 -188
  451. data/src/core/ext/filters/fault_injection/service_config_parser.cc +0 -187
  452. data/src/core/lib/event_engine/executor/threaded_executor.h +0 -44
  453. data/src/core/lib/gpr/murmur_hash.cc +0 -82
  454. data/src/core/lib/gpr/murmur_hash.h +0 -29
  455. data/src/core/lib/gpr/tls.h +0 -156
  456. data/src/core/lib/promise/call_push_pull.h +0 -148
  457. data/src/core/lib/slice/slice_api.cc +0 -39
  458. data/src/core/lib/slice/slice_buffer_api.cc +0 -35
  459. data/src/core/lib/slice/slice_refcount_base.h +0 -60
@@ -16,23 +16,57 @@
16
16
 
17
17
  #include "src/core/lib/channel/promise_based_filter.h"
18
18
 
19
+ #include <algorithm>
19
20
  #include <memory>
20
21
  #include <string>
22
+ #include <vector>
21
23
 
22
24
  #include "absl/base/attributes.h"
23
- #include "absl/memory/memory.h"
25
+ #include "absl/functional/function_ref.h"
26
+ #include "absl/strings/str_cat.h"
27
+ #include "absl/strings/str_join.h"
24
28
  #include "absl/types/variant.h"
25
29
 
26
30
  #include <grpc/status.h>
27
31
 
28
32
  #include "src/core/lib/channel/channel_stack.h"
33
+ #include "src/core/lib/debug/trace.h"
29
34
  #include "src/core/lib/gprpp/manual_constructor.h"
35
+ #include "src/core/lib/gprpp/status_helper.h"
30
36
  #include "src/core/lib/iomgr/error.h"
31
37
  #include "src/core/lib/slice/slice.h"
32
38
 
39
+ extern grpc_core::TraceFlag grpc_trace_channel;
40
+
33
41
  namespace grpc_core {
34
42
  namespace promise_filter_detail {
35
43
 
44
+ namespace {
45
+ class FakeActivity final : public Activity {
46
+ public:
47
+ void Orphan() override {}
48
+ void ForceImmediateRepoll() override {}
49
+ Waker MakeOwningWaker() override { abort(); }
50
+ Waker MakeNonOwningWaker() override { abort(); }
51
+ void Run(absl::FunctionRef<void()> f) {
52
+ ScopedActivity activity(this);
53
+ f();
54
+ }
55
+ };
56
+
57
+ absl::Status StatusFromMetadata(const ServerMetadata& md) {
58
+ auto status_code = md.get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN);
59
+ if (status_code == GRPC_STATUS_OK) {
60
+ return absl::OkStatus();
61
+ }
62
+ const auto* message = md.get_pointer(GrpcMessageMetadata());
63
+ return grpc_error_set_int(
64
+ absl::Status(static_cast<absl::StatusCode>(status_code),
65
+ message == nullptr ? "" : message->as_string_view()),
66
+ StatusIntProperty::kRpcStatus, status_code);
67
+ }
68
+ } // namespace
69
+
36
70
  ///////////////////////////////////////////////////////////////////////////////
37
71
  // BaseCallData
38
72
 
@@ -43,16 +77,34 @@ BaseCallData::BaseCallData(grpc_call_element* elem,
43
77
  arena_(args->arena),
44
78
  call_combiner_(args->call_combiner),
45
79
  deadline_(args->deadline),
46
- context_(args->context) {
47
- if (flags & kFilterExaminesServerInitialMetadata) {
48
- server_initial_metadata_latch_ = arena_->New<Latch<ServerMetadata*>>();
49
- }
80
+ context_(args->context),
81
+ server_initial_metadata_latch_(
82
+ flags & kFilterExaminesServerInitialMetadata
83
+ ? arena_->New<Latch<ServerMetadata*>>()
84
+ : nullptr),
85
+ send_message_(flags & kFilterExaminesOutboundMessages
86
+ ? arena_->New<SendMessage>(this)
87
+ : nullptr),
88
+ receive_message_(flags & kFilterExaminesInboundMessages
89
+ ? arena_->New<ReceiveMessage>(this)
90
+ : nullptr),
91
+ event_engine_(
92
+ static_cast<ChannelFilter*>(elem->channel_data)
93
+ ->hack_until_per_channel_stack_event_engines_land_get_event_engine()) {
50
94
  }
51
95
 
52
96
  BaseCallData::~BaseCallData() {
53
- if (server_initial_metadata_latch_ != nullptr) {
54
- server_initial_metadata_latch_->~Latch();
55
- }
97
+ FakeActivity().Run([this] {
98
+ if (send_message_ != nullptr) {
99
+ send_message_->~SendMessage();
100
+ }
101
+ if (receive_message_ != nullptr) {
102
+ receive_message_->~ReceiveMessage();
103
+ }
104
+ if (server_initial_metadata_latch_ != nullptr) {
105
+ server_initial_metadata_latch_->~Latch();
106
+ }
107
+ });
56
108
  }
57
109
 
58
110
  // We don't form ActivityPtr's to this type, and consequently don't need
@@ -75,11 +127,17 @@ void BaseCallData::Wakeup() {
75
127
  self->Drop();
76
128
  };
77
129
  auto* closure = GRPC_CLOSURE_CREATE(wakeup, this, nullptr);
78
- GRPC_CALL_COMBINER_START(call_combiner_, closure, GRPC_ERROR_NONE, "wakeup");
130
+ GRPC_CALL_COMBINER_START(call_combiner_, closure, absl::OkStatus(), "wakeup");
79
131
  }
80
132
 
81
133
  void BaseCallData::Drop() { GRPC_CALL_STACK_UNREF(call_stack_, "waker"); }
82
134
 
135
+ std::string BaseCallData::LogTag() const {
136
+ return absl::StrCat(
137
+ ClientOrServerString(), "[", elem_->filter->name, ":0x",
138
+ absl::Hex(reinterpret_cast<uintptr_t>(elem_), absl::kZeroPad8), "]");
139
+ }
140
+
83
141
  ///////////////////////////////////////////////////////////////////////////////
84
142
  // BaseCallData::CapturedBatch
85
143
 
@@ -159,7 +217,6 @@ void BaseCallData::CapturedBatch::CancelWith(grpc_error_handle error,
159
217
  uintptr_t& refcnt = *RefCountField(batch);
160
218
  if (refcnt == 0) {
161
219
  // refcnt==0 ==> cancelled
162
- GRPC_ERROR_UNREF(error);
163
220
  return;
164
221
  }
165
222
  refcnt = 0;
@@ -188,23 +245,501 @@ BaseCallData::Flusher::~Flusher() {
188
245
  auto* batch = static_cast<grpc_transport_stream_op_batch*>(p);
189
246
  BaseCallData* call =
190
247
  static_cast<BaseCallData*>(batch->handler_private.extra_arg);
248
+ if (grpc_trace_channel.enabled()) {
249
+ gpr_log(GPR_DEBUG, "FLUSHER:forward batch via closure: %s",
250
+ grpc_transport_stream_op_batch_string(batch).c_str());
251
+ }
191
252
  grpc_call_next_op(call->elem(), batch);
192
253
  GRPC_CALL_STACK_UNREF(call->call_stack(), "flusher_batch");
193
254
  };
194
255
  for (size_t i = 1; i < release_.size(); i++) {
195
256
  auto* batch = release_[i];
257
+ if (grpc_trace_channel.enabled()) {
258
+ gpr_log(GPR_DEBUG, "FLUSHER:queue batch to forward in closure: %s",
259
+ grpc_transport_stream_op_batch_string(release_[i]).c_str());
260
+ }
196
261
  batch->handler_private.extra_arg = call_;
197
262
  GRPC_CLOSURE_INIT(&batch->handler_private.closure, call_next_op, batch,
198
263
  nullptr);
199
264
  GRPC_CALL_STACK_REF(call_->call_stack(), "flusher_batch");
200
- call_closures_.Add(&batch->handler_private.closure, GRPC_ERROR_NONE,
265
+ call_closures_.Add(&batch->handler_private.closure, absl::OkStatus(),
201
266
  "flusher_batch");
202
267
  }
203
268
  call_closures_.RunClosuresWithoutYielding(call_->call_combiner());
269
+ if (grpc_trace_channel.enabled()) {
270
+ gpr_log(GPR_DEBUG, "FLUSHER:forward batch: %s",
271
+ grpc_transport_stream_op_batch_string(release_[0]).c_str());
272
+ }
204
273
  grpc_call_next_op(call_->elem(), release_[0]);
205
274
  GRPC_CALL_STACK_UNREF(call_->call_stack(), "flusher");
206
275
  }
207
276
 
277
+ ///////////////////////////////////////////////////////////////////////////////
278
+ // BaseCallData::SendMessage
279
+
280
+ const char* BaseCallData::SendMessage::StateString(State state) {
281
+ switch (state) {
282
+ case State::kInitial:
283
+ return "INITIAL";
284
+ case State::kIdle:
285
+ return "IDLE";
286
+ case State::kGotBatchNoPipe:
287
+ return "GOT_BATCH_NO_PIPE";
288
+ case State::kGotBatch:
289
+ return "GOT_BATCH";
290
+ case State::kPushedToPipe:
291
+ return "PUSHED_TO_PIPE";
292
+ case State::kForwardedBatch:
293
+ return "FORWARDED_BATCH";
294
+ case State::kBatchCompleted:
295
+ return "BATCH_COMPLETED";
296
+ case State::kCancelled:
297
+ return "CANCELLED";
298
+ }
299
+ return "UNKNOWN";
300
+ }
301
+
302
+ void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
303
+ if (grpc_trace_channel.enabled()) {
304
+ gpr_log(GPR_DEBUG, "%s SendMessage.StartOp st=%s", base_->LogTag().c_str(),
305
+ StateString(state_));
306
+ }
307
+ switch (state_) {
308
+ case State::kInitial:
309
+ state_ = State::kGotBatchNoPipe;
310
+ break;
311
+ case State::kIdle:
312
+ state_ = State::kGotBatch;
313
+ break;
314
+ case State::kGotBatch:
315
+ case State::kGotBatchNoPipe:
316
+ case State::kForwardedBatch:
317
+ case State::kBatchCompleted:
318
+ case State::kPushedToPipe:
319
+ abort();
320
+ case State::kCancelled:
321
+ return;
322
+ }
323
+ batch_ = batch;
324
+ intercepted_on_complete_ = std::exchange(batch_->on_complete, &on_complete_);
325
+ }
326
+
327
+ void BaseCallData::SendMessage::GotPipe(PipeReceiver<MessageHandle>* receiver) {
328
+ if (grpc_trace_channel.enabled()) {
329
+ gpr_log(GPR_DEBUG, "%s SendMessage.GotPipe st=%s", base_->LogTag().c_str(),
330
+ StateString(state_));
331
+ }
332
+ GPR_ASSERT(receiver != nullptr);
333
+ switch (state_) {
334
+ case State::kInitial:
335
+ state_ = State::kIdle;
336
+ Activity::current()->ForceImmediateRepoll();
337
+ break;
338
+ case State::kGotBatchNoPipe:
339
+ state_ = State::kGotBatch;
340
+ Activity::current()->ForceImmediateRepoll();
341
+ break;
342
+ case State::kIdle:
343
+ case State::kGotBatch:
344
+ case State::kForwardedBatch:
345
+ case State::kBatchCompleted:
346
+ case State::kPushedToPipe:
347
+ abort();
348
+ case State::kCancelled:
349
+ return;
350
+ }
351
+ receiver_ = receiver;
352
+ }
353
+
354
+ bool BaseCallData::SendMessage::IsIdle() const {
355
+ switch (state_) {
356
+ case State::kInitial:
357
+ case State::kIdle:
358
+ case State::kForwardedBatch:
359
+ case State::kCancelled:
360
+ return true;
361
+ case State::kGotBatchNoPipe:
362
+ case State::kGotBatch:
363
+ case State::kBatchCompleted:
364
+ case State::kPushedToPipe:
365
+ return false;
366
+ }
367
+ GPR_UNREACHABLE_CODE(return false);
368
+ }
369
+
370
+ void BaseCallData::SendMessage::OnComplete(absl::Status status) {
371
+ Flusher flusher(base_);
372
+ if (grpc_trace_channel.enabled()) {
373
+ gpr_log(GPR_DEBUG, "%s SendMessage.OnComplete st=%s status=%s",
374
+ base_->LogTag().c_str(), StateString(state_),
375
+ status.ToString().c_str());
376
+ }
377
+ switch (state_) {
378
+ case State::kInitial:
379
+ case State::kIdle:
380
+ case State::kGotBatchNoPipe:
381
+ case State::kPushedToPipe:
382
+ case State::kGotBatch:
383
+ case State::kBatchCompleted:
384
+ abort();
385
+ break;
386
+ case State::kCancelled:
387
+ flusher.AddClosure(intercepted_on_complete_, status,
388
+ "forward after cancel");
389
+ break;
390
+ case State::kForwardedBatch:
391
+ completed_status_ = status;
392
+ state_ = State::kBatchCompleted;
393
+ base_->WakeInsideCombiner(&flusher);
394
+ break;
395
+ }
396
+ }
397
+
398
+ void BaseCallData::SendMessage::Done(const ServerMetadata& metadata) {
399
+ if (grpc_trace_channel.enabled()) {
400
+ gpr_log(GPR_DEBUG, "%s SendMessage.Done st=%s md=%s",
401
+ base_->LogTag().c_str(), StateString(state_),
402
+ metadata.DebugString().c_str());
403
+ }
404
+ switch (state_) {
405
+ case State::kCancelled:
406
+ break;
407
+ case State::kInitial:
408
+ case State::kIdle:
409
+ case State::kForwardedBatch:
410
+ state_ = State::kCancelled;
411
+ break;
412
+ case State::kGotBatchNoPipe:
413
+ case State::kGotBatch:
414
+ case State::kBatchCompleted:
415
+ abort();
416
+ break;
417
+ case State::kPushedToPipe:
418
+ push_.reset();
419
+ next_.reset();
420
+ state_ = State::kCancelled;
421
+ break;
422
+ }
423
+ }
424
+
425
+ void BaseCallData::SendMessage::WakeInsideCombiner(Flusher* flusher) {
426
+ if (grpc_trace_channel.enabled()) {
427
+ gpr_log(GPR_DEBUG, "%s SendMessage.WakeInsideCombiner st=%s%s",
428
+ base_->LogTag().c_str(), StateString(state_),
429
+ state_ == State::kBatchCompleted
430
+ ? absl::StrCat(" status=", completed_status_.ToString()).c_str()
431
+ : "");
432
+ }
433
+ switch (state_) {
434
+ case State::kInitial:
435
+ case State::kIdle:
436
+ case State::kGotBatchNoPipe:
437
+ case State::kForwardedBatch:
438
+ case State::kCancelled:
439
+ break;
440
+ case State::kGotBatch: {
441
+ state_ = State::kPushedToPipe;
442
+ auto message = GetContext<Arena>()->MakePooled<Message>();
443
+ message->payload()->Swap(batch_->payload->send_message.send_message);
444
+ message->mutable_flags() = batch_->payload->send_message.flags;
445
+ push_ = pipe_.sender.Push(std::move(message));
446
+ next_ = receiver_->Next();
447
+ }
448
+ ABSL_FALLTHROUGH_INTENDED;
449
+ case State::kPushedToPipe: {
450
+ GPR_ASSERT(push_.has_value());
451
+ auto r_push = (*push_)();
452
+ if (auto* p = absl::get_if<bool>(&r_push)) {
453
+ if (grpc_trace_channel.enabled()) {
454
+ gpr_log(GPR_DEBUG,
455
+ "%s SendMessage.WakeInsideCombiner push complete, result=%s",
456
+ base_->LogTag().c_str(), *p ? "true" : "false");
457
+ }
458
+ // We haven't pulled through yet, so this certainly shouldn't succeed.
459
+ GPR_ASSERT(!*p);
460
+ state_ = State::kCancelled;
461
+ batch_.CancelWith(absl::CancelledError(), flusher);
462
+ break;
463
+ }
464
+ GPR_ASSERT(next_.has_value());
465
+ auto r_next = (*next_)();
466
+ if (auto* p = absl::get_if<NextResult<MessageHandle>>(&r_next)) {
467
+ if (grpc_trace_channel.enabled()) {
468
+ gpr_log(GPR_DEBUG,
469
+ "%s SendMessage.WakeInsideCombiner next complete, "
470
+ "result.has_value=%s",
471
+ base_->LogTag().c_str(), p->has_value() ? "true" : "false");
472
+ }
473
+ GPR_ASSERT(p->has_value());
474
+ batch_->payload->send_message.send_message->Swap((**p)->payload());
475
+ batch_->payload->send_message.flags = (**p)->flags();
476
+ state_ = State::kForwardedBatch;
477
+ batch_.ResumeWith(flusher);
478
+ next_result_ = std::move(*p);
479
+ next_.reset();
480
+ }
481
+ } break;
482
+ case State::kBatchCompleted:
483
+ next_result_.reset();
484
+ // We've cleared out the NextResult on the pipe from promise to us, but
485
+ // there's also the pipe from us to the promise (so that the promise can
486
+ // intercept the sent messages). The push promise here is pushing into the
487
+ // latter pipe, and so we need to keep polling it until it's done, which
488
+ // depending on what happens inside the promise may take some time.
489
+ if (absl::holds_alternative<Pending>((*push_)())) break;
490
+ if (completed_status_.ok()) {
491
+ state_ = State::kIdle;
492
+ Activity::current()->ForceImmediateRepoll();
493
+ } else {
494
+ state_ = State::kCancelled;
495
+ }
496
+ push_.reset();
497
+ flusher->AddClosure(intercepted_on_complete_, completed_status_,
498
+ "batch_completed");
499
+ break;
500
+ }
501
+ }
502
+
503
+ ///////////////////////////////////////////////////////////////////////////////
504
+ // BaseCallData::ReceiveMessage
505
+
506
+ const char* BaseCallData::ReceiveMessage::StateString(State state) {
507
+ switch (state) {
508
+ case State::kInitial:
509
+ return "INITIAL";
510
+ case State::kIdle:
511
+ return "IDLE";
512
+ case State::kForwardedBatchNoPipe:
513
+ return "FORWARDED_BATCH_NO_PIPE";
514
+ case State::kForwardedBatch:
515
+ return "FORWARDED_BATCH";
516
+ case State::kBatchCompletedNoPipe:
517
+ return "BATCH_COMPLETED_NO_PIPE";
518
+ case State::kBatchCompleted:
519
+ return "BATCH_COMPLETED";
520
+ case State::kPushedToPipe:
521
+ return "PUSHED_TO_PIPE";
522
+ case State::kPulledFromPipe:
523
+ return "PULLED_FROM_PIPE";
524
+ case State::kCancelled:
525
+ return "CANCELLED";
526
+ case State::kCancelledWhilstForwarding:
527
+ return "CANCELLED_WHILST_FORWARDING";
528
+ case State::kBatchCompletedButCancelled:
529
+ return "BATCH_COMPLETED_BUT_CANCELLED";
530
+ }
531
+ return "UNKNOWN";
532
+ }
533
+
534
+ void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
535
+ if (grpc_trace_channel.enabled()) {
536
+ gpr_log(GPR_DEBUG, "%s ReceiveMessage.StartOp st=%s",
537
+ base_->LogTag().c_str(), StateString(state_));
538
+ }
539
+ switch (state_) {
540
+ case State::kInitial:
541
+ state_ = State::kForwardedBatchNoPipe;
542
+ break;
543
+ case State::kIdle:
544
+ state_ = State::kForwardedBatch;
545
+ break;
546
+ case State::kCancelledWhilstForwarding:
547
+ case State::kBatchCompletedButCancelled:
548
+ case State::kForwardedBatch:
549
+ case State::kForwardedBatchNoPipe:
550
+ case State::kBatchCompleted:
551
+ case State::kBatchCompletedNoPipe:
552
+ case State::kPushedToPipe:
553
+ case State::kPulledFromPipe:
554
+ abort();
555
+ case State::kCancelled:
556
+ return;
557
+ }
558
+ intercepted_slice_buffer_ = batch->payload->recv_message.recv_message;
559
+ intercepted_flags_ = batch->payload->recv_message.flags;
560
+ if (intercepted_flags_ == nullptr) {
561
+ intercepted_flags_ = &scratch_flags_;
562
+ *intercepted_flags_ = 0;
563
+ }
564
+ intercepted_on_complete_ = std::exchange(
565
+ batch->payload->recv_message.recv_message_ready, &on_complete_);
566
+ }
567
+
568
+ void BaseCallData::ReceiveMessage::GotPipe(PipeSender<MessageHandle>* sender) {
569
+ if (grpc_trace_channel.enabled()) {
570
+ gpr_log(GPR_DEBUG, "%s ReceiveMessage.GotPipe st=%s",
571
+ base_->LogTag().c_str(), StateString(state_));
572
+ }
573
+ switch (state_) {
574
+ case State::kInitial:
575
+ state_ = State::kIdle;
576
+ break;
577
+ case State::kForwardedBatchNoPipe:
578
+ state_ = State::kForwardedBatch;
579
+ break;
580
+ case State::kBatchCompletedNoPipe:
581
+ state_ = State::kBatchCompleted;
582
+ Activity::current()->ForceImmediateRepoll();
583
+ break;
584
+ case State::kIdle:
585
+ case State::kForwardedBatch:
586
+ case State::kBatchCompleted:
587
+ case State::kPushedToPipe:
588
+ case State::kPulledFromPipe:
589
+ case State::kCancelledWhilstForwarding:
590
+ case State::kBatchCompletedButCancelled:
591
+ abort();
592
+ case State::kCancelled:
593
+ return;
594
+ }
595
+ sender_ = sender;
596
+ }
597
+
598
+ void BaseCallData::ReceiveMessage::OnComplete(absl::Status status) {
599
+ if (grpc_trace_channel.enabled()) {
600
+ gpr_log(GPR_DEBUG, "%s ReceiveMessage.OnComplete st=%s status=%s",
601
+ base_->LogTag().c_str(), StateString(state_),
602
+ status.ToString().c_str());
603
+ }
604
+ switch (state_) {
605
+ case State::kInitial:
606
+ case State::kIdle:
607
+ case State::kPushedToPipe:
608
+ case State::kPulledFromPipe:
609
+ case State::kBatchCompleted:
610
+ case State::kBatchCompletedNoPipe:
611
+ case State::kCancelled:
612
+ case State::kBatchCompletedButCancelled:
613
+ abort();
614
+ case State::kForwardedBatchNoPipe:
615
+ state_ = State::kBatchCompletedNoPipe;
616
+ return;
617
+ case State::kForwardedBatch:
618
+ state_ = State::kBatchCompleted;
619
+ break;
620
+ case State::kCancelledWhilstForwarding:
621
+ state_ = State::kBatchCompletedButCancelled;
622
+ break;
623
+ }
624
+ completed_status_ = status;
625
+ Flusher flusher(base_);
626
+ ScopedContext ctx(base_);
627
+ base_->WakeInsideCombiner(&flusher);
628
+ }
629
+
630
+ void BaseCallData::ReceiveMessage::Done(const ServerMetadata& metadata,
631
+ Flusher* flusher) {
632
+ if (grpc_trace_channel.enabled()) {
633
+ gpr_log(GPR_DEBUG, "%s ReceiveMessage.Done st=%s md=%s",
634
+ base_->LogTag().c_str(), StateString(state_),
635
+ metadata.DebugString().c_str());
636
+ }
637
+ switch (state_) {
638
+ case State::kInitial:
639
+ case State::kIdle:
640
+ state_ = State::kCancelled;
641
+ break;
642
+ case State::kForwardedBatch:
643
+ case State::kForwardedBatchNoPipe:
644
+ state_ = State::kCancelledWhilstForwarding;
645
+ break;
646
+ case State::kPulledFromPipe:
647
+ case State::kPushedToPipe: {
648
+ auto status_code =
649
+ metadata.get(GrpcStatusMetadata()).value_or(GRPC_STATUS_OK);
650
+ GPR_ASSERT(status_code != GRPC_STATUS_OK);
651
+ push_.reset();
652
+ next_.reset();
653
+ flusher->AddClosure(intercepted_on_complete_,
654
+ StatusFromMetadata(metadata), "recv_message_done");
655
+ state_ = State::kCancelled;
656
+ } break;
657
+ case State::kBatchCompleted:
658
+ case State::kBatchCompletedNoPipe:
659
+ case State::kBatchCompletedButCancelled:
660
+ abort();
661
+ case State::kCancelledWhilstForwarding:
662
+ case State::kCancelled:
663
+ break;
664
+ }
665
+ }
666
+
667
+ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher) {
668
+ if (grpc_trace_channel.enabled()) {
669
+ gpr_log(GPR_DEBUG, "%s ReceiveMessage.WakeInsideCombiner st=%s",
670
+ base_->LogTag().c_str(), StateString(state_));
671
+ }
672
+ switch (state_) {
673
+ case State::kInitial:
674
+ case State::kIdle:
675
+ case State::kForwardedBatchNoPipe:
676
+ case State::kForwardedBatch:
677
+ case State::kCancelled:
678
+ case State::kCancelledWhilstForwarding:
679
+ case State::kBatchCompletedNoPipe:
680
+ break;
681
+ case State::kBatchCompletedButCancelled:
682
+ sender_->Close();
683
+ state_ = State::kCancelled;
684
+ flusher->AddClosure(std::exchange(intercepted_on_complete_, nullptr),
685
+ completed_status_, "recv_message");
686
+ break;
687
+ case State::kBatchCompleted:
688
+ if (completed_status_.ok() && intercepted_slice_buffer_->has_value()) {
689
+ state_ = State::kPushedToPipe;
690
+ auto message = GetContext<Arena>()->MakePooled<Message>();
691
+ message->payload()->Swap(&**intercepted_slice_buffer_);
692
+ message->mutable_flags() = *intercepted_flags_;
693
+ push_ = sender_->Push(std::move(message));
694
+ next_ = pipe_.receiver.Next();
695
+ } else {
696
+ sender_->Close();
697
+ state_ = State::kCancelled;
698
+ flusher->AddClosure(std::exchange(intercepted_on_complete_, nullptr),
699
+ completed_status_, "recv_message");
700
+ break;
701
+ }
702
+ GPR_ASSERT(state_ == State::kPushedToPipe);
703
+ ABSL_FALLTHROUGH_INTENDED;
704
+ case State::kPushedToPipe: {
705
+ GPR_ASSERT(push_.has_value());
706
+ auto r_push = (*push_)();
707
+ if (auto* p = absl::get_if<bool>(&r_push)) {
708
+ // We haven't pulled through yet, so this certainly shouldn't succeed.
709
+ GPR_ASSERT(!*p);
710
+ state_ = State::kCancelled;
711
+ break;
712
+ }
713
+ GPR_ASSERT(next_.has_value());
714
+ auto r_next = (*next_)();
715
+ if (auto* p = absl::get_if<NextResult<MessageHandle>>(&r_next)) {
716
+ next_.reset();
717
+ if (p->has_value()) {
718
+ *intercepted_slice_buffer_ = std::move(*(**p)->payload());
719
+ *intercepted_flags_ = (**p)->flags();
720
+ state_ = State::kPulledFromPipe;
721
+ } else {
722
+ *intercepted_slice_buffer_ = absl::nullopt;
723
+ *intercepted_flags_ = 0;
724
+ state_ = State::kCancelled;
725
+ }
726
+ }
727
+ }
728
+ if (state_ != State::kPulledFromPipe) break;
729
+ ABSL_FALLTHROUGH_INTENDED;
730
+ case State::kPulledFromPipe: {
731
+ GPR_ASSERT(push_.has_value());
732
+ if (!absl::holds_alternative<Pending>((*push_)())) {
733
+ state_ = State::kIdle;
734
+ push_.reset();
735
+ flusher->AddClosure(std::exchange(intercepted_on_complete_, nullptr),
736
+ absl::OkStatus(), "recv_message");
737
+ }
738
+ break;
739
+ }
740
+ }
741
+ }
742
+
208
743
  ///////////////////////////////////////////////////////////////////////////////
209
744
  // ClientCallData
210
745
 
@@ -228,6 +763,8 @@ struct ClientCallData::RecvInitialMetadata final {
228
763
  kCompleteAndSetLatch,
229
764
  // Called the original callback
230
765
  kResponded,
766
+ // Called the original callback with an error: still need to set the latch
767
+ kRespondedButNeedToSetLatch,
231
768
  };
232
769
 
233
770
  State state = kInitial;
@@ -235,6 +772,32 @@ struct ClientCallData::RecvInitialMetadata final {
235
772
  grpc_closure on_ready;
236
773
  grpc_metadata_batch* metadata = nullptr;
237
774
  Latch<ServerMetadata*>* server_initial_metadata_publisher = nullptr;
775
+
776
+ static const char* StateString(State state) {
777
+ switch (state) {
778
+ case kInitial:
779
+ return "INITIAL";
780
+ case kGotLatch:
781
+ return "GOT_LATCH";
782
+ case kRespondedToTrailingMetadataPriorToHook:
783
+ return "RESPONDED_TO_TRAILING_METADATA_PRIOR_TO_HOOK";
784
+ case kHookedWaitingForLatch:
785
+ return "HOOKED_WAITING_FOR_LATCH";
786
+ case kHookedAndGotLatch:
787
+ return "HOOKED_AND_GOT_LATCH";
788
+ case kCompleteWaitingForLatch:
789
+ return "COMPLETE_WAITING_FOR_LATCH";
790
+ case kCompleteAndGotLatch:
791
+ return "COMPLETE_AND_GOT_LATCH";
792
+ case kCompleteAndSetLatch:
793
+ return "COMPLETE_AND_SET_LATCH";
794
+ case kResponded:
795
+ return "RESPONDED";
796
+ case kRespondedButNeedToSetLatch:
797
+ return "RESPONDED_BUT_NEED_TO_SET_LATCH";
798
+ }
799
+ return "UNKNOWN";
800
+ }
238
801
  };
239
802
 
240
803
  class ClientCallData::PollContext {
@@ -252,8 +815,18 @@ class ClientCallData::PollContext {
  PollContext& operator=(const PollContext&) = delete;

  void Run() {
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s ClientCallData.PollContext.Run %s",
+ self_->LogTag().c_str(), self_->DebugString().c_str());
+ }
  GPR_ASSERT(have_scoped_activity_);
  repoll_ = false;
+ if (self_->send_message() != nullptr) {
+ self_->send_message()->WakeInsideCombiner(flusher_);
+ }
+ if (self_->receive_message() != nullptr) {
+ self_->receive_message()->WakeInsideCombiner(flusher_);
+ }
  if (self_->server_initial_metadata_latch() != nullptr) {
  switch (self_->recv_initial_metadata_->state) {
  case RecvInitialMetadata::kInitial:
@@ -264,6 +837,12 @@ class ClientCallData::PollContext {
  case RecvInitialMetadata::kResponded:
  case RecvInitialMetadata::kRespondedToTrailingMetadataPriorToHook:
  break;
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
+ self_->recv_initial_metadata_->server_initial_metadata_publisher->Set(
+ nullptr);
+ self_->recv_initial_metadata_->state =
+ RecvInitialMetadata::kResponded;
+ break;
  case RecvInitialMetadata::kCompleteAndGotLatch:
  self_->recv_initial_metadata_->state =
  RecvInitialMetadata::kCompleteAndSetLatch;
@@ -283,7 +862,7 @@ class ClientCallData::PollContext {
  flusher_->AddClosure(
  std::exchange(self_->recv_initial_metadata_->original_on_ready,
  nullptr),
- GRPC_ERROR_NONE,
+ absl::OkStatus(),
  "wake_inside_combiner:recv_initial_metadata_ready");
  }
  } break;
@@ -298,20 +877,30 @@ class ClientCallData::PollContext {
  case SendInitialState::kForwarded: {
  // Poll the promise once since we're waiting for it.
  Poll<ServerMetadataHandle> poll = self_->promise_();
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s ClientCallData.PollContext.Run: poll=%s",
+ self_->LogTag().c_str(),
+ PollToString(poll, [](const ServerMetadataHandle& h) {
+ return h->DebugString();
+ }).c_str());
+ }
  if (auto* r = absl::get_if<ServerMetadataHandle>(&poll)) {
- auto* md = UnwrapMetadata(std::move(*r));
- bool destroy_md = true;
+ auto md = std::move(*r);
+ if (self_->send_message() != nullptr) {
+ self_->send_message()->Done(*md);
+ }
+ if (self_->receive_message() != nullptr) {
+ self_->receive_message()->Done(*md, flusher_);
+ }
  if (self_->recv_trailing_state_ == RecvTrailingState::kComplete) {
- if (self_->recv_trailing_metadata_ != md) {
+ if (self_->recv_trailing_metadata_ != md.get()) {
  *self_->recv_trailing_metadata_ = std::move(*md);
- } else {
- destroy_md = false;
  }
  self_->recv_trailing_state_ = RecvTrailingState::kResponded;
  flusher_->AddClosure(
  std::exchange(self_->original_recv_trailing_metadata_ready_,
  nullptr),
- GRPC_ERROR_NONE, "wake_inside_combiner:recv_trailing_ready:1");
+ absl::OkStatus(), "wake_inside_combiner:recv_trailing_ready:1");
  if (self_->recv_initial_metadata_ != nullptr) {
  switch (self_->recv_initial_metadata_->state) {
  case RecvInitialMetadata::kInitial:
@@ -321,6 +910,7 @@ class ClientCallData::PollContext {
  break;
  case RecvInitialMetadata::
  kRespondedToTrailingMetadataPriorToHook:
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
  abort(); // not reachable
  break;
  case RecvInitialMetadata::kHookedWaitingForLatch:
@@ -336,24 +926,13 @@ class ClientCallData::PollContext {
  std::exchange(
  self_->recv_initial_metadata_->original_on_ready,
  nullptr),
- GRPC_ERROR_CANCELLED,
+ absl::CancelledError(),
  "wake_inside_combiner:recv_initial_metadata_ready");
  }
  }
  } else {
- GPR_ASSERT(*md->get_pointer(GrpcStatusMetadata()) !=
- GRPC_STATUS_OK);
- grpc_error_handle error = grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "early return from promise based filter"),
- GRPC_ERROR_INT_GRPC_STATUS,
- *md->get_pointer(GrpcStatusMetadata()));
- if (auto* message = md->get_pointer(GrpcMessageMetadata())) {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
- message->as_string_view());
- }
- GRPC_ERROR_UNREF(self_->cancelled_error_);
- self_->cancelled_error_ = GRPC_ERROR_REF(error);
+ self_->cancelled_error_ = StatusFromMetadata(*md);
+ GPR_ASSERT(!self_->cancelled_error_.ok());
  if (self_->recv_initial_metadata_ != nullptr) {
  switch (self_->recv_initial_metadata_->state) {
  case RecvInitialMetadata::kInitial:
@@ -367,6 +946,7 @@ class ClientCallData::PollContext {
  break;
  case RecvInitialMetadata::
  kRespondedToTrailingMetadataPriorToHook:
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
  abort(); // not reachable
  break;
  case RecvInitialMetadata::kCompleteWaitingForLatch:
@@ -378,18 +958,19 @@ class ClientCallData::PollContext {
  std::exchange(
  self_->recv_initial_metadata_->original_on_ready,
  nullptr),
- GRPC_ERROR_REF(error),
+ self_->cancelled_error_,
  "wake_inside_combiner:recv_initial_metadata_ready");
  }
  }
  if (self_->send_initial_state_ == SendInitialState::kQueued) {
  self_->send_initial_state_ = SendInitialState::kCancelled;
- self_->send_initial_metadata_batch_.CancelWith(error, flusher_);
+ self_->send_initial_metadata_batch_.CancelWith(
+ self_->cancelled_error_, flusher_);
  } else {
  GPR_ASSERT(
  self_->recv_trailing_state_ == RecvTrailingState::kInitial ||
  self_->recv_trailing_state_ == RecvTrailingState::kForwarded);
- self_->call_combiner()->Cancel(GRPC_ERROR_REF(error));
+ self_->call_combiner()->Cancel(self_->cancelled_error_);
  CapturedBatch b(grpc_make_transport_stream_op(GRPC_CLOSURE_CREATE(
  [](void* p, grpc_error_handle) {
  GRPC_CALL_COMBINER_STOP(static_cast<CallCombiner*>(p),
@@ -397,17 +978,15 @@ class ClientCallData::PollContext {
  },
  self_->call_combiner(), nullptr)));
  b->cancel_stream = true;
- b->payload->cancel_stream.cancel_error = error;
+ b->payload->cancel_stream.cancel_error = self_->cancelled_error_;
  b.ResumeWith(flusher_);
  }
+ self_->cancelling_metadata_ = std::move(md);
  self_->recv_trailing_state_ = RecvTrailingState::kCancelled;
  }
- if (destroy_md) {
- md->~grpc_metadata_batch();
- }
+ self_->promise_ = ArenaPromise<ServerMetadataHandle>();
  scoped_activity_.Destroy();
  have_scoped_activity_ = false;
- self_->promise_ = ArenaPromise<ServerMetadataHandle>();
  }
  } break;
  case SendInitialState::kInitial:
@@ -420,7 +999,7 @@ class ClientCallData::PollContext {
  flusher_->AddClosure(
  std::exchange(self_->original_recv_trailing_metadata_ready_,
  nullptr),
- GRPC_ERROR_NONE, "wake_inside_combiner:recv_trailing_ready:2");
+ absl::OkStatus(), "wake_inside_combiner:recv_trailing_ready:2");
  }
  break;
  }
@@ -445,12 +1024,12 @@ class ClientCallData::PollContext {
  };
  // Unique ptr --> release to suppress clang-tidy warnings about allocating
  // in a destructor.
- auto* p = absl::make_unique<NextPoll>().release();
+ auto* p = std::make_unique<NextPoll>().release();
  p->call_stack = self_->call_stack();
  p->call_data = self_;
  GRPC_CALL_STACK_REF(self_->call_stack(), "re-poll");
  GRPC_CLOSURE_INIT(p, run, p, nullptr);
- flusher_->AddClosure(p, GRPC_ERROR_NONE, "re-poll");
+ flusher_->AddClosure(p, absl::OkStatus(), "re-poll");
  }
  }

@@ -482,7 +1061,6 @@ ClientCallData::ClientCallData(grpc_call_element* elem,

  ClientCallData::~ClientCallData() {
  GPR_ASSERT(poll_ctx_ == nullptr);
- GRPC_ERROR_UNREF(cancelled_error_);
  if (recv_initial_metadata_ != nullptr) {
  recv_initial_metadata_->~RecvInitialMetadata();
  }
@@ -494,6 +1072,58 @@ void ClientCallData::ForceImmediateRepoll() {
  poll_ctx_->Repoll();
  }

+ const char* ClientCallData::StateString(SendInitialState state) {
+ switch (state) {
+ case SendInitialState::kInitial:
+ return "INITIAL";
+ case SendInitialState::kQueued:
+ return "QUEUED";
+ case SendInitialState::kForwarded:
+ return "FORWARDED";
+ case SendInitialState::kCancelled:
+ return "CANCELLED";
+ }
+ return "UNKNOWN";
+ }
+
+ const char* ClientCallData::StateString(RecvTrailingState state) {
+ switch (state) {
+ case RecvTrailingState::kInitial:
+ return "INITIAL";
+ case RecvTrailingState::kQueued:
+ return "QUEUED";
+ case RecvTrailingState::kComplete:
+ return "COMPLETE";
+ case RecvTrailingState::kForwarded:
+ return "FORWARDED";
+ case RecvTrailingState::kCancelled:
+ return "CANCELLED";
+ case RecvTrailingState::kResponded:
+ return "RESPONDED";
+ }
+ return "UNKNOWN";
+ }
+
+ std::string ClientCallData::DebugString() const {
+ std::vector<absl::string_view> captured;
+ if (send_initial_metadata_batch_.is_captured()) {
+ captured.push_back("send_initial_metadata");
+ }
+ if (send_message() != nullptr && send_message()->HaveCapturedBatch()) {
+ captured.push_back("send_message");
+ }
+ return absl::StrCat(
+ "has_promise=", promise_.has_value() ? "true" : "false",
+ " sent_initial_state=", StateString(send_initial_state_),
+ " recv_trailing_state=", StateString(recv_trailing_state_), " captured={",
+ absl::StrJoin(captured, ","), "}",
+ server_initial_metadata_latch() == nullptr
+ ? ""
+ : absl::StrCat(" recv_initial_metadata=",
+ RecvInitialMetadata::StateString(
+ recv_initial_metadata_->state)));
+ }
+
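The StateString and DebugString helpers added above follow a common C++ idiom: switch exhaustively over an enum to obtain a stable label, then concatenate the labels into a one-line state dump for trace output. A minimal, self-contained sketch of that idiom (the enum and function names here are illustrative, not the gRPC types):

#include <string>

// Illustrative stand-in for states such as SendInitialState above.
enum class SendState { kInitial, kQueued, kForwarded, kCancelled };

// Exhaustive switch: the compiler can warn when a new enumerator is added,
// and the trailing return covers out-of-range values.
const char* StateString(SendState state) {
  switch (state) {
    case SendState::kInitial:
      return "INITIAL";
    case SendState::kQueued:
      return "QUEUED";
    case SendState::kForwarded:
      return "FORWARDED";
    case SendState::kCancelled:
      return "CANCELLED";
  }
  return "UNKNOWN";
}

// One-line dump of the interesting call state, cheap to build and log.
std::string DebugString(bool has_promise, SendState send_state) {
  std::string out = "has_promise=";
  out += has_promise ? "true" : "false";
  out += " send_state=";
  out += StateString(send_state);
  return out;
}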
  // Handle one grpc_transport_stream_op_batch
  void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  // Fake out the activity based context.
@@ -501,6 +1131,11 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  CapturedBatch batch(b);
  Flusher flusher(this);

+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s StartBatch %s", LogTag().c_str(),
+ DebugString().c_str());
+ }
+
  // If this is a cancel stream, cancel anything we have pending and propagate
  // the cancellation.
  if (batch->cancel_stream) {
@@ -508,9 +1143,10 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  !batch->send_trailing_metadata && !batch->send_message &&
  !batch->recv_initial_metadata && !batch->recv_message &&
  !batch->recv_trailing_metadata);
- Cancel(batch->payload->cancel_stream.cancel_error);
+ PollContext poll_ctx(this, &flusher);
+ Cancel(batch->payload->cancel_stream.cancel_error, &flusher);
+ poll_ctx.Run();
  if (is_last()) {
- GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
  batch.CompleteWith(&flusher);
  } else {
  batch.ResumeWith(&flusher);
@@ -537,6 +1173,7 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  case RecvInitialMetadata::kCompleteAndGotLatch:
  case RecvInitialMetadata::kCompleteAndSetLatch:
  case RecvInitialMetadata::kResponded:
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
  abort(); // unreachable
  }
  if (hook) {
@@ -554,13 +1191,23 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  }
  }

+ bool wake = false;
+ if (send_message() != nullptr && batch->send_message) {
+ send_message()->StartOp(batch);
+ wake = true;
+ }
+ if (receive_message() != nullptr && batch->recv_message) {
+ receive_message()->StartOp(batch);
+ wake = true;
+ }
+
  // send_initial_metadata: seeing this triggers the start of the promise part
  // of this filter.
  if (batch->send_initial_metadata) {
  // If we're already cancelled, just terminate the batch.
  if (send_initial_state_ == SendInitialState::kCancelled ||
  recv_trailing_state_ == RecvTrailingState::kCancelled) {
- batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
+ batch.CancelWith(cancelled_error_, &flusher);
  } else {
  // Otherwise, we should not have seen a send_initial_metadata op yet.
  GPR_ASSERT(send_initial_state_ == SendInitialState::kInitial);
@@ -575,35 +1222,43 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  send_initial_metadata_batch_ = batch;
  // And kick start the promise.
  StartPromise(&flusher);
+ wake = false;
  }
  } else if (batch->recv_trailing_metadata) {
  // recv_trailing_metadata *without* send_initial_metadata: hook it so we
  // can respond to it, and push it down.
  if (recv_trailing_state_ == RecvTrailingState::kCancelled) {
- batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
+ batch.CancelWith(cancelled_error_, &flusher);
  } else {
  GPR_ASSERT(recv_trailing_state_ == RecvTrailingState::kInitial);
  recv_trailing_state_ = RecvTrailingState::kForwarded;
  HookRecvTrailingMetadata(batch);
  }
- } else if (!GRPC_ERROR_IS_NONE(cancelled_error_)) {
- batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
+ } else if (!cancelled_error_.ok()) {
+ batch.CancelWith(cancelled_error_, &flusher);
+ }
+
+ if (wake) {
+ PollContext(this, &flusher).Run();
  }

  if (batch.is_captured()) {
  if (!is_last()) {
  batch.ResumeWith(&flusher);
  } else {
- batch.CancelWith(GRPC_ERROR_CANCELLED, &flusher);
+ batch.CancelWith(absl::CancelledError(), &flusher);
  }
  }
  }

  // Handle cancellation.
- void ClientCallData::Cancel(grpc_error_handle error) {
+ void ClientCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s Cancel error=%s", LogTag().c_str(),
+ error.ToString().c_str());
+ }
  // Track the latest reason for cancellation.
- GRPC_ERROR_UNREF(cancelled_error_);
- cancelled_error_ = GRPC_ERROR_REF(error);
+ cancelled_error_ = error;
  // Stop running the promise.
  promise_ = ArenaPromise<ServerMetadataHandle>();
  // If we have an op queued, fail that op.
@@ -613,27 +1268,7 @@ void ClientCallData::Cancel(grpc_error_handle error) {
  if (recv_trailing_state_ == RecvTrailingState::kQueued) {
  recv_trailing_state_ = RecvTrailingState::kCancelled;
  }
- struct FailBatch : public grpc_closure {
- CapturedBatch batch;
- ClientCallData* call;
- };
- auto fail = [](void* p, grpc_error_handle error) {
- auto* f = static_cast<FailBatch*>(p);
- {
- Flusher flusher(f->call);
- f->batch.CancelWith(GRPC_ERROR_REF(error), &flusher);
- GRPC_CALL_STACK_UNREF(f->call->call_stack(), "cancel pending batch");
- }
- delete f;
- };
- auto* b = new FailBatch();
- GRPC_CLOSURE_INIT(b, fail, b, nullptr);
- b->batch = std::move(send_initial_metadata_batch_);
- b->call = this;
- GRPC_CALL_STACK_REF(call_stack(), "cancel pending batch");
- GRPC_CALL_COMBINER_START(call_combiner(), b,
- GRPC_ERROR_REF(cancelled_error_),
- "cancel pending batch");
+ send_initial_metadata_batch_.CancelWith(error, flusher);
  } else {
  send_initial_state_ = SendInitialState::kCancelled;
  }
@@ -646,7 +1281,7 @@ void ClientCallData::Cancel(grpc_error_handle error) {
  GRPC_CALL_COMBINER_START(
  call_combiner(),
  std::exchange(recv_initial_metadata_->original_on_ready, nullptr),
- GRPC_ERROR_REF(error), "propagate cancellation");
+ error, "propagate cancellation");
  break;
  case RecvInitialMetadata::kInitial:
  case RecvInitialMetadata::kGotLatch:
@@ -655,8 +1290,17 @@ void ClientCallData::Cancel(grpc_error_handle error) {
  case RecvInitialMetadata::kHookedAndGotLatch:
  case RecvInitialMetadata::kResponded:
  break;
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
+ abort();
+ break;
  }
  }
+ if (send_message() != nullptr) {
+ send_message()->Done(*ServerMetadataFromStatus(error));
+ }
+ if (receive_message() != nullptr) {
+ receive_message()->Done(*ServerMetadataFromStatus(error), flusher);
+ }
  }

  // Begin running the promise - which will ultimately take some initial
@@ -670,7 +1314,8 @@ void ClientCallData::StartPromise(Flusher* flusher) {
  promise_ = filter->MakeCallPromise(
  CallArgs{WrapMetadata(send_initial_metadata_batch_->payload
  ->send_initial_metadata.send_initial_metadata),
- server_initial_metadata_latch()},
+ server_initial_metadata_latch(), outgoing_messages_pipe(),
+ incoming_messages_pipe()},
  [this](CallArgs call_args) {
  return MakeNextPromise(std::move(call_args));
  });
@@ -678,36 +1323,60 @@
  }

  void ClientCallData::RecvInitialMetadataReady(grpc_error_handle error) {
- ScopedContext context(this);
- switch (recv_initial_metadata_->state) {
- case RecvInitialMetadata::kHookedWaitingForLatch:
- recv_initial_metadata_->state =
- RecvInitialMetadata::kCompleteWaitingForLatch;
- break;
- case RecvInitialMetadata::kHookedAndGotLatch:
- recv_initial_metadata_->state = RecvInitialMetadata::kCompleteAndGotLatch;
- break;
- case RecvInitialMetadata::kInitial:
- case RecvInitialMetadata::kGotLatch:
- case RecvInitialMetadata::kCompleteWaitingForLatch:
- case RecvInitialMetadata::kCompleteAndGotLatch:
- case RecvInitialMetadata::kCompleteAndSetLatch:
- case RecvInitialMetadata::kResponded:
- case RecvInitialMetadata::kRespondedToTrailingMetadataPriorToHook:
- abort(); // unreachable
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s ClientCallData.RecvInitialMetadataReady %s",
+ LogTag().c_str(), DebugString().c_str());
  }
+ ScopedContext context(this);
  Flusher flusher(this);
- if (!GRPC_ERROR_IS_NONE(error)) {
- recv_initial_metadata_->state = RecvInitialMetadata::kResponded;
+ if (!error.ok()) {
+ switch (recv_initial_metadata_->state) {
+ case RecvInitialMetadata::kHookedWaitingForLatch:
+ recv_initial_metadata_->state = RecvInitialMetadata::kResponded;
+ break;
+ case RecvInitialMetadata::kHookedAndGotLatch:
+ recv_initial_metadata_->state =
+ RecvInitialMetadata::kRespondedButNeedToSetLatch;
+ break;
+ case RecvInitialMetadata::kInitial:
+ case RecvInitialMetadata::kGotLatch:
+ case RecvInitialMetadata::kCompleteWaitingForLatch:
+ case RecvInitialMetadata::kCompleteAndGotLatch:
+ case RecvInitialMetadata::kCompleteAndSetLatch:
+ case RecvInitialMetadata::kResponded:
+ case RecvInitialMetadata::kRespondedToTrailingMetadataPriorToHook:
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
+ abort(); // unreachable
+ }
  flusher.AddClosure(
  std::exchange(recv_initial_metadata_->original_on_ready, nullptr),
- GRPC_ERROR_REF(error), "propagate cancellation");
+ error, "propagate cancellation");
  } else if (send_initial_state_ == SendInitialState::kCancelled ||
  recv_trailing_state_ == RecvTrailingState::kResponded) {
  recv_initial_metadata_->state = RecvInitialMetadata::kResponded;
  flusher.AddClosure(
  std::exchange(recv_initial_metadata_->original_on_ready, nullptr),
- GRPC_ERROR_REF(cancelled_error_), "propagate cancellation");
+ cancelled_error_, "propagate cancellation");
+ } else {
+ switch (recv_initial_metadata_->state) {
+ case RecvInitialMetadata::kHookedWaitingForLatch:
+ recv_initial_metadata_->state =
+ RecvInitialMetadata::kCompleteWaitingForLatch;
+ break;
+ case RecvInitialMetadata::kHookedAndGotLatch:
+ recv_initial_metadata_->state =
+ RecvInitialMetadata::kCompleteAndGotLatch;
+ break;
+ case RecvInitialMetadata::kInitial:
+ case RecvInitialMetadata::kGotLatch:
+ case RecvInitialMetadata::kCompleteWaitingForLatch:
+ case RecvInitialMetadata::kCompleteAndGotLatch:
+ case RecvInitialMetadata::kCompleteAndSetLatch:
+ case RecvInitialMetadata::kResponded:
+ case RecvInitialMetadata::kRespondedToTrailingMetadataPriorToHook:
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
+ abort(); // unreachable
+ }
  }
  WakeInsideCombiner(&flusher);
  }
@@ -730,6 +1399,10 @@ void ClientCallData::HookRecvTrailingMetadata(CapturedBatch batch) {
  // - return a wrapper around PollTrailingMetadata as the promise.
  ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
  CallArgs call_args) {
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s ClientCallData.MakeNextPromise %s", LogTag().c_str(),
+ DebugString().c_str());
+ }
  GPR_ASSERT(poll_ctx_ != nullptr);
  GPR_ASSERT(send_initial_state_ == SendInitialState::kQueued);
  send_initial_metadata_batch_->payload->send_initial_metadata
@@ -762,11 +1435,22 @@ ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
  case RecvInitialMetadata::kCompleteAndSetLatch:
  case RecvInitialMetadata::kResponded:
  case RecvInitialMetadata::kRespondedToTrailingMetadataPriorToHook:
+ case RecvInitialMetadata::kRespondedButNeedToSetLatch:
  abort(); // unreachable
  }
  } else {
  GPR_ASSERT(call_args.server_initial_metadata == nullptr);
  }
+ if (send_message() != nullptr) {
+ send_message()->GotPipe(call_args.outgoing_messages);
+ } else {
+ GPR_ASSERT(call_args.outgoing_messages == nullptr);
+ }
+ if (receive_message() != nullptr) {
+ receive_message()->GotPipe(call_args.incoming_messages);
+ } else {
+ GPR_ASSERT(call_args.incoming_messages == nullptr);
+ }
  return ArenaPromise<ServerMetadataHandle>(
  [this]() { return PollTrailingMetadata(); });
  }
@@ -776,6 +1460,10 @@ ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
  // All polls: await receiving the trailing metadata, then return it to the
  // application.
  Poll<ServerMetadataHandle> ClientCallData::PollTrailingMetadata() {
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s ClientCallData.PollTrailingMetadata %s",
+ LogTag().c_str(), DebugString().c_str());
+ }
  GPR_ASSERT(poll_ctx_ != nullptr);
  if (send_initial_state_ == SendInitialState::kQueued) {
  // First poll: pass the send_initial_metadata op down the stack.
@@ -822,19 +1510,27 @@ void ClientCallData::RecvTrailingMetadataReadyCallback(

  void ClientCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
  Flusher flusher(this);
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG,
+ "%s ClientCallData.RecvTrailingMetadataReady error=%s md=%s",
+ LogTag().c_str(), error.ToString().c_str(),
+ recv_trailing_metadata_->DebugString().c_str());
+ }
  // If we were cancelled prior to receiving this callback, we should simply
  // forward the callback up with the same error.
  if (recv_trailing_state_ == RecvTrailingState::kCancelled) {
+ if (cancelling_metadata_.get() != nullptr) {
+ *recv_trailing_metadata_ = std::move(*cancelling_metadata_);
+ }
  if (grpc_closure* call_closure =
  std::exchange(original_recv_trailing_metadata_ready_, nullptr)) {
- flusher.AddClosure(call_closure, GRPC_ERROR_REF(error),
- "propagate failure");
+ flusher.AddClosure(call_closure, error, "propagate failure");
  }
  return;
  }
  // If there was an error, we'll put that into the trailing metadata and
  // proceed as if there was not.
- if (!GRPC_ERROR_IS_NONE(error)) {
+ if (!error.ok()) {
  SetStatusFromError(recv_trailing_metadata_, error);
  }
  // Record that we've got the callback.
@@ -855,7 +1551,7 @@ void ClientCallData::SetStatusFromError(grpc_metadata_batch* metadata,
  metadata->Set(GrpcStatusMetadata(), status_code);
  metadata->Set(GrpcMessageMetadata(), Slice::FromCopiedString(status_details));
  metadata->GetOrCreatePointer(GrpcStatusContext())
- ->emplace_back(grpc_error_std_string(error));
+ ->emplace_back(StatusToString(error));
  }

  // Wakeup and poll the promise if appropriate.
@@ -885,6 +1581,26 @@ struct ServerCallData::SendInitialMetadata {
  State state = kInitial;
  CapturedBatch batch;
  Latch<ServerMetadata*>* server_initial_metadata_publisher = nullptr;
+
+ static const char* StateString(State state) {
+ switch (state) {
+ case kInitial:
+ return "INITIAL";
+ case kGotLatch:
+ return "GOT_LATCH";
+ case kQueuedWaitingForLatch:
+ return "QUEUED_WAITING_FOR_LATCH";
+ case kQueuedAndGotLatch:
+ return "QUEUED_AND_GOT_LATCH";
+ case kQueuedAndSetLatch:
+ return "QUEUED_AND_SET_LATCH";
+ case kForwarded:
+ return "FORWARDED";
+ case kCancelled:
+ return "CANCELLED";
+ }
+ return "UNKNOWN";
+ }
  };

  class ServerCallData::PollContext {
@@ -917,12 +1633,12 @@ class ServerCallData::PollContext {
  GRPC_CALL_STACK_UNREF(next_poll->call_stack, "re-poll");
  delete next_poll;
  };
- auto* p = absl::make_unique<NextPoll>().release();
+ auto* p = std::make_unique<NextPoll>().release();
  p->call_stack = self_->call_stack();
  p->call_data = self_;
  GRPC_CALL_STACK_REF(self_->call_stack(), "re-poll");
  GRPC_CLOSURE_INIT(p, run, p, nullptr);
- flusher_->AddClosure(p, GRPC_ERROR_NONE, "re-poll");
+ flusher_->AddClosure(p, absl::OkStatus(), "re-poll");
  }
  }

@@ -937,6 +1653,36 @@ class ServerCallData::PollContext {
  bool have_scoped_activity_;
  };

+ const char* ServerCallData::StateString(RecvInitialState state) {
+ switch (state) {
+ case RecvInitialState::kInitial:
+ return "INITIAL";
+ case RecvInitialState::kForwarded:
+ return "FORWARDED";
+ case RecvInitialState::kComplete:
+ return "COMPLETE";
+ case RecvInitialState::kResponded:
+ return "RESPONDED";
+ }
+ return "UNKNOWN";
+ }
+
+ const char* ServerCallData::StateString(SendTrailingState state) {
+ switch (state) {
+ case SendTrailingState::kInitial:
+ return "INITIAL";
+ case SendTrailingState::kForwarded:
+ return "FORWARDED";
+ case SendTrailingState::kQueuedBehindSendMessage:
+ return "QUEUED_BEHIND_SEND_MESSAGE";
+ case SendTrailingState::kQueued:
+ return "QUEUED";
+ case SendTrailingState::kCancelled:
+ return "CANCELLED";
+ }
+ return "UNKNOWN";
+ }
+
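The ServerCallData methods below add logging that first checks grpc_trace_channel.enabled() so the relatively expensive DebugString is only built when channel tracing is switched on. A rough, self-contained stand-in for that pattern, using a plain bool and fprintf in place of gRPC's TraceFlag and gpr_log:

#include <cstdio>
#include <string>

// Illustrative stand-in for a runtime-toggled trace flag; in gRPC the channel
// trace flag is enabled via the GRPC_TRACE environment variable.
static bool trace_channel_enabled = false;

// Placeholder for a state dump that is costly to construct.
std::string ExpensiveDebugString() { return "state=IDLE captured={}"; }

void MaybeTraceStartBatch(const std::string& log_tag) {
  // Test the flag before building the string so tracing costs nothing
  // when it is disabled.
  if (trace_channel_enabled) {
    std::fprintf(stderr, "%s StartBatch %s\n", log_tag.c_str(),
                 ExpensiveDebugString().c_str());
  }
}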
  ServerCallData::ServerCallData(grpc_call_element* elem,
  const grpc_call_element_args* args,
  uint8_t flags)
@@ -947,11 +1693,17 @@ ServerCallData::ServerCallData(grpc_call_element* elem,
  GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_,
  RecvInitialMetadataReadyCallback, this,
  grpc_schedule_on_exec_ctx);
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
+ RecvTrailingMetadataReadyCallback, this,
+ grpc_schedule_on_exec_ctx);
  }

  ServerCallData::~ServerCallData() {
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s ~ServerCallData %s", LogTag().c_str(),
+ DebugString().c_str());
+ }
  GPR_ASSERT(poll_ctx_ == nullptr);
- GRPC_ERROR_UNREF(cancelled_error_);
  }

  // Activity implementation.
@@ -968,6 +1720,11 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  Flusher flusher(this);
  bool wake = false;

+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s StartBatch: %s", LogTag().c_str(),
+ DebugString().c_str());
+ }
+
  // If this is a cancel stream, cancel anything we have pending and
  // propagate the cancellation.
  if (batch->cancel_stream) {
@@ -975,10 +1732,9 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  !batch->send_trailing_metadata && !batch->send_message &&
  !batch->recv_initial_metadata && !batch->recv_message &&
  !batch->recv_trailing_metadata);
- Cancel(GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error),
- &flusher);
+ PollContext poll_ctx(this, &flusher);
+ Completed(batch->payload->cancel_stream.cancel_error, &flusher);
  if (is_last()) {
- GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
  batch.CompleteWith(&flusher);
  } else {
  batch.ResumeWith(&flusher);
@@ -1004,6 +1760,16 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  recv_initial_state_ = RecvInitialState::kForwarded;
  }

+ // Hook recv_trailing_metadata so we can see cancellation from the client.
+ if (batch->recv_trailing_metadata) {
+ recv_trailing_metadata_ =
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata;
+ original_recv_trailing_metadata_ready_ =
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+ &recv_trailing_metadata_ready_;
+ }
+
  // send_initial_metadata
  if (send_initial_metadata_ != nullptr && batch->send_initial_metadata) {
  switch (send_initial_metadata_->state) {
@@ -1015,7 +1781,9 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  send_initial_metadata_->state = SendInitialMetadata::kQueuedAndGotLatch;
  break;
  case SendInitialMetadata::kCancelled:
- batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
+ batch.CancelWith(
+ cancelled_error_.ok() ? absl::CancelledError() : cancelled_error_,
+ &flusher);
  break;
  case SendInitialMetadata::kQueuedAndGotLatch:
  case SendInitialMetadata::kQueuedWaitingForLatch:
@@ -1027,20 +1795,36 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  wake = true;
  }

+ if (send_message() != nullptr && batch->send_message) {
+ send_message()->StartOp(batch);
+ wake = true;
+ }
+ if (receive_message() != nullptr && batch->recv_message) {
+ receive_message()->StartOp(batch);
+ wake = true;
+ }
+
  // send_trailing_metadata
  if (batch.is_captured() && batch->send_trailing_metadata) {
  switch (send_trailing_state_) {
  case SendTrailingState::kInitial:
  send_trailing_metadata_batch_ = batch;
- send_trailing_state_ = SendTrailingState::kQueued;
- wake = true;
+ if (send_message() != nullptr && !send_message()->IsIdle()) {
+ send_trailing_state_ = SendTrailingState::kQueuedBehindSendMessage;
+ } else {
+ send_trailing_state_ = SendTrailingState::kQueued;
+ wake = true;
+ }
  break;
  case SendTrailingState::kQueued:
+ case SendTrailingState::kQueuedBehindSendMessage:
  case SendTrailingState::kForwarded:
  abort(); // unreachable
  break;
  case SendTrailingState::kCancelled:
- batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
+ batch.CancelWith(
+ cancelled_error_.ok() ? absl::CancelledError() : cancelled_error_,
+ &flusher);
  break;
  }
  }
@@ -1050,15 +1834,14 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
  }

  // Handle cancellation.
- void ServerCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
+ void ServerCallData::Completed(grpc_error_handle error, Flusher* flusher) {
  // Track the latest reason for cancellation.
- GRPC_ERROR_UNREF(cancelled_error_);
  cancelled_error_ = error;
  // Stop running the promise.
  promise_ = ArenaPromise<ServerMetadataHandle>();
  if (send_trailing_state_ == SendTrailingState::kQueued) {
  send_trailing_state_ = SendTrailingState::kCancelled;
- send_trailing_metadata_batch_.CancelWith(GRPC_ERROR_REF(error), flusher);
+ send_trailing_metadata_batch_.CancelWith(error, flusher);
  } else {
  send_trailing_state_ = SendTrailingState::kCancelled;
  }
@@ -1072,16 +1855,21 @@ void ServerCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
  case SendInitialMetadata::kQueuedWaitingForLatch:
  case SendInitialMetadata::kQueuedAndGotLatch:
  case SendInitialMetadata::kQueuedAndSetLatch:
- send_initial_metadata_->batch.CancelWith(GRPC_ERROR_REF(error),
- flusher);
+ send_initial_metadata_->batch.CancelWith(error, flusher);
  break;
  }
  send_initial_metadata_->state = SendInitialMetadata::kCancelled;
  }
  if (auto* closure =
  std::exchange(original_recv_initial_metadata_ready_, nullptr)) {
- flusher->AddClosure(closure, GRPC_ERROR_REF(error),
- "original_recv_initial_metadata");
+ flusher->AddClosure(closure, error, "original_recv_initial_metadata");
+ }
+ ScopedContext ctx(this);
+ if (send_message() != nullptr) {
+ send_message()->Done(*ServerMetadataFromStatus(error));
+ }
+ if (receive_message() != nullptr) {
+ receive_message()->Done(*ServerMetadataFromStatus(error), flusher);
  }
  }

@@ -1120,6 +1908,16 @@ ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
  } else {
  GPR_ASSERT(call_args.server_initial_metadata == nullptr);
  }
+ if (send_message() != nullptr) {
+ send_message()->GotPipe(call_args.outgoing_messages);
+ } else {
+ GPR_ASSERT(call_args.outgoing_messages == nullptr);
+ }
+ if (receive_message() != nullptr) {
+ receive_message()->GotPipe(call_args.incoming_messages);
+ } else {
+ GPR_ASSERT(call_args.incoming_messages == nullptr);
+ }
  return ArenaPromise<ServerMetadataHandle>(
  [this]() { return PollTrailingMetadata(); });
  }
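PollTrailingMetadata in the next hunk returns either Pending{} or a ready value, which is the Poll<T> convention used throughout this file: a variant that is either "not ready yet" or the result. A simplified, self-contained sketch of that idea (gRPC defines its own Poll and Pending types elsewhere in this release; this is not that implementation):

#include <string>
#include <utility>
#include <variant>

// Marker meaning "no result yet; poll again later".
struct Pending {};

// A poll result is either Pending or a value of type T.
template <typename T>
using Poll = std::variant<Pending, T>;

// Example poller: the value only becomes available once it has been queued.
Poll<std::string> PollTrailingStatus(bool queued) {
  if (!queued) return Pending{};
  return std::string("grpc-status: 0");
}

// Caller pattern mirrors the filter code: extract the ready alternative,
// otherwise arrange to be repolled.
bool Step(bool queued, std::string* out) {
  Poll<std::string> poll = PollTrailingStatus(queued);
  if (auto* value = std::get_if<std::string>(&poll)) {
    *out = std::move(*value);
    return true;  // ready
  }
  return false;  // still pending
}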
@@ -1130,6 +1928,7 @@ ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
  Poll<ServerMetadataHandle> ServerCallData::PollTrailingMetadata() {
  switch (send_trailing_state_) {
  case SendTrailingState::kInitial:
+ case SendTrailingState::kQueuedBehindSendMessage:
  return Pending{};
  case SendTrailingState::kQueued:
  return WrapMetadata(send_trailing_metadata_batch_->payload
@@ -1145,20 +1944,43 @@ Poll<ServerMetadataHandle> ServerCallData::PollTrailingMetadata() {
  GPR_UNREACHABLE_CODE(return Pending{});
  }

+ void ServerCallData::RecvTrailingMetadataReadyCallback(
+ void* arg, grpc_error_handle error) {
+ static_cast<ServerCallData*>(arg)->RecvTrailingMetadataReady(
+ std::move(error));
+ }
+
+ void ServerCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s: RecvTrailingMetadataReady error=%s md=%s",
+ LogTag().c_str(), error.ToString().c_str(),
+ recv_trailing_metadata_->DebugString().c_str());
+ }
+ Flusher flusher(this);
+ PollContext poll_ctx(this, &flusher);
+ Completed(error, &flusher);
+ flusher.AddClosure(original_recv_trailing_metadata_ready_, std::move(error),
+ "continue recv trailing");
+ }
+
  void ServerCallData::RecvInitialMetadataReadyCallback(void* arg,
  grpc_error_handle error) {
- static_cast<ServerCallData*>(arg)->RecvInitialMetadataReady(error);
+ static_cast<ServerCallData*>(arg)->RecvInitialMetadataReady(std::move(error));
  }

  void ServerCallData::RecvInitialMetadataReady(grpc_error_handle error) {
  Flusher flusher(this);
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s: RecvInitialMetadataReady %s", LogTag().c_str(),
+ error.ToString().c_str());
+ }
  GPR_ASSERT(recv_initial_state_ == RecvInitialState::kForwarded);
  // If there was an error we just propagate that through
- if (!GRPC_ERROR_IS_NONE(error)) {
+ if (!error.ok()) {
  recv_initial_state_ = RecvInitialState::kResponded;
  flusher.AddClosure(
- std::exchange(original_recv_initial_metadata_ready_, nullptr),
- GRPC_ERROR_REF(error), "propagate error");
+ std::exchange(original_recv_initial_metadata_ready_, nullptr), error,
+ "propagate error");
  return;
  }
  // Record that we've got the callback.
@@ -1168,24 +1990,52 @@ void ServerCallData::RecvInitialMetadataReady(grpc_error_handle error) {
  ScopedContext context(this);
  // Construct the promise.
  ChannelFilter* filter = static_cast<ChannelFilter*>(elem()->channel_data);
- promise_ =
- filter->MakeCallPromise(CallArgs{WrapMetadata(recv_initial_metadata_),
- server_initial_metadata_latch()},
- [this](CallArgs call_args) {
- return MakeNextPromise(std::move(call_args));
- });
+ FakeActivity().Run([this, filter] {
+ promise_ = filter->MakeCallPromise(
+ CallArgs{WrapMetadata(recv_initial_metadata_),
+ server_initial_metadata_latch(), outgoing_messages_pipe(),
+ incoming_messages_pipe()},
+ [this](CallArgs call_args) {
+ return MakeNextPromise(std::move(call_args));
+ });
+ });
  // Poll once.
  WakeInsideCombiner(&flusher);
  if (auto* closure =
  std::exchange(original_recv_initial_metadata_ready_, nullptr)) {
- flusher.AddClosure(closure, GRPC_ERROR_NONE,
+ flusher.AddClosure(closure, absl::OkStatus(),
  "original_recv_initial_metadata");
  }
  }

+ std::string ServerCallData::DebugString() const {
+ std::vector<absl::string_view> captured;
+ if (send_message() != nullptr && send_message()->HaveCapturedBatch()) {
+ captured.push_back("send_message");
+ }
+ if (send_trailing_metadata_batch_.is_captured()) {
+ captured.push_back("send_trailing_metadata");
+ }
+ return absl::StrCat(
+ "have_promise=", promise_.has_value() ? "true" : "false",
+ " recv_initial_state=", StateString(recv_initial_state_),
+ " send_trailing_state=", StateString(send_trailing_state_), " captured={",
+ absl::StrJoin(captured, ","), "}",
+ send_initial_metadata_ == nullptr
+ ? ""
+ : absl::StrCat(
+ " send_initial_metadata=",
+ SendInitialMetadata::StateString(send_initial_metadata_->state))
+ .c_str());
+ }
+
  // Wakeup and poll the promise if appropriate.
  void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
  PollContext poll_ctx(this, flusher);
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s: WakeInsideCombiner %s", LogTag().c_str(),
+ DebugString().c_str());
+ }
  if (send_initial_metadata_ != nullptr &&
  send_initial_metadata_->state ==
  SendInitialMetadata::kQueuedAndGotLatch) {
@@ -1195,9 +2045,25 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
  .send_initial_metadata);
  }
  poll_ctx.ClearRepoll();
+ if (send_message() != nullptr) {
+ send_message()->WakeInsideCombiner(flusher);
+ if (send_trailing_state_ == SendTrailingState::kQueuedBehindSendMessage &&
+ send_message()->IsIdle()) {
+ send_trailing_state_ = SendTrailingState::kQueued;
+ }
+ }
+ if (receive_message() != nullptr) {
+ receive_message()->WakeInsideCombiner(flusher);
+ }
  if (promise_.has_value()) {
  Poll<ServerMetadataHandle> poll;
  poll = promise_();
+ if (grpc_trace_channel.enabled()) {
+ gpr_log(GPR_DEBUG, "%s: WakeInsideCombiner poll=%s", LogTag().c_str(),
+ PollToString(poll, [](const ServerMetadataHandle& h) {
+ return h->DebugString();
+ }).c_str());
+ }
  if (send_initial_metadata_ != nullptr &&
  send_initial_metadata_->state ==
  SendInitialMetadata::kQueuedAndSetLatch) {
@@ -1217,7 +2083,14 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
  promise_ = ArenaPromise<ServerMetadataHandle>();
  auto* md = UnwrapMetadata(std::move(*r));
  bool destroy_md = true;
+ if (send_message() != nullptr) {
+ send_message()->Done(*md);
+ }
+ if (receive_message() != nullptr) {
+ receive_message()->Done(*md, flusher);
+ }
  switch (send_trailing_state_) {
+ case SendTrailingState::kQueuedBehindSendMessage:
  case SendTrailingState::kQueued: {
  if (send_trailing_metadata_batch_->payload->send_trailing_metadata
  .send_trailing_metadata != md) {
@@ -1234,16 +2107,7 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
  break;
  case SendTrailingState::kInitial: {
  GPR_ASSERT(*md->get_pointer(GrpcStatusMetadata()) != GRPC_STATUS_OK);
- grpc_error_handle error =
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "early return from promise based filter"),
- GRPC_ERROR_INT_GRPC_STATUS,
- *md->get_pointer(GrpcStatusMetadata()));
- if (auto* message = md->get_pointer(GrpcMessageMetadata())) {
- error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
- message->as_string_view());
- }
- Cancel(error, flusher);
+ Completed(StatusFromMetadata(*md), flusher);
  } break;
  case SendTrailingState::kCancelled:
  // Nothing to do.