grpc 1.76.0 → 1.78.0.pre1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (763)
  1. checksums.yaml +4 -4
  2. data/Makefile +11 -5
  3. data/include/grpc/credentials.h +6 -1
  4. data/include/grpc/event_engine/memory_allocator.h +2 -0
  5. data/include/grpc/impl/channel_arg_names.h +5 -0
  6. data/include/grpc/support/metrics.h +7 -1
  7. data/src/core/call/call_filters.cc +1 -1
  8. data/src/core/call/call_filters.h +175 -1
  9. data/src/core/call/call_spine.cc +1 -1
  10. data/src/core/call/call_spine.h +27 -5
  11. data/src/core/call/channelz_context.h +30 -0
  12. data/src/core/call/client_call.cc +43 -5
  13. data/src/core/call/client_call.h +6 -3
  14. data/src/core/call/filter_fusion.h +4 -4
  15. data/src/core/call/interception_chain.h +7 -6
  16. data/src/core/call/metadata_batch.cc +49 -55
  17. data/src/core/call/metadata_batch.h +7 -6
  18. data/src/core/call/metadata_info.cc +1 -1
  19. data/src/core/call/parsed_metadata.h +2 -2
  20. data/src/core/call/request_buffer.cc +1 -1
  21. data/src/core/call/security_context.cc +1 -1
  22. data/src/core/call/security_context.h +1 -1
  23. data/src/core/call/server_call.cc +1 -1
  24. data/src/core/call/server_call.h +5 -3
  25. data/src/core/call/simple_slice_based_metadata.h +1 -1
  26. data/src/core/call/status_util.cc +1 -1
  27. data/src/core/channelz/channel_trace.cc +1 -1
  28. data/src/core/channelz/channel_trace.h +3 -3
  29. data/src/core/channelz/channelz.cc +13 -11
  30. data/src/core/channelz/channelz.h +41 -6
  31. data/src/core/channelz/channelz_registry.cc +2 -2
  32. data/src/core/channelz/channelz_registry.h +42 -2
  33. data/src/core/channelz/property_list.h +6 -4
  34. data/src/core/channelz/v2tov1/convert.cc +6 -6
  35. data/src/core/channelz/v2tov1/legacy_api.cc +4 -5
  36. data/src/core/channelz/v2tov1/property_list.cc +1 -1
  37. data/src/core/channelz/ztrace_collector.h +14 -2
  38. data/src/core/client_channel/backup_poller.cc +2 -2
  39. data/src/core/client_channel/buffered_call.cc +140 -0
  40. data/src/core/client_channel/buffered_call.h +104 -0
  41. data/src/core/client_channel/client_channel.cc +124 -71
  42. data/src/core/client_channel/client_channel.h +8 -11
  43. data/src/core/client_channel/client_channel_factory.h +1 -1
  44. data/src/core/client_channel/client_channel_filter.cc +393 -663
  45. data/src/core/client_channel/client_channel_filter.h +57 -150
  46. data/src/core/client_channel/client_channel_internal.h +5 -1
  47. data/src/core/client_channel/client_channel_service_config.cc +43 -3
  48. data/src/core/client_channel/client_channel_service_config.h +12 -1
  49. data/src/core/client_channel/config_selector.h +2 -2
  50. data/src/core/client_channel/connector.h +2 -0
  51. data/src/core/client_channel/dynamic_filters.cc +2 -2
  52. data/src/core/client_channel/global_subchannel_pool.h +1 -1
  53. data/src/core/client_channel/lb_metadata.h +1 -1
  54. data/src/core/client_channel/load_balanced_call_destination.cc +3 -5
  55. data/src/core/client_channel/load_balanced_call_destination.h +1 -1
  56. data/src/core/client_channel/retry_filter.cc +2 -2
  57. data/src/core/client_channel/retry_filter_legacy_call_data.cc +6 -7
  58. data/src/core/client_channel/retry_filter_legacy_call_data.h +6 -8
  59. data/src/core/client_channel/retry_service_config.cc +3 -3
  60. data/src/core/client_channel/retry_service_config.h +1 -1
  61. data/src/core/client_channel/subchannel.cc +106 -17
  62. data/src/core/client_channel/subchannel.h +24 -8
  63. data/src/core/client_channel/subchannel_pool_interface.cc +2 -2
  64. data/src/core/client_channel/subchannel_pool_interface.h +1 -1
  65. data/src/core/client_channel/subchannel_stream_client.cc +1 -1
  66. data/src/core/client_channel/subchannel_stream_client.h +3 -3
  67. data/src/core/config/config_vars.cc +8 -2
  68. data/src/core/config/config_vars.h +5 -0
  69. data/src/core/config/core_configuration.h +1 -1
  70. data/src/core/config/load_config.cc +1 -1
  71. data/src/core/credentials/call/call_credentials.h +2 -2
  72. data/src/core/credentials/call/call_creds_registry.h +1 -1
  73. data/src/core/credentials/call/call_creds_registry_init.cc +2 -2
  74. data/src/core/credentials/call/call_creds_util.cc +3 -3
  75. data/src/core/credentials/call/composite/composite_call_credentials.cc +2 -2
  76. data/src/core/credentials/call/composite/composite_call_credentials.h +1 -1
  77. data/src/core/credentials/call/external/aws_external_account_credentials.cc +6 -6
  78. data/src/core/credentials/call/external/aws_external_account_credentials.h +1 -1
  79. data/src/core/credentials/call/external/external_account_credentials.cc +12 -12
  80. data/src/core/credentials/call/external/external_account_credentials.h +1 -1
  81. data/src/core/credentials/call/external/file_external_account_credentials.cc +3 -3
  82. data/src/core/credentials/call/external/file_external_account_credentials.h +1 -1
  83. data/src/core/credentials/call/external/url_external_account_credentials.cc +6 -6
  84. data/src/core/credentials/call/external/url_external_account_credentials.h +1 -1
  85. data/src/core/credentials/call/gcp_service_account_identity/gcp_service_account_identity_credentials.cc +24 -71
  86. data/src/core/credentials/call/gcp_service_account_identity/gcp_service_account_identity_credentials.h +1 -8
  87. data/src/core/credentials/call/iam/iam_credentials.cc +2 -2
  88. data/src/core/credentials/call/iam/iam_credentials.h +1 -1
  89. data/src/core/credentials/call/json_util.cc +1 -1
  90. data/src/core/credentials/call/jwt/json_token.cc +4 -4
  91. data/src/core/credentials/call/jwt/jwt_credentials.cc +3 -3
  92. data/src/core/credentials/call/jwt/jwt_credentials.h +4 -4
  93. data/src/core/credentials/call/jwt/jwt_verifier.cc +5 -5
  94. data/src/core/credentials/call/jwt_token_file/jwt_token_file_call_credentials.cc +2 -2
  95. data/src/core/credentials/call/jwt_token_file/jwt_token_file_call_credentials.h +3 -3
  96. data/src/core/credentials/call/jwt_util.cc +3 -3
  97. data/src/core/credentials/call/jwt_util.h +1 -1
  98. data/src/core/credentials/call/oauth2/oauth2_credentials.cc +29 -60
  99. data/src/core/credentials/call/oauth2/oauth2_credentials.h +3 -9
  100. data/src/core/credentials/call/plugin/plugin_credentials.cc +4 -4
  101. data/src/core/credentials/call/plugin/plugin_credentials.h +2 -2
  102. data/src/core/credentials/call/token_fetcher/token_fetcher_credentials.cc +46 -0
  103. data/src/core/credentials/call/token_fetcher/token_fetcher_credentials.h +32 -3
  104. data/src/core/credentials/transport/alts/alts_credentials.cc +3 -3
  105. data/src/core/credentials/transport/alts/alts_security_connector.cc +3 -3
  106. data/src/core/credentials/transport/alts/check_gcp_environment_no_op.cc +1 -1
  107. data/src/core/credentials/transport/alts/grpc_alts_credentials_client_options.cc +1 -1
  108. data/src/core/credentials/transport/alts/grpc_alts_credentials_options.h +1 -1
  109. data/src/core/credentials/transport/channel_creds_registry.h +1 -1
  110. data/src/core/credentials/transport/channel_creds_registry_init.cc +1 -1
  111. data/src/core/credentials/transport/composite/composite_channel_credentials.cc +2 -2
  112. data/src/core/credentials/transport/composite/composite_channel_credentials.h +1 -1
  113. data/src/core/credentials/transport/fake/fake_credentials.cc +1 -1
  114. data/src/core/credentials/transport/fake/fake_credentials.h +1 -1
  115. data/src/core/credentials/transport/fake/fake_security_connector.cc +5 -5
  116. data/src/core/credentials/transport/google_default/credentials_generic.cc +2 -2
  117. data/src/core/credentials/transport/google_default/google_default_credentials.cc +5 -11
  118. data/src/core/credentials/transport/google_default/google_default_credentials.h +0 -2
  119. data/src/core/credentials/transport/insecure/insecure_security_connector.h +2 -2
  120. data/src/core/credentials/transport/local/local_security_connector.cc +5 -5
  121. data/src/core/credentials/transport/security_connector.cc +1 -1
  122. data/src/core/credentials/transport/security_connector.h +2 -2
  123. data/src/core/credentials/transport/ssl/ssl_credentials.cc +1 -1
  124. data/src/core/credentials/transport/ssl/ssl_security_connector.cc +5 -5
  125. data/src/core/credentials/transport/tls/certificate_provider_factory.h +1 -1
  126. data/src/core/credentials/transport/tls/certificate_provider_registry.cc +1 -1
  127. data/src/core/credentials/transport/tls/certificate_provider_registry.h +1 -1
  128. data/src/core/credentials/transport/tls/grpc_tls_certificate_distributor.cc +1 -1
  129. data/src/core/credentials/transport/tls/grpc_tls_certificate_distributor.h +2 -2
  130. data/src/core/credentials/transport/tls/grpc_tls_certificate_match.cc +1 -1
  131. data/src/core/credentials/transport/tls/grpc_tls_certificate_provider.cc +3 -3
  132. data/src/core/credentials/transport/tls/grpc_tls_certificate_provider.h +3 -3
  133. data/src/core/credentials/transport/tls/grpc_tls_certificate_verifier.cc +1 -1
  134. data/src/core/credentials/transport/tls/grpc_tls_certificate_verifier.h +2 -2
  135. data/src/core/credentials/transport/tls/grpc_tls_credentials_options.cc +1 -1
  136. data/src/core/credentials/transport/tls/grpc_tls_crl_provider.cc +5 -5
  137. data/src/core/credentials/transport/tls/grpc_tls_crl_provider.h +3 -3
  138. data/src/core/credentials/transport/tls/load_system_roots_supported.cc +1 -1
  139. data/src/core/credentials/transport/tls/spiffe_utils.cc +10 -8
  140. data/src/core/credentials/transport/tls/spiffe_utils.h +2 -2
  141. data/src/core/credentials/transport/tls/ssl_utils.cc +4 -4
  142. data/src/core/credentials/transport/tls/ssl_utils.h +2 -2
  143. data/src/core/credentials/transport/tls/tls_credentials.cc +1 -1
  144. data/src/core/credentials/transport/tls/tls_security_connector.cc +4 -4
  145. data/src/core/credentials/transport/tls/tls_security_connector.h +3 -3
  146. data/src/core/credentials/transport/transport_credentials.cc +1 -1
  147. data/src/core/credentials/transport/transport_credentials.h +2 -2
  148. data/src/core/credentials/transport/xds/xds_credentials.h +1 -1
  149. data/src/core/ext/filters/backend_metrics/backend_metric_filter.cc +2 -2
  150. data/src/core/ext/filters/backend_metrics/backend_metric_filter.h +4 -1
  151. data/src/core/ext/filters/channel_idle/legacy_channel_idle_filter.cc +5 -5
  152. data/src/core/ext/filters/channel_idle/legacy_channel_idle_filter.h +2 -2
  153. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +7 -7
  154. data/src/core/ext/filters/fault_injection/fault_injection_filter.h +6 -3
  155. data/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.h +1 -1
  156. data/src/core/ext/filters/gcp_authentication/gcp_authentication_filter.cc +1 -1
  157. data/src/core/ext/filters/gcp_authentication/gcp_authentication_filter.h +6 -3
  158. data/src/core/ext/filters/gcp_authentication/gcp_authentication_service_config_parser.h +1 -1
  159. data/src/core/ext/filters/http/client/http_client_filter.cc +6 -6
  160. data/src/core/ext/filters/http/client/http_client_filter.h +4 -1
  161. data/src/core/ext/filters/http/client_authority_filter.cc +2 -2
  162. data/src/core/ext/filters/http/client_authority_filter.h +4 -1
  163. data/src/core/ext/filters/http/http_filters_plugin.cc +1 -1
  164. data/src/core/ext/filters/http/message_compress/compression_filter.cc +3 -3
  165. data/src/core/ext/filters/http/message_compress/compression_filter.h +21 -2
  166. data/src/core/ext/filters/http/server/http_server_filter.cc +3 -3
  167. data/src/core/ext/filters/http/server/http_server_filter.h +4 -1
  168. data/src/core/ext/filters/message_size/message_size_filter.cc +2 -2
  169. data/src/core/ext/filters/message_size/message_size_filter.h +8 -2
  170. data/src/core/ext/filters/rbac/rbac_filter.cc +1 -1
  171. data/src/core/ext/filters/rbac/rbac_filter.h +4 -1
  172. data/src/core/ext/filters/rbac/rbac_service_config_parser.cc +3 -3
  173. data/src/core/ext/filters/rbac/rbac_service_config_parser.h +1 -1
  174. data/src/core/ext/filters/stateful_session/stateful_session_filter.cc +7 -7
  175. data/src/core/ext/filters/stateful_session/stateful_session_filter.h +13 -2
  176. data/src/core/ext/filters/stateful_session/stateful_session_service_config_parser.h +1 -1
  177. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +50 -37
  178. data/src/core/ext/transport/chttp2/client/chttp2_connector.h +2 -3
  179. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +15 -17
  180. data/src/core/ext/transport/chttp2/server/chttp2_server.h +1 -2
  181. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +2 -2
  182. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +210 -60
  183. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +10 -2
  184. data/src/core/ext/transport/chttp2/transport/flow_control.cc +39 -4
  185. data/src/core/ext/transport/chttp2/transport/flow_control.h +213 -78
  186. data/src/core/ext/transport/chttp2/transport/flow_control_manager.h +46 -1
  187. data/src/core/ext/transport/chttp2/transport/frame.cc +147 -21
  188. data/src/core/ext/transport/chttp2/transport/frame.h +44 -10
  189. data/src/core/ext/transport/chttp2/transport/frame_data.cc +2 -2
  190. data/src/core/ext/transport/chttp2/transport/frame_data.h +1 -1
  191. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +2 -2
  192. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +4 -4
  193. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +5 -5
  194. data/src/core/ext/transport/chttp2/transport/frame_security.cc +1 -1
  195. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +7 -15
  196. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +3 -3
  197. data/src/core/ext/transport/chttp2/transport/goaway.cc +129 -0
  198. data/src/core/ext/transport/chttp2/transport/goaway.h +350 -0
  199. data/src/core/ext/transport/chttp2/transport/header_assembler.h +175 -51
  200. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +1 -1
  201. data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +4 -4
  202. data/src/core/ext/transport/chttp2/transport/hpack_parse_result.cc +1 -1
  203. data/src/core/ext/transport/chttp2/transport/hpack_parse_result.h +3 -3
  204. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +7 -7
  205. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +4 -4
  206. data/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc +4 -4
  207. data/src/core/ext/transport/chttp2/transport/hpack_parser_table.h +1 -1
  208. data/src/core/ext/transport/chttp2/transport/http2_client_transport.cc +1177 -511
  209. data/src/core/ext/transport/chttp2/transport/http2_client_transport.h +264 -174
  210. data/src/core/ext/transport/chttp2/transport/http2_settings.cc +1 -1
  211. data/src/core/ext/transport/chttp2/transport/http2_settings.h +6 -4
  212. data/src/core/ext/transport/chttp2/transport/http2_settings_manager.cc +4 -6
  213. data/src/core/ext/transport/chttp2/transport/http2_settings_manager.h +6 -16
  214. data/src/core/ext/transport/chttp2/transport/http2_settings_promises.h +320 -82
  215. data/src/core/ext/transport/chttp2/transport/http2_status.h +7 -1
  216. data/src/core/ext/transport/chttp2/transport/http2_transport.cc +286 -7
  217. data/src/core/ext/transport/chttp2/transport/http2_transport.h +187 -19
  218. data/src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h +57 -1
  219. data/src/core/ext/transport/chttp2/transport/incoming_metadata_tracker.h +128 -0
  220. data/src/core/ext/transport/chttp2/transport/internal.h +25 -5
  221. data/src/core/ext/transport/chttp2/transport/keepalive.cc +12 -5
  222. data/src/core/ext/transport/chttp2/transport/keepalive.h +14 -10
  223. data/src/core/ext/transport/chttp2/transport/message_assembler.h +24 -15
  224. data/src/core/ext/transport/chttp2/transport/parsing.cc +8 -8
  225. data/src/core/ext/transport/chttp2/transport/ping_callbacks.cc +1 -1
  226. data/src/core/ext/transport/chttp2/transport/ping_callbacks.h +3 -3
  227. data/src/core/ext/transport/chttp2/transport/ping_promise.cc +11 -5
  228. data/src/core/ext/transport/chttp2/transport/ping_promise.h +7 -3
  229. data/src/core/ext/transport/chttp2/transport/ping_rate_policy.cc +1 -1
  230. data/src/core/ext/transport/chttp2/transport/security_frame.cc +31 -0
  231. data/src/core/ext/transport/chttp2/transport/security_frame.h +32 -0
  232. data/src/core/ext/transport/chttp2/transport/stream.h +139 -59
  233. data/src/core/ext/transport/chttp2/transport/stream_data_queue.h +225 -98
  234. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +1 -1
  235. data/src/core/ext/transport/chttp2/transport/transport_common.cc +1 -1
  236. data/src/core/ext/transport/chttp2/transport/transport_common.h +5 -0
  237. data/src/core/ext/transport/chttp2/transport/writable_streams.h +27 -11
  238. data/src/core/ext/transport/chttp2/transport/writing.cc +3 -3
  239. data/src/core/ext/transport/inproc/inproc_transport.cc +8 -2
  240. data/src/core/ext/transport/inproc/legacy_inproc_transport.cc +8 -5
  241. data/src/core/filter/auth/auth_filters.h +7 -1
  242. data/src/core/filter/auth/client_auth_filter.cc +2 -2
  243. data/src/core/filter/auth/server_auth_filter.cc +3 -3
  244. data/src/core/filter/blackboard.h +2 -2
  245. data/src/core/filter/filter_args.h +40 -2
  246. data/src/core/handshaker/endpoint_info/endpoint_info_handshaker.cc +2 -2
  247. data/src/core/handshaker/handshaker.cc +5 -5
  248. data/src/core/handshaker/handshaker.h +2 -2
  249. data/src/core/handshaker/http_connect/http_connect_handshaker.cc +5 -5
  250. data/src/core/handshaker/http_connect/http_proxy_mapper.cc +11 -11
  251. data/src/core/handshaker/http_connect/http_proxy_mapper.h +1 -1
  252. data/src/core/handshaker/http_connect/xds_http_proxy_mapper.cc +1 -1
  253. data/src/core/handshaker/http_connect/xds_http_proxy_mapper.h +1 -1
  254. data/src/core/handshaker/proxy_mapper.h +1 -1
  255. data/src/core/handshaker/proxy_mapper_registry.h +1 -1
  256. data/src/core/handshaker/security/legacy_secure_endpoint.cc +4 -4
  257. data/src/core/handshaker/security/pipelined_secure_endpoint.cc +7 -7
  258. data/src/core/handshaker/security/secure_endpoint.cc +15 -5
  259. data/src/core/handshaker/security/security_handshaker.cc +8 -5
  260. data/src/core/handshaker/security/security_handshaker.h +1 -1
  261. data/src/core/handshaker/tcp_connect/tcp_connect_handshaker.cc +4 -4
  262. data/src/core/lib/address_utils/parse_address.cc +5 -5
  263. data/src/core/lib/address_utils/parse_address.h +2 -2
  264. data/src/core/lib/address_utils/sockaddr_utils.cc +4 -4
  265. data/src/core/lib/address_utils/sockaddr_utils.h +1 -1
  266. data/src/core/lib/channel/channel_args.cc +1 -1
  267. data/src/core/lib/channel/channel_args.h +2 -2
  268. data/src/core/lib/channel/channel_stack.cc +22 -21
  269. data/src/core/lib/channel/channel_stack.h +5 -3
  270. data/src/core/lib/channel/channel_stack_builder.cc +8 -4
  271. data/src/core/lib/channel/channel_stack_builder.h +10 -9
  272. data/src/core/lib/channel/channel_stack_builder_impl.cc +7 -13
  273. data/src/core/lib/channel/channel_stack_builder_impl.h +1 -1
  274. data/src/core/lib/channel/connected_channel.cc +2 -2
  275. data/src/core/lib/channel/promise_based_filter.cc +63 -8
  276. data/src/core/lib/channel/promise_based_filter.h +23 -8
  277. data/src/core/lib/compression/compression_internal.cc +4 -4
  278. data/src/core/lib/compression/compression_internal.h +1 -1
  279. data/src/core/lib/compression/message_compress.cc +1 -1
  280. data/src/core/lib/debug/trace.cc +2 -5
  281. data/src/core/lib/debug/trace.h +10 -0
  282. data/src/core/lib/debug/trace_flags.cc +2 -2
  283. data/src/core/lib/debug/trace_flags.h +1 -1
  284. data/src/core/lib/event_engine/ares_resolver.cc +8 -8
  285. data/src/core/lib/event_engine/ares_resolver.h +4 -4
  286. data/src/core/lib/event_engine/cf_engine/cf_engine.cc +1 -1
  287. data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc +2 -2
  288. data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.h +1 -1
  289. data/src/core/lib/event_engine/cf_engine/dns_service_resolver.cc +2 -2
  290. data/src/core/lib/event_engine/cf_engine/dns_service_resolver.h +1 -1
  291. data/src/core/lib/event_engine/channel_args_endpoint_config.h +1 -1
  292. data/src/core/lib/event_engine/default_event_engine.cc +1 -1
  293. data/src/core/lib/event_engine/event_engine.cc +1 -1
  294. data/src/core/lib/event_engine/extensions/channelz.h +1 -1
  295. data/src/core/lib/event_engine/extensions/chaotic_good_extension.h +1 -1
  296. data/src/core/lib/event_engine/extensions/tcp_trace.h +8 -1
  297. data/src/core/lib/event_engine/grpc_polled_fd.h +1 -1
  298. data/src/core/lib/event_engine/memory_allocator_factory.h +1 -1
  299. data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc +4 -4
  300. data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.h +5 -5
  301. data/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc +5 -5
  302. data/src/core/lib/event_engine/posix_engine/ev_poll_posix.h +3 -3
  303. data/src/core/lib/event_engine/posix_engine/event_poller.h +2 -2
  304. data/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc +2 -2
  305. data/src/core/lib/event_engine/posix_engine/file_descriptor_collection.cc +1 -1
  306. data/src/core/lib/event_engine/posix_engine/file_descriptor_collection.h +1 -1
  307. data/src/core/lib/event_engine/posix_engine/grpc_polled_fd_posix.h +4 -4
  308. data/src/core/lib/event_engine/posix_engine/internal_errqueue.cc +1 -1
  309. data/src/core/lib/event_engine/posix_engine/lockfree_event.cc +1 -1
  310. data/src/core/lib/event_engine/posix_engine/lockfree_event.h +1 -1
  311. data/src/core/lib/event_engine/posix_engine/native_posix_dns_resolver.cc +3 -3
  312. data/src/core/lib/event_engine/posix_engine/native_posix_dns_resolver.h +1 -1
  313. data/src/core/lib/event_engine/posix_engine/posix_endpoint.cc +5 -5
  314. data/src/core/lib/event_engine/posix_engine/posix_endpoint.h +6 -6
  315. data/src/core/lib/event_engine/posix_engine/posix_engine.cc +7 -7
  316. data/src/core/lib/event_engine/posix_engine/posix_engine.h +7 -7
  317. data/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc +7 -4
  318. data/src/core/lib/event_engine/posix_engine/posix_engine_listener.h +4 -4
  319. data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.cc +4 -4
  320. data/src/core/lib/event_engine/posix_engine/posix_engine_listener_utils.h +1 -1
  321. data/src/core/lib/event_engine/posix_engine/posix_interface.h +1 -1
  322. data/src/core/lib/event_engine/posix_engine/posix_interface_posix.cc +5 -5
  323. data/src/core/lib/event_engine/posix_engine/posix_write_event_sink.h +1 -1
  324. data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.h +1 -1
  325. data/src/core/lib/event_engine/posix_engine/timer.h +1 -1
  326. data/src/core/lib/event_engine/posix_engine/timer_manager.cc +2 -2
  327. data/src/core/lib/event_engine/posix_engine/timer_manager.h +1 -1
  328. data/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc +2 -2
  329. data/src/core/lib/event_engine/posix_engine/traced_buffer_list.h +2 -2
  330. data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc +1 -1
  331. data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.h +2 -2
  332. data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc +1 -1
  333. data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.h +2 -2
  334. data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix.h +1 -1
  335. data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc +2 -2
  336. data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.h +1 -1
  337. data/src/core/lib/event_engine/ref_counted_dns_resolver_interface.h +1 -1
  338. data/src/core/lib/event_engine/tcp_socket_utils.cc +4 -4
  339. data/src/core/lib/event_engine/thread_pool/thread_count.cc +1 -1
  340. data/src/core/lib/event_engine/thread_pool/thread_count.h +1 -1
  341. data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc +4 -4
  342. data/src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.h +3 -3
  343. data/src/core/lib/event_engine/utils.cc +3 -3
  344. data/src/core/lib/event_engine/utils.h +1 -1
  345. data/src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc +1 -1
  346. data/src/core/lib/event_engine/windows/grpc_polled_fd_windows.h +2 -2
  347. data/src/core/lib/event_engine/windows/iocp.cc +1 -1
  348. data/src/core/lib/event_engine/windows/iocp.h +1 -1
  349. data/src/core/lib/event_engine/windows/native_windows_dns_resolver.cc +5 -2
  350. data/src/core/lib/event_engine/windows/win_socket.cc +1 -1
  351. data/src/core/lib/event_engine/windows/win_socket.h +2 -2
  352. data/src/core/lib/event_engine/windows/windows_endpoint.cc +5 -5
  353. data/src/core/lib/event_engine/windows/windows_engine.cc +4 -4
  354. data/src/core/lib/event_engine/windows/windows_engine.h +3 -3
  355. data/src/core/lib/event_engine/windows/windows_listener.cc +3 -3
  356. data/src/core/lib/event_engine/windows/windows_listener.h +2 -2
  357. data/src/core/lib/event_engine/work_queue/basic_work_queue.h +2 -2
  358. data/src/core/lib/experiments/config.cc +4 -4
  359. data/src/core/lib/experiments/experiments.cc +174 -48
  360. data/src/core/lib/experiments/experiments.h +76 -24
  361. data/src/core/lib/iomgr/buffer_list.cc +1 -1
  362. data/src/core/lib/iomgr/call_combiner.cc +1 -1
  363. data/src/core/lib/iomgr/call_combiner.h +2 -2
  364. data/src/core/lib/iomgr/cfstream_handle.cc +1 -1
  365. data/src/core/lib/iomgr/closure.h +2 -2
  366. data/src/core/lib/iomgr/combiner.cc +2 -2
  367. data/src/core/lib/iomgr/endpoint.h +1 -1
  368. data/src/core/lib/iomgr/endpoint_cfstream.cc +1 -1
  369. data/src/core/lib/iomgr/endpoint_pair_posix.cc +1 -1
  370. data/src/core/lib/iomgr/endpoint_pair_windows.cc +1 -1
  371. data/src/core/lib/iomgr/error.cc +1 -1
  372. data/src/core/lib/iomgr/error.h +2 -2
  373. data/src/core/lib/iomgr/error_cfstream.cc +1 -1
  374. data/src/core/lib/iomgr/ev_apple.cc +1 -1
  375. data/src/core/lib/iomgr/ev_epoll1_linux.cc +4 -4
  376. data/src/core/lib/iomgr/ev_poll_posix.cc +3 -3
  377. data/src/core/lib/iomgr/ev_posix.cc +3 -3
  378. data/src/core/lib/iomgr/event_engine_shims/closure.cc +3 -3
  379. data/src/core/lib/iomgr/event_engine_shims/closure.h +1 -1
  380. data/src/core/lib/iomgr/event_engine_shims/endpoint.cc +5 -5
  381. data/src/core/lib/iomgr/event_engine_shims/tcp_client.cc +2 -2
  382. data/src/core/lib/iomgr/exec_ctx.cc +3 -3
  383. data/src/core/lib/iomgr/exec_ctx.h +1 -1
  384. data/src/core/lib/iomgr/fork_posix.cc +1 -1
  385. data/src/core/lib/iomgr/internal_errqueue.cc +1 -1
  386. data/src/core/lib/iomgr/iocp_windows.cc +1 -1
  387. data/src/core/lib/iomgr/iomgr.cc +1 -1
  388. data/src/core/lib/iomgr/lockfree_event.cc +1 -1
  389. data/src/core/lib/iomgr/polling_entity.cc +1 -1
  390. data/src/core/lib/iomgr/resolve_address.cc +1 -1
  391. data/src/core/lib/iomgr/resolve_address.h +2 -2
  392. data/src/core/lib/iomgr/resolve_address_posix.cc +4 -4
  393. data/src/core/lib/iomgr/resolve_address_windows.cc +1 -1
  394. data/src/core/lib/iomgr/sockaddr_utils_posix.cc +1 -1
  395. data/src/core/lib/iomgr/socket_utils_common_posix.cc +2 -2
  396. data/src/core/lib/iomgr/socket_windows.cc +2 -2
  397. data/src/core/lib/iomgr/tcp_client_cfstream.cc +1 -1
  398. data/src/core/lib/iomgr/tcp_client_posix.cc +3 -3
  399. data/src/core/lib/iomgr/tcp_posix.cc +5 -5
  400. data/src/core/lib/iomgr/tcp_server_posix.cc +20 -9
  401. data/src/core/lib/iomgr/tcp_server_utils_posix.h +1 -1
  402. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +2 -2
  403. data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +2 -2
  404. data/src/core/lib/iomgr/tcp_server_windows.cc +2 -2
  405. data/src/core/lib/iomgr/tcp_windows.cc +1 -1
  406. data/src/core/lib/iomgr/timer_generic.cc +3 -3
  407. data/src/core/lib/iomgr/timer_manager.cc +1 -1
  408. data/src/core/lib/iomgr/unix_sockets_posix.cc +1 -1
  409. data/src/core/lib/iomgr/unix_sockets_posix.h +1 -1
  410. data/src/core/lib/iomgr/vsock.cc +1 -1
  411. data/src/core/lib/iomgr/vsock.h +1 -1
  412. data/src/core/lib/iomgr/wakeup_fd_pipe.cc +1 -1
  413. data/src/core/lib/promise/activity.cc +2 -2
  414. data/src/core/lib/promise/activity.h +5 -4
  415. data/src/core/lib/promise/all_ok.h +3 -3
  416. data/src/core/lib/promise/arena_promise.h +47 -6
  417. data/src/core/lib/promise/context.h +1 -1
  418. data/src/core/lib/promise/detail/join_state.h +1 -1
  419. data/src/core/lib/promise/detail/promise_factory.h +1 -1
  420. data/src/core/lib/promise/detail/promise_like.h +1 -1
  421. data/src/core/lib/promise/detail/seq_state.h +3 -3
  422. data/src/core/lib/promise/detail/status.h +1 -1
  423. data/src/core/lib/promise/exec_ctx_wakeup_scheduler.h +1 -1
  424. data/src/core/lib/promise/for_each.h +3 -3
  425. data/src/core/lib/promise/if.h +1 -1
  426. data/src/core/lib/promise/inter_activity_latch.h +3 -3
  427. data/src/core/lib/promise/inter_activity_mutex.h +1 -1
  428. data/src/core/lib/promise/interceptor_list.h +3 -3
  429. data/src/core/lib/promise/latch.h +2 -2
  430. data/src/core/lib/promise/loop.h +2 -2
  431. data/src/core/lib/promise/map.h +2 -2
  432. data/src/core/lib/promise/mpsc.cc +5 -4
  433. data/src/core/lib/promise/observable.h +2 -2
  434. data/src/core/lib/promise/party.cc +14 -8
  435. data/src/core/lib/promise/party.h +11 -4
  436. data/src/core/lib/promise/pipe.h +16 -2
  437. data/src/core/lib/promise/poll.h +2 -2
  438. data/src/core/lib/promise/promise.h +2 -2
  439. data/src/core/lib/promise/sleep.h +1 -1
  440. data/src/core/lib/promise/status_flag.h +2 -2
  441. data/src/core/lib/promise/try_join.h +3 -3
  442. data/src/core/lib/promise/try_seq.h +3 -3
  443. data/src/core/lib/promise/wait_set.h +2 -2
  444. data/src/core/lib/resource_quota/api.cc +1 -1
  445. data/src/core/lib/resource_quota/arena.cc +1 -1
  446. data/src/core/lib/resource_quota/connection_quota.h +1 -1
  447. data/src/core/lib/resource_quota/memory_quota.cc +3 -3
  448. data/src/core/lib/resource_quota/memory_quota.h +9 -5
  449. data/src/core/lib/resource_quota/periodic_update.h +1 -1
  450. data/src/core/lib/resource_quota/resource_quota.cc +8 -0
  451. data/src/core/lib/resource_quota/resource_quota.h +2 -1
  452. data/src/core/lib/resource_quota/stream_quota.cc +22 -0
  453. data/src/core/lib/resource_quota/stream_quota.h +31 -0
  454. data/src/core/lib/resource_quota/telemetry.h +1 -0
  455. data/src/core/lib/resource_quota/thread_quota.h +1 -1
  456. data/src/core/lib/security/authorization/audit_logging.cc +3 -3
  457. data/src/core/lib/security/authorization/audit_logging.h +1 -1
  458. data/src/core/lib/security/authorization/authorization_policy_provider.h +1 -1
  459. data/src/core/lib/security/authorization/evaluate_args.cc +5 -5
  460. data/src/core/lib/security/authorization/evaluate_args.h +1 -1
  461. data/src/core/lib/security/authorization/grpc_server_authz_filter.cc +3 -3
  462. data/src/core/lib/security/authorization/grpc_server_authz_filter.h +4 -1
  463. data/src/core/lib/security/authorization/matchers.cc +2 -2
  464. data/src/core/lib/security/authorization/stdout_logger.cc +1 -1
  465. data/src/core/lib/slice/percent_encoding.cc +1 -1
  466. data/src/core/lib/slice/slice.cc +1 -1
  467. data/src/core/lib/slice/slice.h +2 -2
  468. data/src/core/lib/slice/slice_buffer.cc +1 -1
  469. data/src/core/lib/slice/slice_internal.h +1 -1
  470. data/src/core/lib/surface/call.cc +42 -14
  471. data/src/core/lib/surface/call.h +12 -5
  472. data/src/core/lib/surface/call_log_batch.cc +2 -2
  473. data/src/core/lib/surface/call_utils.cc +5 -5
  474. data/src/core/lib/surface/call_utils.h +83 -18
  475. data/src/core/lib/surface/channel.cc +2 -1
  476. data/src/core/lib/surface/channel.h +13 -3
  477. data/src/core/lib/surface/channel_create.cc +2 -2
  478. data/src/core/lib/surface/channel_create.h +1 -1
  479. data/src/core/lib/surface/channel_init.cc +5 -5
  480. data/src/core/lib/surface/channel_init.h +4 -2
  481. data/src/core/lib/surface/completion_queue.cc +4 -4
  482. data/src/core/lib/surface/filter_stack_call.cc +13 -8
  483. data/src/core/lib/surface/filter_stack_call.h +3 -3
  484. data/src/core/lib/surface/init.cc +4 -4
  485. data/src/core/lib/surface/lame_client.cc +2 -2
  486. data/src/core/lib/surface/lame_client.h +3 -3
  487. data/src/core/lib/surface/legacy_channel.cc +3 -3
  488. data/src/core/lib/surface/legacy_channel.h +1 -1
  489. data/src/core/lib/surface/validate_metadata.cc +2 -2
  490. data/src/core/lib/surface/validate_metadata.h +1 -1
  491. data/src/core/lib/surface/version.cc +2 -2
  492. data/src/core/lib/transport/bdp_estimator.cc +1 -1
  493. data/src/core/lib/transport/bdp_estimator.h +2 -2
  494. data/src/core/lib/transport/connectivity_state.cc +1 -1
  495. data/src/core/lib/transport/connectivity_state.h +2 -2
  496. data/src/core/lib/transport/error_utils.h +1 -1
  497. data/src/core/lib/transport/promise_endpoint.cc +1 -1
  498. data/src/core/lib/transport/promise_endpoint.h +3 -3
  499. data/src/core/lib/transport/timeout_encoding.cc +1 -1
  500. data/src/core/lib/transport/transport.cc +3 -3
  501. data/src/core/lib/transport/transport.h +62 -4
  502. data/src/core/lib/transport/transport_framing_endpoint_extension.h +1 -1
  503. data/src/core/lib/transport/transport_op_string.cc +2 -2
  504. data/src/core/load_balancing/address_filtering.cc +1 -1
  505. data/src/core/load_balancing/address_filtering.h +2 -2
  506. data/src/core/load_balancing/backend_metric_parser.cc +1 -1
  507. data/src/core/load_balancing/backend_metric_parser.h +1 -1
  508. data/src/core/load_balancing/child_policy_handler.cc +4 -4
  509. data/src/core/load_balancing/child_policy_handler.h +2 -2
  510. data/src/core/load_balancing/delegating_helper.h +2 -2
  511. data/src/core/load_balancing/endpoint_list.cc +4 -4
  512. data/src/core/load_balancing/endpoint_list.h +2 -2
  513. data/src/core/load_balancing/grpclb/client_load_reporting_filter.cc +7 -5
  514. data/src/core/load_balancing/grpclb/client_load_reporting_filter.h +5 -1
  515. data/src/core/load_balancing/grpclb/grpclb.cc +13 -24
  516. data/src/core/load_balancing/grpclb/grpclb_client_stats.h +2 -2
  517. data/src/core/load_balancing/grpclb/load_balancer_api.cc +1 -1
  518. data/src/core/load_balancing/grpclb/load_balancer_api.h +1 -1
  519. data/src/core/load_balancing/health_check_client.cc +9 -5
  520. data/src/core/load_balancing/health_check_client_internal.h +3 -3
  521. data/src/core/load_balancing/lb_policy.h +11 -8
  522. data/src/core/load_balancing/lb_policy_factory.h +2 -2
  523. data/src/core/load_balancing/lb_policy_registry.cc +2 -2
  524. data/src/core/load_balancing/lb_policy_registry.h +2 -2
  525. data/src/core/load_balancing/oob_backend_metric.cc +7 -3
  526. data/src/core/load_balancing/oob_backend_metric_internal.h +2 -2
  527. data/src/core/load_balancing/outlier_detection/outlier_detection.cc +7 -15
  528. data/src/core/load_balancing/pick_first/pick_first.cc +48 -18
  529. data/src/core/load_balancing/priority/priority.cc +6 -6
  530. data/src/core/load_balancing/ring_hash/ring_hash.cc +8 -8
  531. data/src/core/load_balancing/rls/rls.cc +10 -10
  532. data/src/core/load_balancing/round_robin/round_robin.cc +7 -7
  533. data/src/core/load_balancing/subchannel_interface.h +2 -2
  534. data/src/core/load_balancing/weighted_round_robin/static_stride_scheduler.cc +1 -1
  535. data/src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc +9 -15
  536. data/src/core/load_balancing/weighted_target/weighted_target.cc +9 -9
  537. data/src/core/load_balancing/xds/cds.cc +5 -5
  538. data/src/core/load_balancing/xds/xds_cluster_impl.cc +22 -39
  539. data/src/core/load_balancing/xds/xds_cluster_manager.cc +6 -6
  540. data/src/core/load_balancing/xds/xds_override_host.cc +10 -10
  541. data/src/core/load_balancing/xds/xds_override_host.h +1 -1
  542. data/src/core/load_balancing/xds/xds_wrr_locality.cc +5 -5
  543. data/src/core/resolver/dns/c_ares/dns_resolver_ares.cc +8 -8
  544. data/src/core/resolver/dns/c_ares/dns_resolver_ares.h +1 -1
  545. data/src/core/resolver/dns/c_ares/grpc_ares_ev_driver.h +1 -1
  546. data/src/core/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +2 -2
  547. data/src/core/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +2 -2
  548. data/src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc +6 -6
  549. data/src/core/resolver/dns/c_ares/grpc_ares_wrapper.h +2 -2
  550. data/src/core/resolver/dns/dns_resolver_plugin.cc +2 -2
  551. data/src/core/resolver/dns/event_engine/event_engine_client_channel_resolver.cc +8 -8
  552. data/src/core/resolver/dns/event_engine/event_engine_client_channel_resolver.h +1 -1
  553. data/src/core/resolver/dns/event_engine/service_config_helper.cc +2 -2
  554. data/src/core/resolver/dns/native/dns_resolver.cc +7 -7
  555. data/src/core/resolver/endpoint_addresses.cc +4 -4
  556. data/src/core/resolver/endpoint_addresses.h +1 -1
  557. data/src/core/resolver/fake/fake_resolver.cc +1 -1
  558. data/src/core/resolver/fake/fake_resolver.h +3 -3
  559. data/src/core/resolver/google_c2p/google_c2p_resolver.cc +5 -5
  560. data/src/core/resolver/polling_resolver.cc +5 -5
  561. data/src/core/resolver/polling_resolver.h +1 -1
  562. data/src/core/resolver/resolver.h +2 -2
  563. data/src/core/resolver/resolver_factory.h +2 -2
  564. data/src/core/resolver/resolver_registry.cc +1 -1
  565. data/src/core/resolver/resolver_registry.h +1 -1
  566. data/src/core/resolver/sockaddr/sockaddr_resolver.cc +4 -4
  567. data/src/core/resolver/xds/xds_config.cc +1 -1
  568. data/src/core/resolver/xds/xds_config.h +3 -3
  569. data/src/core/resolver/xds/xds_dependency_manager.cc +2 -2
  570. data/src/core/resolver/xds/xds_dependency_manager.h +3 -3
  571. data/src/core/resolver/xds/xds_resolver.cc +16 -13
  572. data/src/core/resolver/xds/xds_resolver_attributes.h +1 -1
  573. data/src/core/server/add_port.cc +2 -2
  574. data/src/core/server/server.cc +9 -5
  575. data/src/core/server/server.h +8 -7
  576. data/src/core/server/server_call_tracer_filter.cc +1 -1
  577. data/src/core/server/server_call_tracer_filter.h +5 -1
  578. data/src/core/server/server_config_selector.h +2 -2
  579. data/src/core/server/server_config_selector_filter.cc +3 -3
  580. data/src/core/server/xds_channel_stack_modifier.cc +3 -2
  581. data/src/core/server/xds_channel_stack_modifier.h +1 -1
  582. data/src/core/server/xds_server_config_fetcher.cc +10 -10
  583. data/src/core/service_config/service_config.h +1 -1
  584. data/src/core/service_config/service_config_channel_arg_filter.h +4 -1
  585. data/src/core/service_config/service_config_impl.cc +3 -3
  586. data/src/core/service_config/service_config_impl.h +2 -2
  587. data/src/core/service_config/service_config_parser.h +1 -1
  588. data/src/core/telemetry/call_tracer.h +2 -2
  589. data/src/core/telemetry/default_tcp_tracer.h +3 -3
  590. data/src/core/telemetry/histogram.h +1 -1
  591. data/src/core/telemetry/instrument.cc +550 -270
  592. data/src/core/telemetry/instrument.h +301 -128
  593. data/src/core/telemetry/metrics.cc +2 -0
  594. data/src/core/telemetry/metrics.h +33 -4
  595. data/src/core/telemetry/stats.h +2 -2
  596. data/src/core/telemetry/stats_data.cc +1 -1
  597. data/src/core/telemetry/stats_data.h +2 -2
  598. data/src/core/transport/auth_context.cc +1 -1
  599. data/src/core/transport/auth_context.h +2 -1
  600. data/src/core/transport/auth_context_comparator_registry.h +1 -1
  601. data/src/core/tsi/alts/crypt/aes_gcm.cc +1 -1
  602. data/src/core/tsi/alts/frame_protector/alts_frame_protector.cc +2 -2
  603. data/src/core/tsi/alts/frame_protector/frame_handler.cc +1 -1
  604. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +3 -3
  605. data/src/core/tsi/alts/handshaker/alts_handshaker_client.h +1 -1
  606. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +9 -7
  607. data/src/core/tsi/alts/handshaker/alts_tsi_utils.cc +1 -1
  608. data/src/core/tsi/alts/handshaker/transport_security_common_api.cc +1 -1
  609. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +1 -1
  610. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc +11 -3
  611. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol.h +10 -0
  612. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc +9 -1
  613. data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.h +3 -0
  614. data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +15 -2
  615. data/src/core/tsi/fake_transport_security.cc +2 -1
  616. data/src/core/tsi/local_transport_security.cc +1 -1
  617. data/src/core/tsi/ssl/key_logging/ssl_key_logging.cc +1 -1
  618. data/src/core/tsi/ssl/key_logging/ssl_key_logging.h +1 -1
  619. data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +1 -1
  620. data/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc +1 -1
  621. data/src/core/tsi/ssl_transport_security.cc +6 -6
  622. data/src/core/tsi/ssl_transport_security.h +1 -1
  623. data/src/core/tsi/ssl_transport_security_utils.cc +2 -2
  624. data/src/core/tsi/ssl_transport_security_utils.h +2 -2
  625. data/src/core/tsi/transport_security_grpc.cc +8 -0
  626. data/src/core/tsi/transport_security_grpc.h +15 -0
  627. data/src/core/util/alloc.cc +1 -1
  628. data/src/core/util/backoff.h +1 -1
  629. data/src/core/util/crash.h +1 -1
  630. data/src/core/util/dual_ref_counted.h +2 -2
  631. data/src/core/util/event_log.cc +1 -1
  632. data/src/core/util/event_log.h +3 -3
  633. data/src/core/util/gcp_metadata_query.cc +5 -5
  634. data/src/core/util/gcp_metadata_query.h +2 -2
  635. data/src/core/util/grpc_check.cc +2 -0
  636. data/src/core/util/grpc_check.h +1 -1
  637. data/src/core/util/grpc_if_nametoindex_posix.cc +1 -1
  638. data/src/core/util/grpc_if_nametoindex_unsupported.cc +1 -1
  639. data/src/core/util/http_client/format_request.cc +1 -1
  640. data/src/core/util/http_client/httpcli.cc +3 -3
  641. data/src/core/util/http_client/httpcli.h +4 -4
  642. data/src/core/util/http_client/httpcli_security_connector.cc +4 -4
  643. data/src/core/util/http_client/parser.cc +1 -1
  644. data/src/core/util/json/json_channel_args.h +1 -1
  645. data/src/core/util/json/json_object_loader.h +6 -6
  646. data/src/core/util/json/json_reader.cc +2 -2
  647. data/src/core/util/json/json_reader.h +1 -1
  648. data/src/core/util/json/json_util.h +3 -3
  649. data/src/core/util/json/json_writer.cc +1 -1
  650. data/src/core/util/latent_see.cc +45 -24
  651. data/src/core/util/latent_see.h +192 -24
  652. data/src/core/util/linux/cpu.cc +1 -1
  653. data/src/core/util/load_file.cc +1 -1
  654. data/src/core/util/load_file.h +1 -1
  655. data/src/core/util/log.cc +3 -3
  656. data/src/core/util/lru_cache.h +1 -1
  657. data/src/core/util/matchers.h +1 -1
  658. data/src/core/util/memory_usage.h +3 -3
  659. data/src/core/util/mpscq.h +1 -1
  660. data/src/core/util/notification.h +1 -1
  661. data/src/core/util/posix/cpu.cc +1 -1
  662. data/src/core/util/posix/stat.cc +2 -2
  663. data/src/core/util/posix/thd.cc +2 -2
  664. data/src/core/util/posix/tmpfile.cc +2 -2
  665. data/src/core/util/ref_counted.h +2 -2
  666. data/src/core/util/ref_counted_ptr.h +1 -1
  667. data/src/core/util/ref_counted_string.h +1 -1
  668. data/src/core/util/single_set_ptr.h +3 -1
  669. data/src/core/util/status_helper.cc +8 -8
  670. data/src/core/util/status_helper.h +1 -1
  671. data/src/core/util/string.cc +2 -2
  672. data/src/core/util/sync_abseil.cc +1 -1
  673. data/src/core/util/table.h +1 -1
  674. data/src/core/util/time.cc +1 -1
  675. data/src/core/util/time_precise.cc +1 -1
  676. data/src/core/util/unique_ptr_with_bitset.h +1 -1
  677. data/src/core/util/unique_type_name.h +1 -1
  678. data/src/core/util/upb_utils.h +6 -1
  679. data/src/core/util/validation_errors.cc +2 -2
  680. data/src/core/util/validation_errors.h +2 -3
  681. data/src/core/util/wait_for_single_owner.h +2 -2
  682. data/src/core/util/windows/directory_reader.cc +1 -1
  683. data/src/core/util/windows/stat.cc +2 -2
  684. data/src/core/util/windows/thd.cc +2 -2
  685. data/src/core/util/windows/time.cc +1 -1
  686. data/src/core/util/work_serializer.cc +3 -3
  687. data/src/core/util/work_serializer.h +2 -2
  688. data/src/core/xds/grpc/certificate_provider_store.cc +2 -2
  689. data/src/core/xds/grpc/certificate_provider_store.h +2 -2
  690. data/src/core/xds/grpc/file_watcher_certificate_provider_factory.cc +3 -3
  691. data/src/core/xds/grpc/file_watcher_certificate_provider_factory.h +1 -1
  692. data/src/core/xds/grpc/xds_audit_logger_registry.cc +3 -3
  693. data/src/core/xds/grpc/xds_audit_logger_registry.h +1 -1
  694. data/src/core/xds/grpc/xds_bootstrap_grpc.cc +7 -7
  695. data/src/core/xds/grpc/xds_bootstrap_grpc.h +2 -2
  696. data/src/core/xds/grpc/xds_certificate_provider.cc +1 -1
  697. data/src/core/xds/grpc/xds_certificate_provider.h +2 -2
  698. data/src/core/xds/grpc/xds_client_grpc.cc +5 -5
  699. data/src/core/xds/grpc/xds_client_grpc.h +2 -2
  700. data/src/core/xds/grpc/xds_cluster.cc +2 -2
  701. data/src/core/xds/grpc/xds_cluster.h +1 -1
  702. data/src/core/xds/grpc/xds_cluster_parser.cc +5 -5
  703. data/src/core/xds/grpc/xds_cluster_parser.h +1 -1
  704. data/src/core/xds/grpc/xds_cluster_specifier_plugin.cc +2 -2
  705. data/src/core/xds/grpc/xds_cluster_specifier_plugin.h +1 -1
  706. data/src/core/xds/grpc/xds_common_types.cc +1 -1
  707. data/src/core/xds/grpc/xds_common_types.h +1 -1
  708. data/src/core/xds/grpc/xds_common_types_parser.cc +5 -5
  709. data/src/core/xds/grpc/xds_endpoint.h +2 -2
  710. data/src/core/xds/grpc/xds_endpoint_parser.cc +5 -5
  711. data/src/core/xds/grpc/xds_endpoint_parser.h +1 -1
  712. data/src/core/xds/grpc/xds_health_status.cc +1 -1
  713. data/src/core/xds/grpc/xds_health_status.h +1 -1
  714. data/src/core/xds/grpc/xds_http_fault_filter.cc +4 -4
  715. data/src/core/xds/grpc/xds_http_fault_filter.h +2 -2
  716. data/src/core/xds/grpc/xds_http_filter.h +3 -3
  717. data/src/core/xds/grpc/xds_http_filter_registry.h +2 -2
  718. data/src/core/xds/grpc/xds_http_gcp_authn_filter.cc +3 -3
  719. data/src/core/xds/grpc/xds_http_gcp_authn_filter.h +2 -2
  720. data/src/core/xds/grpc/xds_http_rbac_filter.cc +4 -4
  721. data/src/core/xds/grpc/xds_http_rbac_filter.h +2 -2
  722. data/src/core/xds/grpc/xds_http_stateful_session_filter.cc +3 -3
  723. data/src/core/xds/grpc/xds_http_stateful_session_filter.h +2 -2
  724. data/src/core/xds/grpc/xds_lb_policy_registry.cc +1 -1
  725. data/src/core/xds/grpc/xds_lb_policy_registry.h +1 -1
  726. data/src/core/xds/grpc/xds_listener.cc +2 -2
  727. data/src/core/xds/grpc/xds_listener_parser.cc +6 -6
  728. data/src/core/xds/grpc/xds_listener_parser.h +1 -1
  729. data/src/core/xds/grpc/xds_matcher.cc +1 -1
  730. data/src/core/xds/grpc/xds_matcher.h +3 -3
  731. data/src/core/xds/grpc/xds_matcher_input.h +1 -1
  732. data/src/core/xds/grpc/xds_metadata.cc +1 -1
  733. data/src/core/xds/grpc/xds_metadata.h +3 -3
  734. data/src/core/xds/grpc/xds_metadata_parser.cc +2 -2
  735. data/src/core/xds/grpc/xds_route_config.cc +3 -3
  736. data/src/core/xds/grpc/xds_route_config_parser.cc +8 -8
  737. data/src/core/xds/grpc/xds_route_config_parser.h +1 -1
  738. data/src/core/xds/grpc/xds_routing.cc +4 -4
  739. data/src/core/xds/grpc/xds_routing.h +2 -2
  740. data/src/core/xds/grpc/xds_server_grpc.cc +2 -2
  741. data/src/core/xds/grpc/xds_transport_grpc.cc +1 -1
  742. data/src/core/xds/grpc/xds_transport_grpc.h +2 -2
  743. data/src/core/xds/xds_client/lrs_client.cc +3 -3
  744. data/src/core/xds/xds_client/lrs_client.h +4 -4
  745. data/src/core/xds/xds_client/xds_api.h +1 -1
  746. data/src/core/xds/xds_client/xds_backend_metric_propagation.cc +1 -1
  747. data/src/core/xds/xds_client/xds_backend_metric_propagation.h +1 -1
  748. data/src/core/xds/xds_client/xds_client.cc +8 -8
  749. data/src/core/xds/xds_client/xds_client.h +5 -5
  750. data/src/core/xds/xds_client/xds_locality.h +2 -2
  751. data/src/core/xds/xds_client/xds_resource_type.h +2 -2
  752. data/src/core/xds/xds_client/xds_resource_type_impl.h +1 -1
  753. data/src/core/xds/xds_client/xds_transport.h +2 -2
  754. data/src/ruby/ext/grpc/extconf.rb +14 -12
  755. data/src/ruby/ext/grpc/rb_call.c +0 -1
  756. data/src/ruby/ext/grpc/rb_channel_args.c +0 -1
  757. data/src/ruby/ext/grpc/rb_channel_credentials.c +0 -1
  758. data/src/ruby/ext/grpc/rb_compression_options.c +0 -1
  759. data/src/ruby/ext/grpc/rb_server_credentials.c +0 -1
  760. data/src/ruby/ext/grpc/rb_xds_channel_credentials.c +0 -1
  761. data/src/ruby/ext/grpc/rb_xds_server_credentials.c +0 -1
  762. data/src/ruby/lib/grpc/version.rb +1 -1
  763. metadata +16 -6
@@ -19,56 +19,95 @@
19
19
  #include "src/core/ext/transport/chttp2/transport/http2_client_transport.h"
20
20
 
21
21
  #include <grpc/event_engine/event_engine.h>
22
+ #include <grpc/grpc.h>
22
23
  #include <grpc/support/port_platform.h>
24
+ #include <limits.h>
23
25
 
26
+ #include <algorithm>
27
+ #include <cstddef>
24
28
  #include <cstdint>
29
+ #include <iterator>
25
30
  #include <memory>
26
31
  #include <optional>
32
+ #include <string>
27
33
  #include <utility>
34
+ #include <vector>
28
35
 
29
- #include "absl/log/log.h"
30
- #include "absl/status/status.h"
31
- #include "absl/strings/string_view.h"
32
36
  #include "src/core/call/call_spine.h"
33
37
  #include "src/core/call/message.h"
38
+ #include "src/core/call/metadata.h"
34
39
  #include "src/core/call/metadata_batch.h"
40
+ #include "src/core/call/metadata_info.h"
41
+ #include "src/core/channelz/channelz.h"
35
42
  #include "src/core/ext/transport/chttp2/transport/flow_control.h"
36
43
  #include "src/core/ext/transport/chttp2/transport/flow_control_manager.h"
37
44
  #include "src/core/ext/transport/chttp2/transport/frame.h"
45
+ #include "src/core/ext/transport/chttp2/transport/goaway.h"
38
46
  #include "src/core/ext/transport/chttp2/transport/header_assembler.h"
39
47
  #include "src/core/ext/transport/chttp2/transport/http2_settings.h"
40
- #include "src/core/ext/transport/chttp2/transport/http2_settings_manager.h"
48
+ #include "src/core/ext/transport/chttp2/transport/http2_settings_promises.h"
41
49
  #include "src/core/ext/transport/chttp2/transport/http2_status.h"
50
+ #include "src/core/ext/transport/chttp2/transport/http2_transport.h"
42
51
  #include "src/core/ext/transport/chttp2/transport/http2_ztrace_collector.h"
43
- #include "src/core/ext/transport/chttp2/transport/internal_channel_arg_names.h"
52
+ #include "src/core/ext/transport/chttp2/transport/incoming_metadata_tracker.h"
53
+ #include "src/core/ext/transport/chttp2/transport/keepalive.h"
44
54
  #include "src/core/ext/transport/chttp2/transport/message_assembler.h"
55
+ #include "src/core/ext/transport/chttp2/transport/ping_promise.h"
56
+ #include "src/core/ext/transport/chttp2/transport/stream.h"
45
57
  #include "src/core/ext/transport/chttp2/transport/stream_data_queue.h"
46
58
  #include "src/core/ext/transport/chttp2/transport/transport_common.h"
47
59
  #include "src/core/lib/channel/channel_args.h"
48
- #include "src/core/lib/debug/trace.h"
60
+ #include "src/core/lib/iomgr/exec_ctx.h"
61
+ #include "src/core/lib/promise/activity.h"
62
+ #include "src/core/lib/promise/context.h"
49
63
  #include "src/core/lib/promise/for_each.h"
64
+ #include "src/core/lib/promise/if.h"
50
65
  #include "src/core/lib/promise/loop.h"
51
66
  #include "src/core/lib/promise/map.h"
52
67
  #include "src/core/lib/promise/match_promise.h"
53
68
  #include "src/core/lib/promise/party.h"
54
69
  #include "src/core/lib/promise/poll.h"
55
70
  #include "src/core/lib/promise/promise.h"
71
+ #include "src/core/lib/promise/race.h"
72
+ #include "src/core/lib/promise/sleep.h"
56
73
  #include "src/core/lib/promise/try_seq.h"
57
74
  #include "src/core/lib/resource_quota/arena.h"
58
75
  #include "src/core/lib/resource_quota/resource_quota.h"
59
76
  #include "src/core/lib/slice/slice.h"
60
77
  #include "src/core/lib/slice/slice_buffer.h"
78
+ #include "src/core/lib/transport/connectivity_state.h"
61
79
  #include "src/core/lib/transport/promise_endpoint.h"
62
80
  #include "src/core/lib/transport/transport.h"
81
+ #include "src/core/util/debug_location.h"
63
82
  #include "src/core/util/grpc_check.h"
83
+ #include "src/core/util/latent_see.h"
84
+ #include "src/core/util/orphanable.h"
64
85
  #include "src/core/util/ref_counted_ptr.h"
65
86
  #include "src/core/util/sync.h"
87
+ #include "src/core/util/time.h"
88
+ #include "absl/base/thread_annotations.h"
89
+ #include "absl/container/flat_hash_map.h"
90
+ #include "absl/log/log.h"
91
+ #include "absl/status/status.h"
92
+ #include "absl/strings/cord.h"
93
+ #include "absl/strings/str_cat.h"
94
+ #include "absl/strings/string_view.h"
95
+ #include "absl/types/span.h"
66
96
 
67
97
  namespace grpc_core {
68
98
  namespace http2 {
69
99
 
100
+ // TODO(akshitpatel)(tjagtap) [PH2][P2] : When settings frame increases incoming
101
+ // window size, our transport must make the streams that were blocked on stream
102
+ // flow control as writeable.
103
+
104
+ // As a gRPC server never initiates a stream, the last incoming stream id on
105
+ // the client side will always be 0.
106
+ constexpr uint32_t kLastIncomingStreamIdClient = 0;
107
+
70
108
  using grpc_event_engine::experimental::EventEngine;
71
- using EnqueueResult = StreamDataQueue<ClientMetadataHandle>::EnqueueResult;
109
+ using StreamWritabilityUpdate =
110
+ StreamDataQueue<ClientMetadataHandle>::StreamWritabilityUpdate;
72
111
 
73
112
  // Experimental : This is just the initial skeleton of class
74
113
  // and it is functions. The code will be written iteratively.
@@ -77,6 +116,32 @@ using EnqueueResult = StreamDataQueue<ClientMetadataHandle>::EnqueueResult;
77
116
  // TODO(tjagtap) : [PH2][P3] : Delete this comment when http2
78
117
  // rollout begins
79
118
 
119
+ template <typename Factory>
120
+ void Http2ClientTransport::SpawnInfallible(RefCountedPtr<Party> party,
121
+ absl::string_view name,
122
+ Factory&& factory) {
123
+ party->Spawn(name, std::forward<Factory>(factory), [](Empty) {});
124
+ }
125
+
126
+ template <typename Factory>
127
+ void Http2ClientTransport::SpawnInfallibleTransportParty(absl::string_view name,
128
+ Factory&& factory) {
129
+ SpawnInfallible(general_party_, name, std::forward<Factory>(factory));
130
+ }
131
+
132
+ template <typename Factory>
133
+ void Http2ClientTransport::SpawnGuardedTransportParty(absl::string_view name,
134
+ Factory&& factory) {
135
+ general_party_->Spawn(
136
+ name, std::forward<Factory>(factory),
137
+ [self = RefAsSubclass<Http2ClientTransport>()](absl::Status status) {
138
+ if (!status.ok()) {
139
+ GRPC_UNUSED absl::Status error = self->HandleError(
140
+ /*stream_id=*/std::nullopt, ToHttpOkOrConnError(status));
141
+ }
142
+ });
143
+ }
144
+
80
145
  void Http2ClientTransport::PerformOp(grpc_transport_op* op) {
81
146
  // Notes : Refer : src/core/ext/transport/chaotic_good/client_transport.cc
82
147
  // Functions : StartConnectivityWatch, StopConnectivityWatch, PerformOp
@@ -118,90 +183,145 @@ void Http2ClientTransport::StopConnectivityWatch(
118
183
  state_tracker_.RemoveWatcher(watcher);
119
184
  }
120
185
 
186
+ void Http2ClientTransport::ReportDisconnection(
187
+ const absl::Status& status, StateWatcher::DisconnectInfo disconnect_info,
188
+ const char* reason) {
189
+ MutexLock lock(&transport_mutex_);
190
+ ReportDisconnectionLocked(status, disconnect_info, reason);
191
+ }
192
+
193
+ void Http2ClientTransport::ReportDisconnectionLocked(
194
+ const absl::Status& status, StateWatcher::DisconnectInfo disconnect_info,
195
+ const char* reason) {
196
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ReportDisconnection: status="
197
+ << status.ToString() << "; reason=" << reason;
198
+ state_tracker_.SetState(GRPC_CHANNEL_TRANSIENT_FAILURE, status, reason);
199
+ NotifyStateWatcherOnDisconnectLocked(status, disconnect_info);
200
+ }
201
+
202
+ void Http2ClientTransport::StartWatch(RefCountedPtr<StateWatcher> watcher) {
203
+ MutexLock lock(&transport_mutex_);
204
+ GRPC_CHECK(watcher_ == nullptr);
205
+ watcher_ = std::move(watcher);
206
+ if (is_transport_closed_) {
207
+ // TODO(tjagtap) : [PH2][P2] : Provide better status message and
208
+ // disconnect info here.
209
+ NotifyStateWatcherOnDisconnectLocked(
210
+ absl::UnknownError("transport closed before watcher started"), {});
211
+ } else {
212
+ // TODO(tjagtap) : [PH2][P2] : Notify the state watcher of the current
213
+ // value of the peer's MAX_CONCURRENT_STREAMS setting.
214
+ }
215
+ }
216
+
217
+ void Http2ClientTransport::StopWatch(RefCountedPtr<StateWatcher> watcher) {
218
+ MutexLock lock(&transport_mutex_);
219
+ if (watcher_ == watcher) watcher_.reset();
220
+ }
221
+
222
+ void Http2ClientTransport::NotifyStateWatcherOnDisconnectLocked(
223
+ absl::Status status, StateWatcher::DisconnectInfo disconnect_info) {
224
+ if (watcher_ == nullptr) return;
225
+ event_engine_->Run([watcher = std::move(watcher_), status = std::move(status),
226
+ disconnect_info]() mutable {
227
+ ExecCtx exec_ctx;
228
+ watcher->OnDisconnect(std::move(status), disconnect_info);
229
+ watcher.reset(); // Before ExecCtx goes out of scope.
230
+ });
231
+ }
232
+
121
233
  void Http2ClientTransport::Orphan() {
122
234
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport Orphan Begin";
123
235
  // Accessing general_party here is not advisable. It may so happen that
124
236
  // the party is already freed/may free up any time. The only guarantee here
125
237
  // is that the transport is still valid.
126
- MaybeSpawnCloseTransport(Http2Status::AbslConnectionError(
127
- absl::StatusCode::kUnavailable, "Orphaned"));
238
+ SourceDestructing();
239
+ MaybeSpawnCloseTransport(
240
+ ToHttpOkOrConnError(absl::UnavailableError("Orphaned")));
128
241
  Unref();
129
242
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport Orphan End";
130
243
  }
131
244
 
132
- void Http2ClientTransport::AbortWithError() {
133
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport AbortWithError Begin";
134
- // TODO(tjagtap) : [PH2][P2] : Implement this function.
135
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport AbortWithError End";
136
- }
137
-
138
245
  ///////////////////////////////////////////////////////////////////////////////
139
246
  // Processing each type of frame
140
247
 
141
248
  Http2Status Http2ClientTransport::ProcessHttp2DataFrame(Http2DataFrame frame) {
142
249
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-data
143
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessHttp2DataFrame { stream_id="
144
- << frame.stream_id
145
- << ", end_stream=" << frame.end_stream
146
- << ", payload=" << frame.payload.JoinIntoString()
147
- << "}";
250
+ GRPC_HTTP2_CLIENT_DLOG
251
+ << "Http2ClientTransport ProcessHttp2DataFrame { stream_id="
252
+ << frame.stream_id << ", end_stream=" << frame.end_stream
253
+ << ", payload=" << MaybeTruncatePayload(frame.payload)
254
+ << ", payload length=" << frame.payload.Length() << "}";
148
255
 
149
256
  // TODO(akshitpatel) : [PH2][P3] : Investigate if we should do this even if
150
257
  // the function returns a non-ok status?
151
- ping_manager_.ReceivedDataFrame();
258
+ ping_manager_->ReceivedDataFrame();
152
259
 
153
260
  // Lookup stream
154
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessHttp2DataFrame LookupStream";
261
+ GRPC_HTTP2_CLIENT_DLOG
262
+ << "Http2ClientTransport ProcessHttp2DataFrame LookupStream";
155
263
  RefCountedPtr<Stream> stream = LookupStream(frame.stream_id);
264
+
265
+ ValueOrHttp2Status<chttp2::FlowControlAction> flow_control_action =
266
+ ProcessIncomingDataFrameFlowControl(current_frame_header_, flow_control_,
267
+ stream);
268
+ if (!flow_control_action.IsOk()) {
269
+ return ValueOrHttp2Status<chttp2::FlowControlAction>::TakeStatus(
270
+ std::move(flow_control_action));
271
+ }
272
+ ActOnFlowControlAction(flow_control_action.value(), stream);
273
+
156
274
  if (stream == nullptr) {
157
275
  // TODO(tjagtap) : [PH2][P2] : Implement the correct behaviour later.
158
276
  // RFC9113 : If a DATA frame is received whose stream is not in the "open"
159
277
  // or "half-closed (local)" state, the recipient MUST respond with a stream
160
278
  // error (Section 5.4.2) of type STREAM_CLOSED.
161
279
  GRPC_HTTP2_CLIENT_DLOG
162
- << "Http2Transport ProcessHttp2DataFrame { stream_id="
280
+ << "Http2ClientTransport ProcessHttp2DataFrame { stream_id="
163
281
  << frame.stream_id << "} Lookup Failed";
164
282
  return Http2Status::Ok();
165
283
  }
166
284
 
167
- if (stream->GetStreamState() == HttpStreamState::kHalfClosedRemote) {
168
- return Http2Status::Http2StreamError(
169
- Http2ErrorCode::kStreamClosed,
170
- std::string(RFC9113::kHalfClosedRemoteState));
285
+ // TODO(akshitpatel) : [PH2][P3] : We should add a check to reset stream if
286
+ // the stream state is kIdle as well.
287
+
288
+ Http2Status stream_status = stream->CanStreamReceiveDataFrames();
289
+ if (!stream_status.IsOk()) {
290
+ return stream_status;
171
291
  }
172
292
 
173
293
  // Add frame to assembler
174
294
  GRPC_HTTP2_CLIENT_DLOG
175
- << "Http2Transport ProcessHttp2DataFrame AppendNewDataFrame";
295
+ << "Http2ClientTransport ProcessHttp2DataFrame AppendNewDataFrame";
176
296
  GrpcMessageAssembler& assembler = stream->assembler;
177
297
  Http2Status status =
178
298
  assembler.AppendNewDataFrame(frame.payload, frame.end_stream);
179
299
  if (!status.IsOk()) {
180
- GRPC_HTTP2_CLIENT_DLOG
181
- << "Http2Transport ProcessHttp2DataFrame AppendNewDataFrame Failed";
300
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ProcessHttp2DataFrame "
301
+ "AppendNewDataFrame Failed";
182
302
  return status;
183
303
  }
184
304
 
185
305
  // Pass the messages up the stack if it is ready.
186
306
  while (true) {
187
307
  GRPC_HTTP2_CLIENT_DLOG
188
- << "Http2Transport ProcessHttp2DataFrame ExtractMessage";
308
+ << "Http2ClientTransport ProcessHttp2DataFrame ExtractMessage";
189
309
  ValueOrHttp2Status<MessageHandle> result = assembler.ExtractMessage();
190
310
  if (!result.IsOk()) {
191
311
  GRPC_HTTP2_CLIENT_DLOG
192
- << "Http2Transport ProcessHttp2DataFrame ExtractMessage Failed";
312
+ << "Http2ClientTransport ProcessHttp2DataFrame ExtractMessage Failed";
193
313
  return ValueOrHttp2Status<MessageHandle>::TakeStatus(std::move(result));
194
314
  }
195
315
  MessageHandle message = TakeValue(std::move(result));
196
316
  if (message != nullptr) {
197
317
  GRPC_HTTP2_CLIENT_DLOG
198
- << "Http2Transport ProcessHttp2DataFrame SpawnPushMessage "
318
+ << "Http2ClientTransport ProcessHttp2DataFrame SpawnPushMessage "
199
319
  << message->DebugString();
200
320
  stream->call.SpawnPushMessage(std::move(message));
201
321
  continue;
202
322
  }
203
323
  GRPC_HTTP2_CLIENT_DLOG
204
- << "Http2Transport ProcessHttp2DataFrame While Break";
324
+ << "Http2ClientTransport ProcessHttp2DataFrame While Break";
205
325
  break;
206
326
  }
207
327
 
@@ -221,11 +341,14 @@ Http2Status Http2ClientTransport::ProcessHttp2HeaderFrame(
221
341
  Http2HeaderFrame frame) {
222
342
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-headers
223
343
  GRPC_HTTP2_CLIENT_DLOG
224
- << "Http2Transport ProcessHttp2HeaderFrame Promise { stream_id="
344
+ << "Http2ClientTransport ProcessHttp2HeaderFrame Promise { stream_id="
225
345
  << frame.stream_id << ", end_headers=" << frame.end_headers
226
346
  << ", end_stream=" << frame.end_stream
227
- << ", payload=" << frame.payload.JoinIntoString() << " }";
228
- ping_manager_.ReceivedDataFrame();
347
+ << ", payload=" << MaybeTruncatePayload(frame.payload) << " }";
348
+ // State update MUST happen before processing the frame.
349
+ incoming_headers_.OnHeaderReceived(frame);
350
+
351
+ ping_manager_->ReceivedDataFrame();
229
352
 
230
353
  RefCountedPtr<Stream> stream = LookupStream(frame.stream_id);
231
354
  if (stream == nullptr) {
@@ -237,71 +360,82 @@ Http2Status Http2ClientTransport::ProcessHttp2HeaderFrame(
237
360
  // receives an unexpected stream identifier MUST respond with a connection
238
361
  // error (Section 5.4.1) of type PROTOCOL_ERROR.
239
362
  GRPC_HTTP2_CLIENT_DLOG
240
- << "Http2Transport ProcessHttp2HeaderFrame Promise { stream_id="
363
+ << "Http2ClientTransport ProcessHttp2HeaderFrame Promise { stream_id="
241
364
  << frame.stream_id << "} Lookup Failed";
242
- return Http2Status::Ok();
365
+ return ParseAndDiscardHeaders(std::move(frame.payload), frame.end_headers,
366
+ /*stream=*/nullptr, Http2Status::Ok());
243
367
  }
244
- if (stream->GetStreamState() == HttpStreamState::kHalfClosedRemote) {
245
- return Http2Status::Http2StreamError(
246
- Http2ErrorCode::kStreamClosed,
247
- std::string(RFC9113::kHalfClosedRemoteState));
368
+
369
+ if (stream->IsStreamHalfClosedRemote()) {
370
+ return ParseAndDiscardHeaders(
371
+ std::move(frame.payload), frame.end_headers, stream,
372
+ Http2Status::Http2StreamError(
373
+ Http2ErrorCode::kStreamClosed,
374
+ std::string(RFC9113::kHalfClosedRemoteState)));
248
375
  }
249
376
 
250
- incoming_header_in_progress_ = !frame.end_headers;
251
- incoming_header_stream_id_ = frame.stream_id;
252
- incoming_header_end_stream_ = frame.end_stream;
253
- if ((incoming_header_end_stream_ && stream->did_push_trailing_metadata) ||
254
- (!incoming_header_end_stream_ && stream->did_push_initial_metadata)) {
255
- return Http2Status::Http2StreamError(
256
- Http2ErrorCode::kInternalError,
257
- "gRPC Error : A gRPC server can send upto 1 initial metadata followed "
258
- "by upto 1 trailing metadata");
377
+ if (incoming_headers_.ClientReceivedDuplicateMetadata(
378
+ stream->did_receive_initial_metadata,
379
+ stream->did_receive_trailing_metadata)) {
380
+ return ParseAndDiscardHeaders(
381
+ std::move(frame.payload), frame.end_headers, stream,
382
+ Http2Status::Http2StreamError(
383
+ Http2ErrorCode::kInternalError,
384
+ std::string(GrpcErrors::kTooManyMetadata)));
259
385
  }
260
386
 
261
- HeaderAssembler& assembler = stream->header_assembler;
262
- Http2Status append_result = assembler.AppendHeaderFrame(std::move(frame));
263
- if (append_result.IsOk()) {
264
- return ProcessMetadata(stream);
387
+ Http2Status append_result = stream->header_assembler.AppendHeaderFrame(frame);
388
+ if (!append_result.IsOk()) {
389
+ // Frame payload is not consumed if AppendHeaderFrame returns a non-OK
390
+ // status. We need to process it to keep our in consistent state.
391
+ return ParseAndDiscardHeaders(std::move(frame.payload), frame.end_headers,
392
+ stream, std::move(append_result));
265
393
  }
266
- return append_result;
394
+
395
+ Http2Status status = ProcessMetadata(stream);
396
+ if (!status.IsOk()) {
397
+ // Frame payload has been moved to the HeaderAssembler. So calling
398
+ // ParseAndDiscardHeaders with an empty buffer.
399
+ return ParseAndDiscardHeaders(SliceBuffer(), frame.end_headers, stream,
400
+ std::move(status));
401
+ }
402
+
403
+ // Frame payload has either been processed or moved to the HeaderAssembler.
404
+ return Http2Status::Ok();
267
405
  }
268
406
 
269
407
  Http2Status Http2ClientTransport::ProcessMetadata(
270
408
  RefCountedPtr<Stream> stream) {
271
- const uint32_t stream_id = stream->GetStreamId();
272
409
  HeaderAssembler& assembler = stream->header_assembler;
273
410
  CallHandler call = stream->call;
274
411
 
275
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessMetadata";
412
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ProcessMetadata";
276
413
  if (assembler.IsReady()) {
277
414
  ValueOrHttp2Status<ServerMetadataHandle> read_result =
278
- assembler.ReadMetadata(parser_, !incoming_header_end_stream_,
415
+ assembler.ReadMetadata(parser_, !incoming_headers_.HeaderHasEndStream(),
279
416
  /*is_client=*/true,
280
417
  /*max_header_list_size_soft_limit=*/
281
- max_header_list_size_soft_limit_,
418
+ incoming_headers_.soft_limit(),
282
419
  /*max_header_list_size_hard_limit=*/
283
- settings_.acked().max_header_list_size());
420
+ settings_->acked().max_header_list_size());
284
421
  if (read_result.IsOk()) {
285
422
  ServerMetadataHandle metadata = TakeValue(std::move(read_result));
286
- if (incoming_header_end_stream_) {
423
+ if (incoming_headers_.HeaderHasEndStream()) {
287
424
  // TODO(tjagtap) : [PH2][P1] : Is this the right way to differentiate
288
425
  // between initial and trailing metadata?
289
- GRPC_HTTP2_CLIENT_DLOG
290
- << "Http2Transport ProcessMetadata SpawnPushServerTrailingMetadata";
291
426
  stream->MarkHalfClosedRemote();
292
- BeginCloseStream(
293
- stream_id,
294
- Http2ErrorCodeToRstFrameErrorCode(Http2ErrorCode::kNoError),
295
- std::move(metadata));
427
+ stream->did_receive_trailing_metadata = true;
428
+ BeginCloseStream(stream, /*reset_stream_error_code=*/std::nullopt,
429
+ std::move(metadata));
296
430
  } else {
297
- GRPC_HTTP2_CLIENT_DLOG
298
- << "Http2Transport ProcessMetadata SpawnPushServerInitialMetadata";
299
- stream->did_push_initial_metadata = true;
431
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ProcessMetadata "
432
+ "SpawnPushServerInitialMetadata";
433
+ stream->did_receive_initial_metadata = true;
300
434
  call.SpawnPushServerInitialMetadata(std::move(metadata));
301
435
  }
302
436
  return Http2Status::Ok();
303
437
  }
304
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessMetadata Failed";
438
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ProcessMetadata Failed";
305
439
  return ValueOrHttp2Status<Arena::PoolPtr<grpc_metadata_batch>>::TakeStatus(
306
440
  std::move(read_result));
307
441
  }
@@ -312,14 +446,18 @@ Http2Status Http2ClientTransport::ProcessHttp2RstStreamFrame(
312
446
  Http2RstStreamFrame frame) {
313
447
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-rst_stream
314
448
  GRPC_HTTP2_CLIENT_DLOG
315
- << "Http2Transport ProcessHttp2RstStreamFrame { stream_id="
449
+ << "Http2ClientTransport ProcessHttp2RstStreamFrame { stream_id="
316
450
  << frame.stream_id << ", error_code=" << frame.error_code << " }";
317
- Http2ErrorCode error_code =
318
- RstFrameErrorCodeToHttp2ErrorCode(frame.error_code);
451
+
452
+ Http2ErrorCode error_code = FrameErrorCodeToHttp2ErrorCode(frame.error_code);
319
453
  absl::Status status = absl::Status(ErrorCodeToAbslStatusCode(error_code),
320
454
  "Reset stream frame received.");
321
- BeginCloseStream(frame.stream_id, /*reset_stream_error_code=*/std::nullopt,
322
- CancelledServerMetadataFromStatus(status));
455
+ RefCountedPtr<Stream> stream = LookupStream(frame.stream_id);
456
+ if (stream != nullptr) {
457
+ stream->MarkHalfClosedRemote();
458
+ BeginCloseStream(stream, /*reset_stream_error_code=*/std::nullopt,
459
+ CancelledServerMetadataFromStatus(status));
460
+ }
323
461
 
324
462
  // In case of stream error, we do not want the Read Loop to be broken. Hence
325
463
  // returning an ok status.
@@ -330,33 +468,28 @@ Http2Status Http2ClientTransport::ProcessHttp2SettingsFrame(
330
468
  Http2SettingsFrame frame) {
331
469
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-settings
332
470
 
333
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessHttp2SettingsFrame { ack="
334
- << frame.ack
335
- << ", settings length=" << frame.settings.size()
336
- << "}";
337
-
338
- // The connector code needs us to run this
339
- if (on_receive_settings_ != nullptr) {
340
- ExecCtx::Run(DEBUG_LOCATION, on_receive_settings_, absl::OkStatus());
341
- on_receive_settings_ = nullptr;
342
- }
471
+ GRPC_HTTP2_CLIENT_DLOG
472
+ << "Http2ClientTransport ProcessHttp2SettingsFrame { ack=" << frame.ack
473
+ << ", settings length=" << frame.settings.size() << "}";
343
474
 
344
- // TODO(tjagtap) : [PH2][P2] Decide later if we want this only for AckLastSend
345
- // or does any other operation also need this lock.
346
- MutexLock lock(&transport_mutex_);
347
475
  if (!frame.ack) {
348
- // Check if the received settings have legal values
349
476
  Http2Status status = ValidateSettingsValues(frame.settings);
350
477
  if (!status.IsOk()) {
351
478
  return status;
352
479
  }
353
- // TODO(tjagtap) : [PH2][P1]
354
- // Apply the new settings
355
- // Quickly send the ACK to the peer once the settings are applied
480
+ settings_->BufferPeerSettings(std::move(frame.settings));
481
+ SpawnGuardedTransportParty("SettingsAck", TriggerWriteCycle());
482
+ if (GPR_UNLIKELY(!settings_->IsFirstPeerSettingsApplied())) {
483
+ // Apply the first settings before we read any other frames.
484
+ reader_state_.SetPauseReadLoop();
485
+ }
356
486
  } else {
357
- // Process the SETTINGS ACK Frame
358
- if (settings_.AckLastSend()) {
359
- transport_settings_.OnSettingsAckReceived();
487
+ if (settings_->OnSettingsAckReceived()) {
488
+ parser_.hpack_table()->SetMaxBytes(
489
+ settings_->acked().header_table_size());
490
+ ActOnFlowControlAction(flow_control_.SetAckedInitialWindow(
491
+ settings_->acked().initial_window_size()),
492
+ /*stream=*/nullptr);
360
493
  } else {
361
494
  // TODO(tjagtap) [PH2][P4] : The RFC does not say anything about what
362
495
  // should happen if we receive an unsolicited SETTINGS ACK. Decide if we
@@ -370,7 +503,7 @@ Http2Status Http2ClientTransport::ProcessHttp2SettingsFrame(
370
503
 
371
504
  auto Http2ClientTransport::ProcessHttp2PingFrame(Http2PingFrame frame) {
372
505
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-ping
373
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessHttp2PingFrame { ack="
506
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ProcessHttp2PingFrame { ack="
374
507
  << frame.ack << ", opaque=" << frame.opaque << " }";
375
508
  return AssertResultType<Http2Status>(If(
376
509
  frame.ack,
@@ -379,37 +512,114 @@ auto Http2ClientTransport::ProcessHttp2PingFrame(Http2PingFrame frame) {
379
512
  return self->AckPing(opaque);
380
513
  },
381
514
  [self = RefAsSubclass<Http2ClientTransport>(), opaque = frame.opaque]() {
382
- // TODO(akshitpatel) : [PH2][P2] : Have a counter to track number of
383
- // pending induced frames (Ping/Settings Ack). This is to ensure that
384
- // if write is taking a long time, we can stop reads and prioritize
385
- // writes.
386
- // RFC9113: PING responses SHOULD be given higher priority than any
387
- // other frame.
388
- self->ping_manager_.AddPendingPingAck(opaque);
389
- // TODO(akshitpatel) : [PH2][P2] : This is done assuming that the other
390
- // ProcessFrame promises may return stream or connection failures. If
391
- // this does not turn out to be true, consider returning absl::Status
392
- // here.
393
- return Map(self->TriggerWriteCycle(), [](absl::Status status) {
394
- return (status.ok())
395
- ? Http2Status::Ok()
396
- : Http2Status::AbslConnectionError(
397
- status.code(), std::string(status.message()));
398
- });
515
+ return If(
516
+ self->test_only_ack_pings_,
517
+ [self, opaque]() {
518
+ // TODO(akshitpatel) : [PH2][P2] : Have a counter to track number
519
+ // of pending induced frames (Ping/Settings Ack). This is to
520
+ // ensure that if write is taking a long time, we can stop reads
521
+ // and prioritize writes. RFC9113: PING responses SHOULD be given
522
+ // higher priority than any other frame.
523
+ self->ping_manager_->AddPendingPingAck(opaque);
524
+ // TODO(akshitpatel) : [PH2][P2] : This is done assuming that the
525
+ // other ProcessFrame promises may return stream or connection
526
+ // failures. If this does not turn out to be true, consider
527
+ // returning absl::Status here.
528
+ return Map(self->TriggerWriteCycle(), [](absl::Status status) {
529
+ return ToHttpOkOrConnError(status);
530
+ });
531
+ },
532
+ []() {
533
+ GRPC_HTTP2_CLIENT_DLOG
534
+ << "Http2ClientTransport ProcessHttp2PingFrame "
535
+ "test_only_ack_pings_ is false. Ignoring the ping "
536
+ "request.";
537
+ return Immediate(Http2Status::Ok());
538
+ });
399
539
  }));
400
540
  }
401
541
 
402
542
  Http2Status Http2ClientTransport::ProcessHttp2GoawayFrame(
403
543
  Http2GoawayFrame frame) {
404
544
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-goaway
405
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessHttp2GoawayFrame Factory";
406
- // TODO(tjagtap) : [PH2][P2] : Implement this.
407
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessHttp2GoawayFrame Promise { "
408
- "last_stream_id="
409
- << frame.last_stream_id
410
- << ", error_code=" << frame.error_code
411
- << ", debug_data=" << frame.debug_data.as_string_view()
412
- << "}";
545
+ GRPC_HTTP2_CLIENT_DLOG
546
+ << "Http2ClientTransport ProcessHttp2GoawayFrame Promise { "
547
+ "last_stream_id="
548
+ << frame.last_stream_id << ", error_code=" << frame.error_code
549
+ << ", debug_data=" << frame.debug_data.as_string_view() << "}";
550
+ LOG_IF(ERROR,
551
+ frame.error_code != static_cast<uint32_t>(Http2ErrorCode::kNoError))
552
+ << "Received GOAWAY frame with error code: " << frame.error_code
553
+ << " and debug data: " << frame.debug_data.as_string_view();
554
+
555
+ uint32_t last_stream_id = 0;
556
+ absl::Status status(ErrorCodeToAbslStatusCode(
557
+ FrameErrorCodeToHttp2ErrorCode(frame.error_code)),
558
+ frame.debug_data.as_string_view());
559
+ if (frame.error_code == static_cast<uint32_t>(Http2ErrorCode::kNoError) &&
560
+ frame.last_stream_id == RFC9113::kMaxStreamId31Bit) {
561
+ const uint32_t next_stream_id = PeekNextStreamId();
562
+ last_stream_id = (next_stream_id > 1) ? next_stream_id - 2 : 0;
563
+ } else {
564
+ last_stream_id = frame.last_stream_id;
565
+ }
566
+ SetMaxAllowedStreamId(last_stream_id);
567
+
568
+ bool close_transport = false;
569
+ {
570
+ MutexLock lock(&transport_mutex_);
571
+ if (CanCloseTransportLocked()) {
572
+ close_transport = true;
573
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ProcessHttp2GoawayFrame "
574
+ "stream_list_ is empty";
575
+ }
576
+ }
577
+
578
+ StateWatcher::DisconnectInfo disconnect_info;
579
+ disconnect_info.reason = Transport::StateWatcher::kGoaway;
580
+ disconnect_info.http2_error_code =
581
+ static_cast<Http2ErrorCode>(frame.error_code);
582
+
583
+ // Throttle keepalive time if the server sends a GOAWAY with error code
584
+ // ENHANCE_YOUR_CALM and debug data equal to "too_many_pings". This will
585
+ // apply to any new transport created on by any subchannel of this channel.
586
+ if (GPR_UNLIKELY(frame.error_code == static_cast<uint32_t>(
587
+ Http2ErrorCode::kEnhanceYourCalm) &&
588
+ frame.debug_data == "too_many_pings")) {
589
+ LOG(ERROR) << ": Received a GOAWAY with error code ENHANCE_YOUR_CALM and "
590
+ "debug data equal to \"too_many_pings\". Current keepalive "
591
+ "time (before throttling): "
592
+ << keepalive_time_.ToString();
593
+ constexpr int max_keepalive_time_millis =
594
+ INT_MAX / KEEPALIVE_TIME_BACKOFF_MULTIPLIER;
595
+ uint64_t throttled_keepalive_time =
596
+ keepalive_time_.millis() > max_keepalive_time_millis
597
+ ? INT_MAX
598
+ : keepalive_time_.millis() * KEEPALIVE_TIME_BACKOFF_MULTIPLIER;
599
+ if (!IsTransportStateWatcherEnabled()) {
600
+ status.SetPayload(kKeepaliveThrottlingKey,
601
+ absl::Cord(std::to_string(throttled_keepalive_time)));
602
+ }
603
+ disconnect_info.keepalive_time =
604
+ Duration::Milliseconds(throttled_keepalive_time);
605
+ }
606
+
607
+ if (close_transport) {
608
+ // TODO(akshitpatel) : [PH2][P3] : Ideally the error here should be
609
+ // kNoError. However, Http2Status does not support kNoError. We should
610
+ // revisit this and update the error code.
611
+ MaybeSpawnCloseTransport(Http2Status::Http2ConnectionError(
612
+ FrameErrorCodeToHttp2ErrorCode((
613
+ frame.error_code ==
614
+ Http2ErrorCodeToFrameErrorCode(Http2ErrorCode::kNoError)
615
+ ? Http2ErrorCodeToFrameErrorCode(Http2ErrorCode::kInternalError)
616
+ : frame.error_code)),
617
+ std::string(frame.debug_data.as_string_view())));
618
+ }
619
+
620
+ // lie: use transient failure from the transport to indicate goaway has been
621
+ // received.
622
+ ReportDisconnection(status, disconnect_info, "got_goaway");
413
623
  return Http2Status::Ok();
414
624
  }
415
625
 
@@ -417,23 +627,30 @@ Http2Status Http2ClientTransport::ProcessHttp2WindowUpdateFrame(
417
627
  Http2WindowUpdateFrame frame) {
418
628
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-window_update
419
629
  GRPC_HTTP2_CLIENT_DLOG
420
- << "Http2Transport ProcessHttp2WindowUpdateFrame Factory";
421
- // TODO(tjagtap) : [PH2][P2] : Implement this.
422
- GRPC_HTTP2_CLIENT_DLOG
423
- << "Http2Transport ProcessHttp2WindowUpdateFrame Promise { "
630
+ << "Http2ClientTransport ProcessHttp2WindowUpdateFrame Promise { "
424
631
  " stream_id="
425
632
  << frame.stream_id << ", increment=" << frame.increment << "}";
633
+
634
+ RefCountedPtr<Stream> stream = nullptr;
426
635
  if (frame.stream_id != 0) {
427
- RefCountedPtr<Stream> stream = LookupStream(frame.stream_id);
428
- if (stream != nullptr) {
429
- chttp2::StreamFlowControl::OutgoingUpdateContext fc_update(
430
- &stream->flow_control);
431
- fc_update.RecvUpdate(frame.increment);
636
+ stream = LookupStream(frame.stream_id);
637
+ }
638
+ if (stream != nullptr) {
639
+ StreamWritabilityUpdate update =
640
+ stream->ReceivedFlowControlWindowUpdate(frame.increment);
641
+ if (update.became_writable) {
642
+ absl::Status status = writable_stream_list_.EnqueueWrapper(
643
+ stream, update.priority, AreTransportFlowControlTokensAvailable());
644
+ if (!status.ok()) {
645
+ return ToHttpOkOrConnError(status);
646
+ }
432
647
  }
433
- } else {
434
- chttp2::TransportFlowControl::OutgoingUpdateContext fc_update(
435
- &flow_control_);
436
- fc_update.RecvUpdate(frame.increment);
648
+ }
649
+
650
+ const bool should_trigger_write =
651
+ ProcessIncomingWindowUpdateFrameFlowControl(frame, flow_control_, stream);
652
+ if (should_trigger_write) {
653
+ SpawnGuardedTransportParty("TransportTokensAvailable", TriggerWriteCycle());
437
654
  }
438
655
  return Http2Status::Ok();
439
656
  }
@@ -442,11 +659,14 @@ Http2Status Http2ClientTransport::ProcessHttp2ContinuationFrame(
442
659
  Http2ContinuationFrame frame) {
443
660
  // https://www.rfc-editor.org/rfc/rfc9113.html#name-continuation
444
661
  GRPC_HTTP2_CLIENT_DLOG
445
- << "Http2Transport ProcessHttp2ContinuationFrame Promise { "
662
+ << "Http2ClientTransport ProcessHttp2ContinuationFrame Promise { "
446
663
  "stream_id="
447
664
  << frame.stream_id << ", end_headers=" << frame.end_headers
448
- << ", payload=" << frame.payload.JoinIntoString() << " }";
449
- incoming_header_in_progress_ = !frame.end_headers;
665
+ << ", payload=" << MaybeTruncatePayload(frame.payload) << " }";
666
+
667
+ // State update MUST happen before processing the frame.
668
+ incoming_headers_.OnContinuationReceived(frame);
669
+
450
670
  RefCountedPtr<Stream> stream = LookupStream(frame.stream_id);
451
671
  if (stream == nullptr) {
452
672
  // TODO(tjagtap) : [PH2][P3] : Implement this.
@@ -456,32 +676,45 @@ Http2Status Http2ClientTransport::ProcessHttp2ContinuationFrame(
456
676
  // frame and streams that are reserved using PUSH_PROMISE. An endpoint that
457
677
  // receives an unexpected stream identifier MUST respond with a connection
458
678
  // error (Section 5.4.1) of type PROTOCOL_ERROR.
459
- return Http2Status::Ok();
679
+ return ParseAndDiscardHeaders(std::move(frame.payload), frame.end_headers,
680
+ nullptr, Http2Status::Ok());
681
+ }
682
+
683
+ if (stream->IsStreamHalfClosedRemote()) {
684
+ return ParseAndDiscardHeaders(
685
+ std::move(frame.payload), frame.end_headers, stream,
686
+ Http2Status::Http2StreamError(
687
+ Http2ErrorCode::kStreamClosed,
688
+ std::string(RFC9113::kHalfClosedRemoteState)));
460
689
  }
461
- if (stream->GetStreamState() == HttpStreamState::kHalfClosedRemote) {
462
- return Http2Status::Http2StreamError(
463
- Http2ErrorCode::kStreamClosed,
464
- std::string(RFC9113::kHalfClosedRemoteState));
690
+
691
+ Http2Status append_result =
692
+ stream->header_assembler.AppendContinuationFrame(frame);
693
+ if (!append_result.IsOk()) {
694
+ // Frame payload is not consumed if AppendContinuationFrame returns a
695
+ // non-OK status. We need to process it to keep our in consistent state.
696
+ return ParseAndDiscardHeaders(std::move(frame.payload), frame.end_headers,
697
+ stream, std::move(append_result));
465
698
  }
466
699
 
467
- HeaderAssembler& assember = stream->header_assembler;
468
- Http2Status result = assember.AppendContinuationFrame(std::move(frame));
469
- if (result.IsOk()) {
470
- return ProcessMetadata(stream);
700
+ Http2Status status = ProcessMetadata(stream);
701
+ if (!status.IsOk()) {
702
+ // Frame payload is consumed by HeaderAssembler. So passing an empty
703
+ // SliceBuffer to ParseAndDiscardHeaders.
704
+ return ParseAndDiscardHeaders(SliceBuffer(), frame.end_headers, stream,
705
+ std::move(status));
471
706
  }
472
- return result;
707
+
708
+ // Frame payload has either been processed or moved to the HeaderAssembler.
709
+ return Http2Status::Ok();
473
710
  }
474
711
 
475
712
  Http2Status Http2ClientTransport::ProcessHttp2SecurityFrame(
476
713
  Http2SecurityFrame frame) {
477
- GRPC_HTTP2_CLIENT_DLOG << "Http2Transport ProcessHttp2SecurityFrame "
478
- "ProcessHttp2SecurityFrame { payload="
479
- << frame.payload.JoinIntoString() << " }";
480
- if ((settings_.acked().allow_security_frame() ||
481
- settings_.local().allow_security_frame()) &&
482
- settings_.peer().allow_security_frame()) {
483
- // TODO(tjagtap) : [PH2][P4] : Evaluate when to accept the frame and when to
484
- // reject it. Compare it with the requirement and with CHTTP2.
714
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport ProcessHttp2SecurityFrame "
715
+ "{ payload.Length="
716
+ << frame.payload.Length() << " }";
717
+ if (settings_->IsSecurityFrameExpected()) {
485
718
  // TODO(tjagtap) : [PH2][P3] : Add handling of Security frame
486
719
  // Just the frame.payload needs to be passed to the endpoint_ object.
487
720
  // Refer usage of TransportFramingEndpointExtension.
@@ -525,7 +758,7 @@ auto Http2ClientTransport::ProcessOneFrame(Http2Frame frame) {
525
758
  return self->ProcessHttp2SecurityFrame(std::move(frame));
526
759
  },
527
760
  [](GRPC_UNUSED Http2UnknownFrame frame) {
528
- // As per HTTP2 RFC, implementations MUST ignore and discard frames of
761
+ // RFC9113: Implementations MUST ignore and discard frames of
529
762
  // unknown types.
530
763
  return Http2Status::Ok();
531
764
  },
@@ -536,8 +769,64 @@ auto Http2ClientTransport::ProcessOneFrame(Http2Frame frame) {
536
769
  }));
537
770
  }
538
771
 
772
+ Http2Status Http2ClientTransport::ParseAndDiscardHeaders(
773
+ SliceBuffer&& buffer, const bool is_end_headers,
774
+ const RefCountedPtr<Stream> stream, Http2Status&& original_status,
775
+ DebugLocation whence) {
776
+ const bool is_initial_metadata = !incoming_headers_.HeaderHasEndStream();
777
+ const uint32_t incoming_stream_id = incoming_headers_.GetStreamId();
778
+ GRPC_HTTP2_CLIENT_DLOG
779
+ << "Http2ClientTransport ParseAndDiscardHeaders buffer "
780
+ "size: "
781
+ << buffer.Length() << " is_initial_metadata: " << is_initial_metadata
782
+ << " is_end_headers: " << is_end_headers
783
+ << " incoming_stream_id: " << incoming_stream_id
784
+ << " stream_id: " << (stream == nullptr ? 0 : stream->GetStreamId())
785
+ << " original_status: " << original_status.DebugString()
786
+ << " whence: " << whence.file() << ":" << whence.line();
787
+
788
+ return http2::ParseAndDiscardHeaders(
789
+ parser_, std::move(buffer),
790
+ HeaderAssembler::ParseHeaderArgs{
791
+ /*is_initial_metadata=*/is_initial_metadata,
792
+ /*is_end_headers=*/is_end_headers,
793
+ /*is_client=*/true,
794
+ /*max_header_list_size_soft_limit=*/
795
+ incoming_headers_.soft_limit(),
796
+ /*max_header_list_size_hard_limit=*/
797
+ settings_->acked().max_header_list_size(),
798
+ /*stream_id=*/incoming_stream_id,
799
+ },
800
+ stream, std::move(original_status));
801
+ }
802
+
539
803
  ///////////////////////////////////////////////////////////////////////////////
540
804
  // Read Related Promises and Promise Factories
805
+ auto Http2ClientTransport::EndpointReadSlice(const size_t num_bytes) {
806
+ return Map(
807
+ endpoint_.ReadSlice(num_bytes),
808
+ [self = RefAsSubclass<Http2ClientTransport>(),
809
+ num_bytes](absl::StatusOr<Slice> status) {
810
+ if (status.ok()) {
811
+ self->keepalive_manager_->GotData();
812
+ self->ztrace_collector_->Append(PromiseEndpointReadTrace{num_bytes});
813
+ }
814
+ return status;
815
+ });
816
+ }
817
+
818
+ auto Http2ClientTransport::EndpointRead(const size_t num_bytes) {
819
+ return Map(
820
+ endpoint_.Read(num_bytes),
821
+ [self = RefAsSubclass<Http2ClientTransport>(),
822
+ num_bytes](absl::StatusOr<SliceBuffer> status) {
823
+ if (status.ok()) {
824
+ self->keepalive_manager_->GotData();
825
+ self->ztrace_collector_->Append(PromiseEndpointReadTrace{num_bytes});
826
+ }
827
+ return status;
828
+ });
829
+ }
541
830
 
542
831
  auto Http2ClientTransport::ReadAndProcessOneFrame() {
543
832
  GRPC_HTTP2_CLIENT_DLOG
@@ -556,10 +845,16 @@ auto Http2ClientTransport::ReadAndProcessOneFrame() {
556
845
  // Validate the incoming frame as per the current state of the transport
557
846
  [self = RefAsSubclass<Http2ClientTransport>()](Http2FrameHeader header) {
558
847
  Http2Status status = ValidateFrameHeader(
559
- /*max_frame_size_setting*/ self->settings_.acked().max_frame_size(),
560
- /*incoming_header_in_progress*/ self->incoming_header_in_progress_,
561
- /*incoming_header_stream_id*/ self->incoming_header_stream_id_,
562
- /*current_frame_header*/ header);
848
+ /*max_frame_size_setting*/ self->settings_->acked()
849
+ .max_frame_size(),
850
+ /*incoming_header_in_progress*/
851
+ self->incoming_headers_.IsWaitingForContinuationFrame(),
852
+ /*incoming_header_stream_id*/
853
+ self->incoming_headers_.GetStreamId(),
854
+ /*current_frame_header*/ header,
855
+ /*last_stream_id=*/self->GetLastStreamId(),
856
+ /*is_client=*/true, /*is_first_settings_processed=*/
857
+ self->settings_->IsFirstPeerSettingsApplied());
563
858
 
564
859
  if (GPR_UNLIKELY(!status.IsOk())) {
565
860
  GRPC_DCHECK(status.GetType() ==
@@ -584,7 +879,7 @@ auto Http2ClientTransport::ReadAndProcessOneFrame() {
584
879
  SliceBuffer payload) -> absl::StatusOr<Http2Frame> {
585
880
  GRPC_HTTP2_CLIENT_DLOG
586
881
  << "Http2ClientTransport ReadAndProcessOneFrame ParseFramePayload "
587
- << payload.JoinIntoString();
882
+ << MaybeTruncatePayload(payload);
588
883
  ValueOrHttp2Status<Http2Frame> frame =
589
884
  ParseFramePayload(self->current_frame_header_, std::move(payload));
590
885
  if (!frame.IsOk()) {
@@ -607,6 +902,9 @@ auto Http2ClientTransport::ReadAndProcessOneFrame() {
607
902
  }
608
903
  return absl::OkStatus();
609
904
  }));
905
+ },
906
+ [self = RefAsSubclass<Http2ClientTransport>()]() -> Poll<absl::Status> {
907
+ return self->reader_state_.MaybePauseReadLoop();
610
908
  }));
611
909
  }
612
910
 
@@ -623,95 +921,155 @@ auto Http2ClientTransport::ReadLoop() {
623
921
  }));
624
922
  }
625
923
 
626
- auto Http2ClientTransport::OnReadLoopEnded() {
627
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport OnReadLoopEnded Factory";
628
- return
629
- [self = RefAsSubclass<Http2ClientTransport>()](absl::Status status) {
924
+ ///////////////////////////////////////////////////////////////////////////////
925
+ // Flow Control for the Transport
926
+
927
+ auto Http2ClientTransport::FlowControlPeriodicUpdateLoop() {
928
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport PeriodicUpdateLoop Factory";
929
+ return AssertResultType<absl::Status>(
930
+ Loop([self = RefAsSubclass<Http2ClientTransport>()]() {
630
931
  GRPC_HTTP2_CLIENT_DLOG
631
- << "Http2ClientTransport OnReadLoopEnded Promise Status=" << status;
632
- GRPC_UNUSED absl::Status error = self->HandleError(
633
- std::nullopt, Http2Status::AbslConnectionError(
634
- status.code(), std::string(status.message())));
635
- };
932
+ << "Http2ClientTransport FlowControlPeriodicUpdateLoop Loop";
933
+ return TrySeq(
934
+ // TODO(tjagtap) [PH2][P2][BDP] Remove this static sleep when the
935
+ // BDP code is done.
936
+ Sleep(chttp2::kFlowControlPeriodicUpdateTimer),
937
+ [self]() -> Poll<absl::Status> {
938
+ GRPC_HTTP2_CLIENT_DLOG
939
+ << "Http2ClientTransport FlowControl PeriodicUpdate()";
940
+ chttp2::FlowControlAction action =
941
+ self->flow_control_.PeriodicUpdate();
942
+ bool is_action_empty = action == chttp2::FlowControlAction();
943
+ // This may trigger a write cycle
944
+ self->ActOnFlowControlAction(action, nullptr);
945
+ if (is_action_empty) {
946
+ // TODO(tjagtap) [PH2][P2][BDP] Remove this when the BDP code is
947
+ // done. We must continue to do PeriodicUpdate once BDP is in
948
+ // place.
949
+ MutexLock lock(&self->transport_mutex_);
950
+ if (self->GetActiveStreamCountLocked() == 0) {
951
+ self->AddPeriodicUpdatePromiseWaker();
952
+ return Pending{};
953
+ }
954
+ }
955
+ return absl::OkStatus();
956
+ },
957
+ [self]() -> LoopCtl<absl::Status> { return Continue{}; });
958
+ }));
636
959
  }
637
960
 
638
961
  // Equivalent to grpc_chttp2_act_on_flowctl_action in chttp2_transport.cc
639
- // TODO(tjagtap) : [PH2][P4] : grpc_chttp2_act_on_flowctl_action has a "reason"
640
- // parameter which looks like it would be really helpful for debugging. Add that
641
962
  void Http2ClientTransport::ActOnFlowControlAction(
642
- const chttp2::FlowControlAction& action, const uint32_t stream_id) {
643
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::ActOnFlowControlAction";
963
+ const chttp2::FlowControlAction& action, RefCountedPtr<Stream> stream) {
964
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::ActOnFlowControlAction"
965
+ << action.DebugString();
644
966
  if (action.send_stream_update() != kNoActionNeeded) {
645
- GRPC_DCHECK_GT(stream_id, 0u);
646
- RefCountedPtr<Stream> stream = LookupStream(stream_id);
647
967
  if (GPR_LIKELY(stream != nullptr)) {
648
- const HttpStreamState state = stream->GetStreamState();
649
- if (state != HttpStreamState::kHalfClosedRemote &&
650
- state != HttpStreamState::kClosed) {
651
- // Stream is not remotely closed, so sending a WINDOW_UPDATE is
652
- // potentially useful.
653
- // TODO(tjagtap) : [PH2][P1] Plumb with flow control
968
+ GRPC_DCHECK_GT(stream->GetStreamId(), 0u);
969
+ if (stream->CanSendWindowUpdateFrames()) {
970
+ window_update_list_.insert(stream->GetStreamId());
971
+ GRPC_HTTP2_CLIENT_DLOG
972
+ << "Http2ClientTransport::ActOnFlowControlAction "
973
+ "added stream "
974
+ << stream->GetStreamId() << " to window_update_list_";
654
975
  }
655
976
  } else {
656
977
  GRPC_HTTP2_CLIENT_DLOG
657
- << "Http2ClientTransport ActOnFlowControlAction stream is null";
978
+ << "Http2ClientTransport::ActOnFlowControlAction stream is null";
658
979
  }
659
980
  }
660
981
 
661
- if (action.send_transport_update() != kNoActionNeeded) {
662
- // TODO(tjagtap) : [PH2][P1] Plumb with flow control
663
- }
664
-
665
- // TODO(tjagtap) : [PH2][P1] Plumb
666
- // enable_preferred_rx_crypto_frame_advertisement with settings
667
982
  ActOnFlowControlActionSettings(
668
- action, settings_.mutable_local(),
669
- /*enable_preferred_rx_crypto_frame_advertisement=*/true);
983
+ action, settings_->mutable_local(),
984
+ enable_preferred_rx_crypto_frame_advertisement_);
670
985
 
671
986
  if (action.AnyUpdateImmediately()) {
672
- TriggerWriteCycle();
987
+ // Prioritize sending flow control updates over reading data. If we
988
+ // continue reading while urgent flow control updates are pending, we might
989
+ // exhaust the flow control window. This prevents us from sending window
990
+ // updates to the peer, causing the peer to block unnecessarily while
991
+ // waiting for flow control tokens.
992
+ reader_state_.SetPauseReadLoop();
993
+ SpawnGuardedTransportParty("SendControlFrames", TriggerWriteCycle());
994
+ GRPC_HTTP2_CLIENT_DLOG << "Update Immediately : "
995
+ << action.ImmediateUpdateReasons();
673
996
  }
674
997
  }
675
998
 
676
999
  ///////////////////////////////////////////////////////////////////////////////
677
1000
  // Write Related Promises and Promise Factories
678
1001
 
679
- auto Http2ClientTransport::WriteControlFrames() {
680
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport WriteControlFrames Factory";
1002
+ auto Http2ClientTransport::ProcessAndWriteControlFrames() {
681
1003
  SliceBuffer output_buf;
682
1004
  if (is_first_write_) {
683
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport Write "
1005
+ // RFC9113: That is, the connection preface starts with the string
1006
+ // "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
1007
+ // This sequence MUST be followed by a SETTINGS frame, which MAY be empty.
1008
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport WriteControlFrames "
684
1009
  "GRPC_CHTTP2_CLIENT_CONNECT_STRING";
685
- output_buf.Append(Slice(
686
- grpc_slice_from_copied_string(GRPC_CHTTP2_CLIENT_CONNECT_STRING)));
1010
+ output_buf.Append(
1011
+ Slice::FromCopiedString(GRPC_CHTTP2_CLIENT_CONNECT_STRING));
1012
+ settings_->MaybeGetSettingsAndSettingsAckFrames(flow_control_, output_buf);
1013
+ // TODO(tjagtap) [PH2][P2][Server] : This will be opposite for server. We
1014
+ // must read before we write for the server. So the ReadLoop will be Spawned
1015
+ // just after the constructor, and the write loop should be spawned only
1016
+ // after the first SETTINGS frame is completely received.
1017
+ //
1018
+ // Because the client is expected to write before it reads, we spawn the
1019
+ // ReadLoop of the client only after the first write is queued.
1020
+ SpawnGuardedTransportParty("ReadLoop", UntilTransportClosed(ReadLoop()));
687
1021
  is_first_write_ = false;
688
1022
  }
689
- MaybeGetSettingsFrame(output_buf);
690
- ping_manager_.MaybeGetSerializedPingFrames(output_buf,
691
- NextAllowedPingInterval());
1023
+
1024
+ // Order of Control Frames is important.
1025
+ // 1. GOAWAY - This is first because if this is the final GoAway, then we may
1026
+ // not need to send anything else to the peer.
1027
+ // 2. SETTINGS and SETTINGS ACK
1028
+ // 3. PING and PING acks.
1029
+ // 4. WINDOW_UPDATE
1030
+ // 5. Custom gRPC security frame
1031
+
1032
+ goaway_manager_.MaybeGetSerializedGoawayFrame(output_buf);
1033
+ http2::Http2ErrorCode apply_status =
1034
+ settings_->MaybeReportAndApplyBufferedPeerSettings(event_engine_.get());
1035
+ if (!goaway_manager_.IsImmediateGoAway() &&
1036
+ apply_status == http2::Http2ErrorCode::kNoError) {
1037
+ EnforceLatestIncomingSettings();
1038
+ settings_->MaybeGetSettingsAndSettingsAckFrames(flow_control_, output_buf);
1039
+ ping_manager_->MaybeGetSerializedPingFrames(output_buf,
1040
+ NextAllowedPingInterval());
1041
+ MaybeGetWindowUpdateFrames(output_buf);
1042
+ }
692
1043
  const uint64_t buffer_length = output_buf.Length();
693
- return If(
694
- buffer_length > 0,
695
- [self = RefAsSubclass<Http2ClientTransport>(),
696
- output_buf = std::move(output_buf), buffer_length]() mutable {
697
- GRPC_HTTP2_CLIENT_DLOG
698
- << "Http2ClientTransport WriteControlFrames Writing buffer of size "
699
- << buffer_length << " to endpoint";
700
- return self->endpoint_.Write(std::move(output_buf),
701
- PromiseEndpoint::WriteArgs{});
702
- },
703
- [self = RefAsSubclass<Http2ClientTransport>(), buffer_length] {
704
- self->ztrace_collector_->Append(
705
- PromiseEndpointWriteTrace{buffer_length});
706
- return absl::OkStatus();
707
- });
1044
+ ztrace_collector_->Append(PromiseEndpointWriteTrace{buffer_length});
1045
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport WriteControlFrames Size : "
1046
+ << buffer_length;
1047
+ return AssertResultType<absl::Status>(
1048
+ If((buffer_length > 0 && apply_status == http2::Http2ErrorCode::kNoError),
1049
+ [self = RefAsSubclass<Http2ClientTransport>(),
1050
+ output_buf = std::move(output_buf)]() mutable {
1051
+ return self->endpoint_.Write(std::move(output_buf),
1052
+ GetWriteArgs(self->settings_->peer()));
1053
+ },
1054
+ [self = RefAsSubclass<Http2ClientTransport>(), apply_status]() {
1055
+ if (apply_status != http2::Http2ErrorCode::kNoError) {
1056
+ return self->HandleError(
1057
+ std::nullopt,
1058
+ Http2Status::Http2ConnectionError(
1059
+ apply_status, "Failed to apply incoming settings"));
1060
+ }
1061
+ return absl::OkStatus();
1062
+ }));
708
1063
  }
709
1064
 
710
1065
  void Http2ClientTransport::NotifyControlFramesWriteDone() {
711
1066
  // Notify Control modules that we have sent the frames.
712
1067
  // All notifications are expected to be synchronous.
713
1068
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport NotifyControlFramesWriteDone";
714
- ping_manager_.NotifyPingSent(ping_timeout_);
1069
+ reader_state_.ResumeReadLoopIfPaused();
1070
+ ping_manager_->NotifyPingSent();
1071
+ goaway_manager_.NotifyGoawaySent();
1072
+ MaybeSpawnWaitForSettingsTimeout();
715
1073
  }
716
1074
 
717
1075
  auto Http2ClientTransport::SerializeAndWrite(std::vector<Http2Frame>&& frames) {
@@ -728,7 +1086,7 @@ auto Http2ClientTransport::SerializeAndWrite(std::vector<Http2Frame>&& frames) {
728
1086
  [self = RefAsSubclass<Http2ClientTransport>(),
729
1087
  output_buf = std::move(output_buf)]() mutable {
730
1088
  return self->endpoint_.Write(std::move(output_buf),
731
- PromiseEndpoint::WriteArgs{});
1089
+ GetWriteArgs(self->settings_->peer()));
732
1090
  },
733
1091
  []() { return absl::OkStatus(); }));
734
1092
  }
@@ -740,45 +1098,71 @@ Http2ClientTransport::DequeueStreamFrames(RefCountedPtr<Stream> stream) {
740
1098
  // data frames when write_bytes_remaining_ is very low. As the
741
1099
  // available transport tokens can only range from 0 to 2^31 - 1,
742
1100
  // we are clamping the write_bytes_remaining_ to that range.
743
- // TODO(akshitpatel) : [PH2][P3] : Plug transport_tokens when
744
- // transport flow control is implemented.
1101
+ const uint32_t tokens =
1102
+ GetMaxPermittedDequeue(flow_control_, stream->flow_control,
1103
+ write_bytes_remaining_, settings_->peer());
1104
+ const uint32_t stream_flow_control_tokens = static_cast<uint32_t>(
1105
+ GetStreamFlowControlTokens(stream->flow_control, settings_->peer()));
1106
+ stream->flow_control.ReportIfStalled(
1107
+ /*is_client=*/true, stream->GetStreamId(), settings_->peer());
745
1108
  StreamDataQueue<ClientMetadataHandle>::DequeueResult result =
746
- stream->DequeueFrames(
747
- /*transport_tokens*/ std::min(
748
- std::numeric_limits<uint32_t>::max(),
749
- static_cast<uint32_t>(Clamp<size_t>(write_bytes_remaining_, 0,
750
- RFC9113::kMaxSize31Bit - 1))),
751
- settings_.peer().max_frame_size(), encoder_);
1109
+ stream->DequeueFrames(tokens, stream_flow_control_tokens,
1110
+ settings_->peer().max_frame_size(), encoder_);
1111
+ ProcessOutgoingDataFrameFlowControl(stream->flow_control,
1112
+ result.flow_control_tokens_consumed);
752
1113
  if (result.is_writable) {
753
1114
  // Stream is still writable. Enqueue it back to the writable
754
1115
  // stream list.
755
- // TODO(akshitpatel) : [PH2][P3] : Plug transport_tokens when
756
- // transport flow control is implemented.
757
- absl::Status status =
758
- writable_stream_list_.Enqueue(stream, result.priority);
1116
+ absl::Status status = writable_stream_list_.EnqueueWrapper(
1117
+ stream, result.priority, AreTransportFlowControlTokensAvailable());
1118
+
759
1119
  if (GPR_UNLIKELY(!status.ok())) {
760
1120
  GRPC_HTTP2_CLIENT_DLOG
761
- << "Http2ClientTransport MultiplexerLoop Failed to "
1121
+ << "Http2ClientTransport DequeueStreamFrames Failed to "
762
1122
  "enqueue stream "
763
1123
  << stream->GetStreamId() << " with status: " << status;
764
1124
  // Close transport if we fail to enqueue stream.
765
- return HandleError(std::nullopt, Http2Status::AbslConnectionError(
766
- absl::StatusCode::kUnavailable,
767
- std::string(status.message())));
1125
+ return HandleError(std::nullopt, ToHttpOkOrConnError(status));
768
1126
  }
769
1127
  }
1128
+
1129
+ // If the stream is aborted before initial metadata is dequeued, we will
1130
+ // not dequeue any frames from the stream data queue (including RST_STREAM).
1131
+ // Because of this, we will add the stream to the stream_list only when
1132
+ // we are guaranteed to send initial metadata on the wire. If the above
1133
+ // mentioned scenario occurs, the stream ref will be dropped by the
1134
+ // multiplexer loop as the stream will never be writable again. Additionally,
1135
+ // the other two stream refs, CallHandler OnDone and OutboundLoop will be
1136
+ // dropped by Callv3 triggering cleaning up the stream object.
770
1137
  if (result.InitialMetadataDequeued()) {
1138
+ GRPC_HTTP2_CLIENT_DLOG
1139
+ << "Http2ClientTransport DequeueStreamFrames InitialMetadataDequeued "
1140
+ "stream_id = "
1141
+ << stream->GetStreamId();
771
1142
  stream->SentInitialMetadata();
1143
+ // After this point, initial metadata is guaranteed to be sent out.
1144
+ AddToStreamList(stream);
772
1145
  }
1146
+
773
1147
  if (result.HalfCloseDequeued()) {
774
- CloseStream(stream, CloseStreamArgs{/*close_reads=*/false,
775
- /*close_writes=*/true});
1148
+ GRPC_HTTP2_CLIENT_DLOG
1149
+ << "Http2ClientTransport DequeueStreamFrames HalfCloseDequeued "
1150
+ "stream_id = "
1151
+ << stream->GetStreamId();
776
1152
  stream->MarkHalfClosedLocal();
1153
+ CloseStream(
1154
+ stream,
1155
+ CloseStreamArgs{/*close_reads=*/stream->did_receive_trailing_metadata,
1156
+ /*close_writes=*/true});
777
1157
  }
778
1158
  if (result.ResetStreamDequeued()) {
1159
+ GRPC_HTTP2_CLIENT_DLOG
1160
+ << "Http2ClientTransport DequeueStreamFrames ResetStreamDequeued "
1161
+ "stream_id = "
1162
+ << stream->GetStreamId();
1163
+ stream->MarkHalfClosedLocal();
779
1164
  CloseStream(stream, CloseStreamArgs{/*close_reads=*/true,
780
1165
  /*close_writes=*/true});
781
- stream->MarkHalfClosedLocal();
782
1166
  }
783
1167
 
784
1168
  // Update the write_bytes_remaining_ based on the bytes consumed
@@ -787,7 +1171,7 @@ Http2ClientTransport::DequeueStreamFrames(RefCountedPtr<Stream> stream) {
787
1171
  (write_bytes_remaining_ >= result.total_bytes_consumed)
788
1172
  ? (write_bytes_remaining_ - result.total_bytes_consumed)
789
1173
  : 0;
790
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport MultiplexerLoop "
1174
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport DequeueStreamFrames "
791
1175
  "write_bytes_remaining_ after dequeue = "
792
1176
  << write_bytes_remaining_ << " total_bytes_consumed = "
793
1177
  << result.total_bytes_consumed
@@ -799,6 +1183,8 @@ Http2ClientTransport::DequeueStreamFrames(RefCountedPtr<Stream> stream) {
799
1183
  return std::move(result.frames);
800
1184
  }
801
1185
 
1186
+ // This MultiplexerLoop promise is responsible for Multiplexing multiple gRPC
1187
+ // Requests (HTTP2 Streams) and writing them into one common endpoint.
802
1188
  auto Http2ClientTransport::MultiplexerLoop() {
803
1189
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport MultiplexerLoop Factory";
804
1190
  return AssertResultType<
@@ -814,17 +1200,19 @@ auto Http2ClientTransport::MultiplexerLoop() {
814
1200
  // WriteControlFrames() to indicate if we should do a separate write
815
1201
  // for the queued control frames or send the queued frames with the
816
1202
  // data frames(if any).
817
- return Map(self->WriteControlFrames(), [self](absl::Status status) {
818
- if (GPR_UNLIKELY(!status.ok())) {
819
- GRPC_HTTP2_CLIENT_DLOG
820
- << "Http2ClientTransport MultiplexerLoop Failed to "
821
- "write control frames with status: "
822
- << status;
823
- return status;
824
- }
825
- self->NotifyControlFramesWriteDone();
826
- return absl::OkStatus();
827
- });
1203
+ return Map(
1204
+ self->ProcessAndWriteControlFrames(),
1205
+ [self](absl::Status status) {
1206
+ if (GPR_UNLIKELY(!status.ok())) {
1207
+ GRPC_HTTP2_CLIENT_DLOG
1208
+ << "Http2ClientTransport MultiplexerLoop Failed to "
1209
+ "write control frames with status: "
1210
+ << status;
1211
+ return status;
1212
+ }
1213
+ self->NotifyControlFramesWriteDone();
1214
+ return absl::OkStatus();
1215
+ });
828
1216
  },
829
1217
  [self]() -> absl::StatusOr<std::vector<Http2Frame>> {
830
1218
  std::vector<Http2Frame> frames;
@@ -833,8 +1221,6 @@ auto Http2ClientTransport::MultiplexerLoop() {
833
1221
  // some cases, we may write more than max_write_size_ bytes(like
834
1222
  // writing metadata).
835
1223
  while (self->write_bytes_remaining_ > 0) {
836
- // TODO(akshitpatel) : [PH2][P3] : Plug transport_tokens when
837
- // transport flow control is implemented.
838
1224
  std::optional<RefCountedPtr<Stream>> optional_stream =
839
1225
  self->writable_stream_list_.ImmediateNext(
840
1226
  self->AreTransportFlowControlTokensAvailable());
@@ -852,8 +1238,28 @@ auto Http2ClientTransport::MultiplexerLoop() {
852
1238
  << stream->GetStreamId()
853
1239
  << " is_closed_for_writes = " << stream->IsClosedForWrites();
854
1240
 
1241
+ if (stream->GetStreamId() == kInvalidStreamId) {
1242
+ GRPC_DCHECK(stream->IsStreamIdle());
1243
+ // TODO(akshitpatel) : [PH2][P5] : We will waste a stream id in
1244
+ // the rare scenario where the stream is aborted before it can be
1245
+ // written to. This is a possible area to optimize in future.
1246
+ absl::Status status = self->InitializeStream(stream);
1247
+ if (!status.ok()) {
1248
+ GRPC_HTTP2_CLIENT_DLOG
1249
+ << "Http2ClientTransport MultiplexerLoop "
1250
+ "Failed to assign stream id and add to stream list for "
1251
+ "stream: "
1252
+ << stream.get() << " closing this stream.";
1253
+ self->BeginCloseStream(
1254
+ stream, /*reset_stream_error_code=*/std::nullopt,
1255
+ CancelledServerMetadataFromStatus(status));
1256
+ continue;
1257
+ }
1258
+ }
1259
+
855
1260
  if (GPR_LIKELY(!stream->IsClosedForWrites())) {
856
- auto stream_frames = self->DequeueStreamFrames(stream);
1261
+ absl::StatusOr<std::vector<Http2Frame>> stream_frames =
1262
+ self->DequeueStreamFrames(stream);
857
1263
  if (GPR_UNLIKELY(!stream_frames.ok())) {
858
1264
  GRPC_HTTP2_CLIENT_DLOG
859
1265
  << "Http2ClientTransport MultiplexerLoop "
@@ -884,7 +1290,7 @@ auto Http2ClientTransport::MultiplexerLoop() {
884
1290
  if (self->should_reset_ping_clock_) {
885
1291
  GRPC_HTTP2_CLIENT_DLOG
886
1292
  << "Http2ClientTransport MultiplexerLoop ResetPingClock";
887
- self->ping_manager_.ResetPingClock(/*is_client=*/true);
1293
+ self->ping_manager_->ResetPingClock(/*is_client=*/true);
888
1294
  self->should_reset_ping_clock_ = false;
889
1295
  }
890
1296
  return Continue();
@@ -892,18 +1298,102 @@ auto Http2ClientTransport::MultiplexerLoop() {
892
1298
  }));
893
1299
  }
894
1300
 
895
- auto Http2ClientTransport::OnMultiplexerLoopEnded() {
896
- GRPC_HTTP2_CLIENT_DLOG
897
- << "Http2ClientTransport OnMultiplexerLoopEnded Factory";
898
- return
899
- [self = RefAsSubclass<Http2ClientTransport>()](absl::Status status) {
900
- GRPC_HTTP2_CLIENT_DLOG
901
- << "Http2ClientTransport OnMultiplexerLoopEnded Promise Status="
902
- << status;
903
- GRPC_UNUSED absl::Status error = self->HandleError(
904
- std::nullopt, Http2Status::AbslConnectionError(
905
- status.code(), std::string(status.message())));
906
- };
1301
+ absl::Status Http2ClientTransport::InitializeStream(
1302
+ RefCountedPtr<Stream> stream) {
1303
+ absl::StatusOr<uint32_t> next_stream_id = NextStreamId();
1304
+ if (!next_stream_id.ok()) {
1305
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport InitializeStream "
1306
+ "Failed to get next stream id for stream: "
1307
+ << stream.get();
1308
+ return std::move(next_stream_id).status();
1309
+ }
1310
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport InitializeStream "
1311
+ "Assigned stream id: "
1312
+ << next_stream_id.value()
1313
+ << " to stream: " << stream.get()
1314
+ << ", allow_true_binary_metadata:"
1315
+ << settings_->peer().allow_true_binary_metadata();
1316
+ stream->InitializeStream(next_stream_id.value(),
1317
+ settings_->peer().allow_true_binary_metadata(),
1318
+ settings_->acked().allow_true_binary_metadata());
1319
+ return absl::OkStatus();
1320
+ }
1321
+
1322
+ void Http2ClientTransport::AddToStreamList(RefCountedPtr<Stream> stream) {
1323
+ bool should_wake_periodic_updates = false;
1324
+ {
1325
+ MutexLock lock(&transport_mutex_);
1326
+ GRPC_DCHECK(stream != nullptr) << "stream is null";
1327
+ GRPC_DCHECK_GT(stream->GetStreamId(), 0u) << "stream id is invalid";
1328
+ GRPC_HTTP2_CLIENT_DLOG
1329
+ << "Http2ClientTransport AddToStreamList for stream id: "
1330
+ << stream->GetStreamId();
1331
+ stream_list_.emplace(stream->GetStreamId(), stream);
1332
+ // TODO(tjagtap) [PH2][P2][BDP] Remove this when the BDP code is done.
1333
+ if (GetActiveStreamCountLocked() == 1) {
1334
+ should_wake_periodic_updates = true;
1335
+ }
1336
+ }
1337
+ // TODO(tjagtap) [PH2][P2][BDP] Remove this when the BDP code is done.
1338
+ if (should_wake_periodic_updates) {
1339
+ // Release the lock before you wake up another promise on the party.
1340
+ WakeupPeriodicUpdatePromise();
1341
+ }
1342
+ }
1343
+
1344
+ ///////////////////////////////////////////////////////////////////////////////
1345
+ // Settings and Window Update Management
1346
+
1347
+ void Http2ClientTransport::EnforceLatestIncomingSettings() {
1348
+ encoder_.SetMaxTableSize(settings_->peer().header_table_size());
1349
+ }
1350
+
1351
+ auto Http2ClientTransport::WaitForSettingsTimeoutOnDone() {
1352
+ return [self = RefAsSubclass<Http2ClientTransport>()](absl::Status status) {
1353
+ if (!status.ok()) {
1354
+ GRPC_UNUSED absl::Status result = self->HandleError(
1355
+ std::nullopt, Http2Status::Http2ConnectionError(
1356
+ Http2ErrorCode::kProtocolError,
1357
+ std::string(RFC9113::kSettingsTimeout)));
1358
+ }
1359
+ };
1360
+ }
1361
+
1362
+ void Http2ClientTransport::MaybeSpawnWaitForSettingsTimeout() {
1363
+ if (settings_->ShouldSpawnWaitForSettingsTimeout()) {
1364
+ GRPC_HTTP2_CLIENT_DLOG
1365
+ << "Http2ClientTransport::MaybeSpawnWaitForSettingsTimeout Spawning";
1366
+ general_party_->Spawn("WaitForSettingsTimeout",
1367
+ settings_->WaitForSettingsTimeout(),
1368
+ WaitForSettingsTimeoutOnDone());
1369
+ }
1370
+ }
1371
+
1372
+ void Http2ClientTransport::MaybeGetWindowUpdateFrames(SliceBuffer& output_buf) {
1373
+ std::vector<Http2Frame> frames;
1374
+ frames.reserve(window_update_list_.size() + 1);
1375
+ uint32_t window_size =
1376
+ flow_control_.DesiredAnnounceSize(/*writing_anyway=*/true);
1377
+ if (window_size > 0) {
1378
+ GRPC_HTTP2_CLIENT_DLOG
1379
+ << "Http2ClientTransport::MaybeGetWindowUpdateFrames Transport Window "
1380
+ "Update : "
1381
+ << window_size;
1382
+ frames.emplace_back(Http2WindowUpdateFrame{/*stream_id=*/0, window_size});
1383
+ flow_control_.SentUpdate(window_size);
1384
+ }
1385
+ for (const uint32_t stream_id : window_update_list_) {
1386
+ RefCountedPtr<Stream> stream = LookupStream(stream_id);
1387
+ MaybeAddStreamWindowUpdateFrame(stream, frames);
1388
+ }
1389
+ window_update_list_.clear();
1390
+ if (!frames.empty()) {
1391
+ GRPC_HTTP2_CLIENT_DLOG
1392
+ << "Http2ClientTransport::MaybeGetWindowUpdateFrames Total Window "
1393
+ "Update Frames : "
1394
+ << frames.size();
1395
+ Serialize(absl::Span<Http2Frame>(frames), output_buf);
1396
+ }
907
1397
  }
908
1398
 
909
1399
  ///////////////////////////////////////////////////////////////////////////////
@@ -912,54 +1402,20 @@ auto Http2ClientTransport::OnMultiplexerLoopEnded() {
912
1402
  Http2ClientTransport::Http2ClientTransport(
913
1403
  PromiseEndpoint endpoint, GRPC_UNUSED const ChannelArgs& channel_args,
914
1404
  std::shared_ptr<EventEngine> event_engine,
915
- grpc_closure* on_receive_settings)
1405
+ absl::AnyInvocable<void(absl::StatusOr<uint32_t>)> on_receive_settings)
916
1406
  : channelz::DataSource(http2::CreateChannelzSocketNode(
917
1407
  endpoint.GetEventEngineEndpoint(), channel_args)),
1408
+ event_engine_(std::move(event_engine)),
918
1409
  endpoint_(std::move(endpoint)),
919
- stream_id_mutex_(/*Initial Stream Id*/ 1),
1410
+ settings_(MakeRefCounted<SettingsPromiseManager>(
1411
+ std::move(on_receive_settings))),
1412
+ next_stream_id_(/*Initial Stream ID*/ 1),
920
1413
  should_reset_ping_clock_(false),
921
- incoming_header_in_progress_(false),
922
- incoming_header_end_stream_(false),
923
1414
  is_first_write_(true),
924
- incoming_header_stream_id_(0),
925
- on_receive_settings_(on_receive_settings),
926
- max_header_list_size_soft_limit_(
927
- GetSoftLimitFromChannelArgs(channel_args)),
928
1415
  max_write_size_(kMaxWriteSize),
929
- keepalive_time_(std::max(
930
- Duration::Seconds(10),
931
- channel_args.GetDurationFromIntMillis(GRPC_ARG_KEEPALIVE_TIME_MS)
932
- .value_or(Duration::Infinity()))),
933
- // Keepalive timeout is only passed to the keepalive manager if it is less
934
- // than the ping timeout. As keepalives use pings for health checks, if
935
- // keepalive timeout is greater than ping timeout, we would always hit the
936
- // ping timeout first.
937
- keepalive_timeout_(std::max(
938
- Duration::Zero(),
939
- channel_args.GetDurationFromIntMillis(GRPC_ARG_KEEPALIVE_TIMEOUT_MS)
940
- .value_or(keepalive_time_ == Duration::Infinity()
941
- ? Duration::Infinity()
942
- : (Duration::Seconds(20))))),
943
- ping_timeout_(std::max(
944
- Duration::Zero(),
945
- channel_args.GetDurationFromIntMillis(GRPC_ARG_PING_TIMEOUT_MS)
946
- .value_or(keepalive_time_ == Duration::Infinity()
947
- ? Duration::Infinity()
948
- : Duration::Minutes(1)))),
949
- ping_manager_(channel_args, PingSystemInterfaceImpl::Make(this),
950
- event_engine),
951
- keepalive_manager_(
952
- KeepAliveInterfaceImpl::Make(this),
953
- ((keepalive_timeout_ < ping_timeout_) ? keepalive_timeout_
954
- : Duration::Infinity()),
955
- keepalive_time_),
956
- keepalive_permit_without_calls_(
957
- channel_args.GetBool(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)
958
- .value_or(false)),
959
- enable_preferred_rx_crypto_frame_advertisement_(
960
- channel_args
961
- .GetBool(GRPC_ARG_EXPERIMENTAL_HTTP2_PREFERRED_CRYPTO_FRAME_SIZE)
962
- .value_or(false)),
1416
+ ping_manager_(std::nullopt),
1417
+ keepalive_manager_(std::nullopt),
1418
+ goaway_manager_(GoawayInterfaceImpl::Make(this)),
963
1419
  memory_owner_(channel_args.GetObject<ResourceQuota>()
964
1420
  ->memory_quota()
965
1421
  ->CreateMemoryOwner()),
@@ -969,165 +1425,274 @@ Http2ClientTransport::Http2ClientTransport(
969
1425
  &memory_owner_),
970
1426
  ztrace_collector_(std::make_shared<PromiseHttp2ZTraceCollector>()) {
971
1427
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport Constructor Begin";
972
- SourceConstructed();
1428
+ // Initialize the general party and write party.
1429
+ RefCountedPtr<Arena> party_arena = SimpleArenaAllocator(0)->MakeArena();
1430
+ party_arena->SetContext<EventEngine>(event_engine_.get());
1431
+ general_party_ = Party::Make(std::move(party_arena));
973
1432
 
974
- InitLocalSettings(settings_.mutable_local(), /*is_client=*/true);
975
- ReadSettingsFromChannelArgs(channel_args, settings_.mutable_local(),
976
- flow_control_, /*is_client=*/true);
1433
+ InitLocalSettings(settings_->mutable_local(), /*is_client=*/true);
1434
+ TransportChannelArgs args;
1435
+ ReadChannelArgs(channel_args, args);
977
1436
 
978
- // Initialize the general party and write party.
979
- auto general_party_arena = SimpleArenaAllocator(0)->MakeArena();
980
- general_party_arena->SetContext<EventEngine>(event_engine.get());
981
- general_party_ = Party::Make(std::move(general_party_arena));
982
-
983
- general_party_->Spawn("ReadLoop", UntilTransportClosed(ReadLoop()),
984
- OnReadLoopEnded());
985
- general_party_->Spawn("MultiplexerLoop",
986
- UntilTransportClosed(MultiplexerLoop()),
987
- OnMultiplexerLoopEnded());
988
- // The keepalive loop is only spawned if the keepalive time is not infinity.
989
- keepalive_manager_.Spawn(general_party_.get());
990
-
991
- // TODO(tjagtap) : [PH2][P2] Delete this hack once flow control is done.
992
- // We are increasing the flow control window so that we can avoid sending
993
- // WINDOW_UPDATE frames while flow control is under development. Once it is
994
- // ready we should remove these lines.
995
- // <DeleteAfterFlowControl>
996
- Http2ErrorCode code = settings_.mutable_local().Apply(
997
- Http2Settings::kInitialWindowSizeWireId,
998
- (Http2Settings::max_initial_window_size() - 1));
999
- GRPC_DCHECK(code == Http2ErrorCode::kNoError);
1000
- // </DeleteAfterFlowControl>
1001
-
1002
- const int max_hpack_table_size =
1003
- channel_args.GetInt(GRPC_ARG_HTTP2_HPACK_TABLE_SIZE_ENCODER).value_or(-1);
1004
- if (max_hpack_table_size >= 0) {
1005
- encoder_.SetMaxUsableSize(max_hpack_table_size);
1006
- }
1437
+ ping_manager_.emplace(channel_args, args.ping_timeout,
1438
+ PingSystemInterfaceImpl::Make(this), event_engine_);
1007
1439
 
1008
- transport_settings_.SetSettingsTimeout(channel_args, keepalive_timeout_);
1440
+ // The keepalive loop is only spawned if the keepalive time is not infinity.
1441
+ keepalive_manager_.emplace(
1442
+ KeepAliveInterfaceImpl::Make(this),
1443
+ ((args.keepalive_timeout < args.ping_timeout) ? args.keepalive_timeout
1444
+ : Duration::Infinity()),
1445
+ args.keepalive_time, general_party_.get());
1009
1446
 
1010
- if (settings_.local().allow_security_frame()) {
1447
+ if (settings_->local().allow_security_frame()) {
1011
1448
  // TODO(tjagtap) : [PH2][P3] : Setup the plumbing to pass the security frame
1012
1449
  // to the endpoing via TransportFramingEndpointExtension.
1013
1450
  // Also decide if this plumbing is done here, or when the peer sends
1014
1451
  // allow_security_frame too.
1015
1452
  }
1016
1453
 
1017
- // Spawn a promise to flush the gRPC initial connection string and settings
1018
- // frames.
1019
- general_party_->Spawn("SpawnFlushInitialFrames", TriggerWriteCycle(),
1020
- [](GRPC_UNUSED absl::Status status) {});
1021
-
1454
+ GRPC_DCHECK(ping_manager_.has_value());
1455
+ GRPC_DCHECK(keepalive_manager_.has_value());
1456
+ SourceConstructed();
1022
1457
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport Constructor End";
1023
1458
  }
1024
1459
 
1460
+ void Http2ClientTransport::SpawnTransportLoops() {
1461
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::SpawnTransportLoops Begin";
1462
+ SpawnGuardedTransportParty(
1463
+ "FlowControlPeriodicUpdateLoop",
1464
+ UntilTransportClosed(FlowControlPeriodicUpdateLoop()));
1465
+
1466
+ SpawnGuardedTransportParty("FlushInitialFrames", TriggerWriteCycle());
1467
+ SpawnGuardedTransportParty("MultiplexerLoop",
1468
+ UntilTransportClosed(MultiplexerLoop()));
1469
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::SpawnTransportLoops End";
1470
+ }
1471
+
1472
+ void Http2ClientTransport::ReadChannelArgs(const ChannelArgs& channel_args,
1473
+ TransportChannelArgs& args) {
1474
+ http2::ReadChannelArgs(channel_args, args, settings_->mutable_local(),
1475
+ flow_control_,
1476
+ /*is_client=*/true);
1477
+
1478
+ // Assign the channel args to the member variables.
1479
+ keepalive_time_ = args.keepalive_time;
1480
+ incoming_headers_.set_soft_limit(args.max_header_list_size_soft_limit);
1481
+ keepalive_permit_without_calls_ = args.keepalive_permit_without_calls;
1482
+ enable_preferred_rx_crypto_frame_advertisement_ =
1483
+ args.enable_preferred_rx_crypto_frame_advertisement;
1484
+ test_only_ack_pings_ = args.test_only_ack_pings;
1485
+
1486
+ if (args.initial_sequence_number > 0) {
1487
+ next_stream_id_ = args.initial_sequence_number;
1488
+ }
1489
+
1490
+ settings_->SetSettingsTimeout(args.settings_timeout);
1491
+ if (args.max_usable_hpack_table_size >= 0) {
1492
+ encoder_.SetMaxUsableSize(args.max_usable_hpack_table_size);
1493
+ }
1494
+ }
1495
+
1025
1496
  // This function MUST be idempotent. This function MUST be called from the
1026
1497
  // transport party.
1027
1498
  void Http2ClientTransport::CloseStream(RefCountedPtr<Stream> stream,
1028
1499
  CloseStreamArgs args,
1029
1500
  DebugLocation whence) {
1030
- // TODO(akshitpatel) : [PH2][P3] : Measure the impact of holding mutex
1031
- // throughout this function.
1032
- MutexLock lock(&transport_mutex_);
1033
- GRPC_DCHECK(stream != nullptr) << "stream is null";
1034
- GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::CloseStream for stream id: "
1035
- << stream->GetStreamId()
1036
- << " location=" << whence.file() << ":"
1037
- << whence.line();
1501
+ std::optional<Http2Status> close_transport_error;
1038
1502
 
1039
- if (args.close_writes) {
1040
- stream->SetWriteClosed();
1041
- }
1042
-
1043
- if (args.close_reads) {
1503
+ {
1504
+ // TODO(akshitpatel) : [PH2][P3] : Measure the impact of holding mutex
1505
+ // throughout this function.
1506
+ MutexLock lock(&transport_mutex_);
1507
+ GRPC_DCHECK(stream != nullptr) << "stream is null";
1044
1508
  GRPC_HTTP2_CLIENT_DLOG
1045
1509
  << "Http2ClientTransport::CloseStream for stream id: "
1046
- << stream->GetStreamId() << " closing stream for reads.";
1047
- stream_list_.erase(stream->GetStreamId());
1510
+ << stream->GetStreamId() << " close_reads=" << args.close_reads
1511
+ << " close_writes=" << args.close_writes
1512
+ << " incoming_headers_=" << incoming_headers_.DebugString()
1513
+ << " location=" << whence.file() << ":" << whence.line();
1514
+
1515
+ if (args.close_writes) {
1516
+ stream->SetWriteClosed();
1517
+ }
1518
+
1519
+ if (args.close_reads) {
1520
+ GRPC_HTTP2_CLIENT_DLOG
1521
+ << "Http2ClientTransport::CloseStream for stream id: "
1522
+ << stream->GetStreamId() << " closing stream for reads.";
1523
+ // If the stream is closed while reading HEADER/CONTINUATION frames, we
1524
+ // should still parse the enqueued buffer to maintain HPACK state between
1525
+ // peers.
1526
+ if (incoming_headers_.IsWaitingForContinuationFrame()) {
1527
+ Http2Status result = http2::ParseAndDiscardHeaders(
1528
+ parser_, SliceBuffer(),
1529
+ HeaderAssembler::ParseHeaderArgs{
1530
+ /*is_initial_metadata=*/!incoming_headers_.HeaderHasEndStream(),
1531
+ /*is_end_headers=*/false,
1532
+ /*is_client=*/true,
1533
+ /*max_header_list_size_soft_limit=*/
1534
+ incoming_headers_.soft_limit(),
1535
+ /*max_header_list_size_hard_limit=*/
1536
+ settings_->acked().max_header_list_size(),
1537
+ /*stream_id=*/incoming_headers_.GetStreamId(),
1538
+ },
1539
+ stream, /*original_status=*/Http2Status::Ok());
1540
+ if (result.GetType() == Http2Status::Http2ErrorType::kConnectionError) {
1541
+ GRPC_HTTP2_CLIENT_DLOG
1542
+ << "Http2ClientTransport::CloseStream for stream id: "
1543
+ << stream->GetStreamId()
1544
+ << " failed to partially process header: "
1545
+ << result.DebugString();
1546
+ close_transport_error.emplace(std::move(result));
1547
+ }
1548
+ }
1549
+
1550
+ stream_list_.erase(stream->GetStreamId());
1551
+ if (!close_transport_error.has_value() && CanCloseTransportLocked()) {
1552
+ // TODO(akshitpatel) : [PH2][P3] : Is kInternalError the right error
1553
+ // code to use here? IMO it should be kNoError.
1554
+ close_transport_error.emplace(Http2Status::Http2ConnectionError(
1555
+ Http2ErrorCode::kInternalError,
1556
+ std::string(RFC9113::kLastStreamClosed)));
1557
+ }
1558
+ }
1559
+ }
1560
+
1561
+ if (close_transport_error.has_value()) {
1562
+ GRPC_UNUSED absl::Status status = HandleError(
1563
+ /*stream_id=*/std::nullopt, std::move(*close_transport_error));
1048
1564
  }
1049
1565
  }
1050
1566
 
1051
- // Here is the flow for stream close:
1052
- // 1. BeginCloseStream is invoked if the transport needs to close the stream.
1053
- // 2. If reset stream does not need to be sent, the stream is closed for reads
1054
- // and writes immediately. Also, the stream is removed from the
1055
- // stream_list_.
1056
- // 3. If reset stream needs to be sent and the stream is cancelled, the stream
1057
- // is closed for reads immediately. This will result in stream being removed
1058
- // from the stream_list_. Additionally, the reset stream frame is enqueued
1059
- // and the stream is closed for writes once the frame is created.
1060
- // 4. Trailing metadata is pushed to the call stack.
1061
- // Extended:
1062
- // 5. Eventually CallHandler.OnDone() is invoked.
1063
- // 6. If the call was cancelled, we try to enqueue a reset stream frame. In most
1064
- // of the cases, this would be a no-op. The only case where this would
1065
- // enqueue the reset stream frame is an application initiated abort.
1066
- // 7. If the call was not cancelled, we try to enqueue a half close frame. If
1067
- // the stream was already closed from writes, this would be a no-op.
1567
+ // This function is idempotent and MUST be called from the transport party.
1568
+ // All the scenarios that can lead to this function being called are:
1569
+ // 1. Reading a RST stream frame: In this case, the stream is immediately
1570
+ // closed for reads and writes and removed from the stream_list_.
1571
+ // 2. Reading a Trailing Metadata frame: There are two possible scenarios:
1572
+ // a. The stream is closed for writes: Close the stream for reads and writes
1573
+ // and remove the stream from the stream_list_.
1574
+ // b. The stream is NOT closed for writes: Stream is kept open for reads and
1575
+ // writes. CallHandler OnDone will trigger sending a half close frame. If
1576
+ // before the multiplexer loop triggers sending a half close a RST stream
1577
+ // is read, the stream is closed for reads and writes immediately and the
1578
+ // half close is discarded. If no RST stream is read, the stream is closed
1579
+ // for reads and writes upon sending the half close frame from the
1580
+ // multiplexer loop.
1581
+ // 3. Hitting error condition in the transport: In this case, RST stream is
1582
+ // enqueued and the stream is closed for reads immediately. This implies we
1583
+ // reduce the number of active streams inline. When multiplexer loop
1584
+ // processes the RST stream frame, the stream ref will dropped. The other
1585
+ // stream ref will be dropped when CallHandler's OnDone is executed causing
1586
+ // the stream to be destroyed. CallHandlers OnDone also tries to enqueue a
1587
+ // RST stream frame. This is a no-op at this point.
1588
+ // 4. Application abort: In this case, CallHandler OnDone will enqueue RST
1589
+ // stream frame to the stream data queue. The multiplexer loop will send the
1590
+ // reset stream frame and close the stream from reads and writes.
1591
+ // 5. Transport close: This takes up the same path as case 3.
1592
+ // In all the above cases, trailing metadata is pushed to the call spine.
1593
+ // Note: The stream ref is held in atmost 3 places:
1594
+ // 1. stream_list_ : This is released when the stream is closed for reads.
1595
+ // 2. CallHandler OnDone : This is released when Trailing Metadata is pushed to
1596
+ // the call spine.
1597
+ // 3. List of writable streams : This is released after the final frame is
1598
+ // dequeued from the StreamDataQueue.
1068
1599
  void Http2ClientTransport::BeginCloseStream(
1069
- const uint32_t stream_id, std::optional<uint32_t> reset_stream_error_code,
1600
+ RefCountedPtr<Stream> stream,
1601
+ std::optional<uint32_t> reset_stream_error_code,
1070
1602
  ServerMetadataHandle&& metadata, DebugLocation whence) {
1603
+ if (stream == nullptr) {
1604
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::BeginCloseStream stream "
1605
+ "is null reset_stream_error_code="
1606
+ << (reset_stream_error_code.has_value()
1607
+ ? absl::StrCat(*reset_stream_error_code)
1608
+ : "nullopt")
1609
+ << " metadata=" << metadata->DebugString();
1610
+ return;
1611
+ }
1612
+
1071
1613
  GRPC_HTTP2_CLIENT_DLOG
1072
- << "Http2ClientTransport::BeginCloseStream for stream id: " << stream_id
1073
- << " error_code="
1614
+ << "Http2ClientTransport::BeginCloseStream for stream id: "
1615
+ << stream->GetStreamId() << " error_code="
1074
1616
  << (reset_stream_error_code.has_value()
1075
1617
  ? absl::StrCat(*reset_stream_error_code)
1076
1618
  : "nullopt")
1077
1619
  << " ServerMetadata=" << metadata->DebugString()
1078
1620
  << " location=" << whence.file() << ":" << whence.line();
1079
1621
 
1080
- RefCountedPtr<Stream> stream = LookupStream(stream_id);
1081
- if (stream != nullptr) {
1082
- if (stream->did_push_trailing_metadata) {
1083
- return;
1084
- }
1085
-
1086
- // If reset stream needs to be sent, CloseStream will be called from the
1087
- // Multiplexer after the reset stream frame is created.
1622
+ bool close_reads = false;
1623
+ bool close_writes = false;
1624
+ if (metadata->get(GrpcCallWasCancelled())) {
1088
1625
  if (!reset_stream_error_code) {
1089
1626
  // Callers taking this path:
1090
1627
  // 1. Reading a RST stream frame (will not send any frame out).
1091
-
1092
- CloseStream(stream,
1093
- CloseStreamArgs{/*close_reads*/ true, /*close_writes=*/true},
1094
- whence);
1095
- stream->MarkHalfClosedRemote();
1628
+ // 2. Closing a stream before initial metadata is sent.
1629
+ close_reads = true;
1630
+ close_writes = true;
1631
+ GRPC_HTTP2_CLIENT_DLOG
1632
+ << "Http2ClientTransport::BeginCloseStream for stream id: "
1633
+ << stream->GetStreamId() << " close_reads= " << close_reads
1634
+ << " close_writes= " << close_writes;
1096
1635
  } else {
1097
1636
  // Callers taking this path:
1098
- // 1. Reading Trailing Metadata (MAY send half close from OnDone).
1099
- // 2. Processing Error in transport (will send reset stream from here).
1100
-
1101
- if (metadata->get(GrpcCallWasCancelled())) {
1102
- CloseStream(
1103
- stream,
1104
- CloseStreamArgs{/*close_reads*/ true, /*close_writes=*/false},
1105
- whence);
1106
- absl::StatusOr<EnqueueResult> enqueue_result =
1107
- stream->EnqueueResetStream(reset_stream_error_code.value());
1108
- GRPC_HTTP2_CLIENT_DLOG << "Enqueued ResetStream with error code="
1109
- << reset_stream_error_code.value()
1110
- << " status=" << enqueue_result.status();
1111
- if (enqueue_result.ok()) {
1112
- GRPC_UNUSED absl::Status status = MaybeAddStreamToWritableStreamList(
1113
- stream, enqueue_result.value());
1114
- }
1637
+ // 1. Processing Error in transport (will send reset stream from here).
1638
+ absl::StatusOr<StreamWritabilityUpdate> enqueue_result =
1639
+ stream->EnqueueResetStream(reset_stream_error_code.value());
1640
+ GRPC_HTTP2_CLIENT_DLOG << "Enqueued ResetStream with error code="
1641
+ << reset_stream_error_code.value()
1642
+ << " status=" << enqueue_result.status();
1643
+ if (enqueue_result.ok()) {
1644
+ GRPC_UNUSED absl::Status status =
1645
+ MaybeAddStreamToWritableStreamList(stream, enqueue_result.value());
1115
1646
  }
1647
+ close_reads = true;
1648
+ GRPC_HTTP2_CLIENT_DLOG
1649
+ << "Http2ClientTransport::BeginCloseStream for stream id: "
1650
+ << stream->GetStreamId() << " close_reads= " << close_reads
1651
+ << " close_writes= " << close_writes;
1116
1652
  }
1653
+ } else {
1654
+ // Callers taking this path:
1655
+ // 1. Reading Trailing Metadata (MAY send half close from OnDone).
1656
+ if (stream->IsClosedForWrites()) {
1657
+ close_reads = true;
1658
+ close_writes = true;
1659
+ GRPC_HTTP2_CLIENT_DLOG
1660
+ << "Http2ClientTransport::BeginCloseStream for stream id: "
1661
+ << stream->GetStreamId() << " close_reads= " << close_reads
1662
+ << " close_writes= " << close_writes;
1663
+ }
1664
+ }
1117
1665
 
1118
- stream->did_push_trailing_metadata = true;
1119
- // This maybe called multiple times while closing a stream. This should be
1120
- // fine as the the call spine ignores the subsequent calls.
1121
- stream->call.SpawnPushServerTrailingMetadata(std::move(metadata));
1666
+ if (close_reads || close_writes) {
1667
+ CloseStream(stream, CloseStreamArgs{close_reads, close_writes}, whence);
1122
1668
  }
1669
+
1670
+ // If the call was cancelled, the stream MUST be closed for reads.
1671
+ GRPC_DCHECK(metadata->get(GrpcCallWasCancelled()) ? close_reads : true);
1672
+
1673
+ // This maybe called multiple times while closing a stream. In CallV3, the
1674
+ // flow for pushing server trailing metadata is idempotent. However, there is
1675
+ // a subtle difference. When we push server trailing metadata with a cancelled
1676
+ // status PushServerTrailingMetadata is spawned inline on the Call party
1677
+ // whereas for the non-cancelled status, PushServerTrailingMetadata is
1678
+ // spawned in the server_to_client spawn serializer. Because of this, in
1679
+ // case when the server pushes trailing metadata (non-cancelled) followed by a
1680
+ // RST stream with cancelled status, it is possible that the cancelled
1681
+ // trailing metadata (for RST stream) is processed before. This would result
1682
+ // in losing the actual status/message pushed by the server.
1683
+ // To address this, we push the server trailing metadata to the stream only
1684
+ // if it is not pushed already.
1685
+ stream->MaybePushServerTrailingMetadata(std::move(metadata));
1123
1686
  }
1124
1687
 
1125
1688
  void Http2ClientTransport::CloseTransport() {
1126
1689
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::CloseTransport";
1127
1690
 
1128
1691
  transport_closed_latch_.Set();
1129
- // This is the only place where the general_party_ is
1130
- // reset.
1692
+ settings_->HandleTransportShutdown(event_engine_.get());
1693
+
1694
+ MutexLock lock(&transport_mutex_);
1695
+ // This is the only place where the general_party_ is reset.
1131
1696
  general_party_.reset();
1132
1697
  }
1133
1698
 
@@ -1153,16 +1718,15 @@ void Http2ClientTransport::MaybeSpawnCloseTransport(Http2Status http2_status,
1153
1718
  absl::flat_hash_map<uint32_t, RefCountedPtr<Stream>> stream_list =
1154
1719
  std::move(stream_list_);
1155
1720
  stream_list_.clear();
1156
- state_tracker_.SetState(GRPC_CHANNEL_SHUTDOWN,
1157
- http2_status.GetAbslConnectionError(),
1158
- "transport closed");
1721
+ ReportDisconnectionLocked(
1722
+ http2_status.GetAbslConnectionError(), {},
1723
+ absl::StrCat("Transport closed: ", http2_status.DebugString()).c_str());
1159
1724
  lock.Release();
1160
1725
 
1161
- general_party_->Spawn(
1162
- "CloseTransport",
1163
- [self = RefAsSubclass<Http2ClientTransport>(),
1164
- stream_list = std::move(stream_list),
1165
- http2_status = std::move(http2_status)]() mutable {
1726
+ SpawnInfallibleTransportParty(
1727
+ "CloseTransport", [self = RefAsSubclass<Http2ClientTransport>(),
1728
+ stream_list = std::move(stream_list),
1729
+ http2_status = std::move(http2_status)]() mutable {
1166
1730
  GRPC_HTTP2_CLIENT_DLOG
1167
1731
  << "Http2ClientTransport::CloseTransport Cleaning up call stacks";
1168
1732
  // Clean up the call stacks for all active streams.
@@ -1172,8 +1736,8 @@ void Http2ClientTransport::MaybeSpawnCloseTransport(Http2Status http2_status,
1172
1736
  // fail. Also, as this is running on the transport
1173
1737
  // party, there would not be concurrent access to the stream.
1174
1738
  auto& stream = pair.second;
1175
- self->BeginCloseStream(stream->stream_id,
1176
- Http2ErrorCodeToRstFrameErrorCode(
1739
+ self->BeginCloseStream(stream,
1740
+ Http2ErrorCodeToFrameErrorCode(
1177
1741
  http2_status.GetConnectionErrorCode()),
1178
1742
  CancelledServerMetadataFromStatus(
1179
1743
  http2_status.GetAbslConnectionError()));
@@ -1183,38 +1747,115 @@ void Http2ClientTransport::MaybeSpawnCloseTransport(Http2Status http2_status,
1183
1747
  // the connection; a receiver of a GOAWAY that has no more use for the
1184
1748
  // connection SHOULD still send a GOAWAY frame before terminating the
1185
1749
  // connection.
1186
- // TODO(akshitpatel) : [PH2][P2] : There would a timer for sending
1187
- // goaway here. Once goaway is sent or timer is expired, close the
1188
- // transport.
1189
- return Map(Immediate(absl::OkStatus()),
1190
- [self](GRPC_UNUSED absl::Status) mutable {
1191
- self->CloseTransport();
1192
- return Empty{};
1193
- });
1194
- },
1195
- [](Empty) {});
1750
+ return Map(
1751
+ // TODO(akshitpatel) : [PH2][P4] : This is creating a copy of
1752
+ // the debug data. Verify if this is causing a performance
1753
+ // issue.
1754
+ Race(AssertResultType<absl::Status>(
1755
+ self->goaway_manager_.RequestGoaway(
1756
+ http2_status.GetConnectionErrorCode(),
1757
+ /*debug_data=*/
1758
+ Slice::FromCopiedString(
1759
+ http2_status.GetAbslConnectionError().message()),
1760
+ kLastIncomingStreamIdClient, /*immediate=*/true)),
1761
+ // Failsafe to close the transport if goaway is not
1762
+ // sent within kGoawaySendTimeoutSeconds seconds.
1763
+ Sleep(Duration::Seconds(kGoawaySendTimeoutSeconds))),
1764
+ [self](auto) mutable {
1765
+ self->CloseTransport();
1766
+ return Empty{};
1767
+ });
1768
+ ;
1769
+ });
1770
+ }
1771
+
1772
+ bool Http2ClientTransport::CanCloseTransportLocked() const {
1773
+ // If there are no more streams and next stream id is greater than the
1774
+ // max allowed stream id, then no more streams can be created and it is
1775
+ // safe to close the transport.
1776
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::CanCloseTransportLocked "
1777
+ "GetActiveStreamCountLocked="
1778
+ << GetActiveStreamCountLocked()
1779
+ << " PeekNextStreamId=" << PeekNextStreamId()
1780
+ << " GetMaxAllowedStreamId="
1781
+ << GetMaxAllowedStreamId();
1782
+ return GetActiveStreamCountLocked() == 0 &&
1783
+ PeekNextStreamId() > GetMaxAllowedStreamId();
1196
1784
  }
1197
1785
 
1198
1786
  Http2ClientTransport::~Http2ClientTransport() {
1199
1787
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport Destructor Begin";
1200
1788
  GRPC_DCHECK(stream_list_.empty());
1789
+ GRPC_DCHECK(general_party_ == nullptr);
1201
1790
  memory_owner_.Reset();
1202
- SourceDestructing();
1203
1791
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport Destructor End";
1204
1792
  }
1205
1793
 
1794
+ void Http2ClientTransport::SpawnAddChannelzData(RefCountedPtr<Party> party,
1795
+ channelz::DataSink sink) {
1796
+ SpawnInfallible(
1797
+ std::move(party), "AddData",
1798
+ [self = RefAsSubclass<Http2ClientTransport>(),
1799
+ sink = std::move(sink)]() mutable {
1800
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::AddData Promise";
1801
+ sink.AddData(
1802
+ "Http2ClientTransport",
1803
+ channelz::PropertyList()
1804
+ .Set("keepalive_time", self->keepalive_time_)
1805
+ .Set("keepalive_permit_without_calls",
1806
+ self->keepalive_permit_without_calls_)
1807
+ .Set("settings", self->settings_->ChannelzProperties())
1808
+ .Set("flow_control",
1809
+ self->flow_control_.stats().ChannelzProperties()));
1810
+ self->general_party_->ExportToChannelz("Http2ClientTransport Party",
1811
+ sink);
1812
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::AddData End";
1813
+ return Empty{};
1814
+ });
1815
+ }
1816
+
1206
1817
  void Http2ClientTransport::AddData(channelz::DataSink sink) {
1207
- sink.AddData(
1208
- "Http2ClientTransport",
1209
- channelz::PropertyList()
1210
- .Set("settings", settings_.ChannelzProperties())
1211
- .Set("keepalive_time", keepalive_time_)
1212
- .Set("keepalive_timeout", keepalive_timeout_)
1213
- .Set("ping_timeout", ping_timeout_)
1214
- .Set("keepalive_permit_without_calls",
1215
- keepalive_permit_without_calls_)
1216
- .Set("flow_control", flow_control_.stats().ChannelzProperties()));
1217
- general_party_->ExportToChannelz("Http2ClientTransport Party", sink);
1818
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport::AddData Begin";
1819
+
1820
+ event_engine_->Run([self = RefAsSubclass<Http2ClientTransport>(),
1821
+ sink = std::move(sink)]() mutable {
1822
+ bool is_party_null = false;
1823
+ {
1824
+ // Apart from CloseTransport, this is the only place where a lock is taken
1825
+ // to access general_party_. All other access to general_party_ happens
1826
+ // on the general party itself and hence do not race with CloseTransport.
1827
+ // TODO(akshitpatel) : [PH2][P4] : Check if a new mutex is needed to
1828
+ // protect general_party_. Curently transport_mutex_ can is used in
1829
+ // these places:
1830
+ // 1. In promises running on the transport party
1831
+ // 2. In AddData promise
1832
+ // 3. In Orphan function.
1833
+ // 4. Stream creation (this will be removed soon).
1834
+ // Given that #1 is already serialized (guaranteed by party), #2 is on
1835
+ // demand and #3 happens once for the lifetime of the transport while
1836
+ // closing the transport, the contention should be minimal.
1837
+ MutexLock lock(&self->transport_mutex_);
1838
+ // TODO(akshitpatel) : [PH2][P2] : There is still a potential for a race
1839
+ // here where the general_party_ is reset between the lock being
1840
+ // released and the spawn. We cannot just do a spawn inside the mutex as
1841
+ // that may result in deadlock.
1842
+ // Potential fix to hold a ref to the party inside the mutex and do a
1843
+ // spawn outside the mutex. The only side effect is that this introduces
1844
+ // an additional ref to the party other the transport's copy.
1845
+ if (GPR_UNLIKELY(self->general_party_ == nullptr)) {
1846
+ is_party_null = true;
1847
+ GRPC_HTTP2_CLIENT_DLOG
1848
+ << "Http2ClientTransport::AddData general_party_ is "
1849
+ "null. Transport is closed.";
1850
+ }
1851
+ }
1852
+
1853
+ ExecCtx exec_ctx;
1854
+ if (!is_party_null) {
1855
+ self->SpawnAddChannelzData(self->general_party_, std::move(sink));
1856
+ }
1857
+ self.reset(); // Cleanup with exec_ctx in scope
1858
+ });
1218
1859
  }
1219
1860
 
1220
1861
  ///////////////////////////////////////////////////////////////////////////////
@@ -1240,7 +1881,7 @@ bool Http2ClientTransport::SetOnDone(CallHandler call_handler,
1240
1881
  GRPC_HTTP2_CLIENT_DLOG << "PH2: Client call " << self.get()
1241
1882
  << " id=" << stream_id
1242
1883
  << " done: cancelled=" << cancelled;
1243
- absl::StatusOr<EnqueueResult> enqueue_result;
1884
+ absl::StatusOr<StreamWritabilityUpdate> enqueue_result;
1244
1885
  GRPC_HTTP2_CLIENT_DLOG
1245
1886
  << "PH2: Client call " << self.get() << " id=" << stream_id
1246
1887
  << " done: stream=" << stream.get() << " cancelled=" << cancelled;
@@ -1262,6 +1903,12 @@ bool Http2ClientTransport::SetOnDone(CallHandler call_handler,
1262
1903
  }
1263
1904
 
1264
1905
  if (enqueue_result.ok()) {
1906
+ GRPC_HTTP2_CLIENT_DLOG
1907
+ << "Http2ClientTransport::SetOnDone "
1908
+ "MaybeAddStreamToWritableStreamList for stream= "
1909
+ << stream->GetStreamId() << " enqueue_result={became_writable="
1910
+ << enqueue_result.value().became_writable << ", priority="
1911
+ << static_cast<uint8_t>(enqueue_result.value().priority) << "}";
1265
1912
  GRPC_UNUSED absl::Status status =
1266
1913
  self->MaybeAddStreamToWritableStreamList(stream,
1267
1914
  enqueue_result.value());
@@ -1270,38 +1917,63 @@ bool Http2ClientTransport::SetOnDone(CallHandler call_handler,
1270
1917
  }
1271
1918
 
1272
1919
  std::optional<RefCountedPtr<Stream>> Http2ClientTransport::MakeStream(
1273
- CallHandler call_handler, const uint32_t stream_id) {
1920
+ CallHandler call_handler) {
1274
1921
  // https://datatracker.ietf.org/doc/html/rfc9113#name-stream-identifiers
1275
- // TODO(tjagtap) : [PH2][P2] Validate implementation.
1276
-
1277
- // TODO(akshitpatel) : [PH2][P1] : Probably do not need this lock. This
1278
- // function is always called under the stream_id_mutex_. The issue is the
1279
- // OnDone needs to be synchronous and hence InterActivityMutex might not be
1280
- // an option to protect the stream_list_.
1281
- MutexLock lock(&transport_mutex_);
1282
- RefCountedPtr<Stream> stream = MakeRefCounted<Stream>(
1283
- call_handler, stream_id, settings_.peer().allow_true_binary_metadata(),
1284
- settings_.acked().allow_true_binary_metadata(), flow_control_);
1922
+ RefCountedPtr<Stream> stream;
1923
+ {
1924
+ // TODO(akshitpatel) : [PH2][P3] : Remove this mutex once settings is in
1925
+ // place.
1926
+ MutexLock lock(&transport_mutex_);
1927
+ stream = MakeRefCounted<Stream>(call_handler, flow_control_);
1928
+ }
1285
1929
  const bool on_done_added = SetOnDone(call_handler, stream);
1286
1930
  if (!on_done_added) return std::nullopt;
1287
- stream_list_.emplace(stream_id, stream);
1288
1931
  return stream;
1289
1932
  }
1290
1933
 
1934
+ uint32_t Http2ClientTransport::GetMaxAllowedStreamId() const {
1935
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport GetMaxAllowedStreamId "
1936
+ << max_allowed_stream_id_;
1937
+ return max_allowed_stream_id_;
1938
+ }
1939
+
1940
+ void Http2ClientTransport::SetMaxAllowedStreamId(
1941
+ const uint32_t max_allowed_stream_id) {
1942
+ const uint32_t old_max_allowed_stream_id = GetMaxAllowedStreamId();
1943
+ GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport SetMaxAllowedStreamId "
1944
+ << " max_allowed_stream_id: " << max_allowed_stream_id
1945
+ << " old_allowed_max_stream_id: "
1946
+ << old_max_allowed_stream_id;
1947
+ // RFC9113 : Endpoints MUST NOT increase the value they send in the last
1948
+ // stream identifier, since the peers might already have retried unprocessed
1949
+ // requests on another connection.
1950
+ if (GPR_LIKELY(max_allowed_stream_id <= old_max_allowed_stream_id)) {
1951
+ max_allowed_stream_id_ = max_allowed_stream_id;
1952
+ } else {
1953
+ LOG_IF(ERROR, max_allowed_stream_id > old_max_allowed_stream_id)
1954
+ << "Endpoints MUST NOT increase the value they send in the last "
1955
+ "stream "
1956
+ "identifier";
1957
+ GRPC_DCHECK_LE(max_allowed_stream_id, old_max_allowed_stream_id)
1958
+ << "Endpoints MUST NOT increase the value they send in the last "
1959
+ "stream "
1960
+ "identifier";
1961
+ }
1962
+ }
1963
+
1291
1964
  ///////////////////////////////////////////////////////////////////////////////
1292
1965
  // Call Spine related operations
1293
1966
 
1294
- auto Http2ClientTransport::CallOutboundLoop(
1295
- CallHandler call_handler, RefCountedPtr<Stream> stream,
1296
- InterActivityMutex<uint32_t>::Lock lock /* Locked stream_id_mutex */,
1297
- ClientMetadataHandle metadata) {
1967
+ auto Http2ClientTransport::CallOutboundLoop(CallHandler call_handler,
1968
+ RefCountedPtr<Stream> stream,
1969
+ ClientMetadataHandle metadata) {
1298
1970
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport CallOutboundLoop";
1299
1971
  GRPC_DCHECK(stream != nullptr);
1300
1972
 
1301
1973
  auto send_message = [self = RefAsSubclass<Http2ClientTransport>(),
1302
1974
  stream](MessageHandle&& message) mutable {
1303
1975
  return TrySeq(stream->EnqueueMessage(std::move(message)),
1304
- [self, stream](const EnqueueResult result) mutable {
1976
+ [self, stream](const StreamWritabilityUpdate result) mutable {
1305
1977
  GRPC_HTTP2_CLIENT_DLOG
1306
1978
  << "Http2ClientTransport CallOutboundLoop "
1307
1979
  "Enqueued Message";
@@ -1317,7 +1989,7 @@ auto Http2ClientTransport::CallOutboundLoop(
1317
1989
  [stream, metadata = std::move(metadata)]() mutable {
1318
1990
  return stream->EnqueueInitialMetadata(std::move(metadata));
1319
1991
  },
1320
- [self, stream](const EnqueueResult result) mutable {
1992
+ [self, stream](const StreamWritabilityUpdate result) mutable {
1321
1993
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport CallOutboundLoop "
1322
1994
  "Enqueued Initial Metadata";
1323
1995
  return self->MaybeAddStreamToWritableStreamList(std::move(stream),
@@ -1328,7 +2000,7 @@ auto Http2ClientTransport::CallOutboundLoop(
1328
2000
  auto send_half_closed = [self = RefAsSubclass<Http2ClientTransport>(),
1329
2001
  stream]() mutable {
1330
2002
  return TrySeq([stream]() { return stream->EnqueueHalfClosed(); },
1331
- [self, stream](const EnqueueResult result) mutable {
2003
+ [self, stream](const StreamWritabilityUpdate result) mutable {
1332
2004
  GRPC_HTTP2_CLIENT_DLOG
1333
2005
  << "Http2ClientTransport CallOutboundLoop "
1334
2006
  "Enqueued Half Closed";
@@ -1340,7 +2012,7 @@ auto Http2ClientTransport::CallOutboundLoop(
1340
2012
  "Ph2CallOutboundLoop",
1341
2013
  TrySeq(
1342
2014
  send_initial_metadata(),
1343
- [call_handler, send_message, lock = std::move(lock)]() {
2015
+ [call_handler, send_message]() {
1344
2016
  // The lock will be released once the promise is constructed from
1345
2017
  // this factory. ForEach will be polled after the lock is
1346
2018
  // released.
@@ -1365,44 +2037,38 @@ void Http2ClientTransport::StartCall(CallHandler call_handler) {
1365
2037
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport StartCall Begin";
1366
2038
  call_handler.SpawnGuarded(
1367
2039
  "OutboundLoop",
1368
- TrySeq(
1369
- call_handler.PullClientInitialMetadata(),
1370
- [self = RefAsSubclass<Http2ClientTransport>()](
1371
- ClientMetadataHandle metadata) {
1372
- // Lock the stream_id_mutex_
1373
- return Staple(self->stream_id_mutex_.Acquire(),
1374
- std::move(metadata));
1375
- },
1376
- [self = RefAsSubclass<Http2ClientTransport>(),
1377
- call_handler](auto args /* Locked stream_id_mutex */) mutable {
1378
- // For a gRPC Client, we only need to check the
1379
- // MAX_CONCURRENT_STREAMS setting compliance at the time of
1380
- // sending (that is write path). A gRPC Client will never
1381
- // receive a stream initiated by a server, so we dont have to
1382
- // check MAX_CONCURRENT_STREAMS compliance on the Read-Path.
1383
- //
1384
- // TODO(tjagtap) : [PH2][P1] Check for MAX_CONCURRENT_STREAMS
1385
- // sent by peer before making a stream. Decide behaviour if we are
1386
- // crossing this threshold.
1387
- //
1388
- // TODO(tjagtap) : [PH2][P1] : For a server we will have to do
1389
- // this for incoming streams only. If a server receives more streams
1390
- // from a client than is allowed by the clients settings, whether or
1391
- // not we should fail is debatable.
1392
- const uint32_t stream_id = self->NextStreamId(std::get<0>(args));
1393
- std::optional<RefCountedPtr<Stream>> stream =
1394
- self->MakeStream(call_handler, stream_id);
1395
- return If(
1396
- stream.has_value(),
1397
- [self, call_handler, stream, args = std::move(args)]() mutable {
1398
- return Map(
1399
- self->CallOutboundLoop(call_handler, stream.value(),
1400
- std::move(std::get<0>(args)),
1401
- std::move(std::get<1>(args))),
1402
- [](absl::Status status) { return status; });
1403
- },
1404
- []() { return absl::InternalError("Failed to make stream"); });
1405
- }));
2040
+ TrySeq(call_handler.PullClientInitialMetadata(),
2041
+ [self = RefAsSubclass<Http2ClientTransport>(),
2042
+ call_handler](ClientMetadataHandle metadata) mutable {
2043
+ // For a gRPC Client, we only need to check the
2044
+ // MAX_CONCURRENT_STREAMS setting compliance at the time of
2045
+ // sending (that is write path). A gRPC Client will never
2046
+ // receive a stream initiated by a server, so we dont have to
2047
+ // check MAX_CONCURRENT_STREAMS compliance on the Read-Path.
2048
+ //
2049
+ // TODO(tjagtap) : [PH2][P1] Check for MAX_CONCURRENT_STREAMS
2050
+ // sent by peer before making a stream. Decide behaviour if we
2051
+ // are crossing this threshold.
2052
+ //
2053
+ // TODO(tjagtap) : [PH2][P1] : For a server we will have to do
2054
+ // this for incoming streams only. If a server receives more
2055
+ // streams from a client than is allowed by the clients settings,
2056
+ // whether or not we should fail is debatable.
2057
+ std::optional<RefCountedPtr<Stream>> stream =
2058
+ self->MakeStream(call_handler);
2059
+ return If(
2060
+ stream.has_value(),
2061
+ [self, call_handler, stream,
2062
+ initial_metadata = std::move(metadata)]() mutable {
2063
+ return Map(
2064
+ self->CallOutboundLoop(call_handler, stream.value(),
2065
+ std::move(initial_metadata)),
2066
+ [](absl::Status status) { return status; });
2067
+ },
2068
+ []() {
2069
+ return absl::InternalError("Failed to make stream");
2070
+ });
2071
+ }));
1406
2072
  GRPC_HTTP2_CLIENT_DLOG << "Http2ClientTransport StartCall End";
1407
2073
  }
1408
2074