grpc 1.25.0 → 1.26.0.pre1


Potentially problematic release.

Files changed (278)
  1. checksums.yaml +4 -4
  2. data/Makefile +782 -291
  3. data/include/grpc/impl/codegen/grpc_types.h +4 -0
  4. data/include/grpc/impl/codegen/port_platform.h +7 -0
  5. data/include/grpc/support/alloc.h +0 -16
  6. data/src/core/ext/filters/client_channel/backend_metric.cc +2 -2
  7. data/src/core/ext/filters/client_channel/backup_poller.cc +1 -1
  8. data/src/core/ext/filters/client_channel/channel_connectivity.cc +2 -2
  9. data/src/core/ext/filters/client_channel/client_channel.cc +95 -88
  10. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +7 -7
  11. data/src/core/ext/filters/client_channel/client_channel_channelz.h +4 -2
  12. data/src/core/ext/filters/client_channel/client_channel_factory.cc +1 -1
  13. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +3 -3
  14. data/src/core/ext/filters/client_channel/connector.h +40 -45
  15. data/src/core/ext/filters/client_channel/global_subchannel_pool.cc +10 -10
  16. data/src/core/ext/filters/client_channel/health/health_check_client.cc +16 -13
  17. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +66 -37
  18. data/src/core/ext/filters/client_channel/http_proxy.cc +107 -116
  19. data/src/core/ext/filters/client_channel/http_proxy.h +5 -1
  20. data/src/core/ext/filters/client_channel/lb_policy.cc +3 -3
  21. data/src/core/ext/filters/client_channel/lb_policy.h +9 -5
  22. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +10 -8
  23. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +18 -19
  24. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +4 -3
  25. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +4 -4
  26. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +1 -1
  27. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +3 -3
  28. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +3 -3
  29. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +6 -9
  30. data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +368 -0
  31. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +157 -77
  32. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +5 -5
  33. data/src/core/ext/filters/client_channel/lb_policy_registry.h +1 -1
  34. data/src/core/ext/filters/client_channel/local_subchannel_pool.cc +8 -8
  35. data/src/core/ext/filters/client_channel/proxy_mapper.h +14 -34
  36. data/src/core/ext/filters/client_channel/proxy_mapper_registry.cc +46 -79
  37. data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +23 -17
  38. data/src/core/ext/filters/client_channel/resolver.cc +2 -1
  39. data/src/core/ext/filters/client_channel/resolver.h +2 -2
  40. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +1 -1
  41. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +4 -4
  42. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +1 -1
  43. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_libuv.cc +13 -10
  44. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +3 -2
  45. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +9 -8
  46. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +17 -16
  47. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +2 -2
  48. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +4 -4
  49. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +9 -9
  50. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +5 -3
  51. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +1 -1
  52. data/src/core/ext/filters/client_channel/resolver_factory.h +3 -3
  53. data/src/core/ext/filters/client_channel/resolver_registry.cc +14 -12
  54. data/src/core/ext/filters/client_channel/resolver_registry.h +6 -4
  55. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +6 -6
  56. data/src/core/ext/filters/client_channel/resolver_result_parsing.h +6 -6
  57. data/src/core/ext/filters/client_channel/resolving_lb_policy.cc +5 -5
  58. data/src/core/ext/filters/client_channel/resolving_lb_policy.h +3 -3
  59. data/src/core/ext/filters/client_channel/service_config.cc +15 -14
  60. data/src/core/ext/filters/client_channel/service_config.h +14 -19
  61. data/src/core/ext/filters/client_channel/subchannel.cc +38 -36
  62. data/src/core/ext/filters/client_channel/subchannel.h +11 -12
  63. data/src/core/ext/filters/client_channel/subchannel_interface.h +1 -1
  64. data/src/core/ext/filters/client_channel/xds/xds_api.cc +19 -9
  65. data/src/core/ext/filters/client_channel/xds/xds_api.h +19 -9
  66. data/src/core/ext/filters/client_channel/xds/xds_bootstrap.cc +3 -3
  67. data/src/core/ext/filters/client_channel/xds/xds_bootstrap.h +6 -5
  68. data/src/core/ext/filters/client_channel/xds/xds_client.cc +58 -31
  69. data/src/core/ext/filters/client_channel/xds/xds_client.h +20 -15
  70. data/src/core/ext/filters/client_channel/xds/xds_client_stats.cc +5 -3
  71. data/src/core/ext/filters/client_channel/xds/xds_client_stats.h +18 -15
  72. data/src/core/ext/filters/client_idle/client_idle_filter.cc +10 -10
  73. data/src/core/ext/filters/deadline/deadline_filter.cc +15 -13
  74. data/src/core/ext/filters/http/client/http_client_filter.cc +12 -12
  75. data/src/core/ext/filters/http/client_authority_filter.cc +3 -3
  76. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +13 -7
  77. data/src/core/ext/filters/http/server/http_server_filter.cc +14 -13
  78. data/src/core/ext/filters/max_age/max_age_filter.cc +16 -14
  79. data/src/core/ext/filters/message_size/message_size_filter.cc +10 -8
  80. data/src/core/ext/filters/message_size/message_size_filter.h +1 -1
  81. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +9 -8
  82. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +141 -174
  83. data/src/core/ext/transport/chttp2/client/chttp2_connector.h +31 -1
  84. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +7 -6
  85. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +14 -12
  86. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +8 -5
  87. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +46 -38
  88. data/src/core/ext/transport/chttp2/transport/context_list.cc +2 -2
  89. data/src/core/ext/transport/chttp2/transport/flow_control.h +0 -5
  90. data/src/core/ext/transport/chttp2/transport/frame_data.cc +6 -7
  91. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +2 -2
  92. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +1 -1
  93. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +4 -3
  94. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +5 -5
  95. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +11 -8
  96. data/src/core/ext/transport/chttp2/transport/internal.h +3 -3
  97. data/src/core/ext/transport/chttp2/transport/parsing.cc +4 -4
  98. data/src/core/ext/transport/chttp2/transport/writing.cc +3 -2
  99. data/src/core/ext/transport/inproc/inproc_transport.cc +65 -41
  100. data/src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c +0 -1
  101. data/src/core/ext/upb-generated/envoy/api/v2/cds.upb.c +141 -70
  102. data/src/core/ext/upb-generated/envoy/api/v2/cds.upb.h +352 -118
  103. data/src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c +0 -1
  104. data/src/core/ext/upb-generated/envoy/api/v2/cluster/filter.upb.c +0 -1
  105. data/src/core/ext/upb-generated/envoy/api/v2/cluster/outlier_detection.upb.c +8 -4
  106. data/src/core/ext/upb-generated/envoy/api/v2/cluster/outlier_detection.upb.h +65 -0
  107. data/src/core/ext/upb-generated/envoy/api/v2/core/address.upb.c +0 -1
  108. data/src/core/ext/upb-generated/envoy/api/v2/core/base.upb.c +16 -2
  109. data/src/core/ext/upb-generated/envoy/api/v2/core/base.upb.h +36 -0
  110. data/src/core/ext/upb-generated/envoy/api/v2/core/config_source.upb.c +12 -5
  111. data/src/core/ext/upb-generated/envoy/api/v2/core/config_source.upb.h +34 -0
  112. data/src/core/ext/upb-generated/envoy/api/v2/core/grpc_service.upb.c +1 -2
  113. data/src/core/ext/upb-generated/envoy/api/v2/core/health_check.upb.c +13 -12
  114. data/src/core/ext/upb-generated/envoy/api/v2/core/health_check.upb.h +28 -24
  115. data/src/core/ext/upb-generated/envoy/api/v2/core/http_uri.upb.c +0 -1
  116. data/src/core/ext/upb-generated/envoy/api/v2/core/protocol.upb.c +5 -4
  117. data/src/core/ext/upb-generated/envoy/api/v2/core/protocol.upb.h +13 -0
  118. data/src/core/ext/upb-generated/envoy/api/v2/discovery.upb.c +0 -1
  119. data/src/core/ext/upb-generated/envoy/api/v2/eds.upb.c +23 -23
  120. data/src/core/ext/upb-generated/envoy/api/v2/eds.upb.h +48 -44
  121. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/endpoint.upb.c +0 -1
  122. data/src/core/ext/upb-generated/envoy/api/v2/endpoint/load_report.upb.c +0 -1
  123. data/src/core/ext/upb-generated/envoy/type/http.upb.c +16 -0
  124. data/src/core/ext/upb-generated/envoy/type/http.upb.h +36 -0
  125. data/src/core/ext/upb-generated/envoy/type/percent.upb.c +0 -1
  126. data/src/core/ext/upb-generated/envoy/type/range.upb.c +0 -1
  127. data/src/core/lib/avl/avl.cc +1 -1
  128. data/src/core/lib/channel/channel_stack.cc +1 -1
  129. data/src/core/lib/channel/channel_stack.h +16 -4
  130. data/src/core/lib/channel/channel_trace.cc +4 -4
  131. data/src/core/lib/channel/channelz.cc +46 -46
  132. data/src/core/lib/channel/channelz.h +37 -35
  133. data/src/core/lib/channel/channelz_registry.cc +2 -2
  134. data/src/core/lib/channel/channelz_registry.h +1 -1
  135. data/src/core/lib/channel/connected_channel.cc +3 -2
  136. data/src/core/lib/channel/handshaker.cc +1 -1
  137. data/src/core/lib/channel/handshaker_registry.cc +5 -5
  138. data/src/core/lib/channel/handshaker_registry.h +3 -3
  139. data/src/core/lib/compression/message_compress.cc +3 -2
  140. data/src/core/lib/compression/stream_compression_identity.cc +5 -7
  141. data/src/core/lib/gpr/alloc.cc +4 -29
  142. data/src/core/lib/gpr/cpu_linux.cc +1 -1
  143. data/src/core/lib/gprpp/fork.cc +4 -4
  144. data/src/core/lib/gprpp/global_config_env.cc +7 -7
  145. data/src/core/lib/gprpp/global_config_env.h +2 -2
  146. data/src/core/lib/gprpp/host_port.cc +8 -8
  147. data/src/core/lib/gprpp/host_port.h +3 -3
  148. data/src/core/lib/gprpp/inlined_vector.h +13 -0
  149. data/src/core/lib/gprpp/map.h +2 -9
  150. data/src/core/lib/gprpp/memory.h +12 -98
  151. data/src/core/lib/gprpp/orphanable.h +3 -3
  152. data/src/core/lib/gprpp/ref_counted.h +3 -3
  153. data/src/core/lib/gprpp/ref_counted_ptr.h +1 -1
  154. data/src/core/lib/gprpp/string_view.h +45 -23
  155. data/src/core/lib/gprpp/thd.h +1 -1
  156. data/src/core/lib/gprpp/thd_posix.cc +6 -5
  157. data/src/core/lib/gprpp/thd_windows.cc +3 -3
  158. data/src/core/lib/http/httpcli.cc +1 -1
  159. data/src/core/lib/http/httpcli_security_connector.cc +3 -3
  160. data/src/core/lib/iomgr/buffer_list.cc +10 -5
  161. data/src/core/lib/iomgr/call_combiner.cc +7 -6
  162. data/src/core/lib/iomgr/call_combiner.h +4 -3
  163. data/src/core/lib/iomgr/cfstream_handle.cc +2 -2
  164. data/src/core/lib/iomgr/closure.h +33 -135
  165. data/src/core/lib/iomgr/combiner.cc +10 -17
  166. data/src/core/lib/iomgr/combiner.h +0 -2
  167. data/src/core/lib/iomgr/endpoint_cfstream.cc +2 -2
  168. data/src/core/lib/iomgr/endpoint_pair_posix.cc +1 -1
  169. data/src/core/lib/iomgr/ev_epoll1_linux.cc +3 -2
  170. data/src/core/lib/iomgr/ev_epollex_linux.cc +23 -13
  171. data/src/core/lib/iomgr/ev_poll_posix.cc +30 -17
  172. data/src/core/lib/iomgr/exec_ctx.cc +52 -5
  173. data/src/core/lib/iomgr/exec_ctx.h +6 -2
  174. data/src/core/lib/iomgr/executor.cc +16 -37
  175. data/src/core/lib/iomgr/executor.h +4 -7
  176. data/src/core/lib/iomgr/executor/threadpool.cc +4 -4
  177. data/src/core/lib/iomgr/iomgr_custom.cc +1 -1
  178. data/src/core/lib/iomgr/lockfree_event.cc +9 -8
  179. data/src/core/lib/iomgr/logical_thread.cc +103 -0
  180. data/src/core/lib/iomgr/logical_thread.h +52 -0
  181. data/src/core/lib/iomgr/pollset_custom.cc +5 -5
  182. data/src/core/lib/iomgr/pollset_set_custom.cc +9 -9
  183. data/src/core/lib/iomgr/pollset_windows.cc +16 -2
  184. data/src/core/lib/iomgr/port.h +3 -0
  185. data/src/core/lib/iomgr/resolve_address_custom.cc +4 -4
  186. data/src/core/lib/iomgr/resolve_address_posix.cc +8 -9
  187. data/src/core/lib/iomgr/resolve_address_windows.cc +4 -6
  188. data/src/core/lib/iomgr/resource_quota.cc +26 -21
  189. data/src/core/lib/iomgr/socket_utils_common_posix.cc +11 -0
  190. data/src/core/lib/iomgr/socket_windows.cc +2 -2
  191. data/src/core/lib/iomgr/tcp_client_cfstream.cc +2 -2
  192. data/src/core/lib/iomgr/tcp_client_custom.cc +2 -2
  193. data/src/core/lib/iomgr/tcp_client_posix.cc +5 -4
  194. data/src/core/lib/iomgr/tcp_client_windows.cc +2 -2
  195. data/src/core/lib/iomgr/tcp_custom.cc +10 -9
  196. data/src/core/lib/iomgr/tcp_posix.cc +19 -15
  197. data/src/core/lib/iomgr/tcp_server_custom.cc +3 -2
  198. data/src/core/lib/iomgr/tcp_server_posix.cc +5 -4
  199. data/src/core/lib/iomgr/tcp_server_windows.cc +5 -3
  200. data/src/core/lib/iomgr/tcp_windows.cc +16 -13
  201. data/src/core/lib/iomgr/timer_custom.cc +4 -3
  202. data/src/core/lib/iomgr/timer_generic.cc +11 -9
  203. data/src/core/lib/iomgr/udp_server.cc +16 -13
  204. data/src/core/lib/security/credentials/alts/alts_credentials.cc +8 -5
  205. data/src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc +45 -57
  206. data/src/core/lib/security/credentials/composite/composite_credentials.cc +7 -6
  207. data/src/core/lib/security/credentials/credentials.cc +8 -8
  208. data/src/core/lib/security/credentials/credentials.h +5 -5
  209. data/src/core/lib/security/credentials/fake/fake_credentials.cc +5 -5
  210. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +1 -1
  211. data/src/core/lib/security/credentials/iam/iam_credentials.cc +4 -4
  212. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +4 -4
  213. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -2
  214. data/src/core/lib/security/credentials/local/local_credentials.cc +3 -3
  215. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +13 -11
  216. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +6 -5
  217. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +4 -4
  218. data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc +5 -5
  219. data/src/core/lib/security/credentials/tls/spiffe_credentials.cc +2 -2
  220. data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +28 -22
  221. data/src/core/lib/security/security_connector/alts/alts_security_connector.h +5 -0
  222. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +10 -10
  223. data/src/core/lib/security/security_connector/load_system_roots_linux.cc +2 -1
  224. data/src/core/lib/security/security_connector/local/local_security_connector.cc +8 -8
  225. data/src/core/lib/security/security_connector/security_connector.h +1 -1
  226. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +9 -9
  227. data/src/core/lib/security/security_connector/ssl_utils.cc +5 -4
  228. data/src/core/lib/security/security_connector/tls/spiffe_security_connector.cc +14 -15
  229. data/src/core/lib/security/transport/client_auth_filter.cc +4 -3
  230. data/src/core/lib/security/transport/secure_endpoint.cc +9 -8
  231. data/src/core/lib/security/transport/security_handshaker.cc +67 -23
  232. data/src/core/lib/security/transport/server_auth_filter.cc +6 -5
  233. data/src/core/lib/security/transport/target_authority_table.h +1 -1
  234. data/src/core/lib/slice/b64.cc +3 -4
  235. data/src/core/lib/slice/b64.h +1 -2
  236. data/src/core/lib/slice/slice.cc +8 -13
  237. data/src/core/lib/surface/call.cc +19 -19
  238. data/src/core/lib/surface/call.h +6 -7
  239. data/src/core/lib/surface/call_log_batch.cc +1 -2
  240. data/src/core/lib/surface/channel.cc +17 -18
  241. data/src/core/lib/surface/channel.h +4 -19
  242. data/src/core/lib/surface/channel_ping.cc +1 -1
  243. data/src/core/lib/surface/completion_queue.cc +21 -22
  244. data/src/core/lib/surface/completion_queue_factory.cc +1 -1
  245. data/src/core/lib/surface/init.cc +1 -1
  246. data/src/core/lib/surface/init_secure.cc +2 -2
  247. data/src/core/lib/surface/lame_client.cc +10 -12
  248. data/src/core/lib/surface/server.cc +24 -18
  249. data/src/core/lib/surface/version.cc +2 -2
  250. data/src/core/lib/transport/byte_stream.cc +2 -2
  251. data/src/core/lib/transport/byte_stream.h +2 -1
  252. data/src/core/lib/transport/connectivity_state.cc +4 -4
  253. data/src/core/lib/transport/connectivity_state.h +2 -2
  254. data/src/core/lib/transport/metadata.cc +8 -10
  255. data/src/core/lib/transport/metadata.h +5 -8
  256. data/src/core/lib/transport/metadata_batch.cc +6 -0
  257. data/src/core/lib/transport/static_metadata.cc +2 -4
  258. data/src/core/lib/transport/status_metadata.cc +7 -0
  259. data/src/core/lib/transport/status_metadata.h +18 -0
  260. data/src/core/lib/transport/transport.cc +9 -7
  261. data/src/core/plugin_registry/grpc_plugin_registry.cc +4 -0
  262. data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +292 -43
  263. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +197 -46
  264. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.h +4 -2
  265. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker_private.h +5 -0
  266. data/src/core/tsi/ssl/session_cache/ssl_session.h +1 -1
  267. data/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc +1 -1
  268. data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +4 -4
  269. data/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc +1 -1
  270. data/src/core/tsi/ssl_transport_security.cc +2 -1
  271. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +0 -4
  272. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +0 -6
  273. data/src/ruby/lib/grpc/version.rb +1 -1
  274. data/third_party/upb/upb/decode.c +1 -0
  275. metadata +34 -32
  276. data/src/core/ext/filters/client_channel/connector.cc +0 -41
  277. data/src/core/ext/filters/client_channel/proxy_mapper.cc +0 -48
  278. data/src/core/lib/gprpp/set.h +0 -33
data/src/core/ext/filters/http/server/http_server_filter.cc

@@ -99,8 +99,7 @@ struct channel_data {
 
 }  // namespace
 
-static grpc_error* hs_filter_outgoing_metadata(grpc_call_element* elem,
-                                               grpc_metadata_batch* b) {
+static grpc_error* hs_filter_outgoing_metadata(grpc_metadata_batch* b) {
   if (b->idx.named.grpc_message != nullptr) {
     grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
         GRPC_MDVALUE(b->idx.named.grpc_message->md),
@@ -364,7 +363,8 @@ static void hs_recv_initial_metadata_ready(void* user_data, grpc_error* err) {
         "resuming hs_recv_trailing_metadata_ready from "
         "hs_recv_initial_metadata_ready");
   }
-  GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready, err);
+  grpc_core::Closure::Run(DEBUG_LOCATION,
+                          calld->original_recv_initial_metadata_ready, err);
 }
 
 static void hs_recv_message_ready(void* user_data, grpc_error* err) {
@@ -379,7 +379,8 @@ static void hs_recv_message_ready(void* user_data, grpc_error* err) {
       calld->recv_message->reset(calld->read_stream.get());
       calld->have_read_stream = false;
     }
-    GRPC_CLOSURE_RUN(calld->original_recv_message_ready, GRPC_ERROR_REF(err));
+    grpc_core::Closure::Run(DEBUG_LOCATION, calld->original_recv_message_ready,
+                            GRPC_ERROR_REF(err));
   } else {
     // We have not yet seen the recv_initial_metadata callback, so we
     // need to wait to see if this is a GET request.
@@ -405,7 +406,8 @@ static void hs_recv_trailing_metadata_ready(void* user_data, grpc_error* err) {
   err = grpc_error_add_child(
       GRPC_ERROR_REF(err),
      GRPC_ERROR_REF(calld->recv_initial_metadata_ready_error));
-  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, err);
+  grpc_core::Closure::Run(DEBUG_LOCATION,
+                          calld->original_recv_trailing_metadata_ready, err);
 }
 
 static grpc_error* hs_mutate_op(grpc_call_element* elem,
@@ -427,10 +429,9 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
                      &calld->content_type,
                      GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
                      GRPC_BATCH_CONTENT_TYPE));
-    hs_add_error(
-        error_name, &error,
-        hs_filter_outgoing_metadata(
-            elem, op->payload->send_initial_metadata.send_initial_metadata));
+    hs_add_error(error_name, &error,
+                 hs_filter_outgoing_metadata(
+                     op->payload->send_initial_metadata.send_initial_metadata));
     if (error != GRPC_ERROR_NONE) return error;
   }
 
@@ -463,7 +464,7 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
 
   if (op->send_trailing_metadata) {
     grpc_error* error = hs_filter_outgoing_metadata(
-        elem, op->payload->send_trailing_metadata.send_trailing_metadata);
+        op->payload->send_trailing_metadata.send_trailing_metadata);
     if (error != GRPC_ERROR_NONE) return error;
   }
 
@@ -492,8 +493,8 @@ static grpc_error* hs_init_call_elem(grpc_call_element* elem,
 
 /* Destructor for call_data */
 static void hs_destroy_call_elem(grpc_call_element* elem,
-                                 const grpc_call_final_info* final_info,
-                                 grpc_closure* ignored) {
+                                 const grpc_call_final_info* /*final_info*/,
+                                 grpc_closure* /*ignored*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->~call_data();
 }
@@ -511,7 +512,7 @@ static grpc_error* hs_init_channel_elem(grpc_channel_element* elem,
 }
 
 /* Destructor for channel data */
-static void hs_destroy_channel_elem(grpc_channel_element* elem) {}
+static void hs_destroy_channel_elem(grpc_channel_element* /*elem*/) {}
 
 const grpc_channel_filter grpc_http_server_filter = {
     hs_start_transport_stream_op_batch,
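
Editorial note: the dominant change in the http_server_filter.cc hunks above is mechanical: the GRPC_CLOSURE_RUN macro gives way to grpc_core::Closure::Run(DEBUG_LOCATION, closure, error), so every callback invocation now carries the caller's file and line for closure tracing. Below is a minimal standalone sketch of that calling shape; DebugLocation, Closure, and sketch::Run are illustrative stand-ins, not gRPC's actual types.

#include <cstdio>
#include <functional>

// Stand-ins for grpc_error* and grpc_closure*; illustrative only.
using Error = int;
using Closure = std::function<void(Error)>;

// Minimal analogue of gRPC's DEBUG_LOCATION: captures the call site.
struct DebugLocation {
  const char* file;
  int line;
};
#define DEBUG_LOCATION DebugLocation{__FILE__, __LINE__}

namespace sketch {
// Mirrors the shape of grpc_core::Closure::Run(DEBUG_LOCATION, closure, err):
// the call site travels with the invocation, so a tracer can report where
// each closure was run from.
void Run(DebugLocation loc, const Closure& closure, Error error) {
  std::printf("closure run from %s:%d (error=%d)\n", loc.file, loc.line,
              error);
  closure(error);
}
}  // namespace sketch

int main() {
  Closure original_ready = [](Error e) { std::printf("ready, e=%d\n", e); };
  // Before: GRPC_CLOSURE_RUN(original_ready, 0);
  sketch::Run(DEBUG_LOCATION, original_ready, 0);
}

The max_age_filter.cc hunks below apply the analogous change to deferred callbacks, replacing GRPC_CLOSURE_SCHED with grpc_core::ExecCtx::Run(DEBUG_LOCATION, ...).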
data/src/core/ext/filters/max_age/max_age_filter.cc

@@ -206,7 +206,7 @@ static void decrease_call_count(channel_data* chand) {
   }
 }
 
-static void start_max_idle_timer_after_init(void* arg, grpc_error* error) {
+static void start_max_idle_timer_after_init(void* arg, grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   /* Decrease call_count. If there are no active calls at this time,
      max_idle_timer will start here. If the number of active calls is not 0,
@@ -257,7 +257,7 @@ class ConnectivityWatcher : public AsyncConnectivityStateWatcherInterface {
 
 }  // namespace grpc_core
 
-static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
+static void start_max_age_timer_after_init(void* arg, grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = true;
@@ -267,8 +267,7 @@ static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
                   &chand->close_max_age_channel);
   gpr_mu_unlock(&chand->max_age_timer_mu);
   grpc_transport_op* op = grpc_make_transport_op(nullptr);
-  op->start_connectivity_watch.reset(
-      grpc_core::New<grpc_core::ConnectivityWatcher>(chand));
+  op->start_connectivity_watch.reset(new grpc_core::ConnectivityWatcher(chand));
   op->start_connectivity_watch_state = GRPC_CHANNEL_IDLE;
   grpc_channel_next_op(grpc_channel_stack_element(chand->channel_stack, 0), op);
   GRPC_CHANNEL_STACK_UNREF(chand->channel_stack,
@@ -276,7 +275,7 @@ static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
 }
 
 static void start_max_age_grace_timer_after_goaway_op(void* arg,
-                                                      grpc_error* error) {
+                                                      grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_grace_timer_pending = true;
@@ -407,17 +406,17 @@ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
 }
 
 /* Constructor for call_data. */
-static grpc_error* max_age_init_call_elem(grpc_call_element* elem,
-                                          const grpc_call_element_args* args) {
+static grpc_error* max_age_init_call_elem(
+    grpc_call_element* elem, const grpc_call_element_args* /*args*/) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   increase_call_count(chand);
   return GRPC_ERROR_NONE;
 }
 
 /* Destructor for call_data. */
-static void max_age_destroy_call_elem(grpc_call_element* elem,
-                                      const grpc_call_final_info* final_info,
-                                      grpc_closure* ignored) {
+static void max_age_destroy_call_elem(
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   decrease_call_count(chand);
 }
@@ -492,7 +491,9 @@ static grpc_error* max_age_init_channel_elem(grpc_channel_element* elem,
      initialization is done. */
   GRPC_CHANNEL_STACK_REF(chand->channel_stack,
                          "max_age start_max_age_timer_after_init");
-  GRPC_CLOSURE_SCHED(&chand->start_max_age_timer_after_init, GRPC_ERROR_NONE);
+  grpc_core::ExecCtx::Run(DEBUG_LOCATION,
+                          &chand->start_max_age_timer_after_init,
+                          GRPC_ERROR_NONE);
 }
 
 /* Initialize the number of calls as 1, so that the max_idle_timer will not
@@ -501,8 +502,9 @@ static grpc_error* max_age_init_channel_elem(grpc_channel_element* elem,
   if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) {
     GRPC_CHANNEL_STACK_REF(chand->channel_stack,
                            "max_age start_max_idle_timer_after_init");
-    GRPC_CLOSURE_SCHED(&chand->start_max_idle_timer_after_init,
-                       GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Run(DEBUG_LOCATION,
+                            &chand->start_max_idle_timer_after_init,
+                            GRPC_ERROR_NONE);
   }
   return GRPC_ERROR_NONE;
 }
@@ -527,7 +529,7 @@ const grpc_channel_filter grpc_max_age_filter = {
     "max_age"};
 
 static bool maybe_add_max_age_filter(grpc_channel_stack_builder* builder,
-                                     void* arg) {
+                                     void* /*arg*/) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   bool enable =
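
Editorial note: many signatures in these filter hunks change void* arg to void* /*arg*/ and similar. Commenting out the name of an unused parameter keeps the signature intact, documents what the argument would be, and silences -Wunused-parameter without a (void)arg cast. A tiny self-contained illustration (not gRPC code):

#include <cstdio>

// The commented-out parameter name marks `error` as intentionally unused.
static void start_timer_after_init(void* arg, int /*error*/) {
  std::printf("timer callback, arg=%p\n", arg);
}

int main() {
  start_timer_after_init(nullptr, 0);
}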
data/src/core/ext/filters/message_size/message_size_filter.cc

@@ -44,8 +44,9 @@ namespace {
 size_t g_message_size_parser_index;
 }  // namespace
 
-UniquePtr<ServiceConfig::ParsedConfig> MessageSizeParser::ParsePerMethodParams(
-    const grpc_json* json, grpc_error** error) {
+std::unique_ptr<ServiceConfig::ParsedConfig>
+MessageSizeParser::ParsePerMethodParams(const grpc_json* json,
+                                        grpc_error** error) {
   GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE);
   int max_request_message_bytes = -1;
   int max_response_message_bytes = -1;
@@ -215,7 +216,7 @@ static void recv_message_ready(void* user_data, grpc_error* error) {
        calld->recv_trailing_metadata_error,
        "continue recv_trailing_metadata_ready");
   }
-  GRPC_CLOSURE_RUN(closure, error);
+  grpc_core::Closure::Run(DEBUG_LOCATION, closure, error);
 }
 
 // Callback invoked on completion of recv_trailing_metadata
@@ -234,7 +235,8 @@ static void recv_trailing_metadata_ready(void* user_data, grpc_error* error) {
   error =
       grpc_error_add_child(GRPC_ERROR_REF(error), GRPC_ERROR_REF(calld->error));
   // Invoke the next callback.
-  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready, error);
+  grpc_core::Closure::Run(DEBUG_LOCATION,
+                          calld->original_recv_trailing_metadata_ready, error);
 }
 
 // Start transport stream op.
@@ -286,8 +288,8 @@ static grpc_error* message_size_init_call_elem(
 
 // Destructor for call_data.
 static void message_size_destroy_call_elem(
-    grpc_call_element* elem, const grpc_call_final_info* final_info,
-    grpc_closure* ignored) {
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   call_data* calld = (call_data*)elem->call_data;
   calld->~call_data();
 }
@@ -375,7 +377,7 @@ const grpc_channel_filter grpc_message_size_filter = {
 
 // Used for GRPC_CLIENT_SUBCHANNEL
 static bool maybe_add_message_size_filter_subchannel(
-    grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* /*arg*/) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   if (grpc_channel_args_want_minimal_stack(channel_args)) {
@@ -388,7 +390,7 @@ static bool maybe_add_message_size_filter_subchannel(
 // Used for GRPC_CLIENT_DIRECT_CHANNEL and GRPC_SERVER_CHANNEL. Adds the filter
 // only if message size limits or service config is specified.
 static bool maybe_add_message_size_filter(grpc_channel_stack_builder* builder,
-                                          void* arg) {
+                                          void* /*arg*/) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   bool enable = false;
data/src/core/ext/filters/message_size/message_size_filter.h

@@ -46,7 +46,7 @@ class MessageSizeParsedConfig : public ServiceConfig::ParsedConfig {
 
 class MessageSizeParser : public ServiceConfig::Parser {
  public:
-  UniquePtr<ServiceConfig::ParsedConfig> ParsePerMethodParams(
+  std::unique_ptr<ServiceConfig::ParsedConfig> ParsePerMethodParams(
       const grpc_json* json, grpc_error** error) override;
 
   static void Register();
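
Editorial note: the ParsePerMethodParams hunks above swap grpc_core::UniquePtr for std::unique_ptr, and the max_age hunk earlier replaces grpc_core::New<ConnectivityWatcher> with plain new; both fit the large deletion in data/src/core/lib/gprpp/memory.h (+12 -98), which suggests the custom memory helpers are being retired in favor of the standard library. A minimal before/after sketch, with ParsedConfig as a hypothetical stand-in type:

#include <memory>

struct ParsedConfig {
  int max_request_bytes = -1;
};

// Before (custom helpers): UniquePtr<ParsedConfig> cfg(New<ParsedConfig>());
// After, as in the hunks above, the standard library is used directly:
std::unique_ptr<ParsedConfig> MakeConfig() {
  return std::unique_ptr<ParsedConfig>(new ParsedConfig());
}

int main() {
  auto cfg = MakeConfig();
  return cfg->max_request_bytes == -1 ? 0 : 1;
}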
data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc

@@ -70,8 +70,9 @@ static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
   }
 
   // Invoke the next callback.
-  GRPC_CLOSURE_RUN(calld->next_recv_initial_metadata_ready,
-                   GRPC_ERROR_REF(error));
+  grpc_core::Closure::Run(DEBUG_LOCATION,
+                          calld->next_recv_initial_metadata_ready,
+                          GRPC_ERROR_REF(error));
 }
 
 // Start transport stream op.
@@ -105,7 +106,7 @@ static void cronet_compression_start_transport_stream_op_batch(
 
 // Constructor for call_data.
 static grpc_error* cronet_compression_init_call_elem(
-    grpc_call_element* elem, const grpc_call_element_args* args) {
+    grpc_call_element* elem, const grpc_call_element_args* /*args*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->next_recv_initial_metadata_ready = nullptr;
   calld->workaround_active = false;
@@ -117,18 +118,18 @@ static grpc_error* cronet_compression_init_call_elem(
 
 // Destructor for call_data.
 static void cronet_compression_destroy_call_elem(
-    grpc_call_element* elem, const grpc_call_final_info* final_info,
-    grpc_closure* ignored) {}
+    grpc_call_element* /*elem*/, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {}
 
 // Constructor for channel_data.
 static grpc_error* cronet_compression_init_channel_elem(
-    grpc_channel_element* elem, grpc_channel_element_args* args) {
+    grpc_channel_element* /*elem*/, grpc_channel_element_args* /*args*/) {
   return GRPC_ERROR_NONE;
 }
 
 // Destructor for channel_data.
 static void cronet_compression_destroy_channel_elem(
-    grpc_channel_element* elem) {}
+    grpc_channel_element* /*elem*/) {}
 
 // Parse the user agent
 static bool parse_user_agent(grpc_mdelem md) {
@@ -183,7 +184,7 @@ const grpc_channel_filter grpc_workaround_cronet_compression_filter = {
     "workaround_cronet_compression"};
 
 static bool register_workaround_cronet_compression(
-    grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* /*arg*/) {
   const grpc_channel_args* channel_args =
      grpc_channel_stack_builder_get_channel_arguments(builder);
   const grpc_arg* a = grpc_channel_args_find(
data/src/core/ext/transport/chttp2/client/chttp2_connector.cc

@@ -38,202 +38,169 @@
 #include "src/core/lib/iomgr/tcp_client.h"
 #include "src/core/lib/slice/slice_internal.h"
 
-typedef struct {
-  grpc_connector base;
+namespace grpc_core {
 
-  gpr_mu mu;
-  gpr_refcount refs;
-
-  bool shutdown;
-  bool connecting;
-
-  grpc_closure* notify;
-  grpc_connect_in_args args;
-  grpc_connect_out_args* result;
-
-  grpc_endpoint* endpoint;  // Non-NULL until handshaking starts.
-
-  grpc_closure connected;
-
-  grpc_core::RefCountedPtr<grpc_core::HandshakeManager> handshake_mgr;
-} chttp2_connector;
+Chttp2Connector::Chttp2Connector() {
+  GRPC_CLOSURE_INIT(&connected_, Connected, this, grpc_schedule_on_exec_ctx);
+}
 
-static void chttp2_connector_ref(grpc_connector* con) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  gpr_ref(&c->refs);
+Chttp2Connector::~Chttp2Connector() {
+  if (endpoint_ != nullptr) grpc_endpoint_destroy(endpoint_);
 }
 
-static void chttp2_connector_unref(grpc_connector* con) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  if (gpr_unref(&c->refs)) {
-    gpr_mu_destroy(&c->mu);
-    // If handshaking is not yet in progress, destroy the endpoint.
-    // Otherwise, the handshaker will do this for us.
-    if (c->endpoint != nullptr) grpc_endpoint_destroy(c->endpoint);
-    gpr_free(c);
+void Chttp2Connector::Connect(const Args& args, Result* result,
+                              grpc_closure* notify) {
+  grpc_resolved_address addr;
+  Subchannel::GetAddressFromSubchannelAddressArg(args.channel_args, &addr);
+  grpc_endpoint** ep;
+  {
+    MutexLock lock(&mu_);
+    GPR_ASSERT(notify_ == nullptr);
+    args_ = args;
+    result_ = result;
+    notify_ = notify;
+    GPR_ASSERT(!connecting_);
+    connecting_ = true;
+    GPR_ASSERT(endpoint_ == nullptr);
+    ep = &endpoint_;
   }
+  // In some implementations, the closure can be flushed before
+  // grpc_tcp_client_connect() returns, and since the closure requires access
+  // to mu_, this can result in a deadlock (see
+  // https://github.com/grpc/grpc/issues/16427 for details).
+  // grpc_tcp_client_connect() will fill endpoint_ with proper contents, and we
+  // make sure that we still exist at that point by taking a ref.
+  Ref().release();  // Ref held by callback.
+  grpc_tcp_client_connect(&connected_, ep, args.interested_parties,
+                          args.channel_args, &addr, args.deadline);
 }
 
-static void chttp2_connector_shutdown(grpc_connector* con, grpc_error* why) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  gpr_mu_lock(&c->mu);
-  c->shutdown = true;
-  if (c->handshake_mgr != nullptr) {
-    c->handshake_mgr->Shutdown(GRPC_ERROR_REF(why));
+void Chttp2Connector::Shutdown(grpc_error* error) {
+  MutexLock lock(&mu_);
+  shutdown_ = true;
+  if (handshake_mgr_ != nullptr) {
+    handshake_mgr_->Shutdown(GRPC_ERROR_REF(error));
   }
   // If handshaking is not yet in progress, shutdown the endpoint.
   // Otherwise, the handshaker will do this for us.
-  if (!c->connecting && c->endpoint != nullptr) {
-    grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(why));
+  if (!connecting_ && endpoint_ != nullptr) {
+    grpc_endpoint_shutdown(endpoint_, GRPC_ERROR_REF(error));
   }
-  gpr_mu_unlock(&c->mu);
-  GRPC_ERROR_UNREF(why);
+  GRPC_ERROR_UNREF(error);
 }
 
-static void on_handshake_done(void* arg, grpc_error* error) {
-  auto* args = static_cast<grpc_core::HandshakerArgs*>(arg);
-  chttp2_connector* c = static_cast<chttp2_connector*>(args->user_data);
-  gpr_mu_lock(&c->mu);
-  if (error != GRPC_ERROR_NONE || c->shutdown) {
-    if (error == GRPC_ERROR_NONE) {
-      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
-      // We were shut down after handshaking completed successfully, so
-      // destroy the endpoint here.
-      // TODO(ctiller): It is currently necessary to shutdown endpoints
-      // before destroying them, even if we know that there are no
-      // pending read/write callbacks. This should be fixed, at which
-      // point this can be removed.
-      grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error));
-      grpc_endpoint_destroy(args->endpoint);
-      grpc_channel_args_destroy(args->args);
-      grpc_slice_buffer_destroy_internal(args->read_buffer);
-      gpr_free(args->read_buffer);
+void Chttp2Connector::Connected(void* arg, grpc_error* error) {
+  Chttp2Connector* self = static_cast<Chttp2Connector*>(arg);
+  bool unref = false;
+  {
+    MutexLock lock(&self->mu_);
+    GPR_ASSERT(self->connecting_);
+    self->connecting_ = false;
+    if (error != GRPC_ERROR_NONE || self->shutdown_) {
+      if (error == GRPC_ERROR_NONE) {
+        error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
+      } else {
+        error = GRPC_ERROR_REF(error);
+      }
+      if (self->endpoint_ != nullptr) {
+        grpc_endpoint_shutdown(self->endpoint_, GRPC_ERROR_REF(error));
+      }
+      self->result_->Reset();
+      grpc_closure* notify = self->notify_;
+      self->notify_ = nullptr;
+      ExecCtx::Run(DEBUG_LOCATION, notify, error);
+      unref = true;
    } else {
-      error = GRPC_ERROR_REF(error);
+      GPR_ASSERT(self->endpoint_ != nullptr);
+      self->StartHandshakeLocked();
    }
-    c->result->reset();
-  } else {
-    grpc_endpoint_delete_from_pollset_set(args->endpoint,
-                                          c->args.interested_parties);
-    c->result->transport =
-        grpc_create_chttp2_transport(args->args, args->endpoint, true);
-    c->result->socket =
-        grpc_chttp2_transport_get_socket_node(c->result->transport);
-    GPR_ASSERT(c->result->transport);
-    // TODO(roth): We ideally want to wait until we receive HTTP/2
-    // settings from the server before we consider the connection
-    // established. If that doesn't happen before the connection
-    // timeout expires, then we should consider the connection attempt a
-    // failure and feed that information back into the backoff code.
-    // We could pass a notify_on_receive_settings callback to
-    // grpc_chttp2_transport_start_reading() to let us know when
-    // settings are received, but we would need to figure out how to use
-    // that information here.
-    //
-    // Unfortunately, we don't currently have a way to split apart the two
-    // effects of scheduling c->notify: we start sending RPCs immediately
-    // (which we want to do) and we consider the connection attempt successful
-    // (which we don't want to do until we get the notify_on_receive_settings
-    // callback from the transport). If we could split those things
-    // apart, then we could start sending RPCs but then wait for our
-    // timeout before deciding if the connection attempt is successful.
-    // If the attempt is not successful, then we would tear down the
-    // transport and feed the failure back into the backoff code.
-    //
-    // In addition, even if we did that, we would probably not want to do
-    // so until after transparent retries is implemented. Otherwise, any
-    // RPC that we attempt to send on the connection before the timeout
-    // would fail instead of being retried on a subsequent attempt.
-    grpc_chttp2_transport_start_reading(c->result->transport, args->read_buffer,
-                                        nullptr);
-    c->result->channel_args = args->args;
   }
-  grpc_closure* notify = c->notify;
-  c->notify = nullptr;
-  GRPC_CLOSURE_SCHED(notify, error);
-  c->handshake_mgr.reset();
-  gpr_mu_unlock(&c->mu);
-  chttp2_connector_unref(reinterpret_cast<grpc_connector*>(c));
+  if (unref) self->Unref();
 }
 
-static void start_handshake_locked(chttp2_connector* c) {
-  c->handshake_mgr = grpc_core::MakeRefCounted<grpc_core::HandshakeManager>();
-  grpc_core::HandshakerRegistry::AddHandshakers(
-      grpc_core::HANDSHAKER_CLIENT, c->args.channel_args,
-      c->args.interested_parties, c->handshake_mgr.get());
-  grpc_endpoint_add_to_pollset_set(c->endpoint, c->args.interested_parties);
-  c->handshake_mgr->DoHandshake(c->endpoint, c->args.channel_args,
-                                c->args.deadline, nullptr /* acceptor */,
-                                on_handshake_done, c);
-  c->endpoint = nullptr;  // Endpoint handed off to handshake manager.
+void Chttp2Connector::StartHandshakeLocked() {
+  handshake_mgr_ = MakeRefCounted<HandshakeManager>();
+  HandshakerRegistry::AddHandshakers(HANDSHAKER_CLIENT, args_.channel_args,
+                                     args_.interested_parties,
+                                     handshake_mgr_.get());
+  grpc_endpoint_add_to_pollset_set(endpoint_, args_.interested_parties);
+  handshake_mgr_->DoHandshake(endpoint_, args_.channel_args, args_.deadline,
+                              nullptr /* acceptor */, OnHandshakeDone, this);
+  endpoint_ = nullptr;  // Endpoint handed off to handshake manager.
 }
 
-static void connected(void* arg, grpc_error* error) {
-  chttp2_connector* c = static_cast<chttp2_connector*>(arg);
-  gpr_mu_lock(&c->mu);
-  GPR_ASSERT(c->connecting);
-  c->connecting = false;
-  if (error != GRPC_ERROR_NONE || c->shutdown) {
-    if (error == GRPC_ERROR_NONE) {
-      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
+void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error* error) {
+  auto* args = static_cast<HandshakerArgs*>(arg);
+  Chttp2Connector* self = static_cast<Chttp2Connector*>(args->user_data);
+  {
+    MutexLock lock(&self->mu_);
+    if (error != GRPC_ERROR_NONE || self->shutdown_) {
+      if (error == GRPC_ERROR_NONE) {
+        error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
+        // We were shut down after handshaking completed successfully, so
+        // destroy the endpoint here.
+        if (args->endpoint != nullptr) {
+          // TODO(ctiller): It is currently necessary to shutdown endpoints
+          // before destroying them, even if we know that there are no
+          // pending read/write callbacks. This should be fixed, at which
+          // point this can be removed.
+          grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error));
+          grpc_endpoint_destroy(args->endpoint);
+          grpc_channel_args_destroy(args->args);
+          grpc_slice_buffer_destroy_internal(args->read_buffer);
+          gpr_free(args->read_buffer);
+        }
+      } else {
+        error = GRPC_ERROR_REF(error);
+      }
+      self->result_->Reset();
+    } else if (args->endpoint != nullptr) {
+      grpc_endpoint_delete_from_pollset_set(args->endpoint,
+                                            self->args_.interested_parties);
+      self->result_->transport =
+          grpc_create_chttp2_transport(args->args, args->endpoint, true);
+      self->result_->socket_node =
+          grpc_chttp2_transport_get_socket_node(self->result_->transport);
+      GPR_ASSERT(self->result_->transport != nullptr);
+      // TODO(roth): We ideally want to wait until we receive HTTP/2
+      // settings from the server before we consider the connection
+      // established. If that doesn't happen before the connection
+      // timeout expires, then we should consider the connection attempt a
+      // failure and feed that information back into the backoff code.
+      // We could pass a notify_on_receive_settings callback to
+      // grpc_chttp2_transport_start_reading() to let us know when
+      // settings are received, but we would need to figure out how to use
+      // that information here.
+      //
+      // Unfortunately, we don't currently have a way to split apart the two
+      // effects of scheduling c->notify: we start sending RPCs immediately
+      // (which we want to do) and we consider the connection attempt successful
+      // (which we don't want to do until we get the notify_on_receive_settings
+      // callback from the transport). If we could split those things
+      // apart, then we could start sending RPCs but then wait for our
+      // timeout before deciding if the connection attempt is successful.
+      // If the attempt is not successful, then we would tear down the
+      // transport and feed the failure back into the backoff code.
+      //
+      // In addition, even if we did that, we would probably not want to do
+      // so until after transparent retries is implemented. Otherwise, any
+      // RPC that we attempt to send on the connection before the timeout
+      // would fail instead of being retried on a subsequent attempt.
+      grpc_chttp2_transport_start_reading(self->result_->transport,
+                                          args->read_buffer, nullptr);
+      self->result_->channel_args = args->args;
    } else {
-      error = GRPC_ERROR_REF(error);
+      // If the handshaking succeeded but there is no endpoint, then the
+      // handshaker may have handed off the connection to some external
+      // code. Just verify that exit_early flag is set.
+      GPR_DEBUG_ASSERT(args->exit_early);
    }
-    c->result->reset();
-    grpc_closure* notify = c->notify;
-    c->notify = nullptr;
-    GRPC_CLOSURE_SCHED(notify, error);
-    if (c->endpoint != nullptr) {
-      grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(error));
-    }
-    gpr_mu_unlock(&c->mu);
-    chttp2_connector_unref(static_cast<grpc_connector*>(arg));
-  } else {
-    GPR_ASSERT(c->endpoint != nullptr);
-    start_handshake_locked(c);
-    gpr_mu_unlock(&c->mu);
+    grpc_closure* notify = self->notify_;
+    self->notify_ = nullptr;
+    ExecCtx::Run(DEBUG_LOCATION, notify, error);
+    self->handshake_mgr_.reset();
   }
+  self->Unref();
 }
 
-static void chttp2_connector_connect(grpc_connector* con,
-                                     const grpc_connect_in_args* args,
-                                     grpc_connect_out_args* result,
-                                     grpc_closure* notify) {
-  chttp2_connector* c = reinterpret_cast<chttp2_connector*>(con);
-  grpc_resolved_address addr;
-  grpc_core::Subchannel::GetAddressFromSubchannelAddressArg(args->channel_args,
-                                                            &addr);
-  gpr_mu_lock(&c->mu);
-  GPR_ASSERT(c->notify == nullptr);
-  c->notify = notify;
-  c->args = *args;
-  c->result = result;
-  GPR_ASSERT(c->endpoint == nullptr);
-  chttp2_connector_ref(con);  // Ref taken for callback.
-  GRPC_CLOSURE_INIT(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
-  GPR_ASSERT(!c->connecting);
-  c->connecting = true;
-  grpc_closure* closure = &c->connected;
-  grpc_endpoint** ep = &c->endpoint;
-  gpr_mu_unlock(&c->mu);
-  // In some implementations, the closure can be flushed before
-  // grpc_tcp_client_connect and since the closure requires access to c->mu,
-  // this can result in a deadlock. Refer
-  // https://github.com/grpc/grpc/issues/16427
-  // grpc_tcp_client_connect would fill c->endpoint with proper contents and we
-  // make sure that we would still exist at that point by taking a ref.
-  grpc_tcp_client_connect(closure, ep, args->interested_parties,
-                          args->channel_args, &addr, args->deadline);
-}
-
-static const grpc_connector_vtable chttp2_connector_vtable = {
-    chttp2_connector_ref, chttp2_connector_unref, chttp2_connector_shutdown,
-    chttp2_connector_connect};
-
-grpc_connector* grpc_chttp2_connector_create() {
-  chttp2_connector* c = static_cast<chttp2_connector*>(gpr_zalloc(sizeof(*c)));
-  c->base.vtable = &chttp2_connector_vtable;
-  gpr_mu_init(&c->mu);
-  gpr_ref_init(&c->refs, 1);
-  return &c->base;
-}
+}  // namespace grpc_core
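
Editorial note: the chttp2_connector.cc hunk is the largest rewrite in this release. A C-style chttp2_connector struct with a grpc_connector_vtable of free functions, manual gpr_mu lock/unlock pairs, and hand-rolled gpr_refcount counting becomes a grpc_core::Chttp2Connector class with methods, RAII MutexLock scopes, and Ref()/Unref() from a ref-counted base. Note the retained comment about taking a ref before grpc_tcp_client_connect(): the connected closure may run before that call returns, so the object must be kept alive for the callback. Below is a standalone sketch of the overall shape only; Connector, Chttp2StyleConnector, and the int error codes are stand-ins, not the real grpc_core interfaces.

#include <mutex>

// C++ replacement shape: virtual methods instead of a vtable of free
// functions, and an RAII lock instead of gpr_mu_lock/gpr_mu_unlock pairs.
class Connector {
 public:
  virtual ~Connector() = default;
  virtual void Shutdown(int error) = 0;
};

class Chttp2StyleConnector : public Connector {
 public:
  void Shutdown(int error) override {
    std::lock_guard<std::mutex> lock(mu_);  // unlocks on every return path
    shutdown_ = true;
    last_error_ = error;
  }

 private:
  std::mutex mu_;
  bool shutdown_ = false;
  int last_error_ = 0;
};

int main() {
  Chttp2StyleConnector connector;
  connector.Shutdown(0);
}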