grpc 1.39.0 → 1.40.0.pre1

Potentially problematic release.

Files changed (168)
  1. checksums.yaml +4 -4
  2. data/Makefile +20 -4
  3. data/include/grpc/event_engine/event_engine.h +10 -14
  4. data/include/grpc/event_engine/slice_allocator.h +8 -33
  5. data/include/grpc/impl/codegen/grpc_types.h +18 -8
  6. data/include/grpc/impl/codegen/port_platform.h +24 -0
  7. data/src/core/ext/filters/client_channel/client_channel.cc +413 -247
  8. data/src/core/ext/filters/client_channel/client_channel.h +42 -18
  9. data/src/core/ext/filters/client_channel/config_selector.h +19 -6
  10. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +7 -8
  11. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +12 -21
  12. data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +3 -5
  13. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +17 -38
  14. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +8 -15
  15. data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +3 -6
  16. data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +8 -12
  17. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +14 -22
  18. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +2 -9
  19. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +5 -8
  20. data/src/core/ext/filters/client_channel/lb_policy.cc +1 -15
  21. data/src/core/ext/filters/client_channel/lb_policy.h +70 -46
  22. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +101 -73
  23. data/src/core/ext/filters/client_channel/retry_filter.cc +392 -243
  24. data/src/core/ext/filters/client_channel/retry_service_config.cc +36 -26
  25. data/src/core/ext/filters/client_channel/retry_service_config.h +1 -1
  26. data/src/core/ext/filters/client_channel/service_config_call_data.h +45 -5
  27. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +0 -6
  28. data/src/core/ext/filters/http/client/http_client_filter.cc +5 -2
  29. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +5 -1
  30. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +1 -1
  31. data/src/core/{lib/event_engine/slice_allocator.cc → ext/transport/chttp2/transport/chttp2_slice_allocator.cc} +15 -38
  32. data/src/core/ext/transport/chttp2/transport/chttp2_slice_allocator.h +74 -0
  33. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +2 -6
  34. data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -1
  35. data/src/core/ext/transport/chttp2/transport/frame_data.cc +4 -4
  36. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +8 -8
  37. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +5 -5
  38. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +639 -752
  39. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +190 -69
  40. data/src/core/ext/transport/chttp2/transport/internal.h +1 -1
  41. data/src/core/ext/transport/chttp2/transport/parsing.cc +70 -54
  42. data/src/core/ext/transport/chttp2/transport/varint.cc +6 -4
  43. data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c +56 -35
  44. data/src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.h +180 -76
  45. data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c +35 -27
  46. data/src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.h +97 -48
  47. data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c +45 -9
  48. data/src/core/ext/upb-generated/envoy/config/core/v3/base.upb.h +67 -7
  49. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c +66 -9
  50. data/src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.h +227 -0
  51. data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c +46 -0
  52. data/src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.h +121 -0
  53. data/src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c +1 -0
  54. data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c +35 -0
  55. data/src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.h +90 -0
  56. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c +32 -24
  57. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.h +120 -73
  58. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c +4 -2
  59. data/src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.h +15 -0
  60. data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c +48 -0
  61. data/src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.h +171 -0
  62. data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c +8 -6
  63. data/src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.h +27 -19
  64. data/src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c +1 -0
  65. data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c +24 -7
  66. data/src/core/ext/upb-generated/envoy/config/route/v3/route.upb.h +57 -0
  67. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c +29 -17
  68. data/src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.h +72 -0
  69. data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c +3 -2
  70. data/src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.h +4 -0
  71. data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c +6 -5
  72. data/src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.h +15 -11
  73. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c +85 -43
  74. data/src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.h +274 -91
  75. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c +11 -8
  76. data/src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.h +30 -13
  77. data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c +33 -5
  78. data/src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.h +115 -0
  79. data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c +60 -0
  80. data/src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.h +181 -0
  81. data/src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c +1 -0
  82. data/src/core/ext/upb-generated/validate/validate.upb.c +82 -66
  83. data/src/core/ext/upb-generated/validate/validate.upb.h +220 -124
  84. data/src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c +15 -7
  85. data/src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c +53 -52
  86. data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c +318 -277
  87. data/src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.h +5 -0
  88. data/src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c +437 -410
  89. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c +198 -170
  90. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.h +10 -0
  91. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c +9 -8
  92. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c +219 -163
  93. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.h +15 -0
  94. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c +59 -0
  95. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.h +40 -0
  96. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c +29 -25
  97. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c +52 -0
  98. data/src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.h +35 -0
  99. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c +135 -125
  100. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.h +5 -0
  101. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c +131 -123
  102. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c +90 -0
  103. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.h +35 -0
  104. data/src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c +32 -24
  105. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c +69 -55
  106. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.h +5 -0
  107. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c +684 -664
  108. data/src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.h +5 -0
  109. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c +13 -10
  110. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c +13 -10
  111. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c +441 -375
  112. data/src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.h +10 -0
  113. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c +122 -114
  114. data/src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c +1 -1
  115. data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c +112 -79
  116. data/src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.h +5 -0
  117. data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c +64 -0
  118. data/src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.h +50 -0
  119. data/src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c +35 -32
  120. data/src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c +4 -4
  121. data/src/core/ext/upbdefs-generated/validate/validate.upbdefs.c +182 -160
  122. data/src/core/ext/xds/certificate_provider_store.h +1 -1
  123. data/src/core/ext/xds/xds_api.cc +320 -121
  124. data/src/core/ext/xds/xds_api.h +31 -2
  125. data/src/core/ext/xds/xds_bootstrap.cc +4 -1
  126. data/src/core/ext/xds/xds_client.cc +66 -43
  127. data/src/core/ext/xds/xds_client.h +0 -4
  128. data/src/core/ext/xds/xds_http_filters.cc +3 -2
  129. data/src/core/ext/xds/xds_http_filters.h +3 -0
  130. data/src/core/lib/channel/call_tracer.h +85 -0
  131. data/src/core/lib/channel/channel_stack.h +1 -1
  132. data/src/core/lib/channel/context.h +3 -0
  133. data/src/core/lib/channel/status_util.h +4 -0
  134. data/src/core/lib/compression/stream_compression.h +1 -1
  135. data/src/core/lib/compression/stream_compression_gzip.h +1 -1
  136. data/src/core/lib/compression/stream_compression_identity.h +1 -1
  137. data/src/core/lib/debug/stats.h +1 -1
  138. data/src/core/lib/gpr/murmur_hash.cc +4 -2
  139. data/src/core/lib/gprpp/manual_constructor.h +1 -1
  140. data/src/core/lib/gprpp/orphanable.h +3 -3
  141. data/src/core/lib/gprpp/sync.h +2 -30
  142. data/src/core/lib/iomgr/buffer_list.cc +1 -1
  143. data/src/core/lib/iomgr/ev_apple.h +1 -1
  144. data/src/core/lib/iomgr/event_engine/endpoint.cc +6 -8
  145. data/src/core/lib/iomgr/event_engine/tcp.cc +30 -10
  146. data/src/core/lib/iomgr/python_util.h +1 -1
  147. data/src/core/lib/iomgr/resource_quota.cc +2 -0
  148. data/src/core/lib/iomgr/tcp_client_windows.cc +2 -0
  149. data/src/core/lib/iomgr/tcp_server_posix.cc +1 -0
  150. data/src/core/lib/iomgr/timer_manager.cc +1 -1
  151. data/src/core/lib/json/json_reader.cc +1 -2
  152. data/src/core/lib/matchers/matchers.cc +8 -20
  153. data/src/core/lib/matchers/matchers.h +2 -1
  154. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +49 -0
  155. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +7 -0
  156. data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +6 -18
  157. data/src/core/lib/security/transport/security_handshaker.cc +12 -4
  158. data/src/core/lib/security/transport/server_auth_filter.cc +0 -7
  159. data/src/core/lib/slice/slice_internal.h +1 -0
  160. data/src/core/lib/surface/call.cc +5 -6
  161. data/src/core/lib/surface/server.cc +3 -1
  162. data/src/core/lib/surface/server.h +3 -3
  163. data/src/core/lib/surface/version.cc +1 -3
  164. data/src/ruby/ext/grpc/extconf.rb +1 -1
  165. data/src/ruby/lib/grpc/version.rb +1 -1
  166. data/third_party/xxhash/xxhash.h +77 -195
  167. metadata +52 -35
  168. data/src/core/lib/gpr/arena.h +0 -47
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 9734db2e5f21e57d5fc97b7464965e75fc7cca02b4f8fb8e85e95769067e605b
-  data.tar.gz: 503abd470885ffa903b5ecb449789f627c1021b9ba33c119b8c59ab6b7c087fb
+  metadata.gz: cf4977eb9d4631eb3f57dd3a14819a47b396e4b168d4c8bd71940ae67e702a07
+  data.tar.gz: 1556459d6ed1ef4b6e6109be093f81122d90cc4dc49e4b0a07b607d9f152cdf6
 SHA512:
-  metadata.gz: 451e8d43cbcc6e39fd13e360c2949a35b3f166f7cc6c7edb854c278c9f0182cc1a5cac0a0d2b369008a3989657e20227e13bff9e5b0524f7eb74f6cee103ea43
-  data.tar.gz: 0322b68afd67abee15ec884ce40a2ea861f03495fcf32bcd2277ab7c224aa81650b47a8a6ad883adb278ab1c721a914533b768dc4671b457e0bb134c7c76bafc
+  metadata.gz: e5ae2edd3bd04dc52f9c865999fa2e26e22f9af1df05852235ff01fbbf25ee6a9ab4a2efd69a2ce87f221b8e41d97ff966b554c1a3ad3bc597e1494140517701
+  data.tar.gz: 4d15ba612d2a2ecb754eab736a7dd99b93d1821e572c7ff697b5d6d397d54c1220e3f0091550377ddc5af2eb3694ff2fb4495ac8fd05c7679bbb3e57bd31d6a4
data/Makefile CHANGED
@@ -455,8 +455,8 @@ Q = @
 endif
 
 CORE_VERSION = 18.0.0
-CPP_VERSION = 1.39.0
-CSHARP_VERSION = 2.39.0
+CPP_VERSION = 1.40.0-pre1
+CSHARP_VERSION = 2.40.0-pre1
 
 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
 CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -1136,6 +1136,7 @@ LIBGRPC_SRC = \
     src/core/ext/transport/chttp2/transport/bin_decoder.cc \
     src/core/ext/transport/chttp2/transport/bin_encoder.cc \
     src/core/ext/transport/chttp2/transport/chttp2_plugin.cc \
+    src/core/ext/transport/chttp2/transport/chttp2_slice_allocator.cc \
     src/core/ext/transport/chttp2/transport/chttp2_transport.cc \
     src/core/ext/transport/chttp2/transport/context_list.cc \
     src/core/ext/transport/chttp2/transport/flow_control.cc \
@@ -1178,14 +1179,17 @@ LIBGRPC_SRC = \
     src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c \
     src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c \
     src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c \
+    src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c \
     src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c \
     src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c \
+    src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c \
     src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c \
     src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c \
     src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c \
     src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c \
     src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c \
     src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c \
+    src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c \
     src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c \
     src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c \
     src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c \
@@ -1212,6 +1216,7 @@ LIBGRPC_SRC = \
     src/core/ext/upb-generated/envoy/service/route/v3/rds.upb.c \
     src/core/ext/upb-generated/envoy/service/route/v3/srds.upb.c \
     src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c \
+    src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c \
     src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c \
     src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c \
     src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c \
@@ -1265,14 +1270,17 @@ LIBGRPC_SRC = \
     src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c \
+    src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c \
+    src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c \
+    src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c \
@@ -1298,6 +1306,7 @@ LIBGRPC_SRC = \
     src/core/ext/upbdefs-generated/envoy/service/route/v3/rds.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/service/route/v3/srds.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c \
+    src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c \
     src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c \
@@ -1371,7 +1380,6 @@ LIBGRPC_SRC = \
     src/core/lib/debug/trace.cc \
     src/core/lib/event_engine/endpoint_config.cc \
     src/core/lib/event_engine/event_engine.cc \
-    src/core/lib/event_engine/slice_allocator.cc \
     src/core/lib/event_engine/sockaddr.cc \
     src/core/lib/http/format_request.cc \
     src/core/lib/http/httpcli.cc \
@@ -1807,6 +1815,7 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/ext/transport/chttp2/transport/bin_decoder.cc \
     src/core/ext/transport/chttp2/transport/bin_encoder.cc \
     src/core/ext/transport/chttp2/transport/chttp2_plugin.cc \
+    src/core/ext/transport/chttp2/transport/chttp2_slice_allocator.cc \
     src/core/ext/transport/chttp2/transport/chttp2_transport.cc \
     src/core/ext/transport/chttp2/transport/context_list.cc \
     src/core/ext/transport/chttp2/transport/flow_control.cc \
@@ -1859,7 +1868,6 @@ LIBGRPC_UNSECURE_SRC = \
     src/core/lib/debug/trace.cc \
     src/core/lib/event_engine/endpoint_config.cc \
     src/core/lib/event_engine/event_engine.cc \
-    src/core/lib/event_engine/slice_allocator.cc \
     src/core/lib/event_engine/sockaddr.cc \
     src/core/lib/http/format_request.cc \
     src/core/lib/http/httpcli.cc \
@@ -2727,14 +2735,17 @@ src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c: $(OPENSSL_DE
 src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c: $(OPENSSL_DEP)
+src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c: $(OPENSSL_DEP)
+src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c: $(OPENSSL_DEP)
+src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c: $(OPENSSL_DEP)
@@ -2761,6 +2772,7 @@ src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/service/route/v3/rds.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/service/route/v3/srds.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c: $(OPENSSL_DEP)
+src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c: $(OPENSSL_DEP)
 src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c: $(OPENSSL_DEP)
@@ -2810,14 +2822,17 @@ src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c: $(OP
 src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c: $(OPENSSL_DEP)
+src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c: $(OPENSSL_DEP)
+src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c: $(OPENSSL_DEP)
+src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c: $(OPENSSL_DEP)
@@ -2843,6 +2858,7 @@ src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c: $(OPEN
 src/core/ext/upbdefs-generated/envoy/service/route/v3/rds.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/service/route/v3/srds.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c: $(OPENSSL_DEP)
+src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c: $(OPENSSL_DEP)
 src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c: $(OPENSSL_DEP)
data/include/grpc/event_engine/event_engine.h CHANGED
@@ -80,7 +80,7 @@ class EventEngine {
   using Callback = std::function<void(absl::Status)>;
   /// A callback handle, used to cancel a callback.
   struct TaskHandle {
-    intptr_t key;
+    intptr_t keys[2];
   };
   /// A thin wrapper around a platform-specific sockaddr type. A sockaddr struct
   /// exists on all platforms that gRPC supports.
@@ -127,10 +127,8 @@ class EventEngine {
     ///
     /// For failed read operations, implementations should pass the appropriate
     /// statuses to \a on_read. For example, callbacks might expect to receive
-    /// DEADLINE_EXCEEDED when the deadline is exceeded, and CANCELLED on
-    /// endpoint shutdown.
-    virtual void Read(Callback on_read, SliceBuffer* buffer,
-                      absl::Time deadline) = 0;
+    /// CANCELLED on endpoint shutdown.
+    virtual void Read(Callback on_read, SliceBuffer* buffer) = 0;
     /// Write data out on the connection.
     ///
     /// \a on_writable is called when the connection is ready for more data. The
@@ -140,15 +138,13 @@ class EventEngine {
     ///
     /// For failed write operations, implementations should pass the appropriate
     /// statuses to \a on_writable. For example, callbacks might expect to
-    /// receive DEADLINE_EXCEEDED when the deadline is exceeded, and CANCELLED
-    /// on endpoint shutdown.
-    virtual void Write(Callback on_writable, SliceBuffer* data,
-                       absl::Time deadline) = 0;
+    /// receive CANCELLED on endpoint shutdown.
+    virtual void Write(Callback on_writable, SliceBuffer* data) = 0;
     /// These methods return an address in the format described in DNSResolver.
     /// The returned values are owned by the Endpoint and are expected to remain
     /// valid for the life of the Endpoint.
-    virtual const ResolvedAddress* GetPeerAddress() const = 0;
-    virtual const ResolvedAddress* GetLocalAddress() const = 0;
+    virtual const ResolvedAddress& GetPeerAddress() const = 0;
+    virtual const ResolvedAddress& GetLocalAddress() const = 0;
   };
 
   /// Called when a new connection is established.
@@ -197,7 +193,7 @@ class EventEngine {
   virtual absl::StatusOr<std::unique_ptr<Listener>> CreateListener(
       Listener::AcceptCallback on_accept, Callback on_shutdown,
       const EndpointConfig& args,
-      SliceAllocatorFactory slice_allocator_factory) = 0;
+      std::unique_ptr<SliceAllocatorFactory> slice_allocator_factory) = 0;
   /// Creates a client network connection to a remote network listener.
   ///
   /// \a Connect may return an error status immediately if there was a failure
@@ -214,7 +210,7 @@ class EventEngine {
   virtual absl::Status Connect(OnConnectCallback on_connect,
                                const ResolvedAddress& addr,
                                const EndpointConfig& args,
-                               SliceAllocator slice_allocator,
+                               std::unique_ptr<SliceAllocator> slice_allocator,
                                absl::Time deadline) = 0;
 
   /// The DNSResolver that provides asynchronous resolution.
@@ -222,7 +218,7 @@ class EventEngine {
    public:
     /// A task handle for DNS Resolution requests.
    struct LookupTaskHandle {
-      intptr_t key;
+      intptr_t key[2];
    };
    /// A DNS SRV record type.
    struct SRVRecord {
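
Aside (not part of the diff): the event_engine.h hunks above drop the absl::Time deadline parameter from Endpoint::Read and Endpoint::Write and widen TaskHandle to two intptr_t words. Below is a minimal, self-contained C++ sketch of the new read/write call shape; FakeEndpoint, its immediate completion, and the stub SliceBuffer are illustrative stand-ins only, not code from this release.

#include <functional>
#include "absl/status/status.h"

// Stand-in types mirroring the shapes used in the hunks above.
class SliceBuffer {};
using Callback = std::function<void(absl::Status)>;

// Illustrative endpoint with the post-change signatures: no deadline argument;
// a cancelled or shut-down endpoint would surface absl::CancelledError()
// through the callback status instead.
class FakeEndpoint {
 public:
  void Read(Callback on_read, SliceBuffer* /*buffer*/) {
    on_read(absl::OkStatus());  // pretend the read completed immediately
  }
  void Write(Callback on_writable, SliceBuffer* /*data*/) {
    on_writable(absl::OkStatus());  // pretend the data was flushed immediately
  }
};

int main() {
  FakeEndpoint endpoint;
  SliceBuffer buffer;
  endpoint.Read([](absl::Status status) { /* consume data when status is OK */ }, &buffer);
  endpoint.Write([](absl::Status status) { /* queue more data when status is OK */ }, &buffer);
  return 0;
}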
data/include/grpc/event_engine/slice_allocator.h CHANGED
@@ -37,18 +37,8 @@ class SliceBuffer {
 
 class SliceAllocator {
  public:
-  // gRPC-internal constructor
-  explicit SliceAllocator(grpc_resource_user* user);
-  // Not copyable
-  SliceAllocator(SliceAllocator& other) = delete;
-  SliceAllocator& operator=(const SliceAllocator& other) = delete;
-  // Moveable
-  SliceAllocator(SliceAllocator&& other) noexcept;
-  SliceAllocator& operator=(SliceAllocator&& other) noexcept;
-  ~SliceAllocator();
-
-  using AllocateCallback =
-      std::function<void(absl::Status, SliceBuffer* buffer)>;
+  using AllocateCallback = std::function<void(absl::Status)>;
+  virtual ~SliceAllocator() = default;
   /// Requests \a size bytes from gRPC, and populates \a dest with the allocated
   /// slices. Ownership of the \a SliceBuffer is not transferred.
   ///
@@ -57,32 +47,17 @@ class SliceAllocator {
   /// interrupted to attempt to reclaim memory from participating gRPC
   /// internals. When there is sufficient memory available, slice allocation
   /// proceeds as normal.
-  absl::Status Allocate(size_t size, SliceBuffer* dest,
-                        SliceAllocator::AllocateCallback cb);
-
- private:
-  grpc_resource_user* resource_user_;
+  virtual absl::Status Allocate(size_t size, SliceBuffer* dest,
+                                SliceAllocator::AllocateCallback cb) = 0;
 };
 
 class SliceAllocatorFactory {
  public:
-  // gRPC-internal constructor
-  explicit SliceAllocatorFactory(grpc_resource_quota* quota);
-  // Not copyable
-  SliceAllocatorFactory(SliceAllocatorFactory& other) = delete;
-  SliceAllocatorFactory& operator=(const SliceAllocatorFactory& other) = delete;
-  // Moveable
-  SliceAllocatorFactory(SliceAllocatorFactory&& other) noexcept;
-  SliceAllocatorFactory& operator=(SliceAllocatorFactory&& other) noexcept;
-  ~SliceAllocatorFactory();
-
+  virtual ~SliceAllocatorFactory() = default;
   /// On Endpoint creation, call \a CreateSliceAllocator with the name of the
-  /// endpoint peer (a URI string, most likely). Note: \a peer_name must outlive
-  /// the Endpoint.
-  SliceAllocator CreateSliceAllocator(absl::string_view peer_name);
-
- private:
-  grpc_resource_quota* resource_quota_;
+  /// endpoint peer (a URI string, most likely).
+  virtual std::unique_ptr<SliceAllocator> CreateSliceAllocator(
+      absl::string_view peer_name) = 0;
 };
 
 } // namespace experimental
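
Aside (not part of the diff): slice_allocator.h now exposes SliceAllocator and SliceAllocatorFactory as pure-virtual interfaces, with the concrete resource-quota-backed implementation moved into ext/transport/chttp2/transport/chttp2_slice_allocator.cc (see the rename in the file list above). The sketch below only mirrors the new interface shape; NoopSliceAllocator and NoopSliceAllocatorFactory are hypothetical implementations used to illustrate the signatures, and the SliceBuffer stub stands in for the real type.

#include <cstddef>
#include <functional>
#include <memory>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"

// Stand-ins mirroring the interface shapes shown in the hunks above.
class SliceBuffer {};

class SliceAllocator {
 public:
  using AllocateCallback = std::function<void(absl::Status)>;
  virtual ~SliceAllocator() = default;
  virtual absl::Status Allocate(size_t size, SliceBuffer* dest,
                                AllocateCallback cb) = 0;
};

class SliceAllocatorFactory {
 public:
  virtual ~SliceAllocatorFactory() = default;
  virtual std::unique_ptr<SliceAllocator> CreateSliceAllocator(
      absl::string_view peer_name) = 0;
};

// Hypothetical trivial implementation: reports success immediately instead of
// reserving memory from a resource quota as the real transport allocator does.
class NoopSliceAllocator : public SliceAllocator {
 public:
  absl::Status Allocate(size_t /*size*/, SliceBuffer* /*dest*/,
                        AllocateCallback cb) override {
    cb(absl::OkStatus());
    return absl::OkStatus();
  }
};

class NoopSliceAllocatorFactory : public SliceAllocatorFactory {
 public:
  std::unique_ptr<SliceAllocator> CreateSliceAllocator(
      absl::string_view /*peer_name*/) override {
    return std::make_unique<NoopSliceAllocator>();
  }
};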
data/include/grpc/impl/codegen/grpc_types.h CHANGED
@@ -384,12 +384,26 @@ typedef struct {
     Defaults to "blend". In the current implementation "blend" is equivalent to
     "latency". */
 #define GRPC_ARG_OPTIMIZATION_TARGET "grpc.optimization_target"
-/** If set to zero, disables retry behavior. Otherwise, transparent retries
-    are enabled for all RPCs, and configurable retries are enabled when they
-    are configured via the service config. For details, see:
+/** Enables retry functionality. Defaults to true. When enabled,
+    configurable retries are enabled when they are configured via the
+    service config. For details, see:
       https://github.com/grpc/proposal/blob/master/A6-client-retries.md
+    NOTE: Transparent retries are not yet implemented. When they are
+    implemented, they will also be enabled by this arg.
+    NOTE: Hedging functionality is not yet implemented, so those
+    fields in the service config will currently be ignored. See
+    also the GRPC_ARG_EXPERIMENTAL_ENABLE_HEDGING arg below.
  */
 #define GRPC_ARG_ENABLE_RETRIES "grpc.enable_retries"
+/** Enables hedging functionality, as described in:
+      https://github.com/grpc/proposal/blob/master/A6-client-retries.md
+    Default is currently false, since this functionality is not yet
+    fully implemented.
+    NOTE: This channel arg is experimental and will eventually be removed.
+    Once hedging functionality has been implemented and proves stable,
+    this arg will be removed, and the hedging functionality will
+    be enabled via the GRPC_ARG_ENABLE_RETRIES arg above. */
+#define GRPC_ARG_EXPERIMENTAL_ENABLE_HEDGING "grpc.experimental.enable_hedging"
 /** Per-RPC retry buffer size, in bytes. Default is 256 KiB. */
 #define GRPC_ARG_PER_RPC_RETRY_BUFFER_SIZE "grpc.per_rpc_retry_buffer_size"
 /** Channel arg that carries the bridged objective c object for custom metrics
@@ -495,6 +509,7 @@ typedef enum grpc_call_error {
   (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_THROUGH)
 
 /** Initial metadata flags */
+/** These flags are to be passed to the `grpc_op::flags` field */
 /** Signal that the call is idempotent */
 #define GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST (0x00000010u)
 /** Signal that the call should not return UNAVAILABLE before it has started */
@@ -522,8 +537,6 @@ typedef struct grpc_metadata {
   grpc_slice key;
   grpc_slice value;
 
-  uint32_t flags;
-
   /** The following fields are reserved for grpc internal use.
       There is no need to initialize them, and they will be set to garbage
       during calls to grpc. */
@@ -770,9 +783,6 @@ typedef struct grpc_completion_queue_functor {
   struct grpc_completion_queue_functor* internal_next;
 } grpc_completion_queue_functor;
 
-typedef grpc_completion_queue_functor
-    grpc_experimental_completion_queue_functor;
-
 #define GRPC_CQ_CURRENT_VERSION 2
 #define GRPC_CQ_VERSION_MINIMUM_FOR_CALLBACKABLE 2
 typedef struct grpc_completion_queue_attributes {
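
Aside (not part of the diff): with the grpc_types.h changes above, GRPC_ARG_ENABLE_RETRIES now defaults to true and a new experimental GRPC_ARG_EXPERIMENTAL_ENABLE_HEDGING channel arg is introduced. A hedged sketch of setting these args through the C++ channel-arguments API follows; the target address, the use of insecure credentials, and the explicit values are illustrative only.

#include <memory>
#include <grpcpp/grpcpp.h>

std::shared_ptr<grpc::Channel> MakeChannelWithRetrySettings() {
  grpc::ChannelArguments args;
  // Retries are on by default in this release; set explicitly for clarity.
  args.SetInt(GRPC_ARG_ENABLE_RETRIES, 1);
  // Hedging is experimental and off by default; leave it disabled here.
  args.SetInt(GRPC_ARG_EXPERIMENTAL_ENABLE_HEDGING, 0);
  return grpc::CreateCustomChannel(
      "localhost:50051", grpc::InsecureChannelCredentials(), args);
}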
data/include/grpc/impl/codegen/port_platform.h CHANGED
@@ -566,6 +566,14 @@ typedef unsigned __int64 uint64_t;
 #define CENSUSAPI GRPCAPI
 #endif
 
+#ifndef GPR_HAS_CPP_ATTRIBUTE
+#ifdef __has_cpp_attribute
+#define GPR_HAS_CPP_ATTRIBUTE(a) __has_cpp_attribute(a)
+#else
+#define GPR_HAS_CPP_ATTRIBUTE(a) 0
+#endif
+#endif /* GPR_HAS_CPP_ATTRIBUTE */
+
 #ifndef GPR_HAS_ATTRIBUTE
 #ifdef __has_attribute
 #define GPR_HAS_ATTRIBUTE(a) __has_attribute(a)
@@ -591,6 +599,22 @@ typedef unsigned __int64 uint64_t;
 #endif
 #endif /* GPR_ATTRIBUTE_NOINLINE */
 
+#ifndef GPR_NO_UNIQUE_ADDRESS
+#if GPR_HAS_CPP_ATTRIBUTE(no_unique_address)
+#define GPR_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define GPR_NO_UNIQUE_ADDRESS
+#endif
+#endif /* GPR_NO_UNIQUE_ADDRESS */
+
+#ifndef GRPC_DEPRECATED
+#if GPR_HAS_CPP_ATTRIBUTE(deprecated)
+#define GRPC_DEPRECATED(reason) [[deprecated(reason)]]
+#else
+#define GRPC_DEPRECATED(reason)
+#endif
+#endif /* GRPC_DEPRECATED */
+
 #ifndef GPR_ATTRIBUTE_WEAK
 /* Attribute weak is broken on LLVM/windows:
  * https://bugs.llvm.org/show_bug.cgi?id=37598 */
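
Aside (not part of the diff): the port_platform.h hunks above add GPR_HAS_CPP_ATTRIBUTE and wrap [[no_unique_address]] and [[deprecated]] behind GPR_NO_UNIQUE_ADDRESS and GRPC_DEPRECATED. A short usage sketch under those definitions; the struct and function names below are made up for illustration.

#include <grpc/impl/codegen/port_platform.h>

struct Empty {};

struct Wrapped {
  // Expands to [[no_unique_address]] where the compiler supports it, so the
  // empty member need not add to sizeof(Wrapped); otherwise the macro expands
  // to nothing and this behaves like an ordinary member.
  GPR_NO_UNIQUE_ADDRESS Empty tag;
  int value;
};

// Expands to [[deprecated("use NewEntryPoint instead")]] when available,
// and to nothing on compilers without the attribute.
GRPC_DEPRECATED("use NewEntryPoint instead")
void OldEntryPoint();

void NewEntryPoint();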
@@ -174,9 +174,9 @@ class ClientChannel::CallData {
174
174
  void MaybeAddCallToResolverQueuedCallsLocked(grpc_call_element* elem)
175
175
  ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::resolution_mu_);
176
176
 
177
- static void RecvInitialMetadataReadyForConfigSelectorCommitCallback(
177
+ static void RecvTrailingMetadataReadyForConfigSelectorCommitCallback(
178
178
  void* arg, grpc_error_handle error);
179
- void InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
179
+ void InjectRecvTrailingMetadataReadyForConfigSelectorCommitCallback(
180
180
  grpc_transport_stream_op_batch* batch);
181
181
 
182
182
  void CreateDynamicCall(grpc_call_element* elem);
@@ -199,7 +199,7 @@ class ClientChannel::CallData {
199
199
 
200
200
  grpc_polling_entity* pollent_ = nullptr;
201
201
 
202
- grpc_closure pick_closure_;
202
+ grpc_closure resolution_done_closure_;
203
203
 
204
204
  // Accessed while holding ClientChannel::resolution_mu_.
205
205
  bool service_config_applied_ ABSL_GUARDED_BY(&ClientChannel::resolution_mu_) =
@@ -211,10 +211,8 @@ class ClientChannel::CallData {
211
211
  ResolverQueuedCallCanceller* resolver_call_canceller_
212
212
  ABSL_GUARDED_BY(&ClientChannel::resolution_mu_) = nullptr;
213
213
 
214
- std::function<void()> on_call_committed_;
215
-
216
- grpc_closure* original_recv_initial_metadata_ready_ = nullptr;
217
- grpc_closure recv_initial_metadata_ready_;
214
+ grpc_closure* original_recv_trailing_metadata_ready_ = nullptr;
215
+ grpc_closure recv_trailing_metadata_ready_;
218
216
 
219
217
  RefCountedPtr<DynamicFilters> dynamic_filters_;
220
218
  RefCountedPtr<DynamicFilters::Call> dynamic_call_;
@@ -345,13 +343,16 @@ class DynamicTerminationFilter::CallData {
345
343
  auto* calld = static_cast<CallData*>(elem->call_data);
346
344
  auto* chand = static_cast<DynamicTerminationFilter*>(elem->channel_data);
347
345
  ClientChannel* client_channel = chand->chand_;
348
- grpc_call_element_args args = {
349
- calld->owning_call_, nullptr,
350
- calld->call_context_, calld->path_,
351
- calld->call_start_time_, calld->deadline_,
352
- calld->arena_, calld->call_combiner_};
353
- calld->lb_call_ =
354
- client_channel->CreateLoadBalancedCall(args, pollent, nullptr);
346
+ grpc_call_element_args args = {calld->owning_call_, nullptr,
347
+ calld->call_context_, calld->path_,
348
+ /*start_time=*/0, calld->deadline_,
349
+ calld->arena_, calld->call_combiner_};
350
+ auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
351
+ calld->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
352
+ calld->lb_call_ = client_channel->CreateLoadBalancedCall(
353
+ args, pollent, nullptr,
354
+ service_config_call_data->call_dispatch_controller(),
355
+ /*is_transparent_retry=*/false);
355
356
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
356
357
  gpr_log(GPR_INFO,
357
358
  "chand=%p dynamic_termination_calld=%p: create lb_call=%p", chand,
@@ -362,7 +363,6 @@ class DynamicTerminationFilter::CallData {
362
363
  private:
363
364
  explicit CallData(const grpc_call_element_args& args)
364
365
  : path_(grpc_slice_ref_internal(args.path)),
365
- call_start_time_(args.start_time),
366
366
  deadline_(args.deadline),
367
367
  arena_(args.arena),
368
368
  owning_call_(args.call_stack),
@@ -372,14 +372,13 @@ class DynamicTerminationFilter::CallData {
372
372
  ~CallData() { grpc_slice_unref_internal(path_); }
373
373
 
374
374
  grpc_slice path_; // Request path.
375
- gpr_cycle_counter call_start_time_;
376
375
  grpc_millis deadline_;
377
376
  Arena* arena_;
378
377
  grpc_call_stack* owning_call_;
379
378
  CallCombiner* call_combiner_;
380
379
  grpc_call_context_element* call_context_;
381
380
 
382
- RefCountedPtr<ClientChannel::LoadBalancedCall> lb_call_;
381
+ OrphanablePtr<ClientChannel::LoadBalancedCall> lb_call_;
383
382
  };
384
383
 
385
384
  const grpc_channel_filter DynamicTerminationFilter::kFilterVtable = {
@@ -1060,10 +1059,6 @@ void ClientChannel::Destroy(grpc_channel_element* elem) {
1060
1059
 
1061
1060
  namespace {
1062
1061
 
1063
- bool GetEnableRetries(const grpc_channel_args* args) {
1064
- return grpc_channel_args_find_bool(args, GRPC_ARG_ENABLE_RETRIES, false);
1065
- }
1066
-
1067
1062
  RefCountedPtr<SubchannelPoolInterface> GetSubchannelPool(
1068
1063
  const grpc_channel_args* args) {
1069
1064
  const bool use_local_subchannel_pool = grpc_channel_args_find_bool(
@@ -1085,7 +1080,6 @@ ClientChannel::ClientChannel(grpc_channel_element_args* args,
1085
1080
  grpc_error_handle* error)
1086
1081
  : deadline_checking_enabled_(
1087
1082
  grpc_deadline_checking_enabled(args->channel_args)),
1088
- enable_retries_(GetEnableRetries(args->channel_args)),
1089
1083
  owning_stack_(args->channel_stack),
1090
1084
  client_channel_factory_(
1091
1085
  ClientChannelFactory::GetFromChannelArgs(args->channel_args)),
@@ -1169,12 +1163,15 @@ ClientChannel::~ClientChannel() {
1169
1163
  GRPC_ERROR_UNREF(disconnect_error_.Load(MemoryOrder::RELAXED));
1170
1164
  }
1171
1165
 
1172
- RefCountedPtr<ClientChannel::LoadBalancedCall>
1166
+ OrphanablePtr<ClientChannel::LoadBalancedCall>
1173
1167
  ClientChannel::CreateLoadBalancedCall(
1174
1168
  const grpc_call_element_args& args, grpc_polling_entity* pollent,
1175
- grpc_closure* on_call_destruction_complete) {
1176
- return args.arena->New<LoadBalancedCall>(this, args, pollent,
1177
- on_call_destruction_complete);
1169
+ grpc_closure* on_call_destruction_complete,
1170
+ ConfigSelector::CallDispatchController* call_dispatch_controller,
1171
+ bool is_transparent_retry) {
1172
+ return OrphanablePtr<LoadBalancedCall>(args.arena->New<LoadBalancedCall>(
1173
+ this, args, pollent, on_call_destruction_complete,
1174
+ call_dispatch_controller, is_transparent_retry));
1178
1175
  }
1179
1176
 
1180
1177
  namespace {
@@ -1359,11 +1356,12 @@ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
1359
1356
  grpc_error_handle state_error =
1360
1357
  GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1361
1358
  "Resolver transient failure", &error, 1);
1359
+ absl::Status status = grpc_error_to_absl_status(state_error);
1362
1360
  {
1363
1361
  MutexLock lock(&resolution_mu_);
1364
1362
  // Update resolver transient failure.
1365
1363
  GRPC_ERROR_UNREF(resolver_transient_failure_error_);
1366
- resolver_transient_failure_error_ = GRPC_ERROR_REF(state_error);
1364
+ resolver_transient_failure_error_ = state_error;
1367
1365
  // Process calls that were queued waiting for the resolver result.
1368
1366
  for (ResolverQueuedCall* call = resolver_queued_calls_; call != nullptr;
1369
1367
  call = call->next) {
@@ -1377,10 +1375,8 @@ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
1377
1375
  }
1378
1376
  // Update connectivity state.
1379
1377
  UpdateStateAndPickerLocked(
1380
- GRPC_CHANNEL_TRANSIENT_FAILURE, grpc_error_to_absl_status(state_error),
1381
- "resolver failure",
1382
- absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
1383
- state_error));
1378
+ GRPC_CHANNEL_TRANSIENT_FAILURE, status, "resolver failure",
1379
+ absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(status));
1384
1380
  }
1385
1381
  GRPC_ERROR_UNREF(error);
1386
1382
  }
@@ -1507,14 +1503,6 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
1507
1503
  config_selector =
1508
1504
  MakeRefCounted<DefaultConfigSelector>(saved_service_config_);
1509
1505
  }
1510
- // Construct dynamic filter stack.
1511
- std::vector<const grpc_channel_filter*> filters =
1512
- config_selector->GetFilters();
1513
- if (enable_retries_) {
1514
- filters.push_back(&kRetryFilterVtable);
1515
- } else {
1516
- filters.push_back(&DynamicTerminationFilter::kFilterVtable);
1517
- }
1518
1506
  absl::InlinedVector<grpc_arg, 2> args_to_add = {
1519
1507
  grpc_channel_arg_pointer_create(
1520
1508
  const_cast<char*>(GRPC_ARG_CLIENT_CHANNEL), this,
@@ -1526,6 +1514,16 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
1526
1514
  grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
1527
1515
  channel_args_, args_to_add.data(), args_to_add.size());
1528
1516
  new_args = config_selector->ModifyChannelArgs(new_args);
1517
+ bool enable_retries =
1518
+ grpc_channel_args_find_bool(new_args, GRPC_ARG_ENABLE_RETRIES, true);
1519
+ // Construct dynamic filter stack.
1520
+ std::vector<const grpc_channel_filter*> filters =
1521
+ config_selector->GetFilters();
1522
+ if (enable_retries) {
1523
+ filters.push_back(&kRetryFilterVtable);
1524
+ } else {
1525
+ filters.push_back(&DynamicTerminationFilter::kFilterVtable);
1526
+ }
1529
1527
  RefCountedPtr<DynamicFilters> dynamic_filters =
1530
1528
  DynamicFilters::Create(new_args, std::move(filters));
1531
1529
  GPR_ASSERT(dynamic_filters != nullptr);
@@ -1548,6 +1546,15 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
1548
1546
  // Process calls that were queued waiting for the resolver result.
1549
1547
  for (ResolverQueuedCall* call = resolver_queued_calls_; call != nullptr;
1550
1548
  call = call->next) {
1549
+ // If there are a lot of queued calls here, resuming them all may cause us
1550
+ // to stay inside C-core for a long period of time. All of that work would
1551
+ // be done using the same ExecCtx instance and therefore the same cached
1552
+ // value of "now". The longer it takes to finish all of this work and exit
1553
+ // from C-core, the more stale the cached value of "now" may become. This
1554
+ // can cause problems whereby (e.g.) we calculate a timer deadline based
1555
+ // on the stale value, which results in the timer firing too early. To
1556
+ // avoid this, we invalidate the cached value for each call we process.
1557
+ ExecCtx::Get()->InvalidateNow();
1551
1558
  grpc_call_element* elem = call->elem;
1552
1559
  CallData* calld = static_cast<CallData*>(elem->call_data);
1553
1560
  grpc_error_handle error = GRPC_ERROR_NONE;
@@ -1660,6 +1667,15 @@ void ClientChannel::UpdateStateAndPickerLocked(
1660
1667
  // Re-process queued picks.
1661
1668
  for (LbQueuedCall* call = lb_queued_calls_; call != nullptr;
1662
1669
  call = call->next) {
1670
+ // If there are a lot of queued calls here, resuming them all may cause us
1671
+ // to stay inside C-core for a long period of time. All of that work would
1672
+ // be done using the same ExecCtx instance and therefore the same cached
1673
+ // value of "now". The longer it takes to finish all of this work and exit
1674
+ // from C-core, the more stale the cached value of "now" may become. This
1675
+ // can cause problems whereby (e.g.) we calculate a timer deadline based
1676
+ // on the stale value, which results in the timer firing too early. To
1677
+ // avoid this, we invalidate the cached value for each call we process.
1678
+ ExecCtx::Get()->InvalidateNow();
1663
1679
  grpc_error_handle error = GRPC_ERROR_NONE;
1664
1680
  if (call->lb_call->PickSubchannelLocked(&error)) {
1665
1681
  call->lb_call->AsyncPickDone(error);
@@ -1671,6 +1687,40 @@ void ClientChannel::UpdateStateAndPickerLocked(
1671
1687
  pending_subchannel_updates_.clear();
1672
1688
  }
1673
1689
 
1690
+ namespace {
1691
+
1692
+ // TODO(roth): Remove this in favor of the gprpp Match() function once
1693
+ // we can do that without breaking lock annotations.
1694
+ template <typename T>
1695
+ T HandlePickResult(
1696
+ LoadBalancingPolicy::PickResult* result,
1697
+ std::function<T(LoadBalancingPolicy::PickResult::Complete*)> complete_func,
1698
+ std::function<T(LoadBalancingPolicy::PickResult::Queue*)> queue_func,
1699
+ std::function<T(LoadBalancingPolicy::PickResult::Fail*)> fail_func,
1700
+ std::function<T(LoadBalancingPolicy::PickResult::Drop*)> drop_func) {
1701
+ auto* complete_pick =
1702
+ absl::get_if<LoadBalancingPolicy::PickResult::Complete>(&result->result);
1703
+ if (complete_pick != nullptr) {
1704
+ return complete_func(complete_pick);
1705
+ }
1706
+ auto* queue_pick =
1707
+ absl::get_if<LoadBalancingPolicy::PickResult::Queue>(&result->result);
1708
+ if (queue_pick != nullptr) {
1709
+ return queue_func(queue_pick);
1710
+ }
1711
+ auto* fail_pick =
1712
+ absl::get_if<LoadBalancingPolicy::PickResult::Fail>(&result->result);
1713
+ if (fail_pick != nullptr) {
1714
+ return fail_func(fail_pick);
1715
+ }
1716
+ auto* drop_pick =
1717
+ absl::get_if<LoadBalancingPolicy::PickResult::Drop>(&result->result);
1718
+ GPR_ASSERT(drop_pick != nullptr);
1719
+ return drop_func(drop_pick);
1720
+ }
1721
+
1722
+ } // namespace
1723
+
1674
1724
  grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
1675
1725
  if (state_tracker_.state() != GRPC_CHANNEL_READY) {
1676
1726
  return GRPC_ERROR_CREATE_FROM_STATIC_STRING("channel not connected");
@@ -1680,21 +1730,31 @@ grpc_error_handle ClientChannel::DoPingLocked(grpc_transport_op* op) {
1680
1730
  MutexLock lock(&data_plane_mu_);
1681
1731
  result = picker_->Pick(LoadBalancingPolicy::PickArgs());
1682
1732
  }
1683
- ConnectedSubchannel* connected_subchannel = nullptr;
1684
- if (result.subchannel != nullptr) {
1685
- SubchannelWrapper* subchannel =
1686
- static_cast<SubchannelWrapper*>(result.subchannel.get());
1687
- connected_subchannel = subchannel->connected_subchannel();
1688
- }
1689
- if (connected_subchannel != nullptr) {
1690
- connected_subchannel->Ping(op->send_ping.on_initiate, op->send_ping.on_ack);
1691
- } else {
1692
- if (result.error == GRPC_ERROR_NONE) {
1693
- result.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1694
- "LB policy dropped call on ping");
1695
- }
1696
- }
1697
- return result.error;
1733
+ return HandlePickResult<grpc_error_handle>(
1734
+ &result,
1735
+ // Complete pick.
1736
+ [op](LoadBalancingPolicy::PickResult::Complete* complete_pick)
1737
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::work_serializer_) {
1738
+ SubchannelWrapper* subchannel = static_cast<SubchannelWrapper*>(
1739
+ complete_pick->subchannel.get());
1740
+ ConnectedSubchannel* connected_subchannel =
1741
+ subchannel->connected_subchannel();
1742
+ connected_subchannel->Ping(op->send_ping.on_initiate,
1743
+ op->send_ping.on_ack);
1744
+ return GRPC_ERROR_NONE;
1745
+ },
1746
+ // Queue pick.
1747
+ [](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/) {
1748
+ return GRPC_ERROR_CREATE_FROM_STATIC_STRING("LB picker queued call");
1749
+ },
1750
+ // Fail pick.
1751
+ [](LoadBalancingPolicy::PickResult::Fail* fail_pick) {
1752
+ return absl_status_to_grpc_error(fail_pick->status);
1753
+ },
1754
+ // Drop pick.
1755
+ [](LoadBalancingPolicy::PickResult::Drop* drop_pick) {
1756
+ return absl_status_to_grpc_error(drop_pick->status);
1757
+ });
1698
1758
  }
1699
1759
 
1700
1760
  void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
@@ -1749,7 +1809,7 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
1749
1809
  UpdateStateAndPickerLocked(
1750
1810
  GRPC_CHANNEL_SHUTDOWN, absl::Status(), "shutdown from API",
1751
1811
  absl::make_unique<LoadBalancingPolicy::TransientFailurePicker>(
1752
- GRPC_ERROR_REF(op->disconnect_with_error)));
1812
+ grpc_error_to_absl_status(op->disconnect_with_error)));
1753
1813
  }
1754
1814
  }
1755
1815
  GRPC_CHANNEL_STACK_UNREF(owning_stack_, "start_transport_op");
@@ -1919,10 +1979,26 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
1919
1979
  if (GPR_LIKELY(chand->deadline_checking_enabled_)) {
1920
1980
  grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
1921
1981
  }
1922
- // Intercept recv_initial_metadata for config selector on-committed callback.
1923
- if (batch->recv_initial_metadata) {
1924
- calld->InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(batch);
1982
+ // Intercept recv_trailing_metadata to call CallDispatchController::Commit(),
1983
+ // in case we wind up failing the call before we get down to the retry
1984
+ // or LB call layer.
1985
+ if (batch->recv_trailing_metadata) {
1986
+ calld->InjectRecvTrailingMetadataReadyForConfigSelectorCommitCallback(
1987
+ batch);
1988
+ }
1989
+ // If we already have a dynamic call, pass the batch down to it.
1990
+ // Note that once we have done so, we do not need to acquire the channel's
1991
+ // resolution mutex, which is more efficient (especially for streaming calls).
1992
+ if (calld->dynamic_call_ != nullptr) {
1993
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
1994
+ gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on dynamic_call=%p",
1995
+ chand, calld, calld->dynamic_call_.get());
1996
+ }
1997
+ calld->dynamic_call_->StartTransportStreamOpBatch(batch);
1998
+ return;
1925
1999
  }
2000
+ // We do not yet have a dynamic call.
2001
+ //
1926
2002
  // If we've previously been cancelled, immediately fail any new batches.
1927
2003
  if (GPR_UNLIKELY(calld->cancel_error_ != GRPC_ERROR_NONE)) {
1928
2004
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
@@ -1949,35 +2025,16 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
1949
2025
  gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
1950
2026
  calld, grpc_error_std_string(calld->cancel_error_).c_str());
1951
2027
  }
1952
- // If we do not have a dynamic call (i.e., name resolution has not
1953
- // yet completed), fail all pending batches. Otherwise, send the
1954
- // cancellation down to the dynamic call.
1955
- if (calld->dynamic_call_ == nullptr) {
1956
- calld->PendingBatchesFail(elem, GRPC_ERROR_REF(calld->cancel_error_),
1957
- NoYieldCallCombiner);
1958
- // Note: This will release the call combiner.
1959
- grpc_transport_stream_op_batch_finish_with_failure(
1960
- batch, GRPC_ERROR_REF(calld->cancel_error_), calld->call_combiner_);
1961
- } else {
1962
- // Note: This will release the call combiner.
1963
- calld->dynamic_call_->StartTransportStreamOpBatch(batch);
1964
- }
2028
+ // Fail all pending batches.
2029
+ calld->PendingBatchesFail(elem, GRPC_ERROR_REF(calld->cancel_error_),
2030
+ NoYieldCallCombiner);
2031
+ // Note: This will release the call combiner.
2032
+ grpc_transport_stream_op_batch_finish_with_failure(
2033
+ batch, GRPC_ERROR_REF(calld->cancel_error_), calld->call_combiner_);
1965
2034
  return;
1966
2035
  }
1967
2036
  // Add the batch to the pending list.
1968
2037
  calld->PendingBatchesAdd(elem, batch);
1969
- // Check if we've already created a dynamic call.
1970
- // Note that once we have done so, we do not need to acquire the channel's
1971
- // resolution mutex, which is more efficient (especially for streaming calls).
1972
- if (calld->dynamic_call_ != nullptr) {
1973
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
1974
- gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on dynamic_call=%p",
1975
- chand, calld, calld->dynamic_call_.get());
1976
- }
1977
- calld->PendingBatchesResume(elem);
1978
- return;
1979
- }
1980
- // We do not yet have a dynamic call.
1981
2038
  // For batches containing a send_initial_metadata op, acquire the
1982
2039
  // channel's resolution mutex to apply the service config to the call,
1983
2040
  // after which we will create a dynamic call.
@@ -2218,7 +2275,6 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
2218
2275
  ConfigSelector::CallConfig call_config =
2219
2276
  config_selector->GetCallConfig({&path_, initial_metadata, arena_});
2220
2277
  if (call_config.error != GRPC_ERROR_NONE) return call_config.error;
2221
- on_call_committed_ = std::move(call_config.on_call_committed);
2222
2278
  // Create a ServiceConfigCallData for the call. This stores a ref to the
2223
2279
  // ServiceConfig and caches the right set of parsed configs to use for
2224
2280
  // the call. The MethodConfig will store itself in the call context,
@@ -2226,7 +2282,8 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
2226
2282
  // will be cleaned up when the call ends.
2227
2283
  auto* service_config_call_data = arena_->New<ServiceConfigCallData>(
2228
2284
  std::move(call_config.service_config), call_config.method_configs,
2229
- std::move(call_config.call_attributes), call_context_);
2285
+ std::move(call_config.call_attributes),
2286
+ call_config.call_dispatch_controller, call_context_);
2230
2287
  // Apply our own method params to the call.
2231
2288
  auto* method_params = static_cast<ClientChannelMethodParsedConfig*>(
2232
2289
  service_config_call_data->GetMethodParsedConfig(
@@ -2265,36 +2322,36 @@ grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
  }

  void ClientChannel::CallData::
- RecvInitialMetadataReadyForConfigSelectorCommitCallback(
+ RecvTrailingMetadataReadyForConfigSelectorCommitCallback(
  void* arg, grpc_error_handle error) {
  auto* self = static_cast<CallData*>(arg);
- if (self->on_call_committed_ != nullptr) {
- self->on_call_committed_();
- self->on_call_committed_ = nullptr;
+ auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
+ self->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
+ if (service_config_call_data != nullptr) {
+ service_config_call_data->call_dispatch_controller()->Commit();
  }
  // Chain to original callback.
- Closure::Run(DEBUG_LOCATION, self->original_recv_initial_metadata_ready_,
+ Closure::Run(DEBUG_LOCATION, self->original_recv_trailing_metadata_ready_,
  GRPC_ERROR_REF(error));
  }

- // TODO(roth): Consider not intercepting this callback unless we
- // actually need to, if this causes a performance problem.
  void ClientChannel::CallData::
- InjectRecvInitialMetadataReadyForConfigSelectorCommitCallback(
+ InjectRecvTrailingMetadataReadyForConfigSelectorCommitCallback(
  grpc_transport_stream_op_batch* batch) {
- original_recv_initial_metadata_ready_ =
- batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
- GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_,
- RecvInitialMetadataReadyForConfigSelectorCommitCallback,
+ original_recv_trailing_metadata_ready_ =
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
+ RecvTrailingMetadataReadyForConfigSelectorCommitCallback,
  this, nullptr);
- batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
- &recv_initial_metadata_ready_;
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+ &recv_trailing_metadata_ready_;
  }

  void ClientChannel::CallData::AsyncResolutionDone(grpc_call_element* elem,
  grpc_error_handle error) {
- GRPC_CLOSURE_INIT(&pick_closure_, ResolutionDone, elem, nullptr);
- ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
+ // TODO(roth): Does this callback need to hold a ref to the call stack?
+ GRPC_CLOSURE_INIT(&resolution_done_closure_, ResolutionDone, elem, nullptr);
+ ExecCtx::Run(DEBUG_LOCATION, &resolution_done_closure_, error);
  }

  void ClientChannel::CallData::ResolutionDone(void* arg,
@@ -2530,22 +2587,39 @@ class ClientChannel::LoadBalancedCall::LbCallState
  // LoadBalancedCall
  //

+ namespace {
+
+ CallTracer::CallAttemptTracer* GetCallAttemptTracer(
+ grpc_call_context_element* context, bool is_transparent_retry) {
+ auto* call_tracer =
+ static_cast<CallTracer*>(context[GRPC_CONTEXT_CALL_TRACER].value);
+ if (call_tracer == nullptr) return nullptr;
+ return call_tracer->StartNewAttempt(is_transparent_retry);
+ }
+
+ } // namespace
+
  ClientChannel::LoadBalancedCall::LoadBalancedCall(
  ClientChannel* chand, const grpc_call_element_args& args,
- grpc_polling_entity* pollent, grpc_closure* on_call_destruction_complete)
- : RefCounted(GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
- ? "LoadBalancedCall"
- : nullptr),
+ grpc_polling_entity* pollent, grpc_closure* on_call_destruction_complete,
+ ConfigSelector::CallDispatchController* call_dispatch_controller,
+ bool is_transparent_retry)
+ : InternallyRefCounted(
+ GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)
+ ? "LoadBalancedCall"
+ : nullptr),
  chand_(chand),
  path_(grpc_slice_ref_internal(args.path)),
- call_start_time_(args.start_time),
  deadline_(args.deadline),
  arena_(args.arena),
  owning_call_(args.call_stack),
  call_combiner_(args.call_combiner),
  call_context_(args.context),
  pollent_(pollent),
- on_call_destruction_complete_(on_call_destruction_complete) {}
+ on_call_destruction_complete_(on_call_destruction_complete),
+ call_dispatch_controller_(call_dispatch_controller),
+ call_attempt_tracer_(
+ GetCallAttemptTracer(args.context, is_transparent_retry)) {}

  ClientChannel::LoadBalancedCall::~LoadBalancedCall() {
  grpc_slice_unref_internal(path_);
@@ -2565,6 +2639,16 @@ ClientChannel::LoadBalancedCall::~LoadBalancedCall() {
  }
  }

+ void ClientChannel::LoadBalancedCall::Orphan() {
+ // Compute latency and report it to the tracer.
+ if (call_attempt_tracer_ != nullptr) {
+ gpr_timespec latency =
+ gpr_cycle_counter_sub(gpr_get_cycle_counter(), lb_call_start_time_);
+ call_attempt_tracer_->RecordEnd(latency);
+ }
+ Unref();
+ }
+
  size_t ClientChannel::LoadBalancedCall::GetBatchIndex(
  grpc_transport_stream_op_batch* batch) {
  // Note: It is important the send_initial_metadata be the first entry
@@ -2680,10 +2764,79 @@ void ClientChannel::LoadBalancedCall::PendingBatchesResume()

  void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
  grpc_transport_stream_op_batch* batch) {
- // Intercept recv_trailing_metadata_ready for LB callback.
+ // Handle call tracing.
+ if (call_attempt_tracer_ != nullptr) {
+ // Record send ops in tracer.
+ if (batch->cancel_stream) {
+ call_attempt_tracer_->RecordCancel(
+ GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
+ }
+ if (batch->send_initial_metadata) {
+ call_attempt_tracer_->RecordSendInitialMetadata(
+ batch->payload->send_initial_metadata.send_initial_metadata,
+ batch->payload->send_initial_metadata.send_initial_metadata_flags);
+ peer_string_ = batch->payload->send_initial_metadata.peer_string;
+ original_send_initial_metadata_on_complete_ = batch->on_complete;
+ GRPC_CLOSURE_INIT(&send_initial_metadata_on_complete_,
+ SendInitialMetadataOnComplete, this, nullptr);
+ batch->on_complete = &send_initial_metadata_on_complete_;
+ }
+ if (batch->send_message) {
+ call_attempt_tracer_->RecordSendMessage(
+ *batch->payload->send_message.send_message);
+ }
+ if (batch->send_trailing_metadata) {
+ call_attempt_tracer_->RecordSendTrailingMetadata(
+ batch->payload->send_trailing_metadata.send_trailing_metadata);
+ }
+ // Intercept recv ops.
+ if (batch->recv_initial_metadata) {
+ recv_initial_metadata_ =
+ batch->payload->recv_initial_metadata.recv_initial_metadata;
+ original_recv_initial_metadata_ready_ =
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
+ GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_, RecvInitialMetadataReady,
+ this, nullptr);
+ batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
+ &recv_initial_metadata_ready_;
+ }
+ if (batch->recv_message) {
+ recv_message_ = batch->payload->recv_message.recv_message;
+ original_recv_message_ready_ =
+ batch->payload->recv_message.recv_message_ready;
+ GRPC_CLOSURE_INIT(&recv_message_ready_, RecvMessageReady, this, nullptr);
+ batch->payload->recv_message.recv_message_ready = &recv_message_ready_;
+ }
+ }
+ // Intercept recv_trailing_metadata even if there is no call tracer,
+ // since we may need to notify the LB policy about trailing metadata.
  if (batch->recv_trailing_metadata) {
- InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(batch);
+ recv_trailing_metadata_ =
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata;
+ transport_stream_stats_ =
+ batch->payload->recv_trailing_metadata.collect_stats;
+ original_recv_trailing_metadata_ready_ =
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
+ GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, RecvTrailingMetadataReady,
+ this, nullptr);
+ batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
+ &recv_trailing_metadata_ready_;
+ }
+ // If we've already gotten a subchannel call, pass the batch down to it.
+ // Note that once we have picked a subchannel, we do not need to acquire
+ // the channel's data plane mutex, which is more efficient (especially for
+ // streaming calls).
+ if (subchannel_call_ != nullptr) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
+ gpr_log(GPR_INFO,
+ "chand=%p lb_call=%p: starting batch on subchannel_call=%p",
+ chand_, this, subchannel_call_.get());
+ }
+ subchannel_call_->StartTransportStreamOpBatch(batch);
+ return;
  }
+ // We do not yet have a subchannel call.
+ //
  // If we've previously been cancelled, immediately fail any new batches.
  if (GPR_UNLIKELY(cancel_error_ != GRPC_ERROR_NONE)) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
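The interception above drives the new CallTracer::CallAttemptTracer hooks that this release adds for per-attempt tracing. A rough sketch of the producer side follows; the method names are taken from the call sites in this diff, but the parameter types are inferred from those call sites rather than copied from src/core/lib/channel/call_tracer.h, and MyAttemptTracer/MyCallTracer are hypothetical:

    // Sketch only: assumes the gRPC core headers that define CallTracer;
    // exact virtual signatures may differ from these approximations.
    class MyAttemptTracer : public grpc_core::CallTracer::CallAttemptTracer {
     public:
      void RecordSendInitialMetadata(grpc_metadata_batch* /*md*/,
                                     uint32_t /*flags*/) override {}
      void RecordOnDoneSendInitialMetadata(gpr_atm* /*peer_string*/) override {}
      void RecordSendMessage(const grpc_core::ByteStream& /*msg*/) override {}
      void RecordSendTrailingMetadata(grpc_metadata_batch* /*md*/) override {}
      void RecordReceivedInitialMetadata(grpc_metadata_batch* /*md*/,
                                         uint32_t /*flags*/) override {}
      void RecordReceivedMessage(const grpc_core::ByteStream& /*msg*/) override {}
      void RecordReceivedTrailingMetadata(
          absl::Status /*status*/, grpc_metadata_batch* /*md*/,
          const grpc_transport_stream_stats& /*stats*/) override {}
      void RecordCancel(grpc_error_handle /*error*/) override {}
      void RecordEnd(const gpr_timespec& /*latency*/) override {}
    };

    class MyCallTracer : public grpc_core::CallTracer {
     public:
      CallAttemptTracer* StartNewAttempt(bool /*is_transparent_retry*/) override {
        return new MyAttemptTracer();  // ownership/arena handling omitted
      }
    };

    // A filter that runs before the client channel could install the tracer
    // so that GetCallAttemptTracer() above finds it:
    //   call_context[GRPC_CONTEXT_CALL_TRACER].value = my_call_tracer;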
@@ -2708,36 +2861,15 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
  gpr_log(GPR_INFO, "chand=%p lb_call=%p: recording cancel_error=%s",
  chand_, this, grpc_error_std_string(cancel_error_).c_str());
  }
- // If we do not have a subchannel call (i.e., a pick has not yet
- // been started), fail all pending batches. Otherwise, send the
- // cancellation down to the subchannel call.
- if (subchannel_call_ == nullptr) {
- PendingBatchesFail(GRPC_ERROR_REF(cancel_error_), NoYieldCallCombiner);
- // Note: This will release the call combiner.
- grpc_transport_stream_op_batch_finish_with_failure(
- batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
- } else {
- // Note: This will release the call combiner.
- subchannel_call_->StartTransportStreamOpBatch(batch);
- }
+ // Fail all pending batches.
+ PendingBatchesFail(GRPC_ERROR_REF(cancel_error_), NoYieldCallCombiner);
+ // Note: This will release the call combiner.
+ grpc_transport_stream_op_batch_finish_with_failure(
+ batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
  return;
  }
  // Add the batch to the pending list.
  PendingBatchesAdd(batch);
- // Check if we've already gotten a subchannel call.
- // Note that once we have picked a subchannel, we do not need to acquire
- // the channel's data plane mutex, which is more efficient (especially for
- // streaming calls).
- if (subchannel_call_ != nullptr) {
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
- gpr_log(GPR_INFO,
- "chand=%p lb_call=%p: starting batch on subchannel_call=%p",
- chand_, this, subchannel_call_.get());
- }
- PendingBatchesResume();
- return;
- }
- // We do not yet have a subchannel call.
  // For batches containing a send_initial_metadata op, acquire the
  // channel's data plane mutex to pick a subchannel.
  if (GPR_LIKELY(batch->send_initial_metadata)) {
@@ -2759,38 +2891,82 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
  }
  }

- void ClientChannel::LoadBalancedCall::
- RecvTrailingMetadataReadyForLoadBalancingPolicy(void* arg,
- grpc_error_handle error) {
+ void ClientChannel::LoadBalancedCall::SendInitialMetadataOnComplete(
+ void* arg, grpc_error_handle error) {
+ auto* self = static_cast<LoadBalancedCall*>(arg);
+ self->call_attempt_tracer_->RecordOnDoneSendInitialMetadata(
+ self->peer_string_);
+ Closure::Run(DEBUG_LOCATION,
+ self->original_send_initial_metadata_on_complete_,
+ GRPC_ERROR_REF(error));
+ }
+
+ void ClientChannel::LoadBalancedCall::RecvInitialMetadataReady(
+ void* arg, grpc_error_handle error) {
+ auto* self = static_cast<LoadBalancedCall*>(arg);
+ if (error == GRPC_ERROR_NONE) {
+ // recv_initial_metadata_flags is not populated for clients
+ self->call_attempt_tracer_->RecordReceivedInitialMetadata(
+ self->recv_initial_metadata_, 0 /* recv_initial_metadata_flags */);
+ }
+ Closure::Run(DEBUG_LOCATION, self->original_recv_initial_metadata_ready_,
+ GRPC_ERROR_REF(error));
+ }
+
+ void ClientChannel::LoadBalancedCall::RecvMessageReady(
+ void* arg, grpc_error_handle error) {
+ auto* self = static_cast<LoadBalancedCall*>(arg);
+ if (*self->recv_message_ != nullptr) {
+ self->call_attempt_tracer_->RecordReceivedMessage(**self->recv_message_);
+ }
+ Closure::Run(DEBUG_LOCATION, self->original_recv_message_ready_,
+ GRPC_ERROR_REF(error));
+ }
+
+ void ClientChannel::LoadBalancedCall::RecvTrailingMetadataReady(
+ void* arg, grpc_error_handle error) {
  auto* self = static_cast<LoadBalancedCall*>(arg);
- if (self->lb_recv_trailing_metadata_ready_ != nullptr) {
- // Set error if call did not succeed.
- grpc_error_handle error_for_lb = GRPC_ERROR_NONE;
+ // Check if we have a tracer or an LB callback to invoke.
+ if (self->call_attempt_tracer_ != nullptr ||
+ self->lb_recv_trailing_metadata_ready_ != nullptr) {
+ // Get the call's status.
+ absl::Status status;
  if (error != GRPC_ERROR_NONE) {
- error_for_lb = error;
+ // Get status from error.
+ grpc_status_code code;
+ grpc_slice message = grpc_empty_slice();
+ grpc_error_get_status(error, self->deadline_, &code, &message,
+ /*http_error=*/nullptr, /*error_string=*/nullptr);
+ status = absl::Status(static_cast<absl::StatusCode>(code),
+ StringViewFromSlice(message));
  } else {
+ // Get status from headers.
  const auto& fields = self->recv_trailing_metadata_->idx.named;
  GPR_ASSERT(fields.grpc_status != nullptr);
- grpc_status_code status =
+ grpc_status_code code =
  grpc_get_status_code_from_metadata(fields.grpc_status->md);
- std::string msg;
- if (status != GRPC_STATUS_OK) {
- error_for_lb = grpc_error_set_int(
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("call failed"),
- GRPC_ERROR_INT_GRPC_STATUS, status);
+ if (code != GRPC_STATUS_OK) {
+ absl::string_view message;
  if (fields.grpc_message != nullptr) {
- error_for_lb = grpc_error_set_str(
- error_for_lb, GRPC_ERROR_STR_GRPC_MESSAGE,
- grpc_slice_ref_internal(GRPC_MDVALUE(fields.grpc_message->md)));
+ message = StringViewFromSlice(GRPC_MDVALUE(fields.grpc_message->md));
  }
+ status = absl::Status(static_cast<absl::StatusCode>(code), message);
  }
  }
- // Invoke callback to LB policy.
- Metadata trailing_metadata(self, self->recv_trailing_metadata_);
- LbCallState lb_call_state(self);
- self->lb_recv_trailing_metadata_ready_(error_for_lb, &trailing_metadata,
- &lb_call_state);
- if (error == GRPC_ERROR_NONE) GRPC_ERROR_UNREF(error_for_lb);
+ // If we have a tracer, notify it.
+ if (self->call_attempt_tracer_ != nullptr) {
+ self->call_attempt_tracer_->RecordReceivedTrailingMetadata(
+ status, self->recv_trailing_metadata_,
+ *self->transport_stream_stats_);
+ }
+ // If the LB policy requested a callback for trailing metadata, invoke
+ // the callback.
+ if (self->lb_recv_trailing_metadata_ready_ != nullptr) {
+ Metadata trailing_metadata(self, self->recv_trailing_metadata_);
+ LbCallState lb_call_state(self);
+ self->lb_recv_trailing_metadata_ready_(status, &trailing_metadata,
+ &lb_call_state);
+ }
  }
  // Chain to original callback.
  if (self->failure_error_ != GRPC_ERROR_NONE) {
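With the hunk above, the LB policy's per-call completion callback now receives an absl::Status (derived either from the cancellation error or from the grpc-status/grpc-message trailers) instead of a grpc_error_handle. A rough sketch of such a callback as a policy might supply it through PickResult::Complete; the parameter list is inferred from the invocation above, and the authoritative std::function type lives in lb_policy.h:

    // Inferred callback shape; names other than MetadataInterface/CallState
    // are illustrative.
    auto recv_trailing_cb =
        [](absl::Status status,
           grpc_core::LoadBalancingPolicy::MetadataInterface* trailing_metadata,
           grpc_core::LoadBalancingPolicy::CallState* call_state) {
          // e.g. feed per-call completion results into load reporting
          (void)status; (void)trailing_metadata; (void)call_state;
        };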
@@ -2803,23 +2979,9 @@ void ClientChannel::LoadBalancedCall::
  error);
  }

- void ClientChannel::LoadBalancedCall::
- InjectRecvTrailingMetadataReadyForLoadBalancingPolicy(
- grpc_transport_stream_op_batch* batch) {
- recv_trailing_metadata_ =
- batch->payload->recv_trailing_metadata.recv_trailing_metadata;
- original_recv_trailing_metadata_ready_ =
- batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready;
- GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_,
- RecvTrailingMetadataReadyForLoadBalancingPolicy, this,
- grpc_schedule_on_exec_ctx);
- batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready =
- &recv_trailing_metadata_ready_;
- }
-
  void ClientChannel::LoadBalancedCall::CreateSubchannelCall() {
  SubchannelCall::Args call_args = {
- std::move(connected_subchannel_), pollent_, path_, call_start_time_,
+ std::move(connected_subchannel_), pollent_, path_, /*start_time=*/0,
  deadline_, arena_,
  // TODO(roth): When we implement hedging support, we will probably
  // need to use a separate call context for each subchannel call.
@@ -2873,6 +3035,7 @@ class ClientChannel::LoadBalancedCall::LbQueuedCallCanceller {
  lb_call->lb_call_canceller_);
  }
  if (lb_call->lb_call_canceller_ == self && error != GRPC_ERROR_NONE) {
+ lb_call->call_dispatch_controller_->Commit();
  // Remove pick from list of queued picks.
  lb_call->MaybeRemoveCallFromLbQueuedCallsLocked();
  // Fail pending batches on the call.
@@ -2914,6 +3077,7 @@ void ClientChannel::LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
  }

  void ClientChannel::LoadBalancedCall::AsyncPickDone(grpc_error_handle error) {
+ // TODO(roth): Does this callback need to hold a ref to LoadBalancedCall?
  GRPC_CLOSURE_INIT(&pick_closure_, PickDone, this, grpc_schedule_on_exec_ctx);
  ExecCtx::Run(DEBUG_LOCATION, &pick_closure_, error);
  }
@@ -2930,26 +3094,10 @@ void ClientChannel::LoadBalancedCall::PickDone(void* arg,
  self->PendingBatchesFail(GRPC_ERROR_REF(error), YieldCallCombiner);
  return;
  }
+ self->call_dispatch_controller_->Commit();
  self->CreateSubchannelCall();
  }

- namespace {
-
- const char* PickResultTypeName(
- LoadBalancingPolicy::PickResult::ResultType type) {
- switch (type) {
- case LoadBalancingPolicy::PickResult::PICK_COMPLETE:
- return "COMPLETE";
- case LoadBalancingPolicy::PickResult::PICK_QUEUE:
- return "QUEUE";
- case LoadBalancingPolicy::PickResult::PICK_FAILED:
- return "FAILED";
- }
- GPR_UNREACHABLE_CODE(return "UNKNOWN");
- }
-
- } // namespace
-
  void ClientChannel::LoadBalancedCall::PickSubchannel(void* arg,
  grpc_error_handle error) {
  auto* self = static_cast<LoadBalancedCall*>(arg);
@@ -2983,64 +3131,82 @@ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
  Metadata initial_metadata(this, initial_metadata_batch);
  pick_args.initial_metadata = &initial_metadata;
  auto result = chand_->picker_->Pick(pick_args);
- if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
- gpr_log(
- GPR_INFO,
- "chand=%p lb_call=%p: LB pick returned %s (subchannel=%p, error=%s)",
- chand_, this, PickResultTypeName(result.type), result.subchannel.get(),
- grpc_error_std_string(result.error).c_str());
- }
- switch (result.type) {
- case LoadBalancingPolicy::PickResult::PICK_FAILED: {
- // If we're shutting down, fail all RPCs.
- grpc_error_handle disconnect_error = chand_->disconnect_error();
- if (disconnect_error != GRPC_ERROR_NONE) {
- GRPC_ERROR_UNREF(result.error);
- MaybeRemoveCallFromLbQueuedCallsLocked();
- *error = GRPC_ERROR_REF(disconnect_error);
- return true;
- }
- // If wait_for_ready is false, then the error indicates the RPC
- // attempt's final status.
- if ((send_initial_metadata_flags &
- GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
- grpc_error_handle new_error =
- GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
- "Failed to pick subchannel", &result.error, 1);
- GRPC_ERROR_UNREF(result.error);
- *error = new_error;
- MaybeRemoveCallFromLbQueuedCallsLocked();
- return true;
- }
- // If wait_for_ready is true, then queue to retry when we get a new
- // picker.
- GRPC_ERROR_UNREF(result.error);
- }
- // Fallthrough
- case LoadBalancingPolicy::PickResult::PICK_QUEUE:
- MaybeAddCallToLbQueuedCallsLocked();
- return false;
- default: // PICK_COMPLETE
- MaybeRemoveCallFromLbQueuedCallsLocked();
- // Handle drops.
- if (GPR_UNLIKELY(result.subchannel == nullptr)) {
- result.error = grpc_error_set_int(
- grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
- "Call dropped by load balancing policy"),
- GRPC_ERROR_INT_GRPC_STATUS,
- GRPC_STATUS_UNAVAILABLE),
- GRPC_ERROR_INT_LB_POLICY_DROP, 1);
- } else {
- // Grab a ref to the connected subchannel while we're still
- // holding the data plane mutex.
- connected_subchannel_ =
- chand_->GetConnectedSubchannelInDataPlane(result.subchannel.get());
- GPR_ASSERT(connected_subchannel_ != nullptr);
- }
- lb_recv_trailing_metadata_ready_ = result.recv_trailing_metadata_ready;
- *error = result.error;
- return true;
- }
+ return HandlePickResult<bool>(
+ &result,
+ // CompletePick
+ [this](LoadBalancingPolicy::PickResult::Complete* complete_pick)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+ gpr_log(GPR_INFO,
+ "chand=%p lb_call=%p: LB pick succeeded: subchannel=%p",
+ chand_, this, complete_pick->subchannel.get());
+ }
+ GPR_ASSERT(complete_pick->subchannel != nullptr);
+ // Grab a ref to the connected subchannel while we're still
+ // holding the data plane mutex.
+ connected_subchannel_ = chand_->GetConnectedSubchannelInDataPlane(
+ complete_pick->subchannel.get());
+ GPR_ASSERT(connected_subchannel_ != nullptr);
+ lb_recv_trailing_metadata_ready_ =
+ std::move(complete_pick->recv_trailing_metadata_ready);
+ MaybeRemoveCallFromLbQueuedCallsLocked();
+ return true;
+ },
+ // QueuePick
+ [this](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+ gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick queued", chand_,
+ this);
+ }
+ MaybeAddCallToLbQueuedCallsLocked();
+ return false;
+ },
+ // FailPick
+ [this, send_initial_metadata_flags,
+ &error](LoadBalancingPolicy::PickResult::Fail* fail_pick)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+ gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick failed: %s",
+ chand_, this, fail_pick->status.ToString().c_str());
+ }
+ // If we're shutting down, fail all RPCs.
+ grpc_error_handle disconnect_error = chand_->disconnect_error();
+ if (disconnect_error != GRPC_ERROR_NONE) {
+ MaybeRemoveCallFromLbQueuedCallsLocked();
+ *error = GRPC_ERROR_REF(disconnect_error);
+ return true;
+ }
+ // If wait_for_ready is false, then the error indicates the RPC
+ // attempt's final status.
+ if ((send_initial_metadata_flags &
+ GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
+ grpc_error_handle lb_error =
+ absl_status_to_grpc_error(fail_pick->status);
+ *error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+ "Failed to pick subchannel", &lb_error, 1);
+ GRPC_ERROR_UNREF(lb_error);
+ MaybeRemoveCallFromLbQueuedCallsLocked();
+ return true;
+ }
+ // If wait_for_ready is true, then queue to retry when we get a new
+ // picker.
+ MaybeAddCallToLbQueuedCallsLocked();
+ return false;
+ },
+ // DropPick
+ [this, &error](LoadBalancingPolicy::PickResult::Drop* drop_pick)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
+ gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick dropped: %s",
+ chand_, this, drop_pick->status.ToString().c_str());
+ }
+ *error =
+ grpc_error_set_int(absl_status_to_grpc_error(drop_pick->status),
+ GRPC_ERROR_INT_LB_POLICY_DROP, 1);
+ MaybeRemoveCallFromLbQueuedCallsLocked();
+ return true;
+ });
  }

  } // namespace grpc_core
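The rewritten PickSubchannelLocked() above consumes the new variant-style LoadBalancingPolicy::PickResult (Complete, Queue, Fail, Drop) via the HandlePickResult helper instead of the old PICK_* enum. A rough sketch of the producer side under the 1.40 API follows; MyPicker and its fields are hypothetical, and the exact PickResult constructors are defined in lb_policy.h:

    // Hypothetical picker illustrating the four result kinds handled above.
    class MyPicker : public grpc_core::LoadBalancingPolicy::SubchannelPicker {
     public:
      PickResult Pick(PickArgs /*args*/) override {
        if (shutting_down_) {
          return PickResult::Fail(
              absl::UnavailableError("channel shutting down"));
        }
        if (ready_subchannels_.empty()) {
          return PickResult::Queue();  // re-attempted when a new picker arrives
        }
        if (should_drop_) {
          return PickResult::Drop(absl::UnavailableError("dropped by policy"));
        }
        return PickResult::Complete(ready_subchannels_[0]);
      }

     private:
      bool shutting_down_ = false;
      bool should_drop_ = false;
      std::vector<grpc_core::RefCountedPtr<grpc_core::SubchannelInterface>>
          ready_subchannels_;
    };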