grpc 1.8.7 → 1.9.0.pre1

Potentially problematic release: this version of grpc might be problematic.

Files changed (488)
  1. checksums.yaml +4 -4
  2. data/Makefile +549 -325
  3. data/include/grpc/impl/codegen/grpc_types.h +1 -2
  4. data/include/grpc/impl/codegen/port_platform.h +46 -5
  5. data/include/grpc/impl/codegen/slice.h +1 -2
  6. data/include/grpc/module.modulemap +0 -2
  7. data/include/grpc/slice_buffer.h +1 -2
  8. data/include/grpc/support/log.h +4 -2
  9. data/include/grpc/support/thd.h +4 -1
  10. data/include/grpc/support/tls.h +6 -0
  11. data/include/grpc/support/tls_gcc.h +5 -40
  12. data/include/grpc/support/tls_msvc.h +9 -0
  13. data/include/grpc/support/tls_pthread.h +9 -0
  14. data/src/core/ext/filters/client_channel/backup_poller.cc +32 -29
  15. data/src/core/ext/filters/client_channel/backup_poller.h +2 -2
  16. data/src/core/ext/filters/client_channel/channel_connectivity.cc +26 -32
  17. data/src/core/ext/filters/client_channel/client_channel.cc +325 -356
  18. data/src/core/ext/filters/client_channel/client_channel.h +4 -12
  19. data/src/core/ext/filters/client_channel/client_channel_factory.cc +9 -14
  20. data/src/core/ext/filters/client_channel/client_channel_factory.h +7 -20
  21. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +7 -10
  22. data/src/core/ext/filters/client_channel/connector.cc +6 -7
  23. data/src/core/ext/filters/client_channel/connector.h +6 -16
  24. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +38 -50
  25. data/src/core/ext/filters/client_channel/http_connect_handshaker.h +0 -8
  26. data/src/core/ext/filters/client_channel/http_proxy.cc +9 -13
  27. data/src/core/ext/filters/client_channel/http_proxy.h +0 -8
  28. data/src/core/ext/filters/client_channel/lb_policy.cc +72 -94
  29. data/src/core/ext/filters/client_channel/lb_policy.h +83 -92
  30. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +14 -19
  31. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +0 -8
  32. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +474 -591
  33. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h +0 -8
  34. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h +2 -10
  35. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc +6 -6
  36. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +0 -8
  37. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +0 -9
  38. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +0 -9
  39. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +3 -4
  40. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +9 -12
  41. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +160 -182
  42. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +182 -221
  43. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc +24 -35
  44. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +9 -20
  45. data/src/core/ext/filters/client_channel/lb_policy_factory.cc +6 -9
  46. data/src/core/ext/filters/client_channel/lb_policy_factory.h +4 -15
  47. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +3 -3
  48. data/src/core/ext/filters/client_channel/lb_policy_registry.h +1 -9
  49. data/src/core/ext/filters/client_channel/parse_address.cc +1 -1
  50. data/src/core/ext/filters/client_channel/parse_address.h +0 -8
  51. data/src/core/ext/filters/client_channel/proxy_mapper.cc +6 -8
  52. data/src/core/ext/filters/client_channel/proxy_mapper.h +6 -16
  53. data/src/core/ext/filters/client_channel/proxy_mapper_registry.cc +13 -17
  54. data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +2 -12
  55. data/src/core/ext/filters/client_channel/resolver.cc +11 -13
  56. data/src/core/ext/filters/client_channel/resolver.h +14 -25
  57. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +57 -70
  58. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +2 -12
  59. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +23 -31
  60. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +27 -45
  61. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +5 -15
  62. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +9 -11
  63. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +53 -66
  64. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +25 -33
  65. data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h +1 -9
  66. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +26 -35
  67. data/src/core/ext/filters/client_channel/resolver_factory.cc +2 -3
  68. data/src/core/ext/filters/client_channel/resolver_factory.h +2 -12
  69. data/src/core/ext/filters/client_channel/resolver_registry.cc +12 -15
  70. data/src/core/ext/filters/client_channel/resolver_registry.h +3 -12
  71. data/src/core/ext/filters/client_channel/retry_throttle.h +0 -8
  72. data/src/core/ext/filters/client_channel/subchannel.cc +289 -301
  73. data/src/core/ext/filters/client_channel/subchannel.h +57 -84
  74. data/src/core/ext/filters/client_channel/subchannel_index.cc +30 -33
  75. data/src/core/ext/filters/client_channel/subchannel_index.h +4 -16
  76. data/src/core/ext/filters/client_channel/uri_parser.cc +13 -17
  77. data/src/core/ext/filters/client_channel/uri_parser.h +1 -10
  78. data/src/core/ext/filters/deadline/deadline_filter.cc +49 -67
  79. data/src/core/ext/filters/deadline/deadline_filter.h +4 -14
  80. data/src/core/ext/filters/http/client/http_client_filter.cc +60 -77
  81. data/src/core/ext/filters/http/client/http_client_filter.h +0 -8
  82. data/src/core/ext/filters/http/http_filters_plugin.cc +4 -6
  83. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +63 -79
  84. data/src/core/ext/filters/http/message_compress/message_compress_filter.h +0 -8
  85. data/src/core/ext/filters/http/server/http_server_filter.cc +57 -71
  86. data/src/core/ext/filters/http/server/http_server_filter.h +0 -8
  87. data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +19 -24
  88. data/src/core/ext/filters/load_reporting/server_load_reporting_filter.h +0 -8
  89. data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc +3 -3
  90. data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +0 -8
  91. data/src/core/ext/filters/max_age/max_age_filter.cc +49 -62
  92. data/src/core/ext/filters/max_age/max_age_filter.h +0 -8
  93. data/src/core/ext/filters/message_size/message_size_filter.cc +23 -29
  94. data/src/core/ext/filters/message_size/message_size_filter.h +0 -8
  95. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc +15 -18
  96. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +0 -8
  97. data/src/core/ext/filters/workarounds/workaround_utils.h +0 -8
  98. data/src/core/ext/transport/chttp2/alpn/alpn.h +0 -8
  99. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +33 -40
  100. data/src/core/ext/transport/chttp2/client/chttp2_connector.h +0 -8
  101. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +15 -17
  102. data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +8 -8
  103. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +23 -28
  104. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +50 -57
  105. data/src/core/ext/transport/chttp2/server/chttp2_server.h +1 -10
  106. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc +3 -3
  107. data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +7 -10
  108. data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc +5 -6
  109. data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +7 -9
  110. data/src/core/ext/transport/chttp2/transport/bin_decoder.h +2 -11
  111. data/src/core/ext/transport/chttp2/transport/bin_encoder.h +1 -9
  112. data/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc +10 -2
  113. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +516 -636
  114. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +4 -11
  115. data/src/core/ext/transport/chttp2/transport/flow_control.cc +29 -13
  116. data/src/core/ext/transport/chttp2/transport/flow_control.h +196 -53
  117. data/src/core/ext/transport/chttp2/transport/frame.h +0 -8
  118. data/src/core/ext/transport/chttp2/transport/frame_data.cc +31 -33
  119. data/src/core/ext/transport/chttp2/transport/frame_data.h +3 -12
  120. data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +2 -3
  121. data/src/core/ext/transport/chttp2/transport/frame_goaway.h +1 -10
  122. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +5 -6
  123. data/src/core/ext/transport/chttp2/transport/frame_ping.h +1 -9
  124. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +2 -3
  125. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.h +1 -10
  126. data/src/core/ext/transport/chttp2/transport/frame_settings.cc +8 -3
  127. data/src/core/ext/transport/chttp2/transport/frame_settings.h +1 -10
  128. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +8 -8
  129. data/src/core/ext/transport/chttp2/transport/frame_window_update.h +5 -11
  130. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +63 -81
  131. data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +2 -12
  132. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +230 -318
  133. data/src/core/ext/transport/chttp2/transport/hpack_parser.h +6 -19
  134. data/src/core/ext/transport/chttp2/transport/hpack_table.cc +14 -20
  135. data/src/core/ext/transport/chttp2/transport/hpack_table.h +5 -16
  136. data/src/core/ext/transport/chttp2/transport/http2_settings.h +0 -7
  137. data/src/core/ext/transport/chttp2/transport/huffsyms.h +0 -8
  138. data/src/core/ext/transport/chttp2/transport/incoming_metadata.cc +8 -11
  139. data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +4 -13
  140. data/src/core/ext/transport/chttp2/transport/internal.h +51 -75
  141. data/src/core/ext/transport/chttp2/transport/parsing.cc +83 -109
  142. data/src/core/ext/transport/chttp2/transport/stream_lists.cc +2 -0
  143. data/src/core/ext/transport/chttp2/transport/stream_map.h +0 -8
  144. data/src/core/ext/transport/chttp2/transport/varint.h +0 -8
  145. data/src/core/ext/transport/chttp2/transport/writing.cc +61 -65
  146. data/src/core/ext/transport/inproc/inproc_plugin.cc +2 -4
  147. data/src/core/ext/transport/inproc/inproc_transport.cc +177 -188
  148. data/src/core/ext/transport/inproc/inproc_transport.h +0 -8
  149. data/src/core/lib/backoff/backoff.cc +39 -44
  150. data/src/core/lib/backoff/backoff.h +61 -57
  151. data/src/core/lib/channel/channel_args.cc +8 -10
  152. data/src/core/lib/channel/channel_args.h +4 -13
  153. data/src/core/lib/channel/channel_stack.cc +19 -27
  154. data/src/core/lib/channel/channel_stack.h +27 -47
  155. data/src/core/lib/channel/channel_stack_builder.cc +11 -14
  156. data/src/core/lib/channel/channel_stack_builder.h +4 -15
  157. data/src/core/lib/channel/connected_channel.cc +23 -36
  158. data/src/core/lib/channel/connected_channel.h +1 -10
  159. data/src/core/lib/channel/handshaker.cc +31 -40
  160. data/src/core/lib/channel/handshaker.h +14 -25
  161. data/src/core/lib/channel/handshaker_factory.cc +6 -6
  162. data/src/core/lib/channel/handshaker_factory.h +5 -15
  163. data/src/core/lib/channel/handshaker_registry.cc +9 -13
  164. data/src/core/lib/channel/handshaker_registry.h +2 -11
  165. data/src/core/lib/compression/algorithm_metadata.h +0 -8
  166. data/src/core/lib/compression/message_compress.cc +19 -23
  167. data/src/core/lib/compression/message_compress.h +2 -12
  168. data/src/core/lib/compression/stream_compression.cc +1 -1
  169. data/src/core/lib/compression/stream_compression.h +0 -8
  170. data/src/core/lib/compression/stream_compression_gzip.cc +12 -11
  171. data/src/core/lib/compression/stream_compression_gzip.h +0 -8
  172. data/src/core/lib/compression/stream_compression_identity.h +0 -8
  173. data/src/core/lib/debug/stats.cc +4 -4
  174. data/src/core/lib/debug/stats.h +9 -19
  175. data/src/core/lib/debug/stats_data.cc +85 -116
  176. data/src/core/lib/debug/stats_data.h +236 -312
  177. data/src/core/lib/debug/trace.cc +1 -1
  178. data/src/core/lib/debug/trace.h +0 -12
  179. data/src/core/lib/{support → gpr++}/abstract.h +8 -3
  180. data/src/core/lib/{support → gpr++}/atomic.h +5 -5
  181. data/src/core/lib/{support → gpr++}/atomic_with_atm.h +3 -3
  182. data/src/core/lib/{support → gpr++}/atomic_with_std.h +3 -3
  183. data/src/core/lib/gpr++/debug_location.h +52 -0
  184. data/src/core/lib/gpr++/inlined_vector.h +112 -0
  185. data/src/core/lib/{support → gpr++}/manual_constructor.h +2 -2
  186. data/src/core/lib/{support → gpr++}/memory.h +3 -3
  187. data/src/core/lib/gpr++/orphanable.h +171 -0
  188. data/src/core/lib/gpr++/ref_counted.h +133 -0
  189. data/src/core/lib/gpr++/ref_counted_ptr.h +99 -0
  190. data/src/core/lib/{support → gpr}/alloc.cc +0 -0
  191. data/src/core/lib/{support → gpr}/arena.cc +1 -1
  192. data/src/core/lib/{support → gpr}/arena.h +3 -11
  193. data/src/core/lib/{support → gpr}/atm.cc +0 -0
  194. data/src/core/lib/{support → gpr}/avl.cc +0 -0
  195. data/src/core/lib/{support → gpr}/cmdline.cc +1 -1
  196. data/src/core/lib/{support → gpr}/cpu_iphone.cc +0 -0
  197. data/src/core/lib/{support → gpr}/cpu_linux.cc +0 -0
  198. data/src/core/lib/{support → gpr}/cpu_posix.cc +0 -0
  199. data/src/core/lib/{support → gpr}/cpu_windows.cc +0 -0
  200. data/src/core/lib/{support → gpr}/env.h +3 -11
  201. data/src/core/lib/{support → gpr}/env_linux.cc +2 -2
  202. data/src/core/lib/{support → gpr}/env_posix.cc +4 -4
  203. data/src/core/lib/{support → gpr}/env_windows.cc +3 -3
  204. data/src/core/lib/{support → gpr}/fork.cc +3 -3
  205. data/src/core/lib/{support → gpr}/fork.h +3 -3
  206. data/src/core/lib/{support → gpr}/host_port.cc +1 -1
  207. data/src/core/lib/{support → gpr}/log.cc +3 -3
  208. data/src/core/lib/{support → gpr}/log_android.cc +3 -3
  209. data/src/core/lib/{support → gpr}/log_linux.cc +1 -1
  210. data/src/core/lib/{support → gpr}/log_posix.cc +5 -5
  211. data/src/core/lib/{support → gpr}/log_windows.cc +3 -3
  212. data/src/core/lib/{support → gpr}/mpscq.cc +1 -1
  213. data/src/core/lib/{support → gpr}/mpscq.h +3 -10
  214. data/src/core/lib/{support → gpr}/murmur_hash.cc +1 -1
  215. data/src/core/lib/{support → gpr}/murmur_hash.h +3 -11
  216. data/src/core/lib/{support → gpr}/spinlock.h +3 -3
  217. data/src/core/lib/{support → gpr}/string.cc +1 -1
  218. data/src/core/lib/{support → gpr}/string.h +3 -10
  219. data/src/core/lib/{support → gpr}/string_posix.cc +0 -0
  220. data/src/core/lib/{support → gpr}/string_util_windows.cc +2 -2
  221. data/src/core/lib/{support → gpr}/string_windows.cc +1 -1
  222. data/src/core/lib/{support → gpr}/string_windows.h +3 -11
  223. data/src/core/lib/{support → gpr}/subprocess_posix.cc +0 -0
  224. data/src/core/lib/{support → gpr}/subprocess_windows.cc +2 -2
  225. data/src/core/lib/{support → gpr}/sync.cc +0 -0
  226. data/src/core/lib/{support → gpr}/sync_posix.cc +10 -1
  227. data/src/core/lib/{support → gpr}/sync_windows.cc +0 -0
  228. data/src/core/lib/{support → gpr}/thd.cc +0 -0
  229. data/src/core/lib/{support → gpr}/thd_internal.h +3 -3
  230. data/src/core/lib/{support → gpr}/thd_posix.cc +18 -2
  231. data/src/core/lib/{support → gpr}/thd_windows.cc +2 -1
  232. data/src/core/lib/{support → gpr}/time.cc +0 -0
  233. data/src/core/lib/{support → gpr}/time_posix.cc +2 -4
  234. data/src/core/lib/{support → gpr}/time_precise.cc +1 -1
  235. data/src/core/lib/{support → gpr}/time_precise.h +3 -11
  236. data/src/core/lib/{support → gpr}/time_windows.cc +1 -3
  237. data/src/core/lib/{support → gpr}/tls_pthread.cc +0 -0
  238. data/src/core/lib/{support → gpr}/tmpfile.h +3 -11
  239. data/src/core/lib/{support → gpr}/tmpfile_msys.cc +2 -2
  240. data/src/core/lib/{support → gpr}/tmpfile_posix.cc +2 -2
  241. data/src/core/lib/{support → gpr}/tmpfile_windows.cc +2 -2
  242. data/src/core/lib/{support → gpr}/wrap_memcpy.cc +0 -0
  243. data/src/core/lib/http/format_request.cc +1 -1
  244. data/src/core/lib/http/format_request.h +0 -8
  245. data/src/core/lib/http/httpcli.cc +55 -74
  246. data/src/core/lib/http/httpcli.h +13 -22
  247. data/src/core/lib/http/httpcli_security_connector.cc +27 -33
  248. data/src/core/lib/http/parser.h +0 -8
  249. data/src/core/lib/iomgr/block_annotate.h +10 -17
  250. data/src/core/lib/iomgr/call_combiner.cc +14 -17
  251. data/src/core/lib/iomgr/call_combiner.h +16 -34
  252. data/src/core/lib/iomgr/closure.h +24 -37
  253. data/src/core/lib/iomgr/combiner.cc +62 -66
  254. data/src/core/lib/iomgr/combiner.h +6 -16
  255. data/src/core/lib/iomgr/endpoint.cc +15 -21
  256. data/src/core/lib/iomgr/endpoint.h +16 -33
  257. data/src/core/lib/iomgr/endpoint_pair.h +0 -8
  258. data/src/core/lib/iomgr/endpoint_pair_posix.cc +4 -5
  259. data/src/core/lib/iomgr/endpoint_pair_windows.cc +4 -6
  260. data/src/core/lib/iomgr/error.cc +2 -6
  261. data/src/core/lib/iomgr/error.h +4 -9
  262. data/src/core/lib/iomgr/error_internal.h +0 -8
  263. data/src/core/lib/iomgr/ev_epoll1_linux.cc +110 -117
  264. data/src/core/lib/iomgr/ev_epoll1_linux.h +0 -8
  265. data/src/core/lib/iomgr/ev_epollex_linux.cc +111 -141
  266. data/src/core/lib/iomgr/ev_epollex_linux.h +0 -8
  267. data/src/core/lib/iomgr/ev_epollsig_linux.cc +83 -109
  268. data/src/core/lib/iomgr/ev_epollsig_linux.h +2 -10
  269. data/src/core/lib/iomgr/ev_poll_posix.cc +103 -125
  270. data/src/core/lib/iomgr/ev_poll_posix.h +0 -8
  271. data/src/core/lib/iomgr/ev_posix.cc +35 -50
  272. data/src/core/lib/iomgr/ev_posix.h +27 -53
  273. data/src/core/lib/iomgr/exec_ctx.cc +46 -78
  274. data/src/core/lib/iomgr/exec_ctx.h +127 -60
  275. data/src/core/lib/iomgr/executor.cc +34 -38
  276. data/src/core/lib/iomgr/executor.h +3 -11
  277. data/src/core/lib/iomgr/fork_posix.cc +13 -12
  278. data/src/core/lib/iomgr/gethostname.h +0 -8
  279. data/src/core/lib/iomgr/gethostname_sysconf.cc +1 -1
  280. data/src/core/lib/iomgr/iocp_windows.cc +14 -16
  281. data/src/core/lib/iomgr/iocp_windows.h +1 -10
  282. data/src/core/lib/iomgr/iomgr.cc +60 -59
  283. data/src/core/lib/iomgr/iomgr.h +3 -12
  284. data/src/core/lib/iomgr/iomgr_internal.h +0 -8
  285. data/src/core/lib/iomgr/iomgr_uv.cc +2 -3
  286. data/src/core/lib/iomgr/iomgr_uv.h +0 -8
  287. data/src/core/lib/iomgr/is_epollexclusive_available.cc +1 -1
  288. data/src/core/lib/iomgr/load_file.cc +1 -1
  289. data/src/core/lib/iomgr/load_file.h +0 -8
  290. data/src/core/lib/iomgr/lockfree_event.cc +7 -8
  291. data/src/core/lib/iomgr/lockfree_event.h +3 -3
  292. data/src/core/lib/iomgr/polling_entity.cc +6 -10
  293. data/src/core/lib/iomgr/polling_entity.h +2 -11
  294. data/src/core/lib/iomgr/pollset.h +4 -13
  295. data/src/core/lib/iomgr/pollset_set.h +5 -18
  296. data/src/core/lib/iomgr/pollset_set_uv.cc +5 -10
  297. data/src/core/lib/iomgr/pollset_set_windows.cc +5 -10
  298. data/src/core/lib/iomgr/pollset_uv.cc +8 -9
  299. data/src/core/lib/iomgr/pollset_uv.h +0 -8
  300. data/src/core/lib/iomgr/pollset_windows.cc +14 -15
  301. data/src/core/lib/iomgr/pollset_windows.h +0 -8
  302. data/src/core/lib/iomgr/port.h +6 -1
  303. data/src/core/lib/iomgr/resolve_address.h +1 -10
  304. data/src/core/lib/iomgr/resolve_address_posix.cc +10 -12
  305. data/src/core/lib/iomgr/resolve_address_uv.cc +7 -8
  306. data/src/core/lib/iomgr/resolve_address_windows.cc +8 -9
  307. data/src/core/lib/iomgr/resource_quota.cc +77 -107
  308. data/src/core/lib/iomgr/resource_quota.h +8 -25
  309. data/src/core/lib/iomgr/sockaddr_utils.cc +1 -1
  310. data/src/core/lib/iomgr/sockaddr_utils.h +0 -8
  311. data/src/core/lib/iomgr/socket_factory_posix.cc +1 -1
  312. data/src/core/lib/iomgr/socket_factory_posix.h +0 -8
  313. data/src/core/lib/iomgr/socket_mutator.cc +1 -1
  314. data/src/core/lib/iomgr/socket_mutator.h +1 -9
  315. data/src/core/lib/iomgr/socket_utils.h +0 -8
  316. data/src/core/lib/iomgr/socket_utils_common_posix.cc +1 -1
  317. data/src/core/lib/iomgr/socket_utils_posix.h +0 -8
  318. data/src/core/lib/iomgr/socket_windows.cc +8 -11
  319. data/src/core/lib/iomgr/socket_windows.h +3 -14
  320. data/src/core/lib/iomgr/tcp_client.h +1 -10
  321. data/src/core/lib/iomgr/tcp_client_posix.cc +94 -78
  322. data/src/core/lib/iomgr/tcp_client_posix.h +36 -8
  323. data/src/core/lib/iomgr/tcp_client_uv.cc +16 -23
  324. data/src/core/lib/iomgr/tcp_client_windows.cc +22 -25
  325. data/src/core/lib/iomgr/tcp_posix.cc +131 -153
  326. data/src/core/lib/iomgr/tcp_posix.h +3 -12
  327. data/src/core/lib/iomgr/tcp_server.h +6 -17
  328. data/src/core/lib/iomgr/tcp_server_posix.cc +31 -35
  329. data/src/core/lib/iomgr/tcp_server_utils_posix.h +0 -8
  330. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +1 -1
  331. data/src/core/lib/iomgr/tcp_server_uv.cc +23 -34
  332. data/src/core/lib/iomgr/tcp_server_windows.cc +24 -34
  333. data/src/core/lib/iomgr/tcp_uv.cc +42 -56
  334. data/src/core/lib/iomgr/tcp_uv.h +0 -8
  335. data/src/core/lib/iomgr/tcp_windows.cc +43 -50
  336. data/src/core/lib/iomgr/tcp_windows.h +1 -9
  337. data/src/core/lib/iomgr/time_averaged_stats.h +0 -8
  338. data/src/core/lib/iomgr/timer.h +6 -15
  339. data/src/core/lib/iomgr/timer_generic.cc +22 -27
  340. data/src/core/lib/iomgr/timer_heap.h +0 -8
  341. data/src/core/lib/iomgr/timer_manager.cc +17 -19
  342. data/src/core/lib/iomgr/timer_manager.h +0 -8
  343. data/src/core/lib/iomgr/timer_uv.cc +12 -14
  344. data/src/core/lib/iomgr/udp_server.cc +148 -54
  345. data/src/core/lib/iomgr/udp_server.h +16 -21
  346. data/src/core/lib/iomgr/unix_sockets_posix.h +0 -8
  347. data/src/core/lib/iomgr/wakeup_fd_cv.cc +4 -4
  348. data/src/core/lib/iomgr/wakeup_fd_cv.h +12 -20
  349. data/src/core/lib/iomgr/wakeup_fd_nospecial.cc +1 -1
  350. data/src/core/lib/iomgr/wakeup_fd_pipe.h +0 -8
  351. data/src/core/lib/iomgr/wakeup_fd_posix.h +0 -8
  352. data/src/core/lib/json/json.h +0 -8
  353. data/src/core/lib/json/json_reader.h +0 -8
  354. data/src/core/lib/json/json_writer.h +0 -8
  355. data/src/core/lib/profiling/basic_timers.cc +3 -2
  356. data/src/core/lib/profiling/timers.h +0 -8
  357. data/src/core/lib/security/context/security_context.cc +9 -10
  358. data/src/core/lib/security/context/security_context.h +0 -8
  359. data/src/core/lib/security/credentials/composite/composite_credentials.cc +23 -28
  360. data/src/core/lib/security/credentials/composite/composite_credentials.h +0 -8
  361. data/src/core/lib/security/credentials/credentials.cc +33 -42
  362. data/src/core/lib/security/credentials/credentials.h +24 -43
  363. data/src/core/lib/security/credentials/credentials_metadata.cc +2 -2
  364. data/src/core/lib/security/credentials/fake/fake_credentials.cc +16 -22
  365. data/src/core/lib/security/credentials/fake/fake_credentials.h +0 -8
  366. data/src/core/lib/security/credentials/google_default/credentials_generic.cc +3 -3
  367. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +28 -34
  368. data/src/core/lib/security/credentials/google_default/google_default_credentials.h +0 -8
  369. data/src/core/lib/security/credentials/iam/iam_credentials.cc +9 -13
  370. data/src/core/lib/security/credentials/jwt/json_token.cc +1 -1
  371. data/src/core/lib/security/credentials/jwt/json_token.h +0 -8
  372. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +14 -20
  373. data/src/core/lib/security/credentials/jwt/jwt_credentials.h +1 -10
  374. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +56 -72
  375. data/src/core/lib/security/credentials/jwt/jwt_verifier.h +5 -17
  376. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +47 -55
  377. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.h +3 -12
  378. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +23 -28
  379. data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +8 -13
  380. data/src/core/lib/security/credentials/ssl/ssl_credentials.h +0 -8
  381. data/src/core/lib/security/transport/auth_filters.h +0 -8
  382. data/src/core/lib/security/transport/client_auth_filter.cc +45 -54
  383. data/src/core/lib/security/transport/lb_targets_info.cc +2 -2
  384. data/src/core/lib/security/transport/lb_targets_info.h +0 -8
  385. data/src/core/lib/security/transport/secure_endpoint.cc +54 -68
  386. data/src/core/lib/security/transport/secure_endpoint.h +0 -8
  387. data/src/core/lib/security/transport/security_connector.cc +62 -86
  388. data/src/core/lib/security/transport/security_connector.h +22 -39
  389. data/src/core/lib/security/transport/security_handshaker.cc +83 -106
  390. data/src/core/lib/security/transport/security_handshaker.h +1 -10
  391. data/src/core/lib/security/transport/server_auth_filter.cc +31 -38
  392. data/src/core/lib/security/transport/tsi_error.h +0 -8
  393. data/src/core/lib/security/util/json_util.h +0 -8
  394. data/src/core/lib/slice/b64.cc +5 -6
  395. data/src/core/lib/slice/b64.h +3 -12
  396. data/src/core/lib/slice/percent_encoding.h +0 -8
  397. data/src/core/lib/slice/slice.cc +8 -9
  398. data/src/core/lib/slice/slice_buffer.cc +11 -16
  399. data/src/core/lib/slice/slice_hash_table.cc +5 -7
  400. data/src/core/lib/slice/slice_hash_table.h +2 -12
  401. data/src/core/lib/slice/slice_intern.cc +4 -5
  402. data/src/core/lib/slice/slice_internal.h +4 -15
  403. data/src/core/lib/slice/slice_string_helpers.cc +1 -1
  404. data/src/core/lib/slice/slice_string_helpers.h +1 -9
  405. data/src/core/lib/surface/alarm.cc +11 -14
  406. data/src/core/lib/surface/alarm_internal.h +0 -8
  407. data/src/core/lib/surface/byte_buffer.cc +2 -3
  408. data/src/core/lib/surface/byte_buffer_reader.cc +7 -9
  409. data/src/core/lib/surface/call.cc +198 -241
  410. data/src/core/lib/surface/call.h +9 -23
  411. data/src/core/lib/surface/call_details.cc +3 -4
  412. data/src/core/lib/surface/call_log_batch.cc +1 -1
  413. data/src/core/lib/surface/call_test_only.h +0 -8
  414. data/src/core/lib/surface/channel.cc +53 -64
  415. data/src/core/lib/surface/channel.h +12 -23
  416. data/src/core/lib/surface/channel_init.cc +2 -3
  417. data/src/core/lib/surface/channel_init.h +2 -12
  418. data/src/core/lib/surface/channel_ping.cc +7 -9
  419. data/src/core/lib/surface/channel_stack_type.h +0 -8
  420. data/src/core/lib/surface/completion_queue.cc +158 -176
  421. data/src/core/lib/surface/completion_queue.h +9 -20
  422. data/src/core/lib/surface/completion_queue_factory.h +0 -8
  423. data/src/core/lib/surface/event_string.cc +1 -1
  424. data/src/core/lib/surface/event_string.h +0 -8
  425. data/src/core/lib/surface/init.cc +27 -25
  426. data/src/core/lib/surface/init.h +0 -8
  427. data/src/core/lib/surface/init_secure.cc +2 -2
  428. data/src/core/lib/surface/lame_client.cc +30 -33
  429. data/src/core/lib/surface/lame_client.h +0 -8
  430. data/src/core/lib/surface/server.cc +151 -203
  431. data/src/core/lib/surface/server.h +7 -16
  432. data/src/core/lib/surface/validate_metadata.h +0 -8
  433. data/src/core/lib/surface/version.cc +2 -2
  434. data/src/core/lib/transport/bdp_estimator.cc +2 -2
  435. data/src/core/lib/transport/bdp_estimator.h +1 -1
  436. data/src/core/lib/transport/byte_stream.cc +24 -38
  437. data/src/core/lib/transport/byte_stream.h +10 -25
  438. data/src/core/lib/transport/connectivity_state.cc +9 -13
  439. data/src/core/lib/transport/connectivity_state.h +4 -14
  440. data/src/core/lib/transport/error_utils.cc +6 -6
  441. data/src/core/lib/transport/error_utils.h +2 -11
  442. data/src/core/lib/transport/metadata.cc +21 -23
  443. data/src/core/lib/transport/metadata.h +8 -20
  444. data/src/core/lib/transport/metadata_batch.cc +34 -45
  445. data/src/core/lib/transport/metadata_batch.h +18 -32
  446. data/src/core/lib/transport/service_config.cc +11 -15
  447. data/src/core/lib/transport/service_config.h +3 -13
  448. data/src/core/lib/transport/static_metadata.cc +1 -1
  449. data/src/core/lib/transport/static_metadata.h +1 -7
  450. data/src/core/lib/transport/status_conversion.cc +2 -3
  451. data/src/core/lib/transport/status_conversion.h +1 -10
  452. data/src/core/lib/transport/timeout_encoding.cc +1 -1
  453. data/src/core/lib/transport/timeout_encoding.h +1 -9
  454. data/src/core/lib/transport/transport.cc +36 -50
  455. data/src/core/lib/transport/transport.h +28 -30
  456. data/src/core/lib/transport/transport_impl.h +12 -23
  457. data/src/core/lib/transport/transport_op_string.cc +2 -2
  458. data/src/core/plugin_registry/grpc_plugin_registry.cc +34 -34
  459. data/src/core/tsi/fake_transport_security.cc +7 -10
  460. data/src/core/tsi/fake_transport_security.h +0 -8
  461. data/src/core/tsi/gts_transport_security.cc +2 -2
  462. data/src/core/tsi/gts_transport_security.h +0 -8
  463. data/src/core/tsi/ssl_transport_security.cc +3 -0
  464. data/src/core/tsi/ssl_transport_security.h +0 -8
  465. data/src/core/tsi/ssl_types.h +0 -8
  466. data/src/core/tsi/transport_security.h +1 -9
  467. data/src/core/tsi/transport_security_adapter.h +0 -8
  468. data/src/core/tsi/transport_security_grpc.cc +11 -18
  469. data/src/core/tsi/transport_security_grpc.h +9 -21
  470. data/src/core/tsi/transport_security_interface.h +0 -8
  471. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +0 -30
  472. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +2 -48
  473. data/src/ruby/lib/grpc/version.rb +1 -1
  474. data/src/ruby/spec/channel_connection_spec.rb +2 -1
  475. data/src/ruby/spec/client_auth_spec.rb +1 -1
  476. data/src/ruby/spec/client_server_spec.rb +2 -2
  477. data/src/ruby/spec/generic/active_call_spec.rb +1 -1
  478. data/src/ruby/spec/generic/client_stub_spec.rb +4 -4
  479. data/src/ruby/spec/generic/interceptor_registry_spec.rb +1 -1
  480. data/src/ruby/spec/generic/rpc_server_spec.rb +12 -12
  481. data/src/ruby/spec/google_rpc_status_utils_spec.rb +3 -2
  482. data/src/ruby/spec/pb/health/checker_spec.rb +1 -1
  483. data/src/ruby/spec/server_spec.rb +9 -9
  484. data/src/ruby/spec/support/helpers.rb +35 -1
  485. metadata +68 -66
  486. data/include/grpc/impl/codegen/exec_ctx_fwd.h +0 -26
  487. data/include/grpc/support/histogram.h +0 -64
  488. data/src/core/lib/support/histogram.cc +0 -227
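Most of the churn in this release comes from dropping the grpc_exec_ctx* parameter that used to be threaded through every core callback; the execution context is now picked up implicitly. The hunks below, starting with the client-load-reporting filter, show that change applied mechanically. As a rough illustration, a no-op filter written against the new callback signatures would look like the sketch below; the signatures are taken from the diff itself, while the empty bodies and the idea of a standalone filter are only an assumption for illustration, not code from this release.

// Sketch only: 1.9.0-style channel filter callbacks, without grpc_exec_ctx*.
// Signatures match the ones appearing in the hunks below; bodies are no-ops.
#include "src/core/lib/channel/channel_stack.h"

static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                     grpc_channel_element_args* args) {
  return GRPC_ERROR_NONE;  // nothing to initialize in this sketch
}

static void destroy_channel_elem(grpc_channel_element* elem) {}

static grpc_error* init_call_elem(grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  return GRPC_ERROR_NONE;
}

static void destroy_call_elem(grpc_call_element* elem,
                              const grpc_call_final_info* final_info,
                              grpc_closure* ignored) {}

static void start_transport_stream_op_batch(
    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  grpc_call_next_op(elem, batch);  // no exec_ctx argument any more
}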
data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -25,16 +25,15 @@
  #include "src/core/lib/iomgr/error.h"
  #include "src/core/lib/profiling/timers.h"
 
- static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem,
+ static grpc_error* init_channel_elem(grpc_channel_element* elem,
  grpc_channel_element_args* args) {
  return GRPC_ERROR_NONE;
  }
 
- static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
- grpc_channel_element* elem) {}
+ static void destroy_channel_elem(grpc_channel_element* elem) {}
 
- typedef struct {
+ namespace {
+ struct call_data {
  // Stats object to update.
  grpc_grpclb_client_stats* client_stats;
  // State for intercepting send_initial_metadata.
@@ -45,30 +44,27 @@ typedef struct {
  grpc_closure recv_initial_metadata_ready;
  grpc_closure* original_recv_initial_metadata_ready;
  bool recv_initial_metadata_succeeded;
- } call_data;
+ };
+ } // namespace
 
- static void on_complete_for_send(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
+ static void on_complete_for_send(void* arg, grpc_error* error) {
  call_data* calld = (call_data*)arg;
  if (error == GRPC_ERROR_NONE) {
  calld->send_initial_metadata_succeeded = true;
  }
- GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete_for_send,
- GRPC_ERROR_REF(error));
+ GRPC_CLOSURE_RUN(calld->original_on_complete_for_send, GRPC_ERROR_REF(error));
  }
 
- static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
+ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
  call_data* calld = (call_data*)arg;
  if (error == GRPC_ERROR_NONE) {
  calld->recv_initial_metadata_succeeded = true;
  }
- GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready,
+ GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready,
  GRPC_ERROR_REF(error));
  }
 
- static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
- grpc_call_element* elem,
+ static grpc_error* init_call_elem(grpc_call_element* elem,
  const grpc_call_element_args* args) {
  call_data* calld = (call_data*)elem->call_data;
  // Get stats object from context and take a ref.
@@ -81,7 +77,7 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
  return GRPC_ERROR_NONE;
  }
 
- static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+ static void destroy_call_elem(grpc_call_element* elem,
  const grpc_call_final_info* final_info,
  grpc_closure* ignored) {
  call_data* calld = (call_data*)elem->call_data;
@@ -96,8 +92,7 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
  }
 
  static void start_transport_stream_op_batch(
- grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
- grpc_transport_stream_op_batch* batch) {
+ grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
  call_data* calld = (call_data*)elem->call_data;
  GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
  // Intercept send_initial_metadata.
@@ -118,7 +113,7 @@ static void start_transport_stream_op_batch(
  &calld->recv_initial_metadata_ready;
  }
  // Chain to next filter.
- grpc_call_next_op(exec_ctx, elem, batch);
+ grpc_call_next_op(elem, batch);
  GPR_TIMER_END("clr_start_transport_stream_op_batch", 0);
  }
 
data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -21,15 +21,7 @@
 
  #include "src/core/lib/channel/channel_stack.h"
 
- #ifdef __cplusplus
- extern "C" {
- #endif
-
  extern const grpc_channel_filter grpc_client_load_reporting_filter;
 
- #ifdef __cplusplus
- }
- #endif
-
  #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
  */
data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -54,7 +54,7 @@
  * operations in progress over the old RR instance. This is done by
  * decreasing the reference count on the old policy. The moment no more
  * references are held on the old RR policy, it'll be destroyed and \a
- * glb_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
+ * on_rr_connectivity_changed notified with a \a GRPC_CHANNEL_SHUTDOWN
  * state. At this point we can transition to a new RR instance safely, which
  * is done once again via \a rr_handover_locked().
  *
@@ -106,6 +106,8 @@
  #include "src/core/lib/backoff/backoff.h"
  #include "src/core/lib/channel/channel_args.h"
  #include "src/core/lib/channel/channel_stack.h"
+ #include "src/core/lib/gpr++/manual_constructor.h"
+ #include "src/core/lib/gpr/string.h"
  #include "src/core/lib/iomgr/combiner.h"
  #include "src/core/lib/iomgr/sockaddr.h"
  #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -113,13 +115,11 @@
  #include "src/core/lib/slice/slice_hash_table.h"
  #include "src/core/lib/slice/slice_internal.h"
  #include "src/core/lib/slice/slice_string_helpers.h"
- #include "src/core/lib/support/string.h"
  #include "src/core/lib/surface/call.h"
  #include "src/core/lib/surface/channel.h"
  #include "src/core/lib/surface/channel_init.h"
  #include "src/core/lib/transport/static_metadata.h"
 
- #define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
  #define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
  #define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
  #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
@@ -128,174 +128,48 @@
 
  grpc_core::TraceFlag grpc_lb_glb_trace(false, "glb");
 
- /* add lb_token of selected subchannel (address) to the call's initial
- * metadata */
- static grpc_error* initial_metadata_add_lb_token(
- grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
- grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
- GPR_ASSERT(lb_token_mdelem_storage != nullptr);
- GPR_ASSERT(!GRPC_MDISNULL(lb_token));
- return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
- lb_token_mdelem_storage, lb_token);
- }
-
- static void destroy_client_stats(void* arg) {
- grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
- }
-
- typedef struct wrapped_rr_closure_arg {
- /* the closure instance using this struct as argument */
- grpc_closure wrapper_closure;
-
- /* the original closure. Usually a on_complete/notify cb for pick() and ping()
- * calls against the internal RR instance, respectively. */
- grpc_closure* wrapped_closure;
-
- /* the pick's initial metadata, kept in order to append the LB token for the
- * pick */
- grpc_metadata_batch* initial_metadata;
-
- /* the picked target, used to determine which LB token to add to the pick's
- * initial metadata */
- grpc_connected_subchannel** target;
-
- /* the context to be populated for the subchannel call */
- grpc_call_context_element* context;
-
- /* Stats for client-side load reporting. Note that this holds a
- * reference, which must be either passed on via context or unreffed. */
+ struct glb_lb_policy;
+
+ namespace {
+
+ /// Linked list of pending pick requests. It stores all information needed to
+ /// eventually call (Round Robin's) pick() on them. They mainly stay pending
+ /// waiting for the RR policy to be created.
+ ///
+ /// Note that when a pick is sent to the RR policy, we inject our own
+ /// on_complete callback, so that we can intercept the result before
+ /// invoking the original on_complete callback. This allows us to set the
+ /// LB token metadata and add client_stats to the call context.
+ /// See \a pending_pick_complete() for details.
+ struct pending_pick {
+ // Our on_complete closure and the original one.
+ grpc_closure on_complete;
+ grpc_closure* original_on_complete;
+ // The original pick.
+ grpc_lb_policy_pick_state* pick;
+ // Stats for client-side load reporting. Note that this holds a
+ // reference, which must be either passed on via context or unreffed.
  grpc_grpclb_client_stats* client_stats;
-
- /* the LB token associated with the pick */
+ // The LB token associated with the pick. This is set via user_data in
+ // the pick.
  grpc_mdelem lb_token;
-
- /* storage for the lb token initial metadata mdelem */
- grpc_linked_mdelem* lb_token_mdelem_storage;
-
- /* The RR instance related to the closure */
- grpc_lb_policy* rr_policy;
-
- /* The grpclb instance that created the wrapping. This instance is not owned,
- * reference counts are untouched. It's used only for logging purposes. */
- grpc_lb_policy* glb_policy;
-
- /* heap memory to be freed upon closure execution. */
- void* free_when_done;
- } wrapped_rr_closure_arg;
-
- /* The \a on_complete closure passed as part of the pick requires keeping a
- * reference to its associated round robin instance. We wrap this closure in
- * order to unref the round robin instance upon its invocation */
- static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
- grpc_error* error) {
- wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
-
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
-
- if (wc_arg->rr_policy != nullptr) {
- /* if *target is NULL, no pick has been made by the RR policy (eg, all
- * addresses failed to connect). There won't be any user_data/token
- * available */
- if (*wc_arg->target != nullptr) {
- if (!GRPC_MDISNULL(wc_arg->lb_token)) {
- initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
- wc_arg->lb_token_mdelem_storage,
- GRPC_MDELEM_REF(wc_arg->lb_token));
- } else {
- gpr_log(
- GPR_ERROR,
- "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
- "instance %p).",
- wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
- abort();
- }
- // Pass on client stats via context. Passes ownership of the reference.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
- } else {
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
- }
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
- }
- GPR_ASSERT(wc_arg->free_when_done != nullptr);
- gpr_free(wc_arg->free_when_done);
- }
-
- /* Linked list of pending pick requests. It stores all information needed to
- * eventually call (Round Robin's) pick() on them. They mainly stay pending
- * waiting for the RR policy to be created/updated.
- *
- * One particularity is the wrapping of the user-provided \a on_complete closure
- * (in \a wrapped_on_complete and \a wrapped_on_complete_arg). This is needed in
- * order to correctly unref the RR policy instance upon completion of the pick.
- * See \a wrapped_rr_closure for details. */
- typedef struct pending_pick {
+ // The grpclb instance that created the wrapping. This instance is not owned,
+ // reference counts are untouched. It's used only for logging purposes.
+ glb_lb_policy* glb_policy;
+ // Next pending pick.
  struct pending_pick* next;
+ };
 
- /* original pick()'s arguments */
- grpc_lb_policy_pick_args pick_args;
-
- /* output argument where to store the pick()ed connected subchannel, or NULL
- * upon error. */
- grpc_connected_subchannel** target;
-
- /* args for wrapped_on_complete */
- wrapped_rr_closure_arg wrapped_on_complete_arg;
- } pending_pick;
-
- static void add_pending_pick(pending_pick** root,
- const grpc_lb_policy_pick_args* pick_args,
- grpc_connected_subchannel** target,
- grpc_call_context_element* context,
- grpc_closure* on_complete) {
- pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
- pp->next = *root;
- pp->pick_args = *pick_args;
- pp->target = target;
- pp->wrapped_on_complete_arg.wrapped_closure = on_complete;
- pp->wrapped_on_complete_arg.target = target;
- pp->wrapped_on_complete_arg.context = context;
- pp->wrapped_on_complete_arg.initial_metadata = pick_args->initial_metadata;
- pp->wrapped_on_complete_arg.lb_token_mdelem_storage =
- pick_args->lb_token_mdelem_storage;
- pp->wrapped_on_complete_arg.free_when_done = pp;
- GRPC_CLOSURE_INIT(&pp->wrapped_on_complete_arg.wrapper_closure,
- wrapped_rr_closure, &pp->wrapped_on_complete_arg,
- grpc_schedule_on_exec_ctx);
- *root = pp;
- }
-
- /* Same as the \a pending_pick struct but for ping operations */
- typedef struct pending_ping {
+ /// A linked list of pending pings waiting for the RR policy to be created.
+ struct pending_ping {
+ grpc_closure* on_initiate;
+ grpc_closure* on_ack;
  struct pending_ping* next;
+ };
 
- /* args for wrapped_notify */
- wrapped_rr_closure_arg wrapped_notify_arg;
- } pending_ping;
-
- static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
- pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
- pping->wrapped_notify_arg.wrapped_closure = notify;
- pping->wrapped_notify_arg.free_when_done = pping;
- pping->next = *root;
- GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
- wrapped_rr_closure, &pping->wrapped_notify_arg,
- grpc_schedule_on_exec_ctx);
- *root = pping;
- }
-
- /*
- * glb_lb_policy
- */
- typedef struct rr_connectivity_data rr_connectivity_data;
+ } // namespace
 
- typedef struct glb_lb_policy {
+ struct glb_lb_policy {
  /** base policy: must be first */
  grpc_lb_policy base;
 
@@ -320,6 +194,9 @@ typedef struct glb_lb_policy {
  /** the RR policy to use of the backend servers returned by the LB server */
  grpc_lb_policy* rr_policy;
 
+ grpc_closure on_rr_connectivity_changed;
+ grpc_connectivity_state rr_connectivity_state;
+
  bool started_picking;
 
  /** our connectivity state tracker */
@@ -328,8 +205,8 @@ typedef struct glb_lb_policy {
  /** connectivity state of the LB channel */
  grpc_connectivity_state lb_channel_connectivity;
 
- /** stores the deserialized response from the LB. May be NULL until one such
- * response has arrived. */
+ /** stores the deserialized response from the LB. May be nullptr until one
+ * such response has arrived. */
  grpc_grpclb_serverlist* serverlist;
 
  /** Index into serverlist for next pick.
@@ -354,11 +231,11 @@ typedef struct glb_lb_policy {
  /** are we already watching the LB channel's connectivity? */
  bool watching_lb_channel;
 
- /** is \a lb_call_retry_timer active? */
- bool retry_timer_active;
+ /** is the callback associated with \a lb_call_retry_timer pending? */
+ bool retry_timer_callback_pending;
 
- /** is \a lb_fallback_timer active? */
- bool fallback_timer_active;
+ /** is the callback associated with \a lb_fallback_timer pending? */
+ bool fallback_timer_callback_pending;
 
  /** called upon changes to the LB channel's connectivity. */
  grpc_closure lb_channel_on_connectivity_changed;
@@ -366,6 +243,9 @@ typedef struct glb_lb_policy {
  /************************************************************/
  /* client data associated with the LB server communication */
  /************************************************************/
+ /* Finished sending initial request. */
+ grpc_closure lb_on_sent_initial_request;
+
  /* Status from the LB server has been received. This signals the end of the LB
  * call. */
  grpc_closure lb_on_server_status_received;
@@ -397,7 +277,7 @@ typedef struct glb_lb_policy {
  grpc_slice lb_call_status_details;
 
  /** LB call retry backoff state */
- grpc_backoff lb_call_backoff_state;
+ grpc_core::ManualConstructor<grpc_core::BackOff> lb_call_backoff;
 
  /** LB call retry timer */
  grpc_timer lb_call_retry_timer;
@@ -405,6 +285,7 @@ typedef struct glb_lb_policy {
  /** LB fallback timer */
  grpc_timer lb_fallback_timer;
 
+ bool initial_request_sent;
  bool seen_initial_response;
 
  /* Stats for client-side load reporting. Should be unreffed and
@@ -413,22 +294,94 @@ typedef struct glb_lb_policy {
  /* Interval and timer for next client load report. */
  grpc_millis client_stats_report_interval;
  grpc_timer client_load_report_timer;
- bool client_load_report_timer_pending;
+ bool client_load_report_timer_callback_pending;
  bool last_client_load_report_counters_were_zero;
  /* Closure used for either the load report timer or the callback for
  * completion of sending the load report. */
  grpc_closure client_load_report_closure;
  /* Client load report message payload. */
  grpc_byte_buffer* client_load_report_payload;
- } glb_lb_policy;
-
- /* Keeps track and reacts to changes in connectivity of the RR instance */
- struct rr_connectivity_data {
- grpc_closure on_change;
- grpc_connectivity_state state;
- glb_lb_policy* glb_policy;
  };
 
+ /* add lb_token of selected subchannel (address) to the call's initial
+ * metadata */
+ static grpc_error* initial_metadata_add_lb_token(
+ grpc_metadata_batch* initial_metadata,
+ grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
+ GPR_ASSERT(lb_token_mdelem_storage != nullptr);
+ GPR_ASSERT(!GRPC_MDISNULL(lb_token));
+ return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
+ lb_token);
+ }
+
+ static void destroy_client_stats(void* arg) {
+ grpc_grpclb_client_stats_unref((grpc_grpclb_client_stats*)arg);
+ }
+
+ static void pending_pick_set_metadata_and_context(pending_pick* pp) {
+ /* if connected_subchannel is nullptr, no pick has been made by the RR
+ * policy (e.g., all addresses failed to connect). There won't be any
+ * user_data/token available */
+ if (pp->pick->connected_subchannel != nullptr) {
+ if (!GRPC_MDISNULL(pp->lb_token)) {
+ initial_metadata_add_lb_token(pp->pick->initial_metadata,
+ &pp->pick->lb_token_mdelem_storage,
+ GRPC_MDELEM_REF(pp->lb_token));
+ } else {
+ gpr_log(GPR_ERROR,
+ "[grpclb %p] No LB token for connected subchannel pick %p",
+ pp->glb_policy, pp->pick);
+ abort();
+ }
+ // Pass on client stats via context. Passes ownership of the reference.
+ GPR_ASSERT(pp->client_stats != nullptr);
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
+ pp->client_stats;
+ pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
+ destroy_client_stats;
+ } else {
+ if (pp->client_stats != nullptr) {
+ grpc_grpclb_client_stats_unref(pp->client_stats);
+ }
+ }
+ }
+
+ /* The \a on_complete closure passed as part of the pick requires keeping a
+ * reference to its associated round robin instance. We wrap this closure in
+ * order to unref the round robin instance upon its invocation */
+ static void pending_pick_complete(void* arg, grpc_error* error) {
+ pending_pick* pp = (pending_pick*)arg;
+ pending_pick_set_metadata_and_context(pp);
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
+ gpr_free(pp);
+ }
+
+ static pending_pick* pending_pick_create(glb_lb_policy* glb_policy,
+ grpc_lb_policy_pick_state* pick) {
+ pending_pick* pp = (pending_pick*)gpr_zalloc(sizeof(*pp));
+ pp->pick = pick;
+ pp->glb_policy = glb_policy;
+ GRPC_CLOSURE_INIT(&pp->on_complete, pending_pick_complete, pp,
+ grpc_schedule_on_exec_ctx);
+ pp->original_on_complete = pick->on_complete;
+ pp->pick->on_complete = &pp->on_complete;
+ return pp;
+ }
+
+ static void pending_pick_add(pending_pick** root, pending_pick* new_pp) {
+ new_pp->next = *root;
+ *root = new_pp;
+ }
+
+ static void pending_ping_add(pending_ping** root, grpc_closure* on_initiate,
+ grpc_closure* on_ack) {
+ pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
+ pping->on_initiate = on_initiate;
+ pping->on_ack = on_ack;
+ pping->next = *root;
+ *root = pping;
+ }
+
  static bool is_server_valid(const grpc_grpclb_server* server, size_t idx,
  bool log) {
  if (server->drop) return false;
@@ -459,9 +412,9 @@ static void* lb_token_copy(void* token) {
  ? nullptr
  : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
  }
- static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
+ static void lb_token_destroy(void* token) {
  if (token != nullptr) {
- GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
+ GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
  }
  }
  static int lb_token_cmp(void* token1, void* token2) {
@@ -497,7 +450,7 @@ static void parse_server(const grpc_grpclb_server* server,
 
  /* Returns addresses extracted from \a serverlist. */
  static grpc_lb_addresses* process_serverlist_locked(
- grpc_exec_ctx* exec_ctx, const grpc_grpclb_serverlist* serverlist) {
+ const grpc_grpclb_serverlist* serverlist) {
  size_t num_valid = 0;
  /* first pass: count how many are valid in order to allocate the necessary
  * memory in a single block */
@@ -528,9 +481,9 @@ static grpc_lb_addresses* process_serverlist_locked(
  strnlen(server->load_balance_token, lb_token_max_length);
  grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
  server->load_balance_token, lb_token_length);
- user_data = (void*)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
- lb_token_mdstr)
- .payload;
+ user_data =
+ (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
+ .payload;
  } else {
  char* uri = grpc_sockaddr_to_uri(&addr);
  gpr_log(GPR_INFO,
@@ -540,7 +493,6 @@ static grpc_lb_addresses* process_serverlist_locked(
  gpr_free(uri);
  user_data = (void*)GRPC_MDELEM_LB_TOKEN_EMPTY.payload;
  }
-
  grpc_lb_addresses_set_address(lb_addresses, addr_idx, &addr.addr, addr.len,
  false /* is_balancer */,
  nullptr /* balancer_name */, user_data);
@@ -552,7 +504,7 @@
 
  /* Returns the backend addresses extracted from the given addresses */
  static grpc_lb_addresses* extract_backend_addresses_locked(
- grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses) {
+ const grpc_lb_addresses* addresses) {
  /* first pass: count the number of backend addresses */
  size_t num_backends = 0;
  for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -577,11 +529,10 @@ static grpc_lb_addresses* extract_backend_addresses_locked(
  }
 
  static void update_lb_connectivity_status_locked(
- grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
- grpc_connectivity_state rr_state, grpc_error* rr_state_error) {
+ glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
+ grpc_error* rr_state_error) {
  const grpc_connectivity_state curr_glb_state =
  grpc_connectivity_state_check(&glb_policy->state_tracker);
-
  /* The new connectivity status is a function of the previous one and the new
  * input coming from the status of the RR policy.
  *
@@ -611,7 +562,6 @@ static void update_lb_connectivity_status_locked(
  *
  * (*) This function mustn't be called during shutting down. */
  GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
-
  switch (rr_state) {
  case GRPC_CHANNEL_TRANSIENT_FAILURE:
  case GRPC_CHANNEL_SHUTDOWN:
@@ -622,7 +572,6 @@ static void update_lb_connectivity_status_locked(
  case GRPC_CHANNEL_READY:
  GPR_ASSERT(rr_state_error == GRPC_ERROR_NONE);
  }
-
  if (grpc_lb_glb_trace.enabled()) {
  gpr_log(
  GPR_INFO,
@@ -630,20 +579,18 @@
  glb_policy, grpc_connectivity_state_name(rr_state),
  glb_policy->rr_policy);
  }
- grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
+ grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
  rr_state_error,
  "update_lb_connectivity_status_locked");
  }
 
  /* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
  * immediately (ignoring its completion callback), we need to perform the
- * cleanups this callback would otherwise be resposible for.
+ * cleanups this callback would otherwise be responsible for.
  * If \a force_async is true, then we will manually schedule the
  * completion callback even if the pick is available immediately. */
- static bool pick_from_internal_rr_locked(
- grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
- const grpc_lb_policy_pick_args* pick_args, bool force_async,
- grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
+ static bool pick_from_internal_rr_locked(glb_lb_policy* glb_policy,
+ bool force_async, pending_pick* pp) {
  // Check for drops if we are not using fallback backend addresses.
  if (glb_policy->serverlist != nullptr) {
  // Look at the index into the serverlist to see if we should drop this call.
@@ -653,57 +600,36 @@ static bool pick_from_internal_rr_locked(
  glb_policy->serverlist_index = 0; // Wrap-around.
  }
  if (server->drop) {
- // Not using the RR policy, so unref it.
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
  // Update client load reporting stats to indicate the number of
  // dropped calls. Note that we have to do this here instead of in
  // the client_load_reporting filter, because we do not create a
  // subchannel call (and therefore no client_load_reporting filter)
  // for dropped calls.
- GPR_ASSERT(wc_arg->client_stats != nullptr);
+ GPR_ASSERT(glb_policy->client_stats != nullptr);
  grpc_grpclb_client_stats_add_call_dropped_locked(
- server->load_balance_token, wc_arg->client_stats);
- grpc_grpclb_client_stats_unref(wc_arg->client_stats);
+ server->load_balance_token, glb_policy->client_stats);
  if (force_async) {
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
- GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
- gpr_free(wc_arg->free_when_done);
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
+ gpr_free(pp);
  return false;
  }
- gpr_free(wc_arg->free_when_done);
+ gpr_free(pp);
  return true;
  }
  }
+ // Set client_stats and user_data.
+ pp->client_stats = grpc_grpclb_client_stats_ref(glb_policy->client_stats);
+ GPR_ASSERT(pp->pick->user_data == nullptr);
+ pp->pick->user_data = (void**)&pp->lb_token;
  // Pick via the RR policy.
- const bool pick_done = grpc_lb_policy_pick_locked(
- exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
- (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
+ bool pick_done = grpc_lb_policy_pick_locked(glb_policy->rr_policy, pp->pick);
  if (pick_done) {
- /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
- if (grpc_lb_glb_trace.enabled()) {
- gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
- wc_arg->rr_policy);
- }
- GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
692
- /* add the load reporting initial metadata */
693
- initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
694
- pick_args->lb_token_mdelem_storage,
695
- GRPC_MDELEM_REF(wc_arg->lb_token));
696
- // Pass on client stats via context. Passes ownership of the reference.
697
- GPR_ASSERT(wc_arg->client_stats != nullptr);
698
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].value = wc_arg->client_stats;
699
- wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
627
+ pending_pick_set_metadata_and_context(pp);
700
628
  if (force_async) {
701
- GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
702
- GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
703
- gpr_free(wc_arg->free_when_done);
704
- return false;
629
+ GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_NONE);
630
+ pick_done = false;
705
631
  }
706
- gpr_free(wc_arg->free_when_done);
632
+ gpr_free(pp);
707
633
  }
708
634
  /* else, the pending pick will be registered and taken care of by the
709
635
  * pending pick list inside the RR policy (glb_policy->rr_policy).
@@ -712,12 +638,11 @@ static bool pick_from_internal_rr_locked(
712
638
  return pick_done;
713
639
  }
714
640
 
715
- static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
716
- glb_lb_policy* glb_policy) {
641
+ static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
717
642
  grpc_lb_addresses* addresses;
718
643
  if (glb_policy->serverlist != nullptr) {
719
644
  GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
720
- addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
645
+ addresses = process_serverlist_locked(glb_policy->serverlist);
721
646
  } else {
722
647
  // If rr_handover_locked() is invoked when we haven't received any
723
648
  // serverlist from the balancer, we use the fallback backends returned by
@@ -737,24 +662,21 @@ static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
737
662
  args->args = grpc_channel_args_copy_and_add_and_remove(
738
663
  glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
739
664
  1);
740
- grpc_lb_addresses_destroy(exec_ctx, addresses);
665
+ grpc_lb_addresses_destroy(addresses);
741
666
  return args;
742
667
  }
743
668
 
744
- static void lb_policy_args_destroy(grpc_exec_ctx* exec_ctx,
745
- grpc_lb_policy_args* args) {
746
- grpc_channel_args_destroy(exec_ctx, args->args);
669
+ static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
670
+ grpc_channel_args_destroy(args->args);
747
671
  gpr_free(args);
748
672
  }
749
673
 
750
- static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
751
- void* arg, grpc_error* error);
752
- static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
674
+ static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error);
675
+ static void create_rr_locked(glb_lb_policy* glb_policy,
753
676
  grpc_lb_policy_args* args) {
754
677
  GPR_ASSERT(glb_policy->rr_policy == nullptr);
755
678
 
756
- grpc_lb_policy* new_rr_policy =
757
- grpc_lb_policy_create(exec_ctx, "round_robin", args);
679
+ grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
758
680
  if (new_rr_policy == nullptr) {
759
681
  gpr_log(GPR_ERROR,
760
682
  "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
@@ -766,125 +688,101 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
766
688
  glb_policy->rr_policy);
767
689
  return;
768
690
  }
691
+ grpc_lb_policy_set_reresolve_closure_locked(
692
+ new_rr_policy, glb_policy->base.request_reresolution);
693
+ glb_policy->base.request_reresolution = nullptr;
769
694
  glb_policy->rr_policy = new_rr_policy;
770
695
  grpc_error* rr_state_error = nullptr;
771
- const grpc_connectivity_state rr_state =
772
- grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
773
- &rr_state_error);
696
+ glb_policy->rr_connectivity_state = grpc_lb_policy_check_connectivity_locked(
697
+ glb_policy->rr_policy, &rr_state_error);
774
698
  /* Connectivity state is a function of the RR policy updated/created */
775
- update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
776
- rr_state_error);
699
+ update_lb_connectivity_status_locked(
700
+ glb_policy, glb_policy->rr_connectivity_state, rr_state_error);
777
701
  /* Add the gRPC LB's interested_parties pollset_set to that of the newly
778
702
  * created RR policy. This will make the RR policy progress upon activity on
779
703
  * gRPC LB, which in turn is tied to the application's call */
780
- grpc_pollset_set_add_pollset_set(exec_ctx,
781
- glb_policy->rr_policy->interested_parties,
704
+ grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
782
705
  glb_policy->base.interested_parties);
783
-
784
- /* Allocate the data for the tracking of the new RR policy's connectivity.
785
- * It'll be deallocated in glb_rr_connectivity_changed() */
786
- rr_connectivity_data* rr_connectivity =
787
- (rr_connectivity_data*)gpr_zalloc(sizeof(rr_connectivity_data));
788
- GRPC_CLOSURE_INIT(&rr_connectivity->on_change,
789
- glb_rr_connectivity_changed_locked, rr_connectivity,
706
+ GRPC_CLOSURE_INIT(&glb_policy->on_rr_connectivity_changed,
707
+ on_rr_connectivity_changed_locked, glb_policy,
790
708
  grpc_combiner_scheduler(glb_policy->base.combiner));
791
- rr_connectivity->glb_policy = glb_policy;
792
- rr_connectivity->state = rr_state;
793
-
794
709
  /* Subscribe to changes to the connectivity of the new RR */
795
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
796
- grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
797
- &rr_connectivity->state,
798
- &rr_connectivity->on_change);
799
- grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
800
-
801
- /* Update picks and pings in wait */
710
+ GRPC_LB_POLICY_REF(&glb_policy->base, "glb_rr_connectivity_cb");
711
+ grpc_lb_policy_notify_on_state_change_locked(
712
+ glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
713
+ &glb_policy->on_rr_connectivity_changed);
714
+ grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
715
+ // Send pending picks to RR policy.
802
716
  pending_pick* pp;
803
717
  while ((pp = glb_policy->pending_picks)) {
804
718
  glb_policy->pending_picks = pp->next;
805
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_pick");
806
- pp->wrapped_on_complete_arg.rr_policy = glb_policy->rr_policy;
807
- pp->wrapped_on_complete_arg.client_stats =
808
- grpc_grpclb_client_stats_ref(glb_policy->client_stats);
809
719
  if (grpc_lb_glb_trace.enabled()) {
810
720
  gpr_log(GPR_INFO,
811
721
  "[grpclb %p] Pending pick about to (async) PICK from RR %p",
812
722
  glb_policy, glb_policy->rr_policy);
813
723
  }
814
- pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
815
- true /* force_async */, pp->target,
816
- &pp->wrapped_on_complete_arg);
724
+ pick_from_internal_rr_locked(glb_policy, true /* force_async */, pp);
817
725
  }
818
-
726
+ // Send pending pings to RR policy.
819
727
  pending_ping* pping;
820
728
  while ((pping = glb_policy->pending_pings)) {
821
729
  glb_policy->pending_pings = pping->next;
822
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
823
- pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
824
730
  if (grpc_lb_glb_trace.enabled()) {
825
731
  gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
826
732
  glb_policy, glb_policy->rr_policy);
827
733
  }
828
- grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
829
- &pping->wrapped_notify_arg.wrapper_closure);
734
+ grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, pping->on_initiate,
735
+ pping->on_ack);
736
+ gpr_free(pping);
830
737
  }
831
738
  }
832
739
 
833
- /* glb_policy->rr_policy may be NULL (initial handover) */
834
- static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
835
- glb_lb_policy* glb_policy) {
740
+ /* glb_policy->rr_policy may be nullptr (initial handover) */
741
+ static void rr_handover_locked(glb_lb_policy* glb_policy) {
836
742
  if (glb_policy->shutting_down) return;
837
- grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
743
+ grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
838
744
  GPR_ASSERT(args != nullptr);
839
745
  if (glb_policy->rr_policy != nullptr) {
840
746
  if (grpc_lb_glb_trace.enabled()) {
841
747
  gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
842
748
  glb_policy->rr_policy);
843
749
  }
844
- grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
750
+ grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
845
751
  } else {
846
- create_rr_locked(exec_ctx, glb_policy, args);
752
+ create_rr_locked(glb_policy, args);
847
753
  if (grpc_lb_glb_trace.enabled()) {
848
754
  gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
849
755
  glb_policy->rr_policy);
850
756
  }
851
757
  }
852
- lb_policy_args_destroy(exec_ctx, args);
758
+ lb_policy_args_destroy(args);
853
759
  }
854
760
 
855
- static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
856
- void* arg, grpc_error* error) {
857
- rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
858
- glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
761
+ static void on_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
762
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
859
763
  if (glb_policy->shutting_down) {
860
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
861
- "glb_rr_connectivity_cb");
862
- gpr_free(rr_connectivity);
764
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
863
765
  return;
864
766
  }
865
- if (rr_connectivity->state == GRPC_CHANNEL_SHUTDOWN) {
767
+ if (glb_policy->rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
866
768
  /* An RR policy that has transitioned into the SHUTDOWN connectivity state
867
769
  * should not be considered for picks or updates: the SHUTDOWN state is a
868
770
  * sink, policies can't transition back from it. .*/
869
- GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
870
- "rr_connectivity_shutdown");
771
+ GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
871
772
  glb_policy->rr_policy = nullptr;
872
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
873
- "glb_rr_connectivity_cb");
874
- gpr_free(rr_connectivity);
773
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
875
774
  return;
876
775
  }
877
776
  /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
878
777
  update_lb_connectivity_status_locked(
879
- exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
880
- /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
881
- grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
882
- &rr_connectivity->state,
883
- &rr_connectivity->on_change);
778
+ glb_policy, glb_policy->rr_connectivity_state, GRPC_ERROR_REF(error));
779
+ /* Resubscribe. Reuse the "glb_rr_connectivity_cb" ref. */
780
+ grpc_lb_policy_notify_on_state_change_locked(
781
+ glb_policy->rr_policy, &glb_policy->rr_connectivity_state,
782
+ &glb_policy->on_rr_connectivity_changed);
884
783
  }
885
784
 
886
- static void destroy_balancer_name(grpc_exec_ctx* exec_ctx,
887
- void* balancer_name) {
785
+ static void destroy_balancer_name(void* balancer_name) {
888
786
  gpr_free(balancer_name);
889
787
  }
890
788
 
@@ -911,7 +809,7 @@ static int balancer_name_cmp_fn(void* a, void* b) {
911
809
  * above the grpclb policy.
912
810
  * - \a args: other args inherited from the grpclb policy. */
913
811
  static grpc_channel_args* build_lb_channel_args(
914
- grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses,
812
+ const grpc_lb_addresses* addresses,
915
813
  grpc_fake_resolver_response_generator* response_generator,
916
814
  const grpc_channel_args* args) {
917
815
  size_t num_grpclb_addrs = 0;
@@ -954,7 +852,7 @@ static grpc_channel_args* build_lb_channel_args(
954
852
  gpr_free(targets_info_entries);
955
853
 
956
854
  grpc_channel_args* lb_channel_args =
957
- grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
855
+ grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
958
856
  response_generator, args);
959
857
 
960
858
  grpc_arg lb_channel_addresses_arg =
@@ -962,65 +860,57 @@ static grpc_channel_args* build_lb_channel_args(
962
860
 
963
861
  grpc_channel_args* result = grpc_channel_args_copy_and_add(
964
862
  lb_channel_args, &lb_channel_addresses_arg, 1);
965
- grpc_slice_hash_table_unref(exec_ctx, targets_info);
966
- grpc_channel_args_destroy(exec_ctx, lb_channel_args);
967
- grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
863
+ grpc_slice_hash_table_unref(targets_info);
864
+ grpc_channel_args_destroy(lb_channel_args);
865
+ grpc_lb_addresses_destroy(lb_addresses);
968
866
  return result;
969
867
  }
970
868
 
971
- static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
869
+ static void glb_destroy(grpc_lb_policy* pol) {
972
870
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
973
871
  GPR_ASSERT(glb_policy->pending_picks == nullptr);
974
872
  GPR_ASSERT(glb_policy->pending_pings == nullptr);
975
873
  gpr_free((void*)glb_policy->server_name);
976
- grpc_channel_args_destroy(exec_ctx, glb_policy->args);
874
+ grpc_channel_args_destroy(glb_policy->args);
977
875
  if (glb_policy->client_stats != nullptr) {
978
876
  grpc_grpclb_client_stats_unref(glb_policy->client_stats);
979
877
  }
980
- grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
878
+ grpc_connectivity_state_destroy(&glb_policy->state_tracker);
981
879
  if (glb_policy->serverlist != nullptr) {
982
880
  grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
983
881
  }
984
882
  if (glb_policy->fallback_backend_addresses != nullptr) {
985
- grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
883
+ grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
986
884
  }
987
885
  grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
988
886
  grpc_subchannel_index_unref();
989
887
  gpr_free(glb_policy);
990
888
  }
991
889
 
992
- static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
890
+ static void glb_shutdown_locked(grpc_lb_policy* pol,
891
+ grpc_lb_policy* new_policy) {
993
892
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
893
+ grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
994
894
  glb_policy->shutting_down = true;
995
-
996
- /* We need a copy of the lb_call pointer because we can't cancell the call
997
- * while holding glb_policy->mu: lb_on_server_status_received, invoked due to
998
- * the cancel, needs to acquire that same lock */
999
- grpc_call* lb_call = glb_policy->lb_call;
1000
-
1001
895
  /* glb_policy->lb_call and this local lb_call must be consistent at this point
1002
896
  * because glb_policy->lb_call is only assigned in lb_call_init_locked as part
1003
897
  * of query_for_backends_locked, which can only be invoked while
1004
898
  * glb_policy->shutting_down is false. */
1005
- if (lb_call != nullptr) {
1006
- grpc_call_cancel(lb_call, nullptr);
899
+ if (glb_policy->lb_call != nullptr) {
900
+ grpc_call_cancel(glb_policy->lb_call, nullptr);
1007
901
  /* lb_on_server_status_received will pick up the cancel and clean up */
1008
902
  }
1009
- if (glb_policy->retry_timer_active) {
1010
- grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
1011
- glb_policy->retry_timer_active = false;
903
+ if (glb_policy->retry_timer_callback_pending) {
904
+ grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
1012
905
  }
1013
- if (glb_policy->fallback_timer_active) {
1014
- grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
1015
- glb_policy->fallback_timer_active = false;
906
+ if (glb_policy->fallback_timer_callback_pending) {
907
+ grpc_timer_cancel(&glb_policy->lb_fallback_timer);
1016
908
  }
1017
-
1018
- pending_pick* pp = glb_policy->pending_picks;
1019
- glb_policy->pending_picks = nullptr;
1020
- pending_ping* pping = glb_policy->pending_pings;
1021
- glb_policy->pending_pings = nullptr;
1022
909
  if (glb_policy->rr_policy != nullptr) {
1023
- GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
910
+ grpc_lb_policy_shutdown_locked(glb_policy->rr_policy, nullptr);
911
+ GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
912
+ } else {
913
+ grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
1024
914
  }
1025
915
  // We destroy the LB channel here because
1026
916
  // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
@@ -1030,28 +920,41 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
1030
920
  grpc_channel_destroy(glb_policy->lb_channel);
1031
921
  glb_policy->lb_channel = nullptr;
1032
922
  }
1033
- grpc_connectivity_state_set(
1034
- exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
1035
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
1036
-
923
+ grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
924
+ GRPC_ERROR_REF(error), "glb_shutdown");
925
+ // Clear pending picks.
926
+ pending_pick* pp = glb_policy->pending_picks;
927
+ glb_policy->pending_picks = nullptr;
1037
928
  while (pp != nullptr) {
1038
929
  pending_pick* next = pp->next;
1039
- *pp->target = nullptr;
1040
- GRPC_CLOSURE_SCHED(
1041
- exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
1042
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
1043
- gpr_free(pp);
930
+ if (new_policy != nullptr) {
931
+ // Hand pick over to new policy.
932
+ if (pp->client_stats != nullptr) {
933
+ grpc_grpclb_client_stats_unref(pp->client_stats);
934
+ }
935
+ pp->pick->on_complete = pp->original_on_complete;
936
+ if (grpc_lb_policy_pick_locked(new_policy, pp->pick)) {
937
+ // Synchronous return; schedule callback.
938
+ GRPC_CLOSURE_SCHED(pp->pick->on_complete, GRPC_ERROR_NONE);
939
+ }
940
+ gpr_free(pp);
941
+ } else {
942
+ pp->pick->connected_subchannel.reset();
943
+ GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
944
+ }
1044
945
  pp = next;
1045
946
  }
1046
-
947
+ // Clear pending pings.
948
+ pending_ping* pping = glb_policy->pending_pings;
949
+ glb_policy->pending_pings = nullptr;
1047
950
  while (pping != nullptr) {
1048
951
  pending_ping* next = pping->next;
1049
- GRPC_CLOSURE_SCHED(
1050
- exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
1051
- GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
952
+ GRPC_CLOSURE_SCHED(pping->on_initiate, GRPC_ERROR_REF(error));
953
+ GRPC_CLOSURE_SCHED(pping->on_ack, GRPC_ERROR_REF(error));
1052
954
  gpr_free(pping);
1053
955
  pping = next;
1054
956
  }
957
+ GRPC_ERROR_UNREF(error);
1055
958
  }
1056
959
 
1057
960
  // Cancel a specific pending pick.
@@ -1063,18 +966,18 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
1063
966
  // pick needs also be cancelled by the RR instance.
1064
967
  // - Otherwise, without an RR instance, picks stay pending at this policy's
1065
968
  // level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1066
- // we invoke the completion closure and set *target to NULL right here.
1067
- static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1068
- grpc_connected_subchannel** target,
969
+ // we invoke the completion closure and set *target to nullptr right here.
970
+ static void glb_cancel_pick_locked(grpc_lb_policy* pol,
971
+ grpc_lb_policy_pick_state* pick,
1069
972
  grpc_error* error) {
1070
973
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1071
974
  pending_pick* pp = glb_policy->pending_picks;
1072
975
  glb_policy->pending_picks = nullptr;
1073
976
  while (pp != nullptr) {
1074
977
  pending_pick* next = pp->next;
1075
- if (pp->target == target) {
1076
- *target = nullptr;
1077
- GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
978
+ if (pp->pick == pick) {
979
+ pick->connected_subchannel.reset();
980
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
1078
981
  GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1079
982
  "Pick Cancelled", &error, 1));
1080
983
  } else {
@@ -1084,7 +987,7 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1084
987
  pp = next;
1085
988
  }
1086
989
  if (glb_policy->rr_policy != nullptr) {
1087
- grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
990
+ grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, pick,
1088
991
  GRPC_ERROR_REF(error));
1089
992
  }
1090
993
  GRPC_ERROR_UNREF(error);
@@ -1099,9 +1002,8 @@ static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1099
1002
  // pick needs also be cancelled by the RR instance.
1100
1003
  // - Otherwise, without an RR instance, picks stay pending at this policy's
1101
1004
  // level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
1102
- // we invoke the completion closure and set *target to NULL right here.
1103
- static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
1104
- grpc_lb_policy* pol,
1005
+ // we invoke the completion closure and set *target to nullptr right here.
1006
+ static void glb_cancel_picks_locked(grpc_lb_policy* pol,
1105
1007
  uint32_t initial_metadata_flags_mask,
1106
1008
  uint32_t initial_metadata_flags_eq,
1107
1009
  grpc_error* error) {
@@ -1110,9 +1012,9 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
1110
1012
  glb_policy->pending_picks = nullptr;
1111
1013
  while (pp != nullptr) {
1112
1014
  pending_pick* next = pp->next;
1113
- if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
1015
+ if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
1114
1016
  initial_metadata_flags_eq) {
1115
- GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
1017
+ GRPC_CLOSURE_SCHED(&pp->on_complete,
1116
1018
  GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
1117
1019
  "Pick Cancelled", &error, 1));
1118
1020
  } else {
@@ -1123,66 +1025,54 @@ static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
1123
1025
  }
1124
1026
  if (glb_policy->rr_policy != nullptr) {
1125
1027
  grpc_lb_policy_cancel_picks_locked(
1126
- exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
1028
+ glb_policy->rr_policy, initial_metadata_flags_mask,
1127
1029
  initial_metadata_flags_eq, GRPC_ERROR_REF(error));
1128
1030
  }
1129
1031
  GRPC_ERROR_UNREF(error);
1130
1032
  }
1131
1033
 
1132
- static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
1133
- grpc_error* error);
1134
- static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
1135
- glb_lb_policy* glb_policy);
1136
- static void start_picking_locked(grpc_exec_ctx* exec_ctx,
1137
- glb_lb_policy* glb_policy) {
1034
+ static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
1035
+ static void query_for_backends_locked(glb_lb_policy* glb_policy);
1036
+ static void start_picking_locked(glb_lb_policy* glb_policy) {
1138
1037
  /* start a timer to fall back */
1139
1038
  if (glb_policy->lb_fallback_timeout_ms > 0 &&
1140
- glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
1039
+ glb_policy->serverlist == nullptr &&
1040
+ !glb_policy->fallback_timer_callback_pending) {
1141
1041
  grpc_millis deadline =
1142
- grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
1143
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
1042
+ grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
1043
+ GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_fallback_timer");
1144
1044
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
1145
1045
  glb_policy,
1146
1046
  grpc_combiner_scheduler(glb_policy->base.combiner));
1147
- glb_policy->fallback_timer_active = true;
1148
- grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
1047
+ glb_policy->fallback_timer_callback_pending = true;
1048
+ grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
1149
1049
  &glb_policy->lb_on_fallback);
1150
1050
  }
1151
1051
 
1152
1052
  glb_policy->started_picking = true;
1153
- grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
1154
- query_for_backends_locked(exec_ctx, glb_policy);
1053
+ glb_policy->lb_call_backoff->Reset();
1054
+ query_for_backends_locked(glb_policy);
1155
1055
  }
1156
1056
 
1157
- static void glb_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
1057
+ static void glb_exit_idle_locked(grpc_lb_policy* pol) {
1158
1058
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1159
1059
  if (!glb_policy->started_picking) {
1160
- start_picking_locked(exec_ctx, glb_policy);
1060
+ start_picking_locked(glb_policy);
1161
1061
  }
1162
1062
  }
1163
1063
 
1164
- static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1165
- const grpc_lb_policy_pick_args* pick_args,
1166
- grpc_connected_subchannel** target,
1167
- grpc_call_context_element* context, void** user_data,
1168
- grpc_closure* on_complete) {
1169
- if (pick_args->lb_token_mdelem_storage == nullptr) {
1170
- *target = nullptr;
1171
- GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
1172
- GRPC_ERROR_CREATE_FROM_STATIC_STRING(
1173
- "No mdelem storage for the LB token. Load reporting "
1174
- "won't work without it. Failing"));
1175
- return 0;
1176
- }
1064
+ static int glb_pick_locked(grpc_lb_policy* pol,
1065
+ grpc_lb_policy_pick_state* pick) {
1177
1066
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1067
+ pending_pick* pp = pending_pick_create(glb_policy, pick);
1178
1068
  bool pick_done = false;
1179
1069
  if (glb_policy->rr_policy != nullptr) {
1180
1070
  const grpc_connectivity_state rr_connectivity_state =
1181
- grpc_lb_policy_check_connectivity_locked(
1182
- exec_ctx, glb_policy->rr_policy, nullptr);
1071
+ grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
1072
+ nullptr);
1183
1073
  // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
1184
1074
  // callback registered to capture this event
1185
- // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
1075
+ // (on_rr_connectivity_changed_locked) may not have been invoked yet. We
1186
1076
  // need to make sure we aren't trying to pick from a RR policy instance
1187
1077
  // that's in shutdown.
1188
1078
  if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
@@ -1192,33 +1082,16 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1192
1082
  glb_policy, glb_policy->rr_policy,
1193
1083
  grpc_connectivity_state_name(rr_connectivity_state));
1194
1084
  }
1195
- add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
1196
- on_complete);
1085
+ pending_pick_add(&glb_policy->pending_picks, pp);
1197
1086
  pick_done = false;
1198
1087
  } else { // RR not in shutdown
1199
1088
  if (grpc_lb_glb_trace.enabled()) {
1200
1089
  gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
1201
1090
  glb_policy->rr_policy);
1202
1091
  }
1203
- GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
1204
- wrapped_rr_closure_arg* wc_arg =
1205
- (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
1206
- GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
1207
- grpc_schedule_on_exec_ctx);
1208
- wc_arg->rr_policy = glb_policy->rr_policy;
1209
- wc_arg->target = target;
1210
- wc_arg->context = context;
1211
1092
  GPR_ASSERT(glb_policy->client_stats != nullptr);
1212
- wc_arg->client_stats =
1213
- grpc_grpclb_client_stats_ref(glb_policy->client_stats);
1214
- wc_arg->wrapped_closure = on_complete;
1215
- wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
1216
- wc_arg->initial_metadata = pick_args->initial_metadata;
1217
- wc_arg->free_when_done = wc_arg;
1218
- wc_arg->glb_policy = pol;
1219
1093
  pick_done =
1220
- pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
1221
- false /* force_async */, target, wc_arg);
1094
+ pick_from_internal_rr_locked(glb_policy, false /* force_async */, pp);
1222
1095
  }
1223
1096
  } else { // glb_policy->rr_policy == NULL
1224
1097
  if (grpc_lb_glb_trace.enabled()) {
@@ -1226,10 +1099,9 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1226
1099
  "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
1227
1100
  glb_policy);
1228
1101
  }
1229
- add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
1230
- on_complete);
1102
+ pending_pick_add(&glb_policy->pending_picks, pp);
1231
1103
  if (!glb_policy->started_picking) {
1232
- start_picking_locked(exec_ctx, glb_policy);
1104
+ start_picking_locked(glb_policy);
1233
1105
  }
1234
1106
  pick_done = false;
1235
1107
  }
@@ -1237,117 +1109,124 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1237
1109
  }
1238
1110
 
1239
1111
  static grpc_connectivity_state glb_check_connectivity_locked(
1240
- grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1241
- grpc_error** connectivity_error) {
1112
+ grpc_lb_policy* pol, grpc_error** connectivity_error) {
1242
1113
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1243
1114
  return grpc_connectivity_state_get(&glb_policy->state_tracker,
1244
1115
  connectivity_error);
1245
1116
  }
1246
1117
 
1247
- static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
1248
- grpc_closure* closure) {
1118
+ static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
1119
+ grpc_closure* on_ack) {
1249
1120
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1250
1121
  if (glb_policy->rr_policy) {
1251
- grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
1122
+ grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
1252
1123
  } else {
1253
- add_pending_ping(&glb_policy->pending_pings, closure);
1124
+ pending_ping_add(&glb_policy->pending_pings, on_initiate, on_ack);
1254
1125
  if (!glb_policy->started_picking) {
1255
- start_picking_locked(exec_ctx, glb_policy);
1126
+ start_picking_locked(glb_policy);
1256
1127
  }
1257
1128
  }
1258
1129
  }
1259
1130
 
1260
- static void glb_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
1261
- grpc_lb_policy* pol,
1131
+ static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
1262
1132
  grpc_connectivity_state* current,
1263
1133
  grpc_closure* notify) {
1264
1134
  glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
1265
- grpc_connectivity_state_notify_on_state_change(
1266
- exec_ctx, &glb_policy->state_tracker, current, notify);
1135
+ grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
1136
+ current, notify);
1267
1137
  }
1268
1138
 
1269
- static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
1270
- grpc_error* error) {
1139
+ static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
1271
1140
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
1272
- glb_policy->retry_timer_active = false;
1141
+ glb_policy->retry_timer_callback_pending = false;
1273
1142
  if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
1274
1143
  error == GRPC_ERROR_NONE) {
1275
1144
  if (grpc_lb_glb_trace.enabled()) {
1276
1145
  gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
1277
1146
  }
1278
- query_for_backends_locked(exec_ctx, glb_policy);
1147
+ query_for_backends_locked(glb_policy);
1279
1148
  }
1280
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
1149
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_retry_timer");
1281
1150
  }
1282
1151
 
1283
- static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
1284
- glb_lb_policy* glb_policy) {
1152
+ static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
1285
1153
  if (glb_policy->started_picking && glb_policy->updating_lb_call) {
1286
- if (glb_policy->retry_timer_active) {
1287
- grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
1154
+ if (glb_policy->retry_timer_callback_pending) {
1155
+ grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
1288
1156
  }
1289
- if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
1157
+ if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
1290
1158
  glb_policy->updating_lb_call = false;
1291
1159
  } else if (!glb_policy->shutting_down) {
1292
1160
  /* if we aren't shutting down, restart the LB client call after some time */
1293
- grpc_millis next_try =
1294
- grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
1295
- .next_attempt_start_time;
1161
+ grpc_millis next_try = glb_policy->lb_call_backoff->NextAttemptTime();
1296
1162
  if (grpc_lb_glb_trace.enabled()) {
1297
1163
  gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
1298
1164
  glb_policy);
1299
- grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
1165
+ grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
1300
1166
  if (timeout > 0) {
1301
1167
  gpr_log(GPR_DEBUG,
1302
- "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
1168
+ "[grpclb %p] ... retry LB call after %" PRIuPTR "ms.",
1303
1169
  glb_policy, timeout);
1304
1170
  } else {
1305
- gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
1171
+ gpr_log(GPR_DEBUG, "[grpclb %p] ... retry LB call immediately.",
1306
1172
  glb_policy);
1307
1173
  }
1308
1174
  }
1309
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
1175
+ GRPC_LB_POLICY_REF(&glb_policy->base, "grpclb_retry_timer");
1310
1176
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry,
1311
1177
  lb_call_on_retry_timer_locked, glb_policy,
1312
1178
  grpc_combiner_scheduler(glb_policy->base.combiner));
1313
- glb_policy->retry_timer_active = true;
1314
- grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
1179
+ glb_policy->retry_timer_callback_pending = true;
1180
+ grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
1315
1181
  &glb_policy->lb_on_call_retry);
1316
1182
  }
1317
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1318
- "lb_on_server_status_received_locked");
1183
+ GRPC_LB_POLICY_UNREF(&glb_policy->base,
1184
+ "lb_on_server_status_received_locked");
1319
1185
  }
1320
1186
 
1321
- static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
1322
- grpc_error* error);
1187
+ static void send_client_load_report_locked(void* arg, grpc_error* error);
1323
1188
 
1324
- static void schedule_next_client_load_report(grpc_exec_ctx* exec_ctx,
1325
- glb_lb_policy* glb_policy) {
1189
+ static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
1326
1190
  const grpc_millis next_client_load_report_time =
1327
- grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
1191
+ grpc_core::ExecCtx::Get()->Now() +
1192
+ glb_policy->client_stats_report_interval;
1328
1193
  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
1329
1194
  send_client_load_report_locked, glb_policy,
1330
1195
  grpc_combiner_scheduler(glb_policy->base.combiner));
1331
- grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
1196
+ grpc_timer_init(&glb_policy->client_load_report_timer,
1332
1197
  next_client_load_report_time,
1333
1198
  &glb_policy->client_load_report_closure);
1334
1199
  }
1335
1200
 
1336
- static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
1337
- grpc_error* error) {
1201
+ static void client_load_report_done_locked(void* arg, grpc_error* error) {
1338
1202
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
1339
1203
  grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
1340
1204
  glb_policy->client_load_report_payload = nullptr;
1341
1205
  if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
1342
- glb_policy->client_load_report_timer_pending = false;
1343
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1344
- "client_load_report");
1206
+ glb_policy->client_load_report_timer_callback_pending = false;
1207
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
1345
1208
  if (glb_policy->lb_call == nullptr) {
1346
- maybe_restart_lb_call(exec_ctx, glb_policy);
1209
+ maybe_restart_lb_call(glb_policy);
1347
1210
  }
1348
1211
  return;
1349
1212
  }
1350
- schedule_next_client_load_report(exec_ctx, glb_policy);
1213
+ schedule_next_client_load_report(glb_policy);
1214
+ }
1215
+
1216
+ static void do_send_client_load_report_locked(glb_lb_policy* glb_policy) {
1217
+ grpc_op op;
1218
+ memset(&op, 0, sizeof(op));
1219
+ op.op = GRPC_OP_SEND_MESSAGE;
1220
+ op.data.send_message.send_message = glb_policy->client_load_report_payload;
1221
+ GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
1222
+ client_load_report_done_locked, glb_policy,
1223
+ grpc_combiner_scheduler(glb_policy->base.combiner));
1224
+ grpc_call_error call_error = grpc_call_start_batch_and_execute(
1225
+ glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
1226
+ if (call_error != GRPC_CALL_OK) {
1227
+ gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
1228
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
1229
+ }
1351
1230
  }
1352
1231
 
1353
1232
  static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
@@ -1362,15 +1241,13 @@ static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
1362
1241
  (drop_entries == nullptr || drop_entries->num_entries == 0);
1363
1242
  }
1364
1243
 
1365
- static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
1366
- grpc_error* error) {
1244
+ static void send_client_load_report_locked(void* arg, grpc_error* error) {
1367
1245
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
1368
1246
  if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
1369
- glb_policy->client_load_report_timer_pending = false;
1370
- GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
1371
- "client_load_report");
1247
+ glb_policy->client_load_report_timer_callback_pending = false;
1248
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "client_load_report");
1372
1249
  if (glb_policy->lb_call == nullptr) {
1373
- maybe_restart_lb_call(exec_ctx, glb_policy);
1250
+ maybe_restart_lb_call(glb_policy);
1374
1251
  }
1375
1252
  return;
1376
1253
  }
@@ -1383,7 +1260,7 @@ static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
1383
1260
  if (load_report_counters_are_zero(request)) {
1384
1261
  if (glb_policy->last_client_load_report_counters_were_zero) {
1385
1262
  grpc_grpclb_request_destroy(request);
1386
- schedule_next_client_load_report(exec_ctx, glb_policy);
1263
+ schedule_next_client_load_report(glb_policy);
1387
1264
  return;
1388
1265
  }
1389
1266
  glb_policy->last_client_load_report_counters_were_zero = true;
@@ -1393,31 +1270,20 @@ static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
1393
1270
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
1394
1271
  glb_policy->client_load_report_payload =
1395
1272
  grpc_raw_byte_buffer_create(&request_payload_slice, 1);
1396
- grpc_slice_unref_internal(exec_ctx, request_payload_slice);
1273
+ grpc_slice_unref_internal(request_payload_slice);
1397
1274
  grpc_grpclb_request_destroy(request);
1398
- // Send load report message.
1399
- grpc_op op;
1400
- memset(&op, 0, sizeof(op));
1401
- op.op = GRPC_OP_SEND_MESSAGE;
1402
- op.data.send_message.send_message = glb_policy->client_load_report_payload;
1403
- GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
1404
- client_load_report_done_locked, glb_policy,
1405
- grpc_combiner_scheduler(glb_policy->base.combiner));
1406
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
1407
- exec_ctx, glb_policy->lb_call, &op, 1,
1408
- &glb_policy->client_load_report_closure);
1409
- if (call_error != GRPC_CALL_OK) {
1410
- gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
1411
- GPR_ASSERT(GRPC_CALL_OK == call_error);
1275
+ // If we've already sent the initial request, then we can go ahead and send
1276
+ // the load report. Otherwise, we need to wait until the initial request has
1277
+ // been sent to send this (see lb_on_sent_initial_request_locked() below).
1278
+ if (glb_policy->initial_request_sent) {
1279
+ do_send_client_load_report_locked(glb_policy);
1412
1280
  }
1413
1281
  }
1414
1282
 
1415
- static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
1416
- void* arg, grpc_error* error);
1417
- static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
1418
- grpc_error* error);
1419
- static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
1420
- glb_lb_policy* glb_policy) {
1283
+ static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error);
1284
+ static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
1285
+ static void lb_on_response_received_locked(void* arg, grpc_error* error);
1286
+ static void lb_call_init_locked(glb_lb_policy* glb_policy) {
1421
1287
  GPR_ASSERT(glb_policy->server_name != nullptr);
1422
1288
  GPR_ASSERT(glb_policy->server_name[0] != '\0');
1423
1289
  GPR_ASSERT(glb_policy->lb_call == nullptr);
@@ -1430,13 +1296,13 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
1430
1296
  grpc_millis deadline =
1431
1297
  glb_policy->lb_call_timeout_ms == 0
1432
1298
  ? GRPC_MILLIS_INF_FUTURE
1433
- : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
1299
+ : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
1434
1300
  glb_policy->lb_call = grpc_channel_create_pollset_set_call(
1435
- exec_ctx, glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
1301
+ glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
1436
1302
  glb_policy->base.interested_parties,
1437
1303
  GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
1438
1304
  &host, deadline, nullptr);
1439
- grpc_slice_unref_internal(exec_ctx, host);
1305
+ grpc_slice_unref_internal(host);
1440
1306
 
1441
1307
  if (glb_policy->client_stats != nullptr) {
1442
1308
  grpc_grpclb_client_stats_unref(glb_policy->client_stats);
@@ -1451,9 +1317,12 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
1451
1317
  grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
1452
1318
  glb_policy->lb_request_payload =
1453
1319
  grpc_raw_byte_buffer_create(&request_payload_slice, 1);
1454
- grpc_slice_unref_internal(exec_ctx, request_payload_slice);
1320
+ grpc_slice_unref_internal(request_payload_slice);
1455
1321
  grpc_grpclb_request_destroy(request);
1456
1322
 
1323
+ GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
1324
+ lb_on_sent_initial_request_locked, glb_policy,
1325
+ grpc_combiner_scheduler(glb_policy->base.combiner));
1457
1326
  GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
1458
1327
  lb_on_server_status_received_locked, glb_policy,
1459
1328
  grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1461,19 +1330,21 @@ static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
1461
1330
  lb_on_response_received_locked, glb_policy,
1462
1331
  grpc_combiner_scheduler(glb_policy->base.combiner));
1463
1332
 
1464
- grpc_backoff_init(&glb_policy->lb_call_backoff_state,
1465
- GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
1466
- GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
1467
- GRPC_GRPCLB_RECONNECT_JITTER,
1468
- GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
1469
- GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
1333
+ grpc_core::BackOff::Options backoff_options;
1334
+ backoff_options
1335
+ .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
1336
+ .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
1337
+ .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
1338
+ .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
1470
1339
 
1340
+ glb_policy->lb_call_backoff.Init(backoff_options);
1341
+
1342
+ glb_policy->initial_request_sent = false;
1471
1343
  glb_policy->seen_initial_response = false;
1472
1344
  glb_policy->last_client_load_report_counters_were_zero = false;
1473
1345
  }
1474
1346
 
1475
- static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
1476
- glb_lb_policy* glb_policy) {
1347
+ static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
1477
1348
  GPR_ASSERT(glb_policy->lb_call != nullptr);
1478
1349
  grpc_call_unref(glb_policy->lb_call);
1479
1350
  glb_policy->lb_call = nullptr;
@@ -1482,22 +1353,21 @@ static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
1482
1353
  grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
1483
1354
 
1484
1355
  grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
1485
- grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
1356
+ grpc_slice_unref_internal(glb_policy->lb_call_status_details);
1486
1357
 
1487
- if (glb_policy->client_load_report_timer_pending) {
1488
- grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
1358
+ if (glb_policy->client_load_report_timer_callback_pending) {
1359
+ grpc_timer_cancel(&glb_policy->client_load_report_timer);
1489
1360
  }
1490
1361
  }
1491
1362
 
1492
1363
  /*
1493
1364
  * Auxiliary functions and LB client callbacks.
1494
1365
  */
1495
- static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
1496
- glb_lb_policy* glb_policy) {
1366
+ static void query_for_backends_locked(glb_lb_policy* glb_policy) {
1497
1367
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
1498
1368
  if (glb_policy->shutting_down) return;
1499
1369
 
1500
- lb_call_init_locked(exec_ctx, glb_policy);
1370
+ lb_call_init_locked(glb_policy);
1501
1371
 
1502
1372
  if (grpc_lb_glb_trace.enabled()) {
1503
1373
  gpr_log(GPR_INFO,
@@ -1528,8 +1398,11 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
1528
1398
  op->flags = 0;
1529
1399
  op->reserved = nullptr;
1530
1400
  op++;
1401
+ /* take a ref to be released in lb_on_sent_initial_request_locked() */
1402
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_sent_initial_request_locked");
1531
1403
  call_error = grpc_call_start_batch_and_execute(
1532
- exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), nullptr);
1404
+ glb_policy->lb_call, ops, (size_t)(op - ops),
1405
+ &glb_policy->lb_on_sent_initial_request);
1533
1406
  GPR_ASSERT(GRPC_CALL_OK == call_error);
1534
1407
 
1535
1408
  op = ops;
@@ -1542,12 +1415,10 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
1542
1415
  op->flags = 0;
1543
1416
  op->reserved = nullptr;
1544
1417
  op++;
1545
- /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
1546
- * count goes to zero) to be unref'd in lb_on_server_status_received_locked */
1547
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
1548
- "lb_on_server_status_received_locked");
1418
+ /* take a ref to be released in lb_on_server_status_received_locked() */
1419
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_server_status_received_locked");
1549
1420
  call_error = grpc_call_start_batch_and_execute(
1550
- exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1421
+ glb_policy->lb_call, ops, (size_t)(op - ops),
1551
1422
  &glb_policy->lb_on_server_status_received);
1552
1423
  GPR_ASSERT(GRPC_CALL_OK == call_error);
1553
1424
 
@@ -1557,23 +1428,32 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
1557
1428
  op->flags = 0;
1558
1429
  op->reserved = nullptr;
1559
1430
  op++;
1560
- /* take another weak ref to be unref'd/reused in
1561
- * lb_on_response_received_locked */
1562
- GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
1431
+ /* take a ref to be unref'd/reused in lb_on_response_received_locked() */
1432
+ GRPC_LB_POLICY_REF(&glb_policy->base, "lb_on_response_received_locked");
1563
1433
  call_error = grpc_call_start_batch_and_execute(
1564
- exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
1434
+ glb_policy->lb_call, ops, (size_t)(op - ops),
1565
1435
  &glb_policy->lb_on_response_received);
1566
1436
  GPR_ASSERT(GRPC_CALL_OK == call_error);
1567
1437
  }
1568
1438
 
1569
- static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
1570
- grpc_error* error) {
1439
+ static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
1440
+ glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
1441
+ glb_policy->initial_request_sent = true;
1442
+ // If we attempted to send a client load report before the initial request was
1443
+ // sent, send the load report now.
1444
+ if (glb_policy->client_load_report_payload != nullptr) {
1445
+ do_send_client_load_report_locked(glb_policy);
1446
+ }
1447
+ GRPC_LB_POLICY_UNREF(&glb_policy->base, "lb_on_sent_initial_request_locked");
1448
+ }
1449
+
1450
+ static void lb_on_response_received_locked(void* arg, grpc_error* error) {
1571
1451
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
1572
1452
  grpc_op ops[2];
1573
1453
  memset(ops, 0, sizeof(ops));
1574
1454
  grpc_op* op = ops;
1575
1455
  if (glb_policy->lb_response_payload != nullptr) {
1576
- grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
1456
+ glb_policy->lb_call_backoff->Reset();
1577
     /* Received data from the LB server. Look inside
      * glb_policy->lb_response_payload, for a serverlist. */
     grpc_byte_buffer_reader bbr;
@@ -1596,12 +1476,10 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
                   "client load reporting interval = %" PRIdPTR " milliseconds",
                   glb_policy, glb_policy->client_stats_report_interval);
         }
-        /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
-         * strong ref count goes to zero) to be unref'd in
-         * send_client_load_report_locked() */
-        glb_policy->client_load_report_timer_pending = true;
-        GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
-        schedule_next_client_load_report(exec_ctx, glb_policy);
+        /* take a ref to be unref'd in send_client_load_report_locked() */
+        glb_policy->client_load_report_timer_callback_pending = true;
+        GRPC_LB_POLICY_REF(&glb_policy->base, "client_load_report");
+        schedule_next_client_load_report(glb_policy);
       } else if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
                 "[grpclb %p] Received initial LB response message; client load "
@@ -1646,12 +1524,10 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
               grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
             } else {
               /* or dispose of the fallback */
-              grpc_lb_addresses_destroy(exec_ctx,
-                                        glb_policy->fallback_backend_addresses);
+              grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
               glb_policy->fallback_backend_addresses = nullptr;
-              if (glb_policy->fallback_timer_active) {
-                grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
-                glb_policy->fallback_timer_active = false;
+              if (glb_policy->fallback_timer_callback_pending) {
+                grpc_timer_cancel(&glb_policy->lb_fallback_timer);
               }
             }
             /* and update the copy in the glb_lb_policy instance. This
@@ -1659,7 +1535,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
             * update or in glb_destroy() */
            glb_policy->serverlist = serverlist;
            glb_policy->serverlist_index = 0;
-            rr_handover_locked(exec_ctx, glb_policy);
+            rr_handover_locked(glb_policy);
          }
        } else {
          if (grpc_lb_glb_trace.enabled()) {
@@ -1669,14 +1545,14 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
          }
          grpc_grpclb_destroy_serverlist(serverlist);
        }
-      } else { /* serverlist == NULL */
+      } else { /* serverlist == nullptr */
        gpr_log(GPR_ERROR,
                "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
                glb_policy,
                grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
      }
    }
-    grpc_slice_unref_internal(exec_ctx, response_slice);
+    grpc_slice_unref_internal(response_slice);
    if (!glb_policy->shutting_down) {
      /* keep listening for serverlist updates */
      op->op = GRPC_OP_RECV_MESSAGE;
@@ -1684,28 +1560,27 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
      op->flags = 0;
      op->reserved = nullptr;
      op++;
-      /* reuse the "lb_on_response_received_locked" weak ref taken in
+      /* reuse the "lb_on_response_received_locked" ref taken in
       * query_for_backends_locked() */
      const grpc_call_error call_error = grpc_call_start_batch_and_execute(
-          exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+          glb_policy->lb_call, ops, (size_t)(op - ops),
          &glb_policy->lb_on_response_received); /* loop */
      GPR_ASSERT(GRPC_CALL_OK == call_error);
    } else {
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                                "lb_on_response_received_locked_shutdown");
+      GRPC_LB_POLICY_UNREF(&glb_policy->base,
+                           "lb_on_response_received_locked_shutdown");
    }
  } else { /* empty payload: call cancelled. */
-    /* dispose of the "lb_on_response_received_locked" weak ref taken in
+    /* dispose of the "lb_on_response_received_locked" ref taken in
     * query_for_backends_locked() and reused in every reception loop */
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "lb_on_response_received_locked_empty_payload");
+    GRPC_LB_POLICY_UNREF(&glb_policy->base,
+                         "lb_on_response_received_locked_empty_payload");
  }
}

-static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
-  glb_policy->fallback_timer_active = false;
+  glb_policy->fallback_timer_callback_pending = false;
  /* If we receive a serverlist after the timer fires but before this callback
   * actually runs, don't fall back. */
  if (glb_policy->serverlist == nullptr) {
@@ -1716,15 +1591,13 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
                glb_policy);
      }
      GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
-      rr_handover_locked(exec_ctx, glb_policy);
+      rr_handover_locked(glb_policy);
    }
  }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                            "grpclb_fallback_timer");
+  GRPC_LB_POLICY_UNREF(&glb_policy->base, "grpclb_fallback_timer");
}

-static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
-                                                void* arg, grpc_error* error) {
+static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  GPR_ASSERT(glb_policy->lb_call != nullptr);
  if (grpc_lb_glb_trace.enabled()) {
@@ -1738,29 +1611,28 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
    gpr_free(status_details);
  }
  /* We need to perform cleanups no matter what. */
-  lb_call_destroy_locked(exec_ctx, glb_policy);
+  lb_call_destroy_locked(glb_policy);
  // If the load report timer is still pending, we wait for it to be
  // called before restarting the call. Otherwise, we restart the call
  // here.
-  if (!glb_policy->client_load_report_timer_pending) {
-    maybe_restart_lb_call(exec_ctx, glb_policy);
+  if (!glb_policy->client_load_report_timer_callback_pending) {
+    maybe_restart_lb_call(glb_policy);
  }
}

-static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
-                                   glb_lb_policy* glb_policy,
+static void fallback_update_locked(glb_lb_policy* glb_policy,
                                    const grpc_lb_addresses* addresses) {
  GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
-  grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+  grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
  glb_policy->fallback_backend_addresses =
-      extract_backend_addresses_locked(exec_ctx, addresses);
-  if (glb_policy->started_picking && glb_policy->lb_fallback_timeout_ms > 0 &&
-      !glb_policy->fallback_timer_active) {
-    rr_handover_locked(exec_ctx, glb_policy);
+      extract_backend_addresses_locked(addresses);
+  if (glb_policy->lb_fallback_timeout_ms > 0 &&
+      glb_policy->rr_policy != nullptr) {
+    rr_handover_locked(glb_policy);
  }
}

-static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+static void glb_update_locked(grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
  const grpc_arg* arg =
@@ -1770,7 +1642,7 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
      // If we don't have a current channel to the LB, go into TRANSIENT
      // FAILURE.
      grpc_connectivity_state_set(
-          exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+          &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
          "glb_update_missing");
    } else {
@@ -1787,16 +1659,16 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
  // If a non-empty serverlist hasn't been received from the balancer,
  // propagate the update to fallback_backend_addresses.
  if (glb_policy->serverlist == nullptr) {
-    fallback_update_locked(exec_ctx, glb_policy, addresses);
+    fallback_update_locked(glb_policy, addresses);
  }
  GPR_ASSERT(glb_policy->lb_channel != nullptr);
  // Propagate updates to the LB channel (pick_first) through the fake
  // resolver.
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
-      exec_ctx, addresses, glb_policy->response_generator, args->args);
+      addresses, glb_policy->response_generator, args->args);
  grpc_fake_resolver_response_generator_set_response(
-      exec_ctx, glb_policy->response_generator, lb_channel_args);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+      glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(lb_channel_args);
  // Start watching the LB channel connectivity for connection, if not
  // already doing so.
  if (!glb_policy->watching_lb_channel) {
@@ -1806,9 +1678,9 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
        grpc_channel_get_channel_stack(glb_policy->lb_channel));
    GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
    glb_policy->watching_lb_channel = true;
-    GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
+    GRPC_LB_POLICY_REF(&glb_policy->base, "watch_lb_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
-        exec_ctx, client_channel_elem,
+        client_channel_elem,
        grpc_polling_entity_create_from_pollset_set(
            glb_policy->base.interested_parties),
        &glb_policy->lb_channel_connectivity,
@@ -1819,8 +1691,7 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
// Invoked as part of the update process. It continues watching the LB channel
// until it shuts down or becomes READY. It's invoked even if the LB channel
// stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
-                                                      void* arg,
+static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
                                                      grpc_error* error) {
  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
  if (glb_policy->shutting_down) goto done;
@@ -1836,7 +1707,7 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
              grpc_channel_get_channel_stack(glb_policy->lb_channel));
      GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
      grpc_client_channel_watch_connectivity_state(
-          exec_ctx, client_channel_elem,
+          client_channel_elem,
          grpc_polling_entity_create_from_pollset_set(
              glb_policy->base.interested_parties),
          &glb_policy->lb_channel_connectivity,
@@ -1853,23 +1724,35 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
        grpc_call_cancel(glb_policy->lb_call, nullptr);
        // lb_on_server_status_received() will pick up the cancel and reinit
        // lb_call.
-      } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
-        if (glb_policy->retry_timer_active) {
-          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
-          glb_policy->retry_timer_active = false;
+      } else if (glb_policy->started_picking) {
+        if (glb_policy->retry_timer_callback_pending) {
+          grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
        }
-        start_picking_locked(exec_ctx, glb_policy);
+        start_picking_locked(glb_policy);
      }
      /* fallthrough */
    case GRPC_CHANNEL_SHUTDOWN:
    done:
      glb_policy->watching_lb_channel = false;
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                                "watch_lb_channel_connectivity_cb_shutdown");
+      GRPC_LB_POLICY_UNREF(&glb_policy->base,
+                           "watch_lb_channel_connectivity_cb_shutdown");
      break;
  }
}

+static void glb_set_reresolve_closure_locked(
+    grpc_lb_policy* policy, grpc_closure* request_reresolution) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+  GPR_ASSERT(!glb_policy->shutting_down);
+  GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
+  if (glb_policy->rr_policy != nullptr) {
+    grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
+                                                request_reresolution);
+  } else {
+    glb_policy->base.request_reresolution = request_reresolution;
+  }
+}
+
/* Code wiring the policy with the rest of the core */
static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_destroy,
@@ -1881,10 +1764,10 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
    glb_exit_idle_locked,
    glb_check_connectivity_locked,
    glb_notify_on_state_change_locked,
-    glb_update_locked};
+    glb_update_locked,
+    glb_set_reresolve_closure_locked};

-static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
-                                  grpc_lb_policy_factory* factory,
+static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
                                  grpc_lb_policy_args* args) {
  /* Count the number of gRPC-LB addresses. There must be at least one. */
  const grpc_arg* arg =
@@ -1905,7 +1788,7 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
  arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
  GPR_ASSERT(arg != nullptr);
  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
-  grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+  grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
  GPR_ASSERT(uri->path[0] != '\0');
  glb_policy->server_name =
      gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
@@ -1938,26 +1821,26 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
  /* Extract the backend addresses (may be empty) from the resolver for
   * fallback. */
  glb_policy->fallback_backend_addresses =
-      extract_backend_addresses_locked(exec_ctx, addresses);
+      extract_backend_addresses_locked(addresses);

  /* Create a client channel over them to communicate with a LB service */
  glb_policy->response_generator =
      grpc_fake_resolver_response_generator_create();
  grpc_channel_args* lb_channel_args = build_lb_channel_args(
-      exec_ctx, addresses, glb_policy->response_generator, args->args);
+      addresses, glb_policy->response_generator, args->args);
  char* uri_str;
  gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
  glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
-      exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
+      uri_str, args->client_channel_factory, lb_channel_args);

  /* Propagate initial resolution */
  grpc_fake_resolver_response_generator_set_response(
-      exec_ctx, glb_policy->response_generator, lb_channel_args);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+      glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(lb_channel_args);
  gpr_free(uri_str);
  if (glb_policy->lb_channel == nullptr) {
    gpr_free((void*)glb_policy->server_name);
-    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+    grpc_channel_args_destroy(glb_policy->args);
    gpr_free(glb_policy);
    return nullptr;
  }
@@ -1988,7 +1871,7 @@ grpc_lb_policy_factory* grpc_glb_lb_factory_create() {

// Only add client_load_reporting filter if the grpclb LB policy is used.
static bool maybe_add_client_load_reporting_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
  const grpc_channel_args* args =
      grpc_channel_stack_builder_get_channel_arguments(builder);
  const grpc_arg* channel_arg =
@@ -2001,7 +1884,7 @@ static bool maybe_add_client_load_reporting_filter(
  return true;
}

-extern "C" void grpc_lb_policy_grpclb_init() {
+void grpc_lb_policy_grpclb_init() {
  grpc_register_lb_policy(grpc_glb_lb_factory_create());
  grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                   GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
@@ -2009,4 +1892,4 @@ extern "C" void grpc_lb_policy_grpclb_init() {
                                   (void*)&grpc_client_load_reporting_filter);
}

-extern "C" void grpc_lb_policy_grpclb_shutdown() {}
+void grpc_lb_policy_grpclb_shutdown() {}