grpc 1.3.4 → 1.4.0

Potentially problematic release.
This version of grpc might be problematic.

Files changed (286)
  1. checksums.yaml +4 -4
  2. data/Makefile +581 -450
  3. data/include/grpc/census.h +49 -49
  4. data/include/grpc/grpc.h +16 -70
  5. data/include/grpc/grpc_security.h +59 -59
  6. data/include/grpc/grpc_security_constants.h +9 -9
  7. data/include/grpc/impl/codegen/atm.h +1 -1
  8. data/include/grpc/impl/codegen/atm_windows.h +4 -4
  9. data/include/grpc/impl/codegen/byte_buffer_reader.h +2 -2
  10. data/include/grpc/impl/codegen/compression_types.h +4 -5
  11. data/include/grpc/impl/codegen/gpr_slice.h +5 -5
  12. data/include/grpc/impl/codegen/gpr_types.h +6 -7
  13. data/include/grpc/impl/codegen/grpc_types.h +128 -59
  14. data/include/grpc/impl/codegen/port_platform.h +6 -0
  15. data/include/grpc/impl/codegen/propagation_bits.h +2 -2
  16. data/include/grpc/impl/codegen/slice.h +13 -12
  17. data/include/grpc/impl/codegen/status.h +23 -18
  18. data/include/grpc/impl/codegen/sync.h +1 -1
  19. data/include/grpc/load_reporting.h +6 -6
  20. data/include/grpc/slice.h +47 -25
  21. data/include/grpc/slice_buffer.h +18 -14
  22. data/include/grpc/support/alloc.h +7 -7
  23. data/include/grpc/support/cmdline.h +10 -10
  24. data/include/grpc/support/cpu.h +3 -3
  25. data/include/grpc/support/histogram.h +1 -1
  26. data/include/grpc/support/host_port.h +2 -2
  27. data/include/grpc/support/log.h +9 -9
  28. data/include/grpc/support/log_windows.h +1 -1
  29. data/include/grpc/support/string_util.h +3 -3
  30. data/include/grpc/support/subprocess.h +3 -3
  31. data/include/grpc/support/sync.h +31 -31
  32. data/include/grpc/support/thd.h +11 -11
  33. data/include/grpc/support/time.h +12 -12
  34. data/include/grpc/support/tls.h +1 -1
  35. data/include/grpc/support/tls_gcc.h +2 -2
  36. data/include/grpc/support/tls_msvc.h +1 -1
  37. data/include/grpc/support/tls_pthread.h +1 -1
  38. data/include/grpc/support/useful.h +2 -2
  39. data/include/grpc/support/workaround_list.h +46 -0
  40. data/src/core/ext/census/context.c +1 -1
  41. data/src/core/ext/census/intrusive_hash_map.c +319 -0
  42. data/src/core/ext/census/intrusive_hash_map.h +167 -0
  43. data/src/core/ext/census/intrusive_hash_map_internal.h +63 -0
  44. data/src/core/ext/census/resource.c +3 -1
  45. data/src/core/ext/filters/client_channel/channel_connectivity.c +1 -1
  46. data/src/core/ext/filters/client_channel/client_channel.c +173 -103
  47. data/src/core/ext/filters/client_channel/client_channel_plugin.c +3 -2
  48. data/src/core/ext/filters/client_channel/lb_policy.c +2 -1
  49. data/src/core/ext/filters/client_channel/lb_policy.h +8 -7
  50. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +153 -0
  51. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +42 -0
  52. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +405 -102
  53. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +133 -0
  54. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +65 -0
  55. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +90 -51
  56. data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +7 -1
  57. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +19 -8
  58. data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +63 -34
  59. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +2 -1
  60. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +188 -294
  61. data/src/core/ext/filters/client_channel/lb_policy_factory.c +28 -5
  62. data/src/core/ext/filters/client_channel/lb_policy_factory.h +18 -4
  63. data/src/core/ext/filters/client_channel/parse_address.c +90 -59
  64. data/src/core/ext/filters/client_channel/parse_address.h +17 -8
  65. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +11 -7
  66. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +59 -14
  67. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +6 -0
  68. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c +3 -3
  69. data/src/core/ext/filters/client_channel/subchannel.c +20 -17
  70. data/src/core/ext/filters/client_channel/subchannel.h +1 -0
  71. data/src/core/ext/filters/client_channel/subchannel_index.c +11 -1
  72. data/src/core/ext/filters/client_channel/uri_parser.c +36 -22
  73. data/src/core/ext/filters/client_channel/uri_parser.h +1 -1
  74. data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.c +42 -17
  75. data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.h +8 -9
  76. data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.c +19 -11
  77. data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.h +3 -6
  78. data/src/core/ext/filters/http/http_filters_plugin.c +104 -0
  79. data/src/core/{lib/channel/compress_filter.c → ext/filters/http/message_compress/message_compress_filter.c} +124 -23
  80. data/src/core/{lib/channel/compress_filter.h → ext/filters/http/message_compress/message_compress_filter.h} +5 -6
  81. data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.c +4 -6
  82. data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.h +3 -3
  83. data/src/core/ext/filters/load_reporting/load_reporting.c +2 -25
  84. data/src/core/ext/filters/load_reporting/load_reporting_filter.c +26 -1
  85. data/src/core/ext/filters/max_age/max_age_filter.c +14 -14
  86. data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.c +91 -47
  87. data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.h +3 -3
  88. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +223 -0
  89. data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +40 -0
  90. data/src/core/ext/filters/workarounds/workaround_utils.c +65 -0
  91. data/src/core/ext/filters/workarounds/workaround_utils.h +52 -0
  92. data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
  93. data/src/core/ext/transport/chttp2/server/chttp2_server.c +3 -2
  94. data/src/core/ext/transport/chttp2/transport/bin_decoder.c +2 -2
  95. data/src/core/ext/transport/chttp2/transport/bin_encoder.c +3 -3
  96. data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +319 -175
  97. data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -2
  98. data/src/core/ext/transport/chttp2/transport/frame_data.c +203 -164
  99. data/src/core/ext/transport/chttp2/transport/frame_data.h +8 -14
  100. data/src/core/ext/transport/chttp2/transport/frame_goaway.c +1 -1
  101. data/src/core/ext/transport/chttp2/transport/frame_ping.c +1 -1
  102. data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
  103. data/src/core/ext/transport/chttp2/transport/frame_settings.c +5 -5
  104. data/src/core/ext/transport/chttp2/transport/frame_window_update.c +1 -1
  105. data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +4 -4
  106. data/src/core/ext/transport/chttp2/transport/hpack_parser.c +2 -4
  107. data/src/core/ext/transport/chttp2/transport/hpack_table.c +4 -3
  108. data/src/core/ext/transport/chttp2/transport/internal.h +50 -33
  109. data/src/core/ext/transport/chttp2/transport/parsing.c +10 -11
  110. data/src/core/ext/transport/chttp2/transport/writing.c +32 -13
  111. data/src/core/lib/channel/channel_args.c +30 -9
  112. data/src/core/lib/channel/channel_args.h +5 -1
  113. data/src/core/lib/channel/channel_stack.c +1 -1
  114. data/src/core/lib/channel/channel_stack.h +2 -2
  115. data/src/core/lib/channel/channel_stack_builder.c +13 -1
  116. data/src/core/lib/channel/channel_stack_builder.h +5 -1
  117. data/src/core/lib/channel/connected_channel.c +3 -1
  118. data/src/core/lib/channel/context.h +2 -2
  119. data/src/core/lib/compression/message_compress.c +2 -2
  120. data/src/core/lib/debug/trace.c +13 -6
  121. data/src/core/lib/debug/trace.h +27 -1
  122. data/src/core/lib/http/httpcli.c +1 -1
  123. data/src/core/lib/http/httpcli_security_connector.c +9 -11
  124. data/src/core/lib/http/parser.c +2 -2
  125. data/src/core/lib/http/parser.h +2 -1
  126. data/src/core/lib/iomgr/combiner.c +6 -6
  127. data/src/core/lib/iomgr/combiner.h +2 -1
  128. data/src/core/lib/iomgr/error.c +12 -5
  129. data/src/core/lib/iomgr/error.h +13 -13
  130. data/src/core/lib/iomgr/ev_epoll1_linux.c +984 -0
  131. data/src/core/lib/iomgr/ev_epoll1_linux.h +44 -0
  132. data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +2146 -0
  133. data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h +43 -0
  134. data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +1337 -0
  135. data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h +43 -0
  136. data/src/core/lib/iomgr/ev_epollex_linux.c +1511 -0
  137. data/src/core/lib/iomgr/ev_epollex_linux.h +43 -0
  138. data/src/core/lib/iomgr/{ev_epoll_linux.c → ev_epollsig_linux.c} +41 -33
  139. data/src/core/lib/iomgr/{ev_epoll_linux.h → ev_epollsig_linux.h} +4 -4
  140. data/src/core/lib/iomgr/ev_poll_posix.c +12 -27
  141. data/src/core/lib/iomgr/ev_poll_posix.h +2 -2
  142. data/src/core/lib/iomgr/ev_posix.c +22 -8
  143. data/src/core/lib/iomgr/ev_posix.h +4 -3
  144. data/src/core/lib/iomgr/ev_windows.c +43 -0
  145. data/src/core/lib/iomgr/exec_ctx.c +5 -0
  146. data/src/core/lib/iomgr/exec_ctx.h +2 -0
  147. data/src/core/lib/iomgr/iomgr.c +4 -0
  148. data/src/core/lib/iomgr/iomgr.h +3 -0
  149. data/src/core/lib/iomgr/is_epollexclusive_available.c +116 -0
  150. data/src/core/lib/iomgr/is_epollexclusive_available.h +41 -0
  151. data/src/core/lib/iomgr/lockfree_event.c +16 -0
  152. data/src/core/lib/iomgr/pollset.h +2 -5
  153. data/src/core/lib/iomgr/pollset_uv.c +1 -1
  154. data/src/core/lib/iomgr/pollset_windows.c +3 -3
  155. data/src/core/lib/iomgr/resource_quota.c +9 -8
  156. data/src/core/lib/iomgr/resource_quota.h +2 -1
  157. data/src/core/lib/iomgr/sockaddr_utils.h +1 -1
  158. data/src/core/lib/iomgr/socket_mutator.h +2 -0
  159. data/src/core/lib/iomgr/sys_epoll_wrapper.h +43 -0
  160. data/src/core/lib/iomgr/tcp_client_posix.c +6 -6
  161. data/src/core/lib/iomgr/tcp_client_uv.c +3 -3
  162. data/src/core/lib/iomgr/tcp_posix.c +7 -7
  163. data/src/core/lib/iomgr/tcp_posix.h +2 -1
  164. data/src/core/lib/iomgr/tcp_server_posix.c +1 -1
  165. data/src/core/lib/iomgr/tcp_uv.c +6 -6
  166. data/src/core/lib/iomgr/tcp_uv.h +2 -1
  167. data/src/core/lib/iomgr/tcp_windows.c +1 -1
  168. data/src/core/lib/iomgr/timer_generic.c +24 -25
  169. data/src/core/lib/iomgr/timer_manager.c +276 -0
  170. data/src/core/lib/iomgr/timer_manager.h +52 -0
  171. data/src/core/lib/iomgr/timer_uv.c +6 -0
  172. data/src/core/lib/iomgr/udp_server.c +42 -9
  173. data/src/core/lib/iomgr/udp_server.h +3 -1
  174. data/src/core/lib/security/credentials/credentials.c +0 -1
  175. data/src/core/lib/security/credentials/fake/fake_credentials.c +23 -0
  176. data/src/core/lib/security/credentials/fake/fake_credentials.h +12 -9
  177. data/src/core/lib/security/credentials/google_default/google_default_credentials.c +1 -1
  178. data/src/core/lib/security/credentials/jwt/jwt_credentials.c +1 -1
  179. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +1 -1
  180. data/src/core/lib/security/credentials/ssl/ssl_credentials.c +24 -53
  181. data/src/core/lib/security/transport/client_auth_filter.c +9 -3
  182. data/src/core/lib/security/transport/secure_endpoint.c +7 -7
  183. data/src/core/lib/security/transport/secure_endpoint.h +1 -1
  184. data/src/core/lib/security/transport/security_connector.c +45 -57
  185. data/src/core/lib/security/transport/security_connector.h +10 -14
  186. data/src/core/lib/security/transport/security_handshaker.c +123 -97
  187. data/src/core/lib/slice/b64.c +1 -1
  188. data/src/core/lib/slice/percent_encoding.c +3 -3
  189. data/src/core/lib/slice/slice.c +66 -33
  190. data/src/core/lib/slice/slice_buffer.c +25 -6
  191. data/src/core/lib/slice/slice_hash_table.c +33 -35
  192. data/src/core/lib/slice/slice_hash_table.h +7 -12
  193. data/src/core/lib/support/atomic.h +45 -0
  194. data/src/core/lib/support/atomic_with_atm.h +70 -0
  195. data/src/core/lib/support/atomic_with_std.h +48 -0
  196. data/src/core/lib/support/avl.c +14 -14
  197. data/src/core/lib/support/cmdline.c +3 -3
  198. data/src/core/lib/support/histogram.c +2 -2
  199. data/src/core/lib/support/host_port.c +1 -1
  200. data/src/core/lib/support/memory.h +74 -0
  201. data/src/core/lib/support/mpscq.c +36 -2
  202. data/src/core/lib/support/mpscq.h +28 -1
  203. data/src/core/lib/support/stack_lockfree.c +3 -36
  204. data/src/core/lib/support/string.c +12 -12
  205. data/src/core/lib/support/string_posix.c +1 -1
  206. data/src/core/lib/support/subprocess_posix.c +2 -2
  207. data/src/core/lib/support/thd_posix.c +1 -1
  208. data/src/core/lib/support/time_posix.c +8 -0
  209. data/src/core/lib/support/tmpfile_posix.c +10 -10
  210. data/src/core/lib/surface/alarm.c +3 -1
  211. data/src/core/lib/surface/api_trace.c +2 -1
  212. data/src/core/lib/surface/api_trace.h +2 -2
  213. data/src/core/lib/surface/byte_buffer_reader.c +1 -1
  214. data/src/core/lib/surface/call.c +65 -22
  215. data/src/core/lib/surface/call.h +4 -2
  216. data/src/core/lib/surface/channel_init.c +2 -19
  217. data/src/core/lib/surface/channel_stack_type.c +18 -0
  218. data/src/core/lib/surface/channel_stack_type.h +2 -0
  219. data/src/core/lib/surface/completion_queue.c +694 -247
  220. data/src/core/lib/surface/completion_queue.h +30 -13
  221. data/src/core/lib/surface/completion_queue_factory.c +24 -9
  222. data/src/core/lib/surface/init.c +1 -52
  223. data/src/core/lib/surface/{lame_client.c → lame_client.cc} +37 -26
  224. data/src/core/lib/surface/server.c +79 -110
  225. data/src/core/lib/surface/server.h +2 -1
  226. data/src/core/lib/surface/version.c +2 -2
  227. data/src/core/lib/transport/bdp_estimator.c +25 -9
  228. data/src/core/lib/transport/bdp_estimator.h +7 -1
  229. data/src/core/lib/transport/byte_stream.c +23 -9
  230. data/src/core/lib/transport/byte_stream.h +15 -6
  231. data/src/core/lib/transport/connectivity_state.c +6 -6
  232. data/src/core/lib/transport/connectivity_state.h +2 -1
  233. data/src/core/lib/transport/service_config.c +6 -13
  234. data/src/core/lib/transport/service_config.h +2 -2
  235. data/src/core/lib/transport/static_metadata.c +403 -389
  236. data/src/core/lib/transport/static_metadata.h +127 -114
  237. data/src/core/plugin_registry/grpc_plugin_registry.c +16 -0
  238. data/src/core/tsi/fake_transport_security.c +5 -4
  239. data/src/core/tsi/ssl_transport_security.c +71 -82
  240. data/src/core/tsi/ssl_transport_security.h +39 -61
  241. data/src/core/tsi/transport_security.c +83 -2
  242. data/src/core/tsi/transport_security.h +27 -2
  243. data/src/core/tsi/transport_security_adapter.c +236 -0
  244. data/src/core/tsi/transport_security_adapter.h +62 -0
  245. data/src/core/tsi/transport_security_interface.h +179 -66
  246. data/src/ruby/ext/grpc/extconf.rb +2 -1
  247. data/src/ruby/ext/grpc/rb_byte_buffer.c +8 -6
  248. data/src/ruby/ext/grpc/rb_call.c +56 -48
  249. data/src/ruby/ext/grpc/rb_call.h +3 -4
  250. data/src/ruby/ext/grpc/rb_call_credentials.c +23 -22
  251. data/src/ruby/ext/grpc/rb_channel.c +2 -3
  252. data/src/ruby/ext/grpc/rb_channel_args.c +11 -9
  253. data/src/ruby/ext/grpc/rb_channel_credentials.c +16 -12
  254. data/src/ruby/ext/grpc/rb_completion_queue.c +7 -9
  255. data/src/ruby/ext/grpc/rb_compression_options.c +7 -6
  256. data/src/ruby/ext/grpc/rb_event_thread.c +10 -12
  257. data/src/ruby/ext/grpc/rb_event_thread.h +1 -2
  258. data/src/ruby/ext/grpc/rb_grpc.c +11 -15
  259. data/src/ruby/ext/grpc/rb_grpc.h +2 -2
  260. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +16 -6
  261. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +25 -10
  262. data/src/ruby/ext/grpc/rb_server.c +26 -28
  263. data/src/ruby/lib/grpc/grpc.rb +1 -1
  264. data/src/ruby/lib/grpc/version.rb +1 -1
  265. data/third_party/cares/config_linux/ares_config.h +36 -2
  266. data/third_party/zlib/adler32.c +14 -7
  267. data/third_party/zlib/compress.c +24 -18
  268. data/third_party/zlib/crc32.c +29 -12
  269. data/third_party/zlib/deflate.c +499 -303
  270. data/third_party/zlib/deflate.h +19 -16
  271. data/third_party/zlib/gzguts.h +16 -7
  272. data/third_party/zlib/gzlib.c +17 -14
  273. data/third_party/zlib/gzread.c +108 -48
  274. data/third_party/zlib/gzwrite.c +210 -122
  275. data/third_party/zlib/infback.c +2 -2
  276. data/third_party/zlib/inffast.c +34 -51
  277. data/third_party/zlib/inflate.c +86 -37
  278. data/third_party/zlib/inflate.h +7 -4
  279. data/third_party/zlib/inftrees.c +12 -14
  280. data/third_party/zlib/trees.c +38 -61
  281. data/third_party/zlib/uncompr.c +66 -32
  282. data/third_party/zlib/zconf.h +32 -9
  283. data/third_party/zlib/zlib.h +298 -154
  284. data/third_party/zlib/zutil.c +25 -24
  285. data/third_party/zlib/zutil.h +35 -17
  286. metadata +63 -30

data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h

@@ -14,16 +14,6 @@ extern "C" {
 #endif
 
 /* Struct definitions */
-typedef struct _grpc_lb_v1_ClientStats {
-  bool has_total_requests;
-  int64_t total_requests;
-  bool has_client_rpc_errors;
-  int64_t client_rpc_errors;
-  bool has_dropped_requests;
-  int64_t dropped_requests;
-  /* @@protoc_insertion_point(struct:grpc_lb_v1_ClientStats) */
-} grpc_lb_v1_ClientStats;
-
 typedef struct _grpc_lb_v1_Duration {
   bool has_seconds;
   int64_t seconds;
@@ -46,11 +36,39 @@ typedef struct _grpc_lb_v1_Server {
   int32_t port;
   bool has_load_balance_token;
   char load_balance_token[50];
-  bool has_drop_request;
-  bool drop_request;
+  bool has_drop_for_rate_limiting;
+  bool drop_for_rate_limiting;
+  bool has_drop_for_load_balancing;
+  bool drop_for_load_balancing;
   /* @@protoc_insertion_point(struct:grpc_lb_v1_Server) */
 } grpc_lb_v1_Server;
 
+typedef struct _grpc_lb_v1_Timestamp {
+  bool has_seconds;
+  int64_t seconds;
+  bool has_nanos;
+  int32_t nanos;
+  /* @@protoc_insertion_point(struct:grpc_lb_v1_Timestamp) */
+} grpc_lb_v1_Timestamp;
+
+typedef struct _grpc_lb_v1_ClientStats {
+  bool has_timestamp;
+  grpc_lb_v1_Timestamp timestamp;
+  bool has_num_calls_started;
+  int64_t num_calls_started;
+  bool has_num_calls_finished;
+  int64_t num_calls_finished;
+  bool has_num_calls_finished_with_drop_for_rate_limiting;
+  int64_t num_calls_finished_with_drop_for_rate_limiting;
+  bool has_num_calls_finished_with_drop_for_load_balancing;
+  int64_t num_calls_finished_with_drop_for_load_balancing;
+  bool has_num_calls_finished_with_client_failed_to_send;
+  int64_t num_calls_finished_with_client_failed_to_send;
+  bool has_num_calls_finished_known_received;
+  int64_t num_calls_finished_known_received;
+  /* @@protoc_insertion_point(struct:grpc_lb_v1_ClientStats) */
+} grpc_lb_v1_ClientStats;
+
 typedef struct _grpc_lb_v1_InitialLoadBalanceResponse {
   bool has_load_balancer_delegate;
   char load_balancer_delegate[64];
@@ -59,6 +77,13 @@ typedef struct _grpc_lb_v1_InitialLoadBalanceResponse {
   /* @@protoc_insertion_point(struct:grpc_lb_v1_InitialLoadBalanceResponse) */
 } grpc_lb_v1_InitialLoadBalanceResponse;
 
+typedef struct _grpc_lb_v1_ServerList {
+  pb_callback_t servers;
+  bool has_expiration_interval;
+  grpc_lb_v1_Duration expiration_interval;
+  /* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */
+} grpc_lb_v1_ServerList;
+
 typedef struct _grpc_lb_v1_LoadBalanceRequest {
   bool has_initial_request;
   grpc_lb_v1_InitialLoadBalanceRequest initial_request;
@@ -67,13 +92,6 @@ typedef struct _grpc_lb_v1_LoadBalanceRequest {
   /* @@protoc_insertion_point(struct:grpc_lb_v1_LoadBalanceRequest) */
 } grpc_lb_v1_LoadBalanceRequest;
 
-typedef struct _grpc_lb_v1_ServerList {
-  pb_callback_t servers;
-  bool has_expiration_interval;
-  grpc_lb_v1_Duration expiration_interval;
-  /* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */
-} grpc_lb_v1_ServerList;
-
 typedef struct _grpc_lb_v1_LoadBalanceResponse {
   bool has_initial_response;
   grpc_lb_v1_InitialLoadBalanceResponse initial_response;
@@ -86,61 +104,72 @@ typedef struct _grpc_lb_v1_LoadBalanceResponse {
 
 /* Initializer values for message structs */
 #define grpc_lb_v1_Duration_init_default {false, 0, false, 0}
+#define grpc_lb_v1_Timestamp_init_default {false, 0, false, 0}
 #define grpc_lb_v1_LoadBalanceRequest_init_default {false, grpc_lb_v1_InitialLoadBalanceRequest_init_default, false, grpc_lb_v1_ClientStats_init_default}
 #define grpc_lb_v1_InitialLoadBalanceRequest_init_default {false, ""}
-#define grpc_lb_v1_ClientStats_init_default {false, 0, false, 0, false, 0}
+#define grpc_lb_v1_ClientStats_init_default {false, grpc_lb_v1_Timestamp_init_default, false, 0, false, 0, false, 0, false, 0, false, 0, false, 0}
 #define grpc_lb_v1_LoadBalanceResponse_init_default {false, grpc_lb_v1_InitialLoadBalanceResponse_init_default, false, grpc_lb_v1_ServerList_init_default}
 #define grpc_lb_v1_InitialLoadBalanceResponse_init_default {false, "", false, grpc_lb_v1_Duration_init_default}
 #define grpc_lb_v1_ServerList_init_default {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_default}
-#define grpc_lb_v1_Server_init_default {false, {0, {0}}, false, 0, false, "", false, 0}
+#define grpc_lb_v1_Server_init_default {false, {0, {0}}, false, 0, false, "", false, 0, false, 0}
 #define grpc_lb_v1_Duration_init_zero {false, 0, false, 0}
+#define grpc_lb_v1_Timestamp_init_zero {false, 0, false, 0}
 #define grpc_lb_v1_LoadBalanceRequest_init_zero {false, grpc_lb_v1_InitialLoadBalanceRequest_init_zero, false, grpc_lb_v1_ClientStats_init_zero}
 #define grpc_lb_v1_InitialLoadBalanceRequest_init_zero {false, ""}
-#define grpc_lb_v1_ClientStats_init_zero {false, 0, false, 0, false, 0}
+#define grpc_lb_v1_ClientStats_init_zero {false, grpc_lb_v1_Timestamp_init_zero, false, 0, false, 0, false, 0, false, 0, false, 0, false, 0}
 #define grpc_lb_v1_LoadBalanceResponse_init_zero {false, grpc_lb_v1_InitialLoadBalanceResponse_init_zero, false, grpc_lb_v1_ServerList_init_zero}
 #define grpc_lb_v1_InitialLoadBalanceResponse_init_zero {false, "", false, grpc_lb_v1_Duration_init_zero}
 #define grpc_lb_v1_ServerList_init_zero {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_zero}
-#define grpc_lb_v1_Server_init_zero {false, {0, {0}}, false, 0, false, "", false, 0}
+#define grpc_lb_v1_Server_init_zero {false, {0, {0}}, false, 0, false, "", false, 0, false, 0}
 
 /* Field tags (for use in manual encoding/decoding) */
-#define grpc_lb_v1_ClientStats_total_requests_tag 1
-#define grpc_lb_v1_ClientStats_client_rpc_errors_tag 2
-#define grpc_lb_v1_ClientStats_dropped_requests_tag 3
 #define grpc_lb_v1_Duration_seconds_tag 1
 #define grpc_lb_v1_Duration_nanos_tag 2
 #define grpc_lb_v1_InitialLoadBalanceRequest_name_tag 1
 #define grpc_lb_v1_Server_ip_address_tag 1
 #define grpc_lb_v1_Server_port_tag 2
 #define grpc_lb_v1_Server_load_balance_token_tag 3
-#define grpc_lb_v1_Server_drop_request_tag 4
+#define grpc_lb_v1_Server_drop_for_rate_limiting_tag 4
+#define grpc_lb_v1_Server_drop_for_load_balancing_tag 5
+#define grpc_lb_v1_Timestamp_seconds_tag 1
+#define grpc_lb_v1_Timestamp_nanos_tag 2
+#define grpc_lb_v1_ClientStats_timestamp_tag 1
+#define grpc_lb_v1_ClientStats_num_calls_started_tag 2
+#define grpc_lb_v1_ClientStats_num_calls_finished_tag 3
+#define grpc_lb_v1_ClientStats_num_calls_finished_with_drop_for_rate_limiting_tag 4
+#define grpc_lb_v1_ClientStats_num_calls_finished_with_drop_for_load_balancing_tag 5
+#define grpc_lb_v1_ClientStats_num_calls_finished_with_client_failed_to_send_tag 6
+#define grpc_lb_v1_ClientStats_num_calls_finished_known_received_tag 7
 #define grpc_lb_v1_InitialLoadBalanceResponse_load_balancer_delegate_tag 1
 #define grpc_lb_v1_InitialLoadBalanceResponse_client_stats_report_interval_tag 2
-#define grpc_lb_v1_LoadBalanceRequest_initial_request_tag 1
-#define grpc_lb_v1_LoadBalanceRequest_client_stats_tag 2
 #define grpc_lb_v1_ServerList_servers_tag 1
 #define grpc_lb_v1_ServerList_expiration_interval_tag 3
+#define grpc_lb_v1_LoadBalanceRequest_initial_request_tag 1
+#define grpc_lb_v1_LoadBalanceRequest_client_stats_tag 2
 #define grpc_lb_v1_LoadBalanceResponse_initial_response_tag 1
 #define grpc_lb_v1_LoadBalanceResponse_server_list_tag 2
 
 /* Struct field encoding specification for nanopb */
 extern const pb_field_t grpc_lb_v1_Duration_fields[3];
+extern const pb_field_t grpc_lb_v1_Timestamp_fields[3];
 extern const pb_field_t grpc_lb_v1_LoadBalanceRequest_fields[3];
 extern const pb_field_t grpc_lb_v1_InitialLoadBalanceRequest_fields[2];
-extern const pb_field_t grpc_lb_v1_ClientStats_fields[4];
+extern const pb_field_t grpc_lb_v1_ClientStats_fields[8];
 extern const pb_field_t grpc_lb_v1_LoadBalanceResponse_fields[3];
 extern const pb_field_t grpc_lb_v1_InitialLoadBalanceResponse_fields[3];
 extern const pb_field_t grpc_lb_v1_ServerList_fields[3];
-extern const pb_field_t grpc_lb_v1_Server_fields[5];
+extern const pb_field_t grpc_lb_v1_Server_fields[6];
 
 /* Maximum encoded size of messages (where known) */
 #define grpc_lb_v1_Duration_size 22
-#define grpc_lb_v1_LoadBalanceRequest_size 169
+#define grpc_lb_v1_Timestamp_size 22
+#define grpc_lb_v1_LoadBalanceRequest_size 226
 #define grpc_lb_v1_InitialLoadBalanceRequest_size 131
-#define grpc_lb_v1_ClientStats_size 33
+#define grpc_lb_v1_ClientStats_size 90
 #define grpc_lb_v1_LoadBalanceResponse_size (98 + grpc_lb_v1_ServerList_size)
 #define grpc_lb_v1_InitialLoadBalanceResponse_size 90
 /* grpc_lb_v1_ServerList_size depends on runtime parameters */
-#define grpc_lb_v1_Server_size 83
+#define grpc_lb_v1_Server_size 85
 
 /* Message IDs (where set with "msgid" option) */
 #ifdef PB_MSGID
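
The regenerated nanopb header above replaces the old three-counter ClientStats message with a Timestamp plus per-call counters. As a rough illustration only (not code shipped in this gem), such a message could be filled in and serialized with nanopb's pb_encode(); the include path and counter values below are placeholders, and the gem's real reporting logic lives in grpclb.c and grpclb_client_stats.c.

/* Sketch: encode the new grpc_lb_v1_ClientStats message with nanopb.
 * Assumes nanopb's <pb_encode.h> plus the regenerated load_balancer.pb.h
 * shown above; all field values here are illustrative. */
#include <pb_encode.h>
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"

static bool encode_client_stats_example(uint8_t *buf, size_t buf_len,
                                        size_t *encoded_len) {
  grpc_lb_v1_ClientStats stats = grpc_lb_v1_ClientStats_init_zero;
  stats.has_timestamp = true;
  stats.timestamp.has_seconds = true;
  stats.timestamp.seconds = 1498000000; /* placeholder wall-clock time */
  stats.has_num_calls_started = true;
  stats.num_calls_started = 10;
  stats.has_num_calls_finished = true;
  stats.num_calls_finished = 8;
  stats.has_num_calls_finished_known_received = true;
  stats.num_calls_finished_known_received = 7;
  /* grpc_lb_v1_ClientStats_size (90) bounds the buffer needed. */
  pb_ostream_t stream = pb_ostream_from_buffer(buf, buf_len);
  if (!pb_encode(&stream, grpc_lb_v1_ClientStats_fields, &stats)) return false;
  *encoded_len = stream.bytes_written;
  return true;
}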

data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c

@@ -189,7 +189,8 @@ static void pf_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
 
 static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           const grpc_lb_policy_pick_args *pick_args,
-                          grpc_connected_subchannel **target, void **user_data,
+                          grpc_connected_subchannel **target,
+                          grpc_call_context_element *context, void **user_data,
                           grpc_closure *on_complete) {
   pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
   pending_pick *pp;
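
The only change to pick_first here is the new grpc_call_context_element *context argument on the pick path, which lets an LB policy attach per-call data that a later filter can read (grpclb uses it for the client load reporting filter added in this release). A minimal stand-alone sketch of that element shape follows; the names (call_context_element, toy_context_index, attach_lb_stats) are hypothetical stand-ins, not the real grpc-core declarations.

/* Sketch only: a per-call context slot is a value plus an optional destroy
 * callback, indexed by a small enum. All names below are hypothetical. */
typedef struct {
  void *value;
  void (*destroy)(void *value);
} call_context_element;

typedef enum { TOY_CONTEXT_LB_STATS = 0, TOY_CONTEXT_COUNT } toy_context_index;

/* An LB policy's pick path can stash data for a later filter to pick up. */
static void attach_lb_stats(call_context_element *context, void *stats,
                            void (*destroy_fn)(void *)) {
  context[TOY_CONTEXT_LB_STATS].value = stats;
  context[TOY_CONTEXT_LB_STATS].destroy = destroy_fn;
}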

data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c

@@ -74,7 +74,7 @@
 
 typedef struct round_robin_lb_policy round_robin_lb_policy;
 
-int grpc_lb_round_robin_trace = 0;
+grpc_tracer_flag grpc_lb_round_robin_trace = GRPC_TRACER_INITIALIZER(false);
 
 /** List of entities waiting for a pick.
  *
@@ -99,26 +99,13 @@ typedef struct pending_pick {
   grpc_closure *on_complete;
 } pending_pick;
 
-/** List of subchannels in a connectivity READY state */
-typedef struct ready_list {
-  grpc_subchannel *subchannel;
-  /* references namesake entry in subchannel_data */
-  void *user_data;
-  struct ready_list *next;
-  struct ready_list *prev;
-} ready_list;
-
 typedef struct {
-  /** index within policy->subchannels */
-  size_t index;
   /** backpointer to owning policy */
   round_robin_lb_policy *policy;
   /** subchannel itself */
   grpc_subchannel *subchannel;
   /** notification that connectivity has changed on subchannel */
   grpc_closure connectivity_changed_closure;
-  /** this subchannels current position in subchannel->ready_list */
-  ready_list *ready_list_node;
   /** last observed connectivity. Not updated by
    * \a grpc_subchannel_notify_on_state_change. Used to determine the previous
    * state while processing the new state in \a rr_connectivity_changed */
@@ -126,6 +113,10 @@ typedef struct {
   /** current connectivity state. Updated by \a
    * grpc_subchannel_notify_on_state_change */
   grpc_connectivity_state curr_connectivity_state;
+  /** connectivity state to be updated by the watcher, not guarded by
+   * the combiner. Will be moved to curr_connectivity_state inside of
+   * the combiner by rr_connectivity_changed_locked(). */
+  grpc_connectivity_state pending_connectivity_state_unsafe;
   /** the subchannel's target user data */
   void *user_data;
   /** vtable to operate over \a user_data */
@@ -141,182 +132,106 @@ struct round_robin_lb_policy {
 
   /** all our subchannels */
   size_t num_subchannels;
-  subchannel_data **subchannels;
+  subchannel_data *subchannels;
 
-  /** how many subchannels are in TRANSIENT_FAILURE */
+  /** how many subchannels are in state READY */
+  size_t num_ready;
+  /** how many subchannels are in state TRANSIENT_FAILURE */
   size_t num_transient_failures;
-  /** how many subchannels are IDLE */
+  /** how many subchannels are in state IDLE */
   size_t num_idle;
 
   /** have we started picking? */
-  int started_picking;
+  bool started_picking;
   /** are we shutting down? */
-  int shutdown;
+  bool shutdown;
   /** List of picks that are waiting on connectivity */
   pending_pick *pending_picks;
 
   /** our connectivity state tracker */
   grpc_connectivity_state_tracker state_tracker;
 
-  /** (Dummy) root of the doubly linked list containing READY subchannels */
-  ready_list ready_list;
-  /** Last pick from the ready list. */
-  ready_list *ready_list_last_pick;
+  // Index into subchannels for last pick.
+  size_t last_ready_subchannel_index;
 };
 
-/** Returns the next subchannel from the connected list or NULL if the list is
- * empty.
+/** Returns the index into p->subchannels of the next subchannel in
+ * READY state, or p->num_subchannels if no subchannel is READY.
  *
- * Note that this function does *not* advance p->ready_list_last_pick. Use \a
- * advance_last_picked_locked() for that. */
-static ready_list *peek_next_connected_locked(const round_robin_lb_policy *p) {
-  ready_list *selected;
-  selected = p->ready_list_last_pick->next;
-
-  while (selected != NULL) {
-    if (selected == &p->ready_list) {
-      GPR_ASSERT(selected->subchannel == NULL);
-      /* skip dummy root */
-      selected = selected->next;
-    } else {
-      GPR_ASSERT(selected->subchannel != NULL);
-      return selected;
-    }
+ * Note that this function does *not* update p->last_ready_subchannel_index.
+ * The caller must do that if it returns a pick. */
+static size_t get_next_ready_subchannel_index_locked(
+    const round_robin_lb_policy *p) {
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+    gpr_log(GPR_INFO,
+            "[RR: %p] getting next ready subchannel, "
+            "last_ready_subchannel_index=%lu",
+            p, (unsigned long)p->last_ready_subchannel_index);
   }
-  return NULL;
-}
-
-/** Advance the \a ready_list picking head. */
-static void advance_last_picked_locked(round_robin_lb_policy *p) {
-  if (p->ready_list_last_pick->next != NULL) { /* non-empty list */
-    p->ready_list_last_pick = p->ready_list_last_pick->next;
-    if (p->ready_list_last_pick == &p->ready_list) {
-      /* skip dummy root */
-      p->ready_list_last_pick = p->ready_list_last_pick->next;
+  for (size_t i = 0; i < p->num_subchannels; ++i) {
+    const size_t index =
+        (i + p->last_ready_subchannel_index + 1) % p->num_subchannels;
+    if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+      gpr_log(GPR_DEBUG, "[RR %p] checking index %lu: state=%d", p,
+              (unsigned long)index,
+              p->subchannels[index].curr_connectivity_state);
+    }
+    if (p->subchannels[index].curr_connectivity_state == GRPC_CHANNEL_READY) {
+      if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+        gpr_log(GPR_DEBUG, "[RR %p] found next ready subchannel at index %lu",
+                p, (unsigned long)index);
+      }
+      return index;
     }
-  } else { /* should be an empty list */
-    GPR_ASSERT(p->ready_list_last_pick == &p->ready_list);
-  }
-
-  if (grpc_lb_round_robin_trace) {
-    gpr_log(GPR_DEBUG,
-            "[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
-            "CSC %p)",
-            (void *)p, (void *)p->ready_list_last_pick,
-            (void *)p->ready_list_last_pick->subchannel,
-            (void *)grpc_subchannel_get_connected_subchannel(
-                p->ready_list_last_pick->subchannel));
-  }
-}
-
-/** Prepends (relative to the root at p->ready_list) the connected subchannel \a
- * csc to the list of ready subchannels. */
-static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
-                                           subchannel_data *sd) {
-  ready_list *new_elem = gpr_zalloc(sizeof(ready_list));
-  new_elem->subchannel = sd->subchannel;
-  new_elem->user_data = sd->user_data;
-  if (p->ready_list.prev == NULL) {
-    /* first element */
-    new_elem->next = &p->ready_list;
-    new_elem->prev = &p->ready_list;
-    p->ready_list.next = new_elem;
-    p->ready_list.prev = new_elem;
-  } else {
-    new_elem->next = &p->ready_list;
-    new_elem->prev = p->ready_list.prev;
-    p->ready_list.prev->next = new_elem;
-    p->ready_list.prev = new_elem;
   }
-  if (grpc_lb_round_robin_trace) {
-    gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (Conn. SC %p)",
-            (void *)new_elem, (void *)sd->subchannel);
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+    gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", p);
   }
-  return new_elem;
+  return p->num_subchannels;
 }
 
-/** Removes \a node from the list of connected subchannels */
-static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
-                                          ready_list *node) {
-  if (node == NULL) {
-    return;
-  }
-  if (node == p->ready_list_last_pick) {
-    p->ready_list_last_pick = p->ready_list_last_pick->prev;
-  }
-
-  /* removing last item */
-  if (node->next == &p->ready_list && node->prev == &p->ready_list) {
-    GPR_ASSERT(p->ready_list.next == node);
-    GPR_ASSERT(p->ready_list.prev == node);
-    p->ready_list.next = NULL;
-    p->ready_list.prev = NULL;
-  } else {
-    node->prev->next = node->next;
-    node->next->prev = node->prev;
-  }
-
-  if (grpc_lb_round_robin_trace) {
-    gpr_log(GPR_DEBUG, "[READYLIST] REMOVED NODE %p (SC %p)", (void *)node,
-            (void *)node->subchannel);
+// Sets p->last_ready_subchannel_index to last_ready_index.
+static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
+                                                      size_t last_ready_index) {
+  GPR_ASSERT(last_ready_index < p->num_subchannels);
+  p->last_ready_subchannel_index = last_ready_index;
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+    gpr_log(GPR_DEBUG,
+            "[RR: %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
+            (void *)p, (unsigned long)last_ready_index,
+            (void *)p->subchannels[last_ready_index].subchannel,
+            (void *)grpc_subchannel_get_connected_subchannel(
+                p->subchannels[last_ready_index].subchannel));
   }
-
-  node->next = NULL;
-  node->prev = NULL;
-  node->subchannel = NULL;
-
-  gpr_free(node);
-}
-
-static bool is_ready_list_empty(round_robin_lb_policy *p) {
-  return p->ready_list.prev == NULL;
 }
 
 static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
-  ready_list *elem;
-
-  if (grpc_lb_round_robin_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
     gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
   }
-
   for (size_t i = 0; i < p->num_subchannels; i++) {
-    subchannel_data *sd = p->subchannels[i];
-    GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_destroy");
-    if (sd->user_data != NULL) {
-      GPR_ASSERT(sd->user_data_vtable != NULL);
-      sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+    subchannel_data *sd = &p->subchannels[i];
+    if (sd->subchannel != NULL) {
+      GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_destroy");
+      if (sd->user_data != NULL) {
+        GPR_ASSERT(sd->user_data_vtable != NULL);
+        sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+      }
     }
-    gpr_free(sd);
   }
-
   grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
   gpr_free(p->subchannels);
-
-  elem = p->ready_list.next;
-  while (elem != NULL && elem != &p->ready_list) {
-    ready_list *tmp;
-    tmp = elem->next;
-    elem->next = NULL;
-    elem->prev = NULL;
-    elem->subchannel = NULL;
-    gpr_free(elem);
-    elem = tmp;
-  }
-
   gpr_free(p);
 }
 
 static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
-  pending_pick *pp;
-  size_t i;
-
-  if (grpc_lb_round_robin_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
     gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
   }
-
-  p->shutdown = 1;
+  p->shutdown = true;
+  pending_pick *pp;
   while ((pp = p->pending_picks)) {
     p->pending_picks = pp->next;
     *pp->target = NULL;
@@ -328,10 +243,13 @@ static void rr_shutdown_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
   grpc_connectivity_state_set(
       exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "rr_shutdown");
-  for (i = 0; i < p->num_subchannels; i++) {
-    subchannel_data *sd = p->subchannels[i];
-    grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
-                                           &sd->connectivity_changed_closure);
+  for (size_t i = 0; i < p->num_subchannels; i++) {
+    subchannel_data *sd = &p->subchannels[i];
+    if (sd->subchannel != NULL) {
+      grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL,
+                                             NULL,
+                                             &sd->connectivity_changed_closure);
+    }
   }
 }
 
@@ -339,8 +257,7 @@ static void rr_cancel_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                                   grpc_connected_subchannel **target,
                                   grpc_error *error) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
-  pending_pick *pp;
-  pp = p->pending_picks;
+  pending_pick *pp = p->pending_picks;
   p->pending_picks = NULL;
   while (pp != NULL) {
     pending_pick *next = pp->next;
@@ -364,8 +281,7 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                                    uint32_t initial_metadata_flags_eq,
                                    grpc_error *error) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
-  pending_pick *pp;
-  pp = p->pending_picks;
+  pending_pick *pp = p->pending_picks;
   p->pending_picks = NULL;
   while (pp != NULL) {
     pending_pick *next = pp->next;
@@ -387,21 +303,16 @@ static void rr_cancel_picks_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
 
 static void start_picking_locked(grpc_exec_ctx *exec_ctx,
                                  round_robin_lb_policy *p) {
-  size_t i;
-  p->started_picking = 1;
-
-  for (i = 0; i < p->num_subchannels; i++) {
-    subchannel_data *sd = p->subchannels[i];
-    /* use some sentinel value outside of the range of grpc_connectivity_state
-     * to signal an undefined previous state. We won't be referring to this
-     * value again and it'll be overwritten after the first call to
-     * rr_connectivity_changed */
-    sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
-    sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
-    GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity");
-    grpc_subchannel_notify_on_state_change(
-        exec_ctx, sd->subchannel, p->base.interested_parties,
-        &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
+  p->started_picking = true;
+  for (size_t i = 0; i < p->num_subchannels; i++) {
+    subchannel_data *sd = &p->subchannels[i];
+    if (sd->subchannel != NULL) {
+      GRPC_LB_POLICY_WEAK_REF(&p->base, "rr_connectivity");
+      grpc_subchannel_notify_on_state_change(
+          exec_ctx, sd->subchannel, p->base.interested_parties,
+          &sd->pending_connectivity_state_unsafe,
+          &sd->connectivity_changed_closure);
+    }
   }
 }
 
@@ -414,39 +325,36 @@ static void rr_exit_idle_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
 
 static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           const grpc_lb_policy_pick_args *pick_args,
-                          grpc_connected_subchannel **target, void **user_data,
+                          grpc_connected_subchannel **target,
+                          grpc_call_context_element *context, void **user_data,
                           grpc_closure *on_complete) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
-  pending_pick *pp;
-  ready_list *selected;
-
-  if (grpc_lb_round_robin_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
     gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
   }
-
-  if ((selected = peek_next_connected_locked(p))) {
+  const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
+  if (next_ready_index < p->num_subchannels) {
     /* readily available, report right away */
+    subchannel_data *sd = &p->subchannels[next_ready_index];
     *target = GRPC_CONNECTED_SUBCHANNEL_REF(
-        grpc_subchannel_get_connected_subchannel(selected->subchannel),
-        "rr_picked");
-
+        grpc_subchannel_get_connected_subchannel(sd->subchannel), "rr_picked");
     if (user_data != NULL) {
-      *user_data = selected->user_data;
+      *user_data = sd->user_data;
     }
-    if (grpc_lb_round_robin_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
       gpr_log(GPR_DEBUG,
-              "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
-              (void *)*target, (void *)selected);
+              "[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (INDEX %lu)",
+              (void *)*target, (unsigned long)next_ready_index);
     }
     /* only advance the last picked pointer if the selection was used */
-    advance_last_picked_locked(p);
+    update_last_ready_subchannel_index_locked(p, next_ready_index);
     return 1;
   } else {
    /* no pick currently available. Save for later in list of pending picks */
    if (!p->started_picking) {
      start_picking_locked(exec_ctx, p);
    }
-    pp = gpr_malloc(sizeof(*pp));
+    pending_pick *pp = gpr_malloc(sizeof(*pp));
    pp->next = p->pending_picks;
    pp->target = target;
    pp->on_complete = on_complete;
@@ -457,25 +365,31 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
   }
 }
 
-static void update_state_counters(subchannel_data *sd) {
+static void update_state_counters_locked(subchannel_data *sd) {
   round_robin_lb_policy *p = sd->policy;
-
-  /* update p->num_transient_failures (resp. p->num_idle): if the previous
-   * state was TRANSIENT_FAILURE (resp. IDLE), decrement
-   * p->num_transient_failures (resp. p->num_idle). */
-  if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+  if (sd->prev_connectivity_state == GRPC_CHANNEL_READY) {
+    GPR_ASSERT(p->num_ready > 0);
+    --p->num_ready;
+  } else if (sd->prev_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
     GPR_ASSERT(p->num_transient_failures > 0);
     --p->num_transient_failures;
   } else if (sd->prev_connectivity_state == GRPC_CHANNEL_IDLE) {
     GPR_ASSERT(p->num_idle > 0);
     --p->num_idle;
   }
+  if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
+    ++p->num_ready;
+  } else if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+    ++p->num_transient_failures;
+  } else if (sd->curr_connectivity_state == GRPC_CHANNEL_IDLE) {
+    ++p->num_idle;
+  }
 }
 
 /* sd is the subchannel_data associted with the updated subchannel.
  * shutdown_error will only be used upon policy transition to TRANSIENT_FAILURE
  * or SHUTDOWN */
-static grpc_connectivity_state update_lb_connectivity_status(
+static grpc_connectivity_state update_lb_connectivity_status_locked(
     grpc_exec_ctx *exec_ctx, subchannel_data *sd, grpc_error *error) {
   /* In priority order. The first rule to match terminates the search (ie, if we
    * are on rule n, all previous rules were unfulfilled).
@@ -497,7 +411,7 @@ static grpc_connectivity_state update_lb_connectivity_status(
    * CHECK: p->num_idle == p->num_subchannels.
    */
   round_robin_lb_policy *p = sd->policy;
-  if (!is_ready_list_empty(p)) { /* 1) READY */
+  if (p->num_ready > 0) { /* 1) READY */
     grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
                                 GRPC_ERROR_NONE, "rr_ready");
     return GRPC_CHANNEL_READY;
@@ -531,32 +445,62 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                            grpc_error *error) {
   subchannel_data *sd = arg;
   round_robin_lb_policy *p = sd->policy;
-  pending_pick *pp;
-
-  GRPC_ERROR_REF(error);
-
+  // Now that we're inside the combiner, copy the pending connectivity
+  // state (which was set by the connectivity state watcher) to
+  // curr_connectivity_state, which is what we use inside of the combiner.
+  sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
+    gpr_log(GPR_DEBUG,
+            "[RR %p] connectivity changed for subchannel %p: "
+            "prev_state=%d new_state=%d",
+            p, sd->subchannel, sd->prev_connectivity_state,
+            sd->curr_connectivity_state);
+  }
+  // If we're shutting down, unref and return.
   if (p->shutdown) {
     GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
-    GRPC_ERROR_UNREF(error);
     return;
   }
-  switch (sd->curr_connectivity_state) {
-    case GRPC_CHANNEL_INIT:
-      GPR_UNREACHABLE_CODE(return );
-    case GRPC_CHANNEL_READY:
-      /* add the newly connected subchannel to the list of connected ones.
-       * Note that it goes to the "end of the line". */
-      sd->ready_list_node = add_connected_sc_locked(p, sd);
+  // Update state counters and determine new overall state.
+  update_state_counters_locked(sd);
+  sd->prev_connectivity_state = sd->curr_connectivity_state;
+  grpc_connectivity_state new_connectivity_state =
+      update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
+  // If the new state is SHUTDOWN, unref the subchannel, and if the new
+  // overall state is SHUTDOWN, clean up.
+  if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+    GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
+    sd->subchannel = NULL;
+    if (sd->user_data != NULL) {
+      GPR_ASSERT(sd->user_data_vtable != NULL);
+      sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+    }
+    if (new_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
+      /* the policy is shutting down. Flush all the pending picks... */
+      pending_pick *pp;
+      while ((pp = p->pending_picks)) {
+        p->pending_picks = pp->next;
+        *pp->target = NULL;
+        grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+        gpr_free(pp);
+      }
+    }
+    /* unref the "rr_connectivity" weak ref from start_picking */
+    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
+  } else {
+    if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
      /* at this point we know there's at least one suitable subchannel. Go
       * ahead and pick one and notify the pending suitors in
       * p->pending_picks. This preemtively replicates rr_pick()'s actions. */
-      ready_list *selected = peek_next_connected_locked(p);
-      GPR_ASSERT(selected != NULL);
+      const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
+      GPR_ASSERT(next_ready_index < p->num_subchannels);
+      subchannel_data *selected = &p->subchannels[next_ready_index];
      if (p->pending_picks != NULL) {
        /* if the selected subchannel is going to be used for the pending
         * picks, update the last picked pointer */
-        advance_last_picked_locked(p);
+        update_last_ready_subchannel_index_locked(p, next_ready_index);
      }
+      pending_pick *pp;
      while ((pp = p->pending_picks)) {
        p->pending_picks = pp->next;
        *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(
@@ -565,74 +509,22 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
        if (pp->user_data != NULL) {
          *pp->user_data = selected->user_data;
        }
-        if (grpc_lb_round_robin_trace) {
+        if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
          gpr_log(GPR_DEBUG,
-                  "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
-                  (void *)selected->subchannel, (void *)selected);
+                  "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (INDEX %lu)",
+                  (void *)selected->subchannel,
+                  (unsigned long)next_ready_index);
        }
        grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
        gpr_free(pp);
      }
-      update_lb_connectivity_status(exec_ctx, sd, error);
-      sd->prev_connectivity_state = sd->curr_connectivity_state;
-      /* renew notification: reuses the "rr_connectivity" weak ref */
-      grpc_subchannel_notify_on_state_change(
-          exec_ctx, sd->subchannel, p->base.interested_parties,
-          &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
-      break;
-    case GRPC_CHANNEL_IDLE:
-      ++p->num_idle;
-    /* fallthrough */
-    case GRPC_CHANNEL_CONNECTING:
-      update_state_counters(sd);
-      update_lb_connectivity_status(exec_ctx, sd, error);
-      sd->prev_connectivity_state = sd->curr_connectivity_state;
-      /* renew notification: reuses the "rr_connectivity" weak ref */
-      grpc_subchannel_notify_on_state_change(
-          exec_ctx, sd->subchannel, p->base.interested_parties,
-          &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
-      break;
-    case GRPC_CHANNEL_TRANSIENT_FAILURE:
-      ++p->num_transient_failures;
-      /* remove from ready list if still present */
-      if (sd->ready_list_node != NULL) {
-        remove_disconnected_sc_locked(p, sd->ready_list_node);
-        sd->ready_list_node = NULL;
-      }
-      update_lb_connectivity_status(exec_ctx, sd, error);
-      sd->prev_connectivity_state = sd->curr_connectivity_state;
-      /* renew notification: reuses the "rr_connectivity" weak ref */
-      grpc_subchannel_notify_on_state_change(
-          exec_ctx, sd->subchannel, p->base.interested_parties,
-          &sd->curr_connectivity_state, &sd->connectivity_changed_closure);
-      break;
-    case GRPC_CHANNEL_SHUTDOWN:
-      update_state_counters(sd);
-      if (sd->ready_list_node != NULL) {
-        remove_disconnected_sc_locked(p, sd->ready_list_node);
-        sd->ready_list_node = NULL;
-      }
-      --p->num_subchannels;
-      GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
-               p->subchannels[p->num_subchannels]);
-      GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "rr_subchannel_shutdown");
-      p->subchannels[sd->index]->index = sd->index;
-      if (update_lb_connectivity_status(exec_ctx, sd, error) ==
-          GRPC_CHANNEL_SHUTDOWN) {
-        /* the policy is shutting down. Flush all the pending picks... */
-        while ((pp = p->pending_picks)) {
-          p->pending_picks = pp->next;
-          *pp->target = NULL;
-          grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
-          gpr_free(pp);
-        }
-      }
-      gpr_free(sd);
-      /* unref the "rr_connectivity" weak ref from start_picking */
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "rr_connectivity");
-      break;
+    }
+    /* renew notification: reuses the "rr_connectivity" weak ref */
+    grpc_subchannel_notify_on_state_change(
+        exec_ctx, sd->subchannel, p->base.interested_parties,
+        &sd->pending_connectivity_state_unsafe,
+        &sd->connectivity_changed_closure);
   }
-  GRPC_ERROR_UNREF(error);
 }
 
 static grpc_connectivity_state rr_check_connectivity_locked(
@@ -653,10 +545,10 @@ static void rr_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx,
 static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                                grpc_closure *closure) {
   round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
-  ready_list *selected;
-  grpc_connected_subchannel *target;
-  if ((selected = peek_next_connected_locked(p))) {
-    target = GRPC_CONNECTED_SUBCHANNEL_REF(
+  const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
+  if (next_ready_index < p->num_subchannels) {
+    subchannel_data *selected = &p->subchannels[next_ready_index];
+    grpc_connected_subchannel *target = GRPC_CONNECTED_SUBCHANNEL_REF(
        grpc_subchannel_get_connected_subchannel(selected->subchannel),
        "rr_picked");
    grpc_connected_subchannel_ping(exec_ctx, target, closure);
@@ -707,7 +599,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
   p->subchannels = gpr_zalloc(sizeof(*p->subchannels) * num_addrs);
 
   grpc_subchannel_args sc_args;
-  size_t subchannel_idx = 0;
+  size_t subchannel_index = 0;
   for (size_t i = 0; i < addresses->num_addresses; i++) {
     /* Skip balancer addresses, since we only know how to handle backends. */
     if (addresses->addresses[i].is_balancer) continue;
@@ -723,51 +615,53 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
    sc_args.args = new_args;
    grpc_subchannel *subchannel = grpc_client_channel_factory_create_subchannel(
        exec_ctx, args->client_channel_factory, &sc_args);
-    if (grpc_lb_round_robin_trace) {
+    if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
      char *address_uri =
          grpc_sockaddr_to_uri(&addresses->addresses[i].address);
-      gpr_log(GPR_DEBUG, "Created subchannel %p for address uri %s",
-              (void *)subchannel, address_uri);
+      gpr_log(GPR_DEBUG, "index %lu: Created subchannel %p for address uri %s",
+              (unsigned long)subchannel_index, (void *)subchannel, address_uri);
      gpr_free(address_uri);
    }
    grpc_channel_args_destroy(exec_ctx, new_args);
 
    if (subchannel != NULL) {
-      subchannel_data *sd = gpr_zalloc(sizeof(*sd));
-      p->subchannels[subchannel_idx] = sd;
+      subchannel_data *sd = &p->subchannels[subchannel_index];
      sd->policy = p;
-      sd->index = subchannel_idx;
      sd->subchannel = subchannel;
+      /* use some sentinel value outside of the range of grpc_connectivity_state
+       * to signal an undefined previous state. We won't be referring to this
+       * value again and it'll be overwritten after the first call to
+       * rr_connectivity_changed */
+      sd->prev_connectivity_state = GRPC_CHANNEL_INIT;
+      sd->curr_connectivity_state = GRPC_CHANNEL_IDLE;
      sd->user_data_vtable = addresses->user_data_vtable;
      if (sd->user_data_vtable != NULL) {
        sd->user_data =
            sd->user_data_vtable->copy(addresses->addresses[i].user_data);
      }
-      ++subchannel_idx;
      grpc_closure_init(&sd->connectivity_changed_closure,
                        rr_connectivity_changed_locked, sd,
                        grpc_combiner_scheduler(args->combiner, false));
+      ++subchannel_index;
    }
  }
-  if (subchannel_idx == 0) {
+  if (subchannel_index == 0) {
    /* couldn't create any subchannel. Bail out */
    gpr_free(p->subchannels);
    gpr_free(p);
    return NULL;
  }
-  p->num_subchannels = subchannel_idx;
+  p->num_subchannels = subchannel_index;
 
-  /* The (dummy node) root of the ready list */
-  p->ready_list.subchannel = NULL;
-  p->ready_list.prev = NULL;
-  p->ready_list.next = NULL;
-  p->ready_list_last_pick = &p->ready_list;
+  // Initialize the last pick index to the last subchannel, so that the
+  // first pick will start at the beginning of the list.
+  p->last_ready_subchannel_index = subchannel_index - 1;
 
   grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
   grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
                                "round_robin");
 
-  if (grpc_lb_round_robin_trace) {
+  if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
    gpr_log(GPR_DEBUG, "Created RR policy at %p with %lu subchannels",
            (void *)p, (unsigned long)p->num_subchannels);
  }
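
For readers skimming the round_robin.c rewrite above: the doubly linked ready_list is gone, and the next pick is found by scanning the flat subchannel array starting one slot past the last pick and wrapping around, as in get_next_ready_subchannel_index_locked(). The stand-alone sketch below reproduces just that scan over a toy state array; toy_state and next_ready_index are illustrative names, not gem code.

/* Sketch of the wrap-around scan used by the new round_robin policy. */
#include <stddef.h>
#include <stdio.h>

typedef enum { IDLE, CONNECTING, READY, TRANSIENT_FAILURE } toy_state;

/* Mirrors get_next_ready_subchannel_index_locked(): start one past the last
 * pick, wrap modulo the number of subchannels, and return the count itself
 * as a sentinel when nothing is READY. */
static size_t next_ready_index(const toy_state *states, size_t num_subchannels,
                               size_t last_ready_index) {
  for (size_t i = 0; i < num_subchannels; ++i) {
    const size_t index = (i + last_ready_index + 1) % num_subchannels;
    if (states[index] == READY) return index;
  }
  return num_subchannels;
}

int main(void) {
  toy_state states[4] = {READY, TRANSIENT_FAILURE, READY, IDLE};
  /* round_robin_create() seeds the last pick with the final index so the
   * first real pick lands on subchannel 0. */
  size_t last = 3;
  for (int pick = 0; pick < 4; ++pick) {
    size_t index = next_ready_index(states, 4, last);
    if (index == 4) {
      printf("no READY subchannel\n");
      break;
    }
    printf("pick %d -> subchannel %zu\n", pick, index);
    last = index; /* advanced only when the pick is actually used */
  }
  return 0;
}

Skipping non-READY entries in place during the scan is what replaces the old add/remove bookkeeping on the linked ready_list.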