grpc 1.49.1 → 1.50.0.pre1


Potentially problematic release: this version of grpc has been flagged as possibly problematic.

Files changed (270)
  1. checksums.yaml +4 -4
  2. data/Makefile +54 -153
  3. data/include/grpc/event_engine/endpoint_config.h +11 -5
  4. data/include/grpc/event_engine/event_engine.h +1 -1
  5. data/include/grpc/impl/codegen/atm_gcc_atomic.h +19 -28
  6. data/include/grpc/impl/codegen/atm_gcc_sync.h +0 -2
  7. data/include/grpc/impl/codegen/atm_windows.h +0 -2
  8. data/include/grpc/impl/codegen/grpc_types.h +6 -0
  9. data/src/core/ext/filters/channel_idle/channel_idle_filter.cc +3 -3
  10. data/src/core/ext/filters/client_channel/backup_poller.cc +4 -6
  11. data/src/core/ext/filters/client_channel/client_channel.cc +33 -22
  12. data/src/core/ext/filters/client_channel/client_channel.h +1 -1
  13. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +0 -16
  14. data/src/core/ext/filters/client_channel/http_proxy.cc +12 -19
  15. data/src/core/ext/filters/client_channel/http_proxy.h +3 -2
  16. data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc +6 -4
  17. data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h +5 -4
  18. data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +0 -2
  19. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +112 -96
  20. data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc +20 -11
  21. data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc +106 -108
  22. data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.h +16 -0
  23. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +20 -13
  24. data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +165 -257
  25. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +218 -231
  26. data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.h +10 -6
  27. data/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +389 -444
  28. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +16 -16
  29. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +8 -13
  30. data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +84 -96
  31. data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +38 -37
  32. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +106 -186
  33. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +106 -93
  34. data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +170 -218
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +2 -2
  36. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +1 -1
  37. data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +13 -15
  38. data/src/core/ext/filters/client_channel/resolver/polling_resolver.cc +84 -37
  39. data/src/core/ext/filters/client_channel/resolver/polling_resolver.h +11 -0
  40. data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +1 -0
  41. data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +5 -3
  42. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +5 -4
  43. data/src/core/ext/filters/client_channel/retry_filter.cc +25 -29
  44. data/src/core/ext/filters/client_channel/subchannel.cc +38 -33
  45. data/src/core/ext/filters/client_channel/subchannel.h +12 -3
  46. data/src/core/ext/filters/client_channel/subchannel_stream_client.cc +1 -2
  47. data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +23 -16
  48. data/src/core/ext/filters/fault_injection/fault_injection_filter.h +8 -0
  49. data/src/core/ext/filters/http/client/http_client_filter.cc +1 -2
  50. data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +2 -4
  51. data/src/core/ext/filters/http/message_compress/message_decompress_filter.cc +0 -2
  52. data/src/core/ext/filters/http/server/http_server_filter.cc +1 -2
  53. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +12 -8
  54. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +32 -26
  55. data/src/core/ext/transport/chttp2/transport/bin_encoder.cc +1 -1
  56. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +25 -130
  57. data/src/core/ext/transport/chttp2/transport/decode_huff.cc +287 -0
  58. data/src/core/ext/transport/chttp2/transport/decode_huff.h +1018 -0
  59. data/src/core/ext/transport/chttp2/transport/flow_control.cc +83 -51
  60. data/src/core/ext/transport/chttp2/transport/flow_control.h +11 -6
  61. data/src/core/ext/transport/chttp2/transport/frame_ping.cc +1 -2
  62. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +2 -20
  63. data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +28 -28
  64. data/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc +1 -10
  65. data/src/core/ext/transport/chttp2/transport/hpack_parser_table.h +11 -6
  66. data/src/core/ext/transport/chttp2/transport/internal.h +2 -0
  67. data/src/core/ext/transport/chttp2/transport/parsing.cc +44 -0
  68. data/src/core/ext/transport/chttp2/transport/writing.cc +3 -14
  69. data/src/core/ext/transport/inproc/inproc_transport.cc +1 -3
  70. data/src/core/ext/xds/certificate_provider_store.cc +63 -3
  71. data/src/core/ext/xds/certificate_provider_store.h +9 -1
  72. data/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +5 -5
  73. data/src/core/ext/xds/file_watcher_certificate_provider_factory.h +1 -1
  74. data/src/core/ext/xds/xds_api.cc +21 -17
  75. data/src/core/ext/xds/xds_api.h +7 -0
  76. data/src/core/ext/xds/xds_bootstrap.cc +5 -537
  77. data/src/core/ext/xds/xds_bootstrap.h +39 -111
  78. data/src/core/ext/xds/xds_bootstrap_grpc.cc +370 -0
  79. data/src/core/ext/xds/xds_bootstrap_grpc.h +169 -0
  80. data/src/core/ext/xds/xds_client.cc +219 -145
  81. data/src/core/ext/xds/xds_client.h +19 -17
  82. data/src/core/ext/xds/xds_client_grpc.cc +18 -80
  83. data/src/core/ext/xds/xds_client_grpc.h +2 -25
  84. data/src/core/ext/xds/xds_client_stats.cc +4 -4
  85. data/src/core/ext/xds/xds_cluster.cc +87 -79
  86. data/src/core/ext/xds/xds_cluster.h +5 -5
  87. data/src/core/ext/xds/xds_cluster_specifier_plugin.cc +3 -1
  88. data/src/core/ext/xds/xds_common_types.cc +13 -5
  89. data/src/core/ext/xds/xds_endpoint.cc +8 -6
  90. data/src/core/ext/xds/xds_endpoint.h +3 -4
  91. data/src/core/ext/xds/xds_lb_policy_registry.cc +4 -2
  92. data/src/core/ext/xds/xds_listener.cc +25 -20
  93. data/src/core/ext/xds/xds_listener.h +3 -4
  94. data/src/core/ext/xds/xds_resource_type.h +11 -8
  95. data/src/core/ext/xds/xds_route_config.cc +15 -16
  96. data/src/core/ext/xds/xds_route_config.h +3 -3
  97. data/src/core/ext/xds/xds_server_config_fetcher.cc +7 -5
  98. data/src/core/ext/xds/xds_transport_grpc.cc +15 -7
  99. data/src/core/lib/backoff/backoff.cc +2 -4
  100. data/src/core/lib/channel/call_finalization.h +1 -3
  101. data/src/core/lib/channel/channel_args.h +114 -14
  102. data/src/core/lib/channel/channel_trace.cc +3 -4
  103. data/src/core/lib/channel/promise_based_filter.cc +18 -19
  104. data/src/core/lib/channel/status_util.cc +27 -0
  105. data/src/core/lib/channel/status_util.h +10 -0
  106. data/src/core/lib/config/core_configuration.cc +5 -1
  107. data/src/core/lib/config/core_configuration.h +33 -0
  108. data/src/core/lib/debug/stats.cc +26 -30
  109. data/src/core/lib/debug/stats.h +2 -12
  110. data/src/core/lib/debug/stats_data.cc +118 -614
  111. data/src/core/lib/debug/stats_data.h +67 -465
  112. data/src/core/lib/debug/trace.cc +0 -2
  113. data/src/core/lib/event_engine/channel_args_endpoint_config.cc +12 -20
  114. data/src/core/lib/event_engine/channel_args_endpoint_config.h +13 -7
  115. data/src/core/lib/event_engine/forkable.cc +1 -1
  116. data/src/core/lib/event_engine/poller.h +14 -12
  117. data/src/core/lib/event_engine/posix_engine/timer_manager.cc +53 -32
  118. data/src/core/lib/event_engine/posix_engine/timer_manager.h +23 -1
  119. data/src/core/lib/event_engine/thread_pool.cc +131 -94
  120. data/src/core/lib/event_engine/thread_pool.h +56 -23
  121. data/src/core/lib/event_engine/time_util.cc +30 -0
  122. data/src/core/lib/event_engine/time_util.h +32 -0
  123. data/src/core/lib/event_engine/utils.cc +0 -5
  124. data/src/core/lib/event_engine/utils.h +0 -4
  125. data/src/core/lib/event_engine/windows/iocp.cc +13 -7
  126. data/src/core/lib/event_engine/windows/iocp.h +2 -1
  127. data/src/core/lib/event_engine/windows/win_socket.cc +1 -1
  128. data/src/core/lib/experiments/config.cc +146 -0
  129. data/src/core/lib/experiments/config.h +43 -0
  130. data/src/core/lib/experiments/experiments.cc +75 -0
  131. data/src/core/lib/experiments/experiments.h +56 -0
  132. data/src/core/lib/gpr/alloc.cc +1 -9
  133. data/src/core/lib/gpr/log_windows.cc +0 -1
  134. data/src/core/lib/gpr/string_util_windows.cc +3 -30
  135. data/src/core/lib/gpr/sync_abseil.cc +0 -14
  136. data/src/core/lib/gpr/sync_posix.cc +0 -14
  137. data/src/core/lib/gpr/time_posix.cc +0 -6
  138. data/src/core/lib/gpr/time_precise.h +1 -1
  139. data/src/core/lib/gpr/tmpfile_windows.cc +5 -7
  140. data/src/core/lib/gpr/useful.h +11 -0
  141. data/src/core/lib/{gpr → gprpp}/env.h +25 -12
  142. data/src/core/lib/{gpr → gprpp}/env_linux.cc +20 -15
  143. data/src/core/lib/{gpr → gprpp}/env_posix.cc +11 -10
  144. data/src/core/lib/gprpp/env_windows.cc +56 -0
  145. data/src/core/lib/gprpp/fork.cc +14 -22
  146. data/src/core/lib/gprpp/fork.h +0 -8
  147. data/src/core/lib/gprpp/global_config_env.cc +7 -6
  148. data/src/core/lib/gprpp/notification.h +67 -0
  149. data/src/core/lib/gprpp/packed_table.h +40 -0
  150. data/src/core/lib/gprpp/ref_counted_ptr.h +20 -33
  151. data/src/core/lib/gprpp/sorted_pack.h +98 -0
  152. data/src/core/lib/gprpp/status_helper.h +6 -0
  153. data/src/core/lib/gprpp/table.h +9 -1
  154. data/src/core/lib/gprpp/tchar.cc +49 -0
  155. data/src/core/lib/gprpp/tchar.h +33 -0
  156. data/src/core/lib/gprpp/time.cc +21 -0
  157. data/src/core/lib/gprpp/time.h +55 -0
  158. data/src/core/lib/gprpp/validation_errors.cc +61 -0
  159. data/src/core/lib/gprpp/validation_errors.h +110 -0
  160. data/src/core/{ext/filters/client_channel → lib/handshaker}/proxy_mapper.h +3 -3
  161. data/src/core/{ext/filters/client_channel → lib/handshaker}/proxy_mapper_registry.cc +14 -36
  162. data/src/core/lib/handshaker/proxy_mapper_registry.h +75 -0
  163. data/src/core/lib/iomgr/call_combiner.cc +0 -8
  164. data/src/core/lib/iomgr/closure.h +0 -1
  165. data/src/core/lib/iomgr/endpoint_pair_posix.cc +14 -10
  166. data/src/core/lib/iomgr/endpoint_pair_windows.cc +2 -2
  167. data/src/core/lib/iomgr/ev_epoll1_linux.cc +1 -38
  168. data/src/core/lib/iomgr/ev_poll_posix.cc +2 -17
  169. data/src/core/lib/iomgr/exec_ctx.cc +0 -10
  170. data/src/core/lib/iomgr/exec_ctx.h +7 -31
  171. data/src/core/lib/iomgr/iocp_windows.cc +1 -2
  172. data/src/core/lib/iomgr/iomgr.cc +6 -8
  173. data/src/core/lib/iomgr/iomgr_fwd.h +1 -0
  174. data/src/core/lib/iomgr/pollset.h +1 -1
  175. data/src/core/lib/iomgr/pollset_set.h +0 -1
  176. data/src/core/lib/iomgr/resolve_address.h +1 -0
  177. data/src/core/lib/iomgr/resolve_address_impl.h +1 -0
  178. data/src/core/lib/iomgr/resolve_address_posix.cc +1 -0
  179. data/src/core/lib/iomgr/resolve_address_windows.cc +1 -0
  180. data/src/core/lib/iomgr/sockaddr_utils_posix.cc +2 -1
  181. data/src/core/lib/iomgr/socket_utils_common_posix.cc +12 -34
  182. data/src/core/lib/iomgr/socket_utils_posix.cc +83 -1
  183. data/src/core/lib/iomgr/socket_utils_posix.h +98 -6
  184. data/src/core/lib/iomgr/tcp_client.cc +6 -7
  185. data/src/core/lib/iomgr/tcp_client.h +11 -11
  186. data/src/core/lib/iomgr/tcp_client_cfstream.cc +6 -6
  187. data/src/core/lib/iomgr/tcp_client_posix.cc +33 -29
  188. data/src/core/lib/iomgr/tcp_client_posix.h +12 -9
  189. data/src/core/lib/iomgr/tcp_client_windows.cc +6 -6
  190. data/src/core/lib/iomgr/tcp_posix.cc +131 -114
  191. data/src/core/lib/iomgr/tcp_posix.h +3 -1
  192. data/src/core/lib/iomgr/tcp_server.cc +5 -4
  193. data/src/core/lib/iomgr/tcp_server.h +9 -6
  194. data/src/core/lib/iomgr/tcp_server_posix.cc +17 -28
  195. data/src/core/lib/iomgr/tcp_server_utils_posix.h +2 -2
  196. data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +3 -3
  197. data/src/core/lib/iomgr/tcp_server_windows.cc +6 -7
  198. data/src/core/lib/iomgr/tcp_windows.cc +0 -1
  199. data/src/core/lib/iomgr/tcp_windows.h +0 -1
  200. data/src/core/lib/iomgr/timer_generic.cc +4 -4
  201. data/src/core/lib/iomgr/timer_manager.cc +1 -2
  202. data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +0 -2
  203. data/src/core/lib/json/json_object_loader.cc +21 -52
  204. data/src/core/lib/json/json_object_loader.h +56 -76
  205. data/src/core/lib/json/json_util.cc +2 -1
  206. data/src/core/lib/load_balancing/lb_policy.h +5 -5
  207. data/src/core/lib/load_balancing/lb_policy_registry.cc +29 -55
  208. data/src/core/lib/load_balancing/lb_policy_registry.h +23 -11
  209. data/src/core/lib/promise/activity.h +2 -3
  210. data/src/core/lib/promise/context.h +1 -1
  211. data/src/core/lib/promise/sleep.cc +16 -4
  212. data/src/core/lib/promise/sleep.h +8 -2
  213. data/src/core/lib/resolver/resolver.h +13 -3
  214. data/src/core/lib/resource_quota/api.cc +9 -0
  215. data/src/core/lib/resource_quota/api.h +6 -0
  216. data/src/core/lib/resource_quota/arena.cc +1 -3
  217. data/src/core/lib/resource_quota/memory_quota.cc +8 -24
  218. data/src/core/lib/resource_quota/memory_quota.h +6 -19
  219. data/src/core/lib/resource_quota/periodic_update.cc +2 -3
  220. data/src/core/{ext/xds → lib/security/certificate_provider}/certificate_provider_factory.h +3 -3
  221. data/src/core/lib/security/certificate_provider/certificate_provider_registry.cc +60 -0
  222. data/src/core/lib/security/certificate_provider/certificate_provider_registry.h +70 -0
  223. data/src/core/lib/security/credentials/channel_creds_registry_init.cc +1 -0
  224. data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +15 -16
  225. data/src/core/lib/security/credentials/external/external_account_credentials.cc +2 -1
  226. data/src/core/lib/security/credentials/google_default/credentials_generic.cc +5 -8
  227. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +6 -6
  228. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +3 -2
  229. data/src/core/lib/security/credentials/jwt/jwt_verifier.h +1 -1
  230. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +1 -2
  231. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +4 -3
  232. data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.h +4 -2
  233. data/src/core/lib/security/credentials/tls/tls_utils.cc +3 -1
  234. data/src/core/lib/security/transport/client_auth_filter.cc +12 -1
  235. data/src/core/lib/security/transport/secure_endpoint.cc +0 -4
  236. data/src/core/lib/surface/call.cc +1 -11
  237. data/src/core/lib/surface/channel.cc +3 -2
  238. data/src/core/lib/surface/completion_queue.cc +16 -28
  239. data/src/core/lib/surface/completion_queue.h +1 -1
  240. data/src/core/lib/surface/completion_queue_factory.cc +5 -0
  241. data/src/core/lib/surface/init.cc +16 -11
  242. data/src/core/lib/surface/init_internally.cc +24 -0
  243. data/src/core/lib/surface/init_internally.h +28 -0
  244. data/src/core/lib/surface/server.cc +1 -7
  245. data/src/core/lib/surface/server.h +4 -6
  246. data/src/core/lib/surface/version.cc +2 -2
  247. data/src/core/lib/transport/bdp_estimator.cc +1 -3
  248. data/src/core/lib/transport/metadata_batch.cc +2 -3
  249. data/src/core/lib/transport/metadata_batch.h +9 -7
  250. data/src/core/lib/transport/parsed_metadata.h +4 -2
  251. data/src/core/lib/transport/status_conversion.cc +1 -3
  252. data/src/core/lib/transport/tcp_connect_handshaker.cc +9 -5
  253. data/src/core/lib/transport/transport.h +0 -1
  254. data/src/core/lib/transport/transport_impl.h +0 -1
  255. data/src/core/plugin_registry/grpc_plugin_registry.cc +23 -46
  256. data/src/core/plugin_registry/grpc_plugin_registry_extra.cc +13 -25
  257. data/src/ruby/lib/grpc/version.rb +1 -1
  258. data/src/ruby/spec/channel_spec.rb +5 -0
  259. data/src/ruby/spec/generic/server_interceptors_spec.rb +1 -1
  260. data/src/ruby/spec/user_agent_spec.rb +1 -1
  261. metadata +33 -19
  262. data/src/core/ext/filters/client_channel/proxy_mapper_registry.h +0 -56
  263. data/src/core/ext/xds/certificate_provider_registry.cc +0 -103
  264. data/src/core/ext/xds/certificate_provider_registry.h +0 -59
  265. data/src/core/lib/event_engine/promise.h +0 -78
  266. data/src/core/lib/gpr/env_windows.cc +0 -74
  267. data/src/core/lib/gpr/string_windows.h +0 -32
  268. data/src/core/lib/profiling/basic_timers.cc +0 -295
  269. data/src/core/lib/profiling/stap_timers.cc +0 -50
  270. data/src/core/lib/profiling/timers.h +0 -94
@@ -17,24 +17,30 @@
 #include <grpc/support/port_platform.h>
 
 #include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
 
 #include <grpc/event_engine/endpoint_config.h>
-#include <grpc/impl/codegen/grpc_types.h>
+
+#include "src/core/lib/channel/channel_args.h"
 
 namespace grpc_event_engine {
 namespace experimental {
 
-/// A readonly \a EndpointConfig based on grpc_channel_args. This class does not
-/// take ownership of the grpc_endpoint_args*, and instances of this class
-/// should not be used after the underlying args are destroyed.
 class ChannelArgsEndpointConfig : public EndpointConfig {
  public:
-  explicit ChannelArgsEndpointConfig(const grpc_channel_args* args)
+  ChannelArgsEndpointConfig() = default;
+  explicit ChannelArgsEndpointConfig(const grpc_core::ChannelArgs& args)
       : args_(args) {}
-  Setting Get(absl::string_view key) const override;
+  ChannelArgsEndpointConfig(const ChannelArgsEndpointConfig& config) = default;
+  ChannelArgsEndpointConfig& operator=(const ChannelArgsEndpointConfig& other) =
+      default;
+  absl::optional<int> GetInt(absl::string_view key) const override;
+  absl::optional<absl::string_view> GetString(
+      absl::string_view key) const override;
+  void* GetVoidPointer(absl::string_view key) const override;
 
  private:
-  const grpc_channel_args* args_;
+  grpc_core::ChannelArgs args_;
 };
 
 } // namespace experimental
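The hunk above (apparently channel_args_endpoint_config.h, judging by the changed symbols) replaces the single variant-style Get() accessor with typed accessors over a value-semantics grpc_core::ChannelArgs. A minimal sketch of reading settings through the new interface; the key names below are illustrative examples and are not taken from this diff.

// Sketch only: reading endpoint settings through the new typed accessors.
// The EndpointConfig& would come from the EventEngine; key names are
// illustrative assumptions.
#include <grpc/event_engine/endpoint_config.h>

#include "absl/types/optional.h"

void ApplyConfig(const grpc_event_engine::experimental::EndpointConfig& config) {
  // Integer-valued channel arg; absl::nullopt means "not set".
  absl::optional<int> keepalive_ms = config.GetInt("grpc.keepalive_time_ms");
  // String-valued channel arg; the string_view is only valid while the
  // underlying args are alive.
  absl::optional<absl::string_view> user_agent =
      config.GetString("grpc.primary_user_agent");
  if (keepalive_ms.has_value()) { /* ... */ }
  if (user_agent.has_value()) { /* ... */ }
}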
@@ -41,7 +41,7 @@ Forkable::~Forkable() { StopManagingForkable(this); }
 
 void RegisterForkHandlers() {
   grpc_core::MutexLock lock(g_mu.get());
-  if (!absl::exchange(g_registered, true)) {
+  if (!std::exchange(g_registered, true)) {
     pthread_atfork(PrepareFork, PostforkParent, PostforkChild);
   }
 };
@@ -16,8 +16,7 @@
 
 #include <grpc/support/port_platform.h>
 
-#include "absl/container/inlined_vector.h"
-#include "absl/types/variant.h"
+#include "absl/functional/function_ref.h"
 
 #include <grpc/event_engine/event_engine.h>
 
@@ -30,20 +29,23 @@ namespace experimental {
 // Work(...).
 class Poller {
  public:
-  // This initial vector size may need to be tuned
-  using Events = absl::InlinedVector<EventEngine::Closure*, 5>;
-  struct DeadlineExceeded {};
-  struct Kicked {};
-  using WorkResult = absl::variant<Events, DeadlineExceeded, Kicked>;
+  enum class WorkResult { kOk, kDeadlineExceeded, kKicked };
 
   virtual ~Poller() = default;
-  // Poll once for events, returning a collection of Closures to be executed.
+  // Poll once for events and process received events. The callback function
+  // "schedule_poll_again" is expected to be run synchronously prior to
+  // processing received events. The callback's responsibility primarily is to
+  // schedule Poller::Work asynchronously again. This would ensure that the next
+  // polling cycle would run as quickly as possible to ensure continuous
+  // polling.
   //
   // Returns:
-  // * absl::AbortedError if it was Kicked.
-  // * absl::DeadlineExceeded if timeout occurred
-  // * A collection of closures to execute, otherwise
-  virtual WorkResult Work(EventEngine::Duration timeout) = 0;
+  // * Poller::WorkResult::kKicked if it was Kicked.
+  // * Poller::WorkResult::kDeadlineExceeded if timeout occurred
+  // * Poller::WorkResult::kOk, otherwise indicating that the callback function
+  //   was run synchonously before some events were processed.
+  virtual WorkResult Work(EventEngine::Duration timeout,
+                          absl::FunctionRef<void()> schedule_poll_again) = 0;
   // Trigger the threads executing Work(..) to break out as soon as possible.
   virtual void Kick() = 0;
 };
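The new Work() signature moves event processing inside the poller and takes a callback that must re-arm polling before any received events are handled. A hedged sketch of the calling pattern implied by the comment above; ScheduleOnExecutor is a hypothetical helper, not part of this diff.

// Sketch (not from this diff): driving the new Poller::Work() contract.
#include <chrono>

#include "absl/functional/any_invocable.h"

#include "src/core/lib/event_engine/poller.h"

using ::grpc_event_engine::experimental::Poller;

// Hypothetical: runs a closure on some other thread.
void ScheduleOnExecutor(absl::AnyInvocable<void()> fn);

void PollLoop(Poller* poller) {
  Poller::WorkResult result =
      poller->Work(std::chrono::seconds(1), [poller]() {
        // Runs synchronously before received events are processed: re-arm
        // polling so the next cycle starts with no gap.
        ScheduleOnExecutor([poller]() { PollLoop(poller); });
      });
  switch (result) {
    case Poller::WorkResult::kOk:                // callback ran, events handled
    case Poller::WorkResult::kDeadlineExceeded:  // timed out with no events
    case Poller::WorkResult::kKicked:            // woken by Kick()
      break;
  }
}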
@@ -32,11 +32,17 @@
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
 
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gprpp/thd.h"
 
+static GPR_THREAD_LOCAL(bool) g_timer_thread;
+
 namespace grpc_event_engine {
 namespace posix_engine {
 
+grpc_core::DebugOnlyTraceFlag grpc_event_engine_timer_trace(false, "timer");
+
 namespace {
 class ThreadCollector {
  public:
@@ -88,7 +94,7 @@ void TimerManager::RunSomeTimers(
     // if there's no thread waiting with a timeout, kick an existing untimed
     // waiter so that the next deadline is not missed
     if (!has_timed_waiter_) {
-      cv_.Signal();
+      cv_wait_.Signal();
    }
  }
}
@@ -151,8 +157,8 @@ bool TimerManager::WaitUntil(grpc_core::Timestamp next) {
     }
   }
 
-  cv_.WaitWithTimeout(&mu_,
-                      absl::Milliseconds((next - host_.Now()).millis()));
+  cv_wait_.WaitWithTimeout(&mu_,
+                           absl::Milliseconds((next - host_.Now()).millis()));
 
   // if this was the timed waiter, then we need to check timers, and flag
   // that there's now no timed waiter... we'll look for a replacement if
@@ -196,16 +202,29 @@ void TimerManager::MainLoop() {
 }
 
 void TimerManager::RunThread(void* arg) {
+  g_timer_thread = true;
   std::unique_ptr<RunThreadArgs> thread(static_cast<RunThreadArgs*>(arg));
-  thread->self->MainLoop();
-  {
-    grpc_core::MutexLock lock(&thread->self->mu_);
-    thread->self->thread_count_--;
-    thread->self->completed_threads_.push_back(std::move(thread->thread));
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p starting thread::%p", thread->self,
+            &thread->thread);
   }
-  thread->self->cv_.Signal();
+  thread->self->Run(std::move(thread->thread));
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p thread::%p finished", thread->self,
+            &thread->thread);
+  }
+}
+
+void TimerManager::Run(grpc_core::Thread thread) {
+  MainLoop();
+  grpc_core::MutexLock lock(&mu_);
+  completed_threads_.push_back(std::move(thread));
+  thread_count_--;
+  if (thread_count_ == 0) cv_threadcount_.Signal();
 }
 
+bool TimerManager::IsTimerManagerThread() { return g_timer_thread; }
+
 TimerManager::TimerManager() : host_(this) {
   timer_list_ = absl::make_unique<TimerList>(&host_);
   grpc_core::MutexLock lock(&mu_);
@@ -227,17 +246,23 @@ bool TimerManager::TimerCancel(Timer* timer) {
 }
 
 TimerManager::~TimerManager() {
-  {
-    grpc_core::MutexLock lock(&mu_);
-    shutdown_ = true;
-    cv_.SignalAll();
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p shutting down", this);
   }
-  while (true) {
-    ThreadCollector collector;
-    grpc_core::MutexLock lock(&mu_);
-    collector.Collect(std::move(completed_threads_));
-    if (thread_count_ == 0) break;
-    cv_.Wait(&mu_);
+  ThreadCollector collector;
+  grpc_core::MutexLock lock(&mu_);
+  shutdown_ = true;
+  cv_wait_.SignalAll();
+  while (thread_count_ > 0) {
+    cv_threadcount_.Wait(&mu_);
+    if (grpc_event_engine_timer_trace.enabled()) {
+      gpr_log(GPR_DEBUG, "TimerManager::%p waiting for %zu threads to finish",
+              this, thread_count_);
+    }
+  }
+  collector.Collect(std::move(completed_threads_));
+  if (grpc_event_engine_timer_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "TimerManager::%p shutdown complete", this);
   }
 }
 
@@ -249,23 +274,19 @@ void TimerManager::Kick() {
   timed_waiter_deadline_ = grpc_core::Timestamp::InfFuture();
   ++timed_waiter_generation_;
   kicked_ = true;
-  cv_.Signal();
+  cv_wait_.Signal();
 }
 
 void TimerManager::PrepareFork() {
-  {
-    grpc_core::MutexLock lock(&mu_);
-    forking_ = true;
-    prefork_thread_count_ = thread_count_;
-    cv_.SignalAll();
-  }
-  while (true) {
-    grpc_core::MutexLock lock(&mu_);
-    ThreadCollector collector;
-    collector.Collect(std::move(completed_threads_));
-    if (thread_count_ == 0) break;
-    cv_.Wait(&mu_);
+  ThreadCollector collector;
+  grpc_core::MutexLock lock(&mu_);
+  forking_ = true;
+  prefork_thread_count_ = thread_count_;
+  cv_wait_.SignalAll();
+  while (thread_count_ > 0) {
+    cv_threadcount_.Wait(&mu_);
   }
+  collector.Collect(std::move(completed_threads_));
 }
 
 void TimerManager::PostforkParent() {
@@ -60,6 +60,8 @@ class TimerManager final : public grpc_event_engine::experimental::Forkable {
   void PostforkParent() override;
   void PostforkChild() override;
 
+  static bool IsTimerManagerThread();
+
  private:
   struct RunThreadArgs {
     TimerManager* self;
@@ -80,13 +82,33 @@ class TimerManager final : public grpc_event_engine::experimental::Forkable {
 
   void StartThread() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
   static void RunThread(void* arg);
+  void Run(grpc_core::Thread thread);
   void MainLoop();
   void RunSomeTimers(std::vector<experimental::EventEngine::Closure*> timers);
   bool WaitUntil(grpc_core::Timestamp next);
   void Kick();
 
   grpc_core::Mutex mu_;
-  grpc_core::CondVar cv_;
+  // Condvar associated with decrementing the thread count.
+  // Threads will signal this when thread count reaches zero, and the forking
+  // code *or* the destructor will wait upon it.
+  grpc_core::CondVar cv_threadcount_;
+  // Condvar associated with threads waiting to wakeup and work.
+  // Threads wait on this until either a timeout is reached or another thread is
+  // needed to wait for a timeout.
+  // On shutdown we SignalAll against this to wake up all threads and have them
+  // finish.
+  // On kick we Signal against this to wake up at least one thread (but not
+  // all)! Similarly when we note that no thread is watching timers.
+  //
+  // This is a different condvar than cv_threadcount_!
+  // If this were the same:
+  // - thread exits would require a SignalAll to ensure that the specific thread
+  //   we want to wake is woken up.
+  // - kicks would need to signal all threads to avoid having the kick absorbed
+  //   by a shutdown thread and cause a deadlock, leading to thundering herd
+  //   problems in the common case.
+  grpc_core::CondVar cv_wait_;
   Host host_;
   // number of threads in the system
   size_t thread_count_ ABSL_GUARDED_BY(mu_) = 0;
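The comment block above explains why TimerManager splits the old single cv_ into cv_wait_ and cv_threadcount_. A simplified, standalone sketch of that shutdown handshake using std primitives rather than the grpc_core wrappers; this mirrors the stated intent, not the exact grpc code.

// Simplified illustration of the two-condvar shutdown pattern (assumption:
// captures the intent of cv_wait_/cv_threadcount_ only).
#include <condition_variable>
#include <mutex>

struct Workers {
  std::mutex mu;
  std::condition_variable cv_wait;         // wakes sleeping worker threads
  std::condition_variable cv_threadcount;  // signals thread-count changes
  int thread_count = 0;
  bool shutdown = false;

  void WorkerExit() {
    std::lock_guard<std::mutex> lock(mu);
    if (--thread_count == 0) cv_threadcount.notify_one();
  }

  void Shutdown() {
    std::unique_lock<std::mutex> lock(mu);
    shutdown = true;
    cv_wait.notify_all();  // wake every sleeping worker exactly once
    // Waiting on a separate condvar means worker exits never need a
    // broadcast, and a Kick cannot be absorbed by the shutdown waiter.
    cv_threadcount.wait(lock, [this] { return thread_count == 0; });
  }
};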
@@ -20,138 +20,175 @@
 
 #include "src/core/lib/event_engine/thread_pool.h"
 
+#include <memory>
 #include <utility>
 
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+#include <grpc/support/log.h>
+
+#include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gprpp/thd.h"
 
 namespace grpc_event_engine {
 namespace experimental {
 
-ThreadPool::Thread::Thread(ThreadPool* pool)
-    : pool_(pool),
-      thd_(
-          "posix_eventengine_pool",
-          [](void* th) { static_cast<ThreadPool::Thread*>(th)->ThreadFunc(); },
-          this, nullptr, grpc_core::Thread::Options().set_tracked(false)) {
-  thd_.Start();
+namespace {
+// TODO(drfloob): Remove this, and replace it with the WorkQueue* for the
+// current thread (with nullptr indicating not a threadpool thread).
+GPR_THREAD_LOCAL(bool) g_threadpool_thread;
+}  // namespace
+
+void ThreadPool::StartThread(StatePtr state, bool throttled) {
+  state->thread_count.Add();
+  struct ThreadArg {
+    StatePtr state;
+    bool throttled;
+  };
+  grpc_core::Thread(
+      "event_engine",
+      [](void* arg) {
+        std::unique_ptr<ThreadArg> a(static_cast<ThreadArg*>(arg));
+        g_threadpool_thread = true;
+        if (a->throttled) {
+          GPR_ASSERT(a->state->currently_starting_one_thread.exchange(
+              false, std::memory_order_relaxed));
+        }
+        ThreadFunc(a->state);
+      },
+      new ThreadArg{state, throttled}, nullptr,
+      grpc_core::Thread::Options().set_tracked(false).set_joinable(false))
+      .Start();
 }
-ThreadPool::Thread::~Thread() { thd_.Join(); }
-
-void ThreadPool::Thread::ThreadFunc() {
-  pool_->ThreadFunc();
-  // Now that we have killed ourselves, we should reduce the thread count
-  grpc_core::MutexLock lock(&pool_->mu_);
-  pool_->nthreads_--;
-  // Move ourselves to dead list
-  pool_->dead_threads_.push_back(this);
-
-  if (pool_->nthreads_ == 0) {
-    if (pool_->forking_) pool_->fork_cv_.Signal();
-    if (pool_->shutdown_) pool_->shutdown_cv_.Signal();
+
+void ThreadPool::ThreadFunc(StatePtr state) {
+  while (state->queue.Step()) {
   }
+  state->thread_count.Remove();
 }
 
-void ThreadPool::ThreadFunc() {
-  for (;;) {
-    // Wait until work is available or we are shutting down.
-    grpc_core::ReleasableMutexLock lock(&mu_);
-    if (!forking_ && !shutdown_ && callbacks_.empty()) {
-      // If there are too many threads waiting, then quit this thread
-      if (threads_waiting_ >= reserve_threads_) {
-        break;
-      }
-      threads_waiting_++;
-      cv_.Wait(&mu_);
-      threads_waiting_--;
-    }
-    // a fork could be initiated while the thread was waiting
-    if (forking_) return;
-    // Drain callbacks before considering shutdown to ensure all work
-    // gets completed.
-    if (!callbacks_.empty()) {
-      auto cb = std::move(callbacks_.front());
-      callbacks_.pop();
-      lock.Release();
-      cb();
-    } else if (shutdown_) {
+bool ThreadPool::Queue::Step() {
+  grpc_core::ReleasableMutexLock lock(&mu_);
+  // Wait until work is available or we are shutting down.
+  while (state_ == State::kRunning && callbacks_.empty()) {
+    // If there are too many threads waiting, then quit this thread.
+    // TODO(ctiller): wait some time in this case to be sure.
+    if (threads_waiting_ >= reserve_threads_) return false;
+    threads_waiting_++;
+    cv_.Wait(&mu_);
+    threads_waiting_--;
+  }
+  switch (state_) {
+    case State::kRunning:
       break;
-    }
+    case State::kShutdown:
+    case State::kForking:
+      if (!callbacks_.empty()) break;
+      return false;
   }
+  GPR_ASSERT(!callbacks_.empty());
+  auto callback = std::move(callbacks_.front());
+  callbacks_.pop();
+  lock.Release();
+  callback();
+  return true;
 }
 
 ThreadPool::ThreadPool(int reserve_threads)
-    : shutdown_(false),
-      reserve_threads_(reserve_threads),
-      nthreads_(0),
-      threads_waiting_(0),
-      forking_(false) {
-  grpc_core::MutexLock lock(&mu_);
-  StartNThreadsLocked(reserve_threads_);
-}
-
-void ThreadPool::StartNThreadsLocked(int n) {
-  for (int i = 0; i < n; i++) {
-    nthreads_++;
-    new Thread(this);
+    : reserve_threads_(reserve_threads) {
+  for (int i = 0; i < reserve_threads; i++) {
+    StartThread(state_, /*throttled=*/false);
   }
 }
 
-void ThreadPool::ReapThreads(std::vector<Thread*>* tlist) {
-  for (auto* t : *tlist) delete t;
-  tlist->clear();
+ThreadPool::~ThreadPool() {
+  state_->queue.SetShutdown();
+  // Wait until all threads are exited.
+  // Note that if this is a threadpool thread then we won't exit this thread
+  // until the callstack unwinds a little, so we need to wait for just one
+  // thread running instead of zero.
+  state_->thread_count.BlockUntilThreadCount(g_threadpool_thread ? 1 : 0,
+                                             "shutting down");
 }
 
-ThreadPool::~ThreadPool() {
-  grpc_core::MutexLock lock(&mu_);
-  shutdown_ = true;
-  cv_.SignalAll();
-  while (nthreads_ != 0) {
-    shutdown_cv_.Wait(&mu_);
+void ThreadPool::Add(absl::AnyInvocable<void()> callback) {
+  if (state_->queue.Add(std::move(callback))) {
+    if (!state_->currently_starting_one_thread.exchange(
+            true, std::memory_order_relaxed)) {
+      StartThread(state_, /*throttled=*/true);
+    }
   }
-  ReapThreads(&dead_threads_);
 }
 
-void ThreadPool::Add(absl::AnyInvocable<void()> callback) {
+bool ThreadPool::Queue::Add(absl::AnyInvocable<void()> callback) {
   grpc_core::MutexLock lock(&mu_);
   // Add works to the callbacks list
   callbacks_.push(std::move(callback));
-  // Store the callback for later if we are forking.
-  // TODO(hork): should we block instead?
-  if (forking_) return;
-  // Increase pool size or notify as needed
-  if (threads_waiting_ == 0) {
-    // Kick off a new thread
-    nthreads_++;
-    new Thread(this);
-  } else {
-    cv_.Signal();
-  }
-  // Also use this chance to harvest dead threads
-  if (!dead_threads_.empty()) {
-    ReapThreads(&dead_threads_);
+  cv_.Signal();
+  switch (state_) {
+    case State::kRunning:
+    case State::kShutdown:
+      return threads_waiting_ == 0;
+    case State::kForking:
+      return false;
   }
+  GPR_UNREACHABLE_CODE(return false);
 }
 
-void ThreadPool::PrepareFork() {
+void ThreadPool::Queue::SetState(State state) {
   grpc_core::MutexLock lock(&mu_);
-  forking_ = true;
-  cv_.SignalAll();
-  while (nthreads_ != 0) {
-    fork_cv_.Wait(&mu_);
+  if (state == State::kRunning) {
+    GPR_ASSERT(state_ != State::kRunning);
+  } else {
+    GPR_ASSERT(state_ == State::kRunning);
   }
-  ReapThreads(&dead_threads_);
+  state_ = state;
+  cv_.SignalAll();
 }
 
-void ThreadPool::PostforkParent() {
+void ThreadPool::ThreadCount::Add() {
   grpc_core::MutexLock lock(&mu_);
-  forking_ = false;
-  StartNThreadsLocked(reserve_threads_);
+  ++threads_;
 }
 
-void ThreadPool::PostforkChild() {
+void ThreadPool::ThreadCount::Remove() {
   grpc_core::MutexLock lock(&mu_);
-  forking_ = false;
-  StartNThreadsLocked(reserve_threads_);
+  --threads_;
+  cv_.Signal();
+}
+
+void ThreadPool::ThreadCount::BlockUntilThreadCount(int threads,
+                                                    const char* why) {
+  grpc_core::MutexLock lock(&mu_);
+  auto last_log = absl::Now();
+  while (threads_ > threads) {
+    // Wait for all threads to exit.
+    // At least once every three seconds (but no faster than once per second in
+    // the event of spurious wakeups) log a message indicating we're waiting to
+    // fork.
+    cv_.WaitWithTimeout(&mu_, absl::Seconds(3));
+    if (threads_ > threads && absl::Now() - last_log > absl::Seconds(1)) {
+      gpr_log(GPR_ERROR, "Waiting for thread pool to idle before %s", why);
+      last_log = absl::Now();
+    }
+  }
+}
+
+void ThreadPool::PrepareFork() {
+  state_->queue.SetForking();
+  state_->thread_count.BlockUntilThreadCount(0, "forking");
+}
+
+void ThreadPool::PostforkParent() { Postfork(); }
+
+void ThreadPool::PostforkChild() { Postfork(); }
+
+void ThreadPool::Postfork() {
+  state_->queue.Reset();
+  for (int i = 0; i < reserve_threads_; i++) {
+    StartThread(state_, /*throttled=*/false);
+  }
 }
 
 } // namespace experimental
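In the rewritten ThreadPool above, Queue::Add returns true only when no idle worker is waiting, and StartThread is then gated by the currently_starting_one_thread flag so at most one thread is being spun up at a time. An isolated sketch of that throttle; the function and variable names here are illustrative, only the exchange()-based gating follows the diff.

// Sketch (assumption): the one-thread-at-a-time start throttle in isolation.
#include <atomic>

std::atomic<bool> currently_starting_one_thread{false};

void OnWorkAdded(bool no_idle_waiters) {
  // Queue::Add reports true when no thread is currently waiting for work.
  if (!no_idle_waiters) return;
  // Only one caller wins the exchange; the others rely on the thread that is
  // already being started (it clears the flag once it begins running).
  if (!currently_starting_one_thread.exchange(true,
                                              std::memory_order_relaxed)) {
    // Here the pool would call StartThread(state, /*throttled=*/true).
  }
}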
@@ -21,15 +21,15 @@
 
 #include <grpc/support/port_platform.h>
 
+#include <atomic>
+#include <memory>
 #include <queue>
-#include <vector>
 
 #include "absl/base/thread_annotations.h"
 #include "absl/functional/any_invocable.h"
 
 #include "src/core/lib/event_engine/forkable.h"
 #include "src/core/lib/gprpp/sync.h"
-#include "src/core/lib/gprpp/thd.h"
 
 namespace grpc_event_engine {
 namespace experimental {
@@ -37,42 +37,75 @@ namespace experimental {
 class ThreadPool final : public grpc_event_engine::experimental::Forkable {
  public:
   explicit ThreadPool(int reserve_threads);
+  // Ensures the thread pool is empty before destroying it.
   ~ThreadPool() override;
 
   void Add(absl::AnyInvocable<void()> callback);
 
   // Forkable
+  // Ensures that the thread pool is empty before forking.
   void PrepareFork() override;
   void PostforkParent() override;
   void PostforkChild() override;
 
  private:
-  class Thread {
+  class Queue {
    public:
-    explicit Thread(ThreadPool* pool);
-    ~Thread();
+    explicit Queue(int reserve_threads) : reserve_threads_(reserve_threads) {}
+    bool Step();
+    void SetShutdown() { SetState(State::kShutdown); }
+    void SetForking() { SetState(State::kForking); }
+    // Add a callback to the queue.
+    // Return true if we should also spin up a new thread.
+    bool Add(absl::AnyInvocable<void()> callback);
+    void Reset() { SetState(State::kRunning); }
 
    private:
-    ThreadPool* pool_;
-    grpc_core::Thread thd_;
-    void ThreadFunc();
+    enum class State { kRunning, kShutdown, kForking };
+
+    void SetState(State state);
+
+    grpc_core::Mutex mu_;
+    grpc_core::CondVar cv_;
+    std::queue<absl::AnyInvocable<void()>> callbacks_ ABSL_GUARDED_BY(mu_);
+    int threads_waiting_ ABSL_GUARDED_BY(mu_) = 0;
+    const int reserve_threads_;
+    State state_ ABSL_GUARDED_BY(mu_) = State::kRunning;
   };
 
-  void ThreadFunc();
-  void StartNThreadsLocked(int n) ABSL_EXCLUSIVE_LOCKS_REQUIRED(&mu_);
-  static void ReapThreads(std::vector<Thread*>* tlist);
-
-  grpc_core::Mutex mu_;
-  grpc_core::CondVar cv_;
-  grpc_core::CondVar shutdown_cv_;
-  grpc_core::CondVar fork_cv_;
-  bool shutdown_;
-  std::queue<absl::AnyInvocable<void()>> callbacks_;
-  int reserve_threads_;
-  int nthreads_;
-  int threads_waiting_;
-  std::vector<Thread*> dead_threads_;
-  bool forking_;
+  class ThreadCount {
+   public:
+    void Add();
+    void Remove();
+    void BlockUntilThreadCount(int threads, const char* why);
+
+   private:
+    grpc_core::Mutex mu_;
+    grpc_core::CondVar cv_;
+    int threads_ ABSL_GUARDED_BY(mu_) = 0;
+  };
+
+  struct State {
+    explicit State(int reserve_threads) : queue(reserve_threads) {}
+    Queue queue;
+    ThreadCount thread_count;
+    // After pool creation we use this to rate limit creation of threads to one
+    // at a time.
+    std::atomic<bool> currently_starting_one_thread{false};
+  };
+
+  using StatePtr = std::shared_ptr<State>;
+
+  static void ThreadFunc(StatePtr state);
+  // Start a new thread; throttled indicates whether the State::starting_thread
+  // variable is being used to throttle this threads creation against others or
+  // not: at thread pool startup we start several threads concurrently, but
+  // after that we only start one at a time.
+  static void StartThread(StatePtr state, bool throttled);
+  void Postfork();
+
+  const int reserve_threads_;
+  const StatePtr state_ = std::make_shared<State>(reserve_threads_);
 };
 
 } // namespace experimental
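Taken together, the header and implementation hunks give the pool a simple surface: construct with a reserve size, Add closures, and let the destructor or fork hooks drain the workers. A brief usage sketch; this is an internal grpc-core API, shown here only to make the new interface concrete.

// Usage sketch (assumption: internal API, illustrative only).
#include "src/core/lib/event_engine/thread_pool.h"

void Example() {
  grpc_event_engine::experimental::ThreadPool pool(/*reserve_threads=*/4);
  pool.Add([]() {
    // Runs on one of the pool threads; an idle thread is signalled if one
    // exists, otherwise a new thread is started (throttled to one at a time).
  });
  // ~ThreadPool blocks until all pool threads have exited.
}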