grpc 1.14.2 → 1.15.0.pre1


Potentially problematic release: this version of grpc might be problematic.

Files changed (113):
  1. checksums.yaml +4 -4
  2. data/Makefile +307 -12
  3. data/etc/roots.pem +40 -163
  4. data/include/grpc/grpc.h +49 -0
  5. data/include/grpc/grpc_security.h +0 -6
  6. data/include/grpc/grpc_security_constants.h +6 -0
  7. data/include/grpc/impl/codegen/grpc_types.h +17 -2
  8. data/include/grpc/impl/codegen/port_platform.h +41 -4
  9. data/include/grpc/support/sync.h +0 -16
  10. data/src/{cpp → core}/ext/filters/census/grpc_context.cc +0 -0
  11. data/src/core/ext/filters/client_channel/client_channel.cc +40 -11
  12. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +11 -9
  13. data/src/core/ext/filters/client_channel/client_channel_channelz.h +4 -2
  14. data/src/core/ext/filters/client_channel/lb_policy.h +14 -11
  15. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +67 -90
  16. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +108 -91
  17. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +79 -25
  18. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +40 -0
  19. data/src/core/ext/filters/client_channel/resolver.h +8 -0
  20. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +11 -3
  21. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +13 -10
  22. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +18 -4
  23. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +13 -5
  24. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +537 -0
  25. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +6 -5
  26. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +11 -0
  27. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc +29 -0
  28. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc +29 -0
  29. data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +9 -0
  30. data/src/core/ext/filters/client_channel/subchannel.cc +21 -8
  31. data/src/core/ext/filters/client_channel/subchannel.h +7 -0
  32. data/src/core/ext/filters/http/client_authority_filter.cc +1 -1
  33. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +24 -0
  34. data/src/core/ext/transport/chttp2/transport/flow_control.cc +10 -7
  35. data/src/core/lib/channel/channel_stack.h +1 -1
  36. data/src/core/lib/channel/channel_trace.cc +1 -1
  37. data/src/core/lib/channel/channel_trace.h +1 -1
  38. data/src/core/lib/channel/channelz.cc +37 -27
  39. data/src/core/lib/channel/channelz.h +13 -4
  40. data/src/core/lib/channel/channelz_registry.cc +89 -4
  41. data/src/core/lib/channel/channelz_registry.h +56 -39
  42. data/src/core/lib/gpr/arena.cc +33 -40
  43. data/src/core/lib/gprpp/fork.cc +41 -33
  44. data/src/core/lib/gprpp/fork.h +13 -4
  45. data/src/core/lib/gprpp/mutex_lock.h +42 -0
  46. data/src/core/lib/gprpp/orphanable.h +4 -2
  47. data/src/core/lib/gprpp/ref_counted.h +4 -2
  48. data/src/core/lib/gprpp/ref_counted_ptr.h +65 -13
  49. data/src/core/lib/iomgr/call_combiner.h +4 -1
  50. data/src/core/lib/iomgr/ev_epoll1_linux.cc +77 -17
  51. data/src/core/lib/iomgr/ev_epollex_linux.cc +8 -26
  52. data/src/core/lib/iomgr/ev_epollsig_linux.cc +10 -28
  53. data/src/core/lib/iomgr/ev_poll_posix.cc +144 -35
  54. data/src/core/lib/iomgr/ev_posix.cc +58 -9
  55. data/src/core/lib/iomgr/ev_posix.h +22 -8
  56. data/src/core/lib/iomgr/exec_ctx.cc +6 -0
  57. data/src/core/lib/iomgr/exec_ctx.h +2 -0
  58. data/src/core/lib/iomgr/executor.cc +148 -72
  59. data/src/core/lib/iomgr/executor.h +39 -6
  60. data/src/core/lib/iomgr/fork_posix.cc +12 -1
  61. data/src/core/lib/iomgr/iocp_windows.cc +9 -4
  62. data/src/core/lib/iomgr/lockfree_event.cc +5 -1
  63. data/src/core/lib/iomgr/port.h +15 -2
  64. data/src/core/lib/iomgr/resolve_address_posix.cc +3 -2
  65. data/src/core/lib/iomgr/resolve_address_windows.cc +3 -2
  66. data/src/core/lib/iomgr/resource_quota.cc +78 -0
  67. data/src/core/lib/iomgr/resource_quota.h +16 -0
  68. data/src/core/lib/iomgr/socket_mutator.cc +1 -1
  69. data/src/core/lib/iomgr/socket_mutator.h +1 -1
  70. data/src/core/lib/iomgr/socket_windows.cc +33 -0
  71. data/src/core/lib/iomgr/socket_windows.h +6 -0
  72. data/src/core/lib/iomgr/tcp_windows.cc +2 -2
  73. data/src/core/lib/iomgr/tcp_windows.h +2 -0
  74. data/src/core/lib/iomgr/timer.h +3 -2
  75. data/src/core/lib/json/json.cc +2 -1
  76. data/src/core/lib/security/credentials/jwt/json_token.h +2 -0
  77. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +2 -0
  78. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +1 -1
  79. data/src/core/lib/security/security_connector/load_system_roots.h +29 -0
  80. data/src/core/lib/security/security_connector/load_system_roots_fallback.cc +32 -0
  81. data/src/core/lib/security/security_connector/load_system_roots_linux.cc +165 -0
  82. data/src/core/lib/security/security_connector/load_system_roots_linux.h +44 -0
  83. data/src/core/lib/security/security_connector/security_connector.cc +23 -4
  84. data/src/core/lib/security/transport/client_auth_filter.cc +0 -4
  85. data/src/core/lib/security/transport/server_auth_filter.cc +0 -2
  86. data/src/core/lib/surface/call.cc +7 -3
  87. data/src/core/lib/surface/channel.cc +18 -2
  88. data/src/core/lib/surface/completion_queue.cc +152 -15
  89. data/src/core/lib/surface/completion_queue.h +20 -1
  90. data/src/core/lib/surface/completion_queue_factory.cc +13 -4
  91. data/src/core/lib/surface/init.cc +2 -2
  92. data/src/core/lib/surface/init.h +0 -1
  93. data/src/core/lib/surface/version.cc +2 -2
  94. data/src/core/lib/transport/service_config.cc +2 -2
  95. data/src/core/lib/transport/service_config.h +3 -3
  96. data/src/core/lib/transport/transport.h +2 -0
  97. data/src/core/tsi/alts/crypt/aes_gcm.cc +2 -0
  98. data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +8 -0
  99. data/src/core/tsi/grpc_shadow_boringssl.h +3006 -0
  100. data/src/core/tsi/ssl/session_cache/ssl_session.h +2 -0
  101. data/src/core/tsi/ssl/session_cache/ssl_session_cache.cc +5 -5
  102. data/src/core/tsi/ssl/session_cache/ssl_session_cache.h +2 -0
  103. data/src/core/tsi/ssl_transport_security.cc +5 -3
  104. data/src/core/tsi/ssl_types.h +2 -0
  105. data/src/ruby/ext/grpc/extconf.rb +1 -26
  106. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +12 -0
  107. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +18 -0
  108. data/src/ruby/lib/grpc/version.rb +1 -1
  109. data/src/ruby/spec/generic/client_stub_spec.rb +3 -3
  110. data/third_party/address_sorting/address_sorting.c +7 -2
  111. data/third_party/address_sorting/address_sorting_windows.c +43 -3
  112. data/third_party/address_sorting/include/address_sorting/address_sorting.h +3 -0
  113. metadata +40 -31

data/src/core/lib/iomgr/executor.h
@@ -27,7 +27,8 @@
 
 typedef struct {
   gpr_mu mu;
-  size_t id;         // For debugging purposes
+  size_t id;         // For debugging purposes
+  const char* name;  // Thread state name
   gpr_cv cv;
   grpc_closure_list elems;
   size_t depth;  // Number of closures in the closure list
@@ -36,7 +37,11 @@ typedef struct {
   grpc_core::Thread thd;
 } ThreadState;
 
-typedef enum { GRPC_EXECUTOR_SHORT, GRPC_EXECUTOR_LONG } GrpcExecutorJobType;
+typedef enum {
+  GRPC_EXECUTOR_SHORT = 0,
+  GRPC_EXECUTOR_LONG,
+  GRPC_NUM_EXECUTOR_JOB_TYPES  // Add new values above this
+} GrpcExecutorJobType;
 
 class GrpcExecutor {
  public:
@@ -58,7 +63,7 @@ class GrpcExecutor {
   void Enqueue(grpc_closure* closure, grpc_error* error, bool is_short);
 
  private:
-  static size_t RunClosures(grpc_closure_list list);
+  static size_t RunClosures(const char* executor_name, grpc_closure_list list);
   static void ThreadMain(void* arg);
 
   const char* name_;
@@ -70,14 +75,42 @@ class GrpcExecutor {
 
 // == Global executor functions ==
 
+typedef enum {
+  GRPC_DEFAULT_EXECUTOR = 0,
+  GRPC_RESOLVER_EXECUTOR,
+
+  GRPC_NUM_EXECUTORS  // Add new values above this
+} GrpcExecutorType;
+
+// TODO(sreek): Currently we have two executors (available globally): the
+// default executor and the resolver executor.
+//
+// Some of the functions below operate on the DEFAULT executor only while some
+// operate on ALL the executors. This is a bit confusing and should be cleaned
+// up in the future (where we make all the following functions take
+// executor_type and/or job_type)
+
+// Initialize ALL the executors
 void grpc_executor_init();
 
+// Shutdown ALL the executors
+void grpc_executor_shutdown();
+
+// Set the threading mode for ALL the executors
+void grpc_executor_set_threading(bool enable);
+
+// Get the DEFAULT executor scheduler for the given job_type
 grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type);
 
-void grpc_executor_shutdown();
+// Get the executor scheduler for a given executor_type and a job_type
+grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type,
+                                                GrpcExecutorJobType job_type);
 
-bool grpc_executor_is_threaded();
+// Return true if a given executor is running in threaded mode (i.e. if
+// grpc_executor_set_threading(true) was called previously on that executor)
+bool grpc_executor_is_threaded(GrpcExecutorType executor_type);
 
-void grpc_executor_set_threading(bool enable);
+// Return true if the DEFAULT executor is threaded
+bool grpc_executor_is_threaded();
 
 #endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */
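
The resolver changes later in this diff use the new two-argument overload. As a minimal sketch of the calling pattern (the closure, callback, and function names here are hypothetical, not part of the release), scheduling a short job on the resolver executor looks like this:

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/executor.h"

static void on_resolve_done(void* arg, grpc_error* error) {
  /* ... consume the result ... */
}

void schedule_resolution(grpc_closure* closure, void* arg) {
  /* Ask for the RESOLVER executor explicitly; the one-argument overload
     still returns the DEFAULT executor's scheduler. */
  GRPC_CLOSURE_INIT(closure, on_resolve_done, arg,
                    grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR,
                                            GRPC_EXECUTOR_SHORT));
  GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
}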

data/src/core/lib/iomgr/fork_posix.cc
@@ -25,6 +25,7 @@
 #include <string.h>
 
 #include <grpc/fork.h>
+#include <grpc/grpc.h>
 #include <grpc/support/log.h>
 
 #include "src/core/lib/gpr/env.h"
@@ -34,7 +35,6 @@
 #include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/iomgr/timer_manager.h"
 #include "src/core/lib/iomgr/wakeup_fd_posix.h"
-#include "src/core/lib/surface/init.h"
 
 /*
  * NOTE: FORKING IS NOT GENERALLY SUPPORTED, THIS IS ONLY INTENDED TO WORK
@@ -58,6 +58,12 @@ void grpc_prefork() {
             "environment variable GRPC_ENABLE_FORK_SUPPORT=1");
     return;
   }
+  if (strcmp(grpc_get_poll_strategy_name(), "epoll1") != 0 &&
+      strcmp(grpc_get_poll_strategy_name(), "poll") != 0) {
+    gpr_log(GPR_ERROR,
+            "Fork support is only compatible with the epoll1 and poll polling "
+            "strategies");
+  }
   if (!grpc_core::Fork::BlockExecCtx()) {
     gpr_log(GPR_INFO,
             "Other threads are currently calling into gRPC, skipping fork() "
@@ -84,6 +90,11 @@ void grpc_postfork_child() {
   if (!skipped_handler) {
     grpc_core::Fork::AllowExecCtx();
     grpc_core::ExecCtx exec_ctx;
+    grpc_core::Fork::child_postfork_func reset_polling_engine =
+        grpc_core::Fork::GetResetChildPollingEngineFunc();
+    if (reset_polling_engine != nullptr) {
+      reset_polling_engine();
+    }
     grpc_timer_manager_set_threading(true);
     grpc_executor_set_threading(true);
   }
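
Taken together: fork support must be enabled before grpc_init() runs, and only the epoll1 and poll engines are compatible. A minimal sketch of a compatible setup (GRPC_ENABLE_FORK_SUPPORT appears in the hunk above; GRPC_POLL_STRATEGY is gRPC's standard engine-selection variable; everything else is illustrative):

#include <stdlib.h>
#include <unistd.h>

#include <grpc/grpc.h>

int main(void) {
  /* Both variables are read during grpc_init(), so set them first. */
  setenv("GRPC_ENABLE_FORK_SUPPORT", "1", 1); /* opt in to the fork handlers */
  setenv("GRPC_POLL_STRATEGY", "epoll1", 1);  /* epoll1 or poll only */
  grpc_init();
  pid_t pid = fork(); /* grpc_prefork()/grpc_postfork_*() run around this */
  if (pid == 0) {
    /* Child: the postfork handler above has reset the polling engine. */
  }
  grpc_shutdown();
  return 0;
}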

data/src/core/lib/iomgr/iocp_windows.cc
@@ -89,10 +89,15 @@ grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) {
   } else {
     abort();
   }
-  success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes,
-                                   FALSE, &flags);
-  info->bytes_transfered = bytes;
-  info->wsa_error = success ? 0 : WSAGetLastError();
+  if (socket->shutdown_called) {
+    info->bytes_transfered = 0;
+    info->wsa_error = WSA_OPERATION_ABORTED;
+  } else {
+    success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes,
+                                     FALSE, &flags);
+    info->bytes_transfered = bytes;
+    info->wsa_error = success ? 0 : WSAGetLastError();
+  }
   GPR_ASSERT(overlapped == &info->overlapped);
   grpc_socket_become_ready(socket, info);
   return GRPC_IOCP_WORK_WORK;

data/src/core/lib/iomgr/lockfree_event.cc
@@ -89,7 +89,11 @@ void LockfreeEvent::DestroyEvent() {
 
 void LockfreeEvent::NotifyOn(grpc_closure* closure) {
   while (true) {
-    gpr_atm curr = gpr_atm_no_barrier_load(&state_);
+    /* This load needs to be an acquire load because this can be a shutdown
+     * error that we might need to reference. Adding acquire semantics makes
+     * sure that the shutdown error has been initialized properly before we
+     * reference it. */
+    gpr_atm curr = gpr_atm_acq_load(&state_);
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this,
               (void*)curr, closure);
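
The comment added above describes the standard acquire/release publication pattern. An illustrative sketch outside gRPC (std::atomic in place of gpr_atm; all names hypothetical):

#include <atomic>

struct ShutdownError {
  int code = 0;
  const char* msg = nullptr;
};

std::atomic<ShutdownError*> g_state{nullptr};

/* Writer: fully initialize the object, then publish with release semantics. */
void publish_shutdown(ShutdownError* e) {
  e->code = 1;
  e->msg = "shutdown";
  g_state.store(e, std::memory_order_release);
}

/* Reader: the acquire load pairs with the release store, so a non-null
   pointer always refers to a fully initialized object. */
ShutdownError* observe_shutdown() {
  return g_state.load(std::memory_order_acquire);
}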

data/src/core/lib/iomgr/port.h
@@ -98,9 +98,9 @@
 #define GRPC_POSIX_FORK 1
 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
 #ifdef GRPC_CFSTREAM
-#define GRPC_POSIX_SOCKET_IOMGR 1
-#define GRPC_CFSTREAM_ENDPOINT 1
+#define GRPC_CFSTREAM_IOMGR 1
 #define GRPC_CFSTREAM_CLIENT 1
+#define GRPC_CFSTREAM_ENDPOINT 1
 #define GRPC_POSIX_SOCKET_ARES_EV_DRIVER 1
 #define GRPC_POSIX_SOCKET_EV 1
 #define GRPC_POSIX_SOCKET_EV_EPOLL1 1
@@ -111,6 +111,7 @@
 #define GRPC_POSIX_SOCKET_SOCKADDR 1
 #define GRPC_POSIX_SOCKET_SOCKET_FACTORY 1
 #define GRPC_POSIX_SOCKET_TCP 1
+#define GRPC_POSIX_SOCKET_TCP_CLIENT 1
 #define GRPC_POSIX_SOCKET_TCP_SERVER 1
 #define GRPC_POSIX_SOCKET_TCP_SERVER_UTILS_COMMON 1
 #define GRPC_POSIX_SOCKET_UTILS_COMMON 1
@@ -139,6 +140,18 @@
 #define GRPC_POSIX_SOCKET 1
 #define GRPC_POSIX_SOCKETUTILS 1
 #define GRPC_POSIX_WAKEUP_FD 1
+#elif defined(GPR_SOLARIS)
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
+#elif defined(GPR_AIX)
+#define GRPC_HAVE_UNIX_SOCKET 1
+#define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1
+#define GRPC_POSIX_SOCKET 1
+#define GRPC_POSIX_SOCKETUTILS 1
+#define GRPC_POSIX_WAKEUP_FD 1
 #elif defined(GPR_NACL)
 #define GRPC_HAVE_ARPA_NAMESER 1
 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1

data/src/core/lib/iomgr/resolve_address_posix.cc
@@ -166,8 +166,9 @@ static void posix_resolve_address(const char* name, const char* default_port,
                                   grpc_closure* on_done,
                                   grpc_resolved_addresses** addrs) {
   request* r = static_cast<request*>(gpr_malloc(sizeof(request)));
-  GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
-                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
+  GRPC_CLOSURE_INIT(
+      &r->request_closure, do_request_thread, r,
+      grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR, GRPC_EXECUTOR_SHORT));
   r->name = gpr_strdup(name);
   r->default_port = gpr_strdup(default_port);
   r->on_done = on_done;

data/src/core/lib/iomgr/resolve_address_windows.cc
@@ -151,8 +151,9 @@ static void windows_resolve_address(const char* name, const char* default_port,
                                     grpc_closure* on_done,
                                     grpc_resolved_addresses** addresses) {
   request* r = (request*)gpr_malloc(sizeof(request));
-  GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
-                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
+  GRPC_CLOSURE_INIT(
+      &r->request_closure, do_request_thread, r,
+      grpc_executor_scheduler(GRPC_RESOLVER_EXECUTOR, GRPC_EXECUTOR_SHORT));
   r->name = gpr_strdup(name);
   r->default_port = gpr_strdup(default_port);
   r->on_done = on_done;

data/src/core/lib/iomgr/resource_quota.cc
@@ -96,6 +96,9 @@ struct grpc_resource_user {
      list, false otherwise */
   bool added_to_free_pool;
 
+  /* The number of threads currently allocated to this resource user */
+  gpr_atm num_threads_allocated;
+
   /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive reclaimer
    */
   grpc_closure* reclaimers[2];
@@ -135,12 +138,33 @@
 
   gpr_atm last_size;
 
+  /* Mutex to protect max_threads and num_threads_allocated */
+  /* Note: We could have used gpr_atm for max_threads and num_threads_allocated
+   * and avoided having this mutex; but in that case, each invocation of the
+   * function grpc_resource_user_allocate_threads() would have had to do at
+   * least two atomic loads (for max_threads and num_threads_allocated) followed
+   * by a CAS (on num_threads_allocated).
+   * Moreover, we expect grpc_resource_user_allocate_threads() to often be
+   * called concurrently, thereby increasing the chances of failing the CAS
+   * operation. This additional complexity is not worth the tiny perf gain we
+   * may (or may not) get by using atomics */
+  gpr_mu thread_count_mu;
+
+  /* Max number of threads allowed */
+  int max_threads;
+
+  /* Number of threads currently allocated via this resource_quota object */
+  int num_threads_allocated;
+
   /* Has rq_step been scheduled to occur? */
   bool step_scheduled;
+
   /* Are we currently reclaiming memory */
   bool reclaiming;
+
   /* Closure around rq_step */
   grpc_closure rq_step_closure;
+
   /* Closure around rq_reclamation_done */
   grpc_closure rq_reclamation_done_closure;
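
For comparison, the atomics-only design that the note above rejects would look roughly like this (a hypothetical sketch, not gRPC code): two atomic loads plus a CAS loop that retries under contention.

#include <atomic>

std::atomic<int> g_max_threads{100};
std::atomic<int> g_num_threads_allocated{0};

bool try_allocate_threads(int thread_count) {
  int cur = g_num_threads_allocated.load(); /* atomic load #1 */
  int max = g_max_threads.load();           /* atomic load #2 */
  while (cur + thread_count <= max) {
    /* The CAS fails whenever a concurrent caller wins the race, forcing a
       retry; this is the contention cost the mutex-based design avoids. */
    if (g_num_threads_allocated.compare_exchange_weak(cur,
                                                      cur + thread_count)) {
      return true;
    }
  }
  return false;
}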
 
@@ -524,6 +548,11 @@ static void ru_shutdown(void* ru, grpc_error* error) {
 static void ru_destroy(void* ru, grpc_error* error) {
   grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
   GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
+  // Free all the remaining thread quota
+  grpc_resource_user_free_threads(resource_user,
+                                  static_cast<int>(gpr_atm_no_barrier_load(
+                                      &resource_user->num_threads_allocated)));
+
   for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
     rulist_remove(resource_user, static_cast<grpc_rulist>(i));
   }
@@ -594,6 +623,9 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
   resource_quota->free_pool = INT64_MAX;
   resource_quota->size = INT64_MAX;
   gpr_atm_no_barrier_store(&resource_quota->last_size, GPR_ATM_MAX);
+  gpr_mu_init(&resource_quota->thread_count_mu);
+  resource_quota->max_threads = INT_MAX;
+  resource_quota->num_threads_allocated = 0;
   resource_quota->step_scheduled = false;
   resource_quota->reclaiming = false;
   gpr_atm_no_barrier_store(&resource_quota->memory_usage_estimation, 0);
@@ -616,6 +648,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
 
 void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
   if (gpr_unref(&resource_quota->refs)) {
+    // No outstanding thread quota
+    GPR_ASSERT(resource_quota->num_threads_allocated == 0);
     GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota");
     gpr_free(resource_quota->name);
     gpr_free(resource_quota);
@@ -646,6 +680,15 @@ double grpc_resource_quota_get_memory_pressure(
          (static_cast<double>(MEMORY_USAGE_ESTIMATION_MAX));
 }
 
+/* Public API */
+void grpc_resource_quota_set_max_threads(grpc_resource_quota* resource_quota,
+                                         int new_max_threads) {
+  GPR_ASSERT(new_max_threads >= 0);
+  gpr_mu_lock(&resource_quota->thread_count_mu);
+  resource_quota->max_threads = new_max_threads;
+  gpr_mu_unlock(&resource_quota->thread_count_mu);
+}
+
 /* Public API */
 void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
                                 size_t size) {
@@ -731,6 +774,7 @@ grpc_resource_user* grpc_resource_user_create(
   grpc_closure_list_init(&resource_user->on_allocated);
   resource_user->allocating = false;
   resource_user->added_to_free_pool = false;
+  gpr_atm_no_barrier_store(&resource_user->num_threads_allocated, 0);
   resource_user->reclaimers[0] = nullptr;
   resource_user->reclaimers[1] = nullptr;
   resource_user->new_reclaimers[0] = nullptr;
@@ -785,6 +829,40 @@ void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
   }
 }
 
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+                                         int thread_count) {
+  GPR_ASSERT(thread_count >= 0);
+  bool is_success = false;
+  gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
+  grpc_resource_quota* rq = resource_user->resource_quota;
+  if (rq->num_threads_allocated + thread_count <= rq->max_threads) {
+    rq->num_threads_allocated += thread_count;
+    gpr_atm_no_barrier_fetch_add(&resource_user->num_threads_allocated,
+                                 thread_count);
+    is_success = true;
+  }
+  gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
+  return is_success;
+}
+
+void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
+                                     int thread_count) {
+  GPR_ASSERT(thread_count >= 0);
+  gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
+  grpc_resource_quota* rq = resource_user->resource_quota;
+  rq->num_threads_allocated -= thread_count;
+  int old_count = static_cast<int>(gpr_atm_no_barrier_fetch_add(
+      &resource_user->num_threads_allocated, -thread_count));
+  if (old_count < thread_count || rq->num_threads_allocated < 0) {
+    gpr_log(GPR_ERROR,
+            "Releasing more threads (%d) than currently allocated (rq threads: "
+            "%d, ru threads: %d)",
+            thread_count, rq->num_threads_allocated + thread_count, old_count);
+    abort();
+  }
+  gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
+}
+
 void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
                               grpc_closure* optional_on_done) {
   gpr_mu_lock(&resource_user->mu);

data/src/core/lib/iomgr/resource_quota.h
@@ -93,6 +93,22 @@ void grpc_resource_user_ref(grpc_resource_user* resource_user);
 void grpc_resource_user_unref(grpc_resource_user* resource_user);
 void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
 
+/* Attempts to get quota from the resource_user to create 'thread_count' number
+ * of threads. Returns true if successful (i.e. the caller is now free to
+ * create 'thread_count' threads) or false if quota is not available */
+bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
+                                         int thread_count);
+/* Releases 'thread_count' worth of quota back to the resource user. The quota
+ * should have been previously obtained successfully by calling
+ * grpc_resource_user_allocate_threads().
+ *
+ * Note: There need not be an exact one-to-one correspondence between
+ * grpc_resource_user_allocate_threads() and grpc_resource_user_free_threads()
+ * calls. The only requirement is that the threads allocated should all
+ * eventually be released */
+void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
+                                     int thread_count);
+
 /* Allocate from the resource user (and its quota).
    If optional_on_done is NULL, then allocate immediately. This may push the
    quota over-limit, at which point reclamation will kick in.
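
A minimal usage sketch built from the comments above (the quota name and thread counts are illustrative; grpc_resource_quota_set_max_threads comes from the resource_quota.cc hunks earlier in this diff):

void worker_pool_example(void) {
  grpc_resource_quota* quota = grpc_resource_quota_create("server_quota");
  grpc_resource_quota_set_max_threads(quota, 64); /* cap across all users */
  grpc_resource_user* ru = grpc_resource_user_create(quota, "worker_pool");

  /* Reserve quota before spawning threads; false means the cap was hit. */
  if (grpc_resource_user_allocate_threads(ru, 4)) {
    /* ... spawn 4 threads ... */
    /* Releases need not mirror the allocation one-to-one: each exiting
       thread could call grpc_resource_user_free_threads(ru, 1) instead. */
    grpc_resource_user_free_threads(ru, 4);
  }

  grpc_resource_user_unref(ru);
  grpc_resource_quota_unref_internal(quota);
}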

data/src/core/lib/iomgr/socket_mutator.cc
@@ -57,7 +57,7 @@ int grpc_socket_mutator_compare(grpc_socket_mutator* a,
 
 void grpc_socket_mutator_unref(grpc_socket_mutator* mutator) {
   if (gpr_unref(&mutator->refcount)) {
-    mutator->vtable->destory(mutator);
+    mutator->vtable->destroy(mutator);
   }
 }
 

data/src/core/lib/iomgr/socket_mutator.h
@@ -33,7 +33,7 @@ typedef struct {
   /** Compare socket mutator \a a and \a b */
   int (*compare)(grpc_socket_mutator* a, grpc_socket_mutator* b);
   /** Destroys the socket mutator instance */
-  void (*destory)(grpc_socket_mutator* mutator);
+  void (*destroy)(grpc_socket_mutator* mutator);
 } grpc_socket_mutator_vtable;
 
 /** The Socket Mutator interface allows changes on socket options */

data/src/core/lib/iomgr/socket_windows.cc
@@ -36,6 +36,7 @@
 #include "src/core/lib/iomgr/iomgr_internal.h"
 #include "src/core/lib/iomgr/pollset.h"
 #include "src/core/lib/iomgr/pollset_windows.h"
+#include "src/core/lib/iomgr/sockaddr_windows.h"
 #include "src/core/lib/iomgr/socket_windows.h"
 
 grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) {
@@ -51,6 +52,10 @@ grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name) {
   return r;
 }
 
+SOCKET grpc_winsocket_wrapped_socket(grpc_winsocket* socket) {
+  return socket->socket;
+}
+
 /* Schedule a shutdown of the socket operations. Will call the pending
    operations to abort them. We need to do that this way because of the
    various callsites of that function, which happens to be in various
@@ -148,4 +153,32 @@ void grpc_socket_become_ready(grpc_winsocket* socket,
   if (should_destroy) destroy(socket);
 }
 
+static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT;
+static bool g_ipv6_loopback_available = false;
+
+static void probe_ipv6_once(void) {
+  SOCKET s = socket(AF_INET6, SOCK_STREAM, 0);
+  g_ipv6_loopback_available = 0;
+  if (s == INVALID_SOCKET) {
+    gpr_log(GPR_INFO, "Disabling AF_INET6 sockets because socket() failed.");
+  } else {
+    grpc_sockaddr_in6 addr;
+    memset(&addr, 0, sizeof(addr));
+    addr.sin6_family = AF_INET6;
+    addr.sin6_addr.s6_addr[15] = 1; /* [::1]:0 */
+    if (bind(s, reinterpret_cast<grpc_sockaddr*>(&addr), sizeof(addr)) == 0) {
+      g_ipv6_loopback_available = 1;
+    } else {
+      gpr_log(GPR_INFO,
+              "Disabling AF_INET6 sockets because ::1 is not available.");
+    }
+    closesocket(s);
+  }
+}
+
+int grpc_ipv6_loopback_available(void) {
+  gpr_once_init(&g_probe_ipv6_once, probe_ipv6_once);
+  return g_ipv6_loopback_available;
+}
+
 #endif /* GRPC_WINSOCK_SOCKET */

data/src/core/lib/iomgr/socket_windows.h
@@ -92,6 +92,8 @@ typedef struct grpc_winsocket {
    it will be responsible for closing it. */
 grpc_winsocket* grpc_winsocket_create(SOCKET socket, const char* name);
 
+SOCKET grpc_winsocket_wrapped_socket(grpc_winsocket* socket);
+
 /* Initiate an asynchronous shutdown of the socket. Will call off any pending
    operation to cancel them. */
 void grpc_winsocket_shutdown(grpc_winsocket* socket);
@@ -108,6 +110,10 @@ void grpc_socket_notify_on_read(grpc_winsocket* winsocket,
 void grpc_socket_become_ready(grpc_winsocket* winsocket,
                               grpc_winsocket_callback_info* ci);
 
+/* Returns true if this system can create AF_INET6 sockets bound to ::1.
+   The value is probed once, and cached for the life of the process. */
+int grpc_ipv6_loopback_available(void);
+
 #endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */
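
A hedged sketch of a typical caller (the helper function is hypothetical, not part of this release): pick the address family from the cached probe result before creating a socket.

#include <winsock2.h>

#include "src/core/lib/iomgr/socket_windows.h"

SOCKET create_stream_socket(void) {
  /* Prefer IPv6 when the one-time probe says ::1 is usable; otherwise fall
     back to IPv4. The probe runs once per process and is cached. */
  int family = grpc_ipv6_loopback_available() ? AF_INET6 : AF_INET;
  return WSASocket(family, SOCK_STREAM, IPPROTO_TCP, NULL, 0,
                   WSA_FLAG_OVERLAPPED);
}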