grpc 1.18.0 → 1.19.0

Potentially problematic release: this version of grpc might be problematic.

Files changed (147)
  1. checksums.yaml +4 -4
  2. data/Makefile +301 -33
  3. data/include/grpc/grpc_security.h +195 -0
  4. data/include/grpc/impl/codegen/grpc_types.h +17 -1
  5. data/include/grpc/impl/codegen/port_platform.h +40 -0
  6. data/include/grpc/impl/codegen/slice.h +1 -1
  7. data/src/core/ext/filters/client_channel/channel_connectivity.cc +2 -0
  8. data/src/core/ext/filters/client_channel/client_channel.cc +74 -69
  9. data/src/core/ext/filters/client_channel/client_channel.h +2 -2
  10. data/src/core/ext/filters/client_channel/client_channel_channelz.cc +5 -6
  11. data/src/core/ext/filters/client_channel/client_channel_channelz.h +5 -4
  12. data/src/core/ext/filters/client_channel/client_channel_factory.cc +2 -2
  13. data/src/core/ext/filters/client_channel/client_channel_factory.h +4 -4
  14. data/src/core/ext/filters/client_channel/client_channel_plugin.cc +3 -3
  15. data/src/core/ext/filters/client_channel/global_subchannel_pool.cc +176 -0
  16. data/src/core/ext/filters/client_channel/global_subchannel_pool.h +68 -0
  17. data/src/core/ext/filters/client_channel/health/health_check_client.cc +10 -8
  18. data/src/core/ext/filters/client_channel/health/health_check_client.h +1 -1
  19. data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +146 -156
  20. data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +28 -30
  21. data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +5 -8
  22. data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +5 -8
  23. data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +23 -24
  24. data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +80 -15
  25. data/src/core/ext/filters/client_channel/lb_policy.cc +30 -1
  26. data/src/core/ext/filters/client_channel/lb_policy.h +29 -1
  27. data/src/core/ext/filters/client_channel/lb_policy_factory.h +6 -1
  28. data/src/core/ext/filters/client_channel/lb_policy_registry.cc +2 -2
  29. data/src/core/ext/filters/client_channel/lb_policy_registry.h +1 -1
  30. data/src/core/ext/filters/client_channel/local_subchannel_pool.cc +96 -0
  31. data/src/core/ext/filters/client_channel/local_subchannel_pool.h +56 -0
  32. data/src/core/ext/filters/client_channel/parse_address.cc +24 -5
  33. data/src/core/ext/filters/client_channel/request_routing.cc +13 -3
  34. data/src/core/ext/filters/client_channel/request_routing.h +5 -1
  35. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +11 -6
  36. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +9 -6
  37. data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +2 -2
  38. data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +7 -35
  39. data/src/core/ext/filters/client_channel/subchannel.cc +698 -791
  40. data/src/core/ext/filters/client_channel/subchannel.h +213 -123
  41. data/src/core/ext/filters/client_channel/subchannel_pool_interface.cc +97 -0
  42. data/src/core/ext/filters/client_channel/subchannel_pool_interface.h +94 -0
  43. data/src/core/ext/filters/http/client_authority_filter.cc +5 -2
  44. data/src/core/ext/filters/max_age/max_age_filter.cc +1 -1
  45. data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +13 -12
  46. data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +5 -7
  47. data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +19 -27
  48. data/src/core/ext/transport/chttp2/server/chttp2_server.cc +18 -19
  49. data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +27 -6
  50. data/src/core/ext/transport/chttp2/transport/flow_control.cc +1 -1
  51. data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +3 -2
  52. data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +1 -1
  53. data/src/core/ext/transport/chttp2/transport/writing.cc +8 -5
  54. data/src/core/lib/channel/handshaker.cc +141 -214
  55. data/src/core/lib/channel/handshaker.h +110 -101
  56. data/src/core/lib/channel/handshaker_factory.h +11 -19
  57. data/src/core/lib/channel/handshaker_registry.cc +64 -52
  58. data/src/core/lib/channel/handshaker_registry.h +21 -16
  59. data/src/core/lib/gpr/log_posix.cc +2 -1
  60. data/src/core/lib/gpr/time.cc +8 -0
  61. data/src/core/lib/gpr/time_posix.cc +8 -2
  62. data/src/core/lib/gprpp/optional.h +47 -0
  63. data/src/core/lib/http/httpcli_security_connector.cc +13 -14
  64. data/src/core/lib/iomgr/buffer_list.cc +182 -24
  65. data/src/core/lib/iomgr/buffer_list.h +70 -8
  66. data/src/core/lib/iomgr/combiner.cc +11 -3
  67. data/src/core/lib/iomgr/error.cc +9 -5
  68. data/src/core/lib/iomgr/ev_epoll1_linux.cc +3 -0
  69. data/src/core/lib/iomgr/ev_epollex_linux.cc +136 -162
  70. data/src/core/lib/iomgr/ev_poll_posix.cc +3 -0
  71. data/src/core/lib/iomgr/ev_posix.cc +4 -0
  72. data/src/core/lib/iomgr/ev_posix.h +4 -0
  73. data/src/core/lib/iomgr/exec_ctx.cc +1 -0
  74. data/src/core/lib/iomgr/exec_ctx.h +137 -8
  75. data/src/core/lib/iomgr/executor.cc +122 -87
  76. data/src/core/lib/iomgr/executor.h +53 -48
  77. data/src/core/lib/iomgr/fork_posix.cc +6 -4
  78. data/src/core/lib/iomgr/{network_status_tracker.cc → grpc_if_nametoindex.h} +8 -14
  79. data/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc +42 -0
  80. data/src/core/lib/iomgr/{network_status_tracker.h → grpc_if_nametoindex_unsupported.cc} +15 -9
  81. data/src/core/lib/iomgr/internal_errqueue.h +105 -3
  82. data/src/core/lib/iomgr/iomgr.cc +6 -5
  83. data/src/core/lib/iomgr/iomgr.h +8 -0
  84. data/src/core/lib/iomgr/iomgr_custom.cc +6 -2
  85. data/src/core/lib/iomgr/iomgr_internal.cc +4 -0
  86. data/src/core/lib/iomgr/iomgr_internal.h +4 -0
  87. data/src/core/lib/iomgr/iomgr_posix.cc +10 -1
  88. data/src/core/lib/iomgr/iomgr_windows.cc +8 -1
  89. data/src/core/lib/iomgr/port.h +1 -0
  90. data/src/core/lib/iomgr/resolve_address_posix.cc +4 -3
  91. data/src/core/lib/iomgr/resolve_address_windows.cc +2 -1
  92. data/src/core/lib/iomgr/tcp_custom.cc +0 -4
  93. data/src/core/lib/iomgr/tcp_posix.cc +58 -44
  94. data/src/core/lib/iomgr/tcp_uv.cc +0 -1
  95. data/src/core/lib/iomgr/tcp_windows.cc +0 -4
  96. data/src/core/lib/iomgr/timer_manager.cc +8 -0
  97. data/src/core/lib/iomgr/udp_server.cc +6 -4
  98. data/src/core/lib/json/json.cc +1 -4
  99. data/src/core/lib/security/credentials/alts/alts_credentials.cc +1 -1
  100. data/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc +2 -2
  101. data/src/core/lib/security/credentials/composite/composite_credentials.h +4 -0
  102. data/src/core/lib/security/credentials/credentials.h +9 -1
  103. data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +15 -2
  104. data/src/core/lib/security/credentials/google_default/google_default_credentials.h +2 -0
  105. data/src/core/lib/security/credentials/jwt/json_token.cc +1 -1
  106. data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +1 -0
  107. data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +3 -2
  108. data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +2 -2
  109. data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +1 -0
  110. data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc +192 -0
  111. data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h +213 -0
  112. data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +10 -8
  113. data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +6 -10
  114. data/src/core/lib/security/security_connector/local/local_security_connector.cc +10 -8
  115. data/src/core/lib/security/security_connector/security_connector.h +2 -2
  116. data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +4 -6
  117. data/src/core/lib/security/security_connector/ssl_utils.h +33 -0
  118. data/src/core/lib/security/transport/security_handshaker.cc +267 -300
  119. data/src/core/lib/security/transport/security_handshaker.h +11 -2
  120. data/src/core/lib/security/transport/server_auth_filter.cc +1 -0
  121. data/src/core/lib/surface/call.cc +5 -1
  122. data/src/core/lib/surface/channel_init.h +5 -0
  123. data/src/core/lib/surface/completion_queue.cc +4 -7
  124. data/src/core/lib/surface/init.cc +5 -3
  125. data/src/core/lib/surface/init_secure.cc +1 -1
  126. data/src/core/lib/surface/server.cc +19 -17
  127. data/src/core/lib/surface/version.cc +1 -1
  128. data/src/core/lib/transport/service_config.h +1 -0
  129. data/src/core/lib/transport/static_metadata.cc +279 -279
  130. data/src/core/lib/transport/transport.cc +5 -3
  131. data/src/core/tsi/ssl_transport_security.cc +10 -4
  132. data/src/ruby/ext/grpc/extconf.rb +12 -4
  133. data/src/ruby/ext/grpc/rb_call_credentials.c +8 -5
  134. data/src/ruby/ext/grpc/rb_channel.c +14 -10
  135. data/src/ruby/ext/grpc/rb_channel_credentials.c +8 -4
  136. data/src/ruby/ext/grpc/rb_compression_options.c +9 -7
  137. data/src/ruby/ext/grpc/rb_event_thread.c +2 -0
  138. data/src/ruby/ext/grpc/rb_grpc.c +22 -23
  139. data/src/ruby/ext/grpc/rb_grpc.h +4 -2
  140. data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +18 -0
  141. data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +27 -0
  142. data/src/ruby/ext/grpc/rb_server.c +8 -4
  143. data/src/ruby/lib/grpc/version.rb +1 -1
  144. metadata +45 -38
  145. data/src/core/ext/filters/client_channel/subchannel_index.cc +0 -248
  146. data/src/core/ext/filters/client_channel/subchannel_index.h +0 -76
  147. data/src/core/lib/channel/handshaker_factory.cc +0 -42
data/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -45,6 +45,7 @@
 #include "src/core/lib/gpr/spinlock.h"
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
 #include "src/core/lib/gprpp/manual_constructor.h"
 #include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/iomgr/block_annotate.h"
@@ -78,18 +79,6 @@ typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;
 
 typedef struct pollable pollable;
 
-typedef struct cached_fd {
-  // Set to the grpc_fd's salt value. See 'salt' variable' in grpc_fd for more
-  // details
-  intptr_t salt;
-
-  // The underlying fd
-  int fd;
-
-  // A recency time counter that helps to determine the LRU fd in the cache
-  uint64_t last_used;
-} cached_fd;
-
 /// A pollable is something that can be polled: it has an epoll set to poll on,
 /// and a wakeup fd for kicks
 /// There are three broad types:
@@ -120,33 +109,6 @@ struct pollable {
   int event_cursor;
   int event_count;
   struct epoll_event events[MAX_EPOLL_EVENTS];
-
-  // We may be calling pollable_add_fd() on the same (pollable, fd) multiple
-  // times. To prevent pollable_add_fd() from making multiple sys calls to
-  // epoll_ctl() to add the fd, we maintain a cache of what fds are already
-  // present in the underlying epoll-set.
-  //
-  // Since this is not a correctness issue, we do not need to maintain all the
-  // fds in the cache. Hence we just use an LRU cache of size 'MAX_FDS_IN_CACHE'
-  //
-  // NOTE: An ideal implementation of this should do the following:
-  // 1) Add fds to the cache in pollable_add_fd() function (i.e whenever the fd
-  //    is added to the pollable's epoll set)
-  // 2) Remove the fd from the cache whenever the fd is removed from the
-  //    underlying epoll set (i.e whenever fd_orphan() is called).
-  //
-  // Implementing (2) above (i.e removing fds from cache on fd_orphan) adds a
-  // lot of complexity since an fd can be present in multiple pollables. So our
-  // implementation ONLY DOES (1) and NOT (2).
-  //
-  // The cache_fd.salt variable helps here to maintain correctness (it serves as
-  // an epoch that differentiates one grpc_fd from the other even though both of
-  // them may have the same fd number)
-  //
-  // The following implements LRU-eviction cache of fds in this pollable
-  cached_fd fd_cache[MAX_FDS_IN_CACHE];
-  int fd_cache_size;
-  uint64_t fd_cache_counter;  // Recency timer tick counter
 };
 
 static const char* pollable_type_string(pollable_type t) {
@@ -189,37 +151,86 @@ static void pollable_unref(pollable* p, int line, const char* reason);
  * Fd Declarations
  */
 
-// Monotonically increasing Epoch counter that is assinged to each grpc_fd. See
-// the description of 'salt' variable in 'grpc_fd' for more details
-// TODO: (sreek/kpayson) gpr_atm is intptr_t which may not be wide-enough on
-// 32-bit systems. Change this to int_64 - atleast on 32-bit systems
-static gpr_atm g_fd_salt;
-
 struct grpc_fd {
-  int fd;
+  grpc_fd(int fd, const char* name, bool track_err)
+      : fd(fd), track_err(track_err) {
+    gpr_mu_init(&orphan_mu);
+    gpr_mu_init(&pollable_mu);
+    read_closure.InitEvent();
+    write_closure.InitEvent();
+    error_closure.InitEvent();
+
+    char* fd_name;
+    gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
+    grpc_iomgr_register_object(&iomgr_object, fd_name);
+#ifndef NDEBUG
+    if (grpc_trace_fd_refcount.enabled()) {
+      gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, this, fd_name);
+    }
+#endif
+    gpr_free(fd_name);
+  }
+
+  // This is really the dtor, but the poller threads waking up from
+  // epoll_wait() may access the (read|write|error)_closure after destruction.
+  // Since the object will be added to the free pool, this behavior is
+  // not going to cause issues, except spurious events if the FD is reused
+  // while the race happens.
+  void destroy() {
+    grpc_iomgr_unregister_object(&iomgr_object);
+
+    POLLABLE_UNREF(pollable_obj, "fd_pollable");
+    pollset_fds.clear();
+    gpr_mu_destroy(&pollable_mu);
+    gpr_mu_destroy(&orphan_mu);
+
+    read_closure.DestroyEvent();
+    write_closure.DestroyEvent();
+    error_closure.DestroyEvent();
+
+    invalidate();
+  }
 
-  // Since fd numbers can be reused (after old fds are closed), this serves as
-  // an epoch that uniquely identifies this fd (i.e the pair (salt, fd) is
-  // unique (until the salt counter (i.e g_fd_salt) overflows)
-  intptr_t salt;
+#ifndef NDEBUG
+  /* Since an fd is never really destroyed (i.e gpr_free() is not called), it is
+   * hard-to-debug cases where fd fields are accessed even after calling
+   * fd_destroy(). The following invalidates fd fields to make catching such
+   * errors easier */
+  void invalidate() {
+    fd = -1;
+    gpr_atm_no_barrier_store(&refst, -1);
+    memset(&orphan_mu, -1, sizeof(orphan_mu));
+    memset(&pollable_mu, -1, sizeof(pollable_mu));
+    pollable_obj = nullptr;
+    on_done_closure = nullptr;
+    memset(&iomgr_object, -1, sizeof(iomgr_object));
+    track_err = false;
+  }
+#else
+  void invalidate() {}
+#endif
+
+  int fd;
 
   // refst format:
   //     bit 0    : 1=Active / 0=Orphaned
   //     bits 1-n : refcount
   // Ref/Unref by two to avoid altering the orphaned bit
-  gpr_atm refst;
+  gpr_atm refst = 1;
 
   gpr_mu orphan_mu;
 
+  // Protects pollable_obj and pollset_fds.
   gpr_mu pollable_mu;
-  pollable* pollable_obj;
+  grpc_core::InlinedVector<int, 1> pollset_fds;  // Used in PO_MULTI.
+  pollable* pollable_obj = nullptr;              // Used in PO_FD.
 
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;
+  grpc_core::LockfreeEvent read_closure;
+  grpc_core::LockfreeEvent write_closure;
+  grpc_core::LockfreeEvent error_closure;
 
-  struct grpc_fd* freelist_next;
-  grpc_closure* on_done_closure;
+  struct grpc_fd* freelist_next = nullptr;
+  grpc_closure* on_done_closure = nullptr;
 
   grpc_iomgr_object iomgr_object;
 
@@ -258,6 +269,7 @@ struct grpc_pollset_worker {
 struct grpc_pollset {
   gpr_mu mu;
   gpr_atm worker_count;
+  gpr_atm active_pollable_type;
   pollable* active_pollable;
   bool kicked_without_poller;
   grpc_closure* shutdown_closure;
@@ -337,39 +349,10 @@ static void ref_by(grpc_fd* fd, int n) {
   GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
 }
 
-#ifndef NDEBUG
-#define INVALIDATE_FD(fd) invalidate_fd(fd)
-/* Since an fd is never really destroyed (i.e gpr_free() is not called), it is
- * hard to cases where fd fields are accessed even after calling fd_destroy().
- * The following invalidates fd fields to make catching such errors easier */
-static void invalidate_fd(grpc_fd* fd) {
-  fd->fd = -1;
-  fd->salt = -1;
-  gpr_atm_no_barrier_store(&fd->refst, -1);
-  memset(&fd->orphan_mu, -1, sizeof(fd->orphan_mu));
-  memset(&fd->pollable_mu, -1, sizeof(fd->pollable_mu));
-  fd->pollable_obj = nullptr;
-  fd->on_done_closure = nullptr;
-  memset(&fd->iomgr_object, -1, sizeof(fd->iomgr_object));
-  fd->track_err = false;
-}
-#else
-#define INVALIDATE_FD(fd)
-#endif
-
 /* Uninitialize and add to the freelist */
 static void fd_destroy(void* arg, grpc_error* error) {
   grpc_fd* fd = static_cast<grpc_fd*>(arg);
-  grpc_iomgr_unregister_object(&fd->iomgr_object);
-  POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
-  gpr_mu_destroy(&fd->pollable_mu);
-  gpr_mu_destroy(&fd->orphan_mu);
-
-  fd->read_closure->DestroyEvent();
-  fd->write_closure->DestroyEvent();
-  fd->error_closure->DestroyEvent();
-
-  INVALIDATE_FD(fd);
+  fd->destroy();
 
   /* Add the fd to the freelist */
   gpr_mu_lock(&fd_freelist_mu);
@@ -429,35 +412,9 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
 
   if (new_fd == nullptr) {
     new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
-    new_fd->read_closure.Init();
-    new_fd->write_closure.Init();
-    new_fd->error_closure.Init();
-  }
-
-  new_fd->fd = fd;
-  new_fd->salt = gpr_atm_no_barrier_fetch_add(&g_fd_salt, 1);
-  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
-  gpr_mu_init(&new_fd->orphan_mu);
-  gpr_mu_init(&new_fd->pollable_mu);
-  new_fd->pollable_obj = nullptr;
-  new_fd->read_closure->InitEvent();
-  new_fd->write_closure->InitEvent();
-  new_fd->error_closure->InitEvent();
-  new_fd->freelist_next = nullptr;
-  new_fd->on_done_closure = nullptr;
-
-  char* fd_name;
-  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
-  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifndef NDEBUG
-  if (grpc_trace_fd_refcount.enabled()) {
-    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
   }
-#endif
-  gpr_free(fd_name);
 
-  new_fd->track_err = track_err;
-  return new_fd;
+  return new (new_fd) grpc_fd(fd, name, track_err);
 }
 
 static int fd_wrapped_fd(grpc_fd* fd) {
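The new fd_create() above replaces roughly twenty lines of field-by-field initialization with a placement-new over the recycled freelist slot, so the grpc_fd constructor runs identically for fresh and reused storage. A minimal sketch of that constructor-over-freelist pattern (hypothetical names, not the gRPC API):

#include <cstdlib>
#include <new>

struct Fd {
  explicit Fd(int fd) : fd(fd) {}  // acquire resources in the ctor
  void destroy() { fd = -1; }      // "dtor" that leaves storage reusable
  int fd;
  Fd* freelist_next = nullptr;
};

static Fd* g_freelist = nullptr;  // single-threaded sketch; the real code
                                  // guards its freelist with fd_freelist_mu

Fd* fd_create(int fd) {
  Fd* slot = g_freelist;
  if (slot != nullptr) {
    g_freelist = slot->freelist_next;
  } else {
    slot = static_cast<Fd*>(std::malloc(sizeof(Fd)));
  }
  return new (slot) Fd(fd);  // re-run the ctor over the (possibly old) slot
}

void fd_destroy(Fd* f) {
  f->destroy();  // release resources, but keep the memory for reuse
  f->freelist_next = g_freelist;
  g_freelist = f;
}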
@@ -475,7 +432,6 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
   // true so that the pollable will no longer access its owner_fd field.
   gpr_mu_lock(&fd->pollable_mu);
   pollable* pollable_obj = fd->pollable_obj;
-  gpr_mu_unlock(&fd->pollable_mu);
 
   if (pollable_obj) {
     gpr_mu_lock(&pollable_obj->owner_orphan_mu);
@@ -487,6 +443,19 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
   /* If release_fd is not NULL, we should be relinquishing control of the file
      descriptor fd->fd (but we still own the grpc_fd structure). */
   if (release_fd != nullptr) {
+    // Remove the FD from all epolls sets, before releasing it.
+    // Otherwise, we will receive epoll events after we release the FD.
+    epoll_event ev_fd;
+    memset(&ev_fd, 0, sizeof(ev_fd));
+    if (release_fd != nullptr) {
+      if (pollable_obj != nullptr) {  // For PO_FD.
+        epoll_ctl(pollable_obj->epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
+      }
+      for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {  // For PO_MULTI.
+        const int epfd = fd->pollset_fds[i];
+        epoll_ctl(epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
+      }
+    }
     *release_fd = fd->fd;
   } else {
     close(fd->fd);
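The EPOLL_CTL_DEL block added above exists because an epoll registration outlives fd_orphan() when the descriptor is handed back via release_fd: unlike close(), releasing the fd does not remove it from the kernel's interest lists, so stale events would keep arriving. A standalone sketch of the same deregistration step (epfds standing in for the tracked pollset_fds):

#include <sys/epoll.h>
#include <cstddef>
#include <cstring>

// Detach `fd` from every epoll instance it was registered with before
// handing it back to the caller. The event argument is ignored for
// EPOLL_CTL_DEL; a zeroed struct is passed for old-kernel portability.
void detach_from_epolls(int fd, const int* epfds, size_t nepfds) {
  epoll_event ev;
  memset(&ev, 0, sizeof(ev));
  for (size_t i = 0; i < nepfds; ++i) {
    epoll_ctl(epfds[i], EPOLL_CTL_DEL, fd, &ev);  // best-effort removal
  }
}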
@@ -508,40 +477,58 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
     gpr_mu_unlock(&pollable_obj->owner_orphan_mu);
   }
 
+  gpr_mu_unlock(&fd->pollable_mu);
   gpr_mu_unlock(&fd->orphan_mu);
 
   UNREF_BY(fd, 2, reason); /* Drop the reference */
 }
 
 static bool fd_is_shutdown(grpc_fd* fd) {
-  return fd->read_closure->IsShutdown();
+  return fd->read_closure.IsShutdown();
 }
 
 /* Might be called multiple times */
 static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
-  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
+  if (fd->read_closure.SetShutdown(GRPC_ERROR_REF(why))) {
     if (shutdown(fd->fd, SHUT_RDWR)) {
       if (errno != ENOTCONN) {
         gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
                 grpc_fd_wrapped_fd(fd), errno);
       }
     }
-    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
-    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
+    fd->write_closure.SetShutdown(GRPC_ERROR_REF(why));
+    fd->error_closure.SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
 
 static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
-  fd->read_closure->NotifyOn(closure);
+  fd->read_closure.NotifyOn(closure);
 }
 
 static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
-  fd->write_closure->NotifyOn(closure);
+  fd->write_closure.NotifyOn(closure);
 }
 
 static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
-  fd->error_closure->NotifyOn(closure);
+  fd->error_closure.NotifyOn(closure);
+}
+
+static bool fd_has_pollset(grpc_fd* fd, grpc_pollset* pollset) {
+  const int epfd = pollset->active_pollable->epfd;
+  grpc_core::MutexLock lock(&fd->pollable_mu);
+  for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {
+    if (fd->pollset_fds[i] == epfd) {
+      return true;
+    }
+  }
+  return false;
+}
+
+static void fd_add_pollset(grpc_fd* fd, grpc_pollset* pollset) {
+  const int epfd = pollset->active_pollable->epfd;
+  grpc_core::MutexLock lock(&fd->pollable_mu);
+  fd->pollset_fds.push_back(epfd);
 }
 
 /*******************************************************************************
@@ -594,8 +581,6 @@ static grpc_error* pollable_create(pollable_type type, pollable** p) {
   (*p)->root_worker = nullptr;
   (*p)->event_cursor = 0;
   (*p)->event_count = 0;
-  (*p)->fd_cache_size = 0;
-  (*p)->fd_cache_counter = 0;
   return GRPC_ERROR_NONE;
 }
 
@@ -637,39 +622,6 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
   grpc_error* error = GRPC_ERROR_NONE;
   static const char* err_desc = "pollable_add_fd";
   const int epfd = p->epfd;
-  gpr_mu_lock(&p->mu);
-  p->fd_cache_counter++;
-
-  // Handle the case of overflow for our cache counter by
-  // reseting the recency-counter on all cache objects
-  if (p->fd_cache_counter == 0) {
-    for (int i = 0; i < p->fd_cache_size; i++) {
-      p->fd_cache[i].last_used = 0;
-    }
-  }
-
-  int lru_idx = 0;
-  for (int i = 0; i < p->fd_cache_size; i++) {
-    if (p->fd_cache[i].fd == fd->fd && p->fd_cache[i].salt == fd->salt) {
-      GRPC_STATS_INC_POLLSET_FD_CACHE_HITS();
-      p->fd_cache[i].last_used = p->fd_cache_counter;
-      gpr_mu_unlock(&p->mu);
-      return GRPC_ERROR_NONE;
-    } else if (p->fd_cache[i].last_used < p->fd_cache[lru_idx].last_used) {
-      lru_idx = i;
-    }
-  }
-
-  // Add to cache
-  if (p->fd_cache_size < MAX_FDS_IN_CACHE) {
-    lru_idx = p->fd_cache_size;
-    p->fd_cache_size++;
-  }
-  p->fd_cache[lru_idx].fd = fd->fd;
-  p->fd_cache[lru_idx].salt = fd->salt;
-  p->fd_cache[lru_idx].last_used = p->fd_cache_counter;
-  gpr_mu_unlock(&p->mu);
-
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
   }
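This hunk deletes the per-pollable LRU fd cache (along with the salt epoch and the MAX_FDS_IN_CACHE machinery removed earlier) rather than replacing it in place: duplicate-add suppression now happens one level up, where each grpc_fd records which epoll sets it has joined (pollset_fds) and pollset_add_fd() consults that list first. As the removed comment itself noted, the cache was never needed for correctness, because the kernel already rejects a duplicate registration; a sketch of that behavior:

#include <sys/epoll.h>
#include <cerrno>
#include <cstdint>

// A redundant EPOLL_CTL_ADD fails cleanly with EEXIST, so skipping the
// syscall is purely an optimization (illustrative helper, not gRPC code).
bool epoll_add_once(int epfd, int fd, uint32_t events) {
  epoll_event ev;
  ev.events = events;
  ev.data.fd = fd;
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) == 0) return true;
  return errno == EEXIST;  // already present: treat as success
}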
@@ -849,6 +801,7 @@ static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
 static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
   gpr_mu_init(&pollset->mu);
   gpr_atm_no_barrier_store(&pollset->worker_count, 0);
+  gpr_atm_no_barrier_store(&pollset->active_pollable_type, PO_EMPTY);
   pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
   pollset->kicked_without_poller = false;
   pollset->shutdown_closure = nullptr;
@@ -869,11 +822,11 @@
   return static_cast<int>(delta);
 }
 
-static void fd_become_readable(grpc_fd* fd) { fd->read_closure->SetReady(); }
+static void fd_become_readable(grpc_fd* fd) { fd->read_closure.SetReady(); }
 
-static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
+static void fd_become_writable(grpc_fd* fd) { fd->write_closure.SetReady(); }
 
-static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }
+static void fd_has_errors(grpc_fd* fd) { fd->error_closure.SetReady(); }
 
 /* Get the pollable_obj attached to this fd. If none is attached, create a new
  * pollable object (of type PO_FD), attach it to the fd and return it
@@ -1283,6 +1236,8 @@ static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
     POLLABLE_UNREF(pollset->active_pollable, "pollset");
     pollset->active_pollable = po_at_start;
   } else {
+    gpr_atm_rel_store(&pollset->active_pollable_type,
+                      pollset->active_pollable->type);
     POLLABLE_UNREF(po_at_start, "pollset_add_fd");
   }
   return error;
@@ -1329,6 +1284,8 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
     pollset->active_pollable = po_at_start;
     *pollable_obj = nullptr;
   } else {
+    gpr_atm_rel_store(&pollset->active_pollable_type,
+                      pollset->active_pollable->type);
     *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
     POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
   }
@@ -1337,9 +1294,23 @@
 
 static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
   GPR_TIMER_SCOPE("pollset_add_fd", 0);
-  gpr_mu_lock(&pollset->mu);
+
+  // We never transition from PO_MULTI to other modes (i.e., PO_FD or PO_EMOPTY)
+  // and, thus, it is safe to simply store and check whether the FD has already
+  // been added to the active pollable previously.
+  if (gpr_atm_acq_load(&pollset->active_pollable_type) == PO_MULTI &&
+      fd_has_pollset(fd, pollset)) {
+    return;
+  }
+
+  grpc_core::MutexLock lock(&pollset->mu);
   grpc_error* error = pollset_add_fd_locked(pollset, fd);
-  gpr_mu_unlock(&pollset->mu);
+
+  // If we are in PO_MULTI mode, we should update the pollsets of the FD.
+  if (gpr_atm_no_barrier_load(&pollset->active_pollable_type) == PO_MULTI) {
+    fd_add_pollset(fd, pollset);
+  }
+
   GRPC_LOG_IF_ERROR("pollset_add_fd", error);
 }
 
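The rewritten pollset_add_fd() pairs its acquire load of active_pollable_type with the release stores added in pollset_add_fd_locked() and pollset_as_multipollable_locked(): because a pollset never leaves PO_MULTI once it reaches it, a thread that observes PO_MULTI can consult the fd's membership list and skip pollset->mu entirely. A condensed sketch of that fast path in portable C++ (std::atomic standing in for gpr_atm; names are illustrative):

#include <algorithm>
#include <atomic>
#include <mutex>
#include <vector>

enum PollableType { PO_EMPTY, PO_FD, PO_MULTI };

struct Fd {
  std::mutex mu;
  std::vector<int> epoll_fds;  // epoll sets this fd has been added to
  bool has(int epfd) {
    std::lock_guard<std::mutex> lock(mu);
    return std::find(epoll_fds.begin(), epoll_fds.end(), epfd) !=
           epoll_fds.end();
  }
  void add(int epfd) {
    std::lock_guard<std::mutex> lock(mu);
    epoll_fds.push_back(epfd);
  }
};

struct Pollset {
  std::atomic<int> active_type{PO_EMPTY};
  std::mutex mu;
  int epfd = -1;  // epoll fd of the active pollable

  void add_fd(Fd* fd) {
    // Fast path: PO_MULTI is a terminal state, so an acquire load that
    // sees it may trust the fd-side membership list without taking `mu`.
    if (active_type.load(std::memory_order_acquire) == PO_MULTI &&
        fd->has(epfd)) {
      return;  // duplicate add: no pollset lock needed
    }
    std::lock_guard<std::mutex> lock(mu);
    // ... pollable transitions / epoll_ctl work elided ...
    fd->add(epfd);
    active_type.store(PO_MULTI, std::memory_order_release);
  }
};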
@@ -1604,6 +1575,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
  * Event engine binding
  */
 
+static bool is_any_background_poller_thread(void) { return false; }
+
 static void shutdown_background_closure(void) {}
 
 static void shutdown_engine(void) {
@@ -1644,6 +1617,7 @@ static const grpc_event_engine_vtable vtable = {
     pollset_set_add_fd,
     pollset_set_del_fd,
 
+    is_any_background_poller_thread,
     shutdown_background_closure,
     shutdown_engine,
 };
data/src/core/lib/iomgr/ev_poll_posix.cc
@@ -1782,6 +1782,8 @@ static void global_cv_fd_table_shutdown() {
  * event engine binding
  */
 
+static bool is_any_background_poller_thread(void) { return false; }
+
 static void shutdown_background_closure(void) {}
 
 static void shutdown_engine(void) {
@@ -1828,6 +1830,7 @@ static const grpc_event_engine_vtable vtable = {
     pollset_set_add_fd,
     pollset_set_del_fd,
 
+    is_any_background_poller_thread,
     shutdown_background_closure,
     shutdown_engine,
 };
data/src/core/lib/iomgr/ev_posix.cc
@@ -399,6 +399,10 @@ void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
   g_event_engine->pollset_set_del_fd(pollset_set, fd);
 }
 
+bool grpc_is_any_background_poller_thread(void) {
+  return g_event_engine->is_any_background_poller_thread();
+}
+
 void grpc_shutdown_background_closure(void) {
   g_event_engine->shutdown_background_closure();
 }
data/src/core/lib/iomgr/ev_posix.h
@@ -80,6 +80,7 @@ typedef struct grpc_event_engine_vtable {
   void (*pollset_set_add_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
   void (*pollset_set_del_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
 
+  bool (*is_any_background_poller_thread)(void);
   void (*shutdown_background_closure)(void);
   void (*shutdown_engine)(void);
 } grpc_event_engine_vtable;
@@ -181,6 +182,9 @@ void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd);
 void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
 void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
 
+/* Returns true if the caller is a worker thread for any background poller. */
+bool grpc_is_any_background_poller_thread();
+
 /* Shut down all the closures registered in the background poller. */
 void grpc_shutdown_background_closure();
 
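Taken together, the ev_posix.cc and ev_posix.h hunks show what extending grpc_event_engine_vtable costs: every engine must fill the new slot (both engines in this release supply a trivial `return false` stub, since neither runs a background poller), and a public wrapper hides the dispatch. A trimmed-down sketch of this C-style vtable pattern (hypothetical two-slot struct, not the real vtable):

#include <cstdio>

typedef struct event_engine_vtable {
  bool (*is_any_background_poller_thread)(void);
  void (*shutdown_engine)(void);
} event_engine_vtable;

// Engines without a background poller supply a stub.
static bool no_background_poller(void) { return false; }
static void shutdown_noop(void) {}

static const event_engine_vtable g_engine = {
    no_background_poller,
    shutdown_noop,
};
static const event_engine_vtable* g_event_engine = &g_engine;

// Public wrapper: callers never touch the vtable directly.
bool is_any_background_poller_thread(void) {
  return g_event_engine->is_any_background_poller_thread();
}

int main() {
  std::printf("%d\n", is_any_background_poller_thread());  // prints 0
  return 0;
}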
data/src/core/lib/iomgr/exec_ctx.cc
@@ -115,6 +115,7 @@ grpc_closure_scheduler* grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
 
 namespace grpc_core {
 GPR_TLS_CLASS_DEF(ExecCtx::exec_ctx_);
+GPR_TLS_CLASS_DEF(ApplicationCallbackExecCtx::callback_exec_ctx_);
 
 // WARNING: for testing purposes only!
 void ExecCtx::TestOnlyGlobalInit(gpr_timespec new_val) {
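The exec_ctx.cc hunk defines the thread-local slot for the new ApplicationCallbackExecCtx next to the existing ExecCtx one (GPR_TLS_CLASS_DEF emits the storage declared in exec_ctx.h, which grows by 137 lines in this release). The underlying idea is an ambient per-thread context, sketched here with plain C++ thread_local (an illustrative class; gRPC uses its GPR_TLS macros for portability to toolchains without C++11 TLS):

class ScopedCtx {
 public:
  ScopedCtx() : parent_(current_) { current_ = this; }  // install on entry
  ~ScopedCtx() { current_ = parent_; }                  // restore on exit
  static ScopedCtx* Get() { return current_; }          // ambient lookup

 private:
  ScopedCtx* parent_;
  static thread_local ScopedCtx* current_;
};

thread_local ScopedCtx* ScopedCtx::current_ = nullptr;

void deep_in_the_stack() {
  ScopedCtx* ctx = ScopedCtx::Get();  // reachable without parameter threading
  (void)ctx;
}

int main() {
  ScopedCtx ctx;  // valid for the duration of this frame
  deep_in_the_stack();
  return 0;
}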