grpc 1.18.0 → 1.19.0.pre1
Potentially problematic release: this version of grpc might be problematic.
- checksums.yaml +4 -4
- data/Makefile +301 -33
- data/include/grpc/grpc_security.h +195 -0
- data/include/grpc/impl/codegen/grpc_types.h +17 -1
- data/include/grpc/impl/codegen/port_platform.h +36 -0
- data/include/grpc/impl/codegen/slice.h +1 -1
- data/src/core/ext/filters/client_channel/channel_connectivity.cc +2 -0
- data/src/core/ext/filters/client_channel/client_channel.cc +74 -69
- data/src/core/ext/filters/client_channel/client_channel.h +2 -2
- data/src/core/ext/filters/client_channel/client_channel_channelz.cc +5 -6
- data/src/core/ext/filters/client_channel/client_channel_channelz.h +5 -4
- data/src/core/ext/filters/client_channel/client_channel_factory.cc +2 -2
- data/src/core/ext/filters/client_channel/client_channel_factory.h +4 -4
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +3 -3
- data/src/core/ext/filters/client_channel/global_subchannel_pool.cc +176 -0
- data/src/core/ext/filters/client_channel/global_subchannel_pool.h +68 -0
- data/src/core/ext/filters/client_channel/health/health_check_client.cc +10 -8
- data/src/core/ext/filters/client_channel/health/health_check_client.h +1 -1
- data/src/core/ext/filters/client_channel/http_connect_handshaker.cc +146 -156
- data/src/core/ext/filters/client_channel/lb_policy.cc +30 -1
- data/src/core/ext/filters/client_channel/lb_policy.h +29 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +28 -30
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +5 -8
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +5 -8
- data/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h +23 -24
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc +80 -15
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +6 -1
- data/src/core/ext/filters/client_channel/lb_policy_registry.cc +2 -2
- data/src/core/ext/filters/client_channel/lb_policy_registry.h +1 -1
- data/src/core/ext/filters/client_channel/local_subchannel_pool.cc +96 -0
- data/src/core/ext/filters/client_channel/local_subchannel_pool.h +56 -0
- data/src/core/ext/filters/client_channel/parse_address.cc +24 -5
- data/src/core/ext/filters/client_channel/request_routing.cc +13 -3
- data/src/core/ext/filters/client_channel/request_routing.h +5 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +11 -6
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +2 -2
- data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +7 -35
- data/src/core/ext/filters/client_channel/subchannel.cc +698 -791
- data/src/core/ext/filters/client_channel/subchannel.h +213 -123
- data/src/core/ext/filters/client_channel/subchannel_pool_interface.cc +97 -0
- data/src/core/ext/filters/client_channel/subchannel_pool_interface.h +94 -0
- data/src/core/ext/filters/http/client_authority_filter.cc +5 -2
- data/src/core/ext/filters/max_age/max_age_filter.cc +1 -1
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +13 -12
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.cc +5 -7
- data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc +19 -27
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +18 -19
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +27 -6
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +3 -2
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/writing.cc +8 -5
- data/src/core/lib/channel/handshaker.cc +141 -214
- data/src/core/lib/channel/handshaker.h +110 -101
- data/src/core/lib/channel/handshaker_factory.h +11 -19
- data/src/core/lib/channel/handshaker_registry.cc +64 -52
- data/src/core/lib/channel/handshaker_registry.h +21 -16
- data/src/core/lib/gpr/log_posix.cc +2 -1
- data/src/core/lib/gpr/time.cc +8 -0
- data/src/core/lib/gpr/time_posix.cc +8 -2
- data/src/core/lib/gprpp/optional.h +47 -0
- data/src/core/lib/http/httpcli_security_connector.cc +13 -14
- data/src/core/lib/iomgr/buffer_list.cc +182 -24
- data/src/core/lib/iomgr/buffer_list.h +70 -8
- data/src/core/lib/iomgr/combiner.cc +11 -3
- data/src/core/lib/iomgr/error.cc +9 -5
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +3 -0
- data/src/core/lib/iomgr/ev_epollex_linux.cc +136 -162
- data/src/core/lib/iomgr/ev_poll_posix.cc +3 -0
- data/src/core/lib/iomgr/ev_posix.cc +4 -0
- data/src/core/lib/iomgr/ev_posix.h +4 -0
- data/src/core/lib/iomgr/exec_ctx.cc +1 -0
- data/src/core/lib/iomgr/exec_ctx.h +137 -8
- data/src/core/lib/iomgr/executor.cc +122 -87
- data/src/core/lib/iomgr/executor.h +53 -48
- data/src/core/lib/iomgr/fork_posix.cc +6 -4
- data/src/core/lib/iomgr/{network_status_tracker.cc → grpc_if_nametoindex.h} +8 -14
- data/src/core/lib/iomgr/grpc_if_nametoindex_posix.cc +42 -0
- data/src/core/lib/iomgr/{network_status_tracker.h → grpc_if_nametoindex_unsupported.cc} +15 -9
- data/src/core/lib/iomgr/internal_errqueue.h +105 -3
- data/src/core/lib/iomgr/iomgr.cc +6 -5
- data/src/core/lib/iomgr/iomgr.h +8 -0
- data/src/core/lib/iomgr/iomgr_custom.cc +6 -2
- data/src/core/lib/iomgr/iomgr_internal.cc +4 -0
- data/src/core/lib/iomgr/iomgr_internal.h +4 -0
- data/src/core/lib/iomgr/iomgr_posix.cc +10 -1
- data/src/core/lib/iomgr/iomgr_windows.cc +8 -1
- data/src/core/lib/iomgr/port.h +1 -0
- data/src/core/lib/iomgr/resolve_address_posix.cc +4 -3
- data/src/core/lib/iomgr/resolve_address_windows.cc +2 -1
- data/src/core/lib/iomgr/tcp_custom.cc +0 -4
- data/src/core/lib/iomgr/tcp_posix.cc +58 -44
- data/src/core/lib/iomgr/tcp_uv.cc +0 -1
- data/src/core/lib/iomgr/tcp_windows.cc +0 -4
- data/src/core/lib/iomgr/timer_manager.cc +8 -0
- data/src/core/lib/iomgr/udp_server.cc +6 -4
- data/src/core/lib/json/json.cc +1 -4
- data/src/core/lib/security/credentials/alts/alts_credentials.cc +1 -1
- data/src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc +2 -2
- data/src/core/lib/security/credentials/composite/composite_credentials.h +4 -0
- data/src/core/lib/security/credentials/credentials.h +9 -1
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +15 -2
- data/src/core/lib/security/credentials/google_default/google_default_credentials.h +2 -0
- data/src/core/lib/security/credentials/jwt/json_token.cc +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +1 -0
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +3 -2
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +2 -2
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +1 -0
- data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc +192 -0
- data/src/core/lib/security/credentials/tls/grpc_tls_credentials_options.h +213 -0
- data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +10 -8
- data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +6 -10
- data/src/core/lib/security/security_connector/local/local_security_connector.cc +10 -8
- data/src/core/lib/security/security_connector/security_connector.h +2 -2
- data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +4 -6
- data/src/core/lib/security/security_connector/ssl_utils.h +33 -0
- data/src/core/lib/security/transport/security_handshaker.cc +267 -300
- data/src/core/lib/security/transport/security_handshaker.h +11 -2
- data/src/core/lib/security/transport/server_auth_filter.cc +1 -0
- data/src/core/lib/surface/call.cc +5 -1
- data/src/core/lib/surface/channel_init.h +5 -0
- data/src/core/lib/surface/completion_queue.cc +4 -7
- data/src/core/lib/surface/init.cc +5 -3
- data/src/core/lib/surface/init_secure.cc +1 -1
- data/src/core/lib/surface/server.cc +19 -17
- data/src/core/lib/surface/version.cc +1 -1
- data/src/core/lib/transport/service_config.h +1 -0
- data/src/core/lib/transport/static_metadata.cc +279 -279
- data/src/core/lib/transport/transport.cc +5 -3
- data/src/core/tsi/ssl_transport_security.cc +10 -4
- data/src/ruby/ext/grpc/extconf.rb +12 -4
- data/src/ruby/ext/grpc/rb_call_credentials.c +8 -5
- data/src/ruby/ext/grpc/rb_channel.c +14 -10
- data/src/ruby/ext/grpc/rb_channel_credentials.c +8 -4
- data/src/ruby/ext/grpc/rb_compression_options.c +9 -7
- data/src/ruby/ext/grpc/rb_event_thread.c +2 -0
- data/src/ruby/ext/grpc/rb_grpc.c +22 -23
- data/src/ruby/ext/grpc/rb_grpc.h +4 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +18 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +27 -0
- data/src/ruby/ext/grpc/rb_server.c +8 -4
- data/src/ruby/lib/grpc/version.rb +1 -1
- metadata +46 -39
- data/src/core/ext/filters/client_channel/subchannel_index.cc +0 -248
- data/src/core/ext/filters/client_channel/subchannel_index.h +0 -76
- data/src/core/lib/channel/handshaker_factory.cc +0 -42
data/src/core/lib/iomgr/combiner.cc
CHANGED
@@ -29,6 +29,7 @@
 
 #include "src/core/lib/debug/stats.h"
 #include "src/core/lib/iomgr/executor.h"
+#include "src/core/lib/iomgr/iomgr.h"
 #include "src/core/lib/profiling/timers.h"
 
 grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");
@@ -82,8 +83,9 @@ grpc_combiner* grpc_combiner_create(void) {
   gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
   gpr_mpscq_init(&lock->queue);
   grpc_closure_list_init(&lock->final_list);
-  GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
-                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
+  GRPC_CLOSURE_INIT(
+      &lock->offload, offload, lock,
+      grpc_core::Executor::Scheduler(grpc_core::ExecutorJobType::SHORT));
   GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
   return lock;
 }
@@ -228,8 +230,14 @@ bool grpc_combiner_continue_exec_ctx() {
                               grpc_core::ExecCtx::Get()->IsReadyToFinish(),
                               lock->time_to_execute_final_list));
 
+  // offload only if all the following conditions are true:
+  // 1. the combiner is contended and has more than one closure to execute
+  // 2. the current execution context needs to finish as soon as possible
+  // 3. the DEFAULT executor is threaded
+  // 4. the current thread is not a worker for any background poller
   if (contended && grpc_core::ExecCtx::Get()->IsReadyToFinish() &&
-      grpc_executor_is_threaded()) {
+      grpc_core::Executor::IsThreadedDefault() &&
+      !grpc_iomgr_is_any_background_poller_thread()) {
     GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
     // this execution context wants to move on: schedule remaining work to be
     // picked up on the executor
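Condition 4 relies on the new grpc_iomgr_is_any_background_poller_thread() predicate; the poll engines touched in this release simply answer false (see the ev_epoll1_linux.cc and ev_epollex_linux.cc hunks below). As a rough illustration of the kind of per-thread check such a gate needs, here is a minimal, self-contained sketch; the names MarkBackgroundPollerThread, IsBackgroundPollerThread and ShouldOffloadToExecutor are illustrative only, not grpc API:

    // Sketch only: a per-thread flag that a background poller thread would set
    // on startup, so other code can avoid offloading work away from it.
    namespace sketch {

    thread_local bool g_is_background_poller_thread = false;

    void MarkBackgroundPollerThread() { g_is_background_poller_thread = true; }

    bool IsBackgroundPollerThread() { return g_is_background_poller_thread; }

    bool ShouldOffloadToExecutor(bool contended, bool exec_ctx_ready_to_finish,
                                 bool executor_is_threaded) {
      // Mirrors the four conditions listed in the combiner.cc comment above.
      return contended && exec_ctx_ready_to_finish && executor_is_threaded &&
             !IsBackgroundPollerThread();
    }

    }  // namespace sketch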
data/src/core/lib/iomgr/error.cc
CHANGED
@@ -303,11 +303,15 @@ static void internal_add_error(grpc_error** err, grpc_error* new_err) {
 // It is very common to include and extra int and string in an error
 #define SURPLUS_CAPACITY (2 * SLOTS_PER_INT + SLOTS_PER_TIME)
 
-static bool g_error_creation_allowed = true;
+static gpr_atm g_error_creation_allowed = true;
 
-void grpc_disable_error_creation() { g_error_creation_allowed = false; }
+void grpc_disable_error_creation() {
+  gpr_atm_no_barrier_store(&g_error_creation_allowed, false);
+}
 
-void grpc_enable_error_creation() { g_error_creation_allowed = true; }
+void grpc_enable_error_creation() {
+  gpr_atm_no_barrier_store(&g_error_creation_allowed, true);
+}
 
 grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
                               grpc_error** referencing,
@@ -323,7 +327,7 @@ grpc_error* grpc_error_create(const char* file, int line, grpc_slice desc,
     return GRPC_ERROR_OOM;
   }
 #ifndef NDEBUG
-  if (!g_error_creation_allowed) {
+  if (!gpr_atm_no_barrier_load(&g_error_creation_allowed)) {
     gpr_log(GPR_ERROR,
             "Error creation occurred when error creation was disabled [%s:%d]",
             file, line);
@@ -765,7 +769,7 @@ grpc_error* grpc_os_error(const char* file, int line, int err,
       grpc_error_set_str(
           grpc_error_set_int(
               grpc_error_create(file, line,
-                                grpc_slice_from_static_string(
+                                grpc_slice_from_static_string(strerror(err)),
                                 nullptr, 0),
              GRPC_ERROR_INT_ERRNO, err),
          GRPC_ERROR_STR_OS_ERROR,
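The debug-only guard becomes a gpr_atm that is read and written with no-barrier atomics, so a thread toggling the flag while another thread creates an error no longer races. The same pattern in a standalone std::atomic form, as a rough illustration rather than the grpc implementation:

    #include <atomic>

    // Relaxed atomics are enough here: the flag is a debug-only tripwire, so
    // only the load/store themselves need to be race-free; no ordering with
    // respect to other memory is required.
    static std::atomic<bool> g_error_creation_allowed{true};

    void DisableErrorCreation() {
      g_error_creation_allowed.store(false, std::memory_order_relaxed);
    }

    void EnableErrorCreation() {
      g_error_creation_allowed.store(true, std::memory_order_relaxed);
    }

    bool ErrorCreationAllowed() {
      return g_error_creation_allowed.load(std::memory_order_relaxed);
    }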
data/src/core/lib/iomgr/ev_epoll1_linux.cc
CHANGED
@@ -1242,6 +1242,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
  * Event engine binding
  */
 
+static bool is_any_background_poller_thread(void) { return false; }
+
 static void shutdown_background_closure(void) {}
 
 static void shutdown_engine(void) {
@@ -1287,6 +1289,7 @@ static const grpc_event_engine_vtable vtable = {
     pollset_set_add_fd,
     pollset_set_del_fd,
 
+    is_any_background_poller_thread,
     shutdown_background_closure,
     shutdown_engine,
 };
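The new vtable entry lets iomgr ask whichever event engine is currently installed whether the calling thread belongs to a background poller; this engine has no such threads, so it answers false. A simplified sketch of that dispatch pattern follows; the wrapper and variable names are illustrative, not the exact grpc internals:

    // Sketch of vtable dispatch: the active event engine is selected at
    // startup and later queries are forwarded to its function table.
    struct event_engine_vtable {
      bool (*is_any_background_poller_thread)(void);
      void (*shutdown_background_closure)(void);
      void (*shutdown_engine)(void);
    };

    static bool no_background_poller(void) { return false; }
    static void noop(void) {}

    static const event_engine_vtable g_engine = {
        no_background_poller,
        noop,
        noop,
    };

    static const event_engine_vtable* g_active_engine = &g_engine;

    // Illustrative counterpart of grpc_iomgr_is_any_background_poller_thread().
    bool iomgr_is_any_background_poller_thread(void) {
      return g_active_engine->is_any_background_poller_thread();
    }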
data/src/core/lib/iomgr/ev_epollex_linux.cc
CHANGED
@@ -45,6 +45,7 @@
 #include "src/core/lib/gpr/spinlock.h"
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gpr/useful.h"
+#include "src/core/lib/gprpp/inlined_vector.h"
 #include "src/core/lib/gprpp/manual_constructor.h"
 #include "src/core/lib/gprpp/mutex_lock.h"
 #include "src/core/lib/iomgr/block_annotate.h"
@@ -78,18 +79,6 @@ typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;
 
 typedef struct pollable pollable;
 
-typedef struct cached_fd {
-  // Set to the grpc_fd's salt value. See 'salt' variable' in grpc_fd for more
-  // details
-  intptr_t salt;
-
-  // The underlying fd
-  int fd;
-
-  // A recency time counter that helps to determine the LRU fd in the cache
-  uint64_t last_used;
-} cached_fd;
-
 /// A pollable is something that can be polled: it has an epoll set to poll on,
 /// and a wakeup fd for kicks
 /// There are three broad types:
@@ -120,33 +109,6 @@ struct pollable {
   int event_cursor;
   int event_count;
   struct epoll_event events[MAX_EPOLL_EVENTS];
-
-  // We may be calling pollable_add_fd() on the same (pollable, fd) multiple
-  // times. To prevent pollable_add_fd() from making multiple sys calls to
-  // epoll_ctl() to add the fd, we maintain a cache of what fds are already
-  // present in the underlying epoll-set.
-  //
-  // Since this is not a correctness issue, we do not need to maintain all the
-  // fds in the cache. Hence we just use an LRU cache of size 'MAX_FDS_IN_CACHE'
-  //
-  // NOTE: An ideal implementation of this should do the following:
-  // 1) Add fds to the cache in pollable_add_fd() function (i.e whenever the fd
-  // is added to the pollable's epoll set)
-  // 2) Remove the fd from the cache whenever the fd is removed from the
-  // underlying epoll set (i.e whenever fd_orphan() is called).
-  //
-  // Implementing (2) above (i.e removing fds from cache on fd_orphan) adds a
-  // lot of complexity since an fd can be present in multiple pollables. So our
-  // implementation ONLY DOES (1) and NOT (2).
-  //
-  // The cache_fd.salt variable helps here to maintain correctness (it serves as
-  // an epoch that differentiates one grpc_fd from the other even though both of
-  // them may have the same fd number)
-  //
-  // The following implements LRU-eviction cache of fds in this pollable
-  cached_fd fd_cache[MAX_FDS_IN_CACHE];
-  int fd_cache_size;
-  uint64_t fd_cache_counter;  // Recency timer tick counter
 };
 
 static const char* pollable_type_string(pollable_type t) {
@@ -189,37 +151,86 @@ static void pollable_unref(pollable* p, int line, const char* reason);
  * Fd Declarations
  */
 
-// Monotonically increasing Epoch counter that is assinged to each grpc_fd. See
-// the description of 'salt' variable in 'grpc_fd' for more details
-// TODO: (sreek/kpayson) gpr_atm is intptr_t which may not be wide-enough on
-// 32-bit systems. Change this to int_64 - atleast on 32-bit systems
-static gpr_atm g_fd_salt;
-
 struct grpc_fd {
-  int fd;
+  grpc_fd(int fd, const char* name, bool track_err)
+      : fd(fd), track_err(track_err) {
+    gpr_mu_init(&orphan_mu);
+    gpr_mu_init(&pollable_mu);
+    read_closure.InitEvent();
+    write_closure.InitEvent();
+    error_closure.InitEvent();
+
+    char* fd_name;
+    gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
+    grpc_iomgr_register_object(&iomgr_object, fd_name);
+#ifndef NDEBUG
+    if (grpc_trace_fd_refcount.enabled()) {
+      gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, this, fd_name);
+    }
+#endif
+    gpr_free(fd_name);
+  }
+
+  // This is really the dtor, but the poller threads waking up from
+  // epoll_wait() may access the (read|write|error)_closure after destruction.
+  // Since the object will be added to the free pool, this behavior is
+  // not going to cause issues, except spurious events if the FD is reused
+  // while the race happens.
+  void destroy() {
+    grpc_iomgr_unregister_object(&iomgr_object);
+
+    POLLABLE_UNREF(pollable_obj, "fd_pollable");
+    pollset_fds.clear();
+    gpr_mu_destroy(&pollable_mu);
+    gpr_mu_destroy(&orphan_mu);
+
+    read_closure.DestroyEvent();
+    write_closure.DestroyEvent();
+    error_closure.DestroyEvent();
+
+    invalidate();
+  }
 
-
-
-
-
+#ifndef NDEBUG
+  /* Since an fd is never really destroyed (i.e gpr_free() is not called), it is
+   * hard-to-debug cases where fd fields are accessed even after calling
+   * fd_destroy(). The following invalidates fd fields to make catching such
+   * errors easier */
+  void invalidate() {
+    fd = -1;
+    gpr_atm_no_barrier_store(&refst, -1);
+    memset(&orphan_mu, -1, sizeof(orphan_mu));
+    memset(&pollable_mu, -1, sizeof(pollable_mu));
+    pollable_obj = nullptr;
+    on_done_closure = nullptr;
+    memset(&iomgr_object, -1, sizeof(iomgr_object));
+    track_err = false;
+  }
+#else
+  void invalidate() {}
+#endif
+
+  int fd;
 
   // refst format:
   //   bit 0    : 1=Active / 0=Orphaned
   //   bits 1-n : refcount
   //   Ref/Unref by two to avoid altering the orphaned bit
-  gpr_atm refst;
+  gpr_atm refst = 1;
 
   gpr_mu orphan_mu;
 
+  // Protects pollable_obj and pollset_fds.
   gpr_mu pollable_mu;
-
+  grpc_core::InlinedVector<int, 1> pollset_fds;  // Used in PO_MULTI.
+  pollable* pollable_obj = nullptr;  // Used in PO_FD.
 
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
-  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;
+  grpc_core::LockfreeEvent read_closure;
+  grpc_core::LockfreeEvent write_closure;
+  grpc_core::LockfreeEvent error_closure;
 
-  struct grpc_fd* freelist_next;
-  grpc_closure* on_done_closure;
+  struct grpc_fd* freelist_next = nullptr;
+  grpc_closure* on_done_closure = nullptr;
 
   grpc_iomgr_object iomgr_object;
 
@@ -258,6 +269,7 @@ struct grpc_pollset_worker {
 struct grpc_pollset {
   gpr_mu mu;
   gpr_atm worker_count;
+  gpr_atm active_pollable_type;
   pollable* active_pollable;
   bool kicked_without_poller;
   grpc_closure* shutdown_closure;
@@ -337,39 +349,10 @@ static void ref_by(grpc_fd* fd, int n) {
   GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
 }
 
-#ifndef NDEBUG
-#define INVALIDATE_FD(fd) invalidate_fd(fd)
-/* Since an fd is never really destroyed (i.e gpr_free() is not called), it is
- * hard to cases where fd fields are accessed even after calling fd_destroy().
- * The following invalidates fd fields to make catching such errors easier */
-static void invalidate_fd(grpc_fd* fd) {
-  fd->fd = -1;
-  fd->salt = -1;
-  gpr_atm_no_barrier_store(&fd->refst, -1);
-  memset(&fd->orphan_mu, -1, sizeof(fd->orphan_mu));
-  memset(&fd->pollable_mu, -1, sizeof(fd->pollable_mu));
-  fd->pollable_obj = nullptr;
-  fd->on_done_closure = nullptr;
-  memset(&fd->iomgr_object, -1, sizeof(fd->iomgr_object));
-  fd->track_err = false;
-}
-#else
-#define INVALIDATE_FD(fd)
-#endif
-
 /* Uninitialize and add to the freelist */
 static void fd_destroy(void* arg, grpc_error* error) {
   grpc_fd* fd = static_cast<grpc_fd*>(arg);
-
-  POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
-  gpr_mu_destroy(&fd->pollable_mu);
-  gpr_mu_destroy(&fd->orphan_mu);
-
-  fd->read_closure->DestroyEvent();
-  fd->write_closure->DestroyEvent();
-  fd->error_closure->DestroyEvent();
-
-  INVALIDATE_FD(fd);
+  fd->destroy();
 
   /* Add the fd to the freelist */
   gpr_mu_lock(&fd_freelist_mu);
@@ -429,35 +412,9 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
 
   if (new_fd == nullptr) {
     new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
-    new_fd->read_closure.Init();
-    new_fd->write_closure.Init();
-    new_fd->error_closure.Init();
-  }
-
-  new_fd->fd = fd;
-  new_fd->salt = gpr_atm_no_barrier_fetch_add(&g_fd_salt, 1);
-  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
-  gpr_mu_init(&new_fd->orphan_mu);
-  gpr_mu_init(&new_fd->pollable_mu);
-  new_fd->pollable_obj = nullptr;
-  new_fd->read_closure->InitEvent();
-  new_fd->write_closure->InitEvent();
-  new_fd->error_closure->InitEvent();
-  new_fd->freelist_next = nullptr;
-  new_fd->on_done_closure = nullptr;
-
-  char* fd_name;
-  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
-  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
-#ifndef NDEBUG
-  if (grpc_trace_fd_refcount.enabled()) {
-    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
   }
-#endif
-  gpr_free(fd_name);
 
-  new_fd->track_err = track_err;
-  return new_fd;
+  return new (new_fd) grpc_fd(fd, name, track_err);
 }
 
 static int fd_wrapped_fd(grpc_fd* fd) {
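fd_create now recycles freelist storage by constructing the grpc_fd in place with placement new, pairing with the explicit destroy() call in fd_destroy above: the memory is pooled rather than freed, so no destructor ever runs. A self-contained sketch of that pool pattern; Widget, widget_create and widget_release are illustrative names, not grpc types:

    #include <cstdlib>
    #include <new>

    // Sketch: objects are built in place on recycled storage and torn down
    // with an explicit destroy(), so the raw memory can go back to a freelist.
    struct Widget {
      explicit Widget(int v) : value(v) {}
      void destroy() { value = -1; }  // explicit teardown instead of a destructor
      int value;
      Widget* freelist_next = nullptr;
    };

    static Widget* g_freelist = nullptr;

    Widget* widget_create(int v) {
      Widget* w = g_freelist;
      if (w != nullptr) {
        g_freelist = w->freelist_next;
      } else {
        w = static_cast<Widget*>(std::malloc(sizeof(Widget)));
      }
      // Construct in place on (possibly reused) storage.
      return new (w) Widget(v);
    }

    void widget_release(Widget* w) {
      w->destroy();
      w->freelist_next = g_freelist;
      g_freelist = w;
    }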
@@ -475,7 +432,6 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
   // true so that the pollable will no longer access its owner_fd field.
   gpr_mu_lock(&fd->pollable_mu);
   pollable* pollable_obj = fd->pollable_obj;
-  gpr_mu_unlock(&fd->pollable_mu);
 
   if (pollable_obj) {
     gpr_mu_lock(&pollable_obj->owner_orphan_mu);
@@ -487,6 +443,19 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
   /* If release_fd is not NULL, we should be relinquishing control of the file
      descriptor fd->fd (but we still own the grpc_fd structure). */
   if (release_fd != nullptr) {
+    // Remove the FD from all epolls sets, before releasing it.
+    // Otherwise, we will receive epoll events after we release the FD.
+    epoll_event ev_fd;
+    memset(&ev_fd, 0, sizeof(ev_fd));
+    if (release_fd != nullptr) {
+      if (pollable_obj != nullptr) {  // For PO_FD.
+        epoll_ctl(pollable_obj->epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
+      }
+      for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {  // For PO_MULTI.
+        const int epfd = fd->pollset_fds[i];
+        epoll_ctl(epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
+      }
+    }
     *release_fd = fd->fd;
   } else {
     close(fd->fd);
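Before handing the raw descriptor back through release_fd, the fd is deleted from every epoll set that still watches it; otherwise the poller would keep reporting events for a descriptor it no longer owns, or for an unrelated one once the number is reused. The core of that idiom reduced to a standalone, Linux-only sketch (release_watched_fd is an illustrative name):

    #include <stddef.h>
    #include <string.h>
    #include <sys/epoll.h>

    // Sketch: detach a descriptor from the epoll sets that watch it before
    // handing ownership of the raw fd back to the caller.
    int release_watched_fd(int fd, const int* epoll_fds, size_t num_epoll_fds) {
      epoll_event ev;
      // The event argument is ignored for EPOLL_CTL_DEL, but very old kernels
      // require it to be non-null, so pass a zeroed struct.
      memset(&ev, 0, sizeof(ev));
      for (size_t i = 0; i < num_epoll_fds; ++i) {
        epoll_ctl(epoll_fds[i], EPOLL_CTL_DEL, fd, &ev);
      }
      return fd;  // caller now owns fd; it is no longer registered anywhere
    }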
@@ -508,40 +477,58 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
     gpr_mu_unlock(&pollable_obj->owner_orphan_mu);
   }
 
+  gpr_mu_unlock(&fd->pollable_mu);
   gpr_mu_unlock(&fd->orphan_mu);
 
   UNREF_BY(fd, 2, reason); /* Drop the reference */
 }
 
 static bool fd_is_shutdown(grpc_fd* fd) {
-  return fd->read_closure->IsShutdown();
+  return fd->read_closure.IsShutdown();
 }
 
 /* Might be called multiple times */
 static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
-  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
+  if (fd->read_closure.SetShutdown(GRPC_ERROR_REF(why))) {
     if (shutdown(fd->fd, SHUT_RDWR)) {
       if (errno != ENOTCONN) {
         gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
                 grpc_fd_wrapped_fd(fd), errno);
       }
     }
-    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
-    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
+    fd->write_closure.SetShutdown(GRPC_ERROR_REF(why));
+    fd->error_closure.SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
 
 static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
-  fd->read_closure->NotifyOn(closure);
+  fd->read_closure.NotifyOn(closure);
 }
 
 static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
-  fd->write_closure->NotifyOn(closure);
+  fd->write_closure.NotifyOn(closure);
 }
 
 static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
-  fd->error_closure->NotifyOn(closure);
+  fd->error_closure.NotifyOn(closure);
+}
+
+static bool fd_has_pollset(grpc_fd* fd, grpc_pollset* pollset) {
+  const int epfd = pollset->active_pollable->epfd;
+  grpc_core::MutexLock lock(&fd->pollable_mu);
+  for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {
+    if (fd->pollset_fds[i] == epfd) {
+      return true;
+    }
+  }
+  return false;
+}
+
+static void fd_add_pollset(grpc_fd* fd, grpc_pollset* pollset) {
+  const int epfd = pollset->active_pollable->epfd;
+  grpc_core::MutexLock lock(&fd->pollable_mu);
+  fd->pollset_fds.push_back(epfd);
 }
 
 /*******************************************************************************
@@ -594,8 +581,6 @@ static grpc_error* pollable_create(pollable_type type, pollable** p) {
   (*p)->root_worker = nullptr;
   (*p)->event_cursor = 0;
   (*p)->event_count = 0;
-  (*p)->fd_cache_size = 0;
-  (*p)->fd_cache_counter = 0;
   return GRPC_ERROR_NONE;
 }
 
@@ -637,39 +622,6 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
   grpc_error* error = GRPC_ERROR_NONE;
   static const char* err_desc = "pollable_add_fd";
   const int epfd = p->epfd;
-  gpr_mu_lock(&p->mu);
-  p->fd_cache_counter++;
-
-  // Handle the case of overflow for our cache counter by
-  // reseting the recency-counter on all cache objects
-  if (p->fd_cache_counter == 0) {
-    for (int i = 0; i < p->fd_cache_size; i++) {
-      p->fd_cache[i].last_used = 0;
-    }
-  }
-
-  int lru_idx = 0;
-  for (int i = 0; i < p->fd_cache_size; i++) {
-    if (p->fd_cache[i].fd == fd->fd && p->fd_cache[i].salt == fd->salt) {
-      GRPC_STATS_INC_POLLSET_FD_CACHE_HITS();
-      p->fd_cache[i].last_used = p->fd_cache_counter;
-      gpr_mu_unlock(&p->mu);
-      return GRPC_ERROR_NONE;
-    } else if (p->fd_cache[i].last_used < p->fd_cache[lru_idx].last_used) {
-      lru_idx = i;
-    }
-  }
-
-  // Add to cache
-  if (p->fd_cache_size < MAX_FDS_IN_CACHE) {
-    lru_idx = p->fd_cache_size;
-    p->fd_cache_size++;
-  }
-  p->fd_cache[lru_idx].fd = fd->fd;
-  p->fd_cache[lru_idx].salt = fd->salt;
-  p->fd_cache[lru_idx].last_used = p->fd_cache_counter;
-  gpr_mu_unlock(&p->mu);
-
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
   }
@@ -849,6 +801,7 @@ static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
 static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
   gpr_mu_init(&pollset->mu);
   gpr_atm_no_barrier_store(&pollset->worker_count, 0);
+  gpr_atm_no_barrier_store(&pollset->active_pollable_type, PO_EMPTY);
   pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
   pollset->kicked_without_poller = false;
   pollset->shutdown_closure = nullptr;
@@ -869,11 +822,11 @@ static int poll_deadline_to_millis_timeout(grpc_millis millis) {
   return static_cast<int>(delta);
 }
 
-static void fd_become_readable(grpc_fd* fd) { fd->read_closure->SetReady(); }
+static void fd_become_readable(grpc_fd* fd) { fd->read_closure.SetReady(); }
 
-static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
+static void fd_become_writable(grpc_fd* fd) { fd->write_closure.SetReady(); }
 
-static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }
+static void fd_has_errors(grpc_fd* fd) { fd->error_closure.SetReady(); }
 
 /* Get the pollable_obj attached to this fd. If none is attached, create a new
  * pollable object (of type PO_FD), attach it to the fd and return it
@@ -1283,6 +1236,8 @@ static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
     POLLABLE_UNREF(pollset->active_pollable, "pollset");
     pollset->active_pollable = po_at_start;
   } else {
+    gpr_atm_rel_store(&pollset->active_pollable_type,
+                      pollset->active_pollable->type);
     POLLABLE_UNREF(po_at_start, "pollset_add_fd");
   }
   return error;
@@ -1329,6 +1284,8 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
     pollset->active_pollable = po_at_start;
     *pollable_obj = nullptr;
   } else {
+    gpr_atm_rel_store(&pollset->active_pollable_type,
+                      pollset->active_pollable->type);
     *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
     POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
   }
@@ -1337,9 +1294,23 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
 
 static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
   GPR_TIMER_SCOPE("pollset_add_fd", 0);
-  gpr_mu_lock(&pollset->mu);
+
+  // We never transition from PO_MULTI to other modes (i.e., PO_FD or PO_EMOPTY)
+  // and, thus, it is safe to simply store and check whether the FD has already
+  // been added to the active pollable previously.
+  if (gpr_atm_acq_load(&pollset->active_pollable_type) == PO_MULTI &&
+      fd_has_pollset(fd, pollset)) {
+    return;
+  }
+
+  grpc_core::MutexLock lock(&pollset->mu);
   grpc_error* error = pollset_add_fd_locked(pollset, fd);
-  gpr_mu_unlock(&pollset->mu);
+
+  // If we are in PO_MULTI mode, we should update the pollsets of the FD.
+  if (gpr_atm_no_barrier_load(&pollset->active_pollable_type) == PO_MULTI) {
+    fd_add_pollset(fd, pollset);
+  }
+
   GRPC_LOG_IF_ERROR("pollset_add_fd", error);
 }
 
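The fast path reads active_pollable_type with an acquire load, pairing with the release stores added in pollset_add_fd_locked and pollset_as_multipollable_locked: once a caller observes PO_MULTI, it also sees the multi-pollable that the store published, so it can consult fd_has_pollset() without taking the pollset lock. The same publish/observe pairing in a minimal std::atomic sketch (illustrative, not grpc code):

    #include <atomic>

    // Sketch of release/acquire publication: the writer fills in data, then
    // publishes a flag with a release store; a reader that sees the flag via
    // an acquire load is guaranteed to also see the earlier writes.
    struct State {
      int data = 0;
      std::atomic<int> mode{0};  // 0 = empty, 2 = multi (say)
    };

    void publish_multi(State* s) {
      s->data = 42;                                 // set up first
      s->mode.store(2, std::memory_order_release);  // then publish
    }

    bool fast_path(const State* s) {
      if (s->mode.load(std::memory_order_acquire) == 2) {
        // Safe to read s->data here without a lock.
        return s->data == 42;
      }
      return false;  // fall back to the slow (locked) path
    }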
@@ -1604,6 +1575,8 @@ static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
  * Event engine binding
  */
 
+static bool is_any_background_poller_thread(void) { return false; }
+
 static void shutdown_background_closure(void) {}
 
 static void shutdown_engine(void) {
@@ -1644,6 +1617,7 @@ static const grpc_event_engine_vtable vtable = {
     pollset_set_add_fd,
     pollset_set_del_fd,
 
+    is_any_background_poller_thread,
     shutdown_background_closure,
     shutdown_engine,
 };