grpc 1.13.0 → 1.14.0
Potentially problematic release.
- checksums.yaml +4 -4
- data/Makefile +403 -153
- data/include/grpc/grpc.h +0 -8
- data/include/grpc/grpc_security.h +59 -2
- data/include/grpc/impl/codegen/grpc_types.h +8 -2
- data/include/grpc/impl/codegen/log.h +112 -0
- data/include/grpc/module.modulemap +2 -0
- data/include/grpc/support/log.h +2 -88
- data/include/grpc/support/string_util.h +2 -0
- data/src/boringssl/err_data.c +597 -593
- data/src/core/ext/filters/client_channel/client_channel.cc +715 -770
- data/src/core/ext/filters/client_channel/client_channel.h +5 -0
- data/src/core/ext/filters/client_channel/client_channel_channelz.cc +111 -0
- data/src/core/ext/filters/client_channel/client_channel_channelz.h +69 -0
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +9 -0
- data/src/core/ext/filters/client_channel/http_proxy.cc +22 -5
- data/src/core/ext/filters/client_channel/lb_policy.h +15 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +3 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc +3 -3
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +3 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.c +19 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/duration.pb.h +54 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.c +19 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/google/protobuf/timestamp.pb.h +54 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +4 -17
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +37 -63
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +79 -0
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +5 -2
- data/src/core/ext/filters/client_channel/lb_policy_factory.cc +8 -0
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +4 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +2 -2
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.cc +317 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +48 -9
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +40 -293
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +106 -84
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +6 -2
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc +6 -5
- data/src/core/ext/filters/client_channel/subchannel.cc +36 -6
- data/src/core/ext/filters/client_channel/subchannel.h +4 -0
- data/src/core/ext/filters/deadline/deadline_filter.cc +18 -15
- data/src/core/ext/filters/deadline/deadline_filter.h +5 -5
- data/src/core/ext/filters/http/client/http_client_filter.cc +10 -9
- data/src/core/ext/filters/http/server/http_server_filter.h +1 -1
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc +1 -1
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc +3 -2
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +33 -22
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +1 -1
- data/src/core/ext/transport/chttp2/transport/internal.h +10 -3
- data/src/core/ext/transport/chttp2/transport/stream_lists.cc +17 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +21 -16
- data/src/core/ext/transport/inproc/inproc_transport.cc +46 -6
- data/src/core/lib/channel/channel_stack.cc +22 -24
- data/src/core/lib/channel/channel_trace.cc +28 -63
- data/src/core/lib/channel/channel_trace.h +13 -17
- data/src/core/lib/channel/channelz.cc +143 -0
- data/src/core/lib/channel/channelz.h +124 -0
- data/src/core/lib/channel/channelz_registry.cc +7 -24
- data/src/core/lib/channel/channelz_registry.h +12 -8
- data/src/core/lib/channel/connected_channel.cc +8 -1
- data/src/core/{ext/filters/load_reporting/server_load_reporting_filter.h → lib/gpr/alloc.h} +7 -9
- data/src/core/lib/gpr/arena.cc +8 -8
- data/src/core/lib/gpr/string.cc +28 -0
- data/src/core/lib/gpr/string.h +10 -0
- data/src/core/lib/gprpp/abstract.h +5 -2
- data/src/core/lib/gprpp/inlined_vector.h +57 -3
- data/src/core/lib/gprpp/memory.h +2 -2
- data/src/core/lib/gprpp/ref_counted_ptr.h +5 -0
- data/src/core/lib/gprpp/thd_posix.cc +1 -1
- data/src/core/lib/iomgr/call_combiner.h +80 -0
- data/src/core/lib/iomgr/closure.h +3 -2
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
- data/src/core/lib/iomgr/error.cc +12 -0
- data/src/core/lib/iomgr/error.h +5 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +36 -9
- data/src/core/lib/iomgr/ev_epollex_linux.cc +172 -46
- data/src/core/lib/iomgr/ev_epollsig_linux.cc +47 -21
- data/src/core/lib/iomgr/ev_poll_posix.cc +10 -4
- data/src/core/lib/iomgr/ev_posix.cc +17 -9
- data/src/core/lib/iomgr/ev_posix.h +20 -4
- data/src/core/lib/iomgr/executor.cc +196 -140
- data/src/core/lib/iomgr/executor.h +47 -14
- data/src/core/lib/iomgr/iomgr.cc +2 -0
- data/src/core/lib/iomgr/iomgr.h +5 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.cc +1 -0
- data/src/core/lib/iomgr/socket_utils.h +9 -0
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +4 -0
- data/src/core/lib/iomgr/socket_utils_uv.cc +4 -0
- data/src/core/lib/iomgr/socket_utils_windows.cc +4 -0
- data/src/core/lib/iomgr/tcp_client_posix.cc +3 -5
- data/src/core/lib/iomgr/tcp_posix.cc +6 -1
- data/src/core/lib/iomgr/tcp_server_posix.cc +3 -3
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +1 -1
- data/src/core/lib/iomgr/timer_manager.cc +0 -1
- data/src/core/lib/iomgr/udp_server.cc +2 -3
- data/src/core/lib/json/json.cc +10 -0
- data/src/core/lib/json/json.h +5 -0
- data/src/core/lib/security/context/security_context.cc +8 -8
- data/src/core/lib/security/context/security_context.h +6 -2
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +2 -1
- data/src/core/lib/security/credentials/local/local_credentials.cc +77 -0
- data/src/core/lib/security/credentials/local/local_credentials.h +40 -0
- data/src/core/lib/security/credentials/ssl/ssl_credentials.cc +17 -3
- data/src/core/lib/security/security_connector/local_security_connector.cc +245 -0
- data/src/core/lib/security/security_connector/local_security_connector.h +58 -0
- data/src/core/lib/security/security_connector/security_connector.cc +30 -5
- data/src/core/lib/security/security_connector/security_connector.h +1 -0
- data/src/core/lib/security/transport/client_auth_filter.cc +5 -1
- data/src/core/lib/security/transport/server_auth_filter.cc +4 -5
- data/src/core/lib/surface/call.cc +75 -32
- data/src/core/lib/surface/call.h +2 -0
- data/src/core/lib/surface/channel.cc +32 -13
- data/src/core/lib/surface/channel.h +4 -0
- data/src/core/lib/surface/version.cc +1 -1
- data/src/core/lib/transport/transport.cc +20 -9
- data/src/core/lib/transport/transport.h +12 -10
- data/src/core/lib/transport/transport_op_string.cc +0 -7
- data/src/core/plugin_registry/grpc_plugin_registry.cc +0 -4
- data/src/core/tsi/alts/handshaker/alts_handshaker_service_api_util.h +2 -2
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +2 -1
- data/src/core/tsi/alts/handshaker/altscontext.pb.c +0 -1
- data/src/core/tsi/alts/handshaker/altscontext.pb.h +1 -2
- data/src/core/tsi/alts/handshaker/handshaker.pb.c +0 -1
- data/src/core/tsi/alts/handshaker/handshaker.pb.h +1 -2
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.c +0 -1
- data/src/core/tsi/alts/handshaker/transport_security_common.pb.h +1 -1
- data/src/core/tsi/alts/handshaker/transport_security_common_api.h +2 -2
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +47 -1
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.h +3 -1
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +12 -11
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.h +7 -2
- data/src/core/tsi/local_transport_security.cc +209 -0
- data/src/core/tsi/local_transport_security.h +51 -0
- data/src/core/tsi/ssl_transport_security.cc +2 -3
- data/src/{core/ext → cpp/ext/filters}/census/grpc_context.cc +0 -0
- data/src/ruby/ext/grpc/rb_channel_credentials.c +3 -3
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +18 -18
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +29 -29
- data/src/ruby/lib/grpc/generic/active_call.rb +19 -23
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/call_credentials_spec.rb +1 -1
- data/src/ruby/spec/call_spec.rb +1 -1
- data/src/ruby/spec/channel_credentials_spec.rb +1 -1
- data/src/ruby/spec/channel_spec.rb +1 -1
- data/src/ruby/spec/client_auth_spec.rb +1 -12
- data/src/ruby/spec/client_server_spec.rb +1 -1
- data/src/ruby/spec/compression_options_spec.rb +1 -1
- data/src/ruby/spec/error_sanity_spec.rb +1 -1
- data/src/ruby/spec/generic/client_stub_spec.rb +13 -1
- data/src/ruby/spec/generic/rpc_desc_spec.rb +1 -1
- data/src/ruby/spec/generic/rpc_server_pool_spec.rb +1 -1
- data/src/ruby/spec/generic/service_spec.rb +1 -1
- data/src/ruby/spec/google_rpc_status_utils_spec.rb +1 -12
- data/src/ruby/spec/pb/duplicate/codegen_spec.rb +1 -0
- data/src/ruby/spec/pb/health/checker_spec.rb +1 -1
- data/src/ruby/spec/server_credentials_spec.rb +1 -1
- data/src/ruby/spec/server_spec.rb +1 -1
- data/src/ruby/spec/spec_helper.rb +1 -0
- data/src/ruby/spec/support/services.rb +1 -1
- data/src/ruby/spec/time_consts_spec.rb +1 -1
- data/third_party/boringssl/crypto/asn1/tasn_dec.c +40 -19
- data/third_party/boringssl/crypto/bytestring/cbs.c +1 -0
- data/third_party/boringssl/crypto/cipher_extra/e_aesccm.c +47 -15
- data/third_party/boringssl/crypto/ec_extra/ec_asn1.c +9 -10
- data/third_party/boringssl/crypto/ecdh/ecdh.c +4 -3
- data/third_party/boringssl/crypto/fipsmodule/bn/add.c +30 -54
- data/third_party/boringssl/crypto/fipsmodule/bn/bn.c +7 -1
- data/third_party/boringssl/crypto/fipsmodule/bn/cmp.c +8 -8
- data/third_party/boringssl/crypto/fipsmodule/bn/div.c +97 -11
- data/third_party/boringssl/crypto/fipsmodule/bn/gcd.c +274 -218
- data/third_party/boringssl/crypto/fipsmodule/bn/internal.h +111 -34
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery.c +2 -2
- data/third_party/boringssl/crypto/fipsmodule/bn/montgomery_inv.c +1 -1
- data/third_party/boringssl/crypto/fipsmodule/bn/mul.c +24 -6
- data/third_party/boringssl/crypto/fipsmodule/bn/prime.c +324 -63
- data/third_party/boringssl/crypto/fipsmodule/bn/random.c +74 -21
- data/third_party/boringssl/crypto/fipsmodule/bn/shift.c +128 -86
- data/third_party/boringssl/crypto/fipsmodule/bn/sqrt.c +1 -1
- data/third_party/boringssl/crypto/fipsmodule/ec/ec_key.c +67 -112
- data/third_party/boringssl/crypto/fipsmodule/ec/internal.h +8 -1
- data/third_party/boringssl/crypto/fipsmodule/ec/oct.c +5 -5
- data/third_party/boringssl/crypto/fipsmodule/ec/p224-64.c +9 -17
- data/third_party/boringssl/crypto/fipsmodule/ec/p256-x86_64-table.h +5378 -5418
- data/third_party/boringssl/crypto/fipsmodule/ec/simple.c +32 -32
- data/third_party/boringssl/crypto/fipsmodule/ecdsa/ecdsa.c +5 -11
- data/third_party/boringssl/crypto/fipsmodule/rsa/blinding.c +16 -40
- data/third_party/boringssl/crypto/fipsmodule/rsa/internal.h +1 -6
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa.c +41 -29
- data/third_party/boringssl/crypto/fipsmodule/rsa/rsa_impl.c +63 -49
- data/third_party/boringssl/crypto/x509/vpm_int.h +1 -0
- data/third_party/boringssl/crypto/x509/x509_vfy.c +4 -0
- data/third_party/boringssl/crypto/x509/x509_vpm.c +44 -22
- data/third_party/boringssl/include/openssl/aead.h +8 -2
- data/third_party/boringssl/include/openssl/asn1.h +1 -0
- data/third_party/boringssl/include/openssl/base.h +4 -0
- data/third_party/boringssl/include/openssl/bn.h +13 -3
- data/third_party/boringssl/include/openssl/bytestring.h +4 -4
- data/third_party/boringssl/include/openssl/ec.h +10 -4
- data/third_party/boringssl/include/openssl/ec_key.h +0 -3
- data/third_party/boringssl/include/openssl/rsa.h +1 -0
- data/third_party/boringssl/include/openssl/ssl.h +8 -3
- data/third_party/boringssl/include/openssl/ssl3.h +0 -1
- data/third_party/boringssl/include/openssl/x509.h +1 -0
- data/third_party/boringssl/include/openssl/x509v3.h +1 -0
- data/third_party/boringssl/ssl/handshake_client.cc +36 -64
- data/third_party/boringssl/ssl/ssl_cipher.cc +4 -0
- data/third_party/boringssl/ssl/ssl_lib.cc +1 -1
- metadata +45 -38
- data/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc +0 -222
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc +0 -71
- data/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h +0 -61
- data/src/ruby/spec/pb/package_with_underscore/checker_spec.rb +0 -51
- data/src/ruby/spec/pb/package_with_underscore/data.proto +0 -23
- data/src/ruby/spec/pb/package_with_underscore/service.proto +0 -23
data/src/core/lib/iomgr/ev_epollsig_linux.cc:

@@ -132,6 +132,7 @@ struct grpc_fd {

   grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
   grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
+  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;

   struct grpc_fd* freelist_next;
   grpc_closure* on_done_closure;
@@ -141,6 +142,9 @@ struct grpc_fd {
   gpr_atm read_notifier_pollset;

   grpc_iomgr_object iomgr_object;
+
+  /* Do we need to track EPOLLERR events separately? */
+  bool track_err;
 };

 /* Reference counting for fds */
@@ -352,7 +356,10 @@ static void polling_island_add_fds_locked(polling_island* pi, grpc_fd** fds,

   for (i = 0; i < fd_count; i++) {
     ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
-    ev.data.ptr = fds[i];
+    /* Use the least significant bit of ev.data.ptr to store track_err to avoid
+     * synchronization issues when accessing it after receiving an event */
+    ev.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(fds[i]) |
+                                          (fds[i]->track_err ? 1 : 0));
     err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);

     if (err < 0) {
@@ -435,7 +442,6 @@ static void polling_island_remove_all_fds_locked(polling_island* pi,

 /* The caller is expected to hold pi->mu lock before calling this function */
 static void polling_island_remove_fd_locked(polling_island* pi, grpc_fd* fd,
-                                            bool is_fd_closed,
                                             grpc_error** error) {
   int err;
   size_t i;
@@ -444,16 +450,14 @@ static void polling_island_remove_fd_locked(polling_island* pi, grpc_fd* fd,

   /* If fd is already closed, then it would have been automatically been removed
      from the epoll set */
-  if (!is_fd_closed) {
-    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, nullptr);
-    if (err < 0 && errno != ENOENT) {
-      gpr_asprintf(
-          &err_msg,
-          "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
-          pi->epoll_fd, fd->fd, errno, strerror(errno));
-      append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
-      gpr_free(err_msg);
-    }
+  err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, nullptr);
+  if (err < 0 && errno != ENOENT) {
+    gpr_asprintf(
+        &err_msg,
+        "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
+        pi->epoll_fd, fd->fd, errno, strerror(errno));
+    append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
+    gpr_free(err_msg);
   }

   for (i = 0; i < pi->fd_cnt; i++) {
@@ -769,6 +773,7 @@ static void unref_by(grpc_fd* fd, int n) {

     fd->read_closure->DestroyEvent();
     fd->write_closure->DestroyEvent();
+    fd->error_closure->DestroyEvent();

     gpr_mu_unlock(&fd_freelist_mu);
   } else {
@@ -806,7 +811,7 @@ static void fd_global_shutdown(void) {
   gpr_mu_destroy(&fd_freelist_mu);
 }

-static grpc_fd* fd_create(int fd, const char* name) {
+static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
   grpc_fd* new_fd = nullptr;

   gpr_mu_lock(&fd_freelist_mu);
@@ -821,6 +826,7 @@ static grpc_fd* fd_create(int fd, const char* name) {
     gpr_mu_init(&new_fd->po.mu);
     new_fd->read_closure.Init();
     new_fd->write_closure.Init();
+    new_fd->error_closure.Init();
   }

   /* Note: It is not really needed to get the new_fd->po.mu lock here. If this
@@ -837,6 +843,8 @@ static grpc_fd* fd_create(int fd, const char* name) {
   new_fd->orphaned = false;
   new_fd->read_closure->InitEvent();
   new_fd->write_closure->InitEvent();
+  new_fd->error_closure->InitEvent();
+  new_fd->track_err = track_err;
   gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

   new_fd->freelist_next = nullptr;
@@ -863,7 +871,7 @@ static int fd_wrapped_fd(grpc_fd* fd) {
 }

 static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
-                      bool already_closed, const char* reason) {
+                      const char* reason) {
   grpc_error* error = GRPC_ERROR_NONE;
   polling_island* unref_pi = nullptr;

@@ -884,7 +892,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
        before doing this.) */
   if (fd->po.pi != nullptr) {
     polling_island* pi_latest = polling_island_lock(fd->po.pi);
-    polling_island_remove_fd_locked(pi_latest, fd, already_closed, &error);
+    polling_island_remove_fd_locked(pi_latest, fd, &error);
     gpr_mu_unlock(&pi_latest->mu);

     unref_pi = fd->po.pi;
@@ -933,6 +941,7 @@ static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
   if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
     shutdown(fd->fd, SHUT_RDWR);
     fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
+    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
@@ -945,6 +954,10 @@ static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
   fd->write_closure->NotifyOn(closure);
 }

+static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
+  fd->error_closure->NotifyOn(closure);
+}
+
 /*******************************************************************************
  * Pollset Definitions
  */
@@ -1116,6 +1129,8 @@ static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {

 static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

+static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }
+
 static void pollset_release_polling_island(grpc_pollset* ps,
                                            const char* reason) {
   if (ps->po.pi != nullptr) {
@@ -1254,14 +1269,23 @@ static void pollset_work_and_unlock(grpc_pollset* pollset,
          to the function pollset_work_and_unlock() will pick up the correct
          epoll_fd */
       } else {
+        grpc_fd* fd = reinterpret_cast<grpc_fd*>(
+            reinterpret_cast<intptr_t>(data_ptr) & ~static_cast<intptr_t>(1));
+        bool track_err =
+            reinterpret_cast<intptr_t>(data_ptr) & ~static_cast<intptr_t>(1);
+        bool cancel = (ep_ev[i].events & EPOLLHUP) != 0;
+        bool error = (ep_ev[i].events & EPOLLERR) != 0;
+        bool read_ev = (ep_ev[i].events & (EPOLLIN | EPOLLPRI)) != 0;
+        bool write_ev = (ep_ev[i].events & EPOLLOUT) != 0;
+        bool err_fallback = error && !track_err;
+
+        if (error && !err_fallback) {
+          fd_has_errors(fd);
+        }
+        if (read_ev || cancel || err_fallback) {
           fd_become_readable(fd, pollset);
         }
-        if (write_ev || cancel) {
+        if (write_ev || cancel || err_fallback) {
           fd_become_writable(fd);
         }
       }
@@ -1634,6 +1658,7 @@ static void shutdown_engine(void) {

 static const grpc_event_engine_vtable vtable = {
     sizeof(grpc_pollset),
+    true,

     fd_create,
     fd_wrapped_fd,
@@ -1641,6 +1666,7 @@ static const grpc_event_engine_vtable vtable = {
     fd_shutdown,
     fd_notify_on_read,
     fd_notify_on_write,
+    fd_notify_on_error,
     fd_is_shutdown,
     fd_get_read_notifier_pollset,
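The epollsig changes above rely on a pointer-tagging trick: a heap-allocated grpc_fd is aligned to more than one byte, so bit 0 of the pointer placed in ev.data.ptr is always zero and can carry the track_err flag alongside the address. Below is a minimal standalone sketch of that idiom for illustration only; it is not code from this gem, and the names TaggedFd, tag, untag_ptr, and tag_bit are hypothetical.

#include <cstdint>

// Pack a boolean into the unused low bit of an aligned pointer, as the epoll
// engine does with ev.data.ptr, and recover both pieces later.
struct TaggedFd {
  void* raw;  // pointer with the flag packed into bit 0
};

inline TaggedFd tag(void* fd, bool track_err) {
  return {reinterpret_cast<void*>(reinterpret_cast<std::intptr_t>(fd) |
                                  (track_err ? 1 : 0))};
}

inline void* untag_ptr(TaggedFd t) {
  // Mask the flag off before the pointer is ever dereferenced.
  return reinterpret_cast<void*>(reinterpret_cast<std::intptr_t>(t.raw) &
                                 ~static_cast<std::intptr_t>(1));
}

inline bool tag_bit(TaggedFd t) {
  return (reinterpret_cast<std::intptr_t>(t.raw) & 1) != 0;
}

Because the flag travels with the pointer inside the kernel's epoll data, pollset_work_and_unlock() can read it from the returned event without any extra synchronization against the grpc_fd itself.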
data/src/core/lib/iomgr/ev_poll_posix.cc:

@@ -330,7 +330,8 @@ static void unref_by(grpc_fd* fd, int n) {
   }
 }

-static grpc_fd* fd_create(int fd, const char* name) {
+static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
+  GPR_DEBUG_ASSERT(track_err == false);
   grpc_fd* r = static_cast<grpc_fd*>(gpr_malloc(sizeof(*r)));
   gpr_mu_init(&r->mu);
   gpr_atm_rel_store(&r->refst, 1);
@@ -424,14 +425,12 @@ static int fd_wrapped_fd(grpc_fd* fd) {
 }

 static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
-                      bool already_closed, const char* reason) {
+                      const char* reason) {
   fd->on_done_closure = on_done;
   fd->released = release_fd != nullptr;
   if (release_fd != nullptr) {
     *release_fd = fd->fd;
     fd->released = true;
-  } else if (already_closed) {
-    fd->released = true;
   }
   gpr_mu_lock(&fd->mu);
   REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
@@ -553,6 +552,11 @@ static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
   gpr_mu_unlock(&fd->mu);
 }

+static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
+  gpr_log(GPR_ERROR, "Polling engine does not support tracking errors.");
+  abort();
+}
+
 static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset,
                               grpc_pollset_worker* worker, uint32_t read_mask,
                               uint32_t write_mask, grpc_fd_watcher* watcher) {
@@ -1710,6 +1714,7 @@ static void shutdown_engine(void) {

 static const grpc_event_engine_vtable vtable = {
     sizeof(grpc_pollset),
+    false,

     fd_create,
     fd_wrapped_fd,
@@ -1717,6 +1722,7 @@ static const grpc_event_engine_vtable vtable = {
     fd_shutdown,
     fd_notify_on_read,
     fd_notify_on_write,
+    fd_notify_on_error,
     fd_is_shutdown,
     fd_get_read_notifier_pollset,
data/src/core/lib/iomgr/ev_posix.cc:

@@ -193,10 +193,15 @@ void grpc_event_engine_shutdown(void) {
   g_event_engine = nullptr;
 }

-grpc_fd* grpc_fd_create(int fd, const char* name) {
-  GRPC_POLLING_API_TRACE("fd_create(%d, %s)", fd, name);
-  GRPC_FD_TRACE("fd_create(%d, %s)", fd, name);
-  return g_event_engine->fd_create(fd, name);
+bool grpc_event_engine_can_track_errors(void) {
+  return g_event_engine->can_track_err;
+}
+
+grpc_fd* grpc_fd_create(int fd, const char* name, bool track_err) {
+  GRPC_POLLING_API_TRACE("fd_create(%d, %s, %d)", fd, name, track_err);
+  GRPC_FD_TRACE("fd_create(%d, %s, %d)", fd, name, track_err);
+  GPR_DEBUG_ASSERT(!track_err || g_event_engine->can_track_err);
+  return g_event_engine->fd_create(fd, name, track_err);
 }

 int grpc_fd_wrapped_fd(grpc_fd* fd) {
@@ -204,13 +209,12 @@ int grpc_fd_wrapped_fd(grpc_fd* fd) {
 }

 void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
-                    bool already_closed, const char* reason) {
-  GRPC_POLLING_API_TRACE("fd_orphan(%d, %p, %p, %d, %s)",
-                         grpc_fd_wrapped_fd(fd), on_done, release_fd,
-                         already_closed, reason);
+                    const char* reason) {
+  GRPC_POLLING_API_TRACE("fd_orphan(%d, %p, %p, %s)", grpc_fd_wrapped_fd(fd),
+                         on_done, release_fd, reason);
   GRPC_FD_TRACE("grpc_fd_orphan, fd:%d closed", grpc_fd_wrapped_fd(fd));

-  g_event_engine->fd_orphan(fd, on_done, release_fd, already_closed, reason);
+  g_event_engine->fd_orphan(fd, on_done, release_fd, reason);
 }

 void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why) {
@@ -231,6 +235,10 @@ void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
   g_event_engine->fd_notify_on_write(fd, closure);
 }

+void grpc_fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
+  g_event_engine->fd_notify_on_error(fd, closure);
+}
+
 static size_t pollset_size(void) { return g_event_engine->pollset_size; }

 static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
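ev_posix.cc routes every grpc_fd_* wrapper through the vtable of whichever polling engine was selected at startup, and the new can_track_err field is the capability bit that gates the error path. The following is a simplified model of that pattern using made-up types (EngineVtable, engine_fd_create, posix_fd_create); it is an illustration of the design, not gRPC's real declarations.

#include <cassert>

// A capability flag travels in the engine's dispatch table; the public
// wrapper checks it before forwarding, mirroring grpc_fd_create() above.
struct EngineVtable {
  bool can_track_err;
  int (*fd_create)(int fd, const char* name, bool track_err);
};

static int posix_fd_create(int fd, const char* /*name*/, bool /*track_err*/) {
  return fd;  // stand-in for the engine-specific wrapping logic
}

static const EngineVtable g_engine = {/*can_track_err=*/false, posix_fd_create};

int engine_fd_create(int fd, const char* name, bool track_err) {
  // Callers may only request error tracking when the engine supports it.
  assert(!track_err || g_engine.can_track_err);
  return g_engine.fd_create(fd, name, track_err);
}

An engine that cannot track errors, such as the poll engine above, publishes false and callers fall back to receiving errors through the read and write notifications.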
data/src/core/lib/iomgr/ev_posix.h:

@@ -41,14 +41,16 @@ typedef struct grpc_fd grpc_fd;

 typedef struct grpc_event_engine_vtable {
   size_t pollset_size;
+  bool can_track_err;

-  grpc_fd* (*fd_create)(int fd, const char* name);
+  grpc_fd* (*fd_create)(int fd, const char* name, bool track_err);
   int (*fd_wrapped_fd)(grpc_fd* fd);
   void (*fd_orphan)(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
-                    bool already_closed, const char* reason);
+                    const char* reason);
   void (*fd_shutdown)(grpc_fd* fd, grpc_error* why);
   void (*fd_notify_on_read)(grpc_fd* fd, grpc_closure* closure);
   void (*fd_notify_on_write)(grpc_fd* fd, grpc_closure* closure);
+  void (*fd_notify_on_error)(grpc_fd* fd, grpc_closure* closure);
   bool (*fd_is_shutdown)(grpc_fd* fd);
   grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_fd* fd);

@@ -84,10 +86,20 @@ void grpc_event_engine_shutdown(void);
 /* Return the name of the poll strategy */
 const char* grpc_get_poll_strategy_name();

+/* Returns true if polling engine can track errors separately, false otherwise.
+ * If this is true, fd can be created with track_err set. After this, error
+ * events will be reported using fd_notify_on_error. If it is not set, errors
+ * will continue to be reported through fd_notify_on_read and
+ * fd_notify_on_write.
+ */
+bool grpc_event_engine_can_track_errors();
+
 /* Create a wrapped file descriptor.
    Requires fd is a non-blocking file descriptor.
+   \a track_err if true means that error events would be tracked separately
+   using grpc_fd_notify_on_error. Currently, valid only for linux systems.
    This takes ownership of closing fd. */
-grpc_fd* grpc_fd_create(int fd, const char* name);
+grpc_fd* grpc_fd_create(int fd, const char* name, bool track_err);

 /* Return the wrapped fd, or -1 if it has been released or closed. */
 int grpc_fd_wrapped_fd(grpc_fd* fd);
@@ -100,7 +112,7 @@ int grpc_fd_wrapped_fd(grpc_fd* fd);
    notify_on_write.
    MUST NOT be called with a pollset lock taken */
 void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
-                    bool already_closed, const char* reason);
+                    const char* reason);

 /* Has grpc_fd_shutdown been called on an fd? */
 bool grpc_fd_is_shutdown(grpc_fd* fd);
@@ -126,6 +138,10 @@ void grpc_fd_notify_on_read(grpc_fd* fd, grpc_closure* closure);
 /* Exactly the same semantics as above, except based on writable events. */
 void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure);

+/* Exactly the same semantics as above, except based on error events. track_err
+ * needs to have been set on grpc_fd_create */
+void grpc_fd_notify_on_error(grpc_fd* fd, grpc_closure* closure);
+
 /* Return the read notifier pollset from the fd */
 grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_fd* fd);
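Taken together, the declarations above give internal iomgr code an opt-in error channel. Below is a hedged usage sketch, not code from this gem: it assumes the usual iomgr headers plus the GRPC_CLOSURE_INIT macro and grpc_schedule_on_exec_ctx scheduler from this release, and the names wrap_socket, on_socket_error, and g_error_closure are hypothetical.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/exec_ctx.h"

static grpc_closure g_error_closure;

// Hypothetical callback; a real caller would inspect `error` and react,
// e.g. by surfacing socket error-queue data or tearing down the transport.
static void on_socket_error(void* arg, grpc_error* error) {
  (void)arg;
  (void)error;
}

static grpc_fd* wrap_socket(int sockfd) {
  // Only request the separate error channel when the selected polling
  // engine advertises support for it; otherwise errors keep arriving via
  // the read/write notifications as before.
  const bool track_err = grpc_event_engine_can_track_errors();
  grpc_fd* fd = grpc_fd_create(sockfd, "my_socket", track_err);
  if (track_err) {
    GRPC_CLOSURE_INIT(&g_error_closure, on_socket_error, /*cb_arg=*/nullptr,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_error(fd, &g_error_closure);
  }
  return fd;
}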
data/src/core/lib/iomgr/executor.cc:

@@ -28,52 +28,43 @@
 #include <grpc/support/sync.h>

 #include "src/core/lib/debug/stats.h"
-#include "src/core/lib/gpr/spinlock.h"
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gpr/useful.h"
-#include "src/core/lib/gprpp/thd.h"
+#include "src/core/lib/gprpp/memory.h"
 #include "src/core/lib/iomgr/exec_ctx.h"

 #define MAX_DEPTH 2

-typedef struct {
-  gpr_mu mu;
-  gpr_cv cv;
-  grpc_closure_list elems;
-  size_t depth;
-  bool shutdown;
-  bool queued_long_job;
-  grpc_core::Thread thd;
-} thread_state;
-
-static thread_state* g_thread_state;
-static size_t g_max_threads;
-static gpr_atm g_cur_threads;
-static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
+#define EXECUTOR_TRACE(format, ...)                     \
+  if (executor_trace.enabled()) {                       \
+    gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
+  }
+
+grpc_core::TraceFlag executor_trace(false, "executor");

 GPR_TLS_DECL(g_this_thread_state);

-grpc_core::TraceFlag executor_trace(false, "executor");
+GrpcExecutor::GrpcExecutor(const char* executor_name) : name_(executor_name) {
+  adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
+  gpr_atm_no_barrier_store(&num_threads_, 0);
+  max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
+}

-static void executor_thread(void* arg);
+void GrpcExecutor::Init() { SetThreading(true); }

-static size_t run_closures(grpc_closure_list list) {
+size_t GrpcExecutor::RunClosures(grpc_closure_list list) {
   size_t n = 0;

   grpc_closure* c = list.head;
   while (c != nullptr) {
     grpc_closure* next = c->next_data.next;
     grpc_error* error = c->error_data.error;
-    if (executor_trace.enabled()) {
-#ifndef NDEBUG
-      gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
-              c->file_created, c->line_created);
-#else
-      gpr_log(GPR_INFO, "EXECUTOR: run %p", c);
-#endif
-    }
 #ifndef NDEBUG
+    EXECUTOR_TRACE("run %p [created by %s:%d]", c, c->file_created,
+                   c->line_created);
     c->scheduled = false;
+#else
+    EXECUTOR_TRACE("run %p", c);
 #endif
     c->cb(c->cb_arg, error);
     GRPC_ERROR_UNREF(error);
@@ -85,217 +76,282 @@ static size_t run_closures(grpc_closure_list list) {
   return n;
 }

-bool grpc_executor_is_threaded() {
-  return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
+bool GrpcExecutor::IsThreaded() const {
+  return gpr_atm_no_barrier_load(&num_threads_) > 0;
 }

-void grpc_executor_set_threading(bool threading) {
-  gpr_atm cur_thread_count = gpr_atm_no_barrier_load(&g_cur_threads);
+void GrpcExecutor::SetThreading(bool threading) {
+  gpr_atm curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
+
   if (threading) {
+    if (curr_num_threads > 0) return;
+
+    GPR_ASSERT(num_threads_ == 0);
+    gpr_atm_no_barrier_store(&num_threads_, 1);
     gpr_tls_init(&g_this_thread_state);
+    thd_state_ = static_cast<ThreadState*>(
+        gpr_zalloc(sizeof(ThreadState) * max_threads_));
+
+    for (size_t i = 0; i < max_threads_; i++) {
+      gpr_mu_init(&thd_state_[i].mu);
+      gpr_cv_init(&thd_state_[i].cv);
+      thd_state_[i].id = i;
+      thd_state_[i].thd = grpc_core::Thread();
+      thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
     }

+    thd_state_[0].thd =
+        grpc_core::Thread(name_, &GrpcExecutor::ThreadMain, &thd_state_[0]);
+    thd_state_[0].thd.Start();
+  } else {  // !threading
+    if (curr_num_threads == 0) return;
+
+    for (size_t i = 0; i < max_threads_; i++) {
+      gpr_mu_lock(&thd_state_[i].mu);
+      thd_state_[i].shutdown = true;
+      gpr_cv_signal(&thd_state_[i].cv);
+      gpr_mu_unlock(&thd_state_[i].mu);
     }
+
+    /* Ensure no thread is adding a new thread. Once this is past, then no
+     * thread will try to add a new one either (since shutdown is true) */
+    gpr_spinlock_lock(&adding_thread_lock_);
+    gpr_spinlock_unlock(&adding_thread_lock_);
+
+    curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
+    for (gpr_atm i = 0; i < curr_num_threads; i++) {
+      thd_state_[i].thd.Join();
+      EXECUTOR_TRACE(" Thread %" PRIdPTR " of %" PRIdPTR " joined", i,
+                     curr_num_threads);
     }
+
+    gpr_atm_no_barrier_store(&num_threads_, 0);
+    for (size_t i = 0; i < max_threads_; i++) {
+      gpr_mu_destroy(&thd_state_[i].mu);
+      gpr_cv_destroy(&thd_state_[i].cv);
+      RunClosures(thd_state_[i].elems);
     }
+
+    gpr_free(thd_state_);
     gpr_tls_destroy(&g_this_thread_state);
   }
 }

-void grpc_executor_init() {
-  gpr_atm_no_barrier_store(&g_cur_threads, 0);
-  grpc_executor_set_threading(true);
-}
+void GrpcExecutor::Shutdown() { SetThreading(false); }

-  thread_state* ts = static_cast<thread_state*>(arg);
-  gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
+void GrpcExecutor::ThreadMain(void* arg) {
+  ThreadState* ts = static_cast<ThreadState*>(arg);
+  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(ts));

   grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

   size_t subtract_depth = 0;
   for (;;) {
+    EXECUTOR_TRACE("[%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")", ts->id,
+                   subtract_depth);
+
     gpr_mu_lock(&ts->mu);
     ts->depth -= subtract_depth;
+    // Wait for closures to be enqueued or for the executor to be shutdown
     while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
       ts->queued_long_job = false;
       gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
     }
+
     if (ts->shutdown) {
-      if (executor_trace.enabled()) {
-        gpr_log(GPR_INFO, "EXECUTOR[%d]: shutdown",
-                static_cast<int>(ts - g_thread_state));
-      }
+      EXECUTOR_TRACE("[%" PRIdPTR "]: shutdown", ts->id);
       gpr_mu_unlock(&ts->mu);
       break;
     }
+
     GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
+    grpc_closure_list closures = ts->elems;
     ts->elems = GRPC_CLOSURE_LIST_INIT;
     gpr_mu_unlock(&ts->mu);
+
+    EXECUTOR_TRACE("[%" PRIdPTR "]: execute", ts->id);

     grpc_core::ExecCtx::Get()->InvalidateNow();
+    subtract_depth = RunClosures(closures);
   }
 }

+void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
+                           bool is_short) {
   bool retry_push;
   if (is_short) {
     GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
   } else {
     GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
   }
+
   do {
     retry_push = false;
     size_t cur_thread_count =
-        static_cast<size_t>(gpr_atm_no_barrier_load(&g_cur_threads));
+        static_cast<size_t>(gpr_atm_no_barrier_load(&num_threads_));
+
+    // If the number of threads is zero(i.e either the executor is not threaded
+    // or already shutdown), then queue the closure on the exec context itself
     if (cur_thread_count == 0) {
 #ifndef NDEBUG
+      EXECUTOR_TRACE("schedule %p (created %s:%d) inline", closure,
+                     closure->file_created, closure->line_created);
 #else
+      EXECUTOR_TRACE("schedule %p inline", closure);
 #endif
       grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
                                closure, error);
       return;
     }
+
+    ThreadState* ts = (ThreadState*)gpr_tls_get(&g_this_thread_state);
     if (ts == nullptr) {
+      ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
+                                        cur_thread_count)];
     } else {
       GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
     }
-    thread_state* orig_ts = ts;

+    ThreadState* orig_ts = ts;
+
+    bool try_new_thread = false;
     for (;;) {
 #ifndef NDEBUG
+      EXECUTOR_TRACE(
+          "try to schedule %p (%s) (created %s:%d) to thread "
+          "%" PRIdPTR,
+          closure, is_short ? "short" : "long", closure->file_created,
+          closure->line_created, ts->id);
 #else
+      EXECUTOR_TRACE("try to schedule %p (%s) to thread %" PRIdPTR, closure,
+                     is_short ? "short" : "long", ts->id);
 #endif
+
       gpr_mu_lock(&ts->mu);
       if (ts->queued_long_job) {
         // if there's a long job queued, we never queue anything else to this
         // queue (since long jobs can take 'infinite' time and we need to
-        // guarantee no starvation)
-        // ... spin through queues and try again
+        // guarantee no starvation). Spin through queues and try again
         gpr_mu_unlock(&ts->mu);
-        size_t idx = static_cast<size_t>(ts - g_thread_state);
-        ts = &g_thread_state[(idx + 1) % cur_thread_count];
+        size_t idx = ts->id;
+        ts = &thd_state_[(idx + 1) % cur_thread_count];
         if (ts == orig_ts) {
+          // We cycled through all the threads. Retry enqueue again (by creating
+          // a new thread)
          retry_push = true;
+          // TODO (sreek): What if the executor is shutdown OR if
+          // cur_thread_count is already equal to max_threads ? (currently - as
+          // of July 2018, we do not run in to this issue because there is only
+          // one instance of long job in gRPC. This has to be fixed soon)
          try_new_thread = true;
          break;
        }
+
         continue;
       }
+
+      // == Found the thread state (i.e thread) to enqueue this closure! ==
+
+      // Also, if this thread has been waiting for closures, wake it up.
+      // - If grpc_closure_list_empty() is true and the Executor is not
+      //   shutdown, it means that the thread must be waiting in ThreadMain()
+      // - Note that gpr_cv_signal() won't immediately wakeup the thread. That
+      //   happens after we release the mutex &ts->mu a few lines below
       if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
         GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
         gpr_cv_signal(&ts->cv);
       }
+
       grpc_closure_list_append(&ts->elems, closure, error);
+
+      // If we already queued more than MAX_DEPTH number of closures on this
+      // thread, use this as a hint to create more threads
       ts->depth++;
       try_new_thread = ts->depth > MAX_DEPTH &&
+                       cur_thread_count < max_threads_ && !ts->shutdown;
+
+      ts->queued_long_job = !is_short;
+
       gpr_mu_unlock(&ts->mu);
       break;
     }
+
+    if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
       cur_thread_count =
-          static_cast<size_t>(gpr_atm_no_barrier_load(&g_cur_threads));
-      if (cur_thread_count < g_max_threads) {
+          static_cast<size_t>(gpr_atm_no_barrier_load(&num_threads_));
+      if (cur_thread_count < max_threads_) {
+        // Increment num_threads (Safe to do a no_barrier_store instead of a
+        // cas because we always increment num_threads under the
+        // 'adding_thread_lock')
+        gpr_atm_no_barrier_store(&num_threads_, cur_thread_count + 1);
+
+        thd_state_[cur_thread_count].thd = grpc_core::Thread(
+            name_, &GrpcExecutor::ThreadMain, &thd_state_[cur_thread_count]);
+        thd_state_[cur_thread_count].thd.Start();
       }
-      gpr_spinlock_unlock(&g_adding_thread_lock);
+      gpr_spinlock_unlock(&adding_thread_lock_);
     }
+
     if (retry_push) {
       GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
     }
   } while (retry_push);
 }

+static GrpcExecutor* global_executor;
+
+void enqueue_long(grpc_closure* closure, grpc_error* error) {
+  global_executor->Enqueue(closure, error, false /* is_short */);
+}
+
+void enqueue_short(grpc_closure* closure, grpc_error* error) {
+  global_executor->Enqueue(closure, error, true /* is_short */);
+}
+
+// Short-Job executor scheduler
+static const grpc_closure_scheduler_vtable global_executor_vtable_short = {
+    enqueue_short, enqueue_short, "executor-short"};
+static grpc_closure_scheduler global_scheduler_short = {
+    &global_executor_vtable_short};
+
+// Long-job executor scheduler
+static const grpc_closure_scheduler_vtable global_executor_vtable_long = {
+    enqueue_long, enqueue_long, "executor-long"};
+static grpc_closure_scheduler global_scheduler_long = {
+    &global_executor_vtable_long};
+
+// grpc_executor_init() and grpc_executor_shutdown() functions are called in the
+// the grpc_init() and grpc_shutdown() code paths which are protected by a
+// global mutex. So it is okay to assume that these functions are thread-safe
+void grpc_executor_init() {
+  if (global_executor != nullptr) {
+    // grpc_executor_init() already called once (and grpc_executor_shutdown()
+    // wasn't called)
+    return;
+  }
+
+  global_executor = grpc_core::New<GrpcExecutor>("global-executor");
+  global_executor->Init();
 }

+void grpc_executor_shutdown() {
+  // Shutdown already called
+  if (global_executor == nullptr) {
+    return;
+  }
+
+  global_executor->Shutdown();
+  grpc_core::Delete<GrpcExecutor>(global_executor);
+  global_executor = nullptr;
 }

-static const grpc_closure_scheduler_vtable executor_vtable_short = {
-    executor_push_short, executor_push_short, "executor"};
-static grpc_closure_scheduler executor_scheduler_short = {
-    &executor_vtable_short};
+bool grpc_executor_is_threaded() { return global_executor->IsThreaded(); }

+void grpc_executor_set_threading(bool enable) {
+  global_executor->SetThreading(enable);
+}

+grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) {
+  return job_type == GRPC_EXECUTOR_SHORT ? &global_scheduler_short
+                                         : &global_scheduler_long;
 }