wearefair-grpc 1.3.1.pre.c → 1.4.0.fair
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/Makefile +418 -126
- data/include/grpc/grpc.h +15 -69
- data/include/grpc/grpc_security.h +1 -1
- data/include/grpc/impl/codegen/compression_types.h +3 -4
- data/include/grpc/impl/codegen/gpr_types.h +0 -1
- data/include/grpc/impl/codegen/grpc_types.h +69 -3
- data/include/grpc/impl/codegen/port_platform.h +6 -0
- data/include/grpc/impl/codegen/slice.h +2 -1
- data/include/grpc/load_reporting.h +6 -6
- data/include/grpc/slice.h +25 -3
- data/include/grpc/slice_buffer.h +4 -0
- data/src/core/ext/census/context.c +1 -1
- data/src/core/ext/census/resource.c +3 -1
- data/src/core/ext/filters/client_channel/channel_connectivity.c +1 -1
- data/src/core/ext/filters/client_channel/client_channel.c +158 -100
- data/src/core/ext/filters/client_channel/client_channel_plugin.c +3 -2
- data/src/core/ext/filters/client_channel/lb_policy.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy.h +5 -6
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +153 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +42 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +344 -88
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +133 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +65 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +47 -5
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +6 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +19 -8
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +63 -34
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +13 -12
- data/src/core/ext/filters/client_channel/lb_policy_factory.c +28 -5
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +18 -4
- data/src/core/ext/filters/client_channel/parse_address.c +37 -7
- data/src/core/ext/filters/client_channel/parse_address.h +11 -8
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c +3 -3
- data/src/core/ext/filters/client_channel/subchannel.c +19 -16
- data/src/core/ext/filters/client_channel/subchannel.h +1 -0
- data/src/core/ext/filters/client_channel/uri_parser.c +36 -22
- data/src/core/ext/filters/client_channel/uri_parser.h +1 -1
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.c +42 -17
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.h +8 -9
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.c +19 -11
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.h +3 -6
- data/src/core/ext/filters/http/http_filters_plugin.c +104 -0
- data/src/core/{lib/channel/compress_filter.c → ext/filters/http/message_compress/message_compress_filter.c} +124 -23
- data/src/core/{lib/channel/compress_filter.h → ext/filters/http/message_compress/message_compress_filter.h} +5 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.c +4 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.h +3 -3
- data/src/core/ext/filters/load_reporting/load_reporting.c +2 -25
- data/src/core/ext/filters/load_reporting/load_reporting_filter.c +26 -1
- data/src/core/ext/filters/max_age/max_age_filter.c +14 -14
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.c +91 -47
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.h +3 -3
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +2 -2
- data/src/core/ext/transport/chttp2/transport/bin_decoder.c +2 -2
- data/src/core/ext/transport/chttp2/transport/bin_encoder.c +3 -3
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +296 -172
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -2
- data/src/core/ext/transport/chttp2/transport/frame_data.c +203 -164
- data/src/core/ext/transport/chttp2/transport/frame_data.h +8 -14
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +5 -5
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +1 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +4 -4
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +2 -4
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +4 -3
- data/src/core/ext/transport/chttp2/transport/internal.h +50 -33
- data/src/core/ext/transport/chttp2/transport/parsing.c +10 -11
- data/src/core/ext/transport/chttp2/transport/writing.c +32 -13
- data/src/core/lib/channel/channel_args.c +28 -9
- data/src/core/lib/channel/channel_args.h +5 -1
- data/src/core/lib/channel/channel_stack.c +1 -1
- data/src/core/lib/channel/channel_stack.h +2 -2
- data/src/core/lib/channel/channel_stack_builder.c +13 -1
- data/src/core/lib/channel/channel_stack_builder.h +5 -1
- data/src/core/lib/channel/connected_channel.c +3 -1
- data/src/core/lib/channel/context.h +2 -2
- data/src/core/lib/compression/message_compress.c +2 -2
- data/src/core/lib/debug/trace.c +13 -6
- data/src/core/lib/debug/trace.h +27 -1
- data/src/core/lib/http/httpcli.c +1 -1
- data/src/core/lib/http/httpcli_security_connector.c +6 -10
- data/src/core/lib/http/parser.c +2 -2
- data/src/core/lib/http/parser.h +2 -1
- data/src/core/lib/iomgr/combiner.c +6 -6
- data/src/core/lib/iomgr/combiner.h +2 -1
- data/src/core/lib/iomgr/error.c +12 -5
- data/src/core/lib/iomgr/error.h +13 -13
- data/src/core/lib/iomgr/ev_epoll1_linux.c +984 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.h +44 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +2146 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +1337 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epollex_linux.c +1511 -0
- data/src/core/lib/iomgr/ev_epollex_linux.h +43 -0
- data/src/core/lib/iomgr/{ev_epoll_linux.c → ev_epollsig_linux.c} +24 -31
- data/src/core/lib/iomgr/{ev_epoll_linux.h → ev_epollsig_linux.h} +4 -4
- data/src/core/lib/iomgr/ev_poll_posix.c +12 -27
- data/src/core/lib/iomgr/ev_poll_posix.h +2 -2
- data/src/core/lib/iomgr/ev_posix.c +22 -8
- data/src/core/lib/iomgr/ev_posix.h +4 -3
- data/src/core/lib/iomgr/exec_ctx.c +5 -0
- data/src/core/lib/iomgr/exec_ctx.h +2 -0
- data/src/core/lib/iomgr/iomgr.c +4 -0
- data/src/core/lib/iomgr/iomgr.h +3 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.c +116 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.h +41 -0
- data/src/core/lib/iomgr/lockfree_event.c +16 -0
- data/src/core/lib/iomgr/pollset.h +2 -5
- data/src/core/lib/iomgr/pollset_uv.c +1 -1
- data/src/core/lib/iomgr/pollset_windows.c +3 -3
- data/src/core/lib/iomgr/resource_quota.c +9 -8
- data/src/core/lib/iomgr/resource_quota.h +2 -1
- data/src/core/lib/iomgr/sockaddr_utils.h +1 -1
- data/src/core/lib/iomgr/socket_mutator.h +2 -0
- data/src/core/lib/iomgr/sys_epoll_wrapper.h +43 -0
- data/src/core/lib/iomgr/tcp_client_posix.c +6 -6
- data/src/core/lib/iomgr/tcp_client_uv.c +3 -3
- data/src/core/lib/iomgr/tcp_posix.c +7 -7
- data/src/core/lib/iomgr/tcp_posix.h +2 -1
- data/src/core/lib/iomgr/tcp_server_posix.c +1 -1
- data/src/core/lib/iomgr/tcp_uv.c +6 -6
- data/src/core/lib/iomgr/tcp_uv.h +2 -1
- data/src/core/lib/iomgr/tcp_windows.c +1 -1
- data/src/core/lib/iomgr/timer_generic.c +24 -25
- data/src/core/lib/iomgr/timer_manager.c +276 -0
- data/src/core/lib/iomgr/timer_manager.h +52 -0
- data/src/core/lib/iomgr/timer_uv.c +6 -0
- data/src/core/lib/iomgr/udp_server.c +42 -9
- data/src/core/lib/iomgr/udp_server.h +3 -1
- data/src/core/lib/security/credentials/credentials.c +0 -1
- data/src/core/lib/security/credentials/fake/fake_credentials.c +23 -0
- data/src/core/lib/security/credentials/fake/fake_credentials.h +12 -9
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_credentials.c +1 -1
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +1 -1
- data/src/core/lib/security/credentials/ssl/ssl_credentials.c +24 -53
- data/src/core/lib/security/transport/client_auth_filter.c +9 -3
- data/src/core/lib/security/transport/secure_endpoint.c +7 -7
- data/src/core/lib/security/transport/secure_endpoint.h +1 -1
- data/src/core/lib/security/transport/security_connector.c +32 -51
- data/src/core/lib/security/transport/security_connector.h +10 -14
- data/src/core/lib/slice/b64.c +1 -1
- data/src/core/lib/slice/percent_encoding.c +3 -3
- data/src/core/lib/slice/slice.c +66 -33
- data/src/core/lib/slice/slice_buffer.c +25 -6
- data/src/core/lib/slice/slice_hash_table.c +33 -35
- data/src/core/lib/slice/slice_hash_table.h +7 -12
- data/src/core/lib/support/atomic.h +45 -0
- data/src/core/lib/support/atomic_with_atm.h +70 -0
- data/src/core/lib/support/atomic_with_std.h +48 -0
- data/src/core/lib/support/avl.c +14 -14
- data/src/core/lib/support/memory.h +74 -0
- data/src/core/lib/support/mpscq.c +12 -1
- data/src/core/lib/support/mpscq.h +4 -0
- data/src/core/lib/support/stack_lockfree.c +3 -36
- data/src/core/lib/support/time_posix.c +8 -0
- data/src/core/lib/support/tmpfile_posix.c +10 -10
- data/src/core/lib/surface/alarm.c +3 -1
- data/src/core/lib/surface/api_trace.c +2 -1
- data/src/core/lib/surface/api_trace.h +2 -2
- data/src/core/lib/surface/byte_buffer_reader.c +1 -1
- data/src/core/lib/surface/call.c +65 -22
- data/src/core/lib/surface/call.h +4 -2
- data/src/core/lib/surface/channel_init.c +2 -19
- data/src/core/lib/surface/channel_stack_type.c +18 -0
- data/src/core/lib/surface/channel_stack_type.h +2 -0
- data/src/core/lib/surface/completion_queue.c +249 -83
- data/src/core/lib/surface/completion_queue.h +18 -13
- data/src/core/lib/surface/completion_queue_factory.c +24 -9
- data/src/core/lib/surface/init.c +1 -52
- data/src/core/lib/surface/{lame_client.c → lame_client.cc} +37 -26
- data/src/core/lib/surface/server.c +50 -27
- data/src/core/lib/surface/server.h +2 -1
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/bdp_estimator.c +20 -9
- data/src/core/lib/transport/bdp_estimator.h +5 -1
- data/src/core/lib/transport/byte_stream.c +23 -9
- data/src/core/lib/transport/byte_stream.h +15 -6
- data/src/core/lib/transport/connectivity_state.c +6 -6
- data/src/core/lib/transport/connectivity_state.h +2 -1
- data/src/core/lib/transport/service_config.c +6 -13
- data/src/core/lib/transport/service_config.h +2 -2
- data/src/core/lib/transport/static_metadata.c +403 -389
- data/src/core/lib/transport/static_metadata.h +127 -114
- data/src/core/plugin_registry/grpc_plugin_registry.c +12 -0
- data/src/core/tsi/fake_transport_security.c +5 -4
- data/src/core/tsi/ssl_transport_security.c +71 -82
- data/src/core/tsi/ssl_transport_security.h +39 -61
- data/src/core/tsi/transport_security.c +83 -2
- data/src/core/tsi/transport_security.h +27 -2
- data/src/core/tsi/transport_security_adapter.c +236 -0
- data/src/core/tsi/transport_security_adapter.h +62 -0
- data/src/core/tsi/transport_security_interface.h +179 -66
- data/src/ruby/ext/grpc/extconf.rb +2 -1
- data/src/ruby/ext/grpc/rb_byte_buffer.c +8 -6
- data/src/ruby/ext/grpc/rb_call.c +56 -48
- data/src/ruby/ext/grpc/rb_call.h +3 -4
- data/src/ruby/ext/grpc/rb_call_credentials.c +23 -22
- data/src/ruby/ext/grpc/rb_channel.c +45 -29
- data/src/ruby/ext/grpc/rb_channel_args.c +11 -9
- data/src/ruby/ext/grpc/rb_channel_credentials.c +16 -12
- data/src/ruby/ext/grpc/rb_completion_queue.c +7 -9
- data/src/ruby/ext/grpc/rb_compression_options.c +7 -6
- data/src/ruby/ext/grpc/rb_event_thread.c +10 -12
- data/src/ruby/ext/grpc/rb_event_thread.h +1 -2
- data/src/ruby/ext/grpc/rb_grpc.c +11 -15
- data/src/ruby/ext/grpc/rb_grpc.h +2 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +14 -6
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +22 -10
- data/src/ruby/ext/grpc/rb_server.c +26 -28
- data/src/ruby/lib/grpc/version.rb +1 -1
- metadata +40 -18
- data/src/ruby/lib/grpc/grpc_c.bundle +0 -0
- data/src/ruby/lib/grpc/grpc_c.so +0 -0
@@ -58,7 +58,7 @@
|
|
58
58
|
#include "src/core/lib/iomgr/unix_sockets_posix.h"
|
59
59
|
#include "src/core/lib/support/string.h"
|
60
60
|
|
61
|
-
extern
|
61
|
+
extern grpc_tracer_flag grpc_tcp_trace;
|
62
62
|
|
63
63
|
typedef struct {
|
64
64
|
gpr_mu mu;
|
@@ -114,7 +114,7 @@ done:
|
|
114
114
|
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
|
115
115
|
int done;
|
116
116
|
async_connect *ac = acp;
|
117
|
-
if (grpc_tcp_trace) {
|
117
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
118
118
|
const char *str = grpc_error_string(error);
|
119
119
|
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
|
120
120
|
str);
|
@@ -152,7 +152,7 @@ static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
|
|
152
152
|
|
153
153
|
GRPC_ERROR_REF(error);
|
154
154
|
|
155
|
-
if (grpc_tcp_trace) {
|
155
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
156
156
|
const char *str = grpc_error_string(error);
|
157
157
|
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s",
|
158
158
|
ac->addr_str, str);
|
@@ -330,9 +330,9 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
|
|
330
330
|
grpc_schedule_on_exec_ctx);
|
331
331
|
ac->channel_args = grpc_channel_args_copy(channel_args);
|
332
332
|
|
333
|
-
if (grpc_tcp_trace) {
|
334
|
-
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
|
335
|
-
ac->addr_str);
|
333
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
334
|
+
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
|
335
|
+
ac->addr_str, fdobj);
|
336
336
|
}
|
337
337
|
|
338
338
|
gpr_mu_lock(&ac->mu);
|
@@ -46,7 +46,7 @@
|
|
46
46
|
#include "src/core/lib/iomgr/tcp_uv.h"
|
47
47
|
#include "src/core/lib/iomgr/timer.h"
|
48
48
|
|
49
|
-
extern
|
49
|
+
extern grpc_tracer_flag grpc_tcp_trace;
|
50
50
|
|
51
51
|
typedef struct grpc_uv_tcp_connect {
|
52
52
|
uv_connect_t connect_req;
|
@@ -72,7 +72,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
|
|
72
72
|
grpc_error *error) {
|
73
73
|
int done;
|
74
74
|
grpc_uv_tcp_connect *connect = acp;
|
75
|
-
if (grpc_tcp_trace) {
|
75
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
76
76
|
const char *str = grpc_error_string(error);
|
77
77
|
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s",
|
78
78
|
connect->addr_name, str);
|
@@ -156,7 +156,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
|
|
156
156
|
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
|
157
157
|
connect->connect_req.data = connect;
|
158
158
|
|
159
|
-
if (grpc_tcp_trace) {
|
159
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
160
160
|
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
|
161
161
|
connect->addr_name);
|
162
162
|
}
|
@@ -74,7 +74,7 @@ typedef GRPC_MSG_IOVLEN_TYPE msg_iovlen_type;
|
|
74
74
|
typedef size_t msg_iovlen_type;
|
75
75
|
#endif
|
76
76
|
|
77
|
-
|
77
|
+
grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false);
|
78
78
|
|
79
79
|
typedef struct {
|
80
80
|
grpc_endpoint base;
|
@@ -221,7 +221,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
|
|
221
221
|
grpc_error *error) {
|
222
222
|
grpc_closure *cb = tcp->read_cb;
|
223
223
|
|
224
|
-
if (grpc_tcp_trace) {
|
224
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
225
225
|
size_t i;
|
226
226
|
const char *str = grpc_error_string(error);
|
227
227
|
gpr_log(GPR_DEBUG, "read: error=%s", str);
|
@@ -468,14 +468,14 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
|
|
468
468
|
}
|
469
469
|
|
470
470
|
if (!tcp_flush(tcp, &error)) {
|
471
|
-
if (grpc_tcp_trace) {
|
471
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
472
472
|
gpr_log(GPR_DEBUG, "write: delayed");
|
473
473
|
}
|
474
474
|
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
|
475
475
|
} else {
|
476
476
|
cb = tcp->write_cb;
|
477
477
|
tcp->write_cb = NULL;
|
478
|
-
if (grpc_tcp_trace) {
|
478
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
479
479
|
const char *str = grpc_error_string(error);
|
480
480
|
gpr_log(GPR_DEBUG, "write: %s", str);
|
481
481
|
}
|
@@ -490,7 +490,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
|
|
490
490
|
grpc_tcp *tcp = (grpc_tcp *)ep;
|
491
491
|
grpc_error *error = GRPC_ERROR_NONE;
|
492
492
|
|
493
|
-
if (grpc_tcp_trace) {
|
493
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
494
494
|
size_t i;
|
495
495
|
|
496
496
|
for (i = 0; i < buf->count; i++) {
|
@@ -521,12 +521,12 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
|
|
521
521
|
if (!tcp_flush(tcp, &error)) {
|
522
522
|
TCP_REF(tcp, "write");
|
523
523
|
tcp->write_cb = cb;
|
524
|
-
if (grpc_tcp_trace) {
|
524
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
525
525
|
gpr_log(GPR_DEBUG, "write: delayed");
|
526
526
|
}
|
527
527
|
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
|
528
528
|
} else {
|
529
|
-
if (grpc_tcp_trace) {
|
529
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
530
530
|
const char *str = grpc_error_string(error);
|
531
531
|
gpr_log(GPR_DEBUG, "write: %s", str);
|
532
532
|
}
|
@@ -44,10 +44,11 @@
|
|
44
44
|
otherwise specified.
|
45
45
|
*/
|
46
46
|
|
47
|
+
#include "src/core/lib/debug/trace.h"
|
47
48
|
#include "src/core/lib/iomgr/endpoint.h"
|
48
49
|
#include "src/core/lib/iomgr/ev_posix.h"
|
49
50
|
|
50
|
-
extern
|
51
|
+
extern grpc_tracer_flag grpc_tcp_trace;
|
51
52
|
|
52
53
|
/* Create a tcp endpoint given a file desciptor and a read slice size.
|
53
54
|
Takes ownership of fd. */
|
@@ -257,7 +257,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
|
|
257
257
|
addr_str = grpc_sockaddr_to_uri(&addr);
|
258
258
|
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
|
259
259
|
|
260
|
-
if (grpc_tcp_trace) {
|
260
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
261
261
|
gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
|
262
262
|
}
|
263
263
|
|
data/src/core/lib/iomgr/tcp_uv.c
CHANGED
@@ -52,7 +52,7 @@
|
|
52
52
|
#include "src/core/lib/slice/slice_string_helpers.h"
|
53
53
|
#include "src/core/lib/support/string.h"
|
54
54
|
|
55
|
-
|
55
|
+
grpc_tracer_flag grpc_tcp_trace = GRPC_TRACER_INITIALIZER(false);
|
56
56
|
|
57
57
|
typedef struct {
|
58
58
|
grpc_endpoint base;
|
@@ -158,7 +158,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
|
|
158
158
|
sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
|
159
159
|
grpc_slice_buffer_add(tcp->read_slices, sub);
|
160
160
|
error = GRPC_ERROR_NONE;
|
161
|
-
if (grpc_tcp_trace) {
|
161
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
162
162
|
size_t i;
|
163
163
|
const char *str = grpc_error_string(error);
|
164
164
|
gpr_log(GPR_DEBUG, "read: error=%s", str);
|
@@ -199,7 +199,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
|
|
199
199
|
grpc_slice_from_static_string(uv_strerror(status)));
|
200
200
|
grpc_closure_sched(exec_ctx, cb, error);
|
201
201
|
}
|
202
|
-
if (grpc_tcp_trace) {
|
202
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
203
203
|
const char *str = grpc_error_string(error);
|
204
204
|
gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str);
|
205
205
|
}
|
@@ -217,7 +217,7 @@ static void write_callback(uv_write_t *req, int status) {
|
|
217
217
|
} else {
|
218
218
|
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Write failed");
|
219
219
|
}
|
220
|
-
if (grpc_tcp_trace) {
|
220
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
221
221
|
const char *str = grpc_error_string(error);
|
222
222
|
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
|
223
223
|
}
|
@@ -238,7 +238,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
|
|
238
238
|
grpc_slice *slice;
|
239
239
|
uv_write_t *write_req;
|
240
240
|
|
241
|
-
if (grpc_tcp_trace) {
|
241
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
242
242
|
size_t j;
|
243
243
|
|
244
244
|
for (j = 0; j < write_slices->count; j++) {
|
@@ -346,7 +346,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
|
|
346
346
|
char *peer_string) {
|
347
347
|
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
|
348
348
|
|
349
|
-
if (grpc_tcp_trace) {
|
349
|
+
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
|
350
350
|
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
|
351
351
|
}
|
352
352
|
|
data/src/core/lib/iomgr/tcp_uv.h
CHANGED
@@ -44,11 +44,12 @@
|
|
44
44
|
otherwise specified.
|
45
45
|
*/
|
46
46
|
|
47
|
+
#include "src/core/lib/debug/trace.h"
|
47
48
|
#include "src/core/lib/iomgr/endpoint.h"
|
48
49
|
|
49
50
|
#include <uv.h>
|
50
51
|
|
51
|
-
extern
|
52
|
+
extern grpc_tracer_flag grpc_tcp_trace;
|
52
53
|
|
53
54
|
#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
|
54
55
|
|
@@ -219,7 +219,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
|
|
219
219
|
tcp->read_slices = read_slices;
|
220
220
|
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);
|
221
221
|
|
222
|
-
tcp->read_slice =
|
222
|
+
tcp->read_slice = GRPC_SLICE_MALLOC(8192);
|
223
223
|
|
224
224
|
buffer.len = (ULONG)GRPC_SLICE_LENGTH(
|
225
225
|
tcp->read_slice); // we know slice size fits in 32bit.
|
@@ -56,8 +56,8 @@
|
|
56
56
|
#define MIN_QUEUE_WINDOW_DURATION 0.01
|
57
57
|
#define MAX_QUEUE_WINDOW_DURATION 1
|
58
58
|
|
59
|
-
|
60
|
-
|
59
|
+
grpc_tracer_flag grpc_timer_trace = GRPC_TRACER_INITIALIZER(false);
|
60
|
+
grpc_tracer_flag grpc_timer_check_trace = GRPC_TRACER_INITIALIZER(false);
|
61
61
|
|
62
62
|
typedef struct {
|
63
63
|
gpr_mu mu;
|
@@ -232,14 +232,13 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
|
|
232
232
|
GPR_ASSERT(deadline.clock_type == g_clock_type);
|
233
233
|
GPR_ASSERT(now.clock_type == g_clock_type);
|
234
234
|
timer->closure = closure;
|
235
|
-
timer->deadline = timespec_to_atm_round_up(deadline);
|
235
|
+
gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline);
|
236
236
|
|
237
|
-
if (grpc_timer_trace) {
|
237
|
+
if (GRPC_TRACER_ON(grpc_timer_trace)) {
|
238
238
|
gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR
|
239
239
|
"] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]",
|
240
|
-
timer, deadline.tv_sec, deadline.tv_nsec,
|
241
|
-
now.
|
242
|
-
closure->cb);
|
240
|
+
timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec,
|
241
|
+
now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb);
|
243
242
|
}
|
244
243
|
|
245
244
|
if (!g_shared_mutables.initialized) {
|
@@ -262,13 +261,13 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
|
|
262
261
|
|
263
262
|
grpc_time_averaged_stats_add_sample(&shard->stats,
|
264
263
|
ts_to_dbl(gpr_time_sub(deadline, now)));
|
265
|
-
if (
|
264
|
+
if (deadline_atm < shard->queue_deadline_cap) {
|
266
265
|
is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
|
267
266
|
} else {
|
268
267
|
timer->heap_index = INVALID_HEAP_INDEX;
|
269
268
|
list_join(&shard->list, timer);
|
270
269
|
}
|
271
|
-
if (grpc_timer_trace) {
|
270
|
+
if (GRPC_TRACER_ON(grpc_timer_trace)) {
|
272
271
|
gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
|
273
272
|
" => is_first_timer=%s",
|
274
273
|
(int)(shard - g_shards), shard->queue_deadline_cap,
|
@@ -289,16 +288,16 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
|
|
289
288
|
grpc_timer_check. */
|
290
289
|
if (is_first_timer) {
|
291
290
|
gpr_mu_lock(&g_shared_mutables.mu);
|
292
|
-
if (grpc_timer_trace) {
|
291
|
+
if (GRPC_TRACER_ON(grpc_timer_trace)) {
|
293
292
|
gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR,
|
294
293
|
shard->min_deadline);
|
295
294
|
}
|
296
|
-
if (
|
295
|
+
if (deadline_atm < shard->min_deadline) {
|
297
296
|
gpr_atm old_min_deadline = g_shard_queue[0]->min_deadline;
|
298
|
-
shard->min_deadline =
|
297
|
+
shard->min_deadline = deadline_atm;
|
299
298
|
note_deadline_change(shard);
|
300
|
-
if (shard->shard_queue_index == 0 &&
|
301
|
-
gpr_atm_no_barrier_store(&g_shared_mutables.min_timer,
|
299
|
+
if (shard->shard_queue_index == 0 && deadline_atm < old_min_deadline) {
|
300
|
+
gpr_atm_no_barrier_store(&g_shared_mutables.min_timer, deadline_atm);
|
302
301
|
grpc_kick_poller();
|
303
302
|
}
|
304
303
|
}
|
@@ -319,7 +318,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
|
|
319
318
|
|
320
319
|
shard_type *shard = &g_shards[GPR_HASH_POINTER(timer, NUM_SHARDS)];
|
321
320
|
gpr_mu_lock(&shard->mu);
|
322
|
-
if (grpc_timer_trace) {
|
321
|
+
if (GRPC_TRACER_ON(grpc_timer_trace)) {
|
323
322
|
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer,
|
324
323
|
timer->pending ? "true" : "false");
|
325
324
|
}
|
@@ -355,7 +354,7 @@ static int refill_queue(shard_type *shard, gpr_atm now) {
|
|
355
354
|
saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
|
356
355
|
(gpr_atm)(deadline_delta * 1000.0));
|
357
356
|
|
358
|
-
if (grpc_timer_check_trace) {
|
357
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
359
358
|
gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
|
360
359
|
(int)(shard - g_shards), shard->queue_deadline_cap);
|
361
360
|
}
|
@@ -363,7 +362,7 @@ static int refill_queue(shard_type *shard, gpr_atm now) {
|
|
363
362
|
next = timer->next;
|
364
363
|
|
365
364
|
if (timer->deadline < shard->queue_deadline_cap) {
|
366
|
-
if (grpc_timer_check_trace) {
|
365
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
367
366
|
gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap",
|
368
367
|
timer->deadline);
|
369
368
|
}
|
@@ -380,7 +379,7 @@ static int refill_queue(shard_type *shard, gpr_atm now) {
|
|
380
379
|
static grpc_timer *pop_one(shard_type *shard, gpr_atm now) {
|
381
380
|
grpc_timer *timer;
|
382
381
|
for (;;) {
|
383
|
-
if (grpc_timer_check_trace) {
|
382
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
384
383
|
gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s",
|
385
384
|
(int)(shard - g_shards),
|
386
385
|
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
|
@@ -390,13 +389,13 @@ static grpc_timer *pop_one(shard_type *shard, gpr_atm now) {
|
|
390
389
|
if (!refill_queue(shard, now)) return NULL;
|
391
390
|
}
|
392
391
|
timer = grpc_timer_heap_top(&shard->heap);
|
393
|
-
if (grpc_timer_check_trace) {
|
392
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
394
393
|
gpr_log(GPR_DEBUG,
|
395
394
|
" .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
|
396
395
|
timer->deadline, now);
|
397
396
|
}
|
398
397
|
if (timer->deadline > now) return NULL;
|
399
|
-
if (grpc_timer_trace) {
|
398
|
+
if (GRPC_TRACER_ON(grpc_timer_trace)) {
|
400
399
|
gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late", timer,
|
401
400
|
now - timer->deadline);
|
402
401
|
}
|
@@ -436,7 +435,7 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_atm now,
|
|
436
435
|
if (gpr_spinlock_trylock(&g_shared_mutables.checker_mu)) {
|
437
436
|
gpr_mu_lock(&g_shared_mutables.mu);
|
438
437
|
|
439
|
-
if (grpc_timer_check_trace) {
|
438
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
440
439
|
gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR,
|
441
440
|
(int)(g_shard_queue[0] - g_shards),
|
442
441
|
g_shard_queue[0]->min_deadline);
|
@@ -452,7 +451,7 @@ static int run_some_expired_timers(grpc_exec_ctx *exec_ctx, gpr_atm now,
|
|
452
451
|
n +=
|
453
452
|
pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline, error);
|
454
453
|
|
455
|
-
if (grpc_timer_check_trace) {
|
454
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
456
455
|
gpr_log(GPR_DEBUG, " .. popped --> %" PRIdPTR
|
457
456
|
", shard[%d]->min_deadline %" PRIdPTR
|
458
457
|
" --> %" PRIdPTR ", now=%" PRIdPTR,
|
@@ -509,7 +508,7 @@ bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
|
|
509
508
|
*next =
|
510
509
|
atm_to_timespec(GPR_MIN(timespec_to_atm_round_up(*next), min_timer));
|
511
510
|
}
|
512
|
-
if (grpc_timer_check_trace) {
|
511
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
513
512
|
gpr_log(GPR_DEBUG,
|
514
513
|
"TIMER CHECK SKIP: now_atm=%" PRIdPTR " min_timer=%" PRIdPTR,
|
515
514
|
now_atm, min_timer);
|
@@ -523,7 +522,7 @@ bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
|
|
523
522
|
: GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
|
524
523
|
|
525
524
|
// tracing
|
526
|
-
if (grpc_timer_check_trace) {
|
525
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
527
526
|
char *next_str;
|
528
527
|
if (next == NULL) {
|
529
528
|
next_str = gpr_strdup("NULL");
|
@@ -549,7 +548,7 @@ bool grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_timespec now,
|
|
549
548
|
*next = atm_to_timespec(next_atm);
|
550
549
|
}
|
551
550
|
// tracing
|
552
|
-
if (grpc_timer_check_trace) {
|
551
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
553
552
|
char *next_str;
|
554
553
|
if (next == NULL) {
|
555
554
|
next_str = gpr_strdup("NULL");
|
@@ -0,0 +1,276 @@
|
|
1
|
+
/*
|
2
|
+
*
|
3
|
+
* Copyright 2017, Google Inc.
|
4
|
+
* All rights reserved.
|
5
|
+
*
|
6
|
+
* Redistribution and use in source and binary forms, with or without
|
7
|
+
* modification, are permitted provided that the following conditions are
|
8
|
+
* met:
|
9
|
+
*
|
10
|
+
* * Redistributions of source code must retain the above copyright
|
11
|
+
* notice, this list of conditions and the following disclaimer.
|
12
|
+
* * Redistributions in binary form must reproduce the above
|
13
|
+
* copyright notice, this list of conditions and the following disclaimer
|
14
|
+
* in the documentation and/or other materials provided with the
|
15
|
+
* distribution.
|
16
|
+
* * Neither the name of Google Inc. nor the names of its
|
17
|
+
* contributors may be used to endorse or promote products derived from
|
18
|
+
* this software without specific prior written permission.
|
19
|
+
*
|
20
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
21
|
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
22
|
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
23
|
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
24
|
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
25
|
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
26
|
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
27
|
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
28
|
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
29
|
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
30
|
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
31
|
+
*
|
32
|
+
*/
|
33
|
+
|
34
|
+
#include "src/core/lib/iomgr/timer_manager.h"
|
35
|
+
|
36
|
+
#include <grpc/support/alloc.h>
|
37
|
+
#include <grpc/support/log.h>
|
38
|
+
#include <grpc/support/thd.h>
|
39
|
+
|
40
|
+
#include "src/core/lib/debug/trace.h"
|
41
|
+
#include "src/core/lib/iomgr/timer.h"
|
42
|
+
|
43
|
+
typedef struct completed_thread {
|
44
|
+
gpr_thd_id t;
|
45
|
+
struct completed_thread *next;
|
46
|
+
} completed_thread;
|
47
|
+
|
48
|
+
// tracer flag defined elsewhere in the timer subsystem; gates debug logging
extern grpc_tracer_flag grpc_timer_check_trace;

// global mutex protecting all of the state below
static gpr_mu g_mu;
// are we multi-threaded (true between start_threads and stop_threads)
static bool g_threaded;
// cv to wait until a thread is needed (timer threads sleep on this)
static gpr_cv g_cv_wait;
// cv for notification when threading ends (signaled when the last thread exits)
static gpr_cv g_cv_shutdown;
// number of threads in the system
static int g_thread_count;
// number of threads sitting around waiting
static int g_waiter_count;
// linked list of threads that have completed (and need joining)
static completed_thread *g_completed_threads;
// was the manager kicked by the timer system
static bool g_kicked;
// is there a thread waiting until the next timer should fire?
static bool g_has_timed_waiter;
// generation counter to track which thread is waiting for the next timer;
// bumping it invalidates the current timed waiter without waking it
static uint64_t g_timed_waiter_generation;

// body of each timer manager thread (defined below)
static void timer_thread(void *unused);
static void gc_completed_threads(void) {
|
74
|
+
if (g_completed_threads != NULL) {
|
75
|
+
completed_thread *to_gc = g_completed_threads;
|
76
|
+
g_completed_threads = NULL;
|
77
|
+
gpr_mu_unlock(&g_mu);
|
78
|
+
while (to_gc != NULL) {
|
79
|
+
gpr_thd_join(to_gc->t);
|
80
|
+
completed_thread *next = to_gc->next;
|
81
|
+
gpr_free(to_gc);
|
82
|
+
to_gc = next;
|
83
|
+
}
|
84
|
+
gpr_mu_lock(&g_mu);
|
85
|
+
}
|
86
|
+
}
|
87
|
+
|
88
|
+
static void start_timer_thread_and_unlock(void) {
|
89
|
+
GPR_ASSERT(g_threaded);
|
90
|
+
++g_waiter_count;
|
91
|
+
++g_thread_count;
|
92
|
+
gpr_mu_unlock(&g_mu);
|
93
|
+
if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
|
94
|
+
gpr_log(GPR_DEBUG, "Spawn timer thread");
|
95
|
+
}
|
96
|
+
gpr_thd_id thd;
|
97
|
+
gpr_thd_options opt = gpr_thd_options_default();
|
98
|
+
gpr_thd_options_set_joinable(&opt);
|
99
|
+
gpr_thd_new(&thd, timer_thread, NULL, &opt);
|
100
|
+
}
|
101
|
+
|
102
|
+
void grpc_timer_manager_tick() {
|
103
|
+
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
|
104
|
+
gpr_timespec next = gpr_inf_future(GPR_CLOCK_MONOTONIC);
|
105
|
+
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
|
106
|
+
grpc_timer_check(&exec_ctx, now, &next);
|
107
|
+
grpc_exec_ctx_finish(&exec_ctx);
|
108
|
+
}
|
109
|
+
|
110
|
+
// Main loop of each timer manager thread.  Alternates between executing due
// timers (via grpc_timer_check) and waiting on g_cv_wait - either with the
// next timer's deadline (the single "timed waiter") or forever (all other
// threads).  Exits when g_threaded is cleared, then registers itself on
// g_completed_threads so gc_completed_threads() can join it later.
static void timer_thread(void *unused) {
  // this threads exec_ctx: we try to run things through to completion here
  // since it's easy to spin up new threads
  grpc_exec_ctx exec_ctx =
      GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
  const gpr_timespec inf_future = gpr_inf_future(GPR_CLOCK_MONOTONIC);
  for (;;) {
    gpr_timespec next = inf_future;
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    // check timer state, updates next to the next time to run a check
    if (grpc_timer_check(&exec_ctx, now, &next)) {
      // if there's something to execute...
      gpr_mu_lock(&g_mu);
      // remove a waiter from the pool, and start another thread if necessary
      --g_waiter_count;
      if (g_waiter_count == 0 && g_threaded) {
        // we were the last waiter: spawn a replacement so deadlines keep
        // being watched while we run callbacks (drops the lock)
        start_timer_thread_and_unlock();
      } else {
        // if there's no thread waiting with a timeout, kick an existing waiter
        // so that the next deadline is not missed
        if (!g_has_timed_waiter) {
          if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
            gpr_log(GPR_DEBUG, "kick untimed waiter");
          }
          gpr_cv_signal(&g_cv_wait);
        }
        gpr_mu_unlock(&g_mu);
      }
      // without our lock, flush the exec_ctx
      grpc_exec_ctx_flush(&exec_ctx);
      gpr_mu_lock(&g_mu);
      // garbage collect any threads hanging out that are dead
      gc_completed_threads();
      // get ready to wait again
      ++g_waiter_count;
      gpr_mu_unlock(&g_mu);
    } else {
      gpr_mu_lock(&g_mu);
      // if we're not threaded anymore, leave
      // (note: break exits the loop still holding g_mu; the termination code
      // below releases it)
      if (!g_threaded) break;
      // if there's no timed waiter, we should become one: that waiter waits
      // only until the next timer should expire
      // all other timers wait forever
      // (initialize to generation-1 so the later equality test is false
      // unless we actually become the timed waiter below)
      uint64_t my_timed_waiter_generation = g_timed_waiter_generation - 1;
      if (!g_has_timed_waiter) {
        g_has_timed_waiter = true;
        // we use a generation counter to track the timed waiter so we can
        // cancel an existing one quickly (and when it actually times out it'll
        // figure stuff out instead of incurring a wakeup)
        my_timed_waiter_generation = ++g_timed_waiter_generation;
        if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
          gpr_log(GPR_DEBUG, "sleep for a while");
        }
      } else {
        // someone else is the timed waiter: wait without a deadline
        next = inf_future;
        if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
          gpr_log(GPR_DEBUG, "sleep until kicked");
        }
      }
      gpr_cv_wait(&g_cv_wait, &g_mu, next);
      if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
        gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
                my_timed_waiter_generation == g_timed_waiter_generation,
                g_kicked);
      }
      // if this was the timed waiter, then we need to check timers, and flag
      // that there's now no timed waiter... we'll look for a replacement if
      // there's work to do after checking timers (code above)
      if (my_timed_waiter_generation == g_timed_waiter_generation) {
        g_has_timed_waiter = false;
      }
      // if this was a kick from the timer system, consume it (and don't stop
      // this thread yet)
      if (g_kicked) {
        grpc_timer_consume_kick();
        g_kicked = false;
      }
      gpr_mu_unlock(&g_mu);
    }
  }
  // terminate the thread: drop the waiter count, thread count, and let whomever
  // stopped the threading stuff know that we're done
  // (g_mu is still held here - acquired just before the break above)
  --g_waiter_count;
  --g_thread_count;
  if (0 == g_thread_count) {
    // last thread out: wake stop_threads()
    gpr_cv_signal(&g_cv_shutdown);
  }
  // queue ourselves for joining by gc_completed_threads()
  completed_thread *ct = gpr_malloc(sizeof(*ct));
  ct->t = gpr_thd_currentid();
  ct->next = g_completed_threads;
  g_completed_threads = ct;
  gpr_mu_unlock(&g_mu);
  grpc_exec_ctx_finish(&exec_ctx);
  if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
    gpr_log(GPR_DEBUG, "End timer thread");
  }
}
static void start_threads(void) {
|
209
|
+
gpr_mu_lock(&g_mu);
|
210
|
+
if (!g_threaded) {
|
211
|
+
g_threaded = true;
|
212
|
+
start_timer_thread_and_unlock();
|
213
|
+
} else {
|
214
|
+
g_threaded = false;
|
215
|
+
gpr_mu_unlock(&g_mu);
|
216
|
+
}
|
217
|
+
}
|
218
|
+
|
219
|
+
void grpc_timer_manager_init(void) {
|
220
|
+
gpr_mu_init(&g_mu);
|
221
|
+
gpr_cv_init(&g_cv_wait);
|
222
|
+
gpr_cv_init(&g_cv_shutdown);
|
223
|
+
g_threaded = false;
|
224
|
+
g_thread_count = 0;
|
225
|
+
g_waiter_count = 0;
|
226
|
+
g_completed_threads = NULL;
|
227
|
+
|
228
|
+
start_threads();
|
229
|
+
}
|
230
|
+
|
231
|
+
// Ask every timer thread to exit and block until all of them have terminated
// and been joined.  No-op if threading is already off.
static void stop_threads(void) {
  gpr_mu_lock(&g_mu);
  if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
    gpr_log(GPR_DEBUG, "stop timer threads: threaded=%d", g_threaded);
  }
  if (g_threaded) {
    // clear the flag under the lock, then wake every waiter so each thread
    // sees !g_threaded and leaves its loop
    g_threaded = false;
    gpr_cv_broadcast(&g_cv_wait);
    if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
      gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
    }
    while (g_thread_count > 0) {
      // the last exiting thread signals g_cv_shutdown once g_thread_count
      // reaches zero (see timer_thread)
      gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
      if (GRPC_TRACER_ON(grpc_timer_check_trace)) {
        gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
      }
      // join/free any threads already on the completed list
      // (temporarily drops and re-acquires g_mu)
      gc_completed_threads();
    }
  }
  gpr_mu_unlock(&g_mu);
}
void grpc_timer_manager_shutdown(void) {
|
254
|
+
stop_threads();
|
255
|
+
|
256
|
+
gpr_mu_destroy(&g_mu);
|
257
|
+
gpr_cv_destroy(&g_cv_wait);
|
258
|
+
gpr_cv_destroy(&g_cv_shutdown);
|
259
|
+
}
|
260
|
+
|
261
|
+
// Enable or disable background timer threads at runtime.
// threaded=true brings the thread pool up; threaded=false tears it down
// (blocking until all threads have exited).
void grpc_timer_manager_set_threading(bool threaded) {
  if (!threaded) {
    stop_threads();
  } else {
    start_threads();
  }
}
void grpc_kick_poller(void) {
|
270
|
+
gpr_mu_lock(&g_mu);
|
271
|
+
g_kicked = true;
|
272
|
+
g_has_timed_waiter = false;
|
273
|
+
++g_timed_waiter_generation;
|
274
|
+
gpr_cv_signal(&g_cv_wait);
|
275
|
+
gpr_mu_unlock(&g_mu);
|
276
|
+
}
|