grpc 1.6.7 → 1.7.0.pre1
Potentially problematic release: this version of grpc has been flagged as potentially problematic.
- checksums.yaml +4 -4
- data/Makefile +579 -77
- data/include/grpc/byte_buffer.h +1 -63
- data/include/grpc/compression.h +27 -5
- data/include/grpc/fork.h +24 -0
- data/include/grpc/grpc.h +12 -6
- data/include/grpc/grpc_security.h +28 -7
- data/include/grpc/impl/codegen/atm.h +1 -0
- data/include/grpc/impl/codegen/byte_buffer.h +86 -0
- data/include/grpc/impl/codegen/compression_types.h +63 -5
- data/include/grpc/impl/codegen/fork.h +48 -0
- data/include/grpc/impl/codegen/grpc_types.h +26 -9
- data/include/grpc/impl/codegen/port_platform.h +11 -4
- data/include/grpc/impl/codegen/slice.h +6 -1
- data/include/grpc/impl/codegen/sync.h +3 -1
- data/include/grpc/impl/codegen/sync_custom.h +36 -0
- data/include/grpc/module.modulemap +75 -3
- data/include/grpc/slice.h +1 -5
- data/include/grpc/support/sync_custom.h +24 -0
- data/src/core/ext/census/base_resources.c +14 -14
- data/src/core/ext/census/context.c +7 -5
- data/src/core/ext/census/grpc_filter.c +12 -14
- data/src/core/ext/census/mlog.c +2 -1
- data/src/core/ext/census/resource.c +13 -9
- data/src/core/ext/filters/client_channel/channel_connectivity.c +15 -8
- data/src/core/ext/filters/client_channel/client_channel.c +418 -439
- data/src/core/ext/filters/client_channel/client_channel_factory.c +4 -5
- data/src/core/ext/filters/client_channel/client_channel_plugin.c +2 -2
- data/src/core/ext/filters/client_channel/http_connect_handshaker.c +7 -5
- data/src/core/ext/filters/client_channel/http_proxy.c +17 -21
- data/src/core/ext/filters/client_channel/lb_policy.c +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +7 -7
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +371 -257
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +7 -5
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +25 -14
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +16 -16
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +33 -28
- data/src/core/ext/filters/client_channel/lb_policy_factory.c +10 -8
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +1 -1
- data/src/core/ext/filters/client_channel/proxy_mapper_registry.c +1 -1
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +7 -6
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +62 -28
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +29 -23
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +25 -14
- data/src/core/ext/filters/client_channel/retry_throttle.c +9 -6
- data/src/core/ext/filters/client_channel/subchannel.c +30 -30
- data/src/core/ext/filters/client_channel/subchannel.h +1 -4
- data/src/core/ext/filters/client_channel/subchannel_index.c +31 -15
- data/src/core/ext/filters/client_channel/subchannel_index.h +7 -0
- data/src/core/ext/filters/client_channel/uri_parser.c +4 -3
- data/src/core/ext/filters/deadline/deadline_filter.c +78 -39
- data/src/core/ext/filters/deadline/deadline_filter.h +7 -1
- data/src/core/ext/filters/http/client/http_client_filter.c +14 -14
- data/src/core/ext/filters/http/http_filters_plugin.c +1 -1
- data/src/core/ext/filters/http/message_compress/message_compress_filter.c +240 -175
- data/src/core/ext/filters/http/server/http_server_filter.c +48 -36
- data/src/core/ext/filters/load_reporting/{load_reporting_filter.c → server_load_reporting_filter.c} +11 -12
- data/src/core/ext/filters/load_reporting/{load_reporting_filter.h → server_load_reporting_filter.h} +6 -5
- data/src/core/ext/filters/load_reporting/{load_reporting.c → server_load_reporting_plugin.c} +19 -13
- data/src/core/ext/filters/load_reporting/{load_reporting.h → server_load_reporting_plugin.h} +4 -3
- data/src/core/ext/filters/max_age/max_age_filter.c +2 -3
- data/src/core/ext/filters/message_size/message_size_filter.c +4 -2
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +0 -1
- data/src/core/ext/transport/chttp2/client/chttp2_connector.c +5 -5
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
- data/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +20 -18
- data/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +1 -0
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +493 -210
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +1 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.c +9 -8
- data/src/core/ext/transport/chttp2/transport/frame_data.c +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +2 -2
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +5 -4
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +10 -9
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +9 -5
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +62 -41
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +52 -8
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +2 -2
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.c +3 -2
- data/src/core/ext/transport/chttp2/transport/internal.h +60 -30
- data/src/core/ext/transport/chttp2/transport/parsing.c +16 -5
- data/src/core/ext/transport/chttp2/transport/stream_lists.c +36 -16
- data/src/core/ext/transport/chttp2/transport/stream_map.c +6 -4
- data/src/core/ext/transport/chttp2/transport/writing.c +133 -105
- data/src/core/ext/transport/inproc/inproc_transport.c +61 -65
- data/src/core/lib/channel/channel_args.c +112 -12
- data/src/core/lib/channel/channel_args.h +31 -0
- data/src/core/lib/channel/channel_stack.c +1 -15
- data/src/core/lib/channel/channel_stack.h +3 -10
- data/src/core/lib/channel/channel_stack_builder.c +41 -10
- data/src/core/lib/channel/channel_stack_builder.h +10 -0
- data/src/core/lib/channel/connected_channel.c +94 -23
- data/src/core/lib/channel/handshaker.c +8 -6
- data/src/core/lib/channel/handshaker_registry.c +1 -1
- data/src/core/lib/compression/algorithm_metadata.h +14 -0
- data/src/core/lib/compression/compression.c +101 -1
- data/src/core/lib/compression/stream_compression.c +32 -146
- data/src/core/lib/compression/stream_compression.h +28 -4
- data/src/core/lib/compression/stream_compression_gzip.c +228 -0
- data/src/core/lib/{iomgr/ev_epoll_thread_pool_linux.h → compression/stream_compression_gzip.h} +5 -7
- data/src/core/lib/compression/stream_compression_identity.c +94 -0
- data/src/core/lib/{iomgr/ev_epoll_limited_pollers_linux.h → compression/stream_compression_identity.h} +7 -8
- data/src/core/lib/debug/stats.c +174 -0
- data/src/core/lib/debug/stats.h +61 -0
- data/src/core/lib/debug/stats_data.c +687 -0
- data/src/core/lib/debug/stats_data.h +470 -0
- data/src/core/lib/debug/trace.c +3 -3
- data/src/core/lib/debug/trace.h +1 -1
- data/src/core/lib/http/format_request.c +1 -1
- data/src/core/lib/http/httpcli.c +8 -7
- data/src/core/lib/http/httpcli_security_connector.c +2 -1
- data/src/core/lib/http/parser.c +4 -3
- data/src/core/lib/iomgr/call_combiner.c +202 -0
- data/src/core/lib/iomgr/call_combiner.h +121 -0
- data/src/core/lib/iomgr/closure.c +18 -4
- data/src/core/lib/iomgr/combiner.c +11 -4
- data/src/core/lib/iomgr/error.c +26 -24
- data/src/core/lib/iomgr/ev_epoll1_linux.c +395 -212
- data/src/core/lib/iomgr/ev_epollex_linux.c +141 -128
- data/src/core/lib/iomgr/ev_epollsig_linux.c +44 -41
- data/src/core/lib/iomgr/ev_poll_posix.c +99 -75
- data/src/core/lib/iomgr/ev_posix.c +5 -9
- data/src/core/lib/iomgr/ev_posix.h +1 -1
- data/src/core/lib/iomgr/exec_ctx.h +6 -1
- data/src/core/lib/iomgr/executor.c +142 -36
- data/src/core/lib/iomgr/executor.h +6 -1
- data/src/core/lib/iomgr/fork_posix.c +88 -0
- data/src/core/lib/iomgr/fork_windows.c +39 -0
- data/src/core/lib/iomgr/iocp_windows.c +2 -0
- data/src/core/lib/iomgr/iomgr.c +2 -8
- data/src/core/lib/iomgr/is_epollexclusive_available.c +6 -6
- data/src/core/lib/iomgr/load_file.c +2 -1
- data/src/core/lib/iomgr/polling_entity.c +9 -9
- data/src/core/lib/iomgr/polling_entity.h +7 -1
- data/src/core/lib/iomgr/pollset.h +1 -1
- data/src/core/lib/iomgr/pollset_uv.c +1 -1
- data/src/core/lib/iomgr/pollset_windows.c +3 -3
- data/src/core/lib/iomgr/port.h +4 -0
- data/src/core/lib/iomgr/resolve_address_posix.c +8 -7
- data/src/core/lib/iomgr/resolve_address_windows.c +1 -1
- data/src/core/lib/iomgr/resource_quota.c +24 -19
- data/src/core/lib/iomgr/socket_factory_posix.c +4 -4
- data/src/core/lib/iomgr/socket_mutator.c +4 -4
- data/src/core/lib/iomgr/socket_utils_windows.c +0 -4
- data/src/core/lib/iomgr/tcp_client_posix.c +5 -4
- data/src/core/lib/iomgr/tcp_posix.c +181 -20
- data/src/core/lib/iomgr/tcp_server_posix.c +8 -7
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.c +1 -1
- data/src/core/lib/iomgr/timer.h +4 -0
- data/src/core/lib/iomgr/timer_generic.c +138 -3
- data/src/core/lib/iomgr/timer_generic.h +3 -0
- data/src/core/lib/iomgr/timer_heap.c +4 -4
- data/src/core/lib/iomgr/timer_manager.c +2 -2
- data/src/core/lib/iomgr/timer_uv.c +2 -0
- data/src/core/lib/iomgr/udp_server.c +10 -8
- data/src/core/lib/iomgr/unix_sockets_posix.c +4 -2
- data/src/core/lib/iomgr/wakeup_fd_cv.c +9 -8
- data/src/core/lib/iomgr/wakeup_fd_cv.h +2 -2
- data/src/core/lib/json/json.c +1 -1
- data/src/core/lib/json/json_string.c +13 -13
- data/src/core/lib/profiling/timers.h +18 -8
- data/src/core/lib/security/credentials/composite/composite_credentials.c +4 -10
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +2 -1
- data/src/core/lib/security/credentials/jwt/jwt_verifier.c +11 -6
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +4 -4
- data/src/core/lib/security/credentials/plugin/plugin_credentials.c +132 -50
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +2 -0
- data/src/core/lib/security/transport/client_auth_filter.c +68 -135
- data/src/core/lib/security/transport/secure_endpoint.c +110 -90
- data/src/core/lib/security/transport/secure_endpoint.h +8 -3
- data/src/core/lib/security/transport/security_connector.c +10 -12
- data/src/core/lib/security/transport/security_handshaker.c +45 -24
- data/src/core/lib/security/transport/server_auth_filter.c +71 -20
- data/src/core/lib/slice/b64.c +2 -2
- data/src/core/lib/slice/slice.c +16 -14
- data/src/core/lib/slice/slice_buffer.c +5 -4
- data/src/core/lib/slice/slice_hash_table.c +3 -2
- data/src/core/lib/slice/slice_intern.c +8 -5
- data/src/core/lib/support/block_annotate.h +22 -0
- data/src/core/lib/support/fork.c +62 -0
- data/src/core/lib/support/fork.h +35 -0
- data/src/core/lib/support/log_linux.c +1 -1
- data/src/core/lib/support/string.c +15 -1
- data/src/core/lib/support/string.h +3 -0
- data/src/core/lib/support/thd_internal.h +6 -0
- data/src/core/lib/support/thd_posix.c +56 -0
- data/src/core/lib/support/thd_windows.c +2 -0
- data/src/core/lib/surface/alarm.c +22 -15
- data/src/core/lib/surface/byte_buffer.c +4 -2
- data/src/core/lib/surface/call.c +442 -141
- data/src/core/lib/surface/call.h +6 -6
- data/src/core/lib/surface/call_log_batch.c +1 -1
- data/src/core/lib/surface/call_test_only.h +12 -0
- data/src/core/lib/surface/channel.c +39 -4
- data/src/core/lib/surface/channel_init.c +6 -6
- data/src/core/lib/surface/channel_ping.c +2 -2
- data/src/core/lib/surface/completion_queue.c +56 -57
- data/src/core/lib/surface/init.c +17 -3
- data/src/core/lib/surface/init_secure.c +5 -1
- data/src/core/lib/surface/lame_client.cc +9 -10
- data/src/core/lib/surface/server.c +81 -72
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/byte_stream.c +1 -0
- data/src/core/lib/transport/byte_stream.h +3 -1
- data/src/core/lib/transport/connectivity_state.c +2 -1
- data/src/core/lib/transport/metadata.c +7 -4
- data/src/core/lib/transport/metadata_batch.c +18 -16
- data/src/core/lib/transport/metadata_batch.h +1 -0
- data/src/core/lib/transport/service_config.c +5 -3
- data/src/core/lib/transport/static_metadata.c +395 -614
- data/src/core/lib/transport/static_metadata.h +165 -133
- data/src/core/lib/transport/status_conversion.c +1 -1
- data/src/core/lib/transport/transport.c +20 -20
- data/src/core/lib/transport/transport.h +8 -5
- data/src/core/lib/transport/transport_impl.h +0 -3
- data/src/core/lib/transport/transport_op_string.c +8 -1
- data/src/core/plugin_registry/grpc_plugin_registry.c +4 -4
- data/src/core/tsi/fake_transport_security.c +133 -2
- data/src/core/tsi/fake_transport_security.h +5 -0
- data/src/core/tsi/ssl_transport_security.c +105 -8
- data/src/core/tsi/ssl_transport_security.h +30 -7
- data/src/core/tsi/transport_security.h +8 -2
- data/src/core/tsi/transport_security_grpc.c +20 -13
- data/src/core/tsi/transport_security_grpc.h +13 -9
- data/src/ruby/ext/grpc/rb_call_credentials.c +6 -2
- data/src/ruby/ext/grpc/rb_grpc.c +1 -1
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +30 -20
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +50 -35
- data/src/ruby/lib/grpc.rb +1 -0
- data/src/ruby/lib/grpc/generic/active_call.rb +34 -9
- data/src/ruby/lib/grpc/generic/bidi_call.rb +19 -10
- data/src/ruby/lib/grpc/generic/client_stub.rb +95 -38
- data/src/ruby/lib/grpc/generic/interceptor_registry.rb +53 -0
- data/src/ruby/lib/grpc/generic/interceptors.rb +186 -0
- data/src/ruby/lib/grpc/generic/rpc_desc.rb +66 -20
- data/src/ruby/lib/grpc/generic/rpc_server.rb +15 -3
- data/src/ruby/lib/grpc/google_rpc_status_utils.rb +1 -2
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb +1 -0
- data/src/ruby/spec/channel_connection_spec.rb +1 -34
- data/src/ruby/spec/client_server_spec.rb +188 -82
- data/src/ruby/spec/generic/active_call_spec.rb +65 -11
- data/src/ruby/spec/generic/client_interceptors_spec.rb +153 -0
- data/src/ruby/spec/generic/interceptor_registry_spec.rb +65 -0
- data/src/ruby/spec/generic/rpc_desc_spec.rb +38 -0
- data/src/ruby/spec/generic/rpc_server_spec.rb +1 -34
- data/src/ruby/spec/generic/server_interceptors_spec.rb +218 -0
- data/src/ruby/spec/spec_helper.rb +4 -0
- data/src/ruby/spec/support/helpers.rb +73 -0
- data/src/ruby/spec/support/services.rb +147 -0
- data/third_party/cares/ares_build.h +21 -62
- data/third_party/cares/cares/ares.h +23 -1
- data/third_party/cares/cares/ares__close_sockets.c +2 -2
- data/third_party/cares/cares/ares_create_query.c +3 -3
- data/third_party/cares/cares/ares_expand_name.c +6 -2
- data/third_party/cares/cares/ares_expand_string.c +1 -1
- data/third_party/cares/cares/ares_getnameinfo.c +27 -7
- data/third_party/cares/cares/ares_init.c +407 -39
- data/third_party/cares/cares/ares_library_init.c +10 -0
- data/third_party/cares/cares/ares_library_init.h +2 -1
- data/third_party/cares/cares/ares_nowarn.c +6 -6
- data/third_party/cares/cares/ares_nowarn.h +2 -2
- data/third_party/cares/cares/ares_parse_naptr_reply.c +6 -1
- data/third_party/cares/cares/ares_private.h +11 -0
- data/third_party/cares/cares/ares_process.c +126 -37
- data/third_party/cares/cares/ares_version.h +2 -2
- data/third_party/cares/cares/ares_writev.c +2 -2
- data/third_party/cares/cares/config-win32.h +8 -34
- data/third_party/cares/cares/inet_net_pton.c +2 -2
- data/third_party/cares/cares/setup_once.h +5 -5
- data/third_party/cares/config_darwin/ares_config.h +98 -196
- data/third_party/cares/config_linux/ares_config.h +103 -203
- metadata +47 -20
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +0 -1957
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +0 -1182
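Among the Ruby-side additions listed above (interceptors.rb, interceptor_registry.rb, the interceptors plumbing in client_stub.rb and rpc_server.rb, and the new interceptor specs), this release introduces client and server interceptors for the Ruby gem. The sketch below is a minimal, hypothetical usage example, assuming the GRPC::ClientInterceptor base class and the interceptors: option added by those files; the service stub name and log message are illustrative and not taken from this diff.

    require 'grpc'

    # A client interceptor that logs each unary call before letting it proceed.
    class LoggingInterceptor < GRPC::ClientInterceptor
      def request_response(request: nil, call: nil, method: nil, metadata: nil)
        GRPC.logger.info("calling #{method} with a #{request.class}")
        yield  # run the next interceptor in the chain, or the call itself
      end
    end

    # Interceptors are passed to the generated stub when it is constructed
    # (Helloworld::Greeter::Stub is a placeholder for any generated service stub).
    stub = Helloworld::Greeter::Stub.new(
      'localhost:50051',
      :this_channel_is_insecure,
      interceptors: [LoggingInterceptor.new]
    )

Server-side interceptors follow the same pattern via GRPC::ServerInterceptor and the interceptors: option on GRPC::RpcServer.new.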
@@ -109,7 +109,7 @@ typedef struct {
 static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
-  wrapped_closure *wc = arg;
+  wrapped_closure *wc = (wrapped_closure *)arg;
   grpc_iomgr_cb_func cb = wc->cb;
   void *cb_arg = wc->cb_arg;
   gpr_free(wc);
@@ -124,7 +124,7 @@ grpc_closure *grpc_closure_create(const char *file, int line,
 grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
                                   grpc_closure_scheduler *scheduler) {
 #endif
-  wrapped_closure *wc = gpr_malloc(sizeof(*wc));
+  wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
   wc->cb = cb;
   wc->cb_arg = cb_arg;
 #ifndef NDEBUG
@@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
   GPR_TIMER_BEGIN("grpc_closure_sched", 0);
   if (c != NULL) {
 #ifndef NDEBUG
-
+    if (c->scheduled) {
+      gpr_log(GPR_ERROR,
+              "Closure already scheduled. (closure: %p, created: [%s:%d], "
+              "previously scheduled at: [%s: %d] run?: %s",
+              c, c->file_created, c->line_created, c->file_initiated,
+              c->line_initiated, c->run ? "true" : "false");
+      abort();
+    }
     c->scheduled = true;
     c->file_initiated = file;
     c->line_initiated = line;
@@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
   while (c != NULL) {
     grpc_closure *next = c->next_data.next;
 #ifndef NDEBUG
-
+    if (c->scheduled) {
+      gpr_log(GPR_ERROR,
+              "Closure already scheduled. (closure: %p, created: [%s:%d], "
+              "previously scheduled at: [%s: %d] run?: %s",
+              c, c->file_created, c->line_created, c->file_initiated,
+              c->line_initiated, c->run ? "true" : "false");
+      abort();
+    }
     c->scheduled = true;
     c->file_initiated = file;
     c->line_initiated = line;
data/src/core/lib/iomgr/combiner.c
CHANGED
@@ -24,6 +24,7 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
+#include "src/core/lib/debug/stats.h"
 #include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/profiling/timers.h"
 
@@ -73,14 +74,15 @@ static const grpc_closure_scheduler_vtable finally_scheduler = {
 static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
 
 grpc_combiner *grpc_combiner_create(void) {
-  grpc_combiner *lock = gpr_zalloc(sizeof(*lock));
+  grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock));
   gpr_ref_init(&lock->refs, 1);
   lock->scheduler.vtable = &scheduler;
   lock->finally_scheduler.vtable = &finally_scheduler;
   gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
   gpr_mpscq_init(&lock->queue);
   grpc_closure_list_init(&lock->final_list);
-  GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
+  GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
+                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
   return lock;
 }
@@ -153,6 +155,7 @@ static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
 
 static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                           grpc_error *error) {
+  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
   GPR_TIMER_BEGIN("combiner.execute", 0);
   grpc_combiner *lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
   gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
@@ -160,6 +163,7 @@ static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                               "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
                               lock, cl, last));
   if (last == 1) {
+    GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx);
     gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
                              (gpr_atm)exec_ctx);
     // first element on this list: add it to the list of combiner locks
@@ -190,11 +194,12 @@ static void move_next(grpc_exec_ctx *exec_ctx) {
 }
 
 static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_combiner *lock = arg;
+  grpc_combiner *lock = (grpc_combiner *)arg;
   push_last_on_exec_ctx(exec_ctx, lock);
 }
 
 static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
+  GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx);
   move_next(exec_ctx);
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
   GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
@@ -325,6 +330,7 @@ static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
 
 static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
                                   grpc_closure *closure, grpc_error *error) {
+  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx);
   grpc_combiner *lock =
       COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
@@ -350,7 +356,8 @@ static void combiner_finally_exec(grpc_exec_ctx *exec_ctx,
 
 static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
                             grpc_error *error) {
-  combiner_finally_exec(exec_ctx, closure,
+  combiner_finally_exec(exec_ctx, (grpc_closure *)closure,
+                        GRPC_ERROR_REF(error));
 }
 
 grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner) {
data/src/core/lib/iomgr/error.c
CHANGED
@@ -211,7 +211,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
 #ifndef NDEBUG
   grpc_error *orig = *err;
 #endif
-  *err = gpr_realloc(
+  *err = (grpc_error *)gpr_realloc(
       *err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
 #ifndef NDEBUG
   if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@@ -278,13 +278,13 @@ static void internal_set_time(grpc_error **err, grpc_error_times which,
   memcpy((*err)->arena + slot, &value, sizeof(value));
 }
 
-static void internal_add_error(grpc_error **err, grpc_error *
-  grpc_linked_error new_last = {
+static void internal_add_error(grpc_error **err, grpc_error *new_err) {
+  grpc_linked_error new_last = {new_err, UINT8_MAX};
   uint8_t slot = get_placement(err, sizeof(grpc_linked_error));
   if (slot == UINT8_MAX) {
-    gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err,
-            grpc_error_string(
-    GRPC_ERROR_UNREF(
+    gpr_log(GPR_ERROR, "Error %p is full, dropping error %p = %s", *err,
+            new_err, grpc_error_string(new_err));
+    GRPC_ERROR_UNREF(new_err);
     return;
   }
   if ((*err)->first_err == UINT8_MAX) {
@@ -321,8 +321,8 @@ grpc_error *grpc_error_create(const char *file, int line, grpc_slice desc,
   uint8_t initial_arena_capacity = (uint8_t)(
       DEFAULT_ERROR_CAPACITY +
       (uint8_t)(num_referencing * SLOTS_PER_LINKED_ERROR) + SURPLUS_CAPACITY);
-  grpc_error *err =
-
+  grpc_error *err = (grpc_error *)gpr_malloc(
+      sizeof(*err) + initial_arena_capacity * sizeof(intptr_t));
   if (err == NULL) {  // TODO(ctiller): make gpr_malloc return NULL
     return GRPC_ERROR_OOM;
   }
@@ -406,7 +406,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
   if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
     new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
   }
-  out = gpr_malloc(sizeof(*in) +
+  out = (grpc_error *)gpr_malloc(sizeof(*in) +
+                                 new_arena_capacity * sizeof(intptr_t));
 #ifndef NDEBUG
   if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
     gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -431,10 +432,10 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
 grpc_error *grpc_error_set_int(grpc_error *src, grpc_error_ints which,
                                intptr_t value) {
   GPR_TIMER_BEGIN("grpc_error_set_int", 0);
-  grpc_error *
-  internal_set_int(&
+  grpc_error *new_err = copy_error_and_unref(src);
+  internal_set_int(&new_err, which, value);
   GPR_TIMER_END("grpc_error_set_int", 0);
-  return
+  return new_err;
 }
 
 typedef struct {
@@ -476,10 +477,10 @@ bool grpc_error_get_int(grpc_error *err, grpc_error_ints which, intptr_t *p) {
 grpc_error *grpc_error_set_str(grpc_error *src, grpc_error_strs which,
                                grpc_slice str) {
   GPR_TIMER_BEGIN("grpc_error_set_str", 0);
-  grpc_error *
-  internal_set_str(&
+  grpc_error *new_err = copy_error_and_unref(src);
+  internal_set_str(&new_err, which, str);
   GPR_TIMER_END("grpc_error_set_str", 0);
-  return
+  return new_err;
 }
 
 bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
@@ -506,10 +507,10 @@ bool grpc_error_get_str(grpc_error *err, grpc_error_strs which,
 
 grpc_error *grpc_error_add_child(grpc_error *src, grpc_error *child) {
   GPR_TIMER_BEGIN("grpc_error_add_child", 0);
-  grpc_error *
-  internal_add_error(&
+  grpc_error *new_err = copy_error_and_unref(src);
+  internal_add_error(&new_err, child);
   GPR_TIMER_END("grpc_error_add_child", 0);
-  return
+  return new_err;
 }
 
 static const char *no_error_string = "\"No Error\"";
@@ -530,7 +531,7 @@ typedef struct {
 static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
   if (*sz == *cap) {
     *cap = GPR_MAX(8, 3 * *cap / 2);
-    *s = gpr_realloc(*s, *cap);
+    *s = (char *)gpr_realloc(*s, *cap);
   }
   (*s)[(*sz)++] = c;
 }
@@ -582,7 +583,8 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
 static void append_kv(kv_pairs *kvs, char *key, char *value) {
   if (kvs->num_kvs == kvs->cap_kvs) {
     kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
-    kvs->kvs =
+    kvs->kvs =
+        (kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
   }
   kvs->kvs[kvs->num_kvs].key = key;
   kvs->kvs[kvs->num_kvs].value = value;
@@ -639,7 +641,7 @@ static char *key_time(grpc_error_times which) {
 
 static char *fmt_time(gpr_timespec tm) {
   char *out;
-  char *pfx = "!!";
+  const char *pfx = "!!";
   switch (tm.clock_type) {
     case GPR_CLOCK_MONOTONIC:
       pfx = "@monotonic:";
@@ -695,8 +697,8 @@ static char *errs_string(grpc_error *err) {
 }
 
 static int cmp_kvs(const void *a, const void *b) {
-  const kv_pair *ka = a;
-  const kv_pair *kb = b;
+  const kv_pair *ka = (const kv_pair *)a;
+  const kv_pair *kb = (const kv_pair *)b;
   return strcmp(ka->key, kb->key);
 }
 
@@ -731,7 +733,7 @@ const char *grpc_error_string(grpc_error *err) {
   void *p = (void *)gpr_atm_acq_load(&err->atomics.error_string);
   if (p != NULL) {
     GPR_TIMER_END("grpc_error_string", 0);
-    return p;
+    return (const char *)p;
   }
 
   kv_pairs kvs;
@@ -39,6 +39,7 @@
|
|
39
39
|
#include <grpc/support/tls.h>
|
40
40
|
#include <grpc/support/useful.h>
|
41
41
|
|
42
|
+
#include "src/core/lib/debug/stats.h"
|
42
43
|
#include "src/core/lib/iomgr/ev_posix.h"
|
43
44
|
#include "src/core/lib/iomgr/iomgr_internal.h"
|
44
45
|
#include "src/core/lib/iomgr/lockfree_event.h"
|
@@ -48,7 +49,60 @@
|
|
48
49
|
#include "src/core/lib/support/string.h"
|
49
50
|
|
50
51
|
static grpc_wakeup_fd global_wakeup_fd;
|
51
|
-
|
52
|
+
|
53
|
+
/*******************************************************************************
|
54
|
+
* Singleton epoll set related fields
|
55
|
+
*/
|
56
|
+
|
57
|
+
#define MAX_EPOLL_EVENTS 100
|
58
|
+
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1
|
59
|
+
|
60
|
+
/* NOTE ON SYNCHRONIZATION:
|
61
|
+
* - Fields in this struct are only modified by the designated poller. Hence
|
62
|
+
* there is no need for any locks to protect the struct.
|
63
|
+
* - num_events and cursor fields have to be of atomic type to provide memory
|
64
|
+
* visibility guarantees only. i.e In case of multiple pollers, the designated
|
65
|
+
* polling thread keeps changing; the thread that wrote these values may be
|
66
|
+
* different from the thread reading the values
|
67
|
+
*/
|
68
|
+
typedef struct epoll_set {
|
69
|
+
int epfd;
|
70
|
+
|
71
|
+
/* The epoll_events after the last call to epoll_wait() */
|
72
|
+
struct epoll_event events[MAX_EPOLL_EVENTS];
|
73
|
+
|
74
|
+
/* The number of epoll_events after the last call to epoll_wait() */
|
75
|
+
gpr_atm num_events;
|
76
|
+
|
77
|
+
/* Index of the first event in epoll_events that has to be processed. This
|
78
|
+
* field is only valid if num_events > 0 */
|
79
|
+
gpr_atm cursor;
|
80
|
+
} epoll_set;
|
81
|
+
|
82
|
+
/* The global singleton epoll set */
|
83
|
+
static epoll_set g_epoll_set;
|
84
|
+
|
85
|
+
/* Must be called *only* once */
|
86
|
+
static bool epoll_set_init() {
|
87
|
+
g_epoll_set.epfd = epoll_create1(EPOLL_CLOEXEC);
|
88
|
+
if (g_epoll_set.epfd < 0) {
|
89
|
+
gpr_log(GPR_ERROR, "epoll unavailable");
|
90
|
+
return false;
|
91
|
+
}
|
92
|
+
|
93
|
+
gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
|
94
|
+
gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
|
95
|
+
gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
|
96
|
+
return true;
|
97
|
+
}
|
98
|
+
|
99
|
+
/* epoll_set_init() MUST be called before calling this. */
|
100
|
+
static void epoll_set_shutdown() {
|
101
|
+
if (g_epoll_set.epfd >= 0) {
|
102
|
+
close(g_epoll_set.epfd);
|
103
|
+
g_epoll_set.epfd = -1;
|
104
|
+
}
|
105
|
+
}
|
52
106
|
|
53
107
|
/*******************************************************************************
|
54
108
|
* Fd Declarations
|
@@ -91,7 +145,7 @@ static const char *kick_state_string(kick_state st) {
|
|
91
145
|
}
|
92
146
|
|
93
147
|
struct grpc_pollset_worker {
|
94
|
-
kick_state
|
148
|
+
kick_state state;
|
95
149
|
int kick_state_mutator; // which line of code last changed kick state
|
96
150
|
bool initialized_cv;
|
97
151
|
grpc_pollset_worker *next;
|
@@ -100,29 +154,29 @@ struct grpc_pollset_worker {
|
|
100
154
|
grpc_closure_list schedule_on_end_work;
|
101
155
|
};
|
102
156
|
|
103
|
-
#define SET_KICK_STATE(worker,
|
157
|
+
#define SET_KICK_STATE(worker, kick_state) \
|
104
158
|
do { \
|
105
|
-
(worker)->
|
159
|
+
(worker)->state = (kick_state); \
|
106
160
|
(worker)->kick_state_mutator = __LINE__; \
|
107
161
|
} while (false)
|
108
162
|
|
109
|
-
#define
|
163
|
+
#define MAX_NEIGHBORHOODS 1024
|
110
164
|
|
111
|
-
typedef struct
|
165
|
+
typedef struct pollset_neighborhood {
|
112
166
|
gpr_mu mu;
|
113
167
|
grpc_pollset *active_root;
|
114
168
|
char pad[GPR_CACHELINE_SIZE];
|
115
|
-
}
|
169
|
+
} pollset_neighborhood;
|
116
170
|
|
117
171
|
struct grpc_pollset {
|
118
172
|
gpr_mu mu;
|
119
|
-
|
120
|
-
bool
|
173
|
+
pollset_neighborhood *neighborhood;
|
174
|
+
bool reassigning_neighborhood;
|
121
175
|
grpc_pollset_worker *root_worker;
|
122
176
|
bool kicked_without_poller;
|
123
177
|
|
124
178
|
/* Set to true if the pollset is observed to have no workers available to
|
125
|
-
|
179
|
+
poll */
|
126
180
|
bool seen_inactive;
|
127
181
|
bool shutting_down; /* Is the pollset shutting down ? */
|
128
182
|
grpc_closure *shutdown_closure; /* Called after after shutdown is complete */
|
@@ -206,7 +260,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
|
|
206
260
|
gpr_mu_unlock(&fd_freelist_mu);
|
207
261
|
|
208
262
|
if (new_fd == NULL) {
|
209
|
-
new_fd = gpr_malloc(sizeof(grpc_fd));
|
263
|
+
new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
|
210
264
|
}
|
211
265
|
|
212
266
|
new_fd->fd = fd;
|
@@ -226,9 +280,10 @@ static grpc_fd *fd_create(int fd, const char *name) {
|
|
226
280
|
#endif
|
227
281
|
gpr_free(fd_name);
|
228
282
|
|
229
|
-
struct epoll_event ev
|
230
|
-
|
231
|
-
|
283
|
+
struct epoll_event ev;
|
284
|
+
ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
|
285
|
+
ev.data.ptr = new_fd;
|
286
|
+
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
|
232
287
|
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
|
233
288
|
}
|
234
289
|
|
@@ -326,9 +381,12 @@ static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
|
|
326
381
|
|
327
382
|
GPR_TLS_DECL(g_current_thread_pollset);
|
328
383
|
GPR_TLS_DECL(g_current_thread_worker);
|
384
|
+
|
385
|
+
/* The designated poller */
|
329
386
|
static gpr_atm g_active_poller;
|
330
|
-
|
331
|
-
static
|
387
|
+
|
388
|
+
static pollset_neighborhood *g_neighborhoods;
|
389
|
+
static size_t g_num_neighborhoods;
|
332
390
|
|
333
391
|
/* Return true if first in list */
|
334
392
|
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
|
@@ -367,8 +425,8 @@ static worker_remove_result worker_remove(grpc_pollset *pollset,
|
|
367
425
|
}
|
368
426
|
}
|
369
427
|
|
370
|
-
static size_t
|
371
|
-
return (size_t)gpr_cpu_current_cpu() %
|
428
|
+
static size_t choose_neighborhood(void) {
|
429
|
+
return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
|
372
430
|
}
|
373
431
|
|
374
432
|
static grpc_error *pollset_global_init(void) {
|
@@ -378,16 +436,18 @@ static grpc_error *pollset_global_init(void) {
|
|
378
436
|
global_wakeup_fd.read_fd = -1;
|
379
437
|
grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
|
380
438
|
if (err != GRPC_ERROR_NONE) return err;
|
381
|
-
struct epoll_event ev
|
382
|
-
|
383
|
-
|
439
|
+
struct epoll_event ev;
|
440
|
+
ev.events = (uint32_t)(EPOLLIN | EPOLLET);
|
441
|
+
ev.data.ptr = &global_wakeup_fd;
|
442
|
+
if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
|
443
|
+
&ev) != 0) {
|
384
444
|
return GRPC_OS_ERROR(errno, "epoll_ctl");
|
385
445
|
}
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
for (size_t i = 0; i <
|
390
|
-
gpr_mu_init(&
|
446
|
+
g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
|
447
|
+
g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
|
448
|
+
sizeof(*g_neighborhoods) * g_num_neighborhoods);
|
449
|
+
for (size_t i = 0; i < g_num_neighborhoods; i++) {
|
450
|
+
gpr_mu_init(&g_neighborhoods[i].mu);
|
391
451
|
}
|
392
452
|
return GRPC_ERROR_NONE;
|
393
453
|
}
|
@@ -396,62 +456,75 @@ static void pollset_global_shutdown(void) {
|
|
396
456
|
gpr_tls_destroy(&g_current_thread_pollset);
|
397
457
|
gpr_tls_destroy(&g_current_thread_worker);
|
398
458
|
if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
|
399
|
-
for (size_t i = 0; i <
|
400
|
-
gpr_mu_destroy(&
|
459
|
+
for (size_t i = 0; i < g_num_neighborhoods; i++) {
|
460
|
+
gpr_mu_destroy(&g_neighborhoods[i].mu);
|
401
461
|
}
|
402
|
-
gpr_free(
|
462
|
+
gpr_free(g_neighborhoods);
|
403
463
|
}
|
404
464
|
|
405
465
|
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
|
406
466
|
gpr_mu_init(&pollset->mu);
|
407
467
|
*mu = &pollset->mu;
|
408
|
-
pollset->
|
468
|
+
pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
|
469
|
+
pollset->reassigning_neighborhood = false;
|
470
|
+
pollset->root_worker = NULL;
|
471
|
+
pollset->kicked_without_poller = false;
|
409
472
|
pollset->seen_inactive = true;
|
473
|
+
pollset->shutting_down = false;
|
474
|
+
pollset->shutdown_closure = NULL;
|
475
|
+
pollset->begin_refs = 0;
|
476
|
+
pollset->next = pollset->prev = NULL;
|
410
477
|
}
|
411
478
|
|
412
479
|
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
|
413
480
|
gpr_mu_lock(&pollset->mu);
|
414
481
|
if (!pollset->seen_inactive) {
|
415
|
-
|
482
|
+
pollset_neighborhood *neighborhood = pollset->neighborhood;
|
416
483
|
gpr_mu_unlock(&pollset->mu);
|
417
|
-
|
418
|
-
gpr_mu_lock(&
|
484
|
+
retry_lock_neighborhood:
|
485
|
+
gpr_mu_lock(&neighborhood->mu);
|
419
486
|
gpr_mu_lock(&pollset->mu);
|
420
487
|
if (!pollset->seen_inactive) {
|
421
|
-
if (pollset->
|
422
|
-
gpr_mu_unlock(&
|
423
|
-
|
488
|
+
if (pollset->neighborhood != neighborhood) {
|
489
|
+
gpr_mu_unlock(&neighborhood->mu);
|
490
|
+
neighborhood = pollset->neighborhood;
|
424
491
|
gpr_mu_unlock(&pollset->mu);
|
425
|
-
goto
|
492
|
+
goto retry_lock_neighborhood;
|
426
493
|
}
|
427
494
|
pollset->prev->next = pollset->next;
|
428
495
|
pollset->next->prev = pollset->prev;
|
429
|
-
if (pollset == pollset->
|
430
|
-
pollset->
|
496
|
+
if (pollset == pollset->neighborhood->active_root) {
|
497
|
+
pollset->neighborhood->active_root =
|
431
498
|
pollset->next == pollset ? NULL : pollset->next;
|
432
499
|
}
|
433
500
|
}
|
434
|
-
gpr_mu_unlock(&pollset->
|
501
|
+
gpr_mu_unlock(&pollset->neighborhood->mu);
|
435
502
|
}
|
436
503
|
gpr_mu_unlock(&pollset->mu);
|
437
504
|
gpr_mu_destroy(&pollset->mu);
|
438
505
|
}
|
439
506
|
|
440
|
-
static grpc_error *pollset_kick_all(
|
507
|
+
static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
|
508
|
+
grpc_pollset *pollset) {
|
509
|
+
GPR_TIMER_BEGIN("pollset_kick_all", 0);
|
441
510
|
grpc_error *error = GRPC_ERROR_NONE;
|
442
511
|
if (pollset->root_worker != NULL) {
|
443
512
|
grpc_pollset_worker *worker = pollset->root_worker;
|
444
513
|
do {
|
445
|
-
|
514
|
+
GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
|
515
|
+
switch (worker->state) {
|
446
516
|
case KICKED:
|
517
|
+
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
|
447
518
|
break;
|
448
519
|
case UNKICKED:
|
449
520
|
SET_KICK_STATE(worker, KICKED);
|
450
521
|
if (worker->initialized_cv) {
|
522
|
+
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
|
451
523
|
gpr_cv_signal(&worker->cv);
|
452
524
|
}
|
453
525
|
break;
|
454
526
|
case DESIGNATED_POLLER:
|
527
|
+
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
|
455
528
|
SET_KICK_STATE(worker, KICKED);
|
456
529
|
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
|
457
530
|
"pollset_kick_all");
|
@@ -463,7 +536,7 @@ static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
|
|
463
536
|
}
|
464
537
|
// TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
|
465
538
|
// in the else case
|
466
|
-
|
539
|
+
GPR_TIMER_END("pollset_kick_all", 0);
|
467
540
|
return error;
|
468
541
|
}
|
469
542
|
|
@@ -471,6 +544,7 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
|
|
471
544
|
grpc_pollset *pollset) {
|
472
545
|
if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
|
473
546
|
pollset->begin_refs == 0) {
|
547
|
+
GPR_TIMER_MARK("pollset_finish_shutdown", 0);
|
474
548
|
GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
|
475
549
|
pollset->shutdown_closure = NULL;
|
476
550
|
}
|
@@ -478,16 +552,16 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
|
|
478
552
|
|
479
553
|
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
|
480
554
|
grpc_closure *closure) {
|
555
|
+
GPR_TIMER_BEGIN("pollset_shutdown", 0);
|
481
556
|
GPR_ASSERT(pollset->shutdown_closure == NULL);
|
482
557
|
GPR_ASSERT(!pollset->shutting_down);
|
483
558
|
pollset->shutdown_closure = closure;
|
484
559
|
pollset->shutting_down = true;
|
485
|
-
GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
|
560
|
+
GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
|
486
561
|
pollset_maybe_finish_shutdown(exec_ctx, pollset);
|
562
|
+
GPR_TIMER_END("pollset_shutdown", 0);
|
487
563
|
}
|
488
564
|
|
489
|
-
#define MAX_EPOLL_EVENTS 100
|
490
|
-
|
491
565
|
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
|
492
566
|
gpr_timespec now) {
|
493
567
|
gpr_timespec timeout;
|
@@ -500,58 +574,105 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
|
|
500
574
|
}
|
501
575
|
|
502
576
|
static const gpr_timespec round_up = {
|
503
|
-
|
577
|
+
0, /* tv_sec */
|
578
|
+
GPR_NS_PER_MS - 1, /* tv_nsec */
|
579
|
+
GPR_TIMESPAN /* clock_type */
|
580
|
+
};
|
504
581
|
timeout = gpr_time_sub(deadline, now);
|
505
582
|
int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
|
506
583
|
return millis >= 1 ? millis : 1;
|
507
584
|
}
|
508
585
|
|
509
|
-
|
510
|
-
|
511
|
-
|
512
|
-
|
513
|
-
|
514
|
-
|
515
|
-
|
516
|
-
|
517
|
-
|
518
|
-
|
519
|
-
|
520
|
-
|
521
|
-
r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
|
522
|
-
} while (r < 0 && errno == EINTR);
|
523
|
-
if (timeout != 0) {
|
524
|
-
GRPC_SCHEDULING_END_BLOCKING_REGION;
|
525
|
-
}
|
586
|
+
/* Process the epoll events found by do_epoll_wait() function.
|
587
|
+
- g_epoll_set.cursor points to the index of the first event to be processed
|
588
|
+
- This function then processes up-to MAX_EPOLL_EVENTS_PER_ITERATION and
|
589
|
+
updates the g_epoll_set.cursor
|
590
|
+
|
591
|
+
NOTE ON SYNCRHONIZATION: Similar to do_epoll_wait(), this function is only
|
592
|
+
called by g_active_poller thread. So there is no need for synchronization
|
593
|
+
when accessing fields in g_epoll_set */
|
594
|
+
static grpc_error *process_epoll_events(grpc_exec_ctx *exec_ctx,
|
595
|
+
grpc_pollset *pollset) {
|
596
|
+
static const char *err_desc = "process_events";
|
597
|
+
grpc_error *error = GRPC_ERROR_NONE;
|
526
598
|
|
527
|
-
|
599
|
+
GPR_TIMER_BEGIN("process_epoll_events", 0);
|
600
|
+
long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
|
601
|
+
long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
|
602
|
+
for (int idx = 0;
|
603
|
+
(idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
|
604
|
+
idx++) {
|
605
|
+
long c = cursor++;
|
606
|
+
struct epoll_event *ev = &g_epoll_set.events[c];
|
607
|
+
void *data_ptr = ev->data.ptr;
|
528
608
|
|
529
|
-
grpc_error *error = GRPC_ERROR_NONE;
|
530
|
-
for (int i = 0; i < r; i++) {
|
531
|
-
void *data_ptr = events[i].data.ptr;
|
532
609
|
if (data_ptr == &global_wakeup_fd) {
|
533
610
|
append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
|
534
611
|
err_desc);
|
535
612
|
} else {
|
536
613
|
grpc_fd *fd = (grpc_fd *)(data_ptr);
|
537
|
-
bool cancel = (events
|
538
|
-
bool read_ev = (events
|
539
|
-
bool write_ev = (events
|
614
|
+
bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
|
615
|
+
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
|
616
|
+
bool write_ev = (ev->events & EPOLLOUT) != 0;
|
617
|
+
|
540
618
|
if (read_ev || cancel) {
|
541
619
|
fd_become_readable(exec_ctx, fd, pollset);
|
542
620
|
}
|
621
|
+
|
543
622
|
if (write_ev || cancel) {
|
544
623
|
fd_become_writable(exec_ctx, fd);
|
545
624
|
}
|
546
625
|
}
|
547
626
|
}
|
548
|
-
|
627
|
+
gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
|
628
|
+
GPR_TIMER_END("process_epoll_events", 0);
|
549
629
|
return error;
|
550
630
|
}
|
551
631
|
|
632
|
+
/* Do epoll_wait and store the events in g_epoll_set.events field. This does not
|
633
|
+
"process" any of the events yet; that is done in process_epoll_events().
|
634
|
+
*See process_epoll_events() function for more details.
|
635
|
+
|
636
|
+
NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
|
637
|
+
(i.e the designated poller thread) will be calling this function. So there is
|
638
|
+
no need for any synchronization when accesing fields in g_epoll_set */
|
639
|
+
static grpc_error *do_epoll_wait(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
|
640
|
+
gpr_timespec now, gpr_timespec deadline) {
|
641
|
+
GPR_TIMER_BEGIN("do_epoll_wait", 0);
|
642
|
+
|
643
|
+
int r;
|
644
|
+
int timeout = poll_deadline_to_millis_timeout(deadline, now);
|
645
|
+
if (timeout != 0) {
|
646
|
+
GRPC_SCHEDULING_START_BLOCKING_REGION;
|
647
|
+
}
|
648
|
+
do {
|
649
|
+
GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
|
650
|
+
r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
|
651
|
+
timeout);
|
652
|
+
} while (r < 0 && errno == EINTR);
|
653
|
+
if (timeout != 0) {
|
654
|
+
GRPC_SCHEDULING_END_BLOCKING_REGION;
|
655
|
+
}
|
656
|
+
|
657
|
+
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
|
658
|
+
|
659
|
+
GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
|
660
|
+
|
661
|
+
if (GRPC_TRACER_ON(grpc_polling_trace)) {
|
662
|
+
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
|
663
|
+
}
|
664
|
+
|
665
|
+
gpr_atm_rel_store(&g_epoll_set.num_events, r);
|
666
|
+
gpr_atm_rel_store(&g_epoll_set.cursor, 0);
|
667
|
+
|
668
|
+
GPR_TIMER_END("do_epoll_wait", 0);
|
669
|
+
return GRPC_ERROR_NONE;
|
670
|
+
}
|
671
|
+
|
552
672
|
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
|
553
673
|
grpc_pollset_worker **worker_hdl, gpr_timespec *now,
|
554
674
|
gpr_timespec deadline) {
|
675
|
+
GPR_TIMER_BEGIN("begin_worker", 0);
|
555
676
|
if (worker_hdl != NULL) *worker_hdl = worker;
|
556
677
|
worker->initialized_cv = false;
|
557
678
|
SET_KICK_STATE(worker, UNKICKED);
|
@@ -566,69 +687,77 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
|
|
566
687
|
// pollset has been observed to be inactive, we need to move back to the
|
567
688
|
// active list
|
568
689
|
bool is_reassigning = false;
|
569
|
-
if (!pollset->
|
690
|
+
if (!pollset->reassigning_neighborhood) {
|
570
691
|
is_reassigning = true;
|
571
|
-
pollset->
|
572
|
-
pollset->
|
692
|
+
pollset->reassigning_neighborhood = true;
|
693
|
+
pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
|
573
694
|
}
|
574
|
-
|
695
|
+
pollset_neighborhood *neighborhood = pollset->neighborhood;
|
575
696
|
gpr_mu_unlock(&pollset->mu);
|
576
697
|
// pollset unlocked: state may change (even worker->kick_state)
|
577
|
-
|
578
|
-
gpr_mu_lock(&
|
698
|
+
retry_lock_neighborhood:
|
699
|
+
gpr_mu_lock(&neighborhood->mu);
|
579
700
|
gpr_mu_lock(&pollset->mu);
|
580
701
|
if (GRPC_TRACER_ON(grpc_polling_trace)) {
|
581
702
|
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
|
582
|
-
pollset, worker, kick_state_string(worker->
|
703
|
+
pollset, worker, kick_state_string(worker->state),
|
583
704
|
is_reassigning);
|
584
705
|
}
|
585
706
|
if (pollset->seen_inactive) {
|
586
|
-
if (
|
587
|
-
gpr_mu_unlock(&
|
588
|
-
|
707
|
+
if (neighborhood != pollset->neighborhood) {
|
708
|
+
gpr_mu_unlock(&neighborhood->mu);
|
709
|
+
neighborhood = pollset->neighborhood;
|
589
710
|
gpr_mu_unlock(&pollset->mu);
|
590
|
-
goto
|
711
|
+
goto retry_lock_neighborhood;
|
591
712
|
}
|
592
|
-
|
593
|
-
|
594
|
-
|
595
|
-
|
596
|
-
|
597
|
-
|
598
|
-
|
599
|
-
|
600
|
-
|
601
|
-
|
602
|
-
|
713
|
+
|
714
|
+
/* In the brief time we released the pollset locks above, the worker MAY
|
715
|
+
have been kicked. In this case, the worker should get out of this
|
716
|
+
pollset ASAP and hence this should neither add the pollset to
|
717
|
+
neighborhood nor mark the pollset as active.
|
718
|
+
|
719
|
+
On a side note, the only way a worker's kick state could have changed
|
720
|
+
at this point is if it were "kicked specifically". Since the worker has
|
721
|
+
not added itself to the pollset yet (by calling worker_insert()), it is
|
722
|
+
not visible in the "kick any" path yet */
|
723
|
+
if (worker->state == UNKICKED) {
|
724
|
+
pollset->seen_inactive = false;
|
725
|
+
if (neighborhood->active_root == NULL) {
|
726
|
+
neighborhood->active_root = pollset->next = pollset->prev = pollset;
|
727
|
+
/* Make this the designated poller if there isn't one already */
|
728
|
+
if (worker->state == UNKICKED &&
|
729
|
+
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
|
730
|
+
SET_KICK_STATE(worker, DESIGNATED_POLLER);
|
731
|
+
}
|
732
|
+
} else {
|
733
|
+
pollset->next = neighborhood->active_root;
|
734
|
+
pollset->prev = pollset->next->prev;
|
735
|
+
pollset->next->prev = pollset->prev->next = pollset;
|
603
736
|
}
|
604
|
-
} else {
|
605
|
-
pollset->next = neighbourhood->active_root;
|
606
|
-
pollset->prev = pollset->next->prev;
|
607
|
-
pollset->next->prev = pollset->prev->next = pollset;
|
608
737
|
}
|
609
738
|
}
|
610
739
|
if (is_reassigning) {
|
611
|
-
GPR_ASSERT(pollset->
|
612
|
-
pollset->
|
740
|
+
GPR_ASSERT(pollset->reassigning_neighborhood);
|
741
|
+
pollset->reassigning_neighborhood = false;
|
613
742
|
}
|
614
|
-
gpr_mu_unlock(&
|
743
|
+
gpr_mu_unlock(&neighborhood->mu);
|
615
744
|
}
|
616
745
|
|
617
746
|
worker_insert(pollset, worker);
|
618
747
|
pollset->begin_refs--;
|
619
|
-
if (worker->
|
748
|
+
if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
|
620
749
|
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
|
621
750
|
worker->initialized_cv = true;
|
622
751
|
gpr_cv_init(&worker->cv);
|
623
|
-
while (worker->
|
752
|
+
while (worker->state == UNKICKED && !pollset->shutting_down) {
|
624
753
|
if (GRPC_TRACER_ON(grpc_polling_trace)) {
|
625
754
|
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
|
626
|
-
pollset, worker, kick_state_string(worker->
|
755
|
+
pollset, worker, kick_state_string(worker->state),
|
627
756
|
pollset->shutting_down);
|
628
757
|
}
|
629
758
|
|
630
759
|
if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
|
631
|
-
worker->
|
760
|
+
worker->state == UNKICKED) {
|
632
761
|
/* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
|
633
762
|
received a kick */
|
634
763
|
SET_KICK_STATE(worker, KICKED);
|
@@ -641,12 +770,12 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
|
|
641
770
|
gpr_log(GPR_ERROR,
|
642
771
|
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
|
643
772
|
"kicked_without_poller: %d",
|
644
|
-
pollset, worker, kick_state_string(worker->
|
773
|
+
pollset, worker, kick_state_string(worker->state),
|
645
774
|
pollset->shutting_down, pollset->kicked_without_poller);
|
646
775
|
}
|
647
776
|
|
648
777
|
/* We release pollset lock in this function at a couple of places:
|
649
|
-
* 1. Briefly when assigning pollset to a
|
778
|
+
* 1. Briefly when assigning pollset to a neighborhood
|
650
779
|
* 2. When doing gpr_cv_wait()
|
651
780
|
* It is possible that 'kicked_without_poller' was set to true during (1) and
|
652
781
|
* 'shutting_down' is set to true during (1) or (2). If either of them is
|
@@ -656,17 +785,20 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
|
|
656
785
|
|
657
786
|
if (pollset->kicked_without_poller) {
|
658
787
|
pollset->kicked_without_poller = false;
|
788
|
+
GPR_TIMER_END("begin_worker", 0);
|
659
789
|
return false;
|
660
790
|
}
|
661
791
|
|
662
|
-
|
792
|
+
GPR_TIMER_END("begin_worker", 0);
|
793
|
+
return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
|
663
794
|
}
|
664
795
|
|
665
|
-
static bool
|
666
|
-
|
796
|
+
static bool check_neighborhood_for_available_poller(
|
797
|
+
grpc_exec_ctx *exec_ctx, pollset_neighborhood *neighborhood) {
|
798
|
+
GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
|
667
799
|
bool found_worker = false;
|
668
800
|
do {
|
669
|
-
grpc_pollset *inspect =
|
801
|
+
grpc_pollset *inspect = neighborhood->active_root;
|
670
802
|
if (inspect == NULL) {
|
671
803
|
break;
|
672
804
|
}
|
@@ -675,7 +807,7 @@ static bool check_neighbourhood_for_available_poller(
|
|
675
807
|
grpc_pollset_worker *inspect_worker = inspect->root_worker;
|
676
808
|
if (inspect_worker != NULL) {
|
677
809
|
do {
|
678
|
-
switch (inspect_worker->
|
810
|
+
switch (inspect_worker->state) {
|
679
811
|
case UNKICKED:
|
680
812
|
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
|
681
813
|
(gpr_atm)inspect_worker)) {
|
@@ -685,6 +817,8 @@ static bool check_neighbourhood_for_available_poller(
|
|
685
817
|
}
|
686
818
|
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
|
687
819
|
if (inspect_worker->initialized_cv) {
|
820
|
+
GPR_TIMER_MARK("signal worker", 0);
|
821
|
+
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
|
688
822
|
gpr_cv_signal(&inspect_worker->cv);
|
689
823
|
}
|
690
824
|
} else {
|
@@ -710,8 +844,8 @@ static bool check_neighbourhood_for_available_poller(
|
|
710
844
|
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
|
711
845
|
}
|
712
846
|
inspect->seen_inactive = true;
|
713
|
-
if (inspect ==
|
714
|
-
|
847
|
+
       if (inspect == neighborhood->active_root) {
+        neighborhood->active_root =
             inspect->next == inspect ? NULL : inspect->next;
       }
       inspect->next->prev = inspect->prev;
@@ -720,12 +854,14 @@ static bool check_neighbourhood_for_available_poller(
     }
     gpr_mu_unlock(&inspect->mu);
   } while (!found_worker);
+  GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
   return found_worker;
 }
 
 static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                        grpc_pollset_worker *worker,
                        grpc_pollset_worker **worker_hdl) {
+  GPR_TIMER_BEGIN("end_worker", 0);
   if (GRPC_TRACER_ON(grpc_polling_trace)) {
     gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
   }
@@ -735,13 +871,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
   grpc_closure_list_move(&worker->schedule_on_end_work,
                          &exec_ctx->closure_list);
   if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
-    if (worker->next != worker && worker->next->
+    if (worker->next != worker && worker->next->state == UNKICKED) {
       if (GRPC_TRACER_ON(grpc_polling_trace)) {
         gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
       }
       GPR_ASSERT(worker->next->initialized_cv);
       gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
       SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
+      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
       gpr_cv_signal(&worker->next->cv);
       if (grpc_exec_ctx_has_work(exec_ctx)) {
         gpr_mu_unlock(&pollset->mu);
@@ -750,32 +887,33 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
       }
     } else {
       gpr_atm_no_barrier_store(&g_active_poller, 0);
-      size_t
-          (size_t)(pollset->
+      size_t poller_neighborhood_idx =
+          (size_t)(pollset->neighborhood - g_neighborhoods);
       gpr_mu_unlock(&pollset->mu);
       bool found_worker = false;
-      bool scan_state[
-      for (size_t i = 0; !found_worker && i <
-
-        &
-
-        if (gpr_mu_trylock(&
+      bool scan_state[MAX_NEIGHBORHOODS];
+      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
+        pollset_neighborhood *neighborhood =
+            &g_neighborhoods[(poller_neighborhood_idx + i) %
+                             g_num_neighborhoods];
+        if (gpr_mu_trylock(&neighborhood->mu)) {
           found_worker =
-
-          gpr_mu_unlock(&
+              check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+          gpr_mu_unlock(&neighborhood->mu);
           scan_state[i] = true;
         } else {
           scan_state[i] = false;
         }
       }
-      for (size_t i = 0; !found_worker && i <
+      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
         if (scan_state[i]) continue;
-
-        &
-
-        gpr_mu_lock(&
-        found_worker =
-
+        pollset_neighborhood *neighborhood =
+            &g_neighborhoods[(poller_neighborhood_idx + i) %
+                             g_num_neighborhoods];
+        gpr_mu_lock(&neighborhood->mu);
+        found_worker =
+            check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+        gpr_mu_unlock(&neighborhood->mu);
       }
       grpc_exec_ctx_flush(exec_ctx);
       gpr_mu_lock(&pollset->mu);
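The hunk above replaces the old scan with a two-pass sweep when the outgoing designated poller hands off its duty: a trylock pass that skips any contended neighborhood, followed by a blocking pass over only the neighborhoods that were skipped. The standalone sketch below illustrates just that locking pattern; the shard type, NUM_SHARDS, visit_shard and scan_shards are names invented for this example and are not grpc APIs.

/* Two-pass scan: trylock first so we never stall behind a busy shard,
 * then fall back to a blocking lock only for the shards we skipped. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_SHARDS 4

typedef struct {
  pthread_mutex_t mu;
  bool has_waiter; /* stands in for "a worker that could become the poller" */
} shard;

static shard g_shards[NUM_SHARDS];

/* Stand-in for the per-neighborhood check: succeed if there is a waiter. */
static bool visit_shard(shard *s) { return s->has_waiter; }

static bool scan_shards(size_t start) {
  bool found = false;
  bool scanned[NUM_SHARDS];
  /* Pass 1: opportunistic, never blocks. */
  for (size_t i = 0; !found && i < NUM_SHARDS; i++) {
    shard *s = &g_shards[(start + i) % NUM_SHARDS];
    if (pthread_mutex_trylock(&s->mu) == 0) {
      found = visit_shard(s);
      pthread_mutex_unlock(&s->mu);
      scanned[i] = true;
    } else {
      scanned[i] = false;
    }
  }
  /* Pass 2: blocking, but only for what pass 1 could not inspect. */
  for (size_t i = 0; !found && i < NUM_SHARDS; i++) {
    if (scanned[i]) continue;
    shard *s = &g_shards[(start + i) % NUM_SHARDS];
    pthread_mutex_lock(&s->mu);
    found = visit_shard(s);
    pthread_mutex_unlock(&s->mu);
  }
  return found;
}

int main(void) {
  for (size_t i = 0; i < NUM_SHARDS; i++) {
    pthread_mutex_init(&g_shards[i].mu, NULL);
  }
  g_shards[2].has_waiter = true;
  printf("found: %d\n", scan_shards(1));
  return 0;
}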
@@ -795,42 +933,72 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
     pollset_maybe_finish_shutdown(exec_ctx, pollset);
   }
   GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
+  GPR_TIMER_END("end_worker", 0);
 }
 
 /* pollset->po.mu lock must be held by the caller before calling this.
    The function pollset_work() may temporarily release the lock (pollset->po.mu)
    during the course of its execution but it will always re-acquire the lock and
    ensure that it is held by the time the function returns */
-static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *
+static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,
                                 grpc_pollset_worker **worker_hdl,
                                 gpr_timespec now, gpr_timespec deadline) {
   grpc_pollset_worker worker;
   grpc_error *error = GRPC_ERROR_NONE;
   static const char *err_desc = "pollset_work";
-
-
+  GPR_TIMER_BEGIN("pollset_work", 0);
+  if (ps->kicked_without_poller) {
+    ps->kicked_without_poller = false;
+    GPR_TIMER_END("pollset_work", 0);
     return GRPC_ERROR_NONE;
   }
-
-
+
+  if (begin_worker(ps, &worker, worker_hdl, &now, deadline)) {
+    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
     gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
-    GPR_ASSERT(!
-    GPR_ASSERT(!
-
-
-
-
+    GPR_ASSERT(!ps->shutting_down);
+    GPR_ASSERT(!ps->seen_inactive);
+
+    gpr_mu_unlock(&ps->mu); /* unlock */
+    /* This is the designated polling thread at this point and should ideally do
+       polling. However, if there are unprocessed events left from a previous
+       call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
+       process the pending epoll events.
+
+       The reason for decoupling do_epoll_wait and process_epoll_events is to
+       better distrubute the work (i.e handling epoll events) across multiple
+       threads
+
+       process_epoll_events() returns very quickly: It just queues the work on
+       exec_ctx but does not execute it (the actual exectution or more
+       accurately grpc_exec_ctx_flush() happens in end_worker() AFTER selecting
+       a designated poller). So we are not waiting long periods without a
+       designated poller */
+    if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
+        gpr_atm_acq_load(&g_epoll_set.num_events)) {
+      append_error(&error, do_epoll_wait(exec_ctx, ps, now, deadline),
+                   err_desc);
+    }
+    append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
+
+    gpr_mu_lock(&ps->mu); /* lock */
+
     gpr_tls_set(&g_current_thread_worker, 0);
   } else {
-    gpr_tls_set(&g_current_thread_pollset, (intptr_t)
+    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
   }
-  end_worker(exec_ctx,
+  end_worker(exec_ctx, ps, &worker, worker_hdl);
+
   gpr_tls_set(&g_current_thread_pollset, 0);
+  GPR_TIMER_END("pollset_work", 0);
   return error;
 }
 
-static grpc_error *pollset_kick(grpc_pollset *pollset,
+static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 grpc_pollset_worker *specific_worker) {
+  GPR_TIMER_BEGIN("pollset_kick", 0);
+  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+  grpc_error *ret_err = GRPC_ERROR_NONE;
   if (GRPC_TRACER_ON(grpc_polling_trace)) {
     gpr_strvec log;
     gpr_strvec_init(&log);
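The new pollset_work() body above splits the old single epoll step into two phases: go back to the kernel only when the previous batch of events (cursor vs. num_events) has been fully handed out, and otherwise keep draining what is already buffered. The self-contained sketch below models only that gating idea; event_set, fake_epoll_wait and process_some are invented stand-ins for illustration, not grpc functions.

/* Wait once, drain incrementally: call the (simulated) wait only when the
 * cursor has caught up with the number of buffered events. */
#include <stdio.h>

#define MAX_EVENTS 8

typedef struct {
  int events[MAX_EVENTS]; /* stand-in for struct epoll_event[] */
  int num_events;         /* filled by the last wait */
  int cursor;             /* how many events have been handed out so far */
} event_set;

/* Stand-in for the wait phase: pretend the kernel returned three events. */
static void fake_epoll_wait(event_set *s) {
  s->num_events = 3;
  s->cursor = 0;
  for (int i = 0; i < s->num_events; i++) s->events[i] = 100 + i;
}

/* Stand-in for the processing phase: drain at most a small batch so the
 * remaining events can be picked up on a later iteration. */
static void process_some(event_set *s, int batch) {
  for (int n = 0; n < batch && s->cursor < s->num_events; n++) {
    printf("handling event %d\n", s->events[s->cursor++]);
  }
}

int main(void) {
  event_set s = {{0}, 0, 0};
  for (int iter = 0; iter < 3; iter++) {
    /* Only go back to the kernel once everything from the previous wait
       has been consumed. */
    if (s.cursor == s.num_events) fake_epoll_wait(&s);
    process_some(&s, 2);
  }
  return 0;
}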
@@ -842,14 +1010,14 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
     gpr_strvec_add(&log, tmp);
     if (pollset->root_worker != NULL) {
       gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
-                   kick_state_string(pollset->root_worker->
+                   kick_state_string(pollset->root_worker->state),
                    pollset->root_worker->next,
-                   kick_state_string(pollset->root_worker->next->
+                   kick_state_string(pollset->root_worker->next->state));
       gpr_strvec_add(&log, tmp);
     }
     if (specific_worker != NULL) {
       gpr_asprintf(&tmp, " worker_kick_state=%s",
-                   kick_state_string(specific_worker->
+                   kick_state_string(specific_worker->state));
       gpr_strvec_add(&log, tmp);
     }
     tmp = gpr_strvec_flatten(&log, NULL);
@@ -857,49 +1025,56 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
     gpr_log(GPR_ERROR, "%s", tmp);
     gpr_free(tmp);
   }
+
   if (specific_worker == NULL) {
     if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
       grpc_pollset_worker *root_worker = pollset->root_worker;
       if (root_worker == NULL) {
+        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
         pollset->kicked_without_poller = true;
         if (GRPC_TRACER_ON(grpc_polling_trace)) {
           gpr_log(GPR_ERROR, " .. kicked_without_poller");
         }
-
+        goto done;
       }
       grpc_pollset_worker *next_worker = root_worker->next;
-      if (root_worker->
+      if (root_worker->state == KICKED) {
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
        }
        SET_KICK_STATE(root_worker, KICKED);
-
-      } else if (next_worker->
+        goto done;
+      } else if (next_worker->state == KICKED) {
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
        }
        SET_KICK_STATE(next_worker, KICKED);
-
+        goto done;
      } else if (root_worker ==
                     next_worker &&  // only try and wake up a poller if
                                     // there is no next worker
                 root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
                                    &g_active_poller)) {
+        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
        }
        SET_KICK_STATE(root_worker, KICKED);
-
-
+        ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+        goto done;
+      } else if (next_worker->state == UNKICKED) {
+        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
        }
        GPR_ASSERT(next_worker->initialized_cv);
        SET_KICK_STATE(next_worker, KICKED);
        gpr_cv_signal(&next_worker->cv);
-
-      } else if (next_worker->
-        if (root_worker->
+        goto done;
+      } else if (next_worker->state == DESIGNATED_POLLER) {
+        if (root_worker->state != DESIGNATED_POLLER) {
          if (GRPC_TRACER_ON(grpc_polling_trace)) {
            gpr_log(
                GPR_ERROR,
@@ -908,61 +1083,78 @@ static grpc_error *pollset_kick(grpc_pollset *pollset,
          }
          SET_KICK_STATE(root_worker, KICKED);
          if (root_worker->initialized_cv) {
+            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
            gpr_cv_signal(&root_worker->cv);
          }
-
+          goto done;
        } else {
+          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
          if (GRPC_TRACER_ON(grpc_polling_trace)) {
            gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
                    root_worker);
          }
          SET_KICK_STATE(next_worker, KICKED);
-
+          ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+          goto done;
        }
      } else {
-
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+        GPR_ASSERT(next_worker->state == KICKED);
        SET_KICK_STATE(next_worker, KICKED);
-
+        goto done;
      }
    } else {
+      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, " .. kicked while waking up");
      }
-
+      goto done;
    }
-
+
+    GPR_UNREACHABLE_CODE(goto done);
+  }
+
+  if (specific_worker->state == KICKED) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. specific worker already kicked");
    }
-
+    goto done;
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
+    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
    }
    SET_KICK_STATE(specific_worker, KICKED);
-
+    goto done;
  } else if (specific_worker ==
             (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. kick active poller");
    }
    SET_KICK_STATE(specific_worker, KICKED);
-
+    ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
+    goto done;
  } else if (specific_worker->initialized_cv) {
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. kick waiting worker");
    }
    SET_KICK_STATE(specific_worker, KICKED);
    gpr_cv_signal(&specific_worker->cv);
-
+    goto done;
  } else {
+    GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, " .. kick non-waiting worker");
    }
    SET_KICK_STATE(specific_worker, KICKED);
-
+    goto done;
  }
+done:
+  GPR_TIMER_END("pollset_kick", 0);
+  return ret_err;
 }
 
 static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
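Every branch of the rewritten pollset_kick() above records a stats counter and leaves through a single done: label, so the timer end marker and the return value are handled in exactly one place. The toy sketch below shows only that control-flow shape; the COUNT and TIMER macros and the kick() arguments are placeholders invented for this example, not grpc's instrumentation.

/* Single-exit shape: every early-out path still hits the counters and the
 * end-of-function bookkeeping behind the "done" label. */
#include <stdio.h>

static int g_counters[3];
#define COUNT(i) (g_counters[(i)]++)
#define TIMER_BEGIN(name) printf("begin %s\n", name)
#define TIMER_END(name) printf("end %s\n", name)

static int kick(int already_kicked, int is_current_thread) {
  int err = 0;
  TIMER_BEGIN("kick");
  if (already_kicked) {
    COUNT(0); /* kicked again */
    goto done;
  }
  if (is_current_thread) {
    COUNT(1); /* kicked our own thread */
    goto done;
  }
  COUNT(2); /* someone else must actually be woken up */
  err = 1;  /* stands in for the wakeup-fd result */
done:
  TIMER_END("kick");
  return err;
}

int main(void) {
  int a = kick(1, 0); /* already kicked: counted, no wakeup */
  int b = kick(0, 1); /* kicking ourselves: counted, no wakeup */
  int c = kick(0, 0); /* needs a real wakeup */
  printf("results: %d %d %d, counters: %d %d %d\n", a, b, c, g_counters[0],
         g_counters[1], g_counters[2]);
  return 0;
}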
@@ -1006,69 +1198,60 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
 static void shutdown_engine(void) {
   fd_global_shutdown();
   pollset_global_shutdown();
-
+  epoll_set_shutdown();
 }
 
 static const grpc_event_engine_vtable vtable = {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    sizeof(grpc_pollset),
+
+    fd_create,
+    fd_wrapped_fd,
+    fd_orphan,
+    fd_shutdown,
+    fd_notify_on_read,
+    fd_notify_on_write,
+    fd_is_shutdown,
+    fd_get_read_notifier_pollset,
+
+    pollset_init,
+    pollset_shutdown,
+    pollset_destroy,
+    pollset_work,
+    pollset_kick,
+    pollset_add_fd,
+
+    pollset_set_create,
+    pollset_set_destroy,
+    pollset_set_add_pollset,
+    pollset_set_del_pollset,
+    pollset_set_add_pollset_set,
+    pollset_set_del_pollset_set,
+    pollset_set_add_fd,
+    pollset_set_del_fd,
+
+    shutdown_engine,
 };
 
 /* It is possible that GLIBC has epoll but the underlying kernel doesn't.
- * Create
+ * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
+ * support is available */
 const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
-  /* TODO(sreek): Temporarily disable this poller unless explicitly requested
-   * via GRPC_POLL_STRATEGY */
-  if (!explicit_request) {
-    return NULL;
-  }
-
   if (!grpc_has_wakeup_fd()) {
     return NULL;
   }
 
-
-  if (g_epfd < 0) {
-    gpr_log(GPR_ERROR, "epoll unavailable");
+  if (!epoll_set_init()) {
     return NULL;
   }
 
   fd_global_init();
 
   if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
-    close(g_epfd);
     fd_global_shutdown();
+    epoll_set_shutdown();
     return NULL;
   }
 
-  gpr_log(GPR_ERROR, "grpc epoll fd: %d", g_epfd);
-
   return &vtable;
 }
 
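The comment restored above explains why grpc_init_epoll1_linux() now asks epoll_set_init() to prove that epoll actually works rather than trusting the headers: glibc can expose the epoll functions even when the running kernel rejects them. The standalone probe below illustrates that idea under the same assumption; epoll_available() is an invented helper, not part of grpc, and it relies only on the standard epoll_create1() and close() calls.

/* Probe for working epoll support by attempting to create an epoll set. */
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

static int epoll_available(void) {
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) return 0; /* e.g. ENOSYS when the kernel lacks epoll */
  close(fd);
  return 1;
}

int main(void) {
  printf("epoll available: %d\n", epoll_available());
  return 0;
}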