grpc 1.3.4 → 1.4.0
Potentially problematic release.
This version of grpc might be problematic.
- checksums.yaml +4 -4
- data/Makefile +581 -450
- data/include/grpc/census.h +49 -49
- data/include/grpc/grpc.h +16 -70
- data/include/grpc/grpc_security.h +59 -59
- data/include/grpc/grpc_security_constants.h +9 -9
- data/include/grpc/impl/codegen/atm.h +1 -1
- data/include/grpc/impl/codegen/atm_windows.h +4 -4
- data/include/grpc/impl/codegen/byte_buffer_reader.h +2 -2
- data/include/grpc/impl/codegen/compression_types.h +4 -5
- data/include/grpc/impl/codegen/gpr_slice.h +5 -5
- data/include/grpc/impl/codegen/gpr_types.h +6 -7
- data/include/grpc/impl/codegen/grpc_types.h +128 -59
- data/include/grpc/impl/codegen/port_platform.h +6 -0
- data/include/grpc/impl/codegen/propagation_bits.h +2 -2
- data/include/grpc/impl/codegen/slice.h +13 -12
- data/include/grpc/impl/codegen/status.h +23 -18
- data/include/grpc/impl/codegen/sync.h +1 -1
- data/include/grpc/load_reporting.h +6 -6
- data/include/grpc/slice.h +47 -25
- data/include/grpc/slice_buffer.h +18 -14
- data/include/grpc/support/alloc.h +7 -7
- data/include/grpc/support/cmdline.h +10 -10
- data/include/grpc/support/cpu.h +3 -3
- data/include/grpc/support/histogram.h +1 -1
- data/include/grpc/support/host_port.h +2 -2
- data/include/grpc/support/log.h +9 -9
- data/include/grpc/support/log_windows.h +1 -1
- data/include/grpc/support/string_util.h +3 -3
- data/include/grpc/support/subprocess.h +3 -3
- data/include/grpc/support/sync.h +31 -31
- data/include/grpc/support/thd.h +11 -11
- data/include/grpc/support/time.h +12 -12
- data/include/grpc/support/tls.h +1 -1
- data/include/grpc/support/tls_gcc.h +2 -2
- data/include/grpc/support/tls_msvc.h +1 -1
- data/include/grpc/support/tls_pthread.h +1 -1
- data/include/grpc/support/useful.h +2 -2
- data/include/grpc/support/workaround_list.h +46 -0
- data/src/core/ext/census/context.c +1 -1
- data/src/core/ext/census/intrusive_hash_map.c +319 -0
- data/src/core/ext/census/intrusive_hash_map.h +167 -0
- data/src/core/ext/census/intrusive_hash_map_internal.h +63 -0
- data/src/core/ext/census/resource.c +3 -1
- data/src/core/ext/filters/client_channel/channel_connectivity.c +1 -1
- data/src/core/ext/filters/client_channel/client_channel.c +173 -103
- data/src/core/ext/filters/client_channel/client_channel_plugin.c +3 -2
- data/src/core/ext/filters/client_channel/lb_policy.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy.h +8 -7
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c +153 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h +42 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +405 -102
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c +133 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h +65 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c +90 -51
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h +7 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +19 -8
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +63 -34
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c +2 -1
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +188 -294
- data/src/core/ext/filters/client_channel/lb_policy_factory.c +28 -5
- data/src/core/ext/filters/client_channel/lb_policy_factory.h +18 -4
- data/src/core/ext/filters/client_channel/parse_address.c +90 -59
- data/src/core/ext/filters/client_channel/parse_address.h +17 -8
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +11 -7
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +59 -14
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +6 -0
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c +3 -3
- data/src/core/ext/filters/client_channel/subchannel.c +20 -17
- data/src/core/ext/filters/client_channel/subchannel.h +1 -0
- data/src/core/ext/filters/client_channel/subchannel_index.c +11 -1
- data/src/core/ext/filters/client_channel/uri_parser.c +36 -22
- data/src/core/ext/filters/client_channel/uri_parser.h +1 -1
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.c +42 -17
- data/src/core/{lib/channel → ext/filters/deadline}/deadline_filter.h +8 -9
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.c +19 -11
- data/src/core/{lib/channel → ext/filters/http/client}/http_client_filter.h +3 -6
- data/src/core/ext/filters/http/http_filters_plugin.c +104 -0
- data/src/core/{lib/channel/compress_filter.c → ext/filters/http/message_compress/message_compress_filter.c} +124 -23
- data/src/core/{lib/channel/compress_filter.h → ext/filters/http/message_compress/message_compress_filter.h} +5 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.c +4 -6
- data/src/core/{lib/channel → ext/filters/http/server}/http_server_filter.h +3 -3
- data/src/core/ext/filters/load_reporting/load_reporting.c +2 -25
- data/src/core/ext/filters/load_reporting/load_reporting_filter.c +26 -1
- data/src/core/ext/filters/max_age/max_age_filter.c +14 -14
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.c +91 -47
- data/src/core/{lib/channel → ext/filters/message_size}/message_size_filter.h +3 -3
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.c +223 -0
- data/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h +40 -0
- data/src/core/ext/filters/workarounds/workaround_utils.c +65 -0
- data/src/core/ext/filters/workarounds/workaround_utils.h +52 -0
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +1 -1
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +3 -2
- data/src/core/ext/transport/chttp2/transport/bin_decoder.c +2 -2
- data/src/core/ext/transport/chttp2/transport/bin_encoder.c +3 -3
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +319 -175
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.h +3 -2
- data/src/core/ext/transport/chttp2/transport/frame_data.c +203 -164
- data/src/core/ext/transport/chttp2/transport/frame_data.h +8 -14
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +5 -5
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +1 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +4 -4
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +2 -4
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +4 -3
- data/src/core/ext/transport/chttp2/transport/internal.h +50 -33
- data/src/core/ext/transport/chttp2/transport/parsing.c +10 -11
- data/src/core/ext/transport/chttp2/transport/writing.c +32 -13
- data/src/core/lib/channel/channel_args.c +30 -9
- data/src/core/lib/channel/channel_args.h +5 -1
- data/src/core/lib/channel/channel_stack.c +1 -1
- data/src/core/lib/channel/channel_stack.h +2 -2
- data/src/core/lib/channel/channel_stack_builder.c +13 -1
- data/src/core/lib/channel/channel_stack_builder.h +5 -1
- data/src/core/lib/channel/connected_channel.c +3 -1
- data/src/core/lib/channel/context.h +2 -2
- data/src/core/lib/compression/message_compress.c +2 -2
- data/src/core/lib/debug/trace.c +13 -6
- data/src/core/lib/debug/trace.h +27 -1
- data/src/core/lib/http/httpcli.c +1 -1
- data/src/core/lib/http/httpcli_security_connector.c +9 -11
- data/src/core/lib/http/parser.c +2 -2
- data/src/core/lib/http/parser.h +2 -1
- data/src/core/lib/iomgr/combiner.c +6 -6
- data/src/core/lib/iomgr/combiner.h +2 -1
- data/src/core/lib/iomgr/error.c +12 -5
- data/src/core/lib/iomgr/error.h +13 -13
- data/src/core/lib/iomgr/ev_epoll1_linux.c +984 -0
- data/src/core/lib/iomgr/ev_epoll1_linux.h +44 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +2146 -0
- data/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +1337 -0
- data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h +43 -0
- data/src/core/lib/iomgr/ev_epollex_linux.c +1511 -0
- data/src/core/lib/iomgr/ev_epollex_linux.h +43 -0
- data/src/core/lib/iomgr/{ev_epoll_linux.c → ev_epollsig_linux.c} +41 -33
- data/src/core/lib/iomgr/{ev_epoll_linux.h → ev_epollsig_linux.h} +4 -4
- data/src/core/lib/iomgr/ev_poll_posix.c +12 -27
- data/src/core/lib/iomgr/ev_poll_posix.h +2 -2
- data/src/core/lib/iomgr/ev_posix.c +22 -8
- data/src/core/lib/iomgr/ev_posix.h +4 -3
- data/src/core/lib/iomgr/ev_windows.c +43 -0
- data/src/core/lib/iomgr/exec_ctx.c +5 -0
- data/src/core/lib/iomgr/exec_ctx.h +2 -0
- data/src/core/lib/iomgr/iomgr.c +4 -0
- data/src/core/lib/iomgr/iomgr.h +3 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.c +116 -0
- data/src/core/lib/iomgr/is_epollexclusive_available.h +41 -0
- data/src/core/lib/iomgr/lockfree_event.c +16 -0
- data/src/core/lib/iomgr/pollset.h +2 -5
- data/src/core/lib/iomgr/pollset_uv.c +1 -1
- data/src/core/lib/iomgr/pollset_windows.c +3 -3
- data/src/core/lib/iomgr/resource_quota.c +9 -8
- data/src/core/lib/iomgr/resource_quota.h +2 -1
- data/src/core/lib/iomgr/sockaddr_utils.h +1 -1
- data/src/core/lib/iomgr/socket_mutator.h +2 -0
- data/src/core/lib/iomgr/sys_epoll_wrapper.h +43 -0
- data/src/core/lib/iomgr/tcp_client_posix.c +6 -6
- data/src/core/lib/iomgr/tcp_client_uv.c +3 -3
- data/src/core/lib/iomgr/tcp_posix.c +7 -7
- data/src/core/lib/iomgr/tcp_posix.h +2 -1
- data/src/core/lib/iomgr/tcp_server_posix.c +1 -1
- data/src/core/lib/iomgr/tcp_uv.c +6 -6
- data/src/core/lib/iomgr/tcp_uv.h +2 -1
- data/src/core/lib/iomgr/tcp_windows.c +1 -1
- data/src/core/lib/iomgr/timer_generic.c +24 -25
- data/src/core/lib/iomgr/timer_manager.c +276 -0
- data/src/core/lib/iomgr/timer_manager.h +52 -0
- data/src/core/lib/iomgr/timer_uv.c +6 -0
- data/src/core/lib/iomgr/udp_server.c +42 -9
- data/src/core/lib/iomgr/udp_server.h +3 -1
- data/src/core/lib/security/credentials/credentials.c +0 -1
- data/src/core/lib/security/credentials/fake/fake_credentials.c +23 -0
- data/src/core/lib/security/credentials/fake/fake_credentials.h +12 -9
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_credentials.c +1 -1
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.c +1 -1
- data/src/core/lib/security/credentials/ssl/ssl_credentials.c +24 -53
- data/src/core/lib/security/transport/client_auth_filter.c +9 -3
- data/src/core/lib/security/transport/secure_endpoint.c +7 -7
- data/src/core/lib/security/transport/secure_endpoint.h +1 -1
- data/src/core/lib/security/transport/security_connector.c +45 -57
- data/src/core/lib/security/transport/security_connector.h +10 -14
- data/src/core/lib/security/transport/security_handshaker.c +123 -97
- data/src/core/lib/slice/b64.c +1 -1
- data/src/core/lib/slice/percent_encoding.c +3 -3
- data/src/core/lib/slice/slice.c +66 -33
- data/src/core/lib/slice/slice_buffer.c +25 -6
- data/src/core/lib/slice/slice_hash_table.c +33 -35
- data/src/core/lib/slice/slice_hash_table.h +7 -12
- data/src/core/lib/support/atomic.h +45 -0
- data/src/core/lib/support/atomic_with_atm.h +70 -0
- data/src/core/lib/support/atomic_with_std.h +48 -0
- data/src/core/lib/support/avl.c +14 -14
- data/src/core/lib/support/cmdline.c +3 -3
- data/src/core/lib/support/histogram.c +2 -2
- data/src/core/lib/support/host_port.c +1 -1
- data/src/core/lib/support/memory.h +74 -0
- data/src/core/lib/support/mpscq.c +36 -2
- data/src/core/lib/support/mpscq.h +28 -1
- data/src/core/lib/support/stack_lockfree.c +3 -36
- data/src/core/lib/support/string.c +12 -12
- data/src/core/lib/support/string_posix.c +1 -1
- data/src/core/lib/support/subprocess_posix.c +2 -2
- data/src/core/lib/support/thd_posix.c +1 -1
- data/src/core/lib/support/time_posix.c +8 -0
- data/src/core/lib/support/tmpfile_posix.c +10 -10
- data/src/core/lib/surface/alarm.c +3 -1
- data/src/core/lib/surface/api_trace.c +2 -1
- data/src/core/lib/surface/api_trace.h +2 -2
- data/src/core/lib/surface/byte_buffer_reader.c +1 -1
- data/src/core/lib/surface/call.c +65 -22
- data/src/core/lib/surface/call.h +4 -2
- data/src/core/lib/surface/channel_init.c +2 -19
- data/src/core/lib/surface/channel_stack_type.c +18 -0
- data/src/core/lib/surface/channel_stack_type.h +2 -0
- data/src/core/lib/surface/completion_queue.c +694 -247
- data/src/core/lib/surface/completion_queue.h +30 -13
- data/src/core/lib/surface/completion_queue_factory.c +24 -9
- data/src/core/lib/surface/init.c +1 -52
- data/src/core/lib/surface/{lame_client.c → lame_client.cc} +37 -26
- data/src/core/lib/surface/server.c +79 -110
- data/src/core/lib/surface/server.h +2 -1
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/bdp_estimator.c +25 -9
- data/src/core/lib/transport/bdp_estimator.h +7 -1
- data/src/core/lib/transport/byte_stream.c +23 -9
- data/src/core/lib/transport/byte_stream.h +15 -6
- data/src/core/lib/transport/connectivity_state.c +6 -6
- data/src/core/lib/transport/connectivity_state.h +2 -1
- data/src/core/lib/transport/service_config.c +6 -13
- data/src/core/lib/transport/service_config.h +2 -2
- data/src/core/lib/transport/static_metadata.c +403 -389
- data/src/core/lib/transport/static_metadata.h +127 -114
- data/src/core/plugin_registry/grpc_plugin_registry.c +16 -0
- data/src/core/tsi/fake_transport_security.c +5 -4
- data/src/core/tsi/ssl_transport_security.c +71 -82
- data/src/core/tsi/ssl_transport_security.h +39 -61
- data/src/core/tsi/transport_security.c +83 -2
- data/src/core/tsi/transport_security.h +27 -2
- data/src/core/tsi/transport_security_adapter.c +236 -0
- data/src/core/tsi/transport_security_adapter.h +62 -0
- data/src/core/tsi/transport_security_interface.h +179 -66
- data/src/ruby/ext/grpc/extconf.rb +2 -1
- data/src/ruby/ext/grpc/rb_byte_buffer.c +8 -6
- data/src/ruby/ext/grpc/rb_call.c +56 -48
- data/src/ruby/ext/grpc/rb_call.h +3 -4
- data/src/ruby/ext/grpc/rb_call_credentials.c +23 -22
- data/src/ruby/ext/grpc/rb_channel.c +2 -3
- data/src/ruby/ext/grpc/rb_channel_args.c +11 -9
- data/src/ruby/ext/grpc/rb_channel_credentials.c +16 -12
- data/src/ruby/ext/grpc/rb_completion_queue.c +7 -9
- data/src/ruby/ext/grpc/rb_compression_options.c +7 -6
- data/src/ruby/ext/grpc/rb_event_thread.c +10 -12
- data/src/ruby/ext/grpc/rb_event_thread.h +1 -2
- data/src/ruby/ext/grpc/rb_grpc.c +11 -15
- data/src/ruby/ext/grpc/rb_grpc.h +2 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +16 -6
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +25 -10
- data/src/ruby/ext/grpc/rb_server.c +26 -28
- data/src/ruby/lib/grpc/grpc.rb +1 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/third_party/cares/config_linux/ares_config.h +36 -2
- data/third_party/zlib/adler32.c +14 -7
- data/third_party/zlib/compress.c +24 -18
- data/third_party/zlib/crc32.c +29 -12
- data/third_party/zlib/deflate.c +499 -303
- data/third_party/zlib/deflate.h +19 -16
- data/third_party/zlib/gzguts.h +16 -7
- data/third_party/zlib/gzlib.c +17 -14
- data/third_party/zlib/gzread.c +108 -48
- data/third_party/zlib/gzwrite.c +210 -122
- data/third_party/zlib/infback.c +2 -2
- data/third_party/zlib/inffast.c +34 -51
- data/third_party/zlib/inflate.c +86 -37
- data/third_party/zlib/inflate.h +7 -4
- data/third_party/zlib/inftrees.c +12 -14
- data/third_party/zlib/trees.c +38 -61
- data/third_party/zlib/uncompr.c +66 -32
- data/third_party/zlib/zconf.h +32 -9
- data/third_party/zlib/zlib.h +298 -154
- data/third_party/zlib/zutil.c +25 -24
- data/third_party/zlib/zutil.h +35 -17
- metadata +63 -30
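
The headline change in this release is the io-manager rework visible in the list above: the single ev_epoll_linux engine is renamed to ev_epollsig_linux, and four new Linux polling engines are added (ev_epoll1, ev_epollex, ev_epoll_limited_pollers, ev_epoll_thread_pool), together with a runtime probe for EPOLLEXCLUSIVE support (is_epollexclusive_available.c). A minimal sketch of pinning a polling engine from application code via the GRPC_POLL_STRATEGY environment variable; the strategy names used here are inferred from the new file names, not verified against this release's ev_posix.c:

    /* Sketch: select a polling engine before grpc_init(). GRPC_POLL_STRATEGY
     * takes a comma-separated preference list; names that are unknown or
     * unsupported on the running kernel are skipped. */
    #include <stdlib.h>
    #include <grpc/grpc.h>

    int main(void) {
      setenv("GRPC_POLL_STRATEGY", "epollex,epoll1,poll", 1 /* overwrite */);
      grpc_init();
      /* ... normal channel/server setup ... */
      grpc_shutdown();
      return 0;
    }
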
data/src/core/lib/iomgr/ev_epoll_thread_pool_linux.h (new file)
@@ -0,0 +1,43 @@
/*
 *
 * Copyright 2017, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H
#define GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H

#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/port.h"

const grpc_event_engine_vtable *grpc_init_epoll_thread_pool_linux(
    bool requested_explicitly);

#endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL_THREAD_POOL_LINUX_H */
data/src/core/lib/iomgr/ev_epollex_linux.c (new file)
@@ -0,0 +1,1511 @@
/*
 *
 * Copyright 2017, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/lib/iomgr/port.h"

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL

#include "src/core/lib/iomgr/ev_epollex_linux.h"

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/spinlock.h"

/*******************************************************************************
 * Pollset-set sibling link
 */

typedef enum {
  PO_POLLING_GROUP,
  PO_POLLSET_SET,
  PO_POLLSET,
  PO_FD, /* ordering is important: we always want to lock pollsets before fds:
            this guarantees that using an fd as a pollable is safe */
  PO_EMPTY_POLLABLE,
  PO_COUNT
} polling_obj_type;

typedef struct polling_obj polling_obj;
typedef struct polling_group polling_group;

struct polling_obj {
  gpr_mu mu;
  polling_obj_type type;
  polling_group *group;
  struct polling_obj *next;
  struct polling_obj *prev;
};

struct polling_group {
  polling_obj po;
  gpr_refcount refs;
};

static void po_init(polling_obj *po, polling_obj_type type);
static void po_destroy(polling_obj *po);
static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b);
static int po_cmp(polling_obj *a, polling_obj *b);

static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po,
                      size_t initial_po_count);
static polling_group *pg_ref(polling_group *pg);
static void pg_unref(polling_group *pg);
static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
                     polling_group *b);
static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
                    polling_obj *po);

/*******************************************************************************
 * pollable Declarations
 */

typedef struct pollable {
  polling_obj po;
  int epfd;
  grpc_wakeup_fd wakeup;
  grpc_pollset_worker *root_worker;
} pollable;

static pollable g_empty_pollable;

static void pollable_init(pollable *p, polling_obj_type type);
static void pollable_destroy(pollable *p);
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable *p);

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  pollable pollable;
  int fd;
  /* refst format:
       bit 0    : 1=Active / 0=Orphaned
       bits 1-n : refcount
     Ref/Unref by two to avoid altering the orphaned bit */
  gpr_atm refst;

  /* Wakeup fd used to wake pollers to check the contents of workqueue_items */
  grpc_wakeup_fd workqueue_wakeup_fd;
  grpc_closure_scheduler workqueue_scheduler;
  /* Spinlock guarding the read end of the workqueue (must be held to pop from
   * workqueue_items) */
  gpr_spinlock workqueue_read_mu;
  /* Queue of closures to be executed */
  gpr_mpscq workqueue_items;
  /* Count of items in workqueue_items */
  gpr_atm workqueue_item_count;

  /* The fd is either closed or we relinquished control of it. In either
     cases, this indicates that the 'fd' on this structure is no longer
     valid */
  gpr_mu orphaned_mu;
  bool orphaned;

  gpr_atm read_closure;
  gpr_atm write_closure;

  struct grpc_fd *freelist_next;
  grpc_closure *on_done_closure;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                              grpc_error *error);

static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
    workqueue_enqueue, workqueue_enqueue, "workqueue"};

/*******************************************************************************
 * Pollset Declarations
 */

typedef struct pollset_worker_link {
  grpc_pollset_worker *next;
  grpc_pollset_worker *prev;
} pollset_worker_link;

typedef enum {
  PWL_POLLSET,
  PWL_POLLABLE,
  POLLSET_WORKER_LINK_COUNT
} pollset_worker_links;

struct grpc_pollset_worker {
  bool kicked;
  bool initialized_cv;
  pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
  gpr_cv cv;
  grpc_pollset *pollset;
  pollable *pollable;
};

struct grpc_pollset {
  pollable pollable;
  pollable *current_pollable;
  bool kicked_without_poller;
  grpc_closure *shutdown_closure;
  grpc_pollset_worker *root_worker;
};

/*******************************************************************************
 * Pollset-set Declarations
 */
struct grpc_pollset_set {
  polling_obj po;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error **composite, grpc_error *error,
                         const char *desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

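append_error() above is this file's composite-error idiom: it returns true only when no error was appended, so callers can gate later steps on earlier ones while still collecting every failure under a single parent error. An illustrative caller (the same shape as fd_become_pollable_locked(), which appears further down in this diff; the function name here is hypothetical):

    static grpc_error *materialize_and_add(pollable *p, grpc_fd *fd) {
      grpc_error *composite = GRPC_ERROR_NONE;
      static const char *err_desc = "materialize_and_add";
      /* the second step runs only if the first appended no error */
      if (append_error(&composite, pollable_materialize(p), err_desc)) {
        append_error(&composite, pollable_add_fd(p, fd), err_desc);
      }
      return composite; /* GRPC_ERROR_NONE, or one error with children */
    }
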
/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(ec, fd, n, reason) \
  unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  grpc_fd *fd = arg;
  /* Add the fd to the freelist */
  grpc_iomgr_unregister_object(&fd->iomgr_object);
  pollable_destroy(&fd->pollable);
  gpr_mu_destroy(&fd->orphaned_mu);
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;

  grpc_lfev_destroy(&fd->read_closure);
  grpc_lfev_destroy(&fd->write_closure);

  gpr_mu_unlock(&fd_freelist_mu);
}

#ifdef GRPC_FD_REF_COUNT_DEBUG
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
                     const char *reason, const char *file, int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    grpc_closure_sched(exec_ctx, grpc_closure_create(fd_destroy, fd,
                                                     grpc_schedule_on_exec_ctx),
                       GRPC_ERROR_NONE);
  } else {
    GPR_ASSERT(old > n);
  }
}

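A worked trace of the ref-by-two scheme implemented by ref_by()/unref_by() above (annotation, not part of the diff): bit 0 of refst is the active/orphaned flag, so ordinary references move the counter in steps of two and never disturb it.

    /* fd_create()             refst = 1  (bit 0 set: active, zero extra refs)
     * REF_BY(fd, 2, ...)      refst = 3  (one reference; bit 0 untouched)
     * fd_orphan():
     *   REF_BY(fd, 1, ...)    refst = 4  (+1 now ...)
     *   UNREF_BY(fd, 2, ...)  refst = 2  (... -2 later: net -1 clears bit 0)
     * UNREF_BY(fd, 2, ...)    old == n, so fd_destroy() is scheduled */
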
static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
  }

  pollable_init(&new_fd->pollable, PO_FD);

  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  new_fd->fd = fd;
  gpr_mu_init(&new_fd->orphaned_mu);
  new_fd->orphaned = false;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  GRPC_LOG_IF_ERROR("fd_create",
                    grpc_wakeup_fd_init(&new_fd->workqueue_wakeup_fd));
  new_fd->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
  new_fd->workqueue_read_mu = GPR_SPINLOCK_INITIALIZER;
  gpr_mpscq_init(&new_fd->workqueue_items);
  gpr_atm_no_barrier_store(&new_fd->workqueue_item_count, 0);

  new_fd->freelist_next = NULL;
  new_fd->on_done_closure = NULL;

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
  gpr_free(fd_name);
  return new_fd;
}

static int fd_wrapped_fd(grpc_fd *fd) {
  int ret_fd = -1;
  gpr_mu_lock(&fd->orphaned_mu);
  if (!fd->orphaned) {
    ret_fd = fd->fd;
  }
  gpr_mu_unlock(&fd->orphaned_mu);

  return ret_fd;
}

static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      const char *reason) {
  bool is_fd_closed = false;
  grpc_error *error = GRPC_ERROR_NONE;

  gpr_mu_lock(&fd->pollable.po.mu);
  gpr_mu_lock(&fd->orphaned_mu);
  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
    is_fd_closed = true;
  }

  fd->orphaned = true;

  if (!is_fd_closed) {
    gpr_log(GPR_DEBUG, "TODO: handle fd removal?");
  }

  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);

  grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));

  gpr_mu_unlock(&fd->orphaned_mu);
  gpr_mu_unlock(&fd->pollable.po.mu);
  UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
  GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
  GRPC_ERROR_UNREF(error);
}

static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_fd *fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset *)notifier;
}

static bool fd_is_shutdown(grpc_fd *fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
  if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
                             GRPC_ERROR_REF(why))) {
    shutdown(fd->fd, SHUT_RDWR);
    grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
}

static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}

static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
  REF_BY(fd, 2, "return_workqueue");
  return (grpc_workqueue *)fd;
}

#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
                                     const char *file, int line,
                                     const char *reason) {
  if (workqueue != NULL) {
    ref_by((grpc_fd *)workqueue, 2, file, line, reason);
  }
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                            const char *file, int line, const char *reason) {
  if (workqueue != NULL) {
    unref_by(exec_ctx, (grpc_fd *)workqueue, 2, file, line, reason);
  }
}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
  if (workqueue != NULL) {
    ref_by((grpc_fd *)workqueue, 2);
  }
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                            grpc_workqueue *workqueue) {
  if (workqueue != NULL) {
    unref_by(exec_ctx, (grpc_fd *)workqueue, 2);
  }
}
#endif

static void workqueue_wakeup(grpc_fd *fd) {
  GRPC_LOG_IF_ERROR("workqueue_enqueue",
                    grpc_wakeup_fd_wakeup(&fd->workqueue_wakeup_fd));
}

static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                              grpc_error *error) {
  GPR_TIMER_BEGIN("workqueue.enqueue", 0);
  grpc_fd *fd = (grpc_fd *)(((char *)closure->scheduler) -
                            offsetof(grpc_fd, workqueue_scheduler));
  REF_BY(fd, 2, "workqueue_enqueue");
  gpr_atm last = gpr_atm_no_barrier_fetch_add(&fd->workqueue_item_count, 1);
  closure->error_data.error = error;
  gpr_mpscq_push(&fd->workqueue_items, &closure->next_data.atm_next);
  if (last == 0) {
    workqueue_wakeup(fd);
  }
  UNREF_BY(exec_ctx, fd, 2, "workqueue_enqueue");
}

static void fd_invoke_workqueue(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  /* handle spurious wakeups */
  if (!gpr_spinlock_trylock(&fd->workqueue_read_mu)) return;
  gpr_mpscq_node *n = gpr_mpscq_pop(&fd->workqueue_items);
  gpr_spinlock_unlock(&fd->workqueue_read_mu);
  if (n != NULL) {
    if (gpr_atm_full_fetch_add(&fd->workqueue_item_count, -1) > 1) {
      workqueue_wakeup(fd);
    }
    grpc_closure *c = (grpc_closure *)n;
    grpc_error *error = c->error_data.error;
#ifndef NDEBUG
    c->scheduled = false;
#endif
    c->cb(exec_ctx, c->cb_arg, error);
    GRPC_ERROR_UNREF(error);
  } else if (gpr_atm_no_barrier_load(&fd->workqueue_item_count) > 0) {
    /* n == NULL might mean there's work but it's not available to be popped
     * yet - try to ensure another workqueue wakes up to check shortly if so
     */
    workqueue_wakeup(fd);
  }
}

static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  return &((grpc_fd *)workqueue)->workqueue_scheduler;
}

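The workqueue above is a per-fd MPSC queue drained by whichever poller the workqueue wakeup fd happens to wake. A hedged sketch of the producer side, assuming the grpc_closure_init()/grpc_closure_sched() internal API of this release (schedule_on_fd is a hypothetical helper, not in the diff):

    /* Illustrative producer: route a closure through an fd's embedded
     * workqueue scheduler so it runs on a polling thread for that fd. */
    static void schedule_on_fd(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_iomgr_cb_func cb, void *arg,
                               grpc_closure *c) {
      grpc_closure_init(c, cb, arg, &fd->workqueue_scheduler);
      /* grpc_closure_sched() dispatches through the scheduler vtable, which
       * lands in workqueue_enqueue() above and wakes a poller if needed. */
      grpc_closure_sched(exec_ctx, c, GRPC_ERROR_NONE);
    }
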
/*******************************************************************************
 * Pollable Definitions
 */

static void pollable_init(pollable *p, polling_obj_type type) {
  po_init(&p->po, type);
  p->root_worker = NULL;
  p->epfd = -1;
}

static void pollable_destroy(pollable *p) {
  po_destroy(&p->po);
  if (p->epfd != -1) {
    close(p->epfd);
    grpc_wakeup_fd_destroy(&p->wakeup);
  }
}

/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable *p) {
  if (p->epfd == -1) {
    int new_epfd = epoll_create1(EPOLL_CLOEXEC);
    if (new_epfd < 0) {
      return GRPC_OS_ERROR(errno, "epoll_create1");
    }
    grpc_error *err = grpc_wakeup_fd_init(&p->wakeup);
    if (err != GRPC_ERROR_NONE) {
      close(new_epfd);
      return err;
    }
    struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
                             .data.ptr = &p->wakeup};
    if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
      err = GRPC_OS_ERROR(errno, "epoll_ctl");
      close(new_epfd);
      grpc_wakeup_fd_destroy(&p->wakeup);
      return err;
    }

    p->epfd = new_epfd;
  }
  return GRPC_ERROR_NONE;
}

/* pollable must be materialized */
static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollable_add_fd";
  const int epfd = p->epfd;
  GPR_ASSERT(epfd != -1);

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "add fd %p to pollable %p", fd, p);
  }

  gpr_mu_lock(&fd->orphaned_mu);
  if (fd->orphaned) {
    gpr_mu_unlock(&fd->orphaned_mu);
    return GRPC_ERROR_NONE;
  }
  struct epoll_event ev_fd = {
      .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE),
      .data.ptr = fd};
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
    switch (errno) {
      case EEXIST: /* if this fd is already in the epoll set, the workqueue fd
                      must also be - just return */
        gpr_mu_unlock(&fd->orphaned_mu);
        return GRPC_ERROR_NONE;
      default:
        append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
    }
  }
  struct epoll_event ev_wq = {
      .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE),
      .data.ptr = (void *)(1 + (intptr_t)fd)};
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->workqueue_wakeup_fd.read_fd, &ev_wq) !=
      0) {
    switch (errno) {
      case EEXIST: /* if the workqueue fd is already in the epoll set we're ok
                      - no need to do anything special */
        break;
      default:
        append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
    }
  }
  gpr_mu_unlock(&fd->orphaned_mu);

  return error;
}

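pollable_add_fd() above leans on two tricks: EPOLLEXCLUSIVE (Linux 4.5+, probed at startup by the new is_epollexclusive_available.c) so that at most one of the epoll instances sharing an fd is woken per event, and a tagged data.ptr whose low bit distinguishes the fd itself from its workqueue wakeup fd (the `1 + (intptr_t)fd` registration, unmasked again in pollset_epoll() below). A standalone sketch of the registration, assuming a kernel and headers new enough to define EPOLLEXCLUSIVE:

    #include <stdint.h>
    #include <sys/epoll.h>

    /* Register `fd` twice under one epfd: once for I/O (tag = ptr) and once
     * for its auxiliary wakeup fd (tag = ptr | 1). EPOLLEXCLUSIVE keeps the
     * kernel from waking every epoll instance watching the same fd. */
    static int add_exclusive(int epfd, int fd, int wakeup_fd, void *ptr) {
      struct epoll_event ev_io = {
          .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE),
          .data.ptr = ptr};
      if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev_io) != 0) return -1;
      struct epoll_event ev_wq = {
          .events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLEXCLUSIVE),
          .data.ptr = (void *)(1 + (intptr_t)ptr)};
      if (epoll_ctl(epfd, EPOLL_CTL_ADD, wakeup_fd, &ev_wq) != 0) return -1;
      return 0; /* fails with EINVAL on kernels older than 4.5 */
    }
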
/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* Global state management */
static grpc_error *pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  pollable_init(&g_empty_pollable, PO_EMPTY_POLLABLE);
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  pollable_destroy(&g_empty_pollable);
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}

static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker *worker = pollset->root_worker;
    do {
      if (worker->pollable != &pollset->pollable) {
        gpr_mu_lock(&worker->pollable->po.mu);
      }
      if (worker->initialized_cv) {
        worker->kicked = true;
        gpr_cv_signal(&worker->cv);
      } else {
        append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup),
                     "pollset_shutdown");
      }
      if (worker->pollable != &pollset->pollable) {
        gpr_mu_unlock(&worker->pollable->po.mu);
      }

      worker = worker->links[PWL_POLLSET].next;
    } while (worker != pollset->root_worker);
  }
  return error;
}

static grpc_error *pollset_kick_inner(grpc_pollset *pollset, pollable *p,
                                      grpc_pollset_worker *specific_worker) {
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG,
            "PS:%p kick %p tls_pollset=%p tls_worker=%p "
            "root_worker=(pollset:%p pollable:%p)",
            p, specific_worker, (void *)gpr_tls_get(&g_current_thread_pollset),
            (void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker,
            p->root_worker);
  }
  if (specific_worker == NULL) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      if (pollset->root_worker == NULL) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", p);
        }
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      } else {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p kicked_any_via_wakeup_fd", p);
        }
        grpc_error *err = pollable_materialize(p);
        if (err != GRPC_ERROR_NONE) return err;
        return grpc_wakeup_fd_wakeup(&p->wakeup);
      }
    } else {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", p);
      }
      return GRPC_ERROR_NONE;
    }
  } else if (specific_worker->kicked) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
    }
    return GRPC_ERROR_NONE;
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
    }
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  } else if (specific_worker == p->root_worker) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
    }
    grpc_error *err = pollable_materialize(p);
    if (err != GRPC_ERROR_NONE) return err;
    specific_worker->kicked = true;
    return grpc_wakeup_fd_wakeup(&p->wakeup);
  } else {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
    }
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
}

/* p->po.mu must be held before calling this function */
static grpc_error *pollset_kick(grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  pollable *p = pollset->current_pollable;
  if (p != &pollset->pollable) {
    gpr_mu_lock(&p->po.mu);
  }
  grpc_error *error = pollset_kick_inner(pollset, p, specific_worker);
  if (p != &pollset->pollable) {
    gpr_mu_unlock(&p->po.mu);
  }
  return error;
}

static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  pollable_init(&pollset->pollable, PO_POLLSET);
  pollset->current_pollable = &g_empty_pollable;
  pollset->kicked_without_poller = false;
  pollset->shutdown_closure = NULL;
  pollset->root_worker = NULL;
  *mu = &pollset->pollable.po.mu;
}

/* Convert a timespec to milliseconds:
   - Very small or negative poll times are clamped to zero to do a non-blocking
     poll (which becomes spin polling)
   - Other small values are rounded up to one millisecond
   - Longer than a millisecond polls are rounded up to the next nearest
     millisecond to avoid spinning
   - Infinite timeouts are converted to -1 */
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                           gpr_timespec now) {
  gpr_timespec timeout;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }

  if (gpr_time_cmp(deadline, now) <= 0) {
    return 0;
  }

  static const gpr_timespec round_up = {
      .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
  timeout = gpr_time_sub(deadline, now);
  int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
  return millis >= 1 ? millis : 1;
}

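Concrete values for the conversion rules above (annotation, not part of the diff): the remaining time is padded by GPR_NS_PER_MS - 1 nanoseconds before truncating to milliseconds, so every positive wait rounds up, while an expired deadline polls non-blocking and an infinite one blocks until kicked.

    /* deadline - now          returned epoll timeout
     * inf_future              -1  (block until an event or a kick)
     * <= 0 (already expired)   0  (non-blocking poll)
     * 0.4 ms                   1  (never spin on sub-millisecond waits)
     * 2 ms exactly             2
     * 2.3 ms                   3  (rounded up to the next millisecond) */
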
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (This can happen briefly during polling island merges). In such cases
     it does not really matter which notifer is set as the read_notifier_pollset
     (They would both point to the same polling island anyway) */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}

static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "fd_become_pollable";
  if (append_error(&error, pollable_materialize(&fd->pollable), err_desc)) {
    append_error(&error, pollable_add_fd(&fd->pollable, fd), err_desc);
  }
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset) {
  if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
    grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = NULL;
  }
}

/* pollset->po.mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
}

static bool pollset_is_pollable_fd(grpc_pollset *pollset, pollable *p) {
  return p != &g_empty_pollable && p != &pollset->pollable;
}

/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
  pollable_destroy(&pollset->pollable);
  if (pollset_is_pollable_fd(pollset, pollset->current_pollable)) {
    UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2,
             "pollset_pollable");
  }
}

#define MAX_EPOLL_EVENTS 100

static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 pollable *p, gpr_timespec now,
                                 gpr_timespec deadline) {
  struct epoll_event events[MAX_EPOLL_EVENTS];
  static const char *err_desc = "pollset_poll";

  int timeout = poll_deadline_to_millis_timeout(deadline, now);

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PS:%p poll %p for %dms", pollset, p, timeout);
  }

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    r = epoll_wait(p->epfd, events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PS:%p poll %p got %d events", pollset, p, r);
  }

  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; i < r; i++) {
    void *data_ptr = events[i].data.ptr;
    if (data_ptr == &p->wakeup) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, "PS:%p poll %p got pollset_wakeup", pollset, p);
      }
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&p->wakeup), err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)(((intptr_t)data_ptr) & ~(intptr_t)1);
      bool is_workqueue = (((intptr_t)data_ptr) & 1) != 0;
      bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (events[i].events & EPOLLOUT) != 0;
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG,
                "PS:%p poll %p got fd %p: is_wq=%d cancel=%d read=%d "
                "write=%d",
                pollset, p, fd, is_workqueue, cancel, read_ev, write_ev);
      }
      if (is_workqueue) {
        append_error(&error,
                     grpc_wakeup_fd_consume_wakeup(&fd->workqueue_wakeup_fd),
                     err_desc);
        fd_invoke_workqueue(exec_ctx, fd);
      } else {
        if (read_ev || cancel) {
          fd_become_readable(exec_ctx, fd, pollset);
        }
        if (write_ev || cancel) {
          fd_become_writable(exec_ctx, fd);
        }
      }
    }
  }

  return error;
}

/* Return true if first in list */
static bool worker_insert(grpc_pollset_worker **root, pollset_worker_links link,
                          grpc_pollset_worker *worker) {
  if (*root == NULL) {
    *root = worker;
    worker->links[link].next = worker->links[link].prev = worker;
    return true;
  } else {
    worker->links[link].next = *root;
    worker->links[link].prev = worker->links[link].next->links[link].prev;
    worker->links[link].next->links[link].prev = worker;
    worker->links[link].prev->links[link].next = worker;
    return false;
  }
}

/* Return true if last in list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset_worker **root,
                                          pollset_worker_links link,
                                          grpc_pollset_worker *worker) {
  if (worker == *root) {
    if (worker == worker->links[link].next) {
      *root = NULL;
      return EMPTIED;
    } else {
      *root = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
      return NEW_ROOT;
    }
  } else {
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
    return REMOVED;
  }
}

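worker_insert()/worker_remove() above maintain an intrusive circular doubly-linked ring per link type; a short trace of the return values that begin_worker()/end_worker() below depend on (annotation, not part of the diff):

    /* insert A into empty list: root = A, A.next = A.prev = A  -> true
     * insert B:                 ring A <-> B, root still A     -> false
     * remove A (the root):      root advances to B             -> NEW_ROOT
     *                           (end_worker signals the new root's cv)
     * remove B:                 ring empty, root = NULL        -> EMPTIED
     *                           (last worker out finishes any shutdown) */
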
/* Return true if this thread should poll */
|
947
|
+
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
|
948
|
+
grpc_pollset_worker **worker_hdl, gpr_timespec *now,
|
949
|
+
gpr_timespec deadline) {
|
950
|
+
bool do_poll = true;
|
951
|
+
if (worker_hdl != NULL) *worker_hdl = worker;
|
952
|
+
worker->initialized_cv = false;
|
953
|
+
worker->kicked = false;
|
954
|
+
worker->pollset = pollset;
|
955
|
+
worker->pollable = pollset->current_pollable;
|
956
|
+
|
957
|
+
if (pollset_is_pollable_fd(pollset, worker->pollable)) {
|
958
|
+
REF_BY((grpc_fd *)worker->pollable, 2, "one_poll");
|
959
|
+
}
|
960
|
+
|
961
|
+
worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
|
962
|
+
if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
|
963
|
+
worker->initialized_cv = true;
|
964
|
+
gpr_cv_init(&worker->cv);
|
965
|
+
if (worker->pollable != &pollset->pollable) {
|
966
|
+
gpr_mu_unlock(&pollset->pollable.po.mu);
|
967
|
+
}
|
968
|
+
if (GRPC_TRACER_ON(grpc_polling_trace) &&
|
969
|
+
worker->pollable->root_worker != worker) {
|
970
|
+
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
|
971
|
+
worker->pollable, worker,
|
972
|
+
poll_deadline_to_millis_timeout(deadline, *now));
|
973
|
+
}
|
974
|
+
while (do_poll && worker->pollable->root_worker != worker) {
|
975
|
+
if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) {
|
976
|
+
if (GRPC_TRACER_ON(grpc_polling_trace)) {
|
977
|
+
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
|
978
|
+
worker->pollable, worker);
|
979
|
+
}
|
980
|
+
do_poll = false;
|
981
|
+
} else if (worker->kicked) {
|
982
|
+
if (GRPC_TRACER_ON(grpc_polling_trace)) {
|
983
|
+
gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable,
|
984
|
+
worker);
|
985
|
+
}
|
986
|
+
do_poll = false;
|
987
|
+
} else if (GRPC_TRACER_ON(grpc_polling_trace) &&
|
988
|
+
worker->pollable->root_worker != worker) {
|
989
|
+
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
|
990
|
+
worker->pollable, worker);
|
991
|
+
}
|
992
|
+
}
|
993
|
+
if (worker->pollable != &pollset->pollable) {
|
994
|
+
gpr_mu_unlock(&worker->pollable->po.mu);
|
995
|
+
gpr_mu_lock(&pollset->pollable.po.mu);
|
996
|
+
gpr_mu_lock(&worker->pollable->po.mu);
|
997
|
+
}
|
998
|
+
*now = gpr_now(now->clock_type);
|
999
|
+
}
|
1000
|
+
|
1001
|
+
return do_poll && pollset->shutdown_closure == NULL &&
|
1002
|
+
pollset->current_pollable == worker->pollable;
|
1003
|
+
}
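
/* Only the root worker of a pollable actually calls epoll; later arrivals
   block on a per-worker condition variable until they are promoted to root,
   kicked, or time out. This keeps at most one thread in the kernel per
   pollable while still letting many threads offer to poll. */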

static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (NEW_ROOT ==
      worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
    gpr_cv_signal(&worker->pollable->root_worker->cv);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (pollset_is_pollable_fd(pollset, worker->pollable)) {
    UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
  }
  if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
}

/* pollset->po.mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->po.mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  grpc_pollset_worker worker;
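  /* note: the constant 0 below deliberately short-circuits this very chatty
     entry trace even when grpc_polling_trace is enabled */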
  if (0 && GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRId64
                       ".%09d deadline=%" PRId64 ".%09d kwp=%d root_worker=%p",
            pollset, worker_hdl, &worker, now.tv_sec, now.tv_nsec,
            deadline.tv_sec, deadline.tv_nsec, pollset->kicked_without_poller,
            pollset->root_worker);
  }
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_work";
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }
  if (pollset->current_pollable != &pollset->pollable) {
    gpr_mu_lock(&pollset->current_pollable->po.mu);
  }
  if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!pollset->shutdown_closure);
    append_error(&error, pollable_materialize(worker.pollable), err_desc);
    if (worker.pollable != &pollset->pollable) {
      gpr_mu_unlock(&worker.pollable->po.mu);
    }
    gpr_mu_unlock(&pollset->pollable.po.mu);
    append_error(&error, pollset_epoll(exec_ctx, pollset, worker.pollable, now,
                                       deadline),
                 err_desc);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->pollable.po.mu);
    if (worker.pollable != &pollset->pollable) {
      gpr_mu_lock(&worker.pollable->po.mu);
    }
    gpr_tls_set(&g_current_thread_pollset, 0);
    gpr_tls_set(&g_current_thread_worker, 0);
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  end_worker(exec_ctx, pollset, &worker, worker_hdl);
  if (worker.pollable != &pollset->pollable) {
    gpr_mu_unlock(&worker.pollable->po.mu);
  }
  return error;
}
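
/* Sketch of the call pattern (not part of this file): a driver thread holds
   the pollset mutex, calls pollset_work() with a deadline, and loops until
   shutdown; if a kick arrives while no worker is present, it is recorded in
   kicked_without_poller and satisfied immediately by the next caller. */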

static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg,
                                      grpc_error *error) {
  grpc_fd *fd = arg;
  UNREF_BY(exec_ctx, fd, 2, "pollset_pollable");
}

/* expects pollsets locked, flag whether fd is locked or not */
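/* A pollset's current_pollable moves through three states: g_empty_pollable
   (no fds), a single fd's own pollable, and the pollset's multipoller; each
   transition below kicks all workers so they re-evaluate what to poll. */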
static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
                                         grpc_pollset *pollset, grpc_fd *fd,
                                         bool fd_locked) {
  static const char *err_desc = "pollset_add_fd";
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->current_pollable == &g_empty_pollable) {
    if (GRPC_TRACER_ON(grpc_polling_trace))
      gpr_log(GPR_DEBUG,
              "PS:%p add fd %p; transition pollable from empty to fd", pollset,
              fd);
    /* empty pollable --> single fd pollable */
    append_error(&error, pollset_kick_all(pollset), err_desc);
    pollset->current_pollable = &fd->pollable;
    if (!fd_locked) gpr_mu_lock(&fd->pollable.po.mu);
    append_error(&error, fd_become_pollable_locked(fd), err_desc);
    if (!fd_locked) gpr_mu_unlock(&fd->pollable.po.mu);
    REF_BY(fd, 2, "pollset_pollable");
  } else if (pollset->current_pollable == &pollset->pollable) {
    if (GRPC_TRACER_ON(grpc_polling_trace))
      gpr_log(GPR_DEBUG, "PS:%p add fd %p; already multipolling", pollset, fd);
    append_error(&error, pollable_add_fd(pollset->current_pollable, fd),
                 err_desc);
  } else if (pollset->current_pollable != &fd->pollable) {
    grpc_fd *had_fd = (grpc_fd *)pollset->current_pollable;
    if (GRPC_TRACER_ON(grpc_polling_trace))
      gpr_log(GPR_DEBUG,
              "PS:%p add fd %p; transition pollable from fd %p to multipoller",
              pollset, fd, had_fd);
    append_error(&error, pollset_kick_all(pollset), err_desc);
    pollset->current_pollable = &pollset->pollable;
    if (append_error(&error, pollable_materialize(&pollset->pollable),
                     err_desc)) {
      pollable_add_fd(&pollset->pollable, had_fd);
      pollable_add_fd(&pollset->pollable, fd);
    }
    grpc_closure_sched(exec_ctx,
                       grpc_closure_create(unref_fd_no_longer_poller, had_fd,
                                           grpc_schedule_on_exec_ctx),
                       GRPC_ERROR_NONE);
  }
  return error;
}

static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {
  gpr_mu_lock(&pollset->pollable.po.mu);
  grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd, false);
  gpr_mu_unlock(&pollset->pollable.po.mu);
  GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}

/*******************************************************************************
 * Pollset-set Definitions
 */

static grpc_pollset_set *pollset_set_create(void) {
  grpc_pollset_set *pss = gpr_zalloc(sizeof(*pss));
  po_init(&pss->po, PO_POLLSET_SET);
  return pss;
}

static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *pss) {
  po_destroy(&pss->po);
  gpr_free(pss);
}

static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {
  po_join(exec_ctx, &pss->po, &fd->pollable.po);
}

static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {
  po_join(exec_ctx, &pss->po, &ps->pollable.po);
}

static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {
  po_join(exec_ctx, &bag->po, &item->po);
}

static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

static void po_init(polling_obj *po, polling_obj_type type) {
  gpr_mu_init(&po->mu);
  po->type = type;
  po->group = NULL;
  po->next = po;
  po->prev = po;
}

static polling_group *pg_lock_latest(polling_group *pg) {
  /* assumes pg unlocked; consumes ref, returns ref */
  gpr_mu_lock(&pg->po.mu);
  while (pg->po.group != NULL) {
    polling_group *new_pg = pg_ref(pg->po.group);
    gpr_mu_unlock(&pg->po.mu);
    pg_unref(pg);
    pg = new_pg;
    gpr_mu_lock(&pg->po.mu);
  }
  return pg;
}
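
/* Merged groups leave a forwarding pointer in po.group, much like a
   union-find parent link; pg_lock_latest chases that chain with
   hand-over-hand locking, swapping the held ref at each hop, until it holds
   the lock of the live (unmerged) group. */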

static void po_destroy(polling_obj *po) {
  if (po->group != NULL) {
    polling_group *pg = pg_lock_latest(po->group);
    po->prev->next = po->next;
    po->next->prev = po->prev;
    gpr_mu_unlock(&pg->po.mu);
    pg_unref(pg);
  }
  gpr_mu_destroy(&po->mu);
}

static polling_group *pg_ref(polling_group *pg) {
  gpr_ref(&pg->refs);
  return pg;
}

static void pg_unref(polling_group *pg) {
  if (gpr_unref(&pg->refs)) {
    po_destroy(&pg->po);
    gpr_free(pg);
  }
}

static int po_cmp(polling_obj *a, polling_obj *b) {
  if (a == b) return 0;
  if (a->type < b->type) return -1;
  if (a->type > b->type) return 1;
  if (a < b) return -1;
  assert(a > b);
  return 1;
}
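
/* po_cmp imposes a total order (by type, then by address) on polling
   objects; po_join, pg_broadcast and pg_join always acquire the two mutexes
   in this order, which is what makes the pairwise locking below
   deadlock-free. */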

static void po_join(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) {
  switch (po_cmp(a, b)) {
    case 0:
      return;
    case 1:
      GPR_SWAP(polling_obj *, a, b);
    /* fall through */
    case -1:
      gpr_mu_lock(&a->mu);
      gpr_mu_lock(&b->mu);

      if (a->group == NULL) {
        if (b->group == NULL) {
          polling_obj *initial_po[] = {a, b};
          pg_create(exec_ctx, initial_po, GPR_ARRAY_SIZE(initial_po));
          gpr_mu_unlock(&a->mu);
          gpr_mu_unlock(&b->mu);
        } else {
          polling_group *b_group = pg_ref(b->group);
          gpr_mu_unlock(&b->mu);
          gpr_mu_unlock(&a->mu);
          pg_join(exec_ctx, b_group, a);
        }
      } else if (b->group == NULL) {
        polling_group *a_group = pg_ref(a->group);
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
        pg_join(exec_ctx, a_group, b);
      } else if (a->group == b->group) {
        /* nothing to do */
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
      } else {
        polling_group *a_group = pg_ref(a->group);
        polling_group *b_group = pg_ref(b->group);
        gpr_mu_unlock(&a->mu);
        gpr_mu_unlock(&b->mu);
        pg_merge(exec_ctx, a_group, b_group);
      }
  }
}
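
/* The four branches above cover every grouping state: neither object grouped
   (create a fresh group), exactly one grouped (join the loner into the
   existing group), both in the same group (nothing to do), or both in
   different groups (merge the two groups). */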

static void pg_notify(grpc_exec_ctx *exec_ctx, polling_obj *a, polling_obj *b) {
  if (a->type == PO_FD && b->type == PO_POLLSET) {
    pollset_add_fd_locked(exec_ctx, (grpc_pollset *)b, (grpc_fd *)a, true);
  } else if (a->type == PO_POLLSET && b->type == PO_FD) {
    pollset_add_fd_locked(exec_ctx, (grpc_pollset *)a, (grpc_fd *)b, true);
  }
}

static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from,
                         polling_group *to) {
  for (polling_obj *a = from->po.next; a != &from->po; a = a->next) {
    for (polling_obj *b = to->po.next; b != &to->po; b = b->next) {
      if (po_cmp(a, b) < 0) {
        gpr_mu_lock(&a->mu);
        gpr_mu_lock(&b->mu);
      } else {
        GPR_ASSERT(po_cmp(a, b) != 0);
        gpr_mu_lock(&b->mu);
        gpr_mu_lock(&a->mu);
      }
      pg_notify(exec_ctx, a, b);
      gpr_mu_unlock(&a->mu);
      gpr_mu_unlock(&b->mu);
    }
  }
}

static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po,
                      size_t initial_po_count) {
  /* assumes all polling objects in initial_po are locked */
  polling_group *pg = gpr_malloc(sizeof(*pg));
  po_init(&pg->po, PO_POLLING_GROUP);
  gpr_ref_init(&pg->refs, (int)initial_po_count);
  for (size_t i = 0; i < initial_po_count; i++) {
    GPR_ASSERT(initial_po[i]->group == NULL);
    initial_po[i]->group = pg;
  }
  for (size_t i = 1; i < initial_po_count; i++) {
    initial_po[i]->prev = initial_po[i - 1];
  }
  for (size_t i = 0; i < initial_po_count - 1; i++) {
    initial_po[i]->next = initial_po[i + 1];
  }
  initial_po[0]->prev = &pg->po;
  initial_po[initial_po_count - 1]->next = &pg->po;
  pg->po.next = initial_po[0];
  pg->po.prev = initial_po[initial_po_count - 1];
  for (size_t i = 1; i < initial_po_count; i++) {
    for (size_t j = 0; j < i; j++) {
      pg_notify(exec_ctx, initial_po[i], initial_po[j]);
    }
  }
}
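
/* pg_create threads the initial objects into the group's circular ring (the
   group's own po acts as a sentinel node) and then cross-notifies every
   pair, so each fd in the new group gets registered with each pollset. */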

static void pg_join(grpc_exec_ctx *exec_ctx, polling_group *pg,
                    polling_obj *po) {
  /* assumes neither pg nor po are locked; consumes one ref to pg */
  pg = pg_lock_latest(pg);
  /* pg locked */
  for (polling_obj *existing = pg->po.next /* skip pg - it's just a stub */;
       existing != &pg->po; existing = existing->next) {
    if (po_cmp(po, existing) < 0) {
      gpr_mu_lock(&po->mu);
      gpr_mu_lock(&existing->mu);
    } else {
      GPR_ASSERT(po_cmp(po, existing) != 0);
      gpr_mu_lock(&existing->mu);
      gpr_mu_lock(&po->mu);
    }
    /* pg, po, existing locked */
    if (po->group != NULL) {
      gpr_mu_unlock(&pg->po.mu);
      polling_group *po_group = pg_ref(po->group);
      gpr_mu_unlock(&po->mu);
      gpr_mu_unlock(&existing->mu);
      pg_merge(exec_ctx, pg, po_group);
      /* early exit: polling obj picked up a group during joining: we needed
         to do a full merge */
      return;
    }
    pg_notify(exec_ctx, po, existing);
    gpr_mu_unlock(&po->mu);
    gpr_mu_unlock(&existing->mu);
  }
  gpr_mu_lock(&po->mu);
  if (po->group != NULL) {
    gpr_mu_unlock(&pg->po.mu);
    polling_group *po_group = pg_ref(po->group);
    gpr_mu_unlock(&po->mu);
    pg_merge(exec_ctx, pg, po_group);
    /* early exit: polling obj picked up a group during joining: we needed
       to do a full merge */
    return;
  }
  po->group = pg;
  po->next = &pg->po;
  po->prev = pg->po.prev;
  po->prev->next = po->next->prev = po;
  gpr_mu_unlock(&pg->po.mu);
  gpr_mu_unlock(&po->mu);
}

static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
                     polling_group *b) {
  for (;;) {
    if (a == b) {
      pg_unref(a);
      pg_unref(b);
      return;
    }
    if (a > b) GPR_SWAP(polling_group *, a, b);
    gpr_mu_lock(&a->po.mu);
    gpr_mu_lock(&b->po.mu);
    if (a->po.group != NULL) {
      polling_group *m2 = pg_ref(a->po.group);
      gpr_mu_unlock(&a->po.mu);
      gpr_mu_unlock(&b->po.mu);
      pg_unref(a);
      a = m2;
    } else if (b->po.group != NULL) {
      polling_group *m2 = pg_ref(b->po.group);
      gpr_mu_unlock(&a->po.mu);
      gpr_mu_unlock(&b->po.mu);
      pg_unref(b);
      b = m2;
    } else {
      break;
    }
  }
  polling_group **unref = NULL;
  size_t unref_count = 0;
  size_t unref_cap = 0;
  b->po.group = a;
  pg_broadcast(exec_ctx, a, b);
  pg_broadcast(exec_ctx, b, a);
  while (b->po.next != &b->po) {
    polling_obj *po = b->po.next;
    gpr_mu_lock(&po->mu);
    if (unref_count == unref_cap) {
      unref_cap = GPR_MAX(8, 3 * unref_cap / 2);
      unref = gpr_realloc(unref, unref_cap * sizeof(*unref));
    }
    unref[unref_count++] = po->group;
    po->group = pg_ref(a);
    // unlink from b
    po->prev->next = po->next;
    po->next->prev = po->prev;
    // link to a
    po->next = &a->po;
    po->prev = a->po.prev;
    po->next->prev = po->prev->next = po;
    gpr_mu_unlock(&po->mu);
  }
  gpr_mu_unlock(&a->po.mu);
  gpr_mu_unlock(&b->po.mu);
  for (size_t i = 0; i < unref_count; i++) {
    pg_unref(unref[i]);
  }
  gpr_free(unref);
  pg_unref(b);
}
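
/* Merge order: lock both groups (lower address first), chase forwarding
   pointers until both are live, point b at a, cross-broadcast so the members
   of each group learn about the other's, then splice b's members into a's
   ring. Group unrefs are collected into an array and released only after
   both group locks are dropped, so no group is destroyed while locked. */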

/*******************************************************************************
 * Event engine binding
 */

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}

static const grpc_event_engine_vtable vtable = {
    .pollset_size = sizeof(grpc_pollset),

    .fd_create = fd_create,
    .fd_wrapped_fd = fd_wrapped_fd,
    .fd_orphan = fd_orphan,
    .fd_shutdown = fd_shutdown,
    .fd_is_shutdown = fd_is_shutdown,
    .fd_notify_on_read = fd_notify_on_read,
    .fd_notify_on_write = fd_notify_on_write,
    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
    .fd_get_workqueue = fd_get_workqueue,

    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
    .pollset_add_fd = pollset_add_fd,

    .pollset_set_create = pollset_set_create,
    .pollset_set_destroy = pollset_set_destroy,
    .pollset_set_add_pollset = pollset_set_add_pollset,
    .pollset_set_del_pollset = pollset_set_del_pollset,
    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
    .pollset_set_add_fd = pollset_set_add_fd,
    .pollset_set_del_fd = pollset_set_del_fd,

    .workqueue_ref = workqueue_ref,
    .workqueue_unref = workqueue_unref,
    .workqueue_scheduler = workqueue_scheduler,

    .shutdown_engine = shutdown_engine,
};

const grpc_event_engine_vtable *grpc_init_epollex_linux(
    bool explicitly_requested) {
  if (!explicitly_requested) return NULL;

  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  if (!grpc_is_epollexclusive_available()) {
    return NULL;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    pollset_global_shutdown();
    fd_global_shutdown();
    return NULL;
  }

  return &vtable;
}
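
/* This engine is strictly opt-in: it returns NULL (letting iomgr fall back
   to another poller) unless it was explicitly requested and the host
   supports both wakeup fds and EPOLLEXCLUSIVE. Selection is presumably
   driven by the polling-strategy machinery in ev_posix.c (e.g. the
   GRPC_POLL_STRATEGY environment variable), which is outside this file. */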

#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
 * NULL */
const grpc_event_engine_vtable *grpc_init_epollex_linux(
    bool explicitly_requested) {
  return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */

#endif /* !defined(GRPC_LINUX_EPOLL) */