grpc 1.50.0-x86_64-linux → 1.51.0-x86_64-linux
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of grpc might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/Makefile +131 -42
- data/include/grpc/event_engine/event_engine.h +10 -3
- data/include/grpc/event_engine/slice_buffer.h +17 -0
- data/include/grpc/grpc.h +0 -10
- data/include/grpc/impl/codegen/grpc_types.h +1 -5
- data/include/grpc/impl/codegen/port_platform.h +0 -3
- data/src/core/ext/filters/channel_idle/channel_idle_filter.cc +19 -13
- data/src/core/ext/filters/channel_idle/channel_idle_filter.h +1 -0
- data/src/core/ext/filters/client_channel/backup_poller.cc +3 -3
- data/src/core/ext/filters/client_channel/channel_connectivity.cc +7 -5
- data/src/core/ext/filters/client_channel/client_channel.cc +120 -140
- data/src/core/ext/filters/client_channel/client_channel.h +3 -4
- data/src/core/ext/filters/client_channel/client_channel_channelz.cc +0 -2
- data/src/core/ext/filters/client_channel/client_channel_plugin.cc +1 -1
- data/src/core/ext/filters/client_channel/client_channel_service_config.cc +153 -0
- data/src/core/ext/filters/client_channel/{resolver_result_parsing.h → client_channel_service_config.h} +26 -23
- data/src/core/ext/filters/client_channel/connector.h +1 -1
- data/src/core/ext/filters/client_channel/dynamic_filters.cc +20 -47
- data/src/core/ext/filters/client_channel/dynamic_filters.h +7 -8
- data/src/core/ext/filters/client_channel/health/health_check_client.cc +3 -4
- data/src/core/ext/filters/client_channel/http_proxy.cc +0 -1
- data/src/core/ext/filters/client_channel/lb_policy/address_filtering.cc +3 -4
- data/src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc +5 -0
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc +8 -7
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc +35 -44
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc +0 -1
- data/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc +1 -3
- data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc +3 -4
- data/src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.h +1 -1
- data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc +41 -29
- data/src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.h +2 -2
- data/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc +9 -11
- data/src/core/ext/filters/client_channel/lb_policy/priority/priority.cc +15 -12
- data/src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc +8 -10
- data/src/core/ext/filters/client_channel/lb_policy/rls/rls.cc +26 -27
- data/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc +7 -9
- data/src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc +44 -26
- data/src/core/ext/filters/client_channel/lb_policy/xds/cds.cc +17 -27
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_attributes.cc +42 -0
- data/src/core/ext/filters/client_channel/lb_policy/xds/{xds.h → xds_attributes.h} +15 -17
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc +13 -7
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc +48 -47
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc +40 -126
- data/src/core/ext/filters/client_channel/lb_policy/xds/xds_wrr_locality.cc +364 -0
- data/src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc +9 -9
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc +23 -32
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc +1 -2
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +22 -23
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc +50 -52
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +1 -1
- data/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc +2 -4
- data/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc +1 -3
- data/src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc +34 -26
- data/src/core/ext/filters/client_channel/resolver/polling_resolver.cc +3 -4
- data/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc +4 -7
- data/src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc +63 -46
- data/src/core/ext/filters/client_channel/retry_filter.cc +80 -102
- data/src/core/ext/filters/client_channel/retry_service_config.cc +192 -234
- data/src/core/ext/filters/client_channel/retry_service_config.h +20 -23
- data/src/core/ext/filters/client_channel/retry_throttle.cc +8 -8
- data/src/core/ext/filters/client_channel/retry_throttle.h +8 -7
- data/src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc +2 -2
- data/src/core/ext/filters/client_channel/subchannel.cc +21 -25
- data/src/core/ext/filters/client_channel/subchannel.h +2 -2
- data/src/core/ext/filters/client_channel/subchannel_stream_client.cc +11 -12
- data/src/core/ext/filters/deadline/deadline_filter.cc +13 -14
- data/src/core/ext/filters/fault_injection/fault_injection_filter.cc +1 -1
- data/src/core/ext/filters/fault_injection/fault_injection_filter.h +0 -4
- data/src/core/ext/filters/fault_injection/fault_injection_service_config_parser.cc +118 -0
- data/src/core/ext/filters/fault_injection/{service_config_parser.h → fault_injection_service_config_parser.h} +20 -12
- data/src/core/ext/filters/http/client/http_client_filter.cc +16 -16
- data/src/core/ext/filters/http/client_authority_filter.cc +1 -1
- data/src/core/ext/filters/http/message_compress/message_compress_filter.cc +13 -13
- data/src/core/ext/filters/http/message_compress/message_decompress_filter.cc +34 -34
- data/src/core/ext/filters/http/server/http_server_filter.cc +26 -25
- data/src/core/ext/filters/message_size/message_size_filter.cc +86 -117
- data/src/core/ext/filters/message_size/message_size_filter.h +22 -15
- data/src/core/ext/filters/rbac/rbac_filter.cc +12 -12
- data/src/core/ext/filters/rbac/rbac_service_config_parser.cc +728 -530
- data/src/core/ext/filters/rbac/rbac_service_config_parser.h +4 -3
- data/src/core/ext/filters/server_config_selector/server_config_selector.h +1 -1
- data/src/core/ext/filters/server_config_selector/server_config_selector_filter.cc +6 -7
- data/src/core/ext/transport/chttp2/client/chttp2_connector.cc +17 -21
- data/src/core/ext/transport/chttp2/server/chttp2_server.cc +57 -72
- data/src/core/ext/transport/chttp2/transport/bin_decoder.cc +5 -5
- data/src/core/ext/transport/chttp2/transport/bin_encoder.h +1 -1
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +212 -253
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +42 -11
- data/src/core/ext/transport/chttp2/transport/flow_control.h +4 -3
- data/src/core/ext/transport/chttp2/transport/frame_data.cc +16 -15
- data/src/core/ext/transport/chttp2/transport/frame_data.h +1 -1
- data/src/core/ext/transport/chttp2/transport/frame_goaway.cc +13 -13
- data/src/core/ext/transport/chttp2/transport/frame_ping.cc +4 -3
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc +10 -7
- data/src/core/ext/transport/chttp2/transport/frame_settings.cc +15 -17
- data/src/core/ext/transport/chttp2/transport/frame_window_update.cc +5 -4
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.cc +5 -6
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +1 -1
- data/src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc +2 -1
- data/src/core/ext/transport/chttp2/transport/hpack_parser.cc +31 -39
- data/src/core/ext/transport/chttp2/transport/hpack_parser_table.cc +7 -6
- data/src/core/ext/transport/chttp2/transport/internal.h +24 -8
- data/src/core/ext/transport/chttp2/transport/parsing.cc +51 -52
- data/src/core/ext/transport/chttp2/transport/varint.cc +2 -3
- data/src/core/ext/transport/chttp2/transport/varint.h +11 -8
- data/src/core/ext/transport/chttp2/transport/writing.cc +16 -16
- data/src/core/ext/transport/inproc/inproc_transport.cc +97 -115
- data/src/core/ext/xds/certificate_provider_store.cc +4 -4
- data/src/core/ext/xds/file_watcher_certificate_provider_factory.cc +4 -7
- data/src/core/ext/xds/xds_api.cc +15 -68
- data/src/core/ext/xds/xds_api.h +3 -7
- data/src/core/ext/xds/xds_bootstrap.h +0 -1
- data/src/core/ext/xds/xds_bootstrap_grpc.cc +3 -12
- data/src/core/ext/xds/xds_bootstrap_grpc.h +16 -1
- data/src/core/ext/xds/xds_certificate_provider.cc +22 -25
- data/src/core/ext/xds/xds_channel_stack_modifier.cc +0 -1
- data/src/core/ext/xds/xds_client.cc +122 -90
- data/src/core/ext/xds/xds_client.h +7 -2
- data/src/core/ext/xds/xds_client_grpc.cc +5 -24
- data/src/core/ext/xds/xds_cluster.cc +291 -183
- data/src/core/ext/xds/xds_cluster.h +11 -15
- data/src/core/ext/xds/xds_cluster_specifier_plugin.cc +32 -29
- data/src/core/ext/xds/xds_cluster_specifier_plugin.h +35 -16
- data/src/core/ext/xds/xds_common_types.cc +208 -141
- data/src/core/ext/xds/xds_common_types.h +19 -13
- data/src/core/ext/xds/xds_endpoint.cc +214 -129
- data/src/core/ext/xds/xds_endpoint.h +4 -7
- data/src/core/ext/xds/xds_http_fault_filter.cc +56 -43
- data/src/core/ext/xds/xds_http_fault_filter.h +13 -21
- data/src/core/ext/xds/xds_http_filters.cc +60 -73
- data/src/core/ext/xds/xds_http_filters.h +67 -19
- data/src/core/ext/xds/xds_http_rbac_filter.cc +152 -207
- data/src/core/ext/xds/xds_http_rbac_filter.h +12 -15
- data/src/core/ext/xds/xds_lb_policy_registry.cc +122 -169
- data/src/core/ext/xds/xds_lb_policy_registry.h +10 -11
- data/src/core/ext/xds/xds_listener.cc +459 -417
- data/src/core/ext/xds/xds_listener.h +43 -47
- data/src/core/ext/xds/xds_resource_type.h +3 -11
- data/src/core/ext/xds/xds_resource_type_impl.h +8 -13
- data/src/core/ext/xds/xds_route_config.cc +94 -80
- data/src/core/ext/xds/xds_route_config.h +10 -10
- data/src/core/ext/xds/xds_routing.cc +2 -1
- data/src/core/ext/xds/xds_routing.h +2 -0
- data/src/core/ext/xds/xds_server_config_fetcher.cc +109 -94
- data/src/core/ext/xds/xds_transport_grpc.cc +4 -5
- data/src/core/lib/address_utils/parse_address.cc +11 -10
- data/src/core/lib/channel/channel_args.h +16 -1
- data/src/core/lib/channel/channel_stack.cc +23 -20
- data/src/core/lib/channel/channel_stack.h +17 -4
- data/src/core/lib/channel/channel_stack_builder.cc +4 -7
- data/src/core/lib/channel/channel_stack_builder.h +14 -6
- data/src/core/lib/channel/channel_stack_builder_impl.cc +25 -7
- data/src/core/lib/channel/channel_stack_builder_impl.h +2 -0
- data/src/core/lib/channel/channel_trace.cc +4 -5
- data/src/core/lib/channel/channelz.cc +1 -1
- data/src/core/lib/channel/connected_channel.cc +695 -35
- data/src/core/lib/channel/connected_channel.h +0 -4
- data/src/core/lib/channel/promise_based_filter.cc +1004 -140
- data/src/core/lib/channel/promise_based_filter.h +364 -87
- data/src/core/lib/compression/message_compress.cc +5 -5
- data/src/core/lib/debug/event_log.cc +88 -0
- data/src/core/lib/debug/event_log.h +81 -0
- data/src/core/lib/debug/histogram_view.cc +69 -0
- data/src/core/lib/{slice/slice_refcount.cc → debug/histogram_view.h} +15 -13
- data/src/core/lib/debug/stats.cc +22 -119
- data/src/core/lib/debug/stats.h +29 -35
- data/src/core/lib/debug/stats_data.cc +224 -73
- data/src/core/lib/debug/stats_data.h +263 -122
- data/src/core/lib/event_engine/common_closures.h +71 -0
- data/src/core/lib/event_engine/default_event_engine.cc +38 -15
- data/src/core/lib/event_engine/default_event_engine.h +15 -3
- data/src/core/lib/event_engine/default_event_engine_factory.cc +2 -4
- data/src/core/lib/event_engine/memory_allocator.cc +1 -1
- data/src/core/lib/event_engine/poller.h +10 -4
- data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc +618 -0
- data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.h +129 -0
- data/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc +901 -0
- data/src/core/lib/event_engine/posix_engine/ev_poll_posix.h +97 -0
- data/src/core/lib/event_engine/posix_engine/event_poller.h +111 -0
- data/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc +74 -0
- data/src/core/lib/event_engine/{executor/threaded_executor.cc → posix_engine/event_poller_posix_default.h} +13 -16
- data/src/core/lib/event_engine/posix_engine/internal_errqueue.cc +77 -0
- data/src/core/lib/event_engine/posix_engine/internal_errqueue.h +179 -0
- data/src/core/lib/event_engine/posix_engine/lockfree_event.cc +267 -0
- data/src/core/lib/event_engine/posix_engine/lockfree_event.h +73 -0
- data/src/core/lib/event_engine/posix_engine/posix_endpoint.cc +1270 -0
- data/src/core/lib/event_engine/posix_engine/posix_endpoint.h +682 -0
- data/src/core/lib/event_engine/posix_engine/posix_engine.cc +453 -18
- data/src/core/lib/event_engine/posix_engine/posix_engine.h +148 -24
- data/src/core/lib/event_engine/posix_engine/posix_engine_closure.h +80 -0
- data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc +1081 -0
- data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.h +361 -0
- data/src/core/lib/event_engine/posix_engine/timer.h +9 -8
- data/src/core/lib/event_engine/posix_engine/timer_manager.cc +57 -194
- data/src/core/lib/event_engine/posix_engine/timer_manager.h +21 -49
- data/src/core/lib/event_engine/posix_engine/traced_buffer_list.cc +301 -0
- data/src/core/lib/event_engine/posix_engine/traced_buffer_list.h +179 -0
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.cc +126 -0
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_eventfd.h +45 -0
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.cc +151 -0
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_pipe.h +45 -0
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix.h +76 -0
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.cc +67 -0
- data/src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.h +37 -0
- data/src/core/lib/event_engine/slice.cc +7 -6
- data/src/core/lib/event_engine/slice_buffer.cc +2 -2
- data/src/core/lib/event_engine/thread_pool.cc +106 -25
- data/src/core/lib/event_engine/thread_pool.h +32 -9
- data/src/core/lib/event_engine/windows/win_socket.cc +7 -7
- data/src/core/lib/event_engine/windows/windows_engine.cc +18 -12
- data/src/core/lib/event_engine/windows/windows_engine.h +8 -4
- data/src/core/lib/experiments/config.cc +1 -1
- data/src/core/lib/experiments/experiments.cc +13 -2
- data/src/core/lib/experiments/experiments.h +8 -1
- data/src/core/lib/gpr/cpu_linux.cc +6 -2
- data/src/core/lib/gpr/log_linux.cc +3 -4
- data/src/core/lib/gpr/string.h +1 -1
- data/src/core/lib/gpr/tmpfile_posix.cc +3 -2
- data/src/core/lib/gprpp/load_file.cc +75 -0
- data/src/core/lib/gprpp/load_file.h +33 -0
- data/src/core/lib/gprpp/per_cpu.h +46 -0
- data/src/core/lib/gprpp/stat_posix.cc +5 -4
- data/src/core/lib/gprpp/stat_windows.cc +3 -2
- data/src/core/lib/gprpp/status_helper.h +1 -3
- data/src/core/lib/gprpp/strerror.cc +41 -0
- data/src/core/{ext/xds/xds_resource_type.cc → lib/gprpp/strerror.h} +9 -13
- data/src/core/lib/gprpp/thd_windows.cc +1 -2
- data/src/core/lib/gprpp/time.cc +3 -4
- data/src/core/lib/gprpp/time.h +13 -2
- data/src/core/lib/gprpp/validation_errors.h +18 -1
- data/src/core/lib/http/httpcli.cc +40 -44
- data/src/core/lib/http/httpcli.h +6 -5
- data/src/core/lib/http/httpcli_security_connector.cc +4 -6
- data/src/core/lib/http/parser.cc +54 -65
- data/src/core/lib/iomgr/buffer_list.cc +105 -116
- data/src/core/lib/iomgr/buffer_list.h +60 -44
- data/src/core/lib/iomgr/call_combiner.cc +11 -10
- data/src/core/lib/iomgr/call_combiner.h +3 -4
- data/src/core/lib/iomgr/cfstream_handle.cc +13 -16
- data/src/core/lib/iomgr/closure.h +49 -5
- data/src/core/lib/iomgr/combiner.cc +2 -2
- data/src/core/lib/iomgr/endpoint.h +1 -1
- data/src/core/lib/iomgr/endpoint_cfstream.cc +26 -25
- data/src/core/lib/iomgr/endpoint_pair_posix.cc +2 -2
- data/src/core/lib/iomgr/error.cc +27 -42
- data/src/core/lib/iomgr/error.h +22 -152
- data/src/core/lib/iomgr/ev_apple.cc +4 -4
- data/src/core/lib/iomgr/ev_epoll1_linux.cc +26 -25
- data/src/core/lib/iomgr/ev_poll_posix.cc +27 -31
- data/src/core/lib/iomgr/exec_ctx.cc +3 -4
- data/src/core/lib/iomgr/exec_ctx.h +2 -3
- data/src/core/lib/iomgr/executor.cc +1 -2
- data/src/core/lib/iomgr/internal_errqueue.cc +3 -1
- data/src/core/lib/iomgr/iocp_windows.cc +1 -0
- data/src/core/lib/iomgr/iomgr_posix.cc +2 -2
- data/src/core/lib/iomgr/iomgr_posix_cfstream.cc +2 -1
- data/src/core/lib/iomgr/iomgr_windows.cc +2 -1
- data/src/core/lib/iomgr/load_file.cc +5 -9
- data/src/core/lib/iomgr/lockfree_event.cc +10 -10
- data/src/core/lib/iomgr/pollset_windows.cc +4 -4
- data/src/core/lib/iomgr/python_util.h +2 -2
- data/src/core/lib/iomgr/resolve_address.cc +8 -3
- data/src/core/lib/iomgr/resolve_address.h +3 -4
- data/src/core/lib/iomgr/resolve_address_impl.h +1 -1
- data/src/core/lib/iomgr/resolve_address_posix.cc +14 -25
- data/src/core/lib/iomgr/resolve_address_posix.h +1 -2
- data/src/core/lib/iomgr/resolve_address_windows.cc +14 -17
- data/src/core/lib/iomgr/resolve_address_windows.h +1 -2
- data/src/core/lib/iomgr/socket_utils_common_posix.cc +30 -29
- data/src/core/lib/iomgr/socket_utils_posix.cc +1 -0
- data/src/core/lib/iomgr/socket_utils_posix.h +2 -2
- data/src/core/lib/iomgr/socket_windows.cc +2 -2
- data/src/core/lib/iomgr/tcp_client_cfstream.cc +6 -10
- data/src/core/lib/iomgr/tcp_client_posix.cc +31 -35
- data/src/core/lib/iomgr/tcp_client_windows.cc +8 -12
- data/src/core/lib/iomgr/tcp_posix.cc +92 -108
- data/src/core/lib/iomgr/tcp_server_posix.cc +34 -34
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +1 -1
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.cc +18 -21
- data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc +12 -13
- data/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc +1 -1
- data/src/core/lib/iomgr/tcp_server_windows.cc +26 -29
- data/src/core/lib/iomgr/tcp_windows.cc +27 -34
- data/src/core/lib/iomgr/timer.h +8 -8
- data/src/core/lib/iomgr/timer_generic.cc +9 -15
- data/src/core/lib/iomgr/unix_sockets_posix.cc +2 -4
- data/src/core/lib/iomgr/wakeup_fd_eventfd.cc +4 -3
- data/src/core/lib/iomgr/wakeup_fd_pipe.cc +10 -8
- data/src/core/lib/json/json_channel_args.h +42 -0
- data/src/core/lib/json/json_object_loader.cc +7 -2
- data/src/core/lib/json/json_object_loader.h +22 -0
- data/src/core/lib/json/json_util.cc +5 -5
- data/src/core/lib/json/json_util.h +4 -4
- data/src/core/lib/load_balancing/lb_policy.cc +1 -1
- data/src/core/lib/load_balancing/lb_policy.h +4 -0
- data/src/core/lib/load_balancing/subchannel_interface.h +0 -7
- data/src/core/lib/matchers/matchers.cc +3 -4
- data/src/core/lib/promise/activity.cc +16 -2
- data/src/core/lib/promise/activity.h +38 -15
- data/src/core/lib/promise/arena_promise.h +80 -51
- data/src/core/lib/promise/context.h +13 -6
- data/src/core/lib/promise/detail/basic_seq.h +9 -28
- data/src/core/lib/promise/detail/promise_factory.h +58 -10
- data/src/core/lib/promise/detail/status.h +28 -0
- data/src/core/lib/promise/detail/switch.h +1455 -0
- data/src/core/lib/promise/exec_ctx_wakeup_scheduler.h +3 -1
- data/src/core/lib/promise/for_each.h +129 -0
- data/src/core/lib/promise/loop.h +7 -5
- data/src/core/lib/promise/map_pipe.h +87 -0
- data/src/core/lib/promise/pipe.cc +19 -0
- data/src/core/lib/promise/pipe.h +505 -0
- data/src/core/lib/promise/poll.h +13 -0
- data/src/core/lib/promise/seq.h +3 -5
- data/src/core/lib/promise/sleep.cc +5 -4
- data/src/core/lib/promise/sleep.h +1 -2
- data/src/core/lib/promise/try_concurrently.h +341 -0
- data/src/core/lib/promise/try_seq.h +10 -13
- data/src/core/lib/resolver/server_address.cc +1 -0
- data/src/core/lib/resolver/server_address.h +1 -3
- data/src/core/lib/resource_quota/api.cc +0 -1
- data/src/core/lib/resource_quota/arena.cc +19 -0
- data/src/core/lib/resource_quota/arena.h +89 -0
- data/src/core/lib/resource_quota/memory_quota.cc +1 -0
- data/src/core/lib/security/authorization/grpc_authorization_engine.cc +1 -3
- data/src/core/lib/security/authorization/grpc_server_authz_filter.cc +4 -2
- data/src/core/lib/security/authorization/matchers.cc +25 -22
- data/src/core/lib/security/authorization/rbac_policy.cc +2 -3
- data/src/core/lib/security/context/security_context.h +10 -0
- data/src/core/lib/security/credentials/channel_creds_registry_init.cc +3 -4
- data/src/core/lib/security/credentials/composite/composite_credentials.cc +1 -1
- data/src/core/lib/security/credentials/external/aws_external_account_credentials.cc +77 -55
- data/src/core/lib/security/credentials/external/aws_request_signer.cc +4 -3
- data/src/core/lib/security/credentials/external/external_account_credentials.cc +40 -51
- data/src/core/lib/security/credentials/external/file_external_account_credentials.cc +17 -21
- data/src/core/lib/security/credentials/external/url_external_account_credentials.cc +21 -25
- data/src/core/lib/security/credentials/fake/fake_credentials.cc +1 -0
- data/src/core/lib/security/credentials/google_default/google_default_credentials.cc +27 -24
- data/src/core/lib/security/credentials/iam/iam_credentials.cc +1 -0
- data/src/core/lib/security/credentials/jwt/json_token.cc +1 -2
- data/src/core/lib/security/credentials/jwt/jwt_credentials.cc +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_verifier.cc +5 -5
- data/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc +24 -30
- data/src/core/lib/security/credentials/plugin/plugin_credentials.cc +6 -5
- data/src/core/lib/security/credentials/plugin/plugin_credentials.h +3 -3
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc +19 -27
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.h +4 -11
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc +29 -41
- data/src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc +1 -1
- data/src/core/lib/security/security_connector/alts/alts_security_connector.cc +6 -11
- data/src/core/lib/security/security_connector/fake/fake_security_connector.cc +8 -15
- data/src/core/lib/security/security_connector/insecure/insecure_security_connector.cc +2 -2
- data/src/core/lib/security/security_connector/insecure/insecure_security_connector.h +2 -6
- data/src/core/lib/security/security_connector/load_system_roots_supported.cc +1 -4
- data/src/core/lib/security/security_connector/local/local_security_connector.cc +7 -11
- data/src/core/lib/security/security_connector/ssl/ssl_security_connector.cc +9 -14
- data/src/core/lib/security/security_connector/ssl_utils.cc +5 -7
- data/src/core/lib/security/security_connector/tls/tls_security_connector.cc +21 -27
- data/src/core/lib/security/transport/client_auth_filter.cc +1 -1
- data/src/core/lib/security/transport/secure_endpoint.cc +26 -28
- data/src/core/lib/security/transport/security_handshaker.cc +53 -53
- data/src/core/lib/security/transport/server_auth_filter.cc +21 -21
- data/src/core/lib/security/transport/tsi_error.cc +6 -3
- data/src/core/lib/security/util/json_util.cc +4 -5
- data/src/core/lib/service_config/service_config.h +1 -1
- data/src/core/lib/service_config/service_config_impl.cc +111 -158
- data/src/core/lib/service_config/service_config_impl.h +14 -17
- data/src/core/lib/service_config/service_config_parser.cc +14 -31
- data/src/core/lib/service_config/service_config_parser.h +14 -10
- data/src/core/lib/slice/b64.cc +2 -2
- data/src/core/lib/slice/slice.cc +7 -1
- data/src/core/lib/slice/slice.h +19 -6
- data/src/core/lib/slice/slice_buffer.cc +13 -14
- data/src/core/lib/slice/slice_internal.h +13 -21
- data/src/core/lib/slice/slice_refcount.h +34 -19
- data/src/core/lib/surface/byte_buffer.cc +3 -4
- data/src/core/lib/surface/byte_buffer_reader.cc +4 -4
- data/src/core/lib/surface/call.cc +1366 -239
- data/src/core/lib/surface/call.h +44 -0
- data/src/core/lib/surface/call_details.cc +3 -3
- data/src/core/lib/surface/call_trace.cc +113 -0
- data/src/core/lib/surface/call_trace.h +30 -0
- data/src/core/lib/surface/channel.cc +44 -49
- data/src/core/lib/surface/channel.h +9 -1
- data/src/core/lib/surface/channel_ping.cc +1 -1
- data/src/core/lib/surface/channel_stack_type.cc +4 -0
- data/src/core/lib/surface/channel_stack_type.h +2 -0
- data/src/core/lib/surface/completion_queue.cc +38 -52
- data/src/core/lib/surface/init.cc +8 -39
- data/src/core/lib/surface/init_internally.h +8 -0
- data/src/core/lib/surface/lame_client.cc +10 -8
- data/src/core/lib/surface/server.cc +48 -70
- data/src/core/lib/surface/server.h +3 -4
- data/src/core/lib/surface/validate_metadata.cc +11 -12
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/connectivity_state.cc +2 -2
- data/src/core/lib/transport/error_utils.cc +34 -28
- data/src/core/lib/transport/error_utils.h +3 -3
- data/src/core/lib/transport/handshaker.cc +14 -14
- data/src/core/lib/transport/handshaker.h +1 -1
- data/src/core/lib/transport/handshaker_factory.h +26 -0
- data/src/core/lib/transport/handshaker_registry.cc +8 -2
- data/src/core/lib/transport/handshaker_registry.h +3 -4
- data/src/core/lib/transport/http_connect_handshaker.cc +23 -24
- data/src/core/lib/transport/metadata_batch.h +17 -1
- data/src/core/lib/transport/parsed_metadata.cc +2 -6
- data/src/core/lib/transport/tcp_connect_handshaker.cc +15 -20
- data/src/core/lib/transport/transport.cc +63 -17
- data/src/core/lib/transport/transport.h +64 -68
- data/src/core/lib/transport/transport_impl.h +1 -1
- data/src/core/lib/transport/transport_op_string.cc +7 -6
- data/src/core/plugin_registry/grpc_plugin_registry.cc +6 -10
- data/src/core/plugin_registry/grpc_plugin_registry_extra.cc +2 -14
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +10 -10
- data/src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc +8 -8
- data/src/core/tsi/alts/handshaker/alts_tsi_utils.cc +2 -1
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc +7 -7
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc +7 -6
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc +1 -1
- data/src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc +5 -5
- data/src/core/tsi/fake_transport_security.cc +3 -3
- data/src/core/tsi/ssl/key_logging/ssl_key_logging.cc +7 -3
- data/src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc +1 -1
- data/src/core/tsi/ssl/session_cache/ssl_session_openssl.cc +6 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +0 -2
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +0 -3
- data/src/ruby/lib/grpc/2.6/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/2.7/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/3.0/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/3.1/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/grpc_c.so +0 -0
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/channel_spec.rb +0 -43
- data/src/ruby/spec/generic/active_call_spec.rb +12 -3
- data/third_party/abseil-cpp/absl/cleanup/cleanup.h +140 -0
- data/third_party/abseil-cpp/absl/cleanup/internal/cleanup.h +100 -0
- data/third_party/zlib/compress.c +3 -3
- data/third_party/zlib/crc32.c +21 -12
- data/third_party/zlib/deflate.c +112 -106
- data/third_party/zlib/deflate.h +2 -2
- data/third_party/zlib/gzlib.c +1 -1
- data/third_party/zlib/gzread.c +3 -5
- data/third_party/zlib/gzwrite.c +1 -1
- data/third_party/zlib/infback.c +10 -7
- data/third_party/zlib/inflate.c +5 -2
- data/third_party/zlib/inftrees.c +2 -2
- data/third_party/zlib/inftrees.h +1 -1
- data/third_party/zlib/trees.c +61 -62
- data/third_party/zlib/uncompr.c +2 -2
- data/third_party/zlib/zconf.h +16 -3
- data/third_party/zlib/zlib.h +10 -10
- data/third_party/zlib/zutil.c +9 -7
- data/third_party/zlib/zutil.h +1 -0
- metadata +55 -18
- data/src/core/ext/filters/client_channel/resolver_result_parsing.cc +0 -188
- data/src/core/ext/filters/fault_injection/service_config_parser.cc +0 -187
- data/src/core/lib/event_engine/executor/threaded_executor.h +0 -44
- data/src/core/lib/gpr/murmur_hash.cc +0 -82
- data/src/core/lib/gpr/murmur_hash.h +0 -29
- data/src/core/lib/gpr/tls.h +0 -156
- data/src/core/lib/promise/call_push_pull.h +0 -148
- data/src/core/lib/slice/slice_api.cc +0 -39
- data/src/core/lib/slice/slice_buffer_api.cc +0 -35
- data/src/core/lib/slice/slice_refcount_base.h +0 -60
@@ -0,0 +1,901 @@
|
|
1
|
+
// Copyright 2022 The gRPC Authors
|
2
|
+
//
|
3
|
+
// Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
// you may not use this file except in compliance with the License.
|
5
|
+
// You may obtain a copy of the License at
|
6
|
+
//
|
7
|
+
// http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
//
|
9
|
+
// Unless required by applicable law or agreed to in writing, software
|
10
|
+
// distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
// See the License for the specific language governing permissions and
|
13
|
+
// limitations under the License.
|
14
|
+
|
15
|
+
#include <grpc/support/port_platform.h>
|
16
|
+
|
17
|
+
#include "src/core/lib/event_engine/posix_engine/ev_poll_posix.h"
|
18
|
+
|
19
|
+
#include <stdint.h>
|
20
|
+
#include <stdlib.h>
|
21
|
+
|
22
|
+
#include <atomic>
|
23
|
+
#include <list>
|
24
|
+
#include <memory>
|
25
|
+
#include <utility>
|
26
|
+
|
27
|
+
#include "absl/container/inlined_vector.h"
|
28
|
+
#include "absl/functional/any_invocable.h"
|
29
|
+
#include "absl/status/status.h"
|
30
|
+
#include "absl/status/statusor.h"
|
31
|
+
|
32
|
+
#include <grpc/event_engine/event_engine.h>
|
33
|
+
#include <grpc/impl/codegen/gpr_types.h>
|
34
|
+
#include <grpc/support/log.h>
|
35
|
+
#include <grpc/support/sync.h>
|
36
|
+
#include <grpc/support/time.h>
|
37
|
+
|
38
|
+
#include "src/core/lib/event_engine/poller.h"
|
39
|
+
#include "src/core/lib/event_engine/posix_engine/event_poller.h"
|
40
|
+
#include "src/core/lib/event_engine/posix_engine/posix_engine_closure.h"
|
41
|
+
#include "src/core/lib/gprpp/memory.h"
|
42
|
+
#include "src/core/lib/iomgr/port.h"
|
43
|
+
|
44
|
+
#ifdef GRPC_POSIX_SOCKET_EV_POLL
|
45
|
+
|
46
|
+
#include <errno.h>
|
47
|
+
#include <limits.h>
|
48
|
+
#include <poll.h>
|
49
|
+
#include <sys/socket.h>
|
50
|
+
#include <unistd.h>
|
51
|
+
|
52
|
+
#include <grpc/support/alloc.h>
|
53
|
+
|
54
|
+
#include "src/core/lib/event_engine/common_closures.h"
|
55
|
+
#include "src/core/lib/event_engine/posix_engine/wakeup_fd_posix.h"
|
56
|
+
#include "src/core/lib/event_engine/posix_engine/wakeup_fd_posix_default.h"
|
57
|
+
#include "src/core/lib/event_engine/time_util.h"
|
58
|
+
#include "src/core/lib/gprpp/fork.h"
|
59
|
+
#include "src/core/lib/gprpp/global_config.h"
|
60
|
+
#include "src/core/lib/gprpp/strerror.h"
|
61
|
+
#include "src/core/lib/gprpp/sync.h"
|
62
|
+
#include "src/core/lib/gprpp/time.h"
|
63
|
+
|
64
|
+
GPR_GLOBAL_CONFIG_DECLARE_STRING(grpc_poll_strategy);
|
65
|
+
|
66
|
+
static const intptr_t kClosureNotReady = 0;
|
67
|
+
static const intptr_t kClosureReady = 1;
|
68
|
+
static const int kPollinCheck = POLLIN | POLLHUP | POLLERR;
|
69
|
+
static const int kPolloutCheck = POLLOUT | POLLHUP | POLLERR;
|
70
|
+
|
71
|
+
namespace grpc_event_engine {
|
72
|
+
namespace posix_engine {
|
73
|
+
|
74
|
+
using ::grpc_event_engine::experimental::AnyInvocableClosure;
|
75
|
+
using ::grpc_event_engine::experimental::EventEngine;
|
76
|
+
using ::grpc_event_engine::experimental::Poller;
|
77
|
+
using ::grpc_event_engine::posix_engine::WakeupFd;
|
78
|
+
using Events = absl::InlinedVector<PollEventHandle*, 5>;
|
79
|
+
|
80
|
+
// poll(2)-based implementation of EventHandle. One instance wraps a single
// file descriptor registered with a PollPoller. Lifetime is managed by an
// intrusive ref count (ref_count_): the constructor takes the initial ref,
// BeginPollLocked/SetPendingActions take additional refs for the duration of
// a poll cycle / pending-closure execution, and the object deletes itself
// when the count drops to zero (running on_done_ if set).
class PollEventHandle : public EventHandle {
 public:
  PollEventHandle(int fd, PollPoller* poller)
      : fd_(fd),
        pending_actions_(0),
        fork_fd_list_(this),
        poller_handles_list_(this),
        poller_(poller),
        scheduler_(poller->GetScheduler()),
        is_orphaned_(false),
        is_shutdown_(false),
        closed_(false),
        released_(false),
        pollhup_(false),
        watch_mask_(-1),
        shutdown_error_(absl::OkStatus()),
        exec_actions_closure_([this]() { ExecutePendingActions(); }),
        on_done_(nullptr),
        read_closure_(reinterpret_cast<PosixEngineClosure*>(kClosureNotReady)),
        write_closure_(
            reinterpret_cast<PosixEngineClosure*>(kClosureNotReady)) {
    // Keep the poller alive while this handle exists, and register with it so
    // the next Work() iteration polls this fd.
    poller_->Ref();
    grpc_core::MutexLock lock(&poller_->mu_);
    poller_->PollerHandlesListAddHandle(this);
  }
  PollPoller* Poller() override { return poller_; }
  // Records which events fired for this fd: bit 0 of pending_actions_ marks a
  // pending read, bit 2 a pending write. Returns true (after taking a ref) if
  // ExecutePendingActions needs to run; that call releases the ref.
  bool SetPendingActions(bool pending_read, bool pending_write) {
    pending_actions_ |= pending_read;
    if (pending_write) {
      pending_actions_ |= (1 << 2);
    }
    if (pending_read || pending_write) {
      // The closure is going to be executed. We'll Unref this handle in
      // ExecutePendingActions.
      Ref();
      return true;
    }
    return false;
  }
  // Unconditionally removes this handle from the poller's handle list,
  // taking the poller lock. Used on orphan and on fork cleanup.
  void ForceRemoveHandleFromPoller() {
    grpc_core::MutexLock lock(&poller_->mu_);
    poller_->PollerHandlesListRemoveHandle(this);
  }
  int WrappedFd() override { return fd_; }
  bool IsOrphaned() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    return is_orphaned_;
  }
  // Closes fd_ unless the fd was released to the caller via OrphanHandle or
  // has already been closed.
  void CloseFd() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    if (!released_ && !closed_) {
      closed_ = true;
      close(fd_);
    }
  }
  bool IsPollhup() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return pollhup_; }
  void SetPollhup(bool pollhup) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    pollhup_ = pollhup;
  }
  // Returns whether the fd is currently being watched by a poll cycle, and
  // reports the mask it was watched with (-1 means "not watched").
  bool IsWatched(int& watch_mask) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    watch_mask = watch_mask_;
    return watch_mask_ != -1;
  }
  bool IsWatched() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    return watch_mask_ != -1;
  }
  void SetWatched(int watch_mask) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    watch_mask_ = watch_mask;
  }
  void OrphanHandle(PosixEngineClosure* on_done, int* release_fd,
                    absl::string_view reason) override;
  void ShutdownHandle(absl::Status why) override;
  void NotifyOnRead(PosixEngineClosure* on_read) override;
  void NotifyOnWrite(PosixEngineClosure* on_write) override;
  void NotifyOnError(PosixEngineClosure* on_error) override;
  void SetReadable() override;
  void SetWritable() override;
  void SetHasError() override;
  bool IsHandleShutdown() override {
    grpc_core::MutexLock lock(&mu_);
    return is_shutdown_;
  };
  // Fires the read/write closures recorded in pending_actions_ by
  // SetPendingActions, then drops the ref that SetPendingActions took.
  inline void ExecutePendingActions() {
    int kick = 0;
    {
      grpc_core::MutexLock lock(&mu_);
      if ((pending_actions_ & 1UL)) {
        if (SetReadyLocked(&read_closure_)) {
          kick = 1;
        }
      }
      if (((pending_actions_ >> 2) & 1UL)) {
        if (SetReadyLocked(&write_closure_)) {
          kick = 1;
        }
      }
      pending_actions_ = 0;
    }
    if (kick) {
      // SetReadyLocked immediately scheduled some closure. It would have set
      // the closure state to NOT_READY. We need to wakeup the Work(...)
      // thread to start polling on this fd. If this call is not made, it is
      // possible that the poller will reach a state where all the fds under
      // the poller's control are not polled for POLLIN/POLLOUT events thus
      // leading to an indefinitely blocked Work(..) method.
      poller_->KickExternal(false);
    }
    Unref();
  }
  void Ref() { ref_count_.fetch_add(1, std::memory_order_relaxed); }
  // Drops one ref; on the last ref runs on_done_ (if any), releases the
  // poller ref taken in the constructor, and destroys this handle.
  void Unref() {
    if (ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      if (on_done_ != nullptr) {
        scheduler_->Run(on_done_);
      }
      poller_->Unref();
      delete this;
    }
  }
  ~PollEventHandle() override = default;
  grpc_core::Mutex* mu() ABSL_LOCK_RETURNED(mu_) { return &mu_; }
  PollPoller::HandlesList& ForkFdListPos() { return fork_fd_list_; }
  PollPoller::HandlesList& PollerHandlesListPos() {
    return poller_handles_list_;
  }
  uint32_t BeginPollLocked(uint32_t read_mask, uint32_t write_mask)
      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
  bool EndPollLocked(bool got_read, bool got_write)
      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);

 private:
  int SetReadyLocked(PosixEngineClosure** st);
  int NotifyOnLocked(PosixEngineClosure** st, PosixEngineClosure* closure);
  // See Epoll1Poller::ShutdownHandle for explanation on why a mutex is
  // required.
  grpc_core::Mutex mu_;
  std::atomic<int> ref_count_{1};
  int fd_;
  // Bitmask of events observed for this fd: bit 0 = read, bit 2 = write.
  int pending_actions_;
  // Intrusive list links: global fork-support list and the poller's list.
  PollPoller::HandlesList fork_fd_list_;
  PollPoller::HandlesList poller_handles_list_;
  PollPoller* poller_;
  Scheduler* scheduler_;
  bool is_orphaned_;
  bool is_shutdown_;
  bool closed_;
  // True if the fd was handed back to the caller via OrphanHandle's
  // release_fd, in which case we must not close it ourselves.
  bool released_;
  bool pollhup_;
  // Event mask the fd is currently polled with; -1 means "not watched".
  int watch_mask_;
  absl::Status shutdown_error_;
  // Closure wrapping ExecutePendingActions.
  // NOTE(review): not scheduled anywhere in this file — confirm whether it is
  // still needed.
  AnyInvocableClosure exec_actions_closure_;
  PosixEngineClosure* on_done_;
  // Closure slots: hold kClosureNotReady, kClosureReady, or a real closure.
  PosixEngineClosure* read_closure_;
  PosixEngineClosure* write_closure_;
};
|
233
|
+
|
234
|
+
namespace {
|
235
|
+
// Only used when GRPC_ENABLE_FORK_SUPPORT=1
std::list<PollPoller*> fork_poller_list;

// Only used when GRPC_ENABLE_FORK_SUPPORT=1
// Head of the intrusive list of all live handles; guarded by
// fork_fd_list_mu, which also guards fork_poller_list above.
PollEventHandle* fork_fd_list_head = nullptr;
gpr_mu fork_fd_list_mu;
|
241
|
+
|
242
|
+
// Prepends `handle` to the global fork-support fd list. No-op unless fork
// support is enabled.
void ForkFdListAddHandle(PollEventHandle* handle) {
  if (!grpc_core::Fork::Enabled()) return;
  gpr_mu_lock(&fork_fd_list_mu);
  auto& pos = handle->ForkFdListPos();
  pos.prev = nullptr;
  pos.next = fork_fd_list_head;
  if (fork_fd_list_head != nullptr) {
    fork_fd_list_head->ForkFdListPos().prev = handle;
  }
  fork_fd_list_head = handle;
  gpr_mu_unlock(&fork_fd_list_mu);
}
|
254
|
+
|
255
|
+
// Unlinks `handle` from the global fork-support fd list. No-op unless fork
// support is enabled.
void ForkFdListRemoveHandle(PollEventHandle* handle) {
  if (!grpc_core::Fork::Enabled()) return;
  gpr_mu_lock(&fork_fd_list_mu);
  PollEventHandle* prev = handle->ForkFdListPos().prev;
  PollEventHandle* next = handle->ForkFdListPos().next;
  if (fork_fd_list_head == handle) {
    fork_fd_list_head = next;
  }
  if (prev != nullptr) {
    prev->ForkFdListPos().next = next;
  }
  if (next != nullptr) {
    next->ForkFdListPos().prev = prev;
  }
  gpr_mu_unlock(&fork_fd_list_mu);
}
|
272
|
+
|
273
|
+
// Registers `poller` for post-fork cleanup. No-op unless fork support is
// enabled.
void ForkPollerListAddPoller(PollPoller* poller) {
  if (!grpc_core::Fork::Enabled()) return;
  gpr_mu_lock(&fork_fd_list_mu);
  fork_poller_list.push_back(poller);
  gpr_mu_unlock(&fork_fd_list_mu);
}
|
280
|
+
|
281
|
+
// Deregisters `poller` from post-fork cleanup. No-op unless fork support is
// enabled.
void ForkPollerListRemovePoller(PollPoller* poller) {
  if (!grpc_core::Fork::Enabled()) return;
  gpr_mu_lock(&fork_fd_list_mu);
  fork_poller_list.remove(poller);
  gpr_mu_unlock(&fork_fd_list_mu);
}
|
288
|
+
|
289
|
+
// Returns the number of milliseconds elapsed between now and start timestamp.
|
290
|
+
int PollElapsedTimeToMillis(grpc_core::Timestamp start) {
|
291
|
+
if (start == grpc_core::Timestamp::InfFuture()) return -1;
|
292
|
+
grpc_core::Timestamp now =
|
293
|
+
grpc_core::Timestamp::FromTimespecRoundDown(gpr_now(GPR_CLOCK_MONOTONIC));
|
294
|
+
int64_t delta = (now - start).millis();
|
295
|
+
if (delta > INT_MAX) {
|
296
|
+
return INT_MAX;
|
297
|
+
} else if (delta < 0) {
|
298
|
+
return 0;
|
299
|
+
} else {
|
300
|
+
return static_cast<int>(delta);
|
301
|
+
}
|
302
|
+
}
|
303
|
+
|
304
|
+
bool InitPollPollerPosix();
|
305
|
+
|
306
|
+
// Called by the child process's post-fork handler to close open fds,
// including the global epoll fd of each poller. This allows gRPC to shutdown
// in the child process without interfering with connections or RPCs ongoing
// in the parent.
void ResetEventManagerOnFork() {
  // Delete all pending PollEventHandles.
  gpr_mu_lock(&fork_fd_list_mu);
  while (fork_fd_list_head != nullptr) {
    close(fork_fd_list_head->WrappedFd());
    PollEventHandle* next = fork_fd_list_head->ForkFdListPos().next;
    // Remove from the poller's list before deleting; bypasses the usual
    // orphan/refcount teardown since the child is discarding everything.
    fork_fd_list_head->ForceRemoveHandleFromPoller();
    delete fork_fd_list_head;
    fork_fd_list_head = next;
  }
  // Delete all registered pollers.
  while (!fork_poller_list.empty()) {
    PollPoller* poller = fork_poller_list.front();
    fork_poller_list.pop_front();
    delete poller;
  }
  gpr_mu_unlock(&fork_fd_list_mu);
  if (grpc_core::Fork::Enabled()) {
    gpr_mu_destroy(&fork_fd_list_mu);
    grpc_core::Fork::SetResetChildPollingEngineFunc(nullptr);
  }
  // Re-initialize fork state (mutex + handler) for the child process.
  InitPollPollerPosix();
}
|
333
|
+
|
334
|
+
// One-time initialization for the poll(2) poller: verifies a wakeup-fd
// mechanism is available and, when fork support is enabled, installs the
// post-fork cleanup handler. Returns false if this poller cannot be used.
bool InitPollPollerPosix() {
  if (!grpc_event_engine::posix_engine::SupportsWakeupFd()) {
    return false;
  }
  if (grpc_core::Fork::Enabled()) {
    gpr_mu_init(&fork_fd_list_mu);
    grpc_core::Fork::SetResetChildPollingEngineFunc(ResetEventManagerOnFork);
  }
  return true;
}
|
346
|
+
|
347
|
+
} // namespace
|
348
|
+
|
349
|
+
// Creates a new poll-based event handle wrapping `fd` and registers it with
// this poller. `track_err` must be false: this engine cannot track error
// events separately (see NotifyOnError).
EventHandle* PollPoller::CreateHandle(int fd, absl::string_view /*name*/,
                                      bool track_err) {
  // Avoid unused-parameter warning for debug-only parameter
  (void)track_err;
  GPR_DEBUG_ASSERT(track_err == false);
  PollEventHandle* handle = new PollEventHandle(fd, this);
  ForkFdListAddHandle(handle);
  // We need to send a kick to the thread executing Work(..) so that it can
  // add this new Fd into the list of Fds to poll.
  KickExternal(false);
  return handle;
}
|
361
|
+
|
362
|
+
// Detaches the fd from the poller and begins teardown. If `release_fd` is
// non-null the fd's ownership is returned to the caller via *release_fd and
// the fd is not closed/shutdown here. `on_done` (may be null) is run by
// Unref() once the last reference is dropped.
void PollEventHandle::OrphanHandle(PosixEngineClosure* on_done, int* release_fd,
                                   absl::string_view /*reason*/) {
  ForkFdListRemoveHandle(this);
  ForceRemoveHandleFromPoller();
  {
    grpc_core::ReleasableMutexLock lock(&mu_);
    on_done_ = on_done;
    released_ = release_fd != nullptr;
    if (release_fd != nullptr) {
      *release_fd = fd_;
    }
    GPR_ASSERT(!is_orphaned_);
    is_orphaned_ = true;
    // Perform shutdown operations if not already done so.
    if (!is_shutdown_) {
      is_shutdown_ = true;
      shutdown_error_ =
          absl::Status(absl::StatusCode::kInternal, "FD Orphaned");
      // signal read/write closed to OS so that future operations fail.
      if (!released_) {
        shutdown(fd_, SHUT_RDWR);
      }
      // Flush any waiting closures with the orphan error.
      SetReadyLocked(&read_closure_);
      SetReadyLocked(&write_closure_);
    }
    if (!IsWatched()) {
      CloseFd();
    } else {
      // It is watched i.e we cannot take action wihout breaking from the
      // blocking poll. Mark it as Unwatched and kick the thread executing
      // Work(...). That thread should proceed with the cleanup.
      SetWatched(-1);
      lock.Release();
      poller_->KickExternal(false);
    }
  }
  // Drop the constructor's initial ref; destruction happens once all
  // outstanding poll-cycle refs are released.
  Unref();
}
|
400
|
+
|
401
|
+
// Registers `closure` in the slot `*st` (read_closure_ or write_closure_).
// Returns 1 iff the closure was scheduled to run immediately, in which case
// the caller must kick the poller so the fd gets polled again. Requires mu_.
int PollEventHandle::NotifyOnLocked(PosixEngineClosure** st,
                                    PosixEngineClosure* closure) {
  if (is_shutdown_ || pollhup_) {
    // Handle already shut down (or saw POLLHUP): fail the closure right away
    // with the recorded shutdown error.
    closure->SetStatus(shutdown_error_);
    scheduler_->Run(closure);
  } else if (*st == reinterpret_cast<PosixEngineClosure*>(kClosureNotReady)) {
    // not ready ==> switch to a waiting state by setting the closure
    *st = closure;
    return 0;
  } else if (*st == reinterpret_cast<PosixEngineClosure*>(kClosureReady)) {
    // already ready ==> queue the closure to run immediately.
    // shutdown_error_ is still absl::OkStatus() on this path (not shutdown).
    *st = reinterpret_cast<PosixEngineClosure*>(kClosureNotReady);
    closure->SetStatus(shutdown_error_);
    scheduler_->Run(closure);
    return 1;
  } else {
    /* upcallptr was set to a different closure. This is an error! */
    gpr_log(GPR_ERROR,
            "User called a notify_on function with a previous callback still "
            "pending");
    abort();
  }
  return 0;
}
|
425
|
+
|
426
|
+
// Marks the slot `*st` ready. Returns 1 iff a waiting closure was scheduled
// (the slot then transitions back to not-ready); returns 0 if the slot was
// already ready or had no waiter. Requires mu_.
int PollEventHandle::SetReadyLocked(PosixEngineClosure** st) {
  if (*st == reinterpret_cast<PosixEngineClosure*>(kClosureReady)) {
    // duplicate ready ==> ignore
    return 0;
  } else if (*st == reinterpret_cast<PosixEngineClosure*>(kClosureNotReady)) {
    // not ready, and not waiting ==> flag ready
    *st = reinterpret_cast<PosixEngineClosure*>(kClosureReady);
    return 0;
  } else {
    // waiting ==> queue closure
    PosixEngineClosure* closure = *st;
    *st = reinterpret_cast<PosixEngineClosure*>(kClosureNotReady);
    // shutdown_error_ is OkStatus unless the handle was shut down/orphaned.
    closure->SetStatus(shutdown_error_);
    scheduler_->Run(closure);
    return 1;
  }
}
|
444
|
+
|
445
|
+
// Shuts the handle down with status `why`: records the error, shuts down the
// socket both ways, and fails/fires any pending read/write closures.
// Idempotent — only the first call takes effect.
void PollEventHandle::ShutdownHandle(absl::Status why) {
  // We need to take a Ref here because SetReadyLocked may trigger execution
  // of a closure which calls OrphanHandle or poller->Shutdown() prematurely.
  Ref();
  {
    grpc_core::MutexLock lock(&mu_);
    // only shutdown once
    if (!is_shutdown_) {
      is_shutdown_ = true;
      shutdown_error_ = why;
      // signal read/write closed to OS so that future operations fail.
      shutdown(fd_, SHUT_RDWR);
      SetReadyLocked(&read_closure_);
      SetReadyLocked(&write_closure_);
    }
  }
  // For the Ref() taken at the begining of this function.
  Unref();
}
|
464
|
+
|
465
|
+
// Registers `on_read` to run when the fd becomes readable (or immediately if
// it already is, or with an error if the handle is shut down).
void PollEventHandle::NotifyOnRead(PosixEngineClosure* on_read) {
  // We need to take a Ref here because NotifyOnLocked may trigger execution
  // of a closure which calls OrphanHandle that may delete this object or call
  // poller->Shutdown() prematurely.
  Ref();
  {
    grpc_core::ReleasableMutexLock lock(&mu_);
    if (NotifyOnLocked(&read_closure_, on_read)) {
      lock.Release();
      // NotifyOnLocked immediately scheduled some closure. It would have set
      // the closure state to NOT_READY. We need to wakeup the Work(...) thread
      // to start polling on this fd. If this call is not made, it is possible
      // that the poller will reach a state where all the fds under the
      // poller's control are not polled for POLLIN/POLLOUT events thus leading
      // to an indefinitely blocked Work(..) method.
      poller_->KickExternal(false);
    }
  }
  // For the Ref() taken at the begining of this function.
  Unref();
}
|
486
|
+
|
487
|
+
// Registers `on_write` to run when the fd becomes writable (or immediately
// if it already is, or with an error if the handle is shut down).
// Mirrors NotifyOnRead.
void PollEventHandle::NotifyOnWrite(PosixEngineClosure* on_write) {
  // We need to take a Ref here because NotifyOnLocked may trigger execution
  // of a closure which calls OrphanHandle that may delete this object or call
  // poller->Shutdown() prematurely.
  Ref();
  {
    grpc_core::ReleasableMutexLock lock(&mu_);
    if (NotifyOnLocked(&write_closure_, on_write)) {
      lock.Release();
      // NotifyOnLocked immediately scheduled some closure. It would have set
      // the closure state to NOT_READY. We need to wakeup the Work(...) thread
      // to start polling on this fd. If this call is not made, it is possible
      // that the poller will reach a state where all the fds under the
      // poller's control are not polled for POLLIN/POLLOUT events thus leading
      // to an indefinitely blocked Work(..) method.
      poller_->KickExternal(false);
    }
  }
  // For the Ref() taken at the begining of this function.
  Unref();
}
|
508
|
+
|
509
|
+
// The poll(2) engine cannot surface error-only events, so any error
// registration is immediately failed with kCancelled.
void PollEventHandle::NotifyOnError(PosixEngineClosure* on_error) {
  absl::Status unsupported(absl::StatusCode::kCancelled,
                           "Polling engine does not support tracking errors");
  on_error->SetStatus(unsupported);
  scheduler_->Run(on_error);
}
|
515
|
+
|
516
|
+
void PollEventHandle::SetReadable() {
|
517
|
+
Ref();
|
518
|
+
{
|
519
|
+
grpc_core::MutexLock lock(&mu_);
|
520
|
+
SetReadyLocked(&read_closure_);
|
521
|
+
}
|
522
|
+
Unref();
|
523
|
+
}
|
524
|
+
|
525
|
+
void PollEventHandle::SetWritable() {
|
526
|
+
Ref();
|
527
|
+
{
|
528
|
+
grpc_core::MutexLock lock(&mu_);
|
529
|
+
SetReadyLocked(&write_closure_);
|
530
|
+
}
|
531
|
+
Unref();
|
532
|
+
}
|
533
|
+
|
534
|
+
// No-op: this engine does not track error events (see NotifyOnError).
void PollEventHandle::SetHasError() {}
|
535
|
+
|
536
|
+
// Computes the poll(2) event mask to watch this fd with for the next poll
// cycle and records it via SetWatched. Takes a ref on the handle that is
// released by Work() after the cycle's EndPollLocked. Returns 0 when the fd
// is shut down or already has the requested events pending. Requires mu_.
uint32_t PollEventHandle::BeginPollLocked(uint32_t read_mask,
                                          uint32_t write_mask) {
  uint32_t mask = 0;
  bool read_ready = (pending_actions_ & 1UL);
  bool write_ready = ((pending_actions_ >> 2) & 1UL);
  Ref();
  // If we are shutdown, then no need to poll this fd. Set watch_mask to 0.
  if (is_shutdown_) {
    SetWatched(0);
    return 0;
  }
  // If there is nobody polling for read, but we need to, then start doing so.
  if (read_mask && !read_ready &&
      read_closure_ != reinterpret_cast<PosixEngineClosure*>(kClosureReady)) {
    mask |= read_mask;
  }

  // If there is nobody polling for write, but we need to, then start doing so
  if (write_mask && !write_ready &&
      write_closure_ != reinterpret_cast<PosixEngineClosure*>(kClosureReady)) {
    mask |= write_mask;
  }
  SetWatched(mask);
  return mask;
}
|
561
|
+
|
562
|
+
bool PollEventHandle::EndPollLocked(bool got_read, bool got_write) {
|
563
|
+
if (is_orphaned_ && !IsWatched()) {
|
564
|
+
CloseFd();
|
565
|
+
} else if (!is_orphaned_) {
|
566
|
+
return SetPendingActions(got_read, got_write);
|
567
|
+
}
|
568
|
+
return false;
|
569
|
+
}
|
570
|
+
|
571
|
+
// Wakes up a thread blocked in Work() via the wakeup fd. `ext` marks the
// kick as externally requested, which makes Work() return kKicked. Multiple
// kicks before the worker consumes one are coalesced; an external kick is
// never downgraded to internal.
void PollPoller::KickExternal(bool ext) {
  grpc_core::MutexLock lock(&mu_);
  if (was_kicked_) {
    if (ext) {
      was_kicked_ext_ = true;
    }
    return;
  }
  was_kicked_ = true;
  was_kicked_ext_ = ext;
  GPR_ASSERT(wakeup_fd_->Wakeup().ok());
}
|
583
|
+
|
584
|
+
// Public kick: wakes a thread blocked in Work() and makes it return kKicked.
void PollPoller::Kick() { KickExternal(true); }
|
585
|
+
|
586
|
+
// Prepends `handle` to this poller's intrusive handle list. Caller holds
// the poller's mu_.
void PollPoller::PollerHandlesListAddHandle(PollEventHandle* handle) {
  auto& pos = handle->PollerHandlesListPos();
  pos.prev = nullptr;
  pos.next = poll_handles_list_head_;
  if (poll_handles_list_head_ != nullptr) {
    poll_handles_list_head_->PollerHandlesListPos().prev = handle;
  }
  poll_handles_list_head_ = handle;
  ++num_poll_handles_;
}
|
595
|
+
|
596
|
+
// Unlinks `handle` from this poller's intrusive handle list. Caller holds
// the poller's mu_.
void PollPoller::PollerHandlesListRemoveHandle(PollEventHandle* handle) {
  PollEventHandle* prev = handle->PollerHandlesListPos().prev;
  PollEventHandle* next = handle->PollerHandlesListPos().next;
  if (poll_handles_list_head_ == handle) {
    poll_handles_list_head_ = next;
  }
  if (prev != nullptr) {
    prev->PollerHandlesListPos().next = next;
  }
  if (next != nullptr) {
    next->PollerHandlesListPos().prev = prev;
  }
  --num_poll_handles_;
}
|
610
|
+
|
611
|
+
// Convenience constructor: a regular (non-phony) poller. Delegates to the
// two-argument constructor instead of duplicating its initialization.
PollPoller::PollPoller(Scheduler* scheduler) : PollPoller(scheduler, false) {}
|
622
|
+
|
623
|
+
// Constructs a poll(2) poller. When `use_phony_poll` is true, Work() aborts
// on any attempt to perform a real blocking poll once event handles are
// registered (non-polling mode).
PollPoller::PollPoller(Scheduler* scheduler, bool use_phony_poll)
    : scheduler_(scheduler),
      use_phony_poll_(use_phony_poll),
      was_kicked_(false),
      was_kicked_ext_(false),
      num_poll_handles_(0),
      poll_handles_list_head_(nullptr) {
  // The wakeup fd lets Kick()/KickExternal() interrupt a blocking poll.
  wakeup_fd_ = *CreateWakeupFd();
  GPR_ASSERT(wakeup_fd_ != nullptr);
  ForkPollerListAddPoller(this);
}
|
634
|
+
|
635
|
+
PollPoller::~PollPoller() {
  // Assert that no active handles are present at the time of destruction.
  // They should have been orphaned before reaching this state.
  GPR_ASSERT(num_poll_handles_ == 0);
  GPR_ASSERT(poll_handles_list_head_ == nullptr);
}
|
641
|
+
|
642
|
+
// Runs one unit of polling work for up to `timeout`.
// Each iteration: snapshot the registered handles into a pollfd array
// (slot 0 is the wakeup fd), call poll(2), then settle each watched handle
// via EndPollLocked, collecting handles whose closures must fire into
// pending_events. When events are found, `schedule_poll_again` is invoked
// synchronously and the pending closures are executed inline.
// Returns kKicked on an external kick, kDeadlineExceeded on timeout with no
// events, kOk otherwise.
// Fix vs previous revision: removed a redundant duplicate assignment of
// `pfds` in the heap-allocation branch.
Poller::WorkResult PollPoller::Work(
    EventEngine::Duration timeout,
    absl::FunctionRef<void()> schedule_poll_again) {
  // Avoid malloc for small number of elements.
  enum { inline_elements = 96 };
  struct pollfd pollfd_space[inline_elements];
  bool was_kicked_ext = false;
  PollEventHandle* watcher_space[inline_elements];
  Events pending_events;
  pending_events.clear();
  int timeout_ms =
      static_cast<int>(grpc_event_engine::experimental::Milliseconds(timeout));
  mu_.Lock();
  // Start polling, and keep doing so while we're being asked to
  // re-evaluate our pollers (this allows poll() based pollers to
  // ensure they don't miss wakeups).
  while (pending_events.empty() && timeout_ms >= 0) {
    int r = 0;
    size_t i;
    nfds_t pfd_count;
    struct pollfd* pfds;
    PollEventHandle** watchers;
    // Estimate start time for a poll iteration.
    grpc_core::Timestamp start = grpc_core::Timestamp::FromTimespecRoundDown(
        gpr_now(GPR_CLOCK_MONOTONIC));
    if (num_poll_handles_ + 2 <= inline_elements) {
      pfds = pollfd_space;
      watchers = watcher_space;
    } else {
      // Single allocation holding the pollfd array followed by the parallel
      // watcher-pointer array.
      const size_t pfd_size = sizeof(*pfds) * (num_poll_handles_ + 2);
      const size_t watch_size = sizeof(*watchers) * (num_poll_handles_ + 2);
      void* buf = gpr_malloc(pfd_size + watch_size);
      pfds = static_cast<struct pollfd*>(buf);
      watchers = static_cast<PollEventHandle**>(
          static_cast<void*>((static_cast<char*>(buf) + pfd_size)));
    }

    // Slot 0 is always the wakeup fd so Kick() can interrupt the poll.
    pfd_count = 1;
    pfds[0].fd = wakeup_fd_->ReadFd();
    pfds[0].events = POLLIN;
    pfds[0].revents = 0;
    PollEventHandle* head = poll_handles_list_head_;
    while (head != nullptr) {
      {
        grpc_core::MutexLock lock(head->mu());
        // There shouldn't be any orphaned fds at this point. This is because
        // prior to marking a handle as orphaned it is first removed from
        // poll handle list for the poller under the poller lock.
        GPR_ASSERT(!head->IsOrphaned());
        if (!head->IsPollhup()) {
          pfds[pfd_count].fd = head->WrappedFd();
          watchers[pfd_count] = head;
          // BeginPollLocked takes a ref of the handle. It also marks the
          // fd as Watched with an appropriate watch_mask. The watch_mask
          // is 0 if the fd is shutdown or if the fd is already ready (i.e
          // both read and write events are already available) and doesn't
          // need to be polled again. The watch_mask is > 0 otherwise
          // indicating the fd needs to be polled.
          pfds[pfd_count].events = head->BeginPollLocked(POLLIN, POLLOUT);
          pfd_count++;
        }
      }
      head = head->PollerHandlesListPos().next;
    }
    mu_.Unlock();

    if (!use_phony_poll_ || timeout_ms == 0 || pfd_count == 1) {
      // If use_phony_poll is true and pfd_count == 1, it implies only the
      // wakeup_fd is present. Allow the call to get blocked in this case as
      // well instead of crashing. This is because the poller::Work is called
      // right after an event enging is constructed. Even if phony poll is
      // expected to be used, we dont want to check for it until some actual
      // event handles are registered. Otherwise the event engine construction
      // may crash.
      r = poll(pfds, pfd_count, timeout_ms);
    } else {
      gpr_log(GPR_ERROR,
              "Attempted a blocking poll when declared non-polling.");
      GPR_ASSERT(false);
    }

    if (r <= 0) {
      // Timeout (r == 0) or error (r < 0). EINTR is tolerated; any other
      // poll failure is fatal.
      if (r < 0 && errno != EINTR) {
        // Abort fail here.
        gpr_log(GPR_ERROR,
                "(event_engine) PollPoller:%p encountered poll error: %s", this,
                grpc_core::StrError(errno).c_str());
        GPR_ASSERT(false);
      }

      for (i = 1; i < pfd_count; i++) {
        PollEventHandle* head = watchers[i];
        int watch_mask;
        grpc_core::ReleasableMutexLock lock(head->mu());
        if (head->IsWatched(watch_mask)) {
          head->SetWatched(-1);
          // This fd was Watched with a watch mask > 0.
          if (watch_mask > 0 && r < 0) {
            // This case implies the fd was polled (since watch_mask > 0 and
            // the poll returned an error. Mark the fds as both readable and
            // writable.
            if (head->EndPollLocked(true, true)) {
              // Its safe to add to list of pending events because
              // EndPollLocked returns true only when the handle is
              // not orphaned. But an orphan might be initiated on the handle
              // after this Work() method returns and before the next Work()
              // method is invoked.
              pending_events.push_back(head);
            }
          } else {
            // In this case, (1) watch_mask > 0 && r == 0 or (2) watch_mask ==
            // 0 and r < 0 or (3) watch_mask == 0 and r == 0. For case-1, no
            // events are pending on the fd even though the fd was polled. For
            // case-2 and 3, the fd was not polled
            head->EndPollLocked(false, false);
          }
        } else {
          // It can enter this case if an orphan was invoked on the handle
          // while it was being polled.
          head->EndPollLocked(false, false);
        }
        lock.Release();
        // Unref the ref taken at BeginPollLocked.
        head->Unref();
      }
    } else {
      if (pfds[0].revents & kPollinCheck) {
        GPR_ASSERT(wakeup_fd_->ConsumeWakeup().ok());
      }
      for (i = 1; i < pfd_count; i++) {
        PollEventHandle* head = watchers[i];
        int watch_mask;
        grpc_core::ReleasableMutexLock lock(head->mu());
        if (!head->IsWatched(watch_mask) || watch_mask == 0) {
          // IsWatched will be false if an orphan was invoked on the
          // handle while it was being polled. If watch_mask is 0, then the fd
          // was not polled.
          head->SetWatched(-1);
          head->EndPollLocked(false, false);
        } else {
          // Watched is true and watch_mask > 0
          if (pfds[i].revents & POLLHUP) {
            head->SetPollhup(true);
          }
          head->SetWatched(-1);
          if (head->EndPollLocked(pfds[i].revents & kPollinCheck,
                                  pfds[i].revents & kPolloutCheck)) {
            // Its safe to add to list of pending events because EndPollLocked
            // returns true only when the handle is not orphaned.
            // But an orphan might be initiated on the handle after this
            // Work() method returns and before the next Work() method is
            // invoked.
            pending_events.push_back(head);
          }
        }
        lock.Release();
        // Unref the ref taken at BeginPollLocked.
        head->Unref();
      }
    }

    if (pfds != pollfd_space) {
      gpr_free(pfds);
    }

    // End of poll iteration. Update how much time is remaining.
    timeout_ms -= PollElapsedTimeToMillis(start);
    mu_.Lock();
    // was_kicked_ext_ is only set together with was_kicked_, so the
    // short-circuit here cannot leave an external kick unconsumed.
    if (std::exchange(was_kicked_, false) &&
        std::exchange(was_kicked_ext_, false)) {
      // External kick. Need to break out.
      was_kicked_ext = true;
      break;
    }
  }
  mu_.Unlock();
  if (pending_events.empty()) {
    if (was_kicked_ext) {
      return Poller::WorkResult::kKicked;
    }
    return Poller::WorkResult::kDeadlineExceeded;
  }
  // Run the provided callback synchronously.
  schedule_poll_again();
  // Process all pending events inline.
  for (auto& it : pending_events) {
    it->ExecutePendingActions();
  }
  return was_kicked_ext ? Poller::WorkResult::kKicked : Poller::WorkResult::kOk;
}
|
833
|
+
|
834
|
+
// Begins poller teardown: deregisters from fork cleanup and drops the
// caller's ref; destruction occurs once all handle refs are released.
void PollPoller::Shutdown() {
  ForkPollerListRemovePoller(this);
  Unref();
}
|
838
|
+
|
839
|
+
// Factory for the poll(2) poller. Returns nullptr if the platform cannot
// support it (no wakeup-fd mechanism available).
PollPoller* MakePollPoller(Scheduler* scheduler, bool use_phony_poll) {
  // One-time platform probe, cached for the process lifetime.
  static bool supported = InitPollPollerPosix();
  return supported ? new PollPoller(scheduler, use_phony_poll) : nullptr;
}
|
846
|
+
|
847
|
+
} // namespace posix_engine
|
848
|
+
} // namespace grpc_event_engine
|
849
|
+
|
850
|
+
#else /* GRPC_POSIX_SOCKET_EV_POLL */
|
851
|
+
|
852
|
+
namespace grpc_event_engine {
|
853
|
+
namespace posix_engine {
|
854
|
+
|
855
|
+
using ::grpc_event_engine::experimental::EventEngine;
|
856
|
+
using ::grpc_event_engine::experimental::Poller;
|
857
|
+
|
858
|
+
// Stub for platforms without poll(2) support: constructing a PollPoller is a
// programming error here.
PollPoller::PollPoller(Scheduler* /* engine */) {
  GPR_ASSERT(false && "unimplemented");
}
|
861
|
+
|
862
|
+
// Stub: unreachable on platforms without poll(2) support.
void PollPoller::Shutdown() { GPR_ASSERT(false && "unimplemented"); }
|
863
|
+
|
864
|
+
// Stub: unreachable on platforms without poll(2) support.
PollPoller::~PollPoller() { GPR_ASSERT(false && "unimplemented"); }
|
865
|
+
|
866
|
+
// Stub: unreachable on platforms without poll(2) support.
EventHandle* PollPoller::CreateHandle(int /*fd*/, absl::string_view /*name*/,
                                      bool /*track_err*/) {
  GPR_ASSERT(false && "unimplemented");
}
|
870
|
+
|
871
|
+
// Stub: unreachable on platforms without poll(2) support.
Poller::WorkResult PollPoller::Work(
    EventEngine::Duration /*timeout*/,
    absl::FunctionRef<void()> /*schedule_poll_again*/) {
  GPR_ASSERT(false && "unimplemented");
}
|
876
|
+
|
877
|
+
// Stub: unreachable on platforms without poll(2) support.
void PollPoller::Kick() { GPR_ASSERT(false && "unimplemented"); }
|
878
|
+
|
879
|
+
// If GRPC_POSIX_SOCKET_EV_POLL is not defined, poll(2) is not available on
// this platform. Return nullptr.
PollPoller* MakePollPoller(Scheduler* /*scheduler*/,
                           bool /* use_phony_poll */) {
  return nullptr;
}
|
885
|
+
|
886
|
+
// Stub: unreachable on platforms without poll(2) support.
void PollPoller::KickExternal(bool /*ext*/) {
  GPR_ASSERT(false && "unimplemented");
}
|
889
|
+
|
890
|
+
// Stub: unreachable on platforms without poll(2) support.
void PollPoller::PollerHandlesListAddHandle(PollEventHandle* /*handle*/) {
  GPR_ASSERT(false && "unimplemented");
}
|
893
|
+
|
894
|
+
// Stub: unreachable on platforms without poll(2) support.
void PollPoller::PollerHandlesListRemoveHandle(PollEventHandle* /*handle*/) {
  GPR_ASSERT(false && "unimplemented");
}
|
897
|
+
|
898
|
+
} // namespace posix_engine
|
899
|
+
} // namespace grpc_event_engine
|
900
|
+
|
901
|
+
#endif /* GRPC_POSIX_SOCKET_EV_POLL */
|