grpc 1.74.1 → 1.75.0.pre1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Makefile +83 -41
- data/include/grpc/credentials.h +7 -1
- data/src/core/call/client_call.cc +4 -4
- data/src/core/call/filter_fusion.h +1230 -0
- data/src/core/call/metadata.cc +22 -0
- data/src/core/call/metadata.h +24 -2
- data/src/core/channelz/channelz.cc +10 -17
- data/src/core/channelz/channelz.h +58 -19
- data/src/core/channelz/channelz_registry.cc +0 -162
- data/src/core/channelz/channelz_registry.h +14 -7
- data/src/core/channelz/property_list.cc +19 -23
- data/src/core/channelz/property_list.h +3 -1
- data/src/core/channelz/v2tov1/convert.cc +683 -0
- data/src/core/channelz/v2tov1/convert.h +58 -0
- data/src/core/channelz/v2tov1/legacy_api.cc +425 -0
- data/src/core/channelz/v2tov1/legacy_api.h +32 -0
- data/src/core/channelz/v2tov1/property_list.cc +118 -0
- data/src/core/channelz/v2tov1/property_list.h +52 -0
- data/src/core/client_channel/client_channel_filter.cc +5 -4
- data/src/core/client_channel/client_channel_filter.h +2 -2
- data/src/core/client_channel/client_channel_internal.h +2 -1
- data/src/core/client_channel/load_balanced_call_destination.cc +6 -5
- data/src/core/client_channel/subchannel.cc +14 -6
- data/src/core/client_channel/subchannel.h +2 -0
- data/src/core/config/core_configuration.cc +3 -1
- data/src/core/config/core_configuration.h +12 -0
- data/src/core/credentials/transport/alts/alts_credentials.cc +5 -0
- data/src/core/credentials/transport/alts/check_gcp_environment_windows.cc +2 -0
- data/src/core/credentials/transport/channel_creds_registry_init.cc +3 -1
- data/src/core/credentials/transport/ssl/ssl_credentials.cc +1 -1
- data/src/core/credentials/transport/ssl/ssl_security_connector.cc +8 -3
- data/src/core/credentials/transport/tls/grpc_tls_certificate_distributor.cc +29 -24
- data/src/core/credentials/transport/tls/grpc_tls_certificate_distributor.h +19 -8
- data/src/core/credentials/transport/tls/grpc_tls_certificate_provider.cc +96 -54
- data/src/core/credentials/transport/tls/grpc_tls_certificate_provider.h +15 -2
- data/src/core/credentials/transport/tls/spiffe_utils.cc +371 -0
- data/src/core/credentials/transport/tls/spiffe_utils.h +171 -0
- data/src/core/credentials/transport/tls/ssl_utils.cc +11 -10
- data/src/core/credentials/transport/tls/ssl_utils.h +4 -2
- data/src/core/credentials/transport/tls/tls_credentials.cc +2 -0
- data/src/core/credentials/transport/tls/tls_security_connector.cc +11 -26
- data/src/core/credentials/transport/tls/tls_security_connector.h +12 -12
- data/src/core/ext/filters/backend_metrics/backend_metric_filter.cc +1 -2
- data/src/core/ext/filters/http/client/http_client_filter.cc +3 -6
- data/src/core/ext/filters/http/client_authority_filter.cc +1 -2
- data/src/core/ext/filters/http/message_compress/compression_filter.cc +8 -8
- data/src/core/ext/filters/http/server/http_server_filter.cc +3 -6
- data/src/core/ext/filters/message_size/message_size_filter.cc +4 -4
- data/src/core/ext/filters/rbac/rbac_filter.cc +1 -1
- data/src/core/ext/filters/stateful_session/stateful_session_filter.cc +3 -5
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.cc +3 -2
- data/src/core/ext/transport/chttp2/transport/flow_control.cc +1 -0
- data/src/core/ext/transport/chttp2/transport/flow_control.h +1 -0
- data/src/core/ext/transport/chttp2/transport/frame.cc +89 -6
- data/src/core/ext/transport/chttp2/transport/frame.h +38 -0
- data/src/core/ext/transport/chttp2/transport/header_assembler.h +5 -14
- data/src/core/ext/transport/chttp2/transport/hpack_parser.h +4 -1
- data/src/core/ext/transport/chttp2/transport/http2_client_transport.cc +294 -78
- data/src/core/ext/transport/chttp2/transport/http2_client_transport.h +128 -9
- data/src/core/ext/transport/chttp2/transport/http2_settings.cc +11 -38
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +52 -35
- data/src/core/ext/transport/chttp2/transport/http2_settings_manager.cc +61 -0
- data/src/core/ext/transport/chttp2/transport/http2_settings_manager.h +142 -0
- data/src/core/ext/transport/chttp2/transport/http2_transport.cc +81 -3
- data/src/core/ext/transport/chttp2/transport/http2_transport.h +12 -1
- data/src/core/ext/transport/chttp2/transport/message_assembler.h +2 -2
- data/src/core/ext/transport/chttp2/transport/parsing.cc +2 -1
- data/src/core/ext/transport/chttp2/transport/ping_promise.cc +2 -1
- data/src/core/ext/transport/chttp2/transport/ping_promise.h +22 -5
- data/src/core/ext/transport/chttp2/transport/stream_data_queue.h +607 -0
- data/src/core/ext/transport/chttp2/transport/writable_streams.h +254 -0
- data/src/core/ext/transport/chttp2/transport/writing.cc +6 -4
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/channelz.upb.h +4959 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/channelz.upb_minitable.c +1111 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/channelz.upb_minitable.h +108 -0
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/property_list.upb.h +142 -54
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/property_list.upb_minitable.c +18 -14
- data/src/core/ext/upb-gen/src/proto/grpc/channelz/v2/property_list.upb_minitable.h +2 -2
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/channelz.upbdefs.c +716 -0
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/channelz.upbdefs.h +227 -0
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/property_list.upbdefs.c +86 -88
- data/src/core/ext/upbdefs-gen/src/proto/grpc/channelz/v2/property_list.upbdefs.h +2 -2
- data/src/core/filter/auth/auth_filters.h +2 -2
- data/src/core/filter/fused_filters.cc +154 -0
- data/src/core/handshaker/security/legacy_secure_endpoint.cc +1 -1
- data/src/core/handshaker/security/pipelined_secure_endpoint.cc +965 -0
- data/src/core/handshaker/security/secure_endpoint.cc +28 -13
- data/src/core/handshaker/security/secure_endpoint.h +8 -0
- data/src/core/lib/channel/promise_based_filter.cc +15 -25
- data/src/core/lib/channel/promise_based_filter.h +6 -5
- data/src/core/lib/event_engine/ares_resolver.h +3 -1
- data/src/core/lib/event_engine/cf_engine/cf_engine.cc +9 -5
- data/src/core/lib/event_engine/cf_engine/cf_engine.h +2 -1
- data/src/core/lib/event_engine/cf_engine/cfsocket_listener.cc +263 -0
- data/src/core/lib/event_engine/cf_engine/cfsocket_listener.h +107 -0
- data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc +31 -3
- data/src/core/lib/event_engine/cf_engine/cfstream_endpoint.h +12 -0
- data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.cc +12 -10
- data/src/core/lib/event_engine/posix_engine/ev_epoll1_linux.h +6 -4
- data/src/core/lib/event_engine/posix_engine/ev_poll_posix.cc +15 -14
- data/src/core/lib/event_engine/posix_engine/ev_poll_posix.h +7 -5
- data/src/core/lib/event_engine/posix_engine/event_poller.h +0 -8
- data/src/core/lib/event_engine/posix_engine/event_poller_posix_default.cc +11 -5
- data/src/core/lib/event_engine/posix_engine/event_poller_posix_default.h +3 -2
- data/src/core/lib/event_engine/posix_engine/grpc_polled_fd_posix.h +1 -0
- data/src/core/lib/event_engine/posix_engine/lockfree_event.cc +4 -4
- data/src/core/lib/event_engine/posix_engine/lockfree_event.h +3 -4
- data/src/core/lib/event_engine/posix_engine/posix_endpoint.cc +2 -2
- data/src/core/lib/event_engine/posix_engine/posix_engine.cc +188 -199
- data/src/core/lib/event_engine/posix_engine/posix_engine.h +30 -45
- data/src/core/lib/event_engine/posix_engine/posix_engine_listener.cc +1 -1
- data/src/core/lib/event_engine/posix_engine/tcp_socket_utils.h +1 -1
- data/src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc +2 -1
- data/src/core/lib/experiments/experiments.cc +120 -6
- data/src/core/lib/experiments/experiments.h +46 -3
- data/src/core/lib/iomgr/combiner.cc +1 -1
- data/src/core/lib/iomgr/exec_ctx.h +3 -9
- data/src/core/lib/iomgr/socket_mutator.cc +1 -1
- data/src/core/lib/iomgr/socket_utils_posix.cc +1 -1
- data/src/core/lib/iomgr/socket_utils_posix.h +1 -1
- data/src/core/lib/iomgr/tcp_client_posix.cc +1 -1
- data/src/core/lib/iomgr/tcp_posix.cc +3 -3
- data/src/core/lib/promise/activity.h +2 -2
- data/src/core/lib/promise/mpsc.cc +8 -8
- data/src/core/lib/promise/party.cc +7 -7
- data/src/core/lib/promise/party.h +4 -4
- data/src/core/lib/promise/poll.h +10 -0
- data/src/core/lib/resource_quota/memory_quota.cc +90 -3
- data/src/core/lib/resource_quota/memory_quota.h +20 -9
- data/src/core/lib/resource_quota/periodic_update.cc +14 -0
- data/src/core/lib/resource_quota/periodic_update.h +8 -0
- data/src/core/lib/resource_quota/resource_quota.cc +15 -4
- data/src/core/lib/resource_quota/resource_quota.h +3 -0
- data/src/core/lib/security/authorization/grpc_server_authz_filter.cc +1 -2
- data/src/core/lib/surface/call.cc +5 -5
- data/src/core/lib/surface/call.h +6 -5
- data/src/core/lib/surface/completion_queue.cc +2 -4
- data/src/core/lib/surface/filter_stack_call.cc +1 -1
- data/src/core/lib/surface/version.cc +2 -2
- data/src/core/lib/transport/promise_endpoint.cc +2 -2
- data/src/core/lib/transport/promise_endpoint.h +3 -3
- data/src/core/load_balancing/endpoint_list.cc +29 -2
- data/src/core/load_balancing/grpclb/client_load_reporting_filter.cc +3 -3
- data/src/core/load_balancing/grpclb/client_load_reporting_filter.h +1 -1
- data/src/core/load_balancing/pick_first/pick_first.cc +12 -5
- data/src/core/load_balancing/xds/xds_cluster_impl.cc +5 -3
- data/src/core/net/socket_mutator.cc +19 -0
- data/src/core/net/socket_mutator.h +25 -0
- data/src/core/plugin_registry/grpc_plugin_registry.cc +6 -0
- data/src/core/resolver/dns/c_ares/grpc_ares_ev_driver.h +6 -1
- data/src/core/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc +2 -1
- data/src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc +8 -5
- data/src/core/resolver/dns/c_ares/grpc_ares_wrapper.h +2 -1
- data/src/core/resolver/xds/xds_dependency_manager.cc +1 -1
- data/src/core/server/server.cc +1 -1
- data/src/core/server/server_call_tracer_filter.cc +0 -66
- data/src/core/server/server_call_tracer_filter.h +64 -0
- data/src/core/server/server_config_selector_filter.cc +1 -1
- data/src/core/service_config/service_config_channel_arg_filter.cc +3 -60
- data/src/core/service_config/service_config_channel_arg_filter.h +82 -0
- data/src/core/telemetry/call_tracer.cc +20 -14
- data/src/core/telemetry/call_tracer.h +22 -17
- data/src/core/telemetry/metrics.h +8 -8
- data/src/core/telemetry/stats_data.cc +151 -151
- data/src/core/telemetry/stats_data.h +87 -87
- data/src/core/transport/auth_context.cc +20 -0
- data/src/core/transport/auth_context.h +4 -0
- data/src/core/transport/auth_context_comparator_registry.h +69 -0
- data/src/core/tsi/alts/handshaker/alts_handshaker_client.cc +2 -3
- data/src/core/tsi/ssl_transport_security.cc +202 -32
- data/src/core/tsi/ssl_transport_security.h +19 -10
- data/src/core/tsi/ssl_transport_security_utils.cc +21 -0
- data/src/core/tsi/ssl_transport_security_utils.h +4 -0
- data/src/core/util/http_client/httpcli_security_connector.cc +3 -1
- data/src/core/util/latent_see.cc +178 -146
- data/src/core/util/latent_see.h +245 -188
- data/src/core/util/single_set_ptr.h +5 -2
- data/src/core/util/useful.h +91 -0
- data/src/core/util/windows/directory_reader.cc +1 -0
- data/src/core/util/windows/thd.cc +1 -3
- data/src/core/util/work_serializer.cc +1 -1
- data/src/core/xds/grpc/file_watcher_certificate_provider_factory.cc +32 -5
- data/src/core/xds/grpc/file_watcher_certificate_provider_factory.h +5 -0
- data/src/core/xds/grpc/xds_certificate_provider.cc +5 -6
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +1 -1
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/third_party/cares/cares/include/ares.h +925 -460
- data/third_party/cares/cares/include/ares_dns.h +86 -71
- data/third_party/cares/cares/include/ares_dns_record.h +1118 -0
- data/third_party/cares/cares/include/ares_nameser.h +215 -189
- data/third_party/cares/cares/include/ares_version.h +37 -14
- data/third_party/cares/cares/src/lib/ares_addrinfo2hostent.c +305 -0
- data/third_party/cares/cares/src/lib/ares_addrinfo_localhost.c +245 -0
- data/third_party/cares/cares/src/lib/ares_android.c +216 -164
- data/third_party/cares/cares/src/lib/ares_android.h +25 -14
- data/third_party/cares/cares/src/lib/ares_cancel.c +68 -44
- data/third_party/cares/cares/src/lib/ares_close_sockets.c +137 -0
- data/third_party/cares/cares/src/lib/ares_conn.c +511 -0
- data/third_party/cares/cares/src/lib/ares_conn.h +196 -0
- data/third_party/cares/cares/src/lib/ares_cookie.c +461 -0
- data/third_party/cares/cares/src/lib/ares_data.c +93 -181
- data/third_party/cares/cares/src/lib/ares_data.h +50 -39
- data/third_party/cares/cares/src/lib/ares_destroy.c +127 -89
- data/third_party/cares/cares/src/lib/ares_free_hostent.c +35 -24
- data/third_party/cares/cares/src/lib/ares_free_string.c +24 -16
- data/third_party/cares/cares/src/lib/ares_freeaddrinfo.c +45 -38
- data/third_party/cares/cares/src/lib/ares_getaddrinfo.c +549 -663
- data/third_party/cares/cares/src/lib/ares_getenv.c +25 -15
- data/third_party/cares/cares/src/lib/ares_getenv.h +26 -18
- data/third_party/cares/cares/src/lib/ares_gethostbyaddr.c +163 -221
- data/third_party/cares/cares/src/lib/ares_gethostbyname.c +222 -223
- data/third_party/cares/cares/src/lib/ares_getnameinfo.c +328 -338
- data/third_party/cares/cares/src/lib/ares_hosts_file.c +952 -0
- data/third_party/cares/cares/src/lib/ares_inet_net_pton.h +25 -19
- data/third_party/cares/cares/src/lib/ares_init.c +425 -2091
- data/third_party/cares/cares/src/lib/ares_ipv6.h +63 -33
- data/third_party/cares/cares/src/lib/ares_library_init.c +110 -54
- data/third_party/cares/cares/src/lib/ares_metrics.c +261 -0
- data/third_party/cares/cares/src/lib/ares_options.c +418 -332
- data/third_party/cares/cares/src/lib/ares_parse_into_addrinfo.c +179 -0
- data/third_party/cares/cares/src/lib/ares_private.h +558 -356
- data/third_party/cares/cares/src/lib/ares_process.c +1224 -1369
- data/third_party/cares/cares/src/lib/ares_qcache.c +430 -0
- data/third_party/cares/cares/src/lib/ares_query.c +126 -121
- data/third_party/cares/cares/src/lib/ares_search.c +564 -262
- data/third_party/cares/cares/src/lib/ares_send.c +264 -93
- data/third_party/cares/cares/src/lib/ares_set_socket_functions.c +588 -0
- data/third_party/cares/cares/src/lib/ares_setup.h +115 -111
- data/third_party/cares/cares/src/lib/ares_socket.c +425 -0
- data/third_party/cares/cares/src/lib/ares_socket.h +163 -0
- data/third_party/cares/cares/src/lib/ares_sortaddrinfo.c +447 -0
- data/third_party/cares/cares/src/lib/ares_strerror.c +83 -48
- data/third_party/cares/cares/src/lib/ares_sysconfig.c +639 -0
- data/third_party/cares/cares/src/lib/ares_sysconfig_files.c +839 -0
- data/third_party/cares/cares/src/lib/ares_sysconfig_mac.c +373 -0
- data/third_party/cares/cares/src/lib/ares_sysconfig_win.c +621 -0
- data/third_party/cares/cares/src/lib/ares_timeout.c +136 -73
- data/third_party/cares/cares/src/lib/ares_update_servers.c +1362 -0
- data/third_party/cares/cares/src/lib/ares_version.c +29 -4
- data/third_party/cares/cares/src/lib/config-dos.h +88 -89
- data/third_party/cares/cares/src/lib/config-win32.h +122 -77
- data/third_party/cares/cares/src/lib/dsa/ares_array.c +394 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable.c +447 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable.h +174 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable_asvp.c +224 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable_dict.c +228 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable_strvp.c +210 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable_szvp.c +188 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable_vpstr.c +186 -0
- data/third_party/cares/cares/src/lib/dsa/ares_htable_vpvp.c +194 -0
- data/third_party/cares/cares/src/lib/dsa/ares_llist.c +382 -0
- data/third_party/cares/cares/src/lib/dsa/ares_slist.c +479 -0
- data/third_party/cares/cares/src/lib/dsa/ares_slist.h +207 -0
- data/third_party/cares/cares/src/lib/event/ares_event.h +191 -0
- data/third_party/cares/cares/src/lib/event/ares_event_configchg.c +743 -0
- data/third_party/cares/cares/src/lib/event/ares_event_epoll.c +192 -0
- data/third_party/cares/cares/src/lib/event/ares_event_kqueue.c +248 -0
- data/third_party/cares/cares/src/lib/event/ares_event_poll.c +140 -0
- data/third_party/cares/cares/src/lib/event/ares_event_select.c +159 -0
- data/third_party/cares/cares/src/lib/event/ares_event_thread.c +567 -0
- data/third_party/cares/cares/src/lib/event/ares_event_wake_pipe.c +166 -0
- data/third_party/cares/cares/src/lib/event/ares_event_win32.c +978 -0
- data/third_party/cares/cares/src/lib/event/ares_event_win32.h +161 -0
- data/third_party/cares/cares/src/lib/include/ares_array.h +276 -0
- data/third_party/cares/cares/src/lib/include/ares_buf.h +732 -0
- data/third_party/cares/cares/src/lib/include/ares_htable_asvp.h +130 -0
- data/third_party/cares/cares/src/lib/include/ares_htable_dict.h +123 -0
- data/third_party/cares/cares/src/lib/include/ares_htable_strvp.h +130 -0
- data/third_party/cares/cares/src/lib/include/ares_htable_szvp.h +118 -0
- data/third_party/cares/cares/src/lib/include/ares_htable_vpstr.h +111 -0
- data/third_party/cares/cares/src/lib/include/ares_htable_vpvp.h +128 -0
- data/third_party/cares/cares/src/lib/include/ares_llist.h +239 -0
- data/third_party/cares/cares/src/lib/include/ares_mem.h +38 -0
- data/third_party/cares/cares/src/lib/include/ares_str.h +244 -0
- data/third_party/cares/cares/src/lib/inet_net_pton.c +202 -157
- data/third_party/cares/cares/src/lib/inet_ntop.c +87 -69
- data/third_party/cares/cares/src/lib/legacy/ares_create_query.c +78 -0
- data/third_party/cares/cares/src/lib/legacy/ares_expand_name.c +99 -0
- data/third_party/cares/cares/src/lib/legacy/ares_expand_string.c +107 -0
- data/third_party/cares/cares/src/lib/legacy/ares_fds.c +80 -0
- data/third_party/cares/cares/src/lib/legacy/ares_getsock.c +85 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_a_reply.c +107 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_aaaa_reply.c +109 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_caa_reply.c +137 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_mx_reply.c +110 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_naptr_reply.c +132 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_ns_reply.c +154 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_ptr_reply.c +213 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_soa_reply.c +115 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_srv_reply.c +114 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_txt_reply.c +144 -0
- data/third_party/cares/cares/src/lib/legacy/ares_parse_uri_reply.c +113 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_mapping.c +982 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_multistring.c +307 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_multistring.h +72 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_name.c +673 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_parse.c +1329 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_private.h +273 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_record.c +1661 -0
- data/third_party/cares/cares/src/lib/record/ares_dns_write.c +1229 -0
- data/third_party/cares/cares/src/lib/str/ares_buf.c +1498 -0
- data/third_party/cares/cares/src/lib/str/ares_str.c +508 -0
- data/third_party/cares/cares/src/lib/str/ares_strsplit.c +90 -0
- data/third_party/cares/cares/src/lib/str/ares_strsplit.h +51 -0
- data/third_party/cares/cares/src/lib/thirdparty/apple/dnsinfo.h +122 -0
- data/third_party/cares/cares/src/lib/util/ares_iface_ips.c +628 -0
- data/third_party/cares/cares/src/lib/util/ares_iface_ips.h +139 -0
- data/third_party/cares/cares/src/lib/util/ares_math.c +158 -0
- data/third_party/cares/cares/src/lib/util/ares_math.h +45 -0
- data/third_party/cares/cares/src/lib/util/ares_rand.c +389 -0
- data/third_party/cares/cares/src/lib/util/ares_rand.h +36 -0
- data/third_party/cares/cares/src/lib/util/ares_threads.c +614 -0
- data/third_party/cares/cares/src/lib/util/ares_threads.h +60 -0
- data/third_party/cares/cares/src/lib/util/ares_time.h +48 -0
- data/third_party/cares/cares/src/lib/util/ares_timeval.c +95 -0
- data/third_party/cares/cares/src/lib/util/ares_uri.c +1626 -0
- data/third_party/cares/cares/src/lib/util/ares_uri.h +252 -0
- data/third_party/cares/cares/src/lib/windows_port.c +16 -9
- metadata +121 -49
- data/src/core/util/ring_buffer.h +0 -122
- data/third_party/cares/cares/include/ares_rules.h +0 -125
- data/third_party/cares/cares/src/lib/ares__addrinfo2hostent.c +0 -266
- data/third_party/cares/cares/src/lib/ares__addrinfo_localhost.c +0 -240
- data/third_party/cares/cares/src/lib/ares__close_sockets.c +0 -61
- data/third_party/cares/cares/src/lib/ares__get_hostent.c +0 -260
- data/third_party/cares/cares/src/lib/ares__parse_into_addrinfo.c +0 -229
- data/third_party/cares/cares/src/lib/ares__read_line.c +0 -73
- data/third_party/cares/cares/src/lib/ares__readaddrinfo.c +0 -258
- data/third_party/cares/cares/src/lib/ares__sortaddrinfo.c +0 -507
- data/third_party/cares/cares/src/lib/ares__timeval.c +0 -111
- data/third_party/cares/cares/src/lib/ares_create_query.c +0 -197
- data/third_party/cares/cares/src/lib/ares_expand_name.c +0 -311
- data/third_party/cares/cares/src/lib/ares_expand_string.c +0 -67
- data/third_party/cares/cares/src/lib/ares_fds.c +0 -59
- data/third_party/cares/cares/src/lib/ares_getsock.c +0 -66
- data/third_party/cares/cares/src/lib/ares_iphlpapi.h +0 -221
- data/third_party/cares/cares/src/lib/ares_llist.c +0 -63
- data/third_party/cares/cares/src/lib/ares_llist.h +0 -39
- data/third_party/cares/cares/src/lib/ares_mkquery.c +0 -24
- data/third_party/cares/cares/src/lib/ares_nowarn.c +0 -260
- data/third_party/cares/cares/src/lib/ares_nowarn.h +0 -61
- data/third_party/cares/cares/src/lib/ares_parse_a_reply.c +0 -90
- data/third_party/cares/cares/src/lib/ares_parse_aaaa_reply.c +0 -92
- data/third_party/cares/cares/src/lib/ares_parse_caa_reply.c +0 -199
- data/third_party/cares/cares/src/lib/ares_parse_mx_reply.c +0 -164
- data/third_party/cares/cares/src/lib/ares_parse_naptr_reply.c +0 -183
- data/third_party/cares/cares/src/lib/ares_parse_ns_reply.c +0 -177
- data/third_party/cares/cares/src/lib/ares_parse_ptr_reply.c +0 -228
- data/third_party/cares/cares/src/lib/ares_parse_soa_reply.c +0 -179
- data/third_party/cares/cares/src/lib/ares_parse_srv_reply.c +0 -168
- data/third_party/cares/cares/src/lib/ares_parse_txt_reply.c +0 -214
- data/third_party/cares/cares/src/lib/ares_parse_uri_reply.c +0 -184
- data/third_party/cares/cares/src/lib/ares_platform.c +0 -11042
- data/third_party/cares/cares/src/lib/ares_platform.h +0 -43
- data/third_party/cares/cares/src/lib/ares_rand.c +0 -279
- data/third_party/cares/cares/src/lib/ares_strcasecmp.c +0 -66
- data/third_party/cares/cares/src/lib/ares_strcasecmp.h +0 -30
- data/third_party/cares/cares/src/lib/ares_strdup.c +0 -42
- data/third_party/cares/cares/src/lib/ares_strdup.h +0 -24
- data/third_party/cares/cares/src/lib/ares_strsplit.c +0 -94
- data/third_party/cares/cares/src/lib/ares_strsplit.h +0 -42
- data/third_party/cares/cares/src/lib/ares_writev.c +0 -79
- data/third_party/cares/cares/src/lib/ares_writev.h +0 -36
- data/third_party/cares/cares/src/lib/bitncmp.c +0 -59
- data/third_party/cares/cares/src/lib/bitncmp.h +0 -26
- data/third_party/cares/cares/src/lib/setup_once.h +0 -554
- data/third_party/cares/cares/src/tools/ares_getopt.h +0 -53
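
This release also updates the vendored c-ares sources (the data/third_party/cares entries above). The excerpt further below shows part of the rewritten processing code, which replaces the old fd_set-based internals with an event-list entry point, ares_process_fds(). The following is a minimal caller-side sketch of that API, mirroring what the new ares_process_fd() wrapper in the excerpt does internally; the <ares.h> include path and the drive_channel() helper name are illustrative assumptions, not taken from this gem.

#include <string.h>
#include <ares.h> /* assumed location of the vendored c-ares public header */

/* Illustrative helper: report one readable and one writable socket to c-ares
 * in a single call, the same way the new ares_process_fd() wrapper shown in
 * the excerpt below builds an ares_fd_events_t array and delegates to
 * ares_process_fds(). */
static void drive_channel(ares_channel_t *channel, ares_socket_t readable,
                          ares_socket_t writable)
{
  ares_fd_events_t events[2];
  size_t nevents = 0;

  memset(events, 0, sizeof(events));

  if (readable != ARES_SOCKET_BAD) {
    events[nevents].fd = readable;
    events[nevents].events = ARES_FD_EVENT_READ;
    nevents++;
  }
  if (writable != ARES_SOCKET_BAD) {
    /* If the same socket is both readable and writable, a single entry with
     * both flags set would also work; two entries are processed just as well. */
    events[nevents].fd = writable;
    events[nevents].events = ARES_FD_EVENT_WRITE;
    nevents++;
  }

  /* Processes write events first, then read events, then timeouts unless
   * ARES_PROCESS_FLAG_SKIP_NON_FD is passed. */
  ares_process_fds(channel, events, nevents, ARES_PROCESS_FLAG_NONE);
}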
@@ -1,39 +1,31 @@
-
-
- * Copyright (
+/* MIT License
+ *
+ * Copyright (c) 1998 Massachusetts Institute of Technology
+ * Copyright (c) 2010 Daniel Stenberg
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
  *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * SPDX-License-Identifier: MIT
  */
 
-#include "
-
-#ifdef HAVE_SYS_UIO_H
-# include <sys/uio.h>
-#endif
-#ifdef HAVE_NETINET_IN_H
-# include <netinet/in.h>
-#endif
-#ifdef HAVE_NETINET_TCP_H
-# include <netinet/tcp.h>
-#endif
-#ifdef HAVE_NETDB_H
-# include <netdb.h>
-#endif
-#ifdef HAVE_ARPA_INET_H
-# include <arpa/inet.h>
-#endif
-
-#include "ares_nameser.h"
+#include "ares_private.h"
 
 #ifdef HAVE_STRINGS_H
 # include <strings.h>
@@ -44,1544 +36,1407 @@

The removed side of this hunk is the old fd_set/select-style implementation (processfds(), the old ares_process() and ares_process_fd(), try_again(), socket_writev(), socket_write(), socket_recvfrom(), write_tcp_data(), read_tcp_data(), read_udp_packets(), process_broken_connections(), and the old process_timeouts() and process_answer() bodies). The first part of the replacement implementation (new lines 36-631) reads:

#ifdef NETWARE
# include <sys/filio.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif

#include <assert.h>
#include <fcntl.h>
#include <limits.h>


static void timeadd(ares_timeval_t *now, size_t millisecs);
static ares_status_t process_write(ares_channel_t *channel,
                                   ares_socket_t write_fd);
static ares_status_t process_read(ares_channel_t *channel,
                                  ares_socket_t read_fd,
                                  const ares_timeval_t *now);
static ares_status_t process_timeouts(ares_channel_t *channel,
                                      const ares_timeval_t *now);
static ares_status_t process_answer(ares_channel_t *channel,
                                    const unsigned char *abuf, size_t alen,
                                    ares_conn_t *conn,
                                    const ares_timeval_t *now,
                                    ares_array_t **requeue);
static void handle_conn_error(ares_conn_t *conn, ares_bool_t critical_failure,
                              ares_status_t failure_status);
static ares_bool_t same_questions(const ares_query_t *query,
                                  const ares_dns_record_t *arec);
static void end_query(ares_channel_t *channel, ares_server_t *server,
                      ares_query_t *query, ares_status_t status,
                      const ares_dns_record_t *dnsrec);

static void ares_query_remove_from_conn(ares_query_t *query)
{
  /* If its not part of a connection, it can't be tracked for timeouts either */
  ares_slist_node_destroy(query->node_queries_by_timeout);
  ares_llist_node_destroy(query->node_queries_to_conn);
  query->node_queries_by_timeout = NULL;
  query->node_queries_to_conn = NULL;
  query->conn = NULL;
}

/* Invoke the server state callback after a success or failure */
static void invoke_server_state_cb(const ares_server_t *server,
                                   ares_bool_t success, int flags)
{
  const ares_channel_t *channel = server->channel;
  ares_buf_t *buf;
  ares_status_t status;
  char *server_string;

  if (channel->server_state_cb == NULL) {
    return;
  }

  buf = ares_buf_create();
  if (buf == NULL) {
    return; /* LCOV_EXCL_LINE: OutOfMemory */
  }

  status = ares_get_server_addr(server, buf);
  if (status != ARES_SUCCESS) {
    ares_buf_destroy(buf); /* LCOV_EXCL_LINE: OutOfMemory */
    return;                /* LCOV_EXCL_LINE: OutOfMemory */
  }

  server_string = ares_buf_finish_str(buf, NULL);
  buf = NULL;
  if (server_string == NULL) {
    return; /* LCOV_EXCL_LINE: OutOfMemory */
  }

  channel->server_state_cb(server_string, success, flags,
                           channel->server_state_cb_data);
  ares_free(server_string);
}

static void server_increment_failures(ares_server_t *server,
                                      ares_bool_t used_tcp)
{
  ares_slist_node_t *node;
  const ares_channel_t *channel = server->channel;
  ares_timeval_t next_retry_time;

  node = ares_slist_node_find(channel->servers, server);
  if (node == NULL) {
    return; /* LCOV_EXCL_LINE: DefensiveCoding */
  }

  server->consec_failures++;
  ares_slist_node_reinsert(node);

  ares_tvnow(&next_retry_time);
  timeadd(&next_retry_time, channel->server_retry_delay);
  server->next_retry_time = next_retry_time;

  invoke_server_state_cb(server, ARES_FALSE,
                         used_tcp == ARES_TRUE ? ARES_SERV_STATE_TCP
                                               : ARES_SERV_STATE_UDP);
}

static void server_set_good(ares_server_t *server, ares_bool_t used_tcp)
{
  ares_slist_node_t *node;
  const ares_channel_t *channel = server->channel;

  node = ares_slist_node_find(channel->servers, server);
  if (node == NULL) {
    return; /* LCOV_EXCL_LINE: DefensiveCoding */
  }

  if (server->consec_failures > 0) {
    server->consec_failures = 0;
    ares_slist_node_reinsert(node);
  }

  server->next_retry_time.sec = 0;
  server->next_retry_time.usec = 0;

  invoke_server_state_cb(server, ARES_TRUE,
                         used_tcp == ARES_TRUE ? ARES_SERV_STATE_TCP
                                               : ARES_SERV_STATE_UDP);
}

/* return true if now is exactly check time or later */
ares_bool_t ares_timedout(const ares_timeval_t *now,
                          const ares_timeval_t *check)
{
  ares_int64_t secs = (now->sec - check->sec);

  if (secs > 0) {
    return ARES_TRUE; /* yes, timed out */
  }
  if (secs < 0) {
    return ARES_FALSE; /* nope, not timed out */
  }

  /* if the full seconds were identical, check the sub second parts */
  return ((ares_int64_t)now->usec - (ares_int64_t)check->usec) >= 0
           ? ARES_TRUE
           : ARES_FALSE;
}

/* add the specific number of milliseconds to the time in the first argument */
static void timeadd(ares_timeval_t *now, size_t millisecs)
{
  now->sec += (ares_int64_t)millisecs / 1000;
  now->usec += (unsigned int)((millisecs % 1000) * 1000);

  if (now->usec >= 1000000) {
    now->sec += now->usec / 1000000;
    now->usec %= 1000000;
  }
}

static ares_status_t ares_process_fds_nolock(ares_channel_t *channel,
                                             const ares_fd_events_t *events,
                                             size_t nevents, unsigned int flags)
{
  ares_timeval_t now;
  size_t i;
  ares_status_t status = ARES_SUCCESS;

  if (channel == NULL || (events == NULL && nevents != 0)) {
    return ARES_EFORMERR; /* LCOV_EXCL_LINE: DefensiveCoding */
  }

  ares_tvnow(&now);

  /* Process write events */
  for (i = 0; i < nevents; i++) {
    if (events[i].fd == ARES_SOCKET_BAD ||
        !(events[i].events & ARES_FD_EVENT_WRITE)) {
      continue;
    }
    status = process_write(channel, events[i].fd);
    /* We only care about ENOMEM, anything else is handled via connection
     * retries, etc */
    if (status == ARES_ENOMEM) {
      goto done;
    }
  }

  /* Process read events */
  for (i = 0; i < nevents; i++) {
    if (events[i].fd == ARES_SOCKET_BAD ||
        !(events[i].events & ARES_FD_EVENT_READ)) {
      continue;
    }
    status = process_read(channel, events[i].fd, &now);
    if (status == ARES_ENOMEM) {
      goto done;
    }
  }

  if (!(flags & ARES_PROCESS_FLAG_SKIP_NON_FD)) {
    ares_check_cleanup_conns(channel);
    status = process_timeouts(channel, &now);
    if (status == ARES_ENOMEM) {
      goto done;
    }
  }

done:
  if (status == ARES_ENOMEM) {
    return ARES_ENOMEM;
  }
  return ARES_SUCCESS;
}

ares_status_t ares_process_fds(ares_channel_t *channel,
                               const ares_fd_events_t *events, size_t nevents,
                               unsigned int flags)
{
  ares_status_t status;

  if (channel == NULL) {
    return ARES_EFORMERR;
  }

  ares_channel_lock(channel);
  status = ares_process_fds_nolock(channel, events, nevents, flags);
  ares_channel_unlock(channel);
  return status;
}

void ares_process_fd(ares_channel_t *channel, ares_socket_t read_fd,
                     ares_socket_t write_fd)
{
  ares_fd_events_t events[2];
  size_t nevents = 0;

  memset(events, 0, sizeof(events));

  if (read_fd != ARES_SOCKET_BAD) {
    nevents++;
    events[nevents - 1].fd = read_fd;
    events[nevents - 1].events |= ARES_FD_EVENT_READ;
  }

  if (write_fd != ARES_SOCKET_BAD) {
    if (write_fd != read_fd) {
      nevents++;
    }
    events[nevents - 1].fd = write_fd;
    events[nevents - 1].events |= ARES_FD_EVENT_WRITE;
  }

  ares_process_fds(channel, events, nevents, ARES_PROCESS_FLAG_NONE);
}

static ares_socket_t *channel_socket_list(const ares_channel_t *channel,
                                          size_t *num)
{
  ares_slist_node_t *snode;
  ares_array_t *arr = ares_array_create(sizeof(ares_socket_t), NULL);

  *num = 0;

  if (arr == NULL) {
    return NULL; /* LCOV_EXCL_LINE: OutOfMemory */
  }

  for (snode = ares_slist_node_first(channel->servers); snode != NULL;
       snode = ares_slist_node_next(snode)) {
    ares_server_t *server = ares_slist_node_val(snode);
    ares_llist_node_t *node;

    for (node = ares_llist_node_first(server->connections); node != NULL;
         node = ares_llist_node_next(node)) {
      const ares_conn_t *conn = ares_llist_node_val(node);
      ares_socket_t *sptr;
      ares_status_t status;

      if (conn->fd == ARES_SOCKET_BAD) {
        continue;
      }

      status = ares_array_insert_last((void **)&sptr, arr);
      if (status != ARES_SUCCESS) {
        ares_array_destroy(arr); /* LCOV_EXCL_LINE: OutOfMemory */
        return NULL;             /* LCOV_EXCL_LINE: OutOfMemory */
      }
      *sptr = conn->fd;
    }
  }

  return ares_array_finish(arr, num);
}

/* Something interesting happened on the wire, or there was a timeout.
 * See what's up and respond accordingly.
 */
void ares_process(ares_channel_t *channel, fd_set *read_fds, fd_set *write_fds)
{
  size_t i;
  size_t num_sockets;
  ares_socket_t *socketlist;
  ares_fd_events_t *events = NULL;
  size_t nevents = 0;

  if (channel == NULL) {
    return;
  }

  ares_channel_lock(channel);

  /* There is no good way to iterate across an fd_set, instead we must pull a
   * list of all known fds, and iterate across that checking against the fd_set.
   */
  socketlist = channel_socket_list(channel, &num_sockets);

  /* Lets create an events array, maximum number is the number of sockets in
   * the list, so we'll use that and just track entries with nevents */
  if (num_sockets) {
    events = ares_malloc_zero(sizeof(*events) * num_sockets);
    if (events == NULL) {
      goto done;
    }
  }

  for (i = 0; i < num_sockets; i++) {
    ares_bool_t had_read = ARES_FALSE;
    if (read_fds && FD_ISSET(socketlist[i], read_fds)) {
      nevents++;
      events[nevents - 1].fd = socketlist[i];
      events[nevents - 1].events |= ARES_FD_EVENT_READ;
      had_read = ARES_TRUE;
    }
    if (write_fds && FD_ISSET(socketlist[i], write_fds)) {
      if (!had_read) {
        nevents++;
      }
      events[nevents - 1].fd = socketlist[i];
      events[nevents - 1].events |= ARES_FD_EVENT_WRITE;
    }
  }

done:
  ares_process_fds_nolock(channel, events, nevents, ARES_PROCESS_FLAG_NONE);
  ares_free(events);
  ares_free(socketlist);
  ares_channel_unlock(channel);
}

static ares_status_t process_write(ares_channel_t *channel,
                                   ares_socket_t write_fd)
{
  ares_conn_t *conn = ares_conn_from_fd(channel, write_fd);
  ares_status_t status;

  if (conn == NULL) {
    return ARES_SUCCESS;
  }

  /* Mark as connected if we got here and TFO Initial not set */
  if (!(conn->flags & ARES_CONN_FLAG_TFO_INITIAL)) {
    conn->state_flags |= ARES_CONN_STATE_CONNECTED;
  }

  status = ares_conn_flush(conn);
  if (status != ARES_SUCCESS) {
    handle_conn_error(conn, ARES_TRUE, status);
  }
  return status;
}

void ares_process_pending_write(ares_channel_t *channel)
{
  ares_slist_node_t *node;

  if (channel == NULL) {
    return;
  }

  ares_channel_lock(channel);
  if (!channel->notify_pending_write) {
    ares_channel_unlock(channel);
    return;
  }

  /* Set as untriggerd before calling into ares_conn_flush(), this is
   * because its possible ares_conn_flush() might cause additional data to
   * be enqueued if there is some form of exception so it will need to recurse.
   */
  channel->notify_pending_write = ARES_FALSE;

  for (node = ares_slist_node_first(channel->servers); node != NULL;
       node = ares_slist_node_next(node)) {
    ares_server_t *server = ares_slist_node_val(node);
    ares_conn_t *conn = server->tcp_conn;
    ares_status_t status;

    if (conn == NULL) {
      continue;
    }

    /* Enqueue any pending data if there is any */
    status = ares_conn_flush(conn);
    if (status != ARES_SUCCESS) {
      handle_conn_error(conn, ARES_TRUE, status);
    }
  }

  ares_channel_unlock(channel);
}

static ares_status_t read_conn_packets(ares_conn_t *conn)
{
  ares_bool_t read_again;
  ares_conn_err_t err;
  const ares_channel_t *channel = conn->server->channel;

  do {
    size_t count;
    size_t len = 65535;
    unsigned char *ptr;
    size_t start_len = ares_buf_len(conn->in_buf);

    /* If UDP, lets write out a placeholder for the length indicator */
    if (!(conn->flags & ARES_CONN_FLAG_TCP) &&
        ares_buf_append_be16(conn->in_buf, 0) != ARES_SUCCESS) {
      handle_conn_error(conn, ARES_FALSE /* not critical to connection */,
                        ARES_SUCCESS);
      return ARES_ENOMEM;
    }

    /* Get a buffer of sufficient size */
    ptr = ares_buf_append_start(conn->in_buf, &len);

    if (ptr == NULL) {
      handle_conn_error(conn, ARES_FALSE /* not critical to connection */,
                        ARES_SUCCESS);
      return ARES_ENOMEM;
    }

    /* Read from socket */
    err = ares_conn_read(conn, ptr, len, &count);

    if (err != ARES_CONN_ERR_SUCCESS) {
      ares_buf_append_finish(conn->in_buf, 0);
      if (!(conn->flags & ARES_CONN_FLAG_TCP)) {
        ares_buf_set_length(conn->in_buf, start_len);
      }
      break;
    }

    /* Record amount of data read */
    ares_buf_append_finish(conn->in_buf, count);

    /* Only loop if sockets support non-blocking operation, and are using UDP
     * or are using TCP and read the maximum buffer size */
    read_again = ARES_FALSE;
    if (channel->sock_funcs.flags & ARES_SOCKFUNC_FLAG_NONBLOCKING &&
        (!(conn->flags & ARES_CONN_FLAG_TCP) || count == len)) {
      read_again = ARES_TRUE;
    }

    /* If UDP, overwrite length */
    if (!(conn->flags & ARES_CONN_FLAG_TCP)) {
      len = ares_buf_len(conn->in_buf);
      ares_buf_set_length(conn->in_buf, start_len);
      ares_buf_append_be16(conn->in_buf, (unsigned short)count);
      ares_buf_set_length(conn->in_buf, len);
    }
    /* Try to read again only if *we* set up the socket, otherwise it may be
     * a blocking socket and would cause recvfrom to hang. */
  } while (read_again);

  if (err != ARES_CONN_ERR_SUCCESS && err != ARES_CONN_ERR_WOULDBLOCK) {
    handle_conn_error(conn, ARES_TRUE, ARES_ECONNREFUSED);
    return ARES_ECONNREFUSED;
  }

  return ARES_SUCCESS;
}

/* Simple data structure to store a query that needs to be requeued with
 * optional server */
typedef struct {
  unsigned short qid;
  ares_server_t *server; /* optional */
} ares_requeue_t;

static ares_status_t ares_append_requeue(ares_array_t **requeue,
                                         ares_query_t *query,
                                         ares_server_t *server)
{
  ares_requeue_t entry;

  if (*requeue == NULL) {
    *requeue = ares_array_create(sizeof(ares_requeue_t), NULL);
    if (*requeue == NULL) {
      return ARES_ENOMEM;
    }
  }

  ares_query_remove_from_conn(query);

  entry.qid = query->qid;
  entry.server = server;
  return ares_array_insertdata_last(*requeue, &entry);
}

static ares_status_t read_answers(ares_conn_t *conn, const ares_timeval_t *now)
{
  ares_status_t status;
  ares_channel_t *channel = conn->server->channel;
  ares_array_t *requeue = NULL;

  /* Process all queued answers */
  while (1) {
    unsigned short dns_len = 0;
    const unsigned char *data = NULL;
    size_t data_len = 0;

    /* Tag so we can roll back */
    ares_buf_tag(conn->in_buf);

    /* Read length indicator */
    status = ares_buf_fetch_be16(conn->in_buf, &dns_len);
    if (status != ARES_SUCCESS) {
      ares_buf_tag_rollback(conn->in_buf);
      break;
    }

    /* Not enough data for a full response yet */
    status = ares_buf_consume(conn->in_buf, dns_len);
    if (status != ARES_SUCCESS) {
      ares_buf_tag_rollback(conn->in_buf);
      break;
    }

    /* Can't fail except for misuse */
    data = ares_buf_tag_fetch(conn->in_buf, &data_len);
    if (data == NULL || data_len < 2) {
      ares_buf_tag_clear(conn->in_buf);
      break;
    }

    /* Strip off 2 bytes length */
    data += 2;
    data_len -= 2;

    /* We finished reading this answer; process it */
    status = process_answer(channel, data, data_len, conn, now, &requeue);
    if (status != ARES_SUCCESS) {
      handle_conn_error(conn, ARES_TRUE, status);
      goto cleanup;
    }

    /* Since we processed the answer, clear the tag so space can be reclaimed */
    ares_buf_tag_clear(conn->in_buf);
  }

cleanup:

  /* Flush requeue */
  while (ares_array_len(requeue) > 0) {
    ares_query_t *query;
    ares_requeue_t entry;
    ares_status_t internal_status;

    internal_status = ares_array_claim_at(&entry, sizeof(entry), requeue, 0);
    if (internal_status != ARES_SUCCESS) {
      break;
    }

    /* Query disappeared */
    query = ares_htable_szvp_get_direct(channel->queries_by_qid, entry.qid);
    if (query == NULL) {
      continue;
    }

    internal_status = ares_send_query(entry.server, query, now);
    /* We only care about ARES_ENOMEM */
    if (internal_status == ARES_ENOMEM) {
      status = ARES_ENOMEM;
    }
  }
  ares_array_destroy(requeue);

  return status;
}

static ares_status_t process_read(ares_channel_t *channel,
                                  ares_socket_t read_fd,
                                  const ares_timeval_t *now)
{
  ares_conn_t *conn = ares_conn_from_fd(channel, read_fd);
  ares_status_t status;

  if (conn == NULL) {
    return ARES_SUCCESS;
  }
|
-
|
719
|
-
|
720
|
-
|
721
|
-
|
722
|
-
|
723
|
-
|
632
|
+
|
633
|
+
/* TODO: There might be a potential issue here where there was a read that
|
634
|
+
* read some data, then looped and read again and got a disconnect.
|
635
|
+
* Right now, that would cause a resend instead of processing the data
|
636
|
+
* we have. This is fairly unlikely to occur due to only looping if
|
637
|
+
* a full buffer of 65535 bytes was read. */
|
638
|
+
status = read_conn_packets(conn);
|
639
|
+
|
640
|
+
if (status != ARES_SUCCESS) {
|
641
|
+
return status;
|
724
642
|
}
|
643
|
+
|
644
|
+
return read_answers(conn, now);
|
725
645
|
}
|
726
646
|
|
727
|
-
|
728
|
-
|
647
|
+
/* If any queries have timed out, note the timeout and move them on. */
|
648
|
+
static ares_status_t process_timeouts(ares_channel_t *channel,
|
649
|
+
const ares_timeval_t *now)
|
729
650
|
{
|
730
|
-
|
731
|
-
|
732
|
-
|
733
|
-
|
734
|
-
|
735
|
-
|
651
|
+
ares_slist_node_t *node;
|
652
|
+
ares_status_t status = ARES_SUCCESS;
|
653
|
+
|
654
|
+
/* Just keep popping off the first as this list will re-sort as things come
|
655
|
+
* and go. We don't want to try to rely on 'next' as some operation might
|
656
|
+
* cause a cleanup of that pointer and would become invalid */
|
657
|
+
while ((node = ares_slist_node_first(channel->queries_by_timeout)) != NULL) {
|
658
|
+
ares_query_t *query = ares_slist_node_val(node);
|
659
|
+
ares_conn_t *conn;
|
660
|
+
|
661
|
+
/* Since this is sorted, as soon as we hit a query that isn't timed out,
|
662
|
+
* break */
|
663
|
+
if (!ares_timedout(now, &query->timeout)) {
|
664
|
+
break;
|
665
|
+
}
|
736
666
|
|
737
|
-
|
738
|
-
ares__close_sockets(channel, server);
|
667
|
+
query->timeouts++;
|
739
668
|
|
740
|
-
|
741
|
-
|
742
|
-
|
743
|
-
|
744
|
-
|
745
|
-
|
746
|
-
ares__init_list_head(&list_head);
|
747
|
-
swap_lists(&list_head, &(server->queries_to_server));
|
748
|
-
for (list_node = list_head.next; list_node != &list_head; )
|
749
|
-
{
|
750
|
-
query = list_node->data;
|
751
|
-
list_node = list_node->next; /* in case the query gets deleted */
|
752
|
-
assert(query->server == whichserver);
|
753
|
-
skip_server(channel, query, whichserver);
|
754
|
-
next_server(channel, query, now);
|
669
|
+
conn = query->conn;
|
670
|
+
server_increment_failures(conn->server, query->using_tcp);
|
671
|
+
status = ares_requeue_query(query, now, ARES_ETIMEOUT, ARES_TRUE, NULL,
|
672
|
+
NULL);
|
673
|
+
if (status == ARES_ENOMEM) {
|
674
|
+
goto done;
|
755
675
|
}
|
756
|
-
|
757
|
-
|
758
|
-
|
759
|
-
|
676
|
+
}
|
677
|
+
done:
|
678
|
+
if (status == ARES_ENOMEM) {
|
679
|
+
return ARES_ENOMEM;
|
680
|
+
}
|
681
|
+
return ARES_SUCCESS;
|
760
682
|
}
|
761
683
|
|
762
|
-
static
|
763
|
-
int whichserver)
|
684
|
+
static ares_status_t rewrite_without_edns(ares_query_t *query)
|
764
685
|
{
|
765
|
-
|
766
|
-
|
767
|
-
|
768
|
-
|
769
|
-
|
770
|
-
|
771
|
-
|
772
|
-
|
773
|
-
|
774
|
-
{
|
775
|
-
query->
|
686
|
+
ares_status_t status = ARES_SUCCESS;
|
687
|
+
size_t i;
|
688
|
+
ares_bool_t found_opt_rr = ARES_FALSE;
|
689
|
+
|
690
|
+
/* Find and remove the OPT RR record */
|
691
|
+
for (i = 0; i < ares_dns_record_rr_cnt(query->query, ARES_SECTION_ADDITIONAL);
|
692
|
+
i++) {
|
693
|
+
const ares_dns_rr_t *rr;
|
694
|
+
rr = ares_dns_record_rr_get(query->query, ARES_SECTION_ADDITIONAL, i);
|
695
|
+
if (ares_dns_rr_get_type(rr) == ARES_REC_TYPE_OPT) {
|
696
|
+
ares_dns_record_rr_del(query->query, ARES_SECTION_ADDITIONAL, i);
|
697
|
+
found_opt_rr = ARES_TRUE;
|
698
|
+
break;
|
776
699
|
}
|
777
|
-
}
|
700
|
+
}
|
778
701
|
|
779
|
-
|
780
|
-
|
781
|
-
|
782
|
-
|
783
|
-
* servers to try. In total, we need to do channel->nservers * channel->tries
|
784
|
-
* attempts. Use query->try to remember how many times we already attempted
|
785
|
-
* this query. Use modular arithmetic to find the next server to try. */
|
786
|
-
while (++(query->try_count) < (channel->nservers * channel->tries))
|
787
|
-
{
|
788
|
-
struct server_state *server;
|
789
|
-
|
790
|
-
/* Move on to the next server. */
|
791
|
-
query->server = (query->server + 1) % channel->nservers;
|
792
|
-
server = &channel->servers[query->server];
|
793
|
-
|
794
|
-
/* We don't want to use this server if (1) we decided this connection is
|
795
|
-
* broken, and thus about to be closed, (2) we've decided to skip this
|
796
|
-
* server because of earlier errors we encountered, or (3) we already
|
797
|
-
* sent this query over this exact connection.
|
798
|
-
*/
|
799
|
-
if (!server->is_broken &&
|
800
|
-
!query->server_info[query->server].skip_server &&
|
801
|
-
!(query->using_tcp &&
|
802
|
-
(query->server_info[query->server].tcp_connection_generation ==
|
803
|
-
server->tcp_connection_generation)))
|
804
|
-
{
|
805
|
-
ares__send_query(channel, query, now);
|
806
|
-
return;
|
807
|
-
}
|
808
|
-
|
809
|
-
/* You might think that with TCP we only need one try. However, even
|
810
|
-
* when using TCP, servers can time-out our connection just as we're
|
811
|
-
* sending a request, or close our connection because they die, or never
|
812
|
-
* send us a reply because they get wedged or tickle a bug that drops
|
813
|
-
* our request.
|
814
|
-
*/
|
815
|
-
}
|
702
|
+
if (!found_opt_rr) {
|
703
|
+
status = ARES_EFORMERR;
|
704
|
+
goto done;
|
705
|
+
}
|
816
706
|
|
817
|
-
|
818
|
-
|
707
|
+
done:
|
708
|
+
return status;
|
819
709
|
}
|
820
710
|
|
821
|
-
|
822
|
-
|
711
|
+
static ares_bool_t issue_might_be_edns(const ares_dns_record_t *req,
|
712
|
+
const ares_dns_record_t *rsp)
|
823
713
|
{
|
824
|
-
|
825
|
-
struct server_state *server;
|
826
|
-
int timeplus;
|
827
|
-
|
828
|
-
server = &channel->servers[query->server];
|
829
|
-
if (query->using_tcp)
|
830
|
-
{
|
831
|
-
/* Make sure the TCP socket for this server is set up and queue
|
832
|
-
* a send request.
|
833
|
-
*/
|
834
|
-
if (server->tcp_socket == ARES_SOCKET_BAD)
|
835
|
-
{
|
836
|
-
if (open_tcp_socket(channel, server) == -1)
|
837
|
-
{
|
838
|
-
skip_server(channel, query, query->server);
|
839
|
-
next_server(channel, query, now);
|
840
|
-
return;
|
841
|
-
}
|
842
|
-
}
|
843
|
-
sendreq = ares_malloc(sizeof(struct send_request));
|
844
|
-
if (!sendreq)
|
845
|
-
{
|
846
|
-
end_query(channel, query, ARES_ENOMEM, NULL, 0);
|
847
|
-
return;
|
848
|
-
}
|
849
|
-
memset(sendreq, 0, sizeof(struct send_request));
|
850
|
-
/* To make the common case fast, we avoid copies by using the query's
|
851
|
-
* tcpbuf for as long as the query is alive. In the rare case where the
|
852
|
-
* query ends while it's queued for transmission, then we give the
|
853
|
-
* sendreq its own copy of the request packet and put it in
|
854
|
-
* sendreq->data_storage.
|
855
|
-
*/
|
856
|
-
sendreq->data_storage = NULL;
|
857
|
-
sendreq->data = query->tcpbuf;
|
858
|
-
sendreq->len = query->tcplen;
|
859
|
-
sendreq->owner_query = query;
|
860
|
-
sendreq->next = NULL;
|
861
|
-
if (server->qtail)
|
862
|
-
server->qtail->next = sendreq;
|
863
|
-
else
|
864
|
-
{
|
865
|
-
SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 1);
|
866
|
-
server->qhead = sendreq;
|
867
|
-
}
|
868
|
-
server->qtail = sendreq;
|
869
|
-
query->server_info[query->server].tcp_connection_generation =
|
870
|
-
server->tcp_connection_generation;
|
871
|
-
}
|
872
|
-
else
|
873
|
-
{
|
874
|
-
if (server->udp_socket == ARES_SOCKET_BAD)
|
875
|
-
{
|
876
|
-
if (open_udp_socket(channel, server) == -1)
|
877
|
-
{
|
878
|
-
skip_server(channel, query, query->server);
|
879
|
-
next_server(channel, query, now);
|
880
|
-
return;
|
881
|
-
}
|
882
|
-
}
|
883
|
-
if (socket_write(channel, server->udp_socket, query->qbuf, query->qlen) == -1)
|
884
|
-
{
|
885
|
-
/* FIXME: Handle EAGAIN here since it likely can happen. */
|
886
|
-
skip_server(channel, query, query->server);
|
887
|
-
next_server(channel, query, now);
|
888
|
-
return;
|
889
|
-
}
|
890
|
-
}
|
714
|
+
const ares_dns_rr_t *rr;
|
891
715
|
|
892
|
-
|
893
|
-
|
894
|
-
|
895
|
-
|
896
|
-
|
897
|
-
|
898
|
-
|
899
|
-
|
900
|
-
|
901
|
-
|
902
|
-
|
903
|
-
|
904
|
-
|
905
|
-
|
906
|
-
|
907
|
-
|
908
|
-
|
909
|
-
|
910
|
-
|
911
|
-
|
912
|
-
|
913
|
-
|
914
|
-
|
915
|
-
|
716
|
+
/* If we use EDNS and server answers with FORMERR without an OPT RR, the
|
717
|
+
* protocol extension is not understood by the responder. We must retry the
|
718
|
+
* query without EDNS enabled. */
|
719
|
+
if (ares_dns_record_get_rcode(rsp) != ARES_RCODE_FORMERR) {
|
720
|
+
return ARES_FALSE;
|
721
|
+
}
|
722
|
+
|
723
|
+
rr = ares_dns_get_opt_rr_const(req);
|
724
|
+
if (rr == NULL) {
|
725
|
+
/* We didn't send EDNS */
|
726
|
+
return ARES_FALSE;
|
727
|
+
}
|
728
|
+
|
729
|
+
if (ares_dns_get_opt_rr_const(rsp) == NULL) {
|
730
|
+
/* Spec says EDNS won't be echo'd back on non-supporting servers, so
|
731
|
+
* retry without EDNS */
|
732
|
+
return ARES_TRUE;
|
733
|
+
}
|
734
|
+
|
735
|
+
/* As per issue #911 some non-compliant servers that do indeed support EDNS
|
736
|
+
* but don't support unrecognized option codes exist. At this point we
|
737
|
+
* expect them to have also returned an EDNS opt record, but we may remove
|
738
|
+
* that check in the future. Lets detect this situation if we're sending
|
739
|
+
* option codes */
|
740
|
+
if (ares_dns_rr_get_opt_cnt(rr, ARES_RR_OPT_OPTIONS) == 0) {
|
741
|
+
/* We didn't send any option codes */
|
742
|
+
return ARES_FALSE;
|
743
|
+
}
|
744
|
+
|
745
|
+
if (ares_dns_get_opt_rr_const(rsp) != NULL) {
|
746
|
+
/* At this time we're requiring the server to respond with EDNS opt
|
747
|
+
* records since that's what has been observed in the field. We might
|
748
|
+
* find in the future we have to remove this, who knows. Lets go
|
749
|
+
* ahead and force a retry without EDNS*/
|
750
|
+
return ARES_TRUE;
|
751
|
+
}
|
916
752
|
|
917
|
-
|
918
|
-
timeadd(&query->timeout, timeplus);
|
919
|
-
/* Keep track of queries bucketed by timeout, so we can process
|
920
|
-
* timeout events quickly.
|
921
|
-
*/
|
922
|
-
ares__remove_from_list(&(query->queries_by_timeout));
|
923
|
-
ares__insert_in_list(
|
924
|
-
&(query->queries_by_timeout),
|
925
|
-
&(channel->queries_by_timeout[query->timeout.tv_sec %
|
926
|
-
ARES_TIMEOUT_TABLE_SIZE]));
|
927
|
-
|
928
|
-
/* Keep track of queries bucketed by server, so we can process server
|
929
|
-
* errors quickly.
|
930
|
-
*/
|
931
|
-
ares__remove_from_list(&(query->queries_to_server));
|
932
|
-
ares__insert_in_list(&(query->queries_to_server),
|
933
|
-
&(server->queries_to_server));
|
753
|
+
return ARES_FALSE;
|
934
754
|
}
|
935
755
|
|
936
|
-
/*
|
937
|
-
*
|
938
|
-
*
|
939
|
-
*
|
940
|
-
|
941
|
-
|
942
|
-
|
756
|
+
/* Handle an answer from a server. This must NEVER cleanup the
|
757
|
+
* server connection! Return something other than ARES_SUCCESS to cause
|
758
|
+
* the connection to be terminated after this call. */
|
759
|
+
static ares_status_t process_answer(ares_channel_t *channel,
|
760
|
+
const unsigned char *abuf, size_t alen,
|
761
|
+
ares_conn_t *conn,
|
762
|
+
const ares_timeval_t *now,
|
763
|
+
ares_array_t **requeue)
|
943
764
|
{
|
944
|
-
|
765
|
+
ares_query_t *query;
|
766
|
+
/* Cache these as once ares_send_query() gets called, it may end up
|
767
|
+
* invalidating the connection all-together */
|
768
|
+
ares_server_t *server = conn->server;
|
769
|
+
ares_dns_record_t *rdnsrec = NULL;
|
770
|
+
ares_status_t status;
|
771
|
+
ares_bool_t is_cached = ARES_FALSE;
|
772
|
+
|
773
|
+
/* UDP can have 0-byte messages, drop them to the ground */
|
774
|
+
if (alen == 0) {
|
775
|
+
return ARES_SUCCESS;
|
776
|
+
}
|
945
777
|
|
946
|
-
|
778
|
+
/* Parse the response */
|
779
|
+
status = ares_dns_parse(abuf, alen, 0, &rdnsrec);
|
780
|
+
if (status != ARES_SUCCESS) {
|
781
|
+
/* Malformations are never accepted */
|
782
|
+
status = ARES_EBADRESP;
|
783
|
+
goto cleanup;
|
784
|
+
}
|
947
785
|
|
948
|
-
|
786
|
+
/* Find the query corresponding to this packet. The queries are
|
787
|
+
* hashed/bucketed by query id, so this lookup should be quick.
|
788
|
+
*/
|
789
|
+
query = ares_htable_szvp_get_direct(channel->queries_by_qid,
|
790
|
+
ares_dns_record_get_id(rdnsrec));
|
791
|
+
if (!query) {
|
792
|
+
/* We may have stopped listening for this query, that's ok */
|
793
|
+
status = ARES_SUCCESS;
|
794
|
+
goto cleanup;
|
795
|
+
}
|
949
796
|
|
950
|
-
/*
|
951
|
-
|
952
|
-
|
953
|
-
|
954
|
-
|
955
|
-
|
956
|
-
|
797
|
+
/* Both the query id and the questions must be the same. We will drop any
|
798
|
+
* replies that aren't for the same query as this is considered invalid. */
|
799
|
+
if (!same_questions(query, rdnsrec)) {
|
800
|
+
/* Possible qid conflict due to delayed response, that's ok */
|
801
|
+
status = ARES_SUCCESS;
|
802
|
+
goto cleanup;
|
803
|
+
}
|
957
804
|
|
958
|
-
|
805
|
+
/* Validate DNS cookie in response. This function may need to requeue the
|
806
|
+
* query. */
|
807
|
+
if (ares_cookie_validate(query, rdnsrec, conn, now, requeue)
|
808
|
+
!= ARES_SUCCESS) {
|
809
|
+
/* Drop response and return */
|
810
|
+
status = ARES_SUCCESS;
|
811
|
+
goto cleanup;
|
812
|
+
}
|
959
813
|
|
960
|
-
/*
|
961
|
-
|
962
|
-
|
814
|
+
/* At this point we know we've received an answer for this query, so we should
|
815
|
+
* remove it from the connection's queue so we can possibly invalidate the
|
816
|
+
* connection. Delay cleaning up the connection though as we may enqueue
|
817
|
+
* something new. */
|
818
|
+
ares_llist_node_destroy(query->node_queries_to_conn);
|
819
|
+
query->node_queries_to_conn = NULL;
|
820
|
+
|
821
|
+
/* There are old servers that don't understand EDNS at all, then some servers
|
822
|
+
* that have non-compliant implementations. Lets try to detect this sort
|
823
|
+
* of thing. */
|
824
|
+
if (issue_might_be_edns(query->query, rdnsrec)) {
|
825
|
+
status = rewrite_without_edns(query);
|
826
|
+
if (status != ARES_SUCCESS) {
|
827
|
+
end_query(channel, server, query, status, NULL);
|
828
|
+
goto cleanup;
|
829
|
+
}
|
963
830
|
|
964
|
-
|
831
|
+
/* Requeue to same server */
|
832
|
+
status = ares_append_requeue(requeue, query, server);
|
833
|
+
goto cleanup;
|
834
|
+
}
|
965
835
|
|
966
|
-
|
967
|
-
|
968
|
-
|
969
|
-
|
970
|
-
|
971
|
-
|
972
|
-
|
836
|
+
/* If we got a truncated UDP packet and are not ignoring truncation,
|
837
|
+
* don't accept the packet, and switch the query to TCP if we hadn't
|
838
|
+
* done so already.
|
839
|
+
*/
|
840
|
+
if (ares_dns_record_get_flags(rdnsrec) & ARES_FLAG_TC &&
|
841
|
+
!(conn->flags & ARES_CONN_FLAG_TCP) &&
|
842
|
+
!(channel->flags & ARES_FLAG_IGNTC)) {
|
843
|
+
query->using_tcp = ARES_TRUE;
|
844
|
+
status = ares_append_requeue(requeue, query, NULL);
|
845
|
+
/* Status will reflect success except on memory error, which is good since
|
846
|
+
* requeuing to TCP is ok */
|
847
|
+
goto cleanup;
|
848
|
+
}
|
973
849
|
|
974
|
-
|
850
|
+
/* If we aren't passing through all error packets, discard packets
|
851
|
+
* with SERVFAIL, NOTIMP, or REFUSED response codes.
|
852
|
+
*/
|
853
|
+
if (!(channel->flags & ARES_FLAG_NOCHECKRESP)) {
|
854
|
+
ares_dns_rcode_t rcode = ares_dns_record_get_rcode(rdnsrec);
|
855
|
+
if (rcode == ARES_RCODE_SERVFAIL || rcode == ARES_RCODE_NOTIMP ||
|
856
|
+
rcode == ARES_RCODE_REFUSED) {
|
857
|
+
switch (rcode) {
|
858
|
+
case ARES_RCODE_SERVFAIL:
|
859
|
+
status = ARES_ESERVFAIL;
|
860
|
+
break;
|
861
|
+
case ARES_RCODE_NOTIMP:
|
862
|
+
status = ARES_ENOTIMP;
|
863
|
+
break;
|
864
|
+
case ARES_RCODE_REFUSED:
|
865
|
+
status = ARES_EREFUSED;
|
866
|
+
break;
|
867
|
+
default:
|
868
|
+
break;
|
869
|
+
}
|
975
870
|
|
976
|
-
|
977
|
-
|
978
|
-
return IoctlSocket(sockfd, FIONBIO, flags);
|
871
|
+
server_increment_failures(server, query->using_tcp);
|
872
|
+
status = ares_requeue_query(query, now, status, ARES_TRUE, rdnsrec, requeue);
|
979
873
|
|
980
|
-
|
874
|
+
if (status != ARES_ENOMEM) {
|
875
|
+
/* Should any of these cause a connection termination?
|
876
|
+
* Maybe SERVER_FAILURE? */
|
877
|
+
status = ARES_SUCCESS;
|
878
|
+
}
|
879
|
+
goto cleanup;
|
880
|
+
}
|
881
|
+
}
|
981
882
|
|
982
|
-
/*
|
983
|
-
|
984
|
-
|
883
|
+
/* If cache insertion was successful, it took ownership. We ignore
|
884
|
+
* other cache insertion failures. */
|
885
|
+
if (ares_qcache_insert(channel, now, query, rdnsrec) == ARES_SUCCESS) {
|
886
|
+
is_cached = ARES_TRUE;
|
887
|
+
}
|
985
888
|
|
986
|
-
|
987
|
-
|
988
|
-
|
889
|
+
server_set_good(server, query->using_tcp);
|
890
|
+
end_query(channel, server, query, ARES_SUCCESS, rdnsrec);
|
891
|
+
|
892
|
+
status = ARES_SUCCESS;
|
893
|
+
|
894
|
+
cleanup:
|
895
|
+
/* Don't cleanup the cached pointer to the dns response */
|
896
|
+
if (!is_cached) {
|
897
|
+
ares_dns_record_destroy(rdnsrec);
|
898
|
+
}
|
899
|
+
|
900
|
+
return status;
|
989
901
|
}
|
990
902
|
|
991
|
-
|
992
|
-
|
993
|
-
* Linux kernel, NetBSD, FreeBSD and Darwin: default is off;
|
994
|
-
* Windows Vista and later: default is on;
|
995
|
-
* DragonFly BSD: acts like off, and dummy setting;
|
996
|
-
* OpenBSD and earlier Windows: unsupported.
|
997
|
-
* Linux: controlled by /proc/sys/net/ipv6/bindv6only.
|
998
|
-
*/
|
999
|
-
static void set_ipv6_v6only(ares_socket_t sockfd, int on)
|
903
|
+
static void handle_conn_error(ares_conn_t *conn, ares_bool_t critical_failure,
|
904
|
+
ares_status_t failure_status)
|
1000
905
|
{
|
1001
|
-
|
906
|
+
ares_server_t *server = conn->server;
|
907
|
+
|
908
|
+
/* Increment failures first before requeue so it is unlikely to requeue
|
909
|
+
* to the same server */
|
910
|
+
if (critical_failure) {
|
911
|
+
server_increment_failures(
|
912
|
+
server, (conn->flags & ARES_CONN_FLAG_TCP) ? ARES_TRUE : ARES_FALSE);
|
913
|
+
}
|
914
|
+
|
915
|
+
/* This will requeue any connections automatically */
|
916
|
+
ares_close_connection(conn, failure_status);
|
1002
917
|
}
|
1003
|
-
#else
|
1004
|
-
#define set_ipv6_v6only(s,v)
|
1005
|
-
#endif
|
1006
918
|
|
1007
|
-
|
919
|
+
/* Requeue query will normally call ares_send_query() but in some circumstances
|
920
|
+
* this needs to be delayed, so if requeue is not NULL, it will add the query
|
921
|
+
* to the queue instead */
|
922
|
+
ares_status_t ares_requeue_query(ares_query_t *query, const ares_timeval_t *now,
|
923
|
+
ares_status_t status,
|
924
|
+
ares_bool_t inc_try_count,
|
925
|
+
const ares_dns_record_t *dnsrec,
|
926
|
+
ares_array_t **requeue)
|
1008
927
|
{
|
1009
|
-
|
1010
|
-
|
1011
|
-
struct sockaddr_in sa4;
|
1012
|
-
struct sockaddr_in6 sa6;
|
1013
|
-
} local;
|
1014
|
-
|
1015
|
-
/* do not set options for user-managed sockets */
|
1016
|
-
if (channel->sock_funcs)
|
1017
|
-
return 0;
|
1018
|
-
|
1019
|
-
(void)setsocknonblock(s, TRUE);
|
1020
|
-
|
1021
|
-
#if defined(FD_CLOEXEC) && !defined(MSDOS)
|
1022
|
-
/* Configure the socket fd as close-on-exec. */
|
1023
|
-
if (fcntl(s, F_SETFD, FD_CLOEXEC) == -1)
|
1024
|
-
return -1; /* LCOV_EXCL_LINE */
|
1025
|
-
#endif
|
928
|
+
ares_channel_t *channel = query->channel;
|
929
|
+
size_t max_tries = ares_slist_len(channel->servers) * channel->tries;
|
1026
930
|
|
1027
|
-
|
1028
|
-
|
1029
|
-
|
1030
|
-
|
1031
|
-
sizeof(channel->socket_send_buffer_size)) == -1)
|
1032
|
-
return -1;
|
1033
|
-
|
1034
|
-
if ((channel->socket_receive_buffer_size > 0) &&
|
1035
|
-
setsockopt(s, SOL_SOCKET, SO_RCVBUF,
|
1036
|
-
(void *)&channel->socket_receive_buffer_size,
|
1037
|
-
sizeof(channel->socket_receive_buffer_size)) == -1)
|
1038
|
-
return -1;
|
1039
|
-
|
1040
|
-
#ifdef SO_BINDTODEVICE
|
1041
|
-
if (channel->local_dev_name[0]) {
|
1042
|
-
if (setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE,
|
1043
|
-
channel->local_dev_name, sizeof(channel->local_dev_name))) {
|
1044
|
-
/* Only root can do this, and usually not fatal if it doesn't work, so */
|
1045
|
-
/* just continue on. */
|
1046
|
-
}
|
931
|
+
ares_query_remove_from_conn(query);
|
932
|
+
|
933
|
+
if (status != ARES_SUCCESS) {
|
934
|
+
query->error_status = status;
|
1047
935
|
}
|
1048
|
-
#endif
|
1049
936
|
|
1050
|
-
if (
|
1051
|
-
|
1052
|
-
memset(&local.sa4, 0, sizeof(local.sa4));
|
1053
|
-
local.sa4.sin_family = AF_INET;
|
1054
|
-
local.sa4.sin_addr.s_addr = htonl(channel->local_ip4);
|
1055
|
-
if (bind(s, &local.sa, sizeof(local.sa4)) < 0)
|
1056
|
-
return -1;
|
1057
|
-
}
|
937
|
+
if (inc_try_count) {
|
938
|
+
query->try_count++;
|
1058
939
|
}
|
1059
|
-
|
1060
|
-
|
1061
|
-
|
1062
|
-
|
1063
|
-
local.sa6.sin6_family = AF_INET6;
|
1064
|
-
memcpy(&local.sa6.sin6_addr, channel->local_ip6,
|
1065
|
-
sizeof(channel->local_ip6));
|
1066
|
-
if (bind(s, &local.sa, sizeof(local.sa6)) < 0)
|
1067
|
-
return -1;
|
940
|
+
|
941
|
+
if (query->try_count < max_tries && !query->no_retries) {
|
942
|
+
if (requeue != NULL) {
|
943
|
+
return ares_append_requeue(requeue, query, NULL);
|
1068
944
|
}
|
1069
|
-
|
945
|
+
return ares_send_query(NULL, query, now);
|
946
|
+
}
|
947
|
+
|
948
|
+
/* If we are here, all attempts to perform query failed. */
|
949
|
+
if (query->error_status == ARES_SUCCESS) {
|
950
|
+
query->error_status = ARES_ETIMEOUT;
|
1070
951
|
}
|
1071
952
|
|
1072
|
-
|
953
|
+
end_query(channel, NULL, query, query->error_status, dnsrec);
|
954
|
+
return ARES_ETIMEOUT;
|
1073
955
|
}
|
1074
956
|
|
1075
|
-
|
957
|
+
/*! Count the number of servers that share the same highest priority (lowest
|
958
|
+
* consecutive failures). Since they are sorted in priority order, we just
|
959
|
+
* stop when the consecutive failure count changes. Used for random selection
|
960
|
+
* of good servers. */
|
961
|
+
static size_t count_highest_prio_servers(const ares_channel_t *channel)
|
1076
962
|
{
|
1077
|
-
|
1078
|
-
|
1079
|
-
|
1080
|
-
union {
|
1081
|
-
struct sockaddr_in sa4;
|
1082
|
-
struct sockaddr_in6 sa6;
|
1083
|
-
} saddr;
|
1084
|
-
struct sockaddr *sa;
|
1085
|
-
|
1086
|
-
switch (server->addr.family)
|
1087
|
-
{
|
1088
|
-
case AF_INET:
|
1089
|
-
sa = (void *)&saddr.sa4;
|
1090
|
-
salen = sizeof(saddr.sa4);
|
1091
|
-
memset(sa, 0, salen);
|
1092
|
-
saddr.sa4.sin_family = AF_INET;
|
1093
|
-
if (server->addr.tcp_port) {
|
1094
|
-
saddr.sa4.sin_port = aresx_sitous(server->addr.tcp_port);
|
1095
|
-
} else {
|
1096
|
-
saddr.sa4.sin_port = aresx_sitous(channel->tcp_port);
|
1097
|
-
}
|
1098
|
-
memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
|
1099
|
-
sizeof(server->addr.addrV4));
|
1100
|
-
break;
|
1101
|
-
case AF_INET6:
|
1102
|
-
sa = (void *)&saddr.sa6;
|
1103
|
-
salen = sizeof(saddr.sa6);
|
1104
|
-
memset(sa, 0, salen);
|
1105
|
-
saddr.sa6.sin6_family = AF_INET6;
|
1106
|
-
if (server->addr.tcp_port) {
|
1107
|
-
saddr.sa6.sin6_port = aresx_sitous(server->addr.tcp_port);
|
1108
|
-
} else {
|
1109
|
-
saddr.sa6.sin6_port = aresx_sitous(channel->tcp_port);
|
1110
|
-
}
|
1111
|
-
memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
|
1112
|
-
sizeof(server->addr.addrV6));
|
1113
|
-
break;
|
1114
|
-
default:
|
1115
|
-
return -1; /* LCOV_EXCL_LINE */
|
1116
|
-
}
|
963
|
+
ares_slist_node_t *node;
|
964
|
+
size_t cnt = 0;
|
965
|
+
size_t last_consec_failures = SIZE_MAX;
|
1117
966
|
|
1118
|
-
|
1119
|
-
|
1120
|
-
|
1121
|
-
return -1;
|
967
|
+
for (node = ares_slist_node_first(channel->servers); node != NULL;
|
968
|
+
node = ares_slist_node_next(node)) {
|
969
|
+
const ares_server_t *server = ares_slist_node_val(node);
|
1122
970
|
|
1123
|
-
|
1124
|
-
|
1125
|
-
|
1126
|
-
ares__close_socket(channel, s);
|
1127
|
-
return -1;
|
971
|
+
if (last_consec_failures != SIZE_MAX &&
|
972
|
+
last_consec_failures < server->consec_failures) {
|
973
|
+
break;
|
1128
974
|
}
|
1129
975
|
|
1130
|
-
|
1131
|
-
|
1132
|
-
|
1133
|
-
* in configure_socket). In general, in DNS lookups we're pretty much
|
1134
|
-
* interested in firing off a single request and then waiting for a reply,
|
1135
|
-
* so batching isn't very interesting.
|
1136
|
-
*/
|
1137
|
-
opt = 1;
|
1138
|
-
if (channel->sock_funcs == 0
|
1139
|
-
&&
|
1140
|
-
setsockopt(s, IPPROTO_TCP, TCP_NODELAY,
|
1141
|
-
(void *)&opt, sizeof(opt)) == -1)
|
1142
|
-
{
|
1143
|
-
ares__close_socket(channel, s);
|
1144
|
-
return -1;
|
1145
|
-
}
|
1146
|
-
#endif
|
976
|
+
last_consec_failures = server->consec_failures;
|
977
|
+
cnt++;
|
978
|
+
}
|
1147
979
|
|
1148
|
-
|
1149
|
-
|
1150
|
-
int err = channel->sock_config_cb(s, SOCK_STREAM,
|
1151
|
-
channel->sock_config_cb_data);
|
1152
|
-
if (err < 0)
|
1153
|
-
{
|
1154
|
-
ares__close_socket(channel, s);
|
1155
|
-
return err;
|
1156
|
-
}
|
1157
|
-
}
|
980
|
+
return cnt;
|
981
|
+
}
|
1158
982
|
|
1159
|
-
|
1160
|
-
|
1161
|
-
|
1162
|
-
|
983
|
+
/* Pick a random *best* server from the list, we first get a random number in
|
984
|
+
* the range of the number of *best* servers, then scan until we find that
|
985
|
+
* server in the list */
|
986
|
+
static ares_server_t *ares_random_server(ares_channel_t *channel)
|
987
|
+
{
|
988
|
+
unsigned char c;
|
989
|
+
size_t cnt;
|
990
|
+
size_t idx;
|
991
|
+
ares_slist_node_t *node;
|
992
|
+
size_t num_servers = count_highest_prio_servers(channel);
|
993
|
+
|
994
|
+
/* Silence coverity, not possible */
|
995
|
+
if (num_servers == 0) {
|
996
|
+
return NULL;
|
997
|
+
}
|
1163
998
|
|
1164
|
-
|
1165
|
-
{
|
1166
|
-
ares__close_socket(channel, s);
|
1167
|
-
return -1;
|
1168
|
-
}
|
1169
|
-
}
|
999
|
+
ares_rand_bytes(channel->rand_state, &c, 1);
|
1170
1000
|
|
1171
|
-
|
1172
|
-
|
1173
|
-
|
1174
|
-
|
1175
|
-
|
1176
|
-
|
1177
|
-
|
1178
|
-
|
1179
|
-
}
|
1001
|
+
cnt = c;
|
1002
|
+
idx = cnt % num_servers;
|
1003
|
+
|
1004
|
+
cnt = 0;
|
1005
|
+
for (node = ares_slist_node_first(channel->servers); node != NULL;
|
1006
|
+
node = ares_slist_node_next(node)) {
|
1007
|
+
if (cnt == idx) {
|
1008
|
+
return ares_slist_node_val(node);
|
1180
1009
|
}
|
1181
1010
|
|
1182
|
-
|
1183
|
-
|
1184
|
-
|
1185
|
-
|
1186
|
-
return 0;
|
1011
|
+
cnt++;
|
1012
|
+
}
|
1013
|
+
|
1014
|
+
return NULL;
|
1187
1015
|
}
|
1188
1016
|
|
1189
|
-
static
|
1017
|
+
static void server_probe_cb(void *arg, ares_status_t status, size_t timeouts,
|
1018
|
+
const ares_dns_record_t *dnsrec)
|
1190
1019
|
{
|
1191
|
-
|
1192
|
-
|
1193
|
-
|
1194
|
-
|
1195
|
-
|
1196
|
-
|
1197
|
-
|
1198
|
-
|
1199
|
-
|
1200
|
-
|
1201
|
-
|
1202
|
-
|
1203
|
-
|
1204
|
-
|
1205
|
-
|
1206
|
-
|
1207
|
-
|
1208
|
-
|
1209
|
-
|
1210
|
-
|
1211
|
-
|
1212
|
-
|
1213
|
-
|
1214
|
-
|
1215
|
-
sa = (void *)&saddr.sa6;
|
1216
|
-
salen = sizeof(saddr.sa6);
|
1217
|
-
memset(sa, 0, salen);
|
1218
|
-
saddr.sa6.sin6_family = AF_INET6;
|
1219
|
-
if (server->addr.udp_port) {
|
1220
|
-
saddr.sa6.sin6_port = aresx_sitous(server->addr.udp_port);
|
1221
|
-
} else {
|
1222
|
-
saddr.sa6.sin6_port = aresx_sitous(channel->udp_port);
|
1223
|
-
}
|
1224
|
-
memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
|
1225
|
-
sizeof(server->addr.addrV6));
|
1226
|
-
break;
|
1227
|
-
default:
|
1228
|
-
return -1; /* LCOV_EXCL_LINE */
|
1229
|
-
}
|
1020
|
+
(void)arg;
|
1021
|
+
(void)status;
|
1022
|
+
(void)timeouts;
|
1023
|
+
(void)dnsrec;
|
1024
|
+
/* Nothing to do, the logic internally will handle success/fail of this */
|
1025
|
+
}
|
1026
|
+
|
1027
|
+
/* Determine if we should probe a downed server */
|
1028
|
+
static void ares_probe_failed_server(ares_channel_t *channel,
|
1029
|
+
const ares_server_t *server,
|
1030
|
+
const ares_query_t *query)
|
1031
|
+
{
|
1032
|
+
const ares_server_t *last_server = ares_slist_last_val(channel->servers);
|
1033
|
+
unsigned short r;
|
1034
|
+
ares_timeval_t now;
|
1035
|
+
ares_slist_node_t *node;
|
1036
|
+
ares_server_t *probe_server = NULL;
|
1037
|
+
|
1038
|
+
/* If no servers have failures, or we're not configured with a server retry
|
1039
|
+
* chance, then nothing to probe */
|
1040
|
+
if ((last_server != NULL && last_server->consec_failures == 0) ||
|
1041
|
+
channel->server_retry_chance == 0) {
|
1042
|
+
return;
|
1043
|
+
}
|
1230
1044
|
|
1231
|
-
/*
|
1232
|
-
|
1233
|
-
|
1234
|
-
|
1045
|
+
/* Generate a random value to decide whether to retry a failed server. The
|
1046
|
+
* probability to use is 1/channel->server_retry_chance, rounded up to a
|
1047
|
+
* precision of 1/2^B where B is the number of bits in the random value.
|
1048
|
+
* We use an unsigned short for the random value for increased precision.
|
1049
|
+
*/
|
1050
|
+
ares_rand_bytes(channel->rand_state, (unsigned char *)&r, sizeof(r));
|
1051
|
+
if (r % channel->server_retry_chance != 0) {
|
1052
|
+
return;
|
1053
|
+
}
|
1235
1054
|
|
1236
|
-
/*
|
1237
|
-
|
1238
|
-
|
1239
|
-
|
1240
|
-
|
1055
|
+
/* Select the first server with failures to retry that has passed the retry
|
1056
|
+
* timeout and doesn't already have a pending probe */
|
1057
|
+
ares_tvnow(&now);
|
1058
|
+
for (node = ares_slist_node_first(channel->servers); node != NULL;
|
1059
|
+
node = ares_slist_node_next(node)) {
|
1060
|
+
ares_server_t *node_val = ares_slist_node_val(node);
|
1061
|
+
if (node_val != NULL && node_val->consec_failures > 0 &&
|
1062
|
+
!node_val->probe_pending &&
|
1063
|
+
ares_timedout(&now, &node_val->next_retry_time)) {
|
1064
|
+
probe_server = node_val;
|
1065
|
+
break;
|
1241
1066
|
}
|
1067
|
+
}
|
1242
1068
|
|
1243
|
-
|
1244
|
-
|
1245
|
-
|
1246
|
-
|
1247
|
-
|
1248
|
-
{
|
1249
|
-
ares__close_socket(channel, s);
|
1250
|
-
return err;
|
1251
|
-
}
|
1252
|
-
}
|
1069
|
+
/* Either nothing to probe or the query was enqueud to the same server
|
1070
|
+
* we were going to probe. Do nothing. */
|
1071
|
+
if (probe_server == NULL || server == probe_server) {
|
1072
|
+
return;
|
1073
|
+
}
|
1253
1074
|
|
1254
|
-
/*
|
1255
|
-
|
1256
|
-
|
1257
|
-
|
1075
|
+
/* Enqueue an identical query onto the specified server without honoring
|
1076
|
+
* the cache or allowing retries. We want to make sure it only attempts to
|
1077
|
+
* use the server in question */
|
1078
|
+
probe_server->probe_pending = ARES_TRUE;
|
1079
|
+
ares_send_nolock(channel, probe_server,
|
1080
|
+
ARES_SEND_FLAG_NOCACHE | ARES_SEND_FLAG_NORETRY,
|
1081
|
+
query->query, server_probe_cb, NULL, NULL);
|
1082
|
+
}
|
1258
1083
|
|
1259
|
-
|
1260
|
-
|
1261
|
-
|
1262
|
-
|
1263
|
-
|
1264
|
-
|
1084
|
+
static size_t ares_calc_query_timeout(const ares_query_t *query,
|
1085
|
+
const ares_server_t *server,
|
1086
|
+
const ares_timeval_t *now)
|
1087
|
+
{
|
1088
|
+
const ares_channel_t *channel = query->channel;
|
1089
|
+
size_t timeout = ares_metrics_server_timeout(server, now);
|
1090
|
+
size_t timeplus = timeout;
|
1091
|
+
size_t rounds;
|
1092
|
+
size_t num_servers = ares_slist_len(channel->servers);
|
1093
|
+
|
1094
|
+
if (num_servers == 0) {
|
1095
|
+
return 0; /* LCOV_EXCL_LINE: DefensiveCoding */
|
1096
|
+
}
|
1265
1097
|
|
1266
|
-
|
1267
|
-
|
1268
|
-
|
1269
|
-
|
1270
|
-
|
1271
|
-
|
1272
|
-
|
1273
|
-
|
1274
|
-
|
1275
|
-
|
1098
|
+
/* For each trip through the entire server list, we want to double the
|
1099
|
+
* retry from the last retry */
|
1100
|
+
rounds = (query->try_count / num_servers);
|
1101
|
+
if (rounds > 0) {
|
1102
|
+
timeplus <<= rounds;
|
1103
|
+
}
|
1104
|
+
|
1105
|
+
if (channel->maxtimeout && timeplus > channel->maxtimeout) {
|
1106
|
+
timeplus = channel->maxtimeout;
|
1107
|
+
}
|
1108
|
+
|
1109
|
+
/* Add some jitter to the retry timeout.
|
1110
|
+
*
|
1111
|
+
* Jitter is needed in situation when resolve requests are performed
|
1112
|
+
* simultaneously from multiple hosts and DNS server throttle these requests.
|
1113
|
+
* Adding randomness allows to avoid synchronisation of retries.
|
1114
|
+
*
|
1115
|
+
* Value of timeplus adjusted randomly to the range [0.5 * timeplus,
|
1116
|
+
* timeplus].
|
1117
|
+
*/
|
1118
|
+
if (rounds > 0) {
|
1119
|
+
unsigned short r;
|
1120
|
+
float delta_multiplier;
|
1276
1121
|
|
1277
|
-
|
1122
|
+
ares_rand_bytes(channel->rand_state, (unsigned char *)&r, sizeof(r));
|
1123
|
+
delta_multiplier = ((float)r / USHRT_MAX) * 0.5f;
|
1124
|
+
timeplus -= (size_t)((float)timeplus * delta_multiplier);
|
1125
|
+
}
|
1126
|
+
|
1127
|
+
/* We want explicitly guarantee that timeplus is greater or equal to timeout
|
1128
|
+
* specified in channel options. */
|
1129
|
+
if (timeplus < timeout) {
|
1130
|
+
timeplus = timeout;
|
1131
|
+
}
|
1278
1132
|
|
1279
|
-
|
1280
|
-
return 0;
|
1133
|
+
return timeplus;
|
1281
1134
|
}
|
1282
1135
|
|
1283
|
-
static
|
1284
|
-
|
1136
|
+
static ares_conn_t *ares_fetch_connection(const ares_channel_t *channel,
|
1137
|
+
ares_server_t *server,
|
1138
|
+
const ares_query_t *query)
|
1285
1139
|
{
|
1286
|
-
|
1287
|
-
|
1288
|
-
|
1289
|
-
|
1290
|
-
|
1291
|
-
|
1292
|
-
|
1293
|
-
|
1294
|
-
|
1295
|
-
|
1296
|
-
|
1297
|
-
|
1298
|
-
|
1299
|
-
|
1300
|
-
|
1301
|
-
|
1302
|
-
|
1303
|
-
|
1304
|
-
|
1305
|
-
/*
|
1306
|
-
|
1307
|
-
|
1308
|
-
|
1309
|
-
|
1310
|
-
|
1311
|
-
|
1312
|
-
return 0;
|
1313
|
-
q.p += q.namelen;
|
1314
|
-
if (q.p + QFIXEDSZ > qbuf + qlen)
|
1315
|
-
{
|
1316
|
-
ares_free(q.name);
|
1317
|
-
return 0;
|
1318
|
-
}
|
1319
|
-
q.type = DNS_QUESTION_TYPE(q.p);
|
1320
|
-
q.dnsclass = DNS_QUESTION_CLASS(q.p);
|
1321
|
-
q.p += QFIXEDSZ;
|
1322
|
-
|
1323
|
-
/* Search for this question in the answer. */
|
1324
|
-
a.p = abuf + HFIXEDSZ;
|
1325
|
-
for (j = 0; j < a.qdcount; j++)
|
1326
|
-
{
|
1327
|
-
/* Decode the question in the answer. */
|
1328
|
-
if (ares_expand_name(a.p, abuf, alen, &a.name, &a.namelen)
|
1329
|
-
!= ARES_SUCCESS)
|
1330
|
-
{
|
1331
|
-
ares_free(q.name);
|
1332
|
-
return 0;
|
1333
|
-
}
|
1334
|
-
a.p += a.namelen;
|
1335
|
-
if (a.p + QFIXEDSZ > abuf + alen)
|
1336
|
-
{
|
1337
|
-
ares_free(q.name);
|
1338
|
-
ares_free(a.name);
|
1339
|
-
return 0;
|
1340
|
-
}
|
1341
|
-
a.type = DNS_QUESTION_TYPE(a.p);
|
1342
|
-
a.dnsclass = DNS_QUESTION_CLASS(a.p);
|
1343
|
-
a.p += QFIXEDSZ;
|
1344
|
-
|
1345
|
-
/* Compare the decoded questions. */
|
1346
|
-
if (strcasecmp(q.name, a.name) == 0 && q.type == a.type
|
1347
|
-
&& q.dnsclass == a.dnsclass)
|
1348
|
-
{
|
1349
|
-
ares_free(a.name);
|
1350
|
-
break;
|
1351
|
-
}
|
1352
|
-
ares_free(a.name);
|
1353
|
-
}
|
1354
|
-
|
1355
|
-
ares_free(q.name);
|
1356
|
-
if (j == a.qdcount)
|
1357
|
-
return 0;
|
1358
|
-
}
|
1359
|
-
return 1;
|
1140
|
+
ares_llist_node_t *node;
|
1141
|
+
ares_conn_t *conn;
|
1142
|
+
|
1143
|
+
if (query->using_tcp) {
|
1144
|
+
return server->tcp_conn;
|
1145
|
+
}
|
1146
|
+
|
1147
|
+
/* Fetch existing UDP connection */
|
1148
|
+
node = ares_llist_node_first(server->connections);
|
1149
|
+
if (node == NULL) {
|
1150
|
+
return NULL;
|
1151
|
+
}
|
1152
|
+
|
1153
|
+
conn = ares_llist_node_val(node);
|
1154
|
+
/* Not UDP, skip */
|
1155
|
+
if (conn->flags & ARES_CONN_FLAG_TCP) {
|
1156
|
+
return NULL;
|
1157
|
+
}
|
1158
|
+
|
1159
|
+
/* Used too many times */
|
1160
|
+
if (channel->udp_max_queries > 0 &&
|
1161
|
+
conn->total_queries >= channel->udp_max_queries) {
|
1162
|
+
return NULL;
|
1163
|
+
}
|
1164
|
+
|
1165
|
+
return conn;
|
1360
1166
|
}
|
1361
1167
|
|
1362
|
-
static
|
1168
|
+
static ares_status_t ares_conn_query_write(ares_conn_t *conn,
|
1169
|
+
ares_query_t *query,
|
1170
|
+
const ares_timeval_t *now)
|
1363
1171
|
{
|
1364
|
-
|
1365
|
-
|
1366
|
-
|
1367
|
-
|
1368
|
-
|
1369
|
-
|
1370
|
-
|
1371
|
-
|
1372
|
-
|
1373
|
-
|
1374
|
-
|
1375
|
-
|
1376
|
-
|
1377
|
-
|
1378
|
-
|
1379
|
-
|
1380
|
-
|
1381
|
-
|
1382
|
-
|
1383
|
-
|
1384
|
-
|
1385
|
-
|
1386
|
-
|
1387
|
-
|
1172
|
+
ares_server_t *server = conn->server;
|
1173
|
+
ares_channel_t *channel = server->channel;
|
1174
|
+
ares_status_t status;
|
1175
|
+
|
1176
|
+
status = ares_cookie_apply(query->query, conn, now);
|
1177
|
+
if (status != ARES_SUCCESS) {
|
1178
|
+
return status;
|
1179
|
+
}
|
1180
|
+
|
1181
|
+
/* We write using the TCP format even for UDP, we just strip the length
|
1182
|
+
* before putting on the wire */
|
1183
|
+
status = ares_dns_write_buf_tcp(query->query, conn->out_buf);
|
1184
|
+
if (status != ARES_SUCCESS) {
|
1185
|
+
return status;
|
1186
|
+
}
|
1187
|
+
|
1188
|
+
/* Not pending a TFO write and not connected, so we can't even try to
|
1189
|
+
* write until we get a signal */
|
1190
|
+
if (conn->flags & ARES_CONN_FLAG_TCP &&
|
1191
|
+
!(conn->state_flags & ARES_CONN_STATE_CONNECTED) &&
|
1192
|
+
!(conn->flags & ARES_CONN_FLAG_TFO_INITIAL)) {
|
1193
|
+
return ARES_SUCCESS;
|
1194
|
+
}
|
1195
|
+
|
1196
|
+
/* Delay actual write if possible (TCP only, and only if callback
|
1197
|
+
* configured) */
|
1198
|
+
if (channel->notify_pending_write_cb && !channel->notify_pending_write &&
|
1199
|
+
conn->flags & ARES_CONN_FLAG_TCP) {
|
1200
|
+
channel->notify_pending_write = ARES_TRUE;
|
1201
|
+
channel->notify_pending_write_cb(channel->notify_pending_write_cb_data);
|
1202
|
+
return ARES_SUCCESS;
|
1203
|
+
}
|
1204
|
+
|
1205
|
+
/* Unfortunately we need to write right away and can't aggregate multiple
|
1206
|
+
* queries into a single write. */
|
1207
|
+
return ares_conn_flush(conn);
|
1388
1208
|
}
|
1389
1209
|
|
1390
|
-
|
1391
|
-
|
1210
|
+
ares_status_t ares_send_query(ares_server_t *requested_server,
|
1211
|
+
ares_query_t *query, const ares_timeval_t *now)
|
1392
1212
|
{
|
1393
|
-
|
1394
|
-
|
1395
|
-
|
1396
|
-
|
1397
|
-
|
1398
|
-
|
1399
|
-
|
1400
|
-
/* Parse the answer header. */
|
1401
|
-
qdcount = DNS_HEADER_QDCOUNT(abuf);
|
1402
|
-
ancount = DNS_HEADER_ANCOUNT(abuf);
|
1403
|
-
nscount = DNS_HEADER_NSCOUNT(abuf);
|
1404
|
-
arcount = DNS_HEADER_ARCOUNT(abuf);
|
1405
|
-
|
1406
|
-
aptr = abuf + HFIXEDSZ;
|
1407
|
-
|
1408
|
-
/* skip the questions */
|
1409
|
-
for (i = 0; i < qdcount; i++)
|
1410
|
-
{
|
1411
|
-
char* name;
|
1412
|
-
long len;
|
1413
|
-
status = ares_expand_name(aptr, abuf, alen, &name, &len);
|
1414
|
-
if (status != ARES_SUCCESS)
|
1415
|
-
return -1;
|
1416
|
-
ares_free_string(name);
|
1417
|
-
if (aptr + len + QFIXEDSZ > abuf + alen)
|
1418
|
-
return -1;
|
1419
|
-
aptr += len + QFIXEDSZ;
|
1420
|
-
}
|
1213
|
+
ares_channel_t *channel = query->channel;
|
1214
|
+
ares_server_t *server;
|
1215
|
+
ares_conn_t *conn;
|
1216
|
+
size_t timeplus;
|
1217
|
+
ares_status_t status;
|
1218
|
+
ares_bool_t probe_downed_server = ARES_TRUE;
|
1421
1219
|
|
1422
|
-
|
1423
|
-
|
1424
|
-
|
1425
|
-
|
1426
|
-
|
1427
|
-
|
1428
|
-
|
1429
|
-
|
1430
|
-
|
1431
|
-
|
1432
|
-
|
1433
|
-
return -1;
|
1434
|
-
aptr += len;
|
1435
|
-
dlen = DNS_RR_LEN(aptr);
|
1436
|
-
aptr += RRFIXEDSZ;
|
1437
|
-
if (aptr + dlen > abuf + alen)
|
1438
|
-
return -1;
|
1439
|
-
aptr += dlen;
|
1220
|
+
|
1221
|
+
/* Choose the server to send the query to */
|
1222
|
+
if (requested_server != NULL) {
|
1223
|
+
server = requested_server;
|
1224
|
+
} else {
|
1225
|
+
/* If rotate is turned on, do a random selection */
|
1226
|
+
if (channel->rotate) {
|
1227
|
+
server = ares_random_server(channel);
|
1228
|
+
} else {
|
1229
|
+
/* First server in list */
|
1230
|
+
server = ares_slist_first_val(channel->servers);
|
1440
1231
|
}
|
1232
|
+
}
|
1441
1233
|
|
1442
|
-
|
1443
|
-
|
1444
|
-
|
1445
|
-
|
1446
|
-
|
1447
|
-
|
1448
|
-
|
1449
|
-
|
1450
|
-
|
1451
|
-
|
1452
|
-
|
1453
|
-
|
1454
|
-
|
1455
|
-
|
1456
|
-
|
1457
|
-
|
1458
|
-
|
1459
|
-
|
1460
|
-
|
1461
|
-
|
1462
|
-
|
1463
|
-
|
1234
|
+
if (server == NULL) {
|
1235
|
+
end_query(channel, server, query, ARES_ENOSERVER /* ? */, NULL);
|
1236
|
+
return ARES_ENOSERVER;
|
1237
|
+
}
|
1238
|
+
|
1239
|
+
/* If a query is directed to a specific query, or the server chosen has
|
1240
|
+
* failures, or the query is being retried, don't probe for downed servers */
|
1241
|
+
if (requested_server != NULL || server->consec_failures > 0 ||
|
1242
|
+
query->try_count != 0) {
|
1243
|
+
probe_downed_server = ARES_FALSE;
|
1244
|
+
}
|
1245
|
+
|
1246
|
+
conn = ares_fetch_connection(channel, server, query);
|
1247
|
+
if (conn == NULL) {
|
1248
|
+
status = ares_open_connection(&conn, channel, server, query->using_tcp);
|
1249
|
+
switch (status) {
|
1250
|
+
/* Good result, continue on */
|
1251
|
+
case ARES_SUCCESS:
|
1252
|
+
break;
|
1253
|
+
|
1254
|
+
/* These conditions are retryable as they are server-specific
|
1255
|
+
* error codes */
|
1256
|
+
case ARES_ECONNREFUSED:
|
1257
|
+
case ARES_EBADFAMILY:
|
1258
|
+
server_increment_failures(server, query->using_tcp);
|
1259
|
+
return ares_requeue_query(query, now, status, ARES_TRUE, NULL, NULL);
|
1260
|
+
|
1261
|
+
/* Anything else is not retryable, likely ENOMEM */
|
1262
|
+
default:
|
1263
|
+
end_query(channel, server, query, status, NULL);
|
1264
|
+
return status;
|
1464
1265
|
}
|
1266
|
+
}
|
1267
|
+
|
1268
|
+
/* Write the query */
|
1269
|
+
status = ares_conn_query_write(conn, query, now);
|
1270
|
+
switch (status) {
|
1271
|
+
/* Good result, continue on */
|
1272
|
+
case ARES_SUCCESS:
|
1273
|
+
break;
|
1274
|
+
|
1275
|
+
case ARES_ENOMEM:
|
1276
|
+
/* Not retryable */
|
1277
|
+
end_query(channel, server, query, status, NULL);
|
1278
|
+
return status;
|
1279
|
+
|
1280
|
+
/* These conditions are retryable as they are server-specific
|
1281
|
+
* error codes */
|
1282
|
+
case ARES_ECONNREFUSED:
|
1283
|
+
case ARES_EBADFAMILY:
|
1284
|
+
handle_conn_error(conn, ARES_TRUE, status);
|
1285
|
+
status = ares_requeue_query(query, now, status, ARES_TRUE, NULL, NULL);
|
1286
|
+
if (status == ARES_ETIMEOUT) {
|
1287
|
+
status = ARES_ECONNREFUSED;
|
1288
|
+
}
|
1289
|
+
return status;
|
1290
|
+
|
1291
|
+
default:
|
1292
|
+
server_increment_failures(server, query->using_tcp);
|
1293
|
+
status = ares_requeue_query(query, now, status, ARES_TRUE, NULL, NULL);
|
1294
|
+
return status;
|
1295
|
+
}
|
1296
|
+
|
1297
|
+
timeplus = ares_calc_query_timeout(query, server, now);
|
1298
|
+
/* Keep track of queries bucketed by timeout, so we can process
|
1299
|
+
* timeout events quickly.
|
1300
|
+
*/
|
1301
|
+
ares_slist_node_destroy(query->node_queries_by_timeout);
|
1302
|
+
query->ts = *now;
|
1303
|
+
query->timeout = *now;
|
1304
|
+
timeadd(&query->timeout, timeplus);
|
1305
|
+
query->node_queries_by_timeout =
|
1306
|
+
ares_slist_insert(channel->queries_by_timeout, query);
|
1307
|
+
if (!query->node_queries_by_timeout) {
|
1308
|
+
/* LCOV_EXCL_START: OutOfMemory */
|
1309
|
+
end_query(channel, server, query, ARES_ENOMEM, NULL);
|
1310
|
+
return ARES_ENOMEM;
|
1311
|
+
/* LCOV_EXCL_STOP */
|
1312
|
+
}
|
1313
|
+
|
1314
|
+
/* Keep track of queries bucketed by connection, so we can process errors
|
1315
|
+
* quickly. */
|
1316
|
+
ares_llist_node_destroy(query->node_queries_to_conn);
|
1317
|
+
query->node_queries_to_conn =
|
1318
|
+
ares_llist_insert_last(conn->queries_to_conn, query);
|
1319
|
+
|
1320
|
+
if (query->node_queries_to_conn == NULL) {
|
1321
|
+
/* LCOV_EXCL_START: OutOfMemory */
|
1322
|
+
end_query(channel, server, query, ARES_ENOMEM, NULL);
|
1323
|
+
return ARES_ENOMEM;
|
1324
|
+
/* LCOV_EXCL_STOP */
|
1325
|
+
}
|
1326
|
+
|
1327
|
+
query->conn = conn;
|
1328
|
+
conn->total_queries++;
|
1329
|
+
|
1330
|
+
/* We just successfully enqueud a query, see if we should probe downed
|
1331
|
+
* servers. */
|
1332
|
+
if (probe_downed_server) {
|
1333
|
+
ares_probe_failed_server(channel, server, query);
|
1334
|
+
}
|
1465
1335
|
|
1466
|
-
return
|
1336
|
+
return ARES_SUCCESS;
|
1467
1337
|
}
|
1468
1338
|
|
1469
|
-
static
|
1470
|
-
|
1339
|
+
static ares_bool_t same_questions(const ares_query_t *query,
|
1340
|
+
const ares_dns_record_t *arec)
|
1471
1341
|
{
|
1472
|
-
|
1342
|
+
size_t i;
|
1343
|
+
ares_bool_t rv = ARES_FALSE;
|
1344
|
+
const ares_dns_record_t *qrec = query->query;
|
1345
|
+
const ares_channel_t *channel = query->channel;
|
1473
1346
|
|
1474
|
-
|
1475
|
-
|
1476
|
-
|
1477
|
-
|
1478
|
-
|
1479
|
-
|
1480
|
-
|
1481
|
-
|
1482
|
-
|
1483
|
-
|
1484
|
-
|
1485
|
-
|
1486
|
-
|
1487
|
-
|
1488
|
-
|
1489
|
-
|
1490
|
-
|
1491
|
-
* retransmission, then received a response before actually
|
1492
|
-
* retransmitting. This is perfectly fine, so we want to keep
|
1493
|
-
* the connection running smoothly if we can. But in the worst
|
1494
|
-
* case we may have sent only some prefix of the query, with
|
1495
|
-
* some suffix of the query left to send. Also, the buffer may
|
1496
|
-
* be queued on multiple queues. To prevent dangling pointers
|
1497
|
-
* to the query's tcpbuf and handle these cases, we just give
|
1498
|
-
* such sendreqs their own copy of the query packet.
|
1499
|
-
*/
|
1500
|
-
sendreq->data_storage = ares_malloc(sendreq->len);
|
1501
|
-
if (sendreq->data_storage != NULL)
|
1502
|
-
{
|
1503
|
-
memcpy(sendreq->data_storage, sendreq->data, sendreq->len);
|
1504
|
-
sendreq->data = sendreq->data_storage;
|
1505
|
-
}
|
1506
|
-
}
|
1507
|
-
if ((status != ARES_SUCCESS) || (sendreq->data_storage == NULL))
|
1508
|
-
{
|
1509
|
-
/* We encountered an error (probably a timeout, suggesting the
|
1510
|
-
* DNS server we're talking to is probably unreachable,
|
1511
|
-
* wedged, or severely overloaded) or we couldn't copy the
|
1512
|
-
* request, so mark the connection as broken. When we get to
|
1513
|
-
* process_broken_connections() we'll close the connection and
|
1514
|
-
* try to re-send requests to another server.
|
1515
|
-
*/
|
1516
|
-
server->is_broken = 1;
|
1517
|
-
/* Just to be paranoid, zero out this sendreq... */
|
1518
|
-
sendreq->data = NULL;
|
1519
|
-
sendreq->len = 0;
|
1520
|
-
}
|
1521
|
-
}
|
1347
|
+
|
1348
|
+
if (ares_dns_record_query_cnt(qrec) != ares_dns_record_query_cnt(arec)) {
|
1349
|
+
goto done;
|
1350
|
+
}
|
1351
|
+
|
1352
|
+
for (i = 0; i < ares_dns_record_query_cnt(qrec); i++) {
|
1353
|
+
const char *qname = NULL;
|
1354
|
+
const char *aname = NULL;
|
1355
|
+
ares_dns_rec_type_t qtype;
|
1356
|
+
ares_dns_rec_type_t atype;
|
1357
|
+
ares_dns_class_t qclass;
|
1358
|
+
ares_dns_class_t aclass;
|
1359
|
+
|
1360
|
+
if (ares_dns_record_query_get(qrec, i, &qname, &qtype, &qclass) !=
|
1361
|
+
ARES_SUCCESS ||
|
1362
|
+
qname == NULL) {
|
1363
|
+
goto done;
|
1522
1364
|
}
|
1523
1365
|
|
1524
|
-
|
1525
|
-
|
1526
|
-
|
1366
|
+
if (ares_dns_record_query_get(arec, i, &aname, &atype, &aclass) !=
|
1367
|
+
ARES_SUCCESS ||
|
1368
|
+
aname == NULL) {
|
1369
|
+
goto done;
|
1370
|
+
}
|
1527
1371
|
|
1528
|
-
|
1529
|
-
|
1530
|
-
*/
|
1531
|
-
if (!(channel->flags & ARES_FLAG_STAYOPEN) &&
|
1532
|
-
ares__is_list_empty(&(channel->all_queries)))
|
1533
|
-
{
|
1534
|
-
for (i = 0; i < channel->nservers; i++)
|
1535
|
-
ares__close_sockets(channel, &channel->servers[i]);
|
1372
|
+
if (qtype != atype || qclass != aclass) {
|
1373
|
+
goto done;
|
1536
1374
|
}
|
1375
|
+
|
1376
|
+
if (channel->flags & ARES_FLAG_DNS0x20 && !query->using_tcp) {
|
1377
|
+
/* NOTE: for DNS 0x20, part of the protection is to use a case-sensitive
|
1378
|
+
* comparison of the DNS query name. This expects the upstream DNS
|
1379
|
+
* server to preserve the case of the name in the response packet.
|
1380
|
+
* https://datatracker.ietf.org/doc/html/draft-vixie-dnsext-dns0x20-00
|
1381
|
+
*/
|
1382
|
+
if (!ares_streq(qname, aname)) {
|
1383
|
+
goto done;
|
1384
|
+
}
|
1385
|
+
} else {
|
1386
|
+
/* without DNS0x20 use case-insensitive matching */
|
1387
|
+
if (!ares_strcaseeq(qname, aname)) {
|
1388
|
+
goto done;
|
1389
|
+
}
|
1390
|
+
}
|
1391
|
+
}
|
1392
|
+
|
1393
|
+
rv = ARES_TRUE;
|
1394
|
+
|
1395
|
+
done:
|
1396
|
+
return rv;
|
1537
1397
|
}
|
1538
1398
|
|
1539
|
-
void
|
1399
|
+
static void ares_detach_query(ares_query_t *query)
|
1540
1400
|
{
|
1541
1401
|
/* Remove the query from all the lists in which it is linked */
|
1542
|
-
|
1543
|
-
|
1544
|
-
|
1545
|
-
|
1546
|
-
/* Zero out some important stuff, to help catch bugs */
|
1547
|
-
query->callback = NULL;
|
1548
|
-
query->arg = NULL;
|
1549
|
-
/* Deallocate the memory associated with the query */
|
1550
|
-
ares_free(query->tcpbuf);
|
1551
|
-
ares_free(query->server_info);
|
1552
|
-
ares_free(query);
|
1402
|
+
ares_query_remove_from_conn(query);
|
1403
|
+
ares_htable_szvp_remove(query->channel->queries_by_qid, query->qid);
|
1404
|
+
ares_llist_node_destroy(query->node_all_queries);
|
1405
|
+
query->node_all_queries = NULL;
|
1553
1406
|
}
|
1554
1407
|
|
1555
|
-
|
1556
|
-
|
1408
|
+
static void end_query(ares_channel_t *channel, ares_server_t *server,
|
1409
|
+
ares_query_t *query, ares_status_t status,
|
1410
|
+
const ares_dns_record_t *dnsrec)
|
1557
1411
|
{
|
1558
|
-
|
1559
|
-
|
1560
|
-
|
1561
|
-
|
1562
|
-
|
1563
|
-
else
|
1564
|
-
return socket(af, type, protocol);
|
1565
|
-
}
|
1412
|
+
/* If we were probing for the server to come back online, lets mark it as
|
1413
|
+
* no longer being probed */
|
1414
|
+
if (server != NULL) {
|
1415
|
+
server->probe_pending = ARES_FALSE;
|
1416
|
+
}
|
1566
1417
|
|
1567
|
-
|
1568
|
-
|
1569
|
-
|
1570
|
-
|
1571
|
-
|
1572
|
-
|
1573
|
-
|
1574
|
-
|
1575
|
-
|
1576
|
-
|
1577
|
-
|
1578
|
-
|
1418
|
+
ares_metrics_record(query, server, status, dnsrec);
|
1419
|
+
|
1420
|
+
/* Invoke the callback. */
|
1421
|
+
query->callback(query->arg, status, query->timeouts, dnsrec);
|
1422
|
+
ares_free_query(query);
|
1423
|
+
|
1424
|
+
/* Check and notify if no other queries are enqueued on the channel. This
|
1425
|
+
* must come after the callback and freeing the query for 2 reasons.
|
1426
|
+
* 1) The callback itself may enqueue a new query
|
1427
|
+
* 2) Technically the current query isn't detached until it is free()'d.
|
1428
|
+
*/
|
1429
|
+
ares_queue_notify_empty(channel);
|
1579
1430
|
}
|
1580
1431
|
|
1581
|
-
void
|
1432
|
+
void ares_free_query(ares_query_t *query)
|
1582
1433
|
{
|
1583
|
-
|
1584
|
-
|
1585
|
-
|
1586
|
-
|
1434
|
+
ares_detach_query(query);
|
1435
|
+
/* Zero out some important stuff, to help catch bugs */
|
1436
|
+
query->callback = NULL;
|
1437
|
+
query->arg = NULL;
|
1438
|
+
/* Deallocate the memory associated with the query */
|
1439
|
+
ares_dns_record_destroy(query->query);
|
1440
|
+
|
1441
|
+
ares_free(query);
|
1587
1442
|
}
|