grpc 1.2.5 → 1.3.4
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of grpc might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/Makefile +1434 -399
- data/etc/roots.pem +34 -150
- data/include/grpc/grpc.h +71 -0
- data/include/grpc/impl/codegen/atm.h +5 -0
- data/include/grpc/impl/codegen/atm_gcc_atomic.h +6 -0
- data/include/grpc/impl/codegen/atm_gcc_sync.h +2 -0
- data/include/grpc/impl/codegen/atm_windows.h +11 -0
- data/include/grpc/impl/codegen/grpc_types.h +54 -13
- data/include/grpc/impl/codegen/port_platform.h +15 -1
- data/include/grpc/support/alloc.h +2 -1
- data/include/grpc/support/sync.h +4 -0
- data/include/grpc/support/tls.h +1 -1
- data/src/core/ext/census/gen/trace_context.pb.h +1 -1
- data/src/core/ext/census/grpc_filter.c +14 -10
- data/src/core/ext/census/grpc_plugin.c +3 -1
- data/src/core/ext/census/trace_label.h +1 -1
- data/src/core/ext/census/trace_propagation.h +1 -1
- data/src/core/ext/census/trace_status.h +1 -1
- data/src/core/ext/census/trace_string.h +1 -1
- data/src/core/ext/census/tracing.h +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/channel_connectivity.c +56 -27
- data/src/core/ext/{client_channel → filters/client_channel}/client_channel.c +407 -202
- data/src/core/ext/{client_channel → filters/client_channel}/client_channel.h +10 -6
- data/src/core/ext/{client_channel → filters/client_channel}/client_channel_factory.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/client_channel_factory.h +4 -4
- data/src/core/ext/{client_channel → filters/client_channel}/client_channel_plugin.c +12 -7
- data/src/core/ext/{client_channel → filters/client_channel}/connector.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/connector.h +3 -5
- data/src/core/ext/{client_channel → filters/client_channel}/http_connect_handshaker.c +6 -6
- data/src/core/ext/{client_channel → filters/client_channel}/http_connect_handshaker.h +3 -3
- data/src/core/ext/{client_channel → filters/client_channel}/http_proxy.c +4 -4
- data/src/core/ext/{client_channel → filters/client_channel}/http_proxy.h +3 -3
- data/src/core/ext/{client_channel → filters/client_channel}/lb_policy.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/lb_policy.h +4 -4
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/grpclb.c +22 -20
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/grpclb.h +4 -4
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/grpclb_channel.h +5 -4
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/grpclb_channel_secure.c +2 -2
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/load_balancer_api.c +1 -1
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/load_balancer_api.h +6 -5
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/proto/grpc/lb/v1/load_balancer.pb.c +1 -1
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/grpclb/proto/grpc/lb/v1/load_balancer.pb.h +0 -0
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/pick_first/pick_first.c +20 -15
- data/src/core/ext/{lb_policy → filters/client_channel/lb_policy}/round_robin/round_robin.c +21 -16
- data/src/core/ext/{client_channel → filters/client_channel}/lb_policy_factory.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/lb_policy_factory.h +5 -5
- data/src/core/ext/{client_channel → filters/client_channel}/lb_policy_registry.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/lb_policy_registry.h +4 -4
- data/src/core/ext/{client_channel → filters/client_channel}/parse_address.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/parse_address.h +4 -4
- data/src/core/ext/{client_channel → filters/client_channel}/proxy_mapper.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/proxy_mapper.h +3 -3
- data/src/core/ext/{client_channel → filters/client_channel}/proxy_mapper_registry.c +10 -4
- data/src/core/ext/{client_channel → filters/client_channel}/proxy_mapper_registry.h +4 -4
- data/src/core/ext/{client_channel → filters/client_channel}/resolver.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/resolver.h +4 -4
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +350 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h +66 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +319 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c +289 -0
- data/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h +64 -0
- data/src/core/ext/{resolver → filters/client_channel/resolver}/dns/native/dns_resolver.c +21 -5
- data/src/core/ext/{resolver → filters/client_channel/resolver}/sockaddr/sockaddr_resolver.c +3 -3
- data/src/core/ext/{client_channel → filters/client_channel}/resolver_factory.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/resolver_factory.h +6 -6
- data/src/core/ext/{client_channel → filters/client_channel}/resolver_registry.c +1 -2
- data/src/core/ext/{client_channel → filters/client_channel}/resolver_registry.h +4 -4
- data/src/core/ext/filters/client_channel/retry_throttle.c +210 -0
- data/src/core/ext/filters/client_channel/retry_throttle.h +65 -0
- data/src/core/ext/{client_channel → filters/client_channel}/subchannel.c +49 -43
- data/src/core/ext/{client_channel → filters/client_channel}/subchannel.h +21 -7
- data/src/core/ext/{client_channel → filters/client_channel}/subchannel_index.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/subchannel_index.h +5 -5
- data/src/core/ext/{client_channel → filters/client_channel}/uri_parser.c +1 -1
- data/src/core/ext/{client_channel → filters/client_channel}/uri_parser.h +3 -3
- data/src/core/ext/{load_reporting → filters/load_reporting}/load_reporting.c +4 -2
- data/src/core/ext/{load_reporting → filters/load_reporting}/load_reporting.h +3 -3
- data/src/core/ext/{load_reporting → filters/load_reporting}/load_reporting_filter.c +17 -14
- data/src/core/ext/{load_reporting → filters/load_reporting}/load_reporting_filter.h +4 -4
- data/src/core/ext/filters/max_age/max_age_filter.c +439 -0
- data/src/core/ext/filters/max_age/max_age_filter.h +39 -0
- data/src/core/ext/transport/chttp2/client/chttp2_connector.c +6 -41
- data/src/core/ext/transport/chttp2/client/chttp2_connector.h +1 -1
- data/src/core/ext/transport/chttp2/client/insecure/channel_create.c +2 -2
- data/src/core/ext/transport/chttp2/client/secure/secure_channel_create.c +3 -3
- data/src/core/ext/transport/chttp2/server/chttp2_server.c +2 -2
- data/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c +2 -5
- data/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c +2 -2
- data/src/core/ext/transport/chttp2/transport/chttp2_transport.c +449 -204
- data/src/core/ext/transport/chttp2/transport/frame_data.c +10 -7
- data/src/core/ext/transport/chttp2/transport/frame_goaway.c +3 -2
- data/src/core/ext/transport/chttp2/transport/frame_ping.c +37 -7
- data/src/core/ext/transport/chttp2/transport/frame_ping.h +3 -0
- data/src/core/ext/transport/chttp2/transport/frame_rst_stream.c +4 -3
- data/src/core/ext/transport/chttp2/transport/frame_settings.c +18 -38
- data/src/core/ext/transport/chttp2/transport/frame_settings.h +1 -29
- data/src/core/ext/transport/chttp2/transport/frame_window_update.c +2 -2
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.c +64 -37
- data/src/core/ext/transport/chttp2/transport/hpack_encoder.h +11 -4
- data/src/core/ext/transport/chttp2/transport/hpack_parser.c +60 -39
- data/src/core/ext/transport/chttp2/transport/hpack_table.c +2 -2
- data/src/core/ext/transport/chttp2/transport/http2_settings.c +75 -0
- data/src/core/ext/transport/chttp2/transport/http2_settings.h +74 -0
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.c +22 -43
- data/src/core/ext/transport/chttp2/transport/incoming_metadata.h +8 -10
- data/src/core/ext/transport/chttp2/transport/internal.h +24 -2
- data/src/core/ext/transport/chttp2/transport/parsing.c +33 -15
- data/src/core/ext/transport/chttp2/transport/writing.c +56 -10
- data/src/core/lib/channel/channel_args.c +7 -0
- data/src/core/lib/channel/channel_args.h +2 -0
- data/src/core/lib/channel/channel_stack.c +20 -27
- data/src/core/lib/channel/channel_stack.h +18 -16
- data/src/core/lib/channel/compress_filter.c +20 -18
- data/src/core/lib/channel/connected_channel.c +9 -8
- data/src/core/lib/channel/deadline_filter.c +28 -24
- data/src/core/lib/channel/deadline_filter.h +3 -3
- data/src/core/lib/channel/handshaker.c +3 -2
- data/src/core/lib/channel/http_client_filter.c +119 -61
- data/src/core/lib/channel/http_server_filter.c +124 -69
- data/src/core/lib/channel/message_size_filter.c +23 -19
- data/src/core/lib/http/httpcli.c +8 -6
- data/src/core/lib/http/httpcli_security_connector.c +5 -5
- data/src/core/lib/http/parser.c +57 -31
- data/src/core/lib/iomgr/closure.c +15 -0
- data/src/core/lib/iomgr/closure.h +4 -0
- data/src/core/lib/iomgr/combiner.c +8 -0
- data/src/core/lib/iomgr/endpoint_pair.h +2 -3
- data/src/core/lib/iomgr/endpoint_pair_posix.c +10 -7
- data/src/core/lib/iomgr/endpoint_pair_uv.c +2 -3
- data/src/core/lib/iomgr/endpoint_pair_windows.c +9 -6
- data/src/core/lib/iomgr/error.c +360 -177
- data/src/core/lib/iomgr/error.h +31 -33
- data/src/core/lib/iomgr/error_internal.h +30 -9
- data/src/core/lib/iomgr/ev_epoll_linux.c +25 -239
- data/src/core/lib/iomgr/ev_poll_posix.c +11 -7
- data/src/core/lib/iomgr/ev_posix.c +6 -0
- data/src/core/lib/iomgr/ev_posix.h +3 -0
- data/src/core/lib/iomgr/exec_ctx.c +6 -0
- data/src/core/lib/iomgr/executor.c +8 -2
- data/src/core/lib/iomgr/load_file.c +6 -3
- data/src/core/lib/iomgr/lockfree_event.c +238 -0
- data/src/core/{ext/client_channel/initial_connect_string.h → lib/iomgr/lockfree_event.h} +17 -13
- data/src/core/lib/iomgr/pollset.h +4 -0
- data/src/core/lib/iomgr/pollset_windows.c +2 -2
- data/src/core/lib/iomgr/port.h +9 -0
- data/src/core/lib/iomgr/resolve_address_posix.c +15 -9
- data/src/core/lib/iomgr/resolve_address_uv.c +8 -6
- data/src/core/lib/iomgr/resolve_address_windows.c +2 -2
- data/src/core/lib/iomgr/resource_quota.c +19 -4
- data/src/core/lib/iomgr/resource_quota.h +2 -0
- data/src/core/lib/iomgr/sockaddr_utils.c +3 -1
- data/src/core/lib/iomgr/socket_factory_posix.c +110 -0
- data/src/core/lib/iomgr/socket_factory_posix.h +90 -0
- data/src/core/lib/iomgr/socket_utils_common_posix.c +25 -9
- data/src/core/lib/iomgr/socket_utils_posix.h +7 -0
- data/src/core/lib/iomgr/tcp_client.h +0 -4
- data/src/core/lib/iomgr/tcp_client_posix.c +15 -31
- data/src/core/lib/iomgr/tcp_client_uv.c +10 -6
- data/src/core/lib/iomgr/tcp_client_windows.c +9 -19
- data/src/core/lib/iomgr/tcp_posix.c +111 -22
- data/src/core/lib/iomgr/tcp_posix.h +3 -4
- data/src/core/lib/iomgr/tcp_server_posix.c +39 -417
- data/src/core/lib/iomgr/tcp_server_utils_posix.h +135 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix_common.c +221 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c +196 -0
- data/src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c +49 -0
- data/src/core/lib/iomgr/tcp_server_uv.c +43 -16
- data/src/core/lib/iomgr/tcp_server_windows.c +10 -22
- data/src/core/lib/iomgr/tcp_uv.c +16 -13
- data/src/core/lib/iomgr/tcp_windows.c +24 -12
- data/src/core/lib/iomgr/tcp_windows.h +2 -2
- data/src/core/lib/iomgr/timer.h +3 -0
- data/src/core/lib/iomgr/timer_generic.c +257 -72
- data/src/core/lib/iomgr/timer_generic.h +1 -1
- data/src/core/lib/iomgr/timer_heap.c +8 -8
- data/src/core/lib/iomgr/udp_server.c +54 -24
- data/src/core/lib/iomgr/udp_server.h +7 -7
- data/src/core/lib/iomgr/unix_sockets_posix.c +1 -1
- data/src/core/lib/iomgr/unix_sockets_posix_noop.c +2 -1
- data/src/core/lib/iomgr/wakeup_fd_posix.h +1 -1
- data/src/core/lib/profiling/basic_timers.c +1 -1
- data/src/core/lib/security/credentials/credentials.h +1 -1
- data/src/core/lib/security/credentials/google_default/google_default_credentials.c +10 -9
- data/src/core/lib/security/credentials/jwt/json_token.c +1 -1
- data/src/core/lib/security/credentials/jwt/jwt_verifier.c +2 -2
- data/src/core/lib/security/transport/client_auth_filter.c +33 -26
- data/src/core/lib/security/transport/secure_endpoint.c +8 -5
- data/src/core/lib/security/transport/security_connector.c +37 -37
- data/src/core/lib/security/transport/security_connector.h +1 -1
- data/src/core/lib/security/transport/security_handshaker.c +15 -12
- data/src/core/lib/security/transport/server_auth_filter.c +20 -18
- data/src/core/lib/security/transport/tsi_error.c +5 -3
- data/src/core/lib/security/transport/tsi_error.h +1 -1
- data/src/core/lib/{security/util → slice}/b64.c +21 -6
- data/src/core/lib/{security/util → slice}/b64.h +16 -4
- data/src/core/lib/slice/slice.c +4 -2
- data/src/core/lib/slice/slice_buffer.c +16 -14
- data/src/core/lib/support/arena.c +98 -0
- data/src/core/{ext/client_channel/initial_connect_string.c → lib/support/arena.h} +17 -15
- data/src/core/{ext/client_channel/default_initial_connect_string.c → lib/support/atm.c} +14 -5
- data/src/core/lib/support/cpu_linux.c +5 -0
- data/src/core/lib/support/sync.c +4 -0
- data/src/core/lib/support/time.c +4 -10
- data/src/core/lib/support/wrap_memcpy.c +3 -1
- data/src/core/lib/surface/call.c +252 -221
- data/src/core/lib/surface/channel.c +72 -21
- data/src/core/lib/surface/channel.h +8 -0
- data/src/core/lib/surface/completion_queue.c +2 -3
- data/src/core/lib/surface/completion_queue_factory.c +77 -0
- data/src/core/lib/surface/completion_queue_factory.h +51 -0
- data/src/core/lib/surface/init_secure.c +3 -1
- data/src/core/lib/surface/lame_client.c +18 -14
- data/src/core/lib/surface/server.c +43 -41
- data/src/core/lib/surface/validate_metadata.c +8 -4
- data/src/core/lib/surface/version.c +2 -2
- data/src/core/lib/transport/bdp_estimator.h +1 -1
- data/src/core/lib/transport/connectivity_state.c +2 -1
- data/src/core/lib/transport/error_utils.c +17 -17
- data/src/core/lib/transport/error_utils.h +1 -1
- data/src/core/lib/transport/metadata_batch.c +6 -7
- data/src/core/lib/transport/pid_controller.c +1 -0
- data/src/core/lib/transport/service_config.c +12 -0
- data/src/core/lib/transport/service_config.h +6 -0
- data/src/core/lib/transport/transport.c +29 -17
- data/src/core/lib/transport/transport.h +85 -42
- data/src/core/lib/transport/transport_impl.h +5 -3
- data/src/core/lib/transport/transport_op_string.c +20 -14
- data/src/core/plugin_registry/grpc_plugin_registry.c +8 -0
- data/src/core/{lib/tsi → tsi}/fake_transport_security.c +2 -2
- data/src/core/{lib/tsi → tsi}/fake_transport_security.h +4 -4
- data/src/core/{lib/tsi → tsi}/ssl_transport_security.c +40 -79
- data/src/core/{lib/tsi → tsi}/ssl_transport_security.h +44 -21
- data/src/core/{lib/tsi → tsi}/ssl_types.h +3 -3
- data/src/core/{lib/tsi → tsi}/transport_security.c +2 -2
- data/src/core/{lib/tsi → tsi}/transport_security.h +4 -4
- data/src/core/{lib/tsi → tsi}/transport_security_interface.h +3 -3
- data/src/ruby/ext/grpc/extconf.rb +1 -0
- data/src/ruby/ext/grpc/rb_call_credentials.c +2 -2
- data/src/ruby/ext/grpc/rb_channel.c +520 -93
- data/src/ruby/ext/grpc/rb_channel.h +2 -0
- data/src/ruby/ext/grpc/rb_channel_credentials.c +3 -0
- data/src/ruby/ext/grpc/rb_compression_options.c +5 -2
- data/src/ruby/ext/grpc/rb_event_thread.c +6 -6
- data/src/ruby/ext/grpc/rb_grpc.c +29 -7
- data/src/ruby/ext/grpc/rb_grpc.h +2 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.c +10 -0
- data/src/ruby/ext/grpc/rb_grpc_imports.generated.h +15 -0
- data/src/ruby/ext/grpc/rb_server.c +5 -3
- data/src/ruby/lib/grpc/version.rb +1 -1
- data/src/ruby/spec/channel_connection_spec.rb +173 -0
- data/src/ruby/spec/channel_spec.rb +29 -0
- data/src/ruby/spec/generic/rpc_server_pool_spec.rb +27 -17
- data/third_party/cares/ares_build.h +264 -0
- data/third_party/cares/cares/ares.h +636 -0
- data/third_party/cares/cares/ares__close_sockets.c +61 -0
- data/third_party/cares/cares/ares__get_hostent.c +261 -0
- data/third_party/cares/cares/ares__read_line.c +73 -0
- data/third_party/cares/cares/ares__timeval.c +111 -0
- data/third_party/cares/cares/ares_cancel.c +63 -0
- data/third_party/cares/cares/ares_create_query.c +202 -0
- data/third_party/cares/cares/ares_data.c +221 -0
- data/third_party/cares/cares/ares_data.h +72 -0
- data/third_party/cares/cares/ares_destroy.c +108 -0
- data/third_party/cares/cares/ares_dns.h +103 -0
- data/third_party/cares/cares/ares_expand_name.c +205 -0
- data/third_party/cares/cares/ares_expand_string.c +70 -0
- data/third_party/cares/cares/ares_fds.c +59 -0
- data/third_party/cares/cares/ares_free_hostent.c +41 -0
- data/third_party/cares/cares/ares_free_string.c +25 -0
- data/third_party/cares/cares/ares_getenv.c +30 -0
- data/third_party/cares/cares/ares_getenv.h +26 -0
- data/third_party/cares/cares/ares_gethostbyaddr.c +294 -0
- data/third_party/cares/cares/ares_gethostbyname.c +518 -0
- data/third_party/cares/cares/ares_getnameinfo.c +422 -0
- data/third_party/cares/cares/ares_getopt.c +122 -0
- data/third_party/cares/cares/ares_getopt.h +53 -0
- data/third_party/cares/cares/ares_getsock.c +66 -0
- data/third_party/cares/cares/ares_inet_net_pton.h +25 -0
- data/third_party/cares/cares/ares_init.c +2146 -0
- data/third_party/cares/cares/ares_iphlpapi.h +221 -0
- data/third_party/cares/cares/ares_ipv6.h +78 -0
- data/third_party/cares/cares/ares_library_init.c +167 -0
- data/third_party/cares/cares/ares_library_init.h +42 -0
- data/third_party/cares/cares/ares_llist.c +63 -0
- data/third_party/cares/cares/ares_llist.h +39 -0
- data/third_party/cares/cares/ares_mkquery.c +24 -0
- data/third_party/cares/cares/ares_nowarn.c +260 -0
- data/third_party/cares/cares/ares_nowarn.h +61 -0
- data/third_party/cares/cares/ares_options.c +402 -0
- data/third_party/cares/cares/ares_parse_a_reply.c +264 -0
- data/third_party/cares/cares/ares_parse_aaaa_reply.c +264 -0
- data/third_party/cares/cares/ares_parse_mx_reply.c +170 -0
- data/third_party/cares/cares/ares_parse_naptr_reply.c +188 -0
- data/third_party/cares/cares/ares_parse_ns_reply.c +183 -0
- data/third_party/cares/cares/ares_parse_ptr_reply.c +219 -0
- data/third_party/cares/cares/ares_parse_soa_reply.c +133 -0
- data/third_party/cares/cares/ares_parse_srv_reply.c +179 -0
- data/third_party/cares/cares/ares_parse_txt_reply.c +220 -0
- data/third_party/cares/cares/ares_platform.c +11035 -0
- data/third_party/cares/cares/ares_platform.h +43 -0
- data/third_party/cares/cares/ares_private.h +363 -0
- data/third_party/cares/cares/ares_process.c +1359 -0
- data/third_party/cares/cares/ares_query.c +186 -0
- data/third_party/cares/cares/ares_rules.h +125 -0
- data/third_party/cares/cares/ares_search.c +316 -0
- data/third_party/cares/cares/ares_send.c +131 -0
- data/third_party/cares/cares/ares_setup.h +217 -0
- data/third_party/cares/cares/ares_strcasecmp.c +66 -0
- data/third_party/cares/cares/ares_strcasecmp.h +30 -0
- data/third_party/cares/cares/ares_strdup.c +49 -0
- data/third_party/cares/cares/ares_strdup.h +24 -0
- data/third_party/cares/cares/ares_strerror.c +56 -0
- data/third_party/cares/cares/ares_timeout.c +88 -0
- data/third_party/cares/cares/ares_version.c +11 -0
- data/third_party/cares/cares/ares_version.h +24 -0
- data/third_party/cares/cares/ares_writev.c +79 -0
- data/third_party/cares/cares/bitncmp.c +59 -0
- data/third_party/cares/cares/bitncmp.h +26 -0
- data/third_party/cares/cares/config-win32.h +377 -0
- data/third_party/cares/cares/inet_net_pton.c +450 -0
- data/third_party/cares/cares/inet_ntop.c +208 -0
- data/third_party/cares/cares/setup_once.h +554 -0
- data/third_party/cares/cares/windows_port.c +22 -0
- data/third_party/cares/config_darwin/ares_config.h +523 -0
- data/third_party/cares/config_linux/ares_config.h +524 -0
- metadata +164 -68
@@ -0,0 +1,43 @@
|
|
1
|
+
#ifndef HEADER_CARES_PLATFORM_H
|
2
|
+
#define HEADER_CARES_PLATFORM_H
|
3
|
+
|
4
|
+
|
5
|
+
/* Copyright 1998 by the Massachusetts Institute of Technology.
|
6
|
+
* Copyright (C) 2004 - 2011 by Daniel Stenberg et al
|
7
|
+
*
|
8
|
+
* Permission to use, copy, modify, and distribute this
|
9
|
+
* software and its documentation for any purpose and without
|
10
|
+
* fee is hereby granted, provided that the above copyright
|
11
|
+
* notice appear in all copies and that both that copyright
|
12
|
+
* notice and this permission notice appear in supporting
|
13
|
+
* documentation, and that the name of M.I.T. not be used in
|
14
|
+
* advertising or publicity pertaining to distribution of the
|
15
|
+
* software without specific, written prior permission.
|
16
|
+
* M.I.T. makes no representations about the suitability of
|
17
|
+
* this software for any purpose. It is provided "as is"
|
18
|
+
* without express or implied warranty.
|
19
|
+
*/
|
20
|
+
|
21
|
+
#include "ares_setup.h"
|
22
|
+
|
23
|
+
#if defined(WIN32) && !defined(MSDOS)
|
24
|
+
|
25
|
+
typedef enum {
|
26
|
+
WIN_UNKNOWN,
|
27
|
+
WIN_3X,
|
28
|
+
WIN_9X,
|
29
|
+
WIN_NT,
|
30
|
+
WIN_CE
|
31
|
+
} win_platform;
|
32
|
+
|
33
|
+
win_platform ares__getplatform(void);
|
34
|
+
|
35
|
+
#endif
|
36
|
+
|
37
|
+
#if defined(_WIN32_WCE)
|
38
|
+
|
39
|
+
struct servent *getservbyport(int port, const char *proto);
|
40
|
+
|
41
|
+
#endif
|
42
|
+
|
43
|
+
#endif /* HEADER_CARES_PLATFORM_H */
|
@@ -0,0 +1,363 @@
|
|
1
|
+
#ifndef __ARES_PRIVATE_H
|
2
|
+
#define __ARES_PRIVATE_H
|
3
|
+
|
4
|
+
|
5
|
+
/* Copyright 1998 by the Massachusetts Institute of Technology.
|
6
|
+
* Copyright (C) 2004-2010 by Daniel Stenberg
|
7
|
+
*
|
8
|
+
* Permission to use, copy, modify, and distribute this
|
9
|
+
* software and its documentation for any purpose and without
|
10
|
+
* fee is hereby granted, provided that the above copyright
|
11
|
+
* notice appear in all copies and that both that copyright
|
12
|
+
* notice and this permission notice appear in supporting
|
13
|
+
* documentation, and that the name of M.I.T. not be used in
|
14
|
+
* advertising or publicity pertaining to distribution of the
|
15
|
+
* software without specific, written prior permission.
|
16
|
+
* M.I.T. makes no representations about the suitability of
|
17
|
+
* this software for any purpose. It is provided "as is"
|
18
|
+
* without express or implied warranty.
|
19
|
+
*/
|
20
|
+
|
21
|
+
/*
|
22
|
+
* Define WIN32 when build target is Win32 API
|
23
|
+
*/
|
24
|
+
|
25
|
+
#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32)
|
26
|
+
#define WIN32
|
27
|
+
#endif
|
28
|
+
|
29
|
+
#ifdef HAVE_NETINET_IN_H
|
30
|
+
#include <netinet/in.h>
|
31
|
+
#endif
|
32
|
+
|
33
|
+
#ifdef WATT32
|
34
|
+
#include <tcp.h>
|
35
|
+
#include <sys/ioctl.h>
|
36
|
+
#define writev(s,v,c) writev_s(s,v,c)
|
37
|
+
#define HAVE_WRITEV 1
|
38
|
+
#endif
|
39
|
+
|
40
|
+
#define DEFAULT_TIMEOUT 5000 /* milliseconds */
|
41
|
+
#define DEFAULT_TRIES 4
|
42
|
+
#ifndef INADDR_NONE
|
43
|
+
#define INADDR_NONE 0xffffffff
|
44
|
+
#endif
|
45
|
+
|
46
|
+
#ifdef CARES_EXPOSE_STATICS
|
47
|
+
/* Make some internal functions visible for testing */
|
48
|
+
#define STATIC_TESTABLE
|
49
|
+
#else
|
50
|
+
#define STATIC_TESTABLE static
|
51
|
+
#endif
|
52
|
+
|
53
|
+
#if defined(WIN32) && !defined(WATT32)
|
54
|
+
|
55
|
+
#define WIN_NS_9X "System\\CurrentControlSet\\Services\\VxD\\MSTCP"
|
56
|
+
#define WIN_NS_NT_KEY "System\\CurrentControlSet\\Services\\Tcpip\\Parameters"
|
57
|
+
#define NAMESERVER "NameServer"
|
58
|
+
#define DHCPNAMESERVER "DhcpNameServer"
|
59
|
+
#define DATABASEPATH "DatabasePath"
|
60
|
+
#define WIN_PATH_HOSTS "\\hosts"
|
61
|
+
|
62
|
+
#elif defined(WATT32)
|
63
|
+
|
64
|
+
#define PATH_RESOLV_CONF "/dev/ENV/etc/resolv.conf"
|
65
|
+
|
66
|
+
#elif defined(NETWARE)
|
67
|
+
|
68
|
+
#define PATH_RESOLV_CONF "sys:/etc/resolv.cfg"
|
69
|
+
#define PATH_HOSTS "sys:/etc/hosts"
|
70
|
+
|
71
|
+
#elif defined(__riscos__)
|
72
|
+
|
73
|
+
#define PATH_HOSTS "InetDBase:Hosts"
|
74
|
+
|
75
|
+
#else
|
76
|
+
|
77
|
+
#define PATH_RESOLV_CONF "/etc/resolv.conf"
|
78
|
+
#ifdef ETC_INET
|
79
|
+
#define PATH_HOSTS "/etc/inet/hosts"
|
80
|
+
#else
|
81
|
+
#define PATH_HOSTS "/etc/hosts"
|
82
|
+
#endif
|
83
|
+
|
84
|
+
#endif
|
85
|
+
|
86
|
+
#define ARES_ID_KEY_LEN 31
|
87
|
+
|
88
|
+
#include "ares_ipv6.h"
|
89
|
+
#include "ares_llist.h"
|
90
|
+
|
91
|
+
#ifndef HAVE_GETENV
|
92
|
+
# include "ares_getenv.h"
|
93
|
+
# define getenv(ptr) ares_getenv(ptr)
|
94
|
+
#endif
|
95
|
+
|
96
|
+
#include "ares_strdup.h"
|
97
|
+
|
98
|
+
#ifndef HAVE_STRCASECMP
|
99
|
+
# include "ares_strcasecmp.h"
|
100
|
+
# define strcasecmp(p1,p2) ares_strcasecmp(p1,p2)
|
101
|
+
#endif
|
102
|
+
|
103
|
+
#ifndef HAVE_STRNCASECMP
|
104
|
+
# include "ares_strcasecmp.h"
|
105
|
+
# define strncasecmp(p1,p2,n) ares_strncasecmp(p1,p2,n)
|
106
|
+
#endif
|
107
|
+
|
108
|
+
#ifndef HAVE_WRITEV
|
109
|
+
# include "ares_writev.h"
|
110
|
+
# define writev(s,ptr,cnt) ares_writev(s,ptr,cnt)
|
111
|
+
#endif
|
112
|
+
|
113
|
+
/********* EDNS defines section ******/
|
114
|
+
#define EDNSPACKETSZ 1280 /* Reasonable UDP payload size, as suggested
|
115
|
+
in RFC2671 */
|
116
|
+
#define MAXENDSSZ 4096 /* Maximum (local) limit for edns packet size */
|
117
|
+
#define EDNSFIXEDSZ 11 /* Size of EDNS header */
|
118
|
+
/********* EDNS defines section ******/
|
119
|
+
|
120
|
+
struct ares_addr {
|
121
|
+
int family;
|
122
|
+
union {
|
123
|
+
struct in_addr addr4;
|
124
|
+
struct ares_in6_addr addr6;
|
125
|
+
} addr;
|
126
|
+
int udp_port; /* stored in network order */
|
127
|
+
int tcp_port; /* stored in network order */
|
128
|
+
};
|
129
|
+
#define addrV4 addr.addr4
|
130
|
+
#define addrV6 addr.addr6
|
131
|
+
|
132
|
+
struct query;
|
133
|
+
|
134
|
+
struct send_request {
|
135
|
+
/* Remaining data to send */
|
136
|
+
const unsigned char *data;
|
137
|
+
size_t len;
|
138
|
+
|
139
|
+
/* The query for which we're sending this data */
|
140
|
+
struct query* owner_query;
|
141
|
+
/* The buffer we're using, if we have our own copy of the packet */
|
142
|
+
unsigned char *data_storage;
|
143
|
+
|
144
|
+
/* Next request in queue */
|
145
|
+
struct send_request *next;
|
146
|
+
};
|
147
|
+
|
148
|
+
struct server_state {
|
149
|
+
struct ares_addr addr;
|
150
|
+
ares_socket_t udp_socket;
|
151
|
+
ares_socket_t tcp_socket;
|
152
|
+
|
153
|
+
/* Mini-buffer for reading the length word */
|
154
|
+
unsigned char tcp_lenbuf[2];
|
155
|
+
int tcp_lenbuf_pos;
|
156
|
+
int tcp_length;
|
157
|
+
|
158
|
+
/* Buffer for reading actual TCP data */
|
159
|
+
unsigned char *tcp_buffer;
|
160
|
+
int tcp_buffer_pos;
|
161
|
+
|
162
|
+
/* TCP output queue */
|
163
|
+
struct send_request *qhead;
|
164
|
+
struct send_request *qtail;
|
165
|
+
|
166
|
+
/* Which incarnation of this connection is this? We don't want to
|
167
|
+
* retransmit requests into the very same socket, but if the server
|
168
|
+
* closes on us and we re-open the connection, then we do want to
|
169
|
+
* re-send. */
|
170
|
+
int tcp_connection_generation;
|
171
|
+
|
172
|
+
/* Circular, doubly-linked list of outstanding queries to this server */
|
173
|
+
struct list_node queries_to_server;
|
174
|
+
|
175
|
+
/* Link back to owning channel */
|
176
|
+
ares_channel channel;
|
177
|
+
|
178
|
+
/* Is this server broken? We mark connections as broken when a
|
179
|
+
* request that is queued for sending times out.
|
180
|
+
*/
|
181
|
+
int is_broken;
|
182
|
+
};
|
183
|
+
|
184
|
+
/* State to represent a DNS query */
|
185
|
+
struct query {
|
186
|
+
/* Query ID from qbuf, for faster lookup, and current timeout */
|
187
|
+
unsigned short qid;
|
188
|
+
struct timeval timeout;
|
189
|
+
|
190
|
+
/*
|
191
|
+
* Links for the doubly-linked lists in which we insert a query.
|
192
|
+
* These circular, doubly-linked lists that are hash-bucketed based
|
193
|
+
* the attributes we care about, help making most important
|
194
|
+
* operations O(1).
|
195
|
+
*/
|
196
|
+
struct list_node queries_by_qid; /* hopefully in same cache line as qid */
|
197
|
+
struct list_node queries_by_timeout;
|
198
|
+
struct list_node queries_to_server;
|
199
|
+
struct list_node all_queries;
|
200
|
+
|
201
|
+
/* Query buf with length at beginning, for TCP transmission */
|
202
|
+
unsigned char *tcpbuf;
|
203
|
+
int tcplen;
|
204
|
+
|
205
|
+
/* Arguments passed to ares_send() (qbuf points into tcpbuf) */
|
206
|
+
const unsigned char *qbuf;
|
207
|
+
int qlen;
|
208
|
+
ares_callback callback;
|
209
|
+
void *arg;
|
210
|
+
|
211
|
+
/* Query status */
|
212
|
+
int try_count; /* Number of times we tried this query already. */
|
213
|
+
int server; /* Server this query has last been sent to. */
|
214
|
+
struct query_server_info *server_info; /* per-server state */
|
215
|
+
int using_tcp;
|
216
|
+
int error_status;
|
217
|
+
int timeouts; /* number of timeouts we saw for this request */
|
218
|
+
};
|
219
|
+
|
220
|
+
/* Per-server state for a query */
|
221
|
+
struct query_server_info {
|
222
|
+
int skip_server; /* should we skip server, due to errors, etc? */
|
223
|
+
int tcp_connection_generation; /* into which TCP connection did we send? */
|
224
|
+
};
|
225
|
+
|
226
|
+
/* An IP address pattern; matches an IP address X if X & mask == addr */
|
227
|
+
#define PATTERN_MASK 0x1
|
228
|
+
#define PATTERN_CIDR 0x2
|
229
|
+
|
230
|
+
struct apattern {
|
231
|
+
union
|
232
|
+
{
|
233
|
+
struct in_addr addr4;
|
234
|
+
struct ares_in6_addr addr6;
|
235
|
+
} addr;
|
236
|
+
union
|
237
|
+
{
|
238
|
+
struct in_addr addr4;
|
239
|
+
struct ares_in6_addr addr6;
|
240
|
+
unsigned short bits;
|
241
|
+
} mask;
|
242
|
+
int family;
|
243
|
+
unsigned short type;
|
244
|
+
};
|
245
|
+
|
246
|
+
typedef struct rc4_key
|
247
|
+
{
|
248
|
+
unsigned char state[256];
|
249
|
+
unsigned char x;
|
250
|
+
unsigned char y;
|
251
|
+
} rc4_key;
|
252
|
+
|
253
|
+
/* The object behind the public ares_channel handle: all resolver
 * configuration, per-server connection state, and bookkeeping for every
 * in-flight query on this channel. */
struct ares_channeldata {
  /* Configuration data */
  int flags;
  int timeout; /* in milliseconds */
  int tries;
  int ndots;
  int rotate; /* if true, all servers specified are used */
  int udp_port; /* stored in network order */
  int tcp_port; /* stored in network order */
  int socket_send_buffer_size;
  int socket_receive_buffer_size;
  char **domains;
  int ndomains;
  struct apattern *sortlist;
  int nsort;
  char *lookups;
  int ednspsz;

  /* For binding to local devices and/or IP addresses. Leave
   * them null/zero for no binding.
   */
  char local_dev_name[32];
  unsigned int local_ip4;
  unsigned char local_ip6[16];

  int optmask; /* the option bitfield passed in at init time */

  /* Server addresses and communications state */
  struct server_state *servers;
  int nservers;

  /* ID to use for next query */
  unsigned short next_id;
  /* key to use when generating new ids */
  rc4_key id_key;

  /* Generation number to use for the next TCP socket open/close */
  int tcp_connection_generation;

  /* The time at which we last called process_timeouts(). Uses integer seconds
     just to draw the line somewhere. */
  time_t last_timeout_processed;

  /* Last server we sent a query to. */
  int last_server;

  /* Circular, doubly-linked list of queries, bucketed various ways.... */
  /* All active queries in a single list: */
  struct list_node all_queries;
  /* Queries bucketed by qid, for quickly dispatching DNS responses: */
#define ARES_QID_TABLE_SIZE 2048
  struct list_node queries_by_qid[ARES_QID_TABLE_SIZE];
  /* Queries bucketed by timeout, for quickly handling timeouts: */
#define ARES_TIMEOUT_TABLE_SIZE 1024
  struct list_node queries_by_timeout[ARES_TIMEOUT_TABLE_SIZE];

  /* Application callbacks: socket readability/writability interest changes,
   * socket creation, and socket configuration, each with its user data. */
  ares_sock_state_cb sock_state_cb;
  void *sock_state_cb_data;

  ares_sock_create_callback sock_create_cb;
  void *sock_create_cb_data;

  ares_sock_config_callback sock_config_cb;
  void *sock_config_cb_data;
};
|
318
|
+
|
319
|
+
/* Memory management functions */
|
320
|
+
extern void *(*ares_malloc)(size_t size);
|
321
|
+
extern void *(*ares_realloc)(void *ptr, size_t size);
|
322
|
+
extern void (*ares_free)(void *ptr);
|
323
|
+
|
324
|
+
/* return true if now is exactly check time or later */
|
325
|
+
int ares__timedout(struct timeval *now,
|
326
|
+
struct timeval *check);
|
327
|
+
|
328
|
+
void ares__send_query(ares_channel channel, struct query *query,
|
329
|
+
struct timeval *now);
|
330
|
+
void ares__close_sockets(ares_channel channel, struct server_state *server);
|
331
|
+
int ares__get_hostent(FILE *fp, int family, struct hostent **host);
|
332
|
+
int ares__read_line(FILE *fp, char **buf, size_t *bufsize);
|
333
|
+
void ares__free_query(struct query *query);
|
334
|
+
unsigned short ares__generate_new_id(rc4_key* key);
|
335
|
+
struct timeval ares__tvnow(void);
|
336
|
+
int ares__expand_name_for_response(const unsigned char *encoded,
|
337
|
+
const unsigned char *abuf, int alen,
|
338
|
+
char **s, long *enclen);
|
339
|
+
void ares__init_servers_state(ares_channel channel);
|
340
|
+
void ares__destroy_servers_state(ares_channel channel);
|
341
|
+
#if 0 /* Not used */
|
342
|
+
long ares__tvdiff(struct timeval t1, struct timeval t2);
|
343
|
+
#endif
|
344
|
+
|
345
|
+
/* Swap the bytes pointed at by a and b (used by the RC4 key schedule).
 * Wrapped in do/while(0) so the macro expands to a single statement and is
 * safe in un-braced if/else bodies; the original bare { } block would break
 * `if (x) ARES_SWAP_BYTE(p, q); else ...` at compile time. */
#define ARES_SWAP_BYTE(a,b) \
  do { unsigned char swapByte = *(a); *(a) = *(b); *(b) = swapByte; } while (0)
|
347
|
+
|
348
|
+
/* Notify the application, if it registered a socket-state callback, that
 * socket s should now be watched (r) for reading and/or (w) for writing.
 * WHILE_FALSE is the project's do/while(0)-style statement terminator. */
#define SOCK_STATE_CALLBACK(c, s, r, w)                           \
  do {                                                            \
    if ((c)->sock_state_cb)                                       \
      (c)->sock_state_cb((c)->sock_state_cb_data, (s), (r), (w)); \
  } WHILE_FALSE
|
353
|
+
|
354
|
+
#ifdef CURLDEBUG
|
355
|
+
/* This is low-level hard-hacking memory leak tracking and similar. Using the
|
356
|
+
libcurl lowlevel code from within library is ugly and only works when
|
357
|
+
c-ares is built and linked with a similarly curldebug-enabled libcurl,
|
358
|
+
but we do this anyway for convenience. */
|
359
|
+
#define HEADER_CURL_SETUP_ONCE_H
|
360
|
+
#include "../lib/memdebug.h"
|
361
|
+
#endif
|
362
|
+
|
363
|
+
#endif /* __ARES_PRIVATE_H */
|
@@ -0,0 +1,1359 @@
|
|
1
|
+
|
2
|
+
/* Copyright 1998 by the Massachusetts Institute of Technology.
|
3
|
+
* Copyright (C) 2004-2016 by Daniel Stenberg
|
4
|
+
*
|
5
|
+
* Permission to use, copy, modify, and distribute this
|
6
|
+
* software and its documentation for any purpose and without
|
7
|
+
* fee is hereby granted, provided that the above copyright
|
8
|
+
* notice appear in all copies and that both that copyright
|
9
|
+
* notice and this permission notice appear in supporting
|
10
|
+
* documentation, and that the name of M.I.T. not be used in
|
11
|
+
* advertising or publicity pertaining to distribution of the
|
12
|
+
* software without specific, written prior permission.
|
13
|
+
* M.I.T. makes no representations about the suitability of
|
14
|
+
* this software for any purpose. It is provided "as is"
|
15
|
+
* without express or implied warranty.
|
16
|
+
*/
|
17
|
+
|
18
|
+
#include "ares_setup.h"
|
19
|
+
|
20
|
+
#ifdef HAVE_SYS_UIO_H
|
21
|
+
# include <sys/uio.h>
|
22
|
+
#endif
|
23
|
+
#ifdef HAVE_NETINET_IN_H
|
24
|
+
# include <netinet/in.h>
|
25
|
+
#endif
|
26
|
+
#ifdef HAVE_NETINET_TCP_H
|
27
|
+
# include <netinet/tcp.h>
|
28
|
+
#endif
|
29
|
+
#ifdef HAVE_NETDB_H
|
30
|
+
# include <netdb.h>
|
31
|
+
#endif
|
32
|
+
#ifdef HAVE_ARPA_NAMESER_H
|
33
|
+
# include <arpa/nameser.h>
|
34
|
+
#else
|
35
|
+
# include "nameser.h"
|
36
|
+
#endif
|
37
|
+
#ifdef HAVE_ARPA_NAMESER_COMPAT_H
|
38
|
+
# include <arpa/nameser_compat.h>
|
39
|
+
#endif
|
40
|
+
|
41
|
+
#ifdef HAVE_STRINGS_H
|
42
|
+
# include <strings.h>
|
43
|
+
#endif
|
44
|
+
#ifdef HAVE_SYS_IOCTL_H
|
45
|
+
# include <sys/ioctl.h>
|
46
|
+
#endif
|
47
|
+
#ifdef NETWARE
|
48
|
+
# include <sys/filio.h>
|
49
|
+
#endif
|
50
|
+
|
51
|
+
#include <assert.h>
|
52
|
+
#include <fcntl.h>
|
53
|
+
|
54
|
+
#include "ares.h"
|
55
|
+
#include "ares_dns.h"
|
56
|
+
#include "ares_nowarn.h"
|
57
|
+
#include "ares_private.h"
|
58
|
+
|
59
|
+
|
60
|
+
static int try_again(int errnum);
|
61
|
+
static void write_tcp_data(ares_channel channel, fd_set *write_fds,
|
62
|
+
ares_socket_t write_fd, struct timeval *now);
|
63
|
+
static void read_tcp_data(ares_channel channel, fd_set *read_fds,
|
64
|
+
ares_socket_t read_fd, struct timeval *now);
|
65
|
+
static void read_udp_packets(ares_channel channel, fd_set *read_fds,
|
66
|
+
ares_socket_t read_fd, struct timeval *now);
|
67
|
+
static void advance_tcp_send_queue(ares_channel channel, int whichserver,
|
68
|
+
ssize_t num_bytes);
|
69
|
+
static void process_timeouts(ares_channel channel, struct timeval *now);
|
70
|
+
static void process_broken_connections(ares_channel channel,
|
71
|
+
struct timeval *now);
|
72
|
+
static void process_answer(ares_channel channel, unsigned char *abuf,
|
73
|
+
int alen, int whichserver, int tcp,
|
74
|
+
struct timeval *now);
|
75
|
+
static void handle_error(ares_channel channel, int whichserver,
|
76
|
+
struct timeval *now);
|
77
|
+
static void skip_server(ares_channel channel, struct query *query,
|
78
|
+
int whichserver);
|
79
|
+
static void next_server(ares_channel channel, struct query *query,
|
80
|
+
struct timeval *now);
|
81
|
+
static int open_tcp_socket(ares_channel channel, struct server_state *server);
|
82
|
+
static int open_udp_socket(ares_channel channel, struct server_state *server);
|
83
|
+
static int same_questions(const unsigned char *qbuf, int qlen,
|
84
|
+
const unsigned char *abuf, int alen);
|
85
|
+
static int same_address(struct sockaddr *sa, struct ares_addr *aa);
|
86
|
+
static void end_query(ares_channel channel, struct query *query, int status,
|
87
|
+
unsigned char *abuf, int alen);
|
88
|
+
|
89
|
+
/* Return 1 when *now is at or past *check (i.e. the deadline has been
 * reached), 0 otherwise. */
int ares__timedout(struct timeval *now,
                   struct timeval *check)
{
  long sec_delta = now->tv_sec - check->tv_sec;

  if (sec_delta != 0)
    return (sec_delta > 0) ? 1 : 0;

  /* Whole seconds are equal; the microsecond fields break the tie. */
  return (now->tv_usec >= check->tv_usec) ? 1 : 0;
}
|
103
|
+
|
104
|
+
/* Advance *now by the given number of milliseconds, keeping tv_usec
 * normalized to [0, 1000000). */
static void timeadd(struct timeval *now, int millisecs)
{
  int whole_secs  = millisecs / 1000;
  int extra_usecs = (millisecs % 1000) * 1000;

  now->tv_sec  += whole_secs;
  now->tv_usec += extra_usecs;

  /* At most one carry is possible: both addends are below 1000000. */
  if (now->tv_usec >= 1000000) {
    now->tv_sec  += 1;
    now->tv_usec -= 1000000;
  }
}
|
115
|
+
|
116
|
+
/*
 * generic process function
 *
 * Single driver behind both ares_process() and ares_process_fd(): snapshots
 * the current time once, then runs every processing stage against either the
 * fd_sets (select-style callers) or the two explicit sockets. Stages run in
 * a fixed order: flush pending TCP writes, then drain TCP/UDP reads, then
 * fire timeouts, then tear down any connections flagged broken.
 */
static void processfds(ares_channel channel,
                       fd_set *read_fds, ares_socket_t read_fd,
                       fd_set *write_fds, ares_socket_t write_fd)
{
  struct timeval now = ares__tvnow();

  write_tcp_data(channel, write_fds, write_fd, &now);
  read_tcp_data(channel, read_fds, read_fd, &now);
  read_udp_packets(channel, read_fds, read_fd, &now);
  process_timeouts(channel, &now);
  process_broken_connections(channel, &now);
}
|
131
|
+
|
132
|
+
/* Something interesting happened on the wire, or there was a timeout.
 * See what's up and respond accordingly.
 *
 * select()-style entry point: services every channel socket present in
 * read_fds/write_fds. Passing ARES_SOCKET_BAD for the explicit fds tells
 * processfds to use only the sets. */
void ares_process(ares_channel channel, fd_set *read_fds, fd_set *write_fds)
{
  processfds(channel, read_fds, ARES_SOCKET_BAD, write_fds, ARES_SOCKET_BAD);
}
|
139
|
+
|
140
|
+
/* Something interesting happened on the wire, or there was a timeout.
 * See what's up and respond accordingly.
 *
 * Per-socket entry point for event-loop integrations that track individual
 * fds (epoll/kqueue style) instead of building fd_sets. */
void ares_process_fd(ares_channel channel,
                     ares_socket_t read_fd, /* use ARES_SOCKET_BAD or valid
                                               file descriptors */
                     ares_socket_t write_fd)
{
  processfds(channel, NULL, read_fd, NULL, write_fd);
}
|
150
|
+
|
151
|
+
|
152
|
+
/* Return 1 if the specified error number describes a readiness error, or 0
 * otherwise. This is mostly for HP-UX, which could return EAGAIN or
 * EWOULDBLOCK. See this man page
 *
 * http://devrsrc1.external.hp.com/STKS/cgi-bin/man2html?
 * manpage=/usr/share/man/man2.Z/send.2
 */
static int try_again(int errnum)
{
#if !defined EWOULDBLOCK && !defined EAGAIN
#error "Neither EWOULDBLOCK nor EAGAIN defined"
#endif
#ifdef EWOULDBLOCK
  if (errnum == EWOULDBLOCK)
    return 1;
#endif
#if defined EAGAIN && EAGAIN != EWOULDBLOCK
  /* Only test EAGAIN separately on platforms where it is distinct. */
  if (errnum == EAGAIN)
    return 1;
#endif
  return 0;
}
|
177
|
+
|
178
|
+
/* If any TCP sockets select true for writing, write out queued data
 * we have for them.
 *
 * For each writable server, tries a single writev() of the whole send queue;
 * if iovec allocation fails, falls back to writing just the head request.
 * Bytes actually written are consumed from the queue via
 * advance_tcp_send_queue(). Any non-retryable socket error routes through
 * handle_error(), which resets that server.
 */
static void write_tcp_data(ares_channel channel,
                           fd_set *write_fds,
                           ares_socket_t write_fd,
                           struct timeval *now)
{
  struct server_state *server;
  struct send_request *sendreq;
  struct iovec *vec;
  int i;
  ssize_t scount;
  ssize_t wcount;
  size_t n;

  if(!write_fds && (write_fd == ARES_SOCKET_BAD))
    /* no possible action */
    return;

  for (i = 0; i < channel->nservers; i++)
    {
      /* Make sure server has data to send and is selected in write_fds or
         write_fd. */
      server = &channel->servers[i];
      if (!server->qhead || server->tcp_socket == ARES_SOCKET_BAD ||
          server->is_broken)
        continue;

      if(write_fds) {
        if(!FD_ISSET(server->tcp_socket, write_fds))
          continue;
      }
      else {
        if(server->tcp_socket != write_fd)
          continue;
      }

      if(write_fds)
        /* If there's an error and we close this socket, then open
         * another with the same fd to talk to another server, then we
         * don't want to think that it was the new socket that was
         * ready. This is not disastrous, but is likely to result in
         * extra system calls and confusion. */
        FD_CLR(server->tcp_socket, write_fds);

      /* Count the number of send queue items. */
      n = 0;
      for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
        n++;

      /* Allocate iovecs so we can send all our data at once. */
      vec = ares_malloc(n * sizeof(struct iovec));
      if (vec)
        {
          /* Fill in the iovecs and send. */
          n = 0;
          for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
            {
              vec[n].iov_base = (char *) sendreq->data;
              vec[n].iov_len = sendreq->len;
              n++;
            }
          wcount = (ssize_t)writev(server->tcp_socket, vec, (int)n);
          ares_free(vec);
          if (wcount < 0)
            {
              /* Retryable errors (EAGAIN/EWOULDBLOCK) are simply skipped;
               * anything else resets this server's connections. */
              if (!try_again(SOCKERRNO))
                handle_error(channel, i, now);
              continue;
            }

          /* Advance the send queue by as many bytes as we sent. */
          advance_tcp_send_queue(channel, i, wcount);
        }
      else
        {
          /* Can't allocate iovecs; just send the first request. */
          sendreq = server->qhead;

          scount = swrite(server->tcp_socket, sendreq->data, sendreq->len);
          if (scount < 0)
            {
              if (!try_again(SOCKERRNO))
                handle_error(channel, i, now);
              continue;
            }

          /* Advance the send queue by as many bytes as we sent. */
          advance_tcp_send_queue(channel, i, scount);
        }
    }
}
|
271
|
+
|
272
|
+
/* Consume the given number of bytes from the head of the TCP send queue.
 *
 * Fully-sent requests are unlinked and freed (including their private copy
 * in data_storage, if the owning query handed one off); a partially-sent
 * head request just has its data pointer/length advanced. When the queue
 * drains completely, the application is told we no longer need write
 * readiness on this socket. */
static void advance_tcp_send_queue(ares_channel channel, int whichserver,
                                   ssize_t num_bytes)
{
  struct send_request *sendreq;
  struct server_state *server = &channel->servers[whichserver];
  while (num_bytes > 0) {
    sendreq = server->qhead;
    if ((size_t)num_bytes >= sendreq->len) {
      /* This whole request was sent: drop it from the queue. */
      num_bytes -= sendreq->len;
      server->qhead = sendreq->next;
      if (sendreq->data_storage)
        ares_free(sendreq->data_storage);
      ares_free(sendreq);
      if (server->qhead == NULL) {
        /* Queue is empty: stop watching the socket for writability. */
        SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 0);
        server->qtail = NULL;

        /* qhead is NULL so we cannot continue this loop */
        break;
      }
    }
    else {
      /* Partial send: advance the head request past the bytes written. */
      sendreq->data += num_bytes;
      sendreq->len -= num_bytes;
      num_bytes = 0;
    }
  }
}
|
301
|
+
|
302
|
+
/* If any TCP socket selects true for reading, read some data,
 * allocate a buffer if we finish reading the length word, and process
 * a packet if we finish reading one.
 *
 * DNS-over-TCP frames each message with a 2-byte big-endian length prefix;
 * this function is a resumable parser over that framing. Per-server state
 * (tcp_lenbuf_pos / tcp_buffer_pos) tracks where we are, so a message may
 * arrive across any number of reads.
 */
static void read_tcp_data(ares_channel channel, fd_set *read_fds,
                          ares_socket_t read_fd, struct timeval *now)
{
  struct server_state *server;
  int i;
  ssize_t count;

  if(!read_fds && (read_fd == ARES_SOCKET_BAD))
    /* no possible action */
    return;

  for (i = 0; i < channel->nservers; i++)
    {
      /* Make sure the server has a socket and is selected in read_fds. */
      server = &channel->servers[i];
      if (server->tcp_socket == ARES_SOCKET_BAD || server->is_broken)
        continue;

      if(read_fds) {
        if(!FD_ISSET(server->tcp_socket, read_fds))
          continue;
      }
      else {
        if(server->tcp_socket != read_fd)
          continue;
      }

      if(read_fds)
        /* If there's an error and we close this socket, then open another
         * with the same fd to talk to another server, then we don't want to
         * think that it was the new socket that was ready. This is not
         * disastrous, but is likely to result in extra system calls and
         * confusion. */
        FD_CLR(server->tcp_socket, read_fds);

      if (server->tcp_lenbuf_pos != 2)
        {
          /* We haven't yet read a length word, so read that (or
           * what's left to read of it).
           */
          count = sread(server->tcp_socket,
                        server->tcp_lenbuf + server->tcp_lenbuf_pos,
                        2 - server->tcp_lenbuf_pos);
          if (count <= 0)
            {
              /* 0 means the peer closed; -1 with a non-retryable errno is a
               * real failure. Either way, reset this server. */
              if (!(count == -1 && try_again(SOCKERRNO)))
                handle_error(channel, i, now);
              continue;
            }

          server->tcp_lenbuf_pos += (int)count;
          if (server->tcp_lenbuf_pos == 2)
            {
              /* We finished reading the length word. Decode the
               * length and allocate a buffer for the data.
               */
              server->tcp_length = server->tcp_lenbuf[0] << 8
                | server->tcp_lenbuf[1];
              server->tcp_buffer = ares_malloc(server->tcp_length);
              if (!server->tcp_buffer) {
                handle_error(channel, i, now);
                return; /* bail out on malloc failure. TODO: make this
                           function return error codes */
              }
              server->tcp_buffer_pos = 0;
            }
        }
      else
        {
          /* Read data into the allocated buffer. */
          count = sread(server->tcp_socket,
                        server->tcp_buffer + server->tcp_buffer_pos,
                        server->tcp_length - server->tcp_buffer_pos);
          if (count <= 0)
            {
              if (!(count == -1 && try_again(SOCKERRNO)))
                handle_error(channel, i, now);
              continue;
            }

          server->tcp_buffer_pos += (int)count;
          if (server->tcp_buffer_pos == server->tcp_length)
            {
              /* We finished reading this answer; process it and
               * prepare to read another length word.
               */
              process_answer(channel, server->tcp_buffer, server->tcp_length,
                             i, 1, now);
              ares_free(server->tcp_buffer);
              server->tcp_buffer = NULL;
              server->tcp_lenbuf_pos = 0;
              server->tcp_buffer_pos = 0;
            }
        }
    }
}
|
402
|
+
|
403
|
+
/* If any UDP sockets select true for reading, process them.
 *
 * Drains each ready UDP socket in a loop, handing every datagram to
 * process_answer(). When recvfrom() is available, the sender's address is
 * verified against the server we queried before the packet is accepted. */
static void read_udp_packets(ares_channel channel, fd_set *read_fds,
                             ares_socket_t read_fd, struct timeval *now)
{
  struct server_state *server;
  int i;
  ssize_t count;
  unsigned char buf[MAXENDSSZ + 1];
#ifdef HAVE_RECVFROM
  ares_socklen_t fromlen;
  union {
    struct sockaddr sa;
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } from;
#endif

  if(!read_fds && (read_fd == ARES_SOCKET_BAD))
    /* no possible action */
    return;

  for (i = 0; i < channel->nservers; i++)
    {
      /* Make sure the server has a socket and is selected in read_fds. */
      server = &channel->servers[i];

      if (server->udp_socket == ARES_SOCKET_BAD || server->is_broken)
        continue;

      if(read_fds) {
        if(!FD_ISSET(server->udp_socket, read_fds))
          continue;
      }
      else {
        if(server->udp_socket != read_fd)
          continue;
      }

      if(read_fds)
        /* If there's an error and we close this socket, then open
         * another with the same fd to talk to another server, then we
         * don't want to think that it was the new socket that was
         * ready. This is not disastrous, but is likely to result in
         * extra system calls and confusion. */
        FD_CLR(server->udp_socket, read_fds);

      /* To reduce event loop overhead, read and process as many
       * packets as we can. */
      do {
        /* process_answer() inside this loop may close the socket (e.g. by
         * switching the query to TCP), so re-check it each iteration. */
        if (server->udp_socket == ARES_SOCKET_BAD)
          count = 0;

        else {
#ifdef HAVE_RECVFROM
          if (server->addr.family == AF_INET)
            fromlen = sizeof(from.sa4);
          else
            fromlen = sizeof(from.sa6);
          count = (ssize_t)recvfrom(server->udp_socket, (void *)buf,
                                    sizeof(buf), 0, &from.sa, &fromlen);
#else
          count = sread(server->udp_socket, buf, sizeof(buf));
#endif
        }

        if (count == -1 && try_again(SOCKERRNO))
          continue;
        else if (count <= 0)
          handle_error(channel, i, now);
#ifdef HAVE_RECVFROM
        else if (!same_address(&from.sa, &server->addr))
          /* The address the response comes from does not match the address we
           * sent the request to. Someone may be attempting to perform a cache
           * poisoning attack. */
          break;
#endif
        else
          process_answer(channel, buf, (int)count, i, 0, now);
      } while (count > 0);
    }
}
|
484
|
+
|
485
|
+
/* If any queries have timed out, note the timeout and move them on.
 *
 * Queries live in a time-wheel of ARES_TIMEOUT_TABLE_SIZE one-second
 * buckets; we walk only the buckets between the last sweep and now, so
 * future queries are never touched. Each expired query is handed to
 * next_server(), which either retries elsewhere or fails it. */
static void process_timeouts(ares_channel channel, struct timeval *now)
{
  time_t t; /* the time of the timeouts we're processing */
  struct query *query;
  struct list_node* list_head;
  struct list_node* list_node;

  /* Process all the timeouts that have fired since the last time we processed
   * timeouts. If things are going well, then we'll have hundreds/thousands of
   * queries that fall into future buckets, and only a handful of requests
   * that fall into the "now" bucket, so this should be quite quick.
   */
  for (t = channel->last_timeout_processed; t <= now->tv_sec; t++)
    {
      list_head = &(channel->queries_by_timeout[t % ARES_TIMEOUT_TABLE_SIZE]);
      for (list_node = list_head->next; list_node != list_head; )
        {
          query = list_node->data;
          list_node = list_node->next; /* in case the query gets deleted */
          /* tv_sec == 0 marks a query with no timeout armed; skip those. */
          if (query->timeout.tv_sec && ares__timedout(now, &query->timeout))
            {
              query->error_status = ARES_ETIMEOUT;
              ++query->timeouts;
              next_server(channel, query, now);
            }
        }
     }
  channel->last_timeout_processed = now->tv_sec;
}
|
515
|
+
|
516
|
+
/* Handle an answer from a server.
 *
 * Matches the packet to an outstanding query (by qid AND question, since
 * qids wrap), then applies protocol fallbacks before completing it:
 *  - EDNS rejection (NOTIMP/FORMERR/SERVFAIL) -> strip the OPT record and
 *    retry the whole channel without EDNS;
 *  - truncated UDP reply -> retry the query over TCP;
 *  - SERVFAIL/NOTIMP/REFUSED (unless NOCHECKRESP) -> try the next server.
 * Only a packet that survives all of that ends the query with success. */
static void process_answer(ares_channel channel, unsigned char *abuf,
                           int alen, int whichserver, int tcp,
                           struct timeval *now)
{
  int tc, rcode, packetsz;
  unsigned short id;
  struct query *query;
  struct list_node* list_head;
  struct list_node* list_node;

  /* If there's no room in the answer for a header, we can't do much
   * with it. */
  if (alen < HFIXEDSZ)
    return;

  /* Grab the query ID, truncate bit, and response code from the packet. */
  id = DNS_HEADER_QID(abuf);
  tc = DNS_HEADER_TC(abuf);
  rcode = DNS_HEADER_RCODE(abuf);

  /* Find the query corresponding to this packet. The queries are
   * hashed/bucketed by query id, so this lookup should be quick. Note that
   * both the query id and the questions must be the same; when the query id
   * wraps around we can have multiple outstanding queries with the same query
   * id, so we need to check both the id and question.
   */
  query = NULL;
  list_head = &(channel->queries_by_qid[id % ARES_QID_TABLE_SIZE]);
  for (list_node = list_head->next; list_node != list_head;
       list_node = list_node->next)
    {
      struct query *q = list_node->data;
      if ((q->qid == id) && same_questions(q->qbuf, q->qlen, abuf, alen))
        {
          query = q;
          break;
        }
    }
  if (!query)
    return;

  packetsz = PACKETSZ;
  /* If we use EDNS and server answers with one of these RCODES, the protocol
   * extension is not understood by the responder. We must retry the query
   * without EDNS enabled.
   */
  if (channel->flags & ARES_FLAG_EDNS)
    {
      packetsz = channel->ednspsz;
      if (rcode == NOTIMP || rcode == FORMERR || rcode == SERVFAIL)
        {
          /* Remove the trailing OPT RR from the stored request, fix up the
           * TCP length prefix and ARCOUNT, and disable EDNS channel-wide. */
          int qlen = (query->tcplen - 2) - EDNSFIXEDSZ;
          channel->flags ^= ARES_FLAG_EDNS;
          query->tcplen -= EDNSFIXEDSZ;
          query->qlen -= EDNSFIXEDSZ;
          query->tcpbuf[0] = (unsigned char)((qlen >> 8) & 0xff);
          query->tcpbuf[1] = (unsigned char)(qlen & 0xff);
          DNS_HEADER_SET_ARCOUNT(query->tcpbuf + 2, 0);
          query->tcpbuf = ares_realloc(query->tcpbuf, query->tcplen);
          query->qbuf = query->tcpbuf + 2;
          ares__send_query(channel, query, now);
          return;
        }
    }

  /* If we got a truncated UDP packet and are not ignoring truncation,
   * don't accept the packet, and switch the query to TCP if we hadn't
   * done so already.
   */
  if ((tc || alen > packetsz) && !tcp && !(channel->flags & ARES_FLAG_IGNTC))
    {
      if (!query->using_tcp)
        {
          query->using_tcp = 1;
          ares__send_query(channel, query, now);
        }
      return;
    }

  /* Limit alen to PACKETSZ if we aren't using TCP (only relevant if we
   * are ignoring truncation.
   */
  if (alen > packetsz && !tcp)
    alen = packetsz;

  /* If we aren't passing through all error packets, discard packets
   * with SERVFAIL, NOTIMP, or REFUSED response codes.
   */
  if (!(channel->flags & ARES_FLAG_NOCHECKRESP))
    {
      if (rcode == SERVFAIL || rcode == NOTIMP || rcode == REFUSED)
        {
          skip_server(channel, query, whichserver);
          /* Only move on if the query is still parked on the failing
           * server; otherwise a retry is already in flight elsewhere. */
          if (query->server == whichserver)
            next_server(channel, query, now);
          return;
        }
    }

  end_query(channel, query, ARES_SUCCESS, abuf, alen);
}
|
618
|
+
|
619
|
+
/* Close all the connections that are no longer usable. */
|
620
|
+
static void process_broken_connections(ares_channel channel,
|
621
|
+
struct timeval *now)
|
622
|
+
{
|
623
|
+
int i;
|
624
|
+
for (i = 0; i < channel->nservers; i++)
|
625
|
+
{
|
626
|
+
struct server_state *server = &channel->servers[i];
|
627
|
+
if (server->is_broken)
|
628
|
+
{
|
629
|
+
handle_error(channel, i, now);
|
630
|
+
}
|
631
|
+
}
|
632
|
+
}
|
633
|
+
|
634
|
+
/* Swap the contents of two lists */
|
635
|
+
static void swap_lists(struct list_node* head_a,
|
636
|
+
struct list_node* head_b)
|
637
|
+
{
|
638
|
+
int is_a_empty = ares__is_list_empty(head_a);
|
639
|
+
int is_b_empty = ares__is_list_empty(head_b);
|
640
|
+
struct list_node old_a = *head_a;
|
641
|
+
struct list_node old_b = *head_b;
|
642
|
+
|
643
|
+
if (is_a_empty) {
|
644
|
+
ares__init_list_head(head_b);
|
645
|
+
} else {
|
646
|
+
*head_b = old_a;
|
647
|
+
old_a.next->prev = head_b;
|
648
|
+
old_a.prev->next = head_b;
|
649
|
+
}
|
650
|
+
if (is_b_empty) {
|
651
|
+
ares__init_list_head(head_a);
|
652
|
+
} else {
|
653
|
+
*head_a = old_b;
|
654
|
+
old_b.next->prev = head_a;
|
655
|
+
old_b.prev->next = head_a;
|
656
|
+
}
|
657
|
+
}
|
658
|
+
|
659
|
+
/* Reset a failed server and re-route every query that was in flight to it.
 *
 * Closes the server's sockets, then steals its queries_to_server list into
 * a local head before iterating: next_server() may re-send a query to this
 * very server, which would re-insert it into queries_to_server and loop
 * forever if we walked the live list. */
static void handle_error(ares_channel channel, int whichserver,
                         struct timeval *now)
{
  struct server_state *server;
  struct query *query;
  struct list_node list_head;
  struct list_node* list_node;

  server = &channel->servers[whichserver];

  /* Reset communications with this server. */
  ares__close_sockets(channel, server);

  /* Tell all queries talking to this server to move on and not try this
   * server again. We steal the current list of queries that were in-flight to
   * this server, since when we call next_server this can cause the queries to
   * be re-sent to this server, which will re-insert these queries in that
   * same server->queries_to_server list.
   */
  ares__init_list_head(&list_head);
  swap_lists(&list_head, &(server->queries_to_server));
  for (list_node = list_head.next; list_node != &list_head; )
    {
      query = list_node->data;
      list_node = list_node->next; /* in case the query gets deleted */
      assert(query->server == whichserver);
      skip_server(channel, query, whichserver);
      next_server(channel, query, now);
    }
  /* Each query should have removed itself from our temporary list as
   * it re-sent itself or finished up...
   */
  assert(ares__is_list_empty(&list_head));
}
|
693
|
+
|
694
|
+
/* Flag the given server as one this query should avoid — but only when
 * there are other servers to fall back to. With a single configured server
 * we must keep re-using it, since it's our only hope; the failure may have
 * been transient (e.g. the server timed out our TCP connection just as we
 * were sending another request). */
static void skip_server(ares_channel channel, struct query *query,
                        int whichserver)
{
  if (channel->nservers <= 1)
    return; /* sole server: never mark it skipped */

  query->server_info[whichserver].skip_server = 1;
}
|
710
|
+
|
711
|
+
/* Move a query on to its next candidate server, or fail it.
 *
 * Round-robins over the servers until either a usable one is found (and the
 * query is re-sent) or the total attempt budget of
 * nservers * tries is exhausted, in which case the query ends with its
 * accumulated error_status. */
static void next_server(ares_channel channel, struct query *query,
                        struct timeval *now)
{
  /* We need to try each server channel->tries times. We have channel->nservers
   * servers to try. In total, we need to do channel->nservers * channel->tries
   * attempts. Use query->try to remember how many times we already attempted
   * this query. Use modular arithmetic to find the next server to try. */
  while (++(query->try_count) < (channel->nservers * channel->tries))
    {
      struct server_state *server;

      /* Move on to the next server. */
      query->server = (query->server + 1) % channel->nservers;
      server = &channel->servers[query->server];

      /* We don't want to use this server if (1) we decided this connection is
       * broken, and thus about to be closed, (2) we've decided to skip this
       * server because of earlier errors we encountered, or (3) we already
       * sent this query over this exact connection.
       */
      if (!server->is_broken &&
           !query->server_info[query->server].skip_server &&
           !(query->using_tcp &&
             (query->server_info[query->server].tcp_connection_generation ==
              server->tcp_connection_generation)))
        {
           ares__send_query(channel, query, now);
           return;
        }

      /* You might think that with TCP we only need one try. However, even
       * when using TCP, servers can time-out our connection just as we're
       * sending a request, or close our connection because they die, or never
       * send us a reply because they get wedged or tickle a bug that drops
       * our request.
       */
    }

  /* If we are here, all attempts to perform query failed. */
  end_query(channel, query, query->error_status, NULL, 0);
}
|
752
|
+
|
753
|
+
/* Transmit 'query' to its currently selected server (query->server) using
 * either TCP or UDP, then (re)compute the query's timeout and re-bucket it
 * in the channel's timeout and per-server lists.  On a socket setup or send
 * failure the server is skipped and next_server() is invoked to retry
 * elsewhere; on allocation failure the query is ended with ARES_ENOMEM. */
void ares__send_query(ares_channel channel, struct query *query,
                      struct timeval *now)
{
  struct send_request *sendreq;
  struct server_state *server;
  int timeplus;

  server = &channel->servers[query->server];
  if (query->using_tcp)
    {
      /* Make sure the TCP socket for this server is set up and queue
       * a send request.
       */
      if (server->tcp_socket == ARES_SOCKET_BAD)
        {
          if (open_tcp_socket(channel, server) == -1)
            {
              /* Could not open a TCP connection: mark this server as
               * unusable for this query and move on to the next one. */
              skip_server(channel, query, query->server);
              next_server(channel, query, now);
              return;
            }
        }
      sendreq = ares_malloc(sizeof(struct send_request));
      if (!sendreq)
        {
          end_query(channel, query, ARES_ENOMEM, NULL, 0);
          return;
        }
      memset(sendreq, 0, sizeof(struct send_request));
      /* To make the common case fast, we avoid copies by using the query's
       * tcpbuf for as long as the query is alive. In the rare case where the
       * query ends while it's queued for transmission, then we give the
       * sendreq its own copy of the request packet and put it in
       * sendreq->data_storage.  (That handover happens in end_query().)
       */
      sendreq->data_storage = NULL;
      sendreq->data = query->tcpbuf;
      sendreq->len = query->tcplen;
      sendreq->owner_query = query;
      sendreq->next = NULL;
      /* Append the request to the server's send queue.  When the queue was
       * previously empty we also tell the caller (via the socket state
       * callback) that this socket now wants both read and write events. */
      if (server->qtail)
        server->qtail->next = sendreq;
      else
        {
          SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 1);
          server->qhead = sendreq;
        }
      server->qtail = sendreq;
      /* Remember which TCP connection instance carried this query so that
       * next_server() will not re-send it over the very same connection. */
      query->server_info[query->server].tcp_connection_generation =
        server->tcp_connection_generation;
    }
  else
    {
      if (server->udp_socket == ARES_SOCKET_BAD)
        {
          if (open_udp_socket(channel, server) == -1)
            {
              skip_server(channel, query, query->server);
              next_server(channel, query, now);
              return;
            }
        }
      if (swrite(server->udp_socket, query->qbuf, query->qlen) == -1)
        {
          /* FIXME: Handle EAGAIN here since it likely can happen. */
          skip_server(channel, query, query->server);
          next_server(channel, query, now);
          return;
        }
    }
  /* Exponential backoff: double the base timeout for every full round of
   * servers already attempted, then jitter it to 9/16..16/16 of that value.
   * NOTE(review): the left shift grows unchecked with the retry round;
   * extremely large try counts could overflow 'timeplus' — confirm against
   * the channel's configured tries limit. */
  timeplus = channel->timeout << (query->try_count / channel->nservers);
  timeplus = (timeplus * (9 + (rand () & 7))) / 16;
  query->timeout = *now;
  timeadd(&query->timeout, timeplus);
  /* Keep track of queries bucketed by timeout, so we can process
   * timeout events quickly.
   */
  ares__remove_from_list(&(query->queries_by_timeout));
  ares__insert_in_list(
    &(query->queries_by_timeout),
    &(channel->queries_by_timeout[query->timeout.tv_sec %
                                  ARES_TIMEOUT_TABLE_SIZE]));

  /* Keep track of queries bucketed by server, so we can process server
   * errors quickly.
   */
  ares__remove_from_list(&(query->queries_to_server));
  ares__insert_in_list(&(query->queries_to_server),
                       &(server->queries_to_server));
}
|
843
|
+
|
844
|
+
/*
|
845
|
+
* setsocknonblock sets the given socket to either blocking or non-blocking
|
846
|
+
* mode based on the 'nonblock' boolean argument. This function is highly
|
847
|
+
* portable.
|
848
|
+
*/
|
849
|
+
static int setsocknonblock(ares_socket_t sockfd, /* operate on this */
|
850
|
+
int nonblock /* TRUE or FALSE */)
|
851
|
+
{
|
852
|
+
#if defined(USE_BLOCKING_SOCKETS)
|
853
|
+
|
854
|
+
return 0; /* returns success */
|
855
|
+
|
856
|
+
#elif defined(HAVE_FCNTL_O_NONBLOCK)
|
857
|
+
|
858
|
+
/* most recent unix versions */
|
859
|
+
int flags;
|
860
|
+
flags = fcntl(sockfd, F_GETFL, 0);
|
861
|
+
if (FALSE != nonblock)
|
862
|
+
return fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
|
863
|
+
else
|
864
|
+
return fcntl(sockfd, F_SETFL, flags & (~O_NONBLOCK)); /* LCOV_EXCL_LINE */
|
865
|
+
|
866
|
+
#elif defined(HAVE_IOCTL_FIONBIO)
|
867
|
+
|
868
|
+
/* older unix versions */
|
869
|
+
int flags = nonblock ? 1 : 0;
|
870
|
+
return ioctl(sockfd, FIONBIO, &flags);
|
871
|
+
|
872
|
+
#elif defined(HAVE_IOCTLSOCKET_FIONBIO)
|
873
|
+
|
874
|
+
#ifdef WATT32
|
875
|
+
char flags = nonblock ? 1 : 0;
|
876
|
+
#else
|
877
|
+
/* Windows */
|
878
|
+
unsigned long flags = nonblock ? 1UL : 0UL;
|
879
|
+
#endif
|
880
|
+
return ioctlsocket(sockfd, FIONBIO, &flags);
|
881
|
+
|
882
|
+
#elif defined(HAVE_IOCTLSOCKET_CAMEL_FIONBIO)
|
883
|
+
|
884
|
+
/* Amiga */
|
885
|
+
long flags = nonblock ? 1L : 0L;
|
886
|
+
return IoctlSocket(sockfd, FIONBIO, flags);
|
887
|
+
|
888
|
+
#elif defined(HAVE_SETSOCKOPT_SO_NONBLOCK)
|
889
|
+
|
890
|
+
/* BeOS */
|
891
|
+
long b = nonblock ? 1L : 0L;
|
892
|
+
return setsockopt(sockfd, SOL_SOCKET, SO_NONBLOCK, &b, sizeof(b));
|
893
|
+
|
894
|
+
#else
|
895
|
+
# error "no non-blocking method was found/used/set"
|
896
|
+
#endif
|
897
|
+
}
|
898
|
+
|
899
|
+
/* Apply the channel's standard configuration to a freshly created socket
 * 's' of address family 'family': non-blocking mode, close-on-exec,
 * optional send/receive buffer sizes, optional device binding, and an
 * optional bind() to a configured local source address.  Returns 0 on
 * success, -1 on a fatal setup error (caller closes the socket). */
static int configure_socket(ares_socket_t s, int family, ares_channel channel)
{
  /* Union lets us bind() either an IPv4 or IPv6 local address through a
   * single struct sockaddr view without aliasing trouble. */
  union {
    struct sockaddr sa;
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } local;

  /* Failure to switch to non-blocking mode is deliberately ignored here. */
  (void)setsocknonblock(s, TRUE);

#if defined(FD_CLOEXEC) && !defined(MSDOS)
  /* Configure the socket fd as close-on-exec. */
  if (fcntl(s, F_SETFD, FD_CLOEXEC) == -1)
    return -1; /* LCOV_EXCL_LINE */
#endif

  /* Set the socket's send and receive buffer sizes. */
  if ((channel->socket_send_buffer_size > 0) &&
      setsockopt(s, SOL_SOCKET, SO_SNDBUF,
                 (void *)&channel->socket_send_buffer_size,
                 sizeof(channel->socket_send_buffer_size)) == -1)
    return -1;

  if ((channel->socket_receive_buffer_size > 0) &&
      setsockopt(s, SOL_SOCKET, SO_RCVBUF,
                 (void *)&channel->socket_receive_buffer_size,
                 sizeof(channel->socket_receive_buffer_size)) == -1)
    return -1;

#ifdef SO_BINDTODEVICE
  if (channel->local_dev_name[0]) {
    if (setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE,
                   channel->local_dev_name, sizeof(channel->local_dev_name))) {
      /* Only root can do this, and usually not fatal if it doesn't work, so */
      /* just continue on. */
    }
  }
#endif

  /* Bind to the configured local source address, if any.  For IPv4 a
   * non-zero local_ip4 triggers the bind; for IPv6 any value other than
   * the all-zero in6addr_any does. */
  if (family == AF_INET) {
    if (channel->local_ip4) {
      memset(&local.sa4, 0, sizeof(local.sa4));
      local.sa4.sin_family = AF_INET;
      local.sa4.sin_addr.s_addr = htonl(channel->local_ip4);
      if (bind(s, &local.sa, sizeof(local.sa4)) < 0)
        return -1;
    }
  }
  else if (family == AF_INET6) {
    if (memcmp(channel->local_ip6, &ares_in6addr_any,
               sizeof(channel->local_ip6)) != 0) {
      memset(&local.sa6, 0, sizeof(local.sa6));
      local.sa6.sin6_family = AF_INET6;
      memcpy(&local.sa6.sin6_addr, channel->local_ip6,
             sizeof(channel->local_ip6));
      if (bind(s, &local.sa, sizeof(local.sa6)) < 0)
        return -1;
    }
  }

  return 0;
}
|
961
|
+
|
962
|
+
/* Create, configure, and asynchronously connect a TCP socket to 'server',
 * storing it in server->tcp_socket and bumping the channel's TCP connection
 * generation on success.  The per-server tcp_port overrides the channel-wide
 * one when set.  Returns 0 on success; -1 (or a negative callback error) on
 * failure, with the socket closed. */
static int open_tcp_socket(ares_channel channel, struct server_state *server)
{
  ares_socket_t s;
  int opt;
  ares_socklen_t salen;
  /* Union provides correctly aligned storage for either address family. */
  union {
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } saddr;
  struct sockaddr *sa;

  /* Build the destination sockaddr for the server's address family. */
  switch (server->addr.family)
    {
      case AF_INET:
        sa = (void *)&saddr.sa4;
        salen = sizeof(saddr.sa4);
        memset(sa, 0, salen);
        saddr.sa4.sin_family = AF_INET;
        /* Per-server port wins over the channel default. */
        if (server->addr.tcp_port) {
          saddr.sa4.sin_port = aresx_sitous(server->addr.tcp_port);
        } else {
          saddr.sa4.sin_port = aresx_sitous(channel->tcp_port);
        }
        memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
               sizeof(server->addr.addrV4));
        break;
      case AF_INET6:
        sa = (void *)&saddr.sa6;
        salen = sizeof(saddr.sa6);
        memset(sa, 0, salen);
        saddr.sa6.sin6_family = AF_INET6;
        if (server->addr.tcp_port) {
          saddr.sa6.sin6_port = aresx_sitous(server->addr.tcp_port);
        } else {
          saddr.sa6.sin6_port = aresx_sitous(channel->tcp_port);
        }
        memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
               sizeof(server->addr.addrV6));
        break;
      default:
        return -1; /* LCOV_EXCL_LINE */
    }

  /* Acquire a socket. */
  s = socket(server->addr.family, SOCK_STREAM, 0);
  if (s == ARES_SOCKET_BAD)
    return -1;

  /* Configure it. */
  if (configure_socket(s, server->addr.family, channel) < 0)
    {
      sclose(s);
      return -1;
    }

#ifdef TCP_NODELAY
  /*
   * Disable the Nagle algorithm (only relevant for TCP sockets, and thus not
   * in configure_socket). In general, in DNS lookups we're pretty much
   * interested in firing off a single request and then waiting for a reply,
   * so batching isn't very interesting.
   */
  opt = 1;
  if (setsockopt(s, IPPROTO_TCP, TCP_NODELAY,
                 (void *)&opt, sizeof(opt)) == -1)
    {
      sclose(s);
      return -1;
    }
#endif

  /* Give the user's configuration callback a chance to veto/adjust the
   * socket before we connect. */
  if (channel->sock_config_cb)
    {
      int err = channel->sock_config_cb(s, SOCK_STREAM,
                                        channel->sock_config_cb_data);
      if (err < 0)
        {
          sclose(s);
          return err;
        }
    }

  /* Connect to the server.  The socket is non-blocking, so an in-progress
   * connection (EINPROGRESS/EWOULDBLOCK) is expected and not an error. */
  if (connect(s, sa, salen) == -1)
    {
      int err = SOCKERRNO;

      if (err != EINPROGRESS && err != EWOULDBLOCK)
        {
          sclose(s);
          return -1;
        }
    }

  /* Notify the user's creation callback after a (possibly pending) connect. */
  if (channel->sock_create_cb)
    {
      int err = channel->sock_create_cb(s, SOCK_STREAM,
                                        channel->sock_create_cb_data);
      if (err < 0)
        {
          sclose(s);
          return err;
        }
    }

  /* Tell the event integration we now want read events on this socket. */
  SOCK_STATE_CALLBACK(channel, s, 1, 0);
  server->tcp_buffer_pos = 0;
  server->tcp_socket = s;
  /* New generation so stale queries queued on the old connection are
   * distinguishable from ones sent on this connection. */
  server->tcp_connection_generation = ++channel->tcp_connection_generation;
  return 0;
}
|
1073
|
+
|
1074
|
+
/* Create, configure, and connect() a UDP socket to 'server', storing it in
 * server->udp_socket.  Mirrors open_tcp_socket() but without TCP_NODELAY or
 * generation tracking.  The per-server udp_port overrides the channel-wide
 * one when set.  Returns 0 on success; -1 (or a negative callback error) on
 * failure, with the socket closed. */
static int open_udp_socket(ares_channel channel, struct server_state *server)
{
  ares_socket_t s;
  ares_socklen_t salen;
  /* Union provides correctly aligned storage for either address family. */
  union {
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } saddr;
  struct sockaddr *sa;

  /* Build the destination sockaddr for the server's address family. */
  switch (server->addr.family)
    {
      case AF_INET:
        sa = (void *)&saddr.sa4;
        salen = sizeof(saddr.sa4);
        memset(sa, 0, salen);
        saddr.sa4.sin_family = AF_INET;
        /* Per-server port wins over the channel default. */
        if (server->addr.udp_port) {
          saddr.sa4.sin_port = aresx_sitous(server->addr.udp_port);
        } else {
          saddr.sa4.sin_port = aresx_sitous(channel->udp_port);
        }
        memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
               sizeof(server->addr.addrV4));
        break;
      case AF_INET6:
        sa = (void *)&saddr.sa6;
        salen = sizeof(saddr.sa6);
        memset(sa, 0, salen);
        saddr.sa6.sin6_family = AF_INET6;
        if (server->addr.udp_port) {
          saddr.sa6.sin6_port = aresx_sitous(server->addr.udp_port);
        } else {
          saddr.sa6.sin6_port = aresx_sitous(channel->udp_port);
        }
        memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
               sizeof(server->addr.addrV6));
        break;
      default:
        return -1; /* LCOV_EXCL_LINE */
    }

  /* Acquire a socket. */
  s = socket(server->addr.family, SOCK_DGRAM, 0);
  if (s == ARES_SOCKET_BAD)
    return -1;

  /* Set the socket non-blocking. */
  if (configure_socket(s, server->addr.family, channel) < 0)
    {
      sclose(s);
      return -1;
    }

  /* Give the user's configuration callback a chance to veto/adjust the
   * socket before we connect. */
  if (channel->sock_config_cb)
    {
      int err = channel->sock_config_cb(s, SOCK_DGRAM,
                                        channel->sock_config_cb_data);
      if (err < 0)
        {
          sclose(s);
          return err;
        }
    }

  /* Connect to the server.  connect() on a UDP socket just fixes the peer
   * address; EINPROGRESS/EWOULDBLOCK are tolerated like in the TCP path. */
  if (connect(s, sa, salen) == -1)
    {
      int err = SOCKERRNO;

      if (err != EINPROGRESS && err != EWOULDBLOCK)
        {
          sclose(s);
          return -1;
        }
    }

  /* Notify the user's creation callback. */
  if (channel->sock_create_cb)
    {
      int err = channel->sock_create_cb(s, SOCK_DGRAM,
                                        channel->sock_create_cb_data);
      if (err < 0)
        {
          sclose(s);
          return err;
        }
    }

  /* Tell the event integration we now want read events on this socket. */
  SOCK_STATE_CALLBACK(channel, s, 1, 0);

  server->udp_socket = s;
  return 0;
}
|
1167
|
+
|
1168
|
+
/* Return 1 if the question section of the DNS reply 'abuf' (length 'alen')
 * contains exactly the same questions (name, case-insensitively; type;
 * class) as the request 'qbuf' (length 'qlen'); return 0 otherwise or on
 * any parse failure.  Used to match replies to outstanding queries. */
static int same_questions(const unsigned char *qbuf, int qlen,
                          const unsigned char *abuf, int alen)
{
  /* One cursor-plus-decoded-question record for each buffer. */
  struct {
    const unsigned char *p;    /* current parse position */
    int qdcount;               /* number of questions in the buffer */
    char *name;                /* expanded name (heap; freed below) */
    long namelen;              /* encoded length of the name */
    int type;
    int dnsclass;
  } q, a;
  int i, j;

  /* Both buffers must at least hold a DNS header. */
  if (qlen < HFIXEDSZ || alen < HFIXEDSZ)
    return 0;

  /* Extract qdcount from the request and reply buffers and compare them. */
  q.qdcount = DNS_HEADER_QDCOUNT(qbuf);
  a.qdcount = DNS_HEADER_QDCOUNT(abuf);
  if (q.qdcount != a.qdcount)
    return 0;

  /* For each question in qbuf, find it in abuf. */
  q.p = qbuf + HFIXEDSZ;
  for (i = 0; i < q.qdcount; i++)
    {
      /* Decode the question in the query. */
      if (ares_expand_name(q.p, qbuf, qlen, &q.name, &q.namelen)
          != ARES_SUCCESS)
        return 0;
      q.p += q.namelen;
      /* Bounds check: the fixed type/class fields must fit in the buffer. */
      if (q.p + QFIXEDSZ > qbuf + qlen)
        {
          ares_free(q.name);
          return 0;
        }
      q.type = DNS_QUESTION_TYPE(q.p);
      q.dnsclass = DNS_QUESTION_CLASS(q.p);
      q.p += QFIXEDSZ;

      /* Search for this question in the answer. */
      a.p = abuf + HFIXEDSZ;
      for (j = 0; j < a.qdcount; j++)
        {
          /* Decode the question in the answer. */
          if (ares_expand_name(a.p, abuf, alen, &a.name, &a.namelen)
              != ARES_SUCCESS)
            {
              ares_free(q.name);
              return 0;
            }
          a.p += a.namelen;
          if (a.p + QFIXEDSZ > abuf + alen)
            {
              ares_free(q.name);
              ares_free(a.name);
              return 0;
            }
          a.type = DNS_QUESTION_TYPE(a.p);
          a.dnsclass = DNS_QUESTION_CLASS(a.p);
          a.p += QFIXEDSZ;

          /* Compare the decoded questions.  Name comparison is
           * case-insensitive per DNS rules. */
          if (strcasecmp(q.name, a.name) == 0 && q.type == a.type
              && q.dnsclass == a.dnsclass)
            {
              ares_free(a.name);
              break;  /* found; a.name already freed on this path */
            }
          ares_free(a.name);
        }

      ares_free(q.name);
      /* Inner loop ran to completion => this question has no match. */
      if (j == a.qdcount)
        return 0;
    }
  return 1;
}
|
1246
|
+
|
1247
|
+
static int same_address(struct sockaddr *sa, struct ares_addr *aa)
|
1248
|
+
{
|
1249
|
+
void *addr1;
|
1250
|
+
void *addr2;
|
1251
|
+
|
1252
|
+
if (sa->sa_family == aa->family)
|
1253
|
+
{
|
1254
|
+
switch (aa->family)
|
1255
|
+
{
|
1256
|
+
case AF_INET:
|
1257
|
+
addr1 = &aa->addrV4;
|
1258
|
+
addr2 = &((struct sockaddr_in *)sa)->sin_addr;
|
1259
|
+
if (memcmp(addr1, addr2, sizeof(aa->addrV4)) == 0)
|
1260
|
+
return 1; /* match */
|
1261
|
+
break;
|
1262
|
+
case AF_INET6:
|
1263
|
+
addr1 = &aa->addrV6;
|
1264
|
+
addr2 = &((struct sockaddr_in6 *)sa)->sin6_addr;
|
1265
|
+
if (memcmp(addr1, addr2, sizeof(aa->addrV6)) == 0)
|
1266
|
+
return 1; /* match */
|
1267
|
+
break;
|
1268
|
+
default:
|
1269
|
+
break; /* LCOV_EXCL_LINE */
|
1270
|
+
}
|
1271
|
+
}
|
1272
|
+
return 0; /* different */
|
1273
|
+
}
|
1274
|
+
|
1275
|
+
/* Finish 'query' with the given status and (optional) answer buffer:
 * detach it from any server send queues that still reference its tcpbuf,
 * invoke the user callback, free the query, and — unless STAYOPEN is set —
 * close all sockets once no queries remain on the channel. */
static void end_query (ares_channel channel, struct query *query, int status,
                       unsigned char *abuf, int alen)
{
  int i;

  /* First we check to see if this query ended while one of our send
   * queues still has pointers to it.
   */
  for (i = 0; i < channel->nservers; i++)
    {
      struct server_state *server = &channel->servers[i];
      struct send_request *sendreq;
      for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
        if (sendreq->owner_query == query)
          {
            sendreq->owner_query = NULL;
            /* A queued sendreq never owns a private copy while its query is
             * alive (see ares__send_query). */
            assert(sendreq->data_storage == NULL);
            if (status == ARES_SUCCESS)
              {
                /* We got a reply for this query, but this queued sendreq
                 * points into this soon-to-be-gone query's tcpbuf. Probably
                 * this means we timed out and queued the query for
                 * retransmission, then received a response before actually
                 * retransmitting. This is perfectly fine, so we want to keep
                 * the connection running smoothly if we can. But in the worst
                 * case we may have sent only some prefix of the query, with
                 * some suffix of the query left to send. Also, the buffer may
                 * be queued on multiple queues. To prevent dangling pointers
                 * to the query's tcpbuf and handle these cases, we just give
                 * such sendreqs their own copy of the query packet.
                 */
                sendreq->data_storage = ares_malloc(sendreq->len);
                if (sendreq->data_storage != NULL)
                  {
                    memcpy(sendreq->data_storage, sendreq->data, sendreq->len);
                    sendreq->data = sendreq->data_storage;
                  }
              }
            if ((status != ARES_SUCCESS) || (sendreq->data_storage == NULL))
              {
                /* We encountered an error (probably a timeout, suggesting the
                 * DNS server we're talking to is probably unreachable,
                 * wedged, or severely overloaded) or we couldn't copy the
                 * request, so mark the connection as broken. When we get to
                 * process_broken_connections() we'll close the connection and
                 * try to re-send requests to another server.
                 */
                server->is_broken = 1;
                /* Just to be paranoid, zero out this sendreq... */
                sendreq->data = NULL;
                sendreq->len = 0;
              }
          }
    }

  /* Invoke the callback */
  query->callback(query->arg, status, query->timeouts, abuf, alen);
  ares__free_query(query);

  /* Simple cleanup policy: if no queries are remaining, close all network
   * sockets unless STAYOPEN is set.
   */
  if (!(channel->flags & ARES_FLAG_STAYOPEN) &&
      ares__is_list_empty(&(channel->all_queries)))
    {
      for (i = 0; i < channel->nservers; i++)
        ares__close_sockets(channel, &channel->servers[i]);
    }
}
|
1344
|
+
|
1345
|
+
/* Release all resources owned by 'query': unlink it from every channel
 * bookkeeping list it may be on, poison its callback fields, and free its
 * buffers and the query object itself.  Callers must not use 'query'
 * afterwards. */
void ares__free_query(struct query *query)
{
  /* Remove the query from all the lists in which it is linked */
  ares__remove_from_list(&(query->queries_by_qid));
  ares__remove_from_list(&(query->queries_by_timeout));
  ares__remove_from_list(&(query->queries_to_server));
  ares__remove_from_list(&(query->all_queries));
  /* Zero out some important stuff, to help catch bugs */
  query->callback = NULL;
  query->arg = NULL;
  /* Deallocate the memory associated with the query */
  ares_free(query->tcpbuf);
  ares_free(query->server_info);
  ares_free(query);
}
|