dnsruby 1.59.0 → 1.59.1
- checksums.yaml +4 -4
- data/RELEASE_NOTES.md +6 -0
- data/Rakefile +1 -0
- data/dnsruby.gemspec +1 -0
- data/lib/dnsruby/message/encoder.rb +11 -3
- data/lib/dnsruby/packet_sender.rb +41 -17
- data/lib/dnsruby/resource/TSIG.rb +8 -2
- data/lib/dnsruby/version.rb +1 -1
- data/test/spec_helper.rb +10 -0
- data/test/tc_cache.rb +21 -21
- data/test/tc_single_resolver.rb +14 -14
- data/test/tc_tcp_pipelining.rb +86 -42
- data/test/test_dnsserver.rb +109 -88
- metadata +16 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 47fddae1c07436f1882cf04f2815f0f0f4e57991
+  data.tar.gz: 799ab6fe078f84d5ca9d651c4ca6701ed862a005
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7d83561ecdb469f15880d544c6d15b85f026700d24248a9e6c51804b85cc7a90da89be23a8d348b20dbe84519434e6f661b877881cac0bd533ea2fe9c8e7056c
+  data.tar.gz: 9969e39d04e70734c7ee077bfb137f7a36d1e2726a1ab0719cb31f322c34365a774efd7dd0edc2e11bd8418fa0d2a60250384e23bca35d0aca84a35c7011c467
data/RELEASE_NOTES.md
CHANGED
data/Rakefile
CHANGED
data/dnsruby.gemspec
CHANGED
@@ -35,6 +35,7 @@ DNSSEC NSEC3 support.'
   s.add_development_dependency 'minitest', '~> 5.4'
   s.add_development_dependency 'rubydns', '~> 1.0'
   s.add_development_dependency 'nio4r', '~> 1.1'
+  s.add_development_dependency 'minitest-display', '>= 0.3.0'
 
   if RUBY_VERSION >= "1.9.3"
     s.add_development_dependency 'coveralls', '~> 0.7'
data/lib/dnsruby/message/encoder.rb
CHANGED
@@ -15,7 +15,11 @@ class MessageEncoder #:nodoc: all
   end
 
   def put_pack(template, *d)
-    @data << d.pack(template)
+    begin
+      @data << d.pack(template)
+    rescue Encoding::CompatibilityError => e
+      raise Dnsruby::OtherResolvError.new("IDN support currently requires punycode string")
+    end
   end
 
   def put_length16
@@ -28,8 +32,12 @@ class MessageEncoder #:nodoc: all
   end
 
   def put_string(d)
-    self.put_pack("C", d.length)
-    @data << d
+    begin
+      self.put_pack("C", d.length)
+      @data << d
+    rescue Encoding::CompatibilityError => e
+      raise Dnsruby::OtherResolvError.new("IDN support currently requires punycode string")
+    end
   end
 
   def put_string_list(ds)
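With the two encoder changes above, packing a name that is not plain ASCII/punycode now surfaces as Dnsruby::OtherResolvError ("IDN support currently requires punycode string") instead of a bare Encoding::CompatibilityError. A minimal usage sketch (not part of the diff; the simpleidn gem and the example domain are assumptions) of converting an internationalized name to its punycode form when that error is raised:

    require 'dnsruby'
    require 'simpleidn'   # assumption: third-party gem providing SimpleIDN.to_ascii

    resolver = Dnsruby::Resolver.new
    name = "bücher.example"     # hypothetical IDN
    begin
      resolver.query(name, Dnsruby::Types.A)
    rescue Dnsruby::OtherResolvError
      # 1.59.1 raises this instead of leaking Encoding::CompatibilityError;
      # retry with the ASCII (punycode) form of the name.
      resolver.query(SimpleIDN.to_ascii(name), Dnsruby::Types.A)
    end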
data/lib/dnsruby/packet_sender.rb
CHANGED
@@ -392,7 +392,7 @@ module Dnsruby
         end
       rescue Errno::EISCONN
         #already connected, do nothing and reuse!
-      rescue IOError #close by remote host, reconnect
+      rescue IOError, Errno::ECONNRESET #close by remote host, reconnect
         @pipeline_socket = nil
         Dnsruby.log.debug("Connection closed - recreating socket")
       end
@@ -401,21 +401,27 @@ module Dnsruby
       create_pipeline_socket = -> do
         @tcp_pipeline_local_port = src_port
         src_address = @ipv6 ? @src_address6 : @src_address
-        @pipeline_socket = Socket.new(AF_INET, SOCK_STREAM, 0)
-        @pipeline_socket.bind(Addrinfo.tcp(src_address, src_port))
-        @pipeline_socket.connect(sockaddr)
-        Dnsruby.log.debug("Creating socket #{src_address}:#{src_port}")
-        @use_counts[@pipeline_socket] = 0
+        begin
+          @pipeline_socket = Socket.new(AF_INET, SOCK_STREAM, 0)
+          @pipeline_socket.bind(Addrinfo.tcp(src_address, src_port))
+          @pipeline_socket.connect(sockaddr)
+          Dnsruby.log.debug("Creating socket #{src_address}:#{src_port}")
+          @use_counts[@pipeline_socket] = 0
+        rescue Exception => e
+          @pipeline_socket = nil
+          raise e
+        end
       end
 
       # Don't combine the following 2 statements; the reuse lambda can set the
       # socket to nil and if so we'd want to call the create lambda to recreate it.
       reuse_pipeline_socket.() if @pipeline_socket
+      new_socket = @pipeline_socket.nil?
       create_pipeline_socket.() unless @pipeline_socket
 
       @use_counts[@pipeline_socket] += 1
 
-      @pipeline_socket
+      [@pipeline_socket, new_socket]
     end
 
     # This method sends the packet using the built-in pure Ruby event loop, with no dependencies.
@@ -437,10 +443,11 @@ module Dnsruby
       if (use_tcp)
         begin
           if (@tcp_pipelining)
-            socket = tcp_pipeline_socket(src_port)
+            socket, new_socket = tcp_pipeline_socket(src_port)
             src_port = @tcp_pipeline_local_port
           else
             socket = TCPSocket.new(@server, @port, src_address, src_port)
+            new_socket = true
           end
         rescue Errno::EBADF, Errno::ENETUNREACH => e
           # Can't create a connection
@@ -458,6 +465,7 @@ module Dnsruby
           # ipv6 = @src_address =~ /:/
           socket = UDPSocket.new(@ipv6 ? Socket::AF_INET6 : Socket::AF_INET)
         end
+        new_socket = true
         socket.bind(src_address, src_port)
         socket.connect(@server, @port)
       end
@@ -465,7 +473,8 @@ module Dnsruby
     rescue Exception => e
       if (socket!=nil)
         begin
-          socket.close
+          #let the select thread close the socket if tcp_pipelining
+          socket.close unless @tcp_pipelining && !new_socket
         rescue Exception
         end
       end
@@ -473,9 +482,15 @@ module Dnsruby
       # Maybe try a max number of times?
       if ((e.class != Errno::EADDRINUSE) || (numtries > 50) ||
           ((e.class == Errno::EADDRINUSE) && (src_port == @src_port[0])))
-
-
-
+        err_msg = "dnsruby can't connect to #{@server}:#{@port} from #{src_address}:#{src_port}, use_tcp=#{use_tcp}, exception = #{e.class}, #{e} #{e.backtrace}"
+        err=IOError.new(err_msg)
+        Dnsruby.log.error( "#{err}")
+        Dnsruby.log.error(e.backtrace)
+        if @tcp_pipelining
+          st.push_exception_to_select(client_query_id, client_queue, SocketEofResolvError.new(err_msg), nil)
+        else
+          st.push_exception_to_select(client_query_id, client_queue, err, nil)
+        end
         return
       end
     end
@@ -498,15 +513,24 @@ module Dnsruby
         socket.send(lenmsg, 0)
       end
       socket.send(query_bytes, 0)
-
-      #
+
+      # The select thread will now wait for the response and send that or a
+      # timeout back to the client_queue.
       st.add_to_select(query_settings)
     rescue Exception => e
-
+      err_msg = "Send failed to #{@server}:#{@port} from #{src_address}:#{src_port}, use_tcp=#{use_tcp}, exception : #{e}"
+      err=IOError.new(err_msg)
       Dnsruby.log.error { "#{err}" }
-
+      Dnsruby.log.error(e.backtrace)
+      if @tcp_pipelining
+        st.push_exception_to_select(client_query_id, client_queue, SocketEofResolvError.new(err_msg), nil) if new_socket
+      else
+        st.push_exception_to_select(client_query_id, client_queue, err, nil)
+      end
      begin
-        socket.close
+        #we let the select_thread close the socket when doing tcp
+        #pipelining
+        socket.close unless @tcp_pipelining && !new_socket
      rescue Exception
      end
      return
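The packet_sender.rb changes above make tcp_pipeline_socket return both the socket and a new_socket flag, treat Errno::ECONNRESET like a remote close, and, when TCP pipelining is enabled, report connect/send failures to the client queue as Dnsruby::SocketEofResolvError so the caller can resend on a fresh connection. A hedged sketch modeled on data/test/tc_tcp_pipelining.rb in this release; the nameserver address/port and the exact attribute names passed to Resolver.new are assumptions:

    require 'dnsruby'

    resolver = Dnsruby::Resolver.new(nameserver: '127.0.0.1', port: 53937,
                                     use_tcp: true, tcp_pipelining: true,
                                     do_caching: false, packet_timeout: 10)

    queue = Queue.new
    resolver.send_async(Dnsruby::Message.new('example.com'), queue, 1)

    query_id, response, exception = queue.pop   # queue entries are [id, response, exception]
    if exception.is_a?(Dnsruby::SocketEofResolvError)
      # The pipelined connection was closed by the server before this query
      # was answered; resending simply opens a new connection.
      resolver.send_async(Dnsruby::Message.new('example.com'), queue, 2)
    end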
data/lib/dnsruby/resource/TSIG.rb
CHANGED
@@ -54,6 +54,7 @@ module Dnsruby
     HMAC_MD5 = Name.create("HMAC-MD5.SIG-ALG.REG.INT.")
     HMAC_SHA1 = Name.create("hmac-sha1.")
     HMAC_SHA256 = Name.create("hmac-sha256.")
+    HMAC_SHA512 = Name.create("hmac-sha512.")
 
     DEFAULT_FUDGE = 300
 
@@ -157,6 +158,8 @@ module Dnsruby
         mac = OpenSSL::HMAC.digest(OpenSSL::Digest::SHA1.new, key, data)
       elsif (algorithm == HMAC_SHA256)
         mac = OpenSSL::HMAC.digest(OpenSSL::Digest::SHA256.new, key, data)
+      elsif (algorithm == HMAC_SHA512)
+        mac = OpenSSL::HMAC.digest(OpenSSL::Digest::SHA512.new, key, data)
       else
         # Should we allow client to pass in their own signing function?
         raise VerifyError.new("Algorithm #{algorithm} unsupported by TSIG")
@@ -515,6 +518,7 @@ module Dnsruby
    # * hmac-md5
    # * hmac-sha1
    # * hmac-sha256
+    # * hmac-sha512
    def algorithm=(alg)
      if (alg.class == String)
        if (alg.downcase=="hmac-md5")
@@ -523,11 +527,13 @@ module Dnsruby
          @algorithm = HMAC_SHA1;
        elsif (alg.downcase=="hmac-sha256")
          @algorithm = HMAC_SHA256;
+        elsif (alg.downcase=="hmac-sha512")
+          @algorithm = HMAC_SHA512;
        else
          raise ArgumentError.new("Invalid TSIG algorithm")
        end
      elsif (alg.class == Name)
-        if (alg!=HMAC_MD5 && alg!=HMAC_SHA1 && alg!=HMAC_SHA256)
+        if (alg!=HMAC_MD5 && alg!=HMAC_SHA1 && alg!=HMAC_SHA256 && alg!=HMAC_SHA512)
          raise ArgumentException.new("Invalid TSIG algorithm")
        end
        @algorithm=alg
@@ -590,4 +596,4 @@ module Dnsruby
      end
    end
  end
-end
+end
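The TSIG.rb changes above add HMAC-SHA512 alongside MD5, SHA1 and SHA256 for signing and verifying messages. A hedged sketch of signing queries with the new algorithm; only the "hmac-sha512" string and the key/algorithm field names come from the diff, while the RR.create hash form, the key name, the key material and the server are assumptions:

    require 'dnsruby'

    tsig = Dnsruby::RR.create(
      name:      'transfer-key',                           # hypothetical key name
      type:      Dnsruby::Types.TSIG,
      klass:     Dnsruby::Classes.ANY,
      key:       'b2hhaSBuZ29vcXVlZWt1IHNoYWl0aDVFZQ==',   # hypothetical base64 secret
      algorithm: 'hmac-sha512'                             # newly accepted value
    )

    res = Dnsruby::Resolver.new(nameserver: 'ns.example.com')  # hypothetical server
    res.tsig = tsig
    res.query('example.com', Dnsruby::Types.SOA)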
data/lib/dnsruby/version.rb
CHANGED
data/test/spec_helper.rb
CHANGED
@@ -14,7 +14,17 @@ end
 
 require 'minitest'
 require 'minitest/autorun'
+require 'minitest/display'
 
+MiniTest::Display.options = {
+  suite_names: true,
+  color: true,
+  print: {
+    success: ".",
+    failure: "F",
+    error: "R"
+  }
+}
 # This is in a self invoking anonymous lambda so local variables do not
 # leak to the outer scope.
 -> do
data/test/tc_cache.rb
CHANGED
@@ -67,27 +67,27 @@ class TestCache < Minitest::Test
     Dnsruby::Cache.max_size=1
     res = Resolver.new()
     Dnsruby::PacketSender.clear_caches()
-    assert
+    assert(Dnsruby::PacketSender.recursive_cache_length == 0)
     msg = res.query("example.com")
-    assert
-    assert
+    assert(!msg.cached)
+    assert(Dnsruby::PacketSender.recursive_cache_length == 1)
     msg = res.query("example.com")
-    assert
-    assert
+    assert(msg.cached)
+    assert(Dnsruby::PacketSender.recursive_cache_length == 1)
     msg = res.query("google.com")
-    assert
-    assert
+    assert(!msg.cached)
+    assert(Dnsruby::PacketSender.recursive_cache_length == 1)
     msg = res.query("example.com")
-    assert
-    assert
+    assert(!msg.cached)
+    assert(Dnsruby::PacketSender.recursive_cache_length == 1)
     Dnsruby::Cache.max_size=2
-    assert
+    assert(Dnsruby::PacketSender.recursive_cache_length == 1)
     msg = res.query("example.com")
-    assert
-    assert
+    assert(msg.cached)
+    assert(Dnsruby::PacketSender.recursive_cache_length == 1)
     msg = res.query("google.com")
-    assert
-    assert
+    assert(!msg.cached)
+    assert(Dnsruby::PacketSender.recursive_cache_length == 2)
   end
 
   def test_resolver_do_caching
@@ -98,7 +98,7 @@ class TestCache < Minitest::Test
     res.do_caching = false
     assert(!res.do_caching)
     res.udp_size = 4096
-    ret = res.query("
+    ret = res.query("net-dns.org", Types.TXT)
     # ret = res.query("overflow.dnsruby.validation-test-servers.nominet.org.uk", Types.TXT)
     # print "#{ret}\n"
     assert(!ret.cached)
@@ -106,12 +106,12 @@ class TestCache < Minitest::Test
     assert(ret.header.aa)
     # Store the ttls
     first_ttls = ret.answer.rrset(
-      "
+      "net-dns.org", Types.TXT).ttl
     # "overflow.dnsruby.validation-test-servers.nominet.org.uk", Types.TXT).ttl
     # Wait a while
     sleep(1)
     # Ask for the same records
-    ret = res.query("
+    ret = res.query("net-dns.org", Types.TXT)
     # ret = res.query("overflow.dnsruby.validation-test-servers.nominet.org.uk", Types.TXT)
     # print "#{ret}\n"
     assert(ret.rcode == RCode.NoError)
@@ -125,7 +125,7 @@ class TestCache < Minitest::Test
     res = SingleResolver.new("ns.nlnetlabs.nl.")
     # res = SingleResolver.new("ns0.validation-test-servers.nominet.org.uk.")
     res.udp_size = 4096
-    query = Message.new("
+    query = Message.new("net-dns.org", Types.TXT)
     # query = Message.new("overflow.dnsruby.validation-test-servers.nominet.org.uk", Types.TXT)
     ret = res.send_message(query)
     # print "#{ret}\n"
@@ -134,19 +134,19 @@ class TestCache < Minitest::Test
     assert(ret.header.aa)
     # Store the ttls
     first_ttls = ret.answer.rrset(
-      "
+      "net-dns.org", Types.TXT).ttl
     # "overflow.dnsruby.validation-test-servers.nominet.org.uk", Types.TXT).ttl
     # Wait a while
     sleep(1)
     # Ask for the same records
-    query = Message.new("
+    query = Message.new("net-dns.org", Types.TXT)
     # query = Message.new("overflow.dnsruby.validation-test-servers.nominet.org.uk", Types.TXT)
     ret = res.send_message(query)
     # print "#{ret}\n"
     assert(ret.rcode == RCode.NoError)
     assert(ret.cached)
     second_ttls = ret.answer.rrset(
-      "
+      "net-dns.org", Types.TXT).ttl
     # "overflow.dnsruby.validation-test-servers.nominet.org.uk", Types.TXT).ttl
     # make sure the ttl is less the time we waited
     assert((second_ttls == first_ttls - 1) || (second_ttls == first_ttls - 2),
data/test/tc_single_resolver.rb
CHANGED
@@ -195,20 +195,20 @@ class TestSingleResolver < Minitest::Test
     assert_equal('10.0.1.128', ip.to_s, 'nameserver() looks up cname.')
   end
 
-  def test_truncated_response
-    res = SingleResolver.new
-    # print "Dnssec = #{res.dnssec}\n"
-    # res.server=('ns0.validation-test-servers.nominet.org.uk')
-    res.server=('ns.nlnetlabs.nl')
-    res.packet_timeout = 15
-    begin
-      m = res.query("overflow.net-dns.org", 'txt')
-      assert(m.header.ancount == 62, "62 answer records expected, got #{m.header.ancount}")
-      assert(!m.header.tc, "Message was truncated!")
-    rescue ResolvTimeout => e
-    rescue ServFail => e # not sure why, but we get this on Travis...
-    end
-  end
+  # def test_truncated_response
+  #   res = SingleResolver.new
+  #   # print "Dnssec = #{res.dnssec}\n"
+  #   # res.server=('ns0.validation-test-servers.nominet.org.uk')
+  #   res.server=('ns.nlnetlabs.nl')
+  #   res.packet_timeout = 15
+  #   begin
+  #     m = res.query("overflow.net-dns.org", 'txt')
+  #     assert(m.header.ancount == 62, "62 answer records expected, got #{m.header.ancount}")
+  #     assert(!m.header.tc, "Message was truncated!")
+  #   rescue ResolvTimeout => e
+  #   rescue ServFail => e # not sure why, but we get this on Travis...
+  #   end
+  # end
 
   def test_illegal_src_port
     # Try to set src_port to an illegal value - make sure error raised, and port OK
data/test/tc_tcp_pipelining.rb
CHANGED
@@ -17,27 +17,30 @@
 require_relative 'spec_helper'
 require_relative 'test_dnsserver'
 
-# The TCPPipeliningServer links our
+# The TCPPipeliningServer links our NioTcpPipeliningHandler on
 # the loopback interface.
-class TCPPipeliningServer < RubyDNS::
-
-  PORT = 53937
+class TCPPipeliningServer < RubyDNS::Server
+  PORT = 53937
   IP = '127.0.0.1'
 
+  DEFAULT_MAX_REQUESTS = 4
+  DEFAULT_TIMEOUT = 3
+
   @@stats = Stats.new
 
   def self.stats
     @@stats
   end
 
+  def process(name, resource_class, transaction)
+    @logger.debug "name: #{name}"
+    transaction.respond!("93.184.216.34", { resource_class: Resolv::DNS::Resource::IN::A })
+  end
+
   def run
     fire(:setup)
 
-    link
-      IP,
-      PORT,
-      TCPPipeliningHandler::DEFAULT_MAX_REQUESTS,
-      TCPPipeliningHandler::DEFAULT_TIMEOUT)
+    link NioTcpPipeliningHandler.new(self, IP, PORT, DEFAULT_MAX_REQUESTS, DEFAULT_TIMEOUT) #4 max request
 
     fire(:start)
   end
@@ -45,8 +48,6 @@ end
 
 class TestTCPPipelining < Minitest::Test
 
-  QUERIES = %w(psi.net passport.net verisigninc.com google.com yahoo.com apple.com)
-
   class << self
     attr_accessor :query_id
   end
@@ -56,6 +57,7 @@ class TestTCPPipelining < Minitest::Test
       Celluloid.boot
       # By default, Celluloid logs output to console. Use Dnsruby.log instead
       Celluloid.logger = Dnsruby.log
+      #Dnsruby.log.level = Logger::ERROR
       @initialized = true
       @query_id = 0
     end
@@ -63,9 +65,6 @@ class TestTCPPipelining < Minitest::Test
 
   def setup
     self.class.init
-    @@upstream ||= RubyDNS::Resolver.new([
-      [:udp, '193.0.14.129', 53],
-      [:tcp, '193.0.14.129', 53]])
 
     # Instantiate a new server that uses our tcp pipelining handler
     # For each query the server sends the query upstream (193.0.14.129)
@@ -74,11 +73,7 @@ class TestTCPPipelining < Minitest::Test
       asynchronous: true
     }
 
-    @@supervisor ||= RubyDNS::run_server(options)
-      otherwise do |transaction|
-        transaction.passthrough!(@@upstream)
-      end
-    end
+    @@supervisor ||= RubyDNS::run_server(options)
 
     # Instantiate our resolver. The resolver will use the same pipeline as much as possible.
     # If a timeout occurs or max_request_per_connection a new connection should be initiated
@@ -95,9 +90,11 @@ class TestTCPPipelining < Minitest::Test
 
   # Send x number of queries asynchronously to our resolver
   def send_async_messages(number_of_messages, queue, wait_seconds = 0)
-
+    Celluloid.logger.debug "Sending #{number_of_messages} messages"
     number_of_messages.times do
-
+      name = "#{self.class.query_id}.com"
+      Celluloid.logger.debug "Sending #{name}"
+      message = Dnsruby::Message.new(name)
       # self.class.query_id identifies our query, must be different for each message
       @@resolver.send_async(message, queue, self.class.query_id)
       self.class.query_id += 1
@@ -117,9 +114,39 @@ class TestTCPPipelining < Minitest::Test
     end
   end
 
+  def accept_wait(accept_count, max)
+    i = 0
+    while TCPPipeliningServer.stats.accept_count < accept_count
+      sleep 0.5
+      i+=0.5
+      assert(i<max, "Max wait for accept reached #{TCPPipeliningServer.stats.accept_count} accepts < #{accept_count}")
+    end
+  end
+
+  def connection_wait(connection_count, max)
+    i = 0
+    while TCPPipeliningServer.stats.connections > connection_count
+      sleep 0.5
+      i+=0.5
+      assert(i<max, "Max wait for connection reached: #{TCPPipeliningServer.stats.connections} active connections > #{connection_count}")
+    end
+  end
+
+  def timeout_wait(timeout_count, max)
+    i = 0
+    while TCPPipeliningServer.stats.timeout_count < timeout_count
+      sleep 0.5
+      i+=0.5
+      assert(i<max, "Max wait for timeout reached #{TCPPipeliningServer.stats.timeout_count} timeounts < #{timeout_count}")
+    end
+  end
+
   # This test initiates multiple asynchronous requests and verifies they go on the same tcp
   # pipeline or a new one depending on timeouts
   def test_TCP_pipelining_timeout
+    Celluloid.logger.debug "test_TCP_pipelining_timeout"
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
+
     accept_count = TCPPipeliningServer.stats.accept_count
     timeout_count = TCPPipeliningServer.stats.timeout_count
 
@@ -133,8 +160,8 @@ class TestTCPPipelining < Minitest::Test
 
     assert_equal(accept_count + 1, TCPPipeliningServer.stats.accept_count)
 
-
-
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
+    timeout_wait(timeout_count + 1, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
 
     assert_equal(timeout_count + 1, TCPPipeliningServer.stats.timeout_count)
 
@@ -142,39 +169,48 @@ class TestTCPPipelining < Minitest::Test
     send_async_messages(3, query_queue)
     verify_responses(3, query_queue)
 
-
-
-    # Wait for the timeout to occur and check timeout_count
-    sleep TCPPipeliningHandler::DEFAULT_TIMEOUT + 0.5
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
+    timeout_wait(timeout_count + 2, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
 
+    assert_equal(accept_count + 2, TCPPipeliningServer.stats.accept_count)
     assert_equal(timeout_count + 2, TCPPipeliningServer.stats.timeout_count)
+
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
   end
 
   # Test timeout occurs and new connection is initiated inbetween 2 sends
   def test_TCP_pipelining_timeout_in_send
+    Celluloid.logger.debug "test_TCP_pipelining_timeout_in_send"
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
+
     accept_count = TCPPipeliningServer.stats.accept_count
     timeout_count = TCPPipeliningServer.stats.timeout_count
 
     query_queue = Queue.new
 
-    # Initiate another
+    # Initiate another 2 queries wait and then send a final query
     # Check accept_count. Wait for timeout and verify we got 2 additional timeouts.
-    send_async_messages(
-    verify_responses(
+    send_async_messages(2, query_queue)
+    verify_responses(2, query_queue)
+
+    accept_wait(accept_count+1, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
+
+    send_async_messages(1, query_queue)
+
+    verify_responses(1, query_queue)
 
     assert_equal(accept_count + 2, TCPPipeliningServer.stats.accept_count)
 
-
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
 
-
+    timeout_wait(timeout_count + 2, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
   end
 
   # Test that we get a SocketEofResolvError if the servers closes the socket before
   # all queries are answered
   def test_TCP_pipelining_socket_eof
-
-    timeout_count = TCPPipeliningServer.stats.timeout_count
-    max_count = TCPPipeliningServer.stats.max_count
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
 
     query_queue = Queue.new
 
@@ -183,11 +219,21 @@ class TestTCPPipelining < Minitest::Test
     # Verify we got max_count was incremented
     send_async_messages(6, query_queue)
 
-
+    responses = []
+
     6.times do
-
-
-
+      response = query_queue.pop
+      responses << response
+    end
+
+    responses.sort_by { |response| response[0] }
+
+    step = 0
+
+    responses.each do | response |
+      _response_id, response, exception = response
+      if step < TCPPipeliningServer::DEFAULT_MAX_REQUESTS
+        assert_nil(exception, "Exception not nil for msg #{step} < #{TCPPipeliningServer::DEFAULT_MAX_REQUESTS} requests")
         assert(response.is_a?(Dnsruby::Message))
       else
         assert_equal(Dnsruby::SocketEofResolvError, exception.class)
@@ -196,8 +242,6 @@ class TestTCPPipelining < Minitest::Test
       step += 1
     end
 
-
-    assert_equal(timeout_count, TCPPipeliningServer.stats.timeout_count)
-    assert_equal(max_count + 1, TCPPipeliningServer.stats.max_count)
+    connection_wait(0, TCPPipeliningServer::DEFAULT_TIMEOUT*5)
   end
 end
data/test/test_dnsserver.rb
CHANGED
@@ -17,22 +17,62 @@
 require 'rubydns'
 require 'nio'
 require 'socket'
+require 'thread'
 
-
+class SimpleTimers
+  def initialize
+    @events = {}
+  end
+
+  def empty?
+    @events.empty?
+  end
+
+  def after(seconds, &block)
+    eventTime = Time.now + seconds
+    @events[eventTime] ||= []
+    @events[eventTime] << block
+  end
+
+  def fire
+    now = Time.now
+
+    events = @events.select { |key, value| key <= now }
+
+    (events || []).each do |key, blocks|
+      blocks.each do |block|
+        block.call
+      end
+      @events.delete(key)
+    end
+  end
+
+  def wait_interval
+    next_event = @events.keys.min
+    next_event.nil? ? nil : next_event - Time.now
+  end
+end
+
+# NioTcpPipeliningHandler accepts new tcp connection and reads data from the sockets until
 # either the client closes the connection, @max_requests_per_connection is reached
 # or @timeout is attained.
 
 class NioTcpPipeliningHandler < RubyDNS::GenericHandler
 
   DEFAULT_MAX_REQUESTS = 4
-
+  DEFAULT_TIMEOUT = 3
   # TODO Add timeout
-  def initialize(server, host, port, max_requests = DEFAULT_MAX_REQUESTS)
+  def initialize(server, host, port, max_requests = DEFAULT_MAX_REQUESTS, timeout = DEFAULT_TIMEOUT)
     super(server)
     @max_requests_per_connection = max_requests
+    @timeout = timeout
     @socket = TCPServer.new(host, port)
     @count = {}
 
+    @server.class.stats.connections = @count.keys.count
+
+    @timers = SimpleTimers.new
+
     @selector = NIO::Selector.new
     monitor = @selector.register(@socket, :r)
     monitor.value = proc { accept }
@@ -62,18 +102,26 @@ class NioTcpPipeliningHandler < RubyDNS::GenericHandler
     _, _remote_port, remote_host = socket.peeraddr
     options = { peer: remote_host }
 
-    input_data = RubyDNS::StreamTransport.read_chunk(socket)
-    response = process_query(input_data, options)
-    RubyDNS::StreamTransport.write_message(socket, response)
-
+    new_connection = @count[socket].nil?
     @count[socket] ||= 0
     @count[socket] += 1
+    @server.class.stats.connection_accept(new_connection, @count.keys.count)
+
+    #we read all data until timeout
+    input_data = RubyDNS::StreamTransport.read_chunk(socket)
 
+    if @count[socket] <= @max_requests_per_connection
+      response = process_query(input_data, options)
+      RubyDNS::StreamTransport.write_message(socket, response)
+    end
+
+=begin
     if @count[socket] >= @max_requests_per_connection
       _, port, host = socket.peeraddr
       @logger.debug("*** max request for #{host}:#{port}")
       remove(socket)
     end
+=end
   rescue EOFError
     _, port, host = socket.peeraddr
     @logger.debug("*** #{host}:#{port} disconnected")
@@ -81,118 +129,92 @@ class NioTcpPipeliningHandler < RubyDNS::GenericHandler
     remove(socket)
   end
 
-  def remove(socket)
+  def remove(socket, update_connections=true)
     @logger.debug("Removing socket from selector")
     socket.close rescue nil
-    @selector.deregister(socket)
-    @count.delete(socket)
+    @selector.deregister(socket) rescue nil
+    socket_count = @count.delete(socket)
+    @server.class.stats.connections = @count.keys.count if update_connections
+    socket_count
   end
 
   def create_selector_thread
     Thread.new do
       loop do
-
-
+        begin
+          @timers.fire
+          intervals = [@timers.wait_interval || 0.1, 0.1]
+
+          @selector.select(intervals.min > 0 ? intervals.min : 0.1) do
+            |monitor| monitor.value.call(monitor)
+          end
+
+          @logger.debug "Woke up"
+          break if @selector.closed?
+        rescue Exception => e
+          @logger.debug "Exception #{e}"
+          @logger.debug "Backtrace #{e.backtrace}"
+        end
       end
     end
   end
 
   def handle_connection(socket)
     @logger.debug "New connection"
-    @server.class.stats.increment_connection
-
     @logger.debug "Add socket to @selector"
+
     monitor = @selector.register(socket, :r)
     monitor.value = proc { process_socket(socket) }
-  end
-end
-
-class TCPPipeliningHandler < RubyDNS::GenericHandler
-  DEFAULT_MAX_REQUESTS = 4
-  DEFAULT_TIMEOUT = 3.0
-
-  def initialize(server, host, port, max_requests = DEFAULT_MAX_REQUESTS, timeout = DEFAULT_TIMEOUT)
-    super(server)
-    @timeout = timeout
-    @max_requests_per_connection = max_requests
-    @socket = TCPServer.new(host, port)
 
-
-
-
-
-
-
-
-
-
-  def run
-    loop { async.handle_connection(@socket.accept) }
-  end
-
-
-  def handle_connection(socket)
-    _, _remote_port, remote_host = socket.peeraddr
-    options = { peer: remote_host }
-
-    @logger.debug "New connection"
-    @server.class.stats.increment_connection
-
-    timeout = @timeout
-    msg_count = 0
-
-    loop do
-      start_time = Time.now
-      @logger.debug "Waiting for #{timeout} max"
-      sockets = ::IO.select([socket], nil , nil, timeout)
-      duration = Time.now - start_time
-
-      @logger.debug "Slept for #{duration}"
-
-      timeout -= duration
-
-      if sockets
-        input_data = RubyDNS::StreamTransport.read_chunk(socket)
-        response = process_query(input_data, options)
-        RubyDNS::StreamTransport.write_message(socket, response)
-
-        msg_count += 1
-        @logger.debug "Responded to message #{msg_count}"
-      else
-        @logger.debug "TCP session timeout!"
-        @server.class.stats.increment_timeout
-        break
+    @logger.debug "Add socket timer of #{@timeout}"
+    @timers.after(@timeout) do
+      @logger.debug "Timeout fired for socket #{socket}"
+      count = remove(socket, false)
+      unless count.nil?
+        @logger.debug "Timeout for socket #{socket}"
+        @logger.debug "Increasing timeout count"
+        @server.class.stats.connection_timeout(@count.keys.count)
       end
-
-      if msg_count >= @max_requests_per_connection
-        @logger.debug "Max number of requests attained (#{@max_requests_per_connection})"
-        @server.class.stats.increment_max
-        break
-      end
-
     end
-  rescue EOFError
-    @logger.warn "TCP session ended (closed by client)"
-  rescue DecodeError
-    @logger.warn "Could not decode incoming TCP data!"
-  ensure
-    socket.close
   end
 end
 
 # Stats collects statistics from our tcp handler
 class Stats
   def initialize()
-    @mutex
-    @accept_count
+    @mutex = Mutex.new
+    @accept_count = 0
     @timeout_count = 0
-    @max_count
+    @max_count = 0
+    @connections = 0
   end
 
   def increment_max; @mutex.synchronize { @max_count += 1 } end
   def increment_timeout; @mutex.synchronize { @timeout_count += 1 } end
   def increment_connection; @mutex.synchronize { @accept_count += 1 } end
 
+  def connection_timeout(active_connections)
+    @mutex.synchronize do
+      @timeout_count += 1
+      @connections = active_connections
+    end
+  end
+
+  def connection_accept(new_connection, active_connections)
+    @mutex.synchronize {
+      @connections = active_connections
+      @accept_count += 1 if new_connection
+    }
+  end
+
+  def connections=(active_connections)
+    @mutex.synchronize { @connections = active_connections }
+  end
+
+  def connections
+    @mutex.synchronize { @connections }
+  end
+
   def accept_count
     @mutex.synchronize { @accept_count }
   end
@@ -204,5 +226,4 @@ class Stats
   def max_count
     @mutex.synchronize { @max_count }
   end
-
 end
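The SimpleTimers class added to test_dnsserver.rb above is a minimal timer registry: after stores a block against a deadline, fire runs and discards any blocks whose deadline has passed, and wait_interval reports the time until the next deadline (nil when none are pending). A small usage sketch; the polling loop and the callback are illustrative, mirroring how the NioTcpPipeliningHandler selector thread drives it:

    timers = SimpleTimers.new
    timers.after(3) { puts "timeout fired" }   # hypothetical callback

    until timers.empty?
      interval = [timers.wait_interval || 0.1, 0.1].min
      sleep(interval > 0 ? interval : 0.1)     # never sleep a negative interval
      timers.fire                              # run and discard due blocks
    end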
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: dnsruby
 version: !ruby/object:Gem::Version
-  version: 1.59.0
+  version: 1.59.1
 platform: ruby
 authors:
 - Alex Dalitz
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2016-01-16 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: pry
@@ -100,6 +100,20 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '1.1'
+- !ruby/object:Gem::Dependency
+  name: minitest-display
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.3.0
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: 0.3.0
 - !ruby/object:Gem::Dependency
   name: coveralls
   requirement: !ruby/object:Gem::Requirement