resilient_socket 0.1.0 → 0.2.0
- data/README.md +13 -18
- data/lib/resilient_socket.rb +0 -3
- data/lib/resilient_socket/exceptions.rb +30 -4
- data/lib/resilient_socket/tcp_client.rb +148 -71
- data/lib/resilient_socket/version.rb +1 -1
- data/nbproject/private/private.xml +4 -0
- data/nbproject/private/rake-d.txt +4 -4
- data/nbproject/project.properties +0 -1
- data/test.log +0 -0
- data/test.log.working +0 -0
- data/test/tcp_client_test.rb +26 -7
- metadata +4 -4
- data/resilient_socket-0.0.1.gem +0 -0
- data/resilient_socket-0.0.2.gem +0 -0
data/README.md
CHANGED
@@ -36,30 +36,26 @@ to adjust the retry logic
 
 ### Dependencies
 
-- Ruby MRI 1.8.7 (or above) Or
-- SemanticLogger
+- Ruby MRI 1.8.7 (or above), Ruby 1.9.3, Or JRuby 1.6.3 (or above)
+- [SemanticLogger](http://github.com/ClarityServices/semantic-logger)
 
 ### Install
 
-    gem install
-
-To log to MongoDB
-
-    gem install mongo
+    gem install resilient_socket
 
 ### Future
 
--
+- Look into using https://github.com/tarcieri/nio4r for Async IO
 
 Development
 -----------
 
-Want to contribute to
+Want to contribute to Resilient Socket?
 
 First clone the repo and run the tests:
 
-    git clone git://github.com/ClarityServices/
-    cd
+    git clone git://github.com/ClarityServices/resilient_socket.git
+    cd resilient_socket
     jruby -S rake test
 
 Feel free to ping the mailing list with any issues and we'll try to resolve it.
@@ -69,20 +65,19 @@ Contributing
 
 
 Once you've made your great commits:
 
-1. [Fork](http://help.github.com/forking/)
+1. [Fork](http://help.github.com/forking/) resilient_socket
 2. Create a topic branch - `git checkout -b my_branch`
 3. Push to your branch - `git push origin my_branch`
-4. Create an [Issue](http://github.com/ClarityServices/
+4. Create an [Issue](http://github.com/ClarityServices/resilient_socket/issues) with a link to your branch
 5. That's it!
 
 Meta
 ----
 
-* Code: `git clone git://github.com/ClarityServices/
-* Home: <https://github.com/ClarityServices/
-*
-*
-* Gems: <http://rubygems.org/gems/semantic-logger>
+* Code: `git clone git://github.com/ClarityServices/resilient_socket.git`
+* Home: <https://github.com/ClarityServices/resilient_socket>
+* Bugs: <http://github.com/reidmorrison/resilient_socket/issues>
+* Gems: <http://rubygems.org/gems/resilient_socket>
 
 This project uses [Semantic Versioning](http://semver.org/).
 
data/lib/resilient_socket.rb
CHANGED

data/lib/resilient_socket/exceptions.rb
CHANGED
@@ -1,8 +1,34 @@
+require 'socket'
 module ResilientSocket
 
-  class
-  class
-
-
+  class ConnectionTimeout < ::SocketError; end
+  class ReadTimeout < ::SocketError; end
+
+  # Raised by ResilientSocket whenever a Socket connection failure has occurred
+  class ConnectionFailure < ::SocketError
+    # Returns the hostname and port against which the connection failure occurred
+    attr_reader :server
+
+    # Returns the original exception that caused the connection failure
+    # For example instances of Errno::ECONNRESET
+    attr_reader :cause
+
+    # Parameters
+    #   message [String]
+    #     Text message of the reason for the failure and/or where it occurred
+    #
+    #   server [String]
+    #     Hostname and port
+    #     For example: "localhost:2000"
+    #
+    #   cause [Exception]
+    #     Original Exception if any, otherwise nil
+    def initialize(message, server, cause=nil)
+      @server = server
+      @cause  = cause
+      super(message)
+    end
+
+  end
 
 end
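For orientation, a minimal sketch (not taken from the gem's docs) of how the enriched ConnectionFailure could be rescued by calling code; the server address and request text are illustrative:

```ruby
require 'resilient_socket'

begin
  client = ResilientSocket::TCPClient.new(:server => 'localhost:2000') # illustrative address
  client.write("PING\n")
rescue ResilientSocket::ConnectionFailure => exc
  # #server and #cause are the attributes added in this release
  puts "Connection failed against #{exc.server}"
  puts "Underlying error: #{exc.cause.class}: #{exc.cause.message}" if exc.cause
end
```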
data/lib/resilient_socket/tcp_client.rb
CHANGED
@@ -1,3 +1,5 @@
+require 'socket'
+require 'semantic_logger'
 module ResilientSocket
 
   # Make Socket calls resilient by adding timeouts, retries and specific
@@ -22,17 +24,25 @@ module ResilientSocket
   # Raises ReadTimeout when the read timeout is exceeded
   # Raises ConnectionFailure when a network error occurs whilst reading or writing
   #
+  # Note: Only the following methods currently have auto-reconnect enabled:
+  #  * read
+  #  * write
+  #
   # Future:
   #
   # * Automatic failover to another server should the current server not respond
   #   to a connection request by supplying an array of host names
+  # * Add auto-reconnect feature to sysread, syswrite, etc...
+  # * To be a drop-in replacement to TCPSocket should also need to implement the
+  #   following TCPSocket instance methods: :addr, :peeraddr
+  #
+  # Design Notes:
+  # * Does not inherit from Socket or TCP Socket because the socket instance
+  #   has to be completely destroyed and recreated after a connection failure
   #
   class TCPClient
     # Supports embedding user supplied data along with this connection
     # such as sequence number, etc.
-    # TCPClient will reset this value to nil on connection start and
-    # after a connection is re-established. For example on automatic reconnect
-    # due to a failed connection to the server
     attr_accessor :user_data
 
     # Returns [String] Name of the server connected to including the port number
@@ -44,6 +54,26 @@ module ResilientSocket
     # Returns [TrueClass|FalseClass] Whether send buffering is enabled for this connection
     attr_reader :buffered
 
+    @@reconnect_on_errors = [
+      Errno::ECONNABORTED,
+      Errno::ECONNREFUSED,
+      Errno::ECONNRESET,
+      Errno::EHOSTUNREACH,
+      Errno::EIO,
+      Errno::ENETDOWN,
+      Errno::ENETRESET,
+      Errno::EPIPE,
+      Errno::ETIMEDOUT,
+      EOFError,
+    ]
+
+    # Return the array of errors that will result in an automatic connection retry
+    #   To add any additional errors to the standard list:
+    #     ResilientSocket::TCPClient.reconnect_on_errors << Errno::EPROTO
+    def self.reconnect_on_errors
+      @@reconnect_on_errors
+    end
+
     # Create a connection, call the supplied block and close the connection on
     # completion of the block
     #
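The comment above already names the extension hook; spelled out as a runnable snippet (Errno::EPROTO is the example error the comment itself uses):

```ruby
require 'resilient_socket'

# Add an extra error class that should also trigger an automatic reconnect and retry
ResilientSocket::TCPClient.reconnect_on_errors << Errno::EPROTO

# Inspect the resulting list
puts ResilientSocket::TCPClient.reconnect_on_errors.inspect
```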
@@ -118,6 +148,13 @@ module ResilientSocket
     #   Number of seconds between connection retry attempts after the first failed attempt
     #   Default: 0.5
     #
+    # :retry_count [Fixnum]
+    #   Number of times to retry when calling #retry_on_connection_failure
+    #   This is independent of :connect_retry_count which still applies with
+    #   connection failures. This retry controls upto how many times to retry the
+    #   supplied block should a connection failure occurr during the block
+    #   Default: 3
+    #
     # :on_connect [Proc]
     #   Directly after a connection is established and before it is made available
     #   for use this Block is invoked.
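A sketch of constructing a client with the new :retry_count option alongside the existing connect options; the server address and :read_timeout value are illustrative, the other values mirror the documented defaults:

```ruby
require 'resilient_socket'

client = ResilientSocket::TCPClient.new(
  :server                 => 'localhost:2000', # illustrative address
  :read_timeout           => 60,               # illustrative value
  :connect_retry_count    => 10,               # documented default
  :connect_retry_interval => 0.5,              # documented default
  :retry_count            => 3                 # new in 0.2.0, documented default
)
```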
@@ -149,6 +186,7 @@ module ResilientSocket
       buffered = params.delete(:buffered)
       @buffered = buffered.nil? ? true : buffered
       @connect_retry_count = params.delete(:connect_retry_count) || 10
+      @retry_count = params.delete(:retry_count) || 3
       @connect_retry_interval = (params.delete(:connect_retry_interval) || 0.5).to_f
       @on_connect = params.delete(:on_connect)
 
@@ -156,7 +194,7 @@ module ResilientSocket
         raise "Missing mandatory :server or :servers" unless server = params.delete(:server)
         @servers = [ server ]
       end
-      @logger = SemanticLogger::Logger.new("#{self.class.name} #{@servers.inspect}", params
+      @logger = SemanticLogger::Logger.new("#{self.class.name} #{@servers.inspect}", params.delete(:log_level) || SemanticLogger::Logger.default_level)
       params.each_pair {|k,v| @logger.warn "Ignoring unknown option #{k} = #{v}"}
 
       # Connect to the Server
@@ -185,19 +223,22 @@ module ResilientSocket
     # Note: When multiple servers are supplied it will only try to connect to
     #       the subsequent servers once the retry count has been exceeded
     #
+    # Note: Calling #connect on an open connection will close the current connection
+    #       and create a new connection
     def connect
-      if @
+      @socket.close if @socket && !@socket.closed?
+      if @servers.size > 1
         # Try each server in sequence
         @servers.each_with_index do |server, server_id|
           begin
-
+            connect_to_server(server)
           rescue ConnectionFailure => exc
             # Raise Exception once it has also failed to connect to the last server
             raise(exc) if @servers.size <= (server_id + 1)
           end
         end
       else
-
+        connect_to_server(@servers.first)
       end
 
       # Invoke user supplied Block every time a new connection has been established
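Given the "Missing mandatory :server or :servers" message and the per-server loop above, a hedged sketch of supplying several servers (host names illustrative):

```ruby
require 'resilient_socket'

client = ResilientSocket::TCPClient.new(
  # Subsequent servers are only tried once the retry count against the
  # current server has been exceeded
  :servers => ['cache1:2000', 'cache2:2000']
)
```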
@@ -212,64 +253,86 @@ module ResilientSocket
     # Raises ConnectionFailure whenever the send fails
     #        For a description of the errors, see Socket#write
     #
-    def
-      @logger.trace("==>
-      @logger.benchmark_debug("==>
+    def write(data)
+      @logger.trace("#write ==> sending", data)
+      @logger.benchmark_debug("#write ==> sent #{data.length} bytes") do
         begin
           @socket.write(data)
         rescue SystemCallError => exception
-          @logger.warn "#
+          @logger.warn "#write Connection failure: #{exception.class}: #{exception.message}"
           close
-          raise ConnectionFailure.new("Send Connection failure: #{exception.class}: #{exception.message}")
+          raise ConnectionFailure.new("Send Connection failure: #{exception.class}: #{exception.message}", @server, exception)
         end
       end
     end
 
-    #
-    #
-    #
-    #
-    #
-    #
+    # Returns a response from the server
+    #
+    # Raises ConnectionTimeout when the time taken to create a connection
+    #        exceeds the :connect_timeout
+    #        Connection is closed
+    # Raises ConnectionFailure whenever Socket raises an error such as
+    #        Error::EACCESS etc, see Socket#connect for more information
+    #        Connection is closed
+    # Raises ReadTimeout if the timeout has been exceeded waiting for the
+    #        requested number of bytes from the server
+    #        Partial data will not be returned
+    #        Connection is _not_ closed and #read can be called again later
+    #        to read the respnse from the connection
     #
     # Parameters
-    #
-    #     The
-    #
+    #   length [Fixnum]
+    #     The number of bytes to return
+    #     #read will not return unitl 'length' bytes have been received from
+    #     the server
     #
     #   timeout [Float]
    #     Optional: Override the default read timeout for this read
    #     Number of seconds before raising ReadTimeout when no data has
    #     been returned
    #     Default: :read_timeout supplied to #initialize
-
-
-
+    #
+    # Note: After a ResilientSocket::ReadTimeout #read can be called again on
+    #       the same socket to read the response later.
+    #       If the application no longers want the connection after a
+    #       ResilientSocket::ReadTimeout, then the #close method _must_ be called
+    #       before calling _connect_ or _retry_on_connection_failure_ to create
+    #       a new connection
+    #
+    def read(length, buffer=nil, timeout=nil)
+      result = nil
+      @logger.benchmark_debug("#read <== read #{length} bytes") do
         # Block on data to read for @read_timeout seconds
         begin
           ready = IO.select([@socket], nil, [@socket], timeout || @read_timeout)
           unless ready
             @logger.warn "#read Timeout waiting for server to reply"
-            close
             raise ReadTimeout.new("Timedout after #{timeout || @read_timeout} seconds trying to read from #{@server}")
           end
         rescue IOError => exception
           @logger.warn "#read Connection failure while waiting for data: #{exception.class}: #{exception.message}"
           close
-          raise ConnectionFailure
+          raise ConnectionFailure.new("#{exception.class}: #{exception.message}", @server, exception)
         end
 
         # Read data from socket
         begin
-          @socket.
-          @logger.trace("
+          result = buffer.nil? ? @socket.read(length) : @socket.read(length, buffer)
+          @logger.trace("#read <== received", result.inspect)
+
+          # EOF before all the data was returned
+          if result.nil? || (result.length < length)
+            close
+            @logger.warn "#read server closed the connection before #{length} bytes were returned"
+            raise ConnectionFailure.new("Connection lost while reading data", @server, EOFError.new("end of file reached"))
+          end
         rescue SystemCallError, IOError => exception
-          @logger.warn "#read Connection failure while reading data: #{exception.class}: #{exception.message}"
          close
-
+          @logger.warn "#read Connection failure while reading data: #{exception.class}: #{exception.message}"
+          raise ConnectionFailure.new("#{exception.class}: #{exception.message}", @server, exception)
        end
      end
-
+      result
    end
 
    # Send and/or receive data with automatic retry on connection failure
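A small sketch pairing #write with #read and the optional per-call timeout from the signature above; it assumes client is a connected ResilientSocket::TCPClient and the wire protocol shown is illustrative:

```ruby
client.write("GETVALUE:5\n")        # raises ConnectionFailure on a network error

begin
  reply = client.read(20, nil, 0.5) # wait at most 0.5 seconds for 20 bytes
rescue ResilientSocket::ReadTimeout
  # The connection stays open after a ReadTimeout; read again later or call #close
  reply = client.read(20)
end
```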
@@ -306,46 +369,35 @@ module ResilientSocket
     #   # Server returns "SAVED" if the call was successfull
     #   result = client.read(20).strip
     #
-    # 3. Example of a resilient request that _modifies_ data on the server:
-    #
-    #    When changing state on the server, for example when updating a value
-    #    Wrap _only_ the send with #retry_on_connection_failure
-    #    The read must be outside the #retry_on_connection_failure since we must
-    #    not retry the send if the connection fails during the #read
-    #
-    #    value = 45
-    #    # Only the send is within the retry block since we cannot re-send once
-    #    # the send was successful since the server may have made the change
-    #    client.retry_on_connection_failure do
-    #      client.send("SETVALUE:#{count}\n")
-    #    end
-    #    # Server returns "SAVED" if the call was successfull
-    #    saved = (client.read(20).strip == 'SAVED')
-    #
-    #
     # Error handling is implemented as follows:
     #    If a network failure occurrs during the block invocation the block
     #    will be called again with a new connection to the server.
     #    It will only be retried up to 3 times
     #    The re-connect will independently retry and timeout using all the
     #    rules of #connect
-    #
-    #
     def retry_on_connection_failure
       retries = 0
       begin
         connect if closed?
         yield(self)
       rescue ConnectionFailure => exception
+        # Connection no longer usable. The next call to #retry_on_connection_failure
+        # will create a new connection since this one is now closed
         close
-
+
+        exc_str = exception.cause ? "#{exception.cause.class}: #{exception.cause.message}" : exception.message
+        # Re-raise exceptions that should not be retried
+        if !self.class.reconnect_on_errors.include?(exception.cause.class)
+          @logger.warn "#retry_on_connection_failure not configured to retry: #{exc_str}"
+          raise exception
+        elsif retries < @retry_count
           retries += 1
-          @logger.warn "#retry_on_connection_failure
+          @logger.warn "#retry_on_connection_failure retry #{retries} due to #{exception.class}: #{exception.message}"
           connect
           retry
         end
         @logger.error "#retry_on_connection_failure Connection failure: #{exception.class}: #{exception.message}. Giving up after #{retries} retries"
-        raise ConnectionFailure.new("After #{retries}
+        raise ConnectionFailure.new("After #{retries} retries to host '#{server}': #{exc_str}", @server, exception.cause)
       rescue Exception => exc
         # With any other exception we have to close the connection since the connection
         # is now in an unknown state
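A usage sketch of the read-only pattern the comments above describe, assuming client is a connected ResilientSocket::TCPClient (request text illustrative):

```ruby
# Safe to retry: the request does not change state on the server,
# so the whole send/receive pair can sit inside the retry block
value = client.retry_on_connection_failure do
  client.write("GETVALUE:5\n")
  client.read(20).strip
end
```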
@@ -354,7 +406,7 @@ module ResilientSocket
       end
     end
 
-    # Close the socket
+    # Close the socket only if it is not already closed
     #
     # Logs a warning if an error occurs trying to close the socket
     def close
@@ -368,6 +420,31 @@ module ResilientSocket
       @socket.closed?
     end
 
+    # Returns whether the connection to the server is alive
+    #
+    # It is useful to call this method before making a call to the server
+    # that would change data on the server
+    #
+    # Note: This method is only useful if the server closed the connection or
+    #       if a previous connection failure occurred.
+    #       If the server is hard killed this will still return true until one
+    #       or more writes are attempted
+    #
+    # Note: In testing the overhead of this call is rather low, with the ability to
+    #       make about 120,000 calls per second against an active connection.
+    #       I.e. About 8.3 micro seconds per call
+    def alive?
+      return false if @socket.closed?
+
+      if IO.select([@socket], nil, nil, 0)
+        !@socket.eof? rescue false
+      else
+        true
+      end
+    rescue IOError
+      false
+    end
+
     # See: Socket#setsockopt
     def setsockopt(level, optname, optval)
       @socket.setsockopt(level, optname, optval)
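As the comment suggests, #alive? is most useful just before a state-changing call; a sketch, assuming client is an existing ResilientSocket::TCPClient (request text illustrative):

```ruby
# Re-establish the connection first if the server has since dropped it
client.connect unless client.alive?

client.write("SETVALUE:45\n")
saved = (client.read(20).strip == 'SAVED')
```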
@@ -382,44 +459,44 @@ module ResilientSocket
     # Raises ConnectionTimeout when the connection timeout has been exceeded
     # Raises ConnectionFailure
     def connect_to_server(server)
-
+      # Have to use Socket internally instead of TCPSocket since TCPSocket
+      # does not offer async connect API amongst others:
+      # :accept, :accept_nonblock, :bind, :connect, :connect_nonblock, :getpeereid,
+      # :ipv6only!, :listen, :recvfrom_nonblock, :sysaccept
       retries = 0
       @logger.benchmark_info "Connecting to server #{server}" do
-
-
-        port = port.to_i
-        address = Socket.getaddrinfo('localhost', nil, Socket::AF_INET)
+        host_name, port = server.split(":")
+        port = port.to_i
 
-
-
+        address = Socket.getaddrinfo(host_name, nil, Socket::AF_INET)
+        socket_address = Socket.pack_sockaddr_in(port, address[0][3])
 
-
+        begin
           begin
-
-            socket.
+            @socket = Socket.new(Socket.const_get(address[0][0]), Socket::SOCK_STREAM, 0)
+            @socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) unless buffered
+            @socket.connect_nonblock(socket_address)
           rescue Errno::EINPROGRESS
-            resp = IO.select(nil, [socket], nil, @connect_timeout)
-            raise(ConnectionTimeout.new("Timedout after #{@connect_timeout} seconds trying to connect to #{
+            resp = IO.select(nil, [@socket], nil, @connect_timeout)
+            raise(ConnectionTimeout.new("Timedout after #{@connect_timeout} seconds trying to connect to #{server}")) unless resp
            begin
-
-              socket.connect_nonblock(socket_address)
+              @socket.connect_nonblock(socket_address)
            rescue Errno::EISCONN
            end
          end
          break
        rescue SystemCallError => exception
-          if retries < @connect_retry_count
+          if retries < @connect_retry_count && self.class.reconnect_on_errors.include?(exception.class)
            retries += 1
            @logger.warn "Connection failure: #{exception.class}: #{exception.message}. Retry: #{retries}"
            sleep @connect_retry_interval
            retry
          end
          @logger.error "Connection failure: #{exception.class}: #{exception.message}. Giving up after #{retries} retries"
-          raise ConnectionFailure.new("After #{retries} attempts: #{exception.class}: #{exception.message}")
+          raise ConnectionFailure.new("After #{retries} connection attempts to host '#{server}': #{exception.class}: #{exception.message}", @server, exception)
        end
      end
      @server = server
-      socket
    end
 
  end
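For reference, a self-contained sketch of the non-blocking connect pattern used in connect_to_server above (Socket#connect_nonblock plus IO.select); host, port and timeout are illustrative:

```ruby
require 'socket'

host, port, connect_timeout = 'localhost', 2000, 3.0

address        = Socket.getaddrinfo(host, nil, Socket::AF_INET)
socket_address = Socket.pack_sockaddr_in(port, address[0][3])
socket         = Socket.new(Socket.const_get(address[0][0]), Socket::SOCK_STREAM, 0)

begin
  socket.connect_nonblock(socket_address)
rescue Errno::EINPROGRESS
  # Connect is in flight; wait until the socket is writable or the timeout expires
  if IO.select(nil, [socket], nil, connect_timeout)
    begin
      socket.connect_nonblock(socket_address) # completes (or confirms) the connect
    rescue Errno::EISCONN
      # Already connected
    end
  else
    raise "Timed out after #{connect_timeout} seconds connecting to #{host}:#{port}"
  end
end
```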
data/nbproject/private/rake-d.txt
CHANGED
@@ -1,4 +1,4 @@
-clean=
-clobber=
-gem=
-test=
+clean=
+clobber=
+gem=
+test=
data/test.log
CHANGED
Binary file
data/test.log.working
ADDED
Binary file
data/test/tcp_client_test.rb
CHANGED
@@ -23,7 +23,7 @@ class TCPClientTest < Test::Unit::TestCase
           :connect_retry_interval => 0.1,
           :connect_retry_count => 5)
       end
-      assert_match /After 5 attempts: Errno::ECONNREFUSED/, exception.message
+      assert_match /After 5 connection attempts to host 'localhost:3300': Errno::ECONNREFUSED/, exception.message
     end
 
   end
@@ -58,35 +58,54 @@ class TCPClientTest < Test::Unit::TestCase
         :server => @server_name,
         :read_timeout => @read_timeout
       )
+      assert @client.alive?
     end
 
     def teardown
-
+      if @client
+        @client.close
+        assert !@client.alive?
+      end
     end
 
     should "successfully send and receive data" do
       request = { 'action' => 'test1' }
-      @client.
+      @client.write(BSON.serialize(request))
       reply = read_bson_document(@client)
       assert_equal 'test1', reply['result']
     end
 
     should "timeout on receive" do
       request = { 'action' => 'sleep', 'duration' => @read_timeout + 0.5}
-      @client.
+      @client.write(BSON.serialize(request))
 
       exception = assert_raise ResilientSocket::ReadTimeout do
         # Read 4 bytes from server
         @client.read(4)
       end
+      assert @client.alive?
       assert_match /Timedout after #{@read_timeout} seconds trying to read from #{@server_name}/, exception.message
     end
 
+    should "timeout on first receive and then successfully read the response" do
+      request = { 'action' => 'sleep', 'duration' => @read_timeout + 0.5}
+      @client.write(BSON.serialize(request))
+
+      exception = assert_raise ResilientSocket::ReadTimeout do
+        # Read 4 bytes from server
+        @client.read(4)
+      end
+      assert @client.alive?
+      assert_match /Timedout after #{@read_timeout} seconds trying to read from #{@server_name}/, exception.message
+      reply = read_bson_document(@client)
+      assert_equal 'sleep', reply['result']
+    end
+
     should "retry on connection failure" do
       attempt = 0
       reply = @client.retry_on_connection_failure do
         request = { 'action' => 'fail', 'attempt' => (attempt+=1) }
-        @client.
+        @client.write(BSON.serialize(request))
         # Note: Do not put the read in this block if it should never send the
         #       same request twice to the server
         read_bson_document(@client)
@@ -105,7 +124,7 @@ class TCPClientTest < Test::Unit::TestCase
       assert_equal @server_name, client.server
 
       request = { 'action' => 'test1' }
-      client.
+      client.write(BSON.serialize(request))
       reply = read_bson_document(client)
       assert_equal 'test1', reply['result']
 
@@ -125,7 +144,7 @@ class TCPClientTest < Test::Unit::TestCase
       assert_equal 1, client.user_data[:sequence]
 
       request = { 'action' => 'test1' }
-      client.
+      client.write(BSON.serialize(request))
       reply = read_bson_document(client)
       assert_equal 'test1', reply['result']
 
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: resilient_socket
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.2.0
 prerelease:
 platform: ruby
 authors:
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2012-10-
+date: 2012-10-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: semantic_logger
@@ -41,16 +41,16 @@ files:
 - LICENSE.txt
 - nbproject/private/config.properties
 - nbproject/private/private.properties
+- nbproject/private/private.xml
 - nbproject/private/rake-d.txt
 - nbproject/project.properties
 - nbproject/project.xml
 - Rakefile
 - README.md
-- resilient_socket-0.0.1.gem
-- resilient_socket-0.0.2.gem
 - test/simple_tcp_server.rb
 - test/tcp_client_test.rb
 - test.log
+- test.log.working
 homepage: https://github.com/ClarityServices/resilient_socket
 licenses: []
 post_install_message:
data/resilient_socket-0.0.1.gem
DELETED
Binary file
data/resilient_socket-0.0.2.gem
DELETED
Binary file