analogger 0.5.0 → 0.9.1
- checksums.yaml +7 -0
- data/Gemfile +6 -0
- data/Gemfile.lock +24 -0
- data/INSTALL +1 -1
- data/Rakefile +11 -0
- data/analogger.gemspec +37 -35
- data/bin/analogger +66 -60
- data/external/package.rb +12 -11
- data/external/test_support.rb +3 -6
- data/lib/swiftcore/Analogger.rb +308 -0
- data/{src/swiftcore/Analogger.rb → lib/swiftcore/Analogger.rb.orig} +64 -66
- data/lib/swiftcore/Analogger/AnaloggerProtocol.rb +78 -0
- data/lib/swiftcore/Analogger/Client.rb +406 -0
- data/lib/swiftcore/Analogger/EMClient.rb +100 -0
- data/lib/swiftcore/Analogger/version.rb +5 -0
- data/lib/swiftcore/LoggerInterface.rb +316 -0
- data/setup.rb +11 -10
- metadata +117 -70
- data/src/swiftcore/Analogger/Client.rb +0 -103
- data/test/TC_Analogger.rb +0 -151
- data/test/analogger.cnf +0 -22
- data/test/analogger2.cnf +0 -9
- data/test/tc_template.rb +0 -15
@@ -2,6 +2,7 @@ require 'socket'
 begin
   load_attempted ||= false
   require 'eventmachine'
+  require 'benchmark'
 rescue LoadError => e
   unless load_attempted
     load_attempted = true
@@ -19,18 +20,25 @@ module Swiftcore
     Cdaemonize = 'daemonize'.freeze
     Cdefault = 'default'.freeze
     Cdefault_log = 'default_log'.freeze
+    Cepoll = 'epoll'.freeze
     Chost = 'host'.freeze
     Cinterval = 'interval'.freeze
     Ckey = 'key'.freeze
+    Ckqueue = 'kqueue'.freeze
+    Clevels = 'levels'.freeze
     Clogfile = 'logfile'.freeze
     Clogs = 'logs'.freeze
+    Cpidfile = 'pidfile'.freeze
     Cport = 'port'.freeze
+    Croll = 'roll'.freeze
+    Croll_age = 'roll_age'.freeze
+    Croll_size = 'roll_size'.freeze
+    Croll_interval = 'roll_interval'.freeze
     Csecret = 'secret'.freeze
     Cservice = 'service'.freeze
-    Clevels = 'levels'.freeze
     Csyncinterval = 'syncinterval'.freeze
-    Cpidfile = 'pidfile'.freeze
     DefaultSeverityLevels = ['debug','info','warn','error','fatal'].inject({}){|h,k|h[k]=true;h}
+    TimeFormat = '%Y/%m/%d %H:%M:%S'.freeze
 
     class NoPortProvided < Exception; def to_s; "The port to bind to was not provided."; end; end
     class BadPort < Exception
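The new constants above (Cepoll, Ckqueue, Cpidfile, Croll, Croll_age, Croll_size, Croll_interval, TimeFormat) map directly onto configuration keys read by the daemon. A rough sketch of a configuration hash that exercises the new keys follows; the values are purely illustrative, and in practice the daemon builds this hash from its config file before handing it to Analogger.start.

# Illustrative only: key names come from the constants above
# (Chost = 'host', Croll_age = 'roll_age', and so on); values are hypothetical.
config = {
  'host'         => '127.0.0.1',
  'port'         => 6766,
  'interval'     => 1,          # seconds between write_queue runs
  'syncinterval' => 60,         # seconds between flush_queue runs
  'epoll'        => true,       # new: use epoll where available
  'pidfile'      => '/var/run/analogger.pid',
  'default_log'  => '/var/log/analogger/default.log',
  'logs'         => [
    {
      'service'   => 'myapp',
      'levels'    => %w[info warn error],
      'logfile'   => '/var/log/analogger/myapp.log',
      'cull'      => true,
      'roll_age'  => 86_400,      # new: roll daily
      'roll_size' => 10_485_760   # new: or when the file reaches ~10 MB
    }
  ]
}

# Swiftcore::Analogger.start(config) would then bring the server up.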
@@ -41,7 +49,14 @@ module Swiftcore
       def to_s; "The port provided (#{@port}) is invalid."; end
     end
 
+    EXIT_SIGNALS = %w[INT TERM]
+    RELOAD_SIGNALS = %w[HUP]
+
     class << self
+      def safe_trap(siglist, &operation)
+        (Signal.list.keys & siglist).each {|sig| trap(sig, &operation)}
+      end
+
       def start(config,protocol = AnaloggerProtocol)
         @config = config
         daemonize if @config[Cdaemonize]
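safe_trap only installs handlers for signals the running platform actually defines, by intersecting the requested names with Signal.list; on a platform without, say, HUP, the reload handler is simply skipped instead of raising. A standalone sketch of the same idea:

# Stand-alone version of the safe_trap idea shown above: only trap signals
# this Ruby/platform knows about.
def safe_trap(siglist, &operation)
  (Signal.list.keys & siglist).each { |sig| trap(sig, &operation) }
end

safe_trap(%w[INT TERM]) { puts 'shutting down'; exit }
safe_trap(%w[HUP])      { puts 'reloading' }   # quietly skipped where HUP does not exist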
@@ -54,14 +69,22 @@ module Swiftcore
         set_config_defaults
         @rcount = 0
         @wcount = 0
-
-
-
+        safe_trap(EXIT_SIGNALS) {cleanup;exit}
+        safe_trap(RELOAD_SIGNALS) {cleanup;throw :hup}
+
+        if @config[Cepoll] or @config[Ckqueue]
+          EventMachine.epoll if @config[Cepoll]
+          EventMachine.kqueue if @config[Ckqueue]
+
+          EventMachine.set_descriptor_table_size(4096)
+        end
+
         EventMachine.run {
           EventMachine.start_server @config[Chost], @config[Cport], protocol
           EventMachine.add_periodic_timer(1) {Analogger.update_now}
           EventMachine.add_periodic_timer(@config[Cinterval]) {write_queue}
           EventMachine.add_periodic_timer(@config[Csyncinterval]) {flush_queue}
+          EventMachine.add_periodic_timer(@config[Crollinterval]) {roll logs}
         }
       end
 
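Everything the server does on a schedule (timestamp refresh, queued writes, fsyncs, log rolling) hangs off EventMachine periodic timers rather than per-message work. A minimal, self-contained reactor sketch of that pattern; this is not Analogger code, and it only assumes the eventmachine gem is installed:

require 'eventmachine'

queue = []

EventMachine.run do
  # Batch work on a timer, the way start wires up write_queue/flush_queue.
  EventMachine.add_periodic_timer(1) do
    unless queue.empty?
      puts "flushing #{queue.size} queued messages"
      queue.clear
    end
  end

  EventMachine.add_periodic_timer(5) { EventMachine.stop }  # let the demo exit

  25.times { |i| queue << "message #{i}" }
end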
@@ -76,8 +99,8 @@ module Swiftcore
         puts "Platform (#{RUBY_PLATFORM}) does not appear to support fork/setsid; skipping"
       end
 
-      def new_log(facility = Cdefault, levels = @config[Clevels] || DefaultSeverityLevels, log = @config[Cdefault_log], cull = true)
-        Log.new({Cservice => facility, Clevels => levels, Clogfile => log, Ccull => cull})
+      def new_log(facility = Cdefault, levels = @config[Clevels] || DefaultSeverityLevels, log = @config[Cdefault_log], cull = true, roll = @config[Croll], roll_age = @config[Croll_age], roll_size = @config[Croll_size])
+        Log.new({Cservice => facility, Clevels => levels, Clogfile => log, Ccull => cull, Croll => roll, Croll_age => roll_age, Croll_size => roll_size})
       end
 
       def cleanup
@@ -87,8 +110,13 @@ module Swiftcore
         end
       end
 
+      def roll_logs
+        @logs.each do |service,l|
+        end
+      end
+
       def update_now
-        @now = Time.now.strftime(
+        @now = Time.now.strftime(TimeFormat)
       end
 
       def config
@@ -102,12 +130,13 @@ module Swiftcore
       def populate_logs
         @config[Clogs].each do |log|
           next unless log[Cservice]
+          roll = log[Croll] || log[Croll_age] || log[Croll_size] ? true : false
           if Array === log[Cservice]
             log[Cservice].each do |loglog|
-              @logs[loglog] = new_log(loglog,log[Clevels],logfile_destination(log[Clogfile]),log[Ccull])
+              @logs[loglog] = new_log(loglog,log[Clevels],logfile_destination(log[Clogfile]),log[Ccull],roll,log[Croll_age],log[Croll_size])
             end
           else
-            @logs[log[Cservice]] = new_log(log[Cservice],log[Clevels],logfile_destination(log[Clogfile]),log[Ccull])
+            @logs[log[Cservice]] = new_log(log[Cservice],log[Clevels],logfile_destination(log[Clogfile]),log[Ccull],roll,log[Croll_age],log[Croll_size])
           end
         end
       end
@@ -153,6 +182,13 @@ module Swiftcore
       end
 
       def logfile_destination(logfile)
+        # We're reloading if it's already an IO.
+        if logfile.is_a?(IO)
+          return $stdout if logfile == $stdout
+          return $stderr if logfile == $stderr
+          return logfile.reopen(logfile.path, 'ab+')
+        end
+
         if logfile =~ /^STDOUT$/i
           $stdout
         elsif logfile =~ /^STDERR$/i
@@ -183,19 +219,19 @@ module Swiftcore
                 last_count += 1
                 next
               elsif last_count > 0
-                lf.
+                lf.write_nonblock "#{@now}|#{last_sv.join(C_bar)}|Last message repeated #{last_count} times\n"
                 last_sv = last_m = nil
                 last_count = 0
               end
-              lf.
+              lf.write_nonblock "#{@now}|#{m.join(C_bar)}\n"
               last_m = m.last
               last_sv = m[0..1]
             else
-              lf.
+              lf.write_nonblock "#{@now}|#{m.join(C_bar)}\n"
             end
             @wcount += 1
           end
-          lf.
+          lf.write_nonblock "#{@now}|#{last_sv.join(C_bar)}|Last message repeated #{last_count} times\n" if cull and last_count > 0
         end
         @queue.each {|service,q| q.clear}
       end
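The cull branch above collapses runs of identical messages into a single "Last message repeated N times" line before they reach the logfile. A simplified, self-contained sketch of that collapsing logic; the method and variable names here are illustrative, not Analogger's own:

# Simplified sketch of the repeat-culling idea used in write_queue:
# consecutive identical messages become one "repeated" line.
def cull_repeats(messages)
  out = []
  last = nil
  count = 0
  messages.each do |m|
    if m == last
      count += 1
    else
      out << "Last message repeated #{count} times" if count > 0
      out << m
      last = m
      count = 0
    end
  end
  out << "Last message repeated #{count} times" if count > 0
  out
end

puts cull_repeats(%w[a a a b])
# prints: a / Last message repeated 2 times / b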
@@ -213,13 +249,16 @@ module Swiftcore
     end
 
     class Log
-      attr_reader :service, :levels, :logfile, :cull
+      attr_reader :service, :levels, :logfile, :cull, :roll, :roll_age, :roll_size
 
       def initialize(spec)
         @service = spec[Analogger::Cservice]
         @levels = spec[Analogger::Clevels]
         @logfile = spec[Analogger::Clogfile]
         @cull = spec[Analogger::Ccull]
+        @roll = spec[Analogger::Croll]
+        @roll_inteval = spec[Analogger::Croll_age]
+        @roll_size = spec[Analogger::Croll_size]
       end
 
       def to_s
@@ -238,56 +277,15 @@ module Swiftcore
       setup
     end
 
-    def setup
-      @length = nil
-      @logchunk = ''
-      @authenticated = nil
-    end
-
-    def receive_data data
-      @logchunk << data
-      decompose = true
-      while decompose
-        unless @length
-          if @logchunk.length > 7
-            l = @logchunk[0..3].unpack(Ci).first
-            ck = @logchunk[4..7].unpack(Ci).first
-            if l == ck and l < MaxMessageLength
-              @length = l + 7
-            else
-              decompose = false
-              peer = get_peername
-              peer = peer ? ::Socket.unpack_sockaddr_in(peer)[1] : 'UNK'
-              if l == ck
-                LoggerClass.add_log([:default,:error,"Max Length Exceeded from #{peer} -- #{l}/#{MaxMessageLength}"])
-                close_connection
-              else
-                LoggerClass.add_log([:default,:error,"checksum failed from #{peer} -- #{l}/#{ck}"])
-                close_connection
-              end
-            end
-          end
-        end
-
-        if @length and @logchunk.length > @length
-          msg = @logchunk.slice!(0..@length).split(Rcolon,4)
-          unless @authenticated
-            if msg.last == LoggerClass.key
-              @authenticated = true
-            else
-              close_connection
-            end
-          else
-            msg[0] = nil
-            msg.shift
-            LoggerClass.add_log(msg)
-          end
-          @length = nil
-        else
-          decompose = false
-        end
-      end
-    end
-
   end
 end
+
+case RUBY_VERSION
+when /^1.8/
+  require 'swiftcore/Analogger/receive_data_18.rb'
+when /^1.9/
+  require 'swiftcore/Analogger/receive_data_19.rb'
+else
+  raise "We're sorry, but Analogger is not supported for ruby versions prior to 1.8.x (and is untested below 1.8.5)."
+end
+
@@ -0,0 +1,78 @@
+module Swiftcore
+  class AnaloggerProtocol < EventMachine::Connection
+
+    MaxMessageLength = 8192
+    MaxLengthBytes = MaxMessageLength.to_s.length
+    Semaphore = "||"
+
+    def setup
+      @length = nil
+      @pos = 0
+      @logchunk = ''
+      @authenticated = nil
+    end
+
+    def send_data data
+      super data
+    end
+
+    def receive_data data
+      @logchunk << data
+      decompose = true
+      while decompose
+        unless @length
+          if @logchunk.length - @pos > 7
+            l = @logchunk[@pos + 0..@pos + 3].to_i
+            ck = @logchunk[@pos + 4..@pos + 7].to_i
+            if l == ck and l < MaxMessageLength
+              @length = l
+            else
+              decompose = false
+              peer = get_peername
+              peer = peer ? ::Socket.unpack_sockaddr_in(peer)[1] : 'UNK'
+              if l == ck
+                LoggerClass.add_log([:default, :error, "Max Length Exceeded from #{peer} -- #{l}/#{MaxMessageLength}"])
+                send_data "error: max length exceeded\n"
+                close_connection_after_writing
+              else
+                LoggerClass.add_log([:default, :error, "checksum failed from #{peer} -- #{l}/#{ck}"])
+                send_data "error: checksum failed\n"
+                close_connection_after_writing
+              end
+            end
+          end
+        end
+
+        if @length && @length < 8
+          decompose = false
+        end
+
+        if @length and @length > 0 and @logchunk.length - @pos >= @length
+          msg = nil
+          msg = @logchunk[@pos..@length + @pos - 1].split(Rcolon, 4)
+          @pos += @length
+          unless @authenticated
+            if msg.last == LoggerClass.key
+              @authenticated = true
+              send_data "accepted\n"
+            else
+              send_data "denied\n"
+              close_connection_after_writing
+            end
+          else
+            msg[0] = nil
+            msg.shift
+            LoggerClass.add_log(msg)
+          end
+          @length = nil
+        else
+          decompose = false
+        end
+      end
+      if @pos >= @logchunk.length
+        @logchunk.clear
+        @pos = 0
+      end
+    end
+  end
+end
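The 78-line hunk above is the new lib/swiftcore/Analogger/AnaloggerProtocol.rb. It implies the wire format the server expects: each frame starts with the total frame length written twice as zero-padded four-digit decimal fields (the duplicate doubles as a cheap checksum, and anything at or above MaxMessageLength is rejected), followed by colon-separated service, severity, and message. A small sketch that builds such a frame the same way Client#_remote_log does in the next file; the service/severity/message values are placeholders:

# Mirrors Client#_remote_log (shown in the Client.rb hunk below).
MAX_LENGTH_BYTES = 8192.to_s.length   # => 4

def frame(service, severity, message)
  # Total frame length: 8-byte prefix + three ':' separators + the payload.
  len = MAX_LENGTH_BYTES * 2 + service.length + severity.length + message.length + 3
  prefix = sprintf("%0#{MAX_LENGTH_BYTES}i%0#{MAX_LENGTH_BYTES}i", len, len)
  "#{prefix}:#{service}:#{severity}:#{message}"
end

p frame('myapp', 'info', 'hello')   # => "00250025:myapp:info:hello"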
@@ -0,0 +1,406 @@
+require 'tmpdir'
+require 'socket'
+
+include Socket::Constants
+
+module Swiftcore
+  class Analogger
+
+    # Swift::Analogger::Client is the client library for writing logging
+    # messages to the Swift Analogger asynchronous logging server.
+    #
+    # To use the Analogger client, instantiate an instance of the Client
+    # class.
+    #
+    #   logger = Swift::Analogger::Client.new(:myapplog,'127.0.0.1',12345)
+    #
+    # Four arguments are accepted when a new Client is created. The first
+    # is the name of the logging facility that this Client will write to.
+    # The second is the hostname where the Analogger process is running,
+    # and the third is the port number that it is listening on for
+    # connections.
+    #
+    # The fourth argument is optional. Analogger can require an
+    # authentication key before it will allow logging clients to use its
+    # facilities. If the Analogger that one is connecting to requires
+    # an authentication key, it must be passed to the new() call as the
+    # fourth argument. If the key is incorrect, the connection will be
+    # closed.
+    #
+    # If a Client connects to the Analogger using a facility that is
+    # undefined in the Analogger, the log messages will still be accepted,
+    # but they will be dumped to the default logging destination.
+    #
+    # Once connected, the Client is ready to deliver messages to the
+    # Analogger. To send a messagine, the log() method is used:
+    #
+    #   logger.log(:debug,"The logging client is now connected.")
+    #
+    # The log() method takes two arguments. The first is the severity of
+    # the message, and the second is the message itself. The default
+    # Analogger severity levels are the same as in the standard Ruby
+    #
+    class Client
+
+      class FailedToAuthenticate < StandardError
+        def initialize(hots = "UNK", port = 6766)
+          super("Failed to authenticate to the Analogger server at #{destination}:#{port}")
+        end
+      end
+
+      Cauthentication = 'authentication'.freeze
+      Ci = 'i'.freeze
+
+      MaxMessageLength = 8192
+      MaxLengthBytes = MaxMessageLength.to_s.length
+      Semaphore = "||"
+      ConnectionFailureTimeout = 86400 * 2 # Log locally for a long time if Analogger server goes down.
+      MaxFailureCount = (2**(0.size * 8 - 2) - 1) # Max integer -- i.e. really big
+      PersistentQueueLimit = 10737412742 # Default to allowing around 10GB temporary local log storage
+      ReconnectThrottleInterval = 0.1
+
+      def log(severity, msg)
+        if @destination == :local
+          _local_log(@service, severity, msg)
+        else
+          _remote_log(@service, severity, msg)
+        end
+      rescue Exception
+        @authenticated = false
+        setup_local_logging
+        setup_reconnect_thread
+      end
+
+      #----- Various class accessors -- use these to set defaults
+
+      def self.connection_failure_timeout
+        @connection_failure_timeout ||= ConnectionFailureTimeout
+      end
+
+      def self.connection_failure_timeout=(val)
+        @connection_failure_timeout = val.to_i
+      end
+
+      def self.max_failure_count
+        @max_failure_count ||= MaxFailureCount
+      end
+
+      def self.max_failure_count=(val)
+        @max_failure_count = val.to_i
+      end
+
+      def self.persistent_queue_limit
+        @persistent_queue_limit ||= PersistentQueueLimit
+      end
+
+      def self.persistent_queue_limit=(val)
+        @persistent_queue_limit = val.to_i
+      end
+
+      def self.tmplog
+        @tmplog
+      end
+
+      def self.tmplog=(val)
+        @tmplog = val
+      end
+
+      def self.reconnect_throttle_interval
+        @reconnect_throttle_interval ||= ReconnectThrottleInterval
+      end
+
+      def self.reconnect_throttle_interval=(val)
+        @reconnect_throttle_interval = val.to_i
+      end
+
+      #-----
+
+      def initialize(service = 'default', host = '127.0.0.1' , port = 6766, key = nil)
+        @service = service.to_s
+        @key = key
+        @host = host
+        @port = port
+        klass = self.class
+        @connection_failure_timeout = klass.connection_failure_timeout
+        @max_failure_count = klass.max_failure_count
+        @persistent_queue_limit = klass.persistent_queue_limit
+        @authenticated = false
+        @total_count = 0
+        @logfile = nil
+        @swamp_drainer = nil
+
+        clear_failure
+
+        connect
+      end
+
+      #----- Various instance accessors
+
+      def total_count
+        @total_count
+      end
+
+      def connection_failure_timeout
+        @connection_failure_timeout
+      end
+
+      def connection_failure_timeout=(val)
+        @connection_failure_timeout = val.to_i
+      end
+
+      def max_failure_count
+        @max_failure_count
+      end
+
+      def max_failure_count=(val)
+        @max_failure_count = val.to_i
+      end
+
+      def ram_queue_limit
+        @ram_queue_limit
+      end
+
+      def ram_queue_limit=(val)
+        @ram_queue_limit = val.to_i
+      end
+
+      def persistent_queue_limit
+        @persistent_queue_limit
+      end
+
+      def persistent_queue_limit=(val)
+        @persistent_queue_limit = val.to_i
+      end
+
+      def tmplog_prefix
+        File.join(Dir.tmpdir, "analogger-SERVICE-PID.log")
+      end
+
+      def tmplog
+        @tmplog ||= tmplog_prefix.gsub(/SERVICE/, @service).gsub(/PID/,$$.to_s)
+      end
+
+      def tmplogs
+        Dir[tmplog_prefix.gsub(/SERVICE/, @service).gsub(/PID/,'*')].sort_by {|f| File.mtime(f)}
+      end
+
+      def tmplog=(val)
+        @tmplog = val
+      end
+
+      def reconnect_throttle_interval
+        @reconnect_throttle_interval ||= self.class.reconnect_throttle_interval
+      end
+
+      def reconnect_throttle_interval=(val)
+        @reconnect_throttle_interval = val.to_i
+      end
+
+      #----- The meat of the client
+
+      def connect
+        @socket = open_connection(@host, @port)
+        authenticate
+        raise FailedToAuthenticate(@host, @port) unless authenticated?
+        clear_failure
+
+        if there_is_a_swamp?
+          drain_the_swamp
+        else
+          setup_remote_logging
+        end
+
+      rescue Exception => e
+        register_failure
+        close_connection
+        setup_reconnect_thread unless @reconnection_thread && Thread.current == @reconnection_thread
+        setup_local_logging
+        raise e if fail_connect?
+      end
+
+      private
+
+      def setup_local_logging
+        unless @logfile && !@logfile.closed?
+          @logfile = File.open(tmplog,"a+")
+          @destination = :local
+        end
+      end
+
+      def setup_remote_logging
+        @destination = :remote
+      end
+
+      def setup_reconnect_thread
+        return if @reconnection_thread
+        @reconnection_thread = Thread.new do
+          while true
+            sleep reconnect_throttle_interval
+            connect rescue nil
+            break if @socket && !closed?
+          end
+          @reconnection_thread = nil
+        end
+      end
+
+      def _remote_log(service, severity, message)
+        @total_count += 1
+        len = MaxLengthBytes + MaxLengthBytes + service.length + severity.length + message.length + 3
+        ll = sprintf("%0#{MaxLengthBytes}i%0#{MaxLengthBytes}i", len, len)
+        @socket.write "#{ll}:#{service}:#{severity}:#{message}"
+      end
+
+      def _local_log(service, severity, message)
+        # Convert newlines to a different marker so that log messages can be stuffed onto a single file line.
+        @logfile.flock File::LOCK_EX
+        @logfile.puts "#{service}:#{severity}:#{message.gsub(/\n/,"\x00\x00")}"
+      ensure
+        @logfile.flock File::LOCK_UN
+      end
+
+      def open_connection(host, port)
+        socket = Socket.new(AF_INET,SOCK_STREAM,0)
+        sockaddr = Socket.pack_sockaddr_in(port,host)
+        socket.connect(sockaddr)
+        socket
+      end
+
+      def close_connection
+        @socket.close if @socket and !@socket.closed?
+      end
+
+      def register_failure
+        @failed_at ||= Time.now
+        @failure_count += 1
+      end
+
+      def fail_connect?
+        failed_too_many? || failed_too_long?
+      end
+
+      def failed?
+        !@failed_at.nil?
+      end
+
+      def failed_too_many?
+        @failure_count > @max_failure_count
+      end
+
+      def failed_too_long?
+        failed? && ( @failed_at + @connection_failure_timeout ) < Time.now
+      end
+
+      def clear_failure
+        @failed_at = nil
+        @failure_count = 0
+      end
+
+      def authenticate
+        begin
+          _remote_log(@service, Cauthentication, "#{@key}")
+          response = @socket.gets
+        rescue Exception
+          response = nil
+        end
+
+        if response && response =~ /accepted/
+          @authenticated = true
+        else
+          @authenticated = false
+        end
+      end
+
+      def there_is_a_swamp?
+        tmplogs.each do |logfile|
+          break true if FileTest.exist?(logfile) && File.size(logfile) > 0
+        end
+      end
+
+      def drain_the_swamp
+        unless @swamp_drainer
+          @swap_drainer = Thread.new { _drain_the_swamp }
+        end
+      end
+
+      def non_blocking_lock_on_file_handle(fh, &block)
+        fh.flock(File::LOCK_EX|File::LOCK_NB) ? yield : false
+      ensure
+        fh.flock File::LOCK_UN
+      end
+
+      def _drain_the_swamp
+        # As soon as we start emptying the local log file, ensure that no data
+        # gets missed because of IO buffering. Otherwise, during high rates of
+        # message sending, it is possible to get an EOF on file reading, and
+        # assume all data has been sent, when there are actually records which
+        # are buffered and just haven't been written yet.
+        @logfile && (@logfile.sync = true)
+
+        tmplogs.each do |logfile|
+          buffer = ''
+
+          FileTest.exist?(logfile) && File.open(logfile) do |fh|
+            non_blocking_lock_on_file_handle(fh) do # Only one process should read a given file.
+              fh.fdatasync rescue fh.fsync
+              logfile_not_empty = true
+              while logfile_not_empty
+                begin
+                  buffer << fh.read_nonblock(8192) unless closed?
+                rescue EOFError
+                  logfile_not_empty = false
+                end
+                records = buffer.scan(/^.*?\n/)
+                buffer = buffer[(records.inject(0) {|n, e| n += e.length})..-1] # truncate buffer
+                records.each_index do |n|
+                  record = records[n]
+                  next if record =~ /^\#/
+                  service, severity, msg = record.split(":", 3)
+                  msg = msg.chomp.gsub(/\x00\x00/, "\n")
+                  begin
+                    _remote_log(service, severity, msg)
+                  rescue
+                    # FAIL while draining the swamp. Just reset the buffer from wherever we are, and
+                    # keep trying, after a short sleep to allow for recovery.
+                    new_buffer = ''
+                    records[n..-1].each {|r| new_buffer << r}
+                    new_buffer << buffer
+                    buffer = new_buffer
+                    sleep 1
+                  end
+                end
+              end
+              File.unlink logfile
+            end
+            if tmplog == logfile
+              setup_remote_logging
+            end
+          end
+        end
+
+
+        @swamp_drainer = nil
+      rescue Exception => e
+        STDERR.puts "ERROR SENDING LOCALLY SAVED LOGS: #{e}\n#{e.backtrace.inspect}"
+      end
+
+      public
+
+      def authenticated?
+        @authenticated
+      end
+
+      def reconnect
+        connect(@host,@port)
+      end
+
+      def close
+        @socket.close
+      end
+
+      def closed?
+        @socket.closed?
+      end
+
+    end
+  end
+end
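The 406-line hunk above is the new lib/swiftcore/Analogger/Client.rb. Per its own documentation comments, typical use looks like the sketch below; the host, port, and key values are placeholders, and the class lives under Swiftcore:: (the comment's Swift:: prefix notwithstanding). If the server is unreachable, log calls fall back to the temporary local file and a background thread replays it once the connection comes back.

require 'swiftcore/Analogger/Client'

# Placeholder host/port/key; the fourth argument is only needed when the
# server is configured with an authentication key.
logger = Swiftcore::Analogger::Client.new('myapplog', '127.0.0.1', 6766, 'seekrit')

logger.log('info',  'The logging client is now connected.')
logger.log('error', "Something broke:\nmultiline messages survive the round trip.")

logger.close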