semantic_logger 4.1.1 → 4.2.0
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/semantic_logger.rb +6 -13
- data/lib/semantic_logger/ansi_colors.rb +10 -10
- data/lib/semantic_logger/appender.rb +42 -26
- data/lib/semantic_logger/appender/async.rb +179 -0
- data/lib/semantic_logger/appender/async_batch.rb +95 -0
- data/lib/semantic_logger/appender/bugsnag.rb +2 -1
- data/lib/semantic_logger/appender/elasticsearch.rb +113 -81
- data/lib/semantic_logger/appender/elasticsearch_http.rb +1 -3
- data/lib/semantic_logger/appender/file.rb +1 -3
- data/lib/semantic_logger/appender/graylog.rb +6 -5
- data/lib/semantic_logger/appender/honeybadger.rb +0 -2
- data/lib/semantic_logger/appender/http.rb +25 -10
- data/lib/semantic_logger/appender/kafka.rb +1 -3
- data/lib/semantic_logger/appender/mongodb.rb +1 -3
- data/lib/semantic_logger/appender/new_relic.rb +7 -3
- data/lib/semantic_logger/appender/sentry.rb +6 -7
- data/lib/semantic_logger/appender/splunk.rb +1 -2
- data/lib/semantic_logger/appender/splunk_http.rb +3 -4
- data/lib/semantic_logger/appender/syslog.rb +1 -3
- data/lib/semantic_logger/appender/tcp.rb +7 -9
- data/lib/semantic_logger/appender/udp.rb +0 -2
- data/lib/semantic_logger/appender/wrapper.rb +0 -2
- data/lib/semantic_logger/base.rb +76 -19
- data/lib/semantic_logger/formatters.rb +37 -0
- data/lib/semantic_logger/formatters/base.rb +10 -3
- data/lib/semantic_logger/formatters/json.rb +2 -6
- data/lib/semantic_logger/formatters/one_line.rb +18 -0
- data/lib/semantic_logger/formatters/raw.rb +8 -2
- data/lib/semantic_logger/formatters/signalfx.rb +169 -0
- data/lib/semantic_logger/log.rb +23 -14
- data/lib/semantic_logger/loggable.rb +88 -15
- data/lib/semantic_logger/logger.rb +0 -20
- data/lib/semantic_logger/metric/new_relic.rb +75 -0
- data/lib/semantic_logger/metric/signalfx.rb +123 -0
- data/lib/semantic_logger/{metrics → metric}/statsd.rb +20 -8
- data/lib/semantic_logger/processor.rb +67 -169
- data/lib/semantic_logger/semantic_logger.rb +7 -31
- data/lib/semantic_logger/subscriber.rb +32 -36
- data/lib/semantic_logger/utils.rb +47 -0
- data/lib/semantic_logger/version.rb +1 -1
- data/test/appender/async_batch_test.rb +61 -0
- data/test/appender/async_test.rb +45 -0
- data/test/appender/elasticsearch_http_test.rb +3 -3
- data/test/appender/elasticsearch_test.rb +211 -49
- data/test/appender/file_test.rb +9 -8
- data/test/appender/mongodb_test.rb +3 -3
- data/test/appender/newrelic_rpm.rb +6 -0
- data/test/appender/sentry_test.rb +3 -1
- data/test/appender/wrapper_test.rb +29 -0
- data/test/concerns/compatibility_test.rb +64 -60
- data/test/debug_as_trace_logger_test.rb +62 -77
- data/test/formatters/one_line_test.rb +61 -0
- data/test/formatters/signalfx_test.rb +200 -0
- data/test/formatters_test.rb +36 -0
- data/test/in_memory_appender.rb +9 -0
- data/test/in_memory_appender_helper.rb +43 -0
- data/test/in_memory_batch_appender.rb +9 -0
- data/test/in_memory_metrics_appender.rb +14 -0
- data/test/loggable_test.rb +15 -30
- data/test/logger_test.rb +181 -135
- data/test/measure_test.rb +212 -113
- data/test/metric/new_relic_test.rb +36 -0
- data/test/metric/signalfx_test.rb +78 -0
- data/test/semantic_logger_test.rb +58 -65
- data/test/test_helper.rb +19 -2
- metadata +33 -7
- data/lib/semantic_logger/metrics/new_relic.rb +0 -30
- data/lib/semantic_logger/metrics/udp.rb +0 -80
- data/test/mock_logger.rb +0 -29
@@ -43,19 +43,18 @@ class SemanticLogger::Appender::Sentry < SemanticLogger::Subscriber
 
   # Send an error notification to sentry
   def log(log)
-
-    # Ignore logs coming from Ravent itself
+    # Ignore logs coming from Raven itself
     return false if log.name == 'Raven'
 
     context = formatter.call(log, self)
+    attrs   = {
+      level: context.delete(:level),
+      extra: context
+    }
     if log.exception
       context.delete(:exception)
-      Raven.capture_exception(log.exception,
+      Raven.capture_exception(log.exception, attrs)
     else
-      attrs = {
-        level: context.delete(:level),
-        extra: context
-      }
       attrs[:extra][:backtrace] = log.backtrace if log.backtrace
       Raven.capture_message(context[:message], attrs)
     end
@@ -98,7 +98,7 @@ class SemanticLogger::Appender::Splunk < SemanticLogger::Subscriber
   # open the handles to resources
   def reopen
     # Connect to splunk. Connect is a synonym for creating a Service by hand and calling login.
-    self.service
+    self.service = Splunk::connect(config)
 
     # The index we are logging to
     self.service_index = service.indexes[index]
@@ -106,7 +106,6 @@ class SemanticLogger::Appender::Splunk < SemanticLogger::Subscriber
 
   # Log the message to Splunk
   def log(log)
-    return false unless should_log?(log)
     event = formatter.call(log, self)
     service_index.submit(event.delete(:message), event)
     true
@@ -73,7 +73,7 @@ class SemanticLogger::Appender::SplunkHttp < SemanticLogger::Appender::Http
     @source_type = source_type
     @index = index
 
-    super(url: url, compress: compress, ssl: ssl,
+    super(url: url, compress: compress, ssl: ssl, read_timeout: read_timeout, open_timeout: open_timeout, continue_timeout: continue_timeout,
          level: level, formatter: formatter, filter: filter, application: application, host: host, &block)
 
     # Put splunk auth token in the header of every HTTP post.
@@ -85,12 +85,11 @@ class SemanticLogger::Appender::SplunkHttp < SemanticLogger::Appender::Http
   # For splunk format requirements see:
   # http://dev.splunk.com/view/event-collector/SP-CAAAE6P
   def call(log, logger)
-    h
-    h.delete(:time)
+    h = SemanticLogger::Formatters::Raw.new(time_format: :seconds).call(log, logger)
     message = {
       source: logger.application,
       host:   logger.host,
-      time:
+      time:   h.delete(:time),
       event:  h
     }
     message[:source_type] = source_type if source_type
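
Note: the event body is now built by the Raw formatter with time_format: :seconds, and the timestamp is pulled out of the event hash into Splunk's top-level time field. A rough sketch of the resulting HEC payload, with illustrative values only:

# Illustrative only; field values depend on the log entry and appender settings.
{
  source:      'my_app',                 # logger.application
  host:        'host-1',                 # logger.host
  time:        1504205857.732211,        # float seconds since epoch, removed from the event hash
  event:       {name: 'Example', level: :info, message: 'Hello'},
  source_type: 'mysourcetype'            # only present when source_type is set
}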
@@ -167,7 +167,7 @@ module SemanticLogger
         ::Syslog.open(application, options, facility)
       when :tcp
         # Use the local logger for @remote_syslog so errors with the remote logger can be recorded locally.
-        @tcp_client_options[:logger] =
+        @tcp_client_options[:logger] = logger
         @tcp_client_options[:server] = "#{@server}:#{@port}"
         @remote_syslog = Net::TCPClient.new(@tcp_client_options)
       when :udp
@@ -179,8 +179,6 @@ module SemanticLogger
 
     # Write the log using the specified protocol and server.
     def log(log)
-      return false unless should_log?(log)
-
       case @protocol
       when :syslog
         # Since the Ruby Syslog API supports sprintf format strings, double up all existing '%'
@@ -188,12 +188,12 @@ module SemanticLogger
                    on_connect: nil, proxy_server: nil, ssl: nil,
                    level: nil, formatter: nil, filter: nil, application: nil, host: nil, &block
     )
-      @separator
-      @options
-        server:
-        servers:
-        policy:
-        buffered:
+      @separator = separator
+      @options   = {
+        server:             server,
+        servers:            servers,
+        policy:             policy,
+        buffered:           buffered,
         #keepalive:          keepalive,
         connect_timeout:    connect_timeout,
         read_timeout:       read_timeout,
@@ -208,7 +208,7 @@ module SemanticLogger
       }
 
       # Use the internal logger so that errors with remote logging are only written locally.
-      Net::TCPClient.logger =
+      Net::TCPClient.logger      = logger
       Net::TCPClient.logger.name = 'Net::TCPClient'
 
       super(level: level, formatter: formatter, filter: filter, application: application, host: host, &block)
@@ -223,8 +223,6 @@ module SemanticLogger
 
     # Write the log using the specified protocol and server.
     def log(log)
-      return false unless should_log?(log)
-
       message = formatter.call(log, self)
       @tcp_client.retry_on_connection_failure do
         @tcp_client.write("#{message}#{separator}")
@@ -54,8 +54,6 @@ module SemanticLogger
     # trace entries are mapped to debug since :trace is not supported by the
     # Ruby or Rails Loggers
     def log(log)
-      return false unless should_log?(log)
-
       @logger.send(log.level == :trace ? :debug : log.level, formatter.call(log, self))
       true
     end
data/lib/semantic_logger/base.rb
CHANGED
@@ -124,8 +124,16 @@ module SemanticLogger
     alias_method :benchmark, :measure
 
     # Log a thread backtrace
-    def backtrace(thread: Thread.current,
-
+    def backtrace(thread: Thread.current,
+                  level: :warn,
+                  message: 'Backtrace:',
+                  payload: nil,
+                  metric: nil,
+                  metric_amount: nil)
+
+      log = Log.new(name, level)
+      return false unless meets_log_level?(log)
+
       backtrace =
         if thread == Thread.current
           Log.cleanse_backtrace
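
The expanded keyword arguments make the backtrace log level, message, payload, and metric configurable. A minimal usage sketch (the worker thread below is hypothetical):

logger = SemanticLogger['Example']

# Log the current thread's backtrace at the default :warn level
logger.backtrace

# Log another thread's backtrace at :info with a custom message
logger.backtrace(thread: worker, level: :info, message: 'Worker backtrace:')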
@@ -142,8 +150,10 @@ module SemanticLogger
         message << backtrace.join("\n")
       end
 
-      if log.assign(message: message, backtrace: backtrace, payload: payload, metric: metric, metric_amount: metric_amount) &&
+      if log.assign(message: message, backtrace: backtrace, payload: payload, metric: metric, metric_amount: metric_amount) && !filtered?(log)
         self.log(log)
+      else
+        false
       end
     end
 
@@ -232,13 +242,16 @@ module SemanticLogger
       SemanticLogger.named_tags
     end
 
-    protected
-
     # Write log data to underlying data storage
     def log(log_)
       raise NotImplementedError.new('Logging Appender must implement #log(log)')
     end
 
+    # Whether this log entry meets the criteria to be logged by this appender.
+    def should_log?(log)
+      meets_log_level?(log) && !filtered?(log)
+    end
+
     private
 
     # Initializer for Abstract Class SemanticLogger::Base
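
With should_log? defined once here, combining the level check and the new filtered? check, individual appenders drop their own `return false unless should_log?(log)` guards (see the appender hunks above). A hedged sketch of a custom subscriber written against this version; the framework is assumed to apply should_log? before calling #log:

# Hypothetical appender; no explicit level/filter guard needed in #log.
class StdoutAppender < SemanticLogger::Subscriber
  def log(log)
    puts formatter.call(log, self)
    true
  end
end

SemanticLogger.add_appender(appender: StdoutAppender.new(level: :info))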
@@ -281,20 +294,21 @@ module SemanticLogger
     end
 
     # Whether to log the supplied message based on the current filter if any
-    def
-      return
+    def filtered?(log)
+      return false if @filter.nil?
 
       if @filter.is_a?(Regexp)
-        (@filter =~ log.name)
+        (@filter =~ log.name) == nil
       elsif @filter.is_a?(Proc)
-        @filter.call(log)
+        @filter.call(log) != true
+      else
+        raise(ArgumentError, "Unrecognized semantic logger filter: #{@filter.inspect}, must be a Regexp or a Proc")
       end
     end
 
-    #
-    def
-
-      (level_index <= (log.level_index || 0)) && include_message?(log)
+    # Ensure minimum log level is met
+    def meets_log_level?(log)
+      (level_index <= (log.level_index || 0))
     end
 
     # Log message at the specified level
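
filtered? now returns true when an entry should be skipped: a Regexp filter must match the logger name, a Proc filter must return exactly true to pass the entry through, and any other filter object raises an ArgumentError. Illustrative configuration, assuming the standard add_appender options:

# Regexp filter: only log entries whose logger name matches
SemanticLogger.add_appender(io: $stdout, filter: /\AMyApp/)

# Proc filter: drop noisy health-check entries
SemanticLogger.add_appender(
  file_name: 'production.log',
  filter:    -> log { log.message !~ /health_check/ }
)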
@@ -302,12 +316,18 @@ module SemanticLogger
       log = Log.new(name, level, index)
       should_log =
         if payload.nil? && exception.nil? && message.is_a?(Hash)
-
+          # Check if someone just logged a hash payload instead of meaning to call semantic logger
+          if message.has_key?(:message) || message.has_key?(:payload) || message.has_key?(:exception) || message.has_key?(:metric)
+            log.assign(message)
+          else
+            log.assign_positional(nil, message, nil, &block)
+          end
         else
           log.assign_positional(message, payload, exception, &block)
         end
 
-
+      # Log level may change during assign due to :on_exception_level
+      self.log(log) if should_log && should_log?(log)
     end
 
     # Measure the supplied block and log the message
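
The new branch distinguishes a hash built from Semantic Logger's reserved keys from a plain payload hash. Illustrative calls (values made up):

# Recognised keys are assigned directly onto the log entry
logger.info(message: 'Order created', payload: {order_id: 42}, metric: 'order/created')

# Any other hash is treated as the payload itself
logger.info(user: 'jack', state: 'active')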
@@ -346,26 +366,63 @@ module SemanticLogger
         end
 
         # Extract options after block completes so that block can modify any of the options
-        payload
+        payload = params[:payload]
 
+        # May return false due to elastic logging
         should_log = log.assign(
           message:            message,
           payload:            payload,
           min_duration:       params[:min_duration] || 0.0,
           exception:          exception,
           metric:             params[:metric],
-          metric_amount:
+          metric_amount:      params[:metric_amount],
           duration:           duration,
-          backtrace:          nil,
           log_exception:      params[:log_exception] || :partial,
           on_exception_level: params[:on_exception_level]
         )
 
-
+        # Log level may change during assign due to :on_exception_level
+        self.log(log) if should_log && should_log?(log)
         raise exception if exception
         result
       end
     end
 
+    # For measuring methods and logging their duration.
+    def measure_method(index:,
+                       level:,
+                       message:,
+                       min_duration:,
+                       metric:,
+                       log_exception:,
+                       on_exception_level:,
+                       &block)
+
+      # Ignores filter, silence, payload
+      exception = nil
+      start     = Time.now
+      begin
+        yield
+      rescue Exception => exc
+        exception = exc
+      ensure
+        log = Log.new(name, level, index)
+        # May return false due to elastic logging
+        should_log = log.assign(
+          message:            message,
+          min_duration:       min_duration,
+          exception:          exception,
+          metric:             metric,
+          duration:           1000.0 * (Time.now - start),
+          log_exception:      log_exception,
+          on_exception_level: on_exception_level
+        )
+
+        # Log level may change during assign due to :on_exception_level
+        log(log) if should_log && should_log?(log)
+        raise exception if exception
+      end
+    end
+
   end
 end
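
measure_method is an internal helper that times a block and logs its duration while ignoring filters, silencing, and payloads. The public measure API it mirrors can be sketched as follows (the logger name and fetch_prices call are illustrative):

logger = SemanticLogger['PriceService']

# Time the block, emit a metric, and only log when it takes longer than 10ms
logger.measure_info('Fetched prices', metric: 'PriceService/fetch', min_duration: 10) do
  fetch_prices
end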
@@ -0,0 +1,37 @@
+module SemanticLogger
+  module Formatters
+    # @formatter:off
+    autoload :Base,     'semantic_logger/formatters/base'
+    autoload :Color,    'semantic_logger/formatters/color'
+    autoload :Default,  'semantic_logger/formatters/default'
+    autoload :Json,     'semantic_logger/formatters/json'
+    autoload :Raw,      'semantic_logger/formatters/raw'
+    autoload :OneLine,  'semantic_logger/formatters/one_line'
+    autoload :Signalfx, 'semantic_logger/formatters/signalfx'
+    autoload :Syslog,   'semantic_logger/formatters/syslog'
+    # @formatter:on
+
+    # Return formatter that responds to call.
+    #
+    # Supports formatter supplied as:
+    # - Symbol
+    # - Hash ( Symbol => { options })
+    # - Instance of any of SemanticLogger::Formatters
+    # - Proc
+    # - Any object that responds to :call
+    def self.factory(formatter)
+      case
+      when formatter.is_a?(Symbol)
+        SemanticLogger::Utils.constantize_symbol(formatter, 'SemanticLogger::Formatters').new
+      when formatter.is_a?(Hash) && formatter.size > 0
+        fmt, options = formatter.first
+        SemanticLogger::Utils.constantize_symbol(fmt.to_sym, 'SemanticLogger::Formatters').new(options)
+      when formatter.respond_to?(:call)
+        formatter
+      else
+        raise(ArgumentError, "Unknown formatter: #{formatter.inspect}")
+      end
+    end
+
+  end
+end
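
Formatters.factory resolves the formatter: option given to an appender. The supported forms, sketched with illustrative option values:

SemanticLogger.add_appender(io: $stdout, formatter: :one_line)                            # Symbol
SemanticLogger.add_appender(io: $stdout, formatter: {raw: {time_key: :ts}})               # Hash of Symbol => options
SemanticLogger.add_appender(io: $stdout, formatter: ->(log, logger) { log.message.to_s }) # Proc or any object responding to #call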
@@ -9,7 +9,7 @@ module SemanticLogger
     PRECISION =
       if defined?(JRuby)
         if (JRUBY_VERSION.to_f >= 9.1)
-          maint = JRUBY_VERSION.match(/\A\d
+          maint = JRUBY_VERSION.match(/\A\d+\.\d+\.(\d+)\./)[1].to_i
           (maint >= 8) || (JRUBY_VERSION.to_f > 9.1) ? 6 : 3
         else
           3
@@ -21,8 +21,9 @@ module SemanticLogger
 
     # Parameters
     #  time_format: [String|Symbol|nil]
-    #    See Time#strftime for the format of this string
-    #    :iso_8601 Outputs an ISO8601 Formatted timestamp
+    #    See Time#strftime for the format of this string.
+    #    :iso_8601 Outputs an ISO8601 Formatted timestamp.
+    #    :ms Output in miliseconds since epoch.
     #    nil: Returns Empty string for time ( no time is output ).
     #    Default: '%Y-%m-%d %H:%M:%S.%6N'
     def initialize(time_format: TIME_FORMAT, log_host: true, log_application: true)
@@ -43,6 +44,12 @@ module SemanticLogger
       case time_format
       when :iso_8601
         time.utc.iso8601(PRECISION)
+      when :ms
+        (time.to_f * 1_000).to_i
+      when :none
+        time
+      when :seconds
+        time.to_f
       when nil
         ''
       else
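
For reference, the time_format symbols now map as follows (derived from the case statement above; t is any Time instance):

# :iso_8601 => t.utc.iso8601(PRECISION)   e.g. "2017-09-01T23:37:37.732211Z"
# :ms       => (t.to_f * 1_000).to_i      integer milliseconds since epoch
# :seconds  => t.to_f                     float seconds since epoch
# :none     => t                          the Time object, unchanged
# nil       => ''                         no time output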
@@ -3,12 +3,8 @@ module SemanticLogger
   module Formatters
     class Json < Raw
       # Default JSON time format is ISO8601
-      def initialize(time_format: :iso_8601, log_host: true, log_application: true)
-        super(time_format: time_format, log_host: log_host, log_application: log_application)
-      end
-
-      def time
-        hash[:timestamp] = format_time(log.time)
+      def initialize(time_format: :iso_8601, log_host: true, log_application: true, time_key: :timestamp)
+        super(time_format: time_format, log_host: log_host, log_application: log_application, time_key: time_key)
       end
 
       # Returns log messages in JSON format
@@ -0,0 +1,18 @@
+module SemanticLogger
+  module Formatters
+    # Only output one line for each log entry.
+    #
+    # Notes:
+    # * New lines are stripped from log messages.
+    # * Exceptions only include the class and message, the stack trace is not shown.
+    class OneLine < Default
+      def message
+        "-- #{log.message.gsub("\n", '')}" if log.message
+      end
+
+      def exception
+        "-- Exception: #{log.exception.class}: #{log.exception.message.gsub("\n", '')}" if log.exception
+      end
+    end
+  end
+end
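
Usage sketch for the new formatter; the rendered prefix comes from the Default formatter, so the output shown is approximate:

SemanticLogger.add_appender(io: $stdout, formatter: :one_line)

logger = SemanticLogger['Example']
logger.info("first line\nsecond line")
# => ... I Example -- first linesecond line    (embedded newlines removed)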
@@ -4,7 +4,13 @@ module SemanticLogger
     class Raw < Base
 
       # Fields are added by populating this hash.
-      attr_accessor :hash, :log, :logger
+      attr_accessor :hash, :log, :logger, :time_key
+
+      # By default Raw formatter does not reformat the time
+      def initialize(time_format: :none, log_host: true, log_application: true, time_key: :time)
+        @time_key = time_key
+        super(time_format: time_format, log_host: log_host, log_application: log_application)
+      end
 
       # Host name
       def host
@@ -18,7 +24,7 @@ module SemanticLogger
 
       # Date & time
       def time
-        hash[
+        hash[time_key] = format_time(log.time)
       end
 
       # Log level
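
The new time_key option lets Raw (and Json, which now forwards it) emit the timestamp under a custom key. Illustrative:

formatter = SemanticLogger::Formatters::Raw.new(time_format: :seconds, time_key: :ts)
# Each log hash then contains ts: <float seconds since epoch> instead of time: <Time>.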
@@ -0,0 +1,169 @@
+require 'json'
+module SemanticLogger
+  module Formatters
+    class Signalfx < Base
+      attr_accessor :token, :dimensions, :hash, :log, :logger, :gauge_name, :counter_name, :environment
+
+      def initialize(token:,
+                     dimensions: nil,
+                     log_host: true,
+                     log_application: true,
+                     gauge_name: 'Application.average',
+                     counter_name: 'Application.counter',
+                     environment: true)
+
+        @token        = token
+        @dimensions   = dimensions.map(&:to_sym) if dimensions
+        @gauge_name   = gauge_name
+        @counter_name = counter_name
+
+        if environment == true
+          @environment = defined?(Rails) ? Rails.env : ENV['RAILS_ENV'] || ENV['RACK_ENV'] || 'development'
+        elsif environment
+          @environment = environment
+        end
+
+        super(time_format: :ms, log_host: log_host, log_application: log_application)
+      end
+
+      # Create SignalFx friendly metric.
+      #   Strip leading '/'
+      #   Convert remaining '/' to '.'
+      def metric
+        if log.dimensions
+          name = log.metric.to_s.sub(/\A\/+/, '')
+          name.gsub!('/', '.')
+          hash[:metric] = name
+        else
+          # Extract class and action from metric name
+          name  = log.metric.to_s.sub(/\A\/+/, '')
+          names = name.split('/')
+          h     = (hash[:dimensions] ||= {})
+          if names.size > 1
+            h[:action] = names.pop
+            h[:class]  = names.join('::')
+          else
+            h[:class]  = 'Unknown'
+            h[:action] = names.first || log.metric
+          end
+
+          hash[:metric] = log.duration ? gauge_name : counter_name
+        end
+      end
+
+      # Date & time
+      def time
+        # 1 second resolution, represented as ms.
+        hash[:timestamp] = log.time.to_i * 1000
+      end
+
+      # Value of this metric
+      def value
+        hash[:value] = log.metric_amount || log.duration || 1
+      end
+
+      # Dimensions for this metric
+      def format_dimensions
+        h = (hash[:dimensions] ||= {})
+        if log.dimensions
+          log.dimensions.each_pair do |name, value|
+            value   = value.to_s
+            h[name] = value unless value.empty?
+          end
+        else
+          log.named_tags.each_pair do |name, value|
+            name  = name.to_sym
+            value = value.to_s
+            next if value.empty?
+            h[name] = value if dimensions && dimensions.include?(name)
+          end
+        end
+        h[:host]        = logger.host if log_host && logger.host
+        h[:application] = logger.application if log_application && logger.application
+        h[:environment] = environment if environment
+      end
+
+      # Returns [Hash] log message in Signalfx format.
+      def call(log, logger)
+        self.hash   = {}
+        self.log    = log
+        self.logger = logger
+
+        metric; time; value; format_dimensions
+
+        # gauge, counter, or cumulative_counter
+        data = {}
+        if log.duration
+          data[:gauge] = [hash]
+          # Also send a count metric whenever it is a gauge so that it can be counted.
+          unless log.dimensions
+            count_hash          = hash.dup
+            count_hash[:value]  = log.metric_amount || 1
+            count_hash[:metric] = counter_name
+            data[:counter]      = [count_hash]
+          end
+        else
+          data[:counter] = [hash]
+        end
+
+        data.to_json
+      end
+
+      # Returns [Hash] a batch of log messages.
+      # Signalfx has a minimum resolution of 1 second.
+      # Metrics of the same type, time (second), and dimensions can be aggregated together.
+      def batch(logs, logger)
+        self.logger = logger
+
+        data = {}
+        logs.each do |log|
+          self.hash = {}
+          self.log  = log
+
+          metric; time; value; format_dimensions
+
+          if log.duration
+            gauges = (data[:gauge] ||= [])
+            add_gauge(gauges, hash)
+
+            # Also send a count metric whenever it is a gauge so that it can be counted.
+            unless log.dimensions
+              count_hash          = hash.dup
+              count_hash[:value]  = log.metric_amount || 1
+              count_hash[:metric] = counter_name
+              counters            = (data[:counter] ||= [])
+              add_counter(counters, count_hash)
+            end
+          else
+            counters = (data[:counter] ||= [])
+            add_counter(counters, hash)
+          end
+        end
+
+        data.to_json
+      end
+
+      private
+
+      def add_gauge(gauges, metric)
+        gauges << metric
+      end
+
+      # Sum counters with the same time (second), name, and dimensions.
+      def add_counter(counters, metric)
+        existing = find_match(counters, metric)
+        existing ? existing[:value] += metric[:value] : counters << metric
+      end
+
+      # Find Metrics with the same timestamp, metric name, and dimensions.
+      def find_match(list, metric)
+        list.find do |item|
+          (item[:timestamp] == metric[:timestamp]) &&
+            (item[:metric] == metric[:metric]) &&
+            (item[:dimensions] == metric[:dimensions])
+        end
+      end
+
+    end
+  end
+end
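
Sketch of the JSON produced by call for a timed metric, following the code above; all values are illustrative:

# log.metric = '/user/login', log.duration = 12.3, no explicit dimensions:
# {
#   "gauge":   [{"metric": "Application.average", "timestamp": 1504220400000, "value": 12.3,
#                "dimensions": {"class": "user", "action": "login", "host": "host-1",
#                               "application": "my_app", "environment": "development"}}],
#   "counter": [{"metric": "Application.counter", "timestamp": 1504220400000, "value": 1,
#                "dimensions": {"class": "user", "action": "login", "host": "host-1",
#                               "application": "my_app", "environment": "development"}}]
# }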