semantic_logger 4.3.1 → 4.4.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (62) hide show
  1. checksums.yaml +4 -4
  2. data/lib/semantic_logger.rb +7 -1
  3. data/lib/semantic_logger/appender.rb +3 -0
  4. data/lib/semantic_logger/appender/async.rb +29 -10
  5. data/lib/semantic_logger/appender/rabbitmq.rb +120 -0
  6. data/lib/semantic_logger/appenders.rb +89 -0
  7. data/lib/semantic_logger/base.rb +3 -3
  8. data/lib/semantic_logger/concerns/compatibility.rb +2 -2
  9. data/lib/semantic_logger/formatters.rb +1 -0
  10. data/lib/semantic_logger/formatters/base.rb +28 -6
  11. data/lib/semantic_logger/formatters/color.rb +4 -3
  12. data/lib/semantic_logger/formatters/fluentd.rb +37 -0
  13. data/lib/semantic_logger/formatters/json.rb +4 -2
  14. data/lib/semantic_logger/formatters/raw.rb +2 -2
  15. data/lib/semantic_logger/formatters/signalfx.rb +4 -3
  16. data/lib/semantic_logger/levels.rb +38 -0
  17. data/lib/semantic_logger/log.rb +11 -6
  18. data/lib/semantic_logger/loggable.rb +1 -1
  19. data/lib/semantic_logger/logger.rb +43 -1
  20. data/lib/semantic_logger/processor.rb +10 -130
  21. data/lib/semantic_logger/reporters/minitest.rb +49 -0
  22. data/lib/semantic_logger/semantic_logger.rb +40 -75
  23. data/lib/semantic_logger/version.rb +1 -1
  24. metadata +9 -81
  25. data/test/appender/async_batch_test.rb +0 -60
  26. data/test/appender/async_test.rb +0 -44
  27. data/test/appender/bugsnag_test.rb +0 -81
  28. data/test/appender/elasticsearch_http_test.rb +0 -74
  29. data/test/appender/elasticsearch_test.rb +0 -248
  30. data/test/appender/file_test.rb +0 -120
  31. data/test/appender/graylog_test.rb +0 -82
  32. data/test/appender/honeybadger_test.rb +0 -45
  33. data/test/appender/http_test.rb +0 -63
  34. data/test/appender/kafka_test.rb +0 -35
  35. data/test/appender/mongodb_test.rb +0 -104
  36. data/test/appender/new_relic_test.rb +0 -80
  37. data/test/appender/newrelic_rpm.rb +0 -14
  38. data/test/appender/sentry_test.rb +0 -47
  39. data/test/appender/splunk_http_test.rb +0 -79
  40. data/test/appender/splunk_test.rb +0 -83
  41. data/test/appender/syslog_test.rb +0 -61
  42. data/test/appender/tcp_test.rb +0 -66
  43. data/test/appender/udp_test.rb +0 -59
  44. data/test/appender/wrapper_test.rb +0 -95
  45. data/test/concerns/compatibility_test.rb +0 -117
  46. data/test/debug_as_trace_logger_test.rb +0 -81
  47. data/test/formatters/color_test.rb +0 -153
  48. data/test/formatters/default_test.rb +0 -175
  49. data/test/formatters/one_line_test.rb +0 -60
  50. data/test/formatters/signalfx_test.rb +0 -197
  51. data/test/formatters_test.rb +0 -36
  52. data/test/in_memory_appender.rb +0 -8
  53. data/test/in_memory_appender_helper.rb +0 -43
  54. data/test/in_memory_batch_appender.rb +0 -8
  55. data/test/in_memory_metrics_appender.rb +0 -13
  56. data/test/loggable_test.rb +0 -103
  57. data/test/logger_test.rb +0 -334
  58. data/test/measure_test.rb +0 -346
  59. data/test/metric/new_relic_test.rb +0 -35
  60. data/test/metric/signalfx_test.rb +0 -77
  61. data/test/semantic_logger_test.rb +0 -303
  62. data/test/test_helper.rb +0 -31
@@ -60,12 +60,13 @@ module SemanticLogger
60
60
  # ColorMaps each of the log levels to a color
61
61
  def initialize(ap: {multiline: false},
62
62
  color_map: ColorMap.new,
63
- time_format: TIME_FORMAT,
63
+ time_format: nil,
64
64
  log_host: false,
65
- log_application: false)
65
+ log_application: false,
66
+ precision: PRECISION)
66
67
  @ai_options = ap
67
68
  @color_map = color_map.is_a?(ColorMap) ? color_map : ColorMap.new(color_map)
68
- super(time_format: time_format, log_host: log_host, log_application: log_application)
69
+ super(time_format: time_format, log_host: log_host, log_application: log_application, precision: precision)
69
70
  end
70
71
 
71
72
  def level
@@ -0,0 +1,37 @@
1
+ require 'json'
2
+
3
+ module SemanticLogger
4
+ module Formatters
5
+ # Fluentd is similar to SemanticLogger::Formatters::Json but with log levels that are recognized
6
+ # by kubernetes fluentd.
7
+ class Fluentd < Json
8
+ attr_reader :need_process_info
9
+
10
+ def initialize(log_host: true, log_application: true, need_process_info: false)
11
+ @need_process_info = need_process_info
12
+ super(log_host: log_host, log_application: log_application, time_key: 'time', time_format: :rfc_3339)
13
+ end
14
+
15
+ def severity
16
+ hash['severity'] = log.level
17
+ hash['severity_index'] = log.level_index
18
+ end
19
+
20
+ def process_info
21
+ # Ignore fields: pid, thread, file and line by default
22
+ super() if need_process_info
23
+ end
24
+
25
+ def call(log, logger)
26
+ self.hash = {}
27
+ self.log = log
28
+ self.logger = logger
29
+
30
+ host; application; time; severity; process_info; duration; tags; named_tags; name; message; payload; exception; metric
31
+ hash
32
+
33
+ hash.to_json
34
+ end
35
+ end
36
+ end
37
+ end
@@ -3,8 +3,10 @@ module SemanticLogger
3
3
  module Formatters
4
4
  class Json < Raw
5
5
  # Default JSON time format is ISO8601
6
- def initialize(time_format: :iso_8601, log_host: true, log_application: true, time_key: :timestamp)
7
- super(time_format: time_format, log_host: log_host, log_application: log_application, time_key: time_key)
6
+ def initialize(time_format: :iso_8601, log_host: true, log_application: true, time_key: :timestamp,
7
+ precision: PRECISION)
8
+ super(time_format: time_format, log_host: log_host, log_application: log_application, time_key: time_key,
9
+ precision: precision)
8
10
  end
9
11
 
10
12
  # Returns log messages in JSON format
@@ -6,9 +6,9 @@ module SemanticLogger
6
6
  attr_accessor :hash, :log, :logger, :time_key
7
7
 
8
8
  # By default Raw formatter does not reformat the time
9
- def initialize(time_format: :none, log_host: true, log_application: true, time_key: :time)
9
+ def initialize(time_format: :none, log_host: true, log_application: true, time_key: :time, precision: PRECISION)
10
10
  @time_key = time_key
11
- super(time_format: time_format, log_host: log_host, log_application: log_application)
11
+ super(time_format: time_format, log_host: log_host, log_application: log_application, precision: precision)
12
12
  end
13
13
 
14
14
  # Host name
@@ -10,7 +10,8 @@ module SemanticLogger
10
10
  log_application: true,
11
11
  gauge_name: 'Application.average',
12
12
  counter_name: 'Application.counter',
13
- environment: true)
13
+ environment: true,
14
+ precision: PRECISION)
14
15
 
15
16
  @token = token
16
17
  @dimensions = dimensions.map(&:to_sym) if dimensions
@@ -18,12 +19,12 @@ module SemanticLogger
18
19
  @counter_name = counter_name
19
20
 
20
21
  if environment == true
21
- @environment = defined?(Rails) ? Rails.env : ENV['RAILS_ENV'] || ENV['RACK_ENV'] || 'development'
22
+ @environment = defined?(Rails) && Rails.respond_to?(:env) ? Rails.env : ENV['RAILS_ENV'] || ENV['RACK_ENV'] || 'development'
22
23
  elsif environment
23
24
  @environment = environment
24
25
  end
25
26
 
26
- super(time_format: :ms, log_host: log_host, log_application: log_application)
27
+ super(time_format: :ms, log_host: log_host, log_application: log_application, precision: precision)
27
28
  end
28
29
 
29
30
  # Create SignalFx friendly metric.
@@ -0,0 +1,38 @@
1
+ module SemanticLogger
2
+ module Levels
3
+ # Logging levels in order of most detailed to most severe
4
+ LEVELS = %i[trace debug info warn error fatal].freeze
5
+
6
+ # Internal method to return the log level as an internal index
7
+ # Also supports mapping the ::Logger levels to SemanticLogger levels
8
+ def self.index(level)
9
+ return if level.nil?
10
+
11
+ index =
12
+ if level.is_a?(Symbol)
13
+ LEVELS.index(level)
14
+ elsif level.is_a?(String)
15
+ level = level.downcase.to_sym
16
+ LEVELS.index(level)
17
+ elsif level.is_a?(Integer) && defined?(::Logger::Severity)
18
+ # Mapping of Rails and Ruby Logger levels to SemanticLogger levels
19
+ @map_levels ||= begin
20
+ levels = []
21
+ ::Logger::Severity.constants.each do |constant|
22
+ levels[::Logger::Severity.const_get(constant)] =
23
+ LEVELS.find_index(constant.downcase.to_sym) || LEVELS.find_index(:error)
24
+ end
25
+ levels
26
+ end
27
+ @map_levels[level]
28
+ end
29
+ raise "Invalid level:#{level.inspect} being requested. Must be one of #{LEVELS.inspect}" unless index
30
+ index
31
+ end
32
+
33
+ # Returns the symbolic level for the supplied level index
34
+ def self.level(level_index)
35
+ LEVELS[level_index]
36
+ end
37
+ end
38
+ end
@@ -59,7 +59,7 @@ module SemanticLogger
59
59
  @time = Time.now
60
60
  @tags = SemanticLogger.tags
61
61
  @named_tags = SemanticLogger.named_tags
62
- @level_index = index.nil? ? SemanticLogger.level_to_index(level) : index
62
+ @level_index = index.nil? ? Levels.index(level) : index
63
63
  end
64
64
 
65
65
  # Assign named arguments to this log entry, supplying defaults where applicable
@@ -87,7 +87,12 @@ module SemanticLogger
87
87
  end
88
88
 
89
89
  self.message = message
90
- self.payload = payload
90
+ if payload && payload.is_a?(Hash)
91
+ self.payload = payload
92
+ elsif payload
93
+ self.message = message.nil? ? payload.to_s : "#{message} -- #{payload}"
94
+ self.payload = nil
95
+ end
91
96
 
92
97
  if exception
93
98
  case log_exception
@@ -104,7 +109,7 @@ module SemanticLogger
104
109
  # On exception change the log level
105
110
  if on_exception_level
106
111
  self.level = on_exception_level
107
- self.level_index = SemanticLogger.level_to_index(level)
112
+ self.level_index = Levels.index(level)
108
113
  end
109
114
  end
110
115
 
@@ -120,7 +125,6 @@ module SemanticLogger
120
125
  self.dimensions = dimensions
121
126
  end
122
127
 
123
- self.payload = payload if payload&.size&.positive?
124
128
  true
125
129
  end
126
130
 
@@ -140,8 +144,9 @@ module SemanticLogger
140
144
  elsif exception.nil? && payload && payload.respond_to?(:backtrace) && payload.respond_to?(:message)
141
145
  exception = payload
142
146
  payload = nil
143
- elsif payload.is_a?(String)
147
+ elsif payload && !payload.is_a?(Hash)
144
148
  message = message.nil? ? payload : "#{message} -- #{payload}"
149
+ payload = nil
145
150
  end
146
151
 
147
152
  # Add result of block as message or payload if not nil
@@ -279,7 +284,7 @@ module SemanticLogger
279
284
 
280
285
  # DEPRECATED
281
286
  def formatted_time
282
- time.strftime(Formatters::Base::TIME_FORMAT)
287
+ time.strftime(Formatters::Base.build_time_format)
283
288
  end
284
289
 
285
290
  DeprecatedLogger = Struct.new(:host, :application)
@@ -82,7 +82,7 @@ module SemanticLogger
82
82
  # return false
83
83
  # end
84
84
 
85
- index = SemanticLogger.level_to_index(level)
85
+ index = Levels.index(level)
86
86
 
87
87
  logger_measure_module.module_eval(<<~MEASURE_METHOD, __FILE__, __LINE__ + 1)
88
88
  def #{method_name}(*args, &block)
@@ -5,6 +5,25 @@ module SemanticLogger
5
5
  class Logger < Base
6
6
  include SemanticLogger::Concerns::Compatibility
7
7
 
8
+ def self.subscribe(object = nil, &block)
9
+ subscriber = block || object
10
+
11
+ unless subscriber.is_a?(Proc) || subscriber.respond_to?(:call)
12
+ raise('When supplying an on_log subscriber, it must support the #call method')
13
+ end
14
+
15
+ subscribers = (@subscribers ||= Concurrent::Array.new)
16
+ subscribers << subscriber unless subscribers.include?(subscriber)
17
+ end
18
+
19
+ class << self
20
+ attr_reader :subscribers
21
+ end
22
+
23
+ def self.processor
24
+ @processor
25
+ end
26
+
8
27
  # Returns a Logger instance
9
28
  #
10
29
  # Return the logger for a specific class, supports class specific log levels
@@ -32,10 +51,33 @@ module SemanticLogger
32
51
 
33
52
  # Place log request on the queue for the Appender thread to write to each
34
53
  # appender in the order that they were registered
54
+ #
55
+ # Subscribers are called inline before handing off to the queue so that
56
+ # they can capture additional context information as needed.
35
57
  def log(log, message = nil, progname = nil, &block)
36
58
  # Compatibility with ::Logger
37
59
  return add(log, message, progname, &block) unless log.is_a?(SemanticLogger::Log)
38
- Processor << log
60
+
61
+ Logger.call_subscribers(log)
62
+
63
+ Logger.processor.log(log)
64
+ end
65
+
66
+ private
67
+
68
+ @processor = Processor.new
69
+ @subscribers = nil
70
+
71
+ def self.call_subscribers(log)
72
+ return unless @subscribers
73
+
74
+ @subscribers.each do |subscriber|
75
+ begin
76
+ subscriber.call(log)
77
+ rescue Exception => exc
78
+ self.class.processor.logger.error('Exception calling :on_log subscriber', exc)
79
+ end
80
+ end
39
81
  end
40
82
  end
41
83
  end
@@ -1,61 +1,6 @@
1
1
  module SemanticLogger
2
2
  # Thread that submits and processes log requests
3
- class Processor
4
- # Returns [Appender::Async => SemanticLogger::Processor] the global instance of this processor
5
- # wrapped in the Async proxy so that all logging is asynchronous in a thread of its own.
6
- #
7
- # More than one instance can be created if needed.
8
- def self.instance
9
- @processor
10
- end
11
-
12
- # Start the appender thread
13
- def self.start
14
- return false if instance.active?
15
- instance.thread
16
- true
17
- end
18
-
19
- # Returns true if the appender_thread is active
20
- def self.active?
21
- instance.alive?
22
- end
23
-
24
- # Returns [Integer] the number of log entries waiting to be written to the appenders.
25
- #
26
- # When this number grows it is because the logging appender thread is not
27
- # able to write to the appenders fast enough. Either reduce the amount of
28
- # logging, increase the log level, reduce the number of appenders, or
29
- # look into speeding up the appenders themselves
30
- def self.queue_size
31
- instance.queue.size
32
- end
33
-
34
- # Add log request to the queue for processing.
35
- # Log subscribers are called inline before handing off to the queue.
36
- def self.<<(log)
37
- instance.appender.send(:call_log_subscribers, log)
38
- instance.log(log)
39
- end
40
-
41
- # Returns the check_interval which is the number of messages between checks
42
- # to determine if the appender thread is falling behind.
43
- def self.lag_check_interval
44
- instance.lag_check_interval
45
- end
46
-
47
- # Set the check_interval which is the number of messages between checks
48
- # to determine if the appender thread is falling behind.
49
- def self.lag_check_interval=(lag_check_interval)
50
- instance.lag_check_interval = lag_check_interval
51
- end
52
-
53
- # Returns the amount of time in seconds
54
- # to determine if the appender thread is falling behind.
55
- def self.lag_threshold_s
56
- instance.lag_threshold_s
57
- end
58
-
3
+ class Processor < Appender::Async
59
4
  # Allow the internal logger to be overridden from its default of STDERR
60
5
  # Can be replaced with another Ruby logger or Rails logger, but never to
61
6
  # SemanticLogger::Logger itself since it is for reporting problems
@@ -75,83 +20,18 @@ module SemanticLogger
75
20
  end
76
21
  end
77
22
 
78
- attr_accessor :logger, :log_subscribers
23
+ attr_reader :appenders
79
24
 
80
- def initialize
81
- @log_subscribers = nil
82
- @logger = self.class.logger.dup
83
- @logger.name = self.class.name
25
+ def initialize(max_queue_size: -1)
26
+ @appenders = Appenders.new(self.class.logger.dup)
27
+ super(appender: @appenders, max_queue_size: max_queue_size)
84
28
  end
85
29
 
86
- def on_log(object = nil, &block)
87
- subscriber = block || object
88
-
89
- unless subscriber.is_a?(Proc) || subscriber.respond_to?(:call)
90
- raise('When supplying an on_log subscriber, it must support the #call method')
91
- end
92
-
93
- subscribers = (@log_subscribers ||= Concurrent::Array.new)
94
- subscribers << subscriber unless subscribers.include?(subscriber)
95
- end
96
-
97
- def log(log)
98
- SemanticLogger.appenders.each do |appender|
99
- begin
100
- appender.log(log) if appender.should_log?(log)
101
- rescue Exception => exc
102
- logger.error "Appender thread: Failed to log to appender: #{appender.inspect}", exc
103
- end
104
- end
105
- end
106
-
107
- def flush
108
- SemanticLogger.appenders.each do |appender|
109
- begin
110
- logger.trace "Appender thread: Flushing appender: #{appender.name}"
111
- appender.flush
112
- rescue Exception => exc
113
- logger.error "Appender thread: Failed to flush appender: #{appender.inspect}", exc
114
- end
115
- end
116
- logger.trace 'Appender thread: All appenders flushed'
117
- end
118
-
119
- def close
120
- SemanticLogger.appenders.each do |appender|
121
- begin
122
- logger.trace "Appender thread: Closing appender: #{appender.name}"
123
- appender.flush
124
- appender.close
125
- SemanticLogger.remove_appender(appender)
126
- rescue Exception => exc
127
- logger.error "Appender thread: Failed to close appender: #{appender.inspect}", exc
128
- end
129
- end
130
- logger.trace 'Appender thread: All appenders closed and removed from appender list'
131
- end
132
-
133
- private
134
-
135
- def self.create_instance
136
- SemanticLogger::Appender::Async.new(appender: new, max_queue_size: -1)
137
- end
138
-
139
- private_class_method :create_instance
140
-
141
- @processor = create_instance
142
-
143
- # Call on_log subscribers
144
- def call_log_subscribers(log)
145
- # If no subscribers registered, then return immediately
146
- return unless log_subscribers
147
-
148
- log_subscribers.each do |subscriber|
149
- begin
150
- subscriber.call(log)
151
- rescue Exception => exc
152
- logger.error 'Exception calling :on_log subscriber', exc
153
- end
154
- end
30
+ # Start the appender thread
31
+ def start
32
+ return false if active?
33
+ thread
34
+ true
155
35
  end
156
36
  end
157
37
  end
@@ -0,0 +1,49 @@
1
+ module SemanticLogger
2
+ module Reporters
3
+ # When using Minitest to run tests, log start and end messages for every test to the log file.
4
+ # On completion the time it took to run the test is also logged.
5
+ #
6
+ # For example, add the following lines to `test_helper.rb`:
7
+ # reporters = [
8
+ # Minitest::Reporters::ProgressReporter.new,
9
+ # SemanticLogger::Reporters::Minitest.new
10
+ # ]
11
+ # Minitest::Reporters.use!(reporters)
12
+ #
13
+ # Log entries similar to the following should show up in the log file:
14
+ #
15
+ # 2019-01-30 14:41:21.590383 I [9989:70268303433760] (9.958ms) Minitest -- Passed: test_0002_infinite timeout
16
+ # 2019-01-30 14:41:21.590951 I [9989:70268303433760] Minitest -- Started: test_0002_must return the servers in the supplied order
17
+ # 2019-01-30 14:41:21.592012 I [9989:70268303433760] (1.019ms) Minitest -- Passed: test_0002_must return the servers in the supplied order
18
+ # 2019-01-30 14:41:21.592054 I [9989:70268303433760] Minitest -- Started: test_0003_must handle an empty list of servers
19
+ # 2019-01-30 14:41:21.592094 I [9989:70268303433760] (0.014ms) Minitest -- Passed: test_0003_must handle an empty list of servers
20
+ # 2019-01-30 14:41:21.592118 I [9989:70268303433760] Minitest -- Started: test_0001_must return one server, once
21
+ # 2019-01-30 14:41:21.592510 I [9989:70268303433760] (0.361ms) Minitest -- Passed: test_0001_must return one server, once
22
+ #
23
+ # Note:
24
+ # - To use `Minitest::Reporters::ProgressReporter` the gem `minitest-reporters` is required, as well as the
25
+ # following line in `test_helper.rb`:
26
+ # `require 'minitest/reporters'`
27
+ class Minitest < ::Minitest::AbstractReporter
28
+ include SemanticLogger::Loggable
29
+
30
+ logger.name = 'Minitest'
31
+
32
+ attr_accessor :io
33
+
34
+ def before_test(test)
35
+ logger.info('START', name: test.name)
36
+ end
37
+
38
+ def after_test(test)
39
+ if test.error?
40
+ logger.benchmark_error('FAIL', payload: {name: test.name}, duration: test.time * 1_000, metric: 'minitest/fail')
41
+ elsif test.skipped?
42
+ logger.benchmark_warn('SKIP', payload: {name: test.name}, duration: test.time * 1_000, metric: 'minitest/skip')
43
+ else
44
+ logger.benchmark_info('PASS', payload: {name: test.name}, duration: test.time * 1_000, metric: 'minitest/pass')
45
+ end
46
+ end
47
+ end
48
+ end
49
+ end