semantic_logger 4.2.0 → 4.2.1

Files changed (88)
  1. checksums.yaml +4 -4
  2. data/README.md +3 -3
  3. data/Rakefile +1 -1
  4. data/lib/semantic_logger/ansi_colors.rb +11 -12
  5. data/lib/semantic_logger/appender.rb +4 -5
  6. data/lib/semantic_logger/appender/async.rb +24 -16
  7. data/lib/semantic_logger/appender/async_batch.rb +1 -4
  8. data/lib/semantic_logger/appender/bugsnag.rb +67 -63
  9. data/lib/semantic_logger/appender/elasticsearch.rb +154 -157
  10. data/lib/semantic_logger/appender/elasticsearch_http.rb +59 -55
  11. data/lib/semantic_logger/appender/file.rb +1 -3
  12. data/lib/semantic_logger/appender/graylog.rb +114 -110
  13. data/lib/semantic_logger/appender/honeybadger.rb +54 -51
  14. data/lib/semantic_logger/appender/http.rb +194 -190
  15. data/lib/semantic_logger/appender/kafka.rb +152 -149
  16. data/lib/semantic_logger/appender/mongodb.rb +3 -3
  17. data/lib/semantic_logger/appender/new_relic.rb +52 -49
  18. data/lib/semantic_logger/appender/sentry.rb +59 -54
  19. data/lib/semantic_logger/appender/splunk.rb +108 -103
  20. data/lib/semantic_logger/appender/splunk_http.rb +82 -79
  21. data/lib/semantic_logger/appender/syslog.rb +4 -5
  22. data/lib/semantic_logger/appender/tcp.rb +8 -29
  23. data/lib/semantic_logger/appender/udp.rb +2 -3
  24. data/lib/semantic_logger/appender/wrapper.rb +2 -2
  25. data/lib/semantic_logger/base.rb +18 -16
  26. data/lib/semantic_logger/concerns/compatibility.rb +0 -1
  27. data/lib/semantic_logger/core_ext/thread.rb +0 -1
  28. data/lib/semantic_logger/formatters.rb +3 -5
  29. data/lib/semantic_logger/formatters/base.rb +2 -3
  30. data/lib/semantic_logger/formatters/color.rb +29 -12
  31. data/lib/semantic_logger/formatters/default.rb +10 -10
  32. data/lib/semantic_logger/formatters/json.rb +0 -2
  33. data/lib/semantic_logger/formatters/one_line.rb +2 -2
  34. data/lib/semantic_logger/formatters/raw.rb +7 -10
  35. data/lib/semantic_logger/formatters/signalfx.rb +3 -5
  36. data/lib/semantic_logger/formatters/syslog.rb +2 -3
  37. data/lib/semantic_logger/formatters/syslog_cee.rb +2 -3
  38. data/lib/semantic_logger/jruby/garbage_collection_logger.rb +8 -5
  39. data/lib/semantic_logger/log.rb +17 -17
  40. data/lib/semantic_logger/loggable.rb +6 -9
  41. data/lib/semantic_logger/logger.rb +0 -1
  42. data/lib/semantic_logger/metric/new_relic.rb +58 -55
  43. data/lib/semantic_logger/metric/signalfx.rb +108 -106
  44. data/lib/semantic_logger/metric/statsd.rb +2 -3
  45. data/lib/semantic_logger/processor.rb +9 -9
  46. data/lib/semantic_logger/semantic_logger.rb +50 -30
  47. data/lib/semantic_logger/subscriber.rb +0 -1
  48. data/lib/semantic_logger/utils.rb +37 -37
  49. data/lib/semantic_logger/version.rb +2 -2
  50. data/test/appender/async_batch_test.rb +0 -1
  51. data/test/appender/async_test.rb +0 -1
  52. data/test/appender/bugsnag_test.rb +7 -8
  53. data/test/appender/elasticsearch_http_test.rb +5 -6
  54. data/test/appender/elasticsearch_test.rb +14 -10
  55. data/test/appender/file_test.rb +5 -6
  56. data/test/appender/graylog_test.rb +8 -8
  57. data/test/appender/honeybadger_test.rb +6 -7
  58. data/test/appender/http_test.rb +4 -5
  59. data/test/appender/kafka_test.rb +5 -6
  60. data/test/appender/mongodb_test.rb +11 -13
  61. data/test/appender/new_relic_test.rb +8 -9
  62. data/test/appender/newrelic_rpm.rb +1 -1
  63. data/test/appender/sentry_test.rb +7 -8
  64. data/test/appender/splunk_http_test.rb +4 -4
  65. data/test/appender/splunk_test.rb +1 -3
  66. data/test/appender/syslog_test.rb +3 -5
  67. data/test/appender/tcp_test.rb +4 -5
  68. data/test/appender/udp_test.rb +4 -5
  69. data/test/appender/wrapper_test.rb +2 -3
  70. data/test/concerns/compatibility_test.rb +0 -1
  71. data/test/debug_as_trace_logger_test.rb +0 -1
  72. data/test/formatters/color_test.rb +5 -6
  73. data/test/formatters/default_test.rb +16 -17
  74. data/test/formatters/one_line_test.rb +1 -2
  75. data/test/formatters/signalfx_test.rb +8 -11
  76. data/test/formatters_test.rb +3 -3
  77. data/test/in_memory_appender.rb +0 -1
  78. data/test/in_memory_appender_helper.rb +1 -1
  79. data/test/in_memory_batch_appender.rb +0 -1
  80. data/test/in_memory_metrics_appender.rb +0 -1
  81. data/test/loggable_test.rb +2 -3
  82. data/test/logger_test.rb +11 -14
  83. data/test/measure_test.rb +13 -15
  84. data/test/metric/new_relic_test.rb +2 -3
  85. data/test/metric/signalfx_test.rb +4 -5
  86. data/test/semantic_logger_test.rb +28 -3
  87. data/test/test_helper.rb +6 -7
  88. metadata +34 -34
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: b67fae09283837194e489b469dace9f3b5103901
-  data.tar.gz: fc112ededd82585ff40307585e65769ab92601ce
+  metadata.gz: 65aba46ee7052dc3653a61a40fad4d69aed81e97
+  data.tar.gz: 2c7b49cb33e90874a966b9cadbbadf99bc4f4291
 SHA512:
-  metadata.gz: a548305f79d04f28a1c2989132eb148f29da9415a5a3bc06b7879d7d95cabe94627533ad56c50a925137f3dfeca1dec62c43707472ade54eb331fd9f764f91a4
-  data.tar.gz: 7bf65721bfb19e865601ef929774877ab514607d86f150e86bb59be969cbecf080d2330e83c8ae2ba4aa2c44ea9b428773e5a791e281a142aeb20b4028036492
+  metadata.gz: a2ef7fb0d78bdb3ecac1ff6d8ef6cef1bb4797a7a0ac55b2cfab4f057d8f7c2c16519b37863a6fc1d8e728277765c678824d8234e3cfdf733386cf7f5ddeb073
+  data.tar.gz: e8df29aebb5e16b47fe87d530daf5f822c436ab18ddba47d682799ddb5c048041ce63841f1c6430987ca89e196f99853c48ecb38c8a9b2a0b65b8e3fbfae1d7a
data/README.md CHANGED
@@ -1,9 +1,9 @@
-# semantic_logger
+# Semantic Logger
 
 [![Gem Version](https://img.shields.io/gem/v/semantic_logger.svg)](https://rubygems.org/gems/semantic_logger) [![Build Status](https://travis-ci.org/rocketjob/semantic_logger.svg?branch=master)](https://travis-ci.org/rocketjob/semantic_logger) [![Downloads](https://img.shields.io/gem/dt/semantic_logger.svg)](https://rubygems.org/gems/semantic_logger) [![License](https://img.shields.io/badge/license-Apache%202.0-brightgreen.svg)](http://opensource.org/licenses/Apache-2.0) ![](https://img.shields.io/badge/status-Production%20Ready-blue.svg) [![Gitter chat](https://img.shields.io/badge/IRC%20(gitter)-Support-brightgreen.svg)](https://gitter.im/rocketjob/support)
 
-Low latency, high throughput, enterprise-scale logging system for Ruby.
+Semantic Logger is a feature rich logging framework, and replacement for existing Ruby & Rails loggers.
 
-* http://github.com/rocketjob/semantic_logger
+* https://rocketjob.github.io/semantic_logger/
 
 ## Documentation
 
data/Rakefile CHANGED
@@ -1,7 +1,7 @@
 require 'rake/clean'
 require 'rake/testtask'
 
-$LOAD_PATH.unshift File.expand_path("../lib", __FILE__)
+$LOAD_PATH.unshift File.expand_path('lib', __dir__)
 require 'semantic_logger/version'
 
 task :gem do
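The `$LOAD_PATH` tweak swaps the old `File.expand_path("../lib", __FILE__)` idiom for the `__dir__` form available since Ruby 2.0. A quick sketch (not part of the diff) showing the two are equivalent:

```ruby
# __FILE__ is the file's own path, so '../lib' must first climb out of it;
# __dir__ is already the containing directory, so 'lib' resolves directly.
old_style = File.expand_path('../lib', __FILE__)
new_style = File.expand_path('lib', __dir__)
puts old_style == new_style # => true
```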
data/lib/semantic_logger/ansi_colors.rb CHANGED
@@ -1,16 +1,16 @@
 module SemanticLogger
   # Formatting & colors used by optional color formatter
   module AnsiColors
-    CLEAR   = "\e[0m"
-    BOLD    = "\e[1m"
-    BLACK   = "\e[30m"
-    RED     = "\e[31m"
-    GREEN   = "\e[32m"
-    YELLOW  = "\e[33m"
-    BLUE    = "\e[34m"
-    MAGENTA = "\e[35m"
-    CYAN    = "\e[36m"
-    WHITE   = "\e[37m"
+    CLEAR   = "\e[0m".freeze
+    BOLD    = "\e[1m".freeze
+    BLACK   = "\e[30m".freeze
+    RED     = "\e[31m".freeze
+    GREEN   = "\e[32m".freeze
+    YELLOW  = "\e[33m".freeze
+    BLUE    = "\e[34m".freeze
+    MAGENTA = "\e[35m".freeze
+    CYAN    = "\e[36m".freeze
+    WHITE   = "\e[37m".freeze
 
     # DEPRECATED - NOT USED
     LEVEL_MAP = {
@@ -20,7 +20,6 @@ module SemanticLogger
       warn:  BOLD,
       error: RED,
       fatal: RED
-    }
+    }.freeze
   end
-
 end
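Freezing the ANSI constants (and `LEVEL_MAP`) makes accidental in-place mutation raise instead of silently corrupting shared state. A minimal sketch of using the constants, with a hypothetical `colorize` helper that is not part of the gem:

```ruby
require 'semantic_logger'

# Hypothetical helper: wraps text in a color and resets afterwards.
def colorize(text, color)
  "#{color}#{text}#{SemanticLogger::AnsiColors::CLEAR}"
end

puts colorize('FATAL', SemanticLogger::AnsiColors::RED)

begin
  SemanticLogger::AnsiColors::RED << 'oops' # mutation is now an error
rescue RuntimeError => e
  puts "blocked: #{e.class}" # FrozenError on Ruby 2.5+, RuntimeError before
end
```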
data/lib/semantic_logger/appender.rb CHANGED
@@ -61,15 +61,13 @@ module SemanticLogger
       end
     end
 
-    private
-
-    ASYNC_OPTION_KEYS = [:max_queue_size, :lag_threshold_s, :batch_size, :batch_seconds, :lag_check_interval]
+    ASYNC_OPTION_KEYS = %i[max_queue_size lag_threshold_s batch_size batch_seconds lag_check_interval].freeze
 
     # Returns [Subscriber] instance from the supplied options.
     def self.build(options, &block)
       if options[:io] || options[:file_name]
         SemanticLogger::Appender::File.new(options, &block)
-      elsif appender = options.delete(:appender)
+      elsif (appender = options.delete(:appender))
         if appender.is_a?(Symbol)
           SemanticLogger::Utils.constantize_symbol(appender).new(options)
         elsif appender.is_a?(Subscriber)
@@ -77,7 +75,7 @@ module SemanticLogger
         else
           raise(ArgumentError, "Parameter :appender must be either a Symbol or an object derived from SemanticLogger::Subscriber, not: #{appender.inspect}")
         end
-      elsif appender = options.delete(:metric)
+      elsif (appender = options.delete(:metric))
         if appender.is_a?(Symbol)
           SemanticLogger::Utils.constantize_symbol(appender, 'SemanticLogger::Metric').new(options)
         elsif appender.is_a?(Subscriber)
@@ -90,5 +88,6 @@ module SemanticLogger
       end
     end
 
+    private_class_method :build
   end
 end
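For context, `build` remains the single resolution point behind `SemanticLogger.add_appender`: `:io`/`:file_name` produce a file appender, while a Symbol under `:appender` is constantized into a class under `SemanticLogger::Appender`. A hedged usage sketch:

```ruby
require 'semantic_logger'

SemanticLogger.add_appender(io: $stdout)                  # :io        -> Appender::File
SemanticLogger.add_appender(file_name: 'development.log') # :file_name -> Appender::File
# SemanticLogger.add_appender(appender: :bugsnag)         # Symbol     -> Appender::Bugsnag (needs the bugsnag gem)
```

With `private_class_method :build`, calling `SemanticLogger::Appender.build(...)` directly now raises `NoMethodError`; callers must go through `SemanticLogger.add_appender`.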
data/lib/semantic_logger/appender/async.rb CHANGED
@@ -22,9 +22,6 @@ module SemanticLogger
     # Appender proxy to allow an existing appender to run asynchronously in a separate thread.
     #
     # Parameters:
-    #   name: [String]
-    #     Name to use for the log thread and the log name when logging any errors from this appender.
-    #
     #   max_queue_size: [Integer]
     #     The maximum number of log messages to hold on the queue before blocking attempts to add to the queue.
     #     -1: The queue size is uncapped and will never block no matter how long the queue is.
@@ -38,7 +35,6 @@
     #     Number of messages to process before checking for slow logging.
     #     Default: 1,000
     def initialize(appender:,
-                   name: appender.class.name,
                    max_queue_size: 10_000,
                    lag_check_interval: 1_000,
                    lag_threshold_s: 30)
@@ -66,13 +62,13 @@
     #
     # Starts the worker thread if not running.
     def thread
-      return @thread if @thread && @thread.alive?
+      return @thread if @thread&.alive?
      @thread = Thread.new { process }
     end
 
     # Returns true if the worker thread is active
     def active?
-      @thread && @thread.alive?
+      @thread&.alive?
     end
 
     # Add log message for processing.
@@ -99,26 +95,38 @@
       # This thread is designed to never go down unless the main thread terminates
       # or the appender is closed.
       Thread.current.name = logger.name
-      logger.trace "Async: Appender thread active"
+      logger.trace 'Async: Appender thread active'
       begin
         process_messages
       rescue StandardError => exception
         # This block may be called after the file handles have been released by Ruby
-        logger.error('Async: Restarting due to exception', exception) rescue nil
+        begin
+          logger.error('Async: Restarting due to exception', exception)
+        rescue StandardError
+          nil
+        end
         retry
       rescue Exception => exception
         # This block may be called after the file handles have been released by Ruby
-        logger.error('Async: Stopping due to fatal exception', exception) rescue nil
+        begin
+          logger.error('Async: Stopping due to fatal exception', exception)
+        rescue StandardError
+          nil
+        end
       ensure
         @thread = nil
         # This block may be called after the file handles have been released by Ruby
-        logger.trace('Async: Thread has stopped') rescue nil
+        begin
+          logger.trace('Async: Thread has stopped')
+        rescue StandardError
+          nil
+        end
       end
     end
 
     def process_messages
       count = 0
-      while message = queue.pop
+      while (message = queue.pop)
         if message.is_a?(Log)
           appender.log(message)
           count += 1
@@ -150,9 +158,10 @@
     end
 
     def check_lag(log)
-      if (diff = Time.now - log.time) > lag_threshold_s
-        logger.warn "Async: Appender thread has fallen behind by #{diff} seconds with #{queue.size} messages queued up. Consider reducing the log level or changing the appenders"
-      end
+      diff = Time.now - log.time
+      return unless diff > lag_threshold_s
+
+      logger.warn "Async: Appender thread has fallen behind by #{diff} seconds with #{queue.size} messages queued up. Consider reducing the log level or changing the appenders"
     end
 
     # Submit command and wait for reply
@@ -165,7 +174,7 @@
         logger.warn msg
       elsif queue_size > 100
         logger.info msg
-      elsif queue_size > 0
+      elsif queue_size.positive?
         logger.trace msg
       end
 
@@ -173,7 +182,6 @@
       queue << {command: command, reply_queue: reply_queue}
       reply_queue.pop
     end
-
     end
   end
 end
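The worker in `process` drains a queue on a dedicated thread and restarts itself on `StandardError`. A minimal standalone sketch of that drain loop (names are illustrative, not the gem's API):

```ruby
queue  = SizedQueue.new(10_000) # bounded, like max_queue_size: 10_000
worker = Thread.new do
  while (message = queue.pop)
    break if message == :close # control message, akin to the command hashes above
    puts "log: #{message}"     # stand-in for appender.log(message)
  end
end

queue << 'first'
queue << 'second'
queue << :close
worker.join
```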
data/lib/semantic_logger/appender/async_batch.rb CHANGED
@@ -28,7 +28,6 @@ module SemanticLogger
     # * `lag_check_interval` is not applicable to batches, since the first message of every batch
     #   is the oldest and is always checked to see if the lag interval has been exceeded.
     def initialize(appender:,
-                   name: appender.class.name,
                    max_queue_size: 10_000,
                    lag_threshold_s: 30,
                    batch_size: 300,
@@ -39,7 +38,6 @@
       @signal = Concurrent::Event.new
       super(
         appender: appender,
-        name: name,
         max_queue_size: max_queue_size,
         lag_threshold_s: lag_threshold_s
       )
@@ -79,7 +77,7 @@
           process_message(message)
         end
       end
-      appender.batch(logs) if logs.size > 0
+      appender.batch(logs) if logs.size.positive?
       signal.reset unless queue.size >= batch_size
     end
   end
@@ -89,7 +87,6 @@
       signal.set
       super
     end
-
     end
   end
 end
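`AsyncBatch` wakes its worker via a `Concurrent::Event` once `batch_size` messages are queued, rather than polling. A rough sketch of that signaling pattern, assuming the concurrent-ruby gem:

```ruby
require 'concurrent'

queue      = Queue.new
signal     = Concurrent::Event.new
batch_size = 3

producer = Thread.new do
  5.times do |i|
    queue << "message #{i}"
    signal.set if queue.size >= batch_size # wake the worker early
  end
end

signal.wait(5)                    # batch_seconds-style timeout fallback
batch = []
batch << queue.pop until queue.empty?
puts "flushing batch of #{batch.size}"
producer.join
```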
data/lib/semantic_logger/appender/bugsnag.rb CHANGED
@@ -9,77 +9,81 @@ end
 # Example:
 #   SemanticLogger.add_appender(appender: :bugsnag)
 #
-class SemanticLogger::Appender::Bugsnag < SemanticLogger::Subscriber
-  # Create Bugsnag Error / Exception Appender
-  #
-  # Parameters
-  #   level: [:trace | :debug | :info | :warn | :error | :fatal]
-  #     Override the log level for this appender.
-  #     Default: :error
-  #
-  #   formatter: [Object|Proc]
-  #     An instance of a class that implements #call, or a Proc to be used to format
-  #     the output from this appender
-  #     Default: Use the built-in formatter (See: #call)
-  #
-  #   filter: [Regexp|Proc]
-  #     RegExp: Only include log messages where the class name matches the supplied.
-  #     regular expression. All other messages will be ignored.
-  #     Proc: Only include log messages where the supplied Proc returns true
-  #     The Proc must return true or false.
-  def initialize(level: :error, formatter: nil, filter: nil, application: nil, host: nil, &block)
-    raise 'Bugsnag only supports :info, :warn, or :error log levels' unless [:info, :warn, :error, :fatal].include?(level)
+module SemanticLogger
+  module Appender
+    class Bugsnag < SemanticLogger::Subscriber
+      # Create Bugsnag Error / Exception Appender
+      #
+      # Parameters
+      #   level: [:trace | :debug | :info | :warn | :error | :fatal]
+      #     Override the log level for this appender.
+      #     Default: :error
+      #
+      #   formatter: [Object|Proc]
+      #     An instance of a class that implements #call, or a Proc to be used to format
+      #     the output from this appender
+      #     Default: Use the built-in formatter (See: #call)
+      #
+      #   filter: [Regexp|Proc]
+      #     RegExp: Only include log messages where the class name matches the supplied.
+      #     regular expression. All other messages will be ignored.
+      #     Proc: Only include log messages where the supplied Proc returns true
+      #     The Proc must return true or false.
+      def initialize(level: :error, formatter: nil, filter: nil, application: nil, host: nil, &block)
+        raise 'Bugsnag only supports :info, :warn, or :error log levels' unless %i[info warn error fatal].include?(level)
 
-    # Replace the Bugsnag logger so that we can identify its log messages and not forward them to Bugsnag
-    Bugsnag.configure { |config| config.logger = SemanticLogger[Bugsnag] }
+        # Replace the Bugsnag logger so that we can identify its log messages and not forward them to Bugsnag
+        ::Bugsnag.configure { |config| config.logger = SemanticLogger[Bugsnag] }
 
-    super(level: level, formatter: formatter, filter: filter, application: application, host: host, &block)
-  end
+        super(level: level, formatter: formatter, filter: filter, application: application, host: host, &block)
+      end
 
-  # Returns [Hash] of parameters to send to Bugsnag.
-  def call(log, logger)
-    h = SemanticLogger::Formatters::Raw.new.call(log, logger)
-    h[:severity] = log_level(log)
-    h.delete(:message) if h[:exception] && (h[:message] == h[:exception][:message])
-    h.delete(:time)
-    h.delete(:exception)
-    h
-  end
+      # Returns [Hash] of parameters to send to Bugsnag.
+      def call(log, logger)
+        h = SemanticLogger::Formatters::Raw.new.call(log, logger)
+        h[:severity] = log_level(log)
+        h.delete(:message) if h[:exception] && (h[:message] == h[:exception][:message])
+        h.delete(:time)
+        h.delete(:exception)
+        h
+      end
 
-  # Send an error notification to Bugsnag
-  def log(log)
-    # Ignore logs coming from Bugsnag itself
-    return false if log.name == 'Bugsnag'
+      # Send an error notification to Bugsnag
+      def log(log)
+        # Ignore logs coming from Bugsnag itself
+        return false if log.name == 'Bugsnag'
 
-    # Send error messages as Runtime exceptions
-    exception =
-      if log.exception
-        # Manually constructed Exception, without a backtrace.
-        log.exception.set_backtrace(log.backtrace) if !log.exception.backtrace && log.backtrace
-        log.exception
-      else
-        error = RuntimeError.new(log.message)
-        error.set_backtrace(log.backtrace) if log.backtrace
-        error
-      end
+        # Send error messages as Runtime exceptions
+        exception =
+          if log.exception
+            # Manually constructed Exception, without a backtrace.
+            log.exception.set_backtrace(log.backtrace) if !log.exception.backtrace && log.backtrace
+            log.exception
+          else
+            error = RuntimeError.new(log.message)
+            error.set_backtrace(log.backtrace) if log.backtrace
+            error
+          end
 
-    # For more documentation on the Bugsnag.notify method see:
-    # https://bugsnag.com/docs/notifiers/ruby#sending-handled-exceptions
-    Bugsnag.notify(exception, formatter.call(log, self))
-    true
-  end
+        # For more documentation on the Bugsnag.notify method see:
+        # https://bugsnag.com/docs/notifiers/ruby#sending-handled-exceptions
+        ::Bugsnag.notify(exception, formatter.call(log, self))
+        true
+      end
 
-  private
+      private
 
-  # Bugsnag supports: error, warning or info
-  def log_level(log)
-    case log.level
-    when :error, :fatal
-      'error'
-    when :warn
-      'warning'
-    else
-      'info'
+      # Bugsnag supports: error, warning or info
+      def log_level(log)
+        case log.level
+        when :error, :fatal
+          'error'
+        when :warn
+          'warning'
+        else
+          'info'
+        end
+      end
     end
   end
 end
data/lib/semantic_logger/appender/elasticsearch.rb CHANGED
@@ -14,174 +14,171 @@ require 'date'
 #   appender: :elasticsearch,
 #   url:      'http://localhost:9200'
 # )
-class SemanticLogger::Appender::Elasticsearch < SemanticLogger::Subscriber
-  attr_accessor :url, :index, :type, :client, :flush_interval, :timeout_interval, :batch_size, :elasticsearch_args
+module SemanticLogger
+  module Appender
+    class Elasticsearch < SemanticLogger::Subscriber
+      attr_accessor :url, :index, :type, :client, :flush_interval, :timeout_interval, :batch_size, :elasticsearch_args
 
-  # Create Elasticsearch appender over persistent HTTP(S)
-  #
-  # Parameters:
-  #   index: [String]
-  #     Prefix of the index to store the logs in Elasticsearch.
-  #     The final index appends the date so that indexes are used per day.
-  #     I.e. The final index will look like 'semantic_logger-YYYY.MM.DD'
-  #     Default: 'semantic_logger'
-  #
-  #   type: [String]
-  #     Document type to associate with logs when they are written.
-  #     Default: 'log'
-  #
-  #   level: [:trace | :debug | :info | :warn | :error | :fatal]
-  #     Override the log level for this appender.
-  #     Default: SemanticLogger.default_level
-  #
-  #   formatter: [Object|Proc|Symbol|Hash]
-  #     An instance of a class that implements #call, or a Proc to be used to format
-  #     the output from this appender
-  #     Default: :raw_json (See: #call)
-  #
-  #   filter: [Regexp|Proc]
-  #     RegExp: Only include log messages where the class name matches the supplied.
-  #     regular expression. All other messages will be ignored.
-  #     Proc: Only include log messages where the supplied Proc returns true
-  #     The Proc must return true or false.
-  #
-  #   host: [String]
-  #     Name of this host to appear in log messages.
-  #     Default: SemanticLogger.host
-  #
-  #   application: [String]
-  #     Name of this application to appear in log messages.
-  #     Default: SemanticLogger.application
-  #
-  # Elasticsearch Parameters:
-  #   url: [String]
-  #     Fully qualified address to the Elasticsearch service.
-  #     Default: 'http://localhost:9200'
-  #
-  #   hosts: [String|Hash|Array]
-  #     Single host passed as a String or Hash, or multiple hosts
-  #     passed as an Array; `host` or `url` keys are also valid.
-  #     Note:
-  #       :url above is ignored when supplying this option.
-  #
-  #   resurrect_after [Float]
-  #     After how many seconds a dead connection should be tried again.
-  #
-  #   reload_connections [true|false|Integer]
-  #     Reload connections after X requests.
-  #     Default: false
-  #
-  #   randomize_hosts [true|false]
-  #     Shuffle connections on initialization and reload.
-  #     Default: false
-  #
-  #   sniffer_timeout [Integer]
-  #     Timeout for reloading connections in seconds.
-  #     Default: 1
-  #
-  #   retry_on_failure [true|false|Integer]
-  #     Retry X times when request fails before raising and exception.
-  #     Default: false
-  #
-  #   retry_on_status [Array<Number>]
-  #     Retry when specific status codes are returned.
-  #
-  #   reload_on_failure [true|false]
-  #     Reload connections after failure.
-  #     Default: false
-  #
-  #   request_timeout [Integer]
-  #     The request timeout to be passed to transport in options.
-  #
-  #   adapter [Symbol]
-  #     A specific adapter for Faraday (e.g. `:patron`)
-  #
-  #   transport_options [Hash]
-  #     Options to be passed to the `Faraday::Connection` constructor.
-  #
-  #   transport_class [Constant]
-  #     A specific transport class to use, will be initialized by
-  #     the client and passed hosts and all arguments.
-  #
-  #   transport [Object]
-  #     A specific transport instance.
-  #
-  #   serializer_class [Constant]
-  #     A specific serializer class to use, will be initialized by
-  #     the transport and passed the transport instance.
-  #
-  #   selector [Elasticsearch::Transport::Transport::Connections::Selector::Base]
-  #     An instance of selector strategy derived from `Elasticsearch::Transport::Transport::Connections::Selector::Base`.
-  #
-  #   send_get_body_as [String]
-  #     Specify the HTTP method to use for GET requests with a body.
-  #     Default: 'GET'
-  def initialize(url: 'http://localhost:9200',
-                 index: 'semantic_logger',
-                 type: 'log',
-                 level: nil,
-                 formatter: nil,
-                 filter: nil,
-                 application: nil,
-                 host: nil,
-                 **elasticsearch_args,
-                 &block)
+      # Create Elasticsearch appender over persistent HTTP(S)
+      #
+      # Parameters:
+      #   index: [String]
+      #     Prefix of the index to store the logs in Elasticsearch.
+      #     The final index appends the date so that indexes are used per day.
+      #     I.e. The final index will look like 'semantic_logger-YYYY.MM.DD'
+      #     Default: 'semantic_logger'
+      #
+      #   type: [String]
+      #     Document type to associate with logs when they are written.
+      #     Default: 'log'
+      #
+      #   level: [:trace | :debug | :info | :warn | :error | :fatal]
+      #     Override the log level for this appender.
+      #     Default: SemanticLogger.default_level
+      #
+      #   formatter: [Object|Proc|Symbol|Hash]
+      #     An instance of a class that implements #call, or a Proc to be used to format
+      #     the output from this appender
+      #     Default: :raw_json (See: #call)
+      #
+      #   filter: [Regexp|Proc]
+      #     RegExp: Only include log messages where the class name matches the supplied.
+      #     regular expression. All other messages will be ignored.
+      #     Proc: Only include log messages where the supplied Proc returns true
+      #     The Proc must return true or false.
+      #
+      #   host: [String]
+      #     Name of this host to appear in log messages.
+      #     Default: SemanticLogger.host
+      #
+      #   application: [String]
+      #     Name of this application to appear in log messages.
+      #     Default: SemanticLogger.application
+      #
+      # Elasticsearch Parameters:
+      #   url: [String]
+      #     Fully qualified address to the Elasticsearch service.
+      #     Default: 'http://localhost:9200'
+      #
+      #   hosts: [String|Hash|Array]
+      #     Single host passed as a String or Hash, or multiple hosts
+      #     passed as an Array; `host` or `url` keys are also valid.
+      #     Note:
+      #       :url above is ignored when supplying this option.
+      #
+      #   resurrect_after [Float]
+      #     After how many seconds a dead connection should be tried again.
+      #
+      #   reload_connections [true|false|Integer]
+      #     Reload connections after X requests.
+      #     Default: false
+      #
+      #   randomize_hosts [true|false]
+      #     Shuffle connections on initialization and reload.
+      #     Default: false
+      #
+      #   sniffer_timeout [Integer]
+      #     Timeout for reloading connections in seconds.
+      #     Default: 1
+      #
+      #   retry_on_failure [true|false|Integer]
+      #     Retry X times when request fails before raising and exception.
+      #     Default: false
+      #
+      #   retry_on_status [Array<Number>]
+      #     Retry when specific status codes are returned.
+      #
+      #   reload_on_failure [true|false]
+      #     Reload connections after failure.
+      #     Default: false
+      #
+      #   request_timeout [Integer]
+      #     The request timeout to be passed to transport in options.
+      #
+      #   adapter [Symbol]
+      #     A specific adapter for Faraday (e.g. `:patron`)
+      #
+      #   transport_options [Hash]
+      #     Options to be passed to the `Faraday::Connection` constructor.
+      #
+      #   transport_class [Constant]
+      #     A specific transport class to use, will be initialized by
+      #     the client and passed hosts and all arguments.
+      #
+      #   transport [Object]
+      #     A specific transport instance.
+      #
+      #   serializer_class [Constant]
+      #     A specific serializer class to use, will be initialized by
+      #     the transport and passed the transport instance.
+      #
+      #   selector [Elasticsearch::Transport::Transport::Connections::Selector::Base]
+      #     An instance of selector strategy derived from `Elasticsearch::Transport::Transport::Connections::Selector::Base`.
+      #
+      #   send_get_body_as [String]
+      #     Specify the HTTP method to use for GET requests with a body.
+      #     Default: 'GET'
+      def initialize(url: 'http://localhost:9200',
+                     index: 'semantic_logger',
+                     type: 'log',
+                     level: nil,
+                     formatter: nil,
+                     filter: nil,
+                     application: nil,
+                     host: nil,
+                     **elasticsearch_args,
+                     &block)
 
-    @url                         = url
-    @index                       = index
-    @type                        = type
-    @elasticsearch_args          = elasticsearch_args.dup
-    @elasticsearch_args[:url]    = url if url && !elasticsearch_args[:hosts]
-    @elasticsearch_args[:logger] = logger
+        @url                         = url
+        @index                       = index
+        @type                        = type
+        @elasticsearch_args          = elasticsearch_args.dup
+        @elasticsearch_args[:url]    = url if url && !elasticsearch_args[:hosts]
+        @elasticsearch_args[:logger] = logger
 
-    super(level: level, formatter: formatter, filter: filter, application: application, host: host, &block)
-    reopen
-  end
+        super(level: level, formatter: formatter, filter: filter, application: application, host: host, &block)
+        reopen
+      end
 
-  def reopen
-    @client = Elasticsearch::Client.new(@elasticsearch_args)
-  end
+      def reopen
+        @client = ::Elasticsearch::Client.new(@elasticsearch_args)
+      end
 
-  # Log to the index for today
-  def log(log)
-    bulk_payload = formatter.call(log, self)
-    write_to_elasticsearch([bulk_index(log), bulk_payload])
-    true
-  end
+      # Log to the index for today
+      def log(log)
+        bulk_payload = formatter.call(log, self)
+        write_to_elasticsearch([bulk_index(log), bulk_payload])
+        true
+      end
+
+      def batch(logs)
+        messages = []
+        logs.each do |log|
+          messages << bulk_index(log) << formatter.call(log, self)
+        end
 
-  def batch(logs)
-    messages = []
-    day = nil
-    logs.each do |log|
-      # Only write the bulk index once per day per batch. Supports mixed dates in a batch.
-      if log.time.day != day
-        messages << bulk_index(log)
-        day = log.time.day
+        write_to_elasticsearch(messages)
+        true
       end
-      messages << formatter.call(log, self)
-    end
 
-    write_to_elasticsearch(messages)
-    true
-  end
+      private
 
-  private
+      def write_to_elasticsearch(messages)
+        bulk_result = @client.bulk(body: messages)
+        return unless bulk_result['errors']
 
-  def write_to_elasticsearch(messages)
-    bulk_result = @client.bulk(body: messages)
-    if bulk_result["errors"]
-      failed = bulk_result["items"].select { |x| x["status"] != 201 }
-      logger.error("ElasticSearch: Write failed. Messages discarded. : #{failed}")
-    end
-  end
+        failed = bulk_result['items'].reject { |x| x['status'] == 201 }
+        logger.error("ElasticSearch: Write failed. Messages discarded. : #{failed}")
+      end
 
-  def bulk_index(log)
-    daily_index = log.time.strftime("#{index}-%Y.%m.%d")
-    {'index' => {'_index' => daily_index, '_type' => type}}
-  end
+      def bulk_index(log)
+        daily_index = log.time.strftime("#{index}-%Y.%m.%d")
+        {'index' => {'_index' => daily_index, '_type' => type}}
+      end
 
-  def default_formatter
-    SemanticLogger::Formatters::Raw.new(time_format: :iso_8601, time_key: :timestamp)
+      def default_formatter
+        SemanticLogger::Formatters::Raw.new(time_format: :iso_8601, time_key: :timestamp)
+      end
+    end
   end
-
 end
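The simplified `batch` now emits a bulk-index action line before every document (the per-day dedup was dropped), which also makes mixed-day batches trivially correct. The bulk body handed to `client.bulk(body: ...)` ends up shaped like this (illustrative timestamps and messages):

```ruby
# Each action/document pair becomes one bulk operation; the index name is
# derived from each log's own timestamp via strftime, as in bulk_index above.
messages = [
  {'index' => {'_index' => 'semantic_logger-2018.02.14', '_type' => 'log'}},
  {timestamp: '2018-02-14T10:15:00Z', level: :info,  message: 'first entry'},
  {'index' => {'_index' => 'semantic_logger-2018.02.15', '_type' => 'log'}},
  {timestamp: '2018-02-15T09:00:00Z', level: :error, message: 'second entry'}
]
# client.bulk(body: messages)  # Elasticsearch::Client from the elasticsearch gem
```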