fluent-plugin-google-cloud 0.12.10 → 0.12.11

@@ -86,7 +86,7 @@ module Fluent
 
  # For Google plugins, we collect metrics on the params listed here.
  GOOGLE_PLUGIN_PARAMS = {
- 'google_cloud' => %w(
+ 'google_cloud' => %w[
  adjust_invalid_timestamps
  auth_method
  autoformat_stackdriver_trace
@@ -118,8 +118,8 @@ module Fluent
  vm_id
  vm_name
  zone
- ),
- 'detect_exceptions' => %w(
+ ],
+ 'detect_exceptions' => %w[
  languages
  max_bytes
  max_lines
@@ -127,7 +127,7 @@ module Fluent
  multiline_flush_interval
  remove_tag_prefix
  stream
- )
+ ]
  }.freeze
  end
 
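Note: most of the changes in this release read like RuboCop-style cleanups rather than behavior changes. The switch from %w( ) to %w[ ] above only changes the delimiter of the word-array literal; both forms build the same array of strings. A minimal standalone sketch, not taken from the gem:

    # Both literals produce ["adjust_invalid_timestamps", "auth_method"].
    paren_form   = %w(adjust_invalid_timestamps auth_method)
    bracket_form = %w[adjust_invalid_timestamps auth_method]
    puts(paren_form == bracket_form) # => true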
@@ -166,7 +166,8 @@ module Fluent
  @log = $log # rubocop:disable Style/GlobalVars
 
  @log.info(
- 'analyze_config plugin: Started the plugin to analyze configuration.')
+ 'analyze_config plugin: Started the plugin to analyze configuration.'
+ )
  end
 
  def parse_config(path)
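The recurring change in the hunks below is the same layout fix: when a method call spans several lines, the closing parenthesis moves to its own line instead of trailing the last argument. The calls behave identically; a small runnable illustration with a plain Logger (not the plugin's logger):

    require 'logger'

    log = Logger.new($stdout)
    # Trailing-paren and own-line-paren forms are the same call.
    log.info('analyze_config plugin: Started the plugin to analyze configuration.')
    log.info(
      'analyze_config plugin: Started the plugin to analyze configuration.'
    )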
@@ -185,32 +186,32 @@ module Fluent
  end
 
  # Returns a name for identifying plugins we ship by default.
- def default_plugin_name(e)
- case e['@type']
+ def default_plugin_name(conf_element)
+ case conf_element['@type']
  when 'syslog'
- "#{e.name}/syslog/#{e['protocol_type']}"
+ "#{conf_element.name}/syslog/#{conf_element['protocol_type']}"
  when 'tail'
- "#{e.name}/tail/#{File.basename(e['pos_file'], '.pos')}"
+ "#{conf_element.name}/tail/#{File.basename(conf_element['pos_file'], '.pos')}"
  else
- "#{e.name}/#{e['@type']}"
+ "#{conf_element.name}/#{conf_element['@type']}"
  end
  end
 
  # Returns a name for identifying plugins not in our default
  # config. This should not contain arbitrary user-supplied data.
- def custom_plugin_name(e)
- if KNOWN_PLUGINS.key?(e.name) &&
- KNOWN_PLUGINS[e.name].include?(e['@type'])
- "#{e.name}/#{e['@type']}"
+ def custom_plugin_name(conf_element)
+ if KNOWN_PLUGINS.key?(conf_element.name) &&
+ KNOWN_PLUGINS[conf_element.name].include?(conf_element['@type'])
+ "#{conf_element.name}/#{conf_element['@type']}"
  else
- e.name.to_s
+ conf_element.name.to_s
  end
  end
 
- def embedded_ruby?(e)
- (e.arg.include?('#{') ||
- e.any? { |_, v| v.include?('#{') } ||
- e.elements.any? { |ee| embedded_ruby?(ee) })
+ def embedded_ruby?(conf_element)
+ (conf_element.arg.include?('#{') ||
+ conf_element.any? { |_, v| v.include?('#{') } ||
+ conf_element.elements.any? { |e| embedded_ruby?(e) })
  end
 
  def configure(conf)
@@ -223,7 +224,8 @@ module Fluent
  " #{@google_fluentd_config_path}. " \
  'google-fluentd baseline configuration file found at' \
  " #{@google_fluentd_baseline_config_path}. " \
- 'google-fluentd Analyzing configuration.')
+ 'google-fluentd Analyzing configuration.'
+ )
 
  utils = Common::Utils.new(@log)
  platform = utils.detect_platform(true)
@@ -233,68 +235,76 @@ module Fluent
 
  # All metadata parameters must now be set.
  utils.check_required_metadata_variables(
- platform, project_id, zone, vm_id)
+ platform, project_id, zone, vm_id
+ )
 
  # Retrieve monitored resource.
  # Fail over to retrieve monitored resource via the legacy path if we
  # fail to get it from Metadata Agent.
  resource = utils.determine_agent_level_monitored_resource_via_legacy(
- platform, nil, false, vm_id, zone)
+ platform, nil, false, vm_id, zone
+ )
 
  unless Monitoring::MonitoringRegistryFactory.supports_monitoring_type(
- @monitoring_type)
+ @monitoring_type
+ )
  @log.warn(
  "analyze_config plugin: monitoring_type #{@monitoring_type} is " \
- 'unknown; there will be no metrics.')
+ 'unknown; there will be no metrics.'
+ )
  end
 
  @registry = Monitoring::MonitoringRegistryFactory.create(
- @monitoring_type, project_id, resource, @gcm_service_address)
+ @monitoring_type, project_id, resource, @gcm_service_address
+ )
  # Export metrics every 60 seconds.
  timer_execute(:export_config_analysis_metrics, 60) do
- if @registry.respond_to? :update_timestamps
- @registry.update_timestamps(PREFIX)
- end
+ @registry.update_timestamps(PREFIX) if @registry.respond_to? :update_timestamps
  @registry.export
  end
 
  @log.info('analyze_config plugin: Registering counters.')
  enabled_plugins_counter = @registry.counter(
  :enabled_plugins,
- [:plugin_name, :is_default_plugin,
- :has_default_config, :has_ruby_snippet],
+ %i[plugin_name is_default_plugin has_default_config has_ruby_snippet],
  'Enabled plugins',
  PREFIX,
- 'GAUGE')
+ 'GAUGE'
+ )
  @log.info(
  'analyze_config plugin: registered enable_plugins counter. ' \
- "#{enabled_plugins_counter}")
+ "#{enabled_plugins_counter}"
+ )
  plugin_config_counter = @registry.counter(
  :plugin_config,
- [:plugin_name, :param, :is_present, :has_default_config],
+ %i[plugin_name param is_present has_default_config],
  'Configuration parameter usage for plugins relevant to Google Cloud.',
  PREFIX,
- 'GAUGE')
+ 'GAUGE'
+ )
  @log.info('analyze_config plugin: registered plugin_config counter. ' \
  "#{plugin_config_counter}")
  config_bool_values_counter = @registry.counter(
  :config_bool_values,
- [:plugin_name, :param, :value],
+ %i[plugin_name param value],
  'Values for bool parameters in Google Cloud plugins',
  PREFIX,
- 'GAUGE')
+ 'GAUGE'
+ )
  @log.info('analyze_config plugin: registered config_bool_values ' \
  "counter. #{config_bool_values_counter}")
 
  config = parse_config(@google_fluentd_config_path)
  @log.debug(
  'analyze_config plugin: successfully parsed google-fluentd' \
- " configuration file at #{@google_fluentd_config_path}. #{config}")
+ " configuration file at #{@google_fluentd_config_path}. #{config}"
+ )
  baseline_config = parse_config(@google_fluentd_baseline_config_path)
  @log.debug(
  'analyze_config plugin: successfully parsed google-fluentd' \
  ' baseline configuration file at' \
- " #{@google_fluentd_baseline_config_path}: #{baseline_config}")
+ " #{@google_fluentd_baseline_config_path}: #{baseline_config}"
+ )
 
  # Create hash of all baseline elements by their plugin names.
  baseline_elements = Hash[baseline_config.elements.collect do |e|
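Two further style-only rewrites appear in the hunk above: %i[ ] builds an array of symbols just like the explicit [:a, :b] literal, and a single-statement if block collapses to a trailing modifier. A standalone sketch (the registry object here is a stand-in, not the plugin's):

    # %i[...] is the symbol-array literal.
    puts(%i[plugin_name param value] == [:plugin_name, :param, :value]) # => true

    # Modifier guard; behaves like the three-line if/end block it replaced.
    registry = Object.new
    registry.update_timestamps('prefix') if registry.respond_to?(:update_timestamps)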
@@ -331,11 +341,13 @@ module Fluent
  has_default_config: has_default_config,
  has_ruby_snippet: embedded_ruby?(e)
  },
- by: 1)
+ by: 1
+ )
 
  # Additional metric for Google plugins (google_cloud and
  # detect_exceptions).
  next unless GOOGLE_PLUGIN_PARAMS.key?(e['@type'])
+
  GOOGLE_PLUGIN_PARAMS[e['@type']].each do |p|
  plugin_config_counter.increment(
  labels: {
@@ -346,40 +358,46 @@ module Fluent
  baseline_google_element.key?(p) &&
  e[p] == baseline_google_element[p])
  },
- by: 1)
- next unless e.key?(p) && %w(true false).include?(e[p])
+ by: 1
+ )
+ next unless e.key?(p) && %w[true false].include?(e[p])
+
  config_bool_values_counter.increment(
  labels: {
  plugin_name: e['@type'],
  param: p,
  value: e[p] == 'true'
  },
- by: 1)
+ by: 1
+ )
  end
  end
  @log.info(
- 'analyze_config plugin: Successfully finished analyzing config.')
+ 'analyze_config plugin: Successfully finished analyzing config.'
+ )
  else
  @log.info(
  'analyze_config plugin: google-fluentd configuration file does not ' \
  "exist at #{@google_fluentd_config_path} or google-fluentd " \
  'baseline configuration file does not exist at' \
  " #{@google_fluentd_baseline_config_path}. Skipping configuration " \
- 'analysis.')
+ 'analysis.'
+ )
  end
- rescue => e
+ rescue StandardError => e
  # Do not crash the agent due to configuration analysis failures.
  @log.warn(
  'analyze_config plugin: Failed to optionally analyze the ' \
  "google-fluentd configuration file. Proceeding anyway. Error: #{e}. " \
- "Trace: #{e.backtrace}")
+ "Trace: #{e.backtrace}"
+ )
  end
 
  def shutdown
  super
  # Export metrics on shutdown. This is a best-effort attempt, and it might
  # fail, for instance if there was a recent write to the same time series.
- @registry.export unless @registry.nil?
+ @registry&.export
  end
 
  # rubocop:disable Lint/UnusedMethodArgument
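Two idioms in the hunk above are worth calling out: a bare "rescue => e" already rescues StandardError, so naming the class only makes the intent explicit, and "@registry&.export" uses the safe-navigation operator to skip the call when the receiver is nil. A standalone sketch with illustrative names only:

    def risky
      raise 'boom'
    rescue StandardError => e # same set of exceptions as a bare "rescue => e"
      e.message
    end

    puts risky # => boom

    registry = nil
    registry&.export # => nil, no NoMethodError; same effect as "registry.export unless registry.nil?"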
@@ -48,7 +48,7 @@ module Fluent
  def on_timer
  GC.start
  # Use Tempfile.create to open the file, in order to preserve the file.
- file = Tempfile.create(['heap-' + fluentd_worker_id.to_s + '-', '.json'])
+ file = Tempfile.create(["heap-#{fluentd_worker_id}-", '.json'])
  begin
  log.info 'dumping object space to',
  filepath: file.path,
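The Tempfile change above swaps string concatenation for interpolation; #{} calls to_s on the embedded value, so the resulting prefix is identical. A quick check (worker_id is a stand-in for fluentd_worker_id):

    worker_id = 3
    puts('heap-' + worker_id.to_s + '-' == "heap-#{worker_id}-") # => true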
@@ -15,13 +15,15 @@
  module Monitoring
  # Base class for the counter.
  class BaseCounter
- def increment(*)
+ def increment(by: 1, labels: {})
+ # No default behavior
  end
  end
 
  # Prometheus implementation of counters.
  class PrometheusCounter < BaseCounter
  def initialize(prometheus_counter)
+ super()
  @counter = prometheus_counter
  end
 
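The BaseCounter change gives the no-op base method the same keyword signature its subclasses use, instead of swallowing everything with a bare splat. A self-contained sketch of the pattern (PrintingCounter is a hypothetical subclass, not from the gem):

    class BaseCounter
      def increment(by: 1, labels: {})
        # No default behavior; subclasses override this.
      end
    end

    class PrintingCounter < BaseCounter
      def increment(by: 1, labels: {})
        puts "increment by #{by} with #{labels}"
      end
    end

    PrintingCounter.new.increment(by: 2, labels: { plugin_name: 'tail' })
    BaseCounter.new.increment(by: 2) # accepted, does nothing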
@@ -33,7 +35,9 @@ module Monitoring
  # OpenCensus implementation of counters.
  class OpenCensusCounter < BaseCounter
  def initialize(recorder, measure, translator)
+ super()
  raise ArgumentError, 'measure must not be nil' if measure.nil?
+
  @recorder = recorder
  @measure = measure
  @translator = translator
@@ -42,7 +46,8 @@ module Monitoring
  def increment(by: 1, labels: {})
  labels = @translator.translate_labels(labels)
  tag_map = OpenCensus::Tags::TagMap.new(
- labels.map { |k, v| [k.to_s, v.to_s] }.to_h)
+ labels.map { |k, v| [k.to_s, v.to_s] }.to_h
+ )
  @recorder.record(@measure.create_measurement(value: by, tags: tag_map))
  end
  end
@@ -50,6 +55,7 @@ module Monitoring
  # Base class for the monitoring registry.
  class BaseMonitoringRegistry
  def initialize(_project_id, _monitored_resource, _gcm_service_address)
+ # no default behavior
  end
 
  def counter(_name, _labels, _docstring, _prefix, _aggregation)
@@ -80,9 +86,9 @@ module Monitoring
  # labels in the metric constructor. The 'labels' field in
  # Prometheus client 0.9.0 has a different function and will not
  # work as intended.
- return PrometheusCounter.new(@registry.counter(name, docstring))
+ PrometheusCounter.new(@registry.counter(name, docstring))
  rescue Prometheus::Client::Registry::AlreadyRegisteredError
- return PrometheusCounter.new(@registry.get(name))
+ PrometheusCounter.new(@registry.get(name))
  end
  end
 
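The dropped "return" keywords above are redundant because a Ruby method returns its last evaluated expression. A tiny illustration with a hypothetical helper:

    def build_label(name)
      "plugin/#{name}" # the last expression is the return value; no explicit return needed
    end

    puts build_label('syslog') # => plugin/syslog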
@@ -104,7 +110,8 @@ module Monitoring
  @exporters = {}
  @log.info(
  'monitoring module: Successfully initialized Open Census monitoring ' \
- 'registry.')
+ 'registry.'
+ )
  end
 
  def counter(name, labels, docstring, prefix, aggregation)
@@ -113,7 +120,8 @@ module Monitoring
  if measure.nil?
  @log.info(
  'monitoring module: Registering a new measure registry for ' \
- "#{translator.name}")
+ "#{translator.name}"
+ )
  measure = OpenCensus::Stats.create_measure_int(
  name: translator.name,
  unit: OpenCensus::Stats::Measure::UNIT_NONE,
@@ -123,7 +131,8 @@ module Monitoring
  unless @exporters.keys.include?(prefix)
  @log.info(
  'monitoring module: Registering a new exporter for ' \
- "#{prefix}")
+ "#{prefix}"
+ )
  @recorders[prefix] = OpenCensus::Stats::Recorder.new
  @exporters[prefix] = \
  OpenCensus::Stats::Exporters::Stackdriver.new(
@@ -135,13 +144,14 @@ module Monitoring
  )
  @log.info(
  'monitoring module: Registered recorders and exporters for ' \
- "#{prefix}.\n#{@exporters[prefix]}")
- end
- if aggregation == 'GAUGE'
- stats_aggregation = OpenCensus::Stats.create_last_value_aggregation
- else
- stats_aggregation = OpenCensus::Stats.create_sum_aggregation
+ "#{prefix}.\n#{@exporters[prefix]}"
+ )
  end
+ stats_aggregation = if aggregation == 'GAUGE'
+ OpenCensus::Stats.create_last_value_aggregation
+ else
+ OpenCensus::Stats.create_sum_aggregation
+ end
  @recorders[prefix].register_view(
  OpenCensus::Stats::View.new(
  name: translator.name,
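The rewritten aggregation selection above relies on if being an expression in Ruby, so its result can be assigned once instead of assigning inside each branch. A minimal standalone version (the symbols are placeholders for the OpenCensus aggregation objects):

    aggregation = 'GAUGE'
    stats_aggregation = if aggregation == 'GAUGE'
                          :last_value # placeholder for OpenCensus::Stats.create_last_value_aggregation
                        else
                          :sum # placeholder for OpenCensus::Stats.create_sum_aggregation
                        end
    puts stats_aggregation # => last_value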
@@ -154,7 +164,8 @@ module Monitoring
  counter = OpenCensusCounter.new(@recorders[prefix], measure, translator)
  @log.info(
  'monitoring module: Successfully initialized Open Census counter for ' \
- "#{prefix}/#{name}.")
+ "#{prefix}/#{name}."
+ )
  counter
  rescue StandardError => e
  @log.warn "Failed to count metrics for #{name}.", error: e
@@ -172,20 +183,20 @@ module Monitoring
  view_data.data.each_value do |aggr_data|
  # Apply this only to GAUGE metrics. This could fail if the metric uses
  # Distribution or other fancier aggregators.
- if aggr_data.is_a? OpenCensus::Stats::AggregationData::LastValue
- aggr_data.add aggr_data.value, new_time
- end
+ aggr_data.add aggr_data.value, new_time if aggr_data.is_a? OpenCensus::Stats::AggregationData::LastValue
  end
  end
  end
 
  def export
  @log.debug(
- "monitoring module: Exporting metrics for #{@exporters.keys}.")
- @exporters.keys.each do |prefix|
+ "monitoring module: Exporting metrics for #{@exporters.keys}."
+ )
+ @exporters.each_key do |prefix|
  @log.debug(
  "monitoring module: Exporting metrics for #{prefix}. " \
- "#{@recorders[prefix].views_data}")
+ "#{@recorders[prefix].views_data}"
+ )
  @exporters[prefix].export @recorders[prefix].views_data
  end
  rescue StandardError => e
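Hash#each_key, used above instead of keys.each, iterates the keys directly without first materializing them into an array. A quick standalone comparison (placeholder data):

    exporters = { 'example_prefix' => :example_exporter }
    exporters.keys.each { |prefix| puts prefix } # builds an intermediate array of keys first
    exporters.each_key  { |prefix| puts prefix } # same output, no intermediate array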
@@ -223,8 +234,7 @@ module Monitoring
  # Avoid this mechanism for new metrics by defining them in their final form,
  # so they don't need translation.
  class MetricTranslator
- attr_reader :name
- attr_reader :view_labels
+ attr_reader :name, :view_labels
 
  def initialize(name, metric_labels)
  @legacy = true
@@ -247,8 +257,9 @@ module Monitoring
 
  def translate_labels(labels)
  return labels unless @legacy
+
  translation = { code: :response_code, grpc: :grpc }
- labels.map { |k, v| [translation[k], v] }.to_h
+ labels.transform_keys { |k| translation[k] }
  end
  end
  end
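Hash#transform_keys (available since Ruby 2.5) expresses the same key renaming as the old map { ... }.to_h round trip without rebuilding the hash by hand. A standalone check using the same translation table, with made-up label values:

    translation = { code: :response_code, grpc: :grpc }
    labels = { code: 200, grpc: false }
    mapped      = labels.map { |k, v| [translation[k], v] }.to_h
    transformed = labels.transform_keys { |k| translation[k] }
    puts(mapped == transformed) # => true; both are { response_code: 200, grpc: false }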