fluent-plugin-google-cloud 0.10.9 → 0.13.1

This diff shows the contents of the two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -51,9 +51,6 @@ module Fluent
  desc 'The field name for insertIds in the log record.'
  config_param :insert_id_key, :string, default: DEFAULT_INSERT_ID_KEY

- # Expose attr_readers for testing.
- attr_reader :insert_id_key
-
  def start
  super
  @log = $log # rubocop:disable Style/GlobalVars
@@ -65,21 +62,11 @@ module Fluent
  @log.info "Initialized the insert ID key to #{@insert_id}."
  end

- def configure(conf)
- super
- end
-
- def shutdown
- super
- end
-
  # rubocop:disable Lint/UnusedMethodArgument
  def filter(tag, time, record)
  # Only generate and add an insertId field if the record is a hash and
  # the insert ID field is not already set (or set to an empty string).
- if record.is_a?(Hash) && record[@insert_id_key].to_s.empty?
- record[@insert_id_key] = increment_insert_id
- end
+ record[@insert_id_key] = increment_insert_id if record.is_a?(Hash) && record[@insert_id_key].to_s.empty?
  record
  end
  # rubocop:enable Lint/UnusedMethodArgument
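
The removed configure and shutdown overrides only called super, so dropping them changes nothing, and the filter body collapses into a guard-clause one-liner. A minimal standalone sketch of that guard, with a local counter standing in for the plugin's increment_insert_id helper and the insert-ID field name treated as an arbitrary key:

    insert_id_key = 'logging.googleapis.com/insertId' # any key works for this sketch
    counter = 0
    increment_insert_id = lambda do
      counter += 1
      format('sketch-%08d', counter)
    end

    add_insert_id = lambda do |record|
      # Same guard as the rewritten filter: only hashes get an insertId, and only
      # when the field is missing or set to an empty string.
      record[insert_id_key] = increment_insert_id.call if record.is_a?(Hash) && record[insert_id_key].to_s.empty?
      record
    end

    p add_insert_id.call('not a hash')                               # left untouched
    p add_insert_id.call({ 'message' => 'a' })                       # gets a generated insertId
    p add_insert_id.call({ 'message' => 'b', insert_id_key => 'x' }) # keeps the existing 'x'
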
@@ -38,6 +38,8 @@ module Fluent
  helpers :timer

  module Constants
+ PREFIX = 'agent.googleapis.com/agent/internal/logging/config'.freeze
+
  # Built-in plugins that are ok to reference in metrics.
  KNOWN_PLUGINS = {
  'filter' => Set[
@@ -84,7 +86,7 @@ module Fluent

  # For Google plugins, we collect metrics on the params listed here.
  GOOGLE_PLUGIN_PARAMS = {
- 'google_cloud' => %w(
+ 'google_cloud' => %w[
  adjust_invalid_timestamps
  auth_method
  autoformat_stackdriver_trace
@@ -116,8 +118,8 @@ module Fluent
  vm_id
  vm_name
  zone
- ),
- 'detect_exceptions' => %w(
+ ],
+ 'detect_exceptions' => %w[
  languages
  max_bytes
  max_lines
@@ -125,7 +127,7 @@ module Fluent
  multiline_flush_interval
  remove_tag_prefix
  stream
- )
+ ]
  }.freeze
  end

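The switch from %w( ) to %w[ ] here (and to %i[ ] further down) only changes the percent-literal delimiter to the bracket style RuboCop prefers; the resulting arrays are identical. A quick check:

    # Both percent-literals build the same array of strings; only the delimiter differs.
    round    = %w(adjust_invalid_timestamps auth_method)
    brackets = %w[adjust_invalid_timestamps auth_method]

    p round == brackets                                          # => true
    p brackets == ['adjust_invalid_timestamps', 'auth_method']   # => true
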
@@ -164,7 +166,8 @@ module Fluent
  @log = $log # rubocop:disable Style/GlobalVars

  @log.info(
- 'analyze_config plugin: Started the plugin to analyze configuration.')
+ 'analyze_config plugin: Started the plugin to analyze configuration.'
+ )
  end

  def parse_config(path)
@@ -183,32 +186,32 @@ module Fluent
  end

  # Returns a name for identifying plugins we ship by default.
- def default_plugin_name(e)
- case e['@type']
+ def default_plugin_name(conf_element)
+ case conf_element['@type']
  when 'syslog'
- "#{e.name}/syslog/#{e['protocol_type']}"
+ "#{conf_element.name}/syslog/#{conf_element['protocol_type']}"
  when 'tail'
- "#{e.name}/tail/#{File.basename(e['pos_file'], '.pos')}"
+ "#{conf_element.name}/tail/#{File.basename(conf_element['pos_file'], '.pos')}"
  else
- "#{e.name}/#{e['@type']}"
+ "#{conf_element.name}/#{conf_element['@type']}"
  end
  end

  # Returns a name for identifying plugins not in our default
  # config. This should not contain arbitrary user-supplied data.
- def custom_plugin_name(e)
- if KNOWN_PLUGINS.key?(e.name) &&
- KNOWN_PLUGINS[e.name].include?(e['@type'])
- "#{e.name}/#{e['@type']}"
+ def custom_plugin_name(conf_element)
+ if KNOWN_PLUGINS.key?(conf_element.name) &&
+ KNOWN_PLUGINS[conf_element.name].include?(conf_element['@type'])
+ "#{conf_element.name}/#{conf_element['@type']}"
  else
- e.name.to_s
+ conf_element.name.to_s
  end
  end

- def embedded_ruby?(e)
- (e.arg.include?('#{') ||
- e.any? { |_, v| v.include?('#{') } ||
- e.elements.any? { |ee| embedded_ruby?(ee) })
+ def embedded_ruby?(conf_element)
+ (conf_element.arg.include?('#{') ||
+ conf_element.any? { |_, v| v.include?('#{') } ||
+ conf_element.elements.any? { |e| embedded_ruby?(e) })
  end

  def configure(conf)
@@ -221,7 +224,8 @@ module Fluent
  " #{@google_fluentd_config_path}. " \
  'google-fluentd baseline configuration file found at' \
  " #{@google_fluentd_baseline_config_path}. " \
- 'google-fluentd Analyzing configuration.')
+ 'google-fluentd Analyzing configuration.'
+ )

  utils = Common::Utils.new(@log)
  platform = utils.detect_platform(true)
@@ -231,63 +235,76 @@ module Fluent

  # All metadata parameters must now be set.
  utils.check_required_metadata_variables(
- platform, project_id, zone, vm_id)
+ platform, project_id, zone, vm_id
+ )

  # Retrieve monitored resource.
  # Fail over to retrieve monitored resource via the legacy path if we
  # fail to get it from Metadata Agent.
  resource = utils.determine_agent_level_monitored_resource_via_legacy(
- platform, nil, false, vm_id, zone)
+ platform, nil, false, vm_id, zone
+ )

  unless Monitoring::MonitoringRegistryFactory.supports_monitoring_type(
- @monitoring_type)
+ @monitoring_type
+ )
  @log.warn(
  "analyze_config plugin: monitoring_type #{@monitoring_type} is " \
- 'unknown; there will be no metrics.')
+ 'unknown; there will be no metrics.'
+ )
  end

  @registry = Monitoring::MonitoringRegistryFactory.create(
- @monitoring_type, project_id, resource, @gcm_service_address)
+ @monitoring_type, project_id, resource, @gcm_service_address
+ )
  # Export metrics every 60 seconds.
- timer_execute(:export_config_analysis_metrics, 60) { @registry.export }
+ timer_execute(:export_config_analysis_metrics, 60) do
+ @registry.update_timestamps(PREFIX) if @registry.respond_to? :update_timestamps
+ @registry.export
+ end

  @log.info('analyze_config plugin: Registering counters.')
  enabled_plugins_counter = @registry.counter(
  :enabled_plugins,
- [:plugin_name, :is_default_plugin,
- :has_default_config, :has_ruby_snippet],
+ %i[plugin_name is_default_plugin has_default_config has_ruby_snippet],
  'Enabled plugins',
- 'agent.googleapis.com/agent/internal/logging/config',
- 'GAUGE')
+ PREFIX,
+ 'GAUGE'
+ )
  @log.info(
  'analyze_config plugin: registered enable_plugins counter. ' \
- "#{enabled_plugins_counter}")
+ "#{enabled_plugins_counter}"
+ )
  plugin_config_counter = @registry.counter(
  :plugin_config,
- [:plugin_name, :param, :is_present, :has_default_config],
+ %i[plugin_name param is_present has_default_config],
  'Configuration parameter usage for plugins relevant to Google Cloud.',
- 'agent.googleapis.com/agent/internal/logging/config',
- 'GAUGE')
+ PREFIX,
+ 'GAUGE'
+ )
  @log.info('analyze_config plugin: registered plugin_config counter. ' \
  "#{plugin_config_counter}")
  config_bool_values_counter = @registry.counter(
  :config_bool_values,
- [:plugin_name, :param, :value],
+ %i[plugin_name param value],
  'Values for bool parameters in Google Cloud plugins',
- 'agent.googleapis.com/agent/internal/logging/config',
- 'GAUGE')
+ PREFIX,
+ 'GAUGE'
+ )
  @log.info('analyze_config plugin: registered config_bool_values ' \
  "counter. #{config_bool_values_counter}")

  config = parse_config(@google_fluentd_config_path)
  @log.debug(
  'analyze_config plugin: successfully parsed google-fluentd' \
- " configuration file at #{@google_fluentd_config_path}. #{config}")
+ " configuration file at #{@google_fluentd_config_path}. #{config}"
+ )
  baseline_config = parse_config(@google_fluentd_baseline_config_path)
  @log.debug(
  'analyze_config plugin: successfully parsed google-fluentd' \
  ' baseline configuration file at' \
- " #{@google_fluentd_baseline_config_path}: #{baseline_config}")
+ " #{@google_fluentd_baseline_config_path}: #{baseline_config}"
+ )

  # Create hash of all baseline elements by their plugin names.
  baseline_elements = Hash[baseline_config.elements.collect do |e|
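
The one-line timer body becomes a block that refreshes metric timestamps before each export, guarded by respond_to? so a registry without update_timestamps keeps its old behavior (in this gem only the OpenCensus registry gains that method, in a later hunk). A minimal sketch of that dispatch, using two made-up registry classes rather than the gem's real ones:

    # Stand-in registries (hypothetical, for illustration only): only the first
    # one implements update_timestamps.
    class SketchOpenCensusRegistry
      def update_timestamps(prefix)
        puts "refreshed timestamps under #{prefix}"
      end

      def export
        puts 'exported via OpenCensus'
      end
    end

    class SketchPrometheusRegistry
      def export
        puts 'exported via Prometheus registry'
      end
    end

    prefix = 'agent.googleapis.com/agent/internal/logging/config'

    # Body of the periodic task that timer_execute schedules every 60 seconds.
    export_tick = lambda do |registry|
      registry.update_timestamps(prefix) if registry.respond_to? :update_timestamps
      registry.export
    end

    export_tick.call(SketchOpenCensusRegistry.new)  # refreshes timestamps, then exports
    export_tick.call(SketchPrometheusRegistry.new)  # just exports
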
@@ -324,11 +341,13 @@ module Fluent
  has_default_config: has_default_config,
  has_ruby_snippet: embedded_ruby?(e)
  },
- by: 1)
+ by: 1
+ )

  # Additional metric for Google plugins (google_cloud and
  # detect_exceptions).
  next unless GOOGLE_PLUGIN_PARAMS.key?(e['@type'])
+
  GOOGLE_PLUGIN_PARAMS[e['@type']].each do |p|
  plugin_config_counter.increment(
  labels: {
@@ -339,40 +358,46 @@ module Fluent
  baseline_google_element.key?(p) &&
  e[p] == baseline_google_element[p])
  },
- by: 1)
- next unless e.key?(p) && %w(true false).include?(e[p])
+ by: 1
+ )
+ next unless e.key?(p) && %w[true false].include?(e[p])
+
  config_bool_values_counter.increment(
  labels: {
  plugin_name: e['@type'],
  param: p,
  value: e[p] == 'true'
  },
- by: 1)
+ by: 1
+ )
  end
  end
  @log.info(
- 'analyze_config plugin: Successfully finished analyzing config.')
+ 'analyze_config plugin: Successfully finished analyzing config.'
+ )
  else
  @log.info(
  'analyze_config plugin: google-fluentd configuration file does not ' \
  "exist at #{@google_fluentd_config_path} or google-fluentd " \
  'baseline configuration file does not exist at' \
  " #{@google_fluentd_baseline_config_path}. Skipping configuration " \
- 'analysis.')
+ 'analysis.'
+ )
  end
- rescue => e
+ rescue StandardError => e
  # Do not crash the agent due to configuration analysis failures.
  @log.warn(
  'analyze_config plugin: Failed to optionally analyze the ' \
  "google-fluentd configuration file. Proceeding anyway. Error: #{e}. " \
- "Trace: #{e.backtrace}")
+ "Trace: #{e.backtrace}"
+ )
  end

  def shutdown
  super
  # Export metrics on shutdown. This is a best-effort attempt, and it might
  # fail, for instance if there was a recent write to the same time series.
- @registry.export unless @registry.nil?
+ @registry&.export
  end

  # rubocop:disable Lint/UnusedMethodArgument
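
Two small tidy-ups close this hunk: the bare rescue now names StandardError explicitly (a bare rescue already catches StandardError, so behavior is unchanged), and @registry.export unless @registry.nil? becomes the safe-navigation form. A tiny sketch of that last equivalence with a stand-in registry object:

    # Stand-in registry with an export method; nil simulates "never configured".
    SketchRegistry = Struct.new(:name) do
      def export
        "exported #{name}"
      end
    end

    registry = SketchRegistry.new('analysis metrics')
    p registry&.export                       # => "exported analysis metrics"
    p(registry.export unless registry.nil?)  # same result, old spelling

    registry = nil
    p registry&.export                       # => nil, and no NoMethodError is raised
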
@@ -48,7 +48,7 @@ module Fluent
  def on_timer
  GC.start
  # Use Tempfile.create to open the file, in order to preserve the file.
- file = Tempfile.create(['heap-' + fluentd_worker_id.to_s + '-', '.json'])
+ file = Tempfile.create(["heap-#{fluentd_worker_id}-", '.json'])
  begin
  log.info 'dumping object space to',
  filepath: file.path,
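
Only the filename prefix construction changes here: string interpolation instead of String#+. Tempfile.create with a [prefix, suffix] pair still builds the same name, and without a block it leaves the file on disk, which is what the surrounding comment relies on. A small sketch, using 0 as a stand-in for fluentd_worker_id:

    require 'tempfile'

    worker_id = 0 # stand-in for fluentd_worker_id

    # Both spellings build the same prefix.
    old_prefix = 'heap-' + worker_id.to_s + '-'
    new_prefix = "heap-#{worker_id}-"
    p old_prefix == new_prefix  # => true

    # Without a block, Tempfile.create returns an open File and does not unlink it.
    file = Tempfile.create([new_prefix, '.json'])
    p file.path                 # e.g. /tmp/heap-0-...-.json
    file.close
    File.delete(file.path)      # clean up after the sketch
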
@@ -15,13 +15,15 @@
  module Monitoring
  # Base class for the counter.
  class BaseCounter
- def increment(*)
+ def increment(by: 1, labels: {})
+ # No default behavior
  end
  end

  # Prometheus implementation of counters.
  class PrometheusCounter < BaseCounter
  def initialize(prometheus_counter)
+ super()
  @counter = prometheus_counter
  end

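BaseCounter#increment goes from a catch-all splat to the explicit keyword signature its concrete subclasses use, so a mistyped keyword now raises instead of being silently absorbed, and the empty body gets a comment making the no-op intentional. A self-contained sketch with a toy in-memory counter (not one of the gem's implementations):

    # Base class mirrors the new signature: a no-op with an explicit contract.
    class BaseCounter
      def increment(by: 1, labels: {})
        # No default behavior
      end
    end

    # Toy in-memory counter standing in for the Prometheus/OpenCensus classes.
    class MemoryCounter < BaseCounter
      attr_reader :values

      def initialize
        super()
        @values = Hash.new(0)
      end

      def increment(by: 1, labels: {})
        @values[labels] += by
      end
    end

    counter = MemoryCounter.new
    counter.increment(labels: { plugin_name: 'google_cloud', param: 'use_grpc' }, by: 1)
    counter.increment(labels: { plugin_name: 'google_cloud', param: 'use_grpc' })
    p counter.values[{ plugin_name: 'google_cloud', param: 'use_grpc' }]  # => 2

    begin
      counter.increment(amount: 2)  # a wrong keyword now raises ArgumentError
    rescue ArgumentError => e
      p e.message
    end
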
@@ -33,7 +35,9 @@ module Monitoring
  # OpenCensus implementation of counters.
  class OpenCensusCounter < BaseCounter
  def initialize(recorder, measure, translator)
+ super()
  raise ArgumentError, 'measure must not be nil' if measure.nil?
+
  @recorder = recorder
  @measure = measure
  @translator = translator
@@ -42,7 +46,8 @@ module Monitoring
  def increment(by: 1, labels: {})
  labels = @translator.translate_labels(labels)
  tag_map = OpenCensus::Tags::TagMap.new(
- labels.map { |k, v| [k.to_s, v.to_s] }.to_h)
+ labels.map { |k, v| [k.to_s, v.to_s] }.to_h
+ )
  @recorder.record(@measure.create_measurement(value: by, tags: tag_map))
  end
  end
@@ -50,6 +55,7 @@ module Monitoring
  # Base class for the monitoring registry.
  class BaseMonitoringRegistry
  def initialize(_project_id, _monitored_resource, _gcm_service_address)
+ # no default behavior
  end

  def counter(_name, _labels, _docstring, _prefix, _aggregation)
@@ -80,9 +86,9 @@ module Monitoring
  # labels in the metric constructor. The 'labels' field in
  # Prometheus client 0.9.0 has a different function and will not
  # work as intended.
- return PrometheusCounter.new(@registry.counter(name, docstring))
+ PrometheusCounter.new(@registry.counter(name, docstring))
  rescue Prometheus::Client::Registry::AlreadyRegisteredError
- return PrometheusCounter.new(@registry.get(name))
+ PrometheusCounter.new(@registry.get(name))
  end
  end

@@ -104,7 +110,8 @@ module Monitoring
  @exporters = {}
  @log.info(
  'monitoring module: Successfully initialized Open Census monitoring ' \
- 'registry.')
+ 'registry.'
+ )
  end

  def counter(name, labels, docstring, prefix, aggregation)
@@ -113,7 +120,8 @@ module Monitoring
  if measure.nil?
  @log.info(
  'monitoring module: Registering a new measure registry for ' \
- "#{translator.name}")
+ "#{translator.name}"
+ )
  measure = OpenCensus::Stats.create_measure_int(
  name: translator.name,
  unit: OpenCensus::Stats::Measure::UNIT_NONE,
@@ -123,7 +131,8 @@ module Monitoring
  unless @exporters.keys.include?(prefix)
  @log.info(
  'monitoring module: Registering a new exporter for ' \
- "#{prefix}")
+ "#{prefix}"
+ )
  @recorders[prefix] = OpenCensus::Stats::Recorder.new
  @exporters[prefix] = \
  OpenCensus::Stats::Exporters::Stackdriver.new(
@@ -135,13 +144,14 @@ module Monitoring
  )
  @log.info(
  'monitoring module: Registered recorders and exporters for ' \
- "#{prefix}.\n#{@exporters[prefix]}")
- end
- if aggregation == 'GAUGE'
- stats_aggregation = OpenCensus::Stats.create_last_value_aggregation
- else
- stats_aggregation = OpenCensus::Stats.create_sum_aggregation
+ "#{prefix}.\n#{@exporters[prefix]}"
+ )
  end
+ stats_aggregation = if aggregation == 'GAUGE'
+ OpenCensus::Stats.create_last_value_aggregation
+ else
+ OpenCensus::Stats.create_sum_aggregation
+ end
  @recorders[prefix].register_view(
  OpenCensus::Stats::View.new(
  name: translator.name,
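
stats_aggregation is now assigned from the value of the if expression instead of being set separately in each branch; the chosen aggregation is the same either way. The pattern in isolation, with symbols standing in for the OpenCensus aggregation objects:

    # `if` is an expression in Ruby, so its result can be assigned directly.
    aggregation = 'GAUGE'
    stats_aggregation = if aggregation == 'GAUGE'
                          :last_value_aggregation
                        else
                          :sum_aggregation
                        end
    p stats_aggregation  # => :last_value_aggregation
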
@@ -154,20 +164,39 @@ module Monitoring
  counter = OpenCensusCounter.new(@recorders[prefix], measure, translator)
  @log.info(
  'monitoring module: Successfully initialized Open Census counter for ' \
- "#{prefix}/#{name}.")
+ "#{prefix}/#{name}."
+ )
  counter
  rescue StandardError => e
  @log.warn "Failed to count metrics for #{name}.", error: e
  raise e
  end

+ # Update timestamps for each existing AggregationData without altering tags
+ # or values.
+ # This is currently only used for config analysis metrics, because we want
+ # to repeatedly send the exact same metrics as created at start-up.
+ def update_timestamps(prefix)
+ new_time = Time.now.utc
+ recorder = @recorders[prefix]
+ recorder.views_data.each do |view_data|
+ view_data.data.each_value do |aggr_data|
+ # Apply this only to GAUGE metrics. This could fail if the metric uses
+ # Distribution or other fancier aggregators.
+ aggr_data.add aggr_data.value, new_time if aggr_data.is_a? OpenCensus::Stats::AggregationData::LastValue
+ end
+ end
+ end
+
  def export
  @log.debug(
- "monitoring module: Exporting metrics for #{@exporters.keys}.")
- @exporters.keys.each do |prefix|
+ "monitoring module: Exporting metrics for #{@exporters.keys}."
+ )
+ @exporters.each_key do |prefix|
  @log.debug(
  "monitoring module: Exporting metrics for #{prefix}. " \
- "#{@recorders[prefix].views_data}")
+ "#{@recorders[prefix].views_data}"
+ )
  @exporters[prefix].export @recorders[prefix].views_data
  end
  rescue StandardError => e
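
Besides the new update_timestamps helper, export now iterates with each_key, which walks the hash keys in place instead of allocating the intermediate array that keys.each builds; the loop body is untouched. Illustrated with a plain hash standing in for @exporters (the prefixes are only examples):

    exporters = {
      'agent.googleapis.com/agent/internal/logging/config' => :config_exporter,
      'example.googleapis.com/other/prefix' => :other_exporter
    }

    # Old spelling: builds a temporary array of keys, then iterates it.
    exporters.keys.each { |prefix| puts "exporting #{prefix}" }

    # New spelling: same order, same prefixes, no intermediate array.
    exporters.each_key { |prefix| puts "exporting #{prefix}" }
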
@@ -205,8 +234,7 @@ module Monitoring
  # Avoid this mechanism for new metrics by defining them in their final form,
  # so they don't need translation.
  class MetricTranslator
- attr_reader :name
- attr_reader :view_labels
+ attr_reader :name, :view_labels

  def initialize(name, metric_labels)
  @legacy = true
@@ -229,8 +257,9 @@ module Monitoring

  def translate_labels(labels)
  return labels unless @legacy
+
  translation = { code: :response_code, grpc: :grpc }
- labels.map { |k, v| [translation[k], v] }.to_h
+ labels.transform_keys { |k| translation[k] }
  end
  end
  end
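
Hash#transform_keys (available since Ruby 2.5) replaces the map-then-to_h round trip in translate_labels: keys are remapped through the translation table and values pass through unchanged. A quick check of the equivalence:

    translation = { code: :response_code, grpc: :grpc }
    labels = { code: 404, grpc: false }

    old_result = labels.map { |k, v| [translation[k], v] }.to_h
    new_result = labels.transform_keys { |k| translation[k] }

    p new_result                # both map :code to :response_code and keep the values
    p old_result == new_result  # => true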