logstash-core 6.0.1-java → 6.1.0-java

Files changed (64)
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +1 -1
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/logstash-core.rb +14 -2
  5. data/lib/logstash-core_jars.rb +4 -2
  6. data/lib/logstash/agent.rb +8 -2
  7. data/lib/logstash/api/modules/node.rb +11 -5
  8. data/lib/logstash/api/modules/stats.rb +13 -7
  9. data/lib/logstash/compiler.rb +6 -10
  10. data/lib/logstash/compiler/lscl.rb +10 -1
  11. data/lib/logstash/compiler/lscl/helpers.rb +3 -1
  12. data/lib/logstash/config/mixin.rb +2 -2
  13. data/lib/logstash/environment.rb +1 -6
  14. data/lib/logstash/errors.rb +1 -1
  15. data/lib/logstash/event.rb +0 -2
  16. data/lib/logstash/filter_delegator.rb +1 -2
  17. data/lib/logstash/instrument/metric_type/counter.rb +1 -1
  18. data/lib/logstash/instrument/metric_type/gauge.rb +1 -1
  19. data/lib/logstash/instrument/wrapped_write_client.rb +1 -1
  20. data/lib/logstash/java_filter_delegator.rb +79 -0
  21. data/lib/logstash/java_pipeline.rb +690 -0
  22. data/lib/logstash/json.rb +4 -29
  23. data/lib/logstash/output_delegator.rb +3 -2
  24. data/lib/logstash/patches/bugfix_jruby_2558.rb +1 -1
  25. data/lib/logstash/pipeline.rb +32 -89
  26. data/lib/logstash/pipeline_action/create.rb +8 -2
  27. data/lib/logstash/pipeline_action/reload.rb +6 -1
  28. data/lib/logstash/pipeline_reporter.rb +2 -1
  29. data/lib/logstash/pipeline_settings.rb +1 -0
  30. data/lib/logstash/plugins/plugin_factory.rb +100 -0
  31. data/lib/logstash/plugins/registry.rb +18 -7
  32. data/lib/logstash/queue_factory.rb +3 -1
  33. data/lib/logstash/runner.rb +13 -56
  34. data/lib/logstash/settings.rb +2 -2
  35. data/lib/logstash/timestamp.rb +0 -1
  36. data/lib/logstash/util.rb +13 -21
  37. data/lib/logstash/util/java_version.rb +0 -1
  38. data/lib/logstash/util/settings_helper.rb +79 -0
  39. data/lib/logstash/util/{environment_variables.rb → substitution_variables.rb} +10 -8
  40. data/lib/logstash/util/wrapped_acked_queue.rb +17 -108
  41. data/lib/logstash/util/wrapped_synchronous_queue.rb +38 -178
  42. data/locales/en.yml +2 -0
  43. data/spec/conditionals_spec.rb +235 -80
  44. data/spec/logstash/api/modules/node_spec.rb +11 -0
  45. data/spec/logstash/compiler/compiler_spec.rb +28 -2
  46. data/spec/logstash/environment_spec.rb +0 -5
  47. data/spec/logstash/event_spec.rb +7 -2
  48. data/spec/logstash/filter_delegator_spec.rb +1 -1
  49. data/spec/logstash/filters/base_spec.rb +30 -28
  50. data/spec/logstash/instrument/wrapped_write_client_spec.rb +2 -2
  51. data/spec/logstash/java_filter_delegator_spec.rb +176 -0
  52. data/spec/logstash/java_pipeline_spec.rb +933 -0
  53. data/spec/logstash/json_spec.rb +27 -45
  54. data/spec/logstash/plugins/registry_spec.rb +7 -0
  55. data/spec/logstash/queue_factory_spec.rb +5 -2
  56. data/spec/logstash/settings_spec.rb +1 -1
  57. data/spec/logstash/util/java_version_spec.rb +1 -3
  58. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +27 -24
  59. data/spec/logstash/webserver_spec.rb +3 -6
  60. data/spec/support/helpers.rb +5 -0
  61. data/spec/support/pipeline/pipeline_helpers.rb +97 -0
  62. data/versions-gem-copy.yml +5 -2
  63. metadata +14 -5
  64. data/lib/logstash/patches/rubygems.rb +0 -38
@@ -1,36 +1,12 @@
 # encoding: utf-8
 require "logstash/environment"
-require "logstash/errors"
-if LogStash::Environment.jruby?
-  require "jrjackson"
-  require "logstash/java_integration"
-else
-  require "oj"
-end
+require "jrjackson"
+require "logstash/java_integration"
 
 module LogStash
   module Json
-    class ParserError < LogStash::Error; end
-    class GeneratorError < LogStash::Error; end
-
     extend self
 
-    ### MRI
-
-    def mri_load(data, options = {})
-      Oj.load(data)
-    rescue Oj::ParseError => e
-      raise LogStash::Json::ParserError.new(e.message)
-    end
-
-    def mri_dump(o)
-      Oj.dump(o, :mode => :compat, :use_to_json => true)
-    rescue => e
-      raise LogStash::Json::GeneratorError.new(e.message)
-    end
-
-    ### JRuby
-
     def jruby_load(data, options = {})
      # TODO [guyboertje] remove these comments in 5.0
      # options[:symbolize_keys] ? JrJackson::Raw.parse_sym(data) : JrJackson::Raw.parse_raw(data)
@@ -52,9 +28,8 @@ module LogStash
      raise LogStash::Json::GeneratorError.new(e.message)
     end
 
-    prefix = LogStash::Environment.jruby? ? "jruby" : "mri"
-    alias_method :load, "#{prefix}_load".to_sym
-    alias_method :dump, "#{prefix}_dump".to_sym
+    alias_method :load, "jruby_load".to_sym
+    alias_method :dump, "jruby_dump".to_sym
 
  end
 end
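With the Oj branch removed, LogStash::Json always routes through JrJackson. A minimal round-trip sketch of the resulting public API (hedged: assumes a JRuby session with the jrjackson gem and logstash-core on the load path):

    # Sketch only: load/dump are now unconditional aliases for the
    # jruby_* implementations backed by JrJackson.
    require "logstash/json"

    hash = LogStash::Json.load('{"message":"hello"}')  # => {"message"=>"hello"}
    json = LogStash::Json.dump("message" => "hello")   # => '{"message":"hello"}'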
@@ -44,11 +44,12 @@ module LogStash class OutputDelegator
   end
 
   def multi_receive(events)
-    @in_counter.increment(events.length)
+    count = events.size
+    @in_counter.increment(count)
     start_time = java.lang.System.nano_time
     @strategy.multi_receive(events)
     @time_metric.increment((java.lang.System.nano_time - start_time) / 1_000_000)
-    @out_counter.increment(events.length)
+    @out_counter.increment(count)
   end
 
   def do_close
@@ -1,7 +1,7 @@
 # encoding: utf-8
 require "logstash/environment"
 
-if LogStash::Environment.windows? && LogStash::Environment.jruby?
+if LogStash::Environment.windows?
   require "socket"
   module JRubyBug2558SocketPeerAddrBugFix
     def peeraddr(*args)
@@ -22,6 +22,7 @@ require "logstash/util/dead_letter_queue_manager"
 require "logstash/output_delegator"
 require "logstash/filter_delegator"
 require "logstash/queue_factory"
+require "logstash/plugins/plugin_factory"
 require "logstash/compiler"
 require "logstash/execution_context"
 require "securerandom"
@@ -29,6 +30,7 @@ require "securerandom"
 java_import org.logstash.common.DeadLetterQueueFactory
 java_import org.logstash.common.SourceWithMetadata
 java_import org.logstash.common.io.DeadLetterQueueWriter
+java_import org.logstash.config.ir.ConfigCompiler
 
 module LogStash; class BasePipeline
   include LogStash::Util::Loggable
@@ -46,16 +48,12 @@ module LogStash; class BasePipeline
     @settings = pipeline_config.settings
     @config_hash = Digest::SHA1.hexdigest(@config_str)
 
-    @lir = compile_lir
-
-    # Every time #plugin is invoked this is incremented to give each plugin
-    # a unique id when auto-generating plugin ids
-    @plugin_counter ||= 0
+    @lir = ConfigCompiler.configToPipelineIR(
+      @config_str, @settings.get_value("config.support_escapes")
+    )
 
     @pipeline_id = @settings.get_value("pipeline.id") || self.object_id
 
-    # A list of plugins indexed by id
-    @plugins_by_id = {}
     @inputs = nil
     @filters = nil
     @outputs = nil
@@ -63,6 +61,12 @@ module LogStash; class BasePipeline
 
     @dlq_writer = dlq_writer
 
+    @plugin_factory = LogStash::Plugins::PluginFactory.new(
+      # use NullMetric if called in the BasePipeline context otherwise use the @metric value
+      @lir, LogStash::Plugins::PluginMetricFactory.new(pipeline_id, @metric || Instrument::NullMetric.new),
+      @logger, LogStash::Plugins::ExecutionContextFactory.new(@agent, self, @dlq_writer),
+      FilterDelegator
+    )
     grammar = LogStashConfigParser.new
     parsed_config = grammar.parse(config_str)
     raise(ConfigurationError, grammar.failure_reason) if parsed_config.nil?
@@ -101,61 +105,13 @@ module LogStash; class BasePipeline
   end
 
   def compile_lir
-    sources_with_metadata = [
-      SourceWithMetadata.new("str", "pipeline", 0, 0, self.config_str)
-    ]
-    LogStash::Compiler.compile_sources(sources_with_metadata, @settings)
+    org.logstash.config.ir.ConfigCompiler.configToPipelineIR(
+      self.config_str, @settings.get_value("config.support_escapes")
+    )
   end
 
   def plugin(plugin_type, name, line, column, *args)
-    @plugin_counter += 1
-
-    # Collapse the array of arguments into a single merged hash
-    args = args.reduce({}, &:merge)
-
-    if plugin_type == "codec"
-      id = SecureRandom.uuid # codecs don't really use their IDs for metrics, so we can use anything here
-    else
-      # Pull the ID from LIR to keep IDs consistent between the two representations
-      id = lir.graph.vertices.filter do |v|
-        v.source_with_metadata &&
-          v.source_with_metadata.line == line &&
-          v.source_with_metadata.column == column
-      end.findFirst.get.id
-    end
-
-    args["id"] = id # some code pulls the id out of the args
-
-    if !id
-      raise ConfigurationError, "Could not determine ID for #{plugin_type}/#{plugin_name}"
-    end
-
-    raise ConfigurationError, "Two plugins have the id '#{id}', please fix this conflict" if @plugins_by_id[id]
-    @plugins_by_id[id] = true
-
-    # use NullMetric if called in the BasePipeline context otherwise use the @metric value
-    metric = @metric || Instrument::NullMetric.new
-
-    pipeline_scoped_metric = metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :plugins])
-    # Scope plugins of type 'input' to 'inputs'
-    type_scoped_metric = pipeline_scoped_metric.namespace("#{plugin_type}s".to_sym)
-
-    klass = Plugin.lookup(plugin_type, name)
-
-    execution_context = ExecutionContext.new(self, @agent, id, klass.config_name, @dlq_writer)
-
-    if plugin_type == "output"
-      OutputDelegator.new(@logger, klass, type_scoped_metric, execution_context, OutputDelegatorStrategyRegistry.instance, args)
-    elsif plugin_type == "filter"
-      FilterDelegator.new(@logger, klass, type_scoped_metric, execution_context, args)
-    else # input
-      input_plugin = klass.new(args)
-      scoped_metric = type_scoped_metric.namespace(id.to_sym)
-      scoped_metric.gauge(:name, input_plugin.config_name)
-      input_plugin.metric = scoped_metric
-      input_plugin.execution_context = execution_context
-      input_plugin
-    end
+    @plugin_factory.plugin(plugin_type, name, line, column, *args)
   end
 
   def reloadable?
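compile_lir and the constructor above now share a single Java entry point, org.logstash.config.ir.ConfigCompiler. A hedged sketch of that call from JRuby, with the two arguments the diff passes (the config string shown is an arbitrary example, not from the source):

    # Sketch, not authoritative: the Java-side compiler produces the
    # PipelineIR that used to come from LogStash::Compiler.compile_sources.
    java_import org.logstash.config.ir.ConfigCompiler

    lir = ConfigCompiler.configToPipelineIR(
      "input { stdin {} } output { stdout {} }",  # config_str
      false                                       # config.support_escapes
    )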
@@ -241,7 +197,6 @@ module LogStash; class Pipeline < BasePipeline
     @ready = Concurrent::AtomicBoolean.new(false)
     @running = Concurrent::AtomicBoolean.new(false)
     @flushing = Concurrent::AtomicReference.new(false)
-    @force_shutdown = Concurrent::AtomicBoolean.new(false)
     @outputs_registered = Concurrent::AtomicBoolean.new(false)
   end # def initialize
 
@@ -434,10 +389,11 @@
     end
 
     pipeline_workers.times do |t|
-      @worker_threads << Thread.new do
-        Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
-        worker_loop(batch_size, batch_delay)
+      thread = Thread.new(batch_size, batch_delay, self) do |_b_size, _b_delay, _pipeline|
+        _pipeline.worker_loop(_b_size, _b_delay)
       end
+      thread.name="[#{pipeline_id}]>worker#{t}"
+      @worker_threads << thread
     end
 
     # inputs should be started last, after all workers
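The worker startup now passes batch_size, batch_delay, and self as Thread.new arguments instead of closing over them, and names the thread via Thread#name= rather than Util.set_thread_name. Passing values as spawn arguments gives each worker block-local copies; a minimal illustration of the difference (variable names are illustrative only):

    i = 1
    captured = Thread.new { sleep 0.01; i }        # closes over `i`, sees later writes
    snapshot = Thread.new(i) { |local_i| local_i } # receives `i` as a block-local value
    i = 2
    captured.value  # => 2
    snapshot.value  # => 1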
@@ -466,25 +422,24 @@ module LogStash; class Pipeline < BasePipeline
     shutdown_requested = false
 
     @filter_queue_client.set_batch_dimensions(batch_size, batch_delay)
-
+    output_events_map = Hash.new { |h, k| h[k] = [] }
     while true
       signal = @signal_queue.poll || NO_SIGNAL
       shutdown_requested |= signal.shutdown? # latch on shutdown signal
 
       batch = @filter_queue_client.read_batch # metrics are started in read_batch
-      if batch.size > 0
-        @events_consumed.increment(batch.size)
+      batch_size = batch.size
+      if batch_size > 0
+        @events_consumed.increment(batch_size)
         filter_batch(batch)
       end
       flush_filters_to_batch(batch, :final => false) if signal.flush?
       if batch.size > 0
-        output_batch(batch)
-        unless @force_shutdown.true? # ack the current batch
-          @filter_queue_client.close_batch(batch)
-        end
+        output_batch(batch, output_events_map)
+        @filter_queue_client.close_batch(batch)
       end
       # keep break at end of loop, after the read_batch operation, some pipeline specs rely on this "final read_batch" before shutdown.
-      break if (shutdown_requested && !draining_queue?) || @force_shutdown.true?
+      break if (shutdown_requested && !draining_queue?)
     end
 
     # we are shutting down, queue is drained if it was required, now perform a final flush.
@@ -492,8 +447,7 @@ module LogStash; class Pipeline < BasePipeline
     batch = @filter_queue_client.new_batch
     @filter_queue_client.start_metrics(batch) # explicitly call start_metrics since we dont do a read_batch here
     flush_filters_to_batch(batch, :final => true)
-    return if @force_shutdown.true? # Do not ack the current batch
-    output_batch(batch)
+    output_batch(batch, output_events_map)
     @filter_queue_client.close_batch(batch)
   end
 
@@ -502,7 +456,7 @@ module LogStash; class Pipeline < BasePipeline
       #these are both original and generated events
       batch.merge(e) unless e.cancelled?
     end
-    @filter_queue_client.add_filtered_metrics(batch)
+    @filter_queue_client.add_filtered_metrics(batch.filtered_size)
     @events_filtered.increment(batch.size)
   rescue Exception => e
     # Plugins authors should manage their own exceptions in the plugin code
@@ -518,27 +472,23 @@ module LogStash; class Pipeline < BasePipeline
   end
 
   # Take an array of events and send them to the correct output
-  def output_batch(batch)
+  def output_batch(batch, output_events_map)
     # Build a mapping of { output_plugin => [events...]}
-    output_events_map = Hash.new { |h, k| h[k] = [] }
     batch.each do |event|
       # We ask the AST to tell us which outputs to send each event to
       # Then, we stick it in the correct bin
-
-      # output_func should never return anything other than an Array but we have lots of legacy specs
-      # that monkeypatch it and return nil. We can deprecate "|| []" after fixing these specs
-      (output_func(event) || []).each do |output|
+      output_func(event).each do |output|
         output_events_map[output].push(event)
       end
     end
     # Now that we have our output to event mapping we can just invoke each output
     # once with its list of events
     output_events_map.each do |output, events|
-      return if @force_shutdown.true?
       output.multi_receive(events)
+      events.clear
     end
 
-    @filter_queue_client.add_output_metrics(batch)
+    @filter_queue_client.add_output_metrics(batch.filtered_size)
   end
 
   def wait_inputs
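output_events_map is now allocated once in worker_loop and handed to every output_batch call; clearing each Array after multi_receive lets the same hash and its arrays be reused across batches instead of being rebuilt per batch. The reuse pattern in isolation (the output key and event strings are placeholders):

    # One lazily-populated Array per output, recycled between batches.
    output_events_map = Hash.new { |h, k| h[k] = [] }

    output_events_map[:stdout] << "event-1" << "event-2"
    output_events_map.each do |output, events|
      # output.multi_receive(events) in the real pipeline
      events.clear  # keep the allocated Array for the next batch
    end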
@@ -618,10 +568,6 @@ module LogStash; class Pipeline < BasePipeline
     @logger.info("Pipeline terminated", "pipeline.id" => @pipeline_id)
   end # def shutdown
 
-  def force_shutdown!
-    @force_shutdown.make_true
-  end
-
   def wait_for_workers
     @logger.debug("Closing inputs", default_logging_keys)
     @worker_threads.map(&:join)
@@ -668,7 +614,6 @@ module LogStash; class Pipeline < BasePipeline
     flushers = options[:final] ? @shutdown_flushers : @periodic_flushers
 
     flushers.each do |flusher|
-      return if @force_shutdown.true?
       flusher.call(options, &block)
     end
   end
@@ -710,8 +655,6 @@ module LogStash; class Pipeline < BasePipeline
   # @param options [Hash]
   def flush_filters_to_batch(batch, options = {})
     flush_filters(options) do |event|
-      return if @force_shutdown.true?
-
      unless event.cancelled?
        @logger.debug? and @logger.debug("Pushing flushed events", default_logging_keys(:event => event))
        batch.merge(event)
@@ -1,6 +1,7 @@
 # encoding: utf-8
 require "logstash/pipeline_action/base"
 require "logstash/pipeline"
+require "logstash/java_pipeline"
 require "logstash/converge_result"
 require "logstash/util/loggable"
 
@@ -32,8 +33,13 @@ module LogStash module PipelineAction
   # The execute assume that the thread safety access of the pipeline
   # is managed by the caller.
   def execute(agent, pipelines)
-    pipeline = LogStash::Pipeline.new(@pipeline_config, @metric, agent)
-
+    pipeline =
+      if @pipeline_config.settings.get_value("pipeline.java_execution")
+        LogStash::JavaPipeline.new(@pipeline_config, @metric, agent)
+      else
+        LogStash::Pipeline.new(@pipeline_config, @metric, agent)
+      end
+
     status = pipeline.start # block until the pipeline is correctly started or crashed
 
     if status
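Which class gets built hinges on the new pipeline.java_execution boolean registered in pipeline_settings.rb. A hedged sketch of flipping it programmatically (assumes a LogStash::Settings instance like the one @pipeline_config carries; how the setting is surfaced to end users is not shown in this diff):

    settings = LogStash::SETTINGS.clone
    settings.set_value("pipeline.java_execution", true)
    # With this set, PipelineAction::Create#execute builds a
    # LogStash::JavaPipeline instead of a LogStash::Pipeline.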
@@ -27,7 +27,12 @@ module LogStash module PipelineAction
   end
 
   begin
-    pipeline_validator = LogStash::BasePipeline.new(@pipeline_config)
+    pipeline_validator =
+      if @pipeline_config.settings.get_value("pipeline.java_execution")
+        LogStash::JavaBasePipeline.new(@pipeline_config)
+      else
+        LogStash::BasePipeline.new(@pipeline_config)
+      end
   rescue => e
     return LogStash::ConvergeResult::FailedAction.from_exception(e)
   end
@@ -87,7 +87,8 @@ module LogStash; class PipelineReporter
   def worker_states(batch_map)
     pipeline.worker_threads.map.with_index do |thread, idx|
       status = thread.status || "dead"
-      inflight_count = batch_map[thread] ? batch_map[thread].size : 0
+      batch = batch_map[thread]
+      inflight_count = batch ? batch.size : 0
       {
         :status => status,
         :alive => thread.alive?,
@@ -14,6 +14,7 @@ module LogStash
   "dead_letter_queue.enable",
   "dead_letter_queue.max_bytes",
   "metric.collect",
+  "pipeline.java_execution",
   "path.config",
   "path.dead_letter_queue",
   "path.queue",
@@ -0,0 +1,100 @@
+# encoding: utf-8
+
+module LogStash
+  module Plugins
+
+    class ExecutionContextFactory
+
+      def initialize(agent, pipeline, dlq_writer)
+        @agent = agent
+        @pipeline = pipeline
+        @dlq_writer = dlq_writer
+      end
+
+      def create(id, klass_cfg_name)
+        ExecutionContext.new(@pipeline, @agent, id, klass_cfg_name, @dlq_writer)
+      end
+    end
+
+    class PluginMetricFactory
+
+      def initialize(pipeline_id, metric)
+        @pipeline_id = pipeline_id.to_s.to_sym
+        @metric = metric
+      end
+
+      def create(plugin_type)
+        @metric.namespace([:stats, :pipelines, @pipeline_id, :plugins])
+          .namespace("#{plugin_type}s".to_sym)
+      end
+    end
+
+    class PluginFactory
+      include org.logstash.config.ir.compiler.RubyIntegration::PluginFactory
+
+      def initialize(lir, metric_factory, logger, exec_factory, filter_class)
+        @lir = lir
+        @plugins_by_id = {}
+        @metric_factory = metric_factory
+        @logger = logger
+        @exec_factory = exec_factory
+        @filter_class = filter_class
+      end
+
+      def buildOutput(name, line, column, *args)
+        plugin("output", name, line, column, *args)
+      end
+
+      def buildFilter(name, line, column, *args)
+        plugin("filter", name, line, column, *args)
+      end
+
+      def buildInput(name, line, column, *args)
+        plugin("input", name, line, column, *args)
+      end
+
+      def buildCodec(name, *args)
+        plugin("codec", name, 0, 0, *args)
+      end
+
+      def plugin(plugin_type, name, line, column, *args)
+        # Collapse the array of arguments into a single merged hash
+        args = args.reduce({}, &:merge)
+
+        if plugin_type == "codec"
+          id = SecureRandom.uuid # codecs don't really use their IDs for metrics, so we can use anything here
+        else
+          # Pull the ID from LIR to keep IDs consistent between the two representations
+          id = @lir.graph.vertices.filter do |v|
+            v.source_with_metadata &&
+              v.source_with_metadata.line == line &&
+              v.source_with_metadata.column == column
+          end.findFirst.get.id
+        end
+        args["id"] = id # some code pulls the id out of the args
+
+        raise ConfigurationError, "Could not determine ID for #{plugin_type}/#{plugin_name}" unless id
+        raise ConfigurationError, "Two plugins have the id '#{id}', please fix this conflict" if @plugins_by_id[id]
+
+        @plugins_by_id[id] = true
+        # Scope plugins of type 'input' to 'inputs'
+        type_scoped_metric = @metric_factory.create(plugin_type)
+        klass = Plugin.lookup(plugin_type, name)
+        execution_context = @exec_factory.create(id, klass.config_name)
+
+        if plugin_type == "output"
+          OutputDelegator.new(@logger, klass, type_scoped_metric, execution_context, OutputDelegatorStrategyRegistry.instance, args)
+        elsif plugin_type == "filter"
+          @filter_class.new(@logger, klass, type_scoped_metric, execution_context, args)
+        else # input or codec plugin
+          plugin_instance = klass.new(args)
+          scoped_metric = type_scoped_metric.namespace(id.to_sym)
+          scoped_metric.gauge(:name, plugin_instance.config_name)
+          plugin_instance.metric = scoped_metric
+          plugin_instance.execution_context = execution_context
+          plugin_instance
+        end
+      end
+    end
+  end
+end
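PluginFactory implements the Java-side RubyIntegration::PluginFactory interface, so compiled Java pipeline code can call back into Ruby to instantiate plugins; buildInput/buildFilter/buildOutput/buildCodec are the Java-facing entry points and all funnel into #plugin. A hedged Ruby-side sketch of driving it directly (every constructor argument is a stand-in for the values BasePipeline#initialize wires up, and the line/column pair must match a vertex in the supplied LIR graph or the ID lookup will fail):

    factory = LogStash::Plugins::PluginFactory.new(
      lir,                  # PipelineIR from ConfigCompiler.configToPipelineIR
      metric_factory,       # LogStash::Plugins::PluginMetricFactory
      logger,
      exec_context_factory, # LogStash::Plugins::ExecutionContextFactory
      LogStash::FilterDelegator
    )
    # Same call the generated Java code makes through the interface:
    stdin_input = factory.buildInput("stdin", 1, 1, {})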