logstash-core 5.3.3-java → 5.4.0-java

Files changed (85)
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +2 -0
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/version.rb +1 -1
  5. data/lib/logstash-core_jars.rb +4 -0
  6. data/lib/logstash/agent.rb +15 -6
  7. data/lib/logstash/api/modules/base.rb +1 -1
  8. data/lib/logstash/api/rack_app.rb +1 -1
  9. data/lib/logstash/config/config_ast.rb +13 -13
  10. data/lib/logstash/config/mixin.rb +33 -28
  11. data/lib/logstash/environment.rb +11 -0
  12. data/lib/logstash/event.rb +56 -0
  13. data/lib/logstash/event_dispatcher.rb +2 -2
  14. data/lib/logstash/execution_context.rb +10 -0
  15. data/lib/logstash/filter_delegator.rb +3 -2
  16. data/lib/logstash/inputs/base.rb +15 -1
  17. data/lib/logstash/instrument/collector.rb +1 -1
  18. data/lib/logstash/instrument/metric.rb +4 -2
  19. data/lib/logstash/instrument/metric_store.rb +9 -5
  20. data/lib/logstash/instrument/null_metric.rb +1 -0
  21. data/lib/logstash/instrument/periodic_poller/cgroup.rb +3 -3
  22. data/lib/logstash/instrument/periodic_poller/jvm.rb +11 -8
  23. data/lib/logstash/instrument/periodic_poller/load_average.rb +4 -2
  24. data/lib/logstash/instrument/wrapped_write_client.rb +59 -0
  25. data/lib/logstash/java_integration.rb +2 -2
  26. data/lib/logstash/output_delegator.rb +2 -2
  27. data/lib/logstash/output_delegator_strategies/legacy.rb +5 -2
  28. data/lib/logstash/output_delegator_strategies/shared.rb +2 -1
  29. data/lib/logstash/output_delegator_strategies/single.rb +2 -1
  30. data/lib/logstash/outputs/base.rb +8 -0
  31. data/lib/logstash/patches/cabin.rb +1 -1
  32. data/lib/logstash/patches/stronger_openssl_defaults.rb +1 -1
  33. data/lib/logstash/pipeline.rb +47 -19
  34. data/lib/logstash/plugin.rb +3 -1
  35. data/lib/logstash/plugins/hooks_registry.rb +6 -6
  36. data/lib/logstash/plugins/registry.rb +2 -2
  37. data/lib/logstash/queue_factory.rb +7 -5
  38. data/lib/logstash/runner.rb +15 -1
  39. data/lib/logstash/settings.rb +14 -2
  40. data/lib/logstash/string_interpolation.rb +18 -0
  41. data/lib/logstash/timestamp.rb +27 -0
  42. data/lib/logstash/util.rb +1 -1
  43. data/lib/logstash/util/prctl.rb +1 -1
  44. data/lib/logstash/util/retryable.rb +1 -1
  45. data/lib/logstash/util/wrapped_acked_queue.rb +53 -22
  46. data/lib/logstash/util/wrapped_synchronous_queue.rb +51 -33
  47. data/lib/logstash/version.rb +1 -1
  48. data/locales/en.yml +4 -2
  49. data/logstash-core.gemspec +0 -3
  50. data/spec/api/lib/api/node_stats_spec.rb +2 -1
  51. data/spec/api/spec_helper.rb +1 -1
  52. data/spec/logstash/acked_queue_concurrent_stress_spec.rb +291 -0
  53. data/spec/logstash/agent_spec.rb +24 -0
  54. data/spec/logstash/config/mixin_spec.rb +11 -2
  55. data/spec/logstash/event_dispatcher_spec.rb +8 -1
  56. data/spec/logstash/event_spec.rb +346 -0
  57. data/spec/logstash/execution_context_spec.rb +13 -0
  58. data/spec/logstash/filter_delegator_spec.rb +4 -2
  59. data/spec/logstash/inputs/base_spec.rb +41 -0
  60. data/spec/logstash/instrument/metric_spec.rb +2 -1
  61. data/spec/logstash/instrument/metric_store_spec.rb +14 -0
  62. data/spec/logstash/instrument/namespaced_metric_spec.rb +2 -1
  63. data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +1 -1
  64. data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +35 -0
  65. data/spec/logstash/instrument/periodic_poller/load_average_spec.rb +1 -5
  66. data/spec/logstash/instrument/wrapped_write_client_spec.rb +113 -0
  67. data/spec/logstash/json_spec.rb +1 -1
  68. data/spec/logstash/legacy_ruby_event_spec.rb +636 -0
  69. data/spec/logstash/legacy_ruby_timestamp_spec.rb +170 -0
  70. data/spec/logstash/output_delegator_spec.rb +6 -3
  71. data/spec/logstash/outputs/base_spec.rb +23 -0
  72. data/spec/logstash/pipeline_pq_file_spec.rb +18 -8
  73. data/spec/logstash/pipeline_spec.rb +41 -5
  74. data/spec/logstash/plugin_spec.rb +15 -3
  75. data/spec/logstash/plugins/hooks_registry_spec.rb +2 -2
  76. data/spec/logstash/runner_spec.rb +33 -2
  77. data/spec/logstash/settings/port_range_spec.rb +1 -1
  78. data/spec/logstash/settings_spec.rb +21 -0
  79. data/spec/logstash/timestamp_spec.rb +29 -0
  80. data/spec/logstash/util/accessors_spec.rb +179 -0
  81. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +4 -11
  82. data/spec/logstash/util_spec.rb +1 -1
  83. data/spec/logstash/webserver_spec.rb +1 -1
  84. data/spec/support/mocks_classes.rb +65 -53
  85. metadata +25 -30

data/lib/logstash/execution_context.rb
@@ -0,0 +1,10 @@
+# encoding: utf-8
+module LogStash
+  class ExecutionContext
+    attr_reader :pipeline_id
+
+    def initialize(pipeline_id)
+      @pipeline_id = pipeline_id
+    end
+  end
+end
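
The new ExecutionContext is a small value object that tells each plugin which pipeline owns it; the delegators below forward it to every filter, output, and input (and from there into codecs). A minimal usage sketch, assuming the execution_context accessor that plugin.rb (+3 -1) adds:

    context = LogStash::ExecutionContext.new(:main)
    plugin.execution_context = context   # what the delegators do for each plugin
    context.pipeline_id                  # => :main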

data/lib/logstash/filter_delegator.rb
@@ -14,7 +14,7 @@ module LogStash
   ]
   def_delegators :@filter, *DELEGATED_METHODS

-  def initialize(logger, klass, metric, plugin_args)
+  def initialize(logger, klass, metric, execution_context, plugin_args)
     @logger = logger
     @klass = klass
     @id = plugin_args["id"]
@@ -23,6 +23,7 @@ module LogStash
     # Scope the metrics to the plugin
     namespaced_metric = metric.namespace(@id.to_sym)
     @filter.metric = namespaced_metric
+    @filter.execution_context = execution_context

     @metric_events = namespaced_metric.namespace(:events)
     namespaced_metric.gauge(:name, config_name)
@@ -42,7 +43,7 @@ module LogStash
     new_events = @filter.multi_filter(events)
     clock.stop

-    # There is no garantee in the context of filter
+    # There is no guarantee in the context of filter
     # that EVENTS_IN == EVENTS_OUT, see the aggregates and
     # the split filter
     c = new_events.count { |event| !event.cancelled? }
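
The corrected comment points at a real subtlety: filters may cancel events (drop) or emit new ones (split, aggregate), so events-in and events-out can legitimately differ. The delegator therefore counts only survivors, as in this sketch of the logic above:

    new_events = @filter.multi_filter(events)
    out = new_events.count { |event| !event.cancelled? }  # drop lowers it, split raises it
    @metric_events.increment(:out, out)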

data/lib/logstash/inputs/base.rb
@@ -89,11 +89,25 @@ class LogStash::Inputs::Base < LogStash::Plugin
     stop
   end

-  # stop? should never be overriden
+  # stop? should never be overridden
   public
   def stop?
     @stop_called.value
   end
+
+  def clone
+    cloned = super
+    cloned.codec = @codec.clone if @codec
+    cloned
+  end
+
+  def execution_context=(context)
+    super
+    # There is no easy way to propagate an instance variable into the codec,
+    # because codecs are created at the class level
+    @codec.execution_context = context
+    context
+  end

   protected
   def decorate(event)
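
Codecs are declared at the class level, so per-instance state has to be forwarded by hand: clone now deep-copies the codec so two input instances never share one, and execution_context= pushes the context down into it. A hypothetical illustration (the stdin input and plain codec are stand-ins):

    input = LogStash::Plugin.lookup("input", "stdin").new("codec" => "plain")
    copy  = input.clone                 # copy owns its own codec clone
    input.execution_context = context   # the setter forwards the context to @codec too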

data/lib/logstash/instrument/collector.rb
@@ -29,7 +29,7 @@ module LogStash module Instrument
   # of update the metric
   #
   # If there is a problem with the key or the type of metric we will record an error
-  # but we wont stop processing events, theses errors are not considered fatal.
+  # but we won't stop processing events; these errors are not considered fatal.
   #
   def push(namespaces_path, key, type, *metric_type_params)
     begin

data/lib/logstash/instrument/metric.rb
@@ -77,7 +77,7 @@ module LogStash module Instrument
   private
   # Allow to calculate the execution of a block of code.
   # This class support 2 differents syntax a block or the return of
-  # the object itself, but in the later case the metric wont be recorded
+  # the object itself, but in the latter case the metric won't be recorded
   # Until we call `#stop`.
   #
   # @see LogStash::Instrument::Metric#time
@@ -96,7 +96,9 @@ module LogStash module Instrument
   end

   def stop
-    @metric.report_time(@namespace, @key, (MILLISECONDS * (Time.now - @start_time)).to_i)
+    execution_time = (MILLISECONDS * (Time.now - @start_time)).to_i
+    @metric.report_time(@namespace, @key, execution_time)
+    execution_time
   end
   end
 end
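
TimedExecution#stop now returns the measured duration instead of only recording it (and NullTimedExecution.stop returns 0 to match, below), so one timing can be reported to several metric endpoints without skew, which is exactly what the new WrappedWriteClient needs. Sketch, mirroring its record_metric:

    clock = @events_metrics.time(:queue_push_duration_in_millis)
    result = yield                        # the timed work
    elapsed = clock.stop                  # records AND returns the millis
    @pipeline_metrics.report_time(:queue_push_duration_in_millis, elapsed)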

data/lib/logstash/instrument/metric_store.rb
@@ -52,7 +52,7 @@ module LogStash module Instrument
   # BUT. If the value is not present in the `@fast_lookup` the value will be inserted and
   # `#put_if_absent` will return nil. With this returned value of nil we assume that we don't
   # have it in the `@metric_store` for structured search so we add it there too.
-  if found_value = @fast_lookup.put_if_absent([namespaces, key], provided_value)
+  if found_value = @fast_lookup.put_if_absent(namespaces.dup << key, provided_value)
     return found_value
   else
     @structured_lookup_mutex.synchronize do
@@ -73,7 +73,7 @@ module LogStash module Instrument
   # If you use the `,` on a key the metric store will return the both values at that level
   #
   # The returned hash will keep the same structure as it had in the `Concurrent::Map`
-  # but will be a normal ruby hash. This will allow the api to easily seriliaze the content
+  # but will be a normal ruby hash. This will allow the api to easily serialize the content
   # of the map
   #
   # @param [Array] The path where values should be located
@@ -131,7 +131,7 @@ module LogStash module Instrument
   # }
   def extract_metrics(path, *keys)
     keys.reduce({}) do |acc,k|
-      # Simplifiy 1-length keys
+      # Simplify 1-length keys
       k = k.first if k.is_a?(Array) && k.size == 1

       # If we have array values here we need to recurse
@@ -162,6 +162,10 @@ module LogStash module Instrument
     end
   end

+  def has_metric?(*path)
+    @fast_lookup[path]
+  end
+
   # Return all the individuals Metric,
   # This call mimic a Enum's each if a block is provided
   #
@@ -179,9 +183,9 @@ module LogStash module Instrument
   alias_method :all, :each

   def prune(path)
-    key_paths = key_paths(path).map {|k| k.to_sym }
+    key_paths = key_paths(path).map(&:to_sym)
     @structured_lookup_mutex.synchronize do
-      keys_to_delete = @fast_lookup.keys.select {|namespace, _| (key_paths - namespace).empty? }
+      keys_to_delete = @fast_lookup.keys.select {|namespace| (key_paths - namespace[0..-2]).empty? }
       keys_to_delete.each {|k| @fast_lookup.delete(k) }
       delete_from_map(@store, key_paths)
     end
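
The fast-lookup key changes shape in this file: it was the two-element array [namespaces, key] (a nested array plus the key) and becomes the flat array namespaces + [key]. Both the new has_metric?(*path) and the namespace[0..-2] slice in prune rely on the flat form. Illustrative:

    # before: @fast_lookup[[[:stats, :events], :in]]
    # after:  @fast_lookup[[:stats, :events, :in]]
    path = [:stats, :events, :in]
    path[0..-2]   # => [:stats, :events], the namespace part prune compares against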

data/lib/logstash/instrument/null_metric.rb
@@ -54,6 +54,7 @@ module LogStash module Instrument
   # @see LogStash::Instrument::TimedExecution`
   class NullTimedExecution
     def self.stop
+      0
     end
   end
 end

data/lib/logstash/instrument/periodic_poller/cgroup.rb
@@ -9,7 +9,7 @@ module LogStash module Instrument module PeriodicPoller
   include LogStash::Util::Loggable

   CONTROL_GROUP_RE = Regexp.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)");
-  CONTROLLER_SEPERATOR_RE = ","
+  CONTROLLER_SEPARATOR_RE = ","

   PROC_SELF_CGROUP_FILE = Pathname.new("/proc/self/cgroup")
   PROC_CGROUP_CPU_DIR = Pathname.new("/sys/fs/cgroup/cpu")
@@ -36,8 +36,8 @@ module LogStash module Instrument module PeriodicPoller

   read_proc_self_cgroup_lines.each do |line|
     matches = CONTROL_GROUP_RE.match(line)
-    # multiples controlles, same hierachy
-    controllers = matches[1].split(CONTROLLER_SEPERATOR_RE)
+    # multiple controllers, same hierarchy
+    controllers = matches[1].split(CONTROLLER_SEPARATOR_RE)
     controllers.each_with_object(response) { |controller| response[controller] = matches[2] }
   end


data/lib/logstash/instrument/periodic_poller/jvm.rb
@@ -34,6 +34,13 @@ module LogStash module Instrument module PeriodicPoller
     end
   end

+  MEMORY_TRANSPOSE_MAP = {
+    "usage.used" => :used_in_bytes,
+    "usage.committed" => :committed_in_bytes,
+    "usage.max" => :max_in_bytes,
+    "peak.max" => :peak_max_in_bytes,
+    "peak.used" => :peak_used_in_bytes
+  }

   attr_reader :metric

@@ -52,8 +59,6 @@ module LogStash module Instrument module PeriodicPoller
     collect_load_average
   end

-  private
-
   def collect_gc_stats
     garbage_collectors = ManagementFactory.getGarbageCollectorMXBeans()

@@ -141,7 +146,6 @@ module LogStash module Instrument module PeriodicPoller
     end
   end

-
   def build_pools_metrics(data)
     heap = data["heap"]
     old = {}
@@ -164,11 +168,10 @@ module LogStash module Instrument module PeriodicPoller
   collection.reduce(default_information_accumulator) do |m,e|
     e = { e[0] => e[1] } if e.is_a?(Array)
     e.each_pair do |k,v|
-      m[:used_in_bytes] += v if k.include?("used")
-      m[:committed_in_bytes] += v if k.include?("committed")
-      m[:max_in_bytes] += v if k.include?("max")
-      m[:peak_max_in_bytes] += v if k.include?("peak.max")
-      m[:peak_used_in_bytes] += v if k.include?("peak.used")
+      if MEMORY_TRANSPOSE_MAP.include?(k)
+        transpose_key = MEMORY_TRANSPOSE_MAP[k]
+        m[transpose_key] += v
+      end
     end
     m
   end
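
Swapping substring checks for an exact-key map is more than cleanup: k.include?("used") also matched "peak.used", and k.include?("max") matched "peak.max" and "usage.max" alike, so peak values were folded into the plain totals too. Illustrative:

    ["usage.used", "peak.used"].count { |k| k.include?("used") }  # => 2, double-counted before
    MEMORY_TRANSPOSE_MAP["peak.used"]                             # => :peak_used_in_bytes only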

data/lib/logstash/instrument/periodic_poller/load_average.rb
@@ -1,4 +1,6 @@
 # encoding: utf-8
+java_import "java.lang.management.ManagementFactory"
+
 module LogStash module Instrument module PeriodicPoller
   class LoadAverage
     class Windows
@@ -11,8 +13,8 @@ module LogStash module Instrument module PeriodicPoller
   LOAD_AVG_FILE = "/proc/loadavg"
   TOKEN_SEPARATOR = " "

-  def self.get
-    load_average = ::File.read(LOAD_AVG_FILE).chomp.split(TOKEN_SEPARATOR)
+  def self.get(content = ::File.read(LOAD_AVG_FILE))
+    load_average = content.chomp.split(TOKEN_SEPARATOR)

     {
       :"1m" => load_average[0].to_f,
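
Threading the file content through a default argument makes the parser testable without reading /proc/loadavg. A spec-style sketch, assuming the enclosing class is the Linux reader in this file:

    sample = "0.41 0.33 0.28 1/557 32764"   # a typical /proc/loadavg line
    LoadAverage::Linux.get(sample)
    # => { :"1m" => 0.41, :"5m" => 0.33, :"15m" => 0.28 }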

data/lib/logstash/instrument/wrapped_write_client.rb
@@ -0,0 +1,59 @@
+# encoding: utf-8
+module LogStash module Instrument
+  class WrappedWriteClient
+    def initialize(write_client, pipeline, metric, plugin)
+      @write_client = write_client
+
+      pipeline_id = pipeline.pipeline_id.to_s.to_sym
+      plugin_type = "#{plugin.class.plugin_type}s".to_sym
+
+      @events_metrics = metric.namespace([:stats, :events])
+      @pipeline_metrics = metric.namespace([:stats, :pipelines, pipeline_id, :events])
+      @plugin_events_metrics = metric.namespace([:stats, :pipelines, pipeline_id, :plugins, plugin_type, plugin.id.to_sym, :events])
+
+      define_initial_metrics_values
+    end
+
+    def get_new_batch
+      @write_client.get_new_batch
+    end
+
+    def push(event)
+      record_metric { @write_client.push(event) }
+    end
+    alias_method(:<<, :push)
+
+    def push_batch(batch)
+      record_metric(batch.size) { @write_client.push_batch(batch) }
+    end
+
+    private
+    def record_metric(size = 1)
+      @events_metrics.increment(:in, size)
+      @pipeline_metrics.increment(:in, size)
+      @plugin_events_metrics.increment(:out, size)
+
+      clock = @events_metrics.time(:queue_push_duration_in_millis)
+
+      result = yield
+
+      # Reuse the same values for all the endpoints to make sure we don't have skew in times.
+      execution_time = clock.stop
+
+      @pipeline_metrics.report_time(:queue_push_duration_in_millis, execution_time)
+      @plugin_events_metrics.report_time(:queue_push_duration_in_millis, execution_time)
+
+      result
+    end
+
+    def define_initial_metrics_values
+      @events_metrics.increment(:in, 0)
+      @pipeline_metrics.increment(:in, 0)
+      @plugin_events_metrics.increment(:out, 0)
+
+      @events_metrics.report_time(:queue_push_duration_in_millis, 0)
+      @pipeline_metrics.report_time(:queue_push_duration_in_millis, 0)
+      @plugin_events_metrics.report_time(:queue_push_duration_in_millis, 0)
+    end
+  end
+end end
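
WrappedWriteClient is a decorator: it quacks like the queue's write client (push, <<, push_batch) while recording in counts and queue_push_duration_in_millis at three namespaces: global events, the owning pipeline, and the individual plugin. define_initial_metrics_values zero-fills those keys so the stats API can expose them before any event flows. Usage sketch, mirroring wrapped_write_client in pipeline.rb below:

    wrapped = LogStash::Instrument::WrappedWriteClient.new(queue.write_client, pipeline, metric, input_plugin)
    wrapped << event   # counts and timing recorded, then delegated to the real client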

data/lib/logstash/java_integration.rb
@@ -1,7 +1,7 @@
 # encoding: utf-8
 require "java"

-# this is mainly for usage with JrJackson json parsing in :raw mode which genenerates
+# this is mainly for usage with JrJackson json parsing in :raw mode which generates
 # Java::JavaUtil::ArrayList and Java::JavaUtil::LinkedHashMap native objects for speed.
 # these object already quacks like their Ruby equivalents Array and Hash but they will
 # not test for is_a?(Array) or is_a?(Hash) and we do not want to include tests for
@@ -35,7 +35,7 @@ map_mixin = lambda do
   # this bug makes has_key? (and all its aliases) return false for a key that has a nil value.
   # Only LinkedHashMap is patched here because patching the Map interface is not working.
   # TODO find proper fix, and submit upstream
-  # releavant JRuby files:
+  # relevant JRuby files:
   # https://github.com/jruby/jruby/blob/master/core/src/main/ruby/jruby/java/java_ext/java.util.rb
   # https://github.com/jruby/jruby/blob/master/core/src/main/java/org/jruby/java/proxies/MapJavaProxy.java
   def has_key?(key)

data/lib/logstash/output_delegator.rb
@@ -7,7 +7,7 @@ require "logstash/output_delegator_strategies/legacy"
 module LogStash class OutputDelegator
   attr_reader :metric, :metric_events, :strategy, :namespaced_metric, :metric_events, :id

-  def initialize(logger, output_class, metric, strategy_registry, plugin_args)
+  def initialize(logger, output_class, metric, execution_context, strategy_registry, plugin_args)
     @logger = logger
     @output_class = output_class
     @metric = metric
@@ -22,7 +22,7 @@ module LogStash class OutputDelegator

     @strategy = strategy_registry.
       class_for(self.concurrency).
-      new(@logger, @output_class, @namespaced_metric, plugin_args)
+      new(@logger, @output_class, @namespaced_metric, execution_context, plugin_args)
   end

   def config_name

data/lib/logstash/output_delegator_strategies/legacy.rb
@@ -2,10 +2,13 @@
 module LogStash module OutputDelegatorStrategies class Legacy
   attr_reader :worker_count, :workers

-  def initialize(logger, klass, metric, plugin_args)
+  def initialize(logger, klass, metric, execution_context, plugin_args)
     @worker_count = (plugin_args["workers"] || 1).to_i
     @workers = @worker_count.times.map { klass.new(plugin_args) }
-    @workers.each {|w| w.metric = metric }
+    @workers.each do |w|
+      w.metric = metric
+      w.execution_context = execution_context
+    end
     @worker_queue = SizedQueue.new(@worker_count)
     @workers.each {|w| @worker_queue << w}
   end

data/lib/logstash/output_delegator_strategies/shared.rb
@@ -1,7 +1,8 @@
 module LogStash module OutputDelegatorStrategies class Shared
-  def initialize(logger, klass, metric, plugin_args)
+  def initialize(logger, klass, metric, execution_context, plugin_args)
     @output = klass.new(plugin_args)
     @output.metric = metric
+    @output.execution_context = execution_context
   end

   def register

data/lib/logstash/output_delegator_strategies/single.rb
@@ -1,7 +1,8 @@
 module LogStash module OutputDelegatorStrategies class Single
-  def initialize(logger, klass, metric, plugin_args)
+  def initialize(logger, klass, metric, execution_context, plugin_args)
     @output = klass.new(plugin_args)
     @output.metric = metric
+    @output.execution_context = execution_context
     @mutex = Mutex.new
   end

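With this change all three strategies (Legacy, Shared, Single) take execution_context in the same position, so OutputDelegator can instantiate whichever class class_for(concurrency) returns without special-casing. The uniform call shape, per the output_delegator.rb hunk above:

    strategy_class = OutputDelegatorStrategyRegistry.instance.class_for(concurrency)
    strategy_class.new(logger, output_class, namespaced_metric, execution_context, plugin_args)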

data/lib/logstash/outputs/base.rb
@@ -105,6 +105,14 @@ class LogStash::Outputs::Base < LogStash::Plugin
     self.class.concurrency
   end

+  def execution_context=(context)
+    super
+    # There is no easy way to propagate an instance variable into the codec,
+    # because codecs are created at the class level
+    @codec.execution_context = context
+    context
+  end
+
   private
   def output?(event)
     # TODO: noop for now, remove this once we delete this call from all plugins

data/lib/logstash/patches/cabin.rb
@@ -9,7 +9,7 @@ if ENV["PROFILE_BAD_LOG_CALLS"] || ($DEBUGLIST || []).include?("log")
   # Basically, the following is wastes tons of effort creating objects that are
   # never used if the log level hides the log:
   #
-  # logger.debug("something happend", :what => Happened)
+  # logger.debug("something happened", :what => Happened)
   #
   # This is shown to be 4x faster:
   #
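
The comment fixed here belongs to a patch about lazy logging: building the message and its argument hash is wasted work when the level is filtered out, so the call should be guarded on the level first. A sketch of the guarded form the comment advocates (the exact idiom in the file may differ):

    logger.debug("something happened", :what => Happened) if logger.debug?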

data/lib/logstash/patches/stronger_openssl_defaults.rb
@@ -54,7 +54,7 @@ class OpenSSL::SSL::SSLContext
   #
   # This monkeypatch doesn't enforce a `VERIFY_MODE` on the SSLContext,
   # SSLContext are both used for the client and the server implementation,
-  # If set the `verify_mode` to peer the server wont accept any connection,
+  # If set the `verify_mode` to peer the server won't accept any connection,
   # because it will try to verify the client certificate, this is a protocol
   # details implemented at the plugin level.
   #

data/lib/logstash/pipeline.rb
@@ -17,9 +17,11 @@ require "logstash/instrument/namespaced_metric"
 require "logstash/instrument/null_metric"
 require "logstash/instrument/namespaced_null_metric"
 require "logstash/instrument/collector"
+require "logstash/instrument/wrapped_write_client"
 require "logstash/output_delegator"
 require "logstash/filter_delegator"
 require "logstash/queue_factory"
+require "logstash/execution_context"

 module LogStash; class BasePipeline
   include LogStash::Util::Loggable
@@ -41,6 +43,7 @@ module LogStash; class BasePipeline
   @inputs = nil
   @filters = nil
   @outputs = nil
+  @execution_context = LogStash::ExecutionContext.new(@pipeline_id)

   grammar = LogStashConfigParser.new
   parsed_config = grammar.parse(config_str)
@@ -88,12 +91,15 @@ module LogStash; class BasePipeline
   klass = Plugin.lookup(plugin_type, name)

   if plugin_type == "output"
-    OutputDelegator.new(@logger, klass, type_scoped_metric, OutputDelegatorStrategyRegistry.instance, args)
+    OutputDelegator.new(@logger, klass, type_scoped_metric, @execution_context, OutputDelegatorStrategyRegistry.instance, args)
   elsif plugin_type == "filter"
-    FilterDelegator.new(@logger, klass, type_scoped_metric, args)
+    FilterDelegator.new(@logger, klass, type_scoped_metric, @execution_context, args)
   else # input
     input_plugin = klass.new(args)
-    input_plugin.metric = type_scoped_metric.namespace(id)
+    scoped_metric = type_scoped_metric.namespace(id.to_sym)
+    scoped_metric.gauge(:name, input_plugin.config_name)
+    input_plugin.metric = scoped_metric
+    input_plugin.execution_context = @execution_context
     input_plugin
   end
 end
@@ -148,12 +154,13 @@ module LogStash; class Pipeline < BasePipeline
   @input_queue_client = @queue.write_client
   @filter_queue_client = @queue.read_client
   @signal_queue = Queue.new
-  # Note that @infilght_batches as a central mechanism for tracking inflight
+  # Note that @inflight_batches as a central mechanism for tracking inflight
   # batches will fail if we have multiple read clients here.
   @filter_queue_client.set_events_metric(metric.namespace([:stats, :events]))
   @filter_queue_client.set_pipeline_metric(
     metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :events])
   )
+  @drain_queue = @settings.get_value("queue.drain")

   @events_filtered = Concurrent::AtomicFixnum.new(0)
   @events_consumed = Concurrent::AtomicFixnum.new(0)
@@ -250,6 +257,10 @@ module LogStash; class Pipeline < BasePipeline
   @running.false?
 end

+def system?
+  settings.get_value("pipeline.system")
+end
+
 # register_plugin simply calls the plugin #register method and catches & logs any error
 # @param plugin [Plugin] the plugin to register
 # @return [Plugin] the registered plugin
@@ -326,26 +337,32 @@ module LogStash; class Pipeline < BasePipeline
 # Main body of what a worker thread does
 # Repeatedly takes batches off the queue, filters, then outputs them
 def worker_loop(batch_size, batch_delay)
-  running = true
+  shutdown_requested = false

   @filter_queue_client.set_batch_dimensions(batch_size, batch_delay)

-  while running
-    batch = @filter_queue_client.take_batch
+  while true
     signal = @signal_queue.empty? ? NO_SIGNAL : @signal_queue.pop
-    running = !signal.shutdown?
+    shutdown_requested |= signal.shutdown? # latch on shutdown signal

+    batch = @filter_queue_client.read_batch # metrics are started in read_batch
     @events_consumed.increment(batch.size)
-
     filter_batch(batch)
-
-    if signal.flush? || signal.shutdown?
-      flush_filters_to_batch(batch, :final => signal.shutdown?)
-    end
-
+    flush_filters_to_batch(batch, :final => false) if signal.flush?
     output_batch(batch)
     @filter_queue_client.close_batch(batch)
+
+    # keep the break at the end of the loop, after the read_batch operation:
+    # some pipeline specs rely on this "final read_batch" before shutdown.
+    break if shutdown_requested && !draining_queue?
   end
+
+  # we are shutting down; the queue is drained if that was required, so perform a final flush.
+  # for this we need to create a new empty batch to contain the final flushed events
+  batch = @filter_queue_client.new_batch
+  @filter_queue_client.start_metrics(batch) # explicitly call start_metrics since we don't do a read_batch here
+  flush_filters_to_batch(batch, :final => true)
+  output_batch(batch)
+  @filter_queue_client.close_batch(batch)
 end

 def filter_batch(batch)
411
428
  # first make sure we can register all input plugins
412
429
  register_plugins(@inputs)
413
430
 
414
- # then after all input plugins are sucessfully registered, start them
431
+ # then after all input plugins are successfully registered, start them
415
432
  @inputs.each { |input| start_input(input) }
416
433
  end
417
434
 
@@ -422,7 +439,8 @@ module LogStash; class Pipeline < BasePipeline
422
439
  def inputworker(plugin)
423
440
  Util::set_thread_name("[#{pipeline_id}]<#{plugin.class.config_name}")
424
441
  begin
425
- plugin.run(@input_queue_client)
442
+ input_queue_client = wrapped_write_client(plugin)
443
+ plugin.run(input_queue_client)
426
444
  rescue => e
427
445
  if plugin.stop?
428
446
  @logger.debug("Input plugin raised exception during shutdown, ignoring it.",
@@ -456,11 +474,11 @@ module LogStash; class Pipeline < BasePipeline
456
474
  # @param before_stop [Proc] code block called before performing stop operation on input plugins
457
475
  def shutdown(&before_stop)
458
476
  # shutdown can only start once the pipeline has completed its startup.
459
- # avoid potential race conditoon between the startup sequence and this
477
+ # avoid potential race condition between the startup sequence and this
460
478
  # shutdown method which can be called from another thread at any time
461
479
  sleep(0.1) while !ready?
462
480
 
463
- # TODO: should we also check against calling shutdown multiple times concurently?
481
+ # TODO: should we also check against calling shutdown multiple times concurrently?
464
482
 
465
483
  before_stop.call if block_given?
466
484
 
@@ -604,4 +622,14 @@ module LogStash; class Pipeline < BasePipeline
604
622
  :flushing => @flushing
605
623
  }
606
624
  end
607
- end end
625
+
626
+ private
627
+
628
+ def draining_queue?
629
+ @drain_queue ? !@filter_queue_client.empty? : false
630
+ end
631
+
632
+ def wrapped_write_client(plugin)
633
+ LogStash::Instrument::WrappedWriteClient.new(@input_queue_client, self, metric, plugin)
634
+ end
635
+ end; end
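
draining_queue? ties the worker loop to the new queue.drain setting read in the initializer: with draining off it always returns false, so workers exit on the first latched shutdown; with draining on it stays true until the read client reports the queue empty. Note the asymmetry with system?: @drain_queue is cached once at pipeline setup, while pipeline.system is looked up live on each call. Behavior sketch:

    # queue.drain = false -> break as soon as shutdown_requested is latched
    # queue.drain = true  -> keep reading batches until @filter_queue_client.empty?
    @drain_queue ? !@filter_queue_client.empty? : false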