logstash-core 6.0.1-java → 6.1.0-java

Files changed (64)
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +1 -1
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/logstash-core.rb +14 -2
  5. data/lib/logstash-core_jars.rb +4 -2
  6. data/lib/logstash/agent.rb +8 -2
  7. data/lib/logstash/api/modules/node.rb +11 -5
  8. data/lib/logstash/api/modules/stats.rb +13 -7
  9. data/lib/logstash/compiler.rb +6 -10
  10. data/lib/logstash/compiler/lscl.rb +10 -1
  11. data/lib/logstash/compiler/lscl/helpers.rb +3 -1
  12. data/lib/logstash/config/mixin.rb +2 -2
  13. data/lib/logstash/environment.rb +1 -6
  14. data/lib/logstash/errors.rb +1 -1
  15. data/lib/logstash/event.rb +0 -2
  16. data/lib/logstash/filter_delegator.rb +1 -2
  17. data/lib/logstash/instrument/metric_type/counter.rb +1 -1
  18. data/lib/logstash/instrument/metric_type/gauge.rb +1 -1
  19. data/lib/logstash/instrument/wrapped_write_client.rb +1 -1
  20. data/lib/logstash/java_filter_delegator.rb +79 -0
  21. data/lib/logstash/java_pipeline.rb +690 -0
  22. data/lib/logstash/json.rb +4 -29
  23. data/lib/logstash/output_delegator.rb +3 -2
  24. data/lib/logstash/patches/bugfix_jruby_2558.rb +1 -1
  25. data/lib/logstash/pipeline.rb +32 -89
  26. data/lib/logstash/pipeline_action/create.rb +8 -2
  27. data/lib/logstash/pipeline_action/reload.rb +6 -1
  28. data/lib/logstash/pipeline_reporter.rb +2 -1
  29. data/lib/logstash/pipeline_settings.rb +1 -0
  30. data/lib/logstash/plugins/plugin_factory.rb +100 -0
  31. data/lib/logstash/plugins/registry.rb +18 -7
  32. data/lib/logstash/queue_factory.rb +3 -1
  33. data/lib/logstash/runner.rb +13 -56
  34. data/lib/logstash/settings.rb +2 -2
  35. data/lib/logstash/timestamp.rb +0 -1
  36. data/lib/logstash/util.rb +13 -21
  37. data/lib/logstash/util/java_version.rb +0 -1
  38. data/lib/logstash/util/settings_helper.rb +79 -0
  39. data/lib/logstash/util/{environment_variables.rb → substitution_variables.rb} +10 -8
  40. data/lib/logstash/util/wrapped_acked_queue.rb +17 -108
  41. data/lib/logstash/util/wrapped_synchronous_queue.rb +38 -178
  42. data/locales/en.yml +2 -0
  43. data/spec/conditionals_spec.rb +235 -80
  44. data/spec/logstash/api/modules/node_spec.rb +11 -0
  45. data/spec/logstash/compiler/compiler_spec.rb +28 -2
  46. data/spec/logstash/environment_spec.rb +0 -5
  47. data/spec/logstash/event_spec.rb +7 -2
  48. data/spec/logstash/filter_delegator_spec.rb +1 -1
  49. data/spec/logstash/filters/base_spec.rb +30 -28
  50. data/spec/logstash/instrument/wrapped_write_client_spec.rb +2 -2
  51. data/spec/logstash/java_filter_delegator_spec.rb +176 -0
  52. data/spec/logstash/java_pipeline_spec.rb +933 -0
  53. data/spec/logstash/json_spec.rb +27 -45
  54. data/spec/logstash/plugins/registry_spec.rb +7 -0
  55. data/spec/logstash/queue_factory_spec.rb +5 -2
  56. data/spec/logstash/settings_spec.rb +1 -1
  57. data/spec/logstash/util/java_version_spec.rb +1 -3
  58. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +27 -24
  59. data/spec/logstash/webserver_spec.rb +3 -6
  60. data/spec/support/helpers.rb +5 -0
  61. data/spec/support/pipeline/pipeline_helpers.rb +97 -0
  62. data/versions-gem-copy.yml +5 -2
  63. metadata +14 -5
  64. data/lib/logstash/patches/rubygems.rb +0 -38
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 9948f10f7f434f0e67b751dca73b6bc83f577f1b933985b1251dbc6bca7a59dc
-  data.tar.gz: 405b2588cab170eaea4e861e2f559c8698ad40ea3bcc4468e4fcd0e84e869269
+  metadata.gz: 83b5b84eb3c3b4cbf13f2b52b49c71dbdd039314d626ef132ecc39c4c99def2f
+  data.tar.gz: b253d715ef3b14356a4043399289f460fa33d3d07ec47af6525fca0fb1089432
 SHA512:
-  metadata.gz: 61314838211ae44135b607178f642b449b86ca2dd83f38c5a5d9979438f5ae77b1c1f692dd5384fdf4f9069a8caf521f111b8e22ea17778166b23a33e3389502
-  data.tar.gz: e8660a1b7f2781cc4fa2df9299c50f3c4f31ecec1921a884867c7004264d12bffa1f18959675a81f98802897c63e46e10b44c6b4b7f83763eec62a3e91786734
+  metadata.gz: abb3868e2c57adb416b1db38c793236d2a022592e33a3cd7b0c99a8a51781fa3d98fdba53dfdee1890b93547c7b3c12af1d0de4b07af38669109e94c859cd312
+  data.tar.gz: fc6f31b28c7ce4aa978838af1f60ef6d752fabe639c22b00468db86c706b648511062baa9b4e69bea60414f5dccda814946751e601d92763c4b12a242f37c10d
data/gemspec_jars.rb CHANGED
@@ -8,5 +8,5 @@ gem.requirements << "jar org.apache.logging.log4j:log4j-core, 2.6.2"
 gem.requirements << "jar com.fasterxml.jackson.core:jackson-core, 2.9.1"
 gem.requirements << "jar com.fasterxml.jackson.core:jackson-databind, 2.9.1"
 gem.requirements << "jar com.fasterxml.jackson.core:jackson-annotations, 2.9.1"
-gem.requirements << "jar com.fasterxml.jackson.module:jackson-module-afterburner, 2.9.1"
+gem.requirements << "jar org.codehaus.janino:janino, 3.0.7"
 gem.requirements << "jar com.fasterxml.jackson.dataformat:jackson-dataformat-cbor, 2.9.1"
data/lib/logstash-core/logstash-core.rb CHANGED
@@ -8,8 +8,16 @@ end
 require "logstash-core_jars"
 
 # local dev setup
-classes_dir = File.expand_path("../../../out/production/classes", __FILE__)
-resources_dir = File.expand_path("../../../out/production/resources", __FILE__)
+alt_classdir = File.expand_path("../../../out/production/classes", __FILE__) # IntelliJ's Gradle output as of 2017.02, see https://youtrack.jetbrains.com/issue/IDEA-175172
+if File.directory?(alt_classdir)
+  classes_dir = alt_classdir
+  resources_dir = File.expand_path("../../../out/production/resources", __FILE__)
+else
+  classes_dir = File.expand_path("../../../build/classes/java/main", __FILE__)
+  resources_dir = File.expand_path("../../../build/resources/main", __FILE__)
+end
+
+
 
 if File.directory?(classes_dir) && File.directory?(resources_dir)
   # if in local dev setup, add target to classpath
@@ -23,3 +31,7 @@ else
     raise("Error loading logstash-core/logstash-core.jar file, cause: #{e.message}")
   end
 end
+
+# Load Logstash's Java-defined Ruby classes by classloading RubyUtil, which sets
+# them up in its static constructor
+java_import org.logstash.RubyUtil
data/lib/logstash-core_jars.rb CHANGED
@@ -8,9 +8,10 @@ rescue LoadError
   require 'org/slf4j/slf4j-api/1.7.21/slf4j-api-1.7.21.jar'
   require 'com/fasterxml/jackson/core/jackson-annotations/2.9.1/jackson-annotations-2.9.1.jar'
   require 'org/apache/logging/log4j/log4j-slf4j-impl/2.6.2/log4j-slf4j-impl-2.6.2.jar'
-  require 'com/fasterxml/jackson/module/jackson-module-afterburner/2.9.1/jackson-module-afterburner-2.9.1.jar'
   require 'com/fasterxml/jackson/dataformat/jackson-dataformat-cbor/2.9.1/jackson-dataformat-cbor-2.9.1.jar'
+  require 'org/codehaus/janino/commons-compiler/3.0.7/commons-compiler-3.0.7.jar'
   require 'com/fasterxml/jackson/core/jackson-core/2.9.1/jackson-core-2.9.1.jar'
+  require 'org/codehaus/janino/janino/3.0.7/janino-3.0.7.jar'
 end
 
 if defined? Jars
@@ -20,7 +21,8 @@ if defined? Jars
   require_jar( 'org.slf4j', 'slf4j-api', '1.7.21' )
   require_jar( 'com.fasterxml.jackson.core', 'jackson-annotations', '2.9.1' )
   require_jar( 'org.apache.logging.log4j', 'log4j-slf4j-impl', '2.6.2' )
-  require_jar( 'com.fasterxml.jackson.module', 'jackson-module-afterburner', '2.9.1' )
   require_jar( 'com.fasterxml.jackson.dataformat', 'jackson-dataformat-cbor', '2.9.1' )
+  require_jar( 'org.codehaus.janino', 'commons-compiler', '3.0.7' )
   require_jar( 'com.fasterxml.jackson.core', 'jackson-core', '2.9.1' )
+  require_jar( 'org.codehaus.janino', 'janino', '3.0.7' )
 end
data/lib/logstash/agent.rb CHANGED
@@ -389,14 +389,20 @@ class LogStash::Agent
   def start_webserver
     options = {:http_host => @http_host, :http_ports => @http_port, :http_environment => @http_environment }
     @webserver = LogStash::WebServer.new(@logger, self, options)
-    Thread.new(@webserver) do |webserver|
+    @webserver_thread = Thread.new(@webserver) do |webserver|
      LogStash::Util.set_thread_name("Api Webserver")
      webserver.run
    end
   end
 
   def stop_webserver
-    @webserver.stop if @webserver
+    if @webserver
+      @webserver.stop
+      if @webserver_thread.join(5).nil?
+        @webserver_thread.kill
+        @webserver_thread.join
+      end
+    end
   end
 
   def configure_metrics_collectors
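
The bounded join-then-kill sequence above is a small reusable pattern: give the thread a grace period, then escalate. A minimal standalone Ruby sketch (the worker thread here is a hypothetical stand-in for the API webserver loop):

# Hypothetical stand-in for the webserver thread.
worker = Thread.new { sleep }

def stop_gracefully(thread, grace_period = 5)
  # Thread#join(timeout) returns nil when the thread is still alive
  # after the timeout, which is the cue to escalate.
  if thread.join(grace_period).nil?
    thread.kill # force-stop the stuck thread
    thread.join # then wait for it to actually terminate
  end
end

stop_gracefully(worker)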
data/lib/logstash/api/modules/node.rb CHANGED
@@ -11,13 +11,19 @@ module LogStash
     end
 
     get "/hot_threads" do
-      ignore_idle_threads = params["ignore_idle_threads"] || true
+      begin
+        ignore_idle_threads = params["ignore_idle_threads"] || true
 
-      options = { :ignore_idle_threads => as_boolean(ignore_idle_threads) }
-      options[:threads] = params["threads"].to_i if params.has_key?("threads")
+        options = {:ignore_idle_threads => as_boolean(ignore_idle_threads)}
+        options[:threads] = params["threads"].to_i if params.has_key?("threads")
 
-      as = human? ? :string : :json
-      respond_with(node.hot_threads(options), {:as => as})
+        as = human? ? :string : :json
+        respond_with(node.hot_threads(options), {:as => as})
+      rescue ArgumentError => e
+        response = respond_with({"error" => e.message})
+        status(400)
+        response
+      end
     end
 
     get "/pipelines/:id" do
data/lib/logstash/api/modules/stats.rb CHANGED
@@ -9,14 +9,20 @@ module LogStash
 
     # return hot threads information
     get "/jvm/hot_threads" do
-      top_threads_count = params["threads"] || 3
-      ignore_idle_threads = params["ignore_idle_threads"] || true
-      options = {
-        :threads => top_threads_count.to_i,
-        :ignore_idle_threads => as_boolean(ignore_idle_threads)
-      }
+      begin
+        top_threads_count = params["threads"] || 3
+        ignore_idle_threads = params["ignore_idle_threads"] || true
+        options = {
+          :threads => top_threads_count.to_i,
+          :ignore_idle_threads => as_boolean(ignore_idle_threads)
+        }
 
-      respond_with(stats_command.hot_threads(options))
+        respond_with(stats_command.hot_threads(options))
+      rescue ArgumentError => e
+        response = respond_with({"error" => e.message})
+        status(400)
+        response
+      end
     end
 
     # return hot threads information
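
Both hot-threads endpoints now turn an invalid boolean parameter into an HTTP 400 instead of letting the ArgumentError from as_boolean bubble up as a 500. A hedged smoke test against a local instance, assuming the default API port 9600 and the /_node/hot_threads route:

require "net/http"
require "json"

# "maybe" is not a valid boolean, so as_boolean raises ArgumentError,
# which the route now rescues and reports as a 400.
uri = URI("http://127.0.0.1:9600/_node/hot_threads?ignore_idle_threads=maybe")
res = Net::HTTP.get_response(uri)

puts res.code                      # expected: "400"
puts JSON.parse(res.body)["error"] # expected: the ArgumentError message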
data/lib/logstash/compiler.rb CHANGED
@@ -7,9 +7,9 @@ java_import org.logstash.config.ir.graph.Graph
 module LogStash; class Compiler
   include ::LogStash::Util::Loggable
 
-  def self.compile_sources(sources_with_metadata, settings)
+  def self.compile_sources(sources_with_metadata, support_escapes)
     graph_sections = sources_with_metadata.map do |swm|
-      self.compile_graph(swm, settings)
+      self.compile_graph(swm, support_escapes)
     end
 
     input_graph = Graph.combine(*graph_sections.map {|s| s[:input] }).graph
@@ -30,7 +30,7 @@ module LogStash; class Compiler
     PipelineIR.new(input_graph, filter_graph, output_graph, original_source)
   end
 
-  def self.compile_ast(source_with_metadata, settings)
+  def self.compile_imperative(source_with_metadata, support_escapes)
     if !source_with_metadata.is_a?(org.logstash.common.SourceWithMetadata)
       raise ArgumentError, "Expected 'org.logstash.common.SourceWithMetadata', got #{source_with_metadata.class}"
     end
@@ -42,15 +42,11 @@ module LogStash; class Compiler
       raise ConfigurationError, grammar.failure_reason
     end
 
-    config.process_escape_sequences = settings.get_value("config.support_escapes")
+    config.process_escape_sequences = support_escapes
     config.compile(source_with_metadata)
   end
 
-  def self.compile_imperative(source_with_metadata, settings)
-    compile_ast(source_with_metadata, settings)
-  end
-
-  def self.compile_graph(source_with_metadata, settings)
-    Hash[compile_imperative(source_with_metadata, settings).map {|section,icompiled| [section, icompiled.toGraph]}]
+  def self.compile_graph(source_with_metadata, support_escapes)
+    Hash[compile_imperative(source_with_metadata, support_escapes).map {|section,icompiled| [section, icompiled.toGraph]}]
   end
 end; end
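
Callers now resolve the config.support_escapes boolean themselves instead of handing the compiler the whole settings object. A hedged sketch of an updated call site (sources_with_metadata and settings are assumed to be in scope):

# Hypothetical call site: pass the resolved boolean, not the settings object.
support_escapes = settings.get_value("config.support_escapes")
pipeline_ir = LogStash::Compiler.compile_sources(sources_with_metadata, support_escapes)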
data/lib/logstash/compiler/lscl.rb CHANGED
@@ -113,12 +113,13 @@ module LogStashCompilerLSCLGrammar; module LogStash; module Compiler; module LSC
         # interpreted as `{"match" => {"baz" => "bar", "foo" => "blub"}}`.
         # (NOTE: this bypasses `AST::Hash`'s ability to detect duplicate keys)
         hash[k] = existing.merge(v)
+      elsif existing.kind_of?(::Array)
+        hash[k] = existing.push(*v)
       else
         hash[k] = existing + v
       end
       hash
     end
-
   end
 end
 
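The new elsif closes a gap in duplicate-key merging: duplicated hash options were deep-merged one level, and everything else fell through to +; arrays are now concatenated in place with push(*v). A condensed, runnable sketch of the three branches (the helper name is hypothetical):

def merge_value(existing, v)
  if existing.kind_of?(::Hash)
    existing.merge(v)   # duplicate hash options merge one level deep
  elsif existing.kind_of?(::Array)
    existing.push(*v)   # new branch: arrays concatenate in place
  else
    existing + v        # scalars and strings fall back to +
  end
end

merge_value({"a" => 1}, {"b" => 2}) # => {"a"=>1, "b"=>2}
merge_value([1, 2], [3])            # => [1, 2, 3]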
@@ -339,8 +340,12 @@ module LogStashCompilerLSCLGrammar; module LogStash; module Compiler; module LSC
       case op
       when :and
         return jdsl.eAnd(left, right);
+      when :nand
+        return jdsl.eNand(left, right);
       when :or
         return jdsl.eOr(left, right);
+      when :xor
+        return jdsl.eXor(left, right);
       else
         raise "Unknown op #{jop}"
       end
@@ -523,8 +528,12 @@ module LogStashCompilerLSCLGrammar; module LogStash; module Compiler; module LSC
       case self.text_value
       when "and"
         AND_METHOD
+      when "nand"
+        NAND_METHOD
       when "or"
         OR_METHOD
+      when "xor"
+        XOR_METHOD
       else
         raise "Unknown operator #{self.text_value}"
       end
data/lib/logstash/compiler/lscl/helpers.rb CHANGED
@@ -50,6 +50,8 @@ module LogStashCompilerLSCLGrammar; module LogStash; module Compiler; module LSC
   end
 
   AND_METHOD = jdsl.method(:eAnd)
+  NAND_METHOD = jdsl.method(:eNand)
   OR_METHOD = jdsl.method(:eOr)
+  XOR_METHOD = jdsl.method(:eXor)
 end
-end; end; end; end; end
+end; end; end; end; end
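
With eNand and eXor wired into both the operator dispatch and the method-reference table, this compilation path accepts all four boolean operators documented for Logstash conditionals. A hedged sketch of a config that would exercise the new ones (field names hypothetical):

# Hypothetical pipeline config exercising nand/xor conditionals.
config = <<-CONFIG
  filter {
    if [a] xor [b] {
      mutate { add_tag => ["exactly_one"] }
    }
    if [a] nand [b] {
      mutate { add_tag => ["not_both"] }
    }
  }
CONFIG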
data/lib/logstash/config/mixin.rb CHANGED
@@ -34,7 +34,7 @@ LogStash::Environment.load_locale!
 #
 module LogStash::Config::Mixin
 
-  include LogStash::Util::EnvironmentVariables
+  include LogStash::Util::SubstitutionVariables
 
   attr_accessor :config
   attr_accessor :original_params
@@ -144,7 +144,7 @@ module LogStash::Config::Mixin
 
   module DSL
 
-    include LogStash::Util::EnvironmentVariables
+    include LogStash::Util::SubstitutionVariables
 
     attr_accessor :flags
 
data/lib/logstash/environment.rb CHANGED
@@ -40,6 +40,7 @@ module LogStash
     Setting::PositiveInteger.new("pipeline.batch.size", 125),
     Setting::Numeric.new("pipeline.batch.delay", 5), # in milliseconds
     Setting::Boolean.new("pipeline.unsafe_shutdown", false),
+    Setting::Boolean.new("pipeline.java_execution", false),
     Setting::Boolean.new("pipeline.reloadable", true),
     Setting.new("path.plugins", Array, []),
     Setting::NullableString.new("interactive", nil, false),
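
The new pipeline.java_execution boolean defaults to false, so the Ruby execution engine stays the default. A hedged sketch of flipping it programmatically (in practice it would be set via logstash.yml):

# Hypothetical: enable the experimental Java execution engine in code.
settings = LogStash::SETTINGS.clone
settings.set_value("pipeline.java_execution", true)
settings.get_value("pipeline.java_execution") # => true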
@@ -131,8 +132,6 @@ module LogStash
   end
 
   def load_jars!(pattern)
-    raise(LogStash::EnvironmentError, I18n.t("logstash.environment.jruby-required")) unless LogStash::Environment.jruby?
-
    jar_files = find_jars(pattern)
    require_jars! jar_files
   end
@@ -155,10 +154,6 @@ module LogStash
     ENV["USE_RUBY"] == "1" ? "ruby" : File.join("vendor", "jruby", "bin", "jruby")
   end
 
-  def jruby?
-    @jruby ||= !!(RUBY_PLATFORM == "java")
-  end
-
   def windows?
     RbConfig::CONFIG['host_os'] =~ WINDOW_OS_RE
   end
data/lib/logstash/errors.rb CHANGED
@@ -1,6 +1,6 @@
 # encoding: utf-8
+
 module LogStash
-  class Error < ::StandardError; end
   class EnvironmentError < Error; end
   class ConfigurationError < Error; end
   class PluginLoadingError < Error; end
data/lib/logstash/event.rb CHANGED
@@ -2,8 +2,6 @@
 
 require "logstash/namespace"
 require "logstash/json"
-require "jruby_event_ext"
-require "jruby_timestamp_ext"
 require "logstash/timestamp"
 
 # transient pipeline events for normal in-flow signaling as opposed to
data/lib/logstash/filter_delegator.rb CHANGED
@@ -49,10 +49,9 @@ module LogStash
       @metric_events_time.increment((java.lang.System.nano_time - start_time) / 1_000_000)
 
       # There is no guarantee in the context of filter
-      # that EVENTS_INT == EVENTS_OUT, see the aggregates and
+      # that EVENTS_IN == EVENTS_OUT, see the aggregates and
       # the split filter
       c = new_events.count { |event| !event.cancelled? }
-
       @metric_events_out.increment(c) if c > 0
       new_events
     end
data/lib/logstash/instrument/metric_type/counter.rb CHANGED
@@ -5,7 +5,7 @@ module LogStash module Instrument module MetricType
   class Counter < LongCounter
 
     def initialize(namespaces, key)
-      super(namespaces, key.to_s)
+      super(key.to_s)
 
     end
 
data/lib/logstash/instrument/metric_type/gauge.rb CHANGED
@@ -4,7 +4,7 @@ module LogStash module Instrument module MetricType
   class Gauge < LazyDelegatingGauge
 
     def initialize(namespaces, key)
-      super(namespaces, key.to_s)
+      super(key.to_s)
     end
 
     def execute(action, value = nil)
data/lib/logstash/instrument/wrapped_write_client.rb CHANGED
@@ -20,7 +20,7 @@ module LogStash module Instrument
     end
 
     def get_new_batch
-      @write_client.get_new_batch
+      []
     end
 
     def push(event)
data/lib/logstash/java_filter_delegator.rb ADDED
@@ -0,0 +1,79 @@
+# encoding: utf-8
+#
+module LogStash
+  class JavaFilterDelegator
+    include org.logstash.config.ir.compiler.RubyIntegration::Filter
+    extend Forwardable
+    DELEGATED_METHODS = [
+      :register,
+      :close,
+      :threadsafe?,
+      :do_close,
+      :do_stop,
+      :periodic_flush,
+      :reloadable?
+    ]
+    def_delegators :@filter, *DELEGATED_METHODS
+
+    attr_reader :id
+
+    def initialize(logger, klass, metric, execution_context, plugin_args)
+      @logger = logger
+      @klass = klass
+      @id = plugin_args["id"]
+      @filter = klass.new(plugin_args)
+
+      # Scope the metrics to the plugin
+      namespaced_metric = metric.namespace(@id.to_sym)
+      @filter.metric = namespaced_metric
+      @filter.execution_context = execution_context
+
+      @metric_events = namespaced_metric.namespace(:events)
+      @metric_events_in = @metric_events.counter(:in)
+      @metric_events_out = @metric_events.counter(:out)
+      @metric_events_time = @metric_events.counter(:duration_in_millis)
+      namespaced_metric.gauge(:name, config_name)
+
+      # Not all filters do buffering
+      @flushes = @filter.respond_to?(:flush)
+    end
+
+    def toRuby
+      self
+    end
+
+    def config_name
+      @klass.config_name
+    end
+
+    def multi_filter(events)
+      @metric_events_in.increment(events.size)
+
+      start_time = java.lang.System.nano_time
+      new_events = @filter.multi_filter(events)
+      @metric_events_time.increment((java.lang.System.nano_time - start_time) / 1_000_000)
+
+      # There is no guarantee in the context of filter
+      # that EVENTS_IN == EVENTS_OUT, see the aggregates and
+      # the split filter
+      c = new_events.count { |event| !event.cancelled? }
+      @metric_events_out.increment(c) if c > 0
+      new_events
+    end
+
+    def has_flush
+      @flushes
+    end
+
+    def flush(options = {})
+      # we also need to trace the number of events
+      # coming from a specific filter.
+      new_events = @filter.flush(options)
+
+      # Filter plugins that do buffering or spooling of events, like
+      # logstash-filter-aggregate, can return nil and will flush on the next flush tick.
+      @metric_events_out.increment(new_events.size) if new_events && new_events.size > 0
+      new_events
+    end
+  end
+end
data/lib/logstash/java_pipeline.rb ADDED
@@ -0,0 +1,690 @@
+# encoding: utf-8
+require "thread"
+require "stud/interval"
+require "concurrent"
+require "logstash/namespace"
+require "logstash/errors"
+require "logstash-core/logstash-core"
+require "logstash/event"
+require "logstash/filters/base"
+require "logstash/inputs/base"
+require "logstash/outputs/base"
+require "logstash/shutdown_watcher"
+require "logstash/pipeline_reporter"
+require "logstash/instrument/metric"
+require "logstash/instrument/namespaced_metric"
+require "logstash/instrument/null_metric"
+require "logstash/instrument/namespaced_null_metric"
+require "logstash/instrument/collector"
+require "logstash/instrument/wrapped_write_client"
+require "logstash/util/dead_letter_queue_manager"
+require "logstash/output_delegator"
+require "logstash/java_filter_delegator"
+require "logstash/queue_factory"
+require "logstash/compiler"
+require "logstash/execution_context"
+require "securerandom"
+
+java_import org.logstash.common.DeadLetterQueueFactory
+java_import org.logstash.common.SourceWithMetadata
+java_import org.logstash.common.io.DeadLetterQueueWriter
+java_import org.logstash.config.ir.CompiledPipeline
+java_import org.logstash.config.ir.ConfigCompiler
+
+module LogStash; class JavaBasePipeline
+  include LogStash::Util::Loggable
+
+  attr_reader :settings, :config_str, :config_hash, :inputs, :filters, :outputs, :pipeline_id, :lir, :execution_context, :ephemeral_id
+  attr_reader :pipeline_config
+
+  def initialize(pipeline_config, namespaced_metric = nil, agent = nil)
+    @logger = self.logger
+    @mutex = Mutex.new
+    @ephemeral_id = SecureRandom.uuid
+
+    @pipeline_config = pipeline_config
+    @config_str = pipeline_config.config_string
+    @settings = pipeline_config.settings
+    @config_hash = Digest::SHA1.hexdigest(@config_str)
+
+    @lir = ConfigCompiler.configToPipelineIR(
+      @config_str, @settings.get_value("config.support_escapes")
+    )
+
+    @pipeline_id = @settings.get_value("pipeline.id") || self.object_id
+    @agent = agent
+    @dlq_writer = dlq_writer
+    @plugin_factory = LogStash::Plugins::PluginFactory.new(
+      # use NullMetric if called in the BasePipeline context otherwise use the @metric value
+      @lir, LogStash::Plugins::PluginMetricFactory.new(pipeline_id, @metric || Instrument::NullMetric.new),
+      @logger, LogStash::Plugins::ExecutionContextFactory.new(@agent, self, @dlq_writer),
+      JavaFilterDelegator
+    )
+    @lir_execution = CompiledPipeline.new(@lir, @plugin_factory)
+    if settings.get_value("config.debug") && @logger.debug?
+      @logger.debug("Compiled pipeline code", default_logging_keys(:code => @lir.get_graph.to_string))
+    end
+    @inputs = @lir_execution.inputs
+    @filters = @lir_execution.filters
+    @outputs = @lir_execution.outputs
+  end
+
+  def dlq_writer
+    if settings.get_value("dead_letter_queue.enable")
+      @dlq_writer = DeadLetterQueueFactory.getWriter(pipeline_id, settings.get_value("path.dead_letter_queue"), settings.get_value("dead_letter_queue.max_bytes"))
+    else
+      @dlq_writer = LogStash::Util::DummyDeadLetterQueueWriter.new
+    end
+  end
+
+  def close_dlq_writer
+    @dlq_writer.close
+    if settings.get_value("dead_letter_queue.enable")
+      DeadLetterQueueFactory.release(pipeline_id)
+    end
+  end
+
+  def buildOutput(name, line, column, *args)
+    plugin("output", name, line, column, *args)
+  end
+
+  def buildFilter(name, line, column, *args)
+    plugin("filter", name, line, column, *args)
+  end
+
+  def buildInput(name, line, column, *args)
+    plugin("input", name, line, column, *args)
+  end
+
+  def buildCodec(name, *args)
+    plugin("codec", name, 0, 0, *args)
+  end
+
+  def plugin(plugin_type, name, line, column, *args)
+    @plugin_factory.plugin(plugin_type, name, line, column, *args)
+  end
+
+  def reloadable?
+    configured_as_reloadable? && reloadable_plugins?
+  end
+
+  def configured_as_reloadable?
+    settings.get("pipeline.reloadable")
+  end
+
+  def reloadable_plugins?
+    non_reloadable_plugins.empty?
+  end
+
+  def non_reloadable_plugins
+    (inputs + filters + outputs).select { |plugin| !plugin.reloadable? }
+  end
+
+  private
+
+  def default_logging_keys(other_keys = {})
+    { :pipeline_id => pipeline_id }.merge(other_keys)
+  end
+end; end
+
+module LogStash; class JavaPipeline < JavaBasePipeline
+  attr_reader \
+    :worker_threads,
+    :events_consumed,
+    :events_filtered,
+    :reporter,
+    :started_at,
+    :thread,
+    :settings,
+    :metric,
+    :filter_queue_client,
+    :input_queue_client,
+    :queue
+
+  MAX_INFLIGHT_WARN_THRESHOLD = 10_000
+
+  def initialize(pipeline_config, namespaced_metric = nil, agent = nil)
+    @settings = pipeline_config.settings
+    # This needs to be configured before we call super, which will evaluate the code, to make
+    # sure the metric instance is correctly sent to the plugins to make the namespace scoping work
+    @metric = if namespaced_metric
+      settings.get("metric.collect") ? namespaced_metric : Instrument::NullMetric.new(namespaced_metric.collector)
+    else
+      Instrument::NullMetric.new
+    end
+
+    @ephemeral_id = SecureRandom.uuid
+    @settings = settings
+    @reporter = PipelineReporter.new(@logger, self)
+    @worker_threads = []
+
+    super
+
+    begin
+      @queue = LogStash::QueueFactory.create(settings)
+    rescue => e
+      @logger.error("Logstash failed to create queue", default_logging_keys("exception" => e.message, "backtrace" => e.backtrace))
+      raise e
+    end
+
+    @input_queue_client = @queue.write_client
+    @filter_queue_client = @queue.read_client
+    @signal_queue = java.util.concurrent.LinkedBlockingQueue.new
+    # Note that @inflight_batches as a central mechanism for tracking inflight
+    # batches will fail if we have multiple read clients here.
+    @filter_queue_client.set_events_metric(metric.namespace([:stats, :events]))
+    @filter_queue_client.set_pipeline_metric(
+      metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :events])
+    )
+    @drain_queue = @settings.get_value("queue.drain")
+
+    @events_filtered = Concurrent::AtomicFixnum.new(0)
+    @events_consumed = Concurrent::AtomicFixnum.new(0)
+
+    @input_threads = []
+    # @ready requires thread safety since it is typically polled from outside the pipeline thread
+    @ready = Concurrent::AtomicBoolean.new(false)
+    @running = Concurrent::AtomicBoolean.new(false)
+    @flushing = Concurrent::AtomicReference.new(false)
+    @outputs_registered = Concurrent::AtomicBoolean.new(false)
+    @finished_execution = Concurrent::AtomicBoolean.new(false)
+  end # def initialize
+
+  def ready?
+    @ready.value
+  end
+
+  def safe_pipeline_worker_count
+    default = @settings.get_default("pipeline.workers")
+    pipeline_workers = @settings.get("pipeline.workers") # override from args "-w 8" or config
+    safe_filters, unsafe_filters = @filters.partition(&:threadsafe?)
+    plugins = unsafe_filters.collect { |f| f.config_name }
+
+    return pipeline_workers if unsafe_filters.empty?
+
+    if @settings.set?("pipeline.workers")
+      if pipeline_workers > 1
+        @logger.warn("Warning: Manual override - there are filters that might not work with multiple worker threads", default_logging_keys(:worker_threads => pipeline_workers, :filters => plugins))
+      end
+    else
+      # user did not specify a worker thread count
+      # warn if the default is multiple
+      if default > 1
+        @logger.warn("Defaulting pipeline worker threads to 1 because there are some filters that might not work with multiple worker threads",
+                     default_logging_keys(:count_was => default, :filters => plugins))
+        return 1 # can't allow the default value to propagate if there are unsafe filters
+      end
+    end
+    pipeline_workers
+  end
+
+  def filters?
+    @filters.any?
+  end
+
+  def start
+    # Since we start, let's assume that the metric namespace is cleared;
+    # this is useful in the context of pipeline reloading
+    collect_stats
+    collect_dlq_stats
+
+    @logger.debug("Starting pipeline", default_logging_keys)
+
+    @finished_execution.make_false
+
+    @thread = Thread.new do
+      begin
+        LogStash::Util.set_thread_name("pipeline.#{pipeline_id}")
+        run
+        @finished_execution.make_true
+      rescue => e
+        close
+        logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
+      end
+    end
+
+    status = wait_until_started
+
+    if status
+      logger.debug("Pipeline started successfully", default_logging_keys(:pipeline_id => pipeline_id))
+    end
+
+    status
+  end
+
+  def wait_until_started
+    while true do
+      # This should be changed with an appropriate FSM
+      # It's an edge case: if we have a pipeline with
+      # a generator { count => 1 } it's possible that `Thread#alive?` doesn't return true
+      # because the execution of the thread was successful and complete
+      if @finished_execution.true?
+        return true
+      elsif thread.nil? || !thread.alive?
+        return false
+      elsif running?
+        return true
+      else
+        sleep 0.01
+      end
+    end
+  end
+
+  def run
+    @started_at = Time.now
+    @thread = Thread.current
+    Util.set_thread_name("[#{pipeline_id}]-pipeline-manager")
+
+    start_workers
+
+    @logger.info("Pipeline started", "pipeline.id" => @pipeline_id)
+
+    # Block until all inputs have stopped
+    # Generally this happens if SIGINT is sent and `shutdown` is called from an external thread
+
+    transition_to_running
+    start_flusher # Launches a non-blocking thread for flush events
+    wait_inputs
+    transition_to_stopped
+
+    @logger.debug("Input plugins stopped! Will shutdown filter/output workers.", default_logging_keys)
+
+    shutdown_flusher
+    shutdown_workers
+
+    close
+
+    @logger.debug("Pipeline has been shutdown", default_logging_keys)
+
+    # exit code
+    return 0
+  end # def run
+
+  def close
+    @filter_queue_client.close
+    @queue.close
+    close_dlq_writer
+  end
+
+  def transition_to_running
+    @running.make_true
+  end
+
+  def transition_to_stopped
+    @running.make_false
+  end
+
+  def running?
+    @running.true?
+  end
+
+  def stopped?
+    @running.false?
+  end
+
+  def system?
+    settings.get_value("pipeline.system")
+  end
+
+  # register_plugins calls #register_plugin on the plugins list and upon exception will call Plugin#do_close on all registered plugins
+  # @param plugins [Array[Plugin]] the list of plugins to register
+  def register_plugins(plugins)
+    registered = []
+    plugins.each { |plugin| registered << @lir_execution.registerPlugin(plugin) }
+  rescue => e
+    registered.each(&:do_close)
+    raise e
+  end
+
+  def start_workers
+    @worker_threads.clear # In case we're restarting the pipeline
+    @outputs_registered.make_false
+    begin
+      maybe_setup_out_plugins
+
+      pipeline_workers = safe_pipeline_worker_count
+      batch_size = @settings.get("pipeline.batch.size")
+      batch_delay = @settings.get("pipeline.batch.delay")
+
+      max_inflight = batch_size * pipeline_workers
+
+      config_metric = metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :config])
+      config_metric.gauge(:workers, pipeline_workers)
+      config_metric.gauge(:batch_size, batch_size)
+      config_metric.gauge(:batch_delay, batch_delay)
+      config_metric.gauge(:config_reload_automatic, @settings.get("config.reload.automatic"))
+      config_metric.gauge(:config_reload_interval, @settings.get("config.reload.interval"))
+      config_metric.gauge(:dead_letter_queue_enabled, dlq_enabled?)
+      config_metric.gauge(:dead_letter_queue_path, @dlq_writer.get_path.to_absolute_path.to_s) if dlq_enabled?
+
+
+      @logger.info("Starting pipeline", default_logging_keys(
+        "pipeline.workers" => pipeline_workers,
+        "pipeline.batch.size" => batch_size,
+        "pipeline.batch.delay" => batch_delay,
+        "pipeline.max_inflight" => max_inflight))
+      if max_inflight > MAX_INFLIGHT_WARN_THRESHOLD
+        @logger.warn("CAUTION: Recommended inflight events max exceeded! Logstash will run with up to #{max_inflight} events in memory in your current configuration. If your message sizes are large this may cause instability with the default heap size. Please consider setting a non-standard heap size, changing the batch size (currently #{batch_size}), or changing the number of pipeline workers (currently #{pipeline_workers})", default_logging_keys)
+      end
+
+      @filter_queue_client.set_batch_dimensions(batch_size, batch_delay)
+
+      pipeline_workers.times do |t|
+        batched_execution = @lir_execution.buildExecution
+        thread = Thread.new(self, batched_execution) do |_pipeline, _batched_execution|
+          _pipeline.worker_loop(_batched_execution)
+        end
+        thread.name = "[#{pipeline_id}]>worker#{t}"
+        @worker_threads << thread
+      end
+
+      # inputs should be started last, after all workers
+      begin
+        start_inputs
+      rescue => e
+        # if there is any exception in starting inputs, make sure we shutdown workers.
+        # exception will already be logged in start_inputs
+        shutdown_workers
+        raise e
+      end
+    ensure
+      # it is important to guarantee @ready to be true after the startup sequence has been completed
+      # to potentially unblock the shutdown method which may be waiting on @ready to proceed
+      @ready.make_true
+    end
+  end
+
+  def dlq_enabled?
+    @settings.get("dead_letter_queue.enable")
+  end
+
+  # Main body of what a worker thread does
+  # Repeatedly takes batches off the queue, filters, then outputs them
+  def worker_loop(batched_execution)
+    shutdown_requested = false
+    while true
+      signal = @signal_queue.poll || NO_SIGNAL
+      shutdown_requested |= signal.shutdown? # latch on shutdown signal
+
+      batch = @filter_queue_client.read_batch # metrics are started in read_batch
+      @events_consumed.increment(batch.size)
+      execute_batch(batched_execution, batch, signal.flush?)
+      @filter_queue_client.close_batch(batch)
+      # keep break at end of loop, after the read_batch operation; some pipeline specs rely on this "final read_batch" before shutdown.
+      break if (shutdown_requested && !draining_queue?)
+    end
+
+    # we are shutting down; the queue is drained if it was required, now perform a final flush.
+    # for this we need to create a new empty batch to contain the final flushed events
+    batch = @filter_queue_client.new_batch
+    @filter_queue_client.start_metrics(batch) # explicitly call start_metrics since we don't do a read_batch here
+    batched_execution.compute(batch.to_a, true, true)
+    @filter_queue_client.close_batch(batch)
+  end
+
+  def wait_inputs
+    @input_threads.each(&:join)
+  end
+
+  def start_inputs
+    moreinputs = []
+    @inputs.each do |input|
+      if input.threadable && input.threads > 1
+        (input.threads - 1).times do |i|
+          moreinputs << input.clone
+        end
+      end
+    end
+    @inputs += moreinputs
+
+    # first make sure we can register all input plugins
+    register_plugins(@inputs)
+
+    # then after all input plugins are successfully registered, start them
+    @inputs.each { |input| start_input(input) }
+  end
+
+  def start_input(plugin)
+    @input_threads << Thread.new { inputworker(plugin) }
+  end
+
+  def inputworker(plugin)
+    Util::set_thread_name("[#{pipeline_id}]<#{plugin.class.config_name}")
+    begin
+      input_queue_client = wrapped_write_client(plugin)
+      plugin.run(input_queue_client)
+    rescue => e
+      if plugin.stop?
+        @logger.debug("Input plugin raised exception during shutdown, ignoring it.",
+                      default_logging_keys(:plugin => plugin.class.config_name, :exception => e.message, :backtrace => e.backtrace))
+        return
+      end
+
+      # otherwise, report error and restart
+      @logger.error(I18n.t("logstash.pipeline.worker-error-debug",
+                           default_logging_keys(
+                             :plugin => plugin.inspect,
+                             :error => e.message,
+                             :exception => e.class,
+                             :stacktrace => e.backtrace.join("\n"))))
+
+      # Assuming the failure that caused this exception is transient,
+      # let's sleep for a bit and execute #run again
+      sleep(1)
+      retry
+    ensure
+      plugin.do_close
+    end
+  end # def inputworker
+
+  # initiate the pipeline shutdown sequence
+  # this method is intended to be called from outside the pipeline thread
+  # @param before_stop [Proc] code block called before performing stop operation on input plugins
+  def shutdown(&before_stop)
+    # shutdown can only start once the pipeline has completed its startup.
+    # avoid potential race condition between the startup sequence and this
+    # shutdown method which can be called from another thread at any time
+    sleep(0.1) while !ready?
+
+    # TODO: should we also check against calling shutdown multiple times concurrently?
+
+    before_stop.call if block_given?
+
+    stop_inputs
+
+    # We make this call blocking, so we know for sure that when the method
+    # returns the shutdown is complete
+    wait_for_workers
+    clear_pipeline_metrics
+    @logger.info("Pipeline terminated", "pipeline.id" => @pipeline_id)
+  end # def shutdown
+
+  def wait_for_workers
+    @logger.debug("Waiting for worker threads", default_logging_keys)
+    @worker_threads.map(&:join)
+    @logger.debug("Worker closed", default_logging_keys)
+  end
+
+  def stop_inputs
+    @logger.debug("Closing inputs", default_logging_keys)
+    @inputs.each(&:do_stop)
+    @logger.debug("Closed inputs", default_logging_keys)
+  end
+
+  # After `shutdown` is called from an external thread this is called from the main thread to
+  # tell the worker threads to stop and then block until they've fully stopped
+  # This also stops all filter and output plugins
+  def shutdown_workers
+    # Each worker thread will receive this exactly once!
+    @worker_threads.each do |t|
+      @logger.debug("Pushing shutdown", default_logging_keys(:thread => t.inspect))
+      @signal_queue.put(SHUTDOWN)
+    end
+
+    @worker_threads.each do |t|
+      @logger.debug("Shutdown waiting for worker thread", default_logging_keys(:thread => t.inspect))
+      t.join
+    end
+
+    @filters.each(&:do_close)
+    @outputs.each(&:do_close)
+  end
+
+  # for backward compatibility in devutils for the rspec helpers, this method is not used
+  # anymore and just here to not break TestPipeline that inherits this class.
+  def filter(event, &block)
+  end
+
+  # for backward compatibility in devutils for the rspec helpers, this method is not used
+  # anymore and just here to not break TestPipeline that inherits this class.
+  def flush_filters(options = {}, &block)
+  end
+
+  def start_flusher
+    # Invariant to help detect improper initialization
+    raise "Attempted to start flusher on a stopped pipeline!" if stopped?
+
+    @flusher_thread = Thread.new do
+      while Stud.stoppable_sleep(5, 0.1) { stopped? }
+        flush
+        break if stopped?
+      end
+    end
+  end
+
+  def shutdown_flusher
+    @flusher_thread.join
+  end
+
+  def flush
+    if @flushing.compare_and_set(false, true)
+      @logger.debug? && @logger.debug("Pushing flush onto pipeline", default_logging_keys)
+      @signal_queue.put(FLUSH)
+    end
+  end
+
+  # Calculate the uptime in milliseconds
+  #
+  # @return [Fixnum] Uptime in milliseconds, 0 if the pipeline is not started
+  def uptime
+    return 0 if started_at.nil?
+    ((Time.now.to_f - started_at.to_f) * 1000.0).to_i
+  end
+
+  def plugin_threads_info
+    input_threads = @input_threads.select {|t| t.alive? }
+    worker_threads = @worker_threads.select {|t| t.alive? }
+    (input_threads + worker_threads).map {|t| Util.thread_info(t) }
+  end
+
+  def stalling_threads_info
+    plugin_threads_info
+      .reject {|t| t["blocked_on"] } # known benign blocking statuses
+      .each {|t| t.delete("backtrace") }
+      .each {|t| t.delete("blocked_on") }
+      .each {|t| t.delete("status") }
+  end
+
+  def collect_dlq_stats
+    if dlq_enabled?
+      dlq_metric = @metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :dlq])
+      dlq_metric.gauge(:queue_size_in_bytes, @dlq_writer.get_current_queue_size)
+    end
+  end
+
+  def collect_stats
+    pipeline_metric = @metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :queue])
+    pipeline_metric.gauge(:type, settings.get("queue.type"))
+    if @queue.is_a?(LogStash::Util::WrappedAckedQueue) && @queue.queue.is_a?(LogStash::AckedQueue)
+      queue = @queue.queue
+      dir_path = queue.dir_path
+      file_store = Files.get_file_store(Paths.get(dir_path))
+
+      pipeline_metric.namespace([:capacity]).tap do |n|
+        n.gauge(:page_capacity_in_bytes, queue.page_capacity)
+        n.gauge(:max_queue_size_in_bytes, queue.max_size_in_bytes)
+        n.gauge(:max_unread_events, queue.max_unread_events)
+        n.gauge(:queue_size_in_bytes, queue.persisted_size_in_bytes)
+      end
+      pipeline_metric.namespace([:data]).tap do |n|
+        n.gauge(:free_space_in_bytes, file_store.get_unallocated_space)
+        n.gauge(:storage_type, file_store.type)
+        n.gauge(:path, dir_path)
+      end
+
+      pipeline_metric.gauge(:events, queue.unread_count)
+    end
+  end
+
+  def clear_pipeline_metrics
+    # TODO(ph): I think the metric should also proxy that call correctly to the collector;
+    # this will simplify everything since the null metric would simply just do a noop
+    collector = @metric.collector
+
+    unless collector.nil?
+      # selectively reset metrics we don't wish to keep after reloading;
+      # these include metrics about the plugins and number of processed events.
+      # we want to keep other metrics like reload counts and error messages
+      collector.clear("stats/pipelines/#{pipeline_id}/plugins")
+      collector.clear("stats/pipelines/#{pipeline_id}/events")
+    end
+  end
+
+  # Sometimes we log stuff that will dump the pipeline which may contain
+  # sensitive information (like the raw syntax tree, which can contain passwords).
+  # We want to hide most of what's in here
+  def inspect
+    {
+      :pipeline_id => @pipeline_id,
+      :settings => @settings.inspect,
+      :ready => @ready,
+      :running => @running,
+      :flushing => @flushing
+    }
+  end
+
+  private
+
+  def execute_batch(batched_execution, batch, flush)
+    batched_execution.compute(batch.to_a, flush, false)
+    @events_filtered.increment(batch.size)
+    filtered_size = batch.filtered_size
+    @filter_queue_client.add_output_metrics(filtered_size)
+    @filter_queue_client.add_filtered_metrics(filtered_size)
+  rescue Exception => e
+    # Plugin authors should manage their own exceptions in the plugin code,
+    # but if an exception is raised up to the worker thread it is considered
+    # fatal and logstash will not recover from this situation.
+    #
+    # Users need to check their configuration or see if there is a bug in the plugin.
+    @logger.error("Exception in pipelineworker, the pipeline stopped processing new events, please check your filter configuration and restart Logstash.",
+                  default_logging_keys("exception" => e.message, "backtrace" => e.backtrace))
+
+    raise e
+  end
+
+  def maybe_setup_out_plugins
+    if @outputs_registered.make_true
+      register_plugins(@outputs)
+      register_plugins(@filters)
+    end
+  end
+
+  def default_logging_keys(other_keys = {})
+    keys = super
+    keys[:thread] ||= thread.inspect if thread
+    keys
+  end
+
+  def draining_queue?
+    @drain_queue ? !@filter_queue_client.empty? : false
+  end
+
+  def wrapped_write_client(plugin)
+    # need to ensure that metrics are initialized one plugin at a time, else a race condition can exist.
+    @mutex.synchronize do
+      LogStash::Instrument::WrappedWriteClient.new(@input_queue_client, self, metric, plugin)
+    end
+  end
+end; end