logstash-core 6.1.4-java → 6.2.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/lib/logstash-core/logstash-core.rb +10 -31
  3. data/lib/logstash/agent.rb +3 -23
  4. data/lib/logstash/api/modules/logging.rb +11 -0
  5. data/lib/logstash/config/source/multi_local.rb +5 -3
  6. data/lib/logstash/environment.rb +10 -3
  7. data/lib/logstash/event.rb +0 -1
  8. data/lib/logstash/filter_delegator.rb +1 -2
  9. data/lib/logstash/inputs/base.rb +1 -1
  10. data/lib/logstash/instrument/periodic_poller/base.rb +4 -4
  11. data/lib/logstash/instrument/periodic_poller/jvm.rb +5 -3
  12. data/lib/logstash/java_filter_delegator.rb +1 -2
  13. data/lib/logstash/java_pipeline.rb +6 -2
  14. data/lib/logstash/modules/kibana_client.rb +1 -1
  15. data/lib/logstash/output_delegator.rb +2 -3
  16. data/lib/logstash/output_delegator_strategies/legacy.rb +4 -4
  17. data/lib/logstash/output_delegator_strategies/shared.rb +4 -4
  18. data/lib/logstash/output_delegator_strategies/single.rb +2 -2
  19. data/lib/logstash/pipeline.rb +16 -24
  20. data/lib/logstash/plugin.rb +1 -1
  21. data/lib/logstash/plugins/plugin_factory.rb +3 -4
  22. data/lib/logstash/runner.rb +5 -0
  23. data/lib/logstash/settings.rb +5 -0
  24. data/lib/logstash/timestamp.rb +2 -25
  25. data/lib/logstash/util/secretstore.rb +36 -0
  26. data/lib/logstash/util/settings_helper.rb +1 -0
  27. data/lib/logstash/util/substitution_variables.rb +18 -5
  28. data/lib/logstash/util/wrapped_acked_queue.rb +1 -1
  29. data/lib/logstash/util/wrapped_synchronous_queue.rb +3 -35
  30. data/locales/en.yml +4 -4
  31. data/logstash-core.gemspec +0 -7
  32. data/spec/conditionals_spec.rb +21 -24
  33. data/spec/logstash/filter_delegator_spec.rb +3 -4
  34. data/spec/logstash/java_filter_delegator_spec.rb +3 -4
  35. data/spec/logstash/java_pipeline_spec.rb +97 -2
  36. data/spec/logstash/legacy_ruby_timestamp_spec.rb +0 -1
  37. data/spec/logstash/output_delegator_spec.rb +5 -7
  38. data/spec/logstash/queue_factory_spec.rb +1 -1
  39. data/spec/logstash/settings_spec.rb +49 -22
  40. data/spec/logstash/timestamp_spec.rb +0 -1
  41. data/spec/logstash/util/secretstore_spec.rb +69 -0
  42. data/spec/support/mocks_classes.rb +21 -0
  43. data/versions-gem-copy.yml +2 -2
  44. metadata +6 -42
  45. data/gemspec_jars.rb +0 -12
  46. data/lib/logstash-core/logstash-core.jar +0 -0
  47. data/lib/logstash-core_jars.rb +0 -28
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: b169d383bfb031f3f23268331a911211c1f040f27d7f68f9001500fd584ae386
4
- data.tar.gz: d14b9d18184b1353cd4a7b3726b9d7f7f667d25f95f223c720a902657f3073d2
3
+ metadata.gz: 863042f74fef490f23b6393fd8f2e5e46ed2d41feefa150e3c6b5f32f6502b42
4
+ data.tar.gz: 33f0feed45740a7104a6e6c0a5f894ceb21a5916545971033288ca7604c65dd0
5
5
  SHA512:
6
- metadata.gz: 768f160b92db5c33a94e33a6e73664bc46dcbdbf260b81c8419b076c67b02a75aa713cfe385641d56780bec9dc203cd248396c2c67334d13ffc802402191975d
7
- data.tar.gz: ff7eafa5203c4e35597002e49bba9bc88787ec62ea89737c6242ea804a5d9f0ffefef0dce04f391727e52a220235a83601285da5359f9c4f0abb0820f08aa1c5
6
+ metadata.gz: d8969c92ee13601ac5e00ff2a06ee5f9c41d2d47b4739de30852a116f10a58a8bc3c9508bc42d972fcf4da5ecfcb59eddc597b8dcc7c7d5f40a9ed1b35783a8a
7
+ data.tar.gz: 3b3000cb8f55315c756e6651694313c73a746e92124582173cb23515317a2defcfd869269612f5d115891f41b916827978778ad393a0c1c6b066044211be55dc
@@ -2,36 +2,15 @@
2
2
 
3
3
  require "java"
4
4
 
5
- module LogStash
6
- end
7
-
8
- require "logstash-core_jars"
9
-
10
- # local dev setup
11
- alt_classdir = File.expand_path("../../../out/production/classes", __FILE__) #IntelliJ's gradle output as of 2017.02 https://youtrack.jetbrains.com/issue/IDEA-175172
12
- if File.directory?(alt_classdir)
13
- classes_dir = alt_classdir
14
- resources_dir = File.expand_path("../../../out/production/resources", __FILE__)
15
- else
16
- classes_dir = File.expand_path("../../../build/classes/java/main", __FILE__)
17
- resources_dir = File.expand_path("../../../build/resources/main", __FILE__)
18
- end
19
-
20
-
21
-
22
- if File.directory?(classes_dir) && File.directory?(resources_dir)
23
- # if in local dev setup, add target to classpath
24
- $CLASSPATH << classes_dir unless $CLASSPATH.include?(classes_dir)
25
- $CLASSPATH << resources_dir unless $CLASSPATH.include?(resources_dir)
26
- else
27
- # otherwise use included jar
28
- begin
29
- require "logstash-core/logstash-core.jar"
30
- rescue Exception => e
31
- raise("Error loading logstash-core/logstash-core.jar file, cause: #{e.message}")
5
+ # This block is used to load Logstash's Java libraries when using a Ruby entrypoint and
6
+ # LS_JARS_LOADED is not globally set.
7
+ # Currently this happens when using the `bin/rspec` executable to invoke specs instead of the JUnit
8
+ # wrapper.
9
+ unless $LS_JARS_LOADED
10
+ jar_path = File.join(File.dirname(File.dirname(__FILE__)), "jars")
11
+ $:.unshift jar_path
12
+ Dir.glob(jar_path + '/*.jar') do |jar|
13
+ require File.basename(jar)
32
14
  end
15
+ java_import org.logstash.RubyUtil
33
16
  end
34
-
35
- # Load Logstash's Java-defined RubyClasses by classloading RubyUtil which sets them up in its
36
- # static constructor
37
- java_import org.logstash.RubyUtil
@@ -81,7 +81,7 @@ class LogStash::Agent
81
81
 
82
82
  def execute
83
83
  @thread = Thread.current # this var is implicitly used by Stud.stop?
84
- logger.debug("starting agent")
84
+ logger.debug("Starting agent")
85
85
 
86
86
  start_webserver
87
87
 
@@ -272,24 +272,6 @@ class LogStash::Agent
272
272
  end
273
273
  end
274
274
 
275
- def close_pipeline(id)
276
- with_pipelines do |pipelines|
277
- pipeline = pipelines[id]
278
- if pipeline
279
- @logger.warn("closing pipeline", :id => id)
280
- pipeline.close
281
- end
282
- end
283
- end
284
-
285
- def close_pipelines
286
- with_pipelines do |pipelines|
287
- pipelines.each do |id, _|
288
- close_pipeline(id)
289
- end
290
- end
291
- end
292
-
293
275
  private
294
276
  def transition_to_stopped
295
277
  @running.make_false
@@ -310,12 +292,10 @@ class LogStash::Agent
310
292
  # for other tasks.
311
293
  #
312
294
  def converge_state(pipeline_actions)
313
- logger.debug("Converging pipelines")
295
+ logger.debug("Converging pipelines state", :actions_count => pipeline_actions.size)
314
296
 
315
297
  converge_result = LogStash::ConvergeResult.new(pipeline_actions.size)
316
298
 
317
- logger.debug("Needed actions to converge", :actions_count => pipeline_actions.size) unless pipeline_actions.empty?
318
-
319
299
  pipeline_actions.each do |action|
320
300
  # We execute every task we need to converge the current state of pipelines
321
301
  # for every task we will record the action result, that will help us
@@ -409,7 +389,7 @@ class LogStash::Agent
409
389
  @collector = LogStash::Instrument::Collector.new
410
390
 
411
391
  @metric = if collect_metrics?
412
- @logger.debug("Agent: Configuring metric collection")
392
+ @logger.debug("Setting up metric collection")
413
393
  LogStash::Instrument::Metric.new(@collector)
414
394
  else
415
395
  LogStash::Instrument::NullMetric.new(@collector)
@@ -36,6 +36,17 @@ module LogStash
36
36
  end
37
37
  end
38
38
 
39
+ put "/reset" do
40
+ context = LogStash::Logging::Logger::get_logging_context
41
+ if context.nil?
42
+ status 500
43
+ respond_with({"error" => "Logstash loggers were not initialized properly"})
44
+ else
45
+ context.reconfigure
46
+ respond_with({"acknowledged" => true})
47
+ end
48
+ end
49
+
39
50
  get "/" do
40
51
  context = LogStash::Logging::Logger::get_logging_context
41
52
  if context.nil?
@@ -31,9 +31,11 @@ module LogStash module Config module Source
31
31
  end
32
32
 
33
33
  def match?
34
+ if modules_cli? || modules? || config_string? || config_path?
35
+ return false
36
+ end
34
37
  detect_pipelines if !@detect_pipelines_called
35
- # see basic settings predicates and getters defined in the base class
36
- return !(invalid_pipelines_detected? || modules_cli? || modules? || config_string? || config_path?)
38
+ return !(invalid_pipelines_detected?)
37
39
  end
38
40
 
39
41
  def invalid_pipelines_detected?
@@ -41,10 +43,10 @@ module LogStash module Config module Source
41
43
  end
42
44
 
43
45
  def config_conflict?
44
- detect_pipelines if !@detect_pipelines_called
45
46
  @conflict_messages.clear
46
47
  # are there any auto-reload conflicts?
47
48
  if !(modules_cli? || modules? || config_string? || config_path?)
49
+ detect_pipelines if !@detect_pipelines_called
48
50
  if @detected_marker.nil?
49
51
  @conflict_messages << I18n.t("logstash.runner.config-pipelines-failed-read", :path => pipelines_yaml_location)
50
52
  elsif @detected_marker == false
@@ -1,4 +1,5 @@
1
1
  # encoding: utf-8
2
+ require "logstash-core/logstash-core"
2
3
  require "logstash/errors"
3
4
  require "logstash/java_integration"
4
5
  require "logstash/config/cpu_core_strategy"
@@ -38,7 +39,7 @@ module LogStash
38
39
  Setting::PositiveInteger.new("pipeline.workers", LogStash::Config::CpuCoreStrategy.maximum),
39
40
  Setting::PositiveInteger.new("pipeline.output.workers", 1),
40
41
  Setting::PositiveInteger.new("pipeline.batch.size", 125),
41
- Setting::Numeric.new("pipeline.batch.delay", 5), # in milliseconds
42
+ Setting::Numeric.new("pipeline.batch.delay", 50), # in milliseconds
42
43
  Setting::Boolean.new("pipeline.unsafe_shutdown", false),
43
44
  Setting::Boolean.new("pipeline.java_execution", false),
44
45
  Setting::Boolean.new("pipeline.reloadable", true),
@@ -54,7 +55,7 @@ module LogStash
54
55
  Setting::String.new("http.environment", "production"),
55
56
  Setting::String.new("queue.type", "memory", true, ["persisted", "memory", "memory_acked"]),
56
57
  Setting::Boolean.new("queue.drain", false),
57
- Setting::Bytes.new("queue.page_capacity", "250mb"),
58
+ Setting::Bytes.new("queue.page_capacity", "64mb"),
58
59
  Setting::Bytes.new("queue.max_bytes", "1024mb"),
59
60
  Setting::Numeric.new("queue.max_events", 0), # 0 is unlimited
60
61
  Setting::Numeric.new("queue.checkpoint.acks", 1024), # 0 is unlimited
@@ -65,9 +66,14 @@ module LogStash
65
66
  Setting::TimeValue.new("slowlog.threshold.warn", "-1"),
66
67
  Setting::TimeValue.new("slowlog.threshold.info", "-1"),
67
68
  Setting::TimeValue.new("slowlog.threshold.debug", "-1"),
68
- Setting::TimeValue.new("slowlog.threshold.trace", "-1")
69
+ Setting::TimeValue.new("slowlog.threshold.trace", "-1"),
70
+ Setting::String.new("keystore.classname", "org.logstash.secret.store.backend.JavaKeyStore"),
71
+ Setting::String.new("keystore.file", ::File.join(::File.join(LogStash::Environment::LOGSTASH_HOME, "config"), "logstash.keystore"), false) # will be populated on
72
+ # post_process
69
73
  ].each {|setting| SETTINGS.register(setting) }
70
74
 
75
+
76
+
71
77
  # Compute the default queue path based on `path.data`
72
78
  default_queue_file_path = ::File.join(SETTINGS.get("path.data"), "queue")
73
79
  SETTINGS.register Setting::WritableDirectory.new("path.queue", default_queue_file_path)
@@ -75,6 +81,7 @@ module LogStash
75
81
  default_dlq_file_path = ::File.join(SETTINGS.get("path.data"), "dead_letter_queue")
76
82
  SETTINGS.register Setting::WritableDirectory.new("path.dead_letter_queue", default_dlq_file_path)
77
83
 
84
+
78
85
  SETTINGS.on_post_process do |settings|
79
86
  # If the data path is overridden but the queue path isn't recompute the queue path
80
87
  # We need to do this at this stage because of the weird execution order
@@ -2,7 +2,6 @@
2
2
 
3
3
  require "logstash/namespace"
4
4
  require "logstash/json"
5
- require "logstash/timestamp"
6
5
 
7
6
  # transient pipeline events for normal in-flow signaling as opposed to
8
7
  # flow altering exceptions. for now having base classes is adequate and
@@ -16,8 +16,7 @@ module LogStash
16
16
 
17
17
  attr_reader :id
18
18
 
19
- def initialize(logger, klass, metric, execution_context, plugin_args)
20
- @logger = logger
19
+ def initialize(klass, metric, execution_context, plugin_args)
21
20
  @klass = klass
22
21
  @id = plugin_args["id"]
23
22
  @filter = klass.new(plugin_args)
@@ -84,7 +84,7 @@ class LogStash::Inputs::Base < LogStash::Plugin
84
84
 
85
85
  public
86
86
  def do_stop
87
- @logger.debug("stopping", :plugin => self.class.name)
87
+ @logger.debug("Stopping", :plugin => self.class.name)
88
88
  @stop_called.make_true
89
89
  stop
90
90
  end
@@ -27,7 +27,7 @@ module LogStash module Instrument module PeriodicPoller
27
27
  if exception.is_a?(Concurrent::TimeoutError)
28
28
  # On a busy system this can happen, we just log it as a debug
29
29
  # event instead of an error, Some of the JVM calls can take a long time or block.
30
- logger.debug("PeriodicPoller: Timeout exception",
30
+ logger.debug("Timeout exception",
31
31
  :poller => self,
32
32
  :result => result,
33
33
  :polling_timeout => @options[:polling_timeout],
@@ -35,7 +35,7 @@ module LogStash module Instrument module PeriodicPoller
35
35
  :exception => exception.class,
36
36
  :executed_at => time)
37
37
  else
38
- logger.error("PeriodicPoller: exception",
38
+ logger.error("Exception",
39
39
  :poller => self,
40
40
  :result => result,
41
41
  :exception => exception.class,
@@ -50,7 +50,7 @@ module LogStash module Instrument module PeriodicPoller
50
50
  end
51
51
 
52
52
  def start
53
- logger.debug("PeriodicPoller: Starting",
53
+ logger.debug("Starting",
54
54
  :polling_interval => @options[:polling_interval],
55
55
  :polling_timeout => @options[:polling_timeout]) if logger.debug?
56
56
 
@@ -59,7 +59,7 @@ module LogStash module Instrument module PeriodicPoller
59
59
  end
60
60
 
61
61
  def stop
62
- logger.debug("PeriodicPoller: Stopping")
62
+ logger.debug("Stopping")
63
63
  @task.shutdown
64
64
  end
65
65
 
@@ -20,8 +20,8 @@ java_import 'org.logstash.instrument.reports.ProcessReport'
20
20
  module LogStash module Instrument module PeriodicPoller
21
21
  class JVM < Base
22
22
  class GarbageCollectorName
23
- YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation"])
24
- OLD_GC_NAMES = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation"])
23
+ YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation", "scavenge"])
24
+ OLD_GC_NAMES = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation", "global"])
25
25
 
26
26
  YOUNG = :young
27
27
  OLD = :old
@@ -64,7 +64,9 @@ module LogStash module Instrument module PeriodicPoller
64
64
  garbage_collectors = ManagementFactory.getGarbageCollectorMXBeans()
65
65
 
66
66
  garbage_collectors.each do |collector|
67
- name = GarbageCollectorName.get(collector.getName())
67
+ collector_name = collector.getName()
68
+ logger.debug("collector name", :name => collector_name)
69
+ name = GarbageCollectorName.get(collector_name)
68
70
  if name.nil?
69
71
  logger.error("Unknown garbage collector name", :name => name)
70
72
  else
@@ -17,8 +17,7 @@ module LogStash
17
17
 
18
18
  attr_reader :id
19
19
 
20
- def initialize(logger, klass, metric, execution_context, plugin_args)
21
- @logger = logger
20
+ def initialize(klass, metric, execution_context, plugin_args)
22
21
  @klass = klass
23
22
  @id = plugin_args["id"]
24
23
  @filter = klass.new(plugin_args)
@@ -57,7 +57,7 @@ module LogStash; class JavaBasePipeline
57
57
  @plugin_factory = LogStash::Plugins::PluginFactory.new(
58
58
  # use NullMetric if called in the BasePipeline context otherwise use the @metric value
59
59
  @lir, LogStash::Plugins::PluginMetricFactory.new(pipeline_id, @metric || Instrument::NullMetric.new),
60
- @logger, LogStash::Plugins::ExecutionContextFactory.new(@agent, self, @dlq_writer),
60
+ LogStash::Plugins::ExecutionContextFactory.new(@agent, self, @dlq_writer),
61
61
  JavaFilterDelegator
62
62
  )
63
63
  @lir_execution = CompiledPipeline.new(@lir, @plugin_factory)
@@ -330,7 +330,10 @@ module LogStash; class JavaPipeline < JavaBasePipeline
330
330
  # @param plugins [Array[Plugin]] the list of plugins to register
331
331
  def register_plugins(plugins)
332
332
  registered = []
333
- plugins.each { |plugin| registered << @lir_execution.registerPlugin(plugin) }
333
+ plugins.each do |plugin|
334
+ plugin.register
335
+ registered << plugin
336
+ end
334
337
  rescue => e
335
338
  registered.each(&:do_close)
336
339
  raise e
@@ -651,6 +654,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
651
654
  filtered_size = batch.filtered_size
652
655
  @filter_queue_client.add_output_metrics(filtered_size)
653
656
  @filter_queue_client.add_filtered_metrics(filtered_size)
657
+ @flushing.set(false) if flush
654
658
  rescue Exception => e
655
659
  # Plugins authors should manage their own exceptions in the plugin code
656
660
  # but if an exception is raised up to the worker thread they are considered
@@ -2,7 +2,7 @@
2
2
  require "logstash/namespace"
3
3
  require "logstash/logging"
4
4
  require "logstash/json"
5
- require "manticore/client"
5
+ require "manticore"
6
6
 
7
7
  module LogStash module Modules class KibanaClient
8
8
  include LogStash::Util::Loggable
@@ -7,8 +7,7 @@ require "logstash/output_delegator_strategies/legacy"
7
7
  module LogStash class OutputDelegator
8
8
  attr_reader :metric, :metric_events, :strategy, :namespaced_metric, :metric_events, :id
9
9
 
10
- def initialize(logger, output_class, metric, execution_context, strategy_registry, plugin_args)
11
- @logger = logger
10
+ def initialize(output_class, metric, execution_context, strategy_registry, plugin_args)
12
11
  @output_class = output_class
13
12
  @metric = metric
14
13
  @id = plugin_args["id"]
@@ -24,7 +23,7 @@ module LogStash class OutputDelegator
24
23
  @time_metric = @metric_events.counter(:duration_in_millis)
25
24
  @strategy = strategy_registry.
26
25
  class_for(self.concurrency).
27
- new(@logger, @output_class, @namespaced_metric, execution_context, plugin_args)
26
+ new(@output_class, @namespaced_metric, execution_context, plugin_args)
28
27
  end
29
28
 
30
29
  def config_name
@@ -1,8 +1,8 @@
1
1
  # Remove this in Logstash 6.0
2
2
  module LogStash module OutputDelegatorStrategies class Legacy
3
3
  attr_reader :worker_count, :workers
4
-
5
- def initialize(logger, klass, metric, execution_context, plugin_args)
4
+
5
+ def initialize(klass, metric, execution_context, plugin_args)
6
6
  @worker_count = (plugin_args["workers"] || 1).to_i
7
7
  @workers = @worker_count.times.map { klass.new(plugin_args) }
8
8
  @workers.each do |w|
@@ -12,11 +12,11 @@ module LogStash module OutputDelegatorStrategies class Legacy
12
12
  @worker_queue = SizedQueue.new(@worker_count)
13
13
  @workers.each {|w| @worker_queue << w}
14
14
  end
15
-
15
+
16
16
  def register
17
17
  @workers.each(&:register)
18
18
  end
19
-
19
+
20
20
  def multi_receive(events)
21
21
  worker = @worker_queue.pop
22
22
  worker.multi_receive(events)
@@ -1,10 +1,10 @@
1
1
  module LogStash module OutputDelegatorStrategies class Shared
2
- def initialize(logger, klass, metric, execution_context, plugin_args)
2
+ def initialize(klass, metric, execution_context, plugin_args)
3
3
  @output = klass.new(plugin_args)
4
4
  @output.metric = metric
5
5
  @output.execution_context = execution_context
6
6
  end
7
-
7
+
8
8
  def register
9
9
  @output.register
10
10
  end
@@ -13,10 +13,10 @@ module LogStash module OutputDelegatorStrategies class Shared
13
13
  @output.multi_receive(events)
14
14
  end
15
15
 
16
- def do_close
16
+ def do_close
17
17
  @output.do_close
18
18
  end
19
19
 
20
- ::LogStash::OutputDelegatorStrategyRegistry.instance.register(:shared, self)
20
+ ::LogStash::OutputDelegatorStrategyRegistry.instance.register(:shared, self)
21
21
  end; end; end
22
22
 
@@ -1,5 +1,5 @@
1
1
  module LogStash module OutputDelegatorStrategies class Single
2
- def initialize(logger, klass, metric, execution_context, plugin_args)
2
+ def initialize(klass, metric, execution_context, plugin_args)
3
3
  @output = klass.new(plugin_args)
4
4
  @output.metric = metric
5
5
  @output.execution_context = execution_context
@@ -9,7 +9,7 @@ module LogStash module OutputDelegatorStrategies class Single
9
9
  def register
10
10
  @output.register
11
11
  end
12
-
12
+
13
13
  def multi_receive(events)
14
14
  @mutex.synchronize do
15
15
  @output.multi_receive(events)
@@ -64,7 +64,7 @@ module LogStash; class BasePipeline
64
64
  @plugin_factory = LogStash::Plugins::PluginFactory.new(
65
65
  # use NullMetric if called in the BasePipeline context otherwise use the @metric value
66
66
  @lir, LogStash::Plugins::PluginMetricFactory.new(pipeline_id, @metric || Instrument::NullMetric.new),
67
- @logger, LogStash::Plugins::ExecutionContextFactory.new(@agent, self, @dlq_writer),
67
+ LogStash::Plugins::ExecutionContextFactory.new(@agent, self, @dlq_writer),
68
68
  FilterDelegator
69
69
  )
70
70
  grammar = LogStashConfigParser.new
@@ -74,9 +74,7 @@ module LogStash; class BasePipeline
74
74
  parsed_config.process_escape_sequences = settings.get_value("config.support_escapes")
75
75
  config_code = parsed_config.compile
76
76
 
77
- # config_code = BasePipeline.compileConfig(config_str)
78
-
79
- if settings.get_value("config.debug") && @logger.debug?
77
+ if settings.get_value("config.debug")
80
78
  @logger.debug("Compiled pipeline code", default_logging_keys(:code => config_code))
81
79
  end
82
80
 
@@ -238,7 +236,10 @@ module LogStash; class Pipeline < BasePipeline
238
236
  collect_stats
239
237
  collect_dlq_stats
240
238
 
241
- @logger.debug("Starting pipeline", default_logging_keys)
239
+ @logger.info("Starting pipeline", default_logging_keys(
240
+ "pipeline.workers" => @settings.get("pipeline.workers"),
241
+ "pipeline.batch.size" => @settings.get("pipeline.batch.size"),
242
+ "pipeline.batch.delay" => @settings.get("pipeline.batch.delay")))
242
243
 
243
244
  @finished_execution = Concurrent::AtomicBoolean.new(false)
244
245
 
@@ -249,14 +250,14 @@ module LogStash; class Pipeline < BasePipeline
249
250
  @finished_execution.make_true
250
251
  rescue => e
251
252
  close
252
- logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
253
+ @logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
253
254
  end
254
255
  end
255
256
 
256
257
  status = wait_until_started
257
258
 
258
259
  if status
259
- logger.debug("Pipeline started successfully", default_logging_keys(:pipeline_id => pipeline_id))
260
+ @logger.info("Pipeline started succesfully", default_logging_keys)
260
261
  end
261
262
 
262
263
  status
@@ -287,8 +288,6 @@ module LogStash; class Pipeline < BasePipeline
287
288
 
288
289
  start_workers
289
290
 
290
- @logger.info("Pipeline started", "pipeline.id" => @pipeline_id)
291
-
292
291
  # Block until all inputs have stopped
293
292
  # Generally this happens if SIGINT is sent and `shutdown` is called from an external thread
294
293
 
@@ -297,14 +296,13 @@ module LogStash; class Pipeline < BasePipeline
297
296
  wait_inputs
298
297
  transition_to_stopped
299
298
 
300
- @logger.debug("Input plugins stopped! Will shutdown filter/output workers.", default_logging_keys)
301
-
302
299
  shutdown_flusher
300
+ @logger.debug("Shutting down filter/output workers", default_logging_keys)
303
301
  shutdown_workers
304
302
 
305
303
  close
306
304
 
307
- @logger.debug("Pipeline has been shutdown", default_logging_keys)
305
+ @logger.info("Pipeline has terminated", default_logging_keys)
308
306
 
309
307
  # exit code
310
308
  return 0
@@ -378,12 +376,6 @@ module LogStash; class Pipeline < BasePipeline
378
376
  config_metric.gauge(:dead_letter_queue_enabled, dlq_enabled?)
379
377
  config_metric.gauge(:dead_letter_queue_path, @dlq_writer.get_path.to_absolute_path.to_s) if dlq_enabled?
380
378
 
381
-
382
- @logger.info("Starting pipeline", default_logging_keys(
383
- "pipeline.workers" => pipeline_workers,
384
- "pipeline.batch.size" => batch_size,
385
- "pipeline.batch.delay" => batch_delay,
386
- "pipeline.max_inflight" => max_inflight))
387
379
  if max_inflight > MAX_INFLIGHT_WARN_THRESHOLD
388
380
  @logger.warn("CAUTION: Recommended inflight events max exceeded! Logstash will run with up to #{max_inflight} events in memory in your current configuration. If your message sizes are large this may cause instability with the default heap size. Please consider setting a non-standard heap size, changing the batch size (currently #{batch_size}), or changing the number of pipeline workers (currently #{pipeline_workers})", default_logging_keys)
389
381
  end
@@ -565,19 +557,19 @@ module LogStash; class Pipeline < BasePipeline
565
557
  # stopped
566
558
  wait_for_workers
567
559
  clear_pipeline_metrics
568
- @logger.info("Pipeline terminated", "pipeline.id" => @pipeline_id)
569
560
  end # def shutdown
570
561
 
571
562
  def wait_for_workers
572
- @logger.debug("Closing inputs", default_logging_keys)
573
- @worker_threads.map(&:join)
574
- @logger.debug("Worker closed", default_logging_keys)
563
+ @worker_threads.each do |t|
564
+ t.join
565
+ @logger.debug("Worker terminated", default_logging_keys(:thread => t.inspect))
566
+ end
575
567
  end
576
568
 
577
569
  def stop_inputs
578
- @logger.debug("Closing inputs", default_logging_keys)
570
+ @logger.debug("Stopping inputs", default_logging_keys)
579
571
  @inputs.each(&:do_stop)
580
- @logger.debug("Closed inputs", default_logging_keys)
572
+ @logger.debug("Stopped inputs", default_logging_keys)
581
573
  end
582
574
 
583
575
  # After `shutdown` is called from an external thread this is called from the main thread to