logstash-core 5.2.1-java → 5.2.2-java

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: c4df5ad1b48f429f25bc3f16f97973b531b3c5d6
4
- data.tar.gz: 1a59f8132f4156de4cb846d05a417e2d44ba6164
3
+ metadata.gz: 72571fd4dcaa96000b854ac8362522c0db5d3e82
4
+ data.tar.gz: 6ff9cf5d47543adfb8e45d6b28163bebe9ce40ac
5
5
  SHA512:
6
- metadata.gz: 1487f73f2dba8861338bf93548c9b5dfac360fc2ecc26c6a776cffa0cec848426e1422dc9ae1178c7cdb2759fcf62be3d8f690419cbf9bce0a8b04f75ad06370
7
- data.tar.gz: 24a249aca51af3100d5c533f5a0491480718ad145b806f148ea36e366e05a35378366aa2819d98bb5c6f2a7dda0f0446103a04d33a27d6ac47d4296741ab2c90
6
+ metadata.gz: a7aaa38c146c061f56e6654b70bcfda14c89516ebca3a98fdad2f7b347b423bc78eae89cef47547ee6a1aaa725fda4ab2c9849942ec1619eecba862996d34458
7
+ data.tar.gz: 3111f88041c5367e1724c741c625875c3bbe0e8035107b86ac60fb7651922c8b4efa5f71d02495674549eec75d5a34fbcb6ddf80c49b515e6bd73eedf64c5ffb
@@ -5,4 +5,4 @@
5
5
  # Note to authors: this should not include dashes because 'gem' barfs if
6
6
  # you include a dash in the version string.
7
7
 
8
- LOGSTASH_CORE_VERSION = "5.2.1"
8
+ LOGSTASH_CORE_VERSION = "5.2.2"
@@ -189,7 +189,22 @@ class LogStash::Agent
189
189
  end
190
190
  end
191
191
 
192
+ def close_pipeline(id)
193
+ pipeline = @pipelines[id]
194
+ if pipeline
195
+ @logger.warn("closing pipeline", :id => id)
196
+ pipeline.close
197
+ end
198
+ end
199
+
200
+ def close_pipelines
201
+ @pipelines.each do |id, _|
202
+ close_pipeline(id)
203
+ end
204
+ end
205
+
192
206
  private
207
+
193
208
  def start_webserver
194
209
  options = {:http_host => @http_host, :http_ports => @http_port, :http_environment => @http_environment }
195
210
  @webserver = LogStash::WebServer.new(@logger, self, options)
@@ -232,7 +247,21 @@ class LogStash::Agent
232
247
  @collect_metric
233
248
  end
234
249
 
235
- def create_pipeline(settings, config=nil)
250
+ def increment_reload_failures_metrics(id, message, backtrace = nil)
251
+ @instance_reload_metric.increment(:failures)
252
+ @pipeline_reload_metric.namespace([id.to_sym, :reloads]).tap do |n|
253
+ n.increment(:failures)
254
+ n.gauge(:last_error, { :message => message, :backtrace =>backtrace})
255
+ n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
256
+ end
257
+ if @logger.debug?
258
+ @logger.error("Cannot load an invalid configuration", :reason => message, :backtrace => backtrace)
259
+ else
260
+ @logger.error("Cannot load an invalid configuration", :reason => message)
261
+ end
262
+ end
263
+
264
+ def create_pipeline(settings, config = nil)
236
265
  if config.nil?
237
266
  begin
238
267
  config = fetch_config(settings)
@@ -245,17 +274,7 @@ class LogStash::Agent
245
274
  begin
246
275
  LogStash::Pipeline.new(config, settings, metric)
247
276
  rescue => e
248
- @instance_reload_metric.increment(:failures)
249
- @pipeline_reload_metric.namespace([settings.get("pipeline.id").to_sym, :reloads]).tap do |n|
250
- n.increment(:failures)
251
- n.gauge(:last_error, { :message => e.message, :backtrace => e.backtrace})
252
- n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
253
- end
254
- if @logger.debug?
255
- @logger.error("fetched an invalid config", :config => config, :reason => e.message, :backtrace => e.backtrace)
256
- else
257
- @logger.error("fetched an invalid config", :config => config, :reason => e.message)
258
- end
277
+ increment_reload_failures_metrics(settings.get("pipeline.id"), e.message, e.backtrace)
259
278
  return
260
279
  end
261
280
  end
@@ -264,30 +283,96 @@ class LogStash::Agent
264
283
  @config_loader.format_config(settings.get("path.config"), settings.get("config.string"))
265
284
  end
266
285
 
267
- # since this method modifies the @pipelines hash it is
268
- # wrapped in @upgrade_mutex in the parent call `reload_state!`
286
+ # reload_pipeline tries to reload the pipeline with id using a potential new configuration if it changed
287
+ # since this method modifies the @pipelines hash it is wrapped in @upgrade_mutex in the parent call `reload_state!`
288
+ # @param id [String] the pipeline id to reload
269
289
  def reload_pipeline!(id)
270
290
  old_pipeline = @pipelines[id]
271
291
  new_config = fetch_config(old_pipeline.settings)
292
+
272
293
  if old_pipeline.config_str == new_config
273
- @logger.debug("no configuration change for pipeline",
274
- :pipeline => id, :config => new_config)
294
+ @logger.debug("no configuration change for pipeline", :pipeline => id)
295
+ return
296
+ end
297
+
298
+ # check if this pipeline is not reloadable. it should not happen as per the check below
299
+ # but keep it here as a safety net if a reloadable pipeline was reloaded with a non reloadable pipeline
300
+ if old_pipeline.non_reloadable_plugins.any?
301
+ @logger.error("pipeline is not reloadable", :pipeline => id)
302
+ return
303
+ end
304
+
305
+ # BasePipeline#initialize will compile the config, and load all plugins and raise an exception
306
+ # on an invalid configuration
307
+ begin
308
+ pipeline_validator = LogStash::BasePipeline.new(new_config, old_pipeline.settings)
309
+ rescue => e
310
+ increment_reload_failures_metrics(id, e.message, e.backtrace)
311
+ return
312
+ end
313
+
314
+ # check if the new pipeline will not be reloadable in which case we want to log that as an error and abort
315
+ if pipeline_validator.non_reloadable_plugins.any?
316
+ @logger.error(I18n.t("logstash.agent.non_reloadable_config_reload"), :pipeline_id => id, :plugins => pipeline_validator.non_reloadable_plugins.map(&:class))
317
+ increment_reload_failures_metrics(id, "non reloadable pipeline")
275
318
  return
276
319
  end
277
320
 
278
- new_pipeline = create_pipeline(old_pipeline.settings, new_config)
321
+ # we know config is valid so we are fairly comfortable to first stop old pipeline and then start new one
322
+ upgrade_pipeline(id, old_pipeline.settings, new_config)
323
+ end
324
+
325
+ # upgrade_pipeline first stops the old pipeline and starts the new one
326
+ # this method exists only for specs to be able to expect this to be executed
327
+ # @param pipeline_id [String] the pipeline id to upgrade
328
+ # @param settings [Settings] the settings for the new pipeline
329
+ # @param new_config [String] the new pipeline config
330
+ def upgrade_pipeline(pipeline_id, settings, new_config)
331
+ @logger.warn("fetched new config for pipeline. upgrading..", :pipeline => pipeline_id, :config => new_config)
332
+
333
+ # first step: stop the old pipeline.
334
+ # IMPORTANT: a new pipeline with same settings should not be instantiated before the previous one is shutdown
335
+
336
+ stop_pipeline(pipeline_id)
337
+ reset_pipeline_metrics(pipeline_id)
338
+
339
+ # second step create and start a new pipeline now that the old one is shutdown
279
340
 
280
- return if new_pipeline.nil?
341
+ new_pipeline = create_pipeline(settings, new_config)
342
+ if new_pipeline.nil?
343
+ # this is a scenario where the configuration is valid (compilable) but the new pipeline refused to start
344
+ # and at this point NO pipeline is running
345
+ @logger.error("failed to create the reloaded pipeline and no pipeline is currently running", :pipeline => pipeline_id)
346
+ increment_reload_failures_metrics(pipeline_id, "failed to create the reloaded pipeline")
347
+ return
348
+ end
281
349
 
350
+ ### at this point pipeline#close must be called if upgrade_pipeline does not succeed
351
+
352
+ # check if the new pipeline will not be reloadable in which case we want to log that as an error and abort. this should normally not
353
+ # happen since the check should be done in reload_pipeline! prior to getting here.
282
354
  if new_pipeline.non_reloadable_plugins.any?
283
- @logger.error(I18n.t("logstash.agent.non_reloadable_config_reload"),
284
- :pipeline_id => id,
285
- :plugins => new_pipeline.non_reloadable_plugins.map(&:class))
355
+ @logger.error(I18n.t("logstash.agent.non_reloadable_config_reload"), :pipeline_id => pipeline_id, :plugins => new_pipeline.non_reloadable_plugins.map(&:class))
356
+ increment_reload_failures_metrics(pipeline_id, "non reloadable pipeline")
357
+ new_pipeline.close
286
358
  return
287
- else
288
- @logger.warn("fetched new config for pipeline. upgrading..",
289
- :pipeline => id, :config => new_pipeline.config_str)
290
- upgrade_pipeline(id, new_pipeline)
359
+ end
360
+
361
+ # @pipelines[pipeline_id] must be initialized before #start_pipeline below which uses it
362
+ @pipelines[pipeline_id] = new_pipeline
363
+
364
+ if !start_pipeline(pipeline_id)
365
+ @logger.error("failed to start the reloaded pipeline and no pipeline is currently running", :pipeline => pipeline_id)
366
+ # do not call increment_reload_failures_metrics here since #start_pipeline already does it on failure
367
+ new_pipeline.close
368
+ return
369
+ end
370
+
371
+ # pipeline started successfully, update reload success metrics
372
+ @instance_reload_metric.increment(:successes)
373
+ @pipeline_reload_metric.namespace([pipeline_id.to_sym, :reloads]).tap do |n|
374
+ n.increment(:successes)
375
+ n.gauge(:last_success_timestamp, LogStash::Timestamp.now)
291
376
  end
292
377
  end
293
378
 
@@ -349,20 +434,6 @@ class LogStash::Agent
349
434
  thread.is_a?(Thread) && thread.alive?
350
435
  end
351
436
 
352
- def upgrade_pipeline(pipeline_id, new_pipeline)
353
- stop_pipeline(pipeline_id)
354
- reset_pipeline_metrics(pipeline_id)
355
- @pipelines[pipeline_id] = new_pipeline
356
- if start_pipeline(pipeline_id) # pipeline started successfuly
357
- @instance_reload_metric.increment(:successes)
358
- @pipeline_reload_metric.namespace([pipeline_id.to_sym, :reloads]).tap do |n|
359
- n.increment(:successes)
360
- n.gauge(:last_success_timestamp, LogStash::Timestamp.now)
361
- end
362
-
363
- end
364
- end
365
-
366
437
  def clean_state?
367
438
  @pipelines.empty?
368
439
  end
@@ -44,7 +44,7 @@ module LogStash module Instrument module PeriodicPoller
44
44
 
45
45
  def collect
46
46
  raw = JRMonitor.memory.generate
47
- collect_jvm_metrics(raw)
47
+ collect_jvm_metrics(raw)
48
48
  collect_pools_metrics(raw)
49
49
  collect_threads_metrics
50
50
  collect_process_metrics
@@ -69,15 +69,10 @@ module LogStash module Instrument module PeriodicPoller
69
69
  end
70
70
 
71
71
  def collect_threads_metrics
72
- threads = JRMonitor.threads.generate
72
+ threads_mx = ManagementFactory.getThreadMXBean()
73
73
 
74
- current = threads.count
75
- if @peak_threads.nil? || @peak_threads < current
76
- @peak_threads = current
77
- end
78
-
79
- metric.gauge([:jvm, :threads], :count, threads.count)
80
- metric.gauge([:jvm, :threads], :peak_count, @peak_threads)
74
+ metric.gauge([:jvm, :threads], :count, threads_mx.getThreadCount())
75
+ metric.gauge([:jvm, :threads], :peak_count, threads_mx.getPeakThreadCount())
81
76
  end
82
77
 
83
78
  def collect_process_metrics
@@ -22,43 +22,24 @@ require "logstash/instrument/collector"
22
22
  require "logstash/output_delegator"
23
23
  require "logstash/filter_delegator"
24
24
 
25
- module LogStash; class Pipeline
25
+ module LogStash; class BasePipeline
26
26
  include LogStash::Util::Loggable
27
27
 
28
- attr_reader :inputs,
29
- :filters,
30
- :outputs,
31
- :worker_threads,
32
- :events_consumed,
33
- :events_filtered,
34
- :reporter,
35
- :pipeline_id,
36
- :started_at,
37
- :thread,
38
- :config_str,
39
- :config_hash,
40
- :settings,
41
- :metric,
42
- :filter_queue_client,
43
- :input_queue_client,
44
- :queue
45
-
46
- MAX_INFLIGHT_WARN_THRESHOLD = 10_000
28
+ attr_reader :config_str, :config_hash, :inputs, :filters, :outputs, :pipeline_id
47
29
 
48
30
  RELOAD_INCOMPATIBLE_PLUGINS = [
49
- "LogStash::Inputs::Stdin"
31
+ "LogStash::Inputs::Stdin"
50
32
  ]
51
33
 
52
- def initialize(config_str, settings = SETTINGS, namespaced_metric = nil)
34
+ def initialize(config_str, settings = SETTINGS)
53
35
  @logger = self.logger
54
36
  @config_str = config_str
55
37
  @config_hash = Digest::SHA1.hexdigest(@config_str)
56
38
  # Every time #plugin is invoked this is incremented to give each plugin
57
39
  # a unique id when auto-generating plugin ids
58
40
  @plugin_counter ||= 0
59
- @settings = settings
60
- @pipeline_id = @settings.get_value("pipeline.id") || self.object_id
61
- @reporter = PipelineReporter.new(@logger, self)
41
+
42
+ @pipeline_id = settings.get_value("pipeline.id") || self.object_id
62
43
 
63
44
  # A list of plugins indexed by id
64
45
  @plugins_by_id = {}
@@ -66,8 +47,88 @@ module LogStash; class Pipeline
66
47
  @filters = nil
67
48
  @outputs = nil
68
49
 
69
- @worker_threads = []
50
+ grammar = LogStashConfigParser.new
51
+ parsed_config = grammar.parse(config_str)
52
+ raise(ConfigurationError, grammar.failure_reason) if parsed_config.nil?
53
+
54
+ config_code = parsed_config.compile
55
+
56
+ # config_code = BasePipeline.compileConfig(config_str)
57
+
58
+ if settings.get_value("config.debug") && @logger.debug?
59
+ @logger.debug("Compiled pipeline code", :code => config_code)
60
+ end
61
+
62
+ # Evaluate the config compiled code that will initialize all the plugins and define the
63
+ # filter and output methods.
64
+ begin
65
+ eval(config_code)
66
+ rescue => e
67
+ raise e
68
+ end
69
+ end
70
+
71
+ def plugin(plugin_type, name, *args)
72
+ @plugin_counter += 1
73
+
74
+ # Collapse the array of arguments into a single merged hash
75
+ args = args.reduce({}, &:merge)
76
+
77
+ id = if args["id"].nil? || args["id"].empty?
78
+ args["id"] = "#{@config_hash}-#{@plugin_counter}"
79
+ else
80
+ args["id"]
81
+ end
82
+
83
+ raise ConfigurationError, "Two plugins have the id '#{id}', please fix this conflict" if @plugins_by_id[id]
84
+
85
+ @plugins_by_id[id] = true
86
+
87
+ # use NullMetric if called in the BasePipeline context otherwise use the @metric value
88
+ metric = @metric || Instrument::NullMetric.new
89
+
90
+ pipeline_scoped_metric = metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :plugins])
91
+
92
+ # Scope plugins of type 'input' to 'inputs'
93
+ type_scoped_metric = pipeline_scoped_metric.namespace("#{plugin_type}s".to_sym)
94
+
95
+ klass = Plugin.lookup(plugin_type, name)
96
+
97
+ if plugin_type == "output"
98
+ OutputDelegator.new(@logger, klass, type_scoped_metric, OutputDelegatorStrategyRegistry.instance, args)
99
+ elsif plugin_type == "filter"
100
+ FilterDelegator.new(@logger, klass, type_scoped_metric, args)
101
+ else # input
102
+ input_plugin = klass.new(args)
103
+ input_plugin.metric = type_scoped_metric.namespace(id)
104
+ input_plugin
105
+ end
106
+ end
107
+
108
+ def non_reloadable_plugins
109
+ (inputs + filters + outputs).select do |plugin|
110
+ RELOAD_INCOMPATIBLE_PLUGINS.include?(plugin.class.name)
111
+ end
112
+ end
113
+ end; end
114
+
115
+ module LogStash; class Pipeline < BasePipeline
116
+ attr_reader \
117
+ :worker_threads,
118
+ :events_consumed,
119
+ :events_filtered,
120
+ :reporter,
121
+ :started_at,
122
+ :thread,
123
+ :settings,
124
+ :metric,
125
+ :filter_queue_client,
126
+ :input_queue_client,
127
+ :queue
128
+
129
+ MAX_INFLIGHT_WARN_THRESHOLD = 10_000
70
130
 
131
+ def initialize(config_str, settings = SETTINGS, namespaced_metric = nil)
71
132
  # This needs to be configured before we evaluate the code to make
72
133
  # sure the metric instance is correctly send to the plugins to make the namespace scoping work
73
134
  @metric = if namespaced_metric
@@ -76,29 +137,12 @@ module LogStash; class Pipeline
76
137
  Instrument::NullMetric.new
77
138
  end
78
139
 
79
- grammar = LogStashConfigParser.new
80
- @config = grammar.parse(config_str)
81
- if @config.nil?
82
- raise ConfigurationError, grammar.failure_reason
83
- end
84
- # This will compile the config to ruby and evaluate the resulting code.
85
- # The code will initialize all the plugins and define the
86
- # filter and output methods.
87
- code = @config.compile
88
- @code = code
140
+ @settings = settings
141
+ @reporter = PipelineReporter.new(@logger, self)
142
+ @worker_threads = []
89
143
 
90
- # The config code is hard to represent as a log message...
91
- # So just print it.
144
+ super(config_str, settings)
92
145
 
93
- if @settings.get_value("config.debug") && @logger.debug?
94
- @logger.debug("Compiled pipeline code", :code => code)
95
- end
96
-
97
- begin
98
- eval(code)
99
- rescue => e
100
- raise
101
- end
102
146
  @queue = build_queue_from_settings
103
147
  @input_queue_client = @queue.write_client
104
148
  @filter_queue_client = @queue.read_client
@@ -202,8 +246,7 @@ module LogStash; class Pipeline
202
246
  shutdown_flusher
203
247
  shutdown_workers
204
248
 
205
- @filter_queue_client.close
206
- @queue.close
249
+ close
207
250
 
208
251
  @logger.debug("Pipeline #{@pipeline_id} has been shutdown")
209
252
 
@@ -211,6 +254,11 @@ module LogStash; class Pipeline
211
254
  return 0
212
255
  end # def run
213
256
 
257
+ def close
258
+ @filter_queue_client.close
259
+ @queue.close
260
+ end
261
+
214
262
  def transition_to_running
215
263
  @running.make_true
216
264
  end
@@ -227,12 +275,32 @@ module LogStash; class Pipeline
227
275
  @running.false?
228
276
  end
229
277
 
278
+ # register_plugin simply calls the plugin #register method and catches & logs any error
279
+ # @param plugin [Plugin] the plugin to register
280
+ # @return [Plugin] the registered plugin
281
+ def register_plugin(plugin)
282
+ plugin.register
283
+ plugin
284
+ rescue => e
285
+ @logger.error("Error registering plugin", :plugin => plugin.inspect, :error => e.message)
286
+ raise e
287
+ end
288
+
289
+ # register_plugins calls #register_plugin on the plugins list and upon exception will call Plugin#do_close on all registered plugins
290
+ # @param plugins [Array[Plugin]] the list of plugins to register
291
+ def register_plugins(plugins)
292
+ registered = []
293
+ plugins.each { |plugin| registered << register_plugin(plugin) }
294
+ rescue => e
295
+ registered.each(&:do_close)
296
+ raise e
297
+ end
298
+
230
299
  def start_workers
231
300
  @worker_threads.clear # In case we're restarting the pipeline
232
301
  begin
233
- start_inputs
234
- @outputs.each {|o| o.register }
235
- @filters.each {|f| f.register }
302
+ register_plugins(@outputs)
303
+ register_plugins(@filters)
236
304
 
237
305
  pipeline_workers = safe_pipeline_worker_count
238
306
  batch_size = @settings.get("pipeline.batch.size")
@@ -263,6 +331,16 @@ module LogStash; class Pipeline
263
331
  worker_loop(batch_size, batch_delay)
264
332
  end
265
333
  end
334
+
335
+ # inputs should be started last, after all workers
336
+ begin
337
+ start_inputs
338
+ rescue => e
339
+ # if there is any exception in starting inputs, make sure we shutdown workers.
340
+ # exception will already be logged in start_inputs
341
+ shutdown_workers
342
+ raise e
343
+ end
266
344
  ensure
267
345
  # it is important to guarantee @ready to be true after the startup sequence has been completed
268
346
  # to potentially unblock the shutdown method which may be waiting on @ready to proceed
@@ -354,10 +432,11 @@ module LogStash; class Pipeline
354
432
  end
355
433
  @inputs += moreinputs
356
434
 
357
- @inputs.each do |input|
358
- input.register
359
- start_input(input)
360
- end
435
+ # first make sure we can register all input plugins
436
+ register_plugins(@inputs)
437
+
438
+ # then after all input plugins are successfully registered, start them
439
+ @inputs.each { |input| start_input(input) }
361
440
  end
362
441
 
363
442
  def start_input(plugin)
@@ -433,41 +512,6 @@ module LogStash; class Pipeline
433
512
  @outputs.each(&:do_close)
434
513
  end
435
514
 
436
- def plugin(plugin_type, name, *args)
437
- @plugin_counter += 1
438
-
439
- # Collapse the array of arguments into a single merged hash
440
- args = args.reduce({}, &:merge)
441
-
442
- id = if args["id"].nil? || args["id"].empty?
443
- args["id"] = "#{@config_hash}-#{@plugin_counter}"
444
- else
445
- args["id"]
446
- end
447
-
448
- raise ConfigurationError, "Two plugins have the id '#{id}', please fix this conflict" if @plugins_by_id[id]
449
-
450
- pipeline_scoped_metric = metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :plugins])
451
-
452
- klass = Plugin.lookup(plugin_type, name)
453
-
454
- # Scope plugins of type 'input' to 'inputs'
455
- type_scoped_metric = pipeline_scoped_metric.namespace("#{plugin_type}s".to_sym)
456
- plugin = if plugin_type == "output"
457
- OutputDelegator.new(@logger, klass, type_scoped_metric,
458
- OutputDelegatorStrategyRegistry.instance,
459
- args)
460
- elsif plugin_type == "filter"
461
- FilterDelegator.new(@logger, klass, type_scoped_metric, args)
462
- else # input
463
- input_plugin = klass.new(args)
464
- input_plugin.metric = type_scoped_metric.namespace(id)
465
- input_plugin
466
- end
467
-
468
- @plugins_by_id[id] = plugin
469
- end
470
-
471
515
  # for backward compatibility in devutils for the rspec helpers, this method is not used
472
516
  # in the pipeline anymore.
473
517
  def filter(event, &block)
@@ -548,12 +592,6 @@ module LogStash; class Pipeline
548
592
  .each {|t| t.delete("status") }
549
593
  end
550
594
 
551
- def non_reloadable_plugins
552
- (inputs + filters + outputs).select do |plugin|
553
- RELOAD_INCOMPATIBLE_PLUGINS.include?(plugin.class.name)
554
- end
555
- end
556
-
557
595
  def collect_stats
558
596
  pipeline_metric = @metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :queue])
559
597
  pipeline_metric.gauge(:type, settings.get("queue.type"))
@@ -590,5 +628,4 @@ module LogStash; class Pipeline
590
628
  :flushing => @flushing
591
629
  }
592
630
  end
593
-
594
- end end
631
+ end; end
@@ -249,7 +249,7 @@ class LogStash::Runner < Clamp::StrictCommand
249
249
  config_loader = LogStash::Config::Loader.new(logger)
250
250
  config_str = config_loader.format_config(setting("path.config"), setting("config.string"))
251
251
  begin
252
- LogStash::Pipeline.new(config_str)
252
+ LogStash::BasePipeline.new(config_str)
253
253
  puts "Configuration OK"
254
254
  logger.info "Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash"
255
255
  return 0
@@ -11,4 +11,4 @@
11
11
  # eventually this file should be in the root logstash lib fir and dependencies in logstash-core should be
12
12
  # fixed.
13
13
 
14
- LOGSTASH_VERSION = "5.2.1"
14
+ LOGSTASH_VERSION = "5.2.2"
@@ -52,6 +52,10 @@ describe LogStash::Agent do
52
52
  }
53
53
  end
54
54
 
55
+ after(:each) do
56
+ subject.close_pipelines
57
+ end
58
+
55
59
  it "should delegate settings to new pipeline" do
56
60
  expect(LogStash::Pipeline).to receive(:new) do |arg1, arg2|
57
61
  expect(arg1).to eq(config_string)
@@ -262,10 +266,14 @@ describe LogStash::Agent do
262
266
  subject.register_pipeline(pipeline_id, pipeline_settings)
263
267
  end
264
268
 
269
+ after(:each) do
270
+ subject.close_pipelines
271
+ end
272
+
265
273
  context "when fetching a new state" do
266
274
  it "upgrades the state" do
267
275
  expect(subject).to receive(:fetch_config).and_return(second_pipeline_config)
268
- expect(subject).to receive(:upgrade_pipeline).with(pipeline_id, kind_of(LogStash::Pipeline))
276
+ expect(subject).to receive(:upgrade_pipeline).with(pipeline_id, kind_of(LogStash::Settings), second_pipeline_config)
269
277
  subject.reload_state!
270
278
  end
271
279
  end
@@ -295,6 +303,7 @@ describe LogStash::Agent do
295
303
 
296
304
  after :each do
297
305
  ENV["FOO"] = @foo_content
306
+ subject.close_pipelines
298
307
  end
299
308
 
300
309
  it "doesn't upgrade the state" do
@@ -319,14 +328,16 @@ describe LogStash::Agent do
319
328
  end
320
329
 
321
330
  after(:each) do
322
- subject.shutdown
331
+ subject.close_pipelines
323
332
  end
324
333
 
325
334
  context "when the upgrade fails" do
326
335
  before :each do
327
336
  allow(subject).to receive(:fetch_config).and_return(new_pipeline_config)
328
337
  allow(subject).to receive(:create_pipeline).and_return(nil)
329
- allow(subject).to receive(:stop_pipeline)
338
+ allow(subject).to receive(:stop_pipeline) do |id|
339
+ subject.close_pipeline(id)
340
+ end
330
341
  end
331
342
 
332
343
  it "leaves the state untouched" do
@@ -346,16 +357,20 @@ describe LogStash::Agent do
346
357
  let(:new_config) { "input { generator { count => 1 } } output { }" }
347
358
  before :each do
348
359
  allow(subject).to receive(:fetch_config).and_return(new_config)
349
- allow(subject).to receive(:stop_pipeline)
350
360
  allow(subject).to receive(:start_pipeline)
361
+ allow(subject).to receive(:stop_pipeline) do |id|
362
+ subject.close_pipeline(id)
363
+ end
351
364
  end
352
365
  it "updates the state" do
353
366
  subject.send(:"reload_pipeline!", pipeline_id)
354
367
  expect(subject.pipelines[pipeline_id].config_str).to eq(new_config)
355
368
  end
356
369
  it "starts the pipeline" do
357
- expect(subject).to receive(:stop_pipeline)
358
370
  expect(subject).to receive(:start_pipeline)
371
+ expect(subject).to receive(:stop_pipeline) do |id|
372
+ subject.close_pipeline(id)
373
+ end
359
374
  subject.send(:"reload_pipeline!", pipeline_id)
360
375
  end
361
376
  end
@@ -416,6 +431,12 @@ describe LogStash::Agent do
416
431
  let!(:dummy_output) { LogStash::Outputs::DroppingDummyOutput.new }
417
432
  let!(:dummy_output2) { DummyOutput2.new }
418
433
  let(:initial_generator_threshold) { 1000 }
434
+ let(:pipeline_thread) do
435
+ Thread.new do
436
+ subject.register_pipeline("main", pipeline_settings)
437
+ subject.execute
438
+ end
439
+ end
419
440
 
420
441
  before :each do
421
442
  allow(LogStash::Outputs::DroppingDummyOutput).to receive(:new).at_least(:once).with(anything).and_return(dummy_output)
@@ -429,10 +450,11 @@ describe LogStash::Agent do
429
450
  @abort_on_exception = Thread.abort_on_exception
430
451
  Thread.abort_on_exception = true
431
452
 
432
- @t = Thread.new do
433
- subject.register_pipeline("main", pipeline_settings)
434
- subject.execute
435
- end
453
+ pipeline_thread
454
+ # @t = Thread.new do
455
+ # subject.register_pipeline("main", pipeline_settings)
456
+ # subject.execute
457
+ # end
436
458
 
437
459
  # wait for some events to reach the dummy_output
438
460
  sleep(0.01) until dummy_output.events_received > initial_generator_threshold
@@ -441,8 +463,8 @@ describe LogStash::Agent do
441
463
  after :each do
442
464
  begin
443
465
  subject.shutdown
444
- Stud.stop!(@t)
445
- @t.join
466
+ Stud.stop!(pipeline_thread)
467
+ pipeline_thread.join
446
468
  ensure
447
469
  Thread.abort_on_exception = @abort_on_exception
448
470
  end
@@ -451,8 +473,8 @@ describe LogStash::Agent do
451
473
  context "when reloading a good config" do
452
474
  let(:new_config_generator_counter) { 500 }
453
475
  let(:new_config) { "input { generator { count => #{new_config_generator_counter} } } output { dummyoutput2 {} }" }
454
- before :each do
455
476
 
477
+ before :each do
456
478
  File.open(config_path, "w") do |f|
457
479
  f.write(new_config)
458
480
  f.fsync
@@ -138,7 +138,7 @@ describe LogStash::Pipeline do
138
138
  Thread.abort_on_exception = true
139
139
 
140
140
  pipeline = LogStash::Pipeline.new(config, pipeline_settings_obj)
141
- Thread.new { pipeline.run }
141
+ t = Thread.new { pipeline.run }
142
142
  sleep 0.1 while !pipeline.ready?
143
143
  wait(3).for do
144
144
  # give us a bit of time to flush the events
@@ -149,6 +149,7 @@ describe LogStash::Pipeline do
149
149
  expect(output.events[0].get("tags")).to eq(["notdropped"])
150
150
  expect(output.events[1].get("tags")).to eq(["notdropped"])
151
151
  pipeline.shutdown
152
+ t.join
152
153
 
153
154
  Thread.abort_on_exception = abort_on_exception_state
154
155
  end
@@ -192,12 +193,14 @@ describe LogStash::Pipeline do
192
193
  pipeline_settings_obj.set("config.debug", false)
193
194
  expect(logger).not_to receive(:debug).with(/Compiled pipeline/, anything)
194
195
  pipeline = TestPipeline.new(test_config_with_filters)
196
+ pipeline.close
195
197
  end
196
198
 
197
199
  it "should print the compiled code if config.debug is set to true" do
198
200
  pipeline_settings_obj.set("config.debug", true)
199
201
  expect(logger).to receive(:debug).with(/Compiled pipeline/, anything)
200
202
  pipeline = TestPipeline.new(test_config_with_filters, pipeline_settings_obj)
203
+ pipeline.close
201
204
  end
202
205
  end
203
206
 
@@ -385,9 +388,12 @@ describe LogStash::Pipeline do
385
388
  allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(DummyCodec)
386
389
  allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
387
390
  allow(logger).to receive(:warn)
388
- thread = Thread.new { pipeline.run }
391
+ # pipeline must be called first outside the thread context because it lazily initializes
392
+ p = pipeline
393
+ t = Thread.new { p.run }
394
+ sleep(0.1) until pipeline.ready?
389
395
  pipeline.shutdown
390
- thread.join
396
+ t.join
391
397
  end
392
398
 
393
399
  it "should not raise a max inflight warning if the max_inflight count isn't exceeded" do
@@ -440,6 +446,10 @@ describe LogStash::Pipeline do
440
446
  let(:settings) { LogStash::SETTINGS.clone }
441
447
  subject { LogStash::Pipeline.new(config, settings, metric) }
442
448
 
449
+ after :each do
450
+ subject.close
451
+ end
452
+
443
453
  context "when metric.collect is disabled" do
444
454
  before :each do
445
455
  settings.set("metric.collect", false)
@@ -528,9 +538,21 @@ describe LogStash::Pipeline do
528
538
  allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutputmore").and_return(DummyOutputMore)
529
539
  end
530
540
 
541
+ # multiple pipelines cannot be instantiated using the same PQ settings, force memory queue
542
+ before :each do
543
+ pipeline_workers_setting = LogStash::SETTINGS.get_setting("queue.type")
544
+ allow(pipeline_workers_setting).to receive(:value).and_return("memory")
545
+ pipeline_settings.each {|k, v| pipeline_settings_obj.set(k, v) }
546
+ end
547
+
531
548
  let(:pipeline1) { LogStash::Pipeline.new("input { dummyinputgenerator {} } filter { dummyfilter {} } output { dummyoutput {}}") }
532
549
  let(:pipeline2) { LogStash::Pipeline.new("input { dummyinputgenerator {} } filter { dummyfilter {} } output { dummyoutputmore {}}") }
533
550
 
551
+ after do
552
+ pipeline1.close
553
+ pipeline2.close
554
+ end
555
+
534
556
  it "should handle evaluating different config" do
535
557
  expect(pipeline1.output_func(LogStash::Event.new)).not_to include(nil)
536
558
  expect(pipeline1.filter_func(LogStash::Event.new)).not_to include(nil)
@@ -573,7 +595,7 @@ describe LogStash::Pipeline do
573
595
  it "flushes the buffered contents of the filter" do
574
596
  Thread.abort_on_exception = true
575
597
  pipeline = LogStash::Pipeline.new(config, pipeline_settings_obj)
576
- Thread.new { pipeline.run }
598
+ t = Thread.new { pipeline.run }
577
599
  sleep 0.1 while !pipeline.ready?
578
600
  wait(3).for do
579
601
  # give us a bit of time to flush the events
@@ -582,6 +604,7 @@ describe LogStash::Pipeline do
582
604
  event = output.events.pop
583
605
  expect(event.get("message").count("\n")).to eq(99)
584
606
  pipeline.shutdown
607
+ t.join
585
608
  end
586
609
  end
587
610
 
@@ -596,6 +619,13 @@ describe LogStash::Pipeline do
596
619
  let(:pipeline1) { LogStash::Pipeline.new("input { generator {} } filter { dummyfilter {} } output { dummyoutput {}}") }
597
620
  let(:pipeline2) { LogStash::Pipeline.new("input { generator {} } filter { dummyfilter {} } output { dummyoutput {}}") }
598
621
 
622
+ # multiple pipelines cannot be instantiated using the same PQ settings, force memory queue
623
+ before :each do
624
+ pipeline_workers_setting = LogStash::SETTINGS.get_setting("queue.type")
625
+ allow(pipeline_workers_setting).to receive(:value).and_return("memory")
626
+ pipeline_settings.each {|k, v| pipeline_settings_obj.set(k, v) }
627
+ end
628
+
599
629
  it "should handle evaluating different config" do
600
630
  # When the functions are compiled from the AST it will generate instance
601
631
  # variables that are unique to the actual config, the intances are pointing
@@ -626,8 +656,14 @@ describe LogStash::Pipeline do
626
656
 
627
657
  subject { described_class.new(config) }
628
658
 
629
- it "returns nil when the pipeline isnt started" do
630
- expect(subject.started_at).to be_nil
659
+ context "when the pipeline is not started" do
660
+ after :each do
661
+ subject.close
662
+ end
663
+
664
+ it "returns nil when the pipeline isnt started" do
665
+ expect(subject.started_at).to be_nil
666
+ end
631
667
  end
632
668
 
633
669
  it "return when the pipeline started working" do
@@ -648,6 +684,10 @@ describe LogStash::Pipeline do
648
684
  subject { described_class.new(config) }
649
685
 
650
686
  context "when the pipeline is not started" do
687
+ after :each do
688
+ subject.close
689
+ end
690
+
651
691
  it "returns 0" do
652
692
  expect(subject.uptime).to eq(0)
653
693
  end
@@ -655,10 +695,14 @@ describe LogStash::Pipeline do
655
695
 
656
696
  context "when the pipeline is started" do
657
697
  it "return the duration in milliseconds" do
658
- t = Thread.new { subject.run }
698
+ # subject must be first call outside the thread context because of lazy initialization
699
+ s = subject
700
+ t = Thread.new { s.run }
701
+ sleep(0.1) until subject.ready?
659
702
  sleep(0.1)
660
703
  expect(subject.uptime).to be > 0
661
704
  subject.shutdown
705
+ t.join
662
706
  end
663
707
  end
664
708
  end
@@ -704,6 +748,12 @@ describe LogStash::Pipeline do
704
748
  end
705
749
  let(:dummyoutput) { ::LogStash::Outputs::DummyOutput.new({ "id" => dummy_output_id }) }
706
750
  let(:metric_store) { subject.metric.collector.snapshot_metric.metric_store }
751
+ let(:pipeline_thread) do
752
+ # subject has to be called for the first time outside the thread because it will create a race condition
753
+ # with the subject.ready? call since subject is lazily initialized
754
+ s = subject
755
+ Thread.new { s.run }
756
+ end
707
757
 
708
758
  before :each do
709
759
  allow(::LogStash::Outputs::DummyOutput).to receive(:new).with(any_args).and_return(dummyoutput)
@@ -712,7 +762,9 @@ describe LogStash::Pipeline do
712
762
  allow(LogStash::Plugin).to receive(:lookup).with("filter", "multiline").and_return(LogStash::Filters::Multiline)
713
763
  allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
714
764
 
715
- Thread.new { subject.run }
765
+ pipeline_thread
766
+ sleep(0.1) until subject.ready?
767
+
716
768
  # make sure we have received all the generated events
717
769
  wait(3).for do
718
770
  # give us a bit of time to flush the events
@@ -722,6 +774,7 @@ describe LogStash::Pipeline do
722
774
 
723
775
  after :each do
724
776
  subject.shutdown
777
+ pipeline_thread.join
725
778
  end
726
779
 
727
780
  context "global metric" do
@@ -787,6 +840,13 @@ describe LogStash::Pipeline do
787
840
  let(:pipeline1) { LogStash::Pipeline.new("input { generator {} } filter { dummyfilter {} } output { dummyoutput {}}") }
788
841
  let(:pipeline2) { LogStash::Pipeline.new("input { generator {} } filter { dummyfilter {} } output { dummyoutput {}}") }
789
842
 
843
+ # multiple pipelines cannot be instantiated using the same PQ settings, force memory queue
844
+ before :each do
845
+ pipeline_workers_setting = LogStash::SETTINGS.get_setting("queue.type")
846
+ allow(pipeline_workers_setting).to receive(:value).and_return("memory")
847
+ pipeline_settings.each {|k, v| pipeline_settings_obj.set(k, v) }
848
+ end
849
+
790
850
  it "should not add ivars" do
791
851
  expect(pipeline1.instance_variables).to eq(pipeline2.instance_variables)
792
852
  end
metadata CHANGED
@@ -1,21 +1,21 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-core
3
3
  version: !ruby/object:Gem::Version
4
- version: 5.2.1
4
+ version: 5.2.2
5
5
  platform: java
6
6
  authors:
7
7
  - Elastic
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2017-02-09 00:00:00.000000000 Z
11
+ date: 2017-02-24 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  requirement: !ruby/object:Gem::Requirement
15
15
  requirements:
16
16
  - - '='
17
17
  - !ruby/object:Gem::Version
18
- version: 5.2.1
18
+ version: 5.2.2
19
19
  name: logstash-core-event-java
20
20
  prerelease: false
21
21
  type: :runtime
@@ -23,13 +23,13 @@ dependencies:
23
23
  requirements:
24
24
  - - '='
25
25
  - !ruby/object:Gem::Version
26
- version: 5.2.1
26
+ version: 5.2.2
27
27
  - !ruby/object:Gem::Dependency
28
28
  requirement: !ruby/object:Gem::Requirement
29
29
  requirements:
30
30
  - - '='
31
31
  - !ruby/object:Gem::Version
32
- version: 5.2.1
32
+ version: 5.2.2
33
33
  name: logstash-core-queue-jruby
34
34
  prerelease: false
35
35
  type: :runtime
@@ -37,7 +37,7 @@ dependencies:
37
37
  requirements:
38
38
  - - '='
39
39
  - !ruby/object:Gem::Version
40
- version: 5.2.1
40
+ version: 5.2.2
41
41
  - !ruby/object:Gem::Dependency
42
42
  requirement: !ruby/object:Gem::Requirement
43
43
  requirements: