logstash-core 2.1.3-java → 2.2.0-java

Potentially problematic release.


This version of logstash-core might be problematic.

Files changed (71)
  1. checksums.yaml +4 -4
  2. data/lib/logstash-core.rb +1 -3
  3. data/lib/logstash-core/logstash-core.rb +3 -0
  4. data/lib/logstash-core/version.rb +8 -0
  5. data/lib/logstash/agent.rb +48 -20
  6. data/lib/logstash/codecs/base.rb +2 -2
  7. data/lib/logstash/config/config_ast.rb +8 -3
  8. data/lib/logstash/environment.rb +0 -16
  9. data/lib/logstash/filters/base.rb +9 -5
  10. data/lib/logstash/inputs/base.rb +1 -1
  11. data/lib/logstash/output_delegator.rb +150 -0
  12. data/lib/logstash/outputs/base.rb +37 -40
  13. data/lib/logstash/pipeline.rb +259 -178
  14. data/lib/logstash/pipeline_reporter.rb +114 -0
  15. data/lib/logstash/plugin.rb +1 -1
  16. data/lib/logstash/{shutdown_controller.rb → shutdown_watcher.rb} +10 -37
  17. data/lib/logstash/util.rb +17 -0
  18. data/lib/logstash/util/decorators.rb +14 -7
  19. data/lib/logstash/util/worker_threads_default_printer.rb +4 -4
  20. data/lib/logstash/util/wrapped_synchronous_queue.rb +41 -0
  21. data/lib/logstash/version.rb +10 -2
  22. data/locales/en.yml +8 -3
  23. data/logstash-core.gemspec +5 -3
  24. data/spec/{core/conditionals_spec.rb → conditionals_spec.rb} +0 -0
  25. data/spec/{core/config_spec.rb → logstash/config/config_ast_spec.rb} +0 -0
  26. data/spec/{core/config_cpu_core_strategy_spec.rb → logstash/config/cpu_core_strategy_spec.rb} +0 -0
  27. data/spec/{core/config_defaults_spec.rb → logstash/config/defaults_spec.rb} +0 -0
  28. data/spec/{core/config_mixin_spec.rb → logstash/config/mixin_spec.rb} +0 -0
  29. data/spec/{core → logstash}/environment_spec.rb +0 -0
  30. data/spec/{filters → logstash/filters}/base_spec.rb +0 -0
  31. data/spec/{inputs → logstash/inputs}/base_spec.rb +0 -0
  32. data/spec/{lib/logstash → logstash}/java_integration_spec.rb +0 -0
  33. data/spec/{util → logstash}/json_spec.rb +0 -0
  34. data/spec/logstash/output_delegator_spec.rb +126 -0
  35. data/spec/logstash/outputs/base_spec.rb +40 -0
  36. data/spec/logstash/pipeline_reporter_spec.rb +85 -0
  37. data/spec/{core → logstash}/pipeline_spec.rb +128 -16
  38. data/spec/{core → logstash}/plugin_spec.rb +47 -1
  39. data/spec/logstash/runner_spec.rb +68 -0
  40. data/spec/{core/shutdown_controller_spec.rb → logstash/shutdown_watcher_spec.rb} +17 -11
  41. data/spec/{util → logstash/util}/buftok_spec.rb +0 -0
  42. data/spec/{util → logstash/util}/charset_spec.rb +0 -0
  43. data/spec/{util → logstash/util}/defaults_printer_spec.rb +4 -4
  44. data/spec/{util → logstash/util}/java_version_spec.rb +0 -0
  45. data/spec/{util → logstash/util}/plugin_version_spec.rb +0 -0
  46. data/spec/{util → logstash/util}/unicode_trimmer_spec.rb +0 -0
  47. data/spec/{util → logstash/util}/worker_threads_default_printer_spec.rb +8 -8
  48. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +28 -0
  49. data/spec/{util_spec.rb → logstash/util_spec.rb} +0 -0
  50. metadata +74 -81
  51. data/lib/logstash/event.rb +0 -275
  52. data/lib/logstash/patches/bundler.rb +0 -36
  53. data/lib/logstash/sized_queue.rb +0 -8
  54. data/lib/logstash/string_interpolation.rb +0 -140
  55. data/lib/logstash/timestamp.rb +0 -97
  56. data/lib/logstash/util/accessors.rb +0 -123
  57. data/spec/core/event_spec.rb +0 -518
  58. data/spec/core/runner_spec.rb +0 -40
  59. data/spec/core/timestamp_spec.rb +0 -84
  60. data/spec/coverage_helper.rb +0 -24
  61. data/spec/lib/logstash/bundler_spec.rb +0 -121
  62. data/spec/license_spec.rb +0 -67
  63. data/spec/outputs/base_spec.rb +0 -26
  64. data/spec/plugin_manager/install_spec.rb +0 -28
  65. data/spec/plugin_manager/update_spec.rb +0 -39
  66. data/spec/plugin_manager/util_spec.rb +0 -71
  67. data/spec/spec_helper.rb +0 -11
  68. data/spec/util/accessors_spec.rb +0 -170
  69. data/spec/util/compress_spec.rb +0 -121
  70. data/spec/util/gemfile_spec.rb +0 -212
  71. data/spec/util/retryable_spec.rb +0 -139
@@ -11,21 +11,38 @@ require "logstash/inputs/base"
  require "logstash/outputs/base"
  require "logstash/config/cpu_core_strategy"
  require "logstash/util/defaults_printer"
- require "logstash/shutdown_controller"
+ require "logstash/shutdown_watcher"
+ require "logstash/util/wrapped_synchronous_queue"
+ require "logstash/pipeline_reporter"
+ require "logstash/output_delegator"

  module LogStash; class Pipeline
- attr_reader :inputs, :filters, :outputs, :input_to_filter, :filter_to_output
-
- def initialize(configstr)
+ attr_reader :inputs, :filters, :outputs, :worker_threads, :events_consumed, :events_filtered, :reporter, :pipeline_id, :logger
+
+ DEFAULT_SETTINGS = {
+ :default_pipeline_workers => LogStash::Config::CpuCoreStrategy.maximum,
+ :pipeline_batch_size => 125,
+ :pipeline_batch_delay => 5, # in milliseconds
+ :flush_interval => 5, # in seconds
+ :flush_timeout_interval => 60 # in seconds
+ }
+ MAX_INFLIGHT_WARN_THRESHOLD = 10_000
+
+ def initialize(config_str, settings = {})
+ @pipeline_id = settings[:pipeline_id] || self.object_id
  @logger = Cabin::Channel.get(LogStash)
+ @settings = DEFAULT_SETTINGS.clone
+ settings.each {|setting, value| configure(setting, value) }
+ @reporter = LogStash::PipelineReporter.new(@logger, self)

  @inputs = nil
  @filters = nil
  @outputs = nil

- grammar = LogStashConfigParser.new
- @config = grammar.parse(configstr)
+ @worker_threads = []

+ grammar = LogStashConfigParser.new
+ @config = grammar.parse(config_str)
  if @config.nil?
  raise LogStash::ConfigurationError, grammar.failure_reason
  end
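Note: the new constructor above takes the config string plus a settings hash; anything passed in overrides DEFAULT_SETTINGS through configure, and :pipeline_id / :pipeline_workers are also read from it. A minimal usage sketch follows; the generator/stdout config and the override values are hypothetical, and a real invocation still needs the usual logstash environment bootstrapping:

  require "logstash/pipeline"

  # Hypothetical config string and override values, purely for illustration.
  config_str = "input { generator { count => 10 } } output { stdout {} }"

  pipeline = LogStash::Pipeline.new(config_str, {
    :pipeline_id          => "demo",
    :pipeline_workers     => 4,    # same knob the -w CLI flag sets
    :pipeline_batch_size  => 250,  # events each worker pulls per iteration
    :pipeline_batch_delay => 10    # ms to wait for more events while filling a batch
  })

  pipeline.run   # blocks until the inputs finish (or shutdown is called), then returns 0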
@@ -42,18 +59,21 @@ module LogStash; class Pipeline
  raise
  end

- @input_to_filter = SizedQueue.new(20)
- # if no filters, pipe inputs directly to outputs
- @filter_to_output = filters? ? SizedQueue.new(20) : @input_to_filter
-
- @settings = {
- "default-filter-workers" => LogStash::Config::CpuCoreStrategy.fifty_percent
- }
+ @input_queue = LogStash::Util::WrappedSynchronousQueue.new
+ @events_filtered = Concurrent::AtomicFixnum.new(0)
+ @events_consumed = Concurrent::AtomicFixnum.new(0)

+ # We generally only want one thread at a time able to access pop/take/poll operations
+ # from this queue. We also depend on this to be able to block consumers while we snapshot
+ # in-flight buffers
+ @input_queue_pop_mutex = Mutex.new
+ @input_threads = []
  # @ready requires thread safety since it is typically polled from outside the pipeline thread
  @ready = Concurrent::AtomicBoolean.new(false)
- @input_threads = []
- @filter_threads = []
+ @running = Concurrent::AtomicBoolean.new(false)
+ @flushing = Concurrent::AtomicReference.new(false)
+
+ start_flusher
  end # def initialize

  def ready?
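Note: the two SizedQueue(20) hand-offs are gone; inputs now feed a single LogStash::Util::WrappedSynchronousQueue that the worker pool drains in batches. The wrapper itself is added in data/lib/logstash-core/../logstash/util/wrapped_synchronous_queue.rb and is not shown in this hunk; assuming it is a thin JRuby facade over java.util.concurrent.SynchronousQueue exposing the push/take/poll calls used below, it would look roughly like this sketch:

  # Rough sketch only; see data/lib/logstash/util/wrapped_synchronous_queue.rb for the real class.
  require "java"

  module LogStash; module Util
    class WrappedSynchronousQueue
      java_import java.util.concurrent.SynchronousQueue
      java_import java.util.concurrent.TimeUnit

      def initialize
        @queue = SynchronousQueue.new
      end

      # blocks until a worker takes the event (inputs call push / <<)
      def push(obj)
        @queue.put(obj)
      end
      alias_method :<<, :push

      # blocks until an event is available
      def take
        @queue.take
      end

      # returns nil if nothing arrives within timeout_ms
      def poll(timeout_ms)
        @queue.poll(timeout_ms, TimeUnit::MILLISECONDS)
      end
    end
  end; end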
@@ -64,24 +84,29 @@ module LogStash; class Pipeline
  @settings[setting] = value
  end

- def safe_filter_worker_count
- default = @settings["default-filter-workers"]
- thread_count = @settings["filter-workers"] #override from args "-w 8" or config
+ def safe_pipeline_worker_count
+ default = DEFAULT_SETTINGS[:default_pipeline_workers]
+ thread_count = @settings[:pipeline_workers] #override from args "-w 8" or config
  safe_filters, unsafe_filters = @filters.partition(&:threadsafe?)
+
  if unsafe_filters.any?
  plugins = unsafe_filters.collect { |f| f.class.config_name }
  case thread_count
  when nil
  # user did not specify a worker thread count
  # warn if the default is multiple
- @logger.warn("Defaulting filter worker threads to 1 because there are some filters that might not work with multiple worker threads",
- :count_was => default, :filters => plugins) if default > 1
+
+ if default > 1
+ @logger.warn("Defaulting pipeline worker threads to 1 because there are some filters that might not work with multiple worker threads",
+ :count_was => default, :filters => plugins)
+ end
+
  1 # can't allow the default value to propagate if there are unsafe filters
  when 0, 1
  1
  else
  @logger.warn("Warning: Manual override - there are filters that might not work with multiple worker threads",
- :worker_threads => thread_count, :filters => plugins)
+ :worker_threads => thread_count, :filters => plugins)
  thread_count # allow user to force this even if there are unsafe filters
  end
  else
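Note: the hunk cuts off mid-method, but the shape of the decision is clear from what is shown: the worker count now resolves against the pipeline-wide default (CpuCoreStrategy.maximum) instead of the old fifty-percent filter default, and it collapses to 1 whenever a non-thread-safe filter is present unless the user explicitly overrides it with -w. A standalone toy (not logstash code; filter names and counts are illustrative) reproducing that resolution:

  # Toy illustration of the partition + case logic above.
  Filter = Struct.new(:name, :threadsafe) do
    def threadsafe?; threadsafe; end
  end

  filters   = [Filter.new("grok", true), Filter.new("multiline", false)]
  default   = 8       # e.g. CpuCoreStrategy.maximum on an 8-core box
  requested = nil     # what "-w" would supply

  _safe, unsafe = filters.partition(&:threadsafe?)
  workers =
    if unsafe.empty?
      requested || default
    else
      case requested
      when nil  then 1          # can't fan out around non-threadsafe filters
      when 0, 1 then 1
      else requested            # explicit override, at the user's own risk
      end
    end

  puts workers                  # => 1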
@@ -94,31 +119,25 @@ module LogStash; class Pipeline
  end

  def run
+ LogStash::Util.set_thread_name("[#{pipeline_id}]-pipeline-manager")
  @logger.terminal(LogStash::Util::DefaultsPrinter.print(@settings))

- begin
- start_inputs
- start_filters if filters?
- start_outputs
- ensure
- # it is important to garantee @ready to be true after the startup sequence has been completed
- # to potentially unblock the shutdown method which may be waiting on @ready to proceed
- @ready.make_true
- end
+ start_workers

  @logger.info("Pipeline started")
  @logger.terminal("Logstash startup completed")

+ # Block until all inputs have stopped
+ # Generally this happens if SIGINT is sent and `shutdown` is called from an external thread
+
+ @running.make_true
  wait_inputs
+ @running.make_false

- if filters?
- shutdown_filters
- wait_filters
- flush_filters_to_output!(:final => true)
- end
+ @logger.info("Input plugins stopped! Will shutdown filter/output workers.")

- shutdown_outputs
- wait_outputs
+ shutdown_flusher
+ shutdown_workers

  @logger.info("Pipeline shutdown complete.")
  @logger.terminal("Logstash shutdown completed")
@@ -127,27 +146,147 @@ module LogStash; class Pipeline
  return 0
  end # def run

- def wait_inputs
- @input_threads.each(&:join)
+ def start_workers
+ @inflight_batches = {}
+
+ @worker_threads.clear # In case we're restarting the pipeline
+ begin
+ start_inputs
+ @outputs.each {|o| o.register }
+ @filters.each {|f| f.register}
+
+ pipeline_workers = safe_pipeline_worker_count
+ batch_size = @settings[:pipeline_batch_size]
+ batch_delay = @settings[:pipeline_batch_delay]
+ max_inflight = batch_size * pipeline_workers
+ @logger.info("Starting pipeline",
+ :id => self.pipeline_id,
+ :pipeline_workers => pipeline_workers,
+ :batch_size => batch_size,
+ :batch_delay => batch_delay,
+ :max_inflight => max_inflight)
+ if max_inflight > MAX_INFLIGHT_WARN_THRESHOLD
+ @logger.warn "CAUTION: Recommended inflight events max exceeded! Logstash will run with up to #{max_inflight} events in memory in your current configuration. If your message sizes are large this may cause instability with the default heap size. Please consider setting a non-standard heap size, changing the batch size (currently #{batch_size}), or changing the number of pipeline workers (currently #{pipeline_workers})"
+ end
+
+ pipeline_workers.times do |t|
+ @worker_threads << Thread.new do
+ LogStash::Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
+ worker_loop(batch_size, batch_delay)
+ end
+ end
+ ensure
+ # it is important to guarantee @ready to be true after the startup sequence has been completed
+ # to potentially unblock the shutdown method which may be waiting on @ready to proceed
+ @ready.make_true
+ end
  end

- def shutdown_filters
- @flusher_thread.kill
- @input_to_filter.push(LogStash::SHUTDOWN)
+ # Main body of what a worker thread does
+ # Repeatedly takes batches off the queue, filters, then outputs them
+ def worker_loop(batch_size, batch_delay)
+ running = true
+
+ while running
+ # To understand the purpose behind this synchronize please read the body of take_batch
+ input_batch, signal = @input_queue_pop_mutex.synchronize { take_batch(batch_size, batch_delay) }
+ running = false if signal == LogStash::SHUTDOWN
+
+ @events_consumed.increment(input_batch.size)
+
+ filtered_batch = filter_batch(input_batch)
+
+ if signal # Flush on SHUTDOWN or FLUSH
+ flush_options = (signal == LogStash::SHUTDOWN) ? {:final => true} : {}
+ flush_filters_to_batch(filtered_batch, flush_options)
+ end
+
+ @events_filtered.increment(filtered_batch.size)
+
+ output_batch(filtered_batch)
+
+ inflight_batches_synchronize { set_current_thread_inflight_batch(nil) }
+ end
  end

- def wait_filters
- @filter_threads.each(&:join)
+ def take_batch(batch_size, batch_delay)
+ batch = []
+ # Since this is externally synchronized in `worker_loop` we can guarantee that any in-flight batch
+ # that becomes visible is a full batch, not a partial batch
+ set_current_thread_inflight_batch(batch)
+
+ signal = false
+ batch_size.times do |t|
+ event = (t == 0) ? @input_queue.take : @input_queue.poll(batch_delay)
+
+ if event.nil?
+ next
+ elsif event == LogStash::SHUTDOWN || event == LogStash::FLUSH
+ # We MUST break here. If a batch consumes two SHUTDOWN events
+ # then another worker may have its SHUTDOWN 'stolen', thus blocking
+ # the pipeline. We should stop doing work after flush as well.
+ signal = event
+ break
+ else
+ batch << event
+ end
+ end
+
+ [batch, signal]
  end

- def shutdown_outputs
- # nothing, filters will do this
- @filter_to_output.push(LogStash::SHUTDOWN)
+ def filter_batch(batch)
+ batch.reduce([]) do |acc,e|
+ if e.is_a?(LogStash::Event)
+ filtered = filter_func(e)
+ filtered.each {|fe| acc << fe unless fe.cancelled?}
+ end
+ acc
+ end
+ rescue Exception => e
+ # Plugin authors should manage their own exceptions in the plugin code
+ # but if an exception is raised up to the worker thread they are considered
+ # fatal and logstash will not recover from this situation.
+ #
+ # Users need to check their configuration or see if there is a bug in the
+ # plugin.
+ @logger.error("Exception in pipelineworker, the pipeline stopped processing new events, please check your filter configuration and restart Logstash.",
+ "exception" => e, "backtrace" => e.backtrace)
+ raise
+ end
+
+ # Take an array of events and send them to the correct output
+ def output_batch(batch)
+ # Build a mapping of { output_plugin => [events...]}
+ outputs_events = batch.reduce(Hash.new { |h, k| h[k] = [] }) do |acc, event|
+ # We ask the AST to tell us which outputs to send each event to
+ # Then, we stick it in the correct bin
+
+ # output_func should never return anything other than an Array but we have lots of legacy specs
+ # that monkeypatch it and return nil. We can deprecate "|| []" after fixing these specs
+ outputs_for_event = output_func(event) || []
+
+ outputs_for_event.each { |output| acc[output] << event }
+ acc
+ end
+
+ # Now that we have our output to event mapping we can just invoke each output
+ # once with its list of events
+ outputs_events.each { |output, events| output.multi_receive(events) }
  end

- def wait_outputs
- # Wait for the outputs to stop
- @output_threads.each(&:join)
+ def set_current_thread_inflight_batch(batch)
+ @inflight_batches[Thread.current] = batch
+ end
+
+ def inflight_batches_synchronize
+ @input_queue_pop_mutex.synchronize do
+ yield(@inflight_batches)
+ end
+ end
+
+ def wait_inputs
+ @input_threads.each(&:join)
  end

  def start_inputs
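Note: one practical consequence of the batching model above is memory pressure: every worker can hold a full batch in flight, so the warning is simply batch_size * pipeline_workers compared against MAX_INFLIGHT_WARN_THRESHOLD. A quick check with illustrative numbers (the tuned values below are hypothetical):

  MAX_INFLIGHT_WARN_THRESHOLD = 10_000

  # defaults: 125 events/batch * 8 workers = 1_000 in-flight events -> no warning
  batch_size       = 2_000            # an aggressive tuning pass
  pipeline_workers = 8
  max_inflight     = batch_size * pipeline_workers              # => 16_000

  warn_needed = max_inflight > MAX_INFLIGHT_WARN_THRESHOLD      # => true
  puts "would warn: #{warn_needed} (#{max_inflight} events held in memory at peak)"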
@@ -167,45 +306,15 @@ module LogStash; class Pipeline
  end
  end

- def start_filters
- @filters.each(&:register)
- # dynamically get thread count based on filter threadsafety
- # moved this test to here to allow for future config reloading
- to_start = safe_filter_worker_count
- @filter_threads = to_start.times.collect do |i|
- Thread.new do
- LogStash::Util.set_thread_name("|filterworker.#{i}")
- filterworker
- end
- end
- actually_started = @filter_threads.select(&:alive?).size
- msg = "Worker threads expected: #{to_start}, worker threads started: #{actually_started}"
- if actually_started < to_start
- @logger.warn(msg)
- else
- @logger.info(msg)
- end
- @flusher_thread = Thread.new { Stud.interval(5) { @input_to_filter.push(LogStash::FLUSH) } }
- end
-
- def start_outputs
- @outputs.each(&:register)
- @output_threads = [
- Thread.new { outputworker }
- ]
- end
-
  def start_input(plugin)
  @input_threads << Thread.new { inputworker(plugin) }
  end

  def inputworker(plugin)
- LogStash::Util.set_thread_name("<#{plugin.class.config_name}")
- LogStash::Util.set_thread_plugin(plugin)
+ LogStash::Util::set_thread_name("[#{pipeline_id}]<#{plugin.class.config_name}")
  begin
- plugin.run(@input_to_filter)
+ plugin.run(@input_queue)
  rescue => e
- # if plugin is stopping, ignore uncatched exceptions and exit worker
  if plugin.stop?
  @logger.debug("Input plugin raised exception during shutdown, ignoring it.",
  :plugin => plugin.class.config_name, :exception => e,
@@ -233,56 +342,6 @@ module LogStash; class Pipeline
  end
  end # def inputworker

- def filterworker
- begin
- while true
- event = @input_to_filter.pop
-
- case event
- when LogStash::Event
- # filter_func returns all filtered events, including cancelled ones
- filter_func(event).each { |e| @filter_to_output.push(e) unless e.cancelled? }
- when LogStash::FlushEvent
- # handle filter flushing here so that non threadsafe filters (thus only running one filterworker)
- # don't have to deal with thread safety implementing the flush method
- flush_filters_to_output!
- when LogStash::ShutdownEvent
- # pass it down to any other filterworker and stop this worker
- @input_to_filter.push(event)
- break
- end
- end
- rescue Exception => e
- # Plugins authors should manage their own exceptions in the plugin code
- # but if an exception is raised up to the worker thread they are considered
- # fatal and logstash will not recover from this situation.
- #
- # Users need to check their configuration or see if there is a bug in the
- # plugin.
- @logger.error("Exception in filterworker, the pipeline stopped processing new events, please check your filter configuration and restart Logstash.",
- "exception" => e, "backtrace" => e.backtrace)
- raise
- ensure
- @filters.each(&:do_close)
- end
- end # def filterworker
-
- def outputworker
- LogStash::Util.set_thread_name(">output")
- @outputs.each(&:worker_setup)
-
- while true
- event = @filter_to_output.pop
- break if event == LogStash::SHUTDOWN
- output_func(event)
- LogStash::Util.set_thread_plugin(nil)
- end
- ensure
- @outputs.each do |output|
- output.worker_plugins.each(&:do_close)
- end
- end # def outputworker
-
  # initiate the pipeline shutdown sequence
  # this method is intended to be called from outside the pipeline thread
  # @param before_stop [Proc] code block called before performing stop operation on input plugins
@@ -296,13 +355,44 @@ module LogStash; class Pipeline

  before_stop.call if block_given?

+ @logger.info "Closing inputs"
  @inputs.each(&:do_stop)
+ @logger.info "Closed inputs"
  end # def shutdown

+ # After `shutdown` is called from an external thread this is called from the main thread to
+ # tell the worker threads to stop and then block until they've fully stopped
+ # This also stops all filter and output plugins
+ def shutdown_workers
+ # Each worker thread will receive this exactly once!
+ @worker_threads.each do |t|
+ @logger.debug("Pushing shutdown", :thread => t)
+ @input_queue.push(LogStash::SHUTDOWN)
+ end
+
+ @worker_threads.each do |t|
+ @logger.debug("Shutdown waiting for worker thread #{t}")
+ t.join
+ end
+
+ @filters.each(&:do_close)
+ @outputs.each(&:do_close)
+ end
+
  def plugin(plugin_type, name, *args)
  args << {} if args.empty?
+
  klass = LogStash::Plugin.lookup(plugin_type, name)
- return klass.new(*args)
+
+ if plugin_type == "output"
+ LogStash::OutputDelegator.new(@logger, klass, default_output_workers, *args)
+ else
+ klass.new(*args)
+ end
+ end
+
+ def default_output_workers
+ @settings[:pipeline_workers] || @settings[:default_pipeline_workers]
  end

  # for backward compatibility in devutils for the rspec helpers, this method is not used
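Note: outputs are no longer instantiated directly. plugin wraps the output class in a LogStash::OutputDelegator sized by default_output_workers, and the worker threads later call multi_receive on that wrapper (see output_batch earlier in the diff). The real implementation is the new data/lib/logstash/output_delegator.rb (+150 lines, not shown here); purely as a conceptual sketch of the idea, a naive delegator could look like this, with NaiveOutputDelegator and the borrow/return queue being hypothetical names:

  # Conceptual sketch only; the shipped class is more involved (see data/lib/logstash/output_delegator.rb).
  class NaiveOutputDelegator
    def initialize(logger, klass, worker_count, *plugin_args)
      @logger  = logger
      @workers = worker_count.times.map { klass.new(*plugin_args) }
      @worker_queue = Queue.new
      @workers.each { |w| @worker_queue << w }
    end

    def register
      @workers.each(&:register)
    end

    # Called once per batch with every event bound for this output.
    def multi_receive(events)
      worker = @worker_queue.pop            # borrow an output instance
      events.each { |event| worker.receive(event) }
    ensure
      @worker_queue << worker if worker     # return it for the next batch
    end

    def do_close
      @workers.each(&:do_close)
    end
  end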
@@ -312,6 +402,7 @@ module LogStash; class Pipeline
  filter_func(event).each { |e| block.call(e) }
  end

+
  # perform filters flush and yield flushed events to the passed block
  # @param options [Hash]
  # @option options [Boolean] :final => true to signal a final shutdown flush
@@ -323,61 +414,51 @@ module LogStash; class Pipeline
  end
  end

+ def start_flusher
+ @flusher_thread = Thread.new do
+ while Stud.stoppable_sleep(5, 0.1) { @running.false? }
+ flush
+ break if @running.false?
+ end
+ end
+ end
+
+ def shutdown_flusher
+ @flusher_thread.join
+ end
+
+ def flush
+ if @flushing.compare_and_set(false, true)
+ @logger.debug? && @logger.debug("Pushing flush onto pipeline")
+ @input_queue.push(LogStash::FLUSH)
+ end
+ end
+
  # perform filters flush into the output queue
  # @param options [Hash]
  # @option options [Boolean] :final => true to signal a final shutdown flush
- def flush_filters_to_output!(options = {})
+ def flush_filters_to_batch(batch, options = {})
  flush_filters(options) do |event|
  unless event.cancelled?
  @logger.debug? and @logger.debug("Pushing flushed events", :event => event)
- @filter_to_output.push(event)
+ batch << event
  end
  end
- end # flush_filters_to_output!
-
- def inflight_count
- data = {}
- total = 0
-
- input_to_filter = @input_to_filter.size
- total += input_to_filter
- filter_to_output = @filter_to_output.size
- total += filter_to_output
-
- data["input_to_filter"] = input_to_filter if input_to_filter > 0
- data["filter_to_output"] = filter_to_output if filter_to_output > 0
-
- output_worker_queues = []
- @outputs.each do |output|
- next unless output.worker_queue && output.worker_queue.size > 0
- plugin_info = output.debug_info
- size = output.worker_queue.size
- total += size
- plugin_info << size
- output_worker_queues << plugin_info
- end
- data["output_worker_queues"] = output_worker_queues unless output_worker_queues.empty?
- data["total"] = total
- data
- end

- def stalling_threads
- plugin_threads
- .reject {|t| t["blocked_on"] } # known begnin blocking statuses
- .each {|t| t.delete("backtrace") }
- .each {|t| t.delete("blocked_on") }
- .each {|t| t.delete("status") }
- end
+ @flushing.set(false)
+ end # flush_filters_to_output!

- def plugin_threads
- input_threads = @input_threads.select {|t| t.alive? }.map {|t| thread_info(t) }
- filter_threads = @filter_threads.select {|t| t.alive? }.map {|t| thread_info(t) }
- output_threads = @output_threads.select {|t| t.alive? }.map {|t| thread_info(t) }
- output_worker_threads = @outputs.flat_map {|output| output.worker_threads }.map {|t| thread_info(t) }
- input_threads + filter_threads + output_threads + output_worker_threads
+ def plugin_threads_info
+ input_threads = @input_threads.select {|t| t.alive? }
+ worker_threads = @worker_threads.select {|t| t.alive? }
+ (input_threads + worker_threads).map {|t| LogStash::Util.thread_info(t) }
  end

- def thread_info(thread)
- LogStash::Util.thread_info(thread)
+ def stalling_threads_info
+ plugin_threads_info
+ .reject {|t| t["blocked_on"] } # known benign blocking statuses
+ .each {|t| t.delete("backtrace") }
+ .each {|t| t.delete("blocked_on") }
+ .each {|t| t.delete("status") }
  end
- end; end
+ end end
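
Note: the new flusher replaces the old Stud.interval thread. A single background thread wakes every 5 seconds (Stud.stoppable_sleep polls @running every 0.1s so shutdown stays prompt) and pushes LogStash::FLUSH onto the input queue, while the @flushing AtomicReference keeps at most one flush request in flight at a time; the flag is re-armed by the @flushing.set(false) at the end of flush_filters_to_batch. A stripped-down, standalone sketch of that gate (not logstash code):

  require "concurrent"   # concurrent-ruby, already a logstash dependency

  flushing = Concurrent::AtomicReference.new(false)
  queue    = Queue.new

  request_flush = lambda do
    # only the first caller wins until a worker resets the flag
    queue << :flush if flushing.compare_and_set(false, true)
  end

  3.times { request_flush.call }
  puts queue.size            # => 1, duplicate requests were dropped

  queue.pop                  # a worker drains the flush marker...
  flushing.set(false)        # ...and re-arms the gate once the flush completed
  request_flush.call
  puts queue.size            # => 1 again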