logstash-core 7.0.0.alpha2-java → 7.0.0.beta1-java

Sign up to get free protection for your applications and to get access to all the features.
Files changed (48)
  1. checksums.yaml +4 -4
  2. data/lib/logstash/agent.rb +62 -57
  3. data/lib/logstash/compiler/lscl.rb +2 -3
  4. data/lib/logstash/config/config_ast.rb +59 -17
  5. data/lib/logstash/environment.rb +1 -1
  6. data/lib/logstash/instrument/metric_store.rb +1 -1
  7. data/lib/logstash/instrument/periodic_poller/dlq.rb +5 -7
  8. data/lib/logstash/instrument/periodic_poller/pq.rb +6 -8
  9. data/lib/logstash/instrument/periodic_pollers.rb +3 -3
  10. data/lib/logstash/java_pipeline.rb +36 -15
  11. data/lib/logstash/patches/resolv.rb +0 -21
  12. data/lib/logstash/pipeline.rb +27 -10
  13. data/lib/logstash/pipeline_action/base.rb +1 -1
  14. data/lib/logstash/pipeline_action/create.rb +7 -13
  15. data/lib/logstash/pipeline_action/reload.rb +35 -12
  16. data/lib/logstash/pipeline_action/stop.rb +4 -6
  17. data/lib/logstash/pipeline_settings.rb +1 -1
  18. data/lib/logstash/pipelines_registry.rb +166 -0
  19. data/lib/logstash/settings.rb +5 -5
  20. data/lib/logstash/state_resolver.rb +5 -5
  21. data/lib/logstash/util/duration_formatter.rb +1 -1
  22. data/lib/logstash/util/safe_uri.rb +1 -0
  23. data/lib/logstash/util.rb +11 -1
  24. data/locales/en.yml +1 -1
  25. data/logstash-core.gemspec +17 -20
  26. data/spec/logstash/acked_queue_concurrent_stress_spec.rb +1 -1
  27. data/spec/logstash/agent/converge_spec.rb +25 -31
  28. data/spec/logstash/agent_spec.rb +5 -5
  29. data/spec/logstash/event_spec.rb +2 -2
  30. data/spec/logstash/instrument/wrapped_write_client_spec.rb +1 -1
  31. data/spec/logstash/legacy_ruby_event_spec.rb +6 -5
  32. data/spec/logstash/pipeline_action/create_spec.rb +9 -8
  33. data/spec/logstash/pipeline_action/reload_spec.rb +10 -9
  34. data/spec/logstash/pipeline_action/stop_spec.rb +4 -3
  35. data/spec/logstash/pipelines_registry_spec.rb +220 -0
  36. data/spec/logstash/queue_factory_spec.rb +2 -1
  37. data/spec/logstash/runner_spec.rb +2 -0
  38. data/spec/logstash/settings/array_coercible_spec.rb +1 -1
  39. data/spec/logstash/settings/bytes_spec.rb +2 -2
  40. data/spec/logstash/settings/port_range_spec.rb +1 -1
  41. data/spec/logstash/state_resolver_spec.rb +26 -22
  42. data/spec/logstash/util/safe_uri_spec.rb +40 -0
  43. data/spec/logstash/util/time_value_spec.rb +1 -1
  44. data/spec/logstash/util/wrapped_acked_queue_spec.rb +1 -1
  45. data/spec/support/matchers.rb +25 -19
  46. data/spec/support/shared_contexts.rb +3 -3
  47. data/versions-gem-copy.yml +6 -6
  48. metadata +73 -88
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 5e824ba00a89915dc5806c980c92edc6fcdb117f7c92eba1ce76bdee8a6730b6
4
- data.tar.gz: 9bc5a09b8af714e11b2ac6390d05698a85d5523d0b2eff30ef3d197f2fad8f5c
3
+ metadata.gz: 650eff422fce2ce58c826a22c759238b5cb14bb407232b3cb0ffe2119635df29
4
+ data.tar.gz: 44a83cccdc93c56fcc4952746675702b7593e0f494b167efcfc4fe002566dfb6
5
5
  SHA512:
6
- metadata.gz: 31f101ef4e58029d9353da647dd5cef4c22d96b41ab905be18fe469ad6028cc67cfaa863a809e352f8cb887cf728f517500040a730e1c0d5664945f2c7977e7b
7
- data.tar.gz: 99f257a1da0b6c3a30aba429edffca4b0981ab7700a95c41f8ba90701415c98153f5ed927f4b50e2da3c50783bddf8ba52eaf82d6ce2337de97bc26c3dda9a5d
6
+ metadata.gz: 94c5e143d649f75006330dfc9f77d3551d011aabef1e18017cb0eff8f96ae16d6690b5487acbabec3e9adae5eb0d3a4cb8bfb974a174cbb8d4073f659ab821cf
7
+ data.tar.gz: 1500544458de19cb215a0f45e6b83a00b4d8ef0b05f951536a080f2fad359502eaf847792b20719e1781727caddc11349d7c504c82d0cfe47aa5ff8e158f66e8
@@ -8,6 +8,7 @@ require "logstash/webserver"
8
8
  require "logstash/config/source_loader"
9
9
  require "logstash/pipeline_action"
10
10
  require "logstash/state_resolver"
11
+ require "logstash/pipelines_registry"
11
12
  require "stud/trap"
12
13
  require "uri"
13
14
  require "socket"
@@ -19,7 +20,7 @@ class LogStash::Agent
19
20
  include LogStash::Util::Loggable
20
21
  STARTED_AT = Time.now.freeze
21
22
 
22
- attr_reader :metric, :name, :settings, :webserver, :dispatcher, :ephemeral_id, :pipelines, :pipeline_bus
23
+ attr_reader :metric, :name, :settings, :dispatcher, :ephemeral_id, :pipeline_bus
23
24
  attr_accessor :logger
24
25
 
25
26
  # initialize method for LogStash::Agent
@@ -36,11 +37,12 @@ class LogStash::Agent
36
37
  # Mutex to synchonize in the exclusive method
37
38
  # Initial usage for the Ruby pipeline initialization which is not thread safe
38
39
  @exclusive_lock = Mutex.new
40
+ @webserver_control_lock = Mutex.new
39
41
 
40
42
  # Special bus object for inter-pipelines communications. Used by the `pipeline` input/output
41
43
  @pipeline_bus = org.logstash.plugins.pipeline.PipelineBus.new
42
44
 
43
- @pipelines = java.util.concurrent.ConcurrentHashMap.new();
45
+ @pipelines_registry = LogStash::PipelinesRegistry.new
44
46
 
45
47
  @name = setting("node.name")
46
48
  @http_host = setting("http.host")
@@ -114,14 +116,17 @@ class LogStash::Agent
114
116
  converge_state_and_update unless stopped?
115
117
  end
116
118
  else
117
- return 1 if clean_state?
119
+ # exit with error status if the initial converge_state_and_update did not create any pipeline
120
+ return 1 if @pipelines_registry.empty?
118
121
 
119
122
  while !Stud.stop?
120
- if clean_state? || running_user_defined_pipelines?
121
- sleep(0.5)
122
- else
123
- break
124
- end
123
+ # exit if all pipelines are terminated and none are reloading
124
+ break if no_pipeline?
125
+
126
+ # exit if there are no user defined pipelines (not system pipeline) and none are reloading
127
+ break if !running_user_defined_pipelines?
128
+
129
+ sleep(0.5)
125
130
  end
126
131
  end
127
132
 
@@ -135,11 +140,11 @@ class LogStash::Agent
135
140
  end
136
141
 
137
142
  def running?
138
- @running.value
143
+ @running.true?
139
144
  end
140
145
 
141
146
  def stopped?
142
- !@running.value
147
+ @running.false?
143
148
  end
144
149
 
145
150
  def converge_state_and_update
@@ -178,7 +183,7 @@ class LogStash::Agent
178
183
 
179
184
  # Calculate the Logstash uptime in milliseconds
180
185
  #
181
- # @return [Fixnum] Uptime in milliseconds
186
+ # @return [Integer] Uptime in milliseconds
182
187
  def uptime
183
188
  ((Time.now.to_f - STARTED_AT.to_f) * 1000.0).to_i
184
189
  end
@@ -233,43 +238,48 @@ class LogStash::Agent
233
238
  @id_path ||= ::File.join(settings.get("path.data"), "uuid")
234
239
  end
235
240
 
241
+ #
242
+ # Backward compatibility proxies to the PipelineRegistry
243
+ #
244
+
236
245
  def get_pipeline(pipeline_id)
237
- pipelines.get(pipeline_id)
246
+ @pipelines_registry.get_pipeline(pipeline_id)
238
247
  end
239
248
 
240
249
  def pipelines_count
241
- pipelines.size
250
+ @pipelines_registry.size
242
251
  end
243
252
 
244
253
  def running_pipelines
245
- pipelines.select {|id,pipeline| running_pipeline?(id) }
246
- end
254
+ @pipelines_registry.running_pipelines
255
+ end
247
256
 
248
257
  def non_running_pipelines
249
- pipelines.select {|id,pipeline| !running_pipeline?(id) }
258
+ @pipelines_registry.non_running_pipelines
250
259
  end
251
260
 
252
261
  def running_pipelines?
253
- running_pipelines_count > 0
262
+ @pipelines_registry.running_pipelines.any?
254
263
  end
255
264
 
256
265
  def running_pipelines_count
257
- running_pipelines.size
266
+ @pipelines_registry.running_pipelines.size
258
267
  end
259
268
 
260
269
  def running_user_defined_pipelines?
261
- !running_user_defined_pipelines.empty?
270
+ @pipelines_registry.running_user_defined_pipelines.any?
262
271
  end
263
272
 
264
273
  def running_user_defined_pipelines
265
- pipelines.select {|id, pipeline| running_pipeline?(id) && !pipeline.system? }
274
+ @pipelines_registry.running_user_defined_pipelines
266
275
  end
267
276
 
268
- def with_running_user_defined_pipelines
269
- yield running_user_defined_pipelines
277
+ def no_pipeline?
278
+ @pipelines_registry.running_pipelines.empty?
270
279
  end
271
280
 
272
281
  private
282
+
273
283
  def transition_to_stopped
274
284
  @running.make_false
275
285
  end
@@ -294,7 +304,7 @@ class LogStash::Agent
294
304
  converge_result = LogStash::ConvergeResult.new(pipeline_actions.size)
295
305
 
296
306
  pipeline_actions.map do |action|
297
- Thread.new do
307
+ Thread.new(action, converge_result) do |action, converge_result|
298
308
  java.lang.Thread.currentThread().setName("Converge #{action}");
299
309
  # We execute every task we need to converge the current state of pipelines
300
310
  # for every task we will record the action result, that will help us
@@ -310,34 +320,35 @@ class LogStash::Agent
310
320
  # that we currently have.
311
321
  begin
312
322
  logger.debug("Executing action", :action => action)
313
- action_result = action.execute(self, pipelines)
323
+ action_result = action.execute(self, @pipelines_registry)
314
324
  converge_result.add(action, action_result)
315
325
 
316
326
  unless action_result.successful?
317
- logger.error("Failed to execute action", :id => action.pipeline_id,
318
- :action_type => action_result.class, :message => action_result.message,
319
- :backtrace => action_result.backtrace)
327
+ logger.error("Failed to execute action",
328
+ :id => action.pipeline_id,
329
+ :action_type => action_result.class,
330
+ :message => action_result.message,
331
+ :backtrace => action_result.backtrace
332
+ )
320
333
  end
321
- rescue SystemExit => e
322
- converge_result.add(action, e)
323
- rescue Exception => e
334
+ rescue SystemExit, Exception => e
324
335
  logger.error("Failed to execute action", :action => action, :exception => e.class.name, :message => e.message, :backtrace => e.backtrace)
325
336
  converge_result.add(action, e)
326
337
  end
327
338
  end
328
339
  end.each(&:join)
329
340
 
330
- if logger.trace?
331
- logger.trace("Converge results", :success => converge_result.success?,
332
- :failed_actions => converge_result.failed_actions.collect { |a, r| "id: #{a.pipeline_id}, action_type: #{a.class}, message: #{r.message}" },
333
- :successful_actions => converge_result.successful_actions.collect { |a, r| "id: #{a.pipeline_id}, action_type: #{a.class}" })
334
- end
341
+ logger.trace? && logger.trace("Converge results",
342
+ :success => converge_result.success?,
343
+ :failed_actions => converge_result.failed_actions.collect { |a, r| "id: #{a.pipeline_id}, action_type: #{a.class}, message: #{r.message}" },
344
+ :successful_actions => converge_result.successful_actions.collect { |a, r| "id: #{a.pipeline_id}, action_type: #{a.class}" }
345
+ )
335
346
 
336
347
  converge_result
337
348
  end
338
349
 
339
350
  def resolve_actions(pipeline_configs)
340
- @state_resolver.resolve(@pipelines, pipeline_configs)
351
+ @state_resolver.resolve(@pipelines_registry, pipeline_configs)
341
352
  end
342
353
 
343
354
  def dispatch_events(converge_results)
@@ -354,20 +365,24 @@ class LogStash::Agent
354
365
  end
355
366
 
356
367
  def start_webserver
357
- options = {:http_host => @http_host, :http_ports => @http_port, :http_environment => @http_environment }
358
- @webserver = LogStash::WebServer.new(@logger, self, options)
359
- @webserver_thread = Thread.new(@webserver) do |webserver|
360
- LogStash::Util.set_thread_name("Api Webserver")
361
- webserver.run
368
+ @webserver_control_lock.synchronize do
369
+ options = {:http_host => @http_host, :http_ports => @http_port, :http_environment => @http_environment }
370
+ @webserver = LogStash::WebServer.new(@logger, self, options)
371
+ @webserver_thread = Thread.new(@webserver) do |webserver|
372
+ LogStash::Util.set_thread_name("Api Webserver")
373
+ webserver.run
374
+ end
362
375
  end
363
376
  end
364
377
 
365
378
  def stop_webserver
366
- if @webserver
367
- @webserver.stop
368
- if @webserver_thread.join(5).nil?
369
- @webserver_thread.kill
370
- @webserver_thread.join
379
+ @webserver_control_lock.synchronize do
380
+ if @webserver
381
+ @webserver.stop
382
+ if @webserver_thread.join(5).nil?
383
+ @webserver_thread.kill
384
+ @webserver_thread.join
385
+ end
371
386
  end
372
387
  end
373
388
  end
@@ -395,7 +410,7 @@ class LogStash::Agent
395
410
  end
396
411
 
397
412
  def shutdown_pipelines
398
- logger.debug("Shutting down all pipelines", :pipelines_count => pipelines_count)
413
+ logger.debug("Shutting down all pipelines", :pipelines_count => running_pipelines_count)
399
414
 
400
415
  # In this context I could just call shutdown, but I've decided to
401
416
  # use the stop action implementation for that so we have the same code.
@@ -404,16 +419,6 @@ class LogStash::Agent
404
419
  converge_state(pipeline_actions)
405
420
  end
406
421
 
407
- def running_pipeline?(pipeline_id)
408
- pipeline = get_pipeline(pipeline_id)
409
- return false unless pipeline
410
- thread = pipeline.thread
411
- thread.is_a?(Thread) && thread.alive?
412
- end
413
-
414
- def clean_state?
415
- pipelines.empty?
416
- end
417
422
 
418
423
  def setting(key)
419
424
  @settings.get(key)
@@ -317,10 +317,9 @@ module LogStashCompilerLSCLGrammar; module LogStash; module Compiler; module LSC
317
317
 
318
318
  def precedence(op)
319
319
  # Believe this is right for logstash?
320
- case op
321
- when AND_METHOD
320
+ if op == AND_METHOD
322
321
  2
323
- when OR_METHOD
322
+ elsif op == OR_METHOD
324
323
  1
325
324
  else
326
325
  raise ArgumentError, "Unexpected operator #{op}"
@@ -7,28 +7,60 @@ require "logstash/compiler/treetop_monkeypatches"
7
7
  module LogStash; module Config; module AST
8
8
  PROCESS_ESCAPE_SEQUENCES = :process_escape_sequences
9
9
 
10
- def self.deferred_conditionals=(val)
11
- @deferred_conditionals = val
12
- end
10
+ class << self
11
+ # @api private
12
+ MUTEX = Mutex.new
13
13
 
14
- def self.deferred_conditionals
15
- @deferred_conditionals
16
- end
14
+ # Executes the given block with exclusive access to the AST global variables
15
+ #
16
+ # @yieldreturn [Object]: the object that is returned from the block is returned by this method
17
+ #
18
+ # @return [Object]
19
+ def exclusive
20
+ MUTEX.synchronize { yield }
21
+ end
17
22
 
18
- def self.deferred_conditionals_index
19
- @deferred_conditionals_index
20
- end
23
+ def deferred_conditionals=(val)
24
+ ensure_exclusive!
25
+ @deferred_conditionals = val
26
+ end
21
27
 
22
- def self.deferred_conditionals_index=(val)
23
- @deferred_conditionals_index = val
24
- end
28
+ def deferred_conditionals
29
+ ensure_exclusive!
30
+ @deferred_conditionals
31
+ end
25
32
 
26
- def self.plugin_instance_index
27
- @plugin_instance_index
28
- end
33
+ def deferred_conditionals_index
34
+ ensure_exclusive!
35
+ @deferred_conditionals_index
36
+ end
37
+
38
+ def deferred_conditionals_index=(val)
39
+ ensure_exclusive!
40
+ @deferred_conditionals_index = val
41
+ end
29
42
 
30
- def self.plugin_instance_index=(val)
31
- @plugin_instance_index = val
43
+ def plugin_instance_index
44
+ ensure_exclusive!
45
+ @plugin_instance_index
46
+ end
47
+
48
+ def plugin_instance_index=(val)
49
+ ensure_exclusive!
50
+ @plugin_instance_index = val
51
+ end
52
+
53
+ private
54
+
55
+ # Raises a descriptive error if the thread in which it is invoked does
56
+ # not have exclusive access.
57
+ #
58
+ # @raise [RuntimeError]
59
+ def ensure_exclusive!
60
+ return if MUTEX.owned?
61
+
62
+ raise "Illegal access without exclusive lock at `#{caller[1]}`"
63
+ end
32
64
  end
33
65
 
34
66
  class Node < Treetop::Runtime::SyntaxNode
@@ -46,6 +78,15 @@ module LogStash; module Config; module AST
46
78
 
47
79
 
48
80
  def compile
81
+ LogStash::Config::AST.exclusive { do_compile }
82
+ end
83
+
84
+ private
85
+
86
+ # NON-threadsafe method compiles an AST into executable Ruby code.
87
+ # @see Config#compile, which is a threadsafe wrapper around this method.
88
+ # @api private
89
+ def do_compile
49
90
  LogStash::Config::AST.deferred_conditionals = []
50
91
  LogStash::Config::AST.deferred_conditionals_index = 0
51
92
  LogStash::Config::AST.plugin_instance_index = 0
@@ -491,6 +532,7 @@ module LogStash; module Config; module AST
491
532
  end; end; end
492
533
 
493
534
 
535
+
494
536
  # Monkeypatch Treetop::Runtime::SyntaxNode's inspect method to skip
495
537
  # any Whitespace or SyntaxNodes with no children.
496
538
  class Treetop::Runtime::SyntaxNode
@@ -38,7 +38,6 @@ module LogStash
38
38
  Setting::String.new("pipeline.id", "main"),
39
39
  Setting::Boolean.new("pipeline.system", false),
40
40
  Setting::PositiveInteger.new("pipeline.workers", LogStash::Config::CpuCoreStrategy.maximum),
41
- Setting::PositiveInteger.new("pipeline.output.workers", 1),
42
41
  Setting::PositiveInteger.new("pipeline.batch.size", 125),
43
42
  Setting::Numeric.new("pipeline.batch.delay", 50), # in milliseconds
44
43
  Setting::Boolean.new("pipeline.unsafe_shutdown", false),
@@ -62,6 +61,7 @@ module LogStash
62
61
  Setting::Numeric.new("queue.checkpoint.acks", 1024), # 0 is unlimited
63
62
  Setting::Numeric.new("queue.checkpoint.writes", 1024), # 0 is unlimited
64
63
  Setting::Numeric.new("queue.checkpoint.interval", 1000), # 0 is no time-based checkpointing
64
+ Setting::Boolean.new("queue.checkpoint.retry", false),
65
65
  Setting::Boolean.new("dead_letter_queue.enable", false),
66
66
  Setting::Bytes.new("dead_letter_queue.max_bytes", "1024mb"),
67
67
  Setting::TimeValue.new("slowlog.threshold.warn", "-1"),
@@ -302,7 +302,7 @@ module LogStash module Instrument
302
302
  #
303
303
  # @param [Concurrent::Map] Map to search for the key
304
304
  # @param [Array] List of path to create
305
- # @param [Fixnum] Which part from the list to create
305
+ # @param [Integer] Which part from the list to create
306
306
  #
307
307
  def fetch_or_store_namespace_recursively(map, namespaces_path, idx = 0)
308
308
  current = namespaces_path[idx]
@@ -10,13 +10,11 @@ module LogStash module Instrument module PeriodicPoller
10
10
  end
11
11
 
12
12
  def collect
13
- pipelines = @agent.with_running_user_defined_pipelines {|pipelines| pipelines}
14
- unless pipelines.nil?
15
- pipelines.each {|_, pipeline|
16
- unless pipeline.nil?
17
- pipeline.collect_dlq_stats
18
- end
19
- }
13
+ pipelines = @agent.running_user_defined_pipelines
14
+ pipelines.each do |_, pipeline|
15
+ unless pipeline.nil?
16
+ pipeline.collect_dlq_stats
17
+ end
20
18
  end
21
19
  end
22
20
  end
@@ -11,14 +11,12 @@ module LogStash module Instrument module PeriodicPoller
11
11
  end
12
12
 
13
13
  def collect
14
- pipelines = @agent.with_running_user_defined_pipelines {|pipelines| pipelines}
15
- unless pipelines.nil?
16
- pipelines.each {|_, pipeline|
17
- unless pipeline.nil?
18
- pipeline.collect_stats
19
- end
20
- }
14
+ pipelines = @agent.running_user_defined_pipelines
15
+ pipelines.each do |_, pipeline|
16
+ unless pipeline.nil?
17
+ pipeline.collect_stats
18
+ end
21
19
  end
22
20
  end
23
21
  end
24
- end; end; end
22
+ end end end
@@ -11,12 +11,12 @@ module LogStash module Instrument
11
11
  class PeriodicPollers
12
12
  attr_reader :metric
13
13
 
14
- def initialize(metric, queue_type, pipelines)
14
+ def initialize(metric, queue_type, agent)
15
15
  @metric = metric
16
16
  @periodic_pollers = [PeriodicPoller::Os.new(metric),
17
17
  PeriodicPoller::JVM.new(metric),
18
- PeriodicPoller::PersistentQueue.new(metric, queue_type, pipelines),
19
- PeriodicPoller::DeadLetterQueue.new(metric, pipelines)]
18
+ PeriodicPoller::PersistentQueue.new(metric, queue_type, agent),
19
+ PeriodicPoller::DeadLetterQueue.new(metric, agent)]
20
20
  end
21
21
 
22
22
  def start
@@ -25,8 +25,6 @@ module LogStash; class JavaPipeline < JavaBasePipeline
25
25
 
26
26
  @worker_threads = []
27
27
 
28
- @java_inputs_controller = org.logstash.execution.InputsController.new(lir_execution.javaInputs)
29
-
30
28
  @drain_queue = settings.get_value("queue.drain") || settings.get("queue.type") == "memory"
31
29
 
32
30
  @events_filtered = java.util.concurrent.atomic.LongAdder.new
@@ -40,9 +38,23 @@ module LogStash; class JavaPipeline < JavaBasePipeline
40
38
  @flushRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
41
39
  @shutdownRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
42
40
  @outputs_registered = Concurrent::AtomicBoolean.new(false)
41
+
42
+ # @finished_execution signals that the pipeline thread has finished its execution
43
+ # regardless of any exceptions; it will always be true when the thread completes
43
44
  @finished_execution = Concurrent::AtomicBoolean.new(false)
45
+
46
+ # @finished_run signals that the run methods called in the pipeline thread was completed
47
+ # without errors and it will NOT be set if the run method exits from an exception; this
48
+ # is by design and necessary for the wait_until_started semantic
49
+ @finished_run = Concurrent::AtomicBoolean.new(false)
50
+
51
+ @thread = nil
44
52
  end # def initialize
45
53
 
54
+ def finished_execution?
55
+ @finished_execution.true?
56
+ end
57
+
46
58
  def ready?
47
59
  @ready.value
48
60
  end
@@ -84,15 +96,18 @@ module LogStash; class JavaPipeline < JavaBasePipeline
84
96
  @logger.debug("Starting pipeline", default_logging_keys)
85
97
 
86
98
  @finished_execution.make_false
99
+ @finished_run.make_false
87
100
 
88
101
  @thread = Thread.new do
89
102
  begin
90
103
  LogStash::Util.set_thread_name("pipeline.#{pipeline_id}")
91
104
  run
92
- @finished_execution.make_true
105
+ @finished_run.make_true
93
106
  rescue => e
94
107
  close
95
108
  logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
109
+ ensure
110
+ @finished_execution.make_true
96
111
  end
97
112
  end
98
113
 
@@ -107,15 +122,14 @@ module LogStash; class JavaPipeline < JavaBasePipeline
107
122
 
108
123
  def wait_until_started
109
124
  while true do
110
- # This should be changed with an appropriate FSM
111
- # It's an edge case, if we have a pipeline with
112
- # a generator { count => 1 } its possible that `Thread#alive?` doesn't return true
113
- # because the execution of the thread was successful and complete
114
- if @finished_execution.true?
125
+ if @finished_run.true?
126
+ # it completed run without exception
115
127
  return true
116
128
  elsif thread.nil? || !thread.alive?
129
+ # some exception occurred and the thread is dead
117
130
  return false
118
131
  elsif running?
132
+ # fully initialized and running
119
133
  return true
120
134
  else
121
135
  sleep 0.01
@@ -217,11 +231,11 @@ module LogStash; class JavaPipeline < JavaBasePipeline
217
231
 
218
232
  pipeline_workers.times do |t|
219
233
  thread = Thread.new do
234
+ Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
220
235
  org.logstash.execution.WorkerLoop.new(
221
236
  lir_execution, filter_queue_client, @events_filtered, @events_consumed,
222
237
  @flushRequested, @flushing, @shutdownRequested, @drain_queue).run
223
238
  end
224
- Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
225
239
  @worker_threads << thread
226
240
  end
227
241
 
@@ -242,8 +256,13 @@ module LogStash; class JavaPipeline < JavaBasePipeline
242
256
  end
243
257
 
244
258
  def wait_inputs
245
- @input_threads.each(&:join)
246
- @java_inputs_controller.awaitStop
259
+ @input_threads.each do |thread|
260
+ if thread.class == Java::JavaObject
261
+ thread.to_java.join
262
+ else
263
+ thread.join
264
+ end
265
+ end
247
266
  end
248
267
 
249
268
  def start_inputs
@@ -262,11 +281,14 @@ module LogStash; class JavaPipeline < JavaBasePipeline
262
281
 
263
282
  # then after all input plugins are successfully registered, start them
264
283
  inputs.each { |input| start_input(input) }
265
- @java_inputs_controller.startInputs(self)
266
284
  end
267
285
 
268
286
  def start_input(plugin)
269
- @input_threads << Thread.new { inputworker(plugin) }
287
+ if plugin.class == LogStash::JavaInputDelegator
288
+ @input_threads << plugin.start
289
+ else
290
+ @input_threads << Thread.new { inputworker(plugin) }
291
+ end
270
292
  end
271
293
 
272
294
  def inputworker(plugin)
@@ -328,7 +350,6 @@ module LogStash; class JavaPipeline < JavaBasePipeline
328
350
  def stop_inputs
329
351
  @logger.debug("Closing inputs", default_logging_keys)
330
352
  inputs.each(&:do_stop)
331
- @java_inputs_controller.stopInputs
332
353
  @logger.debug("Closed inputs", default_logging_keys)
333
354
  end
334
355
 
@@ -377,7 +398,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
377
398
  end
378
399
 
379
400
  def plugin_threads_info
380
- input_threads = @input_threads.select {|t| t.alive? }
401
+ input_threads = @input_threads.select {|t| t.class == Thread && t.alive? }
381
402
  worker_threads = @worker_threads.select {|t| t.alive? }
382
403
  (input_threads + worker_threads).map {|t| Util.thread_info(t) }
383
404
  end
@@ -10,7 +10,6 @@ require "resolv"
10
10
 
11
11
  # make sure we abort if a known correct JRuby version is installed
12
12
  # to avoid having an unnecessary legacy patch being applied in the future.
13
- raise("Unnecessary patch on resolv.rb for JRuby version 9.1.16+") if Gem::Version.new(JRUBY_VERSION) >= Gem::Version.new("9.1.16.0")
14
13
 
15
14
  # The code below is copied from JRuby 9.1.16.0 resolv.rb:
16
15
  # https://github.com/jruby/jruby/blob/9.1.16.0/lib/ruby/stdlib/resolv.rb#L775-L784
@@ -18,23 +17,3 @@ raise("Unnecessary patch on resolv.rb for JRuby version 9.1.16+") if Gem::Versio
18
17
  # JRuby is Copyright (c) 2007-2017 The JRuby project, and is released
19
18
  # under a tri EPL/GPL/LGPL license.
20
19
  # Full license available at https://github.com/jruby/jruby/blob/9.1.16.0/COPYING
21
-
22
- class Resolv
23
- class DNS
24
- class Requester
25
- class UnconnectedUDP
26
- def sender(msg, data, host, port=Port)
27
- sock = @socks_hash[host.index(':') ? "::" : "0.0.0.0"]
28
- return nil if !sock
29
- service = [IPAddr.new(host), port]
30
- id = DNS.allocate_request_id(service[0], service[1])
31
- request = msg.encode
32
- request[0,2] = [id].pack('n')
33
- return @senders[[service, id]] =
34
- Sender.new(request, data, sock, host, port)
35
- end
36
- end
37
- end
38
- end
39
- end
40
-