logstash-core 6.8.16-java → 7.0.0.alpha1-java
- checksums.yaml +4 -4
- data/lib/logstash-core/version.rb +3 -1
- data/lib/logstash/agent.rb +69 -85
- data/lib/logstash/api/modules/stats.rb +1 -1
- data/lib/logstash/compiler/lscl.rb +7 -7
- data/lib/logstash/config/config_ast.rb +1 -1
- data/lib/logstash/config/mixin.rb +1 -1
- data/lib/logstash/config/modules_common.rb +3 -3
- data/lib/logstash/dependency_report.rb +1 -2
- data/lib/logstash/environment.rb +4 -9
- data/lib/logstash/event.rb +1 -24
- data/lib/logstash/filter_delegator.rb +69 -2
- data/lib/logstash/filters/base.rb +2 -0
- data/lib/logstash/instrument/metric_store.rb +1 -1
- data/lib/logstash/instrument/periodic_poller/dlq.rb +7 -5
- data/lib/logstash/instrument/periodic_poller/jvm.rb +3 -3
- data/lib/logstash/instrument/periodic_poller/pq.rb +8 -6
- data/lib/logstash/instrument/periodic_pollers.rb +3 -3
- data/lib/logstash/java_pipeline.rb +11 -38
- data/lib/logstash/modules/kibana_config.rb +1 -1
- data/lib/logstash/modules/logstash_config.rb +1 -1
- data/lib/logstash/patches/resolv.rb +32 -17
- data/lib/logstash/pipeline.rb +11 -28
- data/lib/logstash/pipeline_action/base.rb +1 -1
- data/lib/logstash/pipeline_action/create.rb +13 -7
- data/lib/logstash/pipeline_action/reload.rb +12 -35
- data/lib/logstash/pipeline_action/stop.rb +6 -4
- data/lib/logstash/pipeline_settings.rb +1 -2
- data/lib/logstash/plugins/registry.rb +2 -5
- data/lib/logstash/runner.rb +0 -24
- data/lib/logstash/settings.rb +5 -5
- data/lib/logstash/state_resolver.rb +5 -5
- data/lib/logstash/util.rb +1 -11
- data/lib/logstash/util/duration_formatter.rb +1 -1
- data/lib/logstash/util/safe_uri.rb +0 -1
- data/lib/logstash/util/substitution_variables.rb +1 -22
- data/lib/logstash/util/thread_dump.rb +1 -1
- data/locales/en.yml +7 -16
- data/logstash-core.gemspec +11 -2
- data/spec/logstash/acked_queue_concurrent_stress_spec.rb +2 -2
- data/spec/logstash/agent/converge_spec.rb +31 -25
- data/spec/logstash/agent/metrics_spec.rb +1 -1
- data/spec/logstash/agent_spec.rb +7 -6
- data/spec/logstash/compiler/compiler_spec.rb +0 -28
- data/spec/logstash/config/config_ast_spec.rb +0 -15
- data/spec/logstash/config/mixin_spec.rb +2 -3
- data/spec/logstash/converge_result_spec.rb +1 -1
- data/spec/logstash/environment_spec.rb +4 -4
- data/spec/logstash/event_spec.rb +2 -10
- data/spec/logstash/filter_delegator_spec.rb +12 -2
- data/spec/logstash/filters/base_spec.rb +9 -45
- data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +2 -0
- data/spec/logstash/instrument/wrapped_write_client_spec.rb +1 -1
- data/spec/logstash/java_filter_delegator_spec.rb +11 -1
- data/spec/logstash/legacy_ruby_event_spec.rb +5 -6
- data/spec/logstash/patches_spec.rb +3 -1
- data/spec/logstash/pipeline_action/create_spec.rb +8 -14
- data/spec/logstash/pipeline_action/reload_spec.rb +9 -16
- data/spec/logstash/pipeline_action/stop_spec.rb +3 -4
- data/spec/logstash/queue_factory_spec.rb +1 -2
- data/spec/logstash/runner_spec.rb +0 -2
- data/spec/logstash/settings/array_coercible_spec.rb +1 -1
- data/spec/logstash/settings/bytes_spec.rb +2 -2
- data/spec/logstash/settings/port_range_spec.rb +1 -1
- data/spec/logstash/settings_spec.rb +0 -10
- data/spec/logstash/state_resolver_spec.rb +22 -26
- data/spec/logstash/util/safe_uri_spec.rb +0 -40
- data/spec/logstash/util/secretstore_spec.rb +1 -1
- data/spec/logstash/util/time_value_spec.rb +1 -1
- data/spec/logstash/util/wrapped_acked_queue_spec.rb +1 -1
- data/spec/logstash/webserver_spec.rb +5 -9
- data/spec/support/matchers.rb +19 -25
- data/spec/support/shared_contexts.rb +3 -3
- data/versions-gem-copy.yml +9 -9
- metadata +31 -44
- data/lib/logstash/patches/resolv_9270.rb +0 -2903
- data/lib/logstash/pipelines_registry.rb +0 -166
- data/lib/logstash/util/lazy_singleton.rb +0 -33
- data/spec/logstash/jruby_version_spec.rb +0 -15
- data/spec/logstash/pipelines_registry_spec.rb +0 -220
data/lib/logstash/event.rb
CHANGED
@@ -1,12 +1,6 @@
 # encoding: utf-8
 
-
-
-# transient pipeline events for normal in-flow signaling as opposed to
-# flow altering exceptions. for now having base classes is adequate and
-# in the future it might be necessary to refactor using like a BaseEvent
-# class to have a common interface for all pipeline events to support
-# eventual queueing persistence for example, TBD.
+# used only in the Ruby execution engine
 module LogStash
   class SignalEvent
     def flush?; raise "abstract method"; end;
@@ -31,21 +25,4 @@ module LogStash
   FLUSH = FlushEvent.new
   SHUTDOWN = ShutdownEvent.new
   NO_SIGNAL = NoSignal.new
-
-  class Event
-    MSG_BRACKETS_METHOD_MISSING = "Direct event field references (i.e. event['field']) have been disabled in favor of using event get and set methods (e.g. event.get('field')). Please consult the Logstash 5.0 breaking changes documentation for more details.".freeze
-    MSG_BRACKETS_EQUALS_METHOD_MISSING = "Direct event field references (i.e. event['field'] = 'value') have been disabled in favor of using event get and set methods (e.g. event.set('field', 'value')). Please consult the Logstash 5.0 breaking changes documentation for more details.".freeze
-    RE_BRACKETS_METHOD = /^\[\]$/.freeze
-    RE_BRACKETS_EQUALS_METHOD = /^\[\]=$/.freeze
-
-    def method_missing(method_name, *arguments, &block)
-      if RE_BRACKETS_METHOD.match(method_name.to_s)
-        raise NoMethodError.new(MSG_BRACKETS_METHOD_MISSING)
-      end
-      if RE_BRACKETS_EQUALS_METHOD.match(method_name.to_s)
-        raise NoMethodError.new(MSG_BRACKETS_EQUALS_METHOD_MISSING)
-      end
-      super
-    end
-  end
 end
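Note: the removed Event#method_missing block was the 5.0-era deprecation guard steering callers from event['field'] to event.get/event.set. For context, here is a minimal standalone Ruby sketch (class name and message are illustrative, not logstash-core code) of that same method_missing guard pattern:

# Standalone sketch of the deprecation guard removed above; not logstash code.
class LegacyAccessGuard
  DEPRECATION_MSG = "use #get/#set instead of #[] / #[]=".freeze

  def method_missing(method_name, *arguments, &block)
    # intercept only the bracket accessors; everything else behaves normally
    raise NoMethodError, DEPRECATION_MSG if method_name.to_s =~ /\A\[\]=?\z/
    super
  end

  def respond_to_missing?(method_name, include_private = false)
    super
  end
end

begin
  LegacyAccessGuard.new["field"]
rescue NoMethodError => e
  puts e.message # => "use #get/#set instead of #[] / #[]="
end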

data/lib/logstash/filter_delegator.rb
CHANGED
@@ -1,2 +1,69 @@
-#
-#
+# encoding: utf-8
+#
+module LogStash
+  class FilterDelegator
+    extend Forwardable
+    DELEGATED_METHODS = [
+      :register,
+      :close,
+      :threadsafe?,
+      :do_close,
+      :do_stop,
+      :periodic_flush,
+      :reloadable?
+    ]
+    def_delegators :@filter, *DELEGATED_METHODS
+
+    attr_reader :id
+
+    def initialize(filter, id)
+      @klass = filter.class
+      @id = id
+      @filter = filter
+
+      # Scope the metrics to the plugin
+      namespaced_metric = filter.metric
+      @metric_events = namespaced_metric.namespace(:events)
+      @metric_events_in = @metric_events.counter(:in)
+      @metric_events_out = @metric_events.counter(:out)
+      @metric_events_time = @metric_events.counter(:duration_in_millis)
+      namespaced_metric.gauge(:name, config_name)
+
+      # Not all the filters will do bufferings
+      define_flush_method if @filter.respond_to?(:flush)
+    end
+
+    def config_name
+      @klass.config_name
+    end
+
+    def multi_filter(events)
+      @metric_events_in.increment(events.size)
+
+      start_time = java.lang.System.nano_time
+      new_events = @filter.multi_filter(events)
+      @metric_events_time.increment((java.lang.System.nano_time - start_time) / 1_000_000)
+
+      # There is no guarantee in the context of filter
+      # that EVENTS_IN == EVENTS_OUT, see the aggregates and
+      # the split filter
+      c = new_events.count { |event| !event.cancelled? }
+      @metric_events_out.increment(c) if c > 0
+      new_events
+    end
+
+    private
+    def define_flush_method
+      define_singleton_method(:flush) do |options = {}|
+        # we also need to trace the number of events
+        # coming from a specific filters.
+        new_events = @filter.flush(options)
+
+        # Filter plugins that does buffering or spooling of events like the
+        # `Logstash-filter-aggregates` can return `NIL` and will flush on the next flush ticks.
+        @metric_events_out.increment(new_events.size) if new_events && new_events.size > 0
+        new_events
+      end
+    end
+  end
+end
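The reinstated Ruby FilterDelegator forwards lifecycle calls to the wrapped plugin and counts events in/out around multi_filter. A standalone sketch of that delegation pattern follows; the Counter, FakeFilter, and DelegatorSketch classes are stand-ins, not logstash APIs:

require "forwardable"

# Stand-in metric counter; logstash uses its metric subsystem instead.
class Counter
  attr_reader :value
  def initialize; @value = 0; end
  def increment(n = 1); @value += n; end
end

# Stand-in filter that drops events marked :drop.
class FakeFilter
  def register; end
  def multi_filter(events); events.reject { |e| e[:drop] }; end
end

class DelegatorSketch
  extend Forwardable
  def_delegators :@filter, :register   # lifecycle calls go straight to the wrapped plugin

  attr_reader :events_in, :events_out

  def initialize(filter)
    @filter = filter
    @events_in = Counter.new
    @events_out = Counter.new
  end

  def multi_filter(events)
    @events_in.increment(events.size)
    result = @filter.multi_filter(events)
    @events_out.increment(result.size)
    result
  end
end

delegator = DelegatorSketch.new(FakeFilter.new)
delegator.register
delegator.multi_filter([{ msg: "a" }, { msg: "b", drop: true }])
puts "in=#{delegator.events_in.value} out=#{delegator.events_out.value}" # => in=2 out=1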

data/lib/logstash/filters/base.rb
CHANGED
@@ -194,6 +194,8 @@ class LogStash::Filters::Base < LogStash::Plugin
     # this is important because a construct like event["tags"].delete(tag) will not work
     # in the current Java event implementation. see https://github.com/elastic/logstash/issues/4140
 
+    return if @remove_tag.empty?
+
     tags = event.get("tags")
     return unless tags
 
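The added early return skips the tag-removal work entirely when the filter has no remove_tag configured. A trivial standalone illustration of that guard (function and arguments are made up, not the plugin API):

# Standalone illustration of the early-return guard; not logstash code.
def remove_tags(event_tags, remove_tag)
  return event_tags if remove_tag.empty?   # nothing configured, skip the work
  event_tags - remove_tag
end

puts remove_tags(["_grokparsefailure", "web"], []).inspect      # => ["_grokparsefailure", "web"]
puts remove_tags(["_grokparsefailure", "web"], ["web"]).inspect # => ["_grokparsefailure"]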

data/lib/logstash/instrument/metric_store.rb
CHANGED
@@ -302,7 +302,7 @@ module LogStash module Instrument
     #
     # @param [Concurrent::Map] Map to search for the key
     # @param [Array] List of path to create
-    # @param [
+    # @param [Fixnum] Which part from the list to create
     #
     def fetch_or_store_namespace_recursively(map, namespaces_path, idx = 0)
       current = namespaces_path[idx]

data/lib/logstash/instrument/periodic_poller/dlq.rb
CHANGED
@@ -10,11 +10,13 @@ module LogStash module Instrument module PeriodicPoller
     end
 
     def collect
-      pipelines = @agent.
-      pipelines.
-
-      pipeline.
-
+      pipelines = @agent.with_running_user_defined_pipelines {|pipelines| pipelines}
+      unless pipelines.nil?
+        pipelines.each {|_, pipeline|
+          unless pipeline.nil?
+            pipeline.collect_dlq_stats
+          end
+        }
       end
     end
   end
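The rewritten collect guards against a pipeline map that is nil, or that contains nil entries, while DLQ stats are gathered. A standalone sketch of that nil-guarded iteration, using stand-in objects rather than the real Agent and Pipeline classes:

# Stand-ins for the real classes; only the shape of the calls matters here.
FakePipeline = Struct.new(:id) do
  def collect_dlq_stats
    puts "collecting DLQ stats for #{id}"
  end
end

class FakeAgent
  def initialize(pipelines)
    @pipelines = pipelines # pipeline_id => pipeline, values may be nil mid-reload
  end

  # mirrors the block-yielding accessor used in the hunk above
  def with_running_user_defined_pipelines
    yield @pipelines
  end
end

agent = FakeAgent.new({ "main" => FakePipeline.new("main"), "stale" => nil })

pipelines = agent.with_running_user_defined_pipelines { |p| p }
unless pipelines.nil?
  pipelines.each do |_, pipeline|
    pipeline.collect_dlq_stats unless pipeline.nil?
  end
end
# => collecting DLQ stats for main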

data/lib/logstash/instrument/periodic_poller/jvm.rb
CHANGED
@@ -20,8 +20,8 @@ java_import 'org.logstash.instrument.reports.ProcessReport'
 module LogStash module Instrument module PeriodicPoller
   class JVM < Base
     class GarbageCollectorName
-      YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation", "scavenge"
-      OLD_GC_NAMES = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation", "global"
+      YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation", "scavenge"])
+      OLD_GC_NAMES = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation", "global"])
 
       YOUNG = :young
       OLD = :old
@@ -68,7 +68,7 @@ module LogStash module Instrument module PeriodicPoller
         logger.debug("collector name", :name => collector_name)
         name = GarbageCollectorName.get(collector_name)
         if name.nil?
-          logger.error("Unknown garbage collector name", :name =>
+          logger.error("Unknown garbage collector name", :name => name)
         else
           metric.gauge([:jvm, :gc, :collectors, name], :collection_count, collector.getCollectionCount())
           metric.gauge([:jvm, :gc, :collectors, name], :collection_time_in_millis, collector.getCollectionTime())
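For reference, GarbageCollectorName.get classifies a collector into the young or old generation by set membership over the names shown in the + lines above. A standalone sketch of that lookup, using a plain function rather than the logstash class:

require "set"

# Same classification idea as GarbageCollectorName.get; constants copied from the + lines above.
YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation", "scavenge"])
OLD_GC_NAMES  = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation", "global"])

def gc_generation(collector_name)
  return :young if YOUNG_GC_NAMES.include?(collector_name)
  return :old   if OLD_GC_NAMES.include?(collector_name)
  nil # unknown collector, the caller logs an error
end

puts gc_generation("PS Scavenge")        # => young
puts gc_generation("G1 Old Generation")  # => old
puts gc_generation("Shenandoah").inspect # => nil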

data/lib/logstash/instrument/periodic_poller/pq.rb
CHANGED
@@ -11,12 +11,14 @@ module LogStash module Instrument module PeriodicPoller
     end
 
     def collect
-      pipelines = @agent.
-      pipelines.
-
-      pipeline.
-
+      pipelines = @agent.with_running_user_defined_pipelines {|pipelines| pipelines}
+      unless pipelines.nil?
+        pipelines.each {|_, pipeline|
+          unless pipeline.nil?
+            pipeline.collect_stats
+          end
+        }
       end
     end
   end
-end end end
+end; end; end

data/lib/logstash/instrument/periodic_pollers.rb
CHANGED
@@ -11,12 +11,12 @@ module LogStash module Instrument
   class PeriodicPollers
     attr_reader :metric
 
-    def initialize(metric, queue_type,
+    def initialize(metric, queue_type, pipelines)
       @metric = metric
       @periodic_pollers = [PeriodicPoller::Os.new(metric),
                            PeriodicPoller::JVM.new(metric),
-                           PeriodicPoller::PersistentQueue.new(metric, queue_type,
-                           PeriodicPoller::DeadLetterQueue.new(metric,
+                           PeriodicPoller::PersistentQueue.new(metric, queue_type, pipelines),
+                           PeriodicPoller::DeadLetterQueue.new(metric, pipelines)]
     end
 
     def start

data/lib/logstash/java_pipeline.rb
CHANGED
@@ -1,7 +1,6 @@
 # encoding: utf-8
 require "thread"
 require "concurrent"
-require "logstash/event"
 require "logstash/filters/base"
 require "logstash/inputs/base"
 require "logstash/outputs/base"
@@ -39,23 +38,9 @@ module LogStash; class JavaPipeline < JavaBasePipeline
     @flushRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
     @shutdownRequested = java.util.concurrent.atomic.AtomicBoolean.new(false)
     @outputs_registered = Concurrent::AtomicBoolean.new(false)
-
-    # @finished_execution signals that the pipeline thread has finished its execution
-    # regardless of any exceptions; it will always be true when the thread completes
     @finished_execution = Concurrent::AtomicBoolean.new(false)
-
-    # @finished_run signals that the run methods called in the pipeline thread was completed
-    # without errors and it will NOT be set if the run method exits from an exception; this
-    # is by design and necessary for the wait_until_started semantic
-    @finished_run = Concurrent::AtomicBoolean.new(false)
-
-    @thread = nil
   end # def initialize
 
-  def finished_execution?
-    @finished_execution.true?
-  end
-
   def ready?
     @ready.value
   end
@@ -97,18 +82,15 @@ module LogStash; class JavaPipeline < JavaBasePipeline
     @logger.debug("Starting pipeline", default_logging_keys)
 
     @finished_execution.make_false
-    @finished_run.make_false
 
     @thread = Thread.new do
       begin
         LogStash::Util.set_thread_name("pipeline.#{pipeline_id}")
         run
-        @
+        @finished_execution.make_true
      rescue => e
        close
        logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
-      ensure
-        @finished_execution.make_true
      end
    end
 
@@ -123,14 +105,15 @@ module LogStash; class JavaPipeline < JavaBasePipeline
 
   def wait_until_started
     while true do
-
-
+      # This should be changed with an appropriate FSM
+      # It's an edge case, if we have a pipeline with
+      # a generator { count => 1 } its possible that `Thread#alive?` doesn't return true
+      # because the execution of the thread was successful and complete
+      if @finished_execution.true?
        return true
      elsif thread.nil? || !thread.alive?
-        # some exception occurred and the thread is dead
        return false
      elsif running?
-        # fully initialized and running
        return true
      else
        sleep 0.01
@@ -232,11 +215,11 @@ module LogStash; class JavaPipeline < JavaBasePipeline
 
     pipeline_workers.times do |t|
       thread = Thread.new do
-        Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
         org.logstash.execution.WorkerLoop.new(
           lir_execution, filter_queue_client, @events_filtered, @events_consumed,
           @flushRequested, @flushing, @shutdownRequested, @drain_queue).run
       end
+      Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
       @worker_threads << thread
     end
 
@@ -257,13 +240,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
   end
 
   def wait_inputs
-    @input_threads.each
-      if thread.class == Java::JavaObject
-        thread.to_java.join
-      else
-        thread.join
-      end
-    end
+    @input_threads.each(&:join)
   end
 
   def start_inputs
@@ -285,11 +262,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
   end
 
   def start_input(plugin)
-
-      @input_threads << plugin.start
-    else
-      @input_threads << Thread.new { inputworker(plugin) }
-    end
+    @input_threads << Thread.new { inputworker(plugin) }
   end
 
   def inputworker(plugin)
@@ -335,7 +308,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
 
     stop_inputs
 
-    # We make this call blocking, so we know for sure when the method return the
+    # We make this call blocking, so we know for sure when the method return the shutdown is
     # stopped
     wait_for_workers
     clear_pipeline_metrics
@@ -399,7 +372,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
   end
 
   def plugin_threads_info
-    input_threads = @input_threads.select {|t| t.
+    input_threads = @input_threads.select {|t| t.alive? }
     worker_threads = @worker_threads.select {|t| t.alive? }
     (input_threads + worker_threads).map {|t| Util.thread_info(t) }
   end

data/lib/logstash/modules/kibana_config.rb
CHANGED
@@ -8,7 +8,7 @@ module LogStash module Modules class KibanaConfig
   include LogStash::Util::Loggable
 
   ALLOWED_DIRECTORIES = ["search", "visualization"]
-  attr_reader :index_name # not used when importing via kibana but for BWC with
+  attr_reader :index_name # not used when importing via kibana but for BWC with ElasticsearchConfig
 
   # We name it `modul` here because `module` has meaning in Ruby.
   def initialize(modul, settings)

data/lib/logstash/modules/logstash_config.rb
CHANGED
@@ -39,7 +39,7 @@ module LogStash module Modules class LogStashConfig
   def get_setting(setting_class)
     raw_value = @settings[setting_class.name]
     # If we dont check for NIL, the Settings class will try to coerce the value
-    # and most of the it will fails when a NIL value is
+    # and most of the it will fails when a NIL value is explicitly set.
     # This will be fixed once we wrap the plugins settings into a Settings class
     setting_class.set(raw_value) unless raw_value.nil?
     setting_class.value

data/lib/logstash/patches/resolv.rb
CHANGED
@@ -1,25 +1,40 @@
 require "resolv"
 
-# ref:
-# https://github.com/logstash-plugins/logstash-filter-dns/issues/51
-# https://github.com/jruby/jruby/pull/5722
+# ref: https://github.com/logstash-plugins/logstash-filter-dns/issues/40
 #
-# JRuby versions
-#
+# JRuby 9k versions prior to 9.1.16.0 have a bug which crashes IP address
+# resolution after 64k unique IP addresses resolutions.
 #
-#
-#
-# this will be fixed and we want to avoid potential conflicting monkey patches.
-# A spec which will break on JRuby upgrade will redirect here
-# to make a manual verification and eventually remove that patch here once the fix is
-# made in the JRuby version of resolv.rb.
+# Note that the oldest JRuby version in LS 6 is 9.1.13.0 and
+# JRuby 1.7.25 and 1.7.27 (the 2 versions used across LS 5) are not affected by this bug.
 
-if
-
-
-$VERBOSE = nil
+# make sure we abort if a known correct JRuby version is installed
+# to avoid having an unnecessary legacy patch being applied in the future.
+raise("Unnecessary patch on resolv.rb for JRuby version 9.1.16+") if Gem::Version.new(JRUBY_VERSION) >= Gem::Version.new("9.1.16.0")
 
-
+# The code below is copied from JRuby 9.1.16.0 resolv.rb:
+# https://github.com/jruby/jruby/blob/9.1.16.0/lib/ruby/stdlib/resolv.rb#L775-L784
+#
+# JRuby is Copyright (c) 2007-2017 The JRuby project, and is released
+# under a tri EPL/GPL/LGPL license.
+# Full license available at https://github.com/jruby/jruby/blob/9.1.16.0/COPYING
 
-
+class Resolv
+  class DNS
+    class Requester
+      class UnconnectedUDP
+        def sender(msg, data, host, port=Port)
+          sock = @socks_hash[host.index(':') ? "::" : "0.0.0.0"]
+          return nil if !sock
+          service = [IPAddr.new(host), port]
+          id = DNS.allocate_request_id(service[0], service[1])
+          request = msg.encode
+          request[0,2] = [id].pack('n')
+          return @senders[[service, id]] =
+            Sender.new(request, data, sock, host, port)
+        end
+      end
+    end
+  end
 end
+
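The replacement patch refuses to load on JRuby versions that already ship the upstream fix. A standalone sketch of that Gem::Version gate; the fallback version string exists only so the snippet also runs outside JRuby:

# Standalone sketch of the version gate used above; not the gem's file.
FIXED_IN = Gem::Version.new("9.1.16.0")
current  = Gem::Version.new(defined?(JRUBY_VERSION) ? JRUBY_VERSION : "9.1.13.0")

if current >= FIXED_IN
  # on a fixed runtime the patch is dead weight, so fail loudly instead of silently monkey patching
  raise "resolv patch is unnecessary on JRuby #{current}"
else
  puts "applying legacy resolv patch for JRuby #{current}"
  # ... redefine the affected resolv method here ...
end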
data/lib/logstash/pipeline.rb
CHANGED
@@ -107,23 +107,8 @@ module LogStash; class Pipeline < BasePipeline
     @flushing = Concurrent::AtomicReference.new(false)
     @outputs_registered = Concurrent::AtomicBoolean.new(false)
     @worker_shutdown = java.util.concurrent.atomic.AtomicBoolean.new(false)
-
-    # @finished_execution signals that the pipeline thread has finished its execution
-    # regardless of any exceptions; it will always be true when the thread completes
-    @finished_execution = Concurrent::AtomicBoolean.new(false)
-
-    # @finished_run signals that the run methods called in the pipeline thread was completed
-    # without errors and it will NOT be set if the run method exits from an exception; this
-    # is by design and necessary for the wait_until_started semantic
-    @finished_run = Concurrent::AtomicBoolean.new(false)
-
-    @thread = nil
   end # def initialize
 
-  def finished_execution?
-    @finished_execution.true?
-  end
-
   def ready?
     @ready.value
   end
@@ -167,19 +152,16 @@ module LogStash; class Pipeline < BasePipeline
       "pipeline.batch.size" => settings.get("pipeline.batch.size"),
       "pipeline.batch.delay" => settings.get("pipeline.batch.delay")))
 
-    @finished_execution.
-    @finished_run.make_false
+    @finished_execution = Concurrent::AtomicBoolean.new(false)
 
     @thread = Thread.new do
       begin
         LogStash::Util.set_thread_name("pipeline.#{pipeline_id}")
         run
-        @
+        @finished_execution.make_true
      rescue => e
        close
        @logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
-      ensure
-        @finished_execution.make_true
      end
    end
 
@@ -194,14 +176,15 @@ module LogStash; class Pipeline < BasePipeline
 
   def wait_until_started
     while true do
-
-
+      # This should be changed with an appropriate FSM
+      # It's an edge case, if we have a pipeline with
+      # a generator { count => 1 } its possible that `Thread#alive?` doesn't return true
+      # because the execution of the thread was successful and complete
+      if @finished_execution.true?
        return true
-      elsif
-        # some exception occured and the thread is dead
+      elsif !thread.alive?
        return false
      elsif running?
-        # fully initialized and running
        return true
      else
        sleep 0.01
@@ -300,9 +283,9 @@ module LogStash; class Pipeline < BasePipeline
 
     pipeline_workers.times do |t|
       thread = Thread.new(batch_size, batch_delay, self) do |_b_size, _b_delay, _pipeline|
-        Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
         _pipeline.worker_loop(_b_size, _b_delay)
       end
+      Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
       @worker_threads << thread
     end
 
@@ -463,7 +446,7 @@ module LogStash; class Pipeline < BasePipeline
 
     stop_inputs
 
-    # We make this call blocking, so we know for sure when the method return the
+    # We make this call blocking, so we know for sure when the method return the shutdown is
     # stopped
     wait_for_workers
     clear_pipeline_metrics
@@ -542,7 +525,7 @@ module LogStash; class Pipeline < BasePipeline
 
   # Calculate the uptime in milliseconds
   #
-  # @return [
+  # @return [Fixnum] Uptime in milliseconds, 0 if the pipeline is not started
   def uptime
     return 0 if started_at.nil?
     ((Time.now.to_f - started_at.to_f) * 1000.0).to_i
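The restored @return tag documents the unit explicitly; the arithmetic converts elapsed wall-clock seconds to integer milliseconds. A quick standalone check of that formula:

started_at = Time.now - 1.5                               # pretend the pipeline started 1.5s ago
uptime_ms  = ((Time.now.to_f - started_at.to_f) * 1000.0).to_i
puts uptime_ms                                            # => roughly 1500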
|