logstash-core 6.5.4-java → 6.6.0-java
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/logstash/api/modules/stats.rb +1 -1
- data/lib/logstash/config/config_ast.rb +1 -1
- data/lib/logstash/environment.rb +1 -0
- data/lib/logstash/filter_delegator.rb +2 -69
- data/lib/logstash/instrument/periodic_poller/jvm.rb +3 -3
- data/lib/logstash/java_pipeline.rb +6 -1
- data/lib/logstash/pipeline.rb +1 -1
- data/lib/logstash/pipeline_settings.rb +1 -0
- data/lib/logstash/plugins/registry.rb +5 -2
- data/lib/logstash/util/thread_dump.rb +1 -1
- data/locales/en.yml +1 -1
- data/spec/logstash/acked_queue_concurrent_stress_spec.rb +1 -1
- data/spec/logstash/filter_delegator_spec.rb +2 -12
- data/spec/logstash/instrument/wrapped_write_client_spec.rb +1 -1
- data/spec/logstash/java_filter_delegator_spec.rb +1 -11
- data/spec/logstash/queue_factory_spec.rb +1 -0
- data/spec/logstash/util/wrapped_acked_queue_spec.rb +1 -1
- data/versions-gem-copy.yml +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: cbe096068df3326925926019e2137eb56527f92c91c9d14861616d85e19ac46b
|
4
|
+
data.tar.gz: 8a5a795fbb82efc19a01232f8ef173d668948a33f75aaa749df73e0cc075e206
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 99c5e85d59c293bd4479ca1d4805cb0dff72b225ed97aa7351060c452c1e73e495a24bf3fbc4142df0e67f01bb1eaa90893af1cd8c30d22f6a61d4fed2a0cfd0
|
7
|
+
data.tar.gz: 977df2140b2d9596ad4d08e784bd03cbb063c2c40b9a158771eaf91b6b4f09f80e0804dce33f801c002dfe2cf4664b8a544f588565c4aa492fa2f83c7c964339
|
@@ -10,7 +10,7 @@ module LogStash
|
|
10
10
|
# return hot threads information
|
11
11
|
get "/jvm/hot_threads" do
|
12
12
|
begin
|
13
|
-
top_threads_count = params["threads"] || 3
|
13
|
+
top_threads_count = params["threads"] || 10
|
14
14
|
ignore_idle_threads = params["ignore_idle_threads"] || true
|
15
15
|
options = {
|
16
16
|
:threads => top_threads_count.to_i,
|
@@ -136,7 +136,7 @@ module LogStash; module Config; module AST
|
|
136
136
|
events.each{|e| block.call(e)}
|
137
137
|
end
|
138
138
|
|
139
|
-
if @generated_objects[:#{name}].has_flush
|
139
|
+
if !@generated_objects[:#{name}].nil? && @generated_objects[:#{name}].has_flush
|
140
140
|
@periodic_flushers << @generated_objects[:#{name}_flush] if @generated_objects[:#{name}].periodic_flush
|
141
141
|
@shutdown_flushers << @generated_objects[:#{name}_flush]
|
142
142
|
end
|
data/lib/logstash/environment.rb
CHANGED
@@ -62,6 +62,7 @@ module LogStash
|
|
62
62
|
Setting::Numeric.new("queue.checkpoint.acks", 1024), # 0 is unlimited
|
63
63
|
Setting::Numeric.new("queue.checkpoint.writes", 1024), # 0 is unlimited
|
64
64
|
Setting::Numeric.new("queue.checkpoint.interval", 1000), # 0 is no time-based checkpointing
|
65
|
+
Setting::Boolean.new("queue.checkpoint.retry", false),
|
65
66
|
Setting::Boolean.new("dead_letter_queue.enable", false),
|
66
67
|
Setting::Bytes.new("dead_letter_queue.max_bytes", "1024mb"),
|
67
68
|
Setting::TimeValue.new("slowlog.threshold.warn", "-1"),
|
@@ -1,69 +1,2 @@
|
|
1
|
-
#
|
2
|
-
#
|
3
|
-
module LogStash
|
4
|
-
class FilterDelegator
|
5
|
-
extend Forwardable
|
6
|
-
DELEGATED_METHODS = [
|
7
|
-
:register,
|
8
|
-
:close,
|
9
|
-
:threadsafe?,
|
10
|
-
:do_close,
|
11
|
-
:do_stop,
|
12
|
-
:periodic_flush,
|
13
|
-
:reloadable?
|
14
|
-
]
|
15
|
-
def_delegators :@filter, *DELEGATED_METHODS
|
16
|
-
|
17
|
-
attr_reader :id
|
18
|
-
|
19
|
-
def initialize(filter, id)
|
20
|
-
@klass = filter.class
|
21
|
-
@id = id
|
22
|
-
@filter = filter
|
23
|
-
|
24
|
-
# Scope the metrics to the plugin
|
25
|
-
namespaced_metric = filter.metric
|
26
|
-
@metric_events = namespaced_metric.namespace(:events)
|
27
|
-
@metric_events_in = @metric_events.counter(:in)
|
28
|
-
@metric_events_out = @metric_events.counter(:out)
|
29
|
-
@metric_events_time = @metric_events.counter(:duration_in_millis)
|
30
|
-
namespaced_metric.gauge(:name, config_name)
|
31
|
-
|
32
|
-
# Not all the filters will do bufferings
|
33
|
-
define_flush_method if @filter.respond_to?(:flush)
|
34
|
-
end
|
35
|
-
|
36
|
-
def config_name
|
37
|
-
@klass.config_name
|
38
|
-
end
|
39
|
-
|
40
|
-
def multi_filter(events)
|
41
|
-
@metric_events_in.increment(events.size)
|
42
|
-
|
43
|
-
start_time = java.lang.System.nano_time
|
44
|
-
new_events = @filter.multi_filter(events)
|
45
|
-
@metric_events_time.increment((java.lang.System.nano_time - start_time) / 1_000_000)
|
46
|
-
|
47
|
-
# There is no guarantee in the context of filter
|
48
|
-
# that EVENTS_IN == EVENTS_OUT, see the aggregates and
|
49
|
-
# the split filter
|
50
|
-
c = new_events.count { |event| !event.cancelled? }
|
51
|
-
@metric_events_out.increment(c) if c > 0
|
52
|
-
new_events
|
53
|
-
end
|
54
|
-
|
55
|
-
private
|
56
|
-
def define_flush_method
|
57
|
-
define_singleton_method(:flush) do |options = {}|
|
58
|
-
# we also need to trace the number of events
|
59
|
-
# coming from a specific filters.
|
60
|
-
new_events = @filter.flush(options)
|
61
|
-
|
62
|
-
# Filter plugins that does buffering or spooling of events like the
|
63
|
-
# `Logstash-filter-aggregates` can return `NIL` and will flush on the next flush ticks.
|
64
|
-
@metric_events_out.increment(new_events.size) if new_events && new_events.size > 0
|
65
|
-
new_events
|
66
|
-
end
|
67
|
-
end
|
68
|
-
end
|
69
|
-
end
|
1
|
+
# The contents of this file have been ported to Java. It is included for for compatibility
|
2
|
+
# with plugins that directly include it.
|
@@ -20,8 +20,8 @@ java_import 'org.logstash.instrument.reports.ProcessReport'
|
|
20
20
|
module LogStash module Instrument module PeriodicPoller
|
21
21
|
class JVM < Base
|
22
22
|
class GarbageCollectorName
|
23
|
-
YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation", "scavenge"])
|
24
|
-
OLD_GC_NAMES = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation", "global"])
|
23
|
+
YOUNG_GC_NAMES = Set.new(["Copy", "PS Scavenge", "ParNew", "G1 Young Generation", "scavenge", "GPGC New"])
|
24
|
+
OLD_GC_NAMES = Set.new(["MarkSweepCompact", "PS MarkSweep", "ConcurrentMarkSweep", "G1 Old Generation", "global", "GPGC Old"])
|
25
25
|
|
26
26
|
YOUNG = :young
|
27
27
|
OLD = :old
|
@@ -68,7 +68,7 @@ module LogStash module Instrument module PeriodicPoller
|
|
68
68
|
logger.debug("collector name", :name => collector_name)
|
69
69
|
name = GarbageCollectorName.get(collector_name)
|
70
70
|
if name.nil?
|
71
|
-
logger.error("Unknown garbage collector name", :name => name)
|
71
|
+
logger.error("Unknown garbage collector name", :name => collector_name)
|
72
72
|
else
|
73
73
|
metric.gauge([:jvm, :gc, :collectors, name], :collection_count, collector.getCollectionCount())
|
74
74
|
metric.gauge([:jvm, :gc, :collectors, name], :collection_time_in_millis, collector.getCollectionTime())
|
@@ -26,6 +26,8 @@ module LogStash; class JavaPipeline < JavaBasePipeline
|
|
26
26
|
|
27
27
|
@worker_threads = []
|
28
28
|
|
29
|
+
@java_inputs_controller = org.logstash.execution.InputsController.new(lir_execution.javaInputs)
|
30
|
+
|
29
31
|
@drain_queue = settings.get_value("queue.drain") || settings.get("queue.type") == "memory"
|
30
32
|
|
31
33
|
@events_filtered = java.util.concurrent.atomic.LongAdder.new
|
@@ -216,11 +218,11 @@ module LogStash; class JavaPipeline < JavaBasePipeline
|
|
216
218
|
|
217
219
|
pipeline_workers.times do |t|
|
218
220
|
thread = Thread.new do
|
221
|
+
Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
|
219
222
|
org.logstash.execution.WorkerLoop.new(
|
220
223
|
lir_execution, filter_queue_client, @events_filtered, @events_consumed,
|
221
224
|
@flushRequested, @flushing, @shutdownRequested, @drain_queue).run
|
222
225
|
end
|
223
|
-
Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
|
224
226
|
@worker_threads << thread
|
225
227
|
end
|
226
228
|
|
@@ -242,6 +244,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
|
|
242
244
|
|
243
245
|
def wait_inputs
|
244
246
|
@input_threads.each(&:join)
|
247
|
+
@java_inputs_controller.awaitStop
|
245
248
|
end
|
246
249
|
|
247
250
|
def start_inputs
|
@@ -260,6 +263,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
|
|
260
263
|
|
261
264
|
# then after all input plugins are successfully registered, start them
|
262
265
|
inputs.each { |input| start_input(input) }
|
266
|
+
@java_inputs_controller.startInputs(self)
|
263
267
|
end
|
264
268
|
|
265
269
|
def start_input(plugin)
|
@@ -325,6 +329,7 @@ module LogStash; class JavaPipeline < JavaBasePipeline
|
|
325
329
|
def stop_inputs
|
326
330
|
@logger.debug("Closing inputs", default_logging_keys)
|
327
331
|
inputs.each(&:do_stop)
|
332
|
+
@java_inputs_controller.stopInputs
|
328
333
|
@logger.debug("Closed inputs", default_logging_keys)
|
329
334
|
end
|
330
335
|
|
data/lib/logstash/pipeline.rb
CHANGED
@@ -283,9 +283,9 @@ module LogStash; class Pipeline < BasePipeline
|
|
283
283
|
|
284
284
|
pipeline_workers.times do |t|
|
285
285
|
thread = Thread.new(batch_size, batch_delay, self) do |_b_size, _b_delay, _pipeline|
|
286
|
+
Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
|
286
287
|
_pipeline.worker_loop(_b_size, _b_delay)
|
287
288
|
end
|
288
|
-
Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
|
289
289
|
@worker_threads << thread
|
290
290
|
end
|
291
291
|
|
@@ -262,11 +262,14 @@ module LogStash module Plugins
|
|
262
262
|
# @param name [String] plugin name
|
263
263
|
# @return [Boolean] true if klass is a valid plugin for name
|
264
264
|
def is_a_plugin?(klass, name)
|
265
|
-
klass.ancestors.include?(LogStash::Plugin) && klass.respond_to?(:config_name) && klass.config_name == name
|
265
|
+
(klass.class == Java::JavaClass && klass.simple_name.downcase == name.gsub('_','')) ||
|
266
|
+
(klass.ancestors.include?(LogStash::Plugin) && klass.respond_to?(:config_name) && klass.config_name == name)
|
266
267
|
end
|
267
268
|
|
268
269
|
def add_plugin(type, name, klass)
|
269
|
-
if !exists?(type, name)
|
270
|
+
if klass.respond_to?("javaClass", true)
|
271
|
+
@registry[key_for(type, name)] = PluginSpecification.new(type, name, klass.javaClass)
|
272
|
+
elsif !exists?(type, name)
|
270
273
|
specification_klass = type == :universal ? UniversalPluginSpecification : PluginSpecification
|
271
274
|
@registry[key_for(type, name)] = specification_klass.new(type, name, klass)
|
272
275
|
else
|
@@ -5,7 +5,7 @@ module LogStash
|
|
5
5
|
module Util
|
6
6
|
class ThreadDump
|
7
7
|
SKIPPED_THREADS = [ "Finalizer", "Reference Handler", "Signal Dispatcher" ].freeze
|
8
|
-
THREADS_COUNT_DEFAULT = 3.freeze
|
8
|
+
THREADS_COUNT_DEFAULT = 10.freeze
|
9
9
|
IGNORE_IDLE_THREADS_DEFAULT = true.freeze
|
10
10
|
|
11
11
|
attr_reader :top_count, :ignore, :dump
|
data/locales/en.yml
CHANGED
@@ -298,7 +298,7 @@ en:
|
|
298
298
|
pipeline-workers: |+
|
299
299
|
Sets the number of pipeline workers to run.
|
300
300
|
java-execution: |+
|
301
|
-
|
301
|
+
Use Java execution engine.
|
302
302
|
pipeline-batch-size: |+
|
303
303
|
Size of batches the pipeline is to work in.
|
304
304
|
pipeline-batch-delay: |+
|
@@ -14,7 +14,7 @@ describe LogStash::WrappedAckedQueue, :stress_test => true do
|
|
14
14
|
let(:reject_memo_keys) { [:reject_memo_keys, :path, :queue, :writer_threads, :collector, :metric, :reader_threads, :output_strings] }
|
15
15
|
|
16
16
|
let(:queue) do
|
17
|
-
described_class.new(path, page_capacity, 0, queue_checkpoint_acks, queue_checkpoint_writes, queue_checkpoint_interval, queue_capacity)
|
17
|
+
described_class.new(path, page_capacity, 0, queue_checkpoint_acks, queue_checkpoint_writes, queue_checkpoint_interval, false, queue_capacity)
|
18
18
|
end
|
19
19
|
|
20
20
|
let(:writer_threads) do
|
@@ -51,7 +51,7 @@ describe LogStash::FilterDelegator do
|
|
51
51
|
end
|
52
52
|
|
53
53
|
it "defines a flush method" do
|
54
|
-
expect(subject.respond_to?(:flush)).to be_truthy
|
54
|
+
expect(subject.has_flush).to be_truthy
|
55
55
|
end
|
56
56
|
|
57
57
|
context "when the flush return events" do
|
@@ -128,7 +128,7 @@ describe LogStash::FilterDelegator do
|
|
128
128
|
end
|
129
129
|
|
130
130
|
it "doesnt define a flush method" do
|
131
|
-
expect(subject.respond_to?(:flush)).to be_falsey
|
131
|
+
expect(subject.has_flush).to be_falsey
|
132
132
|
end
|
133
133
|
|
134
134
|
it "increments the in/out of the metric" do
|
@@ -145,14 +145,4 @@ describe LogStash::FilterDelegator do
|
|
145
145
|
end
|
146
146
|
end
|
147
147
|
|
148
|
-
context "delegate methods to the original plugin" do
|
149
|
-
# I am not testing the behavior of these methods
|
150
|
-
# this is done in the plugin tests. I just want to make sure
|
151
|
-
# the proxy delegates the methods.
|
152
|
-
LogStash::FilterDelegator::DELEGATED_METHODS.each do |method|
|
153
|
-
it "delegate method: `#{method}` to the filter" do
|
154
|
-
expect(subject.respond_to?(method))
|
155
|
-
end
|
156
|
-
end
|
157
|
-
end
|
158
148
|
end
|
@@ -110,7 +110,7 @@ describe LogStash::WrappedWriteClient do
|
|
110
110
|
|
111
111
|
context "WrappedAckedQueue" do
|
112
112
|
let(:path) { Stud::Temporary.directory }
|
113
|
-
let(:queue) { LogStash::WrappedAckedQueue.new(path, 1024, 10, 1024, 1024, 1024, 4096) }
|
113
|
+
let(:queue) { LogStash::WrappedAckedQueue.new(path, 1024, 10, 1024, 1024, 1024, false, 4096) }
|
114
114
|
|
115
115
|
before do
|
116
116
|
read_client.set_events_metric(metric.namespace([:stats, :events]))
|
@@ -6,7 +6,7 @@ require "support/shared_contexts"
|
|
6
6
|
|
7
7
|
java_import org.logstash.RubyUtil
|
8
8
|
|
9
|
-
describe LogStash::JavaFilterDelegator do
|
9
|
+
describe LogStash::FilterDelegator do
|
10
10
|
|
11
11
|
class MockGauge
|
12
12
|
def increment(_)
|
@@ -182,14 +182,4 @@ describe LogStash::JavaFilterDelegator do
|
|
182
182
|
end
|
183
183
|
end
|
184
184
|
|
185
|
-
context "delegate methods to the original plugin" do
|
186
|
-
# I am not testing the behavior of these methods
|
187
|
-
# this is done in the plugin tests. I just want to make sure
|
188
|
-
# the proxy delegates the methods.
|
189
|
-
LogStash::FilterDelegator::DELEGATED_METHODS.each do |method|
|
190
|
-
it "delegate method: `#{method}` to the filter" do
|
191
|
-
expect(subject.respond_to?(method))
|
192
|
-
end
|
193
|
-
end
|
194
|
-
end
|
195
185
|
end
|
@@ -14,6 +14,7 @@ describe LogStash::QueueFactory do
|
|
14
14
|
LogStash::Setting::Numeric.new("queue.checkpoint.acks", 1024),
|
15
15
|
LogStash::Setting::Numeric.new("queue.checkpoint.writes", 1024),
|
16
16
|
LogStash::Setting::Numeric.new("queue.checkpoint.interval", 1000),
|
17
|
+
LogStash::Setting::Boolean.new("queue.checkpoint.retry", false),
|
17
18
|
LogStash::Setting::String.new("pipeline.id", pipeline_id),
|
18
19
|
LogStash::Setting::PositiveInteger.new("pipeline.batch.size", 125),
|
19
20
|
LogStash::Setting::PositiveInteger.new("pipeline.workers", LogStash::Config::CpuCoreStrategy.maximum)
|
@@ -38,7 +38,7 @@ describe LogStash::WrappedAckedQueue do
|
|
38
38
|
let(:checkpoint_writes) { 1024 }
|
39
39
|
let(:checkpoint_interval) { 0 }
|
40
40
|
let(:path) { Stud::Temporary.directory }
|
41
|
-
let(:queue) { LogStash::WrappedAckedQueue.new(path, page_capacity, max_events, checkpoint_acks, checkpoint_writes, checkpoint_interval, max_bytes) }
|
41
|
+
let(:queue) { LogStash::WrappedAckedQueue.new(path, page_capacity, max_events, checkpoint_acks, checkpoint_writes, checkpoint_interval, false, max_bytes) }
|
42
42
|
|
43
43
|
after do
|
44
44
|
queue.close
|
data/versions-gem-copy.yml
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: logstash-core
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 6.5.4
|
4
|
+
version: 6.6.0
|
5
5
|
platform: java
|
6
6
|
authors:
|
7
7
|
- Elastic
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date:
|
11
|
+
date: 2019-01-24 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
requirement: !ruby/object:Gem::Requirement
|