logstash-core 5.4.0-java → 5.4.1-java
- checksums.yaml +4 -4
- data/lib/logstash-core/logstash-core.jar +0 -0
- data/lib/logstash-core/version.rb +1 -1
- data/lib/logstash/api/commands/hot_threads_reporter.rb +1 -1
- data/lib/logstash/runner.rb +1 -1
- data/lib/logstash/version.rb +1 -1
- data/spec/logstash/agent_spec.rb +20 -88
- data/spec/logstash/instrument/wrapped_write_client_spec.rb +34 -19
- data/spec/logstash/pipeline_spec.rb +29 -17
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1a5a0bad713dd49bae31f5ce35302a518688e034
+  data.tar.gz: fce1138b7e1a930dae5e232bff1bbe74afc505e0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 27455700bac45bcf838b065c0fa716299f6167d1f70b658f31e2a5cedf6c678e4be7dcacdb7aba04a2cfc78c2c92850c46a93f14ac120d48a369940f054c3e18
+  data.tar.gz: 30278142ebf380fc9e1254652e028ed31d24902a65ab2b363614978a7b6193104b4f40467f964af189cc5396779bd7da30de11c13cac897081b012cc6ba0f3d1
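The values above are digests of the gem's inner archives (metadata.gz and data.tar.gz). A hedged sketch of recomputing them locally, assuming the 5.4.1 .gem has already been unpacked (for example with `tar xf logstash-core-5.4.1-java.gem`) so those two members sit in the current directory:

# Hypothetical verification script; not part of the gem itself.
require "digest"

%w[metadata.gz data.tar.gz].each do |member|
  bytes = File.binread(member)
  puts "#{member} SHA1:   #{Digest::SHA1.hexdigest(bytes)}"
  puts "#{member} SHA512: #{Digest::SHA512.hexdigest(bytes)}"
end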
data/lib/logstash-core/logstash-core.jar
CHANGED
Binary file
data/lib/logstash/runner.rb
CHANGED
@@ -202,7 +202,7 @@ class LogStash::Runner < Clamp::StrictCommand
       java.lang.System.setProperty("ls.log.level", setting("log.level"))
       unless java.lang.System.getProperty("log4j.configurationFile")
         log4j_config_location = ::File.join(setting("path.settings"), "log4j2.properties")
-        LogStash::Logging::Logger::initialize("file
+        LogStash::Logging::Logger::initialize("file:///" + log4j_config_location)
       end
       # override log level that may have been introduced from a custom log4j config file
       LogStash::Logging::Logger::configure_logging(setting("log.level"))
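The one-line change above hands Log4j the log4j2.properties location as a file: URI with an empty authority (three slashes). A rough sketch of what the concatenation produces for two hypothetical path.settings values; illustrative only, not the Runner code:

# Illustrative: with a Windows-style path the empty authority keeps the drive
# letter from being read as a URI host, which a "file://C:/..." form would imply.
["/etc/logstash", "C:/logstash/config"].each do |path_settings|
  log4j_config_location = File.join(path_settings, "log4j2.properties")
  puts "file:///" + log4j_config_location
end
# => file:////etc/logstash/log4j2.properties
# => file:///C:/logstash/config/log4j2.properties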
data/lib/logstash/version.rb
CHANGED
data/spec/logstash/agent_spec.rb
CHANGED
@@ -15,6 +15,7 @@ describe LogStash::Agent do
   let(:pipeline_args) { {} }
   let(:config_file) { Stud::Temporary.pathname }
   let(:config_file_txt) { "input { generator { count => 100000 } } output { }" }
+  let(:logger) { double("logger") }

   subject { LogStash::Agent.new(agent_settings) }

@@ -30,6 +31,9 @@ describe LogStash::Agent do
     pipeline_args.each do |key, value|
       pipeline_settings.set(key, value)
     end
+    allow(described_class).to receive(:logger).and_return(logger)
+    [:debug, :info, :error, :warn, :fatal, :trace].each {|level| allow(logger).to receive(level) }
+    [:debug?, :info?, :error?, :warn?, :fatal?, :trace?].each {|level| allow(logger).to receive(level) }
   end

   after :each do
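The stubs added above replace the agent's class-level logger with a double that accepts every log level, so the specs stay quiet. A minimal self-contained sketch of the same pattern; NoisyComponent is a hypothetical stand-in for LogStash::Agent:

require "rspec/autorun"

# Hypothetical component with a class-level logger.
class NoisyComponent
  def self.logger
    @logger ||= Object.new  # real code would return an actual logger
  end

  def run
    self.class.logger.info("starting")
    :done
  end
end

RSpec.describe NoisyComponent do
  let(:logger) { double("logger") }

  before do
    # Same idea as the agent_spec change: swap in a double and stub every level,
    # plus the predicate forms, so nothing reaches a real logger.
    allow(described_class).to receive(:logger).and_return(logger)
    [:debug, :info, :warn, :error, :fatal, :trace].each { |lvl| allow(logger).to receive(lvl) }
    [:debug?, :info?, :warn?, :error?, :fatal?, :trace?].each { |lvl| allow(logger).to receive(lvl) }
  end

  it "runs without touching the real logger" do
    expect(subject.run).to eq(:done)
  end
end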
@@ -134,55 +138,12 @@ describe LogStash::Agent do
       expect(subject).to_not receive(:reload_state!)
       t = Thread.new { subject.execute }

-      # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-      # a bad test design or missing class functionality.
-      sleep(0.1)
       Stud.stop!(t)
       t.join
       subject.shutdown
     end
   end

-  context "when calling reload_pipeline!" do
-    context "with a config that contains reload incompatible plugins" do
-      let(:second_pipeline_config) { "input { stdin {} } filter { } output { }" }
-
-      it "does not upgrade the new config" do
-        t = Thread.new { subject.execute }
-        sleep(0.1) until subject.running_pipelines? && subject.pipelines.values.first.ready?
-        expect(subject).to_not receive(:upgrade_pipeline)
-        File.open(config_file, "w") { |f| f.puts second_pipeline_config }
-        subject.send(:"reload_pipeline!", "main")
-
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
-        Stud.stop!(t)
-        t.join
-        subject.shutdown
-      end
-    end
-
-    context "with a config that does not contain reload incompatible plugins" do
-      let(:second_pipeline_config) { "input { generator { } } filter { } output { }" }
-
-      it "does upgrade the new config" do
-        t = Thread.new { subject.execute }
-        sleep(0.1) until subject.running_pipelines? && subject.pipelines.values.first.ready?
-        expect(subject).to receive(:upgrade_pipeline).once.and_call_original
-        File.open(config_file, "w") { |f| f.puts second_pipeline_config }
-        subject.send(:"reload_pipeline!", "main")
-
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
-        Stud.stop!(t)
-        t.join
-        subject.shutdown
-      end
-    end
-
-  end
   context "when calling reload_state!" do
     context "with a pipeline with auto reloading turned off" do
       let(:second_pipeline_config) { "input { generator { } } filter { } output { }" }
@@ -197,7 +158,6 @@ describe LogStash::Agent do

         # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
         # a bad test design or missing class functionality.
-        sleep(0.1)
         Stud.stop!(t)
         t.join
         subject.shutdown
@@ -215,9 +175,6 @@ describe LogStash::Agent do
         File.open(config_file, "w") { |f| f.puts second_pipeline_config }
         subject.reload_state!

-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
         Stud.stop!(t)
         t.join
         subject.shutdown
@@ -244,60 +201,25 @@ describe LogStash::Agent do
         allow(subject).to receive(:clean_state?).and_return(false)
         t = Thread.new { subject.execute }
         sleep(0.01) until subject.running_pipelines? && subject.pipelines.values.first.running?
+
         expect(subject).to receive(:reload_state!).at_least(2).times

-
-
-        sleep(0.1)
+        sleep 1
+
         Stud.stop!(t)
         t.join
         subject.shutdown
       end
     end
+  end

-
-    context "with a config that contains reload incompatible plugins" do
-      let(:second_pipeline_config) { "input { stdin {} } filter { } output { }" }
-
-      it "does not upgrade the new config" do
-        t = Thread.new { subject.execute }
-        sleep(0.01) until subject.running_pipelines? && subject.pipelines.values.first.running?
-        expect(subject).to_not receive(:upgrade_pipeline)
-        File.open(config_file, "w") { |f| f.puts second_pipeline_config }
-
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
-        Stud.stop!(t)
-        t.join
-        subject.shutdown
-      end
-    end
-
-    context "with a config that does not contain reload incompatible plugins" do
-      let(:second_pipeline_config) { "input { generator { } } filter { } output { }" }
-
-      it "does upgrade the new config" do
-        t = Thread.new { subject.execute }
-        sleep(0.01) until subject.running_pipelines? && subject.pipelines.values.first.running?
-        expect(subject).to receive(:upgrade_pipeline).once.and_call_original
-        File.open(config_file, "w") { |f| f.puts second_pipeline_config }
-
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
-        Stud.stop!(t)
-        t.join
-        subject.shutdown
-      end
-    end
-    end
+  context "when calling reload_state!" do
     end
   end

   describe "#reload_state!" do
     let(:first_pipeline_config) { "input { } filter { } output { }" }
-    let(:second_pipeline_config) { "input { generator {} } filter { } output { }" }
+    let(:second_pipeline_config) { "input { generator { count => 10000 } } filter { } output { }" }
     let(:pipeline_args) { {
       "config.string" => first_pipeline_config,
       "pipeline.workers" => 4,
@@ -326,6 +248,16 @@ describe LogStash::Agent do
       subject.reload_state!
     end
   end
+
+  context "with a config that contains reload incompatible plugins" do
+    let(:second_pipeline_config) { "input { stdin {} } filter { } output { }" }
+
+    it "does not upgrade the new config" do
+      expect(subject).to receive(:fetch_config).and_return(second_pipeline_config)
+      expect(subject).to_not receive(:upgrade_pipeline)
+      subject.reload_state!
+    end
+  end
 end

 describe "Environment Variables In Configs" do
data/spec/logstash/instrument/wrapped_write_client_spec.rb
CHANGED
@@ -7,8 +7,8 @@ require_relative "../../support/mocks_classes"
 require "spec_helper"

 describe LogStash::Instrument::WrappedWriteClient do
-  let(:write_client) { queue.write_client }
-  let(:read_client) { queue.read_client }
+  let!(:write_client) { queue.write_client }
+  let!(:read_client) { queue.read_client }
   let(:pipeline) { double("pipeline", :pipeline_id => :main) }
   let(:collector) { LogStash::Instrument::Collector.new }
   let(:metric) { LogStash::Instrument::Metric.new(collector) }
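For reference, the switch from let to let! above makes the two queue clients eager: let! evaluates its block in an implicit before hook, so both clients exist before the example body (or any thread it spawns) runs, rather than being created lazily on first reference. A small illustrative sketch, unrelated to the Logstash classes:

require "rspec/autorun"

RSpec.describe "let vs let!" do
  let(:lazy)   { [] }  # built the first time `lazy` is referenced in an example
  let!(:eager) { [] }  # built in a before hook, before the example body starts

  it "materializes eager helpers up front" do
    expect(eager).to eq([])
  end
end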
@@ -18,39 +18,54 @@ describe LogStash::Instrument::WrappedWriteClient do

   subject { described_class.new(write_client, pipeline, metric, plugin) }

+  def threaded_read_client
+    Thread.new do
+      started_at = Time.now
+
+      batch_size = 0
+      loop {
+        if Time.now - started_at > 60
+          raise "Took too much time to read from the queue"
+        end
+        batch_size = read_client.read_batch.size
+
+        break if batch_size > 0
+      }
+      expect(batch_size).to eq(1)
+    end
+  end

   shared_examples "queue tests" do
     it "pushes single event to the `WriteClient`" do
-
-
+      pusher_thread = Thread.new(subject, event) do |_subject, _event|
+        _subject.push(_event)
       end
-
-
-
+
+      reader_thread = threaded_read_client
+
+      [pusher_thread, reader_thread].collect(&:join)
     end

     it "pushes batch to the `WriteClient`" do
       batch = write_client.get_new_batch
       batch << event

-
-
+      pusher_thread = Thread.new(subject, batch) do |_subject, _batch|
+        _subject.push_batch(_batch)
       end

-
-
-      t.kill rescue nil
+      reader_thread = threaded_read_client
+      [pusher_thread, reader_thread].collect(&:join)
     end

     context "recorded metrics" do
       before do
-
-
+        pusher_thread = Thread.new(subject, event) do |_subject, _event|
+          _subject.push(_event)
         end
-
-
-
-        t.kill rescue nil
+
+        reader_thread = threaded_read_client
+        [pusher_thread, reader_thread].collect(&:join)
       end

       let(:snapshot_store) { collector.snapshot_metric.metric_store }
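The rewritten examples above replace `t.kill rescue nil` with two cooperating threads that are joined: one pushes, one polls read_batch until data shows up or a deadline passes. A standalone sketch of that shape using a plain Ruby Queue in place of the Logstash wrapped queues (nothing here is the spec's actual API):

require "thread"

queue = Queue.new

pusher_thread = Thread.new(queue) do |q|
  q << { "message" => "hello" }      # stand-in for write_client.push(event)
end

reader_thread = Thread.new(queue) do |q|
  started_at = Time.now
  batch_size = 0
  loop do
    raise "Took too much time to read from the queue" if Time.now - started_at > 60
    batch_size = q.size              # stand-in for read_client.read_batch.size
    break if batch_size > 0
    sleep 0.01
  end
  batch_size
end

# Join both threads instead of killing the reader; the reader returns the batch size.
[pusher_thread, reader_thread].each(&:join)
puts reader_thread.value             # => 1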
@@ -97,7 +112,7 @@ describe LogStash::Instrument::WrappedWriteClient do
   end

   context "AckedMemoryQueue" do
-    let(:queue) { LogStash::Util::WrappedAckedQueue.create_memory_based("", 1024, 10,
+    let(:queue) { LogStash::Util::WrappedAckedQueue.create_memory_based("", 1024, 10, 4096) }

     before do
       read_client.set_events_metric(metric.namespace([:stats, :events]))
data/spec/logstash/pipeline_spec.rb
CHANGED
@@ -80,6 +80,21 @@ class DummySafeFilter < LogStash::Filters::Base
   def close() end
 end

+class DummyFlushingFilter < LogStash::Filters::Base
+  config_name "dummyflushingfilter"
+  milestone 2
+
+  def register() end
+  def filter(event) end
+  def periodic_flush
+    true
+  end
+  def flush(options)
+    return [::LogStash::Event.new("message" => "dummy_flush")]
+  end
+  def close() end
+end
+
 class TestPipeline < LogStash::Pipeline
   attr_reader :outputs, :settings
 end
@@ -250,6 +265,7 @@ describe LogStash::Pipeline do
     }

     it "starts multiple filter threads" do
+      skip("This test has been failing periodically since November 2016. Tracked as https://github.com/elastic/logstash/issues/6245")
       pipeline = TestPipeline.new(test_config_with_filters)
       pipeline.run
       expect(pipeline.worker_threads.size).to eq(worker_thread_count)
@@ -564,23 +580,16 @@ describe LogStash::Pipeline do
     end

     context "Periodic Flush" do
-      let(:number_of_events) { 100 }
       let(:config) do
         <<-EOS
         input {
-
-          count => #{number_of_events}
-          }
+          dummy_input {}
         }
         filter {
-
-          pattern => "^NeverMatch"
-          negate => true
-          what => "previous"
-          }
+          dummy_flushing_filter {}
         }
         output {
-
+          dummy_output {}
         }
         EOS
       end
@@ -588,24 +597,27 @@ describe LogStash::Pipeline do

     before do
       allow(::LogStash::Outputs::DummyOutput).to receive(:new).with(any_args).and_return(output)
-      allow(LogStash::Plugin).to receive(:lookup).with("input", "
+      allow(LogStash::Plugin).to receive(:lookup).with("input", "dummy_input").and_return(DummyInput)
+      allow(LogStash::Plugin).to receive(:lookup).with("filter", "dummy_flushing_filter").and_return(DummyFlushingFilter)
+      allow(LogStash::Plugin).to receive(:lookup).with("output", "dummy_output").and_return(::LogStash::Outputs::DummyOutput)
       allow(LogStash::Plugin).to receive(:lookup).with("codec", "plain").and_return(LogStash::Codecs::Plain)
-      allow(LogStash::Plugin).to receive(:lookup).with("filter", "multiline").and_return(LogStash::Filters::Multiline)
-      allow(LogStash::Plugin).to receive(:lookup).with("output", "dummyoutput").and_return(::LogStash::Outputs::DummyOutput)
     end

-    it "
+    it "flush periodically" do
       Thread.abort_on_exception = true
+
       pipeline = LogStash::Pipeline.new(config, pipeline_settings_obj)
       t = Thread.new { pipeline.run }
       sleep(0.1) until pipeline.ready?
-      wait(
+      wait(10).for do
         # give us a bit of time to flush the events
         output.events.empty?
       end.to be_falsey
-
-      expect(
+
+      expect(output.events.any? {|e| e.get("message") == "dummy_flush"}).to eq(true)
+
       pipeline.shutdown
+
       t.join
     end
   end
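The `wait(10).for { ... }.to be_falsey` assertion above comes from the rspec-wait helper, which re-evaluates the block until the matcher passes or the timeout expires, replacing fixed sleeps. A hedged, self-contained sketch of that style (it assumes the rspec-wait gem is installed; the buffer and producer thread below are hypothetical):

require "rspec/autorun"
require "rspec/wait"

RSpec.describe "an eventually-populated buffer" do
  it "sees the flushed event without a fixed sleep" do
    events = []
    producer = Thread.new { sleep 0.2; events << "dummy_flush" }

    # Poll for up to 10 seconds instead of sleeping an arbitrary amount.
    wait(10).for { events.empty? }.to be_falsey
    expect(events).to include("dummy_flush")

    producer.join
  end
end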
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: logstash-core
 version: !ruby/object:Gem::Version
-  version: 5.4.0
+  version: 5.4.1
 platform: java
 authors:
 - Elastic
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-05-29 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   requirement: !ruby/object:Gem::Requirement