logstash-core 7.0.0.alpha2-java → 7.0.0.beta1-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/logstash/agent.rb +62 -57
- data/lib/logstash/compiler/lscl.rb +2 -3
- data/lib/logstash/config/config_ast.rb +59 -17
- data/lib/logstash/environment.rb +1 -1
- data/lib/logstash/instrument/metric_store.rb +1 -1
- data/lib/logstash/instrument/periodic_poller/dlq.rb +5 -7
- data/lib/logstash/instrument/periodic_poller/pq.rb +6 -8
- data/lib/logstash/instrument/periodic_pollers.rb +3 -3
- data/lib/logstash/java_pipeline.rb +36 -15
- data/lib/logstash/patches/resolv.rb +0 -21
- data/lib/logstash/pipeline.rb +27 -10
- data/lib/logstash/pipeline_action/base.rb +1 -1
- data/lib/logstash/pipeline_action/create.rb +7 -13
- data/lib/logstash/pipeline_action/reload.rb +35 -12
- data/lib/logstash/pipeline_action/stop.rb +4 -6
- data/lib/logstash/pipeline_settings.rb +1 -1
- data/lib/logstash/pipelines_registry.rb +166 -0
- data/lib/logstash/settings.rb +5 -5
- data/lib/logstash/state_resolver.rb +5 -5
- data/lib/logstash/util/duration_formatter.rb +1 -1
- data/lib/logstash/util/safe_uri.rb +1 -0
- data/lib/logstash/util.rb +11 -1
- data/locales/en.yml +1 -1
- data/logstash-core.gemspec +17 -20
- data/spec/logstash/acked_queue_concurrent_stress_spec.rb +1 -1
- data/spec/logstash/agent/converge_spec.rb +25 -31
- data/spec/logstash/agent_spec.rb +5 -5
- data/spec/logstash/event_spec.rb +2 -2
- data/spec/logstash/instrument/wrapped_write_client_spec.rb +1 -1
- data/spec/logstash/legacy_ruby_event_spec.rb +6 -5
- data/spec/logstash/pipeline_action/create_spec.rb +9 -8
- data/spec/logstash/pipeline_action/reload_spec.rb +10 -9
- data/spec/logstash/pipeline_action/stop_spec.rb +4 -3
- data/spec/logstash/pipelines_registry_spec.rb +220 -0
- data/spec/logstash/queue_factory_spec.rb +2 -1
- data/spec/logstash/runner_spec.rb +2 -0
- data/spec/logstash/settings/array_coercible_spec.rb +1 -1
- data/spec/logstash/settings/bytes_spec.rb +2 -2
- data/spec/logstash/settings/port_range_spec.rb +1 -1
- data/spec/logstash/state_resolver_spec.rb +26 -22
- data/spec/logstash/util/safe_uri_spec.rb +40 -0
- data/spec/logstash/util/time_value_spec.rb +1 -1
- data/spec/logstash/util/wrapped_acked_queue_spec.rb +1 -1
- data/spec/support/matchers.rb +25 -19
- data/spec/support/shared_contexts.rb +3 -3
- data/versions-gem-copy.yml +6 -6
- metadata +73 -88
data/spec/logstash/agent/converge_spec.rb
CHANGED
@@ -49,7 +49,7 @@ describe LogStash::Agent do

 context "system pipeline" do

-let(:system_pipeline_config) { mock_pipeline_config(:system_pipeline, "input {
+let(:system_pipeline_config) { mock_pipeline_config(:system_pipeline, "input { dummyblockinginput { } } output { null {} }", { "pipeline.system" => true }) }

 context "when we have a finite pipeline and a system pipeline running" do

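Editorial note on the rewritten configs: throughout this file the spec configs switch to dummyblockinginput, a test-support input whose run method blocks until the pipeline is asked to stop, which is what the "infinite pipeline" scenarios below rely on. The plugin's source is not part of this diff; the following is only a hypothetical sketch of such an input, assuming the standard LogStash::Inputs::Base plugin API.

# Hypothetical sketch -- the real DummyBlockingInput ships with logstash-core's
# spec support code and may be implemented differently.
require "logstash/inputs/base"

class LogStash::Inputs::DummyBlockingInput < LogStash::Inputs::Base
  config_name "dummyblockinginput"

  def register
    @blocker = Queue.new # used purely to park the run thread
  end

  # Never produces events; simply blocks until stop is requested, so a pipeline
  # using this input only finishes when it is explicitly shut down.
  def run(_queue)
    @blocker.pop
  end

  def stop
    @blocker << :stop
  end
end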
@@ -65,40 +65,40 @@ describe LogStash::Agent do
 end

 context "when we have an infinite pipeline and a system pipeline running" do
-let(:infinite_pipeline_config) { mock_pipeline_config(:main, "input {
+let(:infinite_pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { } } output { null {} }") }

 let(:source_loader) do
 TestSourceLoader.new(infinite_pipeline_config, system_pipeline_config)
 end

 before(:each) do
-
+@agent_task = start_agent(subject)
 end

 after(:each) do
-
+@agent_task.stop!
+@agent_task.wait
+subject.shutdown
 end

 describe "#running_user_defined_pipelines" do
 it "returns the user defined pipelines" do
-
-
-
-end
+# wait is necessary to accommodate for pipelines startup time
+wait(60).for {subject.running_user_defined_pipelines.keys}.to eq([:main])
+end
 end

 describe "#running_user_defined_pipelines?" do
 it "returns true" do
-
-
-end.to be_truthy
+# wait is necessary to accommodate for pipelines startup time
+wait(60).for {subject.running_user_defined_pipelines?}.to be_truthy
 end
 end
 end
 end

 context "when `config.reload.automatic`" do
-let(:pipeline_config) { mock_pipeline_config(:main, "input {
+let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput {} } output { null {} }") }

 let(:source_loader) do
 TestSourceLoader.new(pipeline_config)
@@ -114,14 +114,14 @@ describe LogStash::Agent do

 after(:each) do
 @agent_task.stop!
+@agent_task.wait
+subject.shutdown
 end

 it "converge only once" do
 wait(60).for { source_loader.fetch_count }.to eq(1)
-
+# no need to wait here because have_running_pipeline? does the wait
 expect(subject).to have_running_pipeline?(pipeline_config)
-
-subject.shutdown
 end
 end

@@ -135,8 +135,6 @@ describe LogStash::Agent do

 expect(source_loader.fetch_count).to eq(1)
 expect(subject.pipelines_count).to eq(0)
-
-subject.shutdown
 end
 end
 end
@@ -149,26 +147,25 @@ describe LogStash::Agent do
 "config.reload.interval" => interval
 )
 end
+
 before(:each) do
 @agent_task = start_agent(subject)
 end

 after(:each) do
 @agent_task.stop!
+@agent_task.wait
+subject.shutdown
 end

 context "and successfully load the config" do
 it "converges periodically the pipelines from the configs source" do
-
+# no need to wait here because have_running_pipeline? does the wait
 expect(subject).to have_running_pipeline?(pipeline_config)

 # we rely on a periodic thread to call fetch count, we have seen unreliable run on
 # travis, so lets add a few retries
-try
-expect(source_loader.fetch_count).to be > 1
-end
-
-subject.shutdown
+try { expect(source_loader.fetch_count).to be > 1 }
 end
 end

@@ -178,12 +175,9 @@ describe LogStash::Agent do
 end

 it "it will keep trying to converge" do
-
 sleep(agent_settings.get("config.reload.interval") / 1_000_000_000.0 * 20) # let the interval reload a few times
 expect(subject.pipelines_count).to eq(0)
 expect(source_loader.fetch_count).to be > 1
-
-subject.shutdown
 end
 end
 end
@@ -191,8 +185,8 @@ describe LogStash::Agent do
 end

 context "when shutting down the agent" do
-let(:pipeline_config) { mock_pipeline_config(:main, "input {
-let(:new_pipeline_config) { mock_pipeline_config(:new, "input {
+let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput {} } output { null {} }") }
+let(:new_pipeline_config) { mock_pipeline_config(:new, "input { dummyblockinginput { id => 'new' } } output { null {} }") }

 let(:source_loader) do
 TestSourceLoader.new([pipeline_config, new_pipeline_config])
@@ -205,8 +199,8 @@ describe LogStash::Agent do
 end

 context "Configuration converge scenario" do
-let(:pipeline_config) { mock_pipeline_config(:main, "input {
-let(:new_pipeline_config) { mock_pipeline_config(:new, "input {
+let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput {} } output { null {} }", { "pipeline.reloadable" => true }) }
+let(:new_pipeline_config) { mock_pipeline_config(:new, "input { dummyblockinginput {} } output { null {} }", { "pipeline.reloadable" => true }) }

 before do
 # Set the Agent to an initial state of pipelines
@@ -263,7 +257,7 @@ describe LogStash::Agent do
 end

 context "when the source return a modified pipeline" do
-let(:modified_pipeline_config) { mock_pipeline_config(:main, "input {
+let(:modified_pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { id => 'new-and-modified' } } output { null {} }", { "pipeline.reloadable" => true }) }

 let(:source_loader) do
 TestSequenceSourceLoader.new(
data/spec/logstash/agent_spec.rb
CHANGED
@@ -119,7 +119,7 @@ describe LogStash::Agent do
 context "if state is clean" do
 before :each do
 allow(subject).to receive(:running_user_defined_pipelines?).and_return(true)
-allow(subject).to receive(:
+allow(subject).to receive(:no_pipeline?).and_return(false)
 end

 it "should not converge state more than once" do
@@ -142,7 +142,7 @@ describe LogStash::Agent do
 it "does not upgrade the new config" do
 t = Thread.new { subject.execute }
 wait(timeout)
-.for { subject.running_pipelines? && subject.
+.for { subject.running_pipelines? && subject.running_pipelines.values.first.ready? }
 .to eq(true)
 expect(subject.converge_state_and_update).not_to be_a_successful_converge
 expect(subject).to have_running_pipeline?(mock_config_pipeline)
@@ -162,7 +162,7 @@ describe LogStash::Agent do
 it "does upgrade the new config" do
 t = Thread.new { subject.execute }
 Timeout.timeout(timeout) do
-sleep(0.1) until subject.
+sleep(0.1) until subject.running_pipelines_count > 0 && subject.running_pipelines.values.first.ready?
 end

 expect(subject.converge_state_and_update).to be_a_successful_converge
@@ -186,7 +186,7 @@ describe LogStash::Agent do
 it "does not try to reload the pipeline" do
 t = Thread.new { subject.execute }
 Timeout.timeout(timeout) do
-sleep(0.1) until subject.running_pipelines? && subject.
+sleep(0.1) until subject.running_pipelines? && subject.running_pipelines.values.first.running?
 end
 expect(subject.converge_state_and_update).not_to be_a_successful_converge
 expect(subject).to have_running_pipeline?(mock_config_pipeline)
@@ -206,7 +206,7 @@ describe LogStash::Agent do
 it "tries to reload the pipeline" do
 t = Thread.new { subject.execute }
 Timeout.timeout(timeout) do
-sleep(0.1) until subject.running_pipelines? && subject.
+sleep(0.1) until subject.running_pipelines? && subject.running_pipelines.values.first.running?
 end

 expect(subject.converge_state_and_update).to be_a_successful_converge
data/spec/logstash/event_spec.rb
CHANGED
@@ -141,10 +141,10 @@ describe LogStash::Event do
 expect(e.get("foo")).to eq(BigDecimal.new(1))
 end

-it "should set
+it "should set RubyInteger" do
 e = LogStash::Event.new()
 e.set("[foo]", -9223372036854776000)
-expect(e.get("foo")).to be_kind_of(
+expect(e.get("foo")).to be_kind_of(Integer)
 expect(e.get("foo")).to eq(-9223372036854776000)
 end

data/spec/logstash/instrument/wrapped_write_client_spec.rb
CHANGED
@@ -110,7 +110,7 @@ describe LogStash::WrappedWriteClient do

 context "WrappedAckedQueue" do
 let(:path) { Stud::Temporary.directory }
-let(:queue) { LogStash::WrappedAckedQueue.new(path, 1024, 10, 1024, 1024, 1024, 4096) }
+let(:queue) { LogStash::WrappedAckedQueue.new(path, 1024, 10, 1024, 1024, 1024, false, 4096) }

 before do
 read_client.set_events_metric(metric.namespace([:stats, :events]))
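For context on the hunk above: the extra false inserted as the seventh argument appears to correspond to the new queue.checkpoint.retry option that this release also registers in queue_factory_spec.rb (see the last hunk of this diff). The call below is taken verbatim from the new spec; only the marked comment is an assumption.

# The only change to this constructor call is the new seventh positional argument;
# it presumably maps to the queue.checkpoint.retry setting added in 7.0.0.beta1.
LogStash::WrappedAckedQueue.new(path, 1024, 10, 1024, 1024, 1024,
                                false, # checkpoint retry (assumed) -- new in this version
                                4096)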
data/spec/logstash/legacy_ruby_event_spec.rb
CHANGED
@@ -82,7 +82,7 @@ describe LogStash::Event do
 expect(event.get("reference_test")).not_to eq(data)
 end

-it "should not return
+it "should not return an Integer reference" do
 data = 1
 event = LogStash::Event.new({ "reference" => data })
 LogStash::Util::Decorators.add_fields({"reference_test" => "%{reference}"}, event, "dummy-plugin")
@@ -98,11 +98,12 @@ describe LogStash::Event do
 expect(subject.sprintf("bonjour")).to eq("bonjour")
 end

-it "should raise error
+it "should not raise error and should format as empty string when @timestamp field is missing" do
 str = "hello-%{+%s}"
 subj = subject.clone
 subj.remove("[@timestamp]")
-expect{ subj.sprintf(str) }.
+expect{ subj.sprintf(str) }.not_to raise_error(LogStash::Error)
+expect(subj.sprintf(str)).to eq("hello-")
 end

 it "should report a time with %{+format} syntax", :if => RUBY_ENGINE == "jruby" do
@@ -115,11 +116,11 @@ describe LogStash::Event do
 expect(subject.sprintf("foo %{+YYYY-MM-dd} %{type}")).to eq("foo 2013-01-01 sprintf")
 end

-it "should raise error with %{+format} syntax when @timestamp field is missing", :if => RUBY_ENGINE == "jruby" do
+it "should not raise error with %{+format} syntax when @timestamp field is missing", :if => RUBY_ENGINE == "jruby" do
 str = "logstash-%{+YYYY}"
 subj = subject.clone
 subj.remove("[@timestamp]")
-expect{ subj.sprintf(str) }.
+expect{ subj.sprintf(str) }.not_to raise_error(LogStash::Error)
 end

 it "should report fields with %{field} syntax" do
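The two hunks above flip the expected behaviour of Event#sprintf when @timestamp has been removed: instead of raising LogStash::Error, the %{+format} portion now renders as an empty string. A minimal illustration, assuming a JRuby session with logstash-core on the load path:

require "logstash/event"

event = LogStash::Event.new("type" => "sprintf")
event.remove("[@timestamp]")

# 7.0.0.alpha2: raised LogStash::Error because @timestamp was missing.
# 7.0.0.beta1: the time portion is formatted as an empty string instead.
event.sprintf("hello-%{+%s}") # => "hello-"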
data/spec/logstash/pipeline_action/create_spec.rb
CHANGED
@@ -2,13 +2,14 @@
 require "spec_helper"
 require_relative "../../support/helpers"
 require_relative "../../support/matchers"
+require "logstash/pipelines_registry"
 require "logstash/pipeline_action/create"
 require "logstash/inputs/generator"

 describe LogStash::PipelineAction::Create do
 let(:metric) { LogStash::Instrument::NullMetric.new(LogStash::Instrument::Collector.new) }
-let(:pipeline_config) { mock_pipeline_config(:main, "input {
-let(:pipelines) {
+let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { id => '123' } } output { null {} }") }
+let(:pipelines) { LogStash::PipelinesRegistry.new }
 let(:agent) { double("agent") }

 before do
@@ -18,7 +19,7 @@ describe LogStash::PipelineAction::Create do
 subject { described_class.new(pipeline_config, metric) }

 after do
-pipelines.
+pipelines.running_pipelines do |_, pipeline|
 pipeline.shutdown
 pipeline.thread.join
 end
@@ -44,7 +45,7 @@ describe LogStash::PipelineAction::Create do

 it "starts the pipeline" do
 subject.execute(agent, pipelines)
-expect(pipelines
+expect(pipelines.get_pipeline(:main).running?).to be_truthy
 end

 it "returns a successful execution status" do
@@ -54,7 +55,7 @@ describe LogStash::PipelineAction::Create do

 context "when the pipeline doesn't start" do
 context "with a syntax error" do
-let(:pipeline_config) { mock_pipeline_config(:main, "input {
+let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { id => '123' } } output { stdout ") } # bad syntax

 it "raises the exception upstream" do
 expect { subject.execute(agent, pipelines) }.to raise_error
@@ -62,7 +63,7 @@ describe LogStash::PipelineAction::Create do
 end

 context "with an error raised during `#register`" do
-let(:pipeline_config) { mock_pipeline_config(:main, "input {
+let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { id => '123' } } filter { ruby { init => '1/0' code => '1+2' } } output { null {} }") }

 it "returns false" do
 expect(subject.execute(agent, pipelines)).not_to be_a_successful_action
@@ -71,8 +72,8 @@ describe LogStash::PipelineAction::Create do
 end

 context "when sorting create action" do
-let(:pipeline_config) { mock_pipeline_config(:main, "input {
-let(:system_pipeline_config) { mock_pipeline_config(:main_2, "input {
+let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { id => '123' } } output { null {} }") }
+let(:system_pipeline_config) { mock_pipeline_config(:main_2, "input { dummyblockinginput { id => '123' } } output { null {} }", { "pipeline.system" => true }) }

 it "should give higher priority to system pipeline" do
 action_user_pipeline = described_class.new(pipeline_config, metric)
data/spec/logstash/pipeline_action/reload_spec.rb
CHANGED
@@ -2,15 +2,16 @@
 require "spec_helper"
 require_relative "../../support/helpers"
 require_relative "../../support/matchers"
+require "logstash/pipelines_registry"
 require "logstash/pipeline_action/reload"

 describe LogStash::PipelineAction::Reload do
 let(:metric) { LogStash::Instrument::NullMetric.new(LogStash::Instrument::Collector.new) }
 let(:pipeline_id) { :main }
-let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input {
-let(:pipeline_config) { "input {
+let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input { dummyblockinginput { id => 'new' } } output { null {} }", { "pipeline.reloadable" => true}) }
+let(:pipeline_config) { "input { dummyblockinginput {} } output { null {} }" }
 let(:pipeline) { mock_pipeline_from_string(pipeline_config, mock_settings("pipeline.reloadable" => true)) }
-let(:pipelines) {
+let(:pipelines) { r = LogStash::PipelinesRegistry.new; r.create_pipeline(pipeline_id, pipeline) { true }; r }
 let(:agent) { double("agent") }

 subject { described_class.new(new_pipeline_config, metric) }
@@ -21,7 +22,7 @@ describe LogStash::PipelineAction::Reload do
 end

 after do
-pipelines.
+pipelines.running_pipelines do |_, pipeline|
 pipeline.shutdown
 pipeline.thread.join
 end
@@ -38,12 +39,12 @@ describe LogStash::PipelineAction::Reload do

 it "start the new pipeline" do
 subject.execute(agent, pipelines)
-expect(pipelines
+expect(pipelines.get_pipeline(pipeline_id).running?).to be_truthy
 end

 it "run the new pipeline code" do
 subject.execute(agent, pipelines)
-expect(pipelines
+expect(pipelines.get_pipeline(pipeline_id).config_hash).to eq(new_pipeline_config.config_hash)
 end
 end

@@ -58,7 +59,7 @@ describe LogStash::PipelineAction::Reload do
 end

 context "when the new pipeline is not reloadable" do
-let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input {
+let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input { dummyblockinginput { id => 'new' } } output { null {} }", { "pipeline.reloadable" => false}) }

 it "cannot successfully execute the action" do
 expect(subject.execute(agent, pipelines)).not_to be_a_successful_action
@@ -66,7 +67,7 @@ describe LogStash::PipelineAction::Reload do
 end

 context "when the new pipeline has syntax errors" do
-let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input
+let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input dummyblockinginput { id => 'new' } } output { null {} }", { "pipeline.reloadable" => false}) }

 it "cannot successfully execute the action" do
 expect(subject.execute(agent, pipelines)).not_to be_a_successful_action
@@ -75,7 +76,7 @@ describe LogStash::PipelineAction::Reload do

 context "when there is an error in the register" do
 before do
-allow_any_instance_of(LogStash::Inputs::
+allow_any_instance_of(LogStash::Inputs::DummyBlockingInput).to receive(:register).and_raise("Bad value")
 end

 it "cannot successfully execute the action" do
data/spec/logstash/pipeline_action/stop_spec.rb
CHANGED
@@ -1,14 +1,15 @@
 # encoding: utf-8
 require "spec_helper"
 require_relative "../../support/helpers"
+require "logstash/pipelines_registry"
 require "logstash/pipeline_action/stop"
 require "logstash/pipeline"

 describe LogStash::PipelineAction::Stop do
-let(:pipeline_config) { "input {
+let(:pipeline_config) { "input { dummyblockinginput {} } output { null {} }" }
 let(:pipeline_id) { :main }
 let(:pipeline) { mock_pipeline_from_string(pipeline_config) }
-let(:pipelines) { chm =
+let(:pipelines) { chm = LogStash::PipelinesRegistry.new; chm.create_pipeline(pipeline_id, pipeline) { true }; chm }
 let(:agent) { double("agent") }

 subject { described_class.new(pipeline_id) }
@@ -31,6 +32,6 @@ describe LogStash::PipelineAction::Stop do
 end

 it "removes the pipeline from the running pipelines" do
-expect { subject.execute(agent, pipelines) }.to change { pipelines.
+expect { subject.execute(agent, pipelines) }.to change { pipelines.running_pipelines.keys }.from([:main]).to([])
 end
 end
data/spec/logstash/pipelines_registry_spec.rb
ADDED
@@ -0,0 +1,220 @@
+# encoding: utf-8
+require "spec_helper"
+require "logstash/pipelines_registry"
+
+describe LogStash::PipelinesRegistry do
+
+  let(:pipeline_id) { "test" }
+  let(:pipeline) { double("Pipeline") }
+  let (:logger) { double("Logger") }
+
+  context "at object creation" do
+    it "should be empty" do
+      expect(subject.size).to eq(0)
+      expect(subject.empty?).to be_truthy
+      expect(subject.running_pipelines).to be_empty
+      expect(subject.non_running_pipelines).to be_empty
+      expect(subject.running_user_defined_pipelines).to be_empty
+    end
+  end
+
+  context "creating a pipeline" do
+    context "without existing same pipeline id" do
+      it "registry should not have a state for pipeline_id" do
+        expect(subject.get_pipeline(pipeline_id)).to be_nil
+      end
+
+      it "should return block return value" do
+        expect(subject.create_pipeline(pipeline_id, pipeline) { "dummy" }).to eq("dummy")
+      end
+
+      it "should register the new pipeline upon successful create block" do
+        subject.create_pipeline(pipeline_id, pipeline) { true }
+        expect(subject.get_pipeline(pipeline_id)).to eq(pipeline)
+      end
+
+      it "should not register the new pipeline upon unsuccessful create block" do
+        subject.create_pipeline(pipeline_id, pipeline) { false }
+        expect(subject.get_pipeline(pipeline_id)).to be_nil
+      end
+    end
+
+    context "with existing pipeline id" do
+      before :each do
+        subject.create_pipeline(pipeline_id, pipeline) { true }
+      end
+
+      it "registry should have a state for pipeline_id" do
+        expect(subject.get_pipeline(pipeline_id)).to eq(pipeline)
+      end
+
+      context "when existing pipeline is not terminated" do
+        before :each do
+          expect(pipeline).to receive(:finished_execution?).and_return(false)
+        end
+
+        it "should return false" do
+          expect(subject.create_pipeline(pipeline_id, pipeline) { "dummy" }).to be_falsey
+        end
+
+        it "should not call block and log error if pipeline is not terminated" do
+          expect(LogStash::PipelinesRegistry).to receive(:logger).and_return(logger)
+          expect(logger).to receive(:error)
+          expect { |b| subject.create_pipeline(pipeline_id, pipeline, &b) }.not_to yield_control
+        end
+      end
+
+      context "when existing pipeline is terminated" do
+        let (:new_pipeline) { double("New Pipeline") }
+
+        before :each do
+          expect(pipeline).to receive(:finished_execution?).and_return(true)
+        end
+
+        it "should return block value" do
+          expect(subject.create_pipeline(pipeline_id, new_pipeline) { "dummy" }).to eq("dummy")
+        end
+
+        it "should return block value" do
+          expect(subject.create_pipeline(pipeline_id, new_pipeline) { "dummy" }).to eq("dummy")
+        end
+
+        it "should register new pipeline" do
+          subject.create_pipeline(pipeline_id, new_pipeline) { true }
+          expect(subject.get_pipeline(pipeline_id)).to eq(new_pipeline)
+        end
+      end
+    end
+  end
+
+  context "terminating a pipeline" do
+    context "without existing pipeline id" do
+      it "should log error" do
+        expect(LogStash::PipelinesRegistry).to receive(:logger).and_return(logger)
+        expect(logger).to receive(:error)
+        subject.terminate_pipeline(pipeline_id) { "dummy" }
+      end
+
+      it "should not yield to block" do
+        expect { |b| subject.terminate_pipeline(pipeline_id, &b) }.not_to yield_control
+      end
+    end
+
+    context "with existing pipeline id" do
+      before :each do
+        subject.create_pipeline(pipeline_id, pipeline) { true }
+      end
+
+      it "should yield to block" do
+        expect { |b| subject.terminate_pipeline(pipeline_id, &b) }.to yield_control
+      end
+
+      it "should keep pipeline id" do
+        subject.terminate_pipeline(pipeline_id) { "dummy" }
+        expect(subject.get_pipeline(pipeline_id)).to eq(pipeline)
+      end
+    end
+  end
+
+  context "reloading a pipeline" do
+    it "should log error with inexistent pipeline id" do
+      expect(LogStash::PipelinesRegistry).to receive(:logger).and_return(logger)
+      expect(logger).to receive(:error)
+      subject.reload_pipeline(pipeline_id) { }
+    end
+
+    context "with existing pipeline id" do
+      before :each do
+        subject.create_pipeline(pipeline_id, pipeline) { true }
+      end
+
+      it "should return block value" do
+        expect(subject.reload_pipeline(pipeline_id) { ["dummy", pipeline] }).to eq("dummy")
+      end
+
+      it "should not be terminated while reloading" do
+        expect(pipeline).to receive(:finished_execution?).and_return(false, true, true)
+
+        # 1st call: finished_execution? is false
+        expect(subject.running_pipelines).not_to be_empty
+
+        # 2nd call: finished_execution? is true
+        expect(subject.running_pipelines).to be_empty
+
+
+        queue = Queue.new # threadsafe queue
+        in_block = Concurrent::AtomicBoolean.new(false)
+
+        thread = Thread.new(subject, pipeline_id, pipeline, queue, in_block) do |subject, pipeline_id, pipeline, queue, in_block|
+          subject.reload_pipeline(pipeline_id) do
+            in_block.make_true
+            queue.pop
+            [true, pipeline]
+          end
+        end
+
+        # make sure we entered the block executioin
+        wait(10).for {in_block.true?}.to be_truthy
+
+        # at this point the thread is suspended waiting on queue
+
+        # since in reloading state, running_pipelines is not empty
+        expect(subject.running_pipelines).not_to be_empty
+
+        # unblock thread
+        queue.push(:dummy)
+        thread.join
+
+        # 3rd call: finished_execution? is true
+        expect(subject.running_pipelines).to be_empty
+      end
+    end
+  end
+
+  context "pipelines collections" do
+    context "with a non terminated pipelines" do
+      before :each do
+        subject.create_pipeline(pipeline_id, pipeline) { true }
+        expect(pipeline).to receive(:finished_execution?).and_return(false)
+      end
+
+      it "should find running pipelines" do
+        expect(subject.running_pipelines).not_to be_empty
+      end
+
+      it "should not find non_running pipelines" do
+        expect(subject.non_running_pipelines).to be_empty
+      end
+
+      it "should find running_user_defined_pipelines" do
+        expect(pipeline).to receive(:system?).and_return(false)
+        expect(subject.running_user_defined_pipelines).not_to be_empty
+      end
+
+      it "should not find running_user_defined_pipelines" do
+        expect(pipeline).to receive(:system?).and_return(true)
+        expect(subject.running_user_defined_pipelines).to be_empty
+      end
+    end
+
+    context "with a terminated pipelines" do
+      before :each do
+        subject.create_pipeline(pipeline_id, pipeline) { true }
+        expect(pipeline).to receive(:finished_execution?).and_return(true)
+      end
+
+      it "should not find running pipelines" do
+        expect(subject.running_pipelines).to be_empty
+      end
+
+      it "should find non_running pipelines" do
+        expect(subject.non_running_pipelines).not_to be_empty
+      end
+
+      it "should not find running_user_defined_pipelines" do
+        expect(subject.running_user_defined_pipelines).to be_empty
+      end
+    end
+
+  end
+end
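The new spec above documents the contract of the PipelinesRegistry added in this release (see data/lib/logstash/pipelines_registry.rb +166 in the file list). The actual implementation is not part of this diff; the following is only a rough sketch, inferred from the behaviours the spec exercises, of how such a registry could satisfy them. Method names come from the spec; everything else (the state struct, the loading flag, the log messages) is an assumption.

# Rough sketch only, not the shipped implementation; the real class's
# thread-safety and pipeline-state bookkeeping are omitted.
require "logger"

module LogStash
  class PipelinesRegistrySketch
    # class-level logger, mirroring the PipelinesRegistry.logger the spec stubs
    def self.logger
      @logger ||= Logger.new($stderr)
    end

    PipelineState = Struct.new(:pipeline, :loading) # loading == true while reloading

    def initialize
      @states = {}
    end

    def size
      @states.size
    end

    def empty?
      @states.empty?
    end

    def get_pipeline(pipeline_id)
      state = @states[pipeline_id]
      state && state.pipeline
    end

    # Runs the caller's start block and registers the pipeline only when the
    # block returns a truthy value; refuses to replace a still-running pipeline.
    def create_pipeline(pipeline_id, pipeline, &block)
      existing = @states[pipeline_id]
      if existing && !existing.pipeline.finished_execution?
        self.class.logger.error("cannot create pipeline #{pipeline_id}: a pipeline with the same id is still running")
        return false
      end
      success = block.call
      @states[pipeline_id] = PipelineState.new(pipeline, false) if success
      success
    end

    # Yields the pipeline so the caller can shut it down; the entry is kept so
    # the terminated pipeline stays visible through non_running_pipelines.
    def terminate_pipeline(pipeline_id)
      state = @states[pipeline_id]
      return self.class.logger.error("cannot terminate #{pipeline_id}: no such pipeline") if state.nil?
      yield(state.pipeline)
    end

    # The block returns [success, new_pipeline]; while it runs the entry is
    # flagged as loading so the pipeline keeps counting as running.
    def reload_pipeline(pipeline_id)
      state = @states[pipeline_id]
      return self.class.logger.error("cannot reload #{pipeline_id}: no such pipeline") if state.nil?
      state.loading = true
      success, new_pipeline = yield
      state.pipeline = new_pipeline if success
      state.loading = false
      success
    end

    def running_pipelines
      select_pipelines { |state| state.loading || !state.pipeline.finished_execution? }
    end

    def non_running_pipelines
      select_pipelines { |state| !state.loading && state.pipeline.finished_execution? }
    end

    def running_user_defined_pipelines
      running_pipelines.reject { |_, pipeline| pipeline.system? }
    end

    private

    def select_pipelines(&predicate)
      @states.each_with_object({}) do |(id, state), result|
        result[id] = state.pipeline if predicate.call(state)
      end
    end
  end
end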
data/spec/logstash/queue_factory_spec.rb
CHANGED
@@ -14,6 +14,7 @@ describe LogStash::QueueFactory do
 LogStash::Setting::Numeric.new("queue.checkpoint.acks", 1024),
 LogStash::Setting::Numeric.new("queue.checkpoint.writes", 1024),
 LogStash::Setting::Numeric.new("queue.checkpoint.interval", 1000),
+LogStash::Setting::Boolean.new("queue.checkpoint.retry", false),
 LogStash::Setting::String.new("pipeline.id", pipeline_id),
 LogStash::Setting::PositiveInteger.new("pipeline.batch.size", 125),
 LogStash::Setting::PositiveInteger.new("pipeline.workers", LogStash::Config::CpuCoreStrategy.maximum)
@@ -46,7 +47,7 @@ describe LogStash::QueueFactory do
 let(:queue_path) { ::File.join(settings.get("path.queue"), pipeline_id) }

 after :each do
-FileUtils.
+FileUtils.rm_rf(queue_path)
 end

 it "creates a queue directory based on the pipeline id" do
|