logstash-core 6.4.3-java → 6.5.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: '0535700835dd16374433f064d770af9b96e7f226fce537eb5ea81fc802cf56de'
4
- data.tar.gz: aaa23c13f0cf82109a1e05a527e28505c387558f7730d1e054ad2a48cfcae898
3
+ metadata.gz: 47897fa20536fa113fdf69a3f7f738ecf49e0571d0d087eab504b9ad8a1fb6ca
4
+ data.tar.gz: 72d006015291ab365aea0676cfd024bae8c04ee7e989c3c91d5e5cbca5ede6af
5
5
  SHA512:
6
- metadata.gz: cc82b0baeecec5bfaad991e94b7c72203a6b4cbebe5cdfa6c954fb7551b7814eb08b5fd3c8d6b6aa92ab5afa40fbeab351f027d33a4193f5a018e84128ed4d9e
7
- data.tar.gz: 532c63a9bc508a8bdec9f2e50203e1c13938204529b5c167a51018f06219652c1a3edf78322dba1e46e6b2f20b500f8098cbb3dea8f07834ef36c575f290b3fb
6
+ metadata.gz: c3ddc6a8206cdc4b44e03fd247f13e77a5b8033932f92812f0d71f209fb125f1a2cc550c152f5ee11e0675a27c513a22b6283f0581e8f9388df0ad124a4c74de
7
+ data.tar.gz: 12daf679b7934532e7d456b23d368a76e6207e9601b4ee1482d942fb8a3e9772604e1bdd9e2b4eb1eea3c05705849ebc0968a9020ce053ee100e2aa91ab81183
@@ -33,6 +33,10 @@ class LogStash::Agent
33
33
  @auto_reload = setting("config.reload.automatic")
34
34
  @ephemeral_id = SecureRandom.uuid
35
35
 
36
+ # Mutex to synchronize in the exclusive method
37
+ # Initial usage for the Ruby pipeline initialization which is not thread safe
38
+ @exclusive_lock = Mutex.new
39
+
36
40
  # Special bus object for inter-pipelines communications. Used by the `pipeline` input/output
37
41
  @pipeline_bus = org.logstash.plugins.pipeline.PipelineBus.new
38
42
 
@@ -84,6 +88,10 @@ class LogStash::Agent
84
88
  @running = Concurrent::AtomicBoolean.new(false)
85
89
  end
86
90
 
91
+ def exclusive(&block)
92
+ @exclusive_lock.synchronize { block.call }
93
+ end
94
+
87
95
  def execute
88
96
  @thread = Thread.current # this var is implicitly used by Stud.stop?
89
97
  logger.debug("Starting agent")
@@ -56,11 +56,25 @@ module LogStash module Modules class LogStashConfig
56
56
  get_setting(LogStash::Setting::NullableString.new(name, default.to_s))
57
57
  when Numeric
58
58
  get_setting(LogStash::Setting::Numeric.new(name, default))
59
+ when true, false
60
+ get_setting(LogStash::Setting::Boolean.new(name, default))
59
61
  else
60
62
  get_setting(LogStash::Setting::NullableString.new(name, default.to_s))
61
63
  end
62
64
  end
63
65
 
66
+ def has_setting?(name)
67
+ @settings.key?(name)
68
+ end
69
+
70
+ def raw_setting(name)
71
+ @settings[name]
72
+ end
73
+
74
+ def fetch_raw_setting(name, default)
75
+ @settings.fetch(name, default)
76
+ end
77
+
64
78
  def elasticsearch_output_config(type_string = nil)
65
79
  hosts = array_to_string(get_setting(LogStash::Setting::SplittableStringArray.new("var.elasticsearch.hosts", String, ["localhost:9200"])))
66
80
  index = "#{@name}-#{setting("var.elasticsearch.index_suffix", "%{+YYYY.MM.dd}")}"
@@ -35,7 +35,12 @@ module LogStash module PipelineAction
35
35
  if @pipeline_config.settings.get_value("pipeline.java_execution")
36
36
  LogStash::JavaPipeline.new(@pipeline_config, @metric, agent)
37
37
  else
38
- LogStash::Pipeline.new(@pipeline_config, @metric, agent)
38
+ agent.exclusive do
39
+ # The Ruby pipeline initialization is not thread safe because of the module level
40
+ # shared state in LogStash::Config::AST. When using multiple pipelines this gets
41
+ # executed simultaneously in different threads and we need to synchronize this initialization.
42
+ LogStash::Pipeline.new(@pipeline_config, @metric, agent)
43
+ end
39
44
  end
40
45
 
41
46
  status = nil
@@ -32,7 +32,12 @@ module LogStash module PipelineAction
32
32
  if @pipeline_config.settings.get_value("pipeline.java_execution")
33
33
  LogStash::JavaBasePipeline.new(@pipeline_config, nil, logger, nil)
34
34
  else
35
- LogStash::BasePipeline.new(@pipeline_config)
35
+ agent.exclusive do
36
+ # The Ruby pipeline initialization is not thread safe because of the module level
37
+ # shared state in LogStash::Config::AST. When using multiple pipelines this can get
38
+ # executed simultaneously in different threads and we need to synchronize this initialization.
39
+ LogStash::BasePipeline.new(@pipeline_config)
40
+ end
36
41
  end
37
42
  rescue => e
38
43
  return LogStash::ConvergeResult::FailedAction.from_exception(e)
@@ -108,8 +108,8 @@ class LogStash::Runner < Clamp::StrictCommand
108
108
  :attribute_name => "pipeline.workers",
109
109
  :default => LogStash::SETTINGS.get_default("pipeline.workers")
110
110
 
111
- option ["--experimental-java-execution"], :flag,
112
- I18n.t("logstash.runner.flag.experimental-java-execution"),
111
+ option ["--java-execution"], :flag,
112
+ I18n.t("logstash.runner.flag.java-execution"),
113
113
  :attribute_name => "pipeline.java_execution",
114
114
  :default => LogStash::SETTINGS.get_default("pipeline.java_execution")
115
115
 
@@ -348,6 +348,8 @@ class LogStash::Runner < Clamp::StrictCommand
348
348
  # lock path.data before starting the agent
349
349
  @data_path_lock = FileLockFactory.obtainLock(java.nio.file.Paths.get(setting("path.data")).to_absolute_path, ".lock")
350
350
 
351
+ logger.info("Starting Logstash", "logstash.version" => LOGSTASH_VERSION)
352
+
351
353
  @dispatcher.fire(:before_agent)
352
354
  @agent = create_agent(@settings, @source_loader)
353
355
  @dispatcher.fire(:after_agent)
@@ -357,8 +359,6 @@ class LogStash::Runner < Clamp::StrictCommand
357
359
  sigint_id = trap_sigint()
358
360
  sigterm_id = trap_sigterm()
359
361
 
360
- logger.info("Starting Logstash", "logstash.version" => LOGSTASH_VERSION)
361
-
362
362
  @agent_task = Stud::Task.new { @agent.execute }
363
363
 
364
364
  # no point in enabling config reloading before the agent starts
data/locales/en.yml CHANGED
@@ -297,8 +297,8 @@ en:
297
297
  Sets the ID of the pipeline.
298
298
  pipeline-workers: |+
299
299
  Sets the number of pipeline workers to run.
300
- experimental-java-execution: |+
301
- (Experimental) Use new Java execution engine.
300
+ java-execution: |+
301
+ (Beta) Use new Java execution engine.
302
302
  pipeline-batch-size: |+
303
303
  Size of batches the pipeline is to work in.
304
304
  pipeline-batch-delay: |+
@@ -12,131 +12,118 @@ require_relative "../support/matchers"
12
12
  java_import org.logstash.Timestamp
13
13
 
14
14
  describe LogStash::Agent do
15
- let(:agent_settings) { mock_settings({}) }
16
- let(:agent_args) { {} }
17
- let(:pipeline_settings) { agent_settings.clone }
18
- let(:pipeline_args) { {} }
19
- let(:default_pipeline_id) { agent_settings.get("pipeline.id") }
20
- let(:config_string) { "input { } filter { } output { }" }
21
- let(:config_file) { Stud::Temporary.pathname }
22
- let(:config_file_txt) { config_string }
23
- let(:default_source_loader) do
24
- sl = LogStash::Config::SourceLoader.new
25
- sl.add_source(LogStash::Config::Source::Local.new(agent_settings))
26
- sl
27
- end
28
- let(:logger) { double("logger") }
29
- let(:timeout) {160} #seconds
30
-
31
- subject { LogStash::Agent.new(agent_settings, default_source_loader) }
32
-
33
- before :each do
34
- # This MUST run first, before `subject` is invoked to ensure clean state
35
- clear_data_dir
36
15
 
37
- File.open(config_file, "w") { |f| f.puts(config_file_txt) }
38
-
39
- agent_args.each do |key, value|
40
- agent_settings.set(key, value)
41
- pipeline_settings.set(key, value)
42
- end
43
- pipeline_args.each do |key, value|
44
- pipeline_settings.set(key, value)
16
+ shared_examples "all Agent tests" do
17
+
18
+ let(:agent_settings) { mock_settings({}) }
19
+ let(:agent_args) { {} }
20
+ let(:pipeline_settings) { agent_settings.clone }
21
+ let(:pipeline_args) { {} }
22
+ let(:default_pipeline_id) { agent_settings.get("pipeline.id") }
23
+ let(:config_string) { "input { } filter { } output { }" }
24
+ let(:config_file) { Stud::Temporary.pathname }
25
+ let(:config_file_txt) { config_string }
26
+ let(:default_source_loader) do
27
+ sl = LogStash::Config::SourceLoader.new
28
+ sl.add_source(LogStash::Config::Source::Local.new(agent_settings))
29
+ sl
45
30
  end
46
- allow(described_class).to receive(:logger).and_return(logger)
47
- [:debug, :info, :error, :fatal, :trace].each {|level| allow(logger).to receive(level) }
48
- [:debug?, :info?, :error?, :fatal?, :trace?].each {|level| allow(logger).to receive(level) }
49
- end
31
+ let(:logger) { double("logger") }
32
+ let(:timeout) { 160 } #seconds
50
33
 
51
- after :each do
52
- subject.shutdown
53
- LogStash::SETTINGS.reset
34
+ subject { LogStash::Agent.new(agent_settings, default_source_loader) }
54
35
 
55
- FileUtils.rm(config_file)
56
- FileUtils.rm_rf(subject.id_path)
57
- end
58
-
59
- it "fallback to hostname when no name is provided" do
60
- expect(LogStash::Agent.new(agent_settings, default_source_loader).name).to eq(Socket.gethostname)
61
- end
36
+ before :each do
37
+ # This MUST run first, before `subject` is invoked to ensure clean state
38
+ clear_data_dir
62
39
 
63
- describe "adding a new pipeline" do
64
- let(:agent_args) { { "config.string" => config_string } }
40
+ File.open(config_file, "w") { |f| f.puts(config_file_txt) }
65
41
 
66
- it "should delegate settings to new pipeline" do
67
- expect(LogStash::Pipeline).to receive(:new) do |arg1, arg2|
68
- expect(arg1).to eq(config_string)
69
- expect(arg2.to_hash).to include(agent_args)
42
+ agent_args.each do |key, value|
43
+ agent_settings.set(key, value)
44
+ pipeline_settings.set(key, value)
45
+ end
46
+ pipeline_args.each do |key, value|
47
+ pipeline_settings.set(key, value)
70
48
  end
71
- subject.converge_state_and_update
49
+ allow(described_class).to receive(:logger).and_return(logger)
50
+ [:debug, :info, :error, :fatal, :trace].each {|level| allow(logger).to receive(level) }
51
+ [:debug?, :info?, :error?, :fatal?, :trace?].each {|level| allow(logger).to receive(level) }
72
52
  end
73
- end
74
53
 
75
- describe "#id" do
76
- let(:id_file_data) { File.open(subject.id_path) {|f| f.read } }
54
+ after :each do
55
+ subject.shutdown
56
+ LogStash::SETTINGS.reset
77
57
 
78
- it "should return a UUID" do
79
- expect(subject.id).to be_a(String)
80
- expect(subject.id.size).to be > 0
58
+ FileUtils.rm(config_file)
59
+ FileUtils.rm_rf(subject.id_path)
81
60
  end
82
61
 
83
- it "should write out the persistent UUID" do
84
- expect(id_file_data).to eql(subject.id)
62
+ it "fallback to hostname when no name is provided" do
63
+ expect(LogStash::Agent.new(agent_settings, default_source_loader).name).to eq(Socket.gethostname)
85
64
  end
86
- end
87
65
 
88
- describe "ephemeral_id" do
89
- it "create a ephemeral id at creation time" do
90
- expect(subject.ephemeral_id).to_not be_nil
66
+ describe "adding a new pipeline" do
67
+ let(:agent_args) { { "config.string" => config_string } }
68
+
69
+ it "should delegate settings to new pipeline" do
70
+ expect(LogStash::Pipeline).to receive(:new) do |arg1, arg2|
71
+ expect(arg1).to eq(config_string)
72
+ expect(arg2.to_hash).to include(agent_args)
73
+ end
74
+ subject.converge_state_and_update
75
+ end
91
76
  end
92
- end
93
77
 
94
- describe "#execute" do
95
- let(:config_string) { "input { generator { id => 'old'} } output { }" }
96
- let(:mock_config_pipeline) { mock_pipeline_config(:main, config_string, pipeline_settings) }
78
+ describe "#id" do
79
+ let(:id_file_data) { File.open(subject.id_path) {|f| f.read } }
97
80
 
98
- let(:source_loader) { TestSourceLoader.new(mock_config_pipeline) }
99
- subject { described_class.new(agent_settings, source_loader) }
81
+ it "should return a UUID" do
82
+ expect(subject.id).to be_a(String)
83
+ expect(subject.id.size).to be > 0
84
+ end
100
85
 
101
- before :each do
102
- allow(subject).to receive(:start_webserver).and_return(false)
103
- allow(subject).to receive(:stop_webserver).and_return(false)
86
+ it "should write out the persistent UUID" do
87
+ expect(id_file_data).to eql(subject.id)
88
+ end
104
89
  end
105
90
 
106
- context "when auto_reload is false" do
107
- let(:agent_settings) { mock_settings("config.reload.automatic" => false) }
108
- let(:agent_args) { { "path.config" => config_file } }
91
+ describe "ephemeral_id" do
92
+ it "create a ephemeral id at creation time" do
93
+ expect(subject.ephemeral_id).to_not be_nil
94
+ end
95
+ end
109
96
 
110
- context "if state is clean" do
111
- before :each do
112
- allow(subject).to receive(:running_user_defined_pipelines?).and_return(true)
113
- allow(subject).to receive(:clean_state?).and_return(false)
114
- end
97
+ describe "#execute" do
98
+ let(:config_string) { "input { generator { id => 'old'} } output { }" }
99
+ let(:mock_config_pipeline) { mock_pipeline_config(:main, config_string, pipeline_settings) }
115
100
 
116
- it "should not converge state more than once" do
117
- expect(subject).to receive(:converge_state_and_update).once
118
- t = Thread.new { subject.execute }
101
+ let(:source_loader) { TestSourceLoader.new(mock_config_pipeline) }
102
+ subject { described_class.new(agent_settings, source_loader) }
119
103
 
120
- Stud.stop!(t)
121
- t.join
122
- subject.shutdown
123
- end
104
+ before :each do
105
+ allow(subject).to receive(:start_webserver).and_return(false)
106
+ allow(subject).to receive(:stop_webserver).and_return(false)
124
107
  end
125
108
 
126
- context "when calling reloading a pipeline" do
127
- context "with a config that contains reload incompatible plugins" do
128
- let(:second_pipeline_config) { "input { stdin {} } filter { } output { }" }
129
- let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, pipeline_settings) }
109
+ context "when auto_reload is false" do
110
+ let(:agent_args) { { "config.reload.automatic" => false, "path.config" => config_file } }
130
111
 
131
- let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
112
+ context "verify settings" do
113
+ it "should not auto reload" do
114
+ expect(subject.settings.get("config.reload.automatic")).to eq(false)
115
+ end
116
+ end
117
+
118
+ context "if state is clean" do
119
+ before :each do
120
+ allow(subject).to receive(:running_user_defined_pipelines?).and_return(true)
121
+ allow(subject).to receive(:clean_state?).and_return(false)
122
+ end
132
123
 
133
- it "does not upgrade the new config" do
124
+ it "should not converge state more than once" do
125
+ expect(subject).to receive(:converge_state_and_update).once
134
126
  t = Thread.new { subject.execute }
135
- wait(timeout)
136
- .for { subject.running_pipelines? && subject.pipelines.values.first.ready? }
137
- .to eq(true)
138
- expect(subject.converge_state_and_update).not_to be_a_successful_converge
139
- expect(subject).to have_running_pipeline?(mock_config_pipeline)
140
127
 
141
128
  Stud.stop!(t)
142
129
  t.join
@@ -144,364 +131,405 @@ describe LogStash::Agent do
144
131
  end
145
132
  end
146
133
 
147
- context "with a config that does not contain reload incompatible plugins" do
148
- let(:second_pipeline_config) { "input { generator { } } filter { } output { }" }
149
- let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, pipeline_settings) }
134
+ context "when calling reloading a pipeline" do
135
+ context "with a config that contains reload incompatible plugins" do
136
+ let(:second_pipeline_config) { "input { stdin {} } filter { } output { }" }
137
+ let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, pipeline_settings) }
150
138
 
151
- let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
139
+ let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
152
140
 
153
- it "does upgrade the new config" do
154
- t = Thread.new { subject.execute }
155
- Timeout.timeout(timeout) do
156
- sleep(0.01) until subject.pipelines_count > 0 && subject.pipelines.values.first.ready?
141
+ it "does not upgrade the new config" do
142
+ t = Thread.new { subject.execute }
143
+ wait(timeout)
144
+ .for { subject.running_pipelines? && subject.pipelines.values.first.ready? }
145
+ .to eq(true)
146
+ expect(subject.converge_state_and_update).not_to be_a_successful_converge
147
+ expect(subject).to have_running_pipeline?(mock_config_pipeline)
148
+
149
+ Stud.stop!(t)
150
+ t.join
151
+ subject.shutdown
157
152
  end
153
+ end
158
154
 
159
- expect(subject.converge_state_and_update).to be_a_successful_converge
160
- expect(subject).to have_running_pipeline?(mock_second_pipeline_config)
155
+ context "with a config that does not contain reload incompatible plugins" do
156
+ let(:second_pipeline_config) { "input { generator { } } filter { } output { }" }
157
+ let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, pipeline_settings) }
161
158
 
162
- Stud.stop!(t)
163
- t.join
164
- subject.shutdown
165
- end
166
- end
159
+ let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
167
160
 
168
- end
169
- context "when calling reload_state!" do
170
- context "with a pipeline with auto reloading turned off" do
171
- let(:second_pipeline_config) { "input { generator { } } filter { } output { }" }
172
- let(:pipeline_args) { { "pipeline.reloadable" => false } }
173
- let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, mock_settings(pipeline_args)) }
161
+ it "does upgrade the new config" do
162
+ t = Thread.new { subject.execute }
163
+ Timeout.timeout(timeout) do
164
+ sleep(0.1) until subject.pipelines_count > 0 && subject.pipelines.values.first.ready?
165
+ end
174
166
 
175
- let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
167
+ expect(subject.converge_state_and_update).to be_a_successful_converge
168
+ expect(subject).to have_running_pipeline?(mock_second_pipeline_config)
176
169
 
177
- it "does not try to reload the pipeline" do
178
- t = Thread.new { subject.execute }
179
- Timeout.timeout(timeout) do
180
- sleep(0.01) until subject.running_pipelines? && subject.pipelines.values.first.running?
170
+ Stud.stop!(t)
171
+ t.join
172
+ subject.shutdown
181
173
  end
182
- expect(subject.converge_state_and_update).not_to be_a_successful_converge
183
- expect(subject).to have_running_pipeline?(mock_config_pipeline)
184
-
185
- Stud.stop!(t)
186
- t.join
187
- subject.shutdown
188
174
  end
175
+
189
176
  end
177
+ context "when calling reload_state!" do
178
+ context "with a pipeline with auto reloading turned off" do
179
+ let(:second_pipeline_config) { "input { generator { } } filter { } output { }" }
180
+ let(:pipeline_args) { { "pipeline.reloadable" => false } }
181
+ let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, mock_settings(pipeline_args)) }
182
+
183
+ let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
184
+
185
+ it "does not try to reload the pipeline" do
186
+ t = Thread.new { subject.execute }
187
+ Timeout.timeout(timeout) do
188
+ sleep(0.1) until subject.running_pipelines? && subject.pipelines.values.first.running?
189
+ end
190
+ expect(subject.converge_state_and_update).not_to be_a_successful_converge
191
+ expect(subject).to have_running_pipeline?(mock_config_pipeline)
192
+
193
+ Stud.stop!(t)
194
+ t.join
195
+ subject.shutdown
196
+ end
197
+ end
190
198
 
191
- context "with a pipeline with auto reloading turned on" do
192
- let(:second_pipeline_config) { "input { generator { id => 'second' } } filter { } output { }" }
193
- let(:pipeline_args) { { "pipeline.reloadable" => true } }
194
- let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, mock_settings(pipeline_args)) }
195
- let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
199
+ context "with a pipeline with auto reloading turned on" do
200
+ let(:second_pipeline_config) { "input { generator { id => 'second' } } filter { } output { }" }
201
+ let(:pipeline_args) { { "pipeline.reloadable" => true } }
202
+ let(:mock_second_pipeline_config) { mock_pipeline_config(:main, second_pipeline_config, mock_settings(pipeline_args)) }
203
+ let(:source_loader) { TestSequenceSourceLoader.new(mock_config_pipeline, mock_second_pipeline_config)}
196
204
 
197
- it "tries to reload the pipeline" do
198
- t = Thread.new { subject.execute }
199
- Timeout.timeout(timeout) do
200
- sleep(0.01) until subject.running_pipelines? && subject.pipelines.values.first.running?
201
- end
205
+ it "tries to reload the pipeline" do
206
+ t = Thread.new { subject.execute }
207
+ Timeout.timeout(timeout) do
208
+ sleep(0.1) until subject.running_pipelines? && subject.pipelines.values.first.running?
209
+ end
202
210
 
203
- expect(subject.converge_state_and_update).to be_a_successful_converge
204
- expect(subject).to have_running_pipeline?(mock_second_pipeline_config)
211
+ expect(subject.converge_state_and_update).to be_a_successful_converge
212
+ expect(subject).to have_running_pipeline?(mock_second_pipeline_config)
205
213
 
206
- Stud.stop!(t)
207
- t.join
208
- subject.shutdown
214
+ Stud.stop!(t)
215
+ t.join
216
+ subject.shutdown
217
+ end
209
218
  end
210
219
  end
211
220
  end
212
221
  end
213
- end
214
-
215
- describe "Environment Variables In Configs" do
216
- let(:temporary_file) { Stud::Temporary.file.path }
217
222
 
218
- let(:pipeline_config) { "input { generator { message => '${FOO}-bar' count => 1 } } filter { } output { file { path => '#{temporary_file}' } }" }
219
- let(:agent_args) { {
220
- "config.reload.automatic" => false,
221
- "config.reload.interval" => "10ms",
222
- "config.string" => pipeline_config
223
- } }
223
+ describe "Environment Variables In Configs" do
224
+ let(:temporary_file) { Stud::Temporary.file.path }
224
225
 
225
- let(:source_loader) {
226
- TestSourceLoader.new(mock_pipeline_config(default_pipeline_id, pipeline_config))
227
- }
226
+ let(:pipeline_config) { "input { generator { message => '${FOO}-bar' count => 1 } } filter { } output { file { path => '#{temporary_file}' } }" }
227
+ let(:agent_args) { {
228
+ "config.reload.automatic" => false,
229
+ "config.reload.interval" => "10ms",
230
+ "config.string" => pipeline_config
231
+ } }
228
232
 
229
- subject { described_class.new(mock_settings(agent_args), source_loader) }
233
+ let(:source_loader) {
234
+ TestSourceLoader.new(mock_pipeline_config(default_pipeline_id, pipeline_config))
235
+ }
230
236
 
231
- after do
232
- subject.shutdown
233
- end
237
+ subject { described_class.new(mock_settings(agent_args), source_loader) }
234
238
 
235
- context "environment variable templating" do
236
- before :each do
237
- @foo_content = ENV["FOO"]
238
- ENV["FOO"] = "foo"
239
+ after do
240
+ subject.shutdown
239
241
  end
240
242
 
241
- after :each do
242
- ENV["FOO"] = @foo_content
243
- end
243
+ context "environment variable templating" do
244
+ before :each do
245
+ @foo_content = ENV["FOO"]
246
+ ENV["FOO"] = "foo"
247
+ end
244
248
 
245
- it "are evaluated at plugins creation" do
246
- expect(subject.converge_state_and_update).to be_a_successful_converge
249
+ after :each do
250
+ ENV["FOO"] = @foo_content
251
+ end
247
252
 
248
- # Since the pipeline is running in another threads
249
- # the content of the file wont be instant.
250
- sleep(0.01) until ::File.size(temporary_file) > 0
251
- json_document = LogStash::Json.load(File.read(temporary_file).chomp)
252
- expect(json_document["message"]).to eq("foo-bar")
253
+ it "are evaluated at plugins creation" do
254
+ expect(subject.converge_state_and_update).to be_a_successful_converge
255
+
256
+ # Since the pipeline is running in another thread
257
+ # the content of the file won't be instant.
258
+ Timeout.timeout(timeout) do
259
+ sleep(0.1) until ::File.size(temporary_file) > 0
260
+ end
261
+ json_document = LogStash::Json.load(File.read(temporary_file).chomp)
262
+ expect(json_document["message"]).to eq("foo-bar")
263
+ end
253
264
  end
254
265
  end
255
- end
256
-
257
- describe "#upgrade_pipeline" do
258
- let(:pipeline_config) { "input { generator {} } filter { } output { }" }
259
- let(:pipeline_args) { { "pipeline.workers" => 4 } }
260
- let(:mocked_pipeline_config) { mock_pipeline_config(default_pipeline_id, pipeline_config, mock_settings(pipeline_args))}
261
266
 
262
- let(:new_pipeline_config) { "input generator {} } output { }" }
263
- let(:mocked_new_pipeline_config) { mock_pipeline_config(default_pipeline_id, new_pipeline_config, mock_settings(pipeline_args))}
264
- let(:source_loader) { TestSequenceSourceLoader.new(mocked_pipeline_config, mocked_new_pipeline_config)}
267
+ describe "#upgrade_pipeline" do
268
+ let(:pipeline_config) { "input { generator {} } filter { } output { }" }
269
+ let(:pipeline_args) { { "pipeline.workers" => 4 } }
270
+ let(:mocked_pipeline_config) { mock_pipeline_config(default_pipeline_id, pipeline_config, mock_settings(pipeline_args))}
265
271
 
266
- subject { described_class.new(agent_settings, source_loader) }
272
+ let(:new_pipeline_config) { "input generator {} } output { }" }
273
+ let(:mocked_new_pipeline_config) { mock_pipeline_config(default_pipeline_id, new_pipeline_config, mock_settings(pipeline_args))}
274
+ let(:source_loader) { TestSequenceSourceLoader.new(mocked_pipeline_config, mocked_new_pipeline_config)}
267
275
 
268
- before(:each) do
269
- # Run the initial config
270
- expect(subject.converge_state_and_update).to be_a_successful_converge
271
- end
276
+ subject { described_class.new(agent_settings, source_loader) }
272
277
 
273
- after(:each) do
274
- # new pipelines will be created part of the upgrade process so we need
275
- # to close any initialized pipelines
276
- subject.shutdown
277
- end
278
+ before(:each) do
279
+ # Run the initial config
280
+ expect(subject.converge_state_and_update).to be_a_successful_converge
281
+ end
278
282
 
279
- context "when the upgrade fails" do
280
- it "leaves the state untouched" do
281
- expect(subject.converge_state_and_update).not_to be_a_successful_converge
282
- expect(subject.get_pipeline(default_pipeline_id).config_str).to eq(pipeline_config)
283
+ after(:each) do
284
+ # new pipelines will be created part of the upgrade process so we need
285
+ # to close any initialized pipelines
286
+ subject.shutdown
283
287
  end
284
288
 
285
- # TODO(ph): This valid?
286
- xcontext "and current state is empty" do
287
- it "should not start a pipeline" do
288
- expect(subject).to_not receive(:start_pipeline)
289
- subject.send(:"reload_pipeline!", default_pipeline_id)
289
+ context "when the upgrade fails" do
290
+ it "leaves the state untouched" do
291
+ expect(subject.converge_state_and_update).not_to be_a_successful_converge
292
+ expect(subject.get_pipeline(default_pipeline_id).config_str).to eq(pipeline_config)
293
+ end
294
+
295
+ # TODO(ph): This valid?
296
+ xcontext "and current state is empty" do
297
+ it "should not start a pipeline" do
298
+ expect(subject).to_not receive(:start_pipeline)
299
+ subject.send(:"reload_pipeline!", default_pipeline_id)
300
+ end
290
301
  end
291
302
  end
292
- end
293
303
 
294
- context "when the upgrade succeeds" do
295
- let(:new_config) { "input { generator { id => 'abc' count => 1000000 } } output { }" }
296
- let(:mocked_new_pipeline_config) { mock_pipeline_config(default_pipeline_id, new_config, mock_settings(pipeline_args)) }
304
+ context "when the upgrade succeeds" do
305
+ let(:new_config) { "input { generator { id => 'abc' count => 1000000 } } output { }" }
306
+ let(:mocked_new_pipeline_config) { mock_pipeline_config(default_pipeline_id, new_config, mock_settings(pipeline_args)) }
297
307
 
298
- it "updates the state" do
299
- expect(subject.converge_state_and_update).to be_a_successful_converge
300
- expect(subject.get_pipeline(default_pipeline_id).config_str).to eq(new_config)
301
- end
308
+ it "updates the state" do
309
+ expect(subject.converge_state_and_update).to be_a_successful_converge
310
+ expect(subject.get_pipeline(default_pipeline_id).config_str).to eq(new_config)
311
+ end
302
312
 
303
- it "starts the pipeline" do
304
- expect(subject.converge_state_and_update).to be_a_successful_converge
305
- expect(subject.get_pipeline(default_pipeline_id).running?).to be_truthy
313
+ it "starts the pipeline" do
314
+ expect(subject.converge_state_and_update).to be_a_successful_converge
315
+ expect(subject.get_pipeline(default_pipeline_id).running?).to be_truthy
316
+ end
306
317
  end
307
318
  end
308
- end
309
319
 
310
- context "#started_at" do
311
- it "return the start time when the agent is started" do
312
- expect(described_class::STARTED_AT).to be_kind_of(Time)
320
+ context "#started_at" do
321
+ it "return the start time when the agent is started" do
322
+ expect(described_class::STARTED_AT).to be_kind_of(Time)
323
+ end
313
324
  end
314
- end
315
325
 
316
- context "#uptime" do
317
- it "return the number of milliseconds since start time" do
318
- expect(subject.uptime).to be >= 0
326
+ context "#uptime" do
327
+ it "return the number of milliseconds since start time" do
328
+ expect(subject.uptime).to be >= 0
329
+ end
319
330
  end
320
- end
321
331
 
322
- context "metrics after config reloading" do
332
+ context "metrics after config reloading" do
323
333
 
324
- let(:initial_generator_threshold) { 1000 }
325
- let(:original_config_output) { Stud::Temporary.pathname }
326
- let(:new_config_output) { Stud::Temporary.pathname }
334
+ let(:initial_generator_threshold) { 1000 }
335
+ let(:original_config_output) { Stud::Temporary.pathname }
336
+ let(:new_config_output) { Stud::Temporary.pathname }
327
337
 
328
- let(:config_file_txt) { "input { generator { count => #{initial_generator_threshold*2} } } output { file { path => '#{original_config_output}'} }" }
338
+ let(:config_file_txt) { "input { generator { count => #{initial_generator_threshold*2} } } output { file { path => '#{original_config_output}'} }" }
329
339
 
330
- let(:agent_args) do
331
- {
332
- "metric.collect" => true,
333
- "path.config" => config_file
334
- }
335
- end
340
+ let(:agent_args) do
341
+ {
342
+ "metric.collect" => true,
343
+ "path.config" => config_file
344
+ }
345
+ end
336
346
 
337
- subject { described_class.new(agent_settings, default_source_loader) }
347
+ subject { described_class.new(agent_settings, default_source_loader) }
338
348
 
339
- let(:agent_thread) do
340
- # subject has to be called for the first time outside the thread because it could create a race condition
341
- # with subsequent subject calls
342
- s = subject
343
- Thread.new { s.execute }
344
- end
349
+ let(:agent_thread) do
350
+ # subject has to be called for the first time outside the thread because it could create a race condition
351
+ # with subsequent subject calls
352
+ s = subject
353
+ Thread.new { s.execute }
354
+ end
345
355
 
346
- before(:each) do
347
- @abort_on_exception = Thread.abort_on_exception
348
- Thread.abort_on_exception = true
356
+ before(:each) do
357
+ @abort_on_exception = Thread.abort_on_exception
358
+ Thread.abort_on_exception = true
349
359
 
350
- agent_thread
360
+ agent_thread
351
361
 
352
- # wait for some events to reach the dummy_output
353
- Timeout.timeout(timeout) do
354
- # wait for file existence otherwise it will raise exception on Windows
355
- sleep(0.1) until ::File.exist?(original_config_output)
356
- sleep(0.1) until IO.readlines(original_config_output).size > initial_generator_threshold
357
- end
362
+ # wait for some events to reach the dummy_output
363
+ Timeout.timeout(timeout) do
364
+ # wait for file existence otherwise it will raise exception on Windows
365
+ sleep(0.3) until ::File.exist?(original_config_output)
366
+ sleep(0.3) until IO.readlines(original_config_output).size > initial_generator_threshold
367
+ end
358
368
 
359
- # write new config
360
- File.open(config_file, "w") { |f| f.write(new_config) }
361
- end
369
+ # write new config
370
+ File.open(config_file, "w") { |f| f.write(new_config) }
371
+ end
362
372
 
363
- after :each do
364
- begin
365
- Stud.stop!(agent_thread) rescue nil # it may be dead already
366
- agent_thread.join
367
- subject.shutdown
373
+ after :each do
374
+ begin
375
+ Stud.stop!(agent_thread) rescue nil # it may be dead already
376
+ agent_thread.join
377
+ subject.shutdown
368
378
 
369
- FileUtils.rm(original_config_output)
370
- FileUtils.rm(new_config_output) if File.exist?(new_config_output)
371
- rescue
372
- #don't care about errors here.
373
- ensure
374
- Thread.abort_on_exception = @abort_on_exception
379
+ FileUtils.rm(original_config_output)
380
+ FileUtils.rm(new_config_output) if File.exist?(new_config_output)
381
+ rescue
382
+ #don't care about errors here.
383
+ ensure
384
+ Thread.abort_on_exception = @abort_on_exception
385
+ end
375
386
  end
376
- end
377
387
 
378
- context "when reloading a good config" do
379
- let(:new_config_generator_counter) { 500 }
380
- let(:new_config) { "input { generator { count => #{new_config_generator_counter} } } output { file { path => '#{new_config_output}'} }" }
388
+ context "when reloading a good config" do
389
+ let(:new_config_generator_counter) { 500 }
390
+ let(:new_config) { "input { generator { count => #{new_config_generator_counter} } } output { file { path => '#{new_config_output}'} }" }
381
391
 
382
- before :each do
383
- subject.converge_state_and_update
392
+ before :each do
393
+ subject.converge_state_and_update
394
+
395
+ # wait for file existence otherwise it will raise exception on Windows
396
+ wait(timeout)
397
+ .for { ::File.exists?(new_config_output) && !::File.read(new_config_output).chomp.empty? }
398
+ .to eq(true)
399
+ # ensure the converge_state_and_update method has updated metrics by
400
+ # invoking the mutex
401
+ subject.running_pipelines?
402
+ end
384
403
 
385
- # wait for file existence otherwise it will raise exception on Windows
386
- wait(timeout)
387
- .for { ::File.exists?(new_config_output) && !::File.read(new_config_output).chomp.empty? }
388
- .to eq(true)
389
- # ensure the converge_state_and_update method has updated metrics by
390
- # invoking the mutex
391
- subject.running_pipelines?
392
- end
404
+ it "resets the pipeline metric collector" do
405
+ snapshot = subject.metric.collector.snapshot_metric
406
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:events][:in].value
407
+ expect(value).to be <= new_config_generator_counter
408
+ end
393
409
 
394
- it "resets the pipeline metric collector" do
395
- snapshot = subject.metric.collector.snapshot_metric
396
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:events][:in].value
397
- expect(value).to be <= new_config_generator_counter
398
- end
410
+ it "does not reset the global event count" do
411
+ snapshot = subject.metric.collector.snapshot_metric
412
+ value = snapshot.metric_store.get_with_path("/stats/events")[:stats][:events][:in].value
413
+ expect(value).to be > initial_generator_threshold
414
+ end
399
415
 
400
- it "does not reset the global event count" do
401
- snapshot = subject.metric.collector.snapshot_metric
402
- value = snapshot.metric_store.get_with_path("/stats/events")[:stats][:events][:in].value
403
- expect(value).to be > initial_generator_threshold
404
- end
416
+ it "increases the successful reload count" do
417
+ skip("This test fails randomly, tracked in https://github.com/elastic/logstash/issues/8005")
418
+ snapshot = subject.metric.collector.snapshot_metric
419
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
420
+ expect(value).to eq(1)
421
+ instance_value = snapshot.metric_store.get_with_path("/stats")[:stats][:reloads][:successes].value
422
+ expect(instance_value).to eq(1)
423
+ end
405
424
 
406
- it "increases the successful reload count" do
407
- snapshot = subject.metric.collector.snapshot_metric
408
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
409
- expect(value).to eq(1)
410
- instance_value = snapshot.metric_store.get_with_path("/stats")[:stats][:reloads][:successes].value
411
- expect(instance_value).to eq(1)
412
- end
425
+ it "does not set the failure reload timestamp" do
426
+ snapshot = subject.metric.collector.snapshot_metric
427
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_failure_timestamp].value
428
+ expect(value).to be(nil)
429
+ end
413
430
 
414
- it "does not set the failure reload timestamp" do
415
- snapshot = subject.metric.collector.snapshot_metric
416
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_failure_timestamp].value
417
- expect(value).to be(nil)
418
- end
431
+ it "sets the success reload timestamp" do
432
+ snapshot = subject.metric.collector.snapshot_metric
433
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_success_timestamp].value
434
+ expect(value).to be_a(Timestamp)
435
+ end
419
436
 
420
- it "sets the success reload timestamp" do
421
- snapshot = subject.metric.collector.snapshot_metric
422
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_success_timestamp].value
423
- expect(value).to be_a(Timestamp)
437
+ it "does not set the last reload error" do
438
+ snapshot = subject.metric.collector.snapshot_metric
439
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_error].value
440
+ expect(value).to be(nil)
441
+ end
424
442
  end
425
443
 
426
- it "does not set the last reload error" do
427
- snapshot = subject.metric.collector.snapshot_metric
428
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_error].value
429
- expect(value).to be(nil)
430
- end
431
- end
444
+ context "when reloading a bad config" do
445
+ let(:new_config) { "input { generator { count => " }
446
+ before(:each) { subject.converge_state_and_update }
432
447
 
433
- context "when reloading a bad config" do
434
- let(:new_config) { "input { generator { count => " }
435
- before(:each) { subject.converge_state_and_update }
448
+ it "does not increase the successful reload count" do
449
+ snapshot = subject.metric.collector.snapshot_metric
450
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
451
+ expect(value).to eq(0)
452
+ end
436
453
 
437
- it "does not increase the successful reload count" do
438
- snapshot = subject.metric.collector.snapshot_metric
439
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
440
- expect(value).to eq(0)
441
- end
454
+ it "does not set the successful reload timestamp" do
455
+ snapshot = subject.metric.collector.snapshot_metric
456
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_success_timestamp].value
457
+ expect(value).to be(nil)
458
+ end
442
459
 
443
- it "does not set the successful reload timestamp" do
444
- snapshot = subject.metric.collector.snapshot_metric
445
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_success_timestamp].value
446
- expect(value).to be(nil)
447
- end
460
+ it "sets the failure reload timestamp" do
461
+ snapshot = subject.metric.collector.snapshot_metric
462
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_failure_timestamp].value
463
+ expect(value).to be_a(Timestamp)
464
+ end
448
465
 
449
- it "sets the failure reload timestamp" do
450
- snapshot = subject.metric.collector.snapshot_metric
451
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_failure_timestamp].value
452
- expect(value).to be_a(Timestamp)
453
- end
466
+ it "sets the last reload error" do
467
+ snapshot = subject.metric.collector.snapshot_metric
468
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_error].value
469
+ expect(value).to be_a(Hash)
470
+ expect(value).to include(:message, :backtrace)
471
+ end
454
472
 
455
- it "sets the last reload error" do
456
- snapshot = subject.metric.collector.snapshot_metric
457
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:last_error].value
458
- expect(value).to be_a(Hash)
459
- expect(value).to include(:message, :backtrace)
473
+ it "increases the failed reload count" do
474
+ snapshot = subject.metric.collector.snapshot_metric
475
+ value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:failures].value
476
+ expect(value).to be > 0
477
+ end
460
478
  end
461
479
 
462
- it "increases the failed reload count" do
463
- snapshot = subject.metric.collector.snapshot_metric
464
- value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:failures].value
465
- expect(value).to be > 0
466
- end
467
- end
480
+ context "when reloading a config that raises exception on pipeline.run" do
481
+ let(:new_config) { "input { generator { count => 10000 } } output { null {} }" }
482
+ let(:agent_args) do
483
+ {
484
+ "config.reload.automatic" => false,
485
+ "pipeline.batch.size" => 1,
486
+ "metric.collect" => true,
487
+ "path.config" => config_file
488
+ }
489
+ end
468
490
 
469
- context "when reloading a config that raises exception on pipeline.run" do
470
- let(:new_config) { "input { generator { count => 10000 } } output { null {} }" }
471
- let(:agent_args) do
472
- {
473
- "config.reload.automatic" => false,
474
- "pipeline.batch.size" => 1,
475
- "metric.collect" => true,
476
- "path.config" => config_file
477
- }
478
- end
491
+ class BrokenGenerator < LogStash::Inputs::Generator
492
+ def register
493
+ raise ArgumentError
494
+ end
495
+ end
479
496
 
480
- class BrokenGenerator < LogStash::Inputs::Generator
481
- def register
482
- raise ArgumentError
497
+ before :each do
498
+ allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_return(BrokenGenerator)
483
499
  end
484
- end
485
500
 
486
- before :each do
487
- allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_return(BrokenGenerator)
488
- end
501
+ it "does not increase the successful reload count" do
502
+ expect { subject.converge_state_and_update }.to_not change {
503
+ snapshot = subject.metric.collector.snapshot_metric
504
+ reload_metrics = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads]
505
+ reload_metrics[:successes].value
506
+ }
507
+ end
489
508
 
490
- it "does not increase the successful reload count" do
491
- expect { subject.converge_state_and_update }.to_not change {
492
- snapshot = subject.metric.collector.snapshot_metric
493
- reload_metrics = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads]
494
- reload_metrics[:successes].value
495
- }
509
+ it "increases the failures reload count" do
510
+ expect { subject.converge_state_and_update }.to change {
511
+ snapshot = subject.metric.collector.snapshot_metric
512
+ reload_metrics = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads]
513
+ reload_metrics[:failures].value
514
+ }.by(1)
515
+ end
496
516
  end
517
+ end
518
+ end
497
519
 
498
- it "increases the failures reload count" do
499
- expect { subject.converge_state_and_update }.to change {
500
- snapshot = subject.metric.collector.snapshot_metric
501
- reload_metrics = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads]
502
- reload_metrics[:failures].value
503
- }.by(1)
504
- end
520
+ # running all agent tests both using memory and persisted queue is important to make sure we
521
+ # don't introduce regressions in the queue/pipeline initialization sequence which typically surface
522
+ # in agent tests and in particular around config reloading
523
+
524
+ describe "using memory queue" do
525
+ it_behaves_like "all Agent tests" do
526
+ let(:agent_settings) { mock_settings("queue.type" => "memory") }
527
+ end
528
+ end
529
+
530
+ describe "using persisted queue" do
531
+ it_behaves_like "all Agent tests" do
532
+ let(:agent_settings) { mock_settings("queue.type" => "persisted", "queue.drain" => true) }
505
533
  end
506
534
  end
507
535
  end
@@ -33,21 +33,25 @@ describe LogStash::PipelineAction::Create do
33
33
  let(:pipeline_config) { mock_pipeline_config(:main, "input { generator { count => 1 } } output { null {} }") }
34
34
 
35
35
  it "returns a successful execution status" do
36
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
36
37
  expect(subject.execute(agent, pipelines)).to be_truthy
37
38
  end
38
39
  end
39
40
 
40
41
  context "when the pipeline successfully start" do
41
42
  it "adds the pipeline to the current pipelines" do
43
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
42
44
  expect { subject.execute(agent, pipelines) }.to change(pipelines, :size).by(1)
43
45
  end
44
46
 
45
47
  it "starts the pipeline" do
48
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
46
49
  subject.execute(agent, pipelines)
47
50
  expect(pipelines[:main].running?).to be_truthy
48
51
  end
49
52
 
50
53
  it "returns a successful execution status" do
54
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
51
55
  expect(subject.execute(agent, pipelines)).to be_truthy
52
56
  end
53
57
  end
@@ -65,6 +69,7 @@ describe LogStash::PipelineAction::Create do
65
69
  let(:pipeline_config) { mock_pipeline_config(:main, "input { generator { id => '123' } } filter { ruby { init => '1/0' code => '1+2' } } output { null {} }") }
66
70
 
67
71
  it "returns false" do
72
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
68
73
  expect(subject.execute(agent, pipelines)).not_to be_a_successful_action
69
74
  end
70
75
  end
@@ -33,15 +33,18 @@ describe LogStash::PipelineAction::Reload do
33
33
 
34
34
  context "when existing pipeline and new pipeline are both reloadable" do
35
35
  it "stop the previous pipeline" do
36
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
36
37
  expect { subject.execute(agent, pipelines) }.to change(pipeline, :running?).from(true).to(false)
37
38
  end
38
39
 
39
40
  it "start the new pipeline" do
41
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
40
42
  subject.execute(agent, pipelines)
41
43
  expect(pipelines[pipeline_id].running?).to be_truthy
42
44
  end
43
45
 
44
46
  it "run the new pipeline code" do
47
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
45
48
  subject.execute(agent, pipelines)
46
49
  expect(pipelines[pipeline_id].config_hash).to eq(new_pipeline_config.config_hash)
47
50
  end
@@ -61,6 +64,7 @@ describe LogStash::PipelineAction::Reload do
61
64
  let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input { generator { id => 'new' } } output { null {} }", { "pipeline.reloadable" => false}) }
62
65
 
63
66
  it "cannot successfully execute the action" do
67
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
64
68
  expect(subject.execute(agent, pipelines)).not_to be_a_successful_action
65
69
  end
66
70
  end
@@ -69,6 +73,7 @@ describe LogStash::PipelineAction::Reload do
69
73
  let(:new_pipeline_config) { mock_pipeline_config(pipeline_id, "input generator { id => 'new' } } output { null {} }", { "pipeline.reloadable" => false}) }
70
74
 
71
75
  it "cannot successfully execute the action" do
76
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
72
77
  expect(subject.execute(agent, pipelines)).not_to be_a_successful_action
73
78
  end
74
79
  end
@@ -79,6 +84,7 @@ describe LogStash::PipelineAction::Reload do
79
84
  end
80
85
 
81
86
  it "cannot successfully execute the action" do
87
+ allow(agent).to receive(:exclusive) { |&arg| arg.call }
82
88
  expect(subject.execute(agent, pipelines)).not_to be_a_successful_action
83
89
  end
84
90
  end
@@ -1,6 +1,6 @@
1
1
  ---
2
- logstash: 6.4.3
3
- logstash-core: 6.4.3
2
+ logstash: 6.5.0
3
+ logstash-core: 6.5.0
4
4
  logstash-core-plugin-api: 2.1.16
5
5
 
6
6
  # jruby must reference a *released* version of jruby which can be downloaded from the official download url
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: logstash-core
3
3
  version: !ruby/object:Gem::Version
4
- version: 6.4.3
4
+ version: 6.5.0
5
5
  platform: java
6
6
  authors:
7
7
  - Elastic
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2018-10-31 00:00:00.000000000 Z
11
+ date: 2018-11-09 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  requirement: !ruby/object:Gem::Requirement