logstash-core 6.0.0.alpha1-java → 6.0.0.alpha2-java
- checksums.yaml +4 -4
- data/lib/logstash-core/logstash-core.jar +0 -0
- data/lib/logstash-core/version.rb +1 -1
- data/lib/logstash/agent.rb +81 -45
- data/lib/logstash/api/commands/hot_threads_reporter.rb +3 -3
- data/lib/logstash/api/commands/node.rb +13 -6
- data/lib/logstash/api/commands/stats.rb +18 -6
- data/lib/logstash/api/modules/node.rb +7 -0
- data/lib/logstash/api/modules/node_stats.rb +12 -5
- data/lib/logstash/bootstrap_check/default_config.rb +3 -7
- data/lib/logstash/compiler.rb +33 -15
- data/lib/logstash/compiler/lscl.rb +16 -8
- data/lib/logstash/config/mixin.rb +5 -42
- data/lib/logstash/config/pipeline_config.rb +1 -1
- data/lib/logstash/config/source/local.rb +28 -13
- data/lib/logstash/config/source/multi_local.rb +72 -0
- data/lib/logstash/config/source_loader.rb +1 -2
- data/lib/logstash/environment.rb +12 -3
- data/lib/logstash/execution_context.rb +7 -3
- data/lib/logstash/inputs/base.rb +2 -0
- data/lib/logstash/instrument/metric_type.rb +0 -2
- data/lib/logstash/instrument/periodic_poller/jvm.rb +5 -5
- data/lib/logstash/instrument/periodic_poller/pq.rb +1 -1
- data/lib/logstash/outputs/base.rb +2 -0
- data/lib/logstash/pipeline.rb +31 -14
- data/lib/logstash/pipeline_action/create.rb +1 -2
- data/lib/logstash/pipeline_action/reload.rb +2 -1
- data/lib/logstash/pipeline_settings.rb +50 -0
- data/lib/logstash/plugin.rb +1 -0
- data/lib/logstash/runner.rb +7 -5
- data/lib/logstash/settings.rb +11 -3
- data/lib/logstash/shutdown_watcher.rb +26 -0
- data/lib/logstash/state_resolver.rb +1 -3
- data/lib/logstash/util/dead_letter_queue_manager.rb +61 -0
- data/lib/logstash/util/environment_variables.rb +43 -0
- data/lib/logstash/util/thread_dump.rb +3 -1
- data/lib/logstash/version.rb +1 -1
- data/locales/en.yml +4 -0
- data/logstash-core.gemspec +4 -1
- data/spec/logstash/agent/converge_spec.rb +36 -35
- data/spec/logstash/agent_spec.rb +48 -177
- data/spec/{api/lib/commands/stats.rb → logstash/api/commands/stats_spec.rb} +7 -2
- data/spec/{api/lib → logstash/api}/errors_spec.rb +1 -1
- data/spec/{api/lib/api → logstash/api/modules}/logging_spec.rb +1 -10
- data/spec/{api/lib/api → logstash/api/modules}/node_plugins_spec.rb +1 -2
- data/spec/{api/lib/api → logstash/api/modules}/node_spec.rb +9 -8
- data/spec/{api/lib/api → logstash/api/modules}/node_stats_spec.rb +11 -9
- data/spec/{api/lib/api → logstash/api/modules}/plugins_spec.rb +4 -3
- data/spec/{api/lib/api → logstash/api/modules}/root_spec.rb +2 -2
- data/spec/{api/lib → logstash/api}/rack_app_spec.rb +0 -0
- data/spec/logstash/compiler/compiler_spec.rb +72 -9
- data/spec/logstash/config/source/local_spec.rb +20 -4
- data/spec/logstash/config/source/multi_local_spec.rb +113 -0
- data/spec/logstash/execution_context_spec.rb +14 -4
- data/spec/logstash/inputs/base_spec.rb +1 -1
- data/spec/logstash/instrument/wrapped_write_client_spec.rb +34 -19
- data/spec/logstash/output_delegator_spec.rb +1 -1
- data/spec/logstash/outputs/base_spec.rb +1 -1
- data/spec/logstash/pipeline_action/reload_spec.rb +1 -1
- data/spec/logstash/pipeline_action/stop_spec.rb +1 -1
- data/spec/logstash/pipeline_dlq_commit_spec.rb +107 -0
- data/spec/logstash/pipeline_pq_file_spec.rb +3 -1
- data/spec/logstash/pipeline_reporter_spec.rb +2 -1
- data/spec/logstash/pipeline_spec.rb +54 -43
- data/spec/logstash/runner_spec.rb +27 -36
- data/spec/logstash/settings/array_coercible_spec.rb +65 -0
- data/spec/logstash/settings_spec.rb +91 -0
- data/spec/logstash/shutdown_watcher_spec.rb +10 -16
- data/spec/logstash/state_resolver_spec.rb +6 -4
- data/spec/support/helpers.rb +16 -3
- data/spec/support/shared_contexts.rb +26 -2
- metadata +42 -39
- data/lib/logstash/instrument/metric_type/mean.rb +0 -33
- data/spec/api/lib/api/support/resource_dsl_methods.rb +0 -87
- data/spec/api/spec_helper.rb +0 -106
data/lib/logstash/util/environment_variables.rb ADDED
@@ -0,0 +1,43 @@
+# encoding: utf-8
+module ::LogStash::Util::EnvironmentVariables
+
+  ENV_PLACEHOLDER_REGEX = /\${(?<name>[a-zA-Z_.][a-zA-Z0-9_.]*)(:(?<default>[^}]*))?}/
+
+  # Recursive method to replace environment variable references in parameters
+  def deep_replace(value)
+    if value.is_a?(Hash)
+      value.each do |valueHashKey, valueHashValue|
+        value[valueHashKey.to_s] = deep_replace(valueHashValue)
+      end
+    else
+      if value.is_a?(Array)
+        value.each_index do | valueArrayIndex|
+          value[valueArrayIndex] = deep_replace(value[valueArrayIndex])
+        end
+      else
+        return replace_env_placeholders(value)
+      end
+    end
+  end
+
+  # Replace all environment variable references in 'value' param by environment variable value and return updated value
+  # Process following patterns : $VAR, ${VAR}, ${VAR:defaultValue}
+  def replace_env_placeholders(value)
+    return value unless value.is_a?(String)
+
+    value.gsub(ENV_PLACEHOLDER_REGEX) do |placeholder|
+      # Note: Ruby docs claim[1] Regexp.last_match is thread-local and scoped to
+      # the call, so this should be thread-safe.
+      #
+      # [1] http://ruby-doc.org/core-2.1.1/Regexp.html#method-c-last_match
+      name = Regexp.last_match(:name)
+      default = Regexp.last_match(:default)
+
+      replacement = ENV.fetch(name, default)
+      if replacement.nil?
+        raise LogStash::ConfigurationError, "Cannot evaluate `#{placeholder}`. Environment variable `#{name}` is not set and there is no default value given."
+      end
+      replacement
+    end
+  end # def replace_env_placeholders
+end
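The module above is the new shared home for `${VAR}` / `${VAR:default}` placeholder resolution. As a rough usage sketch (not part of this diff; it assumes a loaded logstash-core runtime where `LogStash::Util` and `LogStash::ConfigurationError` are already defined, and the `Resolver` class is purely illustrative):

```ruby
# Minimal sketch: resolving environment-variable placeholders with the mixin.
require "logstash/util/environment_variables"

class Resolver
  include LogStash::Util::EnvironmentVariables
end

ENV["DB_HOST"] = "db.internal"

resolver = Resolver.new
settings = { "hosts" => ["${DB_HOST}:5432"], "user" => "${DB_USER:postgres}" }

# Hashes and arrays are walked recursively; string values get their ${VAR}
# and ${VAR:default} references substituted from ENV.
resolver.deep_replace(settings)
# => {"hosts"=>["db.internal:5432"], "user"=>"postgres"}

# A reference to an unset variable with no default raises
# LogStash::ConfigurationError.
```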
data/lib/logstash/util/thread_dump.rb CHANGED
@@ -1,4 +1,6 @@
 # encoding: utf-8
+java_import 'org.logstash.instrument.reports.ThreadsReport'
+
 module LogStash
   module Util
     class ThreadDump
@@ -10,7 +12,7 @@ module LogStash
 
       def initialize(options={})
         @options = options
-        @dump = options.fetch(:dump,
+        @dump = options.fetch(:dump, ThreadsReport.generate({}))
         @top_count = options.fetch(:threads, THREADS_COUNT_DEFAULT)
         @ignore = options.fetch(:ignore_idle_threads, IGNORE_IDLE_THREADS_DEFAULT)
       end
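With jrmonitor dropped from the gemspec (below), the default dump now comes from the Java-side `org.logstash.instrument.reports.ThreadsReport`. A construction sketch using only the option keys visible in this diff (it requires the logstash-core JRuby runtime; reporting methods on the resulting object are not shown here):

```ruby
# Sketch: building the thread-dump helper. With no :dump option, the new
# default calls org.logstash.instrument.reports.ThreadsReport.generate({}).
require "logstash/util/thread_dump"

dump = LogStash::Util::ThreadDump.new(
  :threads => 3,                  # report only the top 3 threads
  :ignore_idle_threads => true    # skip threads that are merely waiting
)
```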
data/lib/logstash/version.rb CHANGED
data/locales/en.yml CHANGED
@@ -98,8 +98,12 @@ en:
     missing-configuration: >-
       No configuration file was specified. Perhaps you forgot to provide
       the '-f yourlogstash.conf' flag?
+    config-string-path-exclusive:
+      Settings 'path.config' (-f) and 'config.string' (-e) can't be used simultaneously.
     reload-without-config-path: >-
      Configuration reloading also requires passing a configuration path with '-f yourlogstash.conf'
+    reload-with-config-string: >-
+      Configuration reloading can't be used with 'config.string' (-e).
     locked-data-path: >-
       Logstash could not be started because there is already another instance using the configured data directory. If you wish to run multiple instances, you must change the "path.data" setting.
     invalid-shell: >-
data/logstash-core.gemspec CHANGED
@@ -25,11 +25,14 @@ Gem::Specification.new do |gem|
   gem.add_runtime_dependency "filesize", "0.0.4" #(MIT license) for :bytes config validator
   gem.add_runtime_dependency "gems", "~> 0.8.3" #(MIT license)
   gem.add_runtime_dependency "concurrent-ruby", "1.0.0"
+
+  # Later versions are ruby 2.0 only. We should remove the rack dep once we support 9k
+  gem.add_runtime_dependency "rack", '1.6.6'
+
   gem.add_runtime_dependency "sinatra", '~> 1.4', '>= 1.4.6'
   gem.add_runtime_dependency 'puma', '~> 2.16'
   gem.add_runtime_dependency "jruby-openssl", "0.9.16" # >= 0.9.13 Required to support TLSv1.2
   gem.add_runtime_dependency "chronic_duration", "0.10.6"
-  gem.add_runtime_dependency "jrmonitor", '~> 0.4.2'
 
   # TODO(sissel): Treetop 1.5.x doesn't seem to work well, but I haven't
   # investigated what the cause might be. -Jordan
data/spec/logstash/agent/converge_spec.rb CHANGED
@@ -26,18 +26,6 @@ describe LogStash::Agent do
     expect(converge_result).to be_a_successful_converge
   end
 
-
-  describe "passing the agent to the pipeline" do
-    let(:source_loader) { TestSourceLoader.new(pipeline_config) }
-    let(:pipeline_config) { mock_pipeline_config(:main, "input { generator { count => 10 } } output { null {} }") }
-
-    before { subject.execute }
-
-    it "execute the pipeline and stop execution" do
-      expect(subject.get_pipeline(:main).execution_context.agent).to eq(subject)
-    end
-  end
-
   context "Agent execute options" do
     let(:source_loader) do
       TestSourceLoader.new(finite_pipeline_config)
@@ -60,33 +48,46 @@ describe LogStash::Agent do
     end
 
     context "system pipeline" do
-
+
       let(:system_pipeline_config) { mock_pipeline_config(:system_pipeline, "input { generator { } } output { null {} }", { "pipeline.system" => true }) }
 
-      let(:source_loader) do
-        TestSourceLoader.new(finite_pipeline_config, system_pipeline_config)
-      end
-
       context "when we have a finite pipeline and a system pipeline running" do
+
+        let(:finite_pipeline_config) { mock_pipeline_config(:main, "input { generator { count => 1000 } } output { null {} }") }
+
+        let(:source_loader) do
+          TestSourceLoader.new(finite_pipeline_config, system_pipeline_config)
+        end
+
         it "execute the pipeline and stop execution" do
           expect(subject.execute).to eq(0)
         end
       end
 
-
-
-
-
-
-          subject.shutdown
+      context "when we have an infinite pipeline and a system pipeline running" do
+        let(:infinite_pipeline_config) { mock_pipeline_config(:main, "input { generator { } } output { null {} }") }
+
+        let(:source_loader) do
+          TestSourceLoader.new(infinite_pipeline_config, system_pipeline_config)
         end
-      end
 
-
-
-
-
-
+      describe "#running_user_defined_pipelines" do
+        it "returns the user defined pipelines" do
+          start_agent(subject)
+          subject.with_running_user_defined_pipelines do |pipelines|
+            expect(pipelines).to include(:main)
+            expect(pipelines).not_to include(:system_pipeline)
+          end
+          subject.shutdown
+        end
+      end
+
+      describe "#running_user_defined_pipelines?" do
+        it "returns true" do
+          start_agent(subject)
+          expect(subject.running_user_defined_pipelines?).to be_truthy
+          subject.shutdown
+        end
       end
     end
   end
@@ -186,7 +187,7 @@ describe LogStash::Agent do
 
     it "stops the running pipelines" do
       expect(subject.converge_state_and_update).to be_a_successful_converge
-      expect { subject.shutdown }.to change { subject.
+      expect { subject.shutdown }.to change { subject.running_pipelines_count }.from(2).to(0)
     end
   end
 
@@ -207,7 +208,7 @@ describe LogStash::Agent do
       it "creates and starts the new pipeline" do
         expect {
           expect(subject.converge_state_and_update).to be_a_successful_converge
-        }.to change { subject.
+        }.to change { subject.running_pipelines_count }.from(0).to(1)
         expect(subject).to have_running_pipeline?(pipeline_config)
       end
     end
@@ -224,7 +225,7 @@ describe LogStash::Agent do
       it "start a new pipeline and keep the original" do
         expect {
           expect(subject.converge_state_and_update).to be_a_successful_converge
-        }.to change { subject.
+        }.to change { subject.running_pipelines_count }.from(1).to(2)
         expect(subject).to have_running_pipeline?(pipeline_config)
         expect(subject).to have_running_pipeline?(new_pipeline_config)
       end
@@ -241,7 +242,7 @@ describe LogStash::Agent do
      it "stops the missing pipeline and start the new one" do
        expect {
          expect(subject.converge_state_and_update).to be_a_successful_converge
-        }.not_to change { subject.
+        }.not_to change { subject.running_pipelines_count }
        expect(subject).not_to have_pipeline?(pipeline_config)
        expect(subject).to have_running_pipeline?(new_pipeline_config)
      end
@@ -261,7 +262,7 @@ describe LogStash::Agent do
      it "reloads the modified pipeline" do
        expect {
          expect(subject.converge_state_and_update).to be_a_successful_converge
-        }.not_to change { subject.
+        }.not_to change { subject.running_pipelines_count }
        expect(subject).to have_running_pipeline?(modified_pipeline_config)
        expect(subject).not_to have_pipeline?(pipeline_config)
      end
@@ -278,7 +279,7 @@ describe LogStash::Agent do
      it "stops all the pipelines" do
        expect {
          expect(subject.converge_state_and_update).to be_a_successful_converge
-        }.to change { subject.
+        }.to change { subject.running_pipelines_count }.from(2).to(0)
        expect(subject).not_to have_pipeline?(pipeline_config)
      end
    end
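The converge specs above exercise two new accessors on the agent, `with_running_user_defined_pipelines` and `running_user_defined_pipelines?`, which expose only non-system pipelines. A rough sketch of how they read from calling code (illustrative only; it assumes a started `LogStash::Agent` instance named `agent`, set up the way the specs do with `start_agent`):

```ruby
# Sketch: listing user-defined (non-system) pipelines on a running agent.
agent.with_running_user_defined_pipelines do |pipelines|
  # pipelines is keyed by pipeline id, e.g. :main; pipelines flagged with
  # "pipeline.system" => true are filtered out.
  pipelines.each_key { |pipeline_id| puts "running user pipeline: #{pipeline_id}" }
end

agent.running_user_defined_pipelines?  # => true while any of them is running
```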
data/spec/logstash/agent_spec.rb CHANGED
@@ -10,18 +10,20 @@ require_relative "../support/helpers"
 require_relative "../support/matchers"
 
 describe LogStash::Agent do
-  let(:agent_settings) { mock_settings(
-  let(:default_pipeline_id) { LogStash::SETTINGS.get("pipeline.id") }
+  let(:agent_settings) { mock_settings({}) }
   let(:agent_args) { {} }
   let(:pipeline_settings) { agent_settings.clone }
   let(:pipeline_args) { {} }
+  let(:default_pipeline_id) { agent_settings.get("pipeline.id") }
+  let(:config_string) { "input { } filter { } output { }" }
   let(:config_file) { Stud::Temporary.pathname }
-  let(:config_file_txt) {
+  let(:config_file_txt) { config_string }
   let(:default_source_loader) do
     sl = LogStash::Config::SourceLoader.new
     sl.add_source(LogStash::Config::Source::Local.new(agent_settings))
     sl
   end
+  let(:logger) { double("logger") }
 
   subject { LogStash::Agent.new(agent_settings, default_source_loader) }
 
@@ -38,31 +40,24 @@ describe LogStash::Agent do
     pipeline_args.each do |key, value|
       pipeline_settings.set(key, value)
     end
+    allow(described_class).to receive(:logger).and_return(logger)
+    [:debug, :info, :error, :fatal, :trace].each {|level| allow(logger).to receive(level) }
+    [:debug?, :info?, :error?, :fatal?, :trace?].each {|level| allow(logger).to receive(level) }
   end
 
   after :each do
+    subject.shutdown
     LogStash::SETTINGS.reset
     File.unlink(config_file)
+    File.unlink(subject.id_path)
   end
 
   it "fallback to hostname when no name is provided" do
     expect(LogStash::Agent.new(agent_settings, default_source_loader).name).to eq(Socket.gethostname)
   end
 
-  after(:each) do
-    subject.shutdown # shutdown/close the pipelines
-  end
-
   describe "adding a new pipeline" do
-    let(:
-    let(:agent_args) do
-      {
-        "config.string" => config_string,
-        "config.reload.automatic" => true,
-        "config.reload.interval" => 0.01,
-        "pipeline.workers" => 4,
-      }
-    end
+    let(:agent_args) { { "config.string" => config_string } }
 
     it "should delegate settings to new pipeline" do
       expect(LogStash::Pipeline).to receive(:new) do |arg1, arg2|
@@ -74,7 +69,6 @@ describe LogStash::Agent do
   end
 
   describe "#id" do
-    let(:config_file_txt) { "" }
     let(:id_file_data) { File.open(subject.id_path) {|f| f.read } }
 
     it "should return a UUID" do
@@ -88,8 +82,8 @@ describe LogStash::Agent do
   end
 
   describe "#execute" do
-    let(:
-    let(:mock_config_pipeline) { mock_pipeline_config(:main,
+    let(:config_string) { "input { generator { id => 'old'} } output { }" }
+    let(:mock_config_pipeline) { mock_pipeline_config(:main, config_string, pipeline_settings) }
 
     let(:source_loader) { TestSourceLoader.new(mock_config_pipeline) }
     subject { described_class.new(agent_settings, source_loader) }
@@ -100,13 +94,8 @@ describe LogStash::Agent do
     end
 
     context "when auto_reload is false" do
-      let(:
-
-        "config.reload.automatic" => false,
-        "path.config" => config_file
-        }
-      end
-
+      let(:agent_settings) { mock_settings("config.reload.automatic" => false) }
+      let(:agent_args) { { "path.config" => config_file } }
 
       context "if state is clean" do
         before :each do
@@ -118,9 +107,6 @@ describe LogStash::Agent do
           expect(subject).to receive(:converge_state_and_update).once
           t = Thread.new { subject.execute }
 
-          # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-          # a bad test design or missing class functionality.
-          sleep(0.1)
           Stud.stop!(t)
           t.join
           subject.shutdown
@@ -136,14 +122,11 @@ describe LogStash::Agent do
 
       it "does not upgrade the new config" do
         t = Thread.new { subject.execute }
-        sleep(0.
+        sleep(0.01) until subject.with_pipelines {|pipelines| subject.running_pipelines? && pipelines.values.first.ready? }
 
         expect(subject.converge_state_and_update).not_to be_a_successful_converge
         expect(subject).to have_running_pipeline?(mock_config_pipeline)
 
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
         Stud.stop!(t)
         t.join
         subject.shutdown
@@ -158,14 +141,11 @@ describe LogStash::Agent do
 
       it "does upgrade the new config" do
        t = Thread.new { subject.execute }
-        sleep(0.
+        sleep(0.01) until subject.with_pipelines {|pipelines| subject.pipelines_count > 0 && pipelines.values.first.ready? }
 
        expect(subject.converge_state_and_update).to be_a_successful_converge
        expect(subject).to have_running_pipeline?(mock_second_pipeline_config)
 
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
        Stud.stop!(t)
        t.join
        subject.shutdown
@@ -183,14 +163,11 @@ describe LogStash::Agent do
 
       it "does not try to reload the pipeline" do
         t = Thread.new { subject.execute }
-        sleep(0.01) until subject.running_pipelines? &&
+        sleep(0.01) until subject.with_pipelines {|pipelines| subject.running_pipelines? && pipelines.values.first.running? }
 
         expect(subject.converge_state_and_update).not_to be_a_successful_converge
         expect(subject).to have_running_pipeline?(mock_config_pipeline)
 
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
         Stud.stop!(t)
         t.join
         subject.shutdown
@@ -205,95 +182,16 @@ describe LogStash::Agent do
 
       it "tries to reload the pipeline" do
         t = Thread.new { subject.execute }
-        sleep(0.01) until subject.running_pipelines? &&
+        sleep(0.01) until subject.with_pipelines {|pipelines| subject.running_pipelines? && pipelines.values.first.running? }
 
         expect(subject.converge_state_and_update).to be_a_successful_converge
         expect(subject).to have_running_pipeline?(mock_second_pipeline_config)
 
-        # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-        # a bad test design or missing class functionality.
-        sleep(0.1)
-        Stud.stop!(t)
-        t.join
-        subject.shutdown
-      end
-    end
-  end
-end
-
-    context "when auto_reload is true" do
-      subject { described_class.new(agent_settings, default_source_loader) }
-
-      let(:agent_args) do
-        {
-          "config.string" => "",
-          "config.reload.automatic" => true,
-          "config.reload.interval" => 0.01,
-          "path.config" => config_file
-        }
-      end
-
-      context "if state is clean" do
-        it "should periodically reload_state" do
-          allow(subject).to receive(:clean_state?).and_return(false)
-          t = Thread.new { subject.execute }
-          sleep(0.05) until subject.running_pipelines? && subject.pipelines.values.first.running?
-          expect(subject).to receive(:converge_state_and_update).at_least(2).times
-
-          # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-          # a bad test design or missing class functionality.
-          sleep(0.1)
-          Stud.stop!(t)
-          t.join
-          subject.shutdown
-        end
-      end
-
-      context "when calling reload_state!" do
-        xcontext "with a config that contains reload incompatible plugins" do
-          let(:second_pipeline_config) { "input { stdin { id => '123' } } filter { } output { }" }
-
-          it "does not upgrade the new config" do
-            t = Thread.new { subject.execute }
-            sleep(0.05) until subject.running_pipelines? && subject.pipelines.values.first.running?
-            File.open(config_file, "w") { |f| f.puts second_pipeline_config }
-            sleep(0.2) # lets us catch the new file
-
-            try do
-              expect(subject.pipelines[default_pipeline_id.to_sym].config_str).not_to eq(second_pipeline_config)
-            end
-
         Stud.stop!(t)
         t.join
         subject.shutdown
       end
     end
-
-        context "with a config that does not contain reload incompatible plugins" do
-          let(:second_pipeline_config) { "input { generator { id => 'new' } } filter { } output { }" }
-
-          it "does upgrade the new config" do
-            t = Thread.new { subject.execute }
-
-            sleep(0.05) until subject.running_pipelines? && subject.pipelines.values.first.running?
-
-            File.open(config_file, "w") { |f| f.puts second_pipeline_config }
-            sleep(5) # lets us catch the new file
-
-            try do
-              expect(subject.pipelines[default_pipeline_id.to_sym]).not_to be_nil
-              expect(subject.pipelines[default_pipeline_id.to_sym].config_str).to match(second_pipeline_config)
-            end
-
-            # TODO: refactor this. forcing an arbitrary fixed delay for thread concurrency issues is an indication of
-            # a bad test design or missing class functionality.
-            sleep(0.1)
-            Stud.stop!(t)
-            t.join
-            expect(subject.get_pipeline(:main).config_str).to match(second_pipeline_config)
-            subject.shutdown
-          end
-        end
   end
 end
 end
@@ -333,7 +231,7 @@ describe LogStash::Agent do
 
       # Since the pipeline is running in another threads
       # the content of the file wont be instant.
-      sleep(0.
+      sleep(0.01) until ::File.size(temporary_file) > 0
       json_document = LogStash::Json.load(File.read(temporary_file).chomp)
       expect(json_document["message"]).to eq("foo-bar")
     end
@@ -365,7 +263,7 @@ describe LogStash::Agent do
     context "when the upgrade fails" do
       it "leaves the state untouched" do
         expect(subject.converge_state_and_update).not_to be_a_successful_converge
-        expect(subject.
+        expect(subject.get_pipeline(default_pipeline_id).config_str).to eq(pipeline_config)
       end
 
       # TODO(ph): This valid?
@@ -383,12 +281,12 @@ describe LogStash::Agent do
 
       it "updates the state" do
         expect(subject.converge_state_and_update).to be_a_successful_converge
-        expect(subject.
+        expect(subject.get_pipeline(default_pipeline_id).config_str).to eq(new_config)
       end
 
       it "starts the pipeline" do
         expect(subject.converge_state_and_update).to be_a_successful_converge
-        expect(subject.
+        expect(subject.get_pipeline(default_pipeline_id).running?).to be_truthy
       end
     end
   end
@@ -406,53 +304,39 @@ describe LogStash::Agent do
   end
 
   context "metrics after config reloading" do
+
+    let(:initial_generator_threshold) { 1000 }
     let(:temporary_file) { Stud::Temporary.file.path }
-    let(:
-
-    let(:config_path) do
-      f = Stud::Temporary.file
-      f.write(config)
-      f.fsync
-      f.close
-      f.path
-    end
+    let(:config_file_txt) { "input { generator { count => #{initial_generator_threshold*2} } } output { file { path => '#{temporary_file}'} }" }
 
     let(:agent_args) do
       {
-        "config.reload.automatic" => true,
-        "config.reload.interval" => 0.01,
-        "pipeline.batch.size" => 1,
         "metric.collect" => true,
-        "path.config" =>
+        "path.config" => config_file
       }
     end
 
-    let(:initial_generator_threshold) { 1000 }
-    let(:pipeline_thread) do
-      Thread.new do
-        subject.execute
-      end
-    end
-
     subject { described_class.new(agent_settings, default_source_loader) }
 
-    before
+    before(:each) do
      @abort_on_exception = Thread.abort_on_exception
      Thread.abort_on_exception = true
 
-      @t = Thread.new
-        subject.execute
-      end
+      @t = Thread.new { subject.execute }
 
      # wait for some events to reach the dummy_output
-      sleep(0.
+      sleep(0.01) until IO.readlines(temporary_file).size > initial_generator_threshold
+
+      # write new config
+      File.open(config_file, "w") { |f| f.write(new_config) }
    end
 
    after :each do
      begin
        subject.shutdown
-        Stud.stop!(
-
+        Stud.stop!(@t) rescue nil # it may be dead already
+        @t.join
+        File.unlink(temporary_file)
      ensure
        Thread.abort_on_exception = @abort_on_exception
      end
@@ -460,19 +344,19 @@ describe LogStash::Agent do
 
     context "when reloading a good config" do
       let(:new_config_generator_counter) { 500 }
-      let(:
-      let(:new_config) { "input { generator { count => #{new_config_generator_counter} } } output { file { path => '#{
+      let(:new_file) { Stud::Temporary.file.path }
+      let(:new_config) { "input { generator { count => #{new_config_generator_counter} } } output { file { path => '#{new_file}'} }" }
 
       before :each do
-
-
-
-
-
-        # wait until pipeline restarts
-        sleep(1) if ::File.read(output_file).empty?
+        subject.converge_state_and_update
+        sleep(0.01) while ::File.read(new_file).chomp.empty?
+        # ensure the converge_state_and_update method has updated metrics by
+        # invoking the mutex
+        subject.running_pipelines?
       end
 
+      after(:each) { File.unlink(new_file) }
+
       it "resets the pipeline metric collector" do
         snapshot = subject.metric.collector.snapshot_metric
         value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:events][:in].value
@@ -488,9 +372,9 @@ describe LogStash::Agent do
       it "increases the successful reload count" do
         snapshot = subject.metric.collector.snapshot_metric
         value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
+        expect(value).to eq(1)
         instance_value = snapshot.metric_store.get_with_path("/stats")[:stats][:reloads][:successes].value
         expect(instance_value).to eq(1)
-        expect(value).to eq(1)
       end
 
       it "does not set the failure reload timestamp" do
@@ -514,15 +398,7 @@ describe LogStash::Agent do
 
     context "when reloading a bad config" do
       let(:new_config) { "input { generator { count => " }
-      before
-
-        File.open(config_path, "w") do |f|
-          f.write(new_config)
-          f.fsync
-        end
-
-        sleep(1)
-      end
+      before(:each) { subject.converge_state_and_update }
 
       it "does not increase the successful reload count" do
         snapshot = subject.metric.collector.snapshot_metric
@@ -563,7 +439,7 @@ describe LogStash::Agent do
         "config.reload.automatic" => false,
         "pipeline.batch.size" => 1,
         "metric.collect" => true,
-        "path.config" =>
+        "path.config" => config_file
       }
     end
 
@@ -575,11 +451,6 @@ describe LogStash::Agent do
 
     before :each do
       allow(LogStash::Plugin).to receive(:lookup).with("input", "generator").and_return(BrokenGenerator)
-
-      File.open(config_path, "w") do |f|
-        f.write(new_config)
-        f.fsync
-      end
     end
 
     it "does not increase the successful reload count" do
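Throughout the updated agent specs, fixed `sleep(0.1)` pauses are replaced with tight polling loops such as `sleep(0.01) until subject.with_pipelines { ... }`. A minimal sketch of that idiom with an added timeout guard (the `wait_until` helper below is illustrative, not part of the gem):

```ruby
# Illustrative helper (not in logstash-core): poll a condition instead of
# sleeping for a fixed interval, and bail out if it never becomes true.
require "timeout"

def wait_until(timeout: 10, interval: 0.01)
  Timeout.timeout(timeout) do
    sleep(interval) until yield
  end
end

# e.g. in a spec:
#   wait_until { subject.with_pipelines { |pipelines| pipelines.values.first.running? } }
```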