logstash-core 6.6.0-java → 6.6.1-java

@@ -3,6 +3,7 @@ require "logstash/pipeline_action/base"
 require "logstash/pipeline"
 require "logstash/java_pipeline"
 
+
 module LogStash module PipelineAction
   class Create < Base
     include LogStash::Util::Loggable
@@ -30,33 +31,26 @@ module LogStash module PipelineAction
 
     # The execute assume that the thread safety access of the pipeline
     # is managed by the caller.
-    def execute(agent, pipelines)
-      pipeline =
+    def execute(agent, pipelines_registry)
+      new_pipeline =
         if @pipeline_config.settings.get_value("pipeline.java_execution")
           LogStash::JavaPipeline.new(@pipeline_config, @metric, agent)
         else
           agent.exclusive do
             # The Ruby pipeline initialization is not thread safe because of the module level
             # shared state in LogsStash::Config::AST. When using multiple pipelines this gets
-            # executed simultaneously in different threads and we need to synchonize this initialization.
+            # executed simultaneously in different threads and we need to synchronize this initialization.
             LogStash::Pipeline.new(@pipeline_config, @metric, agent)
           end
         end
 
-      status = nil
-      pipelines.compute(pipeline_id) do |id,value|
-        if value
-          LogStash::ConvergeResult::ActionResult.create(self, true)
-        end
-        status = pipeline.start # block until the pipeline is correctly started or crashed
-        pipeline # The pipeline is successfully started we can add it to the map
+      success = pipelines_registry.create_pipeline(pipeline_id, new_pipeline) do
+        new_pipeline.start # block until the pipeline is correctly started or crashed
       end
 
-
-      LogStash::ConvergeResult::ActionResult.create(self, status)
+      LogStash::ConvergeResult::ActionResult.create(self, success)
     end
 
-
     def to_s
       "PipelineAction::Create<#{pipeline_id}>"
     end
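
Note on the Create hunk above: the action now builds the pipeline first, then hands it to the new PipelinesRegistry together with a block that starts it, and the block's boolean result becomes the ConvergeResult. A minimal sketch of that contract (illustrative only; `config`, `metric`, `agent` and `action` stand in for the real collaborators, and java execution is assumed):

    # Illustrative sketch, not the shipped implementation.
    registry = LogStash::PipelinesRegistry.new
    pipeline = LogStash::JavaPipeline.new(config, metric, agent)

    # for a brand-new pipeline id, the registry only keeps the state when the
    # block reports success, so a pipeline that crashes during start never
    # lands in the registry
    success = registry.create_pipeline(config.pipeline_id, pipeline) do
      pipeline.start # blocks until the pipeline is running or has crashed
    end

    LogStash::ConvergeResult::ActionResult.create(action, success)
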
@@ -20,8 +20,12 @@ module LogStash module PipelineAction
       "PipelineAction::Reload<#{pipeline_id}>"
     end
 
-    def execute(agent, pipelines)
-      old_pipeline = pipelines[pipeline_id]
+    def execute(agent, pipelines_registry)
+      old_pipeline = pipelines_registry.get_pipeline(pipeline_id)
+
+      if old_pipeline.nil?
+        return LogStash::ConvergeResult::FailedAction.new("Cannot reload pipeline, because the pipeline does not exist")
+      end
 
       if !old_pipeline.reloadable?
         return LogStash::ConvergeResult::FailedAction.new("Cannot reload pipeline, because the existing pipeline is not reloadable")
@@ -34,8 +38,8 @@ module LogStash module PipelineAction
         else
           agent.exclusive do
             # The Ruby pipeline initialization is not thread safe because of the module level
-            # shared state in LogsStash::Config::AST. When using multiple pipelines this can gets
-            # executed simultaneously in different threads and we need to synchonize this initialization.
+            # shared state in LogsStash::Config::AST. When using multiple pipelines this gets
+            # executed simultaneously in different threads and we need to synchronize this initialization.
             LogStash::BasePipeline.new(@pipeline_config)
           end
         end
@@ -49,16 +53,35 @@ module LogStash module PipelineAction
 
       logger.info("Reloading pipeline", "pipeline.id" => pipeline_id)
 
-      pipelines.compute(pipeline_id) do |_,pipeline|
-        status = Stop.new(pipeline_id).execute(agent, pipelines)
+      success = pipelines_registry.reload_pipeline(pipeline_id) do
+        # important NOT to explicitly return from block here
+        # the block must emit a success boolean value
 
-        if status
-          return Create.new(@pipeline_config, @metric).execute(agent, pipelines)
-        else
-          return status
-        end
-        pipeline
+        # First shutdown old pipeline
+        old_pipeline.shutdown { LogStash::ShutdownWatcher.start(old_pipeline) }
+        old_pipeline.thread.join
+
+        # Then create a new pipeline
+        new_pipeline =
+          if @pipeline_config.settings.get_value("pipeline.java_execution")
+            LogStash::JavaPipeline.new(@pipeline_config, @metric, agent)
+          else
+            agent.exclusive do
+              # The Ruby pipeline initialization is not thread safe because of the module level
+              # shared state in LogsStash::Config::AST. When using multiple pipelines this gets
+              # executed simultaneously in different threads and we need to synchronize this initialization.
+              LogStash::Pipeline.new(@pipeline_config, @metric, agent)
+            end
+          end
+
+        success = new_pipeline.start # block until the pipeline is correctly started or crashed
+
+        # return success and new_pipeline to registry reload_pipeline
+        [success, new_pipeline]
       end
+
+      LogStash::ConvergeResult::ActionResult.create(self, success)
     end
+
   end
 end end
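
Note on the Reload hunk: the whole stop-then-start sequence now runs inside PipelinesRegistry#reload_pipeline, and the block must fall through to its final expression, which the registry consumes as [success, new_pipeline]; an explicit `return` would unwind out of `execute` through the Java compute() call instead of handing that pair back. A rough sketch of the expected block shape (`build_new_pipeline` is a hypothetical helper standing in for the java/ruby pipeline construction shown above):

    success = pipelines_registry.reload_pipeline(pipeline_id) do
      # shut the old pipeline down and wait for it to finish
      old_pipeline.shutdown { LogStash::ShutdownWatcher.start(old_pipeline) }
      old_pipeline.thread.join

      new_pipeline = build_new_pipeline(@pipeline_config) # hypothetical helper
      [new_pipeline.start, new_pipeline]                  # last expression: [success, new_pipeline]
    end
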
@@ -9,15 +9,13 @@ module LogStash module PipelineAction
       @pipeline_id = pipeline_id
     end
 
-    def execute(agent, pipelines)
-      pipelines.compute(pipeline_id) do |_,pipeline|
+    def execute(agent, pipelines_registry)
+      pipelines_registry.terminate_pipeline(pipeline_id) do |pipeline|
         pipeline.shutdown { LogStash::ShutdownWatcher.start(pipeline) }
         pipeline.thread.join
-        nil # delete the pipeline
       end
-      # If we reach this part of the code we have succeeded because
-      # the shutdown call will block.
-      return LogStash::ConvergeResult::SuccessfulAction.new
+
+      LogStash::ConvergeResult::SuccessfulAction.new
     end
 
     def to_s
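
Note on the Stop hunk: the action no longer deletes the map entry itself (the old `nil # delete the pipeline`); it only shuts the pipeline down inside PipelinesRegistry#terminate_pipeline, which keeps the PipelineState around so the registry can report it as terminated. A sketch of the resulting flow, using the names from the diff:

    pipelines_registry.terminate_pipeline(pipeline_id) do |pipeline|
      pipeline.shutdown { LogStash::ShutdownWatcher.start(pipeline) }
      pipeline.thread.join # execute blocks here until the pipeline has stopped
    end
    # the state stays registered; state.terminated? should now report true
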
@@ -0,0 +1,166 @@
+# encoding: utf-8
+
+module LogStash
+  class PipelineState
+    attr_reader :pipeline_id, :pipeline
+
+    def initialize(pipeline_id, pipeline)
+      @pipeline_id = pipeline_id
+      @pipeline = pipeline
+      @reloading = Concurrent::AtomicBoolean.new(false)
+    end
+
+    def terminated?
+      # a reloading pipeline is never considered terminated
+      @reloading.false? && @pipeline.finished_execution?
+    end
+
+    def set_reloading(is_reloading)
+      @reloading.value = is_reloading
+    end
+
+    def set_pipeline(pipeline)
+      raise(ArgumentError, "invalid nil pipeline") if pipeline.nil?
+      @pipeline = pipeline
+    end
+  end
+
+  class PipelinesRegistry
+    attr_reader :states
+    include LogStash::Util::Loggable
+
+    def initialize
+      # we leverage the semantic of the Java ConcurrentHashMap for the
+      # compute() method which is atomic; calling compute() concurrently
+      # will block until the other compute finishes so no mutex is necessary
+      # for synchronizing compute calls
+      @states = java.util.concurrent.ConcurrentHashMap.new
+    end
+
+    # Execute the passed creation logic block and create a new state upon success
+    # @param pipeline_id [String, Symbol] the pipeline id
+    # @param pipeline [Pipeline] the new pipeline to create
+    # @param create_block [Block] the creation execution logic
+    #
+    # @yieldreturn [Boolean] the new pipeline creation success
+    #
+    # @return [Boolean] new pipeline creation success
+    def create_pipeline(pipeline_id, pipeline, &create_block)
+      success = false
+
+      @states.compute(pipeline_id) do |_, state|
+        if state
+          if state.terminated?
+            success = yield
+            state.set_pipeline(pipeline)
+          else
+            logger.error("Attempted to create a pipeline that already exists", :pipeline_id => pipeline_id)
+          end
+          state
+        else
+          success = yield
+          success ? PipelineState.new(pipeline_id, pipeline) : nil
+        end
+      end
+
+      success
+    end
+
+    # Execute the passed termination logic block
+    # @param pipeline_id [String, Symbol] the pipeline id
+    # @param stop_block [Block] the termination execution logic
+    #
+    # @yieldparam [Pipeline] the pipeline to terminate
+    def terminate_pipeline(pipeline_id, &stop_block)
+      @states.compute(pipeline_id) do |_, state|
+        if state.nil?
+          logger.error("Attempted to terminate a pipeline that does not exists", :pipeline_id => pipeline_id)
+          nil
+        else
+          yield(state.pipeline)
+          state
+        end
+      end
+    end
+
+    # Execute the passed reloading logic block in the context of the reloading state and set new pipeline in state
+    # @param pipeline_id [String, Symbol] the pipeline id
+    # @param reload_block [Block] the reloading execution logic
+    #
+    # @yieldreturn [Array<Boolean, Pipeline>] the new pipeline creation success and new pipeline object
+    #
+    # @return [Boolean] new pipeline creation success
+    def reload_pipeline(pipeline_id, &reload_block)
+      success = false
+
+      @states.compute(pipeline_id) do |_, state|
+        if state.nil?
+          logger.error("Attempted to reload a pipeline that does not exists", :pipeline_id => pipeline_id)
+          nil
+        else
+          state.set_reloading(true)
+          begin
+            success, new_pipeline = yield
+            state.set_pipeline(new_pipeline)
+          ensure
+            state.set_reloading(false)
+          end
+          state
+        end
+      end
+
+      success
+    end
+
+    # @param pipeline_id [String, Symbol] the pipeline id
+    # @return [Pipeline] the pipeline object or nil if none for pipeline_id
+    def get_pipeline(pipeline_id)
+      state = @states.get(pipeline_id)
+      state.nil? ? nil : state.pipeline
+    end
+
+    # @return [Fixnum] number of items in the states collection
+    def size
+      @states.size
+    end
+
+    # @return [Boolean] true if the states collection is empty.
+    def empty?
+      @states.isEmpty
+    end
+
+    # @return [Hash{String=>Pipeline}]
+    def running_pipelines
+      select_pipelines { |state| !state.terminated? }
+    end
+
+    # @return [Hash{String=>Pipeline}]
+    def non_running_pipelines
+      select_pipelines { |state| state.terminated? }
+    end
+
+    # @return [Hash{String=>Pipeline}]
+    def running_user_defined_pipelines
+      select_pipelines { |state| !state.terminated? && !state.pipeline.system? }
+    end
+
+    private
+
+    # Returns a mapping of pipelines by their ids.
+    # Pipelines can optionally be filtered by their `PipelineState` by passing
+    # a block that returns truthy when a pipeline should be included in the
+    # result.
+    #
+    # @yieldparam [PipelineState]
+    # @yieldreturn [Boolean]
+    #
+    # @return [Hash{String=>Pipeline}]
+    def select_pipelines(&optional_state_filter)
+      @states.each_with_object({}) do |(id, state), memo|
+        if state && (!block_given? || yield(state))
+          memo[id] = state.pipeline
+        end
+      end
+    end
+  end
+end
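
The registry above leans entirely on java.util.concurrent.ConcurrentHashMap#compute for its thread safety: the remapping block runs atomically per key, concurrent compute() calls on the same key block one another, and returning nil from the block removes the entry. A minimal JRuby sketch of those semantics (illustrative values only):

    map = java.util.concurrent.ConcurrentHashMap.new

    # runs atomically for the "main" key; other compute("main") calls wait
    map.compute("main") { |_key, current| current.nil? ? "starting" : current }

    map.compute("main") { |_key, _current| nil } # returning nil deletes the entry
    map.get("main") # => nil
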
@@ -10,11 +10,11 @@ module LogStash
       @metric = metric
     end
 
-    def resolve(pipelines, pipeline_configs)
+    def resolve(pipelines_registry, pipeline_configs)
       actions = []
 
       pipeline_configs.each do |pipeline_config|
-        pipeline = pipelines[pipeline_config.pipeline_id]
+        pipeline = pipelines_registry.get_pipeline(pipeline_config.pipeline_id)
 
         if pipeline.nil?
           actions << LogStash::PipelineAction::Create.new(pipeline_config, @metric)
@@ -25,12 +25,12 @@ module LogStash
         end
       end
 
-      running_pipelines = pipeline_configs.collect(&:pipeline_id)
+      configured_pipelines = pipeline_configs.collect(&:pipeline_id)
 
       # If one of the running pipeline is not in the pipeline_configs, we assume that we need to
       # stop it.
-      pipelines.keys
-        .select { |pipeline_id| !running_pipelines.include?(pipeline_id) }
+      pipelines_registry.running_pipelines.keys
+        .select { |pipeline_id| !configured_pipelines.include?(pipeline_id) }
         .each { |pipeline_id| actions << LogStash::PipelineAction::Stop.new(pipeline_id) }
 
       actions.sort # See logstash/pipeline_action.rb
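
With the registry in place, resolve now compares the configured pipeline ids against pipelines_registry.running_pipelines rather than against every known pipeline, which avoids issuing Stop actions for pipelines that have already terminated. Roughly, with hypothetical data for illustration:

    actions    = []
    configured = pipeline_configs.collect(&:pipeline_id)    # e.g. [:main, :ingest]
    running    = pipelines_registry.running_pipelines.keys  # e.g. [:main, :old]

    # running but no longer configured => schedule a Stop action
    (running - configured).each { |id| actions << LogStash::PipelineAction::Stop.new(id) }
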
@@ -26,6 +26,7 @@ class LogStash::Util::SafeURI
     else
       raise ArgumentError, "Expected a string, java.net.URI, or URI, got a #{arg.class} creating a URL"
     end
+    raise ArgumentError, "URI is not valid - host is not specified" if @uri.host.nil?
   end
 
   def to_s
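
The SafeURI change adds an eager guard: any argument that parses without a host now fails at construction time rather than surfacing later. Sketch of the effect, assuming the 6.6.1 behaviour shown above:

    # builds normally: a host is present
    LogStash::Util::SafeURI.new("http://localhost:9200")

    # any input whose parsed host is nil now raises immediately:
    #   ArgumentError: URI is not valid - host is not specified
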
@@ -49,7 +49,7 @@ describe LogStash::Agent do
 
   context "system pipeline" do
 
-    let(:system_pipeline_config) { mock_pipeline_config(:system_pipeline, "input { generator { } } output { null {} }", { "pipeline.system" => true }) }
+    let(:system_pipeline_config) { mock_pipeline_config(:system_pipeline, "input { dummyblockinginput { } } output { null {} }", { "pipeline.system" => true }) }
 
     context "when we have a finite pipeline and a system pipeline running" do
 
@@ -65,40 +65,40 @@ describe LogStash::Agent do
     end
 
     context "when we have an infinite pipeline and a system pipeline running" do
-      let(:infinite_pipeline_config) { mock_pipeline_config(:main, "input { generator { } } output { null {} }") }
+      let(:infinite_pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { } } output { null {} }") }
 
       let(:source_loader) do
        TestSourceLoader.new(infinite_pipeline_config, system_pipeline_config)
       end
 
       before(:each) do
-        @agent_task = start_agent(subject)
+        @agent_task = start_agent(subject)
       end
 
       after(:each) do
-        @agent_task.stop!
+        @agent_task.stop!
+        @agent_task.wait
+        subject.shutdown
       end
 
       describe "#running_user_defined_pipelines" do
         it "returns the user defined pipelines" do
-          wait_for do
-            subject.with_running_user_defined_pipelines {|pipelines| pipelines.keys }
-          end.to eq([:main])
-        end
+          # wait is necessary to accommodate for pipelines startup time
+          wait(60).for {subject.running_user_defined_pipelines.keys}.to eq([:main])
+        end
       end
 
       describe "#running_user_defined_pipelines?" do
         it "returns true" do
-          wait_for do
-            subject.running_user_defined_pipelines?
-          end.to be_truthy
+          # wait is necessary to accommodate for pipelines startup time
+          wait(60).for {subject.running_user_defined_pipelines?}.to be_truthy
         end
       end
     end
   end
 
   context "when `config.reload.automatic`"
-    let(:pipeline_config) { mock_pipeline_config(:main, "input { generator {} } output { null {} }") }
+    let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput {} } output { null {} }") }
 
     let(:source_loader) do
       TestSourceLoader.new(pipeline_config)
@@ -114,14 +114,14 @@ describe LogStash::Agent do
 
       after(:each) do
         @agent_task.stop!
+        @agent_task.wait
+        subject.shutdown
       end
 
       it "converge only once" do
         wait(60).for { source_loader.fetch_count }.to eq(1)
-
+        # no need to wait here because have_running_pipeline? does the wait
         expect(subject).to have_running_pipeline?(pipeline_config)
-
-        subject.shutdown
       end
     end
 
@@ -135,8 +135,6 @@ describe LogStash::Agent do
 
         expect(source_loader.fetch_count).to eq(1)
         expect(subject.pipelines_count).to eq(0)
-
-        subject.shutdown
       end
     end
   end
@@ -149,26 +147,25 @@ describe LogStash::Agent do
         "config.reload.interval" => interval
       )
     end
+
    before(:each) do
      @agent_task = start_agent(subject)
    end
 
    after(:each) do
      @agent_task.stop!
+      @agent_task.wait
+      subject.shutdown
    end
 
    context "and successfully load the config" do
      it "converges periodically the pipelines from the configs source" do
-        sleep(2) # let the interval reload a few times
+        # no need to wait here because have_running_pipeline? does the wait
        expect(subject).to have_running_pipeline?(pipeline_config)
 
        # we rely on a periodic thread to call fetch count, we have seen unreliable run on
        # travis, so lets add a few retries
-        try do
-          expect(source_loader.fetch_count).to be > 1
-        end
-
-        subject.shutdown
+        try { expect(source_loader.fetch_count).to be > 1 }
      end
    end
 
@@ -178,12 +175,9 @@ describe LogStash::Agent do
      end
 
      it "it will keep trying to converge" do
-
        sleep(agent_settings.get("config.reload.interval") / 1_000_000_000.0 * 20) # let the interval reload a few times
        expect(subject.pipelines_count).to eq(0)
        expect(source_loader.fetch_count).to be > 1
-
-        subject.shutdown
      end
    end
  end
@@ -191,8 +185,8 @@ describe LogStash::Agent do
   end
 
   context "when shutting down the agent" do
-    let(:pipeline_config) { mock_pipeline_config(:main, "input { generator {} } output { null {} }") }
-    let(:new_pipeline_config) { mock_pipeline_config(:new, "input { generator { id => 'new' } } output { null {} }") }
+    let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput {} } output { null {} }") }
+    let(:new_pipeline_config) { mock_pipeline_config(:new, "input { dummyblockinginput { id => 'new' } } output { null {} }") }
 
     let(:source_loader) do
       TestSourceLoader.new([pipeline_config, new_pipeline_config])
@@ -205,8 +199,8 @@ describe LogStash::Agent do
   end
 
   context "Configuration converge scenario" do
-    let(:pipeline_config) { mock_pipeline_config(:main, "input { generator {} } output { null {} }", { "pipeline.reloadable" => true }) }
-    let(:new_pipeline_config) { mock_pipeline_config(:new, "input { generator {} } output { null {} }", { "pipeline.reloadable" => true }) }
+    let(:pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput {} } output { null {} }", { "pipeline.reloadable" => true }) }
+    let(:new_pipeline_config) { mock_pipeline_config(:new, "input { dummyblockinginput {} } output { null {} }", { "pipeline.reloadable" => true }) }
 
     before do
       # Set the Agent to an initial state of pipelines
@@ -263,7 +257,7 @@ describe LogStash::Agent do
     end
 
     context "when the source return a modified pipeline" do
-      let(:modified_pipeline_config) { mock_pipeline_config(:main, "input { generator { id => 'new-and-modified' } } output { null {} }", { "pipeline.reloadable" => true }) }
+      let(:modified_pipeline_config) { mock_pipeline_config(:main, "input { dummyblockinginput { id => 'new-and-modified' } } output { null {} }", { "pipeline.reloadable" => true }) }
 
       let(:source_loader) do
         TestSequenceSourceLoader.new(