logstash-core 7.0.0.alpha2-java → 7.0.0.beta1-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. checksums.yaml +4 -4
  2. data/lib/logstash/agent.rb +62 -57
  3. data/lib/logstash/compiler/lscl.rb +2 -3
  4. data/lib/logstash/config/config_ast.rb +59 -17
  5. data/lib/logstash/environment.rb +1 -1
  6. data/lib/logstash/instrument/metric_store.rb +1 -1
  7. data/lib/logstash/instrument/periodic_poller/dlq.rb +5 -7
  8. data/lib/logstash/instrument/periodic_poller/pq.rb +6 -8
  9. data/lib/logstash/instrument/periodic_pollers.rb +3 -3
  10. data/lib/logstash/java_pipeline.rb +36 -15
  11. data/lib/logstash/patches/resolv.rb +0 -21
  12. data/lib/logstash/pipeline.rb +27 -10
  13. data/lib/logstash/pipeline_action/base.rb +1 -1
  14. data/lib/logstash/pipeline_action/create.rb +7 -13
  15. data/lib/logstash/pipeline_action/reload.rb +35 -12
  16. data/lib/logstash/pipeline_action/stop.rb +4 -6
  17. data/lib/logstash/pipeline_settings.rb +1 -1
  18. data/lib/logstash/pipelines_registry.rb +166 -0
  19. data/lib/logstash/settings.rb +5 -5
  20. data/lib/logstash/state_resolver.rb +5 -5
  21. data/lib/logstash/util/duration_formatter.rb +1 -1
  22. data/lib/logstash/util/safe_uri.rb +1 -0
  23. data/lib/logstash/util.rb +11 -1
  24. data/locales/en.yml +1 -1
  25. data/logstash-core.gemspec +17 -20
  26. data/spec/logstash/acked_queue_concurrent_stress_spec.rb +1 -1
  27. data/spec/logstash/agent/converge_spec.rb +25 -31
  28. data/spec/logstash/agent_spec.rb +5 -5
  29. data/spec/logstash/event_spec.rb +2 -2
  30. data/spec/logstash/instrument/wrapped_write_client_spec.rb +1 -1
  31. data/spec/logstash/legacy_ruby_event_spec.rb +6 -5
  32. data/spec/logstash/pipeline_action/create_spec.rb +9 -8
  33. data/spec/logstash/pipeline_action/reload_spec.rb +10 -9
  34. data/spec/logstash/pipeline_action/stop_spec.rb +4 -3
  35. data/spec/logstash/pipelines_registry_spec.rb +220 -0
  36. data/spec/logstash/queue_factory_spec.rb +2 -1
  37. data/spec/logstash/runner_spec.rb +2 -0
  38. data/spec/logstash/settings/array_coercible_spec.rb +1 -1
  39. data/spec/logstash/settings/bytes_spec.rb +2 -2
  40. data/spec/logstash/settings/port_range_spec.rb +1 -1
  41. data/spec/logstash/state_resolver_spec.rb +26 -22
  42. data/spec/logstash/util/safe_uri_spec.rb +40 -0
  43. data/spec/logstash/util/time_value_spec.rb +1 -1
  44. data/spec/logstash/util/wrapped_acked_queue_spec.rb +1 -1
  45. data/spec/support/matchers.rb +25 -19
  46. data/spec/support/shared_contexts.rb +3 -3
  47. data/versions-gem-copy.yml +6 -6
  48. metadata +73 -88
@@ -107,8 +107,23 @@ module LogStash; class Pipeline < BasePipeline
  @flushing = Concurrent::AtomicReference.new(false)
  @outputs_registered = Concurrent::AtomicBoolean.new(false)
  @worker_shutdown = java.util.concurrent.atomic.AtomicBoolean.new(false)
+
+ # @finished_execution signals that the pipeline thread has finished its execution
+ # regardless of any exceptions; it will always be true when the thread completes
+ @finished_execution = Concurrent::AtomicBoolean.new(false)
+
+ # @finished_run signals that the run methods called in the pipeline thread was completed
+ # without errors and it will NOT be set if the run method exits from an exception; this
+ # is by design and necessary for the wait_until_started semantic
+ @finished_run = Concurrent::AtomicBoolean.new(false)
+
+ @thread = nil
  end # def initialize

+ def finished_execution?
+ @finished_execution.true?
+ end
+
  def ready?
  @ready.value
  end
@@ -152,16 +167,19 @@ module LogStash; class Pipeline < BasePipeline
  "pipeline.batch.size" => settings.get("pipeline.batch.size"),
  "pipeline.batch.delay" => settings.get("pipeline.batch.delay")))

- @finished_execution = Concurrent::AtomicBoolean.new(false)
+ @finished_execution.make_false
+ @finished_run.make_false

  @thread = Thread.new do
  begin
  LogStash::Util.set_thread_name("pipeline.#{pipeline_id}")
  run
- @finished_execution.make_true
+ @finished_run.make_true
  rescue => e
  close
  @logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
+ ensure
+ @finished_execution.make_true
  end
  end

@@ -176,15 +194,14 @@ module LogStash; class Pipeline < BasePipeline

  def wait_until_started
  while true do
- # This should be changed with an appropriate FSM
- # It's an edge case, if we have a pipeline with
- # a generator { count => 1 } its possible that `Thread#alive?` doesn't return true
- # because the execution of the thread was successful and complete
- if @finished_execution.true?
+ if @finished_run.true?
+ # it completed run without exception
  return true
- elsif !thread.alive?
+ elsif thread.nil? || !thread.alive?
+ # some exception occured and the thread is dead
  return false
  elsif running?
+ # fully initialized and running
  return true
  else
  sleep 0.01
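
The two flags give wait_until_started three distinct outcomes: the pipeline is running, its run finished cleanly (a finite input such as generator { count => 1 }), or the thread died in run. Judging by how the Create action below consumes it, start blocks on this check and returns a boolean; the sketch below is hypothetical caller code, not part of the diff:

    # true:  running? became true, or run completed without exception (@finished_run set)
    # false: the pipeline thread raised; the rescue closed the pipeline and the ensure
    #        block still flipped @finished_execution, which the registry relies on
    if pipeline.start
      logger.info("pipeline started", "pipeline.id" => pipeline.pipeline_id)
    else
      logger.error("pipeline failed during startup", "pipeline.id" => pipeline.pipeline_id)
    end
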
@@ -283,9 +300,9 @@ module LogStash; class Pipeline < BasePipeline

  pipeline_workers.times do |t|
  thread = Thread.new(batch_size, batch_delay, self) do |_b_size, _b_delay, _pipeline|
+ Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
  _pipeline.worker_loop(_b_size, _b_delay)
  end
- Util.set_thread_name("[#{pipeline_id}]>worker#{t}")
  @worker_threads << thread
  end

@@ -525,7 +542,7 @@ module LogStash; class Pipeline < BasePipeline

  # Calculate the uptime in milliseconds
  #
- # @return [Fixnum] Uptime in milliseconds, 0 if the pipeline is not started
+ # @return [Integer] Uptime in milliseconds, 0 if the pipeline is not started
  def uptime
  return 0 if started_at.nil?
  ((Time.now.to_f - started_at.to_f) * 1000.0).to_i
@@ -12,7 +12,7 @@ module LogStash module PipelineAction
  end
  alias_method :to_s, :inspect

- def execute(agent, pipelines)
+ def execute(agent, pipelines_registry)
  raise "`#execute` Not implemented!"
  end

@@ -3,6 +3,7 @@ require "logstash/pipeline_action/base"
  require "logstash/pipeline"
  require "logstash/java_pipeline"

+
  module LogStash module PipelineAction
  class Create < Base
  include LogStash::Util::Loggable
@@ -30,33 +31,26 @@ module LogStash module PipelineAction

  # The execute assume that the thread safety access of the pipeline
  # is managed by the caller.
- def execute(agent, pipelines)
- pipeline =
+ def execute(agent, pipelines_registry)
+ new_pipeline =
  if @pipeline_config.settings.get_value("pipeline.java_execution")
  LogStash::JavaPipeline.new(@pipeline_config, @metric, agent)
  else
  agent.exclusive do
  # The Ruby pipeline initialization is not thread safe because of the module level
  # shared state in LogsStash::Config::AST. When using multiple pipelines this gets
- # executed simultaneously in different threads and we need to synchonize this initialization.
+ # executed simultaneously in different threads and we need to synchronize this initialization.
  LogStash::Pipeline.new(@pipeline_config, @metric, agent)
  end
  end

- status = nil
- pipelines.compute(pipeline_id) do |id,value|
- if value
- LogStash::ConvergeResult::ActionResult.create(self, true)
- end
- status = pipeline.start # block until the pipeline is correctly started or crashed
- pipeline # The pipeline is successfully started we can add it to the map
+ success = pipelines_registry.create_pipeline(pipeline_id, new_pipeline) do
+ new_pipeline.start # block until the pipeline is correctly started or crashed
  end

-
- LogStash::ConvergeResult::ActionResult.create(self, status)
+ LogStash::ConvergeResult::ActionResult.create(self, success)
  end

-
  def to_s
  "PipelineAction::Create<#{pipeline_id}>"
  end
@@ -20,8 +20,12 @@ module LogStash module PipelineAction
  "PipelineAction::Reload<#{pipeline_id}>"
  end

- def execute(agent, pipelines)
- old_pipeline = pipelines[pipeline_id]
+ def execute(agent, pipelines_registry)
+ old_pipeline = pipelines_registry.get_pipeline(pipeline_id)
+
+ if old_pipeline.nil?
+ return LogStash::ConvergeResult::FailedAction.new("Cannot reload pipeline, because the pipeline does not exist")
+ end

  if !old_pipeline.reloadable?
  return LogStash::ConvergeResult::FailedAction.new("Cannot reload pipeline, because the existing pipeline is not reloadable")
@@ -34,8 +38,8 @@ module LogStash module PipelineAction
  else
  agent.exclusive do
  # The Ruby pipeline initialization is not thread safe because of the module level
- # shared state in LogsStash::Config::AST. When using multiple pipelines this can gets
- # executed simultaneously in different threads and we need to synchonize this initialization.
+ # shared state in LogsStash::Config::AST. When using multiple pipelines this gets
+ # executed simultaneously in different threads and we need to synchronize this initialization.
  LogStash::BasePipeline.new(@pipeline_config)
  end
  end
@@ -49,16 +53,35 @@ module LogStash module PipelineAction

  logger.info("Reloading pipeline", "pipeline.id" => pipeline_id)

- pipelines.compute(pipeline_id) do |_,pipeline|
- status = Stop.new(pipeline_id).execute(agent, pipelines)
+ success = pipelines_registry.reload_pipeline(pipeline_id) do
+ # important NOT to explicitly return from block here
+ # the block must emit a success boolean value

- if status
- return Create.new(@pipeline_config, @metric).execute(agent, pipelines)
- else
- return status
- end
- pipeline
+ # First shutdown old pipeline
+ old_pipeline.shutdown { LogStash::ShutdownWatcher.start(old_pipeline) }
+ old_pipeline.thread.join
+
+ # Then create a new pipeline
+ new_pipeline =
+ if @pipeline_config.settings.get_value("pipeline.java_execution")
+ LogStash::JavaPipeline.new(@pipeline_config, @metric, agent)
+ else
+ agent.exclusive do
+ # The Ruby pipeline initialization is not thread safe because of the module level
+ # shared state in LogsStash::Config::AST. When using multiple pipelines this gets
+ # executed simultaneously in different threads and we need to synchronize this initialization.
+ LogStash::Pipeline.new(@pipeline_config, @metric, agent)
+ end
+ end
+
+ success = new_pipeline.start # block until the pipeline is correctly started or crashed
+
+ # return success and new_pipeline to registry reload_pipeline
+ [success, new_pipeline]
  end
+
+ LogStash::ConvergeResult::ActionResult.create(self, success)
  end
+
  end
  end end
@@ -9,15 +9,13 @@ module LogStash module PipelineAction
  @pipeline_id = pipeline_id
  end

- def execute(agent, pipelines)
- pipelines.compute(pipeline_id) do |_,pipeline|
+ def execute(agent, pipelines_registry)
+ pipelines_registry.terminate_pipeline(pipeline_id) do |pipeline|
  pipeline.shutdown { LogStash::ShutdownWatcher.start(pipeline) }
  pipeline.thread.join
- nil # delete the pipeline
  end
- # If we reach this part of the code we have succeeded because
- # the shutdown call will block.
- return LogStash::ConvergeResult::SuccessfulAction.new
+
+ LogStash::ConvergeResult::SuccessfulAction.new
  end

  def to_s
@@ -21,13 +21,13 @@ module LogStash
  "pipeline.batch.delay",
  "pipeline.batch.size",
  "pipeline.id",
- "pipeline.output.workers",
  "pipeline.reloadable",
  "pipeline.system",
  "pipeline.workers",
  "queue.checkpoint.acks",
  "queue.checkpoint.interval",
  "queue.checkpoint.writes",
+ "queue.checkpoint.retry",
  "queue.drain",
  "queue.max_bytes",
  "queue.max_events",
@@ -0,0 +1,166 @@
+ # encoding: utf-8
+
+ module LogStash
+ class PipelineState
+ attr_reader :pipeline_id, :pipeline
+
+ def initialize(pipeline_id, pipeline)
+ @pipeline_id = pipeline_id
+ @pipeline = pipeline
+ @reloading = Concurrent::AtomicBoolean.new(false)
+ end
+
+ def terminated?
+ # a reloading pipeline is never considered terminated
+ @reloading.false? && @pipeline.finished_execution?
+ end
+
+ def set_reloading(is_reloading)
+ @reloading.value = is_reloading
+ end
+
+ def set_pipeline(pipeline)
+ raise(ArgumentError, "invalid nil pipeline") if pipeline.nil?
+ @pipeline = pipeline
+ end
+ end
+
+ class PipelinesRegistry
+ attr_reader :states
+ include LogStash::Util::Loggable
+
+ def initialize
+ # we leverage the semantic of the Java ConcurrentHashMap for the
+ # compute() method which is atomic; calling compute() concurrently
+ # will block until the other compute finishes so no mutex is necessary
+ # for synchronizing compute calls
+ @states = java.util.concurrent.ConcurrentHashMap.new
+ end
+
+ # Execute the passed creation logic block and create a new state upon success
+ # @param pipeline_id [String, Symbol] the pipeline id
+ # @param pipeline [Pipeline] the new pipeline to create
+ # @param create_block [Block] the creation execution logic
+ #
+ # @yieldreturn [Boolean] the new pipeline creation success
+ #
+ # @return [Boolean] new pipeline creation success
+ def create_pipeline(pipeline_id, pipeline, &create_block)
+ success = false
+
+ @states.compute(pipeline_id) do |_, state|
+ if state
+ if state.terminated?
+ success = yield
+ state.set_pipeline(pipeline)
+ else
+ logger.error("Attempted to create a pipeline that already exists", :pipeline_id => pipeline_id)
+ end
+ state
+ else
+ success = yield
+ success ? PipelineState.new(pipeline_id, pipeline) : nil
+ end
+ end
+
+ success
+ end
+
+ # Execute the passed termination logic block
+ # @param pipeline_id [String, Symbol] the pipeline id
+ # @param stop_block [Block] the termination execution logic
+ #
+ # @yieldparam [Pipeline] the pipeline to terminate
+ def terminate_pipeline(pipeline_id, &stop_block)
+ @states.compute(pipeline_id) do |_, state|
+ if state.nil?
+ logger.error("Attempted to terminate a pipeline that does not exists", :pipeline_id => pipeline_id)
+ nil
+ else
+ yield(state.pipeline)
+ state
+ end
+ end
+ end
+
+ # Execute the passed reloading logic block in the context of the reloading state and set new pipeline in state
+ # @param pipeline_id [String, Symbol] the pipeline id
+ # @param reload_block [Block] the reloading execution logic
+ #
+ # @yieldreturn [Array<Boolean, Pipeline>] the new pipeline creation success and new pipeline object
+ #
+ # @return [Boolean] new pipeline creation success
+ def reload_pipeline(pipeline_id, &reload_block)
+ success = false
+
+ @states.compute(pipeline_id) do |_, state|
+ if state.nil?
+ logger.error("Attempted to reload a pipeline that does not exists", :pipeline_id => pipeline_id)
+ nil
+ else
+ state.set_reloading(true)
+ begin
+ success, new_pipeline = yield
+ state.set_pipeline(new_pipeline)
+ ensure
+ state.set_reloading(false)
+ end
+ state
+ end
+ end
+
+ success
+ end
+
+ # @param pipeline_id [String, Symbol] the pipeline id
+ # @return [Pipeline] the pipeline object or nil if none for pipeline_id
+ def get_pipeline(pipeline_id)
+ state = @states.get(pipeline_id)
+ state.nil? ? nil : state.pipeline
+ end
+
+ # @return [Fixnum] number of items in the states collection
+ def size
+ @states.size
+ end
+
+ # @return [Boolean] true if the states collection is empty.
+ def empty?
+ @states.isEmpty
+ end
+
+ # @return [Hash{String=>Pipeline}]
+ def running_pipelines
+ select_pipelines { |state| !state.terminated? }
+ end
+
+ # @return [Hash{String=>Pipeline}]
+ def non_running_pipelines
+ select_pipelines { |state| state.terminated? }
+ end
+
+ # @return [Hash{String=>Pipeline}]
+ def running_user_defined_pipelines
+ select_pipelines { |state | !state.terminated? && !state.pipeline.system? }
+ end
+
+ private
+
+ # Returns a mapping of pipelines by their ids.
+ # Pipelines can optionally be filtered by their `PipelineState` by passing
+ # a block that returns truthy when a pipeline should be included in the
+ # result.
+ #
+ # @yieldparam [PipelineState]
+ # @yieldreturn [Boolean]
+ #
+ # @return [Hash{String=>Pipeline}]
+ def select_pipelines(&optional_state_filter)
+ @states.each_with_object({}) do |(id, state), memo|
+ if state && (!block_given? || yield(state))
+ memo[id] = state.pipeline
+ end
+ end
+ end
+ end
+ end
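
The registry serializes create, reload, and terminate per pipeline id through ConcurrentHashMap#compute, replacing the plain pipelines map the actions mutated before. A minimal usage sketch, assuming a pipeline object that responds to start/shutdown/thread as in the classes above (the id and surrounding wiring are hypothetical):

    registry = LogStash::PipelinesRegistry.new

    # register and start; the state is only kept if the block returns true
    registry.create_pipeline("main", pipeline) { pipeline.start }

    registry.get_pipeline("main")       # => pipeline (nil for unknown ids)
    registry.running_pipelines.keys     # includes "main" while it is not terminated

    # stop it; compute() ensures no concurrent create/reload for "main" runs meanwhile
    registry.terminate_pipeline("main") do |p|
      p.shutdown { LogStash::ShutdownWatcher.start(p) }
      p.thread.join
    end
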
@@ -368,7 +368,7 @@ module LogStash
  case value
  when ::Range
  value
- when ::Fixnum
+ when ::Integer
  value..value
  when ::String
  first, last = value.split(PORT_SEPARATOR)
@@ -481,11 +481,11 @@ module LogStash

  class Bytes < Coercible
  def initialize(name, default=nil, strict=true)
- super(name, ::Fixnum, default, strict=true) { |value| valid?(value) }
+ super(name, ::Integer, default, strict=true) { |value| valid?(value) }
  end

  def valid?(value)
- value.is_a?(Fixnum) && value >= 0
+ value.is_a?(::Integer) && value >= 0
  end

  def coerce(value)
@@ -508,11 +508,11 @@ module LogStash

  class TimeValue < Coercible
  def initialize(name, default, strict=true, &validator_proc)
- super(name, ::Fixnum, default, strict, &validator_proc)
+ super(name, ::Integer, default, strict, &validator_proc)
  end

  def coerce(value)
- return value if value.is_a?(::Fixnum)
+ return value if value.is_a?(::Integer)
  Util::TimeValue.from_value(value).to_nanos
  end
  end
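
The Fixnum-to-Integer substitutions here (and in the other Fixnum references throughout the diff) track Ruby 2.4's unification of Fixnum and Bignum into Integer; Fixnum is deprecated from 2.4 on, so the coercion checks are expressed against Integer. A quick sanity check of the new predicate on any supported Ruby/JRuby:

    123.is_a?(::Integer)        # => true
    (2**100).is_a?(::Integer)   # => true (formerly a Bignum)
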
@@ -10,11 +10,11 @@ module LogStash
  @metric = metric
  end

- def resolve(pipelines, pipeline_configs)
+ def resolve(pipelines_registry, pipeline_configs)
  actions = []

  pipeline_configs.each do |pipeline_config|
- pipeline = pipelines[pipeline_config.pipeline_id]
+ pipeline = pipelines_registry.get_pipeline(pipeline_config.pipeline_id)

  if pipeline.nil?
  actions << LogStash::PipelineAction::Create.new(pipeline_config, @metric)
@@ -25,12 +25,12 @@ module LogStash
  end
  end

- running_pipelines = pipeline_configs.collect(&:pipeline_id)
+ configured_pipelines = pipeline_configs.collect(&:pipeline_id)

  # If one of the running pipeline is not in the pipeline_configs, we assume that we need to
  # stop it.
- pipelines.keys
- .select { |pipeline_id| !running_pipelines.include?(pipeline_id) }
+ pipelines_registry.running_pipelines.keys
+ .select { |pipeline_id| !configured_pipelines.include?(pipeline_id) }
  .each { |pipeline_id| actions << LogStash::PipelineAction::Stop.new(pipeline_id) }

  actions.sort # See logstash/pipeline_action.rb
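
Read together with the Create branch in the previous hunk, one converge cycle can be traced by hand. The ids below are hypothetical, and the case of an existing pipeline whose config changed is handled by unchanged code between these two hunks:

    # registry.running_pipelines.keys  => ["apache", "syslog"]
    # pipeline_configs ids             => ["apache", "beats"]
    #
    # "beats"  -> get_pipeline returns nil                  => PipelineAction::Create
    # "syslog" -> running but missing from pipeline_configs => PipelineAction::Stop
    # "apache" -> still configured: reload or no-op, decided by the unchanged code
    #
    # terminated pipelines are ignored because only running_pipelines.keys is scanned
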
@@ -7,7 +7,7 @@ module LogStash::Util::DurationFormatter
  # a format that a human can understand. This is currently used by
  # the API.
  #
- # @param [Fixnum] Duration in milliseconds
+ # @param [Integer] Duration in milliseconds
  # @return [String] Duration in human format
  def self.human_format(duration)
  ChronicDuration.output(duration / 1000, CHRONIC_OPTIONS)
@@ -26,6 +26,7 @@ class LogStash::Util::SafeURI
  else
  raise ArgumentError, "Expected a string, java.net.URI, or URI, got a #{arg.class} creating a URL"
  end
+ raise ArgumentError, "URI is not valid - host is not specified" if @uri.host.nil?
  end

  def to_s
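
The added host check makes a URI without a host fail at construction time rather than surfacing later. A rough illustration; which inputs actually parse to a nil host depends on the underlying URI handling, so treat the second value as hypothetical:

    LogStash::Util::SafeURI.new("http://localhost:9200")  # builds normally
    LogStash::Util::SafeURI.new("http://")                 # would now raise ArgumentError,
                                                            # "URI is not valid - host is not specified"
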
data/lib/logstash/util.rb CHANGED
@@ -9,6 +9,8 @@ module LogStash::Util

  PR_SET_NAME = 15
  def self.set_thread_name(name)
+ previous_name = Java::java.lang.Thread.currentThread.getName() if block_given?
+
  if RUBY_ENGINE == "jruby"
  # Keep java and ruby thread names in sync.
  Java::java.lang.Thread.currentThread.setName(name)
@@ -21,6 +23,14 @@ module LogStash::Util
  # since MRI 1.9, JRuby, and Rubinius use system threads for this.
  LibC.prctl(PR_SET_NAME, name[0..16], 0, 0, 0)
  end
+
+ if block_given?
+ begin
+ yield
+ ensure
+ set_thread_name(previous_name)
+ end
+ end
  end # def set_thread_name

  def self.set_thread_plugin(plugin)
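
set_thread_name now accepts an optional block: the previous name is captured up front and restored in an ensure, so the current thread can be renamed only for the duration of an operation. Because it always renames the calling thread, this is also why the worker naming call moved inside the Thread.new block earlier in the diff. A small usage sketch with illustrative names (do_flush is a stand-in):

    LogStash::Util.set_thread_name("[main]>flush") do
      do_flush   # the current thread is named "[main]>flush" only while this runs
    end
    # the previous thread name is restored afterwards, even if do_flush raised
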
@@ -191,7 +201,7 @@ module LogStash::Util
191
201
  o.inject({}) {|h, (k,v)| h[k] = deep_clone(v); h }
192
202
  when Array
193
203
  o.map {|v| deep_clone(v) }
194
- when Fixnum, Symbol, IO, TrueClass, FalseClass, NilClass
204
+ when Integer, Symbol, IO, TrueClass, FalseClass, NilClass
195
205
  o
196
206
  when LogStash::Codecs::Base
197
207
  o.clone
data/locales/en.yml CHANGED
@@ -289,7 +289,7 @@ en:
  pipeline-workers: |+
  Sets the number of pipeline workers to run.
  java-execution: |+
- (Beta) Use new Java execution engine.
+ Use Java execution engine.
  pipeline-batch-size: |+
  Size of batches the pipeline is to work in.
  pipeline-batch-delay: |+
@@ -47,32 +47,29 @@ Gem::Specification.new do |gem|

  gem.platform = "java"

- gem.add_runtime_dependency "pry", "~> 0.10.1" #(Ruby license)
+ gem.add_runtime_dependency "pry", "~> 0.12" #(Ruby license)
  gem.add_runtime_dependency "stud", "~> 0.0.19" #(Apache 2.0 license)
- gem.add_runtime_dependency "clamp", "~> 0.6.5" #(MIT license) for command line args/flags
- gem.add_runtime_dependency "filesize", "0.0.4" #(MIT license) for :bytes config validator
- gem.add_runtime_dependency "gems", "~> 0.8.3" #(MIT license)
- gem.add_runtime_dependency "concurrent-ruby", "~> 1.0", ">= 1.0.5"
- gem.add_runtime_dependency "rack", '~> 1.6', '>= 1.6.11'
- gem.add_runtime_dependency "sinatra", '~> 1.4', '>= 1.4.6'
- gem.add_runtime_dependency 'puma', '~> 2.16'
- gem.add_runtime_dependency "jruby-openssl", ">= 0.9.20" # >= 0.9.13 Required to support TLSv1.2
- gem.add_runtime_dependency "chronic_duration", "0.10.6"
+ gem.add_runtime_dependency "clamp", "~> 0.6" #(MIT license) for command line args/flags
+ gem.add_runtime_dependency "filesize", "~> 0.2" #(MIT license) for :bytes config validator
+ gem.add_runtime_dependency "gems", "~> 1" #(MIT license)
+ gem.add_runtime_dependency "concurrent-ruby", "~> 1"
+ gem.add_runtime_dependency "rack", '~> 1', '>= 1.6.11'
+ gem.add_runtime_dependency "sinatra", '~> 1', '>= 1.4.6'
+ gem.add_runtime_dependency 'puma', '~> 2'
+ gem.add_runtime_dependency "jruby-openssl", "~> 0.10" # >= 0.9.13 Required to support TLSv1.2
+ gem.add_runtime_dependency "chronic_duration", "~> 0.10"

- # TODO(sissel): Treetop 1.5.x doesn't seem to work well, but I haven't
- # investigated what the cause might be. -Jordan
- gem.add_runtime_dependency "treetop", "< 1.5.0" #(MIT license)
+ gem.add_runtime_dependency "treetop", "~> 1" #(MIT license)

- # upgrade i18n only post 0.6.11, see https://github.com/svenfuchs/i18n/issues/270
- gem.add_runtime_dependency "i18n", "= 0.6.9" #(MIT license)
+ gem.add_runtime_dependency "i18n", "~> 1" #(MIT license)

  # filetools and rakelib
- gem.add_runtime_dependency "minitar", "~> 0.6.1"
- gem.add_runtime_dependency "rubyzip", "~> 1.2.1"
- gem.add_runtime_dependency "thread_safe", "~> 0.3.5" #(Apache 2.0 license)
+ gem.add_runtime_dependency "minitar", "~> 0.8"
+ gem.add_runtime_dependency "rubyzip", "~> 1"
+ gem.add_runtime_dependency "thread_safe", "~> 0.3.6" #(Apache 2.0 license)

  gem.add_runtime_dependency "jrjackson", "= #{ALL_VERSIONS.fetch('jrjackson')}" #(Apache 2.0 license)

- gem.add_runtime_dependency "elasticsearch", "~> 5.0", ">= 5.0.4" # Ruby client for ES (Apache 2.0 license)
- gem.add_runtime_dependency "manticore", '>= 0.5.4', '< 1.0.0'
+ gem.add_runtime_dependency "elasticsearch", "~> 5"
+ gem.add_runtime_dependency "manticore", '~> 0.6'
  end
@@ -14,7 +14,7 @@ describe LogStash::WrappedAckedQueue, :stress_test => true do
  let(:reject_memo_keys) { [:reject_memo_keys, :path, :queue, :writer_threads, :collector, :metric, :reader_threads, :output_strings] }

  let(:queue) do
- described_class.new(path, page_capacity, 0, queue_checkpoint_acks, queue_checkpoint_writes, queue_checkpoint_interval, queue_capacity)
+ described_class.new(path, page_capacity, 0, queue_checkpoint_acks, queue_checkpoint_writes, queue_checkpoint_interval, false, queue_capacity)
  end

  let(:writer_threads) do