logstash-core 5.3.3-java → 5.4.0-java

Sign up to get free protection for your applications and to get access to all the features.
Files changed (85) hide show
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +2 -0
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/version.rb +1 -1
  5. data/lib/logstash-core_jars.rb +4 -0
  6. data/lib/logstash/agent.rb +15 -6
  7. data/lib/logstash/api/modules/base.rb +1 -1
  8. data/lib/logstash/api/rack_app.rb +1 -1
  9. data/lib/logstash/config/config_ast.rb +13 -13
  10. data/lib/logstash/config/mixin.rb +33 -28
  11. data/lib/logstash/environment.rb +11 -0
  12. data/lib/logstash/event.rb +56 -0
  13. data/lib/logstash/event_dispatcher.rb +2 -2
  14. data/lib/logstash/execution_context.rb +10 -0
  15. data/lib/logstash/filter_delegator.rb +3 -2
  16. data/lib/logstash/inputs/base.rb +15 -1
  17. data/lib/logstash/instrument/collector.rb +1 -1
  18. data/lib/logstash/instrument/metric.rb +4 -2
  19. data/lib/logstash/instrument/metric_store.rb +9 -5
  20. data/lib/logstash/instrument/null_metric.rb +1 -0
  21. data/lib/logstash/instrument/periodic_poller/cgroup.rb +3 -3
  22. data/lib/logstash/instrument/periodic_poller/jvm.rb +11 -8
  23. data/lib/logstash/instrument/periodic_poller/load_average.rb +4 -2
  24. data/lib/logstash/instrument/wrapped_write_client.rb +59 -0
  25. data/lib/logstash/java_integration.rb +2 -2
  26. data/lib/logstash/output_delegator.rb +2 -2
  27. data/lib/logstash/output_delegator_strategies/legacy.rb +5 -2
  28. data/lib/logstash/output_delegator_strategies/shared.rb +2 -1
  29. data/lib/logstash/output_delegator_strategies/single.rb +2 -1
  30. data/lib/logstash/outputs/base.rb +8 -0
  31. data/lib/logstash/patches/cabin.rb +1 -1
  32. data/lib/logstash/patches/stronger_openssl_defaults.rb +1 -1
  33. data/lib/logstash/pipeline.rb +47 -19
  34. data/lib/logstash/plugin.rb +3 -1
  35. data/lib/logstash/plugins/hooks_registry.rb +6 -6
  36. data/lib/logstash/plugins/registry.rb +2 -2
  37. data/lib/logstash/queue_factory.rb +7 -5
  38. data/lib/logstash/runner.rb +15 -1
  39. data/lib/logstash/settings.rb +14 -2
  40. data/lib/logstash/string_interpolation.rb +18 -0
  41. data/lib/logstash/timestamp.rb +27 -0
  42. data/lib/logstash/util.rb +1 -1
  43. data/lib/logstash/util/prctl.rb +1 -1
  44. data/lib/logstash/util/retryable.rb +1 -1
  45. data/lib/logstash/util/wrapped_acked_queue.rb +53 -22
  46. data/lib/logstash/util/wrapped_synchronous_queue.rb +51 -33
  47. data/lib/logstash/version.rb +1 -1
  48. data/locales/en.yml +4 -2
  49. data/logstash-core.gemspec +0 -3
  50. data/spec/api/lib/api/node_stats_spec.rb +2 -1
  51. data/spec/api/spec_helper.rb +1 -1
  52. data/spec/logstash/acked_queue_concurrent_stress_spec.rb +291 -0
  53. data/spec/logstash/agent_spec.rb +24 -0
  54. data/spec/logstash/config/mixin_spec.rb +11 -2
  55. data/spec/logstash/event_dispatcher_spec.rb +8 -1
  56. data/spec/logstash/event_spec.rb +346 -0
  57. data/spec/logstash/execution_context_spec.rb +13 -0
  58. data/spec/logstash/filter_delegator_spec.rb +4 -2
  59. data/spec/logstash/inputs/base_spec.rb +41 -0
  60. data/spec/logstash/instrument/metric_spec.rb +2 -1
  61. data/spec/logstash/instrument/metric_store_spec.rb +14 -0
  62. data/spec/logstash/instrument/namespaced_metric_spec.rb +2 -1
  63. data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +1 -1
  64. data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +35 -0
  65. data/spec/logstash/instrument/periodic_poller/load_average_spec.rb +1 -5
  66. data/spec/logstash/instrument/wrapped_write_client_spec.rb +113 -0
  67. data/spec/logstash/json_spec.rb +1 -1
  68. data/spec/logstash/legacy_ruby_event_spec.rb +636 -0
  69. data/spec/logstash/legacy_ruby_timestamp_spec.rb +170 -0
  70. data/spec/logstash/output_delegator_spec.rb +6 -3
  71. data/spec/logstash/outputs/base_spec.rb +23 -0
  72. data/spec/logstash/pipeline_pq_file_spec.rb +18 -8
  73. data/spec/logstash/pipeline_spec.rb +41 -5
  74. data/spec/logstash/plugin_spec.rb +15 -3
  75. data/spec/logstash/plugins/hooks_registry_spec.rb +2 -2
  76. data/spec/logstash/runner_spec.rb +33 -2
  77. data/spec/logstash/settings/port_range_spec.rb +1 -1
  78. data/spec/logstash/settings_spec.rb +21 -0
  79. data/spec/logstash/timestamp_spec.rb +29 -0
  80. data/spec/logstash/util/accessors_spec.rb +179 -0
  81. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +4 -11
  82. data/spec/logstash/util_spec.rb +1 -1
  83. data/spec/logstash/webserver_spec.rb +1 -1
  84. data/spec/support/mocks_classes.rb +65 -53
  85. metadata +25 -30
@@ -8,7 +8,8 @@ require "securerandom"
8
8
 
9
9
  class LogStash::Plugin
10
10
  include LogStash::Util::Loggable
11
- attr_accessor :params
11
+
12
+ attr_accessor :params, :execution_context
12
13
 
13
14
  NL = "\n"
14
15
 
@@ -122,6 +123,7 @@ class LogStash::Plugin
122
123
  LogStash::Instrument::NamespacedNullMetric.new(@metric, :null)
123
124
  end
124
125
  end
126
+
125
127
  # return the configured name of this plugin
126
128
  # @return [String] The name of the plugin defined by `config_name`
127
129
  def config_name
@@ -6,17 +6,17 @@ module LogStash module Plugins
6
6
  java_import "java.util.concurrent.CopyOnWriteArrayList"
7
7
 
8
8
  def initialize
9
- @registered_emmitters = ConcurrentHashMap.new
9
+ @registered_emitters = ConcurrentHashMap.new
10
10
  @registered_hooks = ConcurrentHashMap.new
11
11
  end
12
12
 
13
13
  def register_emitter(emitter_scope, dispatcher)
14
- @registered_emmitters.put(emitter_scope, dispatcher)
14
+ @registered_emitters.put(emitter_scope, dispatcher)
15
15
  sync_hooks
16
16
  end
17
17
 
18
18
  def remove_emitter(emitter_scope)
19
- @registered_emmitters.remove(emitter_scope)
19
+ @registered_emitters.remove(emitter_scope)
20
20
  end
21
21
 
22
22
  def register_hooks(emitter_scope, callback)
@@ -28,8 +28,8 @@ module LogStash module Plugins
28
28
  sync_hooks
29
29
  end
30
30
 
31
- def emmitters_count
32
- @registered_emmitters.size
31
+ def emitters_count
32
+ @registered_emitters.size
33
33
  end
34
34
 
35
35
  def hooks_count(emitter_scope = nil)
@@ -43,7 +43,7 @@ module LogStash module Plugins
43
43
 
44
44
  private
45
45
  def sync_hooks
46
- @registered_emmitters.each do |emitter, dispatcher|
46
+ @registered_emitters.each do |emitter, dispatcher|
47
47
  listeners = @registered_hooks.get(emitter)
48
48
 
49
49
  unless listeners.nil?
@@ -111,7 +111,7 @@ module LogStash module Plugins
111
111
 
112
112
  def load_available_plugins
113
113
  GemRegistry.logstash_plugins.each do |plugin_context|
114
- # When a plugin has a HOOK_FILE defined, its the responsability of the plugin
114
+ # When a plugin has a HOOK_FILE defined, its the responsibility of the plugin
115
115
  # to register itself to the registry of available plugins.
116
116
  #
117
117
  # Legacy plugin will lazy register themselves
@@ -205,7 +205,7 @@ module LogStash module Plugins
205
205
  private
206
206
  # lookup a plugin by type and name in the existing LogStash module namespace
207
207
  # ex.: namespace_lookup("filter", "grok") looks for LogStash::Filters::Grok
208
- # @param type [String] plugin type, "input", "ouput", "filter"
208
+ # @param type [String] plugin type, "input", "output", "filter"
209
209
  # @param name [String] plugin name, ex.: "grok"
210
210
  # @return [Class] the plugin class or raises NameError
211
211
  # @raise NameError if plugin class does not exist or is invalid
@@ -16,18 +16,20 @@ module LogStash
16
16
  checkpoint_max_writes = settings.get("queue.checkpoint.writes")
17
17
  checkpoint_max_interval = settings.get("queue.checkpoint.interval")
18
18
 
19
+ queue_path = ::File.join(settings.get("path.queue"), settings.get("pipeline.id"))
20
+
19
21
  case queue_type
20
22
  when "memory_acked"
21
23
  # memory_acked is used in tests/specs
22
- LogStash::Util::WrappedAckedQueue.create_memory_based("", queue_page_capacity, queue_max_events, queue_max_bytes)
23
- when "memory"
24
- # memory is the legacy and default setting
25
- LogStash::Util::WrappedSynchronousQueue.new
24
+ FileUtils.mkdir_p(queue_path)
25
+ LogStash::Util::WrappedAckedQueue.create_memory_based(queue_path, queue_page_capacity, queue_max_events, queue_max_bytes)
26
26
  when "persisted"
27
27
  # persisted is the disk based acked queue
28
- queue_path = ::File.join(settings.get("path.queue"), settings.get("pipeline.id"))
29
28
  FileUtils.mkdir_p(queue_path)
30
29
  LogStash::Util::WrappedAckedQueue.create_file_based(queue_path, queue_page_capacity, queue_max_events, checkpoint_max_writes, checkpoint_max_acks, checkpoint_max_interval, queue_max_bytes)
30
+ when "memory"
31
+ # memory is the legacy and default setting
32
+ LogStash::Util::WrappedSynchronousQueue.new
31
33
  else
32
34
  raise ConfigurationError, "Invalid setting `#{queue_type}` for `queue.type`, supported types are: 'memory_acked', 'memory', 'persisted'"
33
35
  end
@@ -20,6 +20,8 @@ require "logstash/settings"
20
20
  require "logstash/version"
21
21
  require "logstash/plugins/registry"
22
22
 
23
+ java_import 'org.logstash.FileLockFactory'
24
+
23
25
  class LogStash::Runner < Clamp::StrictCommand
24
26
  include LogStash::Util::Loggable
25
27
  # The `path.settings` and `path.logs` need to be defined in the runner instead of the `logstash-core/lib/logstash/environment.rb`
@@ -184,6 +186,11 @@ class LogStash::Runner < Clamp::StrictCommand
184
186
  end
185
187
 
186
188
  def execute
189
+ # Only once execute is invoked have the CLI options been added to the @settings
190
+ # We invoke post_process to apply extra logic to them.
191
+ # The post_process callbacks have been added in environment.rb
192
+ @settings.post_process
193
+
187
194
  require "logstash/util"
188
195
  require "logstash/util/java_version"
189
196
  require "stud/task"
@@ -195,7 +202,7 @@ class LogStash::Runner < Clamp::StrictCommand
195
202
  java.lang.System.setProperty("ls.log.level", setting("log.level"))
196
203
  unless java.lang.System.getProperty("log4j.configurationFile")
197
204
  log4j_config_location = ::File.join(setting("path.settings"), "log4j2.properties")
198
- LogStash::Logging::Logger::initialize("file:///" + log4j_config_location)
205
+ LogStash::Logging::Logger::initialize("file://" + log4j_config_location)
199
206
  end
200
207
  # override log level that may have been introduced from a custom log4j config file
201
208
  LogStash::Logging::Logger::configure_logging(setting("log.level"))
@@ -259,6 +266,9 @@ class LogStash::Runner < Clamp::StrictCommand
259
266
  end
260
267
  end
261
268
 
269
+ # lock path.data before starting the agent
270
+ @data_path_lock = FileLockFactory.getDefault().obtainLock(setting("path.data"), ".lock");
271
+
262
272
  @agent = create_agent(@settings)
263
273
 
264
274
  @agent.register_pipeline(@settings)
@@ -283,6 +293,9 @@ class LogStash::Runner < Clamp::StrictCommand
283
293
 
284
294
  agent_return
285
295
 
296
+ rescue org.logstash.LockException => e
297
+ logger.fatal(I18n.t("logstash.runner.locked-data-path", :path => setting("path.data")))
298
+ return 1
286
299
  rescue Clamp::UsageError => e
287
300
  $stderr.puts "ERROR: #{e.message}"
288
301
  show_short_help
@@ -299,6 +312,7 @@ class LogStash::Runner < Clamp::StrictCommand
299
312
  Stud::untrap("INT", sigint_id) unless sigint_id.nil?
300
313
  Stud::untrap("TERM", sigterm_id) unless sigterm_id.nil?
301
314
  Stud::untrap("HUP", sighup_id) unless sighup_id.nil?
315
+ FileLockFactory.getDefault().releaseLock(@data_path_lock) if @data_path_lock
302
316
  @log_fd.close if @log_fd
303
317
  end # def self.main
304
318
 
@@ -108,6 +108,20 @@ module LogStash
108
108
  def from_yaml(yaml_path)
109
109
  settings = read_yaml(::File.join(yaml_path, "logstash.yml"))
110
110
  self.merge(flatten_hash(settings), true)
111
+ self
112
+ end
113
+
114
+ def post_process
115
+ if @post_process_callbacks
116
+ @post_process_callbacks.each do |callback|
117
+ callback.call(self)
118
+ end
119
+ end
120
+ end
121
+
122
+ def on_post_process(&block)
123
+ @post_process_callbacks ||= []
124
+ @post_process_callbacks << block
111
125
  end
112
126
 
113
127
  def validate_all
@@ -232,7 +246,6 @@ module LogStash
232
246
  @default = default
233
247
  end
234
248
  end
235
-
236
249
  def set(value)
237
250
  coerced_value = coerce(value)
238
251
  validate(coerced_value)
@@ -520,4 +533,3 @@ module LogStash
520
533
 
521
534
  SETTINGS = Settings.new
522
535
  end
523
-
@@ -0,0 +1,18 @@
1
+ # encoding: utf-8
2
+
3
+ module LogStash
4
+ module StringInterpolation
5
+ extend self
6
+
7
+ # clear the global compiled templates cache
8
+ def clear_cache
9
+ Java::OrgLogstash::StringInterpolation.get_instance.clear_cache;
10
+ end
11
+
12
+ # @return [Fixnum] the compiled templates cache size
13
+ def cache_size
14
+ Java::OrgLogstash::StringInterpolation.get_instance.cache_size;
15
+ end
16
+ end
17
+ end
18
+
@@ -0,0 +1,27 @@
1
+ # encoding: utf-8
2
+
3
+ require "logstash/namespace"
4
+
5
+ module LogStash
6
+ class TimestampParserError < StandardError; end
7
+
8
+ class Timestamp
9
+ include Comparable
10
+
11
+ # TODO (colin) implement in Java
12
+ def <=>(other)
13
+ self.time <=> other.time
14
+ end
15
+
16
+ # TODO (colin) implement in Java
17
+ def +(other)
18
+ self.time + other
19
+ end
20
+
21
+ # TODO (colin) implement in Java
22
+ def -(value)
23
+ self.time - (value.is_a?(Timestamp) ? value.time : value)
24
+ end
25
+
26
+ end
27
+ end
@@ -141,7 +141,7 @@ module LogStash::Util
141
141
  end # def hash_merge_many
142
142
 
143
143
 
144
- # nomalize method definition based on platform.
144
+ # normalize method definition based on platform.
145
145
  # normalize is used to convert an object create through
146
146
  # json deserialization from JrJackson in :raw mode to pure Ruby
147
147
  # to support these pure Ruby object monkey patches.
@@ -4,7 +4,7 @@ module LibC
4
4
  extend FFI::Library
5
5
  ffi_lib 'c'
6
6
 
7
- # Ok so the 2nd arg isn't really a string... but whaatever
7
+ # Ok so the 2nd arg isn't really a string... but whatever
8
8
  attach_function :prctl, [:int, :string, :long, :long, :long], :int
9
9
  end
10
10
 
@@ -3,7 +3,7 @@ module LogStash
3
3
  module Retryable
4
4
  # execute retryable code block
5
5
  # @param [Hash] options retryable options
6
- # @option options [Fixnum] :tries retries to perform, default 1, set to 0 for infite retries. 1 means that upon exception the block will be retried once
6
+ # @option options [Fixnum] :tries retries to perform, default 1, set to 0 for infinite retries. 1 means that upon exception the block will be retried once
7
7
  # @option options [Fixnum] :base_sleep seconds to sleep on first retry, default 1
8
8
  # @option options [Fixnum] :max_sleep max seconds to sleep upon exponential backoff, default 1
9
9
  # @option options [Exception] :rescue exception class list to retry on, defaults is Exception, which retries on any Exception.
@@ -1,6 +1,7 @@
1
1
  # encoding: utf-8
2
2
 
3
- require "logstash-core-queue-jruby/logstash-core-queue-jruby"
3
+ require "jruby_acked_queue_ext"
4
+ require "jruby_acked_batch_ext"
4
5
  require "concurrent"
5
6
  # This is an adapted copy of the wrapped_synchronous_queue file
6
7
  # ideally this should be moved to Java/JRuby
@@ -62,7 +63,7 @@ module LogStash; module Util
62
63
  #
63
64
  # @param [Object] Object to add to the queue
64
65
  # @param [Integer] Time in milliseconds to wait before giving up
65
- # @return [Boolean] True if adding was successfull if not it return false
66
+ # @return [Boolean] True if adding was successful, false otherwise
66
67
  def offer(obj, timeout_ms)
67
68
  raise NotImplementedError.new("The offer method is not implemented. There is no non blocking write operation yet.")
68
69
  end
@@ -125,6 +126,10 @@ module LogStash; module Util
125
126
  @queue.close
126
127
  end
127
128
 
129
+ def empty?
130
+ @mutex.synchronize { @queue.is_fully_acked? }
131
+ end
132
+
128
133
  def set_batch_dimensions(batch_size, wait_for)
129
134
  @batch_size = batch_size
130
135
  @wait_for = wait_for
@@ -143,7 +148,6 @@ module LogStash; module Util
143
148
  def define_initial_metrics_values(namespaced_metric)
144
149
  namespaced_metric.report_time(:duration_in_millis, 0)
145
150
  namespaced_metric.increment(:filtered, 0)
146
- namespaced_metric.increment(:in, 0)
147
151
  namespaced_metric.increment(:out, 0)
148
152
  end
149
153
 
@@ -157,16 +161,28 @@ module LogStash; module Util
157
161
  @inflight_batches.fetch(Thread.current, [])
158
162
  end
159
163
 
160
- def take_batch
164
+ # create a new empty batch
165
+ # @return [ReadBatch] a new empty read batch
166
+ def new_batch
167
+ ReadBatch.new(@queue, @batch_size, @wait_for)
168
+ end
169
+
170
+ def read_batch
161
171
  if @queue.closed?
162
172
  raise QueueClosedError.new("Attempt to take a batch from a closed AckedQueue")
163
173
  end
174
+
175
+ batch = new_batch
176
+ @mutex.synchronize { batch.read_next }
177
+ start_metrics(batch)
178
+ batch
179
+ end
180
+
181
+ def start_metrics(batch)
164
182
  @mutex.synchronize do
165
- batch = ReadBatch.new(@queue, @batch_size, @wait_for)
166
- add_starting_metrics(batch)
183
+ # there seems to be concurrency issues with metrics, keep it in the mutex
167
184
  set_current_thread_inflight_batch(batch)
168
185
  start_clock
169
- batch
170
186
  end
171
187
  end
172
188
 
@@ -177,21 +193,30 @@ module LogStash; module Util
177
193
  def close_batch(batch)
178
194
  @mutex.synchronize do
179
195
  batch.close
196
+
197
+ # there seems to be concurrency issues with metrics, keep it in the mutex
180
198
  @inflight_batches.delete(Thread.current)
181
- stop_clock
199
+ stop_clock(batch)
182
200
  end
183
201
  end
184
202
 
185
203
  def start_clock
186
204
  @inflight_clocks[Thread.current] = [
187
- @event_metric.time(:duration_in_millis),
188
- @pipeline_metric.time(:duration_in_millis)
205
+ @event_metric.time(:duration_in_millis),
206
+ @pipeline_metric.time(:duration_in_millis)
189
207
  ]
190
208
  end
191
209
 
192
- def stop_clock
193
- @inflight_clocks[Thread.current].each(&:stop)
194
- @inflight_clocks.delete(Thread.current)
210
+ def stop_clock(batch)
211
+ unless @inflight_clocks[Thread.current].nil?
212
+ if batch.size > 0
213
+ # only stop (which also records) the metrics if the batch is non-empty.
214
+ # start_clock is now called at empty batch creation and an empty batch could
215
+ # stay empty all the way down to the close_batch call.
216
+ @inflight_clocks[Thread.current].each(&:stop)
217
+ end
218
+ @inflight_clocks.delete(Thread.current)
219
+ end
195
220
  end
196
221
 
197
222
  def add_starting_metrics(batch)
@@ -213,6 +238,10 @@ module LogStash; module Util
213
238
 
214
239
  class ReadBatch
215
240
  def initialize(queue, size, wait)
241
+ @queue = queue
242
+ @size = size
243
+ @wait = wait
244
+
216
245
  @originals = Hash.new
217
246
 
218
247
  # TODO: disabled for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
@@ -221,7 +250,13 @@ module LogStash; module Util
221
250
  @generated = Hash.new
222
251
  @iterating_temp = Hash.new
223
252
  @iterating = false # Atomic Boolean maybe? Although batches are not shared across threads
224
- take_originals_from_queue(queue, size, wait) # this sets a reference to @acked_batch
253
+ @acked_batch = nil
254
+ end
255
+
256
+ def read_next
257
+ @acked_batch = @queue.read_batch(@size, @wait)
258
+ return if @acked_batch.nil?
259
+ @acked_batch.get_elements.each { |e| @originals[e] = true }
225
260
  end
226
261
 
227
262
  def close
@@ -301,14 +336,6 @@ module LogStash; module Util
301
336
  @generated.update(@iterating_temp)
302
337
  @iterating_temp.clear
303
338
  end
304
-
305
- def take_originals_from_queue(queue, size, wait)
306
- @acked_batch = queue.read_batch(size, wait)
307
- return if @acked_batch.nil?
308
- @acked_batch.get_elements.each do |e|
309
- @originals[e] = true
310
- end
311
- end
312
339
  end
313
340
 
314
341
  class WriteClient
@@ -343,6 +370,10 @@ module LogStash; module Util
343
370
  @events = []
344
371
  end
345
372
 
373
+ def size
374
+ @events.size
375
+ end
376
+
346
377
  def push(event)
347
378
  @events.push(event)
348
379
  end
@@ -18,12 +18,12 @@ module LogStash; module Util
18
18
  end
19
19
  alias_method(:<<, :push)
20
20
 
21
- # Offer an object to the queue, wait for the specified amout of time.
22
- # If adding to the queue was successfull it wil return true, false otherwise.
21
+ # Offer an object to the queue, wait for the specified amount of time.
22
+ # If adding to the queue was successful it will return true, false otherwise.
23
23
  #
24
24
  # @param [Object] Object to add to the queue
25
25
  # @param [Integer] Time in milliseconds to wait before giving up
26
- # @return [Boolean] True if adding was successfull if not it return false
26
+ # @return [Boolean] True if adding was successful if not it return false
27
27
  def offer(obj, timeout_ms)
28
28
  @queue.offer(obj, timeout_ms, TimeUnit::MILLISECONDS)
29
29
  end
@@ -58,7 +58,7 @@ module LogStash; module Util
58
58
  def initialize(queue, batch_size = 125, wait_for = 250)
59
59
  @queue = queue
60
60
  @mutex = Mutex.new
61
- # Note that @infilght_batches as a central mechanism for tracking inflight
61
+ # Note that @inflight_batches as a central mechanism for tracking inflight
62
62
  # batches will fail if we have multiple read clients in the pipeline.
63
63
  @inflight_batches = {}
64
64
 
@@ -72,6 +72,10 @@ module LogStash; module Util
72
72
  # noop, compat with acked queue read client
73
73
  end
74
74
 
75
+ def empty?
76
+ true # synchronous queue is always empty
77
+ end
78
+
75
79
  def set_batch_dimensions(batch_size, wait_for)
76
80
  @batch_size = batch_size
77
81
  @wait_for = wait_for
@@ -90,7 +94,6 @@ module LogStash; module Util
90
94
  def define_initial_metrics_values(namespaced_metric)
91
95
  namespaced_metric.report_time(:duration_in_millis, 0)
92
96
  namespaced_metric.increment(:filtered, 0)
93
- namespaced_metric.increment(:in, 0)
94
97
  namespaced_metric.increment(:out, 0)
95
98
  end
96
99
 
@@ -104,18 +107,24 @@ module LogStash; module Util
104
107
  @inflight_batches.fetch(Thread.current, [])
105
108
  end
106
109
 
107
- def take_batch
110
+ # create a new empty batch
111
+ # @return [ReadBatch] a new empty read batch
112
+ def new_batch
113
+ ReadBatch.new(@queue, @batch_size, @wait_for)
114
+ end
115
+
116
+ def read_batch
117
+ batch = new_batch
118
+ @mutex.synchronize { batch.read_next }
119
+ start_metrics(batch)
120
+ batch
121
+ end
122
+
123
+ def start_metrics(batch)
108
124
  @mutex.synchronize do
109
- batch = ReadBatch.new(@queue, @batch_size, @wait_for)
125
+ # there seems to be concurrency issues with metrics, keep it in the mutex
110
126
  set_current_thread_inflight_batch(batch)
111
-
112
- # We dont actually have any events to work on so lets
113
- # not bother with recording metrics for them
114
- if batch.size > 0
115
- add_starting_metrics(batch)
116
- start_clock
117
- end
118
- batch
127
+ start_clock
119
128
  end
120
129
  end
121
130
 
@@ -125,8 +134,9 @@ module LogStash; module Util
125
134
 
126
135
  def close_batch(batch)
127
136
  @mutex.synchronize do
137
+ # there seems to be concurrency issues with metrics, keep it in the mutex
128
138
  @inflight_batches.delete(Thread.current)
129
- stop_clock
139
+ stop_clock(batch)
130
140
  end
131
141
  end
132
142
 
@@ -137,18 +147,18 @@ module LogStash; module Util
137
147
  ]
138
148
  end
139
149
 
140
- def stop_clock
150
+ def stop_clock(batch)
141
151
  unless @inflight_clocks[Thread.current].nil?
142
- @inflight_clocks[Thread.current].each(&:stop)
152
+ if batch.size > 0
153
+ # only stop (which also records) the metrics if the batch is non-empty.
154
+ # start_clock is now called at empty batch creation and an empty batch could
155
+ # stay empty all the way down to the close_batch call.
156
+ @inflight_clocks[Thread.current].each(&:stop)
157
+ end
143
158
  @inflight_clocks.delete(Thread.current)
144
159
  end
145
160
  end
146
161
 
147
- def add_starting_metrics(batch)
148
- @event_metric.increment(:in, batch.starting_size)
149
- @pipeline_metric.increment(:in, batch.starting_size)
150
- end
151
-
152
162
  def add_filtered_metrics(batch)
153
163
  @event_metric.increment(:filtered, batch.filtered_size)
154
164
  @pipeline_metric.increment(:filtered, batch.filtered_size)
@@ -162,6 +172,10 @@ module LogStash; module Util
162
172
 
163
173
  class ReadBatch
164
174
  def initialize(queue, size, wait)
175
+ @queue = queue
176
+ @size = size
177
+ @wait = wait
178
+
165
179
  @originals = Hash.new
166
180
 
167
181
  # TODO: disabled for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
@@ -170,7 +184,16 @@ module LogStash; module Util
170
184
  @generated = Hash.new
171
185
  @iterating_temp = Hash.new
172
186
  @iterating = false # Atomic Boolean maybe? Although batches are not shared across threads
173
- take_originals_from_queue(queue, size, wait)
187
+ @acked_batch = nil
188
+ end
189
+
190
+ def read_next
191
+ @size.times do |t|
192
+ event = @queue.poll(@wait)
193
+ return if event.nil? # queue poll timed out
194
+
195
+ @originals[event] = true
196
+ end
174
197
  end
175
198
 
176
199
  def merge(event)
@@ -235,15 +258,6 @@ module LogStash; module Util
235
258
  @generated.update(@iterating_temp)
236
259
  @iterating_temp.clear
237
260
  end
238
-
239
- def take_originals_from_queue(queue, size, wait)
240
- size.times do |t|
241
- event = queue.poll(wait)
242
- return if event.nil? # queue poll timed out
243
-
244
- @originals[event] = true
245
- end
246
- end
247
261
  end
248
262
 
249
263
  class WriteClient
@@ -272,6 +286,10 @@ module LogStash; module Util
272
286
  @events = []
273
287
  end
274
288
 
289
+ def size
290
+ @events.size
291
+ end
292
+
275
293
  def push(event)
276
294
  @events.push(event)
277
295
  end