logstash-core 5.0.0.alpha6.snapshot5-java → 5.0.0-java


Potentially problematic release.

This version of logstash-core might be problematic.

Files changed (48)
  1. checksums.yaml +4 -4
  2. data/lib/jars.rb +1 -1
  3. data/lib/logstash-core/version.rb +1 -1
  4. data/lib/logstash/agent.rb +45 -11
  5. data/lib/logstash/api/app_helpers.rb +43 -7
  6. data/lib/logstash/api/commands/stats.rb +2 -1
  7. data/lib/logstash/api/errors.rb +28 -0
  8. data/lib/logstash/api/modules/base.rb +9 -7
  9. data/lib/logstash/api/modules/logging.rb +52 -0
  10. data/lib/logstash/api/modules/node.rb +13 -9
  11. data/lib/logstash/api/modules/root.rb +0 -2
  12. data/lib/logstash/api/modules/stats.rb +0 -2
  13. data/lib/logstash/api/rack_app.rb +5 -3
  14. data/lib/logstash/environment.rb +4 -5
  15. data/lib/logstash/instrument/collector.rb +4 -0
  16. data/lib/logstash/instrument/metric_store.rb +27 -2
  17. data/lib/logstash/logging/logger.rb +15 -4
  18. data/lib/logstash/patches/puma.rb +44 -0
  19. data/lib/logstash/pipeline.rb +8 -15
  20. data/lib/logstash/runner.rb +31 -17
  21. data/lib/logstash/settings.rb +34 -9
  22. data/lib/logstash/util/wrapped_synchronous_queue.rb +26 -9
  23. data/lib/logstash/version.rb +1 -1
  24. data/lib/logstash/webserver.rb +13 -2
  25. data/locales/en.yml +7 -2
  26. data/logstash-core.gemspec +1 -1
  27. data/spec/api/lib/api/logging_spec.rb +41 -0
  28. data/spec/api/lib/api/node_plugins_spec.rb +4 -3
  29. data/spec/api/lib/api/node_spec.rb +2 -0
  30. data/spec/api/lib/api/node_stats_spec.rb +2 -0
  31. data/spec/api/lib/api/plugins_spec.rb +3 -1
  32. data/spec/api/lib/api/root_spec.rb +3 -0
  33. data/spec/api/lib/errors_spec.rb +27 -0
  34. data/spec/api/lib/rack_app_spec.rb +4 -4
  35. data/spec/logstash/agent_spec.rb +112 -26
  36. data/spec/logstash/instrument/metric_store_spec.rb +37 -0
  37. data/spec/logstash/pipeline_spec.rb +54 -0
  38. data/spec/logstash/runner_spec.rb +2 -1
  39. data/spec/logstash/setting_spec.rb +23 -1
  40. data/spec/logstash/settings/string_spec.rb +1 -1
  41. data/spec/logstash/settings_spec.rb +27 -0
  42. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +49 -11
  43. data/spec/logstash/webserver_spec.rb +76 -18
  44. data/spec/support/helpers.rb +8 -0
  45. data/spec/support/mocks_classes.rb +22 -0
  46. data/spec/support/shared_examples.rb +10 -0
  47. data/vendor/jars/org/logstash/logstash-core/{5.0.0-alpha6/logstash-core-5.0.0-alpha6.jar → 5.0.0/logstash-core-5.0.0.jar} +0 -0
  48. metadata +16 -7
data/lib/logstash/environment.rb
@@ -17,9 +17,9 @@ module LogStash

  [
  Setting::String.new("node.name", Socket.gethostname),
- Setting::String.new("path.config", nil, false),
+ Setting::NullableString.new("path.config", nil, false),
  Setting::WritableDirectory.new("path.data", ::File.join(LogStash::Environment::LOGSTASH_HOME, "data")),
- Setting::String.new("config.string", nil, false),
+ Setting::NullableString.new("config.string", nil, false),
  Setting::Boolean.new("config.test_and_exit", false),
  Setting::Boolean.new("config.reload.automatic", false),
  Setting::Numeric.new("config.reload.interval", 3), # in seconds
@@ -31,12 +31,11 @@ module LogStash
  Setting::Numeric.new("pipeline.batch.delay", 5), # in milliseconds
  Setting::Boolean.new("pipeline.unsafe_shutdown", false),
  Setting.new("path.plugins", Array, []),
- Setting::String.new("interactive", nil, false),
+ Setting::NullableString.new("interactive", nil, false),
  Setting::Boolean.new("config.debug", false),
- Setting::String.new("log.level", "warn", true, ["fatal", "error", "warn", "debug", "info", "trace"]),
+ Setting::String.new("log.level", "info", true, ["fatal", "error", "warn", "debug", "info", "trace"]),
  Setting::Boolean.new("version", false),
  Setting::Boolean.new("help", false),
- Setting::String.new("path.log", nil, false),
  Setting::String.new("log.format", "plain", true, ["json", "plain"]),
  Setting::String.new("http.host", "127.0.0.1"),
  Setting::PortRange.new("http.port", 9600..9700),
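
Note: the switch to Setting::NullableString ties into the Settings#validate_all pass added in settings.rb further down. These settings keep nil defaults, and NullableString#validate returns early on nil, so the startup-time re-validation accepts an unset value. A hedged sketch of the difference (illustrative values only, assuming the logstash-core sources are on the load path):

    require "logstash/settings"

    # NullableString#validate (added below in settings.rb) returns early for nil,
    # so validate_all tolerates an unset value.
    nullable = LogStash::Setting::NullableString.new("config.string", nil, false)
    nullable.validate_value   # passes: nil is allowed

    # A plain Setting::String with a nil default fails the same check.
    plain = LogStash::Setting::String.new("some.string", nil, false)  # "some.string" is a made-up name
    plain.validate_value      # raises ArgumentError: nil is not a ::String
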
data/lib/logstash/instrument/collector.rb
@@ -104,5 +104,9 @@ module LogStash module Instrument
  logger.debug("Collector: Sending snapshot to observers", :created_at => created_at) if logger.debug?
  notify_observers(snapshot_metric)
  end
+
+ def clear(keypath)
+ @metric_store.prune(keypath)
+ end
  end
  end; end
data/lib/logstash/instrument/metric_store.rb
@@ -80,8 +80,7 @@ module LogStash module Instrument
  # @param [Array] The path where values should be located
  # @return [Hash]
  def get_with_path(path)
- key_paths = path.gsub(/^#{KEY_PATH_SEPARATOR}+/, "").split(KEY_PATH_SEPARATOR)
- get(*key_paths)
+ get(*key_paths(path))
  end

  # Similar to `get_with_path` but use symbols instead of string
@@ -180,11 +179,28 @@ module LogStash module Instrument
  end
  alias_method :all, :each

+ def prune(path)
+ key_paths = key_paths(path).map {|k| k.to_sym }
+ @structured_lookup_mutex.synchronize do
+ keys_to_delete = @fast_lookup.keys.select {|namespace, _| (key_paths - namespace).empty? }
+ keys_to_delete.each {|k| @fast_lookup.delete(k) }
+ delete_from_map(@store, key_paths)
+ end
+ end
+
+ def size
+ @fast_lookup.size
+ end
+
  private
  def get_all
  @fast_lookup.values
  end

+ def key_paths(path)
+ path.gsub(/^#{KEY_PATH_SEPARATOR}+/, "").split(KEY_PATH_SEPARATOR)
+ end
+
  # This method take an array of keys and recursively search the metric store structure
  # and return a filtered hash of the structure. This method also take into consideration
  # getting two different branchs.
@@ -294,5 +310,14 @@ module LogStash module Instrument
  new_map = map.fetch_or_store(current) { Concurrent::Map.new }
  return fetch_or_store_namespace_recursively(new_map, namespaces_path, idx + 1)
  end
+
+ def delete_from_map(map, keys)
+ key = keys.first
+ if keys.size == 1
+ map.delete(key)
+ else
+ delete_from_map(map[key], keys[1..-1]) unless map[key].nil?
+ end
+ end
  end
  end; end
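
Note: prune removes a subtree from both lookup structures, dropping matching flat keys from @fast_lookup and walking the nested @store with delete_from_map. A self-contained toy version of that recursion on plain Hashes (the real store uses Concurrent::Map, but the traversal is the same):

    # Toy reimplementation of the delete_from_map recursion shown above.
    def delete_from_map(map, keys)
      key = keys.first
      if keys.size == 1
        map.delete(key)
      else
        delete_from_map(map[key], keys[1..-1]) unless map[key].nil?
      end
    end

    store = { stats: { pipelines: { main: { events: 42 } }, jvm: { heap: 1 } } }
    delete_from_map(store, [:stats, :pipelines])
    store # => { :stats => { :jvm => { :heap => 1 } } }
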
data/lib/logstash/logging/logger.rb
@@ -6,6 +6,8 @@ module LogStash
  java_import org.apache.logging.log4j.Level
  java_import org.apache.logging.log4j.LogManager
  java_import org.apache.logging.log4j.core.config.Configurator
+ @@config_mutex = Mutex.new
+ @@logging_context = nil

  def initialize(name)
  @logger = LogManager.getLogger(name)
@@ -59,13 +61,22 @@ module LogStash
  @logger.trace(message, data)
  end

- # Point logging at a specific path.
  def self.configure_logging(level, path = LogManager::ROOT_LOGGER_NAME)
- Configurator.setLevel(path, Level.toLevel(level))
- end # def configure_logging
+ @@config_mutex.synchronize { Configurator.setLevel(path, Level.valueOf(level)) }
+ rescue Exception => e
+ raise ArgumentError, "invalid level[#{level}] for logger[#{path}]"
+ end

  def self.initialize(config_location)
- Configurator.initialize(nil, config_location)
+ @@config_mutex.synchronize do
+ if @@logging_context.nil?
+ @@logging_context = Configurator.initialize(nil, config_location)
+ end
+ end
+ end
+
+ def self.get_logging_context
+ return @@logging_context
  end
  end
  end
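
Note: replacing Level.toLevel with Level.valueOf changes the failure mode. toLevel silently falls back to DEBUG for unknown names, while valueOf raises, which the new rescue converts into an ArgumentError. A hedged usage sketch (assumes a JRuby process with logstash-core and log4j2 loaded; the require path follows the gem's file layout):

    require "logstash/logging/logger"

    LogStash::Logging::Logger.configure_logging("debug", "logstash.agent")  # valid level name
    begin
      LogStash::Logging::Logger.configure_logging("verbose")                # not a log4j level
    rescue ArgumentError => e
      puts e.message  # reports the invalid level and the target logger
    end
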
data/lib/logstash/patches/puma.rb (new file)
@@ -0,0 +1,44 @@
+ # encoding: utf-8
+ #
+ # Patch to replace the usage of STDERR and STDOUT
+ # see: https://github.com/elastic/logstash/issues/5912
+ module LogStash
+ class NullLogger
+ def self.debug(message)
+ end
+ end
+
+ # Puma uses by default the STDERR an the STDOUT for all his error
+ # handling, the server class accept custom a events object that can accept custom io object,
+ # so I just wrap the logger into an IO like object.
+ class IOWrappedLogger
+ def initialize(new_logger)
+ @logger_lock = Mutex.new
+ @logger = new_logger
+ end
+
+ def sync=(v)
+ # noop
+ end
+
+ def logger=(logger)
+ @logger_lock.synchronize { @logger = logger }
+ end
+
+ def puts(str)
+ # The logger only accept a str as the first argument
+ @logger_lock.synchronize { @logger.debug(str.to_s) }
+ end
+ alias_method :write, :puts
+ alias_method :<<, :puts
+ end
+
+ end
+
+ # Reopen the puma class to create a scoped STDERR and STDOUT
+ # This operation is thread safe since its done at the class level
+ # and force JRUBY to flush his classes cache.
+ module Puma
+ STDERR = LogStash::IOWrappedLogger.new(LogStash::NullLogger)
+ STDOUT = LogStash::IOWrappedLogger.new(LogStash::NullLogger)
+ end
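
Note: the wrapper only has to respond to puts, write, << and sync=, so anything with a debug method can sit behind it; the webserver swaps the real logger in once it exists. A self-contained sketch using the patched classes above (the stand-in logger below is illustrative, not part of logstash-core):

    # Stand-in logger; the real code injects a LogStash logger via io.logger =.
    class StdoutDebugLogger
      def self.debug(message)
        $stdout.puts("[debug] #{message}")
      end
    end

    io = LogStash::IOWrappedLogger.new(LogStash::NullLogger)
    io.puts("early boot noise")        # swallowed by NullLogger
    io.logger = StdoutDebugLogger      # swap in a real logger later
    io << "Puma starting..."           # => [debug] Puma starting...
    io.write("listening on 9600")      # => [debug] listening on 9600
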
data/lib/logstash/pipeline.rb
@@ -162,12 +162,12 @@ module LogStash; class Pipeline
  wait_inputs
  transition_to_stopped

- @logger.info("Input plugins stopped! Will shutdown filter/output workers.")
+ @logger.debug("Input plugins stopped! Will shutdown filter/output workers.")

  shutdown_flusher
  shutdown_workers

- @logger.info("Pipeline #{@pipeline_id} has been shutdown")
+ @logger.debug("Pipeline #{@pipeline_id} has been shutdown")

  # exit code
  return 0
@@ -257,14 +257,9 @@ module LogStash; class Pipeline
  def filter_batch(batch)
  batch.each do |event|
  if event.is_a?(Event)
- filtered = filter_func(event)
- filtered.each do |e|
- #these are both original and generated events
- if e.cancelled?
- batch.cancel(e)
- else
- batch.merge(e)
- end
+ filter_func(event).each do |e|
+ # these are both original and generated events
+ batch.merge(e) unless e.cancelled?
  end
  end
  end
@@ -375,9 +370,9 @@ module LogStash; class Pipeline

  before_stop.call if block_given?

- @logger.info "Closing inputs"
+ @logger.debug "Closing inputs"
  @inputs.each(&:do_stop)
- @logger.info "Closed inputs"
+ @logger.debug "Closed inputs"
  end # def shutdown

  # After `shutdown` is called from an external thread this is called from the main thread to
@@ -492,9 +487,7 @@ module LogStash; class Pipeline
  def flush_filters_to_batch(batch, options = {})
  options[:final] = batch.shutdown_signal_received?
  flush_filters(options) do |event|
- if event.cancelled?
- batch.cancel(event)
- else
+ unless event.cancelled?
  @logger.debug? and @logger.debug("Pushing flushed events", :event => event)
  batch.merge(event)
  end
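
Note: both filter hunks remove the Batch#cancel bookkeeping. Events cancelled by filters, or cancelled flushed events, are now simply skipped instead of being tracked in a separate @cancelled hash, which matches the cancel/cancelled_size changes in wrapped_synchronous_queue.rb below. A toy sketch of the new merge-unless-cancelled flow (stand-in classes, not the real Event/ReadBatch):

    ToyEvent = Struct.new(:data, :cancelled) do
      def cancelled?
        cancelled
      end
    end

    class ToyBatch
      attr_reader :events
      def initialize
        @events = []
      end
      def merge(e)
        @events << e
      end
    end

    filtered = [ToyEvent.new("kept", false), ToyEvent.new("dropped by a filter", true)]
    batch = ToyBatch.new
    filtered.each { |e| batch.merge(e) unless e.cancelled? }
    batch.events.map(&:data) # => ["kept"]
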
data/lib/logstash/runner.rb
@@ -19,11 +19,12 @@ require "logstash/version"

  class LogStash::Runner < Clamp::StrictCommand
  include LogStash::Util::Loggable
- # The `path.settings` need to be defined in the runner instead of the `logstash-core/lib/logstash/environment.rb`
+ # The `path.settings` and `path.logs` need to be defined in the runner instead of the `logstash-core/lib/logstash/environment.rb`
  # because the `Environment::LOGSTASH_HOME` doesn't exist in the context of the `logstash-core` gem.
  #
  # See issue https://github.com/elastic/logstash/issues/5361
  LogStash::SETTINGS.register(LogStash::Setting::String.new("path.settings", ::File.join(LogStash::Environment::LOGSTASH_HOME, "config")))
+ LogStash::SETTINGS.register(LogStash::Setting::String.new("path.logs", ::File.join(LogStash::Environment::LOGSTASH_HOME, "logs")))

  # Node Settings
  option ["-n", "--node.name"], "NAME",
@@ -77,9 +78,10 @@ class LogStash::Runner < Clamp::StrictCommand
  :default => LogStash::SETTINGS.get_default("path.plugins")

  # Logging Settings
- option ["-l", "--path.log"], "FILE",
+ option ["-l", "--path.logs"], "PATH",
  I18n.t("logstash.runner.flag.log"),
- :attribute_name => "path.log"
+ :attribute_name => "path.logs",
+ :default => LogStash::SETTINGS.get_default("path.logs")

  option "--log.level", "LEVEL", I18n.t("logstash.runner.flag.log_level"),
  :default => LogStash::SETTINGS.get_default("log.level"),
@@ -163,20 +165,12 @@ class LogStash::Runner < Clamp::StrictCommand
  rescue => e
  # abort unless we're just looking for the help
  if (["--help", "-h"] & args).empty?
- $stderr.puts "INFO: Logstash has a new settings file which defines start up time settings. This file is typically located in $LS_HOME/config or /etc/logstash. If you installed Logstash through a package and are starting it manually please specify the location to this settings file by passing in \"--path.settings=/path/..\" in the command line options"
+ $stderr.puts "INFO: Logstash requires a setting file which is typically located in $LS_HOME/config or /etc/logstash. If you installed Logstash through a package and are starting it manually please specify the location to this settings file by passing in \"--path.settings=/path/..\""
  $stderr.puts "ERROR: Failed to load settings file from \"path.settings\". Aborting... path.setting=#{LogStash::SETTINGS.get("path.settings")}, exception=#{e.class}, message=>#{e.message}"
  return 1
  end
  end

- # Configure Logstash logging facility, this need to be done before everything else to
- # make sure the logger has the correct settings and the log level is correctly defined.
- # TODO(talevy): cleanly support `path.logs` setting in log4j
- unless java.lang.System.getProperty("log4j.configurationFile")
- log4j_config_location = setting("path.settings") + "/log4j2.properties"
- LogStash::Logging::Logger::initialize(log4j_config_location)
- end
-
  super(*[args])
  end

@@ -185,7 +179,23 @@ class LogStash::Runner < Clamp::StrictCommand
  require "logstash/util/java_version"
  require "stud/task"

+ # Configure Logstash logging facility, this need to be done before everything else to
+ # make sure the logger has the correct settings and the log level is correctly defined.
+ java.lang.System.setProperty("ls.logs", setting("path.logs"))
+ java.lang.System.setProperty("ls.log.format", setting("log.format"))
+ java.lang.System.setProperty("ls.log.level", setting("log.level"))
+ unless java.lang.System.getProperty("log4j.configurationFile")
+ log4j_config_location = ::File.join(setting("path.settings"), "log4j2.properties")
+ LogStash::Logging::Logger::initialize("file://" + log4j_config_location)
+ end
+ # override log level that may have been introduced from a custom log4j config file
  LogStash::Logging::Logger::configure_logging(setting("log.level"))
+
+ # Adding this here because a ton of LS users install LS via packages and try to manually
+ # start Logstash using bin/logstash. See #5986. I think not logging to console is good for
+ # services, but until LS users re-learn that logs end up in path.logs, we should keep this
+ # message. Otherwise we'll be answering the same question again and again.
+ puts "Sending Logstash logs to #{setting("path.logs")} which is now configured via log4j2.properties."

  if setting("config.debug") && logger.debug?
  logger.warn("--config.debug was specified, but log.level was not set to \'debug\'! No config info will be logged.")
@@ -194,14 +204,14 @@ class LogStash::Runner < Clamp::StrictCommand
  LogStash::Util::set_thread_name(self.class.name)

  if RUBY_VERSION < "1.9.2"
- $stderr.puts "Ruby 1.9.2 or later is required. (You are running: " + RUBY_VERSION + ")"
+ logger.fatal "Ruby 1.9.2 or later is required. (You are running: " + RUBY_VERSION + ")"
  return 1
  end

  # Exit on bad java versions
  java_version = LogStash::Util::JavaVersion.version
  if LogStash::Util::JavaVersion.bad_java_version?(java_version)
- $stderr.puts "Java version 1.8.0 or later is required. (You are running: #{java_version})"
+ logger.fatal "Java version 1.8.0 or later is required. (You are running: #{java_version})"
  return 1
  end

@@ -216,7 +226,9 @@ class LogStash::Runner < Clamp::StrictCommand

  return start_shell(setting("interactive"), binding) if setting("interactive")

- @settings.format_settings.each {|line| logger.info(line) }
+ @settings.validate_all
+
+ @settings.format_settings.each {|line| logger.debug(line) }

  if setting("config.string").nil? && setting("path.config").nil?
  fail(I18n.t("logstash.runner.missing-configuration"))
@@ -233,6 +245,7 @@ class LogStash::Runner < Clamp::StrictCommand
  begin
  LogStash::Pipeline.new(config_str)
  puts "Configuration OK"
+ logger.info "Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash"
  return 0
  rescue => e
  logger.fatal I18n.t("logstash.runner.invalid-configuration", :error => e.message)
@@ -252,7 +265,8 @@ class LogStash::Runner < Clamp::StrictCommand
  @agent_task = Stud::Task.new { @agent.execute }

  # no point in enabling config reloading before the agent starts
- sighup_id = trap_sighup()
+ # also windows doesn't have SIGHUP. we can skip it
+ sighup_id = LogStash::Environment.windows? ? nil : trap_sighup()

  agent_return = @agent_task.wait

@@ -280,7 +294,7 @@ class LogStash::Runner < Clamp::StrictCommand
  def show_version
  show_version_logstash

- if logger.is_info_enabled
+ if logger.info?
  show_version_ruby
  show_version_java if LogStash::Environment.jruby?
  show_gems if logger.debug?
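
Note: the bootstrap order in the execute hunk matters. The ls.* system properties have to be in place before log4j2.properties is loaded, since the shipped configuration reads its output path and format from them, and configure_logging runs last so --log.level still overrides a custom config file. A hedged sketch of that sequence (JRuby assumed; paths are illustrative, not defaults):

    # Illustrative ordering only, mirroring the hunk above; not the gem's actual code path.
    java.lang.System.setProperty("ls.logs", "/var/log/logstash")    # consumed by log4j2.properties
    java.lang.System.setProperty("ls.log.format", "plain")
    java.lang.System.setProperty("ls.log.level", "info")

    unless java.lang.System.getProperty("log4j.configurationFile")  # honor an explicit override
      LogStash::Logging::Logger::initialize("file:///etc/logstash/log4j2.properties")
    end
    LogStash::Logging::Logger::configure_logging("info")            # CLI/settings level wins last
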
data/lib/logstash/settings.rb
@@ -95,6 +95,12 @@ module LogStash
  self.merge(flatten_hash(settings))
  end

+ def validate_all
+ @settings.each do |name, setting|
+ setting.validate_value
+ end
+ end
+
  private
  def read_yaml(path)
  YAML.safe_load(IO.read(path)) || {}
@@ -123,8 +129,9 @@ module LogStash
  @validator_proc = validator_proc
  @value = nil
  @value_is_set = false
+ @strict = strict

- validate(default) if strict
+ validate(default) if @strict
  @default = default
  end

@@ -136,8 +143,12 @@ module LogStash
  @value_is_set
  end

+ def strict?
+ @strict
+ end
+
  def set(value)
- validate(value)
+ validate(value) if @strict
  @value = value
  @value_is_set = true
  @value
@@ -167,12 +178,18 @@ module LogStash
  self.to_hash == other.to_hash
  end

- private
- def validate(value)
- if !value.is_a?(@klass)
- raise ArgumentError.new("Setting \"#{@name}\" must be a #{@klass}. Received: #{value} (#{value.class})")
- elsif @validator_proc && !@validator_proc.call(value)
- raise ArgumentError.new("Failed to validate setting \"#{@name}\" with value: #{value}")
+ def validate_value
+ validate(value)
+ end
+
+ protected
+ def validate(input)
+ if !input.is_a?(@klass)
+ raise ArgumentError.new("Setting \"#{@name}\" must be a #{@klass}. Received: #{input} (#{input.class})")
+ end
+
+ if @validator_proc && !@validator_proc.call(input)
+ raise ArgumentError.new("Failed to validate setting \"#{@name}\" with value: #{input}")
  end
  end

@@ -351,6 +368,13 @@ module LogStash
  end
  end

+ class NullableString < String
+ def validate(value)
+ return if value.nil?
+ super(value)
+ end
+ end
+
  class ExistingFilePath < Setting
  def initialize(name, default=nil, strict=true)
  super(name, ::String, default, strict) do |file_path|
@@ -364,7 +388,7 @@ module LogStash
  end

  class WritableDirectory < Setting
- def initialize(name, default=nil, strict=true)
+ def initialize(name, default=nil, strict=false)
  super(name, ::String, default, strict) do |path|
  if ::File.directory?(path) && ::File.writable?(path)
  true
@@ -378,3 +402,4 @@ module LogStash

  SETTINGS = Settings.new
  end
+
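
Note: with set only validating strict settings, invalid values for non-strict settings are caught later, when the runner calls @settings.validate_all before starting the pipeline. A toy sketch of that deferred check (simplified stand-in, not the real LogStash::Setting API):

    class ToySetting
      def initialize(name, klass, default = nil, strict = true)
        @name, @klass, @strict = name, klass, strict
        validate(default) if @strict
        @value = default
      end

      def set(value)
        validate(value) if @strict  # non-strict settings accept anything here...
        @value = value
      end

      def validate_value            # ...and are only checked by the validate_all pass
        validate(@value)
      end

      private

      def validate(input)
        raise ArgumentError, "#{@name} must be a #{@klass}" unless input.is_a?(@klass)
      end
    end

    lazy = ToySetting.new("config.sample", ::String, nil, false)  # "config.sample" is a made-up name
    lazy.set(42)                                                  # not rejected yet
    begin
      lazy.validate_value                                         # caught at startup instead
    rescue ArgumentError => e
      puts e.message # => config.sample must be a String
    end
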
data/lib/logstash/util/wrapped_synchronous_queue.rb
@@ -90,9 +90,14 @@ module LogStash; module Util
  def take_batch
  @mutex.synchronize do
  batch = ReadBatch.new(@queue, @batch_size, @wait_for)
- add_starting_metrics(batch)
  set_current_thread_inflight_batch(batch)
- start_clock
+
+ # We dont actually have any events to work on so lets
+ # not bother with recording metrics for them
+ if batch.size > 0
+ add_starting_metrics(batch)
+ start_clock
+ end
  batch
  end
  end
@@ -116,8 +121,10 @@ module LogStash; module Util
  end

  def stop_clock
- @inflight_clocks[Thread.current].each(&:stop)
- @inflight_clocks.delete(Thread.current)
+ unless @inflight_clocks[Thread.current].nil?
+ @inflight_clocks[Thread.current].each(&:stop)
+ @inflight_clocks.delete(Thread.current)
+ end
  end

  def add_starting_metrics(batch)
@@ -141,7 +148,10 @@ module LogStash; module Util
  @shutdown_signal_received = false
  @flush_signal_received = false
  @originals = Hash.new
- @cancelled = Hash.new
+
+ # TODO: disabled for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
+ # @cancelled = Hash.new
+
  @generated = Hash.new
  @iterating_temp = Hash.new
  @iterating = false # Atomic Boolean maybe? Although batches are not shared across threads
@@ -161,17 +171,22 @@ module LogStash; module Util
  end

  def cancel(event)
- @cancelled[event] = true
+ # TODO: disabled for https://github.com/elastic/logstash/issues/6055 - will have to properly refactor
+ raise("cancel is unsupported")
+ # @cancelled[event] = true
  end

  def each(&blk)
  # take care not to cause @originals or @generated to change during iteration
  @iterating = true
+
+ # below the checks for @cancelled.include?(e) have been replaced by e.cancelled?
+ # TODO: for https://github.com/elastic/logstash/issues/6055 = will have to properly refactor
  @originals.each do |e, _|
- blk.call(e) unless @cancelled.include?(e)
+ blk.call(e) unless e.cancelled?
  end
  @generated.each do |e, _|
- blk.call(e) unless @cancelled.include?(e)
+ blk.call(e) unless e.cancelled?
  end
  @iterating = false
  update_generated
@@ -190,7 +205,9 @@ module LogStash; module Util
  end

  def cancelled_size
- @cancelled.size
+ # TODO: disabled for https://github.com/elastic/logstash/issues/6055 = will have to properly refactor
+ raise("cancelled_size is unsupported ")
+ # @cancelled.size
  end

  def shutdown_signal_received?