logstash-core 5.0.0.alpha5.snapshot1-java → 5.0.0.alpha6.snapshot1-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of logstash-core might be problematic.

Files changed (52)
  1. checksums.yaml +4 -4
  2. data/lib/logstash-core/version.rb +1 -1
  3. data/lib/logstash/agent.rb +1 -1
  4. data/lib/logstash/api/commands/default_metadata.rb +1 -1
  5. data/lib/logstash/api/commands/hot_threads_reporter.rb +4 -7
  6. data/lib/logstash/api/commands/node.rb +5 -4
  7. data/lib/logstash/api/commands/stats.rb +8 -3
  8. data/lib/logstash/api/modules/base.rb +5 -0
  9. data/lib/logstash/api/modules/node.rb +1 -2
  10. data/lib/logstash/api/modules/node_stats.rb +1 -2
  11. data/lib/logstash/codecs/base.rb +29 -1
  12. data/lib/logstash/config/mixin.rb +1 -1
  13. data/lib/logstash/environment.rb +5 -5
  14. data/lib/logstash/filter_delegator.rb +4 -5
  15. data/lib/logstash/instrument/periodic_poller/jvm.rb +43 -10
  16. data/lib/logstash/output_delegator.rb +33 -168
  17. data/lib/logstash/output_delegator_strategies/legacy.rb +29 -0
  18. data/lib/logstash/output_delegator_strategies/shared.rb +20 -0
  19. data/lib/logstash/output_delegator_strategies/single.rb +23 -0
  20. data/lib/logstash/output_delegator_strategy_registry.rb +36 -0
  21. data/lib/logstash/outputs/base.rb +39 -26
  22. data/lib/logstash/patches/clamp.rb +6 -0
  23. data/lib/logstash/pipeline.rb +42 -14
  24. data/lib/logstash/pipeline_reporter.rb +2 -8
  25. data/lib/logstash/plugin.rb +6 -10
  26. data/lib/logstash/runner.rb +12 -9
  27. data/lib/logstash/settings.rb +124 -21
  28. data/lib/logstash/util/wrapped_synchronous_queue.rb +17 -1
  29. data/lib/logstash/version.rb +1 -1
  30. data/lib/logstash/webserver.rb +44 -33
  31. data/locales/en.yml +5 -1
  32. data/logstash-core.gemspec +2 -2
  33. data/spec/api/lib/api/node_spec.rb +62 -10
  34. data/spec/api/lib/api/node_stats_spec.rb +16 -3
  35. data/spec/api/lib/api/support/resource_dsl_methods.rb +11 -1
  36. data/spec/api/spec_helper.rb +1 -1
  37. data/spec/conditionals_spec.rb +12 -1
  38. data/spec/logstash/agent_spec.rb +3 -0
  39. data/spec/logstash/codecs/base_spec.rb +74 -0
  40. data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +37 -10
  41. data/spec/logstash/output_delegator_spec.rb +64 -89
  42. data/spec/logstash/outputs/base_spec.rb +91 -15
  43. data/spec/logstash/pipeline_reporter_spec.rb +1 -6
  44. data/spec/logstash/pipeline_spec.rb +20 -22
  45. data/spec/logstash/plugin_spec.rb +3 -3
  46. data/spec/logstash/runner_spec.rb +86 -3
  47. data/spec/logstash/settings/integer_spec.rb +20 -0
  48. data/spec/logstash/settings/numeric_spec.rb +28 -0
  49. data/spec/logstash/settings/port_range_spec.rb +93 -0
  50. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +6 -0
  51. data/spec/logstash/webserver_spec.rb +95 -0
  52. metadata +20 -6
@@ -0,0 +1,29 @@
+ # Remove this in Logstash 6.0
+ module LogStash module OutputDelegatorStrategies class Legacy
+   attr_reader :worker_count, :workers
+
+   def initialize(logger, klass, metric, plugin_args)
+     @worker_count = (plugin_args["workers"] || 1).to_i
+     @workers = @worker_count.times.map {|t| klass.new(plugin_args)}
+     @worker_queue = SizedQueue.new(@worker_count)
+     @workers.each {|w| @worker_queue << w}
+   end
+
+   def register
+     @workers.each(&:register)
+   end
+
+   def multi_receive(events)
+     worker = @worker_queue.pop
+     worker.multi_receive(events)
+   ensure
+     @worker_queue << worker if worker
+   end
+
+   def do_close
+     # No mutex needed since this is only called when the pipeline is clear
+     @workers.each(&:do_close)
+   end
+
+   ::LogStash::OutputDelegatorStrategyRegistry.instance.register(:legacy, self)
+ end; end; end
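
Note: the Legacy strategy above preserves the old `workers => N` behavior by pooling N plugin instances in a SizedQueue and checking one out per batch. A minimal usage sketch, assuming a stand-in output class, logger, metric, and events batch (none of which come from this diff):

    # Hypothetical wiring of the legacy strategy with a pool of two workers
    strategy = LogStash::OutputDelegatorStrategies::Legacy.new(
      logger, LogStash::Outputs::Stdout, metric, {"workers" => 2}
    )
    strategy.register                 # registers every pooled instance
    strategy.multi_receive(events)    # pops a worker, delivers the batch, returns it to the queue
    strategy.do_close                 # closes all pooled instances once the pipeline is drained
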
@@ -0,0 +1,20 @@
+ module LogStash module OutputDelegatorStrategies class Shared
+   def initialize(logger, klass, metric, plugin_args)
+     @output = klass.new(plugin_args)
+   end
+
+   def register
+     @output.register
+   end
+
+   def multi_receive(events)
+     @output.multi_receive(events)
+   end
+
+   def do_close
+     @output.do_close
+   end
+
+   ::LogStash::OutputDelegatorStrategyRegistry.instance.register(:shared, self)
+ end; end; end
+
@@ -0,0 +1,23 @@
+ module LogStash module OutputDelegatorStrategies class Single
+   def initialize(logger, klass, metric, plugin_args)
+     @output = klass.new(plugin_args)
+     @mutex = Mutex.new
+   end
+
+   def register
+     @output.register
+   end
+
+   def multi_receive(events)
+     @mutex.synchronize do
+       @output.multi_receive(events)
+     end
+   end
+
+   def do_close
+     # No mutex needed since this is only called when the pipeline is clear
+     @output.do_close
+   end
+
+   ::LogStash::OutputDelegatorStrategyRegistry.instance.register(:single, self)
+ end; end; end
@@ -0,0 +1,36 @@
+ module LogStash; class OutputDelegatorStrategyRegistry
+   class InvalidStrategyError < StandardError; end
+
+   # This is generally used as a singleton
+   # Except perhaps during testing
+   def self.instance
+     @instance ||= self.new
+   end
+
+   def initialize()
+     @map = {}
+   end
+
+   def classes
+     @map.values
+   end
+
+   def types
+     @map.keys
+   end
+
+   def class_for(type)
+     klass = @map[type]
+
+     if !klass
+       raise InvalidStrategyError, "Could not find output delegator strategy of type '#{type}'. Valid strategies: #{@strategy_registry.types}"
+     end
+
+     klass
+   end
+
+   def register(type, klass)
+     @map[type] = klass
+   end
+
+ end; end
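
Taken together, each strategy file registers itself with this singleton registry, and the output delegator resolves a strategy class by its symbolic type. A rough sketch of the lookup flow, assuming all three strategy files have been required (the variable names are illustrative, not from this diff):

    registry = LogStash::OutputDelegatorStrategyRegistry.instance
    registry.types                     # => e.g. [:legacy, :shared, :single]
    klass = registry.class_for(:single)
    strategy = klass.new(logger, output_class, metric, plugin_args)
    registry.class_for(:bogus)         # raises InvalidStrategyError
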
@@ -20,42 +20,37 @@ class LogStash::Outputs::Base < LogStash::Plugin
 
  # The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output, without needing a separate filter in your Logstash pipeline.
  config :codec, :validate => :codec, :default => "plain"
+ # TODO remove this in Logstash 6.0
+ # when we no longer support the :legacy type
+ # This is hacky, but it can only be herne
+ config :workers, :type => :number, :default => 1
+
+ # Set or return concurrency type
+ def self.concurrency(type=nil)
+   if type
+     @concurrency = type
+   else
+     @concurrency || :legacy # default is :legacyo
+   end
+ end
 
- # The number of workers to use for this output.
- # Note that this setting may not be useful for all outputs.
- config :workers, :validate => :number, :default => 1
-
- attr_reader :worker_plugins, :available_workers, :workers, :worker_plugins, :workers_not_supported
-
+ # Deprecated: Favor `concurrency :shared`
  def self.declare_threadsafe!
-   declare_workers_not_supported!
-   @threadsafe = true
+   concurrency :shared
  end
 
+ # Deprecated: Favor `#concurrency`
  def self.threadsafe?
-   @threadsafe == true
+   concurrency == :shared
  end
 
+ # Deprecated: Favor `concurrency :single`
+ # Remove in Logstash 6.0.0
  def self.declare_workers_not_supported!(message=nil)
-   @workers_not_supported_message = message
-   @workers_not_supported = true
- end
-
- def self.workers_not_supported_message
-   @workers_not_supported_message
- end
-
- def self.workers_not_supported?
-   !!@workers_not_supported
+   concurrency :single
  end
 
  public
- # TODO: Remove this in the next major version after Logstash 2.x
- # Post 2.x it should raise an error and tell people to use the class level
- # declaration
- def workers_not_supported(message=nil)
-   self.class.declare_workers_not_supported!(message)
- end
 
  def self.plugin_type
    "output"
@@ -66,9 +61,15 @@ class LogStash::Outputs::Base < LogStash::Plugin
  super
  config_init(@params)
 
+ if self.workers != 1
+   raise LogStash::ConfigurationError, "You are using a plugin that doesn't support workers but have set the workers value explicitly! This plugin uses the #{concurrency} and doesn't need this option"
+ end
+
  # If we're running with a single thread we must enforce single-threaded concurrency by default
  # Maybe in a future version we'll assume output plugins are threadsafe
  @single_worker_mutex = Mutex.new
+
+ @receives_encoded = self.methods.include?(:multi_receive_encoded)
  end
 
  public
@@ -84,7 +85,19 @@ class LogStash::Outputs::Base < LogStash::Plugin
  public
  # To be overriden in implementations
  def multi_receive(events)
-   events.each {|event| receive(event) }
+   if @receives_encoded
+     self.multi_receive_encoded(codec.multi_encode(events))
+   else
+     events.each {|event| receive(event) }
+   end
+ end
+
+ def codec
+   params["codec"]
+ end
+
+ def concurrency
+   self.class.concurrency
  end
 
  private
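
For plugin authors, the upshot of the changes above is that concurrency is now declared at the class level rather than through `workers` or `declare_threadsafe!`. A minimal sketch of an output written against the new API (this plugin is hypothetical, not part of the diff):

    class LogStash::Outputs::Example < LogStash::Outputs::Base
      config_name "example"
      concurrency :shared   # one instance shared across pipeline workers; must be thread-safe

      def register
      end

      # Receives a batch of events; with :single the delegator serializes calls instead
      def multi_receive(events)
        events.each { |event| puts event.to_s }
      end
    end

Per the base-class change above, an output that defines `multi_receive_encoded` is instead handed batches pre-encoded by its codec via `codec.multi_encode`.
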
@@ -63,6 +63,12 @@ module Clamp
  class << self
    include ::Clamp::Option::StrictDeclaration
  end
+
+ def handle_remaining_arguments
+   unless remaining_arguments.empty?
+     signal_usage_error "Unknown command '#{remaining_arguments.first}'"
+   end
+ end
  end
  end
 
@@ -32,6 +32,7 @@ module LogStash; class Pipeline
  :started_at,
  :thread,
  :config_str,
+ :config_hash,
  :settings,
  :metric,
  :filter_queue_client,
@@ -45,11 +46,18 @@ module LogStash; class Pipeline
 
  def initialize(config_str, settings = LogStash::SETTINGS, namespaced_metric = nil)
    @config_str = config_str
+   @config_hash = Digest::SHA1.hexdigest(@config_str)
+   # Every time #plugin is invoked this is incremented to give each plugin
+   # a unique id when auto-generating plugin ids
+   @plugin_counter ||= 0
+
    @logger = Cabin::Channel.get(LogStash)
    @settings = settings
    @pipeline_id = @settings.get_value("pipeline.id") || self.object_id
    @reporter = LogStash::PipelineReporter.new(@logger, self)
 
+   # A list of plugins indexed by id
+   @plugins_by_id = {}
    @inputs = nil
    @filters = nil
    @outputs = nil
@@ -87,7 +95,7 @@ module LogStash; class Pipeline
  queue = LogStash::Util::WrappedSynchronousQueue.new
  @input_queue_client = queue.write_client
  @filter_queue_client = queue.read_client
- # Note that @infilght_batches as a central mechanism for tracking inflight
+ # Note that @inflight_batches as a central mechanism for tracking inflight
  # batches will fail if we have multiple read clients here.
  @filter_queue_client.set_events_metric(metric.namespace([:stats, :events]))
  @filter_queue_client.set_pipeline_metric(
@@ -198,6 +206,8 @@ module LogStash; class Pipeline
  config_metric.gauge(:workers, pipeline_workers)
  config_metric.gauge(:batch_size, batch_size)
  config_metric.gauge(:batch_delay, batch_delay)
+ config_metric.gauge(:config_reload_automatic, @settings.get("config.reload.automatic"))
+ config_metric.gauge(:config_reload_interval, @settings.get("config.reload.interval"))
 
  @logger.info("Starting pipeline",
    "id" => self.pipeline_id,
@@ -288,7 +298,10 @@ module LogStash; class Pipeline
  end
  # Now that we have our output to event mapping we can just invoke each output
  # once with its list of events
- output_events_map.each { |output, events| output.multi_receive(events) }
+ output_events_map.each do |output, events|
+   output.multi_receive(events)
+ end
+
  @filter_queue_client.add_output_metrics(batch)
  end
 
@@ -387,23 +400,38 @@ module LogStash; class Pipeline
  end
 
  def plugin(plugin_type, name, *args)
-   args << {} if args.empty?
+   @plugin_counter += 1
+
+   # Collapse the array of arguments into a single merged hash
+   args = args.reduce({}, &:merge)
+
+   id = if args["id"].nil? || args["id"].empty?
+     args["id"] = "#{@config_hash}-#{@plugin_counter}"
+   else
+     args["id"]
+   end
 
+   raise LogStash::ConfigurationError, "Two plugins have the id '#{id}', please fix this conflict" if @plugins_by_id[id]
+
    pipeline_scoped_metric = metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :plugins])
 
    klass = LogStash::Plugin.lookup(plugin_type, name)
 
-   if plugin_type == "output"
-     LogStash::OutputDelegator.new(@logger, klass, @settings.get("pipeline.output.workers"), pipeline_scoped_metric.namespace(:outputs), *args)
-   elsif plugin_type == "filter"
-     LogStash::FilterDelegator.new(@logger, klass, pipeline_scoped_metric.namespace(:filters), *args)
-   else
-     new_plugin = klass.new(*args)
-     inputs_metric = pipeline_scoped_metric.namespace(:inputs)
-     namespaced_metric = inputs_metric.namespace(new_plugin.plugin_unique_name.to_sym)
-     new_plugin.metric = namespaced_metric
-     new_plugin
-   end
+   # Scope plugins of type 'input' to 'inputs'
+   type_scoped_metric = pipeline_scoped_metric.namespace("#{plugin_type}s".to_sym)
+   plugin = if plugin_type == "output"
+     OutputDelegator.new(@logger, klass, type_scoped_metric,
+                         ::LogStash::OutputDelegatorStrategyRegistry.instance,
+                         args)
+   elsif plugin_type == "filter"
+     LogStash::FilterDelegator.new(@logger, klass, type_scoped_metric, args)
+   else # input
+     input_plugin = klass.new(args)
+     input_plugin.metric = type_scoped_metric.namespace(id)
+     input_plugin
+   end
+
+   @plugins_by_id[id] = plugin
  end
 
  # for backward compatibility in devutils for the rspec helpers, this method is not used
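
The net effect of the new `plugin` method is that every plugin gets a stable id: explicit ids are honored and must be unique within the pipeline, while missing ids are auto-generated from the SHA1 of the config string plus a per-pipeline counter. Roughly, using an illustrative, truncated config hash:

    plugin("filter", "mutate")                     # id => "9f86d08...-1"
    plugin("output", "stdout")                     # id => "9f86d08...-2"
    plugin("output", "stdout", "id" => "my_out")   # id => "my_out"
    plugin("output", "stdout", "id" => "my_out")   # raises LogStash::ConfigurationError (duplicate id)
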
@@ -60,7 +60,6 @@ module LogStash; class PipelineReporter
  {
    :events_filtered => events_filtered,
    :events_consumed => events_consumed,
-   :worker_count => pipeline.worker_threads.size,
    :inflight_count => inflight_count,
    :worker_states => worker_states_snap,
    :output_info => output_info,
@@ -100,15 +99,10 @@ module LogStash; class PipelineReporter
 
  def output_info
    pipeline.outputs.map do |output_delegator|
-     is_multi_worker = output_delegator.worker_count > 1
-
      {
        :type => output_delegator.config_name,
-       :config => output_delegator.config,
-       :is_multi_worker => is_multi_worker,
-       :events_received => output_delegator.events_received,
-       :workers => output_delegator.workers,
-       :busy_workers => output_delegator.busy_workers
+       :plugin_args => output_delegator.plugin_args,
+       :concurrency => output_delegator.concurrency,
      }
    end
  end
@@ -45,8 +45,11 @@ class LogStash::Plugin
  self.class.name == other.class.name && @params == other.params
  end
 
- def initialize(params=nil)
+ def initialize(params=nil)
    @params = LogStash::Util.deep_clone(params)
+   # The id should always be defined normally, but in tests that might not be the case
+   # In the future we may make this more strict in the Plugin API
+   @params["id"] ||= "#{self.class.config_name}_#{SecureRandom.uuid}"
    @logger = Cabin::Channel.get(LogStash)
  end
 
@@ -57,15 +60,7 @@ class LogStash::Plugin
  #
  # @return [String] A plugin ID
  def id
-   (@params["id"].nil? || @params["id"].empty?) ? SecureRandom.uuid : @params["id"]
- end
-
- # Return a unique_name, This is composed by the name of
- # the plugin and the generated ID (of the configured one)
- #
- # @return [String] a unique name
- def plugin_unique_name
-   "#{config_name}_#{id}"
+   @params["id"]
  end
 
  # close is called during shutdown, after the plugin worker
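
With this change an id is fixed at construction time (either taken from the config or generated as `<config_name>_<uuid>`) rather than re-generated on every call, so `#id` is now stable. A small sketch of the expected behavior, using a stand-in filter class:

    named = LogStash::Filters::Mutate.new("id" => "my_mutate")
    named.id   # => "my_mutate"

    anon = LogStash::Filters::Mutate.new({})
    anon.id    # => "mutate_<uuid>", and the same value on every call
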
@@ -127,6 +122,7 @@ class LogStash::Plugin
  LogStash::Registry.instance.lookup(type ,name) do |plugin_klass, plugin_name|
    is_a_plugin?(plugin_klass, plugin_name)
  end
+
  rescue LoadError, NameError => e
    logger.debug("Problems loading the plugin with", :type => type, :name => name, :path => path)
    raise(LogStash::PluginLoadingError, I18n.t("logstash.pipeline.plugin-loading-error", :type => type, :name => name, :path => path, :error => e.to_s))
@@ -47,17 +47,17 @@ class LogStash::Runner < Clamp::StrictCommand
  option ["-w", "--pipeline.workers"], "COUNT",
    I18n.t("logstash.runner.flag.pipeline-workers"),
    :attribute_name => "pipeline.workers",
-   :default => LogStash::SETTINGS.get_default("pipeline.workers"), &:to_i
+   :default => LogStash::SETTINGS.get_default("pipeline.workers")
 
  option ["-b", "--pipeline.batch.size"], "SIZE",
    I18n.t("logstash.runner.flag.pipeline-batch-size"),
    :attribute_name => "pipeline.batch.size",
-   :default => LogStash::SETTINGS.get_default("pipeline.batch.size"), &:to_i
+   :default => LogStash::SETTINGS.get_default("pipeline.batch.size")
 
  option ["-u", "--pipeline.batch.delay"], "DELAY_IN_MS",
    I18n.t("logstash.runner.flag.pipeline-batch-delay"),
    :attribute_name => "pipeline.batch.delay",
-   :default => LogStash::SETTINGS.get_default("pipeline.batch.delay"), &:to_i
+   :default => LogStash::SETTINGS.get_default("pipeline.batch.delay")
 
  option ["--pipeline.unsafe_shutdown"], :flag,
    I18n.t("logstash.runner.flag.unsafe_shutdown"),
@@ -110,7 +110,7 @@ class LogStash::Runner < Clamp::StrictCommand
  option ["--config.reload.interval"], "RELOAD_INTERVAL",
    I18n.t("logstash.runner.flag.reload_interval"),
    :attribute_name => "config.reload.interval",
-   :default => LogStash::SETTINGS.get_default("config.reload.interval"), &:to_i
+   :default => LogStash::SETTINGS.get_default("config.reload.interval")
 
  option ["--http.host"], "HTTP_HOST",
    I18n.t("logstash.runner.flag.http_host"),
@@ -120,7 +120,7 @@ class LogStash::Runner < Clamp::StrictCommand
  option ["--http.port"], "HTTP_PORT",
    I18n.t("logstash.runner.flag.http_port"),
    :attribute_name => "http.port",
-   :default => LogStash::SETTINGS.get_default("http.port"), &:to_i
+   :default => LogStash::SETTINGS.get_default("http.port")
 
  option ["--log.format"], "FORMAT",
    I18n.t("logstash.runner.flag.log_format"),
@@ -148,10 +148,13 @@ class LogStash::Runner < Clamp::StrictCommand
  begin
    LogStash::SETTINGS.from_yaml(LogStash::SETTINGS.get("path.settings"))
  rescue => e
-   @logger.subscribe(STDOUT)
-   @logger.warn("Logstash has a new settings file which defines start up time settings. This file is typically located in $LS_HOME/config or /etc/logstash. If you installed Logstash through a package and are starting it manually please specify the location to this settings file by passing in \"--path.settings=/path/..\" in the command line options")
-   @logger.fatal("Failed to load settings file from \"path.settings\". Aborting...", "path.settings" => LogStash::SETTINGS.get("path.settings"), "exception" => e.class, "message" => e.message)
-   exit(-1)
+   # abort unless we're just looking for the help
+   if (["--help", "-h"] & args).empty?
+     @logger.subscribe(STDOUT)
+     @logger.warn("Logstash has a new settings file which defines start up time settings. This file is typically located in $LS_HOME/config or /etc/logstash. If you installed Logstash through a package and are starting it manually please specify the location to this settings file by passing in \"--path.settings=/path/..\" in the command line options")
+     @logger.fatal("Failed to load settings file from \"path.settings\". Aborting...", "path.settings" => LogStash::SETTINGS.get("path.settings"), "exception" => e.class, "message" => e.message)
+     return 1
+   end
  end
 
  super(*[args])