logstash-core 5.6.16-java → 6.0.0.alpha1-java

Files changed (156)
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +4 -7
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/version.rb +4 -8
  5. data/lib/logstash-core_jars.rb +12 -26
  6. data/lib/logstash/agent.rb +261 -246
  7. data/lib/logstash/api/commands/default_metadata.rb +1 -1
  8. data/lib/logstash/api/commands/hot_threads_reporter.rb +5 -11
  9. data/lib/logstash/api/commands/node.rb +3 -2
  10. data/lib/logstash/api/commands/stats.rb +3 -2
  11. data/lib/logstash/bootstrap_check/bad_java.rb +16 -0
  12. data/lib/logstash/bootstrap_check/bad_ruby.rb +12 -0
  13. data/lib/logstash/bootstrap_check/default_config.rb +17 -0
  14. data/lib/logstash/compiler.rb +38 -0
  15. data/lib/logstash/compiler/lscl.rb +566 -0
  16. data/lib/logstash/compiler/lscl/lscl_grammar.rb +3503 -0
  17. data/lib/logstash/compiler/treetop_monkeypatches.rb +92 -0
  18. data/lib/logstash/config/config_ast.rb +4 -82
  19. data/lib/logstash/config/mixin.rb +73 -41
  20. data/lib/logstash/config/pipeline_config.rb +48 -0
  21. data/lib/logstash/config/source/base.rb +16 -0
  22. data/lib/logstash/config/source/local.rb +215 -0
  23. data/lib/logstash/config/source_loader.rb +125 -0
  24. data/lib/logstash/converge_result.rb +103 -0
  25. data/lib/logstash/environment.rb +6 -19
  26. data/lib/logstash/errors.rb +2 -0
  27. data/lib/logstash/execution_context.rb +4 -7
  28. data/lib/logstash/filter_delegator.rb +6 -9
  29. data/lib/logstash/inputs/base.rb +0 -2
  30. data/lib/logstash/instrument/collector.rb +5 -7
  31. data/lib/logstash/instrument/metric_store.rb +12 -12
  32. data/lib/logstash/instrument/metric_type/mean.rb +0 -5
  33. data/lib/logstash/instrument/namespaced_metric.rb +0 -4
  34. data/lib/logstash/instrument/namespaced_null_metric.rb +0 -4
  35. data/lib/logstash/instrument/null_metric.rb +0 -10
  36. data/lib/logstash/instrument/periodic_poller/cgroup.rb +85 -168
  37. data/lib/logstash/instrument/periodic_poller/jvm.rb +5 -5
  38. data/lib/logstash/instrument/periodic_poller/pq.rb +3 -7
  39. data/lib/logstash/instrument/periodic_pollers.rb +1 -3
  40. data/lib/logstash/instrument/wrapped_write_client.rb +24 -33
  41. data/lib/logstash/logging/logger.rb +15 -47
  42. data/lib/logstash/namespace.rb +0 -1
  43. data/lib/logstash/output_delegator.rb +5 -7
  44. data/lib/logstash/outputs/base.rb +0 -2
  45. data/lib/logstash/pipeline.rb +159 -87
  46. data/lib/logstash/pipeline_action.rb +13 -0
  47. data/lib/logstash/pipeline_action/base.rb +29 -0
  48. data/lib/logstash/pipeline_action/create.rb +47 -0
  49. data/lib/logstash/pipeline_action/reload.rb +48 -0
  50. data/lib/logstash/pipeline_action/stop.rb +23 -0
  51. data/lib/logstash/plugin.rb +0 -1
  52. data/lib/logstash/plugins/hooks_registry.rb +6 -0
  53. data/lib/logstash/plugins/registry.rb +0 -1
  54. data/lib/logstash/program.rb +14 -0
  55. data/lib/logstash/queue_factory.rb +5 -1
  56. data/lib/logstash/runner.rb +58 -80
  57. data/lib/logstash/settings.rb +3 -27
  58. data/lib/logstash/state_resolver.rb +41 -0
  59. data/lib/logstash/util/java_version.rb +6 -0
  60. data/lib/logstash/util/safe_uri.rb +12 -148
  61. data/lib/logstash/util/thread_dump.rb +4 -7
  62. data/lib/logstash/util/wrapped_acked_queue.rb +36 -39
  63. data/lib/logstash/util/wrapped_synchronous_queue.rb +29 -39
  64. data/lib/logstash/version.rb +10 -8
  65. data/locales/en.yml +3 -54
  66. data/logstash-core.gemspec +8 -35
  67. data/spec/{logstash/api/modules → api/lib/api}/logging_spec.rb +10 -1
  68. data/spec/{logstash/api/modules → api/lib/api}/node_plugins_spec.rb +2 -1
  69. data/spec/{logstash/api/modules → api/lib/api}/node_spec.rb +3 -3
  70. data/spec/{logstash/api/modules → api/lib/api}/node_stats_spec.rb +3 -7
  71. data/spec/{logstash/api/modules → api/lib/api}/plugins_spec.rb +3 -4
  72. data/spec/{logstash/api/modules → api/lib/api}/root_spec.rb +2 -2
  73. data/spec/api/lib/api/support/resource_dsl_methods.rb +87 -0
  74. data/spec/{logstash/api/commands/stats_spec.rb → api/lib/commands/stats.rb} +2 -7
  75. data/spec/{logstash/api → api/lib}/errors_spec.rb +1 -1
  76. data/spec/{logstash/api → api/lib}/rack_app_spec.rb +0 -0
  77. data/spec/api/spec_helper.rb +106 -0
  78. data/spec/logstash/agent/converge_spec.rb +286 -0
  79. data/spec/logstash/agent/metrics_spec.rb +244 -0
  80. data/spec/logstash/agent_spec.rb +213 -225
  81. data/spec/logstash/compiler/compiler_spec.rb +584 -0
  82. data/spec/logstash/config/config_ast_spec.rb +8 -47
  83. data/spec/logstash/config/mixin_spec.rb +2 -42
  84. data/spec/logstash/config/pipeline_config_spec.rb +75 -0
  85. data/spec/logstash/config/source/local_spec.rb +395 -0
  86. data/spec/logstash/config/source_loader_spec.rb +122 -0
  87. data/spec/logstash/converge_result_spec.rb +179 -0
  88. data/spec/logstash/event_spec.rb +0 -66
  89. data/spec/logstash/execution_context_spec.rb +8 -12
  90. data/spec/logstash/filter_delegator_spec.rb +12 -24
  91. data/spec/logstash/inputs/base_spec.rb +7 -5
  92. data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +92 -225
  93. data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +1 -1
  94. data/spec/logstash/instrument/periodic_poller/os_spec.rb +32 -29
  95. data/spec/logstash/instrument/wrapped_write_client_spec.rb +33 -33
  96. data/spec/logstash/legacy_ruby_event_spec.rb +13 -4
  97. data/spec/logstash/output_delegator_spec.rb +11 -20
  98. data/spec/logstash/outputs/base_spec.rb +7 -5
  99. data/spec/logstash/pipeline_action/create_spec.rb +83 -0
  100. data/spec/logstash/pipeline_action/reload_spec.rb +83 -0
  101. data/spec/logstash/pipeline_action/stop_spec.rb +37 -0
  102. data/spec/logstash/pipeline_pq_file_spec.rb +1 -1
  103. data/spec/logstash/pipeline_spec.rb +81 -137
  104. data/spec/logstash/plugin_spec.rb +2 -1
  105. data/spec/logstash/plugins/hooks_registry_spec.rb +6 -0
  106. data/spec/logstash/queue_factory_spec.rb +13 -1
  107. data/spec/logstash/runner_spec.rb +29 -140
  108. data/spec/logstash/settings/writable_directory_spec.rb +10 -13
  109. data/spec/logstash/settings_spec.rb +0 -91
  110. data/spec/logstash/state_resolver_spec.rb +156 -0
  111. data/spec/logstash/timestamp_spec.rb +2 -6
  112. data/spec/logstash/util/java_version_spec.rb +22 -0
  113. data/spec/logstash/util/safe_uri_spec.rb +0 -56
  114. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +22 -0
  115. data/spec/support/helpers.rb +9 -11
  116. data/spec/support/matchers.rb +96 -6
  117. data/spec/support/mocks_classes.rb +80 -0
  118. data/spec/support/shared_contexts.rb +2 -27
  119. metadata +100 -149
  120. data/lib/logstash/config/loader.rb +0 -107
  121. data/lib/logstash/config/modules_common.rb +0 -103
  122. data/lib/logstash/config/source/modules.rb +0 -55
  123. data/lib/logstash/config/string_escape.rb +0 -27
  124. data/lib/logstash/dependency_report.rb +0 -131
  125. data/lib/logstash/dependency_report_runner.rb +0 -17
  126. data/lib/logstash/elasticsearch_client.rb +0 -142
  127. data/lib/logstash/instrument/global_metrics.rb +0 -13
  128. data/lib/logstash/instrument/periodic_poller/dlq.rb +0 -24
  129. data/lib/logstash/modules/cli_parser.rb +0 -74
  130. data/lib/logstash/modules/elasticsearch_config.rb +0 -22
  131. data/lib/logstash/modules/elasticsearch_importer.rb +0 -37
  132. data/lib/logstash/modules/elasticsearch_resource.rb +0 -10
  133. data/lib/logstash/modules/file_reader.rb +0 -36
  134. data/lib/logstash/modules/kibana_base.rb +0 -24
  135. data/lib/logstash/modules/kibana_client.rb +0 -124
  136. data/lib/logstash/modules/kibana_config.rb +0 -105
  137. data/lib/logstash/modules/kibana_dashboards.rb +0 -36
  138. data/lib/logstash/modules/kibana_importer.rb +0 -17
  139. data/lib/logstash/modules/kibana_resource.rb +0 -10
  140. data/lib/logstash/modules/kibana_settings.rb +0 -40
  141. data/lib/logstash/modules/logstash_config.rb +0 -120
  142. data/lib/logstash/modules/resource_base.rb +0 -38
  143. data/lib/logstash/modules/scaffold.rb +0 -52
  144. data/lib/logstash/modules/settings_merger.rb +0 -23
  145. data/lib/logstash/modules/util.rb +0 -17
  146. data/lib/logstash/util/dead_letter_queue_manager.rb +0 -61
  147. data/lib/logstash/util/environment_variables.rb +0 -43
  148. data/spec/logstash/config/loader_spec.rb +0 -38
  149. data/spec/logstash/config/string_escape_spec.rb +0 -24
  150. data/spec/logstash/instrument/periodic_poller/dlq_spec.rb +0 -17
  151. data/spec/logstash/modules/logstash_config_spec.rb +0 -56
  152. data/spec/logstash/modules/scaffold_spec.rb +0 -234
  153. data/spec/logstash/pipeline_dlq_commit_spec.rb +0 -109
  154. data/spec/logstash/settings/splittable_string_array_spec.rb +0 -51
  155. data/spec/logstash/util/wrapped_acked_queue_spec.rb +0 -49
  156. data/versions-gem-copy.yml +0 -12
data/lib/logstash/logging/logger.rb
@@ -7,13 +7,10 @@ module LogStash
   java_import org.apache.logging.log4j.LogManager
   java_import org.apache.logging.log4j.core.config.Configurator
   java_import org.apache.logging.log4j.core.config.DefaultConfiguration
-  java_import org.apache.logging.log4j.core.config.LoggerConfig
-  java_import org.logstash.log.LogstashLoggerContextFactory
-  java_import org.apache.logging.log4j.core.LoggerContext
-  java_import java.net.URI
 
   class Logger
     @@config_mutex = Mutex.new
+    @@logging_context = nil
 
     def initialize(name)
      @logger = LogManager.getLogger(name)
@@ -68,60 +65,31 @@ module LogStash
     end
 
     def self.configure_logging(level, path = LogManager::ROOT_LOGGER_NAME)
-      @@config_mutex.synchronize { set_level(level, path) }
+      @@config_mutex.synchronize { Configurator.setLevel(path, Level.valueOf(level)) }
    rescue Exception => e
      raise ArgumentError, "invalid level[#{level}] for logger[#{path}]"
    end
 
-    def self.reconfigure(config_location)
+    def self.initialize(config_location)
      @@config_mutex.synchronize do
-        config_location_uri = URI.create(config_location)
-        file_path = config_location_uri.path
-        if ::File.exists?(file_path)
-          logs_location = java.lang.System.getProperty("ls.logs")
-          puts "Sending Logstash's logs to #{logs_location} which is now configured via log4j2.properties"
-          #reconfigure the default context to use our log4j2.properties file
-          get_logging_context.setConfigLocation(URI.create(config_location))
-          #ensure everyone agrees which context to use for the LogManager
-          context_factory = LogstashLoggerContextFactory.new(get_logging_context)
-          LogManager.setFactory(context_factory)
-        else
-          # fall back to default config
-          puts "Could not find log4j2 configuration at path #{file_path}. Using default config which logs errors to the console"
+        if @@logging_context.nil?
+          file_path = URI(config_location).path
+          if ::File.exists?(file_path)
+            logs_location = java.lang.System.getProperty("ls.logs")
+            puts "Sending Logstash's logs to #{logs_location} which is now configured via log4j2.properties"
+            @@logging_context = Configurator.initialize(nil, config_location)
+          else
+            # fall back to default config
+            puts "Could not find log4j2 configuration at path #{file_path}. Using default config which logs to console"
+            @@logging_context = Configurator.initialize(DefaultConfiguration.new)
+          end
        end
      end
    end
 
-    # until dev_utils/rspec/spec_helper is changed, we need to have both methods
-    singleton_class.send(:alias_method, :initialize, :reconfigure)
-
    def self.get_logging_context
-      return LoggerContext.getContext(false)
-    end
-
-    # Clone of org.apache.logging.log4j.core.config.Configurator.setLevel(), but ensure the proper context is used
-    def self.set_level(_level, path)
-      configuration = get_logging_context.getConfiguration()
-      level = Level.valueOf(_level)
-      if path.nil? || path.strip.empty?
-        root_logger = configuration.getRootLogger()
-        if root_logger.getLevel() != level
-          root_logger.setLevel(level)
-          get_logging_context.updateLoggers()
-        end
-      else
-        package_logger = configuration.getLoggerConfig(path)
-        if package_logger.name != path #no package logger found
-          configuration.addLogger(path, LoggerConfig.new(path, level, true))
-          get_logging_context.updateLoggers()
-        elsif package_logger.getLevel() != level
-          package_logger.setLevel(level)
-          get_logging_context.updateLoggers()
-        end
-      end
+      return @@logging_context
    end
-
-    private_class_method :set_level
  end
 
  class SlowLogger
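
Note: the new self.initialize replaces reconfigure/set_level with a memoized log4j2 context — the first call builds @@logging_context via Configurator.initialize and every later call is a no-op. A minimal, runnable sketch of that first-call-wins guard in plain Ruby (the build_context stub stands in for log4j2's Configurator; all names here are illustrative, not the real API):

    require "uri"

    class ContextHolder
      @@mutex = Mutex.new
      @@context = nil

      # Stand-in for Configurator.initialize, which would return a log4j2 LoggerContext.
      def self.build_context(path)
        "context-for-#{path}"
      end

      def self.initialize_once(config_location)
        @@mutex.synchronize do
          # Only the first caller builds the context; later calls keep the existing one.
          @@context ||= build_context(URI(config_location).path)
        end
      end

      def self.context
        @@context
      end
    end

    ContextHolder.initialize_once("file:///etc/logstash/log4j2.properties")
    ContextHolder.initialize_once("file:///tmp/other.properties") # no-op: context already set
    puts ContextHolder.context # => "context-for-/etc/logstash/log4j2.properties"
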
data/lib/logstash/namespace.rb
@@ -11,5 +11,4 @@ module LogStash
   module PluginMixins; end
   module PluginManager; end
   module Api; end
-  module Modules; end
 end # module LogStash

data/lib/logstash/output_delegator.rb
@@ -19,9 +19,7 @@ module LogStash class OutputDelegator
     @namespaced_metric = metric.namespace(id.to_sym)
     @namespaced_metric.gauge(:name, config_name)
     @metric_events = @namespaced_metric.namespace(:events)
-    @in_counter = @metric_events.counter(:in)
-    @out_counter = @metric_events.counter(:out)
-    @time_metric = @metric_events.counter(:duration_in_millis)
+
     @strategy = strategy_registry.
       class_for(self.concurrency).
       new(@logger, @output_class, @namespaced_metric, execution_context, plugin_args)
@@ -44,11 +42,11 @@ module LogStash class OutputDelegator
   end
 
   def multi_receive(events)
-    @in_counter.increment(events.length)
-    start_time = java.lang.System.current_time_millis
+    @metric_events.increment(:in, events.length)
+    clock = @metric_events.time(:duration_in_millis)
     @strategy.multi_receive(events)
-    @time_metric.increment(java.lang.System.current_time_millis - start_time)
-    @out_counter.increment(events.length)
+    clock.stop
+    @metric_events.increment(:out, events.length)
   end
 
   def do_close
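
Note: multi_receive now talks to the metric namespace directly — increment(:in, n), increment(:out, n), and time(:duration_in_millis), the latter returning a clock whose stop records the elapsed milliseconds — instead of caching counter objects at construction time. A toy sketch of that contract (the class below is illustrative, not the real LogStash::Instrument API surface):

    class ToyMetric
      Clock = Struct.new(:metric, :key, :started_at) do
        def stop
          # record elapsed wall-clock time in milliseconds under the given key
          metric.increment(key, ((Time.now - started_at) * 1000).round)
        end
      end

      def initialize
        @counters = Hash.new(0)
      end
      attr_reader :counters

      def increment(key, value = 1)
        @counters[key] += value
      end

      def time(key)
        Clock.new(self, key, Time.now)
      end
    end

    metric = ToyMetric.new
    metric.increment(:in, 5)                  # events entering the output
    clock = metric.time(:duration_in_millis)
    sleep 0.01                                # pretend to deliver the batch
    clock.stop
    metric.increment(:out, 5)                 # events fully delivered
    p metric.counters
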
data/lib/logstash/outputs/base.rb
@@ -109,8 +109,6 @@ class LogStash::Outputs::Base < LogStash::Plugin
     super
     # There is no easy way to propage an instance variable into the codec, because the codec
     # are created at the class level
-    # TODO(talevy): Codecs should have their own execution_context, for now they will inherit their
-    # parent plugin's
     @codec.execution_context = context
     context
   end

data/lib/logstash/pipeline.rb
@@ -18,25 +18,26 @@ require "logstash/instrument/null_metric"
 require "logstash/instrument/namespaced_null_metric"
 require "logstash/instrument/collector"
 require "logstash/instrument/wrapped_write_client"
-require "logstash/util/dead_letter_queue_manager"
 require "logstash/output_delegator"
 require "logstash/filter_delegator"
 require "logstash/queue_factory"
+require "logstash/compiler"
 require "logstash/execution_context"
 
-java_import org.logstash.common.DeadLetterQueueFactory
-java_import org.logstash.common.io.DeadLetterQueueWriter
-
 module LogStash; class BasePipeline
   include LogStash::Util::Loggable
 
-  attr_reader :config_str, :config_hash, :inputs, :filters, :outputs, :pipeline_id
+  attr_reader :settings, :config_str, :config_hash, :inputs, :filters, :outputs, :pipeline_id, :lir, :execution_context
 
-  def initialize(config_str, settings = SETTINGS)
+  def initialize(config_str, settings = SETTINGS, namespaced_metric = nil, agent = nil)
     @logger = self.logger
-    @mutex = Mutex.new
+
     @config_str = config_str
+    @settings = settings
     @config_hash = Digest::SHA1.hexdigest(@config_str)
+
+    @lir = compile_lir
+
     # Every time #plugin is invoked this is incremented to give each plugin
     # a unique id when auto-generating plugin ids
     @plugin_counter ||= 0
@@ -48,25 +49,18 @@ module LogStash; class BasePipeline
     @inputs = nil
     @filters = nil
     @outputs = nil
-
-    if settings.get_value("dead_letter_queue.enable")
-      @dlq_writer = DeadLetterQueueFactory.getWriter(pipeline_id, settings.get_value("path.dead_letter_queue"), settings.get_value("dead_letter_queue.max_bytes"))
-    else
-      @dlq_writer = LogStash::Util::DummyDeadLetterQueueWriter.new
-    end
+    @execution_context = LogStash::ExecutionContext.new(self, agent)
 
     grammar = LogStashConfigParser.new
     parsed_config = grammar.parse(config_str)
     raise(ConfigurationError, grammar.failure_reason) if parsed_config.nil?
 
-    parsed_config.process_escape_sequences = settings.get_value("config.support_escapes")
-
     config_code = parsed_config.compile
 
     # config_code = BasePipeline.compileConfig(config_str)
 
     if settings.get_value("config.debug") && @logger.debug?
-      @logger.debug("Compiled pipeline code", :code => config_code)
+      @logger.debug("Compiled pipeline code", default_logging_keys(:code => config_code))
     end
 
     # Evaluate the config compiled code that will initialize all the plugins and define the
@@ -78,6 +72,10 @@ module LogStash; class BasePipeline
     end
   end
 
+  def compile_lir
+    LogStash::Compiler.compile_pipeline(self.config_str)
+  end
+
   def plugin(plugin_type, name, *args)
     @plugin_counter += 1
 
@@ -102,34 +100,34 @@ module LogStash; class BasePipeline
 
     klass = Plugin.lookup(plugin_type, name)
 
-    execution_context = ExecutionContext.new(self, id, klass.config_name, @dlq_writer)
-
     if plugin_type == "output"
-      OutputDelegator.new(@logger, klass, type_scoped_metric, execution_context, OutputDelegatorStrategyRegistry.instance, args)
+      OutputDelegator.new(@logger, klass, type_scoped_metric, @execution_context, OutputDelegatorStrategyRegistry.instance, args)
     elsif plugin_type == "filter"
-      FilterDelegator.new(@logger, klass, type_scoped_metric, execution_context, args)
+      FilterDelegator.new(@logger, klass, type_scoped_metric, @execution_context, args)
     else # input
       input_plugin = klass.new(args)
       scoped_metric = type_scoped_metric.namespace(id.to_sym)
       scoped_metric.gauge(:name, input_plugin.config_name)
       input_plugin.metric = scoped_metric
-      input_plugin.execution_context = execution_context
+      input_plugin.execution_context = @execution_context
       input_plugin
     end
   end
 
   def reloadable?
-    non_reloadable_plugins.empty?
+    configured_as_reloadable? && reloadable_plugins?
   end
 
-  def non_reloadable_plugins
-    (inputs + filters + outputs).select { |plugin| !plugin.reloadable? }
+  def configured_as_reloadable?
+    settings.get("pipeline.reloadable")
   end
 
-  private
+  def reloadable_plugins?
+    non_reloadable_plugins.empty?
+  end
 
-  def default_logging_keys(other_keys = {})
-    { :pipeline_id => pipeline_id }.merge(other_keys)
+  def non_reloadable_plugins
+    (inputs + filters + outputs).select { |plugin| !plugin.reloadable? }
   end
 end; end
 
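Note: reloadable? is now the conjunction of a per-pipeline setting (pipeline.reloadable) and the existing plugin check, so a pipeline can opt out of automatic reloads even when all of its plugins support them. The decision logic in isolation, as a runnable sketch (FakePlugin and the settings hash are stand-ins for the real plugin and Settings objects):

    FakePlugin = Struct.new(:name, :reloadable) do
      def reloadable?
        reloadable
      end
    end

    class ReloadCheck
      def initialize(settings, plugins)
        @settings = settings
        @plugins = plugins
      end

      def reloadable?
        configured_as_reloadable? && reloadable_plugins?
      end

      def configured_as_reloadable?
        @settings.fetch("pipeline.reloadable", true)
      end

      def reloadable_plugins?
        non_reloadable_plugins.empty?
      end

      def non_reloadable_plugins
        @plugins.reject(&:reloadable?)
      end
    end

    plugins = [FakePlugin.new("stdin", false), FakePlugin.new("stdout", true)]
    p ReloadCheck.new({ "pipeline.reloadable" => true }, plugins).reloadable?   # => false, stdin blocks it
    p ReloadCheck.new({ "pipeline.reloadable" => false }, []).reloadable?       # => false, the setting blocks it
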
@@ -149,7 +147,7 @@ module LogStash; class Pipeline < BasePipeline
 
   MAX_INFLIGHT_WARN_THRESHOLD = 10_000
 
-  def initialize(config_str, settings = SETTINGS, namespaced_metric = nil)
+  def initialize(config_str, settings = SETTINGS, namespaced_metric = nil, agent = nil)
     # This needs to be configured before we call super which will evaluate the code to make
     # sure the metric instance is correctly send to the plugins to make the namespace scoping work
     @metric = if namespaced_metric
@@ -162,12 +160,12 @@ module LogStash; class Pipeline < BasePipeline
     @reporter = PipelineReporter.new(@logger, self)
     @worker_threads = []
 
-    super(config_str, settings)
+    super
 
     begin
       @queue = LogStash::QueueFactory.create(settings)
     rescue => e
-      @logger.error("Logstash failed to create queue", "exception" => e.message, "backtrace" => e.backtrace)
+      @logger.error("Logstash failed to create queue", default_logging_keys("exception" => e.message, "backtrace" => e.backtrace))
       raise e
     end
 
@@ -182,7 +180,6 @@ module LogStash; class Pipeline < BasePipeline
     )
     @drain_queue = @settings.get_value("queue.drain")
 
-
     @events_filtered = Concurrent::AtomicFixnum.new(0)
     @events_consumed = Concurrent::AtomicFixnum.new(0)
 
@@ -191,8 +188,11 @@ module LogStash; class Pipeline < BasePipeline
     @ready = Concurrent::AtomicBoolean.new(false)
     @running = Concurrent::AtomicBoolean.new(false)
     @flushing = Concurrent::AtomicReference.new(false)
+    @force_shutdown = Concurrent::AtomicBoolean.new(false)
   end # def initialize
 
+
+
   def ready?
     @ready.value
   end
@@ -207,15 +207,14 @@ module LogStash; class Pipeline < BasePipeline
 
     if @settings.set?("pipeline.workers")
       if pipeline_workers > 1
-        @logger.warn("Warning: Manual override - there are filters that might not work with multiple worker threads",
-                     :worker_threads => pipeline_workers, :filters => plugins)
+        @logger.warn("Warning: Manual override - there are filters that might not work with multiple worker threads", default_logging_keys(:worker_threads => pipeline_workers, :filters => plugins))
       end
     else
       # user did not specify a worker thread count
       # warn if the default is multiple
       if default > 1
         @logger.warn("Defaulting pipeline worker threads to 1 because there are some filters that might not work with multiple worker threads",
-                     :count_was => default, :filters => plugins)
+                     default_logging_keys(:count_was => default, :filters => plugins))
         return 1 # can't allow the default value to propagate if there are unsafe filters
       end
     end
@@ -226,15 +225,61 @@ module LogStash; class Pipeline < BasePipeline
     return @filters.any?
   end
 
+  def start
+    # Since we start lets assume that the metric namespace is cleared
+    # this is useful in the context of pipeline reloading
+    collect_stats
+
+    logger.debug("Starting pipeline", default_logging_keys)
+
+    @finished_execution = Concurrent::AtomicBoolean.new(false)
+
+    @thread = Thread.new do
+      begin
+        LogStash::Util.set_thread_name("pipeline.#{pipeline_id}")
+        run
+        @finished_execution.make_true
+      rescue => e
+        close
+        logger.error("Pipeline aborted due to error", default_logging_keys(:exception => e, :backtrace => e.backtrace))
+      end
+    end
+
+    status = wait_until_started
+
+    if status
+      logger.debug("Pipeline started successfully", default_logging_keys(:pipeline_id => pipeline_id))
+    end
+
+    status
+  end
+
+  def wait_until_started
+    while true do
+      # This should be changed with an appropriate FSM
+      # It's an edge case, if we have a pipeline with
+      # a generator { count => 1 } its possible that `Thread#alive?` doesn't return true
+      # because the execution of the thread was successful and complete
+      if @finished_execution.true?
+        return true
+      elsif !thread.alive?
+        return false
+      elsif running?
+        return true
+      else
+        sleep 0.01
+      end
+    end
+  end
+
   def run
     @started_at = Time.now
-
     @thread = Thread.current
     Util.set_thread_name("[#{pipeline_id}]-pipeline-manager")
 
     start_workers
 
-    @logger.info("Pipeline #{@pipeline_id} started")
+    @logger.info("Pipeline started", default_logging_keys)
 
     # Block until all inputs have stopped
     # Generally this happens if SIGINT is sent and `shutdown` is called from an external thread
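
Note: start now spawns the pipeline thread itself and blocks in wait_until_started, and the poll has to treat "thread already finished successfully" as started — with an input like generator { count => 1 }, the thread can run to completion before Thread#alive? is ever observed true. A reduced, runnable sketch of that three-way poll (assumes the concurrent-ruby gem, which logstash-core already depends on; the worker body is a stand-in for run):

    require "concurrent"

    finished = Concurrent::AtomicBoolean.new(false)
    running  = Concurrent::AtomicBoolean.new(false)

    worker = Thread.new do
      running.make_true
      sleep 0.05            # stand-in for processing a tiny, finite input
      running.make_false
      finished.make_true
    end

    def wait_until_started(worker, finished, running)
      loop do
        return true  if finished.true?    # ran to completion before we ever saw it alive
        return false unless worker.alive? # died without finishing: startup failed
        return true  if running.true?     # normal case: up and processing
        sleep 0.01
      end
    end

    p wait_until_started(worker, finished, running) # => true
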
@@ -244,14 +289,14 @@ module LogStash; class Pipeline < BasePipeline
     wait_inputs
     transition_to_stopped
 
-    @logger.debug("Input plugins stopped! Will shutdown filter/output workers.")
+    @logger.debug("Input plugins stopped! Will shutdown filter/output workers.", default_logging_keys)
 
     shutdown_flusher
     shutdown_workers
 
     close
 
-    @logger.debug("Pipeline #{@pipeline_id} has been shutdown")
+    @logger.debug("Pipeline has been shutdown", default_logging_keys)
 
     # exit code
     return 0
@@ -260,7 +305,6 @@ module LogStash; class Pipeline < BasePipeline
   def close
     @filter_queue_client.close
     @queue.close
-    @dlq_writer.close
   end
 
   def transition_to_running
@@ -290,7 +334,7 @@ module LogStash; class Pipeline < BasePipeline
     plugin.register
     plugin
   rescue => e
-    @logger.error("Error registering plugin", :plugin => plugin.inspect, :error => e.message)
+    @logger.error("Error registering plugin", default_logging_keys(:plugin => plugin.inspect, :error => e.message))
     raise e
   end
 
@@ -322,18 +366,14 @@ module LogStash; class Pipeline < BasePipeline
     config_metric.gauge(:batch_delay, batch_delay)
     config_metric.gauge(:config_reload_automatic, @settings.get("config.reload.automatic"))
     config_metric.gauge(:config_reload_interval, @settings.get("config.reload.interval"))
-    config_metric.gauge(:dead_letter_queue_enabled, dlq_enabled?)
-    config_metric.gauge(:dead_letter_queue_path, @dlq_writer.get_path.to_absolute_path.to_s) if dlq_enabled?
-
 
-    @logger.info("Starting pipeline",
-                 "id" => self.pipeline_id,
-                 "pipeline.workers" => pipeline_workers,
-                 "pipeline.batch.size" => batch_size,
-                 "pipeline.batch.delay" => batch_delay,
-                 "pipeline.max_inflight" => max_inflight)
+    @logger.info("Starting pipeline", default_logging_keys(
+                 "pipeline.workers" => pipeline_workers,
+                 "pipeline.batch.size" => batch_size,
+                 "pipeline.batch.delay" => batch_delay,
+                 "pipeline.max_inflight" => max_inflight))
     if max_inflight > MAX_INFLIGHT_WARN_THRESHOLD
-      @logger.warn "CAUTION: Recommended inflight events max exceeded! Logstash will run with up to #{max_inflight} events in memory in your current configuration. If your message sizes are large this may cause instability with the default heap size. Please consider setting a non-standard heap size, changing the batch size (currently #{batch_size}), or changing the number of pipeline workers (currently #{pipeline_workers})"
+      @logger.warn("CAUTION: Recommended inflight events max exceeded! Logstash will run with up to #{max_inflight} events in memory in your current configuration. If your message sizes are large this may cause instability with the default heap size. Please consider setting a non-standard heap size, changing the batch size (currently #{batch_size}), or changing the number of pipeline workers (currently #{pipeline_workers})", default_logging_keys)
     end
 
     pipeline_workers.times do |t|
@@ -359,10 +399,6 @@ module LogStash; class Pipeline < BasePipeline
     end
   end
 
-  def dlq_enabled?
-    @settings.get("dead_letter_queue.enable")
-  end
-
   # Main body of what a worker thread does
   # Repeatedly takes batches off the queue, filters, then outputs them
   def worker_loop(batch_size, batch_delay)
@@ -379,6 +415,7 @@ module LogStash; class Pipeline < BasePipeline
       filter_batch(batch)
       flush_filters_to_batch(batch, :final => false) if signal.flush?
       output_batch(batch)
+      break if @force_shutdown.true? # Do not ack the current batch
       @filter_queue_client.close_batch(batch)
 
       # keep break at end of loop, after the read_batch operation, some pipeline specs rely on this "final read_batch" before shutdown.
@@ -390,14 +427,19 @@ module LogStash; class Pipeline < BasePipeline
     batch = @filter_queue_client.new_batch
     @filter_queue_client.start_metrics(batch) # explicitly call start_metrics since we dont do a read_batch here
     flush_filters_to_batch(batch, :final => true)
+    return if @force_shutdown.true? # Do not ack the current batch
     output_batch(batch)
     @filter_queue_client.close_batch(batch)
   end
 
   def filter_batch(batch)
-    filter_func(batch.to_a).each do |e|
-      #these are both original and generated events
-      batch.merge(e) unless e.cancelled?
+    batch.each do |event|
+      return if @force_shutdown.true?
+
+      filter_func(event).each do |e|
+        #these are both original and generated events
+        batch.merge(e) unless e.cancelled?
+      end
     end
     @filter_queue_client.add_filtered_metrics(batch)
     @events_filtered.increment(batch.size)
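
Note: filter_batch used to hand the whole batch to filter_func in one call; it now feeds events one at a time and checks the force-shutdown flag between them, so a forced shutdown can abandon a half-filtered batch without acking it. The shape of that loop, reduced to a runnable sketch (the lambda stands in for the compiled filter_func; assumes concurrent-ruby):

    require "concurrent"

    force_shutdown = Concurrent::AtomicBoolean.new(false)

    # Stand-in for the compiled filter_func: returns the original plus a generated event.
    filter_func = ->(event) { [event, event.merge("generated" => true)] }

    batch  = [{ "id" => 1 }, { "id" => 2 }]
    merged = []

    batch.each do |event|
      break if force_shutdown.true? # the real code returns, leaving the batch unacked
      filter_func.call(event).each { |e| merged << e }
    end

    p merged.size # => 4 (two originals, two generated)
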
@@ -409,7 +451,7 @@ module LogStash; class Pipeline < BasePipeline
     # Users need to check their configuration or see if there is a bug in the
     # plugin.
     @logger.error("Exception in pipelineworker, the pipeline stopped processing new events, please check your filter configuration and restart Logstash.",
-                  "exception" => e.message, "backtrace" => e.backtrace)
+                  default_logging_keys("exception" => e.message, "backtrace" => e.backtrace))
 
     raise e
   end
@@ -431,6 +473,7 @@ module LogStash; class Pipeline < BasePipeline
     # Now that we have our output to event mapping we can just invoke each output
     # once with its list of events
     output_events_map.each do |output, events|
+      return if @force_shutdown.true?
       output.multi_receive(events)
     end
 
@@ -471,20 +514,21 @@ module LogStash; class Pipeline < BasePipeline
   rescue => e
     if plugin.stop?
       @logger.debug("Input plugin raised exception during shutdown, ignoring it.",
-                    :plugin => plugin.class.config_name, :exception => e.message,
-                    :backtrace => e.backtrace)
+                    default_logging_keys(:plugin => plugin.class.config_name, :exception => e.message, :backtrace => e.backtrace))
       return
     end
 
     # otherwise, report error and restart
     if @logger.debug?
       @logger.error(I18n.t("logstash.pipeline.worker-error-debug",
-                           :plugin => plugin.inspect, :error => e.message,
-                           :exception => e.class,
-                           :stacktrace => e.backtrace.join("\n")))
+                           default_logging_keys(
+                             :plugin => plugin.inspect,
+                             :error => e.message,
+                             :exception => e.class,
+                             :stacktrace => e.backtrace.join("\n"))))
     else
       @logger.error(I18n.t("logstash.pipeline.worker-error",
-                           :plugin => plugin.inspect, :error => e.message))
+                           default_logging_keys(:plugin => plugin.inspect, :error => e.message)))
     end
 
     # Assuming the failure that caused this exception is transient,
@@ -509,23 +553,42 @@ module LogStash; class Pipeline < BasePipeline
 
     before_stop.call if block_given?
 
-    @logger.debug "Closing inputs"
-    @inputs.each(&:do_stop)
-    @logger.debug "Closed inputs"
+    stop_inputs
+
+    # We make this call blocking, so we know for sure when the method return the shtudown is
+    # stopped
+    wait_for_workers
+    clear_pipeline_metrics
   end # def shutdown
 
+  def force_shutdown!
+    @force_shutdown.make_true
+  end
+
+  def wait_for_workers
+    @logger.debug("Closing inputs", default_logging_keys)
+    @worker_threads.map(&:join)
+    @logger.debug("Worker closed", default_logging_keys)
+  end
+
+  def stop_inputs
+    @logger.debug("Closing inputs", default_logging_keys)
+    @inputs.each(&:do_stop)
+    @logger.debug("Closed inputs", default_logging_keys)
+  end
+
   # After `shutdown` is called from an external thread this is called from the main thread to
   # tell the worker threads to stop and then block until they've fully stopped
   # This also stops all filter and output plugins
   def shutdown_workers
     # Each worker thread will receive this exactly once!
     @worker_threads.each do |t|
-      @logger.debug("Pushing shutdown", :thread => t.inspect)
+      @logger.debug("Pushing shutdown", default_logging_keys(:thread => t.inspect))
       @signal_queue.push(SHUTDOWN)
     end
 
     @worker_threads.each do |t|
-      @logger.debug("Shutdown waiting for worker thread #{t}")
+      @logger.debug("Shutdown waiting for worker thread", default_logging_keys(:thread => t.inspect))
      t.join
     end
 
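Note: shutdown is now stop inputs → join every worker thread (blocking) → clear per-pipeline metrics, while force_shutdown! merely flips the flag that the worker loops poll between batches. A compressed, runnable sketch of that join-then-continue sequence (assumes concurrent-ruby; the worker bodies are stand-ins for worker_loop):

    require "concurrent"

    force_shutdown = Concurrent::AtomicBoolean.new(false)

    workers = Array.new(2) do |i|
      Thread.new do
        sleep 0.01 until force_shutdown.true? # worker loop polls the flag between batches
        puts "worker #{i} exiting"
      end
    end

    force_shutdown.make_true # what force_shutdown! does
    workers.each(&:join)     # wait_for_workers: blocking, so the caller knows workers are done
    puts "all workers joined; safe to clear pipeline metrics"
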
@@ -537,7 +600,7 @@ module LogStash; class Pipeline < BasePipeline
   # in the pipeline anymore.
   def filter(event, &block)
     # filter_func returns all filtered events, including cancelled ones
-    filter_func([event]).each {|e| block.call(e)}
+    filter_func(event).each { |e| block.call(e) }
   end
 
 
@@ -548,6 +611,7 @@ module LogStash; class Pipeline < BasePipeline
     flushers = options[:final] ? @shutdown_flushers : @periodic_flushers
 
     flushers.each do |flusher|
+      return if @force_shutdown.true?
       flusher.call(options, &block)
     end
   end
@@ -570,12 +634,11 @@ module LogStash; class Pipeline < BasePipeline
 
   def flush
     if @flushing.compare_and_set(false, true)
-      @logger.debug? && @logger.debug("Pushing flush onto pipeline")
+      @logger.debug? && @logger.debug("Pushing flush onto pipeline", default_logging_keys)
       @signal_queue.push(FLUSH)
     end
   end
 
-
   # Calculate the uptime in milliseconds
   #
   # @return [Fixnum] Uptime in milliseconds, 0 if the pipeline is not started
@@ -590,8 +653,10 @@ module LogStash; class Pipeline < BasePipeline
   # @param options [Hash]
   def flush_filters_to_batch(batch, options = {})
     flush_filters(options) do |event|
+      return if @force_shutdown.true?
+
       unless event.cancelled?
-        @logger.debug? and @logger.debug("Pushing flushed events", :event => event)
+        @logger.debug? and @logger.debug("Pushing flushed events", default_logging_keys(:event => event))
         batch.merge(event)
       end
     end
@@ -613,13 +678,6 @@ module LogStash; class Pipeline < BasePipeline
       .each {|t| t.delete("status") }
   end
 
-  def collect_dlq_stats
-    if dlq_enabled?
-      dlq_metric = @metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :dlq])
-      dlq_metric.gauge(:queue_size_in_bytes, @dlq_writer.get_current_queue_size)
-    end
-  end
-
   def collect_stats
     pipeline_metric = @metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :queue])
     pipeline_metric.gauge(:type, settings.get("queue.type"))
@@ -645,6 +703,20 @@ module LogStash; class Pipeline < BasePipeline
     end
   end
 
+  def clear_pipeline_metrics
+    # TODO(ph): I think the metric should also proxy that call correctly to the collector
+    # this will simplify everything since the null metric would simply just do a noop
+    collector = @metric.collector
+
+    unless collector.nil?
+      # selectively reset metrics we don't wish to keep after reloading
+      # these include metrics about the plugins and number of processed events
+      # we want to keep other metrics like reload counts and error messages
+      collector.clear("stats/pipelines/#{pipeline_id}/plugins")
+      collector.clear("stats/pipelines/#{pipeline_id}/events")
+    end
+  end
+
   # Sometimes we log stuff that will dump the pipeline which may contain
   # sensitive information (like the raw syntax tree which can contain passwords)
   # We want to hide most of what's in here
@@ -661,9 +733,12 @@ module LogStash; class Pipeline < BasePipeline
   private
 
   def default_logging_keys(other_keys = {})
-    keys = super
-    keys[:thread] = thread.inspect if thread
-    keys
+    default_options = if thread
+      { :pipeline_id => pipeline_id, :thread => thread.inspect }
+    else
+      { :pipeline_id => pipeline_id }
+    end
+    default_options.merge(other_keys)
   end
 
   def draining_queue?
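
Note: since BasePipeline no longer defines a private default_logging_keys, the Pipeline override builds the hash itself instead of calling super; every log entry then carries :pipeline_id, plus :thread once the pipeline thread exists. The same logic as a free-standing, runnable sketch (the explicit parameters replace the instance state of the real method):

    def default_logging_keys(pipeline_id, thread, other_keys = {})
      defaults = { :pipeline_id => pipeline_id }
      defaults[:thread] = thread.inspect if thread
      defaults.merge(other_keys)
    end

    p default_logging_keys("main", nil, :event => "start")
    # => {:pipeline_id=>"main", :event=>"start"}
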
@@ -671,9 +746,6 @@ module LogStash; class Pipeline < BasePipeline
   end
 
   def wrapped_write_client(plugin)
-    #need to ensure that metrics are initialized one plugin at a time, else a race condition can exist.
-    @mutex.synchronize do
-      LogStash::Instrument::WrappedWriteClient.new(@input_queue_client, self, metric, plugin)
-    end
+    LogStash::Instrument::WrappedWriteClient.new(@input_queue_client, self, metric, plugin)
   end
 end; end