logstash-core 5.0.0.alpha4.snapshot1-java → 5.0.0.alpha4.snapshot2-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (41)
  1. checksums.yaml +4 -4
  2. data/lib/logstash-core/version.rb +1 -1
  3. data/lib/logstash/agent.rb +31 -36
  4. data/lib/logstash/api/command_factory.rb +3 -1
  5. data/lib/logstash/api/commands/base.rb +4 -0
  6. data/lib/logstash/api/commands/node.rb +116 -0
  7. data/lib/logstash/api/commands/stats.rb +28 -77
  8. data/lib/logstash/api/modules/base.rb +2 -2
  9. data/lib/logstash/api/modules/node.rb +23 -6
  10. data/lib/logstash/api/modules/node_stats.rb +15 -1
  11. data/lib/logstash/api/rack_app.rb +9 -6
  12. data/lib/logstash/api/service.rb +8 -47
  13. data/lib/logstash/config/config_ast.rb +11 -3
  14. data/lib/logstash/config/mixin.rb +60 -22
  15. data/lib/logstash/inputs/metrics.rb +2 -2
  16. data/lib/logstash/instrument/collector.rb +5 -6
  17. data/lib/logstash/instrument/metric.rb +1 -1
  18. data/lib/logstash/instrument/metric_store.rb +54 -0
  19. data/lib/logstash/pipeline.rb +10 -4
  20. data/lib/logstash/runner.rb +2 -2
  21. data/lib/logstash/util/safe_uri.rb +48 -0
  22. data/lib/logstash/version.rb +1 -1
  23. data/lib/logstash/webserver.rb +8 -7
  24. data/logstash-core.gemspec +1 -1
  25. data/spec/api/lib/api/node_plugins_spec.rb +32 -0
  26. data/spec/api/lib/api/node_spec.rb +41 -7
  27. data/spec/api/lib/api/node_stats_spec.rb +31 -6
  28. data/spec/api/lib/api/plugins_spec.rb +1 -7
  29. data/spec/api/lib/api/root_spec.rb +2 -7
  30. data/spec/api/lib/api/support/resource_dsl_methods.rb +14 -7
  31. data/spec/api/spec_helper.rb +24 -50
  32. data/spec/logstash/agent_spec.rb +36 -13
  33. data/spec/logstash/config/config_ast_spec.rb +43 -0
  34. data/spec/logstash/config/mixin_spec.rb +138 -0
  35. data/spec/logstash/inputs/metrics_spec.rb +10 -11
  36. data/spec/logstash/instrument/collector_spec.rb +1 -1
  37. data/spec/logstash/instrument/metric_store_spec.rb +61 -0
  38. data/spec/logstash/instrument/periodic_poller/jvm_spec.rb +6 -3
  39. data/spec/logstash/pipeline_spec.rb +9 -9
  40. data/spec/support/mocks_classes.rb +2 -1
  41. metadata +39 -35
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 34ea2c9c79b6a5828714ad66dc7ce558d7952280
-  data.tar.gz: 76dcab330cb125653bfe94ef514f82f80a08a974
+  metadata.gz: bd0ebd412460fd45c6abdef1ae4a21b75f24b6c5
+  data.tar.gz: 4addbb53a17c98501b27f8f8fa33f8475a44bca4
 SHA512:
-  metadata.gz: 7274fedd8a22a9518fec19299c53295243832c17a376c85f59c2ea6dfe85c139d2161b0f2d5e6854e348824c38b3427fa60ea4d0e7281a6232bfee628848e45f
-  data.tar.gz: f1432ba6f7d34a38c3fbf57a7ddc6f02fefc6916f1349775a3254f75d720735101ad24da81971f81edbf42cd091c783959b2e46e2e2502162e0c2f10d39742ec
+  metadata.gz: b602623cc49f59c90e0af86c1d43e90e2b611aef12e44d5590a8536c3efd0cf06deab5644f49477fdcc2a4d7615340bef5e4fcac613ddfff9e6e792594511958
+  data.tar.gz: 7dcc49def008f07dae78a07e78c6ba8fa7e2942e30341637aa873068bf71cbc3ccb82557299b915e79934f523eb469d98a4dfaaeb39abcffe715efe66057bfe3

data/lib/logstash-core/version.rb CHANGED
@@ -5,4 +5,4 @@
 # Note to authors: this should not include dashes because 'gem' barfs if
 # you include a dash in the version string.
 
-LOGSTASH_CORE_VERSION = "5.0.0-alpha4.snapshot1"
+LOGSTASH_CORE_VERSION = "5.0.0-alpha4.snapshot2"

data/lib/logstash/agent.rb CHANGED
@@ -45,15 +45,15 @@ class LogStash::Agent
     @upgrade_mutex = Mutex.new
 
     @collect_metric = setting("metric.collect")
-    @metric = create_metric_collector
-    @periodic_pollers = LogStash::Instrument::PeriodicPollers.new(create_metric_collector)
+
+    # Create the collectors and configured it with the library
+    configure_metrics_collectors
   end
 
   def execute
     @thread = Thread.current # this var is implicilty used by Stud.stop?
     @logger.info("starting agent")
 
-    start_background_services
     start_pipelines
     start_webserver
 
@@ -112,8 +112,13 @@ class LogStash::Agent
     ((Time.now.to_f - STARTED_AT.to_f) * 1000.0).to_i
   end
 
+  def stop_collecting_metrics
+    @collector.stop
+    @periodic_pollers.stop
+  end
+
   def shutdown
-    stop_background_services
+    stop_collecting_metrics
     stop_webserver
     shutdown_pipelines
   end
@@ -131,7 +136,7 @@ class LogStash::Agent
   private
   def start_webserver
     options = {:http_host => @http_host, :http_port => @http_port, :http_environment => @http_environment }
-    @webserver = LogStash::WebServer.new(@logger, options)
+    @webserver = LogStash::WebServer.new(@logger, self, options)
     Thread.new(@webserver) do |webserver|
       LogStash::Util.set_thread_name("Api Webserver")
       webserver.run
@@ -142,28 +147,24 @@ class LogStash::Agent
     @webserver.stop if @webserver
   end
 
-  def start_background_services
-    if collect_metrics?
-      @logger.debug("Agent: Starting metric periodic pollers")
-      @periodic_pollers.start
-    end
-  end
+  def configure_metrics_collectors
+    @collector = LogStash::Instrument::Collector.new
 
-  def stop_background_services
-    if collect_metrics?
-      @logger.debug("Agent: Stopping metric periodic pollers")
-      @periodic_pollers.stop
-    end
+    @metric = if collect_metrics?
+      @logger.debug("Agent: Configuring metric collection")
+      LogStash::Instrument::Metric.new(@collector)
+    else
+      LogStash::Instrument::NullMetric.new
+    end
+
+
+    @periodic_pollers = LogStash::Instrument::PeriodicPollers.new(@metric)
+    @periodic_pollers.start
   end
 
-  def create_metric_collector
-    if collect_metrics?
-      @logger.debug("Agent: Configuring metric collection")
-      LogStash::Instrument::Collector.instance.agent = self
-      LogStash::Instrument::Metric.new
-    else
-      LogStash::Instrument::NullMetric.new
-    end
+  def reset_metrics_collectors
+    stop_collecting_metrics
+    configure_metrics_collectors
   end
 
   def collect_metrics?
@@ -171,7 +172,6 @@ class LogStash::Agent
   end
 
   def create_pipeline(settings, config=nil)
-
     if config.nil?
       begin
         config = fetch_config(settings)
@@ -182,7 +182,7 @@ class LogStash::Agent
     end
 
     begin
-      LogStash::Pipeline.new(config, settings)
+      LogStash::Pipeline.new(config, settings, metric)
     rescue => e
       @logger.error("fetched an invalid config", :config => config, :reason => e.message)
       return
@@ -204,6 +204,11 @@ class LogStash::Agent
       return
     end
 
+    # Reset the current collected stats,
+    # starting a pipeline with a new configuration should be the same as restarting
+    # logstash.
+    reset_metrics_collectors
+
     new_pipeline = create_pipeline(old_pipeline.settings, new_config)
 
     return if new_pipeline.nil?
@@ -225,12 +230,6 @@ class LogStash::Agent
     return unless pipeline.is_a?(LogStash::Pipeline)
     return if pipeline.ready?
     @logger.info("starting pipeline", :id => id)
-
-    # Reset the current collected stats,
-    # starting a pipeline with a new configuration should be the same as restarting
-    # logstash.
-    reset_collector
-
     Thread.new do
       LogStash::Util.set_thread_name("pipeline.#{id}")
       begin
@@ -273,10 +272,6 @@ class LogStash::Agent
     @pipelines.empty?
   end
 
-  def reset_collector
-    LogStash::Instrument::Collector.instance.clear
-  end
-
   def setting(key)
     @settings.get(key)
   end
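
A note on the change above: when the "metric.collect" setting is disabled, the agent now assigns a LogStash::Instrument::NullMetric in place of a real Metric, so the rest of the code can record metrics unconditionally. A minimal, self-contained sketch of that null-object idea (the class names below are illustrative, not the actual LogStash implementation):

  # Illustrative null-object sketch; not the LogStash classes themselves.
  class RecordingMetric
    def initialize
      @counters = Hash.new(0)
    end

    # Record a counter increment.
    def increment(key, value = 1)
      @counters[key] += value
    end

    attr_reader :counters
  end

  class NullMetric
    # Accepts the same calls as RecordingMetric but does nothing,
    # so callers never need an "are metrics enabled?" check.
    def increment(key, value = 1); end
  end

  collect_metrics = false # e.g. the "metric.collect" setting
  metric = collect_metrics ? RecordingMetric.new : NullMetric.new
  metric.increment(:events_in) # safe either way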

data/lib/logstash/api/command_factory.rb CHANGED
@@ -3,6 +3,7 @@ require "logstash/api/service"
 require "logstash/api/commands/system/basicinfo_command"
 require "logstash/api/commands/system/plugins_command"
 require "logstash/api/commands/stats"
+require "logstash/api/commands/node"
 
 
 module LogStash
@@ -15,7 +16,8 @@ module LogStash
       @factory = {
         :system_basic_info => ::LogStash::Api::Commands::System::BasicInfo,
         :plugins_command => ::LogStash::Api::Commands::System::Plugins,
-        :stats => ::LogStash::Api::Commands::Stats
+        :stats => ::LogStash::Api::Commands::Stats,
+        :node => ::LogStash::Api::Commands::Node
       }
     end
 

data/lib/logstash/api/commands/base.rb CHANGED
@@ -19,6 +19,10 @@
       def started_at
         (LogStash::Agent::STARTED_AT.to_f * 1000.0).to_i
       end
+
+      def extract_metrics(path, *keys)
+        service.extract_metrics(path, *keys)
+      end
     end
   end
 end
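
The extract_metrics helper added here simply delegates to the service; the supporting changes live in data/lib/logstash/instrument/metric_store.rb and data/lib/logstash/api/service.rb, which are not shown in this section. Judging from the call sites in commands/stats.rb below, a key can be either a plain symbol or a [:sub_key, [:fields]] pair. A rough, purely illustrative sketch of how such a key specification could be resolved against a nested hash (not the actual MetricStore implementation):

  # Illustrative only: resolves keys like :count or [:mem, [:total_virtual_in_bytes]]
  # against a nested hash of metrics.
  def extract_metrics_from(hash, path, *keys)
    node = path.reduce(hash) { |acc, key| acc.fetch(key) }
    keys.each_with_object({}) do |key, result|
      if key.is_a?(Array)
        sub_key, fields = key
        result[sub_key] = fields.each_with_object({}) { |f, h| h[f] = node.fetch(sub_key).fetch(f) }
      else
        result[key] = node.fetch(key)
      end
    end
  end

  stats = { :jvm => { :process => { :open_file_descriptors => 42,
                                    :mem => { :total_virtual_in_bytes => 123 } } } }
  extract_metrics_from(stats, [:jvm, :process],
                       :open_file_descriptors, [:mem, [:total_virtual_in_bytes]])
  # => {:open_file_descriptors=>42, :mem=>{:total_virtual_in_bytes=>123}}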

data/lib/logstash/api/commands/node.rb ADDED
@@ -0,0 +1,116 @@
+require "logstash/api/commands/base"
+
+module LogStash
+  module Api
+    module Commands
+      class Node < Commands::Base
+        def all
+          {
+            :pipeline => pipeline,
+            :os => os,
+            :jvm => jvm
+          }
+        end
+
+        def pipeline
+          extract_metrics(
+            [:stats, :pipelines, :main, :config],
+            :workers, :batch_size, :batch_delay
+          )
+        end
+
+        def os
+          {
+            :name => java.lang.System.getProperty("os.name"),
+            :arch => java.lang.System.getProperty("os.arch"),
+            :version => java.lang.System.getProperty("os.version"),
+            :available_processors => java.lang.Runtime.getRuntime().availableProcessors()
+          }
+        end
+
+        def jvm
+          memory_bean = ManagementFactory.getMemoryMXBean()
+          {
+            :pid => ManagementFactory.getRuntimeMXBean().getName().split("@").first.to_i,
+            :version => java.lang.System.getProperty("java.version"),
+            :vm_name => java.lang.System.getProperty("java.vm.name"),
+            :vm_version => java.lang.System.getProperty("java.version"),
+            :vm_vendor => java.lang.System.getProperty("java.vendor"),
+            :vm_name => java.lang.System.getProperty("java.vm.name"),
+            :start_time_in_millis => started_at,
+            :mem => {
+              :heap_init_in_bytes => (memory_bean.getHeapMemoryUsage().getInit() < 0 ? 0 : memory_bean.getHeapMemoryUsage().getInit()),
+              :heap_max_in_bytes => (memory_bean.getHeapMemoryUsage().getMax() < 0 ? 0 : memory_bean.getHeapMemoryUsage().getMax()),
+              :non_heap_init_in_bytes => (memory_bean.getNonHeapMemoryUsage().getInit() < 0 ? 0 : memory_bean.getNonHeapMemoryUsage().getInit()),
+              :non_heap_max_in_bytes => (memory_bean.getNonHeapMemoryUsage().getMax() < 0 ? 0 : memory_bean.getNonHeapMemoryUsage().getMax())
+            }
+          }
+        end
+
+        def hot_threads(options={})
+          HotThreadsReport.new(self, options)
+        end
+
+        class HotThreadsReport
+          HOT_THREADS_STACK_TRACES_SIZE_DEFAULT = 10.freeze
+
+          def initialize(cmd, options)
+            @cmd = cmd
+            filter = { :stacktrace_size => options.fetch(:stacktrace_size, HOT_THREADS_STACK_TRACES_SIZE_DEFAULT) }
+            jr_dump = JRMonitor.threads.generate(filter)
+            @thread_dump = ::LogStash::Util::ThreadDump.new(options.merge(:dump => jr_dump))
+          end
+
+          def to_s
+            hash = to_hash
+            report = "#{I18n.t("logstash.web_api.hot_threads.title", :hostname => hash[:hostname], :time => hash[:time], :top_count => @thread_dump.top_count )} \n"
+            report << '=' * 80
+            report << "\n"
+            hash[:threads].each do |thread|
+              thread_report = ""
+              thread_report = "#{I18n.t("logstash.web_api.
+              hot_threads.thread_title", :percent_of_cpu_time => thread[:percent_of_cpu_time], :thread_state => thread[:state], :thread_name => thread[:name])} \n"
+              thread_report = "#{thread[:percent_of_cpu_time]} % of of cpu usage by #{thread[:state]} thread named '#{thread[:name]}'\n"
+              thread_report << "#{thread[:path]}\n" if thread[:path]
+              thread[:traces].each do |trace|
+                thread_report << "\t#{trace}\n"
+              end
+              report << thread_report
+              report << '-' * 80
+              report << "\n"
+            end
+            report
+          end
+
+          def to_hash
+            hash = { :hostname => @cmd.hostname, :time => Time.now.iso8601, :busiest_threads => @thread_dump.top_count, :threads => [] }
+            @thread_dump.each do |thread_name, _hash|
+              thread_name, thread_path = _hash["thread.name"].split(": ")
+              thread = { :name => thread_name,
+                         :percent_of_cpu_time => cpu_time_as_percent(_hash),
+                         :state => _hash["thread.state"]
+              }
+              thread[:path] = thread_path if thread_path
+              traces = []
+              _hash["thread.stacktrace"].each do |trace|
+                traces << trace
+              end
+              thread[:traces] = traces unless traces.empty?
+              hash[:threads] << thread
+            end
+            hash
+          end
+
+          def cpu_time_as_percent(hash)
+            (((cpu_time(hash) / @cmd.uptime * 1.0)*10000).to_i)/100.0
+          end
+
+          def cpu_time(hash)
+            hash["cpu.time"] / 1000000.0
+          end
+
+        end
+      end
+    end
+  end
+end
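
For reference, cpu_time_as_percent above converts the per-thread "cpu.time" value (apparently nanoseconds, given the division by 1,000,000) to milliseconds, divides by the agent uptime (milliseconds, per the uptime helper in agent.rb), and truncates to two decimal places. A small worked example of that arithmetic:

  # Worked example of the rounding in cpu_time_as_percent:
  cpu_time_ns = 250_000_000   # 250 ms of CPU time reported for a thread
  uptime_ms   = 10_000.0      # 10 seconds of agent uptime

  cpu_time_ms = cpu_time_ns / 1_000_000.0                          # => 250.0
  percent     = (((cpu_time_ms / uptime_ms * 1.0) * 10000).to_i) / 100.0
  percent                                                          # => 2.5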

data/lib/logstash/api/commands/stats.rb CHANGED
@@ -5,99 +5,50 @@ module LogStash
   module Api
     module Commands
       class Stats < Commands::Base
-
        def jvm
-          {:threads => service.get_shallow(:jvm, :threads)}
+          {
+            :threads => extract_metrics(
+              [:jvm, :threads],
+              :count,
+              :peak_count
+            )
+          }
        end
 
        def process
-          service.get_shallow(:jvm, :process)
+          extract_metrics(
+            [:jvm, :process],
+            :open_file_descriptors,
+            :peak_open_file_descriptors,
+            :max_file_descriptors,
+            [:mem, [:total_virtual_in_bytes]],
+            [:cpu, [:total_in_millis, :percent]]
+          )
        end
 
        def events
-          service.get_shallow(:stats, :events)
+          extract_metrics(
+            [:stats, :events],
+            :in, :filtered, :out
+          )
        end
 
        def memory
-          memory = LogStash::Json.load(service.get(:jvm_memory_stats))
+          memory = service.get_shallow(:jvm, :memory)
          {
-            :heap_used_in_bytes => memory["heap"]["used_in_bytes"],
-            :heap_used_percent => memory["heap"]["used_percent"],
-            :heap_committed_in_bytes => memory["heap"]["committed_in_bytes"],
-            :heap_max_in_bytes => memory["heap"]["max_in_bytes"],
-            :heap_used_in_bytes => memory["heap"]["used_in_bytes"],
-            :non_heap_used_in_bytes => memory["non_heap"]["used_in_bytes"],
-            :non_heap_committed_in_bytes => memory["non_heap"]["committed_in_bytes"],
-            :pools => memory["pools"].inject({}) do |acc, (type, hash)|
+            :heap_used_in_bytes => memory[:heap][:used_in_bytes],
+            :heap_used_percent => memory[:heap][:used_percent],
+            :heap_committed_in_bytes => memory[:heap][:committed_in_bytes],
+            :heap_max_in_bytes => memory[:heap][:max_in_bytes],
+            :heap_used_in_bytes => memory[:heap][:used_in_bytes],
+            :non_heap_used_in_bytes => memory[:non_heap][:used_in_bytes],
+            :non_heap_committed_in_bytes => memory[:non_heap][:committed_in_bytes],
+            :pools => memory[:pools].inject({}) do |acc, (type, hash)|
              hash.delete("committed_in_bytes")
              acc[type] = hash
              acc
            end
          }
-        end
-
-        def hot_threads(options={})
-          HotThreadsReport.new(self, options)
-        end
-
-        class HotThreadsReport
-          HOT_THREADS_STACK_TRACES_SIZE_DEFAULT = 10.freeze
-
-          def initialize(cmd, options)
-            @cmd = cmd
-            filter = { :stacktrace_size => options.fetch(:stacktrace_size, HOT_THREADS_STACK_TRACES_SIZE_DEFAULT) }
-            jr_dump = JRMonitor.threads.generate(filter)
-            @thread_dump = ::LogStash::Util::ThreadDump.new(options.merge(:dump => jr_dump))
-          end
-
-          def to_s
-            hash = to_hash
-            report = "#{I18n.t("logstash.web_api.hot_threads.title", :hostname => hash[:hostname], :time => hash[:time], :top_count => @thread_dump.top_count )} \n"
-            report << '=' * 80
-            report << "\n"
-            hash[:threads].each do |thread|
-              thread_report = ""
-              thread_report = "#{I18n.t("logstash.web_api.
-              hot_threads.thread_title", :percent_of_cpu_time => thread[:percent_of_cpu_time], :thread_state => thread[:state], :thread_name => thread[:name])} \n"
-              thread_report = "#{thread[:percent_of_cpu_time]} % of of cpu usage by #{thread[:state]} thread named '#{thread[:name]}'\n"
-              thread_report << "#{thread[:path]}\n" if thread[:path]
-              thread[:traces].each do |trace|
-                thread_report << "\t#{trace}\n"
-              end
-              report << thread_report
-              report << '-' * 80
-              report << "\n"
-            end
-            report
-          end
-
-          def to_hash
-            hash = { :hostname => @cmd.hostname, :time => Time.now.iso8601, :busiest_threads => @thread_dump.top_count, :threads => [] }
-            @thread_dump.each do |thread_name, _hash|
-              thread_name, thread_path = _hash["thread.name"].split(": ")
-              thread = { :name => thread_name,
-                         :percent_of_cpu_time => cpu_time_as_percent(_hash),
-                         :state => _hash["thread.state"]
-              }
-              thread[:path] = thread_path if thread_path
-              traces = []
-              _hash["thread.stacktrace"].each do |trace|
-                traces << trace
-              end
-              thread[:traces] = traces unless traces.empty?
-              hash[:threads] << thread
-            end
-            hash
-          end
-
-          def cpu_time_as_percent(hash)
-            (((cpu_time(hash) / @cmd.uptime * 1.0)*10000).to_i)/100.0
-          end
-
-          def cpu_time(hash)
-            hash["cpu.time"] / 1000000.0
-          end
-
        end
      end
    end

data/lib/logstash/api/modules/base.rb CHANGED
@@ -21,9 +21,9 @@ module LogStash
 
      helpers AppHelpers
 
-      def initialize(app=nil)
+      def initialize(app=nil, agent)
        super(app)
-        @factory = ::LogStash::Api::CommandFactory.new(LogStash::Api::Service.instance)
+        @factory = ::LogStash::Api::CommandFactory.new(LogStash::Api::Service.new(agent))
      end
 
      not_found do

data/lib/logstash/api/modules/node.rb CHANGED
@@ -5,7 +5,26 @@
  module Api
    module Modules
      class Node < ::LogStash::Api::Modules::Base
-        # return hot threads information
+        def node
+          factory.build(:node)
+        end
+
+        get "/" do
+          respond_with node.all
+        end
+
+        get "/os" do
+          respond_with :os => node.os
+        end
+
+        get "/jvm" do
+          respond_with :jvm => node.jvm
+        end
+
+        get "/pipeline" do
+          respond_with :pipeline => node.pipeline
+        end
+
        get "/hot_threads" do
          ignore_idle_threads = params["ignore_idle_threads"] || true
 
@@ -15,11 +34,9 @@
          }
          options[:threads] = params["threads"].to_i if params.has_key?("threads")
 
-          stats = factory.build(:stats)
-          as = options[:human] ? :string : :json
-          respond_with(stats.hot_threads(options), {:as => as})
-        end
-
+          as = options[:human] ? :string : :json
+          respond_with({:hot_threads => node.hot_threads(options)}, {:as => as})
+        end
      end
    end
  end
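
The new routes above are plain Sinatra GET handlers, so once Logstash is running they can be exercised over the HTTP API. A hedged example in Ruby, assuming the web API listens on the default port 9600 and that rack_app.rb (changed in this release but not shown in this section) mounts this module under /_node:

  # Assumes the web API listens on localhost:9600 and the Node module
  # is mounted under /_node (see data/lib/logstash/api/rack_app.rb).
  require "net/http"
  require "json"

  ["/_node", "/_node/os", "/_node/jvm", "/_node/pipeline"].each do |path|
    response = Net::HTTP.get_response(URI("http://localhost:9600#{path}"))
    puts "#{path}: #{JSON.parse(response.body).keys.inspect}"
  end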