logstash-core 5.1.2-java → 5.2.0-java

Sign up to get free protection for your applications and to get access to all the features.
Files changed (39) hide show
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +0 -1
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/version.rb +1 -1
  5. data/lib/logstash-core_jars.rb +0 -2
  6. data/lib/logstash/agent.rb +26 -10
  7. data/lib/logstash/api/commands/default_metadata.rb +3 -1
  8. data/lib/logstash/api/commands/stats.rb +17 -1
  9. data/lib/logstash/api/modules/node_stats.rb +9 -0
  10. data/lib/logstash/api/modules/stats.rb +3 -2
  11. data/lib/logstash/config/mixin.rb +5 -8
  12. data/lib/logstash/instrument/collector.rb +1 -46
  13. data/lib/logstash/instrument/periodic_poller/base.rb +2 -0
  14. data/lib/logstash/instrument/periodic_poller/cgroup.rb +137 -0
  15. data/lib/logstash/instrument/periodic_poller/jvm.rb +1 -2
  16. data/lib/logstash/instrument/periodic_poller/os.rb +21 -0
  17. data/lib/logstash/instrument/periodic_poller/pq.rb +20 -0
  18. data/lib/logstash/instrument/periodic_pollers.rb +4 -2
  19. data/lib/logstash/output_delegator.rb +2 -0
  20. data/lib/logstash/pipeline.rb +31 -2
  21. data/lib/logstash/runner.rb +6 -1
  22. data/lib/logstash/util/wrapped_acked_queue.rb +11 -0
  23. data/lib/logstash/util/wrapped_synchronous_queue.rb +9 -0
  24. data/lib/logstash/version.rb +1 -1
  25. data/lib/logstash/webserver.rb +9 -1
  26. data/locales/en.yml +0 -3
  27. data/spec/api/lib/api/node_stats_spec.rb +5 -1
  28. data/spec/api/spec_helper.rb +3 -1
  29. data/spec/logstash/agent_spec.rb +2 -0
  30. data/spec/logstash/instrument/collector_spec.rb +4 -0
  31. data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +148 -0
  32. data/spec/logstash/instrument/periodic_poller/os_spec.rb +85 -0
  33. data/spec/logstash/output_delegator_spec.rb +12 -4
  34. data/spec/logstash/pipeline_reporter_spec.rb +2 -26
  35. data/spec/logstash/pipeline_spec.rb +102 -40
  36. data/spec/logstash/plugin_spec.rb +2 -6
  37. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +34 -4
  38. data/spec/support/mocks_classes.rb +2 -2
  39. metadata +12 -7
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 8de8152e9d47ea5ce222f2811e96ec13216542ec
4
- data.tar.gz: da613e3b8bee64c8be84dbf804f45511fdb0fc07
3
+ metadata.gz: 50660404c5ec13b21a5dccc7fd62b0d282dea262
4
+ data.tar.gz: 5b5b44f759032fda3022a8d08f982cf35ef5179a
5
5
  SHA512:
6
- metadata.gz: 5f5989dfd6273c39a46964798d8c4549f1ad9c196894afd0a5df53cb3b0d05ca35947c9d4032d4f3e41a5f12a50657465a249ec030ce1b2d6adc98bce5bf15b1
7
- data.tar.gz: 8c5b2a3b25634f0f7c4d7543a1267a4ce536faa2eee61048c00519ad7d89cf68f165ea8fbe52c68dc5d6f0da121b2b8be0cbd79c90f0c47fee2a699442b83fc9
6
+ metadata.gz: 134ef54d0f7ef058c201420d997e702775bf4520e747a6c40ec76a6593597192a6448a537bad0e1df236214d636b469c6d95faa4894492cb05c23c826b0ce4e9
7
+ data.tar.gz: fa146b9c7ccd8891ed64dc892026313ff6356d87511613e23a270b8a30fa33a664a87d39b586b51b3e94961a3a18dd3640087746bdf965a5a603ff3bfd514b21
data/gemspec_jars.rb CHANGED
@@ -2,7 +2,6 @@
2
2
  # runtime dependencies to generate this gemspec dependencies file to be eval'ed by the gemspec
3
3
  # for the jar-dependencies requirements.
4
4
 
5
- gem.requirements << "jar org.apache.logging.log4j:log4j-1.2-api, 2.6.2"
6
5
  gem.requirements << "jar org.apache.logging.log4j:log4j-api, 2.6.2"
7
6
  gem.requirements << "jar org.apache.logging.log4j:log4j-core, 2.6.2"
8
7
  gem.requirements << "jar com.fasterxml.jackson.core:jackson-core, 2.7.4"
Binary file
@@ -5,4 +5,4 @@
5
5
  # Note to authors: this should not include dashes because 'gem' barfs if
6
6
  # you include a dash in the version string.
7
7
 
8
- LOGSTASH_CORE_VERSION = "5.1.2"
8
+ LOGSTASH_CORE_VERSION = "5.2.0"
@@ -5,7 +5,6 @@ rescue LoadError
5
5
  require 'org/apache/logging/log4j/log4j-core/2.6.2/log4j-core-2.6.2.jar'
6
6
  require 'org/apache/logging/log4j/log4j-api/2.6.2/log4j-api-2.6.2.jar'
7
7
  require 'com/fasterxml/jackson/core/jackson-core/2.7.4/jackson-core-2.7.4.jar'
8
- require 'org/apache/logging/log4j/log4j-1.2-api/2.6.2/log4j-1.2-api-2.6.2.jar'
9
8
  require 'com/fasterxml/jackson/core/jackson-annotations/2.7.0/jackson-annotations-2.7.0.jar'
10
9
  require 'com/fasterxml/jackson/core/jackson-databind/2.7.4/jackson-databind-2.7.4.jar'
11
10
  end
@@ -14,7 +13,6 @@ if defined? Jars
14
13
  require_jar( 'org.apache.logging.log4j', 'log4j-core', '2.6.2' )
15
14
  require_jar( 'org.apache.logging.log4j', 'log4j-api', '2.6.2' )
16
15
  require_jar( 'com.fasterxml.jackson.core', 'jackson-core', '2.7.4' )
17
- require_jar( 'org.apache.logging.log4j', 'log4j-1.2-api', '2.6.2' )
18
16
  require_jar( 'com.fasterxml.jackson.core', 'jackson-annotations', '2.7.0' )
19
17
  require_jar( 'com.fasterxml.jackson.core', 'jackson-databind', '2.7.4' )
20
18
  end
@@ -52,7 +52,8 @@ class LogStash::Agent
52
52
  # Create the collectors and configure them with the library
53
53
  configure_metrics_collectors
54
54
 
55
- @reload_metric = metric.namespace([:stats, :pipelines])
55
+ @pipeline_reload_metric = metric.namespace([:stats, :pipelines])
56
+ @instance_reload_metric = metric.namespace([:stats, :reloads])
56
57
 
57
58
  @dispatcher = LogStash::EventDispatcher.new(self)
58
59
  LogStash::PLUGIN_REGISTRY.hooks.register_emitter(self.class, dispatcher)
@@ -109,7 +110,8 @@ class LogStash::Agent
109
110
  begin
110
111
  reload_pipeline!(pipeline_id)
111
112
  rescue => e
112
- @reload_metric.namespace([pipeline_id.to_sym, :reloads]).tap do |n|
113
+ @instance_reload_metric.increment(:failures)
114
+ @pipeline_reload_metric.namespace([pipeline_id.to_sym, :reloads]).tap do |n|
113
115
  n.increment(:failures)
114
116
  n.gauge(:last_error, { :message => e.message, :backtrace => e.backtrace})
115
117
  n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
@@ -128,7 +130,6 @@ class LogStash::Agent
128
130
  end
129
131
 
130
132
  def stop_collecting_metrics
131
- @collector.stop
132
133
  @periodic_pollers.stop
133
134
  end
134
135
 
@@ -176,6 +177,12 @@ class LogStash::Agent
176
177
  @id_path ||= ::File.join(settings.get("path.data"), "uuid")
177
178
  end
178
179
 
180
+ def running_pipelines
181
+ @upgrade_mutex.synchronize do
182
+ @pipelines.select {|pipeline_id, _| running_pipeline?(pipeline_id) }
183
+ end
184
+ end
185
+
179
186
  def running_pipelines?
180
187
  @upgrade_mutex.synchronize do
181
188
  @pipelines.select {|pipeline_id, _| running_pipeline?(pipeline_id) }.any?
@@ -207,7 +214,9 @@ class LogStash::Agent
207
214
  end
208
215
 
209
216
 
210
- @periodic_pollers = LogStash::Instrument::PeriodicPollers.new(@metric)
217
+ @periodic_pollers = LogStash::Instrument::PeriodicPollers.new(@metric,
218
+ settings.get("queue.type"),
219
+ self)
211
220
  @periodic_pollers.start
212
221
  end
213
222
 
@@ -236,7 +245,8 @@ class LogStash::Agent
236
245
  begin
237
246
  LogStash::Pipeline.new(config, settings, metric)
238
247
  rescue => e
239
- @reload_metric.namespace([settings.get("pipeline.id").to_sym, :reloads]).tap do |n|
248
+ @instance_reload_metric.increment(:failures)
249
+ @pipeline_reload_metric.namespace([settings.get("pipeline.id").to_sym, :reloads]).tap do |n|
240
250
  n.increment(:failures)
241
251
  n.gauge(:last_error, { :message => e.message, :backtrace => e.backtrace})
242
252
  n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
@@ -291,7 +301,8 @@ class LogStash::Agent
291
301
  begin
292
302
  pipeline.run
293
303
  rescue => e
294
- @reload_metric.namespace([id.to_sym, :reloads]).tap do |n|
304
+ @instance_reload_metric.increment(:failures)
305
+ @pipeline_reload_metric.namespace([id.to_sym, :reloads]).tap do |n|
295
306
  n.increment(:failures)
296
307
  n.gauge(:last_error, { :message => e.message, :backtrace => e.backtrace})
297
308
  n.gauge(:last_failure_timestamp, LogStash::Timestamp.now)
@@ -302,7 +313,7 @@ class LogStash::Agent
302
313
  while true do
303
314
  if !t.alive?
304
315
  return false
305
- elsif pipeline.ready?
316
+ elsif pipeline.running?
306
317
  return true
307
318
  else
308
319
  sleep 0.01
@@ -319,8 +330,11 @@ class LogStash::Agent
319
330
  end
320
331
 
321
332
  def start_pipelines
322
- @pipelines.each do |id, _|
333
+ @instance_reload_metric.increment(:successes, 0)
334
+ @instance_reload_metric.increment(:failures, 0)
335
+ @pipelines.each do |id, pipeline|
323
336
  start_pipeline(id)
337
+ pipeline.collect_stats
324
338
  # no reloads yet, initialize all the reload metrics
325
339
  init_pipeline_reload_metrics(id)
326
340
  end
@@ -340,10 +354,12 @@ class LogStash::Agent
340
354
  reset_pipeline_metrics(pipeline_id)
341
355
  @pipelines[pipeline_id] = new_pipeline
342
356
  if start_pipeline(pipeline_id) # pipeline started successfuly
343
- @reload_metric.namespace([pipeline_id.to_sym, :reloads]).tap do |n|
357
+ @instance_reload_metric.increment(:successes)
358
+ @pipeline_reload_metric.namespace([pipeline_id.to_sym, :reloads]).tap do |n|
344
359
  n.increment(:successes)
345
360
  n.gauge(:last_success_timestamp, LogStash::Timestamp.now)
346
361
  end
362
+
347
363
  end
348
364
  end
349
365
 
@@ -356,7 +372,7 @@ class LogStash::Agent
356
372
  end
357
373
 
358
374
  def init_pipeline_reload_metrics(id)
359
- @reload_metric.namespace([id.to_sym, :reloads]).tap do |n|
375
+ @pipeline_reload_metric.namespace([id.to_sym, :reloads]).tap do |n|
360
376
  n.increment(:successes, 0)
361
377
  n.increment(:failures, 0)
362
378
  n.gauge(:last_error, nil)
@@ -20,7 +20,9 @@ module LogStash
20
20
  end
21
21
 
22
22
  def http_address
23
- service.agent.webserver.address
23
+ @http_address ||= service.get_shallow(:http_address).value
24
+ rescue ::LogStash::Instrument::MetricStore::MetricNotFound, NoMethodError => e
25
+ nil
24
26
  end
25
27
  end
26
28
  end
@@ -3,6 +3,9 @@ require "logstash/api/commands/base"
3
3
  require 'logstash/util/thread_dump'
4
4
  require_relative "hot_threads_reporter"
5
5
 
6
+ java_import java.nio.file.Files
7
+ java_import java.nio.file.Paths
8
+
6
9
  module LogStash
7
10
  module Api
8
11
  module Commands
@@ -16,10 +19,14 @@ module LogStash
16
19
  ),
17
20
  :mem => memory,
18
21
  :gc => gc,
19
- :uptime_in_millis => service.get_shallow(:jvm, :uptime_in_millis)
22
+ :uptime_in_millis => service.get_shallow(:jvm, :uptime_in_millis),
20
23
  }
21
24
  end
22
25
 
26
+ def reloads
27
+ service.get_shallow(:stats, :reloads)
28
+ end
29
+
23
30
  def process
24
31
  extract_metrics(
25
32
  [:jvm, :process],
@@ -61,6 +68,14 @@ module LogStash
61
68
  }
62
69
  end
63
70
 
71
+ def os
72
+ service.get_shallow(:os)
73
+ rescue
74
+ # The only OS information currently fetched concerns Linux
75
+ # containers.
76
+ {}
77
+ end
78
+
64
79
  def gc
65
80
  service.get_shallow(:jvm, :gc)
66
81
  end
@@ -94,6 +109,7 @@ module LogStash
94
109
  :outputs => plugin_stats(stats, :outputs)
95
110
  },
96
111
  :reloads => stats[:reloads],
112
+ :queue => stats[:queue]
97
113
  }
98
114
  end
99
115
  end # module PluginsStats
@@ -13,11 +13,16 @@ module LogStash
13
13
  :jvm => jvm_payload,
14
14
  :process => process_payload,
15
15
  :pipeline => pipeline_payload,
16
+ :reloads => reloads,
17
+ :os => os_payload
16
18
  }
17
19
  respond_with(payload, {:filter => params["filter"]})
18
20
  end
19
21
 
20
22
  private
23
+ def os_payload
24
+ @stats.os
25
+ end
21
26
 
22
27
  def events_payload
23
28
  @stats.events
@@ -27,6 +32,10 @@ module LogStash
27
32
  @stats.jvm
28
33
  end
29
34
 
35
+ def reloads
36
+ @stats.reloads
37
+ end
38
+
30
39
  def process_payload
31
40
  @stats.process
32
41
  end
@@ -30,8 +30,9 @@ module LogStash
30
30
  :jvm => {
31
31
  :timestamp => stats_command.started_at,
32
32
  :uptime_in_millis => stats_command.uptime,
33
- :memory => stats_command.memory
34
- }
33
+ :memory => stats_command.memory,
34
+ },
35
+ :os => stats_command.os
35
36
  }
36
37
  respond_with(payload, {:filter => params["filter"]})
37
38
  end
@@ -268,6 +268,7 @@ module LogStash::Config::Mixin
268
268
  return is_valid
269
269
  end # def validate
270
270
 
271
+ # TODO: Remove in 6.0
271
272
  def print_version_notice
272
273
  return if @@version_notice_given
273
274
 
@@ -288,14 +289,10 @@ module LogStash::Config::Mixin
288
289
  end
289
290
  end
290
291
  rescue LogStash::PluginNoVersionError
291
- # If we cannot find a version in the currently installed gems we
292
- # will display this message. This could happen in the test, if you
293
- # create an anonymous class to test a plugin.
294
- self.logger.warn(I18n.t("logstash.plugin.no_version",
295
- :type => @plugin_type,
296
- :name => @config_name,
297
- :LOGSTASH_VERSION => LOGSTASH_VERSION))
298
- ensure
292
+ # This can happen because of one of the following:
293
+ # - The plugin is loaded from the plugins.path and contains no gemspec.
294
+ # - The plugin is defined in a universal plugin, so the loaded plugin doesn't correspond to an actual gemspec.
295
+ ensure
299
296
  @@version_notice_given = true
300
297
  end
301
298
  end
@@ -11,12 +11,8 @@ module LogStash module Instrument
11
11
  # The Collector is the single point of reference for all
12
12
  # the metrics collection inside logstash, the metrics library will make
13
13
  # direct calls to this class.
14
- #
15
- # This class is an observable responsable of periodically emitting view of the system
16
- # to other components like the internal metrics pipelines.
17
14
  class Collector
18
15
  include LogStash::Util::Loggable
19
- include Observable
20
16
 
21
17
  SNAPSHOT_ROTATION_TIME_SECS = 1 # seconds
22
18
  SNAPSHOT_ROTATION_TIMEOUT_INTERVAL_SECS = 10 * 60 # seconds
@@ -26,7 +22,6 @@ module LogStash module Instrument
26
22
  def initialize
27
23
  @metric_store = MetricStore.new
28
24
  @agent = nil
29
- start_periodic_snapshotting
30
25
  end
31
26
 
32
27
  # The metric library will call this unique interface
@@ -43,8 +38,6 @@ module LogStash module Instrument
43
38
  end
44
39
 
45
40
  metric.execute(*metric_type_params)
46
-
47
- changed # we had changes coming in so we can notify the observers
48
41
  rescue MetricStore::NamespacesExpectedError => e
49
42
  logger.error("Collector: Cannot record metric", :exception => e)
50
43
  rescue NameError => e
@@ -58,51 +51,13 @@ module LogStash module Instrument
58
51
  end
59
52
  end
60
53
 
61
- # Monitor the `Concurrent::TimerTask` this update is triggered on every successful or not
62
- # run of the task, TimerTask implement Observable and the collector acts as
63
- # the observer and will keep track if something went wrong in the execution.
64
- #
65
- # @param [Time] Time of execution
66
- # @param [result] Result of the execution
67
- # @param [Exception] Exception
68
- def update(time_of_execution, result, exception)
69
- return true if exception.nil?
70
- logger.error("Collector: Something went wrong went sending data to the observers",
71
- :execution_time => time_of_execution,
72
- :result => result,
73
- :exception => exception.class.name)
74
- end
75
-
76
54
  # Snapshot the current Metric Store and return it immediately,
77
55
  # This is useful if you want to get access to the current metric store without
78
56
  # waiting for a periodic call.
79
57
  #
80
58
  # @return [LogStash::Instrument::MetricStore]
81
59
  def snapshot_metric
82
- Snapshot.new(@metric_store)
83
- end
84
-
85
- # Configure and start the periodic task for snapshotting the `MetricStore`
86
- def start_periodic_snapshotting
87
- @snapshot_task = Concurrent::TimerTask.new { publish_snapshot }
88
- @snapshot_task.execution_interval = SNAPSHOT_ROTATION_TIME_SECS
89
- @snapshot_task.timeout_interval = SNAPSHOT_ROTATION_TIMEOUT_INTERVAL_SECS
90
- @snapshot_task.add_observer(self)
91
- @snapshot_task.execute
92
- end
93
-
94
- def stop
95
- @snapshot_task.shutdown
96
- end
97
-
98
- # Create a snapshot of the MetricStore and send it to to the registered observers
99
- # The observer will receive the following signature in the update methode.
100
- #
101
- # `#update(created_at, metric_store)`
102
- def publish_snapshot
103
- created_at = Time.now
104
- logger.debug("Collector: Sending snapshot to observers", :created_at => created_at) if logger.debug?
105
- notify_observers(snapshot_metric)
60
+ Snapshot.new(@metric_store.dup)
106
61
  end
107
62
 
108
63
  def clear(keypath)
@@ -12,6 +12,8 @@ module LogStash module Instrument module PeriodicPoller
12
12
  :polling_timeout => 120
13
13
  }
14
14
 
15
+ attr_reader :metric
16
+
15
17
  public
16
18
  def initialize(metric, options = {})
17
19
  @metric = metric
@@ -0,0 +1,137 @@
1
+ # encoding: utf-8
2
+ require "pathname"
3
+ require "logstash/util/loggable"
4
+
5
+ # Logic from elasticsearch/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
6
+ # Move to ruby to remove any existing dependency
7
+ module LogStash module Instrument module PeriodicPoller
8
+ class Cgroup
9
+ include LogStash::Util::Loggable
10
+
11
+ CONTROL_GROUP_RE = Regexp.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)");
12
+ CONTROLLER_SEPERATOR_RE = ","
13
+
14
+ PROC_SELF_CGROUP_FILE = Pathname.new("/proc/self/cgroup")
15
+ PROC_CGROUP_CPU_DIR = Pathname.new("/sys/fs/cgroup/cpu")
16
+ PROC_CGROUP_CPUACCT_DIR = Pathname.new("/sys/fs/cgroup/cpuacct")
17
+
18
+ GROUP_CPUACCT = "cpuacct"
19
+ CPUACCT_USAGE_FILE = "cpuacct.usage"
20
+
21
+ GROUP_CPU = "cpu"
22
+ CPU_FS_PERIOD_US_FILE = "cpu.cfs_period_us"
23
+ CPU_FS_QUOTA_US_FILE = "cpu.cfs_quota_us"
24
+
25
+ CPU_STATS_FILE = "cpu.stat"
26
+
27
+ class << self
28
+ def are_cgroup_available?
29
+ [::File.exist?(PROC_SELF_CGROUP_FILE),
30
+ Dir.exist?(PROC_CGROUP_CPU_DIR),
31
+ Dir.exist?(PROC_CGROUP_CPUACCT_DIR)].all?
32
+ end
33
+
34
+ def control_groups
35
+ response = {}
36
+
37
+ read_proc_self_cgroup_lines.each do |line|
38
+ matches = CONTROL_GROUP_RE.match(line)
39
+ # multiple controllers, same hierarchy
40
+ controllers = matches[1].split(CONTROLLER_SEPERATOR_RE)
41
+ controllers.each_with_object(response) { |controller| response[controller] = matches[2] }
42
+ end
43
+
44
+ response
45
+ end
46
+
47
+ def read_first_line(path)
48
+ IO.readlines(path).first
49
+ end
50
+
51
+ def cgroup_cpuacct_usage_nanos(control_group)
52
+ read_first_line(::File.join(PROC_CGROUP_CPUACCT_DIR, control_group, CPUACCT_USAGE_FILE)).to_i
53
+ end
54
+
55
+ def cgroup_cpu_fs_period_micros(control_group)
56
+ read_first_line(::File.join(PROC_CGROUP_CPUACCT_DIR, control_group, CPU_FS_PERIOD_US_FILE)).to_i
57
+ end
58
+
59
+ def cgroup_cpu_fs_quota_micros(control_group)
60
+ read_first_line(::File.join(PROC_CGROUP_CPUACCT_DIR, control_group, CPU_FS_QUOTA_US_FILE)).to_i
61
+ end
62
+
63
+ def read_proc_self_cgroup_lines
64
+ IO.readlines(PROC_SELF_CGROUP_FILE)
65
+ end
66
+
67
+ class CpuStats
68
+ attr_reader :number_of_elapsed_periods, :number_of_times_throttled, :time_throttled_nanos
69
+
70
+ def initialize(number_of_elapsed_periods, number_of_times_throttled, time_throttled_nanos)
71
+ @number_of_elapsed_periods = number_of_elapsed_periods
72
+ @number_of_times_throttled = number_of_times_throttled
73
+ @time_throttled_nanos = time_throttled_nanos
74
+ end
75
+ end
76
+
77
+ def read_sys_fs_cgroup_cpuacct_cpu_stat(control_group)
78
+ IO.readlines(::File.join(PROC_CGROUP_CPU_DIR, control_group, CPU_STATS_FILE))
79
+ end
80
+
81
+ def cgroup_cpuacct_cpu_stat(control_group)
82
+ lines = read_sys_fs_cgroup_cpuacct_cpu_stat(control_group);
83
+
84
+ number_of_elapsed_periods = -1;
85
+ number_of_times_throttled = -1;
86
+ time_throttled_nanos = -1;
87
+
88
+ lines.each do |line|
89
+ fields = line.split(/\s+/)
90
+ case fields.first
91
+ when "nr_periods" then number_of_elapsed_periods = fields[1].to_i
92
+ when "nr_throttled" then number_of_times_throttled= fields[1].to_i
93
+ when "throttled_time" then time_throttled_nanos = fields[1].to_i
94
+ end
95
+ end
96
+
97
+ CpuStats.new(number_of_elapsed_periods, number_of_times_throttled, time_throttled_nanos)
98
+ end
99
+
100
+ def get_all
101
+ groups = control_groups
102
+ return if groups.empty?
103
+
104
+ cgroups_stats = {
105
+ :cpuacct => {},
106
+ :cpu => {}
107
+ }
108
+
109
+ cpuacct_group = groups[GROUP_CPUACCT]
110
+ cgroups_stats[:cpuacct][:control_group] = cpuacct_group
111
+ cgroups_stats[:cpuacct][:usage_nanos] = cgroup_cpuacct_usage_nanos(cpuacct_group)
112
+
113
+ cpu_group = groups[GROUP_CPU]
114
+ cgroups_stats[:cpu][:control_group] = cpu_group
115
+ cgroups_stats[:cpu][:cfs_period_micros] = cgroup_cpu_fs_period_micros(cpu_group)
116
+ cgroups_stats[:cpu][:cfs_quota_micros] = cgroup_cpu_fs_quota_micros(cpu_group)
117
+
118
+ cpu_stats = cgroup_cpuacct_cpu_stat(cpu_group)
119
+
120
+ cgroups_stats[:cpu][:stat] = {
121
+ :number_of_elapsed_periods => cpu_stats.number_of_elapsed_periods,
122
+ :number_of_times_throttled => cpu_stats.number_of_times_throttled,
123
+ :time_throttled_nanos => cpu_stats.time_throttled_nanos
124
+ }
125
+
126
+ cgroups_stats
127
+ rescue => e
128
+ logger.debug("Error, cannot retrieve cgroups information", :exception => e.class.name, :message => e.message) if logger.debug?
129
+ nil
130
+ end
131
+
132
+ def get
133
+ are_cgroup_available? ? get_all : nil
134
+ end
135
+ end
136
+ end
137
+ end end end