logstash-core 5.1.2-java → 5.2.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. checksums.yaml +4 -4
  2. data/gemspec_jars.rb +0 -1
  3. data/lib/logstash-core/logstash-core.jar +0 -0
  4. data/lib/logstash-core/version.rb +1 -1
  5. data/lib/logstash-core_jars.rb +0 -2
  6. data/lib/logstash/agent.rb +26 -10
  7. data/lib/logstash/api/commands/default_metadata.rb +3 -1
  8. data/lib/logstash/api/commands/stats.rb +17 -1
  9. data/lib/logstash/api/modules/node_stats.rb +9 -0
  10. data/lib/logstash/api/modules/stats.rb +3 -2
  11. data/lib/logstash/config/mixin.rb +5 -8
  12. data/lib/logstash/instrument/collector.rb +1 -46
  13. data/lib/logstash/instrument/periodic_poller/base.rb +2 -0
  14. data/lib/logstash/instrument/periodic_poller/cgroup.rb +137 -0
  15. data/lib/logstash/instrument/periodic_poller/jvm.rb +1 -2
  16. data/lib/logstash/instrument/periodic_poller/os.rb +21 -0
  17. data/lib/logstash/instrument/periodic_poller/pq.rb +20 -0
  18. data/lib/logstash/instrument/periodic_pollers.rb +4 -2
  19. data/lib/logstash/output_delegator.rb +2 -0
  20. data/lib/logstash/pipeline.rb +31 -2
  21. data/lib/logstash/runner.rb +6 -1
  22. data/lib/logstash/util/wrapped_acked_queue.rb +11 -0
  23. data/lib/logstash/util/wrapped_synchronous_queue.rb +9 -0
  24. data/lib/logstash/version.rb +1 -1
  25. data/lib/logstash/webserver.rb +9 -1
  26. data/locales/en.yml +0 -3
  27. data/spec/api/lib/api/node_stats_spec.rb +5 -1
  28. data/spec/api/spec_helper.rb +3 -1
  29. data/spec/logstash/agent_spec.rb +2 -0
  30. data/spec/logstash/instrument/collector_spec.rb +4 -0
  31. data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb +148 -0
  32. data/spec/logstash/instrument/periodic_poller/os_spec.rb +85 -0
  33. data/spec/logstash/output_delegator_spec.rb +12 -4
  34. data/spec/logstash/pipeline_reporter_spec.rb +2 -26
  35. data/spec/logstash/pipeline_spec.rb +102 -40
  36. data/spec/logstash/plugin_spec.rb +2 -6
  37. data/spec/logstash/util/wrapped_synchronous_queue_spec.rb +34 -4
  38. data/spec/support/mocks_classes.rb +2 -2
  39. metadata +12 -7
data/lib/logstash/instrument/periodic_poller/jvm.rb CHANGED
@@ -39,7 +39,6 @@ module LogStash module Instrument module PeriodicPoller
 
     def initialize(metric, options = {})
       super(metric, options)
-      @metric = metric
       @load_average = LoadAverage.create
     end
 
@@ -114,7 +113,7 @@ module LogStash module Instrument module PeriodicPoller
 
       metric.gauge([:jvm, :process, :cpu], :load_average, load_average) unless load_average.nil?
     end
-
+
     def collect_jvm_metrics(data)
       runtime_mx_bean = ManagementFactory.getRuntimeMXBean()
       metric.gauge([:jvm], :uptime_in_millis, runtime_mx_bean.getUptime())
data/lib/logstash/instrument/periodic_poller/os.rb CHANGED
@@ -1,5 +1,6 @@
 # encoding: utf-8
 require "logstash/instrument/periodic_poller/base"
+require "logstash/instrument/periodic_poller/cgroup"
 
 module LogStash module Instrument module PeriodicPoller
   class Os < Base
@@ -8,6 +9,26 @@ module LogStash module Instrument module PeriodicPoller
     end
 
     def collect
+      collect_cgroup
+    end
+
+    def collect_cgroup
+      if stats = Cgroup.get
+        save_metric([:os], :cgroup, stats)
+      end
+    end
+
+    # Recursive function to create the Cgroups values form the created hash
+    def save_metric(namespace, k, v)
+      if v.is_a?(Hash)
+        v.each do |new_key, new_value|
+          n = namespace.dup
+          n << k.to_sym
+          save_metric(n, new_key, new_value)
+        end
+      else
+        metric.gauge(namespace, k.to_sym, v)
+      end
     end
   end
 end; end; end
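
The new Os#collect_cgroup step feeds whatever Cgroup.get returns into save_metric, which recursively flattens the nested hash into individual gauges, extending the namespace with each hash key and using the leaf key as the metric name. A standalone sketch of that flattening (the input hash and its values below are made up, and puts stands in for metric.gauge):

# Illustration only: mirrors the recursion in Os#save_metric above.
def save_metric(namespace, key, value)
  if value.is_a?(Hash)
    value.each do |new_key, new_value|
      save_metric(namespace + [key.to_sym], new_key, new_value)
    end
  else
    # In the poller this is metric.gauge(namespace, key.to_sym, value)
    puts "gauge(#{namespace.inspect}, #{key.inspect}, #{value.inspect})"
  end
end

stats = { :cpu => { :control_group => "/docker/abc", :stat => { :number_of_elapsed_periods => 1 } } } # hypothetical
save_metric([:os], :cgroup, stats)
# gauge([:os, :cgroup, :cpu], :control_group, "/docker/abc")
# gauge([:os, :cgroup, :cpu, :stat], :number_of_elapsed_periods, 1)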
data/lib/logstash/instrument/periodic_poller/pq.rb ADDED
@@ -0,0 +1,20 @@
+# encoding: utf-8
+require "logstash/instrument/periodic_poller/base"
+
+module LogStash module Instrument module PeriodicPoller
+  class PersistentQueue < Base
+    def initialize(metric, queue_type, agent, options = {})
+      super(metric, options)
+      @metric = metric
+      @queue_type = queue_type
+      @agent = agent
+    end
+
+    def collect
+      pipeline_id, pipeline = @agent.running_pipelines.first
+      unless pipeline.nil?
+        pipeline.collect_stats
+      end
+    end
+  end
+end; end; end
data/lib/logstash/instrument/periodic_pollers.rb CHANGED
@@ -1,6 +1,7 @@
 # encoding: utf-8
 require "logstash/instrument/periodic_poller/os"
 require "logstash/instrument/periodic_poller/jvm"
+require "logstash/instrument/periodic_poller/pq"
 
 module LogStash module Instrument
   # Each PeriodPoller manager his own thread to do the poller
@@ -9,10 +10,11 @@ module LogStash module Instrument
   class PeriodicPollers
     attr_reader :metric
 
-    def initialize(metric)
+    def initialize(metric, queue_type, pipelines)
      @metric = metric
      @periodic_pollers = [PeriodicPoller::Os.new(metric),
-                          PeriodicPoller::JVM.new(metric)]
+                          PeriodicPoller::JVM.new(metric),
+                          PeriodicPoller::PersistentQueue.new(metric, queue_type, pipelines)]
     end
 
     def start
data/lib/logstash/output_delegator.rb CHANGED
@@ -39,7 +39,9 @@ module LogStash class OutputDelegator
 
   def multi_receive(events)
     @metric_events.increment(:in, events.length)
+    clock = @metric_events.time(:duration_in_millis)
     @strategy.multi_receive(events)
+    clock.stop
     @metric_events.increment(:out, events.length)
   end
 
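
The delegator now wraps only the strategy call in a timer, so duration_in_millis reflects time spent in the output plugin rather than in the surrounding counter bookkeeping. A minimal sketch of the same start/stop pattern, using a hypothetical Stopwatch in place of the clock returned by @metric_events.time(:duration_in_millis):

# Hypothetical stand-in for the metric clock; not the logstash API.
class Stopwatch
  def initialize
    @started_at = Process.clock_gettime(Process::CLOCK_MONOTONIC, :millisecond)
  end

  def stop
    Process.clock_gettime(Process::CLOCK_MONOTONIC, :millisecond) - @started_at
  end
end

clock = Stopwatch.new
sleep(0.05)             # stands in for @strategy.multi_receive(events)
elapsed = clock.stop    # only the wrapped call contributes to the duration
puts "duration_in_millis += #{elapsed}"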
data/lib/logstash/pipeline.rb CHANGED
@@ -40,7 +40,8 @@ module LogStash; class Pipeline
     :settings,
     :metric,
     :filter_queue_client,
-    :input_queue_client
+    :input_queue_client,
+    :queue
 
   MAX_INFLIGHT_WARN_THRESHOLD = 10_000
 
@@ -69,7 +70,11 @@ module LogStash; class Pipeline
 
     # This needs to be configured before we evaluate the code to make
     # sure the metric instance is correctly send to the plugins to make the namespace scoping work
-    @metric = namespaced_metric.nil? ? Instrument::NullMetric.new : namespaced_metric
+    @metric = if namespaced_metric
+      settings.get("metric.collect") ? namespaced_metric : Instrument::NullMetric.new(namespaced_metric.collector)
+    else
+      Instrument::NullMetric.new
+    end
 
     grammar = LogStashConfigParser.new
     @config = grammar.parse(config_str)
@@ -549,6 +554,30 @@ module LogStash; class Pipeline
     end
   end
 
+  def collect_stats
+    pipeline_metric = @metric.namespace([:stats, :pipelines, pipeline_id.to_s.to_sym, :queue])
+    pipeline_metric.gauge(:type, settings.get("queue.type"))
+
+    if @queue.is_a?(LogStash::Util::WrappedAckedQueue) && @queue.queue.is_a?(LogStash::AckedQueue)
+      queue = @queue.queue
+      dir_path = queue.dir_path
+      file_store = Files.get_file_store(Paths.get(dir_path))
+
+      pipeline_metric.namespace([:capacity]).tap do |n|
+        n.gauge(:page_capacity_in_bytes, queue.page_capacity)
+        n.gauge(:max_queue_size_in_bytes, queue.max_size_in_bytes)
+        n.gauge(:max_unread_events, queue.max_unread_events)
+      end
+      pipeline_metric.namespace([:data]).tap do |n|
+        n.gauge(:free_space_in_bytes, file_store.get_unallocated_space)
+        n.gauge(:storage_type, file_store.type)
+        n.gauge(:path, dir_path)
+      end
+
+      pipeline_metric.gauge(:events, queue.unread_count)
+    end
+  end
+
   # Sometimes we log stuff that will dump the pipeline which may contain
   # sensitive information (like the raw syntax tree which can contain passwords)
   # We want to hide most of what's in here
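
When the persistent (acked) queue is in use, the gauges registered by collect_stats build a queue section under the pipeline's stats namespace. A sketch of the resulting shape, with the keys taken from the gauges above and the values invented purely for illustration:

# Approximate shape only; all values here are hypothetical.
{
  :stats => {
    :pipelines => {
      :main => {
        :queue => {
          :type => "persisted",
          :capacity => {
            :page_capacity_in_bytes  => 262_144_000,
            :max_queue_size_in_bytes => 1_073_741_824,
            :max_unread_events       => 0
          },
          :data => {
            :free_space_in_bytes => 93_781_504_000,
            :storage_type        => "ext4",
            :path                => "/var/lib/logstash/queue"
          },
          :events => 0
        }
      }
    }
  }
}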
data/lib/logstash/runner.rb CHANGED
@@ -170,7 +170,12 @@ class LogStash::Runner < Clamp::StrictCommand
   rescue => e
     # abort unless we're just looking for the help
     unless cli_help?(args)
-      $stderr.puts "ERROR: Failed to load settings file from \"path.settings\". Aborting... path.setting=#{LogStash::SETTINGS.get("path.settings")}, exception=#{e.class}, message=>#{e.message}"
+      if e.kind_of?(Psych::Exception)
+        yaml_file_path = ::File.join(LogStash::SETTINGS.get("path.settings"), "logstash.yml")
+        $stderr.puts "ERROR: Failed to parse YAML file \"#{yaml_file_path}\". Please confirm if the YAML structure is valid (e.g. look for incorrect usage of whitespace or indentation). Aborting... parser_error=>#{e.message}"
+      else
+        $stderr.puts "ERROR: Failed to load settings file from \"path.settings\". Aborting... path.setting=#{LogStash::SETTINGS.get("path.settings")}, exception=#{e.class}, message=>#{e.message}"
+      end
       return 1
     end
   end
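
The new branch distinguishes YAML parse failures (Psych::Exception) from other settings-load errors and points the user at logstash.yml. A quick sketch (not part of the diff) showing the kind of mis-indented settings file that takes this path; Psych::SyntaxError is a subclass of Psych::Exception, so the rescue above catches it:

# A stray leading space on the second key makes the document invalid YAML.
require "psych"

bad_yaml = <<~YAML
  pipeline.workers: 2
   pipeline.batch.size: 125
YAML

begin
  Psych.load(bad_yaml)
rescue Psych::Exception => e
  puts "parser_error=>#{e.message}"   # e.g. "mapping values are not allowed in this context ..."
end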
data/lib/logstash/util/wrapped_acked_queue.rb CHANGED
@@ -33,6 +33,8 @@ module LogStash; module Util
 
     private_class_method :new
 
+    attr_reader :queue
+
     def with_queue(queue)
       @queue = queue
       @queue.open
@@ -130,10 +132,19 @@ module LogStash; module Util
 
     def set_events_metric(metric)
       @event_metric = metric
+      define_initial_metrics_values(@event_metric)
     end
 
     def set_pipeline_metric(metric)
       @pipeline_metric = metric
+      define_initial_metrics_values(@pipeline_metric)
+    end
+
+    def define_initial_metrics_values(namespaced_metric)
+      namespaced_metric.report_time(:duration_in_millis, 0)
+      namespaced_metric.increment(:filtered, 0)
+      namespaced_metric.increment(:in, 0)
+      namespaced_metric.increment(:out, 0)
     end
 
     def inflight_batches
data/lib/logstash/util/wrapped_synchronous_queue.rb CHANGED
@@ -79,10 +79,19 @@ module LogStash; module Util
 
     def set_events_metric(metric)
       @event_metric = metric
+      define_initial_metrics_values(@event_metric)
     end
 
     def set_pipeline_metric(metric)
       @pipeline_metric = metric
+      define_initial_metrics_values(@pipeline_metric)
+    end
+
+    def define_initial_metrics_values(namespaced_metric)
+      namespaced_metric.report_time(:duration_in_millis, 0)
+      namespaced_metric.increment(:filtered, 0)
+      namespaced_metric.increment(:in, 0)
+      namespaced_metric.increment(:out, 0)
    end
 
    def inflight_batches
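
Both queue clients now pre-seed duration_in_millis, filtered, in, and out at zero as soon as a metric namespace is attached, so the stats API exposes those keys before any batch has flowed through. A hypothetical counter store (not the logstash collector) showing why an increment of zero is still useful:

# Incrementing by 0 registers the key immediately, so readers see 0
# rather than a missing field.
class CounterStore
  def initialize
    @counters = {}
  end

  def increment(key, value = 1)
    @counters[key] = @counters.fetch(key, 0) + value
  end

  def to_h
    @counters.dup
  end
end

store = CounterStore.new
store.increment(:in, 0)
store.increment(:out, 0)
p store.to_h   # => {:in=>0, :out=>0}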
data/lib/logstash/version.rb CHANGED
@@ -11,4 +11,4 @@
 # eventually this file should be in the root logstash lib fir and dependencies in logstash-core should be
 # fixed.
 
-LOGSTASH_VERSION = "5.1.2"
+LOGSTASH_VERSION = "5.2.0"
data/lib/logstash/webserver.rb CHANGED
@@ -87,9 +87,17 @@ module LogStash
       @server = ::Puma::Server.new(app, events)
       @server.add_tcp_listener(http_host, port)
 
-      logger.info("Successfully started Logstash API endpoint", :port => @port)
+      logger.info("Successfully started Logstash API endpoint", :port => port)
+
+      set_http_address_metric("#{http_host}:#{port}")
 
       @server.run.join
     end
+
+    private
+    def set_http_address_metric(value)
+      return unless @agent.metric
+      @agent.metric.gauge([], :http_address, value)
+    end
   end
 end
data/locales/en.yml CHANGED
@@ -44,9 +44,6 @@ en:
         %{plugin} plugin is using the 'milestone' method to declare the version
         of the plugin this method is deprecated in favor of declaring the
         version inside the gemspec.
-      no_version: >-
-        %{name} plugin doesn't have a version. This plugin isn't well
-        supported by the community and likely has no maintainer.
       version:
         0-9-x:
           Using version 0.9.x %{type} plugin '%{name}'. This plugin should work but
data/spec/api/lib/api/node_stats_spec.rb CHANGED
@@ -80,7 +80,11 @@ describe LogStash::Api::Modules::NodeStats do
        "filtered" => Numeric,
        "out" => Numeric
      }
-    }
+    },
+    "reloads" => {
+      "successes" => Numeric,
+      "failures" => Numeric
+    }
   }
 
   test_api_and_resources(root_structure)
data/spec/api/spec_helper.rb CHANGED
@@ -20,7 +20,9 @@ end
 module LogStash
   class DummyAgent < Agent
     def start_webserver
-      @webserver = Struct.new(:address).new("#{Socket.gethostname}:#{::LogStash::WebServer::DEFAULT_PORTS.first}")
+      http_address = "#{Socket.gethostname}:#{::LogStash::WebServer::DEFAULT_PORTS.first}"
+      @webserver = Struct.new(:address).new(http_address)
+      self.metric.gauge([], :http_address, http_address)
     end
     def stop_webserver; end
   end
data/spec/logstash/agent_spec.rb CHANGED
@@ -479,7 +479,9 @@ describe LogStash::Agent do
     it "increases the successful reload count" do
       snapshot = subject.metric.collector.snapshot_metric
       value = snapshot.metric_store.get_with_path("/stats/pipelines")[:stats][:pipelines][:main][:reloads][:successes].value
+      instance_value = snapshot.metric_store.get_with_path("/stats")[:stats][:reloads][:successes].value
       expect(value).to eq(1)
+      expect(instance_value).to eq(1)
     end
 
     it "does not set the failure reload timestamp" do
data/spec/logstash/instrument/collector_spec.rb CHANGED
@@ -45,5 +45,9 @@ describe LogStash::Instrument::Collector do
     it "return a `LogStash::Instrument::MetricStore`" do
       expect(subject.snapshot_metric).to be_kind_of(LogStash::Instrument::Snapshot)
     end
+
+    it "returns a clone of the metric store" do
+      expect(subject.snapshot_metric).not_to eq(subject.snapshot_metric)
+    end
   end
 end
data/spec/logstash/instrument/periodic_poller/cgroup_spec.rb ADDED
@@ -0,0 +1,148 @@
+# encoding: utf-8
+require "logstash/instrument/periodic_poller/cgroup"
+require "spec_helper"
+
+describe LogStash::Instrument::PeriodicPoller::Cgroup do
+  subject { described_class }
+
+  context ".are_cgroup_available?" do
+    context "all the file exist" do
+      before do
+        allow(::File).to receive(:exist?).with(subject::PROC_SELF_CGROUP_FILE).and_return(true)
+        allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPU_DIR).and_return(true)
+        allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPUACCT_DIR).and_return(true)
+      end
+
+      it "returns true" do
+        expect(subject.are_cgroup_available?).to be_truthy
+      end
+    end
+
+    context "not all the file exist" do
+      before do
+        allow(::File).to receive(:exist?).with(subject::PROC_SELF_CGROUP_FILE).and_return(true)
+        allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPU_DIR).and_return(false)
+        allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPUACCT_DIR).and_return(true)
+      end
+
+      it "returns false" do
+        expect(subject.are_cgroup_available?).to be_falsey
+      end
+    end
+  end
+
+  context ".control_groups" do
+    let(:proc_self_cgroup_content) {
+      %w(14:name=systemd,holaunlimited:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         13:pids:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         12:hugetlb:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         11:net_prio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         10:perf_event:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         9:net_cls:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         8:freezer:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         7:devices:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         6:memory:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         5:blkio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         4:cpuacct:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         3:cpu:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         2:cpuset:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+         1:name=openrc:/docker) }
+
+    before do
+      allow(subject).to receive(:read_proc_self_cgroup_lines).and_return(proc_self_cgroup_content)
+    end
+
+    it "returns the control groups" do
+      expect(subject.control_groups).to match({
+        "name=systemd" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "holaunlimited" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "pids" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "hugetlb" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "net_prio" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "perf_event" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "net_cls" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "freezer" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "devices" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "memory" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "blkio" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "cpuacct" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "cpu" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "cpuset" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
+        "name=openrc" => "/docker"
+      })
+    end
+  end
+
+  context ".get_all" do
+    context "when we can retreive the stats" do
+      let(:cpuacct_control_group) { "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61" }
+      let(:cpuacct_usage) { 1982 }
+      let(:cpu_control_group) { "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61" }
+      let(:cfs_period_micros) { 500 }
+      let(:cfs_quota_micros) { 98 }
+      let(:cpu_stats_number_of_periods) { 1 }
+      let(:cpu_stats_number_of_time_throttled) { 2 }
+      let(:cpu_stats_time_throttled_nanos) { 3 }
+      let(:proc_self_cgroup_content) {
+        %W(14:name=systemd,holaunlimited:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           13:pids:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           12:hugetlb:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           11:net_prio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           10:perf_event:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           9:net_cls:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           8:freezer:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           7:devices:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           6:memory:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           5:blkio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           4:cpuacct:#{cpuacct_control_group}
+           3:cpu:#{cpu_control_group}
+           2:cpuset:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
+           1:name=openrc:/docker) }
+      let(:cpu_stat_file_content) {
+        [
+          "nr_periods #{cpu_stats_number_of_periods}",
+          "nr_throttled #{cpu_stats_number_of_time_throttled}",
+          "throttled_time #{cpu_stats_time_throttled_nanos}"
+        ]
+      }
+
+      before do
+        allow(subject).to receive(:read_proc_self_cgroup_lines).and_return(proc_self_cgroup_content)
+        allow(subject).to receive(:read_sys_fs_cgroup_cpuacct_cpu_stat).and_return(cpu_stat_file_content)
+
+        allow(subject).to receive(:cgroup_cpuacct_usage_nanos).with(cpuacct_control_group).and_return(cpuacct_usage)
+        allow(subject).to receive(:cgroup_cpu_fs_period_micros).with(cpu_control_group).and_return(cfs_period_micros)
+        allow(subject).to receive(:cgroup_cpu_fs_quota_micros).with(cpu_control_group).and_return(cfs_quota_micros)
+      end
+
+      it "returns all the stats" do
+        expect(subject.get_all).to match(
+          :cpuacct => {
+            :control_group => cpuacct_control_group,
+            :usage_nanos => cpuacct_usage,
+          },
+          :cpu => {
+            :control_group => cpu_control_group,
+            :cfs_period_micros => cfs_period_micros,
+            :cfs_quota_micros => cfs_quota_micros,
+            :stat => {
+              :number_of_elapsed_periods => cpu_stats_number_of_periods,
+              :number_of_times_throttled => cpu_stats_number_of_time_throttled,
+              :time_throttled_nanos => cpu_stats_time_throttled_nanos
+            }
+          }
+        )
+      end
+    end
+
+    context "when an exception is raised" do
+      before do
+        allow(subject).to receive(:control_groups).and_raise("Something went wrong")
+      end
+
+      it "returns nil" do
+        expect(subject.get_all).to be_nil
+      end
+    end
+  end
+end