logstash-core 6.4.1-java → 6.4.2-java

This diff shows the contents of publicly available package versions as they have been released to their public registry. It is provided for informational purposes only and reflects the changes between the two published versions.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 8145b2303fc0c15e198142a1201d06e4462fed9cf1d32b0048c931007e139fe3
- data.tar.gz: 60f764737970347eea41598ff33a2829af597f4a6b2b5efe2597d9b60272a95d
+ metadata.gz: a950c2ba81f4f59944d4c45811741de0b4b1fff78fd9b8e75e30c8070b1f1e5e
+ data.tar.gz: 662ea370b3242107f58d72c3596d55619a96b067495c7e2f3637d307a58c6831
  SHA512:
- metadata.gz: 430c9647d501ce1f1afa84b86f0cb81a8159627a026427c7eb9783f3474cd26a1cab8dcc52cd6da048c42f8e16b63cbdc24cff7dbd98dd94fc5898fa32952add
- data.tar.gz: f67eb745c785e6d8fe2ae666602c754222100ada5e4408f39bd03af4665a1b64f6a1e5cc178919d673289fe4c2fa366cc76aebeb6da711c617083822d1324eb0
+ metadata.gz: d05a2b20dc5119d616711c52d82c7db3f2cbe5f19134b3345d87b191d889d424f2e098f0873dbf0e7853791041e6b4ab7bd142ebbcfa3b9738725692061cdf40
+ data.tar.gz: 27c4c12c68a038b4e0e1db38b003f8def9d746c05c0fb228f63e33e1da6f25506a1933e65c7df4b6dd6d9660448032b7ff937715ad2ea756836395af07acb265
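The digests above cover the two members of the published .gem archive (a .gem file is a plain tar containing metadata.gz, data.tar.gz and checksums.yaml.gz). A minimal Ruby sketch for re-deriving one of these digests locally; the downloaded file name is an assumption, not part of this diff:

    # Sketch: recompute the SHA-256 of data.tar.gz inside a downloaded gem
    # and compare it against checksums.yaml (file name below is assumed).
    require "digest"
    require "rubygems/package"

    File.open("logstash-core-6.4.2-java.gem", "rb") do |io|
      Gem::Package::TarReader.new(io).each do |entry|
        next unless entry.full_name == "data.tar.gz"
        puts Digest::SHA256.hexdigest(entry.read)
      end
    end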
@@ -1,135 +1,218 @@
  # encoding: utf-8
- require "pathname"

  # Logic from elasticsearch/core/src/main/java/org/elasticsearch/monitor/os/OsProbe.java
  # Move to ruby to remove any existing dependency
  module LogStash module Instrument module PeriodicPoller
  class Cgroup
  include LogStash::Util::Loggable
+ class Override
+ attr_reader :key, :value
+ def initialize(key)
+ @key = key
+ @value = java.lang.System.getProperty(@key)
+ end

- CONTROL_GROUP_RE = Regexp.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)");
- CONTROLLER_SEPARATOR_RE = ","
-
- PROC_SELF_CGROUP_FILE = Pathname.new("/proc/self/cgroup")
- PROC_CGROUP_CPU_DIR = Pathname.new("/sys/fs/cgroup/cpu")
- PROC_CGROUP_CPUACCT_DIR = Pathname.new("/sys/fs/cgroup/cpuacct")
-
- GROUP_CPUACCT = "cpuacct"
- CPUACCT_USAGE_FILE = "cpuacct.usage"
-
- GROUP_CPU = "cpu"
- CPU_FS_PERIOD_US_FILE = "cpu.cfs_period_us"
- CPU_FS_QUOTA_US_FILE = "cpu.cfs_quota_us"
+ def nil?
+ value.nil?
+ end

- CPU_STATS_FILE = "cpu.stat"
+ def override(other)
+ nil? ? other : value
+ end
+ end

- class << self
- def are_cgroup_available?
- [::File.exist?(PROC_SELF_CGROUP_FILE),
- Dir.exist?(PROC_CGROUP_CPU_DIR),
- Dir.exist?(PROC_CGROUP_CPUACCT_DIR)].all?
+ ## `/proc/self/cgroup` contents look like this
+ # 5:cpu,cpuacct:/
+ # 4:cpuset:/
+ # 2:net_cls,net_prio:/
+ # 0::/user.slice/user-1000.slice/session-932.scope
+ ## e.g. N:controller:/path-to-info
+ # we find the controller and path
+ # we skip the line without a controller e.g. 0::/path
+ # we assume there are these symlinks:
+ # `/sys/fs/cgroup/cpu` -> `/sys/fs/cgroup/cpu,cpuacct
+ # `/sys/fs/cgroup/cpuacct` -> `/sys/fs/cgroup/cpu,cpuacct
+
+ CGROUP_FILE = "/proc/self/cgroup"
+ CPUACCT_DIR = "/sys/fs/cgroup/cpuacct"
+ CPU_DIR = "/sys/fs/cgroup/cpu"
+ CRITICAL_PATHS = [CGROUP_FILE, CPUACCT_DIR, CPU_DIR]
+
+ CONTROLLER_CPUACCT_LABEL = "cpuacct"
+ CONTROLLER_CPU_LABEL = "cpu"
+
+ class CGroupResources
+ CONTROL_GROUP_RE = Regexp.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)")
+ CONTROLLER_SEPARATOR_RE = ","
+
+ def cgroup_available?
+ # don't cache to ivar, in case the files are mounted after logstash starts??
+ CRITICAL_PATHS.all?{|path| ::File.exist?(path)}
  end

- def control_groups
+ def controller_groups
  response = {}
-
- read_proc_self_cgroup_lines.each do |line|
+ IO.readlines(CGROUP_FILE).each do |line|
  matches = CONTROL_GROUP_RE.match(line)
+ next if matches.nil?
  # multiples controls, same hierarchy
  controllers = matches[1].split(CONTROLLER_SEPARATOR_RE)
- controllers.each_with_object(response) { |controller| response[controller] = matches[2] }
+ controllers.each do |controller|
+ case controller
+ when CONTROLLER_CPU_LABEL
+ response[controller] = CpuResource.new(matches[2])
+ when CONTROLLER_CPUACCT_LABEL
+ response[controller] = CpuAcctResource.new(matches[2])
+ else
+ response[controller] = UnimplementedResource.new(controller, matches[2])
+ end
+ end
  end
-
  response
  end
+ end

- def read_first_line(path)
- IO.readlines(path).first
+ module ControllerResource
+ attr_reader :base_path, :override, :offset_path
+ def implemented?
+ true
  end
-
- def cgroup_cpuacct_usage_nanos(control_group)
- read_first_line(::File.join(PROC_CGROUP_CPUACCT_DIR, control_group, CPUACCT_USAGE_FILE)).to_i
+ private
+ def common_initialize(base, override_key, original_path)
+ @base_path = base
+ # override is needed here for the logging statements
+ @override = Override.new(override_key)
+ @offset_path = @override.override(original_path)
+ @procs = {}
+ @procs[:read_int] = lambda {|path| IO.readlines(path).first.to_i }
+ @procs[:read_lines] = lambda {|path| IO.readlines(path) }
  end
-
- def cgroup_cpu_fs_period_micros(control_group)
- read_first_line(::File.join(PROC_CGROUP_CPUACCT_DIR, control_group, CPU_FS_PERIOD_US_FILE)).to_i
+ def call_if_file_exists(call_key, file, not_found_value)
+ path = ::File.join(@base_path, @offset_path, file)
+ if ::File.exist?(path)
+ @procs[call_key].call(path)
+ else
+ message = "File #{path} cannot be found, "
+ if override.nil?
+ message.concat("try providing an override '#{override.key}' in the Logstash JAVA_OPTS environment variable")
+ else
+ message.concat("even though the '#{override.key}' override is: '#{override.value}'")
+ end
+ logger.debug(message)
+ not_found_value
+ end
  end
+ end

- def cgroup_cpu_fs_quota_micros(control_group)
- read_first_line(::File.join(PROC_CGROUP_CPUACCT_DIR, control_group, CPU_FS_QUOTA_US_FILE)).to_i
+ class CpuAcctResource
+ include LogStash::Util::Loggable
+ include ControllerResource
+ def initialize(original_path)
+ common_initialize(CPUACCT_DIR, "ls.cgroup.cpuacct.path.override", original_path)
  end
-
- def read_proc_self_cgroup_lines
- IO.readlines(PROC_SELF_CGROUP_FILE)
+ def to_hash
+ {:control_group => offset_path, :usage_nanos => cpuacct_usage}
  end
-
- class CpuStats
- attr_reader :number_of_elapsed_periods, :number_of_times_throttled, :time_throttled_nanos
-
- def initialize(number_of_elapsed_periods, number_of_times_throttled, time_throttled_nanos)
- @number_of_elapsed_periods = number_of_elapsed_periods
- @number_of_times_throttled = number_of_times_throttled
- @time_throttled_nanos = time_throttled_nanos
- end
+ private
+ def cpuacct_usage
+ call_if_file_exists(:read_int, "cpuacct.usage", -1)
  end
+ end

- def read_sys_fs_cgroup_cpuacct_cpu_stat(control_group)
- IO.readlines(::File.join(PROC_CGROUP_CPU_DIR, control_group, CPU_STATS_FILE))
+ class CpuResource
+ include LogStash::Util::Loggable
+ include ControllerResource
+ def initialize(original_path)
+ common_initialize(CPU_DIR, "ls.cgroup.cpu.path.override", original_path)
  end
+ def to_hash
+ {
+ :control_group => offset_path,
+ :cfs_period_micros => cfs_period_us,
+ :cfs_quota_micros => cfs_quota_us,
+ :stat => build_cpu_stats_hash
+ }
+ end
+ private
+ def cfs_period_us
+ call_if_file_exists(:read_int, "cpu.cfs_period_us", -1)
+ end
+ def cfs_quota_us
+ call_if_file_exists(:read_int, "cpu.cfs_quota_us", -1)
+ end
+ def build_cpu_stats_hash
+ stats = CpuStats.new
+ lines = call_if_file_exists(:read_lines, "cpu.stat", [])
+ stats.update(lines)
+ stats.to_hash
+ end
+ end

- def cgroup_cpuacct_cpu_stat(control_group)
- lines = read_sys_fs_cgroup_cpuacct_cpu_stat(control_group);
-
- number_of_elapsed_periods = -1;
- number_of_times_throttled = -1;
- time_throttled_nanos = -1;
+ class UnimplementedResource
+ attr_reader :controller, :original_path
+ def initialize(controller, original_path)
+ @controller, @original_path = controller, original_path
+ end
+ def implemented?
+ false
+ end
+ end

+ class CpuStats
+ def initialize
+ @number_of_elapsed_periods = -1
+ @number_of_times_throttled = -1
+ @time_throttled_nanos = -1
+ end
+ def update(lines)
  lines.each do |line|
  fields = line.split(/\s+/)
+ next unless fields.size > 1
  case fields.first
- when "nr_periods" then number_of_elapsed_periods = fields[1].to_i
- when "nr_throttled" then number_of_times_throttled= fields[1].to_i
- when "throttled_time" then time_throttled_nanos = fields[1].to_i
+ when "nr_periods" then @number_of_elapsed_periods = fields[1].to_i
+ when "nr_throttled" then @number_of_times_throttled = fields[1].to_i
+ when "throttled_time" then @time_throttled_nanos = fields[1].to_i
  end
  end
-
- CpuStats.new(number_of_elapsed_periods, number_of_times_throttled, time_throttled_nanos)
  end
+ def to_hash
+ {
+ :number_of_elapsed_periods => @number_of_elapsed_periods,
+ :number_of_times_throttled => @number_of_times_throttled,
+ :time_throttled_nanos => @time_throttled_nanos
+ }
+ end
+ end

- def get_all
- groups = control_groups
- return if groups.empty?
-
- cgroups_stats = {
- :cpuacct => {},
- :cpu => {}
- }
-
- cpuacct_group = groups[GROUP_CPUACCT]
- cgroups_stats[:cpuacct][:control_group] = cpuacct_group
- cgroups_stats[:cpuacct][:usage_nanos] = cgroup_cpuacct_usage_nanos(cpuacct_group)
+ CGROUP_RESOURCES = CGroupResources.new

- cpu_group = groups[GROUP_CPU]
- cgroups_stats[:cpu][:control_group] = cpu_group
- cgroups_stats[:cpu][:cfs_period_micros] = cgroup_cpu_fs_period_micros(cpu_group)
- cgroups_stats[:cpu][:cfs_quota_micros] = cgroup_cpu_fs_quota_micros(cpu_group)
+ class << self
+ def get_all
+ unless CGROUP_RESOURCES.cgroup_available?
+ logger.debug("One or more required cgroup files or directories not found: #{CRITICAL_PATHS.join(', ')}")
+ return
+ end

- cpu_stats = cgroup_cpuacct_cpu_stat(cpu_group)
+ groups = CGROUP_RESOURCES.controller_groups

- cgroups_stats[:cpu][:stat] = {
- :number_of_elapsed_periods => cpu_stats.number_of_elapsed_periods,
- :number_of_times_throttled => cpu_stats.number_of_times_throttled,
- :time_throttled_nanos => cpu_stats.time_throttled_nanos
- }
+ if groups.empty?
+ logger.debug("The main cgroup file did not have any controllers: #{CGROUP_FILE}")
+ return
+ end

- cgroups_stats
+ cgroups_stats = {}
+ groups.each do |name, controller|
+ next unless controller.implemented?
+ cgroups_stats[name.to_sym] = controller.to_hash
+ end
+ cgroups_stats
  rescue => e
- logger.debug("Error, cannot retrieve cgroups information", :exception => e.class.name, :message => e.message) if logger.debug?
+ logger.debug("Error, cannot retrieve cgroups information", :exception => e.class.name, :message => e.message, :backtrace => e.backtrace.take(4)) if logger.debug?
  nil
  end

  def get
- are_cgroup_available? ? get_all : nil
+ get_all
  end
  end
  end
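To make the rewritten poller easier to follow, here is a standalone JRuby sketch of the two ideas the new code combines: the CONTROL_GROUP_RE parse of a /proc/self/cgroup line, and the Override lookup that lets an ls.cgroup.*.path.override system property replace the parsed path. It mirrors the diff above but is not the gem's own class structure; the sample line and path are illustrative.

    # Illustrative only: parse one /proc/self/cgroup line and apply the
    # cpu override property, as the new Cgroup poller does.
    CONTROL_GROUP_RE = Regexp.compile("\\d+:([^:,]+(?:,[^:,]+)?):(/.*)")

    line = "3:cpu:/docker/a1f61"                     # made-up sample line
    if (matches = CONTROL_GROUP_RE.match(line))
      controllers = matches[1].split(",")            # => ["cpu"]
      parsed_path = matches[2]                       # => "/docker/a1f61"
      override    = java.lang.System.getProperty("ls.cgroup.cpu.path.override")
      offset_path = override.nil? ? parsed_path : override
      # cpu.cfs_period_us, cpu.cfs_quota_us and cpu.stat are then read from:
      puts File.join("/sys/fs/cgroup/cpu", offset_path, "cpu.stat")
    end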
@@ -2,127 +2,258 @@
  require "logstash/instrument/periodic_poller/cgroup"
  require "spec_helper"

- describe LogStash::Instrument::PeriodicPoller::Cgroup do
- subject { described_class }
+ LogStash::Logging::Logger::configure_logging("DEBUG")

- context ".are_cgroup_available?" do
- context "all the file exist" do
+ module LogStash module Instrument module PeriodicPoller
+ describe "cgroup stats" do
+ let(:relative_path) { "/docker/a1f61" }
+ let(:proc_self_cgroup_content) do
+ %W(14:name=systemd,holaunlimited:#{relative_path}
+ 13:pids:#{relative_path}
+ 12:hugetlb:#{relative_path}
+ 11:net_prio:#{relative_path}
+ 10:perf_event:#{relative_path}
+ 9:net_cls:#{relative_path}
+ 8:freezer:#{relative_path}
+ 7:devices:#{relative_path}
+ 6:memory:#{relative_path}
+ 5:blkio:#{relative_path}
+ 4:cpuacct:#{relative_path}
+ 3:cpu:#{relative_path}
+ 2:cpuset:#{relative_path}
+ 1:name=openrc:/docker
+ 0::/docker)
+ end
+ describe Cgroup::CGroupResources do
+ subject(:cgroup_resources) { described_class.new }
+ context "method: cgroup_available?" do
+ context "resources exist" do
+ before do
+ allow(::File).to receive(:exist?).and_return(true)
+ end
+ it "returns true" do
+ expect(cgroup_resources.cgroup_available?).to be_truthy
+ end
+ end
+ context "resources do not exist" do
+ subject { described_class.new }
+ before do
+ allow(::File).to receive(:exist?).and_return(true)
+ allow(::File).to receive(:exist?).with("/proc/self/cgroup").and_return(false)
+ end
+ it "returns false" do
+ expect(cgroup_resources.cgroup_available?).to be_falsey
+ end
+ end
+ end
+
+ context "method: controller_groups" do
  before do
- allow(::File).to receive(:exist?).with(subject::PROC_SELF_CGROUP_FILE).and_return(true)
- allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPU_DIR).and_return(true)
- allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPUACCT_DIR).and_return(true)
+ allow(IO).to receive(:readlines).with("/proc/self/cgroup").and_return(proc_self_cgroup_content)
  end

- it "returns true" do
- expect(subject.are_cgroup_available?).to be_truthy
+ it "returns the control groups" do
+ controllers = cgroup_resources.controller_groups
+
+ controller = controllers["cpuacct"]
+ expect(controller).to be_a(Cgroup::CpuAcctResource)
+ expect(controller.base_path).to eq("/sys/fs/cgroup/cpuacct")
+ expect(controller.offset_path).to eq(relative_path)
+ expect(controller.override).to be_a(Cgroup::Override)
+ expect(controller.override.nil?).to be_truthy
+
+ controller = controllers["cpu"]
+ expect(controller).to be_a(Cgroup::CpuResource)
+ expect(controller.base_path).to eq("/sys/fs/cgroup/cpu")
+ expect(controller.offset_path).to eq(relative_path)
+ expect(controller.override).to be_a(Cgroup::Override)
+ expect(controller.override.nil?).to be_truthy
+
+ controller = controllers["name=systemd"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("name=systemd")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["holaunlimited"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("holaunlimited")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["pids"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("pids")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["hugetlb"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("hugetlb")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["net_prio"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("net_prio")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["perf_event"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("perf_event")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["net_cls"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("net_cls")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["freezer"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("freezer")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["devices"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("devices")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["memory"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("memory")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["blkio"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("blkio")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["cpuset"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("cpuset")
+ expect(controller.original_path).to eq(relative_path)
+
+ controller = controllers["name=openrc"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("name=openrc")
+ expect(controller.original_path).to eq("/docker")
  end
  end

- context "not all the file exist" do
+ context "method: controller_groups with override" do
  before do
- allow(::File).to receive(:exist?).with(subject::PROC_SELF_CGROUP_FILE).and_return(true)
- allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPU_DIR).and_return(false)
- allow(::Dir).to receive(:exist?).with(subject::PROC_CGROUP_CPUACCT_DIR).and_return(true)
+ java.lang.System.setProperty("ls.cgroup.cpu.path.override", "/foo")
+ java.lang.System.setProperty("ls.cgroup.cpuacct.path.override", "/bar")
+ allow(IO).to receive(:readlines).with("/proc/self/cgroup").and_return(proc_self_cgroup_content)
  end
+ after do
+ java.lang.System.clearProperty("ls.cgroup.cpu.path.override")
+ java.lang.System.clearProperty("ls.cgroup.cpuacct.path.override")
+ end
+ it "returns overridden control groups" do
+ controllers = cgroup_resources.controller_groups
+ controller = controllers["cpuacct"]
+ expect(controller).to be_a(Cgroup::CpuAcctResource)
+ expect(controller.override.nil?).to be_falsey
+ expect(controller.base_path).to eq("/sys/fs/cgroup/cpuacct")
+ expect(controller.offset_path).to eq("/bar")
+ expect(controller.override).to be_a(Cgroup::Override)
+
+ controller = controllers["cpu"]
+ expect(controller).to be_a(Cgroup::CpuResource)
+ expect(controller.override.nil?).to be_falsey
+ expect(controller.base_path).to eq("/sys/fs/cgroup/cpu")
+ expect(controller.offset_path).to eq("/foo")
+ expect(controller.override).to be_a(Cgroup::Override)

- it "returns false" do
- expect(subject.are_cgroup_available?).to be_falsey
+ controller = controllers["cpuset"]
+ expect(controller).to be_a(Cgroup::UnimplementedResource)
+ expect(controller.controller).to eq("cpuset")
+ expect(controller.original_path).to eq(relative_path)
  end
  end
  end

- context ".control_groups" do
- let(:proc_self_cgroup_content) {
- %w(14:name=systemd,holaunlimited:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 13:pids:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 12:hugetlb:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 11:net_prio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 10:perf_event:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 9:net_cls:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 8:freezer:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 7:devices:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 6:memory:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 5:blkio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 4:cpuacct:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 3:cpu:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 2:cpuset:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 1:name=openrc:/docker) }
-
- before do
- allow(subject).to receive(:read_proc_self_cgroup_lines).and_return(proc_self_cgroup_content)
+ describe Cgroup::CpuAcctResource do
+ subject(:cpuacct_resource) { described_class.new("/bar") }
+ describe "method: to_hash, without override" do
+ context "when the files cannot be found" do
+ it "fills in the hash with minus one" do
+ expect(cpuacct_resource.base_path).to eq("/sys/fs/cgroup/cpuacct")
+ expect(cpuacct_resource.offset_path).to eq("/bar")
+ expect(cpuacct_resource.to_hash).to eq({:control_group=>"/bar", :usage_nanos=>-1})
+ end
+ end
+ end
+ describe "method: to_hash, with override" do
+ before do
+ java.lang.System.setProperty("ls.cgroup.cpuacct.path.override", "/quux")
+ end
+ after do
+ java.lang.System.clearProperty("ls.cgroup.cpuacct.path.override")
+ end
+ context "when the files cannot be found" do
+ it "fills in the hash with minus one" do
+ expect(cpuacct_resource.base_path).to eq("/sys/fs/cgroup/cpuacct")
+ expect(cpuacct_resource.offset_path).to eq("/quux")
+ expect(cpuacct_resource.to_hash).to eq({:control_group=>"/quux", :usage_nanos=>-1})
+ end
+ end
  end
+ end

- it "returns the control groups" do
- expect(subject.control_groups).to match({
- "name=systemd" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "holaunlimited" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "pids" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "hugetlb" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "net_prio" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "perf_event" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "net_cls" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "freezer" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "devices" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "memory" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "blkio" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "cpuacct" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "cpu" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "cpuset" => "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61",
- "name=openrc" => "/docker"
- })
+ describe Cgroup::CpuResource do
+ subject(:cpu_resource) { described_class.new("/bar") }
+ describe "method: fill, without override" do
+ context "when the files cannot be found" do
+ it "fills in the hash with minus one" do
+ expect(cpu_resource.base_path).to eq("/sys/fs/cgroup/cpu")
+ expect(cpu_resource.offset_path).to eq("/bar")
+ expect(cpu_resource.to_hash).to eq({:cfs_period_micros=>-1, :cfs_quota_micros=>-1, :control_group=>"/bar", :stat=>{:number_of_elapsed_periods=>-1, :number_of_times_throttled=>-1, :time_throttled_nanos=>-1}})
+ end
+ end
+ end
+ describe "method: fill, with override" do
+ before do
+ java.lang.System.setProperty("ls.cgroup.cpu.path.override", "/quux")
+ end
+ after do
+ java.lang.System.clearProperty("ls.cgroup.cpu.path.override")
+ end
+ let(:target) { Hash.new }
+ context "when the files cannot be found" do
+ it "fills in the hash with minus one" do
+ expect(cpu_resource.base_path).to eq("/sys/fs/cgroup/cpu")
+ expect(cpu_resource.offset_path).to eq("/quux")
+ expect(cpu_resource.to_hash).to eq({:cfs_period_micros=>-1, :cfs_quota_micros=>-1, :control_group=>"/quux", :stat=>{:number_of_elapsed_periods=>-1, :number_of_times_throttled=>-1, :time_throttled_nanos=>-1}})
+ end
+ end
  end
  end

- context ".get_all" do
- context "when we can retrieve the stats" do
- let(:cpuacct_control_group) { "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61" }
+ describe Cgroup do
+ describe "class method: get_all" do
  let(:cpuacct_usage) { 1982 }
- let(:cpu_control_group) { "/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61" }
  let(:cfs_period_micros) { 500 }
  let(:cfs_quota_micros) { 98 }
  let(:cpu_stats_number_of_periods) { 1 }
  let(:cpu_stats_number_of_time_throttled) { 2 }
  let(:cpu_stats_time_throttled_nanos) { 3 }
- let(:proc_self_cgroup_content) {
- %W(14:name=systemd,holaunlimited:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 13:pids:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 12:hugetlb:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 11:net_prio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 10:perf_event:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 9:net_cls:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 8:freezer:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 7:devices:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 6:memory:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 5:blkio:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 4:cpuacct:#{cpuacct_control_group}
- 3:cpu:#{cpu_control_group}
- 2:cpuset:/docker/a10687343f90e97bbb1f7181bd065a42de96c40c4aa91764a9d526ea30475f61
- 1:name=openrc:/docker) }
- let(:cpu_stat_file_content) {
- [
- "nr_periods #{cpu_stats_number_of_periods}",
- "nr_throttled #{cpu_stats_number_of_time_throttled}",
- "throttled_time #{cpu_stats_time_throttled_nanos}"
- ]
- }
-
+ let(:cpu_stat_file_content) do
+ ["nr_periods #{cpu_stats_number_of_periods}", "nr_throttled #{cpu_stats_number_of_time_throttled}", "throttled_time #{cpu_stats_time_throttled_nanos}"]
+ end
  before do
- allow(subject).to receive(:read_proc_self_cgroup_lines).and_return(proc_self_cgroup_content)
- allow(subject).to receive(:read_sys_fs_cgroup_cpuacct_cpu_stat).and_return(cpu_stat_file_content)
-
- allow(subject).to receive(:cgroup_cpuacct_usage_nanos).with(cpuacct_control_group).and_return(cpuacct_usage)
- allow(subject).to receive(:cgroup_cpu_fs_period_micros).with(cpu_control_group).and_return(cfs_period_micros)
- allow(subject).to receive(:cgroup_cpu_fs_quota_micros).with(cpu_control_group).and_return(cfs_quota_micros)
+ allow(::File).to receive(:exist?).and_return(true)
+ allow(IO).to receive(:readlines).with("/sys/fs/cgroup/cpuacct#{relative_path}/cpuacct.usage").and_return([cpuacct_usage])
+ allow(IO).to receive(:readlines).with("/sys/fs/cgroup/cpu#{relative_path}/cpu.cfs_period_us").and_return([cfs_period_micros])
+ allow(IO).to receive(:readlines).with("/sys/fs/cgroup/cpu#{relative_path}/cpu.cfs_quota_us").and_return([cfs_quota_micros])
+ allow(IO).to receive(:readlines).with("/sys/fs/cgroup/cpu#{relative_path}/cpu.stat").and_return(cpu_stat_file_content)
+ allow(IO).to receive(:readlines).with("/proc/self/cgroup").and_return(proc_self_cgroup_content)
  end
-
  it "returns all the stats" do
- expect(subject.get_all).to match(
+ expect(described_class.get_all).to match(
  :cpuacct => {
- :control_group => cpuacct_control_group,
+ :control_group => relative_path,
  :usage_nanos => cpuacct_usage,
  },
  :cpu => {
- :control_group => cpu_control_group,
+ :control_group => relative_path,
  :cfs_period_micros => cfs_period_micros,
  :cfs_quota_micros => cfs_quota_micros,
  :stat => {
@@ -137,12 +268,14 @@ describe LogStash::Instrument::PeriodicPoller::Cgroup do

  context "when an exception is raised" do
  before do
- allow(subject).to receive(:control_groups).and_raise("Something went wrong")
+ allow(::File).to receive(:exist?).and_return(true)
+ allow(Cgroup::CGROUP_RESOURCES).to receive(:controller_groups).and_raise("Something went wrong")
  end

- it "returns nil" do
- expect(subject.get_all).to be_nil
+ it "method: get_all returns nil" do
+ expect(described_class.get_all).to be_nil
  end
  end
  end
  end
+ end end end
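For reference, the spec above pins down the shape of the hash the poller reports. A hedged usage sketch follows, reusing the spec's illustrative fixture values; the -D flags echo the override properties exercised in the spec (the code's debug message refers to the Logstash JAVA_OPTS environment variable), and the returned numbers are not real readings.

    # Illustrative only: what Cgroup.get returns when the cpu/cpuacct files
    # are readable (values taken from the spec's fixtures, not a real host).
    # The override properties can be supplied to the JVM, e.g.
    #   -Dls.cgroup.cpu.path.override=/custom -Dls.cgroup.cpuacct.path.override=/custom
    stats = LogStash::Instrument::PeriodicPoller::Cgroup.get
    # => {
    #      :cpuacct => { :control_group => "/docker/a1f61", :usage_nanos => 1982 },
    #      :cpu     => { :control_group => "/docker/a1f61",
    #                    :cfs_period_micros => 500,
    #                    :cfs_quota_micros  => 98,
    #                    :stat => { :number_of_elapsed_periods => 1,
    #                               :number_of_times_throttled => 2,
    #                               :time_throttled_nanos      => 3 } }
    #    }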
@@ -1,6 +1,6 @@
  ---
- logstash: 6.4.1
- logstash-core: 6.4.1
+ logstash: 6.4.2
+ logstash-core: 6.4.2
  logstash-core-plugin-api: 2.1.16

  # jruby must reference a *released* version of jruby which can be downloaded from the official download url
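The YAML above is the gem's copy of the component version manifest. A small sketch of reading it; the file name used here is an assumption, not stated in the diff:

    # Assumed file name for the gem's copy of the versions manifest.
    require "yaml"
    versions = YAML.safe_load(File.read("versions-gem-copy.yml"))
    versions["logstash-core"]            # => "6.4.2"
    versions["logstash-core-plugin-api"] # => "2.1.16"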
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: logstash-core
  version: !ruby/object:Gem::Version
- version: 6.4.1
+ version: 6.4.2
  platform: java
  authors:
  - Elastic
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-09-13 00:00:00.000000000 Z
+ date: 2018-09-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  requirement: !ruby/object:Gem::Requirement