beaker-benchmark 0.0.1 → 0.0.2
This diff shows the content changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/acceptance/tests/measure_perf_on.rb +17 -0
- data/lib/beaker-benchmark/helpers.rb +84 -38
- data/lib/beaker-benchmark/version.rb +1 -1
- data/spec/beaker-benchmark/helpers_spec.rb +0 -31
- data/spec/beaker-benchmark/performance_result_spec.rb +68 -0
- metadata +5 -3
- data/acceptance/tests/first.rb +0 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d3d7e588bdbad5a3f018e49fa02f007aa9a6b80c
+  data.tar.gz: b2f79746d5e092f9e4fbdca47fe02010167a7e17
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ad55ce1d5233f656f454b359491a12a21c04ece6dff547d6b0e687eb04fbdba234a29ee72038d0da1ac8f09eda556bee80840c38729475c3f1ac8b00e08426e1
+  data.tar.gz: ec1b08f2300581e6e8f20692b5f85084991561b6c9d611512cc9adc8143d6b41f32b28ae65a89e3e57941b28dd0f1062eaffc4c37a4f689eab1c2e70d34ccf24
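For reference, a .gem file is itself a tar archive whose top-level entries include the metadata.gz and data.tar.gz digested above. A minimal sketch of reproducing the SHA512 values from a locally fetched copy (the filename is assumed, not part of this diff):

require 'digest'
require 'rubygems/package'

# Walk the top-level tar entries of the fetched .gem and digest the two
# files whose checksums appear in checksums.yaml.
File.open('beaker-benchmark-0.0.2.gem', 'rb') do |gem_io|
  Gem::Package::TarReader.new(gem_io).each do |entry|
    next unless %w[metadata.gz data.tar.gz].include?(entry.full_name)
    puts "#{entry.full_name}: #{Digest::SHA512.hexdigest(entry.read)}"
  end
end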
data/acceptance/tests/measure_perf_on.rb
ADDED
@@ -0,0 +1,17 @@
+require 'beaker-benchmark'
+require 'fileutils'
+
+# Acceptance level testing goes into files in the tests directory like this one,
+# Each file corresponding to a new test made up of individual testing steps
+test_name "measure_perf_on test" do
+  if Dir.exist?('tmp/atop')
+    FileUtils.rm_r('tmp/atop')
+  end
+  result = measure_perf_on master, 'sleep test' do
+    on master, 'sleep 10'
+  end
+
+  assert(File.exist?('tmp/atop/ubuntu-server-1404-x64/atop_log_measure_perf_on_test_sleep_test.log'))
+  result.log_csv
+  assert(File.exist?('tmp/atop/ubuntu-server-1404-x64/atop_log_measure_perf_on_test_sleep_test.csv'))
+end
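The acceptance test above leans on the reworked helpers that follow. As a quick orientation, a sketch of the call pattern assembled from this diff (not additional released code; passing true for include_processes is an illustrative variation, and the accessors come from PerformanceResult further down):

# Measure the block's impact on the host, including per-process stats.
result = measure_perf_on(master, 'sleep test', true) do
  on master, 'sleep 10'   # the workload being measured
end

result.avg_cpu      # mean CPU% across the atop samples taken during the block
result.avg_mem      # mean of the sampled SWP values
result.processes    # per-PID averages, populated when include_processes is true
result.log_csv      # writes tmp/atop/<hostname>/atop_log_<action_name>.csv, returns the path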
data/lib/beaker-benchmark/helpers.rb
CHANGED
@@ -1,11 +1,36 @@
 require 'csv'
+require 'fileutils'
 
 module Beaker
   module DSL
     module BeakerBenchmark
       module Helpers
+        include Beaker::HostPrebuiltSteps
+        # Column indexes for atop CSV style output
+        MEASURE_TYPE_INDEX = 0
 
-
+        SYSTEM_CPU_INDEX = 8
+        USR_CPU_INDEX = 9
+        IOWAIT_CPU_INDEX = 12
+        IDLE_CPU_INDEX = 11
+
+        MEM_INDEX = 10
+
+        DISK_READ_INDEX = 9
+        DISK_WRITE_INDEX = 11
+
+        PROC_PID_INDEX = 6
+
+        PROC_CPU_INDEX = 16
+
+        PROC_MEM_INDEX = 11
+
+        PROC_DISK_READ_INDEX = 11
+        PROC_DISK_WRITE_INDEX = 13
+
+        TMP_DIR = 'tmp/atop'
+
         # Example usage:
         # test_name('measure_perf_on_puppetserver_start') {
         #   on(master, 'puppet resource service pe-puppetserver ensure=stopped')
         #   result = measure_perf_on(master, 'start_pe-puppetserver', true) {
@@ -21,63 +46,69 @@ module Beaker
           # Append action name to test case name if test name is available
           action_name = metadata[:case][:name] + "_#{action_name}" if defined? metadata && metadata[:case] && metadata[:case][:name]
 
-
-
-          start_monitoring(infrastructure_host, atop_log, include_processes)
+          start_monitoring(infrastructure_host, action_name, include_processes)
 
           yield
 
-          stop_monitoring(infrastructure_host,
+          stop_monitoring(infrastructure_host, include_processes.nil? ? nil : process_regex)
         end
 
         def setup_atop(infrastructure_host)
-          @benchmark_tmpdir = Dir.mktmpdir
           # Only install atop once per host
           unless infrastructure_host.check_for_package('atop')
-            add_el_extras(infrastructure_host)
+            add_el_extras(infrastructure_host, @options)
             infrastructure_host.install_package('atop')
           end
         end
 
-        def start_monitoring(infrastructure_host,
+        def start_monitoring(infrastructure_host, action_name, include_processes=false, sample_interval=1)
+          raise('Monitoring already in progress, call stop_monitoring before calling start_monitoring a second time') unless @beaker_benchmark_start.nil?
+          @atop_log = "atop_log_#{action_name.downcase.gsub(/[^a-z0-9]/i, '_')}.log"
+          @action_name = action_name
           setup_atop(infrastructure_host)
           additional_args = ''
           additional_args = ',PRC,PRM,PRD' if include_processes
-          atop_cmd = "sh -c 'nohup atop -P CPU,SWP,DSK#{additional_args} -i #{sample_interval} >
+          atop_cmd = "sh -c 'nohup atop -P CPU,SWP,DSK#{additional_args} -i #{sample_interval} > #{@atop_log} 2>&1 &'"
 
           on(infrastructure_host, atop_cmd)
           @beaker_benchmark_start = Time.now
         end
 
-        def stop_monitoring(infrastructure_host,
-
-
-
-
-
+        def stop_monitoring(infrastructure_host, process_regex='.*')
+          begin
+            if defined?@beaker_benchmark_start && !@beaker_benchmark_start.nil?
+              duration = Time.now - @beaker_benchmark_start
+            else
+              raise('No monitoring in progress, call start_monitoring before calling stop_monitoring')
+            end
 
-
-
-
-
+            # The atop process sticks around unless killed
+            # It can also take some time to kill depending on how long it has been running and sampling rate.
+            retry_on infrastructure_host, 'pkill -15 -f atop', {:max_retries => 3, :retry_interval => 5}
+            set_processes_to_monitor(infrastructure_host, process_regex)
+            parse_atop_log(infrastructure_host, duration)
+          ensure
+            @beaker_benchmark_start = nil
+          end
         end
 
-        def parse_atop_log(infrastructure_host,
-          unless infrastructure_host.file_exist?(
-            raise("atop log does not exist at #{
+        def parse_atop_log(infrastructure_host, duration)
+          unless infrastructure_host.file_exist?(@atop_log)
+            raise("atop log does not exist at #{@atop_log}")
          end
 
-
+          log_dir = "#{TMP_DIR}/#{infrastructure_host.hostname}"
+          FileUtils::mkdir_p log_dir unless Dir.exist? log_dir
+          scp_from(infrastructure_host, @atop_log, log_dir)
           cpu_usage = []
           mem_usage = []
           disk_read = []
           disk_write = []
 
-          process_cpu = []
           skip = true
-          CSV.parse(File.read(
+          CSV.parse(File.read(File.expand_path(File.basename(@atop_log), log_dir)), { :col_sep => ' ' }) do |row|
            #skip the first entry, until the first separator 'SEP'.
-            measure_type = row[
+            measure_type = row[MEASURE_TYPE_INDEX]
            if skip
              skip = (measure_type != 'SEP')
              next
@@ -85,28 +116,28 @@ module Beaker
            case measure_type
            when 'CPU'
              # system + usr + iowait
-              cpu_active = row[
+              cpu_active = row[SYSTEM_CPU_INDEX].to_i + row[USR_CPU_INDEX].to_i + row[IOWAIT_CPU_INDEX].to_i
              # active + idle
-              cpu_total = cpu_active + row[
+              cpu_total = cpu_active + row[IDLE_CPU_INDEX].to_i
              cpu_percent = cpu_active * 100 / cpu_total
              cpu_usage.push(cpu_percent)
            when 'SWP'
-              mem_usage.push(row[
+              mem_usage.push(row[MEM_INDEX].to_i)
            when 'DSK'
-              disk_read.push(row[
-              disk_write.push(row[
+              disk_read.push(row[DISK_READ_INDEX].to_i)
+              disk_write.push(row[DISK_WRITE_INDEX].to_i)
            when 'PRC'
-              add_process_measure(:cpu_usage, row[
+              add_process_measure(:cpu_usage, row[PROC_PID_INDEX], row[PROC_CPU_INDEX].to_i)
            when 'PRM'
-              add_process_measure(:mem_usage, row[
+              add_process_measure(:mem_usage, row[PROC_PID_INDEX], row[PROC_MEM_INDEX].to_i)
            when 'PRD'
              # TODO: investigate why atop always shows disk_read as 0
-              # add_process_measure(:disk_read, row[
-              add_process_measure(:disk_write, row[
+              # add_process_measure(:disk_read, row[PROC_PID_INDEX], row[PROC_DISK_READ_INDEX].to_i)
+              add_process_measure(:disk_write, row[PROC_PID_INDEX], row[PROC_DISK_WRITE_INDEX].to_i)
            end
          end
 
-          PerformanceResult.new({ :cpu => cpu_usage, :mem => mem_usage, :disk_read => disk_read, :disk_write => disk_write, :action => action_name, :duration => duration, :processes => @processes_to_monitor, :logger => @logger})
+          PerformanceResult.new({ :cpu => cpu_usage, :mem => mem_usage, :disk_read => disk_read, :disk_write => disk_write, :action => @action_name, :duration => duration, :processes => @processes_to_monitor, :logger => @logger, :hostname => infrastructure_host})
        end
 
        def set_processes_to_monitor(infrastructure_host, process_regex)
@@ -120,7 +151,6 @@ module Beaker
              @processes_to_monitor[line.split(' ').first] = { :cmd => line.split(' ')[1..-1].join(' '), :cpu_usage => [], :mem_usage => [], :disk_read => [], :disk_write => [] }
            end
          end
-          @logger.info result.stdout
        end
 
        def add_process_measure measure_type, pid, value
@@ -136,7 +166,7 @@ module Beaker
        # Process pid: 14067, command: '/opt/puppetlabs/server/apps/postgresql/bin/postgres -D /opt/puppetlabs/server/data/postgresql/9.6/data -c log_directory=/var/log/puppetlabs/postgresql'
        # Avg CPU: '1', Avg MEM: 48888, Avg DSK Write: 20
        class PerformanceResult
-          attr_accessor :avg_cpu, :avg_mem, :avg_disk_read, :avg_disk_write, :action_name, :duration, :processes
+          attr_accessor :avg_cpu, :avg_mem, :avg_disk_read, :avg_disk_write, :action_name, :duration, :processes, :hostname
          def initialize(args)
            @avg_cpu = args[:cpu].empty? ? 0 : args[:cpu].inject{ |sum, el| sum + el } / args[:cpu].size
            @avg_mem = args[:mem].empty? ? 0 : args[:mem].inject{ |sum, el| sum + el } / args[:mem].size
@@ -146,6 +176,8 @@ module Beaker
            @duration = args[:duration]
            @processes = args[:processes]
            @logger = args[:logger]
+            @hostname = args[:hostname] || ''
+
 
            @processes.keys.each do |key|
              @processes[key][:avg_cpu] = @processes[key][:cpu_usage].inject{ |sum, el| sum + el } / @processes[key][:cpu_usage].size unless @processes[key][:cpu_usage].empty?
@@ -165,6 +197,20 @@ module Beaker
              @logger.info " Avg CPU: '#{@processes[key][:avg_cpu]}%', Avg MEM: #{@processes[key][:avg_mem]}, Avg DSK read: #{@processes[key][:avg_disk_read]}, Avg DSK Write: #{@processes[key][:avg_disk_write]}"
            end
          end
+
+          def log_csv file_path=nil
+            file_path = file_path || "#{TMP_DIR}/#{@hostname}/atop_log_#{action_name.downcase.gsub(/[^a-z0-9]/i, '_')}.csv"
+            FileUtils.mkdir_p "#{TMP_DIR}/#{@hostname}/" unless Dir.exist?("#{TMP_DIR}/#{@hostname}/")
+            file = File.open file_path, 'w'
+            file.write "Action,Duration,Avg CPU,Avg MEM,Avg DSK read,Avg DSK Write\n"
+            file.write "#{@action_name},#{@duration},#{@avg_cpu},#{@avg_mem},#{@avg_disk_read},#{@avg_disk_write}\n\n"
+            file.write "Process pid,command,Avg CPU,Avg MEM,Avg DSK read,Avg DSK Write\n"
+            @processes.keys.each do |key|
+              file.write "#{key},'#{@processes[key][:cmd]}','#{@processes[key][:avg_cpu]},#{@processes[key][:avg_mem]},#{@processes[key][:avg_disk_read]},#{@processes[key][:avg_disk_write]}\n"
+            end
+            file.close
+            file.path
+          end
        end
 
      end
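The parse_atop_log changes above index into atop's parseable output mode (atop -P), which writes one space-separated record per label per sample and a SEP line between samples. A small illustration of how the new column constants line up against such a record (field values invented, not a real capture):

require 'csv'

# One invented sample: a SEP marker followed by a CPU record.
# Parseable records begin with: label, host, epoch, date, time, interval, ...
sample = "SEP\nCPU host 1518652800 2018/02/15 00:00:00 1 100 2 5 1 0 190 1 0\n"

CSV.parse(sample, { :col_sep => ' ' }) do |row|
  next unless row[0] == 'CPU'   # row[MEASURE_TYPE_INDEX]
  # Mirrors the constants above: system=row[8], usr=row[9], idle=row[11], iowait=row[12]
  cpu_active = row[8].to_i + row[9].to_i + row[12].to_i
  puts "CPU busy: #{cpu_active * 100 / (cpu_active + row[11].to_i)}%"
end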
data/spec/beaker-benchmark/helpers_spec.rb
CHANGED
@@ -117,37 +117,6 @@ describe ClassMixedWithDSLHelpers do
 
  end
 
-  describe 'PerformanceResult::initialize' do
-
-    it 'properly averages arrays of values' do
-
-    end
-
-    it 'sets overall averages to 0 if no values are set' do
-
-    end
-
-    it 'raises an exception if :mem, :cpu, :disk_read or :disk_write args do not exist' do
-
-    end
-
-    it 'creates a result without no process data if @processes_to_monitor is empty or nil' do
-
-    end
-
-    it 'raises an exception if :mem, :cpu, :disk_read or :disk_write args do not exist' do
-
-    end
-
-  end
-
-  describe 'PerformanceResult::log' do
-
-    it 'prints out the results' do
-
-    end
-
-  end
 end
 
 
data/spec/beaker-benchmark/performance_result_spec.rb
ADDED
@@ -0,0 +1,68 @@
+require 'spec_helper'
+
+class ClassMixedWithDSLHelpers
+  include BeakerTestHelpers
+  include Beaker::DSL::BeakerBenchmark::Helpers
+
+  def logger
+    RSpec::Mocks::Double.new('logger').as_null_object
+  end
+
+end
+
+describe ClassMixedWithDSLHelpers do
+  subject { Beaker::DSL::BeakerBenchmark::Helpers::PerformanceResult.new (
+    {:cpu => [60, 40], :mem => [6000, 4000], :disk_write => [600, 400], :action => 'test_action', :duration => 10,
+     :processes => {1000 => {:cmd => 'proc1', :cpu_usage => [10, 20], :mem_usage => [1000, 2000], :disk_write => [100, 200]},
+                    2000 => {:cmd => 'proc2', :cpu_usage => [20, 40], :mem_usage => [2000, 4000], :disk_write => [200, 400]}},
+     :logger => logger, :hostname => 'my_host'}) }
+
+  describe 'initialize' do
+
+    it 'properly averages arrays of values' do
+
+    end
+
+    it 'sets overall averages to 0 if no values are set' do
+
+    end
+
+    it 'raises an exception if :mem, :cpu, :disk_read or :disk_write args do not exist' do
+
+    end
+
+    it 'creates a result without no process data if @processes_to_monitor is empty or nil' do
+
+    end
+
+    it 'raises an exception if :mem, :cpu, :disk_read or :disk_write args do not exist' do
+
+    end
+
+  end
+
+  describe 'log_summary' do
+
+    it 'prints out the results' do
+
+    end
+
+  end
+
+  describe 'log_csv' do
+
+    it 'prints out the results in CSV format' do
+      file_path = subject.log_csv
+      file = File.open file_path
+      csv_file_content = file.readlines
+      expected_content = <<-EOS
+Process pid,command,Avg CPU,Avg MEM,Avg DSK read,Avg DSK Write
+1000,'proc1','15,1500,,150
+2000,'proc2','30,3000,,300
+      EOS
+      expect(csv_file_content == expected_content)
+    end
+
+  end
+
+end
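Worked by hand from the log_csv body and the fixture above, the overall averages come out to 50 (CPU), 5000 (MEM), and 500 (DSK write); assuming the missing :disk_read averages to an empty field, as the per-process rows in the spec suggest, the file the spec reads back would look roughly like:

Action,Duration,Avg CPU,Avg MEM,Avg DSK read,Avg DSK Write
test_action,10,50,5000,,500

Process pid,command,Avg CPU,Avg MEM,Avg DSK read,Avg DSK Write
1000,'proc1','15,1500,,150
2000,'proc2','30,3000,,300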
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: beaker-benchmark
 version: !ruby/object:Gem::Version
-  version: 0.0.1
+  version: 0.0.2
 platform: ruby
 authors:
 - Puppetlabs
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2018-02-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rspec
@@ -167,13 +167,14 @@ files:
 - README.md
 - Rakefile
 - acceptance/config/nodes/vagrant-ubuntu-1404.yml
-- acceptance/tests/first.rb
+- acceptance/tests/measure_perf_on.rb
 - beaker-benchmark.gemspec
 - bin/beaker-benchmark
 - lib/beaker-benchmark.rb
 - lib/beaker-benchmark/helpers.rb
 - lib/beaker-benchmark/version.rb
 - spec/beaker-benchmark/helpers_spec.rb
+- spec/beaker-benchmark/performance_result_spec.rb
 - spec/beaker_test_helpers.rb
 - spec/helpers.rb
 - spec/spec_helper.rb
@@ -203,6 +204,7 @@ specification_version: 4
 summary: Beaker benchmark Helpers!
 test_files:
 - spec/beaker-benchmark/helpers_spec.rb
+- spec/beaker-benchmark/performance_result_spec.rb
 - spec/beaker_test_helpers.rb
 - spec/helpers.rb
 - spec/spec_helper.rb
data/acceptance/tests/first.rb
DELETED
@@ -1,8 +0,0 @@
-
-# Acceptance level testing goes into files in the tests directory like this one,
-# Each file corresponding to a new test made up of individual testing steps
-test_name "Template Acceptance Test Example"
-
-step "Fail fast!"
-
-fail_test("There are no acceptance tests yet!")
|