perfmonger 0.13.1 → 0.14.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 1a2bc69e72d02745481dafb5bec13073d4289a3f38cdf8eb253416e649e56ff5
- data.tar.gz: fd5db8b9b288b62060df2ab5881ec8c777c320479d53420ddef3e4420ade9c9a
+ metadata.gz: 7cafa0eb63dce619c0f5cf2f23042db020e435905eb7d00e2338d3c499495d02
+ data.tar.gz: ab6875fdfe16bd70461db8569b4c5572bbd261e931a6efa98c2bede6660e9d1b
  SHA512:
- metadata.gz: c7184d7eeb69b60188a740031143eede5188b56dbf84cd6a4bdd28302439faba79f390f8baf27f1e33275d04355c1e99c42eb09b1eb663a243c0046e1cc5a29e
- data.tar.gz: cda569ba86554c985e46249d77d1fdc606fbe126150591dcec55ccbe82eabd13b6081ed428c6b7301004c1955fdf00273549a82696f6ed07cd3c2bb55387dd45
+ metadata.gz: 17bb023ddebbde5fda03b2ed90411274a335e34ac5ddc83e4df7821454e40484957688ed7dfe72b9245190006fee0c2e1e39aa375fe17c12f95bba9838f2d767
+ data.tar.gz: 259636bb0f5049e404ec752edf0e659b23b8bc1adcaea69ef2f0db4cc3bfccac028793fd48920d9e4a54e5cda86a3a52581eb2dc250abd0b5f8476dec815772f
data/NEWS CHANGED
@@ -1,4 +1,12 @@
- ## 2021-XX-XX: PerfMonger 0.14.0
+ ## 2021-XX-XX: PerfMonger 0.15.0
+
+ ## 2021-07-27: PerfMonger 0.14.0
+ * New features
+   * [plot] subcommand:
+     * Added memory usage plot
+ * Changes
+   * [record] subcommand:
+     * Additionally record 'Hugepagesize' in memory usage
 
  ## 2021-07-26: PerfMonger 0.13.1
  * New features
core/cmd/perfmonger-plot-formatter/README.md ADDED
@@ -0,0 +1,24 @@
+ # perfmonger-plot-formatter
+
+ `perfmonger-plot-formatter` is a command to generate data files for plotting
+ various metrics recorded by `perfmonger-recorder`.
+
+ ## Notes on memory usage records
+
+ ### `free(1)` compatible summarization
+
+ - total: `MemStat.MemTotal`
+ - used: `if (memUsed < 0) { MemStat.MemTotal - MemStat.MemFree } else { memUsed }`
+   - memUsed := `MemStat.MemTotal - MemStat.MemFree - mainCached - MemStat.Buffers`
+   - mainCached := `MemStat.Cached + MemStat.SReclaimable`
+ - free: `MemStat.MemFree`
+ - shared: `MemStat.Shmem`
+ - buffers: `MemStat.Buffers`
+ - cache: `mainCached`
+   - mainCached := `MemStat.Cached + MemStat.SReclaimable`
+ - available: `MemStat.MemAvailable`
+
+ ### Additional info
+
+ - hugeTotal: `MemStat.HugePages_Total`
+ - hugeUsed: `MemStat.HugePages_Total - MemStat.HugePages_Free`
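For readers who want to reproduce the `free(1)`-compatible summarization described in that README outside of PerfMonger, here is a minimal Go sketch. It is illustrative only and not part of the gem: the `FreeSummary` type and `freeSummary` function are hypothetical names, while the inputs correspond to the `MemStat` fields listed above (all values in kB, as read from /proc/meminfo).

```go
package main

import "fmt"

// FreeSummary is a hypothetical container for the free(1)-compatible
// values described in the README (all values in kB).
type FreeSummary struct {
	Total, Used, Free, Shared, Buffers, Cache, Available int64
}

// freeSummary applies the formulas above: "cache" folds SReclaimable into
// Cached, and "used" falls back to Total-Free when the detailed formula
// would go negative.
func freeSummary(memTotal, memFree, cached, sReclaimable, buffers, shmem, memAvailable int64) FreeSummary {
	mainCached := cached + sReclaimable
	memUsed := memTotal - memFree - mainCached - buffers
	if memUsed < 0 {
		memUsed = memTotal - memFree
	}
	return FreeSummary{
		Total:     memTotal,
		Used:      memUsed,
		Free:      memFree,
		Shared:    shmem,
		Buffers:   buffers,
		Cache:     mainCached,
		Available: memAvailable,
	}
}

func main() {
	// Example figures in kB, made up purely for illustration.
	s := freeSummary(16384000, 8192000, 4096000, 256000, 128000, 64000, 12000000)
	fmt.Printf("%+v\n", s)
}
```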
core/cmd/perfmonger-plot-formatter/perfmonger-plot-formatter.go CHANGED
@@ -13,14 +13,16 @@ import (
  "os"
  "regexp"
  "sort"
+ "strings"
 
- ss "github.com/hayamiz/perfmonger/core/subsystem"
  "github.com/hayamiz/perfmonger/core"
+ ss "github.com/hayamiz/perfmonger/core/subsystem"
  )
 
  type CmdOption struct {
  DiskFile string
  CpuFile string
+ MemFile string
  PerfmongerFile string
  disk_only string
  disk_only_regex *regexp.Regexp
@@ -31,8 +33,9 @@ func parseArgs() *CmdOption {
 
  opt := new(CmdOption)
 
- flag.StringVar(&opt.DiskFile, "diskfile", "./disk.dat", "Disk performance data file")
- flag.StringVar(&opt.CpuFile, "cpufile", "./cpu.dat", "CPU performance data file")
+ flag.StringVar(&opt.DiskFile, "diskfile", "./disk.dat", "Disk usage data file for gnuplot")
+ flag.StringVar(&opt.CpuFile, "cpufile", "./cpu.dat", "CPU usage data file for gnuplot")
+ flag.StringVar(&opt.MemFile, "memfile", "./mem.dat", "Memory usage data file for gnuplot")
  flag.StringVar(&opt.PerfmongerFile, "perfmonger", "", "Perfmonger log file")
  flag.StringVar(&opt.disk_only, "disk-only",
  "", "Select disk devices by regex")
@@ -80,6 +83,13 @@ type DiskDatTmpFile struct {
  Idx int
  }
 
+ type MemDatTmpFile struct {
+ Name string
+ Path string
+ File *os.File
+ Writer *bufio.Writer
+ }
+
  type CpuDatTmpFile struct {
  CoreId int
  Path string
@@ -118,6 +128,20 @@ func makeCpuDatTmpFile(coreid int) *CpuDatTmpFile {
  return ret
  }
 
+ func makeMemDatTmpFile() *MemDatTmpFile {
+ ret := new(MemDatTmpFile)
+
+ f, err := ioutil.TempFile("", "perfmonger-mem")
+ if err != nil {
+ panic(err)
+ }
+ ret.File = f
+ ret.Path = f.Name()
+ ret.Writer = bufio.NewWriter(f)
+
+ return ret
+ }
+
  func printCoreUsage(writer *bufio.Writer, elapsed_time float64, coreusage *ss.CpuCoreUsage) {
  writer.WriteString(
  fmt.Sprintf("%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",
@@ -133,6 +157,80 @@ func printCoreUsage(writer *bufio.Writer, elapsed_time float64, coreusage *ss.Cp
  coreusage.Idle))
  }
 
+ func printMemUsage(writer *bufio.Writer, elapsed_time float64, mem *ss.MemStat) {
+ if mem == nil {
+ writer.WriteString("#")
+ writer.WriteString(
+ strings.Join([]string{
+ "elapsed_time", // 1
+ "mem_total", // 2
+ "mem_used", // 3
+ "mem_free", // 4
+ "buffers", // 5
+ "cached", // 6
+ "swap_cached", // 7
+ "active", // 8
+ "inactive", // 9
+ "swap_total", // 10
+ "swap_free", // 11
+ "dirty", // 12
+ "writeback", // 13
+ "anon_pages", // 14
+ "mapped", // 15
+ "shmem", // 16
+ "slab", // 17
+ "s_reclaimable", // 18
+ "s_unreclaim", // 19
+ "kernel_stack", // 20
+ "page_tables", // 21
+ "nfs_unstable", // 22
+ "bounce", // 23
+ "commit_limit", // 24
+ "committed_as", // 25
+ "anon_huge_pages", // 26
+ "huge_pages_total", // 27
+ "huge_pages_free", // 28
+ "huge_pages_rsvd", // 29
+ "huge_pages_surp", // 30
+ "hugepagesize"}, // 31
+ "\t"))
+ writer.WriteString("\n")
+ } else {
+ writer.WriteString(fmt.Sprintf("%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
+ elapsed_time,
+ mem.MemTotal,
+ mem.MemTotal-mem.MemFree-mem.Buffers-mem.Cached-mem.SReclaimable,
+ mem.MemFree,
+ mem.Buffers,
+ mem.Cached,
+ mem.SwapCached,
+ mem.Active,
+ mem.Inactive,
+ mem.SwapTotal,
+ mem.SwapFree,
+ mem.Dirty,
+ mem.Writeback,
+ mem.AnonPages,
+ mem.Mapped,
+ mem.Shmem,
+ mem.Slab,
+ mem.SReclaimable,
+ mem.SUnreclaim,
+ mem.KernelStack,
+ mem.PageTables,
+ mem.NFS_Unstable,
+ mem.Bounce,
+ mem.CommitLimit,
+ mem.Committed_AS,
+ mem.AnonHugePages,
+ mem.HugePages_Total,
+ mem.HugePages_Free,
+ mem.HugePages_Rsvd,
+ mem.HugePages_Surp,
+ mem.Hugepagesize))
+ }
+ }
+
  func main() {
  opt := parseArgs()
 
@@ -193,6 +291,13 @@ func main() {
  cpu_writer.WriteString("# All cpu usage\n")
  cpu_writer.WriteString("# elapsed_time %usr %nice %sys %iowait %hardirq %softirq %steal %guest %idle\n")
 
+ f, err = os.Create(opt.MemFile)
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ mem_writer := bufio.NewWriter(f)
+
  for {
  prev_rec := &records[curr^1]
  cur_rec := &records[curr]
@@ -284,6 +389,12 @@ func main() {
  }
  printCoreUsage(cpu_writer, prev_rec.Time.Sub(t0).Seconds(), cusage.All)
 
+ if !meta_set {
+ // print column labels
+ printMemUsage(mem_writer, prev_rec.Time.Sub(t0).Seconds(), nil)
+ }
+ printMemUsage(mem_writer, prev_rec.Time.Sub(t0).Seconds(), cur_rec.Mem)
+
  curr ^= 1
  meta_set = true
  }
@@ -332,6 +443,7 @@ func main() {
  os.Remove(cpu_dat.Path)
  }
  cpu_writer.Flush()
+ mem_writer.Flush()
 
  json_enc := json.NewEncoder(os.Stdout)
  json_enc.Encode(meta)
@@ -469,6 +469,8 @@ func ReadMemStat(record *StatRecord) error {
  mem_stat.HugePages_Free = val
  case "HugePages_Total:":
  mem_stat.HugePages_Total = val
+ case "Hugepagesize:":
+ mem_stat.Hugepagesize = val
  case "AnonHugePages:":
  mem_stat.AnonHugePages = val
  case "Committed_AS:":
@@ -132,6 +132,7 @@ type MemStat struct {
  HugePages_Free int64
  HugePages_Rsvd int64
  HugePages_Surp int64
+ Hugepagesize int64
  }
 
  type StatRecord struct {
@@ -286,6 +287,7 @@ func (entry *MemStat) Clear() {
  entry.HugePages_Free = 0
  entry.HugePages_Rsvd = 0
  entry.HugePages_Surp = 0
+ entry.Hugepagesize = 0
  }
 
  func NewStatRecord() *StatRecord {
Binary file
@@ -145,10 +145,13 @@ EOS
 
  @disk_dat = File.expand_path("disk.dat", @tmpdir)
  @cpu_dat = File.expand_path("cpu.dat", @tmpdir)
+ @mem_dat = File.expand_path("mem.dat", @tmpdir)
 
  meta_json = nil
- cmd = [formatter_bin, "-perfmonger", @data_file, "-cpufile", @cpu_dat,
- "-diskfile", @disk_dat]
+ cmd = [formatter_bin, "-perfmonger", @data_file,
+ "-cpufile", @cpu_dat,
+ "-diskfile", @disk_dat,
+ "-memfile", @mem_dat]
  if @disk_only_regex
  cmd << "-disk-only"
  cmd << @disk_only
164
167
 
165
168
  plot_disk(meta)
166
169
  plot_cpu(meta)
170
+ plot_mem(meta)
167
171
 
168
172
  FileUtils.rm_rf(@tmpdir)
169
173
 
@@ -508,6 +512,139 @@ EOS
  end
  end # def
 
+ def plot_mem(meta)
+ pdf_filename = @output_prefix + 'mem.pdf'
+ gp_filename = @output_prefix + 'mem.gp'
+ dat_filename = @output_prefix + 'mem.dat'
+
+ if @output_type != 'pdf'
+ img_filename = @output_prefix + 'mem.' + @output_type
+ else
+ img_filename = nil
+ end
+
+ start_time = meta["start_time"]
+ end_time = meta["end_time"]
+
+ mem_scaling = 2.0**20 # KB -> GB
+
+ # "elapsed_time", // 1
+ # "mem_total", // 2
+ # "mem_used", // 3
+ # "mem_free", // 4
+ # "buffers", // 5
+ # "cached", // 6
+ # "swap_cached", // 7
+ # "active", // 8
+ # "inactive", // 9
+ # "swap_total", // 10
+ # "swap_free", // 11
+ # "dirty", // 12
+ # "writeback", // 13
+ # "anon_pages", // 14
+ # "mapped", // 15
+ # "shmem", // 16
+ # "slab", // 17
+ # "s_reclaimable", // 18
+ # "s_unreclaim", // 19
+ # "kernel_stack", // 20
+ # "page_tables", // 21
+ # "nfs_unstable", // 22
+ # "bounce", // 23
+ # "commit_limit", // 24
+ # "committed_as", // 25
+ # "anon_huge_pages", // 26
+ # "huge_pages_total", // 27
+ # "huge_pages_free", // 28
+ # "huge_pages_rsvd", // 29
+ # "huge_pages_surp"}, // 30
+ # "hugepagesize"}, // 31
+
+ Dir.chdir(@tmpdir) do
+ total = `tail -n+2 #{dat_filename}|head -n1`.split[1].to_f
+ if total == 0.0
+ raise RuntimeError.new("Failed to get MemTotal value from mem.dat file: #{dat_filename}")
+ end
+
+ gpfile = File.open(gp_filename, 'w')
+
+ pdf_file = File.join(@output_dir, "mem.pdf")
+ gpfile.puts <<EOS
+ set term pdfcairo enhanced color size 6in,2.5in
+ set title "Memory usage"
+ set output "#{pdf_filename}"
+ set key outside center bottom horizontal
+ set size 1.0, 1.0
+
+ set xlabel "elapsed time [sec]"
+ set ylabel "memory usage [GB]"
+
+ # scaling
+ s = #{mem_scaling}
+
+ set grid
+ set xrange [#{@offset_time}:#{end_time - start_time}]
+ set yrange [0:#{total * 1.2}/s]
+
+ # line styles
+ set style line 1 lt 1 lc rgb '#66C2A5' # teal
+ set style line 2 lt 1 lc rgb '#FC8D62' # orange
+ set style line 3 lt 1 lc rgb '#8DA0CB' # lilac
+ set style line 4 lt 1 lc rgb '#E78AC3' # magenta
+ set style line 5 lt 1 lc rgb '#A6D854' # lime green
+ set style line 6 lt 1 lc rgb '#FFD92F' # banana
+ set style line 7 lt 1 lc rgb '#E5C494' # tan
+ set style line 8 lt 1 lc rgb '#B3B3B3' # grey
+
+ # palette
+ set palette maxcolors 8
+ set palette defined ( 0 '#66C2A5',\
+ 1 '#FC8D62',\
+ 2 '#8DA0CB',\
+ 3 '#E78AC3',\
+ 4 '#A6D854',\
+ 5 '#FFD92F',\
+ 6 '#E5C494',\
+ 7 '#B3B3B3' )
+
+ used(total, free, cached, buffers, srecl) = \\
+ ( (total - free - cache(cached, srecl) - buffers < 0) ? \\
+ (total - free) : \\
+ (total - free - cache(cached, srecl) - buffers) )
+
+ cache(cached, srecl) = cached + srecl
+
+ plot "#{dat_filename}" usi 1:($4/s+$5/s+cache($6, $18)/s+$16/s+used($2, $4, $6, $5, $18)/s) wi filledcurves x1 ls 8 ti "free", \\
+ "#{dat_filename}" usi 1:($5/s+cache($6, $18)/s+$16/s+used($2, $4, $6, $5, $18)/s) wi filledcurves x1 ls 3 ti "buffers", \\
+ "#{dat_filename}" usi 1:(cache($6, $18)/s+$16/s+used($2, $4, $6, $5, $18)/s) wi filledcurves x1 ls 5 ti "cached", \\
+ "#{dat_filename}" usi 1:($16/s+used($2, $4, $6, $5, $18)/s) wi filledcurves x1 ls 6 ti "shared", \\
+ "#{dat_filename}" usi 1:(used($2, $4, $6, $5, $18)/s) wi filledcurves x1 ls 2 ti "used", \\
+ "#{dat_filename}" usi 1:(($27-$28)*$31/s) wi lines dt (4,4) lc rgb 'black' lw 2.5 ti 'used (hugepage)'
+
+ EOS
+
+ gpfile.close
+ system("gnuplot #{gpfile.path}")
+
+ if @output_type != 'pdf'
+ system("convert -density 150 -background white #{pdf_filename} #{img_filename}")
+ end
+ end # chdir
+
+ copy_targets = []
+ copy_targets << pdf_filename
+ copy_targets << img_filename if img_filename
+
+ if @save_gpfiles
+ copy_targets << gp_filename
+ copy_targets << dat_filename
+ end
+
+ copy_targets.each do |target|
+ FileUtils.copy(File.join(@tmpdir, target), @output_dir)
+ end
+ end # def
+
  private
  def factors(n)
  (2..([n, n / 2].max).to_i).select do |x|
@@ -1,3 +1,3 @@
  module PerfMonger
- VERSION = "0.13.1"
+ VERSION = "0.14.0"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: perfmonger
  version: !ruby/object:Gem::Version
- version: 0.13.1
+ version: 0.14.0
  platform: ruby
  authors:
  - Yuto HAYAMIZU
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2021-07-26 00:00:00.000000000 Z
+ date: 2021-07-27 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -102,6 +102,7 @@ files:
  - core/Makefile
  - core/build.sh
  - core/cmd/perfmonger-player/perfmonger-player.go
+ - core/cmd/perfmonger-plot-formatter/README.md
  - core/cmd/perfmonger-plot-formatter/perfmonger-plot-formatter.go
  - core/cmd/perfmonger-recorder/perfmonger-recorder.go
  - core/cmd/perfmonger-summarizer/perfmonger-summarizer.go