perfmonger 0.6.1 → 0.7.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (87) hide show
  1. checksums.yaml +5 -13
  2. data/.gitignore +6 -0
  3. data/.tachikoma.yml +1 -0
  4. data/.travis.yml +18 -6
  5. data/Gemfile +1 -3
  6. data/Guardfile +26 -0
  7. data/NEWS +21 -0
  8. data/README.md +8 -9
  9. data/Rakefile +33 -1
  10. data/core/Makefile +23 -0
  11. data/core/build.sh +48 -0
  12. data/core/perfmonger-player.go +165 -0
  13. data/core/perfmonger-recorder.go +296 -0
  14. data/core/perfmonger-summarizer.go +207 -0
  15. data/core/subsystem/Makefile +3 -0
  16. data/core/subsystem/perfmonger.go +60 -0
  17. data/core/subsystem/perfmonger_darwin.go +22 -0
  18. data/core/subsystem/perfmonger_linux.go +292 -0
  19. data/core/subsystem/perfmonger_linux_test.go +73 -0
  20. data/core/subsystem/stat.go +214 -0
  21. data/core/subsystem/stat_test.go +281 -0
  22. data/core/subsystem/usage.go +410 -0
  23. data/core/subsystem/usage_test.go +496 -0
  24. data/lib/exec/operationBinding.rb.svn-base +59 -0
  25. data/lib/exec/perfmonger-player_darwin_amd64 +0 -0
  26. data/lib/exec/perfmonger-player_linux_386 +0 -0
  27. data/lib/exec/perfmonger-player_linux_amd64 +0 -0
  28. data/lib/exec/perfmonger-recorder_darwin_amd64 +0 -0
  29. data/lib/exec/perfmonger-recorder_linux_386 +0 -0
  30. data/lib/exec/perfmonger-recorder_linux_amd64 +0 -0
  31. data/lib/exec/perfmonger-summarizer_darwin_amd64 +0 -0
  32. data/lib/exec/perfmonger-summarizer_linux_386 +0 -0
  33. data/lib/exec/perfmonger-summarizer_linux_amd64 +0 -0
  34. data/lib/exec/perfmonger-summary_linux_386 +0 -0
  35. data/lib/exec/perfmonger-summary_linux_amd64 +0 -0
  36. data/lib/perfmonger/cli.rb +8 -3
  37. data/lib/perfmonger/command/core.rb +62 -0
  38. data/lib/perfmonger/command/live.rb +39 -0
  39. data/lib/perfmonger/command/play.rb +56 -0
  40. data/lib/perfmonger/command/plot.rb +30 -22
  41. data/lib/perfmonger/command/record.rb +3 -2
  42. data/lib/perfmonger/command/record_option.rb +40 -59
  43. data/lib/perfmonger/command/server.rb +7 -2
  44. data/lib/perfmonger/command/stat.rb +2 -2
  45. data/lib/perfmonger/command/stat_option.rb +1 -1
  46. data/lib/perfmonger/command/summary.rb +11 -326
  47. data/lib/perfmonger/version.rb +1 -3
  48. data/lib/perfmonger.rb +3 -0
  49. data/misc/_perfmonger +128 -0
  50. data/misc/perfmonger-completion.bash +49 -0
  51. data/perfmonger.gemspec +6 -5
  52. data/spec/data/busy100.pgr +0 -0
  53. data/spec/fingerprint_spec.rb +35 -0
  54. data/spec/live_spec.rb +25 -0
  55. data/spec/perfmonger_spec.rb +37 -0
  56. data/spec/play_spec.rb +21 -0
  57. data/spec/plot_spec.rb +42 -0
  58. data/spec/record_spec.rb +15 -0
  59. data/spec/spec_helper.rb +33 -0
  60. data/spec/stat_spec.rb +15 -0
  61. data/spec/summary_spec.rb +51 -0
  62. data/spec/support/aruba.rb +11 -0
  63. data/wercker.yml +59 -0
  64. metadata +117 -45
  65. data/ext/perfmonger/extconf.rb +0 -19
  66. data/ext/perfmonger/perfmonger.h +0 -58
  67. data/ext/perfmonger/perfmonger_record.c +0 -754
  68. data/ext/perfmonger/sysstat/common.c +0 -627
  69. data/ext/perfmonger/sysstat/common.h +0 -207
  70. data/ext/perfmonger/sysstat/ioconf.c +0 -515
  71. data/ext/perfmonger/sysstat/ioconf.h +0 -84
  72. data/ext/perfmonger/sysstat/iostat.c +0 -1100
  73. data/ext/perfmonger/sysstat/iostat.h +0 -121
  74. data/ext/perfmonger/sysstat/libsysstat.h +0 -19
  75. data/ext/perfmonger/sysstat/mpstat.c +0 -953
  76. data/ext/perfmonger/sysstat/mpstat.h +0 -79
  77. data/ext/perfmonger/sysstat/rd_stats.c +0 -2388
  78. data/ext/perfmonger/sysstat/rd_stats.h +0 -651
  79. data/ext/perfmonger/sysstat/sysconfig.h +0 -13
  80. data/test/run-test.sh +0 -39
  81. data/test/spec/bin_spec.rb +0 -37
  82. data/test/spec/data/2devices.expected +0 -42
  83. data/test/spec/data/2devices.output +0 -42
  84. data/test/spec/spec_helper.rb +0 -20
  85. data/test/spec/summary_spec.rb +0 -193
  86. data/test/test-perfmonger.c +0 -145
  87. data/test/test.h +0 -9
@@ -0,0 +1,281 @@
1
+ package subsystem
2
+
3
+ import (
4
+ "reflect"
5
+ "testing"
6
+ )
7
+
8
// getField returns the reflect.Value of the named exported field of val.
// val must be a struct value (not a pointer to one).
func getField(val interface{}, field string) reflect.Value {
	v := reflect.ValueOf(val)
	return v.FieldByName(field)
}
11
+
12
+ func TestCpuCoreStatUptime(t *testing.T) {
13
+ var corestat *CpuCoreStat
14
+
15
+ corestat = new(CpuCoreStat)
16
+
17
+ if corestat.Uptime() != 0 {
18
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 0)
19
+ }
20
+
21
+ corestat.User += 3
22
+ if corestat.Uptime() != 3 {
23
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 10)
24
+ }
25
+
26
+ corestat.Sys += 5
27
+ if corestat.Uptime() != 8 {
28
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 8)
29
+ }
30
+
31
+ corestat.Iowait += 7
32
+ if corestat.Uptime() != 15 {
33
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 15)
34
+ }
35
+
36
+ corestat.Iowait += 11
37
+ if corestat.Uptime() != 26 {
38
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 26)
39
+ }
40
+
41
+ corestat.Steal += 13
42
+ if corestat.Uptime() != 39 {
43
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 39)
44
+ }
45
+
46
+ corestat.Hardirq += 17
47
+ if corestat.Uptime() != 56 {
48
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 56)
49
+ }
50
+
51
+ corestat.Softirq += 19
52
+ if corestat.Uptime() != 75 {
53
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 75)
54
+ }
55
+
56
+ corestat.Idle += 23
57
+ if corestat.Uptime() != 98 {
58
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 98)
59
+ }
60
+
61
+ corestat.Nice += 29
62
+ if corestat.Uptime() != 127 {
63
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 127)
64
+ }
65
+
66
+ corestat.Guest += 31
67
+ if corestat.Uptime() != 127 {
68
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 127)
69
+ }
70
+
71
+ corestat.GuestNice += 37
72
+ if corestat.Uptime() != 127 {
73
+ t.Errorf("corestat.Uptime() = %v, want %v", corestat.Uptime(), 127)
74
+ }
75
+ }
76
+
77
+ func TestCpuCoreStatClear(t *testing.T) {
78
+ var corestat *CpuCoreStat
79
+
80
+ corestat = new(CpuCoreStat)
81
+
82
+ corestat.User = 3
83
+ corestat.Sys = 5
84
+ corestat.Iowait = 7
85
+ corestat.Idle = 11
86
+ corestat.Steal = 13
87
+ corestat.Hardirq = 17
88
+ corestat.Softirq = 19
89
+ corestat.Nice = 23
90
+ corestat.Guest = 29
91
+ corestat.GuestNice = 31
92
+
93
+ if corestat.User == 0 ||
94
+ corestat.Sys == 0 ||
95
+ corestat.Iowait == 0 ||
96
+ corestat.Idle == 0 ||
97
+ corestat.Steal == 0 ||
98
+ corestat.Hardirq == 0 ||
99
+ corestat.Softirq == 0 ||
100
+ corestat.Nice == 0 ||
101
+ corestat.Guest == 0 ||
102
+ corestat.GuestNice == 0 {
103
+ t.Errorf("corestat = %v, want all fields not to be 0.",
104
+ corestat)
105
+ }
106
+
107
+ corestat.Clear()
108
+
109
+ if corestat.User != 0 ||
110
+ corestat.Sys != 0 ||
111
+ corestat.Iowait != 0 ||
112
+ corestat.Idle != 0 ||
113
+ corestat.Steal != 0 ||
114
+ corestat.Hardirq != 0 ||
115
+ corestat.Softirq != 0 ||
116
+ corestat.Nice != 0 ||
117
+ corestat.Guest != 0 ||
118
+ corestat.GuestNice != 0 {
119
+ t.Errorf("corestat = %v, want all fields to be 0.",
120
+ corestat)
121
+ }
122
+ }
123
+
124
+ func TestNewCpuStat(t *testing.T) {
125
+ var cpustat *CpuStat
126
+ var num_core int
127
+
128
+ num_core = 0
129
+ cpustat = NewCpuStat(num_core)
130
+ if cpustat != nil {
131
+ t.Errorf("NewCore(%d) = %v, want %v",
132
+ num_core, cpustat, nil)
133
+ }
134
+
135
+ num_core = 1
136
+ cpustat = NewCpuStat(num_core)
137
+ if cpustat == nil {
138
+ t.Errorf("NewCore(%d) = %v, expect != %v",
139
+ num_core, cpustat, nil)
140
+ }
141
+
142
+ for num_core = 1; num_core < 1024; num_core++ {
143
+ cpustat = NewCpuStat(num_core)
144
+ if len(cpustat.CoreStats) != num_core {
145
+ t.Errorf("len(cpustat.CoreStats) = %v, want %v",
146
+ len(cpustat.CoreStats), num_core)
147
+ }
148
+ }
149
+ }
150
+
151
+ func TestCpuStatClear(t *testing.T) {
152
+ num_core := 4
153
+ cpustat := NewCpuStat(num_core)
154
+
155
+ cores := []*CpuCoreStat{&cpustat.All}
156
+ for i := 0; i < num_core; i++ {
157
+ cores = append(cores, &cpustat.CoreStats[i])
158
+ }
159
+
160
+ for _, corestat := range cores {
161
+ corestat.User = 3
162
+ corestat.Sys = 5
163
+ corestat.Iowait = 7
164
+ corestat.Idle = 11
165
+ corestat.Steal = 13
166
+ corestat.Hardirq = 17
167
+ corestat.Softirq = 19
168
+ corestat.Nice = 23
169
+ corestat.Guest = 29
170
+ corestat.GuestNice = 31
171
+ }
172
+
173
+ for _, corestat := range cores {
174
+ if corestat.User == 0 ||
175
+ corestat.Sys == 0 ||
176
+ corestat.Iowait == 0 ||
177
+ corestat.Idle == 0 ||
178
+ corestat.Steal == 0 ||
179
+ corestat.Hardirq == 0 ||
180
+ corestat.Softirq == 0 ||
181
+ corestat.Nice == 0 ||
182
+ corestat.Guest == 0 ||
183
+ corestat.GuestNice == 0 {
184
+ t.Errorf("corestat = %v, want all fields not to be 0.",
185
+ corestat)
186
+ }
187
+ }
188
+
189
+ cpustat.Clear()
190
+
191
+ for _, corestat := range cores {
192
+ if corestat.User != 0 ||
193
+ corestat.Sys != 0 ||
194
+ corestat.Iowait != 0 ||
195
+ corestat.Idle != 0 ||
196
+ corestat.Steal != 0 ||
197
+ corestat.Hardirq != 0 ||
198
+ corestat.Softirq != 0 ||
199
+ corestat.Nice != 0 ||
200
+ corestat.Guest != 0 ||
201
+ corestat.GuestNice != 0 {
202
+ t.Errorf("corestat = %v, want all fields to be 0.",
203
+ corestat)
204
+ }
205
+ }
206
+ }
207
+
208
+ func TestNewNetStatEntry(t *testing.T) {
209
+ entry := NewNetStatEntry()
210
+
211
+ checkField := func(e *NetStatEntry, fieldName string, wanted int64) {
212
+ val := reflect.ValueOf(*e).FieldByName(fieldName).Int()
213
+ if val != wanted {
214
+ t.Errorf(".%s = %v, want %v",
215
+ fieldName, val, wanted)
216
+ }
217
+ }
218
+
219
+ checkStrField := func(e *NetStatEntry, fieldName string, wanted string) {
220
+ val := reflect.ValueOf(*e).FieldByName(fieldName).String()
221
+ if val != wanted {
222
+ t.Errorf(".%s = %v, want %v",
223
+ fieldName, val, wanted)
224
+ }
225
+ }
226
+
227
+ checkStrField(entry, "Name", "")
228
+ checkField(entry, "RxBytes", 0)
229
+ checkField(entry, "RxPackets", 0)
230
+ checkField(entry, "RxErrors", 0)
231
+ checkField(entry, "RxDrops", 0)
232
+ checkField(entry, "RxFifo", 0)
233
+ checkField(entry, "RxFrame", 0)
234
+ checkField(entry, "RxCompressed", 0)
235
+ checkField(entry, "RxMulticast", 0)
236
+ checkField(entry, "TxBytes", 0)
237
+ checkField(entry, "TxPackets", 0)
238
+ checkField(entry, "TxErrors", 0)
239
+ checkField(entry, "TxDrops", 0)
240
+ checkField(entry, "TxFifo", 0)
241
+ checkField(entry, "TxFrame", 0)
242
+ checkField(entry, "TxCompressed", 0)
243
+ checkField(entry, "TxMulticast", 0)
244
+
245
+ entry.Name = "lo"
246
+ entry.RxBytes = 10
247
+ entry.RxPackets = 20
248
+
249
+ entry.Clear()
250
+
251
+ checkStrField(entry, "Name", "")
252
+ checkField(entry, "RxBytes", 0)
253
+ checkField(entry, "RxPackets", 0)
254
+ }
255
+
256
+ func TestNewNetStat(t *testing.T) {
257
+ netstat := NewNetStat()
258
+
259
+ if len(netstat.Entries) != 0 {
260
+ t.Errorf("len(netstat.Entries) = %v, want %v",
261
+ len(netstat.Entries), 0)
262
+ }
263
+ }
264
+
265
+ func TestNewStatRecord(t *testing.T) {
266
+ stat_record := NewStatRecord()
267
+
268
+ checkFieldIsNil := func(field string) {
269
+ val := getField(*stat_record, field)
270
+ if !val.IsNil() {
271
+ t.Errorf("stat_record.%s = %v, want %v",
272
+ field, val, nil)
273
+ }
274
+ }
275
+
276
+ checkFieldIsNil("Cpu")
277
+ checkFieldIsNil("Proc")
278
+ checkFieldIsNil("Disk")
279
+ checkFieldIsNil("Softirq")
280
+ checkFieldIsNil("Net")
281
+ }
@@ -0,0 +1,410 @@
1
+ package subsystem
2
+
3
+ import (
4
+ "bytes"
5
+ "encoding/json"
6
+ "errors"
7
+ "fmt"
8
+ "sort"
9
+ "time"
10
+ )
11
+
12
// CpuCoreUsage holds the utilization of a single CPU core over a
// measurement interval. Each field is a percentage (usageItem scales
// counter deltas by 100), one per /proc/stat-style time category.
type CpuCoreUsage struct {
	User      float64
	Nice      float64
	Sys       float64
	Idle      float64
	Iowait    float64
	Hardirq   float64
	Softirq   float64
	Steal     float64
	Guest     float64
	GuestNice float64
}
24
+
25
// CpuUsage aggregates CPU utilization for a whole machine: one entry
// per core plus an "All" summary, which GetCpuUsage scales so that
// NumCore*100% is the maximum.
type CpuUsage struct {
	All        *CpuCoreUsage
	NumCore    int
	CoreUsages []*CpuCoreUsage
}
30
+
31
// DiskUsageEntry holds per-device I/O rates computed over Interval.
// Sector values use the kernel sector unit (presumably 512 bytes: the
// JSON writer divides by 2 to report kilobytes — confirm against the
// recorder).
type DiskUsageEntry struct {
	Interval time.Duration

	RdIops    float64 // read requests per second
	WrIops    float64 // write requests per second
	RdSectors int64   // sectors read during the interval
	WrSectors int64   // sectors written during the interval
	RdSecps   float64 // sectors per second
	WrSecps   float64 // sectors per second
	RdLatency float64 // msec
	WrLatency float64 // msec
	AvgRdSize float64 // sectors
	AvgWrSize float64 // sectors
	ReqQlen   float64 // request-queue length, derived from ReqTicks (see GetDiskUsage)
}
46
+
47
// DiskUsage maps a device name to its usage; GetDiskUsage also stores a
// synthetic aggregate entry under the key "total".
type DiskUsage map[string]*DiskUsageEntry
48
+
49
// NetUsageEntry holds per-interface network rates (per second) computed
// over Interval. Field names mirror the NetStatEntry counters they are
// derived from (see GetNetUsage).
type NetUsageEntry struct {
	Interval time.Duration

	RxBytesPerSec      float64
	RxPacketsPerSec    float64
	RxErrorsPerSec     float64
	RxDropsPerSec      float64
	RxFifoPerSec       float64
	RxFramePerSec      float64
	RxCompressedPerSec float64
	RxMulticastPerSec  float64
	TxBytesPerSec      float64
	TxPacketsPerSec    float64
	TxErrorsPerSec     float64
	TxDropsPerSec      float64
	TxFifoPerSec       float64
	TxFramePerSec      float64
	TxCompressedPerSec float64
	TxMulticastPerSec  float64
}
69
+
70
// NetUsage maps an interface name to its usage; GetNetUsage also stores
// a synthetic aggregate entry under the key "total".
type NetUsage map[string]*NetUsageEntry
71
+
72
+ func (ccusage *CpuCoreUsage) WriteJsonTo(buf *bytes.Buffer) {
73
+ buf.WriteString(
74
+ fmt.Sprintf(`{"usr":%.2f,"nice":%.2f,"sys":%.2f,"idle":%.2f,"iowait":%.2f,"hardirq":%.2f,"softirq":%.2f,"steal":%.2f,"guest":%.2f,"guestnice":%.2f}`,
75
+ ccusage.User, ccusage.Nice, ccusage.Sys, ccusage.Idle, ccusage.Iowait,
76
+ ccusage.Hardirq, ccusage.Softirq, ccusage.Steal, ccusage.Guest, ccusage.GuestNice))
77
+ }
78
+
79
+ func (cusage *CpuUsage) WriteJsonTo(buf *bytes.Buffer) {
80
+ buf.WriteString(
81
+ fmt.Sprintf(`{"num_core":%d,"all":`, cusage.NumCore))
82
+ cusage.All.WriteJsonTo(buf)
83
+ buf.WriteString(`,"cores":[`)
84
+ for idx, ccusage := range cusage.CoreUsages {
85
+ if idx > 0 {
86
+ buf.WriteString(",")
87
+ }
88
+ ccusage.WriteJsonTo(buf)
89
+ }
90
+ buf.WriteString(`]}`)
91
+ }
92
+
93
+ func GetCpuCoreUsage(c1 *CpuCoreStat, c2 *CpuCoreStat) (*CpuCoreUsage, error) {
94
+ usage := new(CpuCoreUsage)
95
+ itv := c2.Uptime() - c1.Uptime()
96
+
97
+ if itv == 0 {
98
+ return nil, errors.New("uptime difference is zero")
99
+ } else if itv < 0 {
100
+ return nil, errors.New("uptime difference is negative")
101
+ }
102
+
103
+ user := usageItem(c1.User-c1.Guest, c2.User-c2.Guest, itv)
104
+ if user < 0.0 {
105
+ user = 0.0
106
+ }
107
+ nice := usageItem(c1.Nice-c1.GuestNice, c2.Nice-c2.GuestNice, itv)
108
+ if nice < 0.0 {
109
+ nice = 0.0
110
+ }
111
+
112
+ usage.User = user
113
+ usage.Nice = nice
114
+ usage.Sys = usageItem(c1.Sys, c2.Sys, itv)
115
+ usage.Idle = usageItem(c1.Idle, c2.Idle, itv)
116
+ usage.Iowait = usageItem(c1.Iowait, c2.Iowait, itv)
117
+ usage.Hardirq = usageItem(c1.Hardirq, c2.Hardirq, itv)
118
+ usage.Softirq = usageItem(c1.Softirq, c2.Softirq, itv)
119
+ usage.Steal = usageItem(c1.Steal, c2.Steal, itv)
120
+ usage.Guest = usageItem(c1.Guest, c2.Guest, itv)
121
+ usage.GuestNice = usageItem(c1.GuestNice, c2.GuestNice, itv)
122
+
123
+ return usage, nil
124
+ }
125
+
126
+ func GetCpuUsage(c1 *CpuStat, c2 *CpuStat) (*CpuUsage, error) {
127
+ var err error
128
+
129
+ usage := new(CpuUsage)
130
+ usage.NumCore = c1.NumCore
131
+
132
+ usage.CoreUsages = make([]*CpuCoreUsage, usage.NumCore)
133
+ for idx, _ := range usage.CoreUsages {
134
+ usage.CoreUsages[idx], err = GetCpuCoreUsage(&c1.CoreStats[idx], &c2.CoreStats[idx])
135
+ if err != nil {
136
+ return nil, err
137
+ }
138
+ }
139
+ usage.All, err = GetCpuCoreUsage(&c1.All, &c2.All)
140
+ if err != nil {
141
+ return nil, err
142
+ }
143
+
144
+ // scale: NumCore * 100% as maximum
145
+ usage.All.User *= float64(usage.NumCore)
146
+ usage.All.Nice *= float64(usage.NumCore)
147
+ usage.All.Sys *= float64(usage.NumCore)
148
+ usage.All.Idle *= float64(usage.NumCore)
149
+ usage.All.Iowait *= float64(usage.NumCore)
150
+ usage.All.Hardirq *= float64(usage.NumCore)
151
+ usage.All.Softirq *= float64(usage.NumCore)
152
+ usage.All.Steal *= float64(usage.NumCore)
153
+ usage.All.Guest *= float64(usage.NumCore)
154
+ usage.All.GuestNice *= float64(usage.NumCore)
155
+
156
+ return usage, nil
157
+ }
158
+
159
+ func (duentry *DiskUsageEntry) WriteJsonTo(buf *bytes.Buffer) {
160
+ fmt.Fprintf(buf,
161
+ `{"riops":%.2f,"wiops":%.2f,"rkbyteps":%.2f,"wkbyteps":%.2f,"rlatency":%.3f,"wlatency":%.3f,"rsize":%.2f,"wsize":%.2f,"qlen":%.2f}`,
162
+ duentry.RdIops, duentry.WrIops, duentry.RdSecps/2.0, duentry.WrSecps/2.0,
163
+ duentry.RdLatency, duentry.WrLatency,
164
+ duentry.AvgRdSize, duentry.AvgWrSize, duentry.ReqQlen)
165
+ }
166
+
167
+ func (dusage *DiskUsage) WriteJsonTo(buf *bytes.Buffer) {
168
+ var devices []string
169
+
170
+ for device, _ := range *dusage {
171
+ if device != "total" {
172
+ devices = append(devices, device)
173
+ }
174
+ }
175
+ sort.Strings(devices)
176
+
177
+ bytes, err := json.Marshal(devices)
178
+ if err != nil {
179
+ panic(err)
180
+ }
181
+ fmt.Fprintf(buf, `{"devices":%s`, string(bytes))
182
+
183
+ devices = append(devices, "total")
184
+
185
+ for _, device := range devices {
186
+ usage := (*dusage)[device]
187
+ buf.WriteString(`,"`)
188
+ buf.WriteString(device)
189
+ buf.WriteString(`":`)
190
+ usage.WriteJsonTo(buf)
191
+ }
192
+
193
+ buf.WriteByte('}')
194
+ }
195
+
196
// avgDelta returns the per-second rate of a counter that moved from v
// to w over an interval of `interval` seconds.
func avgDelta(v int64, w int64, interval float64) float64 {
	delta := w - v
	return float64(delta) / interval
}
200
+
201
// GetDiskUsage computes per-device I/O rates between two DiskStat
// snapshots: d1 taken at t1 and d2 taken at t2 (t2 must be later).
// Devices present in d1 but missing from d2 are skipped. A synthetic
// aggregate entry is stored under "total"; its latencies and average
// request sizes are I/O-count-weighted means across devices.
//
// Returns an error on a non-positive interval or when either snapshot
// has no entries.
func GetDiskUsage(t1 time.Time, d1 *DiskStat, t2 time.Time, d2 *DiskStat) (*DiskUsage, error) {
	interval := t2.Sub(t1)
	itv := interval.Seconds()

	if itv <= 0.0 {
		return nil, errors.New("negative interval")
	}

	if len(d1.Entries) == 0 || len(d2.Entries) == 0 {
		return nil, errors.New("no DiskEntry")
	}

	usage := new(DiskUsage)
	(*usage) = make(DiskUsage)
	total := new(DiskUsageEntry)

	// Total read/write request counts across devices, used to weight the
	// aggregate latency and size averages at the end.
	var total_rd_ios int64 = 0
	var total_wr_ios int64 = 0

	for _, entry1 := range d1.Entries {
		name := entry1.Name
		// Find the matching device in the later snapshot (linear scan).
		var entry2 *DiskStatEntry = nil
		for _, e := range d2.Entries {
			if e.Name == entry1.Name {
				entry2 = e
				break
			}
		}
		if entry2 == nil {
			continue
		}

		// Latency = ticks spent per completed request; size = sectors per
		// request. Both are left at 0 when no requests completed, which
		// also avoids dividing by zero.
		rd_latency := 0.0
		wr_latency := 0.0
		avg_rd_sz := 0.0
		avg_wr_sz := 0.0
		if entry2.RdIos != entry1.RdIos {
			rd_latency = float64(entry2.RdTicks-entry1.RdTicks) / float64(entry2.RdIos-entry1.RdIos)
			avg_rd_sz = float64(entry2.RdSectors-entry1.RdSectors) / float64(entry2.RdIos-entry1.RdIos)
		}
		if entry2.WrIos != entry1.WrIos {
			wr_latency = float64(entry2.WrTicks-entry1.WrTicks) / float64(entry2.WrIos-entry1.WrIos)
			avg_wr_sz = float64(entry2.WrSectors-entry1.WrSectors) / float64(entry2.WrIos-entry1.WrIos)
		}

		// Positional initialization: field order must match DiskUsageEntry.
		entry := &DiskUsageEntry{
			interval,
			avgDelta(entry1.RdIos, entry2.RdIos, itv),
			avgDelta(entry1.WrIos, entry2.WrIos, itv),
			entry2.RdSectors - entry1.RdSectors,
			entry2.WrSectors - entry1.WrSectors,
			avgDelta(entry1.RdSectors, entry2.RdSectors, itv),
			avgDelta(entry1.WrSectors, entry2.WrSectors, itv),
			rd_latency,
			wr_latency,
			avg_rd_sz,
			avg_wr_sz,
			// ReqTicks is in milliseconds of queue occupancy; dividing by
			// the interval (and 1e3) yields the mean queue length.
			float64(entry2.ReqTicks-entry1.ReqTicks) / itv / 1.0e3,
		}

		(*usage)[name] = entry

		// Accumulate the aggregate entry. Latencies and sizes are summed
		// weighted by request count and normalized below.
		total.RdIops += entry.RdIops
		total.WrIops += entry.WrIops
		total.RdSectors += entry.RdSectors
		total.WrSectors += entry.WrSectors
		total.RdSecps += entry.RdSecps
		total.WrSecps += entry.WrSecps
		total.RdLatency += entry.RdLatency * float64(entry2.RdIos-entry1.RdIos)
		total.WrLatency += entry.WrLatency * float64(entry2.WrIos-entry1.WrIos)
		total.AvgRdSize += entry.AvgRdSize * float64(entry2.RdIos-entry1.RdIos)
		total.AvgWrSize += entry.AvgWrSize * float64(entry2.WrIos-entry1.WrIos)
		total.ReqQlen += entry.ReqQlen

		total_rd_ios += entry2.RdIos - entry1.RdIos
		total_wr_ios += entry2.WrIos - entry1.WrIos
	}

	// Normalize the weighted sums into means (skip when no I/O occurred).
	if total_rd_ios > 0 {
		total.RdLatency /= float64(total_rd_ios)
		total.AvgRdSize /= float64(total_rd_ios)
	}
	if total_wr_ios > 0 {
		total.WrLatency /= float64(total_wr_ios)
		total.AvgWrSize /= float64(total_wr_ios)
	}

	(*usage)["total"] = total

	return usage, nil
}
292
+
293
// usageItem converts a counter delta (v1 -> v2) over itv uptime ticks
// into a percentage of the interval.
func usageItem(v1 int64, v2 int64, itv int64) float64 {
	delta := float64(v2 - v1)
	return delta * 100.0 / float64(itv)
}
296
+
297
// GetNetUsage computes per-interface network rates between two NetStat
// snapshots: d1 taken at t1 and d2 taken at t2 (t2 must be later).
// Interfaces present in d1 but missing from d2 are skipped. A synthetic
// aggregate entry summing all rates is stored under "total".
//
// NOTE(review): total.Interval is never set and stays zero — confirm
// whether consumers rely on it.
func GetNetUsage(t1 time.Time, d1 *NetStat, t2 time.Time, d2 *NetStat) (*NetUsage, error) {
	if len(d1.Entries) == 0 && len(d2.Entries) == 0 {
		return nil, errors.New("no entries")
	}

	interval := t2.Sub(t1)
	itv := interval.Seconds()

	if itv <= 0 {
		return nil, errors.New("Non-positive interval")
	}

	net_usage := new(NetUsage)
	(*net_usage) = make(NetUsage)
	total := new(NetUsageEntry)

	for _, d1_entry := range d1.Entries {
		devname := d1_entry.Name

		// find devname in d2 (linear scan over the later snapshot)
		var d2_entry *NetStatEntry = nil
		for _, e := range d2.Entries {
			if e.Name == devname {
				d2_entry = e
				break
			}
		}

		if d2_entry == nil {
			continue
		}

		// Convert each counter delta into a per-second rate.
		ue := new(NetUsageEntry)

		ue.Interval = interval
		ue.RxBytesPerSec = avgDelta(d1_entry.RxBytes, d2_entry.RxBytes, itv)
		ue.RxPacketsPerSec = avgDelta(d1_entry.RxPackets, d2_entry.RxPackets, itv)
		ue.RxErrorsPerSec = avgDelta(d1_entry.RxErrors, d2_entry.RxErrors, itv)
		ue.RxDropsPerSec = avgDelta(d1_entry.RxDrops, d2_entry.RxDrops, itv)
		ue.RxFifoPerSec = avgDelta(d1_entry.RxFifo, d2_entry.RxFifo, itv)
		ue.RxFramePerSec = avgDelta(d1_entry.RxFrame, d2_entry.RxFrame, itv)
		ue.RxCompressedPerSec = avgDelta(d1_entry.RxCompressed, d2_entry.RxCompressed, itv)
		ue.RxMulticastPerSec = avgDelta(d1_entry.RxMulticast, d2_entry.RxMulticast, itv)
		ue.TxBytesPerSec = avgDelta(d1_entry.TxBytes, d2_entry.TxBytes, itv)
		ue.TxPacketsPerSec = avgDelta(d1_entry.TxPackets, d2_entry.TxPackets, itv)
		ue.TxErrorsPerSec = avgDelta(d1_entry.TxErrors, d2_entry.TxErrors, itv)
		ue.TxDropsPerSec = avgDelta(d1_entry.TxDrops, d2_entry.TxDrops, itv)
		ue.TxFifoPerSec = avgDelta(d1_entry.TxFifo, d2_entry.TxFifo, itv)
		ue.TxFramePerSec = avgDelta(d1_entry.TxFrame, d2_entry.TxFrame, itv)
		ue.TxCompressedPerSec = avgDelta(d1_entry.TxCompressed, d2_entry.TxCompressed, itv)
		ue.TxMulticastPerSec = avgDelta(d1_entry.TxMulticast, d2_entry.TxMulticast, itv)

		(*net_usage)[devname] = ue

		// Accumulate the machine-wide totals (simple sums of rates).
		total.RxBytesPerSec += ue.RxBytesPerSec
		total.RxPacketsPerSec += ue.RxPacketsPerSec
		total.RxErrorsPerSec += ue.RxErrorsPerSec
		total.RxDropsPerSec += ue.RxDropsPerSec
		total.RxFifoPerSec += ue.RxFifoPerSec
		total.RxFramePerSec += ue.RxFramePerSec
		total.RxCompressedPerSec += ue.RxCompressedPerSec
		total.RxMulticastPerSec += ue.RxMulticastPerSec
		total.TxBytesPerSec += ue.TxBytesPerSec
		total.TxPacketsPerSec += ue.TxPacketsPerSec
		total.TxErrorsPerSec += ue.TxErrorsPerSec
		total.TxDropsPerSec += ue.TxDropsPerSec
		total.TxFifoPerSec += ue.TxFifoPerSec
		total.TxFramePerSec += ue.TxFramePerSec
		total.TxCompressedPerSec += ue.TxCompressedPerSec
		total.TxMulticastPerSec += ue.TxMulticastPerSec
	}

	(*net_usage)["total"] = total

	return net_usage, nil
}
373
+
374
+ func (nusage *NetUsage) WriteJsonTo(buf *bytes.Buffer) {
375
+ var devices []string
376
+
377
+ for device, _ := range *nusage {
378
+ if device != "total" {
379
+ devices = append(devices, device)
380
+ }
381
+ }
382
+ sort.Strings(devices)
383
+
384
+ bytes, err := json.Marshal(devices)
385
+ if err != nil {
386
+ panic(err)
387
+ }
388
+ fmt.Fprintf(buf, `{"devices":%s`, string(bytes))
389
+
390
+ devices = append(devices, "total")
391
+
392
+ for _, device := range devices {
393
+ usage := (*nusage)[device]
394
+ buf.WriteString(`,"`)
395
+ buf.WriteString(device)
396
+ buf.WriteString(`":`)
397
+ usage.WriteJsonTo(buf)
398
+ }
399
+
400
+ buf.WriteByte('}')
401
+ }
402
+
403
+ func (entry *NetUsageEntry) WriteJsonTo(buf *bytes.Buffer) {
404
+ buf.WriteString(
405
+ fmt.Sprintf(`{"rxkbyteps":%.2f,"rxpktps":%.2f,"rxerrps":%.2f,"rxdropps":%.2f,"txkbyteps":%.2f,"txpktps":%.2f,"txerrps":%.2f,"txdropps":%.2f}`,
406
+ entry.RxBytesPerSec/1024.0, entry.RxPacketsPerSec,
407
+ entry.RxErrorsPerSec, entry.RxDropsPerSec,
408
+ entry.TxBytesPerSec/1024.0, entry.TxPacketsPerSec,
409
+ entry.TxErrorsPerSec, entry.TxDropsPerSec))
410
+ }