riemann-tools 1.0.0 → 1.1.0
- checksums.yaml +4 -4
- data/.github/dependabot.yml +11 -0
- data/.github/workflows/ci.yml +13 -0
- data/.github/workflows/codeql-analysis.yml +72 -0
- data/.rubocop.yml +32 -0
- data/CHANGELOG.md +31 -2
- data/README.markdown +8 -24
- data/Rakefile +4 -2
- data/SECURITY.md +42 -0
- data/bin/riemann-apache-status +92 -78
- data/bin/riemann-bench +54 -49
- data/bin/riemann-cloudant +44 -40
- data/bin/riemann-consul +82 -76
- data/bin/riemann-dir-files-count +53 -47
- data/bin/riemann-dir-space +53 -47
- data/bin/riemann-diskstats +78 -75
- data/bin/riemann-fd +68 -48
- data/bin/riemann-freeswitch +108 -103
- data/bin/riemann-haproxy +46 -40
- data/bin/riemann-health +4 -343
- data/bin/riemann-kvminstance +18 -13
- data/bin/riemann-memcached +35 -29
- data/bin/riemann-net +4 -104
- data/bin/riemann-nginx-status +74 -67
- data/bin/riemann-ntp +4 -33
- data/bin/riemann-portcheck +40 -31
- data/bin/riemann-proc +96 -90
- data/bin/riemann-varnish +51 -45
- data/bin/riemann-zookeeper +38 -34
- data/lib/riemann/tools/health.rb +347 -0
- data/lib/riemann/tools/net.rb +104 -0
- data/lib/riemann/tools/ntp.rb +41 -0
- data/lib/riemann/tools/version.rb +1 -1
- data/lib/riemann/tools.rb +37 -40
- data/riemann-tools.gemspec +4 -1
- data/tools/riemann-aws/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-aws/bin/riemann-aws-billing +72 -66
- data/tools/riemann-aws/bin/riemann-aws-rds-status +55 -41
- data/tools/riemann-aws/bin/riemann-aws-sqs-status +37 -31
- data/tools/riemann-aws/bin/riemann-aws-status +63 -51
- data/tools/riemann-aws/bin/riemann-elb-metrics +149 -148
- data/tools/riemann-aws/bin/riemann-s3-list +70 -65
- data/tools/riemann-aws/bin/riemann-s3-status +85 -82
- data/tools/riemann-chronos/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-chronos/bin/riemann-chronos +136 -119
- data/tools/riemann-docker/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-docker/bin/riemann-docker +163 -174
- data/tools/riemann-elasticsearch/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-elasticsearch/bin/riemann-elasticsearch +155 -147
- data/tools/riemann-marathon/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-marathon/bin/riemann-marathon +138 -122
- data/tools/riemann-mesos/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-mesos/bin/riemann-mesos +125 -110
- data/tools/riemann-munin/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-munin/bin/riemann-munin +28 -22
- data/tools/riemann-rabbitmq/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-rabbitmq/bin/riemann-rabbitmq +226 -222
- data/tools/riemann-riak/{Rakefile.rb → Rakefile} +2 -0
- data/tools/riemann-riak/bin/riemann-riak +281 -289
- data/tools/riemann-riak/riak_status/riak_status.rb +39 -39
- metadata +65 -16
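The pattern across these files: in 1.0.0 each check lived as a flat script under bin/, while 1.1.0 moves the logic into classes under lib/ (for example bin/riemann-health shrinks to a stub and lib/riemann/tools/health.rb appears), and the AWS executables shown in the hunks below are rewritten into the same class shape. A minimal sketch of that shape, assuming only what the diffs themselves show (the Riemann::Tools mixin supplying opt, options, report and a run entry point); the ExampleTool class, its flag, and its service name are hypothetical:

#!/usr/bin/env ruby
# frozen_string_literal: true

# Hypothetical minimal tool in the 1.1.0 layout; not part of this release.
Process.setproctitle($PROGRAM_NAME)

require 'riemann/tools'

module Riemann
  module Tools
    class ExampleTool
      include Riemann::Tools

      opt :threshold, 'Warning threshold', type: Integer, default: 10

      # Called on each reporting interval by the Riemann::Tools run loop.
      def tick
        value = 42 # stand-in measurement
        report(
          service: 'example.value',
          metric: value,
          state: value > options[:threshold] ? 'warning' : 'ok',
          ttl: 60,
        )
      end
    end
  end
end

Riemann::Tools::ExampleTool.run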
data/tools/riemann-aws/bin/riemann-elb-metrics
@@ -1,166 +1,167 @@
 #!/usr/bin/env ruby
+# frozen_string_literal: true
+
+Process.setproctitle($PROGRAM_NAME)
 
 require 'riemann/tools'
 
 $0 = __FILE__
 
-end
-def base_metrics
-  # get last 60 seconds
-  start_time = (Time.now.utc - 60).iso8601
-  end_time = Time.now.utc.iso8601
-  # The base query that all metrics would get
-  metric_base = {
-    "Namespace" => "AWS/ELB",
-    "StartTime" => start_time,
-    "EndTime" => end_time,
-    "Period" => 60,
-  }
-  metric_base
-end
-def tick
-  if options[:fog_credentials_file]
-    Fog.credentials_path = options[:fog_credentials_file]
-    Fog.credential = options[:fog_credential].to_sym
-    connection = Fog::AWS::CloudWatch.new
-  else
-    if options[:aws_access] && options[:aws_secret]
-      connection = Fog::AWS::CloudWatch.new({
-        :aws_access_key_id => options[:aws_access],
-        :aws_secret_access_key => options[:aws_secret],
-        :region => options[:aws_region]
-      })
-    else
-      connection = Fog::AWS::CloudWatch.new({
-        :use_iam_profile => true,
-        :region => options[:aws_region]
-      })
+module Riemann
+  module Tools
+    class ELBMetrics
+      include Riemann::Tools
+      require 'fog'
+      require 'time'
+
+      opt :fog_credentials_file, 'Fog credentials file', type: String
+      opt :fog_credential, 'Fog credentials to use', type: String
+      opt :aws_access, 'AWS Access Key', type: String
+      opt :aws_secret, 'AWS Secret Key', type: String
+      opt :aws_region, 'AWS Region', type: String, default: 'eu-west-1'
+      opt :aws_azs, 'List of AZs to aggregate against', type: :strings, default: ['all_az']
+      opt :elbs, 'List of ELBs to pull metrics from', type: :strings, required: true
+
+      def standard_metrics
+        # ELB metric types, from:
+        # http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html#elb-metricscollected
+        {
+          'Latency' => {
+            'Unit' => 'Seconds',
+            'Statistics' => %w[Maximum Minimum Average],
+          },
+          'RequestCount' => {
+            'Unit' => 'Count',
+            'Statistics' => ['Sum'],
+          },
+          'HealthyHostCount' => {
+            'Units' => 'Count',
+            'Statistics' => %w[Minimum Maximum Average],
+          },
+          'UnHealthyHostCount' => {
+            'Units' => 'Count',
+            'Statistics' => %w[Minimum Maximum Average],
+          },
+          'HTTPCode_ELB_4XX' => {
+            'Units' => 'Count',
+            'Statistics' => ['Sum'],
+          },
+          'HTTPCode_ELB_5XX' => {
+            'Units' => 'Count',
+            'Statistics' => ['Sum'],
+          },
+          'HTTPCode_Backend_2XX' => {
+            'Units' => 'Count',
+            'Statistics' => ['Sum'],
+          },
+          'HTTPCode_Backend_3XX' => {
+            'Units' => 'Count',
+            'Statistics' => ['Sum'],
+          },
+          'HTTPCode_Backend_4XX' => {
+            'Units' => 'Count',
+            'Statistics' => ['Sum'],
+          },
+          'HTTPCode_Backend_5XX' => {
+            'Units' => 'Count',
+            'Statistics' => ['Sum'],
+          },
+        }
       end
-  end
 
-        { "Name" => "LoadBalancerName", "Value" => lb },
-        { "Name" => "AvailabilityZone" , "Value" => az}
-      ]
-    end
+      def base_metrics
+        # get last 60 seconds
+        start_time = (Time.now.utc - 60).iso8601
+        end_time = Time.now.utc.iso8601
+
+        # The base query that all metrics would get
+        {
+          'Namespace' => 'AWS/ELB',
+          'StartTime' => start_time,
+          'EndTime' => end_time,
+          'Period' => 60,
+        }
+      end
 
+      def tick
+        if options[:fog_credentials_file]
+          Fog.credentials_path = options[:fog_credentials_file]
+          Fog.credential = options[:fog_credential].to_sym
+          connection = Fog::AWS::CloudWatch.new
+        else
+          connection = if options[:aws_access] && options[:aws_secret]
+                         Fog::AWS::CloudWatch.new({
+                           aws_access_key_id: options[:aws_access],
+                           aws_secret_access_key: options[:aws_secret],
+                           region: options[:aws_region],
+                         })
+                       else
+                         Fog::AWS::CloudWatch.new({
+                           use_iam_profile: true,
+                           region: options[:aws_region],
+                         })
+                       end
+        end
 
+        options[:elbs].each do |lb|
+          metric_options = standard_metrics
+          metric_base_options = base_metrics
+
+          options[:aws_azs].each do |az|
+            metric_options.keys.sort.each do |metric_type|
+              merged_options = metric_base_options.merge(metric_options[metric_type])
+              merged_options['MetricName'] = metric_type
+              merged_options['Dimensions'] = if az == 'all_az'
+                                               [{ 'Name' => 'LoadBalancerName', 'Value' => lb }]
+                                             else
+                                               [
+                                                 { 'Name' => 'LoadBalancerName', 'Value' => lb },
+                                                 { 'Name' => 'AvailabilityZone', 'Value' => az },
+                                               ]
+                                             end
+
+              result = connection.get_metric_statistics(merged_options)
+
+              # "If no response codes in the category 2XX-5XX range are sent to clients within
+              # the given time period, values for these metrics will not be recorded in CloudWatch"
+              # next if result.body["GetMetricStatisticsResult"]["Datapoints"].empty? && metric_type =~ /[2345]XX/
+              #
+              if result.body['GetMetricStatisticsResult']['Datapoints'].empty?
+                standard_metrics[metric_type]['Statistics'].each do |stat_type|
+                  event = event(lb, az, metric_type, stat_type, 0.0)
+                  report(event)
+                end
+                next
+              end
+
+              # We should only ever have a single data point
+              result.body['GetMetricStatisticsResult']['Datapoints'][0].keys.sort.each do |stat_type|
+                next if stat_type == 'Unit'
+                next if stat_type == 'Timestamp'
+
+                unit = result.body['GetMetricStatisticsResult']['Datapoints'][0]['Unit']
+                metric = result.body['GetMetricStatisticsResult']['Datapoints'][0][stat_type]
+                event = event(lb, az, metric_type, stat_type, metric, unit)
+                report(event)
+              end
             end
-            next
-          end
-          # We should only ever have a single data point
-          result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
-            next if stat_type == "Unit"
-            next if stat_type == "Timestamp"
-            unit = result.body["GetMetricStatisticsResult"]["Datapoints"][0]["Unit"]
-            metric = result.body["GetMetricStatisticsResult"]["Datapoints"][0][stat_type]
-            event = event(lb, az, metric_type, stat_type, metric, unit)
-            report(event)
           end
         end
       end
-  end
-end
 
+      private
+
+      def event(lb, az, metric_type, stat_type, metric, unit = nil)
+        {
+          host: lb,
+          service: "elb.#{az}.#{metric_type}.#{stat_type}",
+          ttl: 60,
+          description: "#{lb} #{metric_type} #{stat_type} (#{unit})",
+          tags: ['elb_metrics'],
+          metric: metric,
+        }
+      end
+    end
   end
 end
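The private event helper added above fixes the service naming scheme for every ELB datapoint; a call with invented values would build roughly:

# Hypothetical values, illustrating the hash returned by the new ELBMetrics#event helper.
event('my-elb', 'all_az', 'Latency', 'Average', 0.042, 'Seconds')
# => {
#      host: 'my-elb',
#      service: 'elb.all_az.Latency.Average',
#      ttl: 60,
#      description: 'my-elb Latency Average (Seconds)',
#      tags: ['elb_metrics'],
#      metric: 0.042,
#    }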
data/tools/riemann-aws/bin/riemann-s3-list
@@ -1,82 +1,87 @@
 #!/usr/bin/env ruby
+# frozen_string_literal: true
+
+Process.setproctitle($PROGRAM_NAME)
 
 require 'riemann/tools'
 
 $0 = __FILE__
 
-opt :fog_credential, "Fog credentials to use", :type => String
-opt :aws_access, "AWS Access Key", :type => String
-opt :aws_secret, "AWS Secret Key", :type => String
-opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
-opt :buckets, "Buckets to pull metrics from, multi=true, can have a prefix like mybucket/prefix", :type => String, :multi => true, :required => true
-opt :max_objects, "Max number of objects to list before stopping to save bandwidth", :default => -1
+module Riemann
+  module Tools
+    class S3Metrics
+      include Riemann::Tools
+      require 'fog'
+      require 'time'
 
+      opt :fog_credentials_file, 'Fog credentials file', type: String
+      opt :fog_credential, 'Fog credentials to use', type: String
+      opt :aws_access, 'AWS Access Key', type: String
+      opt :aws_secret, 'AWS Secret Key', type: String
+      opt :aws_region, 'AWS Region', type: String, default: 'eu-west-1'
+      opt :buckets, 'Buckets to pull metrics from, multi=true, can have a prefix like mybucket/prefix', type: String,
+          multi: true, required: true
+      opt :max_objects, 'Max number of objects to list before stopping to save bandwidth', default: -1
 
+      def tick
+        if options[:fog_credentials_file]
+          Fog.credentials_path = options[:fog_credentials_file]
+          Fog.credential = options[:fog_credential].to_sym
+          connection = Fog::Storage.new
+        else
+          connection = if options[:aws_access] && options[:aws_secret]
+                         Fog::Storage.new({
+                           provider: 'AWS',
+                           aws_access_key_id: options[:aws_access],
+                           aws_secret_access_key: options[:aws_secret],
+                           region: options[:aws_region],
+                         })
+                       else
+                         Fog::Storage.new({
+                           provider: 'AWS',
+                           use_iam_profile: true,
+                           region: options[:aws_region],
+                         })
+                       end
+        end
 
+        options[:buckets].each do |url|
+          split = url.split('/')
+          bucket = split[0]
+          prefix = ''
+          prefix = url[(split[0].length + 1)..] if split[1]
+          count = 0
+          connection.directories.get(bucket, prefix: prefix).files.map do |_file|
+            count += 1
+            break if options[:max_objects].positive? && count > options[:max_objects]
+          end
+          event = if options[:max_objects].positive? && count > options[:max_objects]
+                    event(
+                      url, 'objectCount', count, "count was bigger than threshold #{options[:max_objects]}",
+                      'warning',
+                    )
+                  else
+                    event(url, 'objectCount', count, "All objects counted, threshold=#{options[:max_objects]}", 'ok')
+                  end
+          report(event)
         end
       end
+
+      private
+
+      def event(bucket, label, metric, description, severity)
+        {
+          host: "bucket_#{bucket}",
+          service: "s3.#{label}",
+          ttl: 300,
+          description: "#{bucket} #{description}",
+          tags: ['s3_metrics'],
+          metric: metric,
+          state: severity,
+        }
       end
     end
   end
-private
-def event(bucket, label, metric, description, severity)
-  event = {
-    host: "bucket_#{bucket}",
-    service: "s3.#{label}",
-    ttl: 300,
-    description: "#{bucket} #{description}",
-    tags: ["s3_metrics"],
-    metric: metric,
-    state: severity
-  }
-end
 end
 
 Riemann::Tools::S3Metrics.run
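The bucket handling added above treats everything before the first '/' in a --buckets entry as the bucket name and the remainder as a key prefix; a worked example with an invented entry:

# Invented input, tracing the prefix parsing in the s3-list tick above.
url = 'mybucket/logs/2022'
split = url.split('/')                  # => ["mybucket", "logs", "2022"]
bucket = split[0]                       # => "mybucket"
prefix = url[(split[0].length + 1)..] if split[1]
prefix                                  # => "logs/2022"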
data/tools/riemann-aws/bin/riemann-s3-status
@@ -1,98 +1,101 @@
 #!/usr/bin/env ruby
+# frozen_string_literal: true
+
+Process.setproctitle($PROGRAM_NAME)
 
 require 'riemann/tools'
 
 $0 = __FILE__
 
-end
-def tick
-  if options[:fog_credentials_file]
-    Fog.credentials_path = options[:fog_credentials_file]
-    Fog.credential = options[:fog_credential].to_sym
-    connection = Fog::AWS::CloudWatch.new
-  else
-    if options[:aws_access] && options[:aws_secret]
-      connection = Fog::AWS::CloudWatch.new({
-        :aws_access_key_id => options[:aws_access],
-        :aws_secret_access_key => options[:aws_secret],
-        :region => options[:aws_region]
-      })
-    else
-      connection = Fog::AWS::CloudWatch.new({
-        :use_iam_profile => true,
-        :region => options[:aws_region]
-      })
+module Riemann
+  module Tools
+    class S3Metrics
+      include Riemann::Tools
+      require 'fog'
+      require 'time'
+
+      opt :fog_credentials_file, 'Fog credentials file', type: String
+      opt :fog_credential, 'Fog credentials to use', type: String
+      opt :aws_access, 'AWS Access Key', type: String
+      opt :aws_secret, 'AWS Secret Key', type: String
+      opt :aws_region, 'AWS Region', type: String, default: 'eu-west-1'
+      opt :buckets, 'Buckets to pull metrics from, multi=true', type: String, multi: true, required: true
+      opt :statistic, 'Statistic to retrieve, multi=true, e.g. --statistic=Average --statistic=Maximum', type: String,
+          multi: true, required: true
+
+      def base_metrics
+        # get last 60 seconds
+        start_time = (Time.now.utc - 3600 * 24 * 1).iso8601
+        end_time = Time.now.utc.iso8601
+
+        # The base query that all metrics would get
+        {
+          'Namespace' => 'AWS/S3',
+          'StartTime' => start_time,
+          'EndTime' => end_time,
+          'Period' => 3600,
+          'MetricName' => 'NumberOfObjects',
+        }
       end
-end
-options[:statistic].each do |statistic|
-  options[:buckets].each do |bucket|
-    metric_base_options = base_metrics
-    metric_base_options["Statistics"] = statistic
-    metric_base_options["Dimensions"] = [
-      {"Name" => "BucketName", "Value" => bucket},
-      {"Name" => "StorageType", "Value" => "AllStorageTypes"}]
 
+      def tick
+        if options[:fog_credentials_file]
+          Fog.credentials_path = options[:fog_credentials_file]
+          Fog.credential = options[:fog_credential].to_sym
+          connection = Fog::AWS::CloudWatch.new
+        else
+          connection = if options[:aws_access] && options[:aws_secret]
+                         Fog::AWS::CloudWatch.new({
+                           aws_access_key_id: options[:aws_access],
+                           aws_secret_access_key: options[:aws_secret],
+                           region: options[:aws_region],
+                         })
+                       else
+                         Fog::AWS::CloudWatch.new({
+                           use_iam_profile: true,
+                           region: options[:aws_region],
+                         })
+                       end
        end
-result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
-  next if stat_type == "Unit"
-  next if stat_type == "Timestamp"
 
+        options[:statistic].each do |statistic|
+          options[:buckets].each do |bucket|
+            metric_base_options = base_metrics
+            metric_base_options['Statistics'] = statistic
+            metric_base_options['Dimensions'] = [
+              { 'Name' => 'BucketName', 'Value' => bucket },
+              { 'Name' => 'StorageType', 'Value' => 'AllStorageTypes' },
+            ]
+
+            result = connection.get_metric_statistics(metric_base_options)
+            next if result.body['GetMetricStatisticsResult']['Datapoints'].empty?
+
+            result.body['GetMetricStatisticsResult']['Datapoints'][0].keys.sort.each do |stat_type|
+              next if stat_type == 'Unit'
+              next if stat_type == 'Timestamp'
+
+              unit = result.body['GetMetricStatisticsResult']['Datapoints'][0]['Unit']
+              metric = result.body['GetMetricStatisticsResult']['Datapoints'][0][stat_type]
+              event = event(bucket, result.body['GetMetricStatisticsResult']['Label'], stat_type, unit, metric)
+              report(event)
+            end
+          end
         end
       end
-    end
-  end
 
+      private
+
+      def event(bucket, label, metric_type, stat_type, metric, unit = nil)
+        {
+          host: "bucket_#{bucket}",
+          service: "s3.#{label}.#{metric_type}.#{stat_type}",
+          ttl: 300,
+          description: "#{bucket} #{metric_type} #{stat_type} (#{unit})",
+          tags: ['s3_metrics'],
+          metric: metric,
+        }
+      end
+    end
   end
 end
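One thing to note in this last hunk: the rewritten base_metrics keeps the old '# get last 60 seconds' comment, but the query it builds actually covers the previous 24 hours at a 3600-second period. Checking the window arithmetic with a hypothetical timestamp:

require 'time'

now = Time.utc(2022, 7, 1, 12, 0, 0)   # hypothetical current time
(now - 3600 * 24 * 1).iso8601          # => "2022-06-30T12:00:00Z", i.e. 24 hours earlier
now.iso8601                            # => "2022-07-01T12:00:00Z"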