riemann-tools 1.0.0 → 1.2.0
- checksums.yaml +4 -4
- data/.github/dependabot.yml +11 -0
- data/.github/workflows/ci.yml +15 -0
- data/.github/workflows/codeql-analysis.yml +72 -0
- data/.gitignore +2 -0
- data/.rubocop.yml +40 -0
- data/.ruby-version +1 -0
- data/CHANGELOG.md +62 -2
- data/README.markdown +8 -24
- data/Rakefile +14 -5
- data/SECURITY.md +42 -0
- data/bin/riemann-apache-status +3 -94
- data/bin/riemann-bench +4 -67
- data/bin/riemann-cloudant +3 -54
- data/bin/riemann-consul +3 -102
- data/bin/riemann-dir-files-count +3 -51
- data/bin/riemann-dir-space +3 -51
- data/bin/riemann-diskstats +3 -91
- data/bin/riemann-fd +4 -63
- data/bin/riemann-freeswitch +4 -116
- data/bin/riemann-haproxy +3 -54
- data/bin/riemann-health +3 -344
- data/bin/riemann-kvminstance +4 -19
- data/bin/riemann-memcached +3 -33
- data/bin/riemann-net +3 -105
- data/bin/riemann-nginx-status +3 -80
- data/bin/riemann-ntp +3 -34
- data/bin/riemann-portcheck +3 -37
- data/bin/riemann-proc +3 -104
- data/bin/riemann-varnish +3 -50
- data/bin/riemann-wrapper +75 -0
- data/bin/riemann-zookeeper +3 -37
- data/lib/riemann/tools/apache_status.rb +107 -0
- data/lib/riemann/tools/bench.rb +72 -0
- data/lib/riemann/tools/cloudant.rb +57 -0
- data/lib/riemann/tools/consul_health.rb +107 -0
- data/lib/riemann/tools/dir_files_count.rb +56 -0
- data/lib/riemann/tools/dir_space.rb +56 -0
- data/lib/riemann/tools/diskstats.rb +94 -0
- data/lib/riemann/tools/fd.rb +81 -0
- data/lib/riemann/tools/freeswitch.rb +119 -0
- data/lib/riemann/tools/haproxy.rb +59 -0
- data/lib/riemann/tools/health.rb +478 -0
- data/lib/riemann/tools/kvm.rb +23 -0
- data/lib/riemann/tools/memcached.rb +38 -0
- data/lib/riemann/tools/net.rb +105 -0
- data/lib/riemann/tools/nginx_status.rb +86 -0
- data/lib/riemann/tools/ntp.rb +42 -0
- data/lib/riemann/tools/portcheck.rb +45 -0
- data/lib/riemann/tools/proc.rb +109 -0
- data/lib/riemann/tools/riemann_client_wrapper.rb +43 -0
- data/lib/riemann/tools/uptime_parser.tab.rb +323 -0
- data/lib/riemann/tools/varnish.rb +55 -0
- data/lib/riemann/tools/version.rb +1 -1
- data/lib/riemann/tools/zookeeper.rb +40 -0
- data/lib/riemann/tools.rb +31 -52
- data/riemann-tools.gemspec +8 -2
- data/tools/riemann-aws/{Rakefile.rb → Rakefile} +8 -9
- data/tools/riemann-aws/bin/riemann-aws-billing +4 -83
- data/tools/riemann-aws/bin/riemann-aws-rds-status +4 -50
- data/tools/riemann-aws/bin/riemann-aws-sqs-status +4 -40
- data/tools/riemann-aws/bin/riemann-aws-status +4 -67
- data/tools/riemann-aws/bin/riemann-elb-metrics +4 -163
- data/tools/riemann-aws/bin/riemann-s3-list +4 -78
- data/tools/riemann-aws/bin/riemann-s3-status +4 -95
- data/tools/riemann-aws/lib/riemann/tools/aws/billing.rb +87 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/elb_metrics.rb +163 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/rds_status.rb +63 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/s3_list.rb +82 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/s3_status.rb +97 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/sqs_status.rb +45 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/status.rb +74 -0
- data/tools/riemann-chronos/{Rakefile.rb → Rakefile} +8 -9
- data/tools/riemann-chronos/bin/riemann-chronos +3 -139
- data/tools/riemann-chronos/lib/riemann/tools/chronos.rb +157 -0
- data/tools/riemann-docker/{Rakefile.rb → Rakefile} +7 -8
- data/tools/riemann-docker/bin/riemann-docker +4 -213
- data/tools/riemann-docker/lib/riemann/tools/docker.rb +200 -0
- data/tools/riemann-elasticsearch/{Rakefile.rb → Rakefile} +8 -9
- data/tools/riemann-elasticsearch/bin/riemann-elasticsearch +3 -161
- data/tools/riemann-elasticsearch/lib/riemann/tools/elasticsearch.rb +170 -0
- data/tools/riemann-marathon/{Rakefile.rb → Rakefile} +8 -9
- data/tools/riemann-marathon/bin/riemann-marathon +3 -142
- data/tools/riemann-marathon/lib/riemann/tools/marathon.rb +159 -0
- data/tools/riemann-mesos/{Rakefile.rb → Rakefile} +8 -9
- data/tools/riemann-mesos/bin/riemann-mesos +3 -126
- data/tools/riemann-mesos/lib/riemann/tools/mesos.rb +142 -0
- data/tools/riemann-munin/{Rakefile.rb → Rakefile} +7 -8
- data/tools/riemann-munin/bin/riemann-munin +3 -32
- data/tools/riemann-munin/lib/riemann/tools/munin.rb +37 -0
- data/tools/riemann-rabbitmq/{Rakefile.rb → Rakefile} +8 -9
- data/tools/riemann-rabbitmq/bin/riemann-rabbitmq +3 -264
- data/tools/riemann-rabbitmq/lib/riemann/tools/rabbitmq.rb +269 -0
- data/tools/riemann-riak/{Rakefile.rb → Rakefile} +7 -8
- data/tools/riemann-riak/bin/riemann-riak +3 -326
- data/tools/riemann-riak/bin/riemann-riak-keys +0 -1
- data/tools/riemann-riak/bin/riemann-riak-ring +0 -1
- data/tools/riemann-riak/lib/riemann/tools/riak.rb +317 -0
- metadata +112 -16
- data/.travis.yml +0 -31
- data/tools/riemann-riak/riak_status/key_count.erl +0 -13
- data/tools/riemann-riak/riak_status/riak_status.rb +0 -152
- data/tools/riemann-riak/riak_status/ringready.erl +0 -9
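Most of the churn above is one refactoring: every executable under bin/ and tools/*/bin/ is cut down to a thin wrapper that requires a class under lib/riemann/tools/ (the AWS tools move under lib/riemann/tools/aws/) and calls its run method, while the logic that used to live inline in the script moves into that class. The wrapper shape, taken from the riemann-elb-metrics diff below:

#!/usr/bin/env ruby
# frozen_string_literal: true

Process.setproctitle($PROGRAM_NAME)

require 'riemann/tools/aws/elb_metrics'

Riemann::Tools::Aws::ElbMetrics.run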

data/tools/riemann-aws/bin/riemann-elb-metrics
@@ -1,167 +1,8 @@
 #!/usr/bin/env ruby
-
+# frozen_string_literal: true

-
+Process.setproctitle($PROGRAM_NAME)

-
+require 'riemann/tools/aws/elb_metrics'

-
-include Riemann::Tools
-require 'fog'
-require 'time'
-
-opt :fog_credentials_file, "Fog credentials file", :type => String
-opt :fog_credential, "Fog credentials to use", :type => String
-opt :aws_access, "AWS Access Key", :type => String
-opt :aws_secret, "AWS Secret Key", :type => String
-opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
-opt :aws_azs, "List of AZs to aggregate against", :type => :strings, :default => [ "all_az" ]
-opt :elbs, "List of ELBs to pull metrics from", :type => :strings, :required => true
-
-def standard_metrics
-  # ELB metric types, from:
-  # http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html#elb-metricscollected
-  metric_options = {
-    "Latency" => {
-      "Unit" => "Seconds",
-      "Statistics" => ["Maximum", "Minimum", "Average" ]
-    },
-    "RequestCount" => {
-      "Unit" => "Count",
-      "Statistics" => [ "Sum" ]
-    },
-    "HealthyHostCount" => {
-      "Units" => "Count",
-      "Statistics" => [ "Minimum", "Maximum", "Average" ]
-    },
-    "UnHealthyHostCount" => {
-      "Units" => "Count",
-      "Statistics" => [ "Minimum", "Maximum", "Average" ]
-    },
-    "HTTPCode_ELB_4XX" => {
-      "Units" => "Count",
-      "Statistics" => [ "Sum" ]
-    },
-    "HTTPCode_ELB_5XX" => {
-      "Units" => "Count",
-      "Statistics" => [ "Sum" ]
-    },
-    "HTTPCode_Backend_2XX" => {
-      "Units" => "Count",
-      "Statistics" => [ "Sum" ]
-    },
-    "HTTPCode_Backend_3XX" => {
-      "Units" => "Count",
-      "Statistics" => [ "Sum" ]
-    },
-    "HTTPCode_Backend_4XX" => {
-      "Units" => "Count",
-      "Statistics" => [ "Sum" ]
-    },
-    "HTTPCode_Backend_5XX" => {
-      "Units" => "Count",
-      "Statistics" => [ "Sum" ]
-    }
-  }
-
-  metric_options
-end
-
-def base_metrics
-  # get last 60 seconds
-  start_time = (Time.now.utc - 60).iso8601
-  end_time = Time.now.utc.iso8601
-
-  # The base query that all metrics would get
-  metric_base = {
-    "Namespace" => "AWS/ELB",
-    "StartTime" => start_time,
-    "EndTime" => end_time,
-    "Period" => 60,
-  }
-
-  metric_base
-end
-
-
-def tick
-  if options[:fog_credentials_file]
-    Fog.credentials_path = options[:fog_credentials_file]
-    Fog.credential = options[:fog_credential].to_sym
-    connection = Fog::AWS::CloudWatch.new
-  else
-    if options[:aws_access] && options[:aws_secret]
-      connection = Fog::AWS::CloudWatch.new({
-        :aws_access_key_id => options[:aws_access],
-        :aws_secret_access_key => options[:aws_secret],
-        :region => options[:aws_region]
-      })
-    else
-      connection = Fog::AWS::CloudWatch.new({
-        :use_iam_profile => true,
-        :region => options[:aws_region]
-      })
-    end
-  end
-
-  options[:elbs].each do |lb|
-
-    metric_options = standard_metrics
-    metric_base_options = base_metrics
-
-    options[:aws_azs].each do |az|
-      metric_options.keys.sort.each do |metric_type|
-        merged_options = metric_base_options.merge(metric_options[metric_type])
-        merged_options["MetricName"] = metric_type
-        if az == "all_az"
-          merged_options["Dimensions"] = [ { "Name" => "LoadBalancerName", "Value" => lb } ]
-        else
-          merged_options["Dimensions"] = [
-            { "Name" => "LoadBalancerName", "Value" => lb },
-            { "Name" => "AvailabilityZone" , "Value" => az}
-          ]
-        end
-
-        result = connection.get_metric_statistics(merged_options)
-
-        # "If no response codes in the category 2XX-5XX range are sent to clients within
-        # the given time period, values for these metrics will not be recorded in CloudWatch"
-        #next if result.body["GetMetricStatisticsResult"]["Datapoints"].empty? && metric_type =~ /[2345]XX/
-        #
-        if result.body["GetMetricStatisticsResult"]["Datapoints"].empty?
-          standard_metrics[metric_type]['Statistics'].each do |stat_type|
-            event = event(lb, az, metric_type, stat_type, 0.0)
-            report(event)
-          end
-          next
-        end
-
-        # We should only ever have a single data point
-        result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
-          next if stat_type == "Unit"
-          next if stat_type == "Timestamp"
-
-          unit = result.body["GetMetricStatisticsResult"]["Datapoints"][0]["Unit"]
-          metric = result.body["GetMetricStatisticsResult"]["Datapoints"][0][stat_type]
-          event = event(lb, az, metric_type, stat_type, metric, unit)
-          report(event)
-        end
-      end
-    end
-  end
-end
-
-private
-def event(lb, az, metric_type, stat_type, metric, unit=nil)
-  event = {
-    host: lb,
-    service: "elb.#{az}.#{metric_type}.#{stat_type}",
-    ttl: 60,
-    description: "#{lb} #{metric_type} #{stat_type} (#{unit})",
-    tags: ["elb_metrics"],
-    metric: metric
-  }
-end
-end
-
-Riemann::Tools::ELBMetrics.run
+Riemann::Tools::Aws::ElbMetrics.run

data/tools/riemann-aws/bin/riemann-s3-list
@@ -1,82 +1,8 @@
 #!/usr/bin/env ruby
-
+# frozen_string_literal: true

-
+Process.setproctitle($PROGRAM_NAME)

-
+require 'riemann/tools/aws/s3_list'

-
-include Riemann::Tools
-require 'fog'
-require 'time'
-
-opt :fog_credentials_file, "Fog credentials file", :type => String
-opt :fog_credential, "Fog credentials to use", :type => String
-opt :aws_access, "AWS Access Key", :type => String
-opt :aws_secret, "AWS Secret Key", :type => String
-opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
-opt :buckets, "Buckets to pull metrics from, multi=true, can have a prefix like mybucket/prefix", :type => String, :multi => true, :required => true
-opt :max_objects, "Max number of objects to list before stopping to save bandwidth", :default => -1
-
-
-def tick
-  if options[:fog_credentials_file]
-    Fog.credentials_path = options[:fog_credentials_file]
-    Fog.credential = options[:fog_credential].to_sym
-    connection = Fog::Storage.new
-  else
-    if options[:aws_access] && options[:aws_secret]
-      connection = Fog::Storage.new({
-        :provider => "AWS",
-        :aws_access_key_id => options[:aws_access],
-        :aws_secret_access_key => options[:aws_secret],
-        :region => options[:aws_region]
-      })
-    else
-      connection = Fog::Storage.new({
-        :provider => "AWS",
-        :use_iam_profile => true,
-        :region => options[:aws_region]
-      })
-    end
-  end
-
-  options[:buckets].each do |url|
-    split = url.split('/')
-    bucket = split[0]
-    prefix = ""
-    if (split[1])
-      prefix = url[(split[0].length+1)..-1]
-    end
-    count = 0
-    connection.directories.get(bucket, prefix: prefix).files.map do |file|
-      count = count +1
-      if (options[:max_objects]>0 && count>options[:max_objects])
-        break
-      end
-    end
-    if (options[:max_objects]>0 && count>options[:max_objects])
-      event = event(url, "objectCount", count, "count was bigger than threshold #{options[:max_objects]}", "warning")
-      report(event)
-    else
-      event = event(url, "objectCount", count, "All objects counted, threshold=#{options[:max_objects]}", "ok")
-      report(event)
-    end
-  end
-end
-
-private
-def event(bucket, label, metric, description, severity)
-  event = {
-    host: "bucket_#{bucket}",
-    service: "s3.#{label}",
-    ttl: 300,
-    description: "#{bucket} #{description}",
-    tags: ["s3_metrics"],
-    metric: metric,
-    state: severity
-  }
-end
-end
-
-Riemann::Tools::S3Metrics.run
+Riemann::Tools::Aws::S3List.run

data/tools/riemann-aws/bin/riemann-s3-status
@@ -1,99 +1,8 @@
 #!/usr/bin/env ruby
-
+# frozen_string_literal: true

-
+Process.setproctitle($PROGRAM_NAME)

-
+require 'riemann/tools/aws/s3_status'

-
-include Riemann::Tools
-require 'fog'
-require 'time'
-
-opt :fog_credentials_file, "Fog credentials file", :type => String
-opt :fog_credential, "Fog credentials to use", :type => String
-opt :aws_access, "AWS Access Key", :type => String
-opt :aws_secret, "AWS Secret Key", :type => String
-opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
-opt :buckets, "Buckets to pull metrics from, multi=true", :type => String, :multi => true, :required => true
-opt :statistic, "Statistic to retrieve, multi=true, e.g. --statistic=Average --statistic=Maximum", :type => String, :multi => true, :required => true
-
-
-def base_metrics
-  # get last 60 seconds
-  start_time = (Time.now.utc - 3600 * 24 * 1).iso8601
-  end_time = Time.now.utc.iso8601
-
-  # The base query that all metrics would get
-  metric_base = {
-    "Namespace" => "AWS/S3",
-    "StartTime" => start_time,
-    "EndTime" => end_time,
-    "Period" => 3600,
-    "MetricName" => "NumberOfObjects",
-  }
-
-  metric_base
-end
-
-
-def tick
-  if options[:fog_credentials_file]
-    Fog.credentials_path = options[:fog_credentials_file]
-    Fog.credential = options[:fog_credential].to_sym
-    connection = Fog::AWS::CloudWatch.new
-  else
-    if options[:aws_access] && options[:aws_secret]
-      connection = Fog::AWS::CloudWatch.new({
-        :aws_access_key_id => options[:aws_access],
-        :aws_secret_access_key => options[:aws_secret],
-        :region => options[:aws_region]
-      })
-    else
-      connection = Fog::AWS::CloudWatch.new({
-        :use_iam_profile => true,
-        :region => options[:aws_region]
-      })
-    end
-  end
-
-  options[:statistic].each do |statistic|
-    options[:buckets].each do |bucket|
-
-      metric_base_options = base_metrics
-      metric_base_options["Statistics"] = statistic
-      metric_base_options["Dimensions"] = [
-        {"Name" => "BucketName", "Value" => bucket},
-        {"Name" => "StorageType", "Value" => "AllStorageTypes"}]
-
-      result = connection.get_metric_statistics(metric_base_options)
-      if result.body["GetMetricStatisticsResult"]["Datapoints"].empty?
-        next
-      end
-      result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
-        next if stat_type == "Unit"
-        next if stat_type == "Timestamp"
-
-        unit = result.body["GetMetricStatisticsResult"]["Datapoints"][0]["Unit"]
-        metric = result.body["GetMetricStatisticsResult"]["Datapoints"][0][stat_type]
-        event = event(bucket, result.body["GetMetricStatisticsResult"]["Label"], stat_type, unit, metric)
-        report(event)
-      end
-    end
-  end
-end
-
-private
-def event(bucket, label, metric_type, stat_type, metric, unit=nil)
-  event = {
-    host: "bucket_#{bucket}",
-    service: "s3.#{label}.#{metric_type}.#{stat_type}",
-    ttl: 300,
-    description: "#{bucket} #{metric_type} #{stat_type} (#{unit})",
-    tags: ["s3_metrics"],
-    metric: metric
-  }
-end
-end
-
-Riemann::Tools::S3Metrics.run
+Riemann::Tools::Aws::S3Status.run

data/tools/riemann-aws/lib/riemann/tools/aws/billing.rb
@@ -0,0 +1,87 @@
+# frozen_string_literal: true
+
+require 'riemann/tools'
+
+module Riemann
+  module Tools
+    module Aws
+      class Billing
+        include Riemann::Tools
+        require 'fog/aws'
+
+        opt :fog_credentials_file, 'Fog credentials file', type: String
+        opt :fog_credential, 'Fog credentials to use', type: String
+
+        opt :access_key, 'AWS access key', type: String
+        opt :secret_key, 'Secret access key', type: String
+        opt :services, 'AWS services: AmazonEC2 AmazonS3 AWSDataTransfer', type: :strings, multi: true,
+            default: %w[AmazonEC2 AmazonS3 AWSDataTransfer]
+
+        opt :time_start, 'Start time in seconds of the metrics period (2hrs ago default)', type: Integer, default: 7200
+        opt :time_end, 'End time in seconds of the metrics period ', type: Integer, default: 60
+
+        def initialize
+          if options[:fog_credentials_file]
+            Fog.credentials_path = opts[:fog_credentials_file]
+            Fog.credential = opts[:fog_credential].to_sym
+            @cloudwatch = Fog::AWS::CloudWatch.new
+          else
+            creds = if opts.key?('secret_key') && opts.key?('access_key')
+                      {
+                        aws_secret_access_key: opts[:secret_key],
+                        aws_access_key_id: opts[:access_key],
+                      }
+                    else
+                      { use_iam_profile: true }
+                    end
+            @cloudwatch = Fog::AWS::CloudWatch.new(creds)
+          end
+          @start_time = (Time.now.utc - opts[:time_start]).iso8601
+          @end_time = (Time.now.utc - opts[:time_end]).iso8601
+        end
+
+        def tick
+          opts[:services].each do |service|
+            data = @cloudwatch.get_metric_statistics({
+              'Statistics' => ['Maximum'],
+              'StartTime' => @start_time,
+              'EndTime' => @end_time,
+              'Period' => 3600,
+              'Unit' => 'None',
+              'MetricName' => 'EstimatedCharges',
+              'Namespace' => 'AWS/Billing',
+              'Dimensions' => [
+                {
+                  'Name' => 'ServiceName',
+                  'Value' => service,
+                },
+                {
+                  'Name' => 'Currency',
+                  'Value' => 'USD',
+                },
+              ],
+            }).body['GetMetricStatisticsResult']['Datapoints']
+
+            data.each do |metrics|
+              name = "AWScloudwatch.Billing.#{service}"
+              value = metrics['Maximum']
+              timestamp = metrics['Timestamp'].to_i
+
+              event = {
+                host: nil,
+                service: name,
+                time: timestamp,
+                description: "AWS Estimate Charges for #{service}",
+                tags: ['aws_billing'],
+                state: 'ok',
+                metric: value,
+              }
+
+              report event
+            end
+          end
+        end
+      end
+    end
+  end
+end

data/tools/riemann-aws/lib/riemann/tools/aws/elb_metrics.rb
@@ -0,0 +1,163 @@
+# frozen_string_literal: true
+
+require 'riemann/tools'
+
+module Riemann
+  module Tools
+    module Aws
+      class ElbMetrics
+        include Riemann::Tools
+        require 'fog/aws'
+        require 'time'
+
+        opt :fog_credentials_file, 'Fog credentials file', type: String
+        opt :fog_credential, 'Fog credentials to use', type: String
+        opt :access_key, 'AWS Access Key', type: String
+        opt :secret_key, 'AWS Secret Key', type: String
+        opt :region, 'AWS Region', type: String, default: 'eu-west-1'
+        opt :azs, 'List of AZs to aggregate against', type: :strings, default: ['all_az']
+        opt :elbs, 'List of ELBs to pull metrics from', type: :strings, required: true
+
+        def standard_metrics
+          # ELB metric types, from:
+          # http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html#elb-metricscollected
+          {
+            'Latency' => {
+              'Unit' => 'Seconds',
+              'Statistics' => %w[Maximum Minimum Average],
+            },
+            'RequestCount' => {
+              'Unit' => 'Count',
+              'Statistics' => ['Sum'],
+            },
+            'HealthyHostCount' => {
+              'Units' => 'Count',
+              'Statistics' => %w[Minimum Maximum Average],
+            },
+            'UnHealthyHostCount' => {
+              'Units' => 'Count',
+              'Statistics' => %w[Minimum Maximum Average],
+            },
+            'HTTPCode_ELB_4XX' => {
+              'Units' => 'Count',
+              'Statistics' => ['Sum'],
+            },
+            'HTTPCode_ELB_5XX' => {
+              'Units' => 'Count',
+              'Statistics' => ['Sum'],
+            },
+            'HTTPCode_Backend_2XX' => {
+              'Units' => 'Count',
+              'Statistics' => ['Sum'],
+            },
+            'HTTPCode_Backend_3XX' => {
+              'Units' => 'Count',
+              'Statistics' => ['Sum'],
+            },
+            'HTTPCode_Backend_4XX' => {
+              'Units' => 'Count',
+              'Statistics' => ['Sum'],
+            },
+            'HTTPCode_Backend_5XX' => {
+              'Units' => 'Count',
+              'Statistics' => ['Sum'],
+            },
+          }
+        end
+
+        def base_metrics
+          # get last 60 seconds
+          start_time = (Time.now.utc - 60).iso8601
+          end_time = Time.now.utc.iso8601
+
+          # The base query that all metrics would get
+          {
+            'Namespace' => 'AWS/ELB',
+            'StartTime' => start_time,
+            'EndTime' => end_time,
+            'Period' => 60,
+          }
+        end
+
+        def tick
+          if options[:fog_credentials_file]
+            Fog.credentials_path = options[:fog_credentials_file]
+            Fog.credential = options[:fog_credential].to_sym
+            connection = Fog::AWS::CloudWatch.new
+          else
+            connection = if options[:access_key] && options[:secret_key]
+                           Fog::AWS::CloudWatch.new({
+                             aws_access_key_id: options[:access_key],
+                             aws_secret_access_key: options[:secret_key],
+                             region: options[:region],
+                           })
+                         else
+                           Fog::AWS::CloudWatch.new({
+                             use_iam_profile: true,
+                             region: options[:region],
+                           })
+                         end
+          end
+
+          options[:elbs].each do |lb|
+            metric_options = standard_metrics
+            metric_base_options = base_metrics
+
+            options[:azs].each do |az|
+              metric_options.keys.sort.each do |metric_type|
+                merged_options = metric_base_options.merge(metric_options[metric_type])
+                merged_options['MetricName'] = metric_type
+                merged_options['Dimensions'] = if az == 'all_az'
+                                                 [{ 'Name' => 'LoadBalancerName', 'Value' => lb }]
+                                               else
+                                                 [
+                                                   { 'Name' => 'LoadBalancerName', 'Value' => lb },
+                                                   { 'Name' => 'AvailabilityZone', 'Value' => az },
+                                                 ]
+                                               end
+
+                result = connection.get_metric_statistics(merged_options)
+
+                # "If no response codes in the category 2XX-5XX range are sent to clients within
+                # the given time period, values for these metrics will not be recorded in CloudWatch"
+                # next if result.body["GetMetricStatisticsResult"]["Datapoints"].empty? && metric_type =~ /[2345]XX/
+                #
+                if result.body['GetMetricStatisticsResult']['Datapoints'].empty?
+                  standard_metrics[metric_type]['Statistics'].each do |stat_type|
+                    event = event(lb, az, metric_type, stat_type, 0.0)
+                    report(event)
+                  end
+                  next
+                end
+
+                # We should only ever have a single data point
+                result.body['GetMetricStatisticsResult']['Datapoints'][0].keys.sort.each do |stat_type|
+                  next if stat_type == 'Unit'
+                  next if stat_type == 'Timestamp'
+
+                  unit = result.body['GetMetricStatisticsResult']['Datapoints'][0]['Unit']
+                  metric = result.body['GetMetricStatisticsResult']['Datapoints'][0][stat_type]
+                  event = event(lb, az, metric_type, stat_type, metric, unit)
+                  report(event)
+                end
+              end
+            end
+          end
+        end
+
+        private
+
+        def event(lb, az, metric_type, stat_type, metric, unit = nil)
+          {
+            host: lb,
+            service: "elb.#{az}.#{metric_type}.#{stat_type}",
+            ttl: 60,
+            description: "#{lb} #{metric_type} #{stat_type} (#{unit})",
+            tags: ['elb_metrics'],
+            metric: metric,
+          }
+        end
+      end
+    end
+  end
+end

data/tools/riemann-aws/lib/riemann/tools/aws/rds_status.rb
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+require 'riemann/tools'
+
+module Riemann
+  module Tools
+    module Aws
+      class RdsStatus
+        include Riemann::Tools
+        require 'fog/aws'
+        require 'date'
+        require 'time'
+        require 'json'
+
+        opt :access_key, 'AWS access key', type: String
+        opt :secret_key, 'Secret access key', type: String
+        opt :region, 'AWS region', type: String, default: 'eu-west-1'
+        opt :dbinstance_identifier, 'DBInstanceIdentifier', type: String
+        def initialize
+          abort 'FATAL: specify a DB instance name, see --help for usage' unless opts[:dbinstance_identifier]
+          creds = if opts[:access_key] && opts[:secret_key]
+                    {
+                      aws_access_key_id: opts[:access_key],
+                      aws_secret_access_key: opts[:secret_key],
+                    }
+                  else
+                    { use_iam_profile: true }
+                  end
+          creds['region'] = opts[:region]
+          @cloudwatch = Fog::AWS::CloudWatch.new(creds)
+        end
+
+        def tick
+          time = Time.new
+          %w[DatabaseConnections FreeableMemory FreeStorageSpace NetworkReceiveThroughput
+             NetworkTransmitThroughput ReadThroughput CPUUtilization].each do |metric|
+            result = @cloudwatch.get_metric_statistics(
+              'Namespace' => 'AWS/RDS',
+              'MetricName' => metric.to_s,
+              'Statistics' => 'Average',
+              'Dimensions' => [{ 'Name' => 'DBInstanceIdentifier', 'Value' => opts[:dbinstance_identifier].to_s }],
+              'StartTime' => (time - 120).to_time.iso8601,
+              'EndTime' => time.to_time.iso8601, 'Period' => 60,
+            )
+            metrics_result = result.data[:body]['GetMetricStatisticsResult']
+            next unless metrics_result['Datapoints'].length.positive?
+
+            datapoint = metrics_result['Datapoints'][0]
+            ev = {
+              metric: datapoint['Average'],
+              service: "#{opts[:dbinstance_identifier]}.#{metric} (#{datapoint['Unit']})",
+              description: JSON.dump(metrics_result),
+              state: 'ok',
+              ttl: 300,
+            }
+
+            report ev
+          end
+        end
+      end
+    end
+  end
+end
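
The new library files above share one structure: a class nested under Riemann::Tools::Aws that includes the Riemann::Tools module, declares its command-line options with opt, and implements tick, whose job is to report Riemann events. Below is a minimal sketch of that shape, not taken from the gem: the class name and option are hypothetical, and it assumes the run class method provided by riemann/tools drives tick on an interval, as the thin bin wrappers above suggest.

# frozen_string_literal: true

# Minimal sketch of the shared tool-class shape. "ExampleTool" and its
# option are hypothetical and exist only for illustration.
require 'riemann/tools'

module Riemann
  module Tools
    module Aws
      class ExampleTool
        include Riemann::Tools

        opt :example_name, 'Service suffix to report under', type: String, default: 'example'

        # Assumed to be called periodically by the run loop in riemann/tools,
        # as with the classes above; gathers a value and reports it as an event.
        def tick
          report(
            service: "aws.#{opts[:example_name]}",
            metric: 1.0,
            state: 'ok',
            ttl: 60,
          )
        end
      end
    end
  end
end

# A bin wrapper would then mirror the executables above:
#   Riemann::Tools::Aws::ExampleTool.run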