riemann-tools 0.2.11 → 1.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +5 -5
- data/.docker/Dockerfile +7 -0
- data/.docker/publish.sh +35 -0
- data/.github/workflows/ci.yml +29 -0
- data/.gitignore +6 -0
- data/.rspec +2 -0
- data/.travis.yml +31 -0
- data/CHANGELOG.md +393 -0
- data/Gemfile +6 -0
- data/ISSUE_TEMPLATE.md +15 -0
- data/README.markdown +17 -1
- data/Rakefile +21 -0
- data/bin/riemann-apache-status +1 -0
- data/bin/riemann-bench +1 -0
- data/bin/riemann-cloudant +1 -0
- data/bin/riemann-consul +3 -2
- data/bin/riemann-dir-files-count +1 -0
- data/bin/riemann-dir-space +1 -0
- data/bin/riemann-diskstats +1 -0
- data/bin/riemann-fd +1 -0
- data/bin/riemann-freeswitch +1 -0
- data/bin/riemann-haproxy +1 -0
- data/bin/riemann-health +87 -10
- data/bin/riemann-kvminstance +1 -0
- data/bin/riemann-memcached +1 -0
- data/bin/riemann-net +3 -2
- data/bin/riemann-nginx-status +1 -0
- data/bin/riemann-ntp +1 -0
- data/bin/riemann-portcheck +42 -0
- data/bin/riemann-proc +2 -1
- data/bin/riemann-varnish +1 -0
- data/bin/riemann-zookeeper +1 -0
- data/lib/riemann/tools/utils.rb +17 -0
- data/lib/riemann/tools/version.rb +7 -0
- data/lib/riemann/tools.rb +15 -5
- data/riemann-tools.gemspec +39 -0
- data/tools/riemann-aws/LICENSE +21 -0
- data/tools/riemann-aws/README.md +54 -0
- data/tools/riemann-aws/Rakefile.rb +35 -0
- data/tools/riemann-aws/bin/riemann-aws-billing +87 -0
- data/tools/riemann-aws/bin/riemann-aws-rds-status +54 -0
- data/tools/riemann-aws/bin/riemann-aws-sqs-status +44 -0
- data/tools/riemann-aws/bin/riemann-aws-status +71 -0
- data/tools/riemann-aws/bin/riemann-elb-metrics +167 -0
- data/tools/riemann-aws/bin/riemann-s3-list +82 -0
- data/tools/riemann-aws/bin/riemann-s3-status +99 -0
- data/tools/riemann-chronos/LICENSE +21 -0
- data/tools/riemann-chronos/README.md +10 -0
- data/tools/riemann-chronos/Rakefile.rb +35 -0
- data/tools/riemann-chronos/bin/riemann-chronos +144 -0
- data/tools/riemann-docker/LICENSE +21 -0
- data/tools/riemann-docker/README.md +10 -0
- data/tools/riemann-docker/Rakefile.rb +34 -0
- data/tools/riemann-docker/bin/riemann-docker +217 -0
- data/tools/riemann-elasticsearch/LICENSE +21 -0
- data/tools/riemann-elasticsearch/README.md +10 -0
- data/tools/riemann-elasticsearch/Rakefile.rb +35 -0
- data/tools/riemann-elasticsearch/bin/riemann-elasticsearch +166 -0
- data/tools/riemann-marathon/LICENSE +21 -0
- data/tools/riemann-marathon/README.md +10 -0
- data/tools/riemann-marathon/Rakefile.rb +35 -0
- data/tools/riemann-marathon/bin/riemann-marathon +147 -0
- data/tools/riemann-mesos/LICENSE +21 -0
- data/tools/riemann-mesos/README.md +10 -0
- data/tools/riemann-mesos/Rakefile.rb +35 -0
- data/tools/riemann-mesos/bin/riemann-mesos +131 -0
- data/tools/riemann-munin/LICENSE +21 -0
- data/tools/riemann-munin/README.md +10 -0
- data/tools/riemann-munin/Rakefile.rb +34 -0
- data/tools/riemann-munin/bin/riemann-munin +37 -0
- data/tools/riemann-rabbitmq/LICENSE +21 -0
- data/tools/riemann-rabbitmq/README.md +10 -0
- data/tools/riemann-rabbitmq/Rakefile.rb +35 -0
- data/tools/riemann-rabbitmq/bin/riemann-rabbitmq +269 -0
- data/tools/riemann-riak/LICENSE +21 -0
- data/tools/riemann-riak/README.md +10 -0
- data/tools/riemann-riak/Rakefile.rb +34 -0
- data/tools/riemann-riak/bin/riemann-riak +331 -0
- data/tools/riemann-riak/bin/riemann-riak-keys +13 -0
- data/tools/riemann-riak/bin/riemann-riak-ring +9 -0
- data/tools/riemann-riak/riak_status/key_count.erl +13 -0
- data/tools/riemann-riak/riak_status/riak_status.rb +152 -0
- data/tools/riemann-riak/riak_status/ringready.erl +9 -0
- metadata +130 -16
#!/usr/bin/env ruby
Process.setproctitle($0)

require 'riemann/tools'

$0 = __FILE__ # Let's not expose our AWS keys in the process list

# Reports AWS estimated billing charges (USD, per service) from the
# CloudWatch AWS/Billing namespace to Riemann.
class Riemann::Tools::AWSBilling
  include Riemann::Tools
  require 'fog'

  opt :fog_credentials_file, "Fog credentials file", :type => String
  opt :fog_credential, "Fog credentials to use", :type => String

  opt :access_key, "AWS access key", :type => String
  opt :secret_key, "Secret access key", :type => String
  opt :services, "AWS services: AmazonEC2 AmazonS3 AWSDataTransfer", :type => :strings, :multi => true, :default => ["AmazonEC2", "AmazonS3", "AWSDataTransfer"]

  opt :time_start, "Start time in seconds of the metrics period (2hrs ago default)", :type => Integer, :default => 7200
  opt :time_end, "End time in seconds of the metrics period ", :type => Integer, :default => 60

  def initialize
    # Fix: the original mixed `options[...]` and `opts[...]`; use the
    # `opts` accessor consistently, as the rest of this class does.
    if opts[:fog_credentials_file]
      Fog.credentials_path = opts[:fog_credentials_file]
      Fog.credential = opts[:fog_credential].to_sym
      @cloudwatch = Fog::AWS::CloudWatch.new
    else
      # Fix: option keys are symbols and are always present (with a nil
      # default), so the original `opts.has_key?('secret_key')` string-key
      # check never matched and explicit keys were silently ignored.
      # Test the values instead, as riemann-aws-rds-status does.
      if opts[:secret_key] and opts[:access_key]
        creds = {
          :aws_secret_access_key => opts[:secret_key],
          :aws_access_key_id => opts[:access_key]
        }
      else
        creds = { :use_iam_profile => true }
      end
      @cloudwatch = Fog::AWS::CloudWatch.new(creds)
    end
    @start_time = (Time.now.utc - opts[:time_start]).iso8601
    @end_time = (Time.now.utc - opts[:time_end]).iso8601
  end

  def tick
    # Fix: with :multi => true each --services occurrence contributes an
    # array, so user-supplied values arrive nested; flatten so every
    # element is a service name string (no-op for the default).
    opts[:services].flatten.each do |service|
      data = @cloudwatch.get_metric_statistics({
        'Statistics' => ["Maximum"],
        'StartTime' => @start_time,
        'EndTime' => @end_time,
        'Period' => 3600,
        'Unit' => "None",
        'MetricName' => "EstimatedCharges",
        'Namespace' => "AWS/Billing",
        'Dimensions' => [
          {
            'Name' => "ServiceName",
            'Value' => service
          },
          {
            'Name' => "Currency",
            'Value' => "USD"
          }
        ]
      }).body['GetMetricStatisticsResult']['Datapoints']

      data.each do |metrics|
        name = "AWScloudwatch.Billing." + service
        value = metrics["Maximum"]
        timestamp = metrics["Timestamp"].to_i

        event = {
          host: nil,
          service: name,
          time: timestamp,
          description: "AWS Estimate Charges for #{service}",
          tags: ["aws_billing"],
          state: "ok",
          metric: value
        }

        report event
      end
    end
  end
end

Riemann::Tools::AWSBilling.run
#!/usr/bin/env ruby
Process.setproctitle($0)

require 'riemann/tools'

$0 = __FILE__ # Let's not expose our AWS keys in the process list

# Reports CloudWatch metrics for a single RDS instance to Riemann
# (connections, memory/storage, network and read throughput, CPU).
class Riemann::Tools::AWS
  include Riemann::Tools
  require 'fog'
  require 'date'
  require 'time'
  require 'json'

  opt :access_key, "AWS access key", :type => String
  opt :secret_key, "Secret access key", :type => String
  opt :region, "AWS region", :type => String, :default => 'eu-west-1'
  opt :dbinstance_identifier, "DBInstanceIdentifier", :type => String

  def initialize
    abort "FATAL: specify a DB instance name, see --help for usage" unless opts[:dbinstance_identifier]
    if opts[:access_key] and opts[:secret_key]
      creds = {
        :aws_access_key_id => opts[:access_key],
        :aws_secret_access_key => opts[:secret_key]
      }
    else
      creds = { :use_iam_profile => true }
    end
    # Fix: Fog connection options are keyed by symbol; the original set the
    # string key 'region', which Fog does not read, so --region was ignored.
    creds[:region] = opts[:region]
    @cloudwatch = Fog::AWS::CloudWatch.new(creds)
  end

  def tick
    time = Time.new
    ['DatabaseConnections', 'FreeableMemory', 'FreeStorageSpace', 'NetworkReceiveThroughput', 'NetworkTransmitThroughput', 'ReadThroughput', 'CPUUtilization'].each do |metric|
      # Average over the last two minutes, one-minute period; we report only
      # the first datapoint returned (at most two can come back).
      result = @cloudwatch.get_metric_statistics({
        "Namespace" => 'AWS/RDS',
        "MetricName" => metric,
        "Statistics" => 'Average',
        "Dimensions" => [{ "Name" => "DBInstanceIdentifier", "Value" => opts[:dbinstance_identifier] }],
        "StartTime" => (time - 120).iso8601,
        "EndTime" => time.iso8601,
        "Period" => 60
      })
      metricsResult = result.data[:body]['GetMetricStatisticsResult']
      if (metricsResult['Datapoints'].length > 0)
        datapoint = metricsResult['Datapoints'][0]
        ev = { :metric => datapoint['Average'],
               :service => "#{opts[:dbinstance_identifier]}.#{metric} (#{datapoint['Unit']})",
               :description => JSON.dump(metricsResult),
               :state => "ok",
               :ttl => 300 }

        report ev
      end
    end
  end
end

Riemann::Tools::AWS.run
#!/usr/bin/env ruby
Process.setproctitle($0)

require 'riemann/tools'

$0 = __FILE__ # Let's not expose our AWS keys in the process list

# Reports SQS queue depth (visible and in-flight message counts) to Riemann.
class Riemann::Tools::AWS
  include Riemann::Tools
  require 'fog'

  opt :access_key, "AWS access key", :type => String
  opt :secret_key, "Secret access key", :type => String
  opt :region, "AWS region", :type => String, :default => 'us-east-1'
  opt :queue, "SQS Queue name", :type => String

  def initialize
    # Fix: option keys are symbols and are always present (nil default), so
    # the original `opts.has_key?('access_key')` string-key check never
    # matched and explicit keys were silently ignored; test the values.
    if opts[:access_key] and opts[:secret_key]
      creds = {
        :aws_access_key_id => opts[:access_key],
        :aws_secret_access_key => opts[:secret_key]
      }
    else
      creds = { :use_iam_profile => true }
    end
    # Fix: Fog reads :region by symbol key; the original string key
    # 'region' was ignored.
    creds[:region] = opts[:region]
    @sqs = Fog::AWS::SQS.new(creds)
    # NOTE(review): prefix matching — if several queues share the prefix,
    # only the first listed queue is monitored. Confirm against callers.
    response = @sqs.list_queues({'QueueNamePrefix' => opts[:queue]})
    @queue_url = response[:body]['QueueUrls'].first
  end

  def tick
    response = @sqs.get_queue_attributes(@queue_url, 'All')
    ['ApproximateNumberOfMessages', 'ApproximateNumberOfMessagesNotVisible'].each do |attr|
      msg = {
        metric: response[:body]['Attributes'][attr],
        service: "#{opts[:queue]} #{attr}",
        state: 'ok'
      }
      report msg
    end
  end
end

Riemann::Tools::AWS.run
#!/usr/bin/env ruby
Process.setproctitle($0)

require 'riemann/tools'

$0 = __FILE__ # Let's not expose our AWS keys in the process list

# Reports EC2 scheduled-event status (e.g. instance retirement) for all
# running instances to Riemann, escalating state as deadlines approach.
class Riemann::Tools::AWS
  include Riemann::Tools
  require 'fog'
  require 'date'

  opt :access_key, "AWS access key", :type => String
  opt :secret_key, "Secret access key", :type => String
  opt :region, "AWS region", :type => String, :default => 'eu-west-1'

  opt :retirement_critical, "Number of days before retirement. Defaults to 2", :default => 2
  opt :event_warning, "Number of days before event. Defaults to nil (i.e. when the event appears)", :default => nil

  def initialize
    # Fix: option keys are symbols and always present (nil default), so the
    # original `opts.has_key?('secret_key')` string-key check never matched;
    # test the values instead.
    if opts[:secret_key] and opts[:access_key]
      creds = {
        :aws_secret_access_key => opts[:secret_key],
        :aws_access_key_id => opts[:access_key]
      }
    else
      creds = { :use_iam_profile => true }
    end
    # Fix: Fog::Compute.new requires the symbol keys :provider and :region;
    # the original string keys were not read (missing :provider raises).
    creds[:region] = opts[:region]
    creds[:provider] = 'AWS'
    @compute = Fog::Compute.new(creds)
  end

  def tick
    instance_status = @compute.describe_instance_status.body["instanceStatusSet"]
    status = instance_status.inject({}) do |acc, i|
      acc[i.delete("instanceId")] = i
      acc
    end

    # Fix: in the original the `inject` call had lost its receiver (a bare
    # `inject(...)` raises NoMethodError); it belongs chained onto the
    # running-servers query, mapping private DNS name => status entry.
    hosts = @compute.servers.select { |s| s.state == "running" }.
            inject([status, {}]) do |(remaining, acc), host|
              acc[host.private_dns_name] = remaining.delete(host.id)
              [remaining, acc]
            end[1]

    hosts.each do |host, status|
      status['eventsSet'].each do |event|
        # NOTE(review): `before` is nil when the event carries no
        # 'notBefore'; the comparisons below would then raise — confirm
        # whether EC2 ever omits it for these event codes.
        before, after = ['notBefore', 'notAfter'].map { |k| Date.parse event[k].to_s if event[k] }

        ev = { :host => host,
               :service => "aws_instance_status",
               :description => "#{event['code']}\n\nstart #{event['notBefore']}\nend #{event['notAfter']}\n\n#{event['description']}",
               :state => "ok",
               :ttl => 300 }

        ev2 = if (event['code'] == 'instance-retirement') and
                  Date.today >= before - opts[:retirement_critical]
                { :state => "critical" }
              elsif opts[:event_warning] and Date.today >= before - opts[:event_warning]
                { :state => "warning" }
              else
                { :state => "warning" }
              end

        report ev.merge(ev2)
      end
    end
  end
end

Riemann::Tools::AWS.run
#!/usr/bin/env ruby
Process.setproctitle($0)

require 'riemann/tools'

$0 = __FILE__

# Reports per-ELB (optionally per-AZ) CloudWatch metrics to Riemann.
class Riemann::Tools::ELBMetrics
  include Riemann::Tools
  require 'fog'
  require 'time'

  opt :fog_credentials_file, "Fog credentials file", :type => String
  opt :fog_credential, "Fog credentials to use", :type => String
  opt :aws_access, "AWS Access Key", :type => String
  opt :aws_secret, "AWS Secret Key", :type => String
  opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
  opt :aws_azs, "List of AZs to aggregate against", :type => :strings, :default => [ "all_az" ]
  opt :elbs, "List of ELBs to pull metrics from", :type => :strings, :required => true

  # ELB metric types and the statistics/unit requested for each, from:
  # http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html#elb-metricscollected
  def standard_metrics
    # Fix: the original used "Units" for every entry except Latency;
    # CloudWatch's GetMetricStatistics parameter is "Unit", so the
    # misspelled key was silently ignored.
    metric_options = {
      "Latency" => {
        "Unit" => "Seconds",
        "Statistics" => ["Maximum", "Minimum", "Average" ]
      },
      "RequestCount" => {
        "Unit" => "Count",
        "Statistics" => [ "Sum" ]
      },
      "HealthyHostCount" => {
        "Unit" => "Count",
        "Statistics" => [ "Minimum", "Maximum", "Average" ]
      },
      "UnHealthyHostCount" => {
        "Unit" => "Count",
        "Statistics" => [ "Minimum", "Maximum", "Average" ]
      },
      "HTTPCode_ELB_4XX" => {
        "Unit" => "Count",
        "Statistics" => [ "Sum" ]
      },
      "HTTPCode_ELB_5XX" => {
        "Unit" => "Count",
        "Statistics" => [ "Sum" ]
      },
      "HTTPCode_Backend_2XX" => {
        "Unit" => "Count",
        "Statistics" => [ "Sum" ]
      },
      "HTTPCode_Backend_3XX" => {
        "Unit" => "Count",
        "Statistics" => [ "Sum" ]
      },
      "HTTPCode_Backend_4XX" => {
        "Unit" => "Count",
        "Statistics" => [ "Sum" ]
      },
      "HTTPCode_Backend_5XX" => {
        "Unit" => "Count",
        "Statistics" => [ "Sum" ]
      }
    }

    metric_options
  end

  # Query parameters shared by every metric: the last 60 seconds at
  # one-minute resolution.
  def base_metrics
    start_time = (Time.now.utc - 60).iso8601
    end_time = Time.now.utc.iso8601

    metric_base = {
      "Namespace" => "AWS/ELB",
      "StartTime" => start_time,
      "EndTime" => end_time,
      "Period" => 60,
    }

    metric_base
  end

  def tick
    if options[:fog_credentials_file]
      Fog.credentials_path = options[:fog_credentials_file]
      Fog.credential = options[:fog_credential].to_sym
      connection = Fog::AWS::CloudWatch.new
    else
      if options[:aws_access] && options[:aws_secret]
        connection = Fog::AWS::CloudWatch.new({
          :aws_access_key_id => options[:aws_access],
          :aws_secret_access_key => options[:aws_secret],
          :region => options[:aws_region]
        })
      else
        connection = Fog::AWS::CloudWatch.new({
          :use_iam_profile => true,
          :region => options[:aws_region]
        })
      end
    end

    options[:elbs].each do |lb|
      metric_options = standard_metrics
      metric_base_options = base_metrics

      options[:aws_azs].each do |az|
        metric_options.keys.sort.each do |metric_type|
          merged_options = metric_base_options.merge(metric_options[metric_type])
          merged_options["MetricName"] = metric_type
          if az == "all_az"
            merged_options["Dimensions"] = [ { "Name" => "LoadBalancerName", "Value" => lb } ]
          else
            merged_options["Dimensions"] = [
              { "Name" => "LoadBalancerName", "Value" => lb },
              { "Name" => "AvailabilityZone" , "Value" => az}
            ]
          end

          result = connection.get_metric_statistics(merged_options)

          # "If no response codes in the category 2XX-5XX range are sent to
          # clients within the given time period, values for these metrics
          # will not be recorded in CloudWatch" — report explicit zeros so
          # downstream dashboards see a continuous series.
          if result.body["GetMetricStatisticsResult"]["Datapoints"].empty?
            standard_metrics[metric_type]['Statistics'].each do |stat_type|
              event = event(lb, az, metric_type, stat_type, 0.0)
              report(event)
            end
            next
          end

          # We should only ever have a single data point
          result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
            next if stat_type == "Unit"
            next if stat_type == "Timestamp"

            unit = result.body["GetMetricStatisticsResult"]["Datapoints"][0]["Unit"]
            metric = result.body["GetMetricStatisticsResult"]["Datapoints"][0][stat_type]
            event = event(lb, az, metric_type, stat_type, metric, unit)
            report(event)
          end
        end
      end
    end
  end

  private

  # Builds a Riemann event hash for one (lb, az, metric, statistic) tuple.
  def event(lb, az, metric_type, stat_type, metric, unit = nil)
    {
      host: lb,
      service: "elb.#{az}.#{metric_type}.#{stat_type}",
      ttl: 60,
      description: "#{lb} #{metric_type} #{stat_type} (#{unit})",
      tags: ["elb_metrics"],
      metric: metric
    }
  end
end

Riemann::Tools::ELBMetrics.run
#!/usr/bin/env ruby
Process.setproctitle($0)

require 'riemann/tools'

$0 = __FILE__

# Counts objects under each configured S3 bucket (or bucket/prefix) by
# listing, stopping at --max-objects, and reports the count to Riemann.
class Riemann::Tools::S3Metrics
  include Riemann::Tools
  require 'fog'
  require 'time'

  opt :fog_credentials_file, "Fog credentials file", :type => String
  opt :fog_credential, "Fog credentials to use", :type => String
  opt :aws_access, "AWS Access Key", :type => String
  opt :aws_secret, "AWS Secret Key", :type => String
  opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
  opt :buckets, "Buckets to pull metrics from, multi=true, can have a prefix like mybucket/prefix", :type => String, :multi => true, :required => true
  opt :max_objects, "Max number of objects to list before stopping to save bandwidth", :default => -1

  def tick
    if options[:fog_credentials_file]
      Fog.credentials_path = options[:fog_credentials_file]
      Fog.credential = options[:fog_credential].to_sym
      connection = Fog::Storage.new
    else
      if options[:aws_access] && options[:aws_secret]
        connection = Fog::Storage.new({
          :provider => "AWS",
          :aws_access_key_id => options[:aws_access],
          :aws_secret_access_key => options[:aws_secret],
          :region => options[:aws_region]
        })
      else
        connection = Fog::Storage.new({
          :provider => "AWS",
          :use_iam_profile => true,
          :region => options[:aws_region]
        })
      end
    end

    options[:buckets].each do |url|
      # A bucket spec may carry a key prefix: "mybucket/some/prefix".
      split = url.split('/')
      bucket = split[0]
      prefix = ""
      if split[1]
        prefix = url[(split[0].length + 1)..-1]
      end

      # Fix (idiom): the original used `map` purely for side effects and
      # discarded its result; `each` states the intent. Listing stops one
      # past the threshold so we can tell "at threshold" from "over it".
      # NOTE(review): `directories.get` returns nil for a missing bucket —
      # confirm buckets are pre-validated by the operator.
      count = 0
      connection.directories.get(bucket, prefix: prefix).files.each do |file|
        count += 1
        break if options[:max_objects] > 0 && count > options[:max_objects]
      end

      if options[:max_objects] > 0 && count > options[:max_objects]
        event = event(url, "objectCount", count, "count was bigger than threshold #{options[:max_objects]}", "warning")
        report(event)
      else
        event = event(url, "objectCount", count, "All objects counted, threshold=#{options[:max_objects]}", "ok")
        report(event)
      end
    end
  end

  private

  # Builds a Riemann event hash for one bucket count.
  def event(bucket, label, metric, description, severity)
    {
      host: "bucket_#{bucket}",
      service: "s3.#{label}",
      ttl: 300,
      description: "#{bucket} #{description}",
      tags: ["s3_metrics"],
      metric: metric,
      state: severity
    }
  end
end

Riemann::Tools::S3Metrics.run
#!/usr/bin/env ruby
Process.setproctitle($0)

require 'riemann/tools'

$0 = __FILE__

# Reports the CloudWatch NumberOfObjects metric for each configured S3
# bucket to Riemann, for each requested statistic.
class Riemann::Tools::S3Metrics
  include Riemann::Tools
  require 'fog'
  require 'time'

  opt :fog_credentials_file, "Fog credentials file", :type => String
  opt :fog_credential, "Fog credentials to use", :type => String
  opt :aws_access, "AWS Access Key", :type => String
  opt :aws_secret, "AWS Secret Key", :type => String
  opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
  opt :buckets, "Buckets to pull metrics from, multi=true", :type => String, :multi => true, :required => true
  opt :statistic, "Statistic to retrieve, multi=true, e.g. --statistic=Average --statistic=Maximum", :type => String, :multi => true, :required => true

  # Query parameters shared by every bucket/statistic combination.
  def base_metrics
    # Fix (comment): the original comment said "last 60 seconds", but the
    # window is the last 24 hours — S3 storage metrics are only published
    # daily, so a short window would return no datapoints.
    start_time = (Time.now.utc - 3600 * 24 * 1).iso8601
    end_time = Time.now.utc.iso8601

    metric_base = {
      "Namespace" => "AWS/S3",
      "StartTime" => start_time,
      "EndTime" => end_time,
      "Period" => 3600,
      "MetricName" => "NumberOfObjects",
    }

    metric_base
  end

  def tick
    if options[:fog_credentials_file]
      Fog.credentials_path = options[:fog_credentials_file]
      Fog.credential = options[:fog_credential].to_sym
      connection = Fog::AWS::CloudWatch.new
    else
      if options[:aws_access] && options[:aws_secret]
        connection = Fog::AWS::CloudWatch.new({
          :aws_access_key_id => options[:aws_access],
          :aws_secret_access_key => options[:aws_secret],
          :region => options[:aws_region]
        })
      else
        connection = Fog::AWS::CloudWatch.new({
          :use_iam_profile => true,
          :region => options[:aws_region]
        })
      end
    end

    options[:statistic].each do |statistic|
      options[:buckets].each do |bucket|
        metric_base_options = base_metrics
        metric_base_options["Statistics"] = statistic
        metric_base_options["Dimensions"] = [
          { "Name" => "BucketName", "Value" => bucket },
          { "Name" => "StorageType", "Value" => "AllStorageTypes" }]

        result = connection.get_metric_statistics(metric_base_options)
        # Nothing published yet for this bucket/statistic — skip silently.
        if result.body["GetMetricStatisticsResult"]["Datapoints"].empty?
          next
        end
        result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
          next if stat_type == "Unit"
          next if stat_type == "Timestamp"

          unit = result.body["GetMetricStatisticsResult"]["Datapoints"][0]["Unit"]
          metric = result.body["GetMetricStatisticsResult"]["Datapoints"][0][stat_type]
          event = event(bucket, result.body["GetMetricStatisticsResult"]["Label"], stat_type, unit, metric)
          report(event)
        end
      end
    end
  end

  private

  # Builds a Riemann event hash for one bucket/statistic datapoint.
  def event(bucket, label, metric_type, stat_type, metric, unit = nil)
    {
      host: "bucket_#{bucket}",
      service: "s3.#{label}.#{metric_type}.#{stat_type}",
      ttl: 300,
      description: "#{bucket} #{metric_type} #{stat_type} (#{unit})",
      tags: ["s3_metrics"],
      metric: metric
    }
  end
end

Riemann::Tools::S3Metrics.run
@@ -0,0 +1,21 @@
|
|
1
|
+
The MIT License
|
2
|
+
|
3
|
+
Copyright (c) 2011 Kyle Kingsbury
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in
|
13
|
+
all copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21
|
+
THE SOFTWARE.
|
require 'rubygems'
require 'rubygems/package_task'
require 'rdoc/task'
require 'find'

# Don't include resource forks in tarballs on Mac OS X.
ENV['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
ENV['COPYFILE_DISABLE'] = 'true'

# Gemspec
gemspec = Gem::Specification.new do |s|
  # NOTE(review): rubyforge_project is deprecated (a no-op warning on
  # modern RubyGems); kept for compatibility with older toolchains.
  s.rubyforge_project = 'riemann-chronos'

  s.name = 'riemann-chronos'
  s.version = '0.1.1'
  s.author = 'Peter Ericson'
  s.email = 'peter.ericson@cba.com.au'
  s.homepage = 'https://github.com/riemann/riemann-tools'
  s.platform = Gem::Platform::RUBY
  s.summary = 'Submits Chronos stats to riemann.'
  s.license = 'MIT'

  s.add_dependency 'riemann-tools', '>= 0.2.13'
  s.add_dependency 'faraday', '>= 0.8.5'
  s.add_dependency 'json'

  s.files = FileList['bin/*', 'LICENSE', 'README.md'].to_a
  # Fix: Dir.entries includes the "." and ".." entries, which are not
  # executables; exclude them.
  s.executables |= (Dir.entries('bin/') - ['.', '..'])
  # Fix: `has_rdoc=` was removed in RubyGems 3.0 and raises NoMethodError
  # there; it had long been ignored, so it is dropped entirely.

  s.required_ruby_version = '>= 1.8.7'
end

Gem::PackageTask.new gemspec do |p|
end