riemann-tools 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. checksums.yaml +4 -4
  2. data/.github/dependabot.yml +11 -0
  3. data/.github/workflows/ci.yml +13 -0
  4. data/.github/workflows/codeql-analysis.yml +72 -0
  5. data/.rubocop.yml +32 -0
  6. data/CHANGELOG.md +31 -2
  7. data/README.markdown +8 -24
  8. data/Rakefile +4 -2
  9. data/SECURITY.md +42 -0
  10. data/bin/riemann-apache-status +92 -78
  11. data/bin/riemann-bench +54 -49
  12. data/bin/riemann-cloudant +44 -40
  13. data/bin/riemann-consul +82 -76
  14. data/bin/riemann-dir-files-count +53 -47
  15. data/bin/riemann-dir-space +53 -47
  16. data/bin/riemann-diskstats +78 -75
  17. data/bin/riemann-fd +68 -48
  18. data/bin/riemann-freeswitch +108 -103
  19. data/bin/riemann-haproxy +46 -40
  20. data/bin/riemann-health +4 -343
  21. data/bin/riemann-kvminstance +18 -13
  22. data/bin/riemann-memcached +35 -29
  23. data/bin/riemann-net +4 -104
  24. data/bin/riemann-nginx-status +74 -67
  25. data/bin/riemann-ntp +4 -33
  26. data/bin/riemann-portcheck +40 -31
  27. data/bin/riemann-proc +96 -90
  28. data/bin/riemann-varnish +51 -45
  29. data/bin/riemann-zookeeper +38 -34
  30. data/lib/riemann/tools/health.rb +347 -0
  31. data/lib/riemann/tools/net.rb +104 -0
  32. data/lib/riemann/tools/ntp.rb +41 -0
  33. data/lib/riemann/tools/version.rb +1 -1
  34. data/lib/riemann/tools.rb +37 -40
  35. data/riemann-tools.gemspec +4 -1
  36. data/tools/riemann-aws/{Rakefile.rb → Rakefile} +2 -0
  37. data/tools/riemann-aws/bin/riemann-aws-billing +72 -66
  38. data/tools/riemann-aws/bin/riemann-aws-rds-status +55 -41
  39. data/tools/riemann-aws/bin/riemann-aws-sqs-status +37 -31
  40. data/tools/riemann-aws/bin/riemann-aws-status +63 -51
  41. data/tools/riemann-aws/bin/riemann-elb-metrics +149 -148
  42. data/tools/riemann-aws/bin/riemann-s3-list +70 -65
  43. data/tools/riemann-aws/bin/riemann-s3-status +85 -82
  44. data/tools/riemann-chronos/{Rakefile.rb → Rakefile} +2 -0
  45. data/tools/riemann-chronos/bin/riemann-chronos +136 -119
  46. data/tools/riemann-docker/{Rakefile.rb → Rakefile} +2 -0
  47. data/tools/riemann-docker/bin/riemann-docker +163 -174
  48. data/tools/riemann-elasticsearch/{Rakefile.rb → Rakefile} +2 -0
  49. data/tools/riemann-elasticsearch/bin/riemann-elasticsearch +155 -147
  50. data/tools/riemann-marathon/{Rakefile.rb → Rakefile} +2 -0
  51. data/tools/riemann-marathon/bin/riemann-marathon +138 -122
  52. data/tools/riemann-mesos/{Rakefile.rb → Rakefile} +2 -0
  53. data/tools/riemann-mesos/bin/riemann-mesos +125 -110
  54. data/tools/riemann-munin/{Rakefile.rb → Rakefile} +2 -0
  55. data/tools/riemann-munin/bin/riemann-munin +28 -22
  56. data/tools/riemann-rabbitmq/{Rakefile.rb → Rakefile} +2 -0
  57. data/tools/riemann-rabbitmq/bin/riemann-rabbitmq +226 -222
  58. data/tools/riemann-riak/{Rakefile.rb → Rakefile} +2 -0
  59. data/tools/riemann-riak/bin/riemann-riak +281 -289
  60. data/tools/riemann-riak/riak_status/riak_status.rb +39 -39
  61. metadata +65 -16
@@ -1,166 +1,167 @@
1
1
  #!/usr/bin/env ruby
2
- Process.setproctitle($0)
2
+ # frozen_string_literal: true
3
+
4
+ Process.setproctitle($PROGRAM_NAME)
3
5
 
4
6
  require 'riemann/tools'
5
7
 
6
8
  $0 = __FILE__
7
9
 
8
- class Riemann::Tools::ELBMetrics
9
- include Riemann::Tools
10
- require 'fog'
11
- require 'time'
12
-
13
- opt :fog_credentials_file, "Fog credentials file", :type => String
14
- opt :fog_credential, "Fog credentials to use", :type => String
15
- opt :aws_access, "AWS Access Key", :type => String
16
- opt :aws_secret, "AWS Secret Key", :type => String
17
- opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
18
- opt :aws_azs, "List of AZs to aggregate against", :type => :strings, :default => [ "all_az" ]
19
- opt :elbs, "List of ELBs to pull metrics from", :type => :strings, :required => true
20
-
21
- def standard_metrics
22
- # ELB metric types, from:
23
- # http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html#elb-metricscollected
24
- metric_options = {
25
- "Latency" => {
26
- "Unit" => "Seconds",
27
- "Statistics" => ["Maximum", "Minimum", "Average" ]
28
- },
29
- "RequestCount" => {
30
- "Unit" => "Count",
31
- "Statistics" => [ "Sum" ]
32
- },
33
- "HealthyHostCount" => {
34
- "Units" => "Count",
35
- "Statistics" => [ "Minimum", "Maximum", "Average" ]
36
- },
37
- "UnHealthyHostCount" => {
38
- "Units" => "Count",
39
- "Statistics" => [ "Minimum", "Maximum", "Average" ]
40
- },
41
- "HTTPCode_ELB_4XX" => {
42
- "Units" => "Count",
43
- "Statistics" => [ "Sum" ]
44
- },
45
- "HTTPCode_ELB_5XX" => {
46
- "Units" => "Count",
47
- "Statistics" => [ "Sum" ]
48
- },
49
- "HTTPCode_Backend_2XX" => {
50
- "Units" => "Count",
51
- "Statistics" => [ "Sum" ]
52
- },
53
- "HTTPCode_Backend_3XX" => {
54
- "Units" => "Count",
55
- "Statistics" => [ "Sum" ]
56
- },
57
- "HTTPCode_Backend_4XX" => {
58
- "Units" => "Count",
59
- "Statistics" => [ "Sum" ]
60
- },
61
- "HTTPCode_Backend_5XX" => {
62
- "Units" => "Count",
63
- "Statistics" => [ "Sum" ]
64
- }
65
- }
66
-
67
- metric_options
68
- end
69
-
70
- def base_metrics
71
- # get last 60 seconds
72
- start_time = (Time.now.utc - 60).iso8601
73
- end_time = Time.now.utc.iso8601
74
-
75
- # The base query that all metrics would get
76
- metric_base = {
77
- "Namespace" => "AWS/ELB",
78
- "StartTime" => start_time,
79
- "EndTime" => end_time,
80
- "Period" => 60,
81
- }
82
-
83
- metric_base
84
- end
85
-
86
-
87
- def tick
88
- if options[:fog_credentials_file]
89
- Fog.credentials_path = options[:fog_credentials_file]
90
- Fog.credential = options[:fog_credential].to_sym
91
- connection = Fog::AWS::CloudWatch.new
92
- else
93
- if options[:aws_access] && options[:aws_secret]
94
- connection = Fog::AWS::CloudWatch.new({
95
- :aws_access_key_id => options[:aws_access],
96
- :aws_secret_access_key => options[:aws_secret],
97
- :region => options[:aws_region]
98
- })
99
- else
100
- connection = Fog::AWS::CloudWatch.new({
101
- :use_iam_profile => true,
102
- :region => options[:aws_region]
103
- })
10
+ module Riemann
11
+ module Tools
12
+ class ELBMetrics
13
+ include Riemann::Tools
14
+ require 'fog'
15
+ require 'time'
16
+
17
+ opt :fog_credentials_file, 'Fog credentials file', type: String
18
+ opt :fog_credential, 'Fog credentials to use', type: String
19
+ opt :aws_access, 'AWS Access Key', type: String
20
+ opt :aws_secret, 'AWS Secret Key', type: String
21
+ opt :aws_region, 'AWS Region', type: String, default: 'eu-west-1'
22
+ opt :aws_azs, 'List of AZs to aggregate against', type: :strings, default: ['all_az']
23
+ opt :elbs, 'List of ELBs to pull metrics from', type: :strings, required: true
24
+
25
+ def standard_metrics
26
+ # ELB metric types, from:
27
+ # http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html#elb-metricscollected
28
+ {
29
+ 'Latency' => {
30
+ 'Unit' => 'Seconds',
31
+ 'Statistics' => %w[Maximum Minimum Average],
32
+ },
33
+ 'RequestCount' => {
34
+ 'Unit' => 'Count',
35
+ 'Statistics' => ['Sum'],
36
+ },
37
+ 'HealthyHostCount' => {
38
+ 'Units' => 'Count',
39
+ 'Statistics' => %w[Minimum Maximum Average],
40
+ },
41
+ 'UnHealthyHostCount' => {
42
+ 'Units' => 'Count',
43
+ 'Statistics' => %w[Minimum Maximum Average],
44
+ },
45
+ 'HTTPCode_ELB_4XX' => {
46
+ 'Units' => 'Count',
47
+ 'Statistics' => ['Sum'],
48
+ },
49
+ 'HTTPCode_ELB_5XX' => {
50
+ 'Units' => 'Count',
51
+ 'Statistics' => ['Sum'],
52
+ },
53
+ 'HTTPCode_Backend_2XX' => {
54
+ 'Units' => 'Count',
55
+ 'Statistics' => ['Sum'],
56
+ },
57
+ 'HTTPCode_Backend_3XX' => {
58
+ 'Units' => 'Count',
59
+ 'Statistics' => ['Sum'],
60
+ },
61
+ 'HTTPCode_Backend_4XX' => {
62
+ 'Units' => 'Count',
63
+ 'Statistics' => ['Sum'],
64
+ },
65
+ 'HTTPCode_Backend_5XX' => {
66
+ 'Units' => 'Count',
67
+ 'Statistics' => ['Sum'],
68
+ },
69
+ }
104
70
  end
105
- end
106
71
 
107
- options[:elbs].each do |lb|
108
-
109
- metric_options = standard_metrics
110
- metric_base_options = base_metrics
111
-
112
- options[:aws_azs].each do |az|
113
- metric_options.keys.sort.each do |metric_type|
114
- merged_options = metric_base_options.merge(metric_options[metric_type])
115
- merged_options["MetricName"] = metric_type
116
- if az == "all_az"
117
- merged_options["Dimensions"] = [ { "Name" => "LoadBalancerName", "Value" => lb } ]
118
- else
119
- merged_options["Dimensions"] = [
120
- { "Name" => "LoadBalancerName", "Value" => lb },
121
- { "Name" => "AvailabilityZone" , "Value" => az}
122
- ]
123
- end
72
+ def base_metrics
73
+ # get last 60 seconds
74
+ start_time = (Time.now.utc - 60).iso8601
75
+ end_time = Time.now.utc.iso8601
76
+
77
+ # The base query that all metrics would get
78
+ {
79
+ 'Namespace' => 'AWS/ELB',
80
+ 'StartTime' => start_time,
81
+ 'EndTime' => end_time,
82
+ 'Period' => 60,
83
+ }
84
+ end
124
85
 
125
- result = connection.get_metric_statistics(merged_options)
86
+ def tick
87
+ if options[:fog_credentials_file]
88
+ Fog.credentials_path = options[:fog_credentials_file]
89
+ Fog.credential = options[:fog_credential].to_sym
90
+ connection = Fog::AWS::CloudWatch.new
91
+ else
92
+ connection = if options[:aws_access] && options[:aws_secret]
93
+ Fog::AWS::CloudWatch.new({
94
+ aws_access_key_id: options[:aws_access],
95
+ aws_secret_access_key: options[:aws_secret],
96
+ region: options[:aws_region],
97
+ })
98
+ else
99
+ Fog::AWS::CloudWatch.new({
100
+ use_iam_profile: true,
101
+ region: options[:aws_region],
102
+ })
103
+ end
104
+ end
126
105
 
127
- # "If no response codes in the category 2XX-5XX range are sent to clients within
128
- # the given time period, values for these metrics will not be recorded in CloudWatch"
129
- #next if result.body["GetMetricStatisticsResult"]["Datapoints"].empty? && metric_type =~ /[2345]XX/
130
- #
131
- if result.body["GetMetricStatisticsResult"]["Datapoints"].empty?
132
- standard_metrics[metric_type]['Statistics'].each do |stat_type|
133
- event = event(lb, az, metric_type, stat_type, 0.0)
134
- report(event)
106
+ options[:elbs].each do |lb|
107
+ metric_options = standard_metrics
108
+ metric_base_options = base_metrics
109
+
110
+ options[:aws_azs].each do |az|
111
+ metric_options.keys.sort.each do |metric_type|
112
+ merged_options = metric_base_options.merge(metric_options[metric_type])
113
+ merged_options['MetricName'] = metric_type
114
+ merged_options['Dimensions'] = if az == 'all_az'
115
+ [{ 'Name' => 'LoadBalancerName', 'Value' => lb }]
116
+ else
117
+ [
118
+ { 'Name' => 'LoadBalancerName', 'Value' => lb },
119
+ { 'Name' => 'AvailabilityZone', 'Value' => az },
120
+ ]
121
+ end
122
+
123
+ result = connection.get_metric_statistics(merged_options)
124
+
125
+ # "If no response codes in the category 2XX-5XX range are sent to clients within
126
+ # the given time period, values for these metrics will not be recorded in CloudWatch"
127
+ # next if result.body["GetMetricStatisticsResult"]["Datapoints"].empty? && metric_type =~ /[2345]XX/
128
+ #
129
+ if result.body['GetMetricStatisticsResult']['Datapoints'].empty?
130
+ standard_metrics[metric_type]['Statistics'].each do |stat_type|
131
+ event = event(lb, az, metric_type, stat_type, 0.0)
132
+ report(event)
133
+ end
134
+ next
135
+ end
136
+
137
+ # We should only ever have a single data point
138
+ result.body['GetMetricStatisticsResult']['Datapoints'][0].keys.sort.each do |stat_type|
139
+ next if stat_type == 'Unit'
140
+ next if stat_type == 'Timestamp'
141
+
142
+ unit = result.body['GetMetricStatisticsResult']['Datapoints'][0]['Unit']
143
+ metric = result.body['GetMetricStatisticsResult']['Datapoints'][0][stat_type]
144
+ event = event(lb, az, metric_type, stat_type, metric, unit)
145
+ report(event)
146
+ end
135
147
  end
136
- next
137
- end
138
-
139
- # We should only ever have a single data point
140
- result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
141
- next if stat_type == "Unit"
142
- next if stat_type == "Timestamp"
143
-
144
- unit = result.body["GetMetricStatisticsResult"]["Datapoints"][0]["Unit"]
145
- metric = result.body["GetMetricStatisticsResult"]["Datapoints"][0][stat_type]
146
- event = event(lb, az, metric_type, stat_type, metric, unit)
147
- report(event)
148
148
  end
149
149
  end
150
150
  end
151
- end
152
- end
153
151
 
154
- private
155
- def event(lb, az, metric_type, stat_type, metric, unit=nil)
156
- event = {
157
- host: lb,
158
- service: "elb.#{az}.#{metric_type}.#{stat_type}",
159
- ttl: 60,
160
- description: "#{lb} #{metric_type} #{stat_type} (#{unit})",
161
- tags: ["elb_metrics"],
162
- metric: metric
163
- }
152
+ private
153
+
154
+ def event(lb, az, metric_type, stat_type, metric, unit = nil)
155
+ {
156
+ host: lb,
157
+ service: "elb.#{az}.#{metric_type}.#{stat_type}",
158
+ ttl: 60,
159
+ description: "#{lb} #{metric_type} #{stat_type} (#{unit})",
160
+ tags: ['elb_metrics'],
161
+ metric: metric,
162
+ }
163
+ end
164
+ end
164
165
  end
165
166
  end
166
167
 
@@ -1,82 +1,87 @@
1
1
  #!/usr/bin/env ruby
2
- Process.setproctitle($0)
2
+ # frozen_string_literal: true
3
+
4
+ Process.setproctitle($PROGRAM_NAME)
3
5
 
4
6
  require 'riemann/tools'
5
7
 
6
8
  $0 = __FILE__
7
9
 
8
- class Riemann::Tools::S3Metrics
9
- include Riemann::Tools
10
- require 'fog'
11
- require 'time'
12
-
13
- opt :fog_credentials_file, "Fog credentials file", :type => String
14
- opt :fog_credential, "Fog credentials to use", :type => String
15
- opt :aws_access, "AWS Access Key", :type => String
16
- opt :aws_secret, "AWS Secret Key", :type => String
17
- opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
18
- opt :buckets, "Buckets to pull metrics from, multi=true, can have a prefix like mybucket/prefix", :type => String, :multi => true, :required => true
19
- opt :max_objects, "Max number of objects to list before stopping to save bandwidth", :default => -1
10
+ module Riemann
11
+ module Tools
12
+ class S3Metrics
13
+ include Riemann::Tools
14
+ require 'fog'
15
+ require 'time'
20
16
 
17
+ opt :fog_credentials_file, 'Fog credentials file', type: String
18
+ opt :fog_credential, 'Fog credentials to use', type: String
19
+ opt :aws_access, 'AWS Access Key', type: String
20
+ opt :aws_secret, 'AWS Secret Key', type: String
21
+ opt :aws_region, 'AWS Region', type: String, default: 'eu-west-1'
22
+ opt :buckets, 'Buckets to pull metrics from, multi=true, can have a prefix like mybucket/prefix', type: String,
23
+ multi: true, required: true
24
+ opt :max_objects, 'Max number of objects to list before stopping to save bandwidth', default: -1
21
25
 
22
- def tick
23
- if options[:fog_credentials_file]
24
- Fog.credentials_path = options[:fog_credentials_file]
25
- Fog.credential = options[:fog_credential].to_sym
26
- connection = Fog::Storage.new
27
- else
28
- if options[:aws_access] && options[:aws_secret]
29
- connection = Fog::Storage.new({
30
- :provider => "AWS",
31
- :aws_access_key_id => options[:aws_access],
32
- :aws_secret_access_key => options[:aws_secret],
33
- :region => options[:aws_region]
34
- })
35
- else
36
- connection = Fog::Storage.new({
37
- :provider => "AWS",
38
- :use_iam_profile => true,
39
- :region => options[:aws_region]
40
- })
41
- end
42
- end
26
+ def tick
27
+ if options[:fog_credentials_file]
28
+ Fog.credentials_path = options[:fog_credentials_file]
29
+ Fog.credential = options[:fog_credential].to_sym
30
+ connection = Fog::Storage.new
31
+ else
32
+ connection = if options[:aws_access] && options[:aws_secret]
33
+ Fog::Storage.new({
34
+ provider: 'AWS',
35
+ aws_access_key_id: options[:aws_access],
36
+ aws_secret_access_key: options[:aws_secret],
37
+ region: options[:aws_region],
38
+ })
39
+ else
40
+ Fog::Storage.new({
41
+ provider: 'AWS',
42
+ use_iam_profile: true,
43
+ region: options[:aws_region],
44
+ })
45
+ end
46
+ end
43
47
 
44
- options[:buckets].each do |url|
45
- split = url.split('/')
46
- bucket = split[0]
47
- prefix = ""
48
- if (split[1])
49
- prefix = url[(split[0].length+1)..-1]
50
- end
51
- count = 0
52
- connection.directories.get(bucket, prefix: prefix).files.map do |file|
53
- count = count +1
54
- if (options[:max_objects]>0 && count>options[:max_objects])
55
- break
48
+ options[:buckets].each do |url|
49
+ split = url.split('/')
50
+ bucket = split[0]
51
+ prefix = ''
52
+ prefix = url[(split[0].length + 1)..] if split[1]
53
+ count = 0
54
+ connection.directories.get(bucket, prefix: prefix).files.map do |_file|
55
+ count += 1
56
+ break if options[:max_objects].positive? && count > options[:max_objects]
57
+ end
58
+ event = if options[:max_objects].positive? && count > options[:max_objects]
59
+ event(
60
+ url, 'objectCount', count, "count was bigger than threshold #{options[:max_objects]}",
61
+ 'warning',
62
+ )
63
+ else
64
+ event(url, 'objectCount', count, "All objects counted, threshold=#{options[:max_objects]}", 'ok')
65
+ end
66
+ report(event)
56
67
  end
57
68
  end
58
- if (options[:max_objects]>0 && count>options[:max_objects])
59
- event = event(url, "objectCount", count, "count was bigger than threshold #{options[:max_objects]}", "warning")
60
- report(event)
61
- else
62
- event = event(url, "objectCount", count, "All objects counted, threshold=#{options[:max_objects]}", "ok")
63
- report(event)
69
+
70
+ private
71
+
72
+ def event(bucket, label, metric, description, severity)
73
+ {
74
+ host: "bucket_#{bucket}",
75
+ service: "s3.#{label}",
76
+ ttl: 300,
77
+ description: "#{bucket} #{description}",
78
+ tags: ['s3_metrics'],
79
+ metric: metric,
80
+ state: severity,
81
+ }
64
82
  end
65
83
  end
66
84
  end
67
-
68
- private
69
- def event(bucket, label, metric, description, severity)
70
- event = {
71
- host: "bucket_#{bucket}",
72
- service: "s3.#{label}",
73
- ttl: 300,
74
- description: "#{bucket} #{description}",
75
- tags: ["s3_metrics"],
76
- metric: metric,
77
- state: severity
78
- }
79
- end
80
85
  end
81
86
 
82
87
  Riemann::Tools::S3Metrics.run
@@ -1,98 +1,101 @@
1
1
  #!/usr/bin/env ruby
2
- Process.setproctitle($0)
2
+ # frozen_string_literal: true
3
+
4
+ Process.setproctitle($PROGRAM_NAME)
3
5
 
4
6
  require 'riemann/tools'
5
7
 
6
8
  $0 = __FILE__
7
9
 
8
- class Riemann::Tools::S3Metrics
9
- include Riemann::Tools
10
- require 'fog'
11
- require 'time'
12
-
13
- opt :fog_credentials_file, "Fog credentials file", :type => String
14
- opt :fog_credential, "Fog credentials to use", :type => String
15
- opt :aws_access, "AWS Access Key", :type => String
16
- opt :aws_secret, "AWS Secret Key", :type => String
17
- opt :aws_region, "AWS Region", :type => String, :default => "eu-west-1"
18
- opt :buckets, "Buckets to pull metrics from, multi=true", :type => String, :multi => true, :required => true
19
- opt :statistic, "Statistic to retrieve, multi=true, e.g. --statistic=Average --statistic=Maximum", :type => String, :multi => true, :required => true
20
-
21
-
22
- def base_metrics
23
- # get last 60 seconds
24
- start_time = (Time.now.utc - 3600 * 24 * 1).iso8601
25
- end_time = Time.now.utc.iso8601
26
-
27
- # The base query that all metrics would get
28
- metric_base = {
29
- "Namespace" => "AWS/S3",
30
- "StartTime" => start_time,
31
- "EndTime" => end_time,
32
- "Period" => 3600,
33
- "MetricName" => "NumberOfObjects",
34
- }
35
-
36
- metric_base
37
- end
38
-
39
-
40
- def tick
41
- if options[:fog_credentials_file]
42
- Fog.credentials_path = options[:fog_credentials_file]
43
- Fog.credential = options[:fog_credential].to_sym
44
- connection = Fog::AWS::CloudWatch.new
45
- else
46
- if options[:aws_access] && options[:aws_secret]
47
- connection = Fog::AWS::CloudWatch.new({
48
- :aws_access_key_id => options[:aws_access],
49
- :aws_secret_access_key => options[:aws_secret],
50
- :region => options[:aws_region]
51
- })
52
- else
53
- connection = Fog::AWS::CloudWatch.new({
54
- :use_iam_profile => true,
55
- :region => options[:aws_region]
56
- })
10
+ module Riemann
11
+ module Tools
12
+ class S3Metrics
13
+ include Riemann::Tools
14
+ require 'fog'
15
+ require 'time'
16
+
17
+ opt :fog_credentials_file, 'Fog credentials file', type: String
18
+ opt :fog_credential, 'Fog credentials to use', type: String
19
+ opt :aws_access, 'AWS Access Key', type: String
20
+ opt :aws_secret, 'AWS Secret Key', type: String
21
+ opt :aws_region, 'AWS Region', type: String, default: 'eu-west-1'
22
+ opt :buckets, 'Buckets to pull metrics from, multi=true', type: String, multi: true, required: true
23
+ opt :statistic, 'Statistic to retrieve, multi=true, e.g. --statistic=Average --statistic=Maximum', type: String,
24
+ multi: true, required: true
25
+
26
+ def base_metrics
27
+ # get last 60 seconds
28
+ start_time = (Time.now.utc - 3600 * 24 * 1).iso8601
29
+ end_time = Time.now.utc.iso8601
30
+
31
+ # The base query that all metrics would get
32
+ {
33
+ 'Namespace' => 'AWS/S3',
34
+ 'StartTime' => start_time,
35
+ 'EndTime' => end_time,
36
+ 'Period' => 3600,
37
+ 'MetricName' => 'NumberOfObjects',
38
+ }
57
39
  end
58
- end
59
-
60
- options[:statistic].each do |statistic|
61
- options[:buckets].each do |bucket|
62
-
63
- metric_base_options = base_metrics
64
- metric_base_options["Statistics"] = statistic
65
- metric_base_options["Dimensions"] = [
66
- {"Name" => "BucketName", "Value" => bucket},
67
- {"Name" => "StorageType", "Value" => "AllStorageTypes"}]
68
40
 
69
- result = connection.get_metric_statistics(metric_base_options)
70
- if result.body["GetMetricStatisticsResult"]["Datapoints"].empty?
71
- next
41
+ def tick
42
+ if options[:fog_credentials_file]
43
+ Fog.credentials_path = options[:fog_credentials_file]
44
+ Fog.credential = options[:fog_credential].to_sym
45
+ connection = Fog::AWS::CloudWatch.new
46
+ else
47
+ connection = if options[:aws_access] && options[:aws_secret]
48
+ Fog::AWS::CloudWatch.new({
49
+ aws_access_key_id: options[:aws_access],
50
+ aws_secret_access_key: options[:aws_secret],
51
+ region: options[:aws_region],
52
+ })
53
+ else
54
+ Fog::AWS::CloudWatch.new({
55
+ use_iam_profile: true,
56
+ region: options[:aws_region],
57
+ })
58
+ end
72
59
  end
73
- result.body["GetMetricStatisticsResult"]["Datapoints"][0].keys.sort.each do |stat_type|
74
- next if stat_type == "Unit"
75
- next if stat_type == "Timestamp"
76
60
 
77
- unit = result.body["GetMetricStatisticsResult"]["Datapoints"][0]["Unit"]
78
- metric = result.body["GetMetricStatisticsResult"]["Datapoints"][0][stat_type]
79
- event = event(bucket, result.body["GetMetricStatisticsResult"]["Label"], stat_type, unit, metric)
80
- report(event)
61
+ options[:statistic].each do |statistic|
62
+ options[:buckets].each do |bucket|
63
+ metric_base_options = base_metrics
64
+ metric_base_options['Statistics'] = statistic
65
+ metric_base_options['Dimensions'] = [
66
+ { 'Name' => 'BucketName', 'Value' => bucket },
67
+ { 'Name' => 'StorageType', 'Value' => 'AllStorageTypes' },
68
+ ]
69
+
70
+ result = connection.get_metric_statistics(metric_base_options)
71
+ next if result.body['GetMetricStatisticsResult']['Datapoints'].empty?
72
+
73
+ result.body['GetMetricStatisticsResult']['Datapoints'][0].keys.sort.each do |stat_type|
74
+ next if stat_type == 'Unit'
75
+ next if stat_type == 'Timestamp'
76
+
77
+ unit = result.body['GetMetricStatisticsResult']['Datapoints'][0]['Unit']
78
+ metric = result.body['GetMetricStatisticsResult']['Datapoints'][0][stat_type]
79
+ event = event(bucket, result.body['GetMetricStatisticsResult']['Label'], stat_type, unit, metric)
80
+ report(event)
81
+ end
82
+ end
81
83
  end
82
84
  end
83
- end
84
- end
85
85
 
86
- private
87
- def event(bucket, label, metric_type, stat_type, metric, unit=nil)
88
- event = {
89
- host: "bucket_#{bucket}",
90
- service: "s3.#{label}.#{metric_type}.#{stat_type}",
91
- ttl: 300,
92
- description: "#{bucket} #{metric_type} #{stat_type} (#{unit})",
93
- tags: ["s3_metrics"],
94
- metric: metric
95
- }
86
+ private
87
+
88
+ def event(bucket, label, metric_type, stat_type, metric, unit = nil)
89
+ {
90
+ host: "bucket_#{bucket}",
91
+ service: "s3.#{label}.#{metric_type}.#{stat_type}",
92
+ ttl: 300,
93
+ description: "#{bucket} #{metric_type} #{stat_type} (#{unit})",
94
+ tags: ['s3_metrics'],
95
+ metric: metric,
96
+ }
97
+ end
98
+ end
96
99
  end
97
100
  end
98
101