riemann-tools 1.1.0 → 1.3.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +2 -0
- data/.gitignore +2 -0
- data/.rubocop.yml +8 -0
- data/.ruby-version +1 -0
- data/CHANGELOG.md +55 -2
- data/Rakefile +17 -3
- data/bin/riemann-apache-status +1 -106
- data/bin/riemann-bench +2 -70
- data/bin/riemann-cloudant +1 -56
- data/bin/riemann-consul +1 -106
- data/bin/riemann-dir-files-count +1 -55
- data/bin/riemann-dir-space +1 -55
- data/bin/riemann-diskstats +1 -92
- data/bin/riemann-fd +2 -81
- data/bin/riemann-freeswitch +2 -119
- data/bin/riemann-haproxy +1 -58
- data/bin/riemann-health +0 -2
- data/bin/riemann-kvminstance +2 -22
- data/bin/riemann-md +8 -0
- data/bin/riemann-memcached +1 -37
- data/bin/riemann-net +0 -2
- data/bin/riemann-nginx-status +1 -85
- data/bin/riemann-ntp +0 -2
- data/bin/riemann-portcheck +1 -44
- data/bin/riemann-proc +1 -108
- data/bin/riemann-varnish +1 -54
- data/bin/riemann-wrapper +113 -0
- data/bin/riemann-zookeeper +1 -39
- data/bin/riemann-zpool +8 -0
- data/lib/riemann/tools/apache_status.rb +107 -0
- data/lib/riemann/tools/bench.rb +72 -0
- data/lib/riemann/tools/cloudant.rb +57 -0
- data/lib/riemann/tools/consul_health.rb +107 -0
- data/lib/riemann/tools/dir_files_count.rb +56 -0
- data/lib/riemann/tools/dir_space.rb +56 -0
- data/lib/riemann/tools/diskstats.rb +94 -0
- data/lib/riemann/tools/fd.rb +81 -0
- data/lib/riemann/tools/freeswitch.rb +119 -0
- data/lib/riemann/tools/haproxy.rb +59 -0
- data/lib/riemann/tools/health.rb +150 -19
- data/lib/riemann/tools/kvm.rb +23 -0
- data/lib/riemann/tools/md.rb +35 -0
- data/lib/riemann/tools/mdstat_parser.tab.rb +340 -0
- data/lib/riemann/tools/memcached.rb +38 -0
- data/lib/riemann/tools/net.rb +2 -1
- data/lib/riemann/tools/nginx_status.rb +86 -0
- data/lib/riemann/tools/ntp.rb +1 -0
- data/lib/riemann/tools/portcheck.rb +45 -0
- data/lib/riemann/tools/proc.rb +109 -0
- data/lib/riemann/tools/riemann_client_wrapper.rb +43 -0
- data/lib/riemann/tools/uptime_parser.tab.rb +323 -0
- data/lib/riemann/tools/varnish.rb +55 -0
- data/lib/riemann/tools/version.rb +1 -1
- data/lib/riemann/tools/zookeeper.rb +40 -0
- data/lib/riemann/tools/zpool.rb +29 -0
- data/lib/riemann/tools.rb +2 -20
- data/riemann-tools.gemspec +10 -1
- data/tools/riemann-aws/Rakefile +6 -9
- data/tools/riemann-aws/bin/riemann-aws-billing +2 -87
- data/tools/riemann-aws/bin/riemann-aws-rds-status +2 -62
- data/tools/riemann-aws/bin/riemann-aws-sqs-status +2 -44
- data/tools/riemann-aws/bin/riemann-aws-status +2 -77
- data/tools/riemann-aws/bin/riemann-elb-metrics +2 -162
- data/tools/riemann-aws/bin/riemann-s3-list +2 -81
- data/tools/riemann-aws/bin/riemann-s3-status +2 -96
- data/tools/riemann-aws/lib/riemann/tools/aws/billing.rb +87 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/elb_metrics.rb +163 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/rds_status.rb +63 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/s3_list.rb +82 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/s3_status.rb +97 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/sqs_status.rb +45 -0
- data/tools/riemann-aws/lib/riemann/tools/aws/status.rb +74 -0
- data/tools/riemann-chronos/Rakefile +6 -9
- data/tools/riemann-chronos/bin/riemann-chronos +1 -154
- data/tools/riemann-chronos/lib/riemann/tools/chronos.rb +157 -0
- data/tools/riemann-docker/Rakefile +5 -8
- data/tools/riemann-docker/bin/riemann-docker +2 -200
- data/tools/riemann-docker/lib/riemann/tools/docker.rb +200 -0
- data/tools/riemann-elasticsearch/Rakefile +6 -9
- data/tools/riemann-elasticsearch/bin/riemann-elasticsearch +1 -167
- data/tools/riemann-elasticsearch/lib/riemann/tools/elasticsearch.rb +170 -0
- data/tools/riemann-marathon/Rakefile +6 -9
- data/tools/riemann-marathon/bin/riemann-marathon +1 -156
- data/tools/riemann-marathon/lib/riemann/tools/marathon.rb +159 -0
- data/tools/riemann-mesos/Rakefile +6 -9
- data/tools/riemann-mesos/bin/riemann-mesos +1 -139
- data/tools/riemann-mesos/lib/riemann/tools/mesos.rb +142 -0
- data/tools/riemann-munin/Rakefile +5 -8
- data/tools/riemann-munin/bin/riemann-munin +1 -36
- data/tools/riemann-munin/lib/riemann/tools/munin.rb +37 -0
- data/tools/riemann-rabbitmq/Rakefile +6 -9
- data/tools/riemann-rabbitmq/bin/riemann-rabbitmq +1 -266
- data/tools/riemann-rabbitmq/lib/riemann/tools/rabbitmq.rb +269 -0
- data/tools/riemann-riak/Rakefile +5 -8
- data/tools/riemann-riak/bin/riemann-riak +1 -316
- data/tools/riemann-riak/bin/riemann-riak-keys +0 -1
- data/tools/riemann-riak/bin/riemann-riak-ring +0 -1
- data/tools/riemann-riak/lib/riemann/tools/riak.rb +317 -0
- metadata +61 -7
- data/.travis.yml +0 -31
- data/tools/riemann-riak/riak_status/key_count.erl +0 -13
- data/tools/riemann-riak/riak_status/riak_status.rb +0 -152
- data/tools/riemann-riak/riak_status/ringready.erl +0 -9
# frozen_string_literal: true

require 'riemann/tools'

module Riemann
  module Tools
    module Aws
      # Counts the objects in one or more S3 buckets (optionally restricted to
      # a key prefix, given as "bucket/prefix") and reports the count to
      # Riemann. Counting stops early once --max-objects is exceeded to save
      # bandwidth; in that case a 'warning' event is emitted instead of 'ok'.
      class S3List
        include Riemann::Tools
        require 'fog/aws'
        require 'time'

        opt :fog_credentials_file, 'Fog credentials file', type: String
        opt :fog_credential, 'Fog credentials to use', type: String
        opt :access_key, 'AWS Access Key', type: String
        opt :secret_key, 'AWS Secret Key', type: String
        opt :region, 'AWS Region', type: String, default: 'eu-west-1'
        opt :buckets, 'Buckets to pull metrics from, multi=true, can have a prefix like mybucket/prefix', type: String,
                                                                                                         multi: true, required: true
        opt :max_objects, 'Max number of objects to list before stopping to save bandwidth', default: -1

        def tick
          # Credential resolution order: explicit Fog credentials file, then
          # static access/secret keys, then the instance IAM profile.
          if options[:fog_credentials_file]
            Fog.credentials_path = options[:fog_credentials_file]
            Fog.credential = options[:fog_credential].to_sym
            connection = Fog::Storage.new
          else
            connection = if options[:access_key] && options[:secret_key]
                           Fog::Storage.new({
                                              provider: 'AWS',
                                              aws_access_key_id: options[:access_key],
                                              aws_secret_access_key: options[:secret_key],
                                              region: options[:region],
                                            })
                         else
                           Fog::Storage.new({
                                              provider: 'AWS',
                                              use_iam_profile: true,
                                              region: options[:region],
                                            })
                         end
          end

          options[:buckets].each do |url|
            split = url.split('/')
            bucket = split[0]
            prefix = ''
            # Everything after the first '/' is treated as the key prefix.
            prefix = url[(split[0].length + 1)..] if split[1]
            count = 0
            # `each`, not `map`: we only count; the returned collection was
            # never used, so building it was wasted work.
            connection.directories.get(bucket, prefix: prefix).files.each do |_file|
              count += 1
              break if options[:max_objects].positive? && count > options[:max_objects]
            end
            event = if options[:max_objects].positive? && count > options[:max_objects]
                      event(
                        url, 'objectCount', count, "count was bigger than threshold #{options[:max_objects]}",
                        'warning',
                      )
                    else
                      event(url, 'objectCount', count, "All objects counted, threshold=#{options[:max_objects]}", 'ok')
                    end
            report(event)
          end
        end

        private

        # Build a Riemann event hash for a bucket metric.
        #
        # bucket      - bucket (or bucket/prefix) the count belongs to
        # label       - metric label, used in the service name ("s3.<label>")
        # metric      - numeric value to report
        # description - human-readable explanation
        # severity    - Riemann state ('ok' or 'warning')
        def event(bucket, label, metric, description, severity)
          {
            host: "bucket_#{bucket}",
            service: "s3.#{label}",
            ttl: 300,
            description: "#{bucket} #{description}",
            tags: ['s3_metrics'],
            metric: metric,
            state: severity,
          }
        end
      end
    end
  end
end
|
# frozen_string_literal: true

require 'riemann/tools'

module Riemann
  module Tools
    module Aws
      # Pulls S3 CloudWatch statistics (NumberOfObjects) for a set of buckets
      # and reports each requested statistic (Average, Maximum, ...) to Riemann.
      class S3Status
        include Riemann::Tools
        require 'fog/aws'
        require 'time'

        opt :fog_credentials_file, 'Fog credentials file', type: String
        opt :fog_credential, 'Fog credentials to use', type: String
        opt :access_key, 'AWS Access Key', type: String
        opt :secret_key, 'AWS Secret Key', type: String
        opt :region, 'AWS Region', type: String, default: 'eu-west-1'
        opt :buckets, 'Buckets to pull metrics from, multi=true', type: String, multi: true, required: true
        opt :statistic, 'Statistic to retrieve, multi=true, e.g. --statistic=Average --statistic=Maximum', type: String,
                                                                                                           multi: true, required: true

        # Common GetMetricStatistics query parameters shared by every request.
        # NOTE(review): the comment below said "last 60 seconds", but the window
        # is actually the last 24 hours (3600 * 24) with a 1-hour period —
        # presumably because S3 storage metrics are only published daily; confirm.
        def base_metrics
          # get last 60 seconds
          start_time = (Time.now.utc - 3600 * 24 * 1).iso8601
          end_time = Time.now.utc.iso8601

          # The base query that all metrics would get
          {
            'Namespace' => 'AWS/S3',
            'StartTime' => start_time,
            'EndTime' => end_time,
            'Period' => 3600,
            'MetricName' => 'NumberOfObjects',
          }
        end

        # Query CloudWatch for each (statistic, bucket) pair and report every
        # numeric datapoint field (skipping 'Unit' and 'Timestamp').
        def tick
          # Credential resolution: Fog credentials file, else static keys,
          # else the instance IAM profile.
          if options[:fog_credentials_file]
            Fog.credentials_path = options[:fog_credentials_file]
            Fog.credential = options[:fog_credential].to_sym
            connection = Fog::AWS::CloudWatch.new
          else
            connection = if options[:access_key] && options[:secret_key]
                           Fog::AWS::CloudWatch.new({
                                                      aws_access_key_id: options[:access_key],
                                                      aws_secret_access_key: options[:secret_key],
                                                      region: options[:region],
                                                    })
                         else
                           Fog::AWS::CloudWatch.new({
                                                      use_iam_profile: true,
                                                      region: options[:region],
                                                    })
                         end
          end

          options[:statistic].each do |statistic|
            options[:buckets].each do |bucket|
              metric_base_options = base_metrics
              metric_base_options['Statistics'] = statistic
              metric_base_options['Dimensions'] = [
                { 'Name' => 'BucketName', 'Value' => bucket },
                { 'Name' => 'StorageType', 'Value' => 'AllStorageTypes' },
              ]

              result = connection.get_metric_statistics(metric_base_options)
              # No datapoints in the window: nothing to report for this pair.
              next if result.body['GetMetricStatisticsResult']['Datapoints'].empty?

              result.body['GetMetricStatisticsResult']['Datapoints'][0].keys.sort.each do |stat_type|
                next if stat_type == 'Unit'
                next if stat_type == 'Timestamp'

                unit = result.body['GetMetricStatisticsResult']['Datapoints'][0]['Unit']
                metric = result.body['GetMetricStatisticsResult']['Datapoints'][0][stat_type]
                # NOTE(review): argument order does not match the signature of
                # #event below — here `stat_type` lands in `metric_type`, `unit`
                # lands in `stat_type`, and the trailing `unit` parameter stays
                # nil, so the description's "(#{unit})" is always empty. The
                # emitted service name "s3.<Label>.<stat>.<unit>" may be relied
                # upon downstream, so this is flagged rather than changed — verify
                # intent before fixing.
                event = event(bucket, result.body['GetMetricStatisticsResult']['Label'], stat_type, unit, metric)
                report(event)
              end
            end
          end
        end

        private

        # Build a Riemann event hash for one CloudWatch datapoint field.
        # See NOTE(review) at the call site: callers currently pass only five
        # arguments, leaving `unit` nil.
        def event(bucket, label, metric_type, stat_type, metric, unit = nil)
          {
            host: "bucket_#{bucket}",
            service: "s3.#{label}.#{metric_type}.#{stat_type}",
            ttl: 300,
            description: "#{bucket} #{metric_type} #{stat_type} (#{unit})",
            tags: ['s3_metrics'],
            metric: metric,
          }
        end
      end
    end
  end
end
|
# frozen_string_literal: true

require 'riemann/tools'

module Riemann
  module Tools
    module Aws
      # Reports SQS queue depth metrics (visible and in-flight message counts)
      # for a single queue to Riemann.
      class SqsStatus
        include Riemann::Tools
        require 'fog/aws'

        opt :access_key, 'AWS access key', type: String
        opt :secret_key, 'Secret access key', type: String
        opt :region, 'AWS region', type: String, default: 'us-east-1'
        opt :queue, 'SQS Queue name', type: String
        def initialize
          # Use static credentials when both keys were given, else fall back to
          # the instance IAM profile. The original checked
          # opts.key?('access_key') with String keys, but Optimist stores
          # options under Symbol keys, so that branch could never be taken and
          # explicit credentials were silently ignored. Test the values
          # directly, as every sibling AWS tool in this gem does.
          creds = if opts[:access_key] && opts[:secret_key]
                    {
                      aws_access_key_id: opts[:access_key],
                      aws_secret_access_key: opts[:secret_key],
                    }
                  else
                    { use_iam_profile: true }
                  end
          # Symbol key: Fog reads :region, not the String 'region'.
          creds[:region] = opts[:region]
          @sqs = Fog::AWS::SQS.new(creds)
          # Resolve the queue URL once up front; first match on the name prefix.
          response = @sqs.list_queues({ 'QueueNamePrefix' => opts[:queue] })
          @queue_url = response[:body]['QueueUrls'].first
        end

        def tick
          response = @sqs.get_queue_attributes(@queue_url, 'All')
          %w[ApproximateNumberOfMessages ApproximateNumberOfMessagesNotVisible].each do |attr|
            msg = {
              metric: response[:body]['Attributes'][attr],
              service: "#{opts[:queue]} #{attr}",
              state: 'ok',
            }
            report msg
          end
        end
      end
    end
  end
end
|
# frozen_string_literal: true

require 'riemann/tools'

module Riemann
  module Tools
    module Aws
      # Reports scheduled EC2 maintenance/retirement events ('eventsSet') for
      # running instances to Riemann, escalating severity as the event's
      # 'notBefore' date approaches.
      class Status
        include Riemann::Tools
        require 'fog/aws'
        require 'date'

        opt :fog_credentials_file, 'Fog credentials file', type: String
        opt :fog_credential, 'Fog credentials to use', type: String
        opt :access_key, 'AWS access key', type: String
        opt :secret_key, 'Secret access key', type: String
        opt :region, 'AWS region', type: String, default: 'eu-west-1'

        opt :retirement_critical, 'Number of days before retirement. Defaults to 2', default: 2
        opt :event_warning, 'Number of days before event. Defaults to nil (i.e. when the event appears)', default: nil

        def initialize
          if options[:fog_credentials_file]
            Fog.credentials_path = options[:fog_credentials_file]
            Fog.credential = options[:fog_credential].to_sym
            @compute = Fog::AWS::Compute.new
          else
            @compute = if options[:access_key] && options[:secret_key]
                         # Fog expects aws_access_key_id / aws_secret_access_key
                         # (as the other AWS tools in this gem pass). The
                         # original used access_key_key_id / secret_key_access_key,
                         # which Fog does not recognize, so static credentials
                         # could never be used on this path.
                         Fog::AWS::Compute.new({
                                                 aws_access_key_id: options[:access_key],
                                                 aws_secret_access_key: options[:secret_key],
                                                 region: options[:region],
                                               })
                       else
                         Fog::AWS::Compute.new({
                                                 use_iam_profile: true,
                                                 region: options[:region],
                                               })
                       end
          end
        end

        def tick
          hosts = @compute.servers.select { |s| s.state == 'running' }

          # NOTE(review): `hosts` is an Array of server objects, so the second
          # block parameter `host_status` is always nil here and
          # host_status['eventsSet'] would raise NoMethodError. This looks like
          # a leftover from iterating a describe-instance-status Hash — confirm
          # against the pre-refactor riemann-aws-status script before changing.
          hosts.each do |host, host_status|
            host_status['eventsSet'].each do |event|
              before, _after = %w[notBefore notAfter].map { |k| Date.parse event[k].to_s if event[k] }

              ev = {
                host: host,
                service: 'aws_instance_status',
                description: "#{event['code']}\n\nstart #{event['notBefore']}\nend #{event['notAfter']}\n\n#{event['description']}",
                state: 'ok',
                ttl: 300,
              }

              # NOTE(review): the elsif and else branches both yield 'warning',
              # so every non-critical event is reported as a warning regardless
              # of --event-warning; presumably the else was meant to stay 'ok'.
              # Behavior preserved pending confirmation.
              ev2 = if (event['code'] == 'instance-retirement') &&
                       (Date.today >= before - opts[:retirement_critical])
                      { state: 'critical' }
                    elsif opts[:event_warning] && (Date.today >= before - opts[:event_warning])
                      { state: 'warning' }
                    else
                      { state: 'warning' }
                    end

              report ev.merge(ev2)
            end
          end
        end
      end
    end
  end
end
|
@@ -11,10 +11,8 @@ ENV['COPYFILE_DISABLE'] = 'true'
|
|
11
11
|
|
12
12
|
# Gemspec
|
13
13
|
gemspec = Gem::Specification.new do |s|
|
14
|
-
s.rubyforge_project = 'riemann-chronos'
|
15
|
-
|
16
14
|
s.name = 'riemann-chronos'
|
17
|
-
s.version = '0.1.
|
15
|
+
s.version = '0.1.3'
|
18
16
|
s.author = 'Peter Ericson'
|
19
17
|
s.email = 'peter.ericson@cba.com.au'
|
20
18
|
s.homepage = 'https://github.com/riemann/riemann-tools'
|
@@ -22,15 +20,14 @@ gemspec = Gem::Specification.new do |s|
|
|
22
20
|
s.summary = 'Submits Chronos stats to riemann.'
|
23
21
|
s.license = 'MIT'
|
24
22
|
|
25
|
-
s.
|
26
|
-
s.
|
27
|
-
s.
|
23
|
+
s.add_runtime_dependency 'riemann-tools', '~> 1.0', '>= 1.1.1'
|
24
|
+
s.add_runtime_dependency 'faraday', '~> 2.3', '>= 2.3.0'
|
25
|
+
s.add_runtime_dependency 'json', '~> 2.6', '>=2.6.2'
|
28
26
|
|
29
|
-
s.files = FileList['bin/*', 'LICENSE', 'README.md'].to_a
|
27
|
+
s.files = FileList['bin/*', 'lib/**/*.rb', 'LICENSE', 'README.md'].to_a
|
30
28
|
s.executables |= Dir.entries('bin/')
|
31
|
-
s.has_rdoc = false
|
32
29
|
|
33
|
-
s.required_ruby_version = '>=
|
30
|
+
s.required_ruby_version = Gem::Requirement.new('>= 2.6.0')
|
34
31
|
end
|
35
32
|
|
36
33
|
Gem::PackageTask.new gemspec do |p|
|
@@ -3,159 +3,6 @@
|
|
3
3
|
|
4
4
|
Process.setproctitle($PROGRAM_NAME)
|
5
5
|
|
6
|
-
require 'riemann/tools'
|
6
|
+
require 'riemann/tools/chronos'
|
7
7
|
|
8
|
-
module Riemann
|
9
|
-
module Tools
|
10
|
-
class Chronos
|
11
|
-
include Riemann::Tools
|
12
|
-
|
13
|
-
require 'faraday'
|
14
|
-
require 'json'
|
15
|
-
require 'uri'
|
16
|
-
|
17
|
-
opt :read_timeout, 'Faraday read timeout', type: :int, default: 2
|
18
|
-
opt :open_timeout, 'Faraday open timeout', type: :int, default: 1
|
19
|
-
opt :path_prefix,
|
20
|
-
'Chronos path prefix for proxied installations e.g. "chronos" for target http://localhost/chronos/metrics', default: '/'
|
21
|
-
opt :chronos_host, 'Chronos host', default: 'localhost'
|
22
|
-
opt :chronos_port, 'Chronos port', type: :int, default: 4400
|
23
|
-
|
24
|
-
def initialize
|
25
|
-
options[:interval] = 60
|
26
|
-
options[:ttl] = 120
|
27
|
-
end
|
28
|
-
|
29
|
-
# Handles HTTP connections and GET requests safely
|
30
|
-
def safe_get(uri)
|
31
|
-
# Handle connection timeouts
|
32
|
-
response = nil
|
33
|
-
begin
|
34
|
-
connection = Faraday.new(uri)
|
35
|
-
response = connection.get do |req|
|
36
|
-
req.options[:timeout] = options[:read_timeout]
|
37
|
-
req.options[:open_timeout] = options[:open_timeout]
|
38
|
-
end
|
39
|
-
rescue StandardError => e
|
40
|
-
report(
|
41
|
-
host: uri.host,
|
42
|
-
service: 'chronos health',
|
43
|
-
state: 'critical',
|
44
|
-
description: "HTTP connection error: #{e.class} - #{e.message}",
|
45
|
-
)
|
46
|
-
end
|
47
|
-
response
|
48
|
-
end
|
49
|
-
|
50
|
-
def health_url
|
51
|
-
path_prefix = options[:path_prefix]
|
52
|
-
path_prefix[0] = '' if path_prefix[0] == '/'
|
53
|
-
path_prefix[path_prefix.length - 1] = '' if path_prefix[path_prefix.length - 1] == '/'
|
54
|
-
"http://#{options[:chronos_host]}:#{options[:chronos_port]}#{path_prefix.length.positive? ? '/' : ''}#{path_prefix}/metrics"
|
55
|
-
end
|
56
|
-
|
57
|
-
def jobs_url
|
58
|
-
path_prefix = options[:path_prefix]
|
59
|
-
path_prefix[0] = '' if path_prefix[0] == '/'
|
60
|
-
path_prefix[path_prefix.length - 1] = '' if path_prefix[path_prefix.length - 1] == '/'
|
61
|
-
"http://#{options[:chronos_host]}:#{options[:chronos_port]}#{path_prefix.length.positive? ? '/' : ''}#{path_prefix}/scheduler/jobs"
|
62
|
-
end
|
63
|
-
|
64
|
-
def tick
|
65
|
-
tick_health
|
66
|
-
tick_jobs
|
67
|
-
end
|
68
|
-
|
69
|
-
def tick_health
|
70
|
-
uri = URI(health_url)
|
71
|
-
response = safe_get(uri)
|
72
|
-
|
73
|
-
return if response.nil?
|
74
|
-
|
75
|
-
if response.status != 200
|
76
|
-
report(
|
77
|
-
host: uri.host,
|
78
|
-
service: 'chronos health',
|
79
|
-
state: 'critical',
|
80
|
-
description: "HTTP connection error: #{response.status} - #{response.body}",
|
81
|
-
)
|
82
|
-
else
|
83
|
-
# Assuming that a 200 will give json
|
84
|
-
json = JSON.parse(response.body)
|
85
|
-
|
86
|
-
report(
|
87
|
-
host: uri.host,
|
88
|
-
service: 'chronos health',
|
89
|
-
state: 'ok',
|
90
|
-
)
|
91
|
-
|
92
|
-
json.each_pair do |t, d|
|
93
|
-
next unless d.respond_to? :each_pair
|
94
|
-
|
95
|
-
d.each_pair do |service, counters|
|
96
|
-
report(
|
97
|
-
host: uri.host,
|
98
|
-
service: "chronos_metric #{t} #{service}",
|
99
|
-
metric: 1,
|
100
|
-
tags: ['metric_name'],
|
101
|
-
ttl: 600,
|
102
|
-
)
|
103
|
-
next unless counters.respond_to? :each_pair
|
104
|
-
|
105
|
-
counters.each_pair do |k, v|
|
106
|
-
next unless v.is_a? Numeric
|
107
|
-
|
108
|
-
report(
|
109
|
-
host: uri.host,
|
110
|
-
service: "chronos #{service} #{k}",
|
111
|
-
metric: v,
|
112
|
-
tags: ['metric', t.to_s],
|
113
|
-
ttl: 600,
|
114
|
-
)
|
115
|
-
end
|
116
|
-
end
|
117
|
-
end
|
118
|
-
end
|
119
|
-
end
|
120
|
-
|
121
|
-
def tick_jobs
|
122
|
-
uri = URI(jobs_url)
|
123
|
-
response = safe_get(uri)
|
124
|
-
|
125
|
-
return if response.nil?
|
126
|
-
|
127
|
-
if response.status != 200
|
128
|
-
report(
|
129
|
-
host: uri.host,
|
130
|
-
service: 'chronos health',
|
131
|
-
state: 'critical',
|
132
|
-
description: "HTTP connection error: #{response.status} - #{response.body}",
|
133
|
-
)
|
134
|
-
else
|
135
|
-
# Assuming that a 200 will give json
|
136
|
-
json = JSON.parse(response.body)
|
137
|
-
|
138
|
-
report(
|
139
|
-
host: uri.host,
|
140
|
-
service: 'chronos health',
|
141
|
-
state: 'ok',
|
142
|
-
)
|
143
|
-
|
144
|
-
json.each do |job|
|
145
|
-
job.each_pair do |k, v|
|
146
|
-
next unless v.is_a? Numeric
|
147
|
-
|
148
|
-
report(
|
149
|
-
host: uri.host,
|
150
|
-
service: "chronos job #{job['name']} #{k}",
|
151
|
-
metric: v,
|
152
|
-
ttl: 120,
|
153
|
-
)
|
154
|
-
end
|
155
|
-
end
|
156
|
-
end
|
157
|
-
end
|
158
|
-
end
|
159
|
-
end
|
160
|
-
end
|
161
8
|
Riemann::Tools::Chronos.run
|
# frozen_string_literal: true

require 'riemann/tools'

module Riemann
  module Tools
    # Polls a Chronos scheduler's /metrics and /scheduler/jobs endpoints and
    # forwards numeric counters and per-job statistics to Riemann.
    class Chronos
      include Riemann::Tools

      require 'faraday'
      require 'json'
      require 'uri'

      opt :read_timeout, 'Faraday read timeout', type: :int, default: 2
      opt :open_timeout, 'Faraday open timeout', type: :int, default: 1
      opt :path_prefix,
          'Chronos path prefix for proxied installations e.g. "chronos" for target http://localhost/chronos/metrics', default: '/'
      opt :chronos_host, 'Chronos host', default: 'localhost'
      opt :chronos_port, 'Chronos port', type: :int, default: 4400

      def initialize
        options[:interval] = 60
        options[:ttl] = 120
      end

      # Handles HTTP connections and GET requests safely: any transport error
      # is reported as a critical 'chronos health' event and nil is returned.
      def safe_get(uri)
        # Handle connection timeouts
        response = nil
        begin
          connection = Faraday.new(uri)
          response = connection.get do |req|
            req.options[:timeout] = options[:read_timeout]
            req.options[:open_timeout] = options[:open_timeout]
          end
        rescue StandardError => e
          report(
            host: uri.host,
            service: 'chronos health',
            state: 'critical',
            description: "HTTP connection error: #{e.class} - #{e.message}",
          )
        end
        response
      end

      # Base URL with the (slash-trimmed) path prefix applied.
      #
      # The original health_url/jobs_url stripped slashes by mutating
      # options[:path_prefix] in place (path_prefix[0] = ''), which destroyed
      # the configured value and forced the default to be '/'.dup under
      # frozen_string_literal. delete_prefix/delete_suffix return trimmed
      # copies, producing the identical URL without the side effect.
      def base_url
        path_prefix = options[:path_prefix].delete_prefix('/').delete_suffix('/')
        "http://#{options[:chronos_host]}:#{options[:chronos_port]}#{path_prefix.empty? ? '' : '/'}#{path_prefix}"
      end

      def health_url
        "#{base_url}/metrics"
      end

      def jobs_url
        "#{base_url}/scheduler/jobs"
      end

      def tick
        tick_health
        tick_jobs
      end

      # Fetch /metrics; report overall health plus every numeric counter found
      # in the nested metric groups.
      def tick_health
        uri = URI(health_url)
        response = safe_get(uri)

        return if response.nil?

        if response.status != 200
          report(
            host: uri.host,
            service: 'chronos health',
            state: 'critical',
            description: "HTTP connection error: #{response.status} - #{response.body}",
          )
        else
          # Assuming that a 200 will give json
          json = JSON.parse(response.body)

          report(
            host: uri.host,
            service: 'chronos health',
            state: 'ok',
          )

          json.each_pair do |t, d|
            next unless d.respond_to? :each_pair

            d.each_pair do |service, counters|
              report(
                host: uri.host,
                service: "chronos_metric #{t} #{service}",
                metric: 1,
                tags: ['metric_name'],
                ttl: 600,
              )
              next unless counters.respond_to? :each_pair

              counters.each_pair do |k, v|
                next unless v.is_a? Numeric

                report(
                  host: uri.host,
                  service: "chronos #{service} #{k}",
                  metric: v,
                  tags: ['metric', t.to_s],
                  ttl: 600,
                )
              end
            end
          end
        end
      end

      # Fetch /scheduler/jobs; report overall health plus every numeric field
      # of each job.
      def tick_jobs
        uri = URI(jobs_url)
        response = safe_get(uri)

        return if response.nil?

        if response.status != 200
          report(
            host: uri.host,
            service: 'chronos health',
            state: 'critical',
            description: "HTTP connection error: #{response.status} - #{response.body}",
          )
        else
          # Assuming that a 200 will give json
          json = JSON.parse(response.body)

          report(
            host: uri.host,
            service: 'chronos health',
            state: 'ok',
          )

          json.each do |job|
            job.each_pair do |k, v|
              next unless v.is_a? Numeric

              report(
                host: uri.host,
                service: "chronos job #{job['name']} #{k}",
                metric: v,
                ttl: 120,
              )
            end
          end
        end
      end
    end
  end
end
@@ -11,10 +11,8 @@ ENV['COPYFILE_DISABLE'] = 'true'
|
|
11
11
|
|
12
12
|
# Gemspec
|
13
13
|
gemspec = Gem::Specification.new do |s|
|
14
|
-
s.rubyforge_project = 'riemann-docker'
|
15
|
-
|
16
14
|
s.name = 'riemann-docker'
|
17
|
-
s.version = '0.1.
|
15
|
+
s.version = '0.1.5'
|
18
16
|
s.author = 'Shani Elharrar'
|
19
17
|
s.email = ''
|
20
18
|
s.homepage = 'https://github.com/riemann/riemann-tools'
|
@@ -22,14 +20,13 @@ gemspec = Gem::Specification.new do |s|
|
|
22
20
|
s.summary = 'Submits Docker container stats to riemann.'
|
23
21
|
s.license = 'MIT'
|
24
22
|
|
25
|
-
s.
|
26
|
-
s.
|
23
|
+
s.add_runtime_dependency 'riemann-tools', '~> 1.0', '>= 1.1.1'
|
24
|
+
s.add_runtime_dependency 'docker-api', '~> 1.22', '>= 1.22.0'
|
27
25
|
|
28
|
-
s.files = FileList['bin/*', 'LICENSE', 'README.md'].to_a
|
26
|
+
s.files = FileList['bin/*', 'lib/**/*.rb', 'LICENSE', 'README.md'].to_a
|
29
27
|
s.executables |= Dir.entries('bin/')
|
30
|
-
s.has_rdoc = false
|
31
28
|
|
32
|
-
s.required_ruby_version = '>=
|
29
|
+
s.required_ruby_version = Gem::Requirement.new('>= 2.6.0')
|
33
30
|
end
|
34
31
|
|
35
32
|
Gem::PackageTask.new gemspec do |p|
|