sensu-plugins-mongodb-wt 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +156 -0
- data/LICENSE +22 -0
- data/README.md +26 -0
- data/bin/check-mongodb-metric.rb +144 -0
- data/bin/check-mongodb.py +1471 -0
- data/bin/check-mongodb.rb +6 -0
- data/bin/metrics-mongodb-replication.rb +268 -0
- data/bin/metrics-mongodb.rb +133 -0
- data/lib/sensu-plugins-mongodb.rb +1 -0
- data/lib/sensu-plugins-mongodb/metrics.rb +449 -0
- data/lib/sensu-plugins-mongodb/version.rb +9 -0
- metadata +237 -0
data/bin/metrics-mongodb-replication.rb
@@ -0,0 +1,268 @@

#! /usr/bin/env ruby
#
#   metrics-mongodb-replication.rb
#
# DESCRIPTION:
#
# OUTPUT:
#   metric data
#
# PLATFORMS:
#   Linux
#
# DEPENDENCIES:
#   gem: sensu-plugin
#   gem: mongo
#   gem: bson
#   gem: bson_ext
#
# USAGE:
#   #YELLOW
#
# NOTES:
#   Basics from github.com/sensu-plugins/sensu-plugins-mongodb/bin/metrics-mongodb
#
#   Replication lag is calculated by obtaining the last optime from primary and
#   secondary members. The last optime of the secondary is subtracted from the
#   last optime of the primary to produce the difference in seconds, minutes and hours.
#
# LICENSE:
#   Copyright 2016 Rycroft Solutions
#   Released under the same terms as Sensu (the MIT license); see LICENSE
#   for details.
#

require 'sensu-plugin/metric/cli'
require 'mongo'
require 'date'
include Mongo

#
# Mongodb
#

class MongoDB < Sensu::Plugin::Metric::CLI::Graphite
  option :host,
         description: 'MongoDB host',
         long: '--host HOST',
         default: 'localhost'

  option :port,
         description: 'MongoDB port',
         long: '--port PORT',
         default: 27_017

  option :user,
         description: 'MongoDB user',
         long: '--user USER',
         default: nil

  option :password,
         description: 'MongoDB password',
         long: '--password PASSWORD',
         default: nil

  option :ssl,
         description: 'Connect using SSL',
         long: '--ssl',
         default: false

  option :ssl_cert,
         description: 'The certificate file used to identify the local connection against mongod',
         long: '--ssl-cert SSL_CERT',
         default: ''

  option :ssl_key,
         description: 'The private key used to identify the local connection against mongod',
         long: '--ssl-key SSL_KEY',
         default: ''

  option :ssl_ca_cert,
         description: 'The set of concatenated CA certificates, which are used to validate certificates passed from the other end of the connection',
         long: '--ssl-ca-cert SSL_CA_CERT',
         default: ''

  option :ssl_verify,
         description: 'Whether or not to do peer certification validation',
         long: '--ssl-verify',
         default: false

  option :scheme,
         description: 'Metric naming scheme',
         long: '--scheme SCHEME',
         short: '-s SCHEME',
         default: "#{Socket.gethostname}.mongodb"

  option :password,
         description: 'MongoDB password',
         long: '--password PASSWORD',
         default: nil

  option :debug,
         description: 'Enable debug',
         long: '--debug',
         default: false

  def get_mongo_doc(command)
    rs = @db.command(command)
    unless rs.successful?
      return nil
    end
    rs.documents[0]
  end

  # connects to mongo and sets @db, works with MongoClient < 2.0.0
  def connect_mongo_db
    if Gem.loaded_specs['mongo'].version < Gem::Version.new('2.0.0')
      mongo_client = MongoClient.new(host, port)
      @db = mongo_client.db(db_name)
      @db.authenticate(db_user, db_password) unless db_user.nil?
    else
      address_str = "#{config[:host]}:#{config[:port]}"
      client_opts = {}
      client_opts[:database] = 'admin'
      unless config[:user].nil?
        client_opts[:user] = config[:user]
        client_opts[:password] = config[:password]
      end
      if config[:ssl]
        client_opts[:ssl] = true
        client_opts[:ssl_cert] = config[:ssl_cert]
        client_opts[:ssl_key] = config[:ssl_key]
        client_opts[:ssl_ca_cert] = config[:ssl_ca_cert]
        client_opts[:ssl_verify] = config[:ssl_verify]
      end
      mongo_client = Mongo::Client.new([address_str], client_opts)
      @db = mongo_client.database
    end
  end

  def run
    Mongo::Logger.logger.level = Logger::FATAL
    @debug = config[:debug]
    if @debug
      Mongo::Logger.logger.level = Logger::DEBUG
      config_debug = config.clone
      config_debug[:password] = '***'
      puts 'arguments:' + config_debug.inspect
    end

    connect_mongo_db

    _result = false
    # check if master
    begin
      @is_master = get_mongo_doc('isMaster' => 1)
      unless @is_master.nil?
        _result = @is_master['ok'] == 1
      end
    rescue StandardError => e
      if @debug
        puts 'Error checking isMaster:' + e.message
        puts e.backtrace.inspect
      end
      exit(1)
    end

    replication_status = get_mongo_doc('replSetGetStatus' => 1)

    # get the replication metrics
    begin
      metrics = {}
      if !replication_status.nil? && replication_status['ok'] == 1
        metrics.update(gather_replication_metrics(replication_status))
        timestamp = Time.now.to_i
        metrics.each do |k, v|
          unless v.nil?
            output [config[:scheme], 'replication', k].join('.'), v, timestamp
          end
        end
      end
    rescue StandardError => e
      if @debug
        puts 'Error checking replicationStatus:' + e.message
        puts e.backtrace.inspect
      end
      exit(2)
    end

    # Get the replication member metrics
    begin
      metrics = {}
      replication_members = replication_status['members']
      unless replication_members.nil?
        state_map = {
          'PRIMARY' => 1,
          'SECONDARY' => 2
        }
        state_map.default = 3
        replication_members.sort! { |x, y| state_map[x['stateStr']] <=> state_map[y['stateStr']] }

        replication_members.each do |replication_member_details|
          metrics.update(gather_replication_member_metrics(replication_member_details))
          member_id = replication_member_details['_id']
          timestamp = Time.now.to_i
          metrics.each do |k, v|
            unless v.nil?
              output [config[:scheme], "member_#{member_id}", k].join('.'), v, timestamp
            end
          end
        end
      end
    rescue StandardError => e
      if @debug
        puts 'Error checking replicationMemberStatus:' + e.message
        puts e.backtrace.inspect
      end
      exit(2)
    end

    # done!
    ok
  end

  def gather_replication_metrics(replication_status)
    replication_metrics = {}

    replication_metrics['replica_set'] = replication_status['set']
    replication_metrics['date'] = replication_status['date']
    replication_metrics['myState'] = replication_status['myState']
    replication_metrics['term'] = replication_status['term']
    replication_metrics['heartbeatIntervalMillis'] = replication_status['heartbeatIntervalMillis']

    replication_metrics
  end

  def gather_replication_member_metrics(replication_member_details)
    replication_member_metrics = {}

    replication_member_metrics['id'] = replication_member_details['_id']
    replication_member_metrics['name'] = replication_member_details['name']
    replication_member_metrics['health'] = replication_member_details['health']
    replication_member_metrics['state'] = replication_member_details['state']
    replication_member_metrics['stateStr'] = replication_member_details['stateStr']
    member_hierarchy = replication_member_details['stateStr']
    if member_hierarchy == 'PRIMARY'
      @primary_optime_date = replication_member_details['optimeDate']
      replication_member_metrics['primary.startOptimeDate'] = @primary_optime_date
    end
    if member_hierarchy == 'SECONDARY'
      @secondary_optime_date = replication_member_details['optimeDate']
      difference_in_seconds = (@primary_optime_date - @secondary_optime_date).to_i
      difference_in_minutes = ((@primary_optime_date - @secondary_optime_date) / 60).to_i
      difference_in_hours = ((@primary_optime_date - @secondary_optime_date) / 3600).to_i
      replication_member_metrics['secondsBehindPrimary'] = difference_in_seconds
      replication_member_metrics['minutesBehindPrimary'] = difference_in_minutes
      replication_member_metrics['hoursBehindPrimary'] = difference_in_hours
    end
    replication_member_metrics['optimeDate'] = replication_member_details['optimeDate']
    replication_member_metrics['uptime'] = replication_member_details['uptime']
    replication_member_metrics['lastHeartbeat'] = replication_member_details['lastHeartbeat']
    replication_member_metrics['lastHeartbeatRecv'] = replication_member_details['lastHeartbeatiRecv']
    replication_member_metrics['pingMs'] = replication_member_details['pingMs']
    replication_member_metrics['syncingTo'] = replication_member_details['syncingTo']
    replication_member_metrics['configVersion'] = replication_member_details['configVersion']

    replication_member_metrics
  end
end
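The lag figures emitted above depend on the member sort: state_map orders the PRIMARY first, so @primary_optime_date is already set when each SECONDARY is processed, and the three *BehindPrimary values are plain Time subtraction. A minimal sketch of that arithmetic with hypothetical optime values (the timestamps below are illustrative, not taken from the package):

require 'time'

# Hypothetical optimeDate values as they would appear in replSetGetStatus members.
primary_optime   = Time.parse('2016-06-01 12:00:00 UTC')
secondary_optime = Time.parse('2016-06-01 11:58:30 UTC')

# Time - Time yields seconds as a Float; the plugin truncates with to_i.
seconds_behind = (primary_optime - secondary_optime).to_i          # => 90
minutes_behind = ((primary_optime - secondary_optime) / 60).to_i   # => 1
hours_behind   = ((primary_optime - secondary_optime) / 3600).to_i # => 0

puts seconds_behind # emitted by the plugin under <scheme>.member_<id>.secondsBehindPrimary

Each member's values are then written as Graphite paths of the form <scheme>.member_<id>.<metric>.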
data/bin/metrics-mongodb.rb
@@ -0,0 +1,133 @@

#! /usr/bin/env ruby
#
#   metrics-mongodb.rb
#
# DESCRIPTION:
#
# OUTPUT:
#   metric data
#
# PLATFORMS:
#   Linux
#
# DEPENDENCIES:
#   gem: sensu-plugin
#   gem: mongo
#   gem: bson
#   gem: bson_ext
#
# USAGE:
#   #YELLOW
#
# NOTES:
#   Basics from github.com/mantree/mongodb-graphite-metrics
#
# LICENSE:
#   Copyright 2013 github.com/foomatty
#   Released under the same terms as Sensu (the MIT license); see LICENSE
#   for details.
#

require 'sensu-plugin/metric/cli'
require 'sensu-plugins-mongodb/metrics'
require 'mongo'
include Mongo

#
# Mongodb
#

class MongoDB < Sensu::Plugin::Metric::CLI::Graphite
  option :host,
         description: 'MongoDB host',
         long: '--host HOST',
         default: 'localhost'

  option :port,
         description: 'MongoDB port',
         long: '--port PORT',
         default: 27_017

  option :user,
         description: 'MongoDB user',
         long: '--user USER',
         default: nil

  option :password,
         description: 'MongoDB password',
         long: '--password PASSWORD',
         default: nil

  option :ssl,
         description: 'Connect using SSL',
         long: '--ssl',
         default: false

  option :ssl_cert,
         description: 'The certificate file used to identify the local connection against mongod',
         long: '--ssl-cert SSL_CERT',
         default: ''

  option :ssl_key,
         description: 'The private key used to identify the local connection against mongod',
         long: '--ssl-key SSL_KEY',
         default: ''

  option :ssl_ca_cert,
         description: 'The set of concatenated CA certificates, which are used to validate certificates passed from the other end of the connection',
         long: '--ssl-ca-cert SSL_CA_CERT',
         default: ''

  option :ssl_verify,
         description: 'Whether or not to do peer certification validation',
         long: '--ssl-verify',
         default: false

  option :debug,
         description: 'Enable debug',
         long: '--debug',
         default: false

  option :scheme,
         description: 'Metric naming scheme',
         long: '--scheme SCHEME',
         short: '-s SCHEME',
         default: "#{Socket.gethostname}.mongodb"

  option :require_master,
         description: 'Require the node to be a master node',
         long: '--require-master',
         default: false

  option :exclude_db_sizes,
         description: 'Exclude database sizes',
         long: '--exclude-db-sizes',
         default: false

  def run
    Mongo::Logger.logger.level = Logger::FATAL
    @debug = config[:debug]
    if @debug
      Mongo::Logger.logger.level = Logger::DEBUG
      config_debug = config.clone
      config_debug[:password] = '***'
      puts 'Arguments: ' + config_debug.inspect
    end

    # Get the metrics.
    collector = SensuPluginsMongoDB::Metrics.new(config)
    collector.connect_mongo_db('admin')
    exit(1) if config[:require_master] && !collector.master?
    metrics = collector.server_metrics
    metrics = metrics.reject { |k, _v| k[/databaseSizes/] } if config[:exclude_db_sizes]

    # Print them in graphite format.
    timestamp = Time.now.to_i
    metrics.each do |k, v|
      output [config[:scheme], k].join('.'), v, timestamp
    end

    # done!
    ok
  end
end
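As with the replication script, metrics-mongodb.rb inherits output from Sensu::Plugin::Metric::CLI::Graphite, which prints one Graphite plaintext line per metric: the dotted path, the value, and a Unix timestamp, separated by spaces. A rough plain-Ruby sketch of that formatting, using a hypothetical scheme and hypothetical metric values:

# Hypothetical --scheme value and collector output, for illustration only.
scheme    = 'myhost.mongodb'
metrics   = { 'connections.current' => 42, 'opcounters.query' => 1337 }
timestamp = Time.now.to_i

metrics.each do |k, v|
  # Graphite plaintext protocol: "<path> <value> <timestamp>"
  puts [[scheme, k].join('.'), v, timestamp].join(' ')
end
# e.g. "myhost.mongodb.connections.current 42 <timestamp>"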
data/lib/sensu-plugins-mongodb.rb
@@ -0,0 +1 @@

require 'sensu-plugins-mongodb/version'
data/lib/sensu-plugins-mongodb/metrics.rb
@@ -0,0 +1,449 @@

require 'mongo'
include Mongo

module SensuPluginsMongoDB
  class Metrics
    # Initializes a Metrics collector.
    #
    # @param config [Mesh]
    #   the config object parsed from the command line.
    #   Must include: :host, :port, :user, :password, :debug
    def initialize(config)
      @config = config
      @connected = false
      @db = nil
      @mongo_client = nil
    end

    # Connects to a mongo database.
    #
    # @param db_name [String] the name of the db to connect to.
    def connect_mongo_db(db_name)
      if @connected
        raise 'Already connected to a database'
      end

      db_user = @config[:user]
      db_password = @config[:password]

      if Gem.loaded_specs['mongo'].version < Gem::Version.new('2.0.0')
        @mongo_client = get_mongo_client(db_name)
        @db = @mongo_client.db(db_name)
        @db.authenticate(db_user, db_password) unless db_user.nil?
      else
        @mongo_client = get_mongo_client(db_name)
        @db = @mongo_client.database
      end
    end

    # Fetches a document from the mongo db.
    #
    # @param command [Mesh] the command to search documents with.
    # @return [Mesh, nil] the first document or nil.
    def get_mongo_doc(command)
      unless @connected
        raise 'Cannot fetch documents before connecting.'
      end
      unless @db
        raise 'Cannot fetch documents without a db.'
      end

      rs = @db.command(command)
      unless rs.successful?
        return nil
      end
      rs.documents[0]
    end

    # Checks if the connected node is the master node.
    #
    # @return [true, false] true when the node is a master node.
    def master?
      result = false
      begin
        @is_master = get_mongo_doc('isMaster' => 1)
        unless @is_master.nil?
          result = @is_master['ok'] == 1 && @is_master['ismaster']
        end
      rescue StandardError => e
        if @config[:debug]
          puts 'Error checking isMaster: ' + e.message
          puts e.backtrace.inspect
        end
      end
      result
    end

    # Fetches the status of the server (which includes the metrics).
    #
    # @return [Mash, nil] the document showing the server status or nil.
    def server_status
      status = get_mongo_doc('serverStatus' => 1)
      return nil if status.nil? || status['ok'] != 1
      return status
    rescue StandardError => e
      if @debug
        puts 'Error checking serverStatus: ' + e.message
        puts e.backtrace.inspect
      end
    end

    # Fetches the replicaset status of the server (which includes the metrics).
    #
    # @return [Mash, nil] the document showing the replicaset status or nil.
    def replicaset_status
      status = get_mongo_doc('replSetGetStatus' => 1)
      return nil if status.nil?
      return status
    rescue StandardError => e
      if @debug
        puts 'Error checking replSetGetStatus: ' + e.message
        puts e.backtrace.inspect
      end
    end

    # Fetches metrics for the server we are connected to.
    #
    # @return [Mash] the metrics for the server.
    # rubocop:disable Metrics/AbcSize
    def server_metrics
      server_status = self.server_status
      replicaset_status = self.replicaset_status
      server_metrics = {}
      # Handle versions like "2.6.11-pre" etc
      mongo_version = server_status['version'].gsub(/[^0-9\.]/i, '')

      server_metrics['lock.ratio'] = sprintf('%.5f', server_status['globalLock']['ratio']).to_s unless server_status['globalLock']['ratio'].nil?

      # Asserts
      asserts = server_status['asserts']
      server_metrics['asserts.warnings'] = asserts['warning']
      server_metrics['asserts.errors'] = asserts['msg']
      server_metrics['asserts.regular'] = asserts['regular']
      server_metrics['asserts.user'] = asserts['user']
      server_metrics['asserts.rollovers'] = asserts['rollovers']

      # Background flushing
      if server_status.key?('backgroundFlushing')
        bg_flushing = server_status['backgroundFlushing']
        server_metrics['backgroundFlushing.flushes'] = bg_flushing['flushes']
        server_metrics['backgroundFlushing.total_ms'] = bg_flushing['total_ms']
        server_metrics['backgroundFlushing.average_ms'] = bg_flushing['average_ms']
        server_metrics['backgroundFlushing.last_ms'] = bg_flushing['last_ms']
      end

      # Connections
      connections = server_status['connections']
      server_metrics['connections.current'] = connections['current']
      server_metrics['connections.available'] = connections['available']
      server_metrics['connections.totalCreated'] = connections['totalCreated']

      # Cursors (use new metrics.cursor from mongo 2.6+)
      if Gem::Version.new(mongo_version) < Gem::Version.new('2.6.0')
        cursors = server_status['cursors']
        server_metrics['clientCursors.size'] = cursors['clientCursors_size']
        server_metrics['cursors.timedOut'] = cursors['timedOut']

        # Metric names match the version 2.6+ format for standardization!
        server_metrics['cursors.open.NoTimeout'] = cursors['totalNoTimeout']
        server_metrics['cursors.open.pinned'] = cursors['pinned']
        server_metrics['cursors.open.total'] = cursors['totalOpen']
      else
        cursors = server_status['metrics']['cursor']
        server_metrics['cursors.timedOut'] = cursors['timedOut']
        # clientCursors.size has been replaced by cursors.open.total

        open = cursors['open']
        server_metrics['cursors.open.noTimeout'] = open['noTimeout']
        server_metrics['cursors.open.pinned'] = open['pinned']
        server_metrics['cursors.open.total'] = open['total']

        unless Gem::Version.new(mongo_version) < Gem::Version.new('3.0.0')
          server_metrics['cursors.open.multiTarget'] = open['multiTarget']
          server_metrics['cursors.open.singleTarget'] = open['singleTarget']
        end
      end

      # Database Sizes
      @mongo_client.database_names.each do |name|
        @mongo_client = @mongo_client.use(name)
        db = @mongo_client.database
        result = db.command(dbstats: 1).documents.first
        server_metrics["databaseSizes.#{name}.collections"] = result['collections']
        server_metrics["databaseSizes.#{name}.objects"] = result['objects']
        server_metrics["databaseSizes.#{name}.avgObjSize"] = result['avgObjSize']
        server_metrics["databaseSizes.#{name}.dataSize"] = result['dataSize']
        server_metrics["databaseSizes.#{name}.storageSize"] = result['storageSize']
        server_metrics["databaseSizes.#{name}.numExtents"] = result['numExtents']
        server_metrics["databaseSizes.#{name}.indexes"] = result['indexes']
        server_metrics["databaseSizes.#{name}.indexSize"] = result['indexSize']
        server_metrics["databaseSizes.#{name}.fileSize"] = result['fileSize']
        server_metrics["databaseSizes.#{name}.nsSizeMB"] = result['nsSizeMB']
      end
      # Reset back to previous database
      @mongo_client = @mongo_client.use(@db.name)

      # Journaling (durability)
      if server_status.key?('dur')
        dur = server_status['dur']
        server_metrics['journal.commits'] = dur['commits']
        server_metrics['journaled_MB'] = dur['journaledMB']
        server_metrics['journal.timeMs.writeToDataFiles'] = dur['timeMs']['writeToDataFiles']
        server_metrics['journal.writeToDataFilesMB'] = dur['writeToDataFilesMB']
        server_metrics['journal.compression'] = dur['compression']
        server_metrics['journal.commitsInWriteLock'] = dur['commitsInWriteLock']
        server_metrics['journal.timeMs.dt'] = dur['timeMs']['dt']
        server_metrics['journal.timeMs.prepLogBuffer'] = dur['timeMs']['prepLogBuffer']
        server_metrics['journal.timeMs.writeToJournal'] = dur['timeMs']['writeToJournal']
        server_metrics['journal.timeMs.remapPrivateView'] = dur['timeMs']['remapPrivateView']
      end

      # Extra info
      extra_info = server_status['extra_info']
      server_metrics['mem.heap_usage_bytes'] = extra_info['heap_usage_bytes']
      server_metrics['mem.pageFaults'] = extra_info['page_faults']

      # Global Lock
      global_lock = server_status['globalLock']
      server_metrics['lock.totalTime'] = global_lock['totalTime']
      server_metrics['lock.queue_total'] = global_lock['currentQueue']['total']
      server_metrics['lock.queue_readers'] = global_lock['currentQueue']['readers']
      server_metrics['lock.queue_writers'] = global_lock['currentQueue']['writers']
      server_metrics['lock.clients_total'] = global_lock['activeClients']['total']
      server_metrics['lock.clients_readers'] = global_lock['activeClients']['readers']
      server_metrics['lock.clients_writers'] = global_lock['activeClients']['writers']

      # Index counters
      if Gem::Version.new(mongo_version) < Gem::Version.new('3.0.0')
        index_counters = server_status['indexCounters']
        index_counters = server_status['indexCounters']['btree'] unless server_status['indexCounters']['btree'].nil?

        server_metrics['indexes.missRatio'] = sprintf('%.5f', index_counters['missRatio']).to_s
        server_metrics['indexes.hits'] = index_counters['hits']
        server_metrics['indexes.misses'] = index_counters['misses']
        server_metrics['indexes.accesses'] = index_counters['accesses']
        server_metrics['indexes.resets'] = index_counters['resets']
      end

      # Locks (from mongo 3.0+ only)
      unless Gem::Version.new(mongo_version) < Gem::Version.new('3.0.0')
        locks = server_status['locks']
        lock_namespaces = %w(
          Collection Global Database Metadata
          MMAPV1Journal oplog
        )
        lock_dimentions = %w(
          acquireCount acquireWaitCount
          timeAcquiringMicros deadlockCount
        )

        lock_namespaces.each do |ns|
          lock_dimentions.each do |dm|
            next unless locks.key?(ns) && locks[ns].key?(dm)
            lock = locks[ns][dm]
            server_metrics["locks.#{ns}.#{dm}_r"] = lock['r'] if lock.key?('r')
            server_metrics["locks.#{ns}.#{dm}_w"] = lock['r'] if lock.key?('w')
            server_metrics["locks.#{ns}.#{dm}_R"] = lock['r'] if lock.key?('R')
            server_metrics["locks.#{ns}.#{dm}_W"] = lock['r'] if lock.key?('W')
          end
        end
      end

      # Network
      network = server_status['network']
      server_metrics['network.bytesIn'] = network['bytesIn']
      server_metrics['network.bytesOut'] = network['bytesOut']
      server_metrics['network.numRequests'] = network['numRequests']

      # Opcounters
      opcounters = server_status['opcounters']
      server_metrics['opcounters.insert'] = opcounters['insert']
      server_metrics['opcounters.query'] = opcounters['query']
      server_metrics['opcounters.update'] = opcounters['update']
      server_metrics['opcounters.delete'] = opcounters['delete']
      server_metrics['opcounters.getmore'] = opcounters['getmore']
      server_metrics['opcounters.command'] = opcounters['command']

      # Opcounters Replication
      opcounters_repl = server_status['opcountersRepl']
      server_metrics['opcountersRepl.insert'] = opcounters_repl['insert']
      server_metrics['opcountersRepl.query'] = opcounters_repl['query']
      server_metrics['opcountersRepl.update'] = opcounters_repl['update']
      server_metrics['opcountersRepl.delete'] = opcounters_repl['delete']
      server_metrics['opcountersRepl.getmore'] = opcounters_repl['getmore']
      server_metrics['opcountersRepl.command'] = opcounters_repl['command']

      # Memory
      mem = server_status['mem']
      server_metrics['mem.residentMb'] = mem['resident']
      server_metrics['mem.virtualMb'] = mem['virtual']
      server_metrics['mem.mapped'] = mem['mapped']
      server_metrics['mem.mappedWithJournal'] = mem['mappedWithJournal']

      # Metrics (documents)
      document = server_status['metrics']['document']
      server_metrics['metrics.document.deleted'] = document['deleted']
      server_metrics['metrics.document.inserted'] = document['inserted']
      server_metrics['metrics.document.returned'] = document['returned']
      server_metrics['metrics.document.updated'] = document['updated']

      # Metrics (getLastError)
      get_last_error = server_status['metrics']['getLastError']
      server_metrics['metrics.getLastError.wtime_num'] = get_last_error['wtime']['num']
      server_metrics['metrics.getLastError.wtime_totalMillis'] = get_last_error['wtime']['totalMillis']
      server_metrics['metrics.getLastError.wtimeouts'] = get_last_error['wtimeouts']

      # Metrics (operation)
      operation = server_status['metrics']['operation']
      server_metrics['metrics.operation.fastmod'] = operation['fastmod']
      server_metrics['metrics.operation.idhack'] = operation['idhack']
      server_metrics['metrics.operation.scanAndOrder'] = operation['scanAndOrder']

      # Metrics (queryExecutor)
      query_executor = server_status['metrics']['queryExecutor']
      server_metrics['metrics.queryExecutor.scanned'] = query_executor['scanned']
      server_metrics['metrics.queryExecutor.scannedObjects'] = query_executor['scannedObjects']
      server_metrics['metrics.record.moves'] = server_status['metrics']['record']['moves']

      # Metrics (repl)
      repl = server_status['metrics']['repl']
      server_metrics['metrics.repl.apply.batches_num'] = repl['apply']['batches']['num']
      server_metrics['metrics.repl.apply.batches_totalMillis'] = repl['apply']['batches']['totalMillis']
      server_metrics['metrics.repl.apply.ops'] = repl['apply']['ops']
      server_metrics['metrics.repl.buffer.count'] = repl['buffer']['count']
      server_metrics['metrics.repl.buffer.maxSizeBytes'] = repl['buffer']['maxSizeBytes']
      server_metrics['metrics.repl.buffer.sizeBytes'] = repl['buffer']['sizeBytes']
      server_metrics['metrics.repl.network.bytes'] = repl['network']['bytes']
      server_metrics['metrics.repl.network.getmores_num'] = repl['network']['getmores']['num']
      server_metrics['metrics.repl.network.getmores_totalMillis'] = repl['network']['getmores']['totalMillis']
      server_metrics['metrics.repl.network.ops'] = repl['network']['ops']
      server_metrics['metrics.repl.network.readersCreated'] = repl['network']['readersCreated']
      server_metrics['metrics.repl.preload.docs_num'] = repl['preload']['docs']['num']
      server_metrics['metrics.repl.preload.docs_totalMillis'] = repl['preload']['docs']['totalMillis']
      server_metrics['metrics.repl.preload.indexes_num'] = repl['preload']['indexes']['num']
      server_metrics['metrics.repl.preload.indexes_totalMillis'] = repl['preload']['indexes']['totalMillis']

      # Metrics (replicaset status)
      # MongoDB will fail if not running with --replSet, hence the check for nil
      unless replicaset_status.nil?
        server_metrics['metrics.replicaset.state'] = replicaset_status['myState']
      end

      # Metrics (storage)
      if Gem::Version.new(mongo_version) >= Gem::Version.new('2.6.0')
        freelist = server_status['metrics']['storage']['freelist']
        server_metrics['metrics.storage.freelist.search_bucketExhauseted'] = freelist['search']['bucketExhausted']
        server_metrics['metrics.storage.freelist.search_requests'] = freelist['search']['requests']
        server_metrics['metrics.storage.freelist.search_scanned'] = freelist['search']['scanned']
      end

      # Metrics (ttl)
      ttl = server_status['metrics']['ttl']
      server_metrics['metrics.ttl.deletedDocuments'] = ttl['deletedDocuments']
      server_metrics['metrics.ttl.passes'] = ttl['passes']

      # Metrics (wired_tiger)
      if server_status.key?('wiredTiger')

        # block-manager
        wired_tiger_block_manager = server_status['wiredTiger']['block-manager']
        server_metrics['metrics.wired_tiger.block_manager.blocks_read'] = wired_tiger_block_manager['blocks read']
        server_metrics['metrics.wired_tiger.block_manager.blocks_written'] = wired_tiger_block_manager['blocks written']
        server_metrics['metrics.wired_tiger.block_manager.mapped_blocks_read'] = wired_tiger_block_manager['mapped blocks read']

        # cache
        wired_tiger_cache = server_status['wiredTiger']['cache']
        server_metrics['metrics.wired_tiger.cache.bytes_currently_in_the_cache'] = wired_tiger_cache['bytes currently in the cache']
        server_metrics['metrics.wired_tiger.cache.bytes_read_into_cache'] = wired_tiger_cache['bytes read into cache']
        server_metrics['metrics.wired_tiger.cache.bytes_written_from_cache'] = wired_tiger_cache['bytes written from cache']
        server_metrics['metrics.wired_tiger.cache.eviction_server_evicting_pages'] = wired_tiger_cache['eviction server evicting pages']
        server_metrics['metrics.wired_tiger.cache.maximum_bytes_configured'] = wired_tiger_cache['maximum bytes configured']
        server_metrics['metrics.wired_tiger.cache.modified_pages_evicted'] = wired_tiger_cache['modified pages evicted']
        server_metrics['metrics.wired_tiger.cache.pages_currently_held_in_the_cache'] = wired_tiger_cache['pages currently held in the cache']
        server_metrics['metrics.wired_tiger.cache.tracked_dirty_bytes_in_the_cache'] = wired_tiger_cache['tracked dirty bytes in the cache']
        server_metrics['metrics.wired_tiger.cache.tracked_dirty_pages_in_the_cache'] = wired_tiger_cache['tracked dirty pages in the cache']
        server_metrics['metrics.wired_tiger.cache.unmodified_pages_evicted'] = wired_tiger_cache['unmodified pages evicted']

        # concurrentTransactions
        wired_tiger_cc_tx = server_status['wiredTiger']['concurrentTransactions']
        server_metrics['metrics.wired_tiger.concurrent_transaction.write.out'] = wired_tiger_cc_tx['write']['out']
        server_metrics['metrics.wired_tiger.concurrent_transaction.write.available'] = wired_tiger_cc_tx['write']['available']
        server_metrics['metrics.wired_tiger.concurrent_transaction.write.totalTickets'] = wired_tiger_cc_tx['write']['totalTickets']
        server_metrics['metrics.wired_tiger.concurrent_transaction.read.out'] = wired_tiger_cc_tx['read']['out']
        server_metrics['metrics.wired_tiger.concurrent_transaction.read.available'] = wired_tiger_cc_tx['read']['available']
        server_metrics['metrics.wired_tiger.concurrent_transaction.read.totalTickets'] = wired_tiger_cc_tx['read']['totalTickets']

        # log
        wired_tiger_log = server_status['wiredTiger']['log']
        server_metrics['metrics.wired_tiger.log.log_flush_operations'] = wired_tiger_log['log flush operations']
        server_metrics['metrics.wired_tiger.log.log_bytes_written'] = wired_tiger_log['log bytes written']
        server_metrics['metrics.wired_tiger.log.log_records_compressed'] = wired_tiger_log['log records compressed']
        server_metrics['metrics.wired_tiger.log.log_records_not_compressed'] = wired_tiger_log['log records not compressed']
        server_metrics['metrics.wired_tiger.log.log_sync_operations'] = wired_tiger_log['log sync operations']
        server_metrics['metrics.wired_tiger.log.log_write_operations'] = wired_tiger_log['log write operations']

        # session
        wired_tiger_sessions = server_status['wiredTiger']['session']
        server_metrics['metrics.wired_tiger.session.open_session_count'] = wired_tiger_sessions['open session count']

        # transaction
        wired_tiger_tx = server_status['wiredTiger']['transaction']
        server_metrics['metrics.wired_tiger.transaction.transaction_checkpoint_max_time_msecs'] = wired_tiger_tx['transaction checkpoint max time (msecs)']
        server_metrics['metrics.wired_tiger.transaction.transaction_checkpoint_min_time_msecs'] = wired_tiger_tx['transaction checkpoint min time (msecs)']
        server_metrics['metrics.wired_tiger.transaction.transaction_checkpoint_most_recent_time_msecs'] = wired_tiger_tx[
          'transaction checkpoint most recent time (msecs)'
        ]
        server_metrics['metrics.wired_tiger.transaction.transactions_committed'] = wired_tiger_tx['transactions committed']
        server_metrics['metrics.wired_tiger.transaction.transactions_rolled_back'] = wired_tiger_tx['transactions rolled back']
      end

      # Return metrics map.
      # MongoDB returns occasional nils and floats as {"floatApprox": x}.
      # Clean up the results once here to avoid per-metric logic.
      clean_metrics = {}
      server_metrics.each do |k, v|
        next if v.nil?
        if v.is_a?(Hash) && v.key?('floatApprox')
          v = v['floatApprox']
        end
        clean_metrics[k] = v
      end
      clean_metrics
    end

    private

    def get_mongo_client(db_name)
      @connected = true
      host = @config[:host]
      port = @config[:port]
      db_user = @config[:user]
      db_password = @config[:password]
      ssl = @config[:ssl]
      ssl_cert = @config[:ssl_cert]
      ssl_key = @config[:ssl_key]
      ssl_ca_cert = @config[:ssl_ca_cert]
      ssl_verify = @config[:ssl_verify]

      if Gem.loaded_specs['mongo'].version < Gem::Version.new('2.0.0')
        MongoClient.new(host, port)
      else
        address_str = "#{host}:#{port}"
        client_opts = {}
        client_opts[:database] = db_name
        unless db_user.nil?
          client_opts[:user] = db_user
          client_opts[:password] = db_password
        end
        if ssl
          client_opts[:ssl] = true
          client_opts[:ssl_cert] = ssl_cert
          client_opts[:ssl_key] = ssl_key
          client_opts[:ssl_ca_cert] = ssl_ca_cert
          client_opts[:ssl_verify] = ssl_verify
        end
        Mongo::Client.new([address_str], client_opts)
      end
    end
  end
end
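The two bin scripts above are the intended entry points for this class; outside of Sensu it can be driven the same way metrics-mongodb.rb drives it. A minimal standalone sketch (the config hash and connection details here are hypothetical, not part of the package):

require 'sensu-plugins-mongodb/metrics'

# Hypothetical config; the collector reads these keys from the hash.
config = {
  host: 'localhost',
  port: 27_017,
  user: nil,
  password: nil,
  ssl: false,
  debug: false
}

collector = SensuPluginsMongoDB::Metrics.new(config)
collector.connect_mongo_db('admin')
puts "master: #{collector.master?}"
collector.server_metrics.each { |k, v| puts "#{k} #{v}" }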