serverdensity-heroku 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +18 -0
- data/Gemfile +4 -0
- data/Gemfile.lock +14 -0
- data/LICENSE.txt +22 -0
- data/README.md +4 -0
- data/Rakefile +1 -0
- data/bin/LICENSE +27 -0
- data/bin/LICENSE-minjson +504 -0
- data/bin/agent-heroku-test.py +2 -0
- data/bin/agent.py +503 -0
- data/bin/checks.py +2497 -0
- data/bin/checks.pyc +0 -0
- data/bin/config.cfg +56 -0
- data/bin/daemon.py +174 -0
- data/bin/daemon.pyc +0 -0
- data/bin/minjson.py +280 -0
- data/bin/plugins.py +315 -0
- data/bin/sd-agent-pkg.init +85 -0
- data/bin/sd-agent.init +73 -0
- data/bin/sd-deploy.py +317 -0
- data/lib/serverdensity-heroku/version.rb +5 -0
- data/lib/serverdensity-heroku.rb +10 -0
- data/serverdensity-heroku.gemspec +19 -0
- metadata +82 -0
data/bin/checks.py
ADDED
@@ -0,0 +1,2497 @@
|
|
1
|
+
'''
|
2
|
+
Server Density
|
3
|
+
www.serverdensity.com
|
4
|
+
----
|
5
|
+
Server monitoring agent for Linux, FreeBSD and Mac OS X
|
6
|
+
|
7
|
+
Licensed under Simplified BSD License (see LICENSE)
|
8
|
+
'''
|
9
|
+
|
10
|
+
# SO references
|
11
|
+
# http://stackoverflow.com/questions/446209/possible-values-from-sys-platform/446210#446210
|
12
|
+
# http://stackoverflow.com/questions/682446/splitting-out-the-output-of-ps-using-python/682464#682464
|
13
|
+
# http://stackoverflow.com/questions/1052589/how-can-i-parse-the-output-of-proc-net-dev-into-keyvalue-pairs-per-interface-us
|
14
|
+
|
15
|
+
# Core modules
|
16
|
+
import httplib # Used only for handling httplib.HTTPException (case #26701)
|
17
|
+
import logging
|
18
|
+
import logging.handlers
|
19
|
+
import os
|
20
|
+
import platform
|
21
|
+
import re
|
22
|
+
import subprocess
|
23
|
+
import sys
|
24
|
+
import urllib
|
25
|
+
import urllib2
|
26
|
+
|
27
|
+
try:
|
28
|
+
from hashlib import md5
|
29
|
+
except ImportError: # Python < 2.5
|
30
|
+
from md5 import new as md5
|
31
|
+
|
32
|
+
# We need to return the data using JSON. As of Python 2.6+, there is a core JSON
|
33
|
+
# module. We have a 2.4/2.5 compatible lib included with the agent but if we're
|
34
|
+
# on 2.6 or above, we should use the core module which will be faster
|
35
|
+
pythonVersion = platform.python_version_tuple()
|
36
|
+
python24 = platform.python_version().startswith('2.4')
|
37
|
+
|
38
|
+
# Build the request headers
|
39
|
+
headers = {
|
40
|
+
'User-Agent': 'Server Density Agent',
|
41
|
+
'Content-Type': 'application/x-www-form-urlencoded',
|
42
|
+
'Accept': 'text/html, */*',
|
43
|
+
}
|
44
|
+
|
45
|
+
if int(pythonVersion[1]) >= 6: # Don't bother checking major version since we only support v2 anyway
|
46
|
+
import json
|
47
|
+
else:
|
48
|
+
import minjson
|
49
|
+
|
50
|
+
class checks:
|
51
|
+
|
52
|
+
def __init__(self, agentConfig, rawConfig, mainLogger):
|
53
|
+
self.agentConfig = agentConfig
|
54
|
+
self.rawConfig = rawConfig
|
55
|
+
self.mainLogger = mainLogger
|
56
|
+
|
57
|
+
self.mysqlConnectionsStore = None
|
58
|
+
self.mysqlSlowQueriesStore = None
|
59
|
+
self.mysqlVersion = None
|
60
|
+
self.networkTrafficStore = {}
|
61
|
+
self.nginxRequestsStore = None
|
62
|
+
self.mongoDBStore = None
|
63
|
+
self.apacheTotalAccesses = None
|
64
|
+
self.plugins = None
|
65
|
+
self.topIndex = 0
|
66
|
+
self.os = None
|
67
|
+
self.linuxProcFsLocation = None
|
68
|
+
|
69
|
+
# Set global timeout to 15 seconds for all sockets (case 31033). Should be long enough
|
70
|
+
import socket
|
71
|
+
socket.setdefaulttimeout(15)
|
72
|
+
|
73
|
+
#
|
74
|
+
# Checks
|
75
|
+
#
|
76
|
+
|
77
|
+
def getApacheStatus(self):
|
78
|
+
self.mainLogger.debug('getApacheStatus: start')
|
79
|
+
|
80
|
+
if 'apacheStatusUrl' in self.agentConfig and self.agentConfig['apacheStatusUrl'] != 'http://www.example.com/server-status/?auto': # Don't do it if the status URL hasn't been provided
|
81
|
+
self.mainLogger.debug('getApacheStatus: config set')
|
82
|
+
|
83
|
+
try:
|
84
|
+
self.mainLogger.debug('getApacheStatus: attempting urlopen')
|
85
|
+
|
86
|
+
if 'apacheStatusUser' in self.agentConfig and 'apacheStatusPass' in self.agentConfig and self.agentConfig['apacheStatusUrl'] != '' and self.agentConfig['apacheStatusPass'] != '':
|
87
|
+
self.mainLogger.debug('getApacheStatus: u/p config set')
|
88
|
+
|
89
|
+
passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
90
|
+
passwordMgr.add_password(None, self.agentConfig['apacheStatusUrl'], self.agentConfig['apacheStatusUser'], self.agentConfig['apacheStatusPass'])
|
91
|
+
|
92
|
+
handler = urllib2.HTTPBasicAuthHandler(passwordMgr)
|
93
|
+
|
94
|
+
# create "opener" (OpenerDirector instance)
|
95
|
+
opener = urllib2.build_opener(handler)
|
96
|
+
|
97
|
+
# use the opener to fetch a URL
|
98
|
+
opener.open(self.agentConfig['apacheStatusUrl'])
|
99
|
+
|
100
|
+
# Install the opener.
|
101
|
+
# Now all calls to urllib2.urlopen use our opener.
|
102
|
+
urllib2.install_opener(opener)
|
103
|
+
|
104
|
+
req = urllib2.Request(self.agentConfig['apacheStatusUrl'], None, headers)
|
105
|
+
request = urllib2.urlopen(req)
|
106
|
+
response = request.read()
|
107
|
+
|
108
|
+
except urllib2.HTTPError, e:
|
109
|
+
self.mainLogger.error('Unable to get Apache status - HTTPError = ' + str(e))
|
110
|
+
return False
|
111
|
+
|
112
|
+
except urllib2.URLError, e:
|
113
|
+
self.mainLogger.error('Unable to get Apache status - URLError = ' + str(e))
|
114
|
+
return False
|
115
|
+
|
116
|
+
except httplib.HTTPException, e:
|
117
|
+
self.mainLogger.error('Unable to get Apache status - HTTPException = ' + str(e))
|
118
|
+
return False
|
119
|
+
|
120
|
+
except Exception, e:
|
121
|
+
import traceback
|
122
|
+
self.mainLogger.error('Unable to get Apache status - Exception = ' + traceback.format_exc())
|
123
|
+
return False
|
124
|
+
|
125
|
+
self.mainLogger.debug('getApacheStatus: urlopen success, start parsing')
|
126
|
+
|
127
|
+
# Split out each line
|
128
|
+
lines = response.split('\n')
|
129
|
+
|
130
|
+
# Loop over each line and get the values
|
131
|
+
apacheStatus = {}
|
132
|
+
|
133
|
+
self.mainLogger.debug('getApacheStatus: parsing, loop')
|
134
|
+
|
135
|
+
# Loop through and extract the numerical values
|
136
|
+
for line in lines:
|
137
|
+
values = line.split(': ')
|
138
|
+
|
139
|
+
try:
|
140
|
+
apacheStatus[str(values[0])] = values[1]
|
141
|
+
|
142
|
+
except IndexError:
|
143
|
+
break
|
144
|
+
|
145
|
+
self.mainLogger.debug('getApacheStatus: parsed')
|
146
|
+
|
147
|
+
apacheStatusReturn = {}
|
148
|
+
|
149
|
+
try:
|
150
|
+
|
151
|
+
if apacheStatus['Total Accesses'] != False:
|
152
|
+
|
153
|
+
self.mainLogger.debug('getApacheStatus: processing total accesses')
|
154
|
+
|
155
|
+
totalAccesses = float(apacheStatus['Total Accesses'])
|
156
|
+
|
157
|
+
if self.apacheTotalAccesses is None or self.apacheTotalAccesses <= 0 or totalAccesses <= 0:
|
158
|
+
|
159
|
+
apacheStatusReturn['reqPerSec'] = 0.0
|
160
|
+
|
161
|
+
self.apacheTotalAccesses = totalAccesses
|
162
|
+
|
163
|
+
self.mainLogger.debug('getApacheStatus: no cached total accesses (or totalAccesses == 0), so storing for first time / resetting stored value')
|
164
|
+
|
165
|
+
else:
|
166
|
+
|
167
|
+
self.mainLogger.debug('getApacheStatus: cached data exists, so calculating per sec metrics')
|
168
|
+
|
169
|
+
apacheStatusReturn['reqPerSec'] = (totalAccesses - self.apacheTotalAccesses) / 60
|
170
|
+
|
171
|
+
self.apacheTotalAccesses = totalAccesses
|
172
|
+
|
173
|
+
else:
|
174
|
+
|
175
|
+
self.mainLogger.error('getApacheStatus: Total Accesses not present in mod_status output. Is ExtendedStatus enabled?')
|
176
|
+
|
177
|
+
except IndexError:
|
178
|
+
self.mainLogger.error('getApacheStatus: IndexError - Total Accesses not present in mod_status output. Is ExtendedStatus enabled?')
|
179
|
+
|
180
|
+
except KeyError:
|
181
|
+
self.mainLogger.error('getApacheStatus: KeyError - Total Accesses not present in mod_status output. Is ExtendedStatus enabled?')
|
182
|
+
|
183
|
+
try:
|
184
|
+
|
185
|
+
if apacheStatus['BusyWorkers'] != False and apacheStatus['IdleWorkers'] != False:
|
186
|
+
|
187
|
+
apacheStatusReturn['busyWorkers'] = apacheStatus['BusyWorkers']
|
188
|
+
apacheStatusReturn['idleWorkers'] = apacheStatus['IdleWorkers']
|
189
|
+
|
190
|
+
else:
|
191
|
+
|
192
|
+
self.mainLogger.error('getApacheStatus: BusyWorkers/IdleWorkers not present in mod_status output. Is the URL correct (must have ?auto at the end)?')
|
193
|
+
|
194
|
+
except IndexError:
|
195
|
+
self.mainLogger.error('getApacheStatus: IndexError - BusyWorkers/IdleWorkers not present in mod_status output. Is the URL correct (must have ?auto at the end)?')
|
196
|
+
|
197
|
+
except KeyError:
|
198
|
+
self.mainLogger.error('getApacheStatus: KeyError - BusyWorkers/IdleWorkers not present in mod_status output. Is the URL correct (must have ?auto at the end)?')
|
199
|
+
|
200
|
+
if 'reqPerSec' in apacheStatusReturn or 'BusyWorkers' in apacheStatusReturn or 'IdleWorkers' in apacheStatusReturn:
|
201
|
+
|
202
|
+
return apacheStatusReturn
|
203
|
+
|
204
|
+
else:
|
205
|
+
|
206
|
+
return False
|
207
|
+
|
208
|
+
else:
|
209
|
+
self.mainLogger.debug('getApacheStatus: config not set')
|
210
|
+
|
211
|
+
return False
|
212
|
+
|
213
|
+
def getCouchDBStatus(self):
|
214
|
+
self.mainLogger.debug('getCouchDBStatus: start')
|
215
|
+
|
216
|
+
if ('CouchDBServer' not in self.agentConfig or self.agentConfig['CouchDBServer'] == ''):
|
217
|
+
self.mainLogger.debug('getCouchDBStatus: config not set')
|
218
|
+
return False
|
219
|
+
|
220
|
+
self.mainLogger.debug('getCouchDBStatus: config set')
|
221
|
+
|
222
|
+
# The dictionary to be returned.
|
223
|
+
couchdb = {'stats': None, 'databases': {}}
|
224
|
+
|
225
|
+
# First, get overall statistics.
|
226
|
+
endpoint = '/_stats/'
|
227
|
+
|
228
|
+
try:
|
229
|
+
url = '%s%s' % (self.agentConfig['CouchDBServer'], endpoint)
|
230
|
+
self.mainLogger.debug('getCouchDBStatus: attempting urlopen')
|
231
|
+
req = urllib2.Request(url, None, headers)
|
232
|
+
|
233
|
+
# Do the request, log any errors
|
234
|
+
request = urllib2.urlopen(req)
|
235
|
+
response = request.read()
|
236
|
+
except urllib2.HTTPError, e:
|
237
|
+
self.mainLogger.error('Unable to get CouchDB statistics - HTTPError = ' + str(e))
|
238
|
+
return False
|
239
|
+
|
240
|
+
except urllib2.URLError, e:
|
241
|
+
self.mainLogger.error('Unable to get CouchDB statistics - URLError = ' + str(e))
|
242
|
+
return False
|
243
|
+
|
244
|
+
except httplib.HTTPException, e:
|
245
|
+
self.mainLogger.error('Unable to get CouchDB statistics - HTTPException = ' + str(e))
|
246
|
+
return False
|
247
|
+
|
248
|
+
except Exception, e:
|
249
|
+
import traceback
|
250
|
+
self.mainLogger.error('Unable to get CouchDB statistics - Exception = ' + traceback.format_exc())
|
251
|
+
return False
|
252
|
+
|
253
|
+
try:
|
254
|
+
|
255
|
+
if int(pythonVersion[1]) >= 6:
|
256
|
+
self.mainLogger.debug('getCouchDBStatus: json read')
|
257
|
+
stats = json.loads(response)
|
258
|
+
|
259
|
+
else:
|
260
|
+
self.mainLogger.debug('getCouchDBStatus: minjson read')
|
261
|
+
stats = minjson.safeRead(response)
|
262
|
+
|
263
|
+
except Exception, e:
|
264
|
+
import traceback
|
265
|
+
self.mainLogger.error('Unable to load CouchDB database JSON - Exception = ' + traceback.format_exc())
|
266
|
+
return False
|
267
|
+
|
268
|
+
couchdb['stats'] = stats
|
269
|
+
|
270
|
+
# Next, get all database names.
|
271
|
+
endpoint = '/_all_dbs/'
|
272
|
+
|
273
|
+
try:
|
274
|
+
url = '%s%s' % (self.agentConfig['CouchDBServer'], endpoint)
|
275
|
+
self.mainLogger.debug('getCouchDBStatus: attempting urlopen')
|
276
|
+
req = urllib2.Request(url, None, headers)
|
277
|
+
|
278
|
+
# Do the request, log any errors
|
279
|
+
request = urllib2.urlopen(req)
|
280
|
+
response = request.read()
|
281
|
+
except urllib2.HTTPError, e:
|
282
|
+
self.mainLogger.error('Unable to get CouchDB status - HTTPError = ' + str(e))
|
283
|
+
return False
|
284
|
+
|
285
|
+
except urllib2.URLError, e:
|
286
|
+
self.mainLogger.error('Unable to get CouchDB status - URLError = ' + str(e))
|
287
|
+
return False
|
288
|
+
|
289
|
+
except httplib.HTTPException, e:
|
290
|
+
self.mainLogger.error('Unable to get CouchDB status - HTTPException = ' + str(e))
|
291
|
+
return False
|
292
|
+
|
293
|
+
except Exception, e:
|
294
|
+
import traceback
|
295
|
+
self.mainLogger.error('Unable to get CouchDB status - Exception = ' + traceback.format_exc())
|
296
|
+
return False
|
297
|
+
|
298
|
+
try:
|
299
|
+
|
300
|
+
if int(pythonVersion[1]) >= 6:
|
301
|
+
self.mainLogger.debug('getCouchDBStatus: json read')
|
302
|
+
databases = json.loads(response)
|
303
|
+
|
304
|
+
else:
|
305
|
+
self.mainLogger.debug('getCouchDBStatus: minjson read')
|
306
|
+
databases = minjson.safeRead(response)
|
307
|
+
|
308
|
+
except Exception, e:
|
309
|
+
import traceback
|
310
|
+
self.mainLogger.error('Unable to load CouchDB database JSON - Exception = ' + traceback.format_exc())
|
311
|
+
return False
|
312
|
+
|
313
|
+
for dbName in databases:
|
314
|
+
endpoint = '/%s/' % dbName
|
315
|
+
|
316
|
+
try:
|
317
|
+
url = '%s%s' % (self.agentConfig['CouchDBServer'], endpoint)
|
318
|
+
self.mainLogger.debug('getCouchDBStatus: attempting urlopen')
|
319
|
+
req = urllib2.Request(url, None, headers)
|
320
|
+
|
321
|
+
# Do the request, log any errors
|
322
|
+
request = urllib2.urlopen(req)
|
323
|
+
response = request.read()
|
324
|
+
except urllib2.HTTPError, e:
|
325
|
+
self.mainLogger.error('Unable to get CouchDB database status - HTTPError = ' + str(e))
|
326
|
+
return False
|
327
|
+
|
328
|
+
except urllib2.URLError, e:
|
329
|
+
self.mainLogger.error('Unable to get CouchDB database status - URLError = ' + str(e))
|
330
|
+
return False
|
331
|
+
|
332
|
+
except httplib.HTTPException, e:
|
333
|
+
self.mainLogger.error('Unable to get CouchDB database status - HTTPException = ' + str(e))
|
334
|
+
return False
|
335
|
+
|
336
|
+
except Exception, e:
|
337
|
+
import traceback
|
338
|
+
self.mainLogger.error('Unable to get CouchDB database status - Exception = ' + traceback.format_exc())
|
339
|
+
return False
|
340
|
+
|
341
|
+
try:
|
342
|
+
|
343
|
+
if int(pythonVersion[1]) >= 6:
|
344
|
+
self.mainLogger.debug('getCouchDBStatus: json read')
|
345
|
+
couchdb['databases'][dbName] = json.loads(response)
|
346
|
+
|
347
|
+
else:
|
348
|
+
self.mainLogger.debug('getCouchDBStatus: minjson read')
|
349
|
+
couchdb['databases'][dbName] = minjson.safeRead(response)
|
350
|
+
|
351
|
+
except Exception, e:
|
352
|
+
import traceback
|
353
|
+
self.mainLogger.error('Unable to load CouchDB database JSON - Exception = ' + traceback.format_exc())
|
354
|
+
return False
|
355
|
+
|
356
|
+
self.mainLogger.debug('getCouchDBStatus: completed, returning')
|
357
|
+
return couchdb
|
358
|
+
|
359
|
+
def getCPUStats(self):
|
360
|
+
self.mainLogger.debug('getCPUStats: start')
|
361
|
+
|
362
|
+
cpuStats = {}
|
363
|
+
|
364
|
+
if sys.platform == 'linux2':
|
365
|
+
self.mainLogger.debug('getCPUStats: linux2')
|
366
|
+
|
367
|
+
headerRegexp = re.compile(r'.*?([%][a-zA-Z0-9]+)[\s+]?')
|
368
|
+
itemRegexp = re.compile(r'.*?\s+(\d+)[\s+]?')
|
369
|
+
valueRegexp = re.compile(r'\d+\.\d+')
|
370
|
+
|
371
|
+
try:
|
372
|
+
proc = subprocess.Popen(['mpstat', '-P', 'ALL', '1', '1'], stdout=subprocess.PIPE, close_fds=True)
|
373
|
+
stats = proc.communicate()[0]
|
374
|
+
|
375
|
+
if int(pythonVersion[1]) >= 6:
|
376
|
+
try:
|
377
|
+
proc.kill()
|
378
|
+
except Exception, e:
|
379
|
+
self.mainLogger.debug('Process already terminated')
|
380
|
+
|
381
|
+
stats = stats.split('\n')
|
382
|
+
header = stats[2]
|
383
|
+
headerNames = re.findall(headerRegexp, header)
|
384
|
+
device = None
|
385
|
+
|
386
|
+
for statsIndex in range(4, len(stats)): # skip "all"
|
387
|
+
row = stats[statsIndex]
|
388
|
+
|
389
|
+
if not row: # skip the averages
|
390
|
+
break
|
391
|
+
|
392
|
+
deviceMatch = re.match(itemRegexp, row)
|
393
|
+
|
394
|
+
if deviceMatch is not None:
|
395
|
+
device = 'CPU%s' % deviceMatch.groups()[0]
|
396
|
+
|
397
|
+
values = re.findall(valueRegexp, row.replace(',', '.'))
|
398
|
+
|
399
|
+
cpuStats[device] = {}
|
400
|
+
for headerIndex in range(0, len(headerNames)):
|
401
|
+
headerName = headerNames[headerIndex]
|
402
|
+
cpuStats[device][headerName] = values[headerIndex]
|
403
|
+
|
404
|
+
except Exception, ex:
|
405
|
+
if int(pythonVersion[1]) >= 6:
|
406
|
+
try:
|
407
|
+
proc.kill()
|
408
|
+
except UnboundLocalError, e:
|
409
|
+
self.mainLogger.debug('Process already terminated')
|
410
|
+
except Exception, e:
|
411
|
+
self.mainLogger.debug('Process already terminated')
|
412
|
+
|
413
|
+
import traceback
|
414
|
+
self.mainLogger.error('getCPUStats: exception = ' + traceback.format_exc())
|
415
|
+
return False
|
416
|
+
else:
|
417
|
+
self.mainLogger.debug('getCPUStats: unsupported platform')
|
418
|
+
return False
|
419
|
+
|
420
|
+
self.mainLogger.debug('getCPUStats: completed, returning')
|
421
|
+
return cpuStats
|
422
|
+
|
423
|
+
def getDiskUsage(self):
|
424
|
+
self.mainLogger.debug('getDiskUsage: start')
|
425
|
+
|
426
|
+
# Get output from df
|
427
|
+
try:
|
428
|
+
try:
|
429
|
+
self.mainLogger.debug('getDiskUsage: attempting Popen')
|
430
|
+
|
431
|
+
proc = subprocess.Popen(['df', '-k'], stdout=subprocess.PIPE, close_fds=True) # -k option uses 1024 byte blocks so we can calculate into MB
|
432
|
+
|
433
|
+
df = proc.communicate()[0]
|
434
|
+
|
435
|
+
if int(pythonVersion[1]) >= 6:
|
436
|
+
try:
|
437
|
+
proc.kill()
|
438
|
+
except Exception, e:
|
439
|
+
self.mainLogger.debug('Process already terminated')
|
440
|
+
|
441
|
+
except Exception, e:
|
442
|
+
import traceback
|
443
|
+
self.mainLogger.error('getDiskUsage: df -k exception = ' + traceback.format_exc())
|
444
|
+
return False
|
445
|
+
|
446
|
+
finally:
|
447
|
+
if int(pythonVersion[1]) >= 6:
|
448
|
+
try:
|
449
|
+
proc.kill()
|
450
|
+
except Exception, e:
|
451
|
+
self.mainLogger.debug('Process already terminated')
|
452
|
+
|
453
|
+
self.mainLogger.debug('getDiskUsage: Popen success, start parsing')
|
454
|
+
|
455
|
+
# Split out each volume
|
456
|
+
volumes = df.split('\n')
|
457
|
+
|
458
|
+
self.mainLogger.debug('getDiskUsage: parsing, split')
|
459
|
+
|
460
|
+
# Remove first (headings) and last (blank)
|
461
|
+
volumes.pop(0)
|
462
|
+
volumes.pop()
|
463
|
+
|
464
|
+
self.mainLogger.debug('getDiskUsage: parsing, pop')
|
465
|
+
|
466
|
+
usageData = []
|
467
|
+
|
468
|
+
regexp = re.compile(r'([0-9]+)')
|
469
|
+
|
470
|
+
# Set some defaults
|
471
|
+
previousVolume = None
|
472
|
+
volumeCount = 0
|
473
|
+
|
474
|
+
self.mainLogger.debug('getDiskUsage: parsing, start loop')
|
475
|
+
|
476
|
+
for volume in volumes:
|
477
|
+
self.mainLogger.debug('getDiskUsage: parsing volume: ' + volume)
|
478
|
+
|
479
|
+
# Split out the string
|
480
|
+
volume = volume.split(None, 10)
|
481
|
+
|
482
|
+
# Handle df output wrapping onto multiple lines (case 27078 and case 30997)
|
483
|
+
# Thanks to http://github.com/sneeu
|
484
|
+
if len(volume) == 1: # If the length is 1 then this just has the mount name
|
485
|
+
previousVolume = volume[0] # We store it, then continue the for
|
486
|
+
continue
|
487
|
+
|
488
|
+
if previousVolume != None: # If the previousVolume was set (above) during the last loop
|
489
|
+
volume.insert(0, previousVolume) # then we need to insert it into the volume
|
490
|
+
previousVolume = None # then reset so we don't use it again
|
491
|
+
|
492
|
+
volumeCount = volumeCount + 1
|
493
|
+
|
494
|
+
# Sometimes the first column will have a space, which is usually a system line that isn't relevant
|
495
|
+
# e.g. map -hosts 0 0 0 100% /net
|
496
|
+
# so we just get rid of it
|
497
|
+
# Also ignores lines with no values (AGENT-189)
|
498
|
+
if re.match(regexp, volume[1]) == None or re.match(regexp, volume[2]) == None or re.match(regexp, volume[3]) == None:
|
499
|
+
|
500
|
+
pass
|
501
|
+
|
502
|
+
else:
|
503
|
+
try:
|
504
|
+
volume[2] = int(volume[2]) / 1024 / 1024 # Used
|
505
|
+
volume[3] = int(volume[3]) / 1024 / 1024 # Available
|
506
|
+
except Exception, e:
|
507
|
+
self.mainLogger.error('getDiskUsage: parsing, loop %s - Used or Available not present' % (repr(e),))
|
508
|
+
|
509
|
+
usageData.append(volume)
|
510
|
+
|
511
|
+
self.mainLogger.debug('getDiskUsage: completed, returning')
|
512
|
+
|
513
|
+
return usageData
|
514
|
+
|
515
|
+
def getIOStats(self):
|
516
|
+
self.mainLogger.debug('getIOStats: start')
|
517
|
+
|
518
|
+
ioStats = {}
|
519
|
+
|
520
|
+
if sys.platform == 'linux2':
|
521
|
+
self.mainLogger.debug('getIOStats: linux2')
|
522
|
+
|
523
|
+
headerRegexp = re.compile(r'([%\\/\-\_a-zA-Z0-9]+)[\s+]?')
|
524
|
+
itemRegexp = re.compile(r'^([a-zA-Z0-9\/]+)')
|
525
|
+
valueRegexp = re.compile(r'\d+\.\d+')
|
526
|
+
|
527
|
+
try:
|
528
|
+
try:
|
529
|
+
proc = subprocess.Popen(['iostat', '-d', '1', '2', '-x', '-k'], stdout=subprocess.PIPE, close_fds=True)
|
530
|
+
stats = proc.communicate()[0]
|
531
|
+
|
532
|
+
if int(pythonVersion[1]) >= 6:
|
533
|
+
try:
|
534
|
+
proc.kill()
|
535
|
+
except Exception, e:
|
536
|
+
self.mainLogger.debug('Process already terminated')
|
537
|
+
|
538
|
+
recentStats = stats.split('Device:')[2].split('\n')
|
539
|
+
header = recentStats[0]
|
540
|
+
headerNames = re.findall(headerRegexp, header)
|
541
|
+
device = None
|
542
|
+
|
543
|
+
for statsIndex in range(1, len(recentStats)):
|
544
|
+
row = recentStats[statsIndex]
|
545
|
+
|
546
|
+
if not row:
|
547
|
+
# Ignore blank lines.
|
548
|
+
continue
|
549
|
+
|
550
|
+
deviceMatch = re.match(itemRegexp, row)
|
551
|
+
|
552
|
+
if deviceMatch is not None:
|
553
|
+
# Sometimes device names span two lines.
|
554
|
+
device = deviceMatch.groups()[0]
|
555
|
+
|
556
|
+
values = re.findall(valueRegexp, row.replace(',', '.'))
|
557
|
+
|
558
|
+
if not values:
|
559
|
+
# Sometimes values are on the next line so we encounter
|
560
|
+
# instances of [].
|
561
|
+
continue
|
562
|
+
|
563
|
+
ioStats[device] = {}
|
564
|
+
|
565
|
+
for headerIndex in range(0, len(headerNames)):
|
566
|
+
headerName = headerNames[headerIndex]
|
567
|
+
ioStats[device][headerName] = values[headerIndex]
|
568
|
+
|
569
|
+
except Exception, ex:
|
570
|
+
import traceback
|
571
|
+
self.mainLogger.error('getIOStats: exception = ' + traceback.format_exc())
|
572
|
+
return False
|
573
|
+
finally:
|
574
|
+
if int(pythonVersion[1]) >= 6:
|
575
|
+
try:
|
576
|
+
proc.kill()
|
577
|
+
except Exception, e:
|
578
|
+
self.mainLogger.debug('Process already terminated')
|
579
|
+
|
580
|
+
else:
|
581
|
+
self.mainLogger.debug('getIOStats: unsupported platform')
|
582
|
+
return False
|
583
|
+
|
584
|
+
self.mainLogger.debug('getIOStats: completed, returning')
|
585
|
+
return ioStats
|
586
|
+
|
587
|
+
def getLoadAvrgs(self):
|
588
|
+
self.mainLogger.debug('getLoadAvrgs: start')
|
589
|
+
|
590
|
+
# If Linux like procfs system is present and mounted we use loadavg, else we use uptime
|
591
|
+
if sys.platform == 'linux2':
|
592
|
+
|
593
|
+
self.mainLogger.debug('getLoadAvrgs: linux2')
|
594
|
+
|
595
|
+
try:
|
596
|
+
self.mainLogger.debug('getLoadAvrgs: attempting open')
|
597
|
+
|
598
|
+
if sys.platform == 'linux2':
|
599
|
+
loadAvrgProc = open('/proc/loadavg', 'r')
|
600
|
+
else:
|
601
|
+
loadAvrgProc = open(self.linuxProcFsLocation + '/loadavg', 'r')
|
602
|
+
|
603
|
+
uptime = loadAvrgProc.readlines()
|
604
|
+
|
605
|
+
except IOError, e:
|
606
|
+
self.mainLogger.error('getLoadAvrgs: exception = ' + str(e))
|
607
|
+
return False
|
608
|
+
|
609
|
+
self.mainLogger.debug('getLoadAvrgs: open success')
|
610
|
+
|
611
|
+
loadAvrgProc.close()
|
612
|
+
|
613
|
+
uptime = uptime[0] # readlines() provides a list but we want a string
|
614
|
+
|
615
|
+
elif sys.platform.find('freebsd') != -1:
|
616
|
+
self.mainLogger.debug('getLoadAvrgs: freebsd (uptime)')
|
617
|
+
|
618
|
+
try:
|
619
|
+
try:
|
620
|
+
self.mainLogger.debug('getLoadAvrgs: attempting Popen')
|
621
|
+
|
622
|
+
proc = subprocess.Popen(['uptime'], stdout=subprocess.PIPE, close_fds=True)
|
623
|
+
uptime = proc.communicate()[0]
|
624
|
+
|
625
|
+
if int(pythonVersion[1]) >= 6:
|
626
|
+
try:
|
627
|
+
proc.kill()
|
628
|
+
except Exception, e:
|
629
|
+
self.mainLogger.debug('Process already terminated')
|
630
|
+
|
631
|
+
except Exception, e:
|
632
|
+
import traceback
|
633
|
+
self.mainLogger.error('getLoadAvrgs: exception = ' + traceback.format_exc())
|
634
|
+
return False
|
635
|
+
finally:
|
636
|
+
if int(pythonVersion[1]) >= 6:
|
637
|
+
try:
|
638
|
+
proc.kill()
|
639
|
+
except Exception, e:
|
640
|
+
self.mainLogger.debug('Process already terminated')
|
641
|
+
|
642
|
+
self.mainLogger.debug('getLoadAvrgs: Popen success')
|
643
|
+
|
644
|
+
elif sys.platform == 'darwin':
|
645
|
+
self.mainLogger.debug('getLoadAvrgs: darwin')
|
646
|
+
|
647
|
+
# Get output from uptime
|
648
|
+
try:
|
649
|
+
try:
|
650
|
+
self.mainLogger.debug('getLoadAvrgs: attempting Popen')
|
651
|
+
|
652
|
+
proc = subprocess.Popen(['uptime'], stdout=subprocess.PIPE, close_fds=True)
|
653
|
+
uptime = proc.communicate()[0]
|
654
|
+
|
655
|
+
if int(pythonVersion[1]) >= 6:
|
656
|
+
try:
|
657
|
+
proc.kill()
|
658
|
+
except Exception, e:
|
659
|
+
self.mainLogger.debug('Process already terminated')
|
660
|
+
|
661
|
+
except Exception, e:
|
662
|
+
import traceback
|
663
|
+
self.mainLogger.error('getLoadAvrgs: exception = ' + traceback.format_exc())
|
664
|
+
return False
|
665
|
+
finally:
|
666
|
+
if int(pythonVersion[1]) >= 6:
|
667
|
+
try:
|
668
|
+
proc.kill()
|
669
|
+
except Exception, e:
|
670
|
+
self.mainLogger.debug('Process already terminated')
|
671
|
+
|
672
|
+
self.mainLogger.debug('getLoadAvrgs: Popen success')
|
673
|
+
|
674
|
+
elif sys.platform.find('sunos') != -1:
|
675
|
+
self.mainLogger.debug('getLoadAvrgs: solaris (uptime)')
|
676
|
+
|
677
|
+
try:
|
678
|
+
try:
|
679
|
+
self.mainLogger.debug('getLoadAvrgs: attempting Popen')
|
680
|
+
|
681
|
+
proc = subprocess.Popen(['uptime'], stdout=subprocess.PIPE, close_fds=True)
|
682
|
+
uptime = proc.communicate()[0]
|
683
|
+
|
684
|
+
if int(pythonVersion[1]) >= 6:
|
685
|
+
try:
|
686
|
+
proc.kill()
|
687
|
+
except Exception, e:
|
688
|
+
self.mainLogger.debug('Process already terminated')
|
689
|
+
|
690
|
+
except Exception, e:
|
691
|
+
import traceback
|
692
|
+
self.mainLogger.error('getLoadAvrgs: exception = ' + traceback.format_exc())
|
693
|
+
return False
|
694
|
+
finally:
|
695
|
+
if int(pythonVersion[1]) >= 6:
|
696
|
+
try:
|
697
|
+
proc.kill()
|
698
|
+
except Exception, e:
|
699
|
+
self.mainLogger.debug('Process already terminated')
|
700
|
+
|
701
|
+
self.mainLogger.debug('getLoadAvrgs: Popen success')
|
702
|
+
|
703
|
+
else:
|
704
|
+
self.mainLogger.debug('getLoadAvrgs: other platform, returning')
|
705
|
+
return False
|
706
|
+
|
707
|
+
self.mainLogger.debug('getLoadAvrgs: parsing')
|
708
|
+
|
709
|
+
# Split out the 3 load average values
|
710
|
+
loadAvrgs = [res.replace(',', '.') for res in re.findall(r'([0-9]+[\.,]\d+)', uptime)]
|
711
|
+
loadAvrgs = {'1': loadAvrgs[0], '5': loadAvrgs[1], '15': loadAvrgs[2]}
|
712
|
+
|
713
|
+
self.mainLogger.debug('getLoadAvrgs: completed, returning')
|
714
|
+
|
715
|
+
return loadAvrgs
|
716
|
+
|
717
|
+
def getMemoryUsage(self):
|
718
|
+
self.mainLogger.debug('getMemoryUsage: start')
|
719
|
+
|
720
|
+
# If Linux like procfs system is present and mounted we use meminfo, else we use "native" mode (vmstat and swapinfo)
|
721
|
+
if sys.platform == 'linux2':
|
722
|
+
|
723
|
+
self.mainLogger.debug('getMemoryUsage: linux2')
|
724
|
+
|
725
|
+
try:
|
726
|
+
self.mainLogger.debug('getMemoryUsage: attempting open')
|
727
|
+
|
728
|
+
if sys.platform == 'linux2':
|
729
|
+
meminfoProc = open('/proc/meminfo', 'r')
|
730
|
+
else:
|
731
|
+
meminfoProc = open(self.linuxProcFsLocation + '/meminfo', 'r')
|
732
|
+
|
733
|
+
lines = meminfoProc.readlines()
|
734
|
+
|
735
|
+
except IOError, e:
|
736
|
+
self.mainLogger.error('getMemoryUsage: exception = ' + str(e))
|
737
|
+
return False
|
738
|
+
|
739
|
+
self.mainLogger.debug('getMemoryUsage: Popen success, parsing')
|
740
|
+
|
741
|
+
meminfoProc.close()
|
742
|
+
|
743
|
+
self.mainLogger.debug('getMemoryUsage: open success, parsing')
|
744
|
+
|
745
|
+
regexp = re.compile(r'([0-9]+)') # We run this several times so one-time compile now
|
746
|
+
|
747
|
+
meminfo = {}
|
748
|
+
|
749
|
+
self.mainLogger.debug('getMemoryUsage: parsing, looping')
|
750
|
+
|
751
|
+
# Loop through and extract the numerical values
|
752
|
+
for line in lines:
|
753
|
+
values = line.split(':')
|
754
|
+
|
755
|
+
try:
|
756
|
+
# Picks out the key (values[0]) and makes a list with the value as the meminfo value (values[1])
|
757
|
+
# We are only interested in the KB data so regexp that out
|
758
|
+
match = re.search(regexp, values[1])
|
759
|
+
|
760
|
+
if match != None:
|
761
|
+
meminfo[str(values[0])] = match.group(0)
|
762
|
+
|
763
|
+
except IndexError:
|
764
|
+
break
|
765
|
+
|
766
|
+
self.mainLogger.debug('getMemoryUsage: parsing, looped')
|
767
|
+
|
768
|
+
memData = {}
|
769
|
+
memData['physFree'] = 0
|
770
|
+
memData['physUsed'] = 0
|
771
|
+
memData['cached'] = 0
|
772
|
+
memData['swapFree'] = 0
|
773
|
+
memData['swapUsed'] = 0
|
774
|
+
|
775
|
+
# Phys
|
776
|
+
try:
|
777
|
+
self.mainLogger.debug('getMemoryUsage: formatting (phys)')
|
778
|
+
|
779
|
+
physTotal = int(meminfo['MemTotal'])
|
780
|
+
physFree = int(meminfo['MemFree'])
|
781
|
+
physUsed = physTotal - physFree
|
782
|
+
|
783
|
+
# Convert to MB
|
784
|
+
memData['physFree'] = physFree / 1024
|
785
|
+
memData['physUsed'] = physUsed / 1024
|
786
|
+
memData['cached'] = int(meminfo['Cached']) / 1024
|
787
|
+
|
788
|
+
# Stops the agent crashing if one of the meminfo elements isn't set
|
789
|
+
except IndexError:
|
790
|
+
self.mainLogger.error('getMemoryUsage: formatting (phys) IndexError - Cached, MemTotal or MemFree not present')
|
791
|
+
|
792
|
+
except KeyError:
|
793
|
+
self.mainLogger.error('getMemoryUsage: formatting (phys) KeyError - Cached, MemTotal or MemFree not present')
|
794
|
+
|
795
|
+
self.mainLogger.debug('getMemoryUsage: formatted (phys)')
|
796
|
+
|
797
|
+
# Swap
|
798
|
+
try:
|
799
|
+
self.mainLogger.debug('getMemoryUsage: formatting (swap)')
|
800
|
+
|
801
|
+
swapTotal = int(meminfo['SwapTotal'])
|
802
|
+
swapFree = int(meminfo['SwapFree'])
|
803
|
+
swapUsed = swapTotal - swapFree
|
804
|
+
|
805
|
+
# Convert to MB
|
806
|
+
memData['swapFree'] = swapFree / 1024
|
807
|
+
memData['swapUsed'] = swapUsed / 1024
|
808
|
+
|
809
|
+
# Stops the agent crashing if one of the meminfo elements isn't set
|
810
|
+
except IndexError:
|
811
|
+
self.mainLogger.error('getMemoryUsage: formatting (swap) IndexError - SwapTotal or SwapFree not present')
|
812
|
+
|
813
|
+
except KeyError:
|
814
|
+
self.mainLogger.error('getMemoryUsage: formatting (swap) KeyError - SwapTotal or SwapFree not present')
|
815
|
+
|
816
|
+
self.mainLogger.debug('getMemoryUsage: formatted (swap), completed, returning')
|
817
|
+
|
818
|
+
return memData
|
819
|
+
|
820
|
+
elif sys.platform.find('freebsd') != -1:
|
821
|
+
self.mainLogger.debug('getMemoryUsage: freebsd (native)')
|
822
|
+
|
823
|
+
physFree = None
|
824
|
+
|
825
|
+
try:
|
826
|
+
try:
|
827
|
+
self.mainLogger.debug('getMemoryUsage: attempting sysinfo')
|
828
|
+
|
829
|
+
proc = subprocess.Popen(['sysinfo', '-v', 'mem'], stdout = subprocess.PIPE, close_fds = True)
|
830
|
+
sysinfo = proc.communicate()[0]
|
831
|
+
|
832
|
+
if int(pythonVersion[1]) >= 6:
|
833
|
+
try:
|
834
|
+
proc.kill()
|
835
|
+
except Exception, e:
|
836
|
+
self.mainLogger.debug('Process already terminated')
|
837
|
+
|
838
|
+
sysinfo = sysinfo.split('\n')
|
839
|
+
|
840
|
+
regexp = re.compile(r'([0-9]+)') # We run this several times so one-time compile now
|
841
|
+
|
842
|
+
for line in sysinfo:
|
843
|
+
|
844
|
+
parts = line.split(' ')
|
845
|
+
|
846
|
+
if parts[0] == 'Free':
|
847
|
+
|
848
|
+
self.mainLogger.debug('getMemoryUsage: parsing free')
|
849
|
+
|
850
|
+
for part in parts:
|
851
|
+
|
852
|
+
match = re.search(regexp, part)
|
853
|
+
|
854
|
+
if match != None:
|
855
|
+
physFree = match.group(0)
|
856
|
+
self.mainLogger.debug('getMemoryUsage: sysinfo: found free %s', physFree)
|
857
|
+
|
858
|
+
if parts[0] == 'Active':
|
859
|
+
|
860
|
+
self.mainLogger.debug('getMemoryUsage: parsing used')
|
861
|
+
|
862
|
+
for part in parts:
|
863
|
+
|
864
|
+
match = re.search(regexp, part)
|
865
|
+
|
866
|
+
if match != None:
|
867
|
+
physUsed = match.group(0)
|
868
|
+
self.mainLogger.debug('getMemoryUsage: sysinfo: found used %s', physUsed)
|
869
|
+
|
870
|
+
if parts[0] == 'Cached':
|
871
|
+
|
872
|
+
self.mainLogger.debug('getMemoryUsage: parsing cached')
|
873
|
+
|
874
|
+
for part in parts:
|
875
|
+
|
876
|
+
match = re.search(regexp, part)
|
877
|
+
|
878
|
+
if match != None:
|
879
|
+
cached = match.group(0)
|
880
|
+
self.mainLogger.debug('getMemoryUsage: sysinfo: found cached %s', cached)
|
881
|
+
|
882
|
+
except OSError, e:
|
883
|
+
|
884
|
+
self.mainLogger.debug('getMemoryUsage: sysinfo not available')
|
885
|
+
|
886
|
+
except Exception, e:
|
887
|
+
import traceback
|
888
|
+
self.mainLogger.error('getMemoryUsage: exception = ' + traceback.format_exc())
|
889
|
+
finally:
|
890
|
+
if int(pythonVersion[1]) >= 6:
|
891
|
+
try:
|
892
|
+
proc.kill()
|
893
|
+
except Exception, e:
|
894
|
+
self.mainLogger.debug('Process already terminated')
|
895
|
+
|
896
|
+
if physFree == None:
|
897
|
+
|
898
|
+
self.mainLogger.info('getMemoryUsage: sysinfo not installed so falling back on sysctl. sysinfo provides more accurate memory info so is recommended. http://www.freshports.org/sysutils/sysinfo')
|
899
|
+
|
900
|
+
try:
|
901
|
+
try:
|
902
|
+
self.mainLogger.debug('getMemoryUsage: attempting Popen (sysctl)')
|
903
|
+
|
904
|
+
proc = subprocess.Popen(['sysctl', '-n', 'hw.physmem'], stdout = subprocess.PIPE, close_fds = True)
|
905
|
+
physTotal = proc.communicate()[0]
|
906
|
+
|
907
|
+
if int(pythonVersion[1]) >= 6:
|
908
|
+
try:
|
909
|
+
proc.kill()
|
910
|
+
except Exception, e:
|
911
|
+
self.mainLogger.debug('Process already terminated')
|
912
|
+
|
913
|
+
self.mainLogger.debug('getMemoryUsage: attempting Popen (vmstat)')
|
914
|
+
proc = subprocess.Popen(['vmstat', '-H'], stdout = subprocess.PIPE, close_fds = True)
|
915
|
+
vmstat = proc.communicate()[0]
|
916
|
+
|
917
|
+
if int(pythonVersion[1]) >= 6:
|
918
|
+
try:
|
919
|
+
proc.kill()
|
920
|
+
except Exception, e:
|
921
|
+
self.mainLogger.debug('Process already terminated')
|
922
|
+
|
923
|
+
except Exception, e:
|
924
|
+
import traceback
|
925
|
+
self.mainLogger.error('getMemoryUsage: exception = ' + traceback.format_exc())
|
926
|
+
|
927
|
+
return False
|
928
|
+
finally:
|
929
|
+
if int(pythonVersion[1]) >= 6:
|
930
|
+
try:
|
931
|
+
proc.kill()
|
932
|
+
except Exception, e:
|
933
|
+
self.mainLogger.debug('Process already terminated')
|
934
|
+
|
935
|
+
self.mainLogger.debug('getMemoryUsage: Popen success, parsing')
|
936
|
+
|
937
|
+
# First we parse the information about the real memory
|
938
|
+
lines = vmstat.split('\n')
|
939
|
+
physParts = lines[2].split(' ')
|
940
|
+
|
941
|
+
physMem = []
|
942
|
+
|
943
|
+
# We need to loop through and capture the numerical values
|
944
|
+
# because sometimes there will be strings and spaces
|
945
|
+
for k, v in enumerate(physParts):
|
946
|
+
|
947
|
+
if re.match(r'([0-9]+)', v) != None:
|
948
|
+
physMem.append(v)
|
949
|
+
|
950
|
+
physTotal = int(physTotal.strip()) / 1024 # physFree is returned in B, but we need KB so we convert it
|
951
|
+
physFree = int(physMem[4])
|
952
|
+
physUsed = int(physTotal - physFree)
|
953
|
+
|
954
|
+
self.mainLogger.debug('getMemoryUsage: parsed vmstat')
|
955
|
+
|
956
|
+
# Convert everything to MB
|
957
|
+
physUsed = int(physUsed) / 1024
|
958
|
+
physFree = int(physFree) / 1024
|
959
|
+
|
960
|
+
cached = 'NULL'
|
961
|
+
|
962
|
+
#
|
963
|
+
# Swap memory details
|
964
|
+
#
|
965
|
+
|
966
|
+
self.mainLogger.debug('getMemoryUsage: attempting Popen (swapinfo)')
|
967
|
+
|
968
|
+
try:
|
969
|
+
try:
|
970
|
+
proc = subprocess.Popen(['swapinfo', '-k'], stdout = subprocess.PIPE, close_fds = True)
|
971
|
+
swapinfo = proc.communicate()[0]
|
972
|
+
|
973
|
+
if int(pythonVersion[1]) >= 6:
|
974
|
+
try:
|
975
|
+
proc.kill()
|
976
|
+
except Exception, e:
|
977
|
+
self.mainLogger.debug('Process already terminated')
|
978
|
+
|
979
|
+
except Exception, e:
|
980
|
+
import traceback
|
981
|
+
self.mainLogger.error('getMemoryUsage: exception = ' + traceback.format_exc())
|
982
|
+
|
983
|
+
return False
|
984
|
+
finally:
|
985
|
+
if int(pythonVersion[1]) >= 6:
|
986
|
+
try:
|
987
|
+
proc.kill()
|
988
|
+
except Exception, e:
|
989
|
+
self.mainLogger.debug('Process already terminated')
|
990
|
+
|
991
|
+
lines = swapinfo.split('\n')
|
992
|
+
swapUsed = 0
|
993
|
+
swapFree = 0
|
994
|
+
|
995
|
+
for index in range(1, len(lines)):
|
996
|
+
swapParts = re.findall(r'(\d+)', lines[index])
|
997
|
+
|
998
|
+
if swapParts != None:
|
999
|
+
try:
|
1000
|
+
swapUsed += int(swapParts[len(swapParts) - 3]) / 1024
|
1001
|
+
swapFree += int(swapParts[len(swapParts) - 2]) / 1024
|
1002
|
+
except IndexError, e:
|
1003
|
+
pass
|
1004
|
+
|
1005
|
+
self.mainLogger.debug('getMemoryUsage: parsed swapinfo, completed, returning')
|
1006
|
+
|
1007
|
+
return {'physUsed' : physUsed, 'physFree' : physFree, 'swapUsed' : swapUsed, 'swapFree' : swapFree, 'cached' : cached}
|
1008
|
+
|
1009
|
+
elif sys.platform == 'darwin':
|
1010
|
+
self.mainLogger.debug('getMemoryUsage: darwin')
|
1011
|
+
|
1012
|
+
try:
|
1013
|
+
try:
|
1014
|
+
self.mainLogger.debug('getMemoryUsage: attempting Popen (top)')
|
1015
|
+
|
1016
|
+
proc = subprocess.Popen(['top', '-l 1'], stdout=subprocess.PIPE, close_fds=True)
|
1017
|
+
top = proc.communicate()[0]
|
1018
|
+
|
1019
|
+
if int(pythonVersion[1]) >= 6:
|
1020
|
+
try:
|
1021
|
+
proc.kill()
|
1022
|
+
except Exception, e:
|
1023
|
+
self.mainLogger.debug('Process already terminated')
|
1024
|
+
|
1025
|
+
self.mainLogger.debug('getMemoryUsage: attempting Popen (sysctl)')
|
1026
|
+
proc = subprocess.Popen(['sysctl', 'vm.swapusage'], stdout=subprocess.PIPE, close_fds=True)
|
1027
|
+
sysctl = proc.communicate()[0]
|
1028
|
+
|
1029
|
+
if int(pythonVersion[1]) >= 6:
|
1030
|
+
try:
|
1031
|
+
proc.kill()
|
1032
|
+
except Exception, e:
|
1033
|
+
self.mainLogger.debug('Process already terminated')
|
1034
|
+
|
1035
|
+
except Exception, e:
|
1036
|
+
import traceback
|
1037
|
+
self.mainLogger.error('getMemoryUsage: exception = ' + traceback.format_exc())
|
1038
|
+
return False
|
1039
|
+
finally:
|
1040
|
+
if int(pythonVersion[1]) >= 6:
|
1041
|
+
try:
|
1042
|
+
proc.kill()
|
1043
|
+
except Exception, e:
|
1044
|
+
self.mainLogger.debug('Process already terminated')
|
1045
|
+
|
1046
|
+
self.mainLogger.debug('getMemoryUsage: Popen success, parsing')
|
1047
|
+
|
1048
|
+
# Deal with top
|
1049
|
+
lines = top.split('\n')
|
1050
|
+
physParts = re.findall(r'([0-9]\d+)', lines[self.topIndex])
|
1051
|
+
|
1052
|
+
self.mainLogger.debug('getMemoryUsage: parsed top')
|
1053
|
+
|
1054
|
+
# Deal with sysctl
|
1055
|
+
swapParts = re.findall(r'([0-9]+\.\d+)', sysctl)
|
1056
|
+
|
1057
|
+
self.mainLogger.debug('getMemoryUsage: parsed sysctl, completed, returning')
|
1058
|
+
|
1059
|
+
return {'physUsed' : physParts[3], 'physFree' : physParts[4], 'swapUsed' : swapParts[1], 'swapFree' : swapParts[2], 'cached' : 'NULL'}
|
1060
|
+
|
1061
|
+
else:
|
1062
|
+
self.mainLogger.debug('getMemoryUsage: other platform, returning')
|
1063
|
+
return False
|
1064
|
+
|
1065
|
+
def getMongoDBStatus(self):
|
1066
|
+
self.mainLogger.debug('getMongoDBStatus: start')
|
1067
|
+
|
1068
|
+
if 'MongoDBServer' not in self.agentConfig or self.agentConfig['MongoDBServer'] == '':
|
1069
|
+
self.mainLogger.debug('getMongoDBStatus: config not set')
|
1070
|
+
return False
|
1071
|
+
|
1072
|
+
self.mainLogger.debug('getMongoDBStatus: config set')
|
1073
|
+
|
1074
|
+
try:
|
1075
|
+
import pymongo
|
1076
|
+
from pymongo import Connection
|
1077
|
+
|
1078
|
+
except ImportError:
|
1079
|
+
self.mainLogger.error('Unable to import pymongo library')
|
1080
|
+
return False
|
1081
|
+
|
1082
|
+
# The dictionary to be returned.
|
1083
|
+
mongodb = {}
|
1084
|
+
|
1085
|
+
try:
|
1086
|
+
import urlparse
|
1087
|
+
parsed = urlparse.urlparse(self.agentConfig['MongoDBServer'])
|
1088
|
+
|
1089
|
+
mongoURI = ''
|
1090
|
+
|
1091
|
+
# Can't use attributes on Python 2.4
|
1092
|
+
if parsed[0] != 'mongodb':
|
1093
|
+
|
1094
|
+
mongoURI = 'mongodb://'
|
1095
|
+
|
1096
|
+
if parsed[2]:
|
1097
|
+
|
1098
|
+
if parsed[0]:
|
1099
|
+
|
1100
|
+
mongoURI = mongoURI + parsed[0] + ':' + parsed[2]
|
1101
|
+
|
1102
|
+
else:
|
1103
|
+
mongoURI = mongoURI + parsed[2]
|
1104
|
+
|
1105
|
+
else:
|
1106
|
+
|
1107
|
+
mongoURI = self.agentConfig['MongoDBServer']
|
1108
|
+
|
1109
|
+
self.mainLogger.debug('-- mongoURI: %s', mongoURI)
|
1110
|
+
|
1111
|
+
conn = Connection(mongoURI, slave_okay=True)
|
1112
|
+
|
1113
|
+
self.mainLogger.debug('Connected to MongoDB')
|
1114
|
+
|
1115
|
+
except Exception, ex:
|
1116
|
+
import traceback
|
1117
|
+
self.mainLogger.error('Unable to connect to MongoDB server %s - Exception = ' + traceback.format_exc(), mongoURI)
|
1118
|
+
return False
|
1119
|
+
|
1120
|
+
# Older versions of pymongo did not support the command()
|
1121
|
+
# method below.
|
1122
|
+
try:
|
1123
|
+
db = conn['local']
|
1124
|
+
|
1125
|
+
# Server status
|
1126
|
+
statusOutput = db.command('serverStatus') # Shorthand for {'serverStatus': 1}
|
1127
|
+
|
1128
|
+
self.mainLogger.debug('getMongoDBStatus: executed serverStatus')
|
1129
|
+
|
1130
|
+
# Setup
|
1131
|
+
import datetime
|
1132
|
+
status = {}
|
1133
|
+
|
1134
|
+
# Version
|
1135
|
+
try:
|
1136
|
+
status['version'] = statusOutput['version']
|
1137
|
+
|
1138
|
+
self.mainLogger.debug('getMongoDBStatus: version ' + str(statusOutput['version']))
|
1139
|
+
|
1140
|
+
except KeyError, ex:
|
1141
|
+
self.mainLogger.error('getMongoDBStatus: version KeyError exception - ' + str(ex))
|
1142
|
+
pass
|
1143
|
+
|
1144
|
+
# Global locks
|
1145
|
+
try:
|
1146
|
+
self.mainLogger.debug('getMongoDBStatus: globalLock')
|
1147
|
+
|
1148
|
+
status['globalLock'] = {}
|
1149
|
+
status['globalLock']['ratio'] = statusOutput['globalLock']['ratio']
|
1150
|
+
|
1151
|
+
status['globalLock']['currentQueue'] = {}
|
1152
|
+
status['globalLock']['currentQueue']['total'] = statusOutput['globalLock']['currentQueue']['total']
|
1153
|
+
status['globalLock']['currentQueue']['readers'] = statusOutput['globalLock']['currentQueue']['readers']
|
1154
|
+
status['globalLock']['currentQueue']['writers'] = statusOutput['globalLock']['currentQueue']['writers']
|
1155
|
+
|
1156
|
+
except KeyError, ex:
|
1157
|
+
self.mainLogger.error('getMongoDBStatus: globalLock KeyError exception - ' + str(ex))
|
1158
|
+
pass
|
1159
|
+
|
1160
|
+
# Memory
|
1161
|
+
try:
|
1162
|
+
self.mainLogger.debug('getMongoDBStatus: memory')
|
1163
|
+
|
1164
|
+
status['mem'] = {}
|
1165
|
+
status['mem']['resident'] = statusOutput['mem']['resident']
|
1166
|
+
status['mem']['virtual'] = statusOutput['mem']['virtual']
|
1167
|
+
status['mem']['mapped'] = statusOutput['mem']['mapped']
|
1168
|
+
|
1169
|
+
except KeyError, ex:
|
1170
|
+
self.mainLogger.error('getMongoDBStatus: memory KeyError exception - ' + str(ex))
|
1171
|
+
pass
|
1172
|
+
|
1173
|
+
# Connections
|
1174
|
+
try:
|
1175
|
+
self.mainLogger.debug('getMongoDBStatus: connections')
|
1176
|
+
|
1177
|
+
status['connections'] = {}
|
1178
|
+
status['connections']['current'] = statusOutput['connections']['current']
|
1179
|
+
status['connections']['available'] = statusOutput['connections']['available']
|
1180
|
+
|
1181
|
+
except KeyError, ex:
|
1182
|
+
self.mainLogger.error('getMongoDBStatus: connections KeyError exception - ' + str(ex))
|
1183
|
+
pass
|
1184
|
+
|
1185
|
+
# Extra info (Linux only)
|
1186
|
+
try:
|
1187
|
+
self.mainLogger.debug('getMongoDBStatus: extra info')
|
1188
|
+
|
1189
|
+
status['extraInfo'] = {}
|
1190
|
+
status['extraInfo']['heapUsage'] = statusOutput['extra_info']['heap_usage_bytes']
|
1191
|
+
status['extraInfo']['pageFaults'] = statusOutput['extra_info']['page_faults']
|
1192
|
+
|
1193
|
+
except KeyError, ex:
|
1194
|
+
self.mainLogger.debug('getMongoDBStatus: extra info KeyError exception - ' + str(ex))
|
1195
|
+
pass
|
1196
|
+
|
1197
|
+
# Background flushing
|
1198
|
+
try:
|
1199
|
+
self.mainLogger.debug('getMongoDBStatus: backgroundFlushing')
|
1200
|
+
|
1201
|
+
status['backgroundFlushing'] = {}
|
1202
|
+
delta = datetime.datetime.utcnow() - statusOutput['backgroundFlushing']['last_finished']
|
1203
|
+
status['backgroundFlushing']['secondsSinceLastFlush'] = delta.seconds
|
1204
|
+
status['backgroundFlushing']['lastFlushLength'] = statusOutput['backgroundFlushing']['last_ms']
|
1205
|
+
status['backgroundFlushing']['flushLengthAvrg'] = statusOutput['backgroundFlushing']['average_ms']
|
1206
|
+
|
1207
|
+
except KeyError, ex:
|
1208
|
+
self.mainLogger.debug('getMongoDBStatus: backgroundFlushing KeyError exception - ' + str(ex))
|
1209
|
+
pass
|
1210
|
+
|
1211
|
+
# Per second metric calculations (opcounts and asserts)
|
1212
|
+
try:
|
1213
|
+
if self.mongoDBStore == None:
|
1214
|
+
self.mainLogger.debug('getMongoDBStatus: per second metrics no cached data, so storing for first time')
|
1215
|
+
self.setMongoDBStore(statusOutput)
|
1216
|
+
|
1217
|
+
else:
|
1218
|
+
self.mainLogger.debug('getMongoDBStatus: per second metrics cached data exists')
|
1219
|
+
|
1220
|
+
accessesPS = float(statusOutput['indexCounters']['btree']['accesses'] - self.mongoDBStore['indexCounters']['btree']['accessesPS']) / 60
|
1221
|
+
|
1222
|
+
if accessesPS >= 0:
|
1223
|
+
status['indexCounters'] = {}
|
1224
|
+
status['indexCounters']['btree'] = {}
|
1225
|
+
status['indexCounters']['btree']['accessesPS'] = accessesPS
|
1226
|
+
status['indexCounters']['btree']['hitsPS'] = float(statusOutput['indexCounters']['btree']['hits'] - self.mongoDBStore['indexCounters']['btree']['hitsPS']) / 60
|
1227
|
+
status['indexCounters']['btree']['missesPS'] = float(statusOutput['indexCounters']['btree']['misses'] - self.mongoDBStore['indexCounters']['btree']['missesPS']) / 60
|
1228
|
+
status['indexCounters']['btree']['missRatioPS'] = float(statusOutput['indexCounters']['btree']['missRatio'] - self.mongoDBStore['indexCounters']['btree']['missRatioPS']) / 60
|
1229
|
+
|
1230
|
+
status['opcounters'] = {}
|
1231
|
+
status['opcounters']['insertPS'] = float(statusOutput['opcounters']['insert'] - self.mongoDBStore['opcounters']['insertPS']) / 60
|
1232
|
+
status['opcounters']['queryPS'] = float(statusOutput['opcounters']['query'] - self.mongoDBStore['opcounters']['queryPS']) / 60
|
1233
|
+
status['opcounters']['updatePS'] = float(statusOutput['opcounters']['update'] - self.mongoDBStore['opcounters']['updatePS']) / 60
|
1234
|
+
status['opcounters']['deletePS'] = float(statusOutput['opcounters']['delete'] - self.mongoDBStore['opcounters']['deletePS']) / 60
|
1235
|
+
status['opcounters']['getmorePS'] = float(statusOutput['opcounters']['getmore'] - self.mongoDBStore['opcounters']['getmorePS']) / 60
|
1236
|
+
status['opcounters']['commandPS'] = float(statusOutput['opcounters']['command'] - self.mongoDBStore['opcounters']['commandPS']) / 60
|
1237
|
+
|
1238
|
+
status['asserts'] = {}
|
1239
|
+
status['asserts']['regularPS'] = float(statusOutput['asserts']['regular'] - self.mongoDBStore['asserts']['regularPS']) / 60
|
1240
|
+
status['asserts']['warningPS'] = float(statusOutput['asserts']['warning'] - self.mongoDBStore['asserts']['warningPS']) / 60
|
1241
|
+
status['asserts']['msgPS'] = float(statusOutput['asserts']['msg'] - self.mongoDBStore['asserts']['msgPS']) / 60
|
1242
|
+
status['asserts']['userPS'] = float(statusOutput['asserts']['user'] - self.mongoDBStore['asserts']['userPS']) / 60
|
1243
|
+
status['asserts']['rolloversPS'] = float(statusOutput['asserts']['rollovers'] - self.mongoDBStore['asserts']['rolloversPS']) / 60
|
1244
|
+
|
1245
|
+
self.setMongoDBStore(statusOutput)
|
1246
|
+
else:
|
1247
|
+
self.mainLogger.debug('getMongoDBStatus: per second metrics negative value calculated, mongod likely restarted, so clearing cache')
|
1248
|
+
self.setMongoDBStore(statusOutput)
|
1249
|
+
|
1250
|
+
except KeyError, ex:
|
1251
|
+
self.mainLogger.error('getMongoDBStatus: per second metrics KeyError exception - ' + str(ex))
|
1252
|
+
pass
|
1253
|
+
|
1254
|
+
# Cursors
|
1255
|
+
try:
|
1256
|
+
self.mainLogger.debug('getMongoDBStatus: cursors')
|
1257
|
+
|
1258
|
+
status['cursors'] = {}
|
1259
|
+
status['cursors']['totalOpen'] = statusOutput['cursors']['totalOpen']
|
1260
|
+
|
1261
|
+
except KeyError, ex:
|
1262
|
+
self.mainLogger.error('getMongoDBStatus: cursors KeyError exception - ' + str(ex))
|
1263
|
+
pass
|
1264
|
+
|
1265
|
+
# Replica set status
|
1266
|
+
if 'MongoDBReplSet' in self.agentConfig and self.agentConfig['MongoDBReplSet'] == 'yes':
|
1267
|
+
self.mainLogger.debug('getMongoDBStatus: get replset status too')
|
1268
|
+
|
1269
|
+
# isMaster (to get state
|
1270
|
+
isMaster = db.command('isMaster')
|
1271
|
+
|
1272
|
+
self.mainLogger.debug('getMongoDBStatus: executed isMaster')
|
1273
|
+
|
1274
|
+
status['replSet'] = {}
|
1275
|
+
status['replSet']['setName'] = isMaster['setName']
|
1276
|
+
status['replSet']['isMaster'] = isMaster['ismaster']
|
1277
|
+
status['replSet']['isSecondary'] = isMaster['secondary']
|
1278
|
+
|
1279
|
+
if 'arbiterOnly' in isMaster:
|
1280
|
+
status['replSet']['isArbiter'] = isMaster['arbiterOnly']
|
1281
|
+
|
1282
|
+
self.mainLogger.debug('getMongoDBStatus: finished isMaster')
|
1283
|
+
|
1284
|
+
# rs.status()
|
1285
|
+
db = conn['admin']
|
1286
|
+
replSet = db.command('replSetGetStatus')
|
1287
|
+
|
1288
|
+
self.mainLogger.debug('getMongoDBStatus: executed replSetGetStatus')
|
1289
|
+
|
1290
|
+
status['replSet']['myState'] = replSet['myState']
|
1291
|
+
|
1292
|
+
status['replSet']['members'] = {}
|
1293
|
+
|
1294
|
+
for member in replSet['members']:
|
1295
|
+
|
1296
|
+
self.mainLogger.debug('getMongoDBStatus: replSetGetStatus looping - ' + str(member['name']))
|
1297
|
+
|
1298
|
+
status['replSet']['members'][str(member['_id'])] = {}
|
1299
|
+
|
1300
|
+
status['replSet']['members'][str(member['_id'])]['name'] = member['name']
|
1301
|
+
status['replSet']['members'][str(member['_id'])]['state'] = member['state']
|
1302
|
+
|
1303
|
+
# Optime delta (only available from not self)
|
1304
|
+
# Calculation is from http://docs.python.org/library/datetime.html#datetime.timedelta.total_seconds
|
1305
|
+
if 'optimeDate' in member: # Only available as of 1.7.2
|
1306
|
+
deltaOptime = datetime.datetime.utcnow() - member['optimeDate']
|
1307
|
+
status['replSet']['members'][str(member['_id'])]['optimeDate'] = (deltaOptime.microseconds + (deltaOptime.seconds + deltaOptime.days * 24 * 3600) * 10**6) / 10**6
|
1308
|
+
|
1309
|
+
if 'self' in member:
|
1310
|
+
status['replSet']['myId'] = member['_id']
|
1311
|
+
|
1312
|
+
# Have to do it manually because total_seconds() is only available as of Python 2.7
|
1313
|
+
else:
|
1314
|
+
if 'lastHeartbeat' in member:
|
1315
|
+
deltaHeartbeat = datetime.datetime.utcnow() - member['lastHeartbeat']
|
1316
|
+
status['replSet']['members'][str(member['_id'])]['lastHeartbeat'] = (deltaHeartbeat.microseconds + (deltaHeartbeat.seconds + deltaHeartbeat.days * 24 * 3600) * 10**6) / 10**6
|
1317
|
+
|
1318
|
+
if 'errmsg' in member:
|
1319
|
+
status['replSet']['members'][str(member['_id'])]['error'] = member['errmsg']
|
1320
|
+
|
1321
|
+
# db.stats()
|
1322
|
+
if 'MongoDBDBStats' in self.agentConfig and self.agentConfig['MongoDBDBStats'] == 'yes':
|
1323
|
+
self.mainLogger.debug('getMongoDBStatus: db.stats() too')
|
1324
|
+
|
1325
|
+
status['dbStats'] = {}
|
1326
|
+
|
1327
|
+
for database in conn.database_names():
|
1328
|
+
|
1329
|
+
if database != 'config' and database != 'local' and database != 'admin' and database != 'test':
|
1330
|
+
|
1331
|
+
self.mainLogger.debug('getMongoDBStatus: executing db.stats() for ' + str(database))
|
1332
|
+
|
1333
|
+
status['dbStats'][database] = conn[database].command('dbstats')
|
1334
|
+
status['dbStats'][database]['namespaces'] = conn[database]['system']['namespaces'].count()
|
1335
|
+
|
1336
|
+
# Ensure all strings to prevent JSON parse errors. We typecast on the server
|
1337
|
+
for key in status['dbStats'][database].keys():
|
1338
|
+
|
1339
|
+
status['dbStats'][database][key] = str(status['dbStats'][database][key])
|
1340
|
+
|
1341
|
+
|
1342
|
+
except Exception, ex:
|
1343
|
+
import traceback
|
1344
|
+
self.mainLogger.error('Unable to get MongoDB status - Exception = ' + traceback.format_exc())
|
1345
|
+
return False
|
1346
|
+
|
1347
|
+
self.mainLogger.debug('getMongoDBStatus: completed, returning')
|
1348
|
+
|
1349
|
+
return status
|
1350
|
+
|
1351
|
+
def setMongoDBStore(self, statusOutput):
|
1352
|
+
self.mongoDBStore = {}
|
1353
|
+
|
1354
|
+
self.mongoDBStore['indexCounters'] = {}
|
1355
|
+
self.mongoDBStore['indexCounters']['btree'] = {}
|
1356
|
+
self.mongoDBStore['indexCounters']['btree']['accessesPS'] = statusOutput['indexCounters']['btree']['accesses']
|
1357
|
+
self.mongoDBStore['indexCounters']['btree']['hitsPS'] = statusOutput['indexCounters']['btree']['hits']
|
1358
|
+
self.mongoDBStore['indexCounters']['btree']['missesPS'] = statusOutput['indexCounters']['btree']['misses']
|
1359
|
+
self.mongoDBStore['indexCounters']['btree']['missRatioPS'] = statusOutput['indexCounters']['btree']['missRatio']
|
1360
|
+
|
1361
|
+
self.mongoDBStore['opcounters'] = {}
|
1362
|
+
self.mongoDBStore['opcounters']['insertPS'] = statusOutput['opcounters']['insert']
|
1363
|
+
self.mongoDBStore['opcounters']['queryPS'] = statusOutput['opcounters']['query']
|
1364
|
+
self.mongoDBStore['opcounters']['updatePS'] = statusOutput['opcounters']['update']
|
1365
|
+
self.mongoDBStore['opcounters']['deletePS'] = statusOutput['opcounters']['delete']
|
1366
|
+
self.mongoDBStore['opcounters']['getmorePS'] = statusOutput['opcounters']['getmore']
|
1367
|
+
self.mongoDBStore['opcounters']['commandPS'] = statusOutput['opcounters']['command']
|
1368
|
+
|
1369
|
+
self.mongoDBStore['asserts'] = {}
|
1370
|
+
self.mongoDBStore['asserts']['regularPS'] = statusOutput['asserts']['regular']
|
1371
|
+
self.mongoDBStore['asserts']['warningPS'] = statusOutput['asserts']['warning']
|
1372
|
+
self.mongoDBStore['asserts']['msgPS'] = statusOutput['asserts']['msg']
|
1373
|
+
self.mongoDBStore['asserts']['userPS'] = statusOutput['asserts']['user']
|
1374
|
+
self.mongoDBStore['asserts']['rolloversPS'] = statusOutput['asserts']['rollovers']
|
1375
|
+
|
1376
|
+
def getMySQLStatus(self):
|
1377
|
+
self.mainLogger.debug('getMySQLStatus: start')
|
1378
|
+
|
1379
|
+
if 'MySQLServer' in self.agentConfig and 'MySQLUser' in self.agentConfig and self.agentConfig['MySQLServer'] != '' and self.agentConfig['MySQLUser'] != '':
|
1380
|
+
|
1381
|
+
self.mainLogger.debug('getMySQLStatus: config')
|
1382
|
+
|
1383
|
+
# Try import MySQLdb - http://sourceforge.net/projects/mysql-python/files/
|
1384
|
+
try:
|
1385
|
+
import MySQLdb
|
1386
|
+
|
1387
|
+
except ImportError, e:
|
1388
|
+
self.mainLogger.error('getMySQLStatus: unable to import MySQLdb')
|
1389
|
+
return False
|
1390
|
+
|
1391
|
+
if 'MySQLPort' not in self.agentConfig:
|
1392
|
+
|
1393
|
+
self.agentConfig['MySQLPort'] = 3306
|
1394
|
+
|
1395
|
+
if 'MySQLSocket' not in self.agentConfig:
|
1396
|
+
|
1397
|
+
# Connect
|
1398
|
+
try:
|
1399
|
+
db = MySQLdb.connect(host=self.agentConfig['MySQLServer'], user=self.agentConfig['MySQLUser'], passwd=self.agentConfig['MySQLPass'], port=int(self.agentConfig['MySQLPort']))
|
1400
|
+
|
1401
|
+
except MySQLdb.OperationalError, message:
|
1402
|
+
|
1403
|
+
self.mainLogger.error('getMySQLStatus: MySQL connection error (server): ' + str(message))
|
1404
|
+
return False
|
1405
|
+
|
1406
|
+
else:
|
1407
|
+
|
1408
|
+
# Connect
|
1409
|
+
try:
|
1410
|
+
db = MySQLdb.connect(host='localhost', user=self.agentConfig['MySQLUser'], passwd=self.agentConfig['MySQLPass'], port=int(self.agentConfig['MySQLPort']), unix_socket=self.agentConfig['MySQLSocket'])
|
1411
|
+
|
1412
|
+
except MySQLdb.OperationalError, message:
|
1413
|
+
|
1414
|
+
self.mainLogger.error('getMySQLStatus: MySQL connection error (socket): ' + str(message))
|
1415
|
+
return False
|
1416
|
+
|
1417
|
+
self.mainLogger.debug('getMySQLStatus: connected')
|
1418
|
+
|
1419
|
+
# Get MySQL version
|
1420
|
+
if self.mysqlVersion == None:
|
1421
|
+
|
1422
|
+
self.mainLogger.debug('getMySQLStatus: mysqlVersion unset storing for first time')
|
1423
|
+
|
1424
|
+
try:
|
1425
|
+
cursor = db.cursor()
|
1426
|
+
cursor.execute('SELECT VERSION()')
|
1427
|
+
result = cursor.fetchone()
|
1428
|
+
|
1429
|
+
except MySQLdb.OperationalError, message:
|
1430
|
+
|
1431
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting version: ' + str(message))
|
1432
|
+
|
1433
|
+
version = result[0].split('-') # Case 31237. Might include a description e.g. 4.1.26-log. See http://dev.mysql.com/doc/refman/4.1/en/information-functions.html#function_version
|
1434
|
+
version = version[0].split('.')
|
1435
|
+
|
1436
|
+
self.mysqlVersion = []
|
1437
|
+
|
1438
|
+
# Make sure the version is only an int. Case 31647
|
1439
|
+
for string in version:
|
1440
|
+
number = re.match('([0-9]+)', string)
|
1441
|
+
number = number.group(0)
|
1442
|
+
self.mysqlVersion.append(number)
|
1443
|
+
|
1444
|
+
self.mainLogger.debug('getMySQLStatus: getting Connections')
|
1445
|
+
|
1446
|
+
# Connections
|
1447
|
+
try:
|
1448
|
+
cursor = db.cursor()
|
1449
|
+
cursor.execute('SHOW STATUS LIKE "Connections"')
|
1450
|
+
result = cursor.fetchone()
|
1451
|
+
|
1452
|
+
except MySQLdb.OperationalError, message:
|
1453
|
+
|
1454
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting Connections: ' + str(message))
|
1455
|
+
|
1456
|
+
if self.mysqlConnectionsStore == None:
|
1457
|
+
|
1458
|
+
self.mainLogger.debug('getMySQLStatus: mysqlConnectionsStore unset storing for first time')
|
1459
|
+
|
1460
|
+
self.mysqlConnectionsStore = result[1]
|
1461
|
+
|
1462
|
+
connections = 0
|
1463
|
+
|
1464
|
+
else:
|
1465
|
+
|
1466
|
+
self.mainLogger.debug('getMySQLStatus: mysqlConnectionsStore set so calculating')
|
1467
|
+
self.mainLogger.debug('getMySQLStatus: self.mysqlConnectionsStore = ' + str(self.mysqlConnectionsStore))
|
1468
|
+
self.mainLogger.debug('getMySQLStatus: result = ' + str(result[1]))
|
1469
|
+
|
1470
|
+
connections = float(float(result[1]) - float(self.mysqlConnectionsStore)) / 60
|
1471
|
+
|
1472
|
+
self.mysqlConnectionsStore = result[1]
|
1473
|
+
|
1474
|
+
self.mainLogger.debug('getMySQLStatus: connections = ' + str(connections))
|
1475
|
+
|
1476
|
+
self.mainLogger.debug('getMySQLStatus: getting Connections - done')
|
1477
|
+
|
1478
|
+
self.mainLogger.debug('getMySQLStatus: getting Created_tmp_disk_tables')
|
1479
|
+
|
1480
|
+
# Created_tmp_disk_tables
|
1481
|
+
|
1482
|
+
# Determine query depending on version. For 5.0.2 and above we need the GLOBAL keyword (case 31015)
|
1483
|
+
if int(self.mysqlVersion[0]) >= 5 and int(self.mysqlVersion[2]) >= 2:
|
1484
|
+
query = 'SHOW GLOBAL STATUS LIKE "Created_tmp_disk_tables"'
|
1485
|
+
|
1486
|
+
else:
|
1487
|
+
query = 'SHOW STATUS LIKE "Created_tmp_disk_tables"'
|
1488
|
+
|
1489
|
+
try:
|
1490
|
+
cursor = db.cursor()
|
1491
|
+
cursor.execute(query)
|
1492
|
+
result = cursor.fetchone()
|
1493
|
+
|
1494
|
+
except MySQLdb.OperationalError, message:
|
1495
|
+
|
1496
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting Created_tmp_disk_tables: ' + str(message))
|
1497
|
+
|
1498
|
+
createdTmpDiskTables = float(result[1])
|
1499
|
+
|
1500
|
+
self.mainLogger.debug('getMySQLStatus: createdTmpDiskTables = ' + str(createdTmpDiskTables))
|
1501
|
+
|
1502
|
+
self.mainLogger.debug('getMySQLStatus: getting Created_tmp_disk_tables - done')
|
1503
|
+
|
1504
|
+
self.mainLogger.debug('getMySQLStatus: getting Max_used_connections')
|
1505
|
+
|
1506
|
+
# Max_used_connections
|
1507
|
+
try:
|
1508
|
+
cursor = db.cursor()
|
1509
|
+
cursor.execute('SHOW STATUS LIKE "Max_used_connections"')
|
1510
|
+
result = cursor.fetchone()
|
1511
|
+
|
1512
|
+
except MySQLdb.OperationalError, message:
|
1513
|
+
|
1514
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting Max_used_connections: ' + str(message))
|
1515
|
+
|
1516
|
+
maxUsedConnections = result[1]
|
1517
|
+
|
1518
|
+
self.mainLogger.debug('getMySQLStatus: maxUsedConnections = ' + str(maxUsedConnections))
|
1519
|
+
|
1520
|
+
self.mainLogger.debug('getMySQLStatus: getting Max_used_connections - done')
|
1521
|
+
|
1522
|
+
self.mainLogger.debug('getMySQLStatus: getting Open_files')
|
1523
|
+
|
1524
|
+
# Open_files
|
1525
|
+
try:
|
1526
|
+
cursor = db.cursor()
|
1527
|
+
cursor.execute('SHOW STATUS LIKE "Open_files"')
|
1528
|
+
result = cursor.fetchone()
|
1529
|
+
|
1530
|
+
except MySQLdb.OperationalError, message:
|
1531
|
+
|
1532
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting Open_files: ' + str(message))
|
1533
|
+
|
1534
|
+
openFiles = result[1]
|
1535
|
+
|
1536
|
+
self.mainLogger.debug('getMySQLStatus: openFiles = ' + str(openFiles))
|
1537
|
+
|
1538
|
+
self.mainLogger.debug('getMySQLStatus: getting Open_files - done')
|
1539
|
+
|
1540
|
+
self.mainLogger.debug('getMySQLStatus: getting Slow_queries')
|
1541
|
+
|
1542
|
+
# Slow_queries
|
1543
|
+
|
1544
|
+
# Determine query depending on version. For 5.0.2 and above we need the GLOBAL keyword (case 31015)
|
1545
|
+
if int(self.mysqlVersion[0]) >= 5 and int(self.mysqlVersion[2]) >= 2:
|
1546
|
+
query = 'SHOW GLOBAL STATUS LIKE "Slow_queries"'
|
1547
|
+
|
1548
|
+
else:
|
1549
|
+
query = 'SHOW STATUS LIKE "Slow_queries"'
|
1550
|
+
|
1551
|
+
try:
|
1552
|
+
cursor = db.cursor()
|
1553
|
+
cursor.execute(query)
|
1554
|
+
result = cursor.fetchone()
|
1555
|
+
|
1556
|
+
except MySQLdb.OperationalError, message:
|
1557
|
+
|
1558
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting Slow_queries: ' + str(message))
|
1559
|
+
|
1560
|
+
if self.mysqlSlowQueriesStore == None:
|
1561
|
+
|
1562
|
+
self.mainLogger.debug('getMySQLStatus: mysqlSlowQueriesStore unset so storing for first time')
|
1563
|
+
|
1564
|
+
self.mysqlSlowQueriesStore = result[1]
|
1565
|
+
|
1566
|
+
slowQueries = 0
|
1567
|
+
|
1568
|
+
else:
|
1569
|
+
|
1570
|
+
self.mainLogger.debug('getMySQLStatus: mysqlSlowQueriesStore set so calculating')
|
1571
|
+
self.mainLogger.debug('getMySQLStatus: self.mysqlSlowQueriesStore = ' + str(self.mysqlSlowQueriesStore))
|
1572
|
+
self.mainLogger.debug('getMySQLStatus: result = ' + str(result[1]))
|
1573
|
+
|
1574
|
+
slowQueries = float(float(result[1]) - float(self.mysqlSlowQueriesStore)) / 60
|
1575
|
+
|
1576
|
+
self.mysqlSlowQueriesStore = result[1]
|
1577
|
+
|
1578
|
+
self.mainLogger.debug('getMySQLStatus: slowQueries = ' + str(slowQueries))
|
1579
|
+
|
1580
|
+
self.mainLogger.debug('getMySQLStatus: getting Slow_queries - done')
|
1581
|
+
|
1582
|
+
self.mainLogger.debug('getMySQLStatus: getting Table_locks_waited')
|
1583
|
+
|
1584
|
+
# Table_locks_waited
|
1585
|
+
try:
|
1586
|
+
cursor = db.cursor()
|
1587
|
+
cursor.execute('SHOW STATUS LIKE "Table_locks_waited"')
|
1588
|
+
result = cursor.fetchone()
|
1589
|
+
|
1590
|
+
except MySQLdb.OperationalError, message:
|
1591
|
+
|
1592
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting Table_locks_waited: ' + str(message))
|
1593
|
+
|
1594
|
+
tableLocksWaited = float(result[1])
|
1595
|
+
|
1596
|
+
self.mainLogger.debug('getMySQLStatus: tableLocksWaited = ' + str(tableLocksWaited))
|
1597
|
+
|
1598
|
+
self.mainLogger.debug('getMySQLStatus: getting Table_locks_waited - done')
|
1599
|
+
|
1600
|
+
self.mainLogger.debug('getMySQLStatus: getting Threads_connected')
|
1601
|
+
|
1602
|
+
# Threads_connected
|
1603
|
+
try:
|
1604
|
+
cursor = db.cursor()
|
1605
|
+
cursor.execute('SHOW STATUS LIKE "Threads_connected"')
|
1606
|
+
result = cursor.fetchone()
|
1607
|
+
|
1608
|
+
except MySQLdb.OperationalError, message:
|
1609
|
+
|
1610
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting Threads_connected: ' + str(message))
|
1611
|
+
|
1612
|
+
threadsConnected = result[1]
|
1613
|
+
|
1614
|
+
self.mainLogger.debug('getMySQLStatus: threadsConnected = ' + str(threadsConnected))
|
1615
|
+
|
1616
|
+
self.mainLogger.debug('getMySQLStatus: getting Threads_connected - done')
|
1617
|
+
|
1618
|
+
self.mainLogger.debug('getMySQLStatus: getting Seconds_Behind_Master')
|
1619
|
+
|
1620
|
+
if 'MySQLNoRepl' not in self.agentConfig:
|
1621
|
+
# Seconds_Behind_Master
|
1622
|
+
try:
|
1623
|
+
cursor = db.cursor(MySQLdb.cursors.DictCursor)
|
1624
|
+
cursor.execute('SHOW SLAVE STATUS')
|
1625
|
+
result = cursor.fetchone()
|
1626
|
+
|
1627
|
+
except MySQLdb.OperationalError, message:
|
1628
|
+
|
1629
|
+
self.mainLogger.error('getMySQLStatus: MySQL query error when getting SHOW SLAVE STATUS: ' + str(message))
|
1630
|
+
result = None
|
1631
|
+
|
1632
|
+
if result != None:
|
1633
|
+
try:
|
1634
|
+
secondsBehindMaster = result['Seconds_Behind_Master']
|
1635
|
+
|
1636
|
+
self.mainLogger.debug('getMySQLStatus: secondsBehindMaster = ' + str(secondsBehindMaster))
|
1637
|
+
|
1638
|
+
except IndexError, e:
|
1639
|
+
secondsBehindMaster = None
|
1640
|
+
|
1641
|
+
self.mainLogger.debug('getMySQLStatus: secondsBehindMaster empty')
|
1642
|
+
|
1643
|
+
else:
|
1644
|
+
secondsBehindMaster = None
|
1645
|
+
|
1646
|
+
self.mainLogger.debug('getMySQLStatus: secondsBehindMaster empty')
|
1647
|
+
|
1648
|
+
self.mainLogger.debug('getMySQLStatus: getting Seconds_Behind_Master - done')
|
1649
|
+
|
1650
|
+
return {'connections' : connections, 'createdTmpDiskTables' : createdTmpDiskTables, 'maxUsedConnections' : maxUsedConnections, 'openFiles' : openFiles, 'slowQueries' : slowQueries, 'tableLocksWaited' : tableLocksWaited, 'threadsConnected' : threadsConnected, 'secondsBehindMaster' : secondsBehindMaster}
|
1651
|
+
|
1652
|
+
else:
|
1653
|
+
|
1654
|
+
self.mainLogger.debug('getMySQLStatus: config not set')
|
1655
|
+
return False
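A note on the version handling above: SHOW STATUS became session-scoped in MySQL 5.0.2, which is why the GLOBAL keyword is added for newer servers. A small standalone sketch of that decision, using a hypothetical helper rather than the agent's inline checks:

import re

def status_query(version_string, variable):
    # Parse a VERSION() string such as '5.1.49-log' into numeric parts.
    parts = []
    for piece in version_string.split('-')[0].split('.'):
        match = re.match('([0-9]+)', piece)
        if match:
            parts.append(int(match.group(1)))
    # From 5.0.2 the unqualified form returns session counters, so ask for GLOBAL.
    if parts >= [5, 0, 2]:
        return 'SHOW GLOBAL STATUS LIKE "%s"' % variable
    return 'SHOW STATUS LIKE "%s"' % variable

print status_query('5.1.49-log', 'Slow_queries')   # SHOW GLOBAL STATUS LIKE "Slow_queries"
print status_query('4.1.26-log', 'Slow_queries')   # SHOW STATUS LIKE "Slow_queries"

The lexicographic list comparison also covers releases such as 5.1.0 or 5.1.1, which the major/patch test in the code above sends down the non-GLOBAL branch.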
|
1656
|
+
|
1657
|
+
def getNetworkTraffic(self):
|
1658
|
+
self.mainLogger.debug('getNetworkTraffic: start')
|
1659
|
+
|
1660
|
+
if sys.platform == 'linux2':
|
1661
|
+
self.mainLogger.debug('getNetworkTraffic: linux2')
|
1662
|
+
|
1663
|
+
try:
|
1664
|
+
self.mainLogger.debug('getNetworkTraffic: attempting open')
|
1665
|
+
|
1666
|
+
proc = open('/proc/net/dev', 'r')
|
1667
|
+
lines = proc.readlines()
|
1668
|
+
|
1669
|
+
proc.close()
|
1670
|
+
|
1671
|
+
except IOError, e:
|
1672
|
+
self.mainLogger.error('getNetworkTraffic: exception = ' + str(e))
|
1673
|
+
return False
|
1674
|
+
|
1675
|
+
self.mainLogger.debug('getNetworkTraffic: open success, parsing')
|
1676
|
+
|
1677
|
+
columnLine = lines[1]
|
1678
|
+
_, receiveCols , transmitCols = columnLine.split('|')
|
1679
|
+
receiveCols = map(lambda a:'recv_' + a, receiveCols.split())
|
1680
|
+
transmitCols = map(lambda a:'trans_' + a, transmitCols.split())
|
1681
|
+
|
1682
|
+
cols = receiveCols + transmitCols
|
1683
|
+
|
1684
|
+
self.mainLogger.debug('getNetworkTraffic: parsing, looping')
|
1685
|
+
|
1686
|
+
faces = {}
|
1687
|
+
for line in lines[2:]:
|
1688
|
+
if line.find(':') < 0: continue
|
1689
|
+
face, data = line.split(':')
|
1690
|
+
faceData = dict(zip(cols, data.split()))
|
1691
|
+
faces[face] = faceData
|
1692
|
+
|
1693
|
+
self.mainLogger.debug('getNetworkTraffic: parsed, looping')
|
1694
|
+
|
1695
|
+
interfaces = {}
|
1696
|
+
|
1697
|
+
# Now loop through each interface
|
1698
|
+
for face in faces:
|
1699
|
+
key = face.strip()
|
1700
|
+
|
1701
|
+
# We need to work out the traffic since the last check so first time we store the current value
|
1702
|
+
# then the next time we can calculate the difference
|
1703
|
+
try:
|
1704
|
+
if key in self.networkTrafficStore:
|
1705
|
+
interfaces[key] = {}
|
1706
|
+
interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes']) - long(self.networkTrafficStore[key]['recv_bytes'])
|
1707
|
+
interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes']) - long(self.networkTrafficStore[key]['trans_bytes'])
|
1708
|
+
|
1709
|
+
if interfaces[key]['recv_bytes'] < 0:
|
1710
|
+
interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes'])
|
1711
|
+
|
1712
|
+
if interfaces[key]['trans_bytes'] < 0:
|
1713
|
+
interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes'])
|
1714
|
+
|
1715
|
+
interfaces[key]['recv_bytes'] = str(interfaces[key]['recv_bytes'])
|
1716
|
+
interfaces[key]['trans_bytes'] = str(interfaces[key]['trans_bytes'])
|
1717
|
+
|
1718
|
+
# And update the stored value to subtract next time round
|
1719
|
+
self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
|
1720
|
+
self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
|
1721
|
+
|
1722
|
+
else:
|
1723
|
+
self.networkTrafficStore[key] = {}
|
1724
|
+
self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
|
1725
|
+
self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
|
1726
|
+
|
1727
|
+
# Logging
|
1728
|
+
self.mainLogger.debug('getNetworkTraffic: %s recv_bytes = %s', key, self.networkTrafficStore[key]['recv_bytes'])
|
1729
|
+
self.mainLogger.debug('getNetworkTraffic: %s trans_bytes = %s', key, self.networkTrafficStore[key]['trans_bytes'])
|
1730
|
+
|
1731
|
+
except KeyError, ex:
|
1732
|
+
self.mainLogger.error('getNetworkTraffic: no data for %s', key)
|
1733
|
+
|
1734
|
+
except ValueError, ex:
|
1735
|
+
self.mainLogger.error('getNetworkTraffic: invalid data for %s', key)
|
1736
|
+
|
1737
|
+
self.mainLogger.debug('getNetworkTraffic: completed, returning')
|
1738
|
+
|
1739
|
+
return interfaces
|
1740
|
+
|
1741
|
+
elif sys.platform.find('freebsd') != -1:
|
1742
|
+
self.mainLogger.debug('getNetworkTraffic: freebsd')
|
1743
|
+
|
1744
|
+
try:
|
1745
|
+
try:
|
1746
|
+
self.mainLogger.debug('getNetworkTraffic: attempting Popen (netstat)')
|
1747
|
+
|
1748
|
+
proc = subprocess.Popen(['netstat', '-nbid'], stdout=subprocess.PIPE, close_fds=True)
|
1749
|
+
netstat = proc.communicate()[0]
|
1750
|
+
|
1751
|
+
if int(pythonVersion[1]) >= 6:
|
1752
|
+
try:
|
1753
|
+
proc.kill()
|
1754
|
+
except Exception, e:
|
1755
|
+
self.mainLogger.debug('Process already terminated')
|
1756
|
+
|
1757
|
+
except Exception, e:
|
1758
|
+
import traceback
|
1759
|
+
self.mainLogger.error('getNetworkTraffic: exception = ' + traceback.format_exc())
|
1760
|
+
|
1761
|
+
return False
|
1762
|
+
finally:
|
1763
|
+
if int(pythonVersion[1]) >= 6:
|
1764
|
+
try:
|
1765
|
+
proc.kill()
|
1766
|
+
except Exception, e:
|
1767
|
+
self.mainLogger.debug('Process already terminated')
|
1768
|
+
|
1769
|
+
self.mainLogger.debug('getNetworkTraffic: open success, parsing')
|
1770
|
+
|
1771
|
+
lines = netstat.split('\n')
|
1772
|
+
|
1773
|
+
# Loop over available data for each interface
|
1774
|
+
faces = {}
|
1775
|
+
rxKey = None
|
1776
|
+
txKey = None
|
1777
|
+
|
1778
|
+
for line in lines:
|
1779
|
+
self.mainLogger.debug('getNetworkTraffic: %s', line)
|
1780
|
+
|
1781
|
+
line = re.split(r'\s+', line)
|
1782
|
+
|
1783
|
+
# Figure out which index we need
|
1784
|
+
if rxKey == None and txKey == None:
|
1785
|
+
for k, part in enumerate(line):
|
1786
|
+
self.mainLogger.debug('getNetworkTraffic: looping parts (%s)', part)
|
1787
|
+
|
1788
|
+
if part == 'Ibytes':
|
1789
|
+
rxKey = k
|
1790
|
+
self.mainLogger.debug('getNetworkTraffic: found rxKey = %s', k)
|
1791
|
+
elif part == 'Obytes':
|
1792
|
+
txKey = k
|
1793
|
+
self.mainLogger.debug('getNetworkTraffic: found txKey = %s', k)
|
1794
|
+
|
1795
|
+
else:
|
1796
|
+
if line[0] not in faces:
|
1797
|
+
try:
|
1798
|
+
self.mainLogger.debug('getNetworkTraffic: parsing (rx: %s = %s / tx: %s = %s)', rxKey, line[rxKey], txKey, line[txKey])
|
1799
|
+
faceData = {'recv_bytes': line[rxKey], 'trans_bytes': line[txKey]}
|
1800
|
+
|
1801
|
+
face = line[0]
|
1802
|
+
faces[face] = faceData
|
1803
|
+
except IndexError, e:
|
1804
|
+
continue
|
1805
|
+
|
1806
|
+
self.mainLogger.debug('getNetworkTraffic: parsed, looping')
|
1807
|
+
|
1808
|
+
interfaces = {}
|
1809
|
+
|
1810
|
+
# Now loop through each interface
|
1811
|
+
for face in faces:
|
1812
|
+
key = face.strip()
|
1813
|
+
|
1814
|
+
try:
|
1815
|
+
# We need to work out the traffic since the last check so first time we store the current value
|
1816
|
+
# then the next time we can calculate the difference
|
1817
|
+
if key in self.networkTrafficStore:
|
1818
|
+
interfaces[key] = {}
|
1819
|
+
interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes']) - long(self.networkTrafficStore[key]['recv_bytes'])
|
1820
|
+
interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes']) - long(self.networkTrafficStore[key]['trans_bytes'])
|
1821
|
+
|
1822
|
+
if interfaces[key]['recv_bytes'] < 0:
|
1823
|
+
interfaces[key]['recv_bytes'] = long(faces[face]['recv_bytes'])
|
1824
|
+
|
1825
|
+
if interfaces[key]['trans_bytes'] < 0:
|
1826
|
+
interfaces[key]['trans_bytes'] = long(faces[face]['trans_bytes'])
|
1827
|
+
|
1828
|
+
interfaces[key]['recv_bytes'] = str(interfaces[key]['recv_bytes'])
|
1829
|
+
interfaces[key]['trans_bytes'] = str(interfaces[key]['trans_bytes'])
|
1830
|
+
|
1831
|
+
# And update the stored value to subtract next time round
|
1832
|
+
self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
|
1833
|
+
self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
|
1834
|
+
|
1835
|
+
else:
|
1836
|
+
self.networkTrafficStore[key] = {}
|
1837
|
+
self.networkTrafficStore[key]['recv_bytes'] = faces[face]['recv_bytes']
|
1838
|
+
self.networkTrafficStore[key]['trans_bytes'] = faces[face]['trans_bytes']
|
1839
|
+
|
1840
|
+
except KeyError, ex:
|
1841
|
+
self.mainLogger.error('getNetworkTraffic: no data for %s', key)
|
1842
|
+
|
1843
|
+
except ValueError, ex:
|
1844
|
+
self.mainLogger.error('getNetworkTraffic: invalid data for %s', key)
|
1845
|
+
|
1846
|
+
self.mainLogger.debug('getNetworkTraffic: completed, returning')
|
1847
|
+
|
1848
|
+
return interfaces
|
1849
|
+
|
1850
|
+
else:
|
1851
|
+
self.mainLogger.debug('getNetworkTraffic: other platform, returning')
|
1852
|
+
|
1853
|
+
return False
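On Linux the traffic figures above come from /proc/net/dev: the second line names the receive and transmit columns, and every later line is an interface name, a colon, and its counters. A self-contained sketch of just that parsing step, without the agent's delta bookkeeping:

def parse_proc_net_dev(text):
    # Return {interface: {'recv_bytes': ..., 'trans_bytes': ...}} from /proc/net/dev contents.
    lines = text.splitlines()
    _, receive_cols, transmit_cols = lines[1].split('|')
    cols = ['recv_' + c for c in receive_cols.split()] + \
           ['trans_' + c for c in transmit_cols.split()]
    interfaces = {}
    for line in lines[2:]:
        if ':' not in line:
            continue
        name, data = line.split(':', 1)
        values = dict(zip(cols, data.split()))
        interfaces[name.strip()] = {'recv_bytes': long(values['recv_bytes']),
                                    'trans_bytes': long(values['trans_bytes'])}
    return interfaces

print parse_proc_net_dev(open('/proc/net/dev').read()).keys()   # Linux only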
|
1854
|
+
|
1855
|
+
def getNginxStatus(self):
|
1856
|
+
self.mainLogger.debug('getNginxStatus: start')
|
1857
|
+
|
1858
|
+
if 'nginxStatusUrl' in self.agentConfig and self.agentConfig['nginxStatusUrl'] != 'http://www.example.com/nginx_status': # Don't do it if the status URL hasn't been provided
|
1859
|
+
self.mainLogger.debug('getNginxStatus: config set')
|
1860
|
+
|
1861
|
+
try:
|
1862
|
+
self.mainLogger.debug('getNginxStatus: attempting urlopen')
|
1863
|
+
|
1864
|
+
req = urllib2.Request(self.agentConfig['nginxStatusUrl'], None, headers)
|
1865
|
+
|
1866
|
+
# Do the request, log any errors
|
1867
|
+
request = urllib2.urlopen(req)
|
1868
|
+
response = request.read()
|
1869
|
+
|
1870
|
+
except urllib2.HTTPError, e:
|
1871
|
+
self.mainLogger.error('Unable to get Nginx status - HTTPError = ' + str(e))
|
1872
|
+
return False
|
1873
|
+
|
1874
|
+
except urllib2.URLError, e:
|
1875
|
+
self.mainLogger.error('Unable to get Nginx status - URLError = ' + str(e))
|
1876
|
+
return False
|
1877
|
+
|
1878
|
+
except httplib.HTTPException, e:
|
1879
|
+
self.mainLogger.error('Unable to get Nginx status - HTTPException = ' + str(e))
|
1880
|
+
return False
|
1881
|
+
|
1882
|
+
except Exception, e:
|
1883
|
+
import traceback
|
1884
|
+
self.mainLogger.error('Unable to get Nginx status - Exception = ' + traceback.format_exc())
|
1885
|
+
return False
|
1886
|
+
|
1887
|
+
self.mainLogger.debug('getNginxStatus: urlopen success, start parsing')
|
1888
|
+
|
1889
|
+
# Thanks to http://hostingfu.com/files/nginx/nginxstats.py for this code
|
1890
|
+
|
1891
|
+
self.mainLogger.debug('getNginxStatus: parsing connections')
|
1892
|
+
|
1893
|
+
try:
|
1894
|
+
# Connections
|
1895
|
+
parsed = re.search(r'Active connections:\s+(\d+)', response)
|
1896
|
+
connections = int(parsed.group(1))
|
1897
|
+
|
1898
|
+
self.mainLogger.debug('getNginxStatus: parsed connections')
|
1899
|
+
self.mainLogger.debug('getNginxStatus: parsing reqs')
|
1900
|
+
|
1901
|
+
# Requests per second
|
1902
|
+
parsed = re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)', response)
|
1903
|
+
|
1904
|
+
if not parsed:
|
1905
|
+
self.mainLogger.debug('getNginxStatus: could not parse response')
|
1906
|
+
return False
|
1907
|
+
|
1908
|
+
requests = int(parsed.group(3))
|
1909
|
+
|
1910
|
+
self.mainLogger.debug('getNginxStatus: parsed reqs')
|
1911
|
+
|
1912
|
+
if self.nginxRequestsStore == None or self.nginxRequestsStore < 0:
|
1913
|
+
|
1914
|
+
self.mainLogger.debug('getNginxStatus: no reqs so storing for first time')
|
1915
|
+
|
1916
|
+
self.nginxRequestsStore = requests
|
1917
|
+
|
1918
|
+
requestsPerSecond = 0
|
1919
|
+
|
1920
|
+
else:
|
1921
|
+
|
1922
|
+
self.mainLogger.debug('getNginxStatus: reqs stored so calculating')
|
1923
|
+
self.mainLogger.debug('getNginxStatus: self.nginxRequestsStore = ' + str(self.nginxRequestsStore))
|
1924
|
+
self.mainLogger.debug('getNginxStatus: requests = ' + str(requests))
|
1925
|
+
|
1926
|
+
requestsPerSecond = float(requests - self.nginxRequestsStore) / 60
|
1927
|
+
|
1928
|
+
self.mainLogger.debug('getNginxStatus: requestsPerSecond = ' + str(requestsPerSecond))
|
1929
|
+
|
1930
|
+
self.nginxRequestsStore = requests
|
1931
|
+
|
1932
|
+
if connections != None and requestsPerSecond != None:
|
1933
|
+
|
1934
|
+
self.mainLogger.debug('getNginxStatus: returning with data')
|
1935
|
+
|
1936
|
+
return {'connections' : connections, 'reqPerSec' : requestsPerSecond}
|
1937
|
+
|
1938
|
+
else:
|
1939
|
+
|
1940
|
+
self.mainLogger.debug('getNginxStatus: returning without data')
|
1941
|
+
|
1942
|
+
return False
|
1943
|
+
|
1944
|
+
except Exception, e:
|
1945
|
+
import traceback
|
1946
|
+
self.mainLogger.error('Unable to get Nginx status - %s - Exception = ' + traceback.format_exc(), response)
|
1947
|
+
return False
|
1948
|
+
|
1949
|
+
else:
|
1950
|
+
self.mainLogger.debug('getNginxStatus: config not set')
|
1951
|
+
|
1952
|
+
return False
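The nginx check above scrapes the stub_status page, which reports the active connection count plus three cumulative counters (accepts, handled, requests); reqPerSec is then the change in the third counter across the 60-second check interval. A minimal parsing sketch against a hypothetical captured response:

import re

sample = '''Active connections: 291
server accepts handled requests
 16630948 16630948 31070465
Reading: 6 Writing: 179 Waiting: 106
'''

connections = int(re.search(r'Active connections:\s+(\d+)', sample).group(1))
accepts, handled, requests = map(int, re.search(r'\s*(\d+)\s+(\d+)\s+(\d+)', sample).groups())
print connections, requests   # 291 31070465
# A rate comes from differencing 'requests' across two scrapes and dividing by the interval.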
|
1953
|
+
|
1954
|
+
def getProcesses(self):
|
1955
|
+
self.mainLogger.debug('getProcesses: start')
|
1956
|
+
|
1957
|
+
# Get output from ps
|
1958
|
+
try:
|
1959
|
+
try:
|
1960
|
+
self.mainLogger.debug('getProcesses: attempting Popen')
|
1961
|
+
|
1962
|
+
proc = subprocess.Popen(['ps', 'auxww'], stdout=subprocess.PIPE, close_fds=True)
|
1963
|
+
ps = proc.communicate()[0]
|
1964
|
+
|
1965
|
+
if int(pythonVersion[1]) >= 6:
|
1966
|
+
try:
|
1967
|
+
proc.kill()
|
1968
|
+
except Exception, e:
|
1969
|
+
self.mainLogger.debug('Process already terminated')
|
1970
|
+
|
1971
|
+
self.mainLogger.debug('getProcesses: ps result - ' + str(ps))
|
1972
|
+
|
1973
|
+
except Exception, e:
|
1974
|
+
import traceback
|
1975
|
+
self.mainLogger.error('getProcesses: exception = ' + traceback.format_exc())
|
1976
|
+
return False
|
1977
|
+
finally:
|
1978
|
+
if int(pythonVersion[1]) >= 6:
|
1979
|
+
try:
|
1980
|
+
proc.kill()
|
1981
|
+
except Exception, e:
|
1982
|
+
self.mainLogger.debug('Process already terminated')
|
1983
|
+
|
1984
|
+
self.mainLogger.debug('getProcesses: Popen success, parsing')
|
1985
|
+
|
1986
|
+
# Split out each process
|
1987
|
+
processLines = ps.split('\n')
|
1988
|
+
|
1989
|
+
del processLines[0] # Removes the headers
|
1990
|
+
processLines.pop() # Removes a trailing empty line
|
1991
|
+
|
1992
|
+
processes = []
|
1993
|
+
|
1994
|
+
self.mainLogger.debug('getProcesses: Popen success, parsing, looping')
|
1995
|
+
|
1996
|
+
for line in processLines:
|
1997
|
+
self.mainLogger.debug('getProcesses: Popen success, parsing, loop...')
|
1998
|
+
line = line.replace("'", '') # These will break JSON. ZD38282
|
1999
|
+
line = line.replace('"', '')
|
2000
|
+
line = line.replace('\\', '\\\\')
|
2001
|
+
line = line.split(None, 10)
|
2002
|
+
processes.append(line)
|
2003
|
+
|
2004
|
+
self.mainLogger.debug('getProcesses: completed, returning')
|
2005
|
+
|
2006
|
+
return processes
|
2007
|
+
|
2008
|
+
def getRabbitMQStatus(self):
|
2009
|
+
self.mainLogger.debug('getRabbitMQStatus: start')
|
2010
|
+
|
2011
|
+
if 'rabbitMQStatusUrl' not in self.agentConfig or \
|
2012
|
+
'rabbitMQUser' not in self.agentConfig or \
|
2013
|
+
'rabbitMQPass' not in self.agentConfig or \
|
2014
|
+
self.agentConfig['rabbitMQStatusUrl'] == 'http://www.example.com:55672/json':
|
2015
|
+
|
2016
|
+
self.mainLogger.debug('getRabbitMQStatus: config not set')
|
2017
|
+
return False
|
2018
|
+
|
2019
|
+
self.mainLogger.debug('getRabbitMQStatus: config set')
|
2020
|
+
|
2021
|
+
try:
|
2022
|
+
self.mainLogger.debug('getRabbitMQStatus: attempting authentication setup')
|
2023
|
+
|
2024
|
+
manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
2025
|
+
manager.add_password(None, self.agentConfig['rabbitMQStatusUrl'], self.agentConfig['rabbitMQUser'], self.agentConfig['rabbitMQPass'])
|
2026
|
+
handler = urllib2.HTTPBasicAuthHandler(manager)
|
2027
|
+
opener = urllib2.build_opener(handler)
|
2028
|
+
urllib2.install_opener(opener)
|
2029
|
+
|
2030
|
+
self.mainLogger.debug('getRabbitMQStatus: attempting urlopen')
|
2031
|
+
req = urllib2.Request(self.agentConfig['rabbitMQStatusUrl'], None, headers)
|
2032
|
+
|
2033
|
+
# Do the request, log any errors
|
2034
|
+
request = urllib2.urlopen(req)
|
2035
|
+
response = request.read()
|
2036
|
+
|
2037
|
+
except urllib2.HTTPError, e:
|
2038
|
+
self.mainLogger.error('Unable to get RabbitMQ status - HTTPError = ' + str(e))
|
2039
|
+
return False
|
2040
|
+
|
2041
|
+
except urllib2.URLError, e:
|
2042
|
+
self.mainLogger.error('Unable to get RabbitMQ status - URLError = ' + str(e))
|
2043
|
+
return False
|
2044
|
+
|
2045
|
+
except httplib.HTTPException, e:
|
2046
|
+
self.mainLogger.error('Unable to get RabbitMQ status - HTTPException = ' + str(e))
|
2047
|
+
return False
|
2048
|
+
|
2049
|
+
except Exception, e:
|
2050
|
+
import traceback
|
2051
|
+
self.mainLogger.error('Unable to get RabbitMQ status - Exception = ' + traceback.format_exc())
|
2052
|
+
return False
|
2053
|
+
|
2054
|
+
try:
|
2055
|
+
if int(pythonVersion[1]) >= 6:
|
2056
|
+
self.mainLogger.debug('getRabbitMQStatus: json read')
|
2057
|
+
status = json.loads(response)
|
2058
|
+
|
2059
|
+
else:
|
2060
|
+
self.mainLogger.debug('getRabbitMQStatus: minjson read')
|
2061
|
+
status = minjson.safeRead(response)
|
2062
|
+
|
2063
|
+
self.mainLogger.debug(status)
|
2064
|
+
|
2065
|
+
if 'connections' not in status:
|
2066
|
+
# We are probably using the newer RabbitMQ 2.x status plugin, so try to parse that instead.
|
2067
|
+
status = {}
|
2068
|
+
connections = {}
|
2069
|
+
queues = {}
|
2070
|
+
self.mainLogger.debug('getRabbitMQStatus: using 2.x management plugin data')
|
2071
|
+
import urlparse
|
2072
|
+
|
2073
|
+
split_url = urlparse.urlsplit(self.agentConfig['rabbitMQStatusUrl'])
|
2074
|
+
|
2075
|
+
# Connections
|
2076
|
+
url = split_url[0] + '://' + split_url[1] + '/api/connections'
|
2077
|
+
self.mainLogger.debug('getRabbitMQStatus: attempting urlopen on %s', url)
|
2078
|
+
manager.add_password(None, url, self.agentConfig['rabbitMQUser'], self.agentConfig['rabbitMQPass'])
|
2079
|
+
req = urllib2.Request(url, None, headers)
|
2080
|
+
# Do the request, log any errors
|
2081
|
+
request = urllib2.urlopen(req)
|
2082
|
+
response = request.read()
|
2083
|
+
|
2084
|
+
if int(pythonVersion[1]) >= 6:
|
2085
|
+
self.mainLogger.debug('getRabbitMQStatus: connections json read')
|
2086
|
+
connections = json.loads(response)
|
2087
|
+
else:
|
2088
|
+
self.mainLogger.debug('getRabbitMQStatus: connections minjson read')
|
2089
|
+
connections = minjson.safeRead(response)
|
2090
|
+
|
2091
|
+
status['connections'] = len(connections)
|
2092
|
+
self.mainLogger.debug('getRabbitMQStatus: connections = %s', status['connections'])
|
2093
|
+
|
2094
|
+
# Queues
|
2095
|
+
url = split_url[0] + '://' + split_url[1] + '/api/queues'
|
2096
|
+
self.mainLogger.debug('getRabbitMQStatus: attempting urlopen on %s', url)
|
2097
|
+
manager.add_password(None, url, self.agentConfig['rabbitMQUser'], self.agentConfig['rabbitMQPass'])
|
2098
|
+
req = urllib2.Request(url, None, headers)
|
2099
|
+
# Do the request, log any errors
|
2100
|
+
request = urllib2.urlopen(req)
|
2101
|
+
response = request.read()
|
2102
|
+
|
2103
|
+
if int(pythonVersion[1]) >= 6:
|
2104
|
+
self.mainLogger.debug('getRabbitMQStatus: queues json read')
|
2105
|
+
queues = json.loads(response)
|
2106
|
+
else:
|
2107
|
+
self.mainLogger.debug('getRabbitMQStatus: queues minjson read')
|
2108
|
+
queues = minjson.safeRead(response)
|
2109
|
+
|
2110
|
+
status['queues'] = queues
|
2111
|
+
self.mainLogger.debug(status['queues'])
|
2112
|
+
|
2113
|
+
except Exception, e:
|
2114
|
+
import traceback
|
2115
|
+
self.mainLogger.error('Unable to load RabbitMQ status JSON - Exception = ' + traceback.format_exc())
|
2116
|
+
return False
|
2117
|
+
|
2118
|
+
self.mainLogger.debug('getRabbitMQStatus: completed, returning')
|
2119
|
+
|
2120
|
+
# Fix for queues with the same name (case 32788)
|
2121
|
+
for queue in status.get('queues', []):
|
2122
|
+
vhost = queue.get('vhost', '/')
|
2123
|
+
if vhost == '/':
|
2124
|
+
continue
|
2125
|
+
|
2126
|
+
queue['name'] = '%s/%s' % (vhost, queue['name'])
|
2127
|
+
|
2128
|
+
return status
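When the 2.x management plugin is detected above, the data comes from the /api/connections and /api/queues endpoints over HTTP basic auth. A condensed standalone sketch of that fetch, with placeholder host and credentials:

import json
import urllib2

base = 'http://localhost:55672'        # placeholder management URL
user, password = 'guest', 'guest'      # placeholder credentials

manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
manager.add_password(None, base, user, password)
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(manager)))

connections = json.loads(urllib2.urlopen(base + '/api/connections').read())
queues = json.loads(urllib2.urlopen(base + '/api/queues').read())
print len(connections), len(queues)   # connection count, queue count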
|
2129
|
+
|
2130
|
+
#
|
2131
|
+
# Plugins
|
2132
|
+
#
|
2133
|
+
|
2134
|
+
def getPlugins(self):
|
2135
|
+
self.mainLogger.debug('getPlugins: start')
|
2136
|
+
|
2137
|
+
if 'pluginDirectory' in self.agentConfig and self.agentConfig['pluginDirectory'] != '':
|
2138
|
+
|
2139
|
+
if os.access(self.agentConfig['pluginDirectory'], os.R_OK) == False:
|
2140
|
+
self.mainLogger.warning('getPlugins: Plugin path %s is set but not readable by agent. Skipping plugins.', self.agentConfig['pluginDirectory'])
|
2141
|
+
|
2142
|
+
return False
|
2143
|
+
|
2144
|
+
else:
|
2145
|
+
return False
|
2146
|
+
|
2147
|
+
# Have we already imported the plugins?
|
2148
|
+
# Only load the plugins once
|
2149
|
+
if self.plugins == None:
|
2150
|
+
self.mainLogger.debug('getPlugins: initial load from ' + self.agentConfig['pluginDirectory'])
|
2151
|
+
|
2152
|
+
sys.path.append(self.agentConfig['pluginDirectory'])
|
2153
|
+
|
2154
|
+
self.plugins = []
|
2155
|
+
plugins = []
|
2156
|
+
|
2157
|
+
# Loop through all the plugin files
|
2158
|
+
for root, dirs, files in os.walk(self.agentConfig['pluginDirectory']):
|
2159
|
+
for name in files:
|
2160
|
+
self.mainLogger.debug('getPlugins: considering: ' + name)
|
2161
|
+
|
2162
|
+
name = name.split('.', 1)
|
2163
|
+
|
2164
|
+
# Only pull in .py files (ignores others, inc .pyc files)
|
2165
|
+
try:
|
2166
|
+
if name[1] == 'py':
|
2167
|
+
|
2168
|
+
self.mainLogger.debug('getPlugins: ' + name[0] + '.' + name[1] + ' is a plugin')
|
2169
|
+
|
2170
|
+
plugins.append(name[0])
|
2171
|
+
|
2172
|
+
except IndexError, e:
|
2173
|
+
|
2174
|
+
continue
|
2175
|
+
|
2176
|
+
# Loop through all the found plugins, import them then create new objects
|
2177
|
+
for pluginName in plugins:
|
2178
|
+
self.mainLogger.debug('getPlugins: loading ' + pluginName)
|
2179
|
+
|
2180
|
+
pluginPath = os.path.join(self.agentConfig['pluginDirectory'], '%s.py' % pluginName)
|
2181
|
+
|
2182
|
+
if os.access(pluginPath, os.R_OK) == False:
|
2183
|
+
self.mainLogger.error('getPlugins: Unable to read %s so skipping this plugin.', pluginPath)
|
2184
|
+
continue
|
2185
|
+
|
2186
|
+
try:
|
2187
|
+
# Import the plugin, but only from the pluginDirectory (ensures no conflicts with other module names elsewhere in the sys.path)
|
2188
|
+
import imp
|
2189
|
+
importedPlugin = imp.load_source(pluginName, pluginPath)
|
2190
|
+
|
2191
|
+
self.mainLogger.debug('getPlugins: imported ' + pluginName)
|
2192
|
+
|
2193
|
+
# Find out the class name and then instantiate it
|
2194
|
+
pluginClass = getattr(importedPlugin, pluginName)
|
2195
|
+
|
2196
|
+
try:
|
2197
|
+
pluginObj = pluginClass(self.agentConfig, self.mainLogger, self.rawConfig)
|
2198
|
+
|
2199
|
+
except TypeError:
|
2200
|
+
|
2201
|
+
try:
|
2202
|
+
pluginObj = pluginClass(self.agentConfig, self.mainLogger)
|
2203
|
+
except TypeError:
|
2204
|
+
# Support older plugins.
|
2205
|
+
pluginObj = pluginClass()
|
2206
|
+
|
2207
|
+
self.mainLogger.debug('getPlugins: instantiated ' + pluginName)
|
2208
|
+
|
2209
|
+
# Store in class var so we can execute it again on the next cycle
|
2210
|
+
self.plugins.append(pluginObj)
|
2211
|
+
|
2212
|
+
except Exception, ex:
|
2213
|
+
import traceback
|
2214
|
+
self.mainLogger.error('getPlugins (' + pluginName + '): exception = ' + traceback.format_exc())
|
2215
|
+
|
2216
|
+
# Now execute the objects previously created
|
2217
|
+
if self.plugins != None:
|
2218
|
+
self.mainLogger.debug('getPlugins: executing plugins')
|
2219
|
+
|
2220
|
+
# Execute the plugins
|
2221
|
+
output = {}
|
2222
|
+
|
2223
|
+
for plugin in self.plugins:
|
2224
|
+
self.mainLogger.info('getPlugins: executing ' + plugin.__class__.__name__)
|
2225
|
+
|
2226
|
+
try:
|
2227
|
+
output[plugin.__class__.__name__] = plugin.run()
|
2228
|
+
|
2229
|
+
except Exception, ex:
|
2230
|
+
import traceback
|
2231
|
+
self.mainLogger.error('getPlugins: exception = ' + traceback.format_exc())
|
2232
|
+
|
2233
|
+
self.mainLogger.info('getPlugins: executed ' + plugin.__class__.__name__)
|
2234
|
+
|
2235
|
+
self.mainLogger.debug('getPlugins: returning')
|
2236
|
+
|
2237
|
+
# Each plugin should output a dictionary so we can convert it to JSON later
|
2238
|
+
return output
|
2239
|
+
|
2240
|
+
else:
|
2241
|
+
self.mainLogger.debug('getPlugins: no plugins, returning false')
|
2242
|
+
|
2243
|
+
return False
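The plugin convention above is a .py file in pluginDirectory containing a class named after the file; the class is imported with imp.load_source, instantiated once, and its run() output is posted back each cycle. A stripped-down sketch of that load-and-run loop, using a hypothetical plugin directory:

import imp
import os

plugin_dir = '/usr/local/sd-agent-plugins'   # hypothetical path

results = {}
for filename in os.listdir(plugin_dir):
    name, ext = os.path.splitext(filename)
    if ext != '.py':
        continue
    module = imp.load_source(name, os.path.join(plugin_dir, filename))
    plugin_class = getattr(module, name)     # class name must match the file name
    results[name] = plugin_class().run()     # older-style plugin: no-arg constructor
print results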
|
2244
|
+
|
2245
|
+
#
|
2246
|
+
# Postback
|
2247
|
+
#
|
2248
|
+
|
2249
|
+
def doPostBack(self, postBackData):
|
2250
|
+
self.mainLogger.debug('doPostBack: start')
|
2251
|
+
|
2252
|
+
try:
|
2253
|
+
self.mainLogger.debug('doPostBack: attempting postback: ' + self.agentConfig['sdUrl'])
|
2254
|
+
|
2255
|
+
# Build the request handler
|
2256
|
+
request = urllib2.Request(self.agentConfig['sdUrl'] + '/postback/', postBackData, headers)
|
2257
|
+
|
2258
|
+
# Do the request, log any errors
|
2259
|
+
response = urllib2.urlopen(request)
|
2260
|
+
|
2261
|
+
self.mainLogger.info('Postback response: %s', response.read())
|
2262
|
+
|
2263
|
+
except urllib2.HTTPError, e:
|
2264
|
+
self.mainLogger.error('doPostBack: HTTPError = %s', e)
|
2265
|
+
return False
|
2266
|
+
|
2267
|
+
except urllib2.URLError, e:
|
2268
|
+
self.mainLogger.error('doPostBack: URLError = %s', e)
|
2269
|
+
return False
|
2270
|
+
|
2271
|
+
except httplib.HTTPException, e: # Added for case #26701
|
2272
|
+
self.mainLogger.error('doPostBack: HTTPException = %s', e)
|
2273
|
+
return False
|
2274
|
+
|
2275
|
+
except Exception, e:
|
2276
|
+
import traceback
|
2277
|
+
self.mainLogger.error('doPostBack: Exception = ' + traceback.format_exc())
|
2278
|
+
return False
|
2279
|
+
|
2280
|
+
self.mainLogger.debug('doPostBack: completed')
|
2281
|
+
|
2282
|
+
def doChecks(self, sc, firstRun, systemStats=False):
|
2283
|
+
macV = None
|
2284
|
+
if sys.platform == 'darwin':
|
2285
|
+
macV = platform.mac_ver()
|
2286
|
+
|
2287
|
+
if not self.topIndex: # We cache the line index from which to read from top
|
2288
|
+
# Output from top is slightly modified on OS X 10.6+ (case #28239)
|
2289
|
+
if macV and [int(v) for v in macV[0].split('.')] >= [10, 6, 0]:
|
2290
|
+
self.topIndex = 6
|
2291
|
+
else:
|
2292
|
+
self.topIndex = 5
|
2293
|
+
|
2294
|
+
if not self.os:
|
2295
|
+
if macV:
|
2296
|
+
self.os = 'mac'
|
2297
|
+
elif sys.platform.find('freebsd') != -1:
|
2298
|
+
self.os = 'freebsd'
|
2299
|
+
else:
|
2300
|
+
self.os = 'linux'
|
2301
|
+
|
2302
|
+
# We only need to set this if we're on FreeBSD
|
2303
|
+
if self.linuxProcFsLocation == None and self.os == 'freebsd':
|
2304
|
+
self.linuxProcFsLocation = self.getMountedLinuxProcFsLocation()
|
2305
|
+
else:
|
2306
|
+
self.linuxProcFsLocation = '/proc'
|
2307
|
+
|
2308
|
+
self.mainLogger.debug('doChecks: start')
|
2309
|
+
|
2310
|
+
# Do the checks
|
2311
|
+
apacheStatus = self.getApacheStatus()
|
2312
|
+
diskUsage = self.getDiskUsage()
|
2313
|
+
loadAvrgs = self.getLoadAvrgs()
|
2314
|
+
memory = self.getMemoryUsage()
|
2315
|
+
mysqlStatus = self.getMySQLStatus()
|
2316
|
+
networkTraffic = self.getNetworkTraffic()
|
2317
|
+
nginxStatus = self.getNginxStatus()
|
2318
|
+
processes = self.getProcesses()
|
2319
|
+
rabbitmq = self.getRabbitMQStatus()
|
2320
|
+
mongodb = self.getMongoDBStatus()
|
2321
|
+
couchdb = self.getCouchDBStatus()
|
2322
|
+
plugins = self.getPlugins()
|
2323
|
+
ioStats = self.getIOStats()
|
2324
|
+
cpuStats = self.getCPUStats()
|
2325
|
+
|
2326
|
+
if processes is not False and len(processes) > 4194304:
|
2327
|
+
self.mainLogger.warn('doChecks: process list larger than 4MB limit, so it has been stripped')
|
2328
|
+
|
2329
|
+
processes = []
|
2330
|
+
|
2331
|
+
self.mainLogger.debug('doChecks: checks success, build payload')
|
2332
|
+
|
2333
|
+
self.mainLogger.info('doChecks: agent key = ' + self.agentConfig['agentKey'])
|
2334
|
+
|
2335
|
+
checksData = {}
|
2336
|
+
|
2337
|
+
# Basic payload items
|
2338
|
+
checksData['os'] = self.os
|
2339
|
+
checksData['agentKey'] = self.agentConfig['agentKey']
|
2340
|
+
checksData['agentVersion'] = self.agentConfig['version']
|
2341
|
+
|
2342
|
+
if diskUsage != False:
|
2343
|
+
|
2344
|
+
checksData['diskUsage'] = diskUsage
|
2345
|
+
|
2346
|
+
if loadAvrgs != False:
|
2347
|
+
|
2348
|
+
checksData['loadAvrg'] = loadAvrgs['1']
|
2349
|
+
|
2350
|
+
if memory != False:
|
2351
|
+
|
2352
|
+
checksData['memPhysUsed'] = memory['physUsed']
|
2353
|
+
checksData['memPhysFree'] = memory['physFree']
|
2354
|
+
checksData['memSwapUsed'] = memory['swapUsed']
|
2355
|
+
checksData['memSwapFree'] = memory['swapFree']
|
2356
|
+
checksData['memCached'] = memory['cached']
|
2357
|
+
|
2358
|
+
if networkTraffic != False:
|
2359
|
+
|
2360
|
+
checksData['networkTraffic'] = networkTraffic
|
2361
|
+
|
2362
|
+
if processes != False:
|
2363
|
+
|
2364
|
+
checksData['processes'] = processes
|
2365
|
+
|
2366
|
+
# Apache Status
|
2367
|
+
if apacheStatus != False:
|
2368
|
+
|
2369
|
+
if 'reqPerSec' in apacheStatus:
|
2370
|
+
checksData['apacheReqPerSec'] = apacheStatus['reqPerSec']
|
2371
|
+
|
2372
|
+
if 'busyWorkers' in apacheStatus:
|
2373
|
+
checksData['apacheBusyWorkers'] = apacheStatus['busyWorkers']
|
2374
|
+
|
2375
|
+
if 'idleWorkers' in apacheStatus:
|
2376
|
+
checksData['apacheIdleWorkers'] = apacheStatus['idleWorkers']
|
2377
|
+
|
2378
|
+
self.mainLogger.debug('doChecks: built optional payload apacheStatus')
|
2379
|
+
|
2380
|
+
# MySQL Status
|
2381
|
+
if mysqlStatus != False:
|
2382
|
+
|
2383
|
+
checksData['mysqlConnections'] = mysqlStatus['connections']
|
2384
|
+
checksData['mysqlCreatedTmpDiskTables'] = mysqlStatus['createdTmpDiskTables']
|
2385
|
+
checksData['mysqlMaxUsedConnections'] = mysqlStatus['maxUsedConnections']
|
2386
|
+
checksData['mysqlOpenFiles'] = mysqlStatus['openFiles']
|
2387
|
+
checksData['mysqlSlowQueries'] = mysqlStatus['slowQueries']
|
2388
|
+
checksData['mysqlTableLocksWaited'] = mysqlStatus['tableLocksWaited']
|
2389
|
+
checksData['mysqlThreadsConnected'] = mysqlStatus['threadsConnected']
|
2390
|
+
|
2391
|
+
if mysqlStatus['secondsBehindMaster'] != None:
|
2392
|
+
checksData['mysqlSecondsBehindMaster'] = mysqlStatus['secondsBehindMaster']
|
2393
|
+
|
2394
|
+
# Nginx Status
|
2395
|
+
if nginxStatus != False:
|
2396
|
+
checksData['nginxConnections'] = nginxStatus['connections']
|
2397
|
+
checksData['nginxReqPerSec'] = nginxStatus['reqPerSec']
|
2398
|
+
|
2399
|
+
# RabbitMQ
|
2400
|
+
if rabbitmq != False:
|
2401
|
+
checksData['rabbitMQ'] = rabbitmq
|
2402
|
+
|
2403
|
+
# MongoDB
|
2404
|
+
if mongodb != False:
|
2405
|
+
checksData['mongoDB'] = mongodb
|
2406
|
+
|
2407
|
+
# CouchDB
|
2408
|
+
if couchdb != False:
|
2409
|
+
checksData['couchDB'] = couchdb
|
2410
|
+
|
2411
|
+
# Plugins
|
2412
|
+
if plugins != False:
|
2413
|
+
checksData['plugins'] = plugins
|
2414
|
+
|
2415
|
+
if ioStats != False:
|
2416
|
+
checksData['ioStats'] = ioStats
|
2417
|
+
|
2418
|
+
if cpuStats != False:
|
2419
|
+
checksData['cpuStats'] = cpuStats
|
2420
|
+
|
2421
|
+
# Include system stats on first postback
|
2422
|
+
if firstRun == True:
|
2423
|
+
checksData['systemStats'] = systemStats
|
2424
|
+
self.mainLogger.debug('doChecks: built optional payload systemStats')
|
2425
|
+
|
2426
|
+
# Include server identifiers
|
2427
|
+
import socket
|
2428
|
+
|
2429
|
+
try:
|
2430
|
+
checksData['internalHostname'] = socket.gethostname()
|
2431
|
+
self.mainLogger.info('doChecks: hostname = ' + checksData['internalHostname'])
|
2432
|
+
|
2433
|
+
except socket.error, e:
|
2434
|
+
self.mainLogger.debug('Unable to get hostname: ' + str(e))
|
2435
|
+
|
2436
|
+
self.mainLogger.debug('doChecks: payload: %s' % checksData)
|
2437
|
+
self.mainLogger.debug('doChecks: payloads built, convert to json')
|
2438
|
+
|
2439
|
+
# Post back the data
|
2440
|
+
if int(pythonVersion[1]) >= 6:
|
2441
|
+
self.mainLogger.debug('doChecks: json convert')
|
2442
|
+
|
2443
|
+
try:
|
2444
|
+
payload = json.dumps(checksData, encoding='latin1').encode('utf-8')
|
2445
|
+
|
2446
|
+
except Exception, e:
|
2447
|
+
import traceback
|
2448
|
+
self.mainLogger.error('doChecks: failed encoding payload to json. Exception = ' + traceback.format_exc())
|
2449
|
+
return False
|
2450
|
+
|
2451
|
+
else:
|
2452
|
+
self.mainLogger.debug('doChecks: minjson convert')
|
2453
|
+
|
2454
|
+
payload = minjson.write(checksData)
|
2455
|
+
|
2456
|
+
self.mainLogger.debug('doChecks: json converted, hash')
|
2457
|
+
|
2458
|
+
payloadHash = md5(payload).hexdigest()
|
2459
|
+
postBackData = urllib.urlencode({'payload' : payload, 'hash' : payloadHash})
|
2460
|
+
|
2461
|
+
self.mainLogger.debug('doChecks: hashed, doPostBack')
|
2462
|
+
|
2463
|
+
self.doPostBack(postBackData)
|
2464
|
+
|
2465
|
+
self.mainLogger.debug('doChecks: posted back, reschedule')
|
2466
|
+
|
2467
|
+
sc.enter(self.agentConfig['checkFreq'], 1, self.doChecks, (sc, False))
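The postback body built above is the JSON payload plus an MD5 of exactly those bytes, urlencoded as 'payload' and 'hash' so the receiving end can verify integrity. A compact sketch of assembling that body with illustrative values:

import json
import urllib

try:
    from hashlib import md5
except ImportError:              # Python < 2.5
    from md5 import new as md5

checks_data = {'agentKey': 'example-key', 'loadAvrg': 0.42}   # illustrative payload only
payload = json.dumps(checks_data)
post_body = urllib.urlencode({'payload': payload, 'hash': md5(payload).hexdigest()})
print post_body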
|
2468
|
+
|
2469
|
+
def getMountedLinuxProcFsLocation(self):
|
2470
|
+
self.mainLogger.debug('getMountedLinuxProcFsLocation: attempting to fetch mounted partitions')
|
2471
|
+
|
2472
|
+
# Lets check if the Linux like style procfs is mounted
|
2473
|
+
try:
|
2474
|
+
proc = subprocess.Popen(['mount'], stdout = subprocess.PIPE, close_fds = True)
|
2475
|
+
mountedPartitions = proc.communicate()[0]
|
2476
|
+
|
2477
|
+
if int(pythonVersion[1]) >= 6:
|
2478
|
+
try:
|
2479
|
+
proc.kill()
|
2480
|
+
except Exception, e:
|
2481
|
+
self.mainLogger.debug('Process already terminated')
|
2482
|
+
|
2483
|
+
location = re.search(r'linprocfs on (.*?) \(.*?\)', mountedPartitions)
|
2484
|
+
|
2485
|
+
except OSError, e:
|
2486
|
+
self.mainLogger.error('getMountedLinuxProcFsLocation: OS error: ' + str(e))
location = None # fall through to the /proc default below
|
2487
|
+
|
2488
|
+
# Linux like procfs file system is not mounted so we return False, else we return mount point location
|
2489
|
+
if location == None:
|
2490
|
+
self.mainLogger.debug('getMountedLinuxProcFsLocation: none found so using /proc')
|
2491
|
+
return '/proc' # Can't find anything so we might as well try this
|
2492
|
+
|
2493
|
+
location = location.group(1)
|
2494
|
+
|
2495
|
+
self.mainLogger.debug('getMountedLinuxProcFsLocation: using ' + location)
|
2496
|
+
|
2497
|
+
return location
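On FreeBSD the Linux-compatible procfs, if mounted, shows up in mount output as a linprocfs line, which is what the regex above captures. A tiny sketch against a hypothetical sample line:

import re

sample = 'linprocfs on /compat/linux/proc (linprocfs, local)'   # hypothetical mount output line
match = re.search(r'linprocfs on (.*?) \(.*?\)', sample)
if match:
    print match.group(1)   # /compat/linux/proc
else:
    print '/proc'          # fall back, as the agent does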
|