nv-client 1.66.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +4 -0
- data/Gemfile +4 -0
- data/README +5 -0
- data/Rakefile +2 -0
- data/bin/nv +709 -0
- data/lib/nv-client.rb +5 -0
- data/lib/nv-client/version.rb +5 -0
- data/lib/nventory.rb +2086 -0
- data/nv-client.gemspec +22 -0
- metadata +85 -0
data/lib/nv-client.rb
ADDED
data/lib/nventory.rb
ADDED
|
@@ -0,0 +1,2086 @@
|
|
|
1
|
+
begin
|
|
2
|
+
# Try loading facter w/o gems first so that we don't introduce a
|
|
3
|
+
# dependency on gems if it is not needed.
|
|
4
|
+
require 'facter' # Facter
|
|
5
|
+
rescue LoadError
|
|
6
|
+
require 'rubygems'
|
|
7
|
+
require 'facter'
|
|
8
|
+
end
|
|
9
|
+
require 'facter/util/memory' # used for converting MB to GB and stuff
|
|
10
|
+
require 'uri'
|
|
11
|
+
require 'net/http'
|
|
12
|
+
require 'net/https'
|
|
13
|
+
require 'cgi'
|
|
14
|
+
require 'rexml/document'
|
|
15
|
+
require 'yaml'
|
|
16
|
+
|
|
17
|
+
# Only use json gem if its there
|
|
18
|
+
begin
|
|
19
|
+
require 'rubygems'
|
|
20
|
+
require 'json'
|
|
21
|
+
HAS_JSON_GEM = true
|
|
22
|
+
rescue LoadError
|
|
23
|
+
HAS_JSON_GEM = false
|
|
24
|
+
end
|
|
25
|
+
|
|
26
|
+
# fix for ruby http bug where it encodes the params incorrectly
#
# Monkey patch of Net::HTTP::Put#set_form_data.  Unlike the stock stdlib
# implementation, array values are expanded into repeated key=value pairs
# (one per element) so Rails-style "key[]=a&key[]=b" parameters survive
# the round trip intact.
#
# NOTE(review): this relies on the private Net::HTTPHeader#urlencode
# helper, which exists in older Ruby stdlibs — confirm it is still
# present on the Ruby versions this gem targets.
class Net::HTTP::Put
  # params - Hash of form fields; values may be scalars or Arrays.
  # sep    - separator between encoded pairs (default '&').
  # Sets the request body and the form-urlencoded content type.
  def set_form_data(params, sep = '&')
    params_array = params.map do |k,v|
      if v.is_a? Array
        # Emit one "key=value" pair per array element, joined by sep.
        v.inject([]){|c, val| c << "#{urlencode(k.to_s)}=#{urlencode(val.to_s)}"}.join(sep)
      else
        "#{urlencode(k.to_s)}=#{urlencode(v.to_s)}"
      end
    end
    self.body = params_array.join(sep)
    self.content_type = 'application/x-www-form-urlencoded'
  end
end
|
|
40
|
+
|
|
41
|
+
# Default password source used throughout the client.  Prompts on the
# controlling terminal with echo disabled, then caches the entered value
# so the user is only asked once per process.
module PasswordCallback
  @@password = nil

  # Returns the cached password, prompting the user the first time.
  def self.get_password
    @@password = prompt_for_password until @@password
    @@password
  end

  # Reads a single password from stdin with terminal echo turned off.
  def self.prompt_for_password
    system "stty -echo"
    print "Password: "
    entered = $stdin.gets.chomp
    system "stty echo"
    entered
  end
end
|
|
53
|
+
|
|
54
|
+
# Module and class names are constants, and thus have to start with a
# capital letter.
#
# Namespace module for the client; NVentory::Client is defined below.
module NVentory
end
|
|
58
|
+
|
|
59
|
+
# Default configuration files, read in order (system-wide first, then the
# per-user file so it can override).  NOTE(review): Client#initialize
# appends any caller-supplied configfile to this constant with <<, so it
# must remain unfrozen/mutable.
CONFIG_FILES = ['/etc/nventory.conf', "#{ENV['HOME']}/.nventory.conf"]
|
|
60
|
+
|
|
61
|
+
class NVentory::Client
  # When set to true, set_objects issues HTTP DELETEs instead of PUTs.
  # That path is deprecated (set_objects warns about it) — prefer
  # delete_objects.
  attr_accessor :delete
|
|
63
|
+
|
|
64
|
+
# Builds a client.  Accepts either an options hash (:debug, :dryrun,
# :server, :cookiefile, :proxy_server, :sso_server, :configfile) or the
# legacy positional form (debug, dryrun, configfile, server, proxy_server);
# both are normalized by legacy_initializeparms.  Config files are then
# read and may override/fill in settings (see notes inline).
def initialize(data=nil,*moredata)
  # NOTE(review): moredata is a splat and is therefore always an Array
  # (truthy even when empty), so this condition is always true.
  if data || moredata
    parms = legacy_initializeparms(data,moredata)
    # def initialize(debug=false, dryrun=false, configfile=nil, server=nil)
    parms[:debug] ? (@debug = parms[:debug]) : @debug = (nil)
    parms[:dryrun] ? (@dryrun = parms[:dryrun]) : @dryrun = (nil)
    parms[:server] ? (@server = parms[:server]) : @server = (nil)
    parms[:cookiefile] ? @cookiefile = parms[:cookiefile] : @cookiefile = "#{ENV['HOME']}/.nventory_cookie"
    if parms[:proxy_server] == false
      # The string 'nil' (not nil) is a deliberate sentinel meaning
      # "explicitly no proxy", distinct from nil meaning "unspecified".
      @proxy_server = 'nil'
    elsif parms[:proxy_server]
      @proxy_server = parms[:proxy_server]
    else
      @proxy_server = nil
    end
    parms[:sso_server] ? (@sso_server = parms[:sso_server]) : (@sso_server = nil)
    parms[:configfile] ? (configfile = parms[:configfile]) : (configfile = nil)
  end
  @ca_file = nil
  @ca_path = nil
  @dhparams = '/etc/nventory/dhparams'
  @delete = false # Initialize the variable, see attr_accessor above
  @dmi_data = nil

  # NOTE(review): this mutates the CONFIG_FILES constant, so repeated
  # Client construction with a configfile appends it again each time.
  CONFIG_FILES << configfile if configfile

  # Read each config file in order; later files win for keys that assign
  # unconditionally (e.g. 'server').
  CONFIG_FILES.each do |configfile|
    if File.exist?(configfile)
      IO.foreach(configfile) do |line|
        line.chomp!
        next if (line =~ /^\s*$/); # Skip blank lines
        next if (line =~ /^\s*#/); # Skip comments
        key, value = line.split(/\s*=\s*/, 2)
        if key == 'server'
          # NOTE(review): this overrides any server passed to the
          # constructor — config file wins unconditionally for 'server',
          # unlike sso_server/proxy_server which only fill in blanks.
          @server = value
          # Warn the user, as this could potentially be confusing
          # if they don't realize there's a config file lying
          # around
          warn "Using server #{@server} from #{configfile}" if (@debug)
        elsif key == 'sso_server' && !@sso_server
          @sso_server = value
          warn "Using sso_server #{@sso_server} from #{configfile}" if (@debug)
        elsif key == 'proxy_server' && !@proxy_server
          @proxy_server = value
          warn "Using proxy_server #{@proxy_server} from #{configfile}" if (@debug)
        elsif key == 'ca_file'
          @ca_file = value
          warn "Using ca_file #{@ca_file} from #{configfile}" if (@debug)
        elsif key == 'ca_path'
          @ca_path = value
          warn "Using ca_path #{@ca_path} from #{configfile}" if (@debug)
        elsif key == 'dhparams'
          @dhparams = value
          warn "Using dhparams #{@dhparams} from #{configfile}" if (@debug)
        elsif key == 'cookiefile'
          @cookiefile = value
          warn "Using cookiefile #{@cookiefile} from #{configfile}" if (@debug)
        end
      end
    end
  end

  # Fall back to defaults when neither the caller nor a config file
  # supplied a value.
  unless @server
    @server = 'http://nventory/'
    warn "Using server #{@server}" if @debug
  end
  @sso_server = 'https://sso.example.com/' unless @sso_server

  # Make sure the server URL ends in a / so that we can append paths to it
  # using URI.join
  if @server !~ %r{/$}
    @server << '/'
  end
end
|
|
138
|
+
|
|
139
|
+
# Normalizes the two supported calling conventions for #initialize into a
# single options hash.  Callers may pass an options Hash directly, or the
# legacy positional form:
#   initialize(debug=false, dryrun=false, configfile=nil, server=nil, proxy_server=nil)
# In the legacy form the server value is coerced to a full URL ending in
# a trailing slash.  Returns the normalized options hash.
def legacy_initializeparms(data,moredata)
  opts = {}
  if data.kind_of?(Hash)
    # Modern style: the options hash is used as-is.
    opts = data
  elsif data || moredata
    # Legacy positional style.
    opts[:debug]      = data
    opts[:dryrun]     = moredata[0]
    opts[:configfile] = moredata[1]
    server = moredata[2]
    if server
      opts[:server] =
        if server =~ /^http/
          # Already a URL; just guarantee the trailing slash.
          server =~ /\/$/ ? server : "#{server}/"
        else
          # Bare hostname: assume plain http.
          "http://#{server}/"
        end
    end
    opts[:proxy_server] = moredata[3]
  else
    raise 'Syntax Error'
  end
  warn "** Using server #{opts[:server]} **" if opts[:server]
  warn "** Using proxy_server #{opts[:proxy_server]} **" if opts[:proxy_server]
  opts
end
|
|
165
|
+
|
|
166
|
+
# Normalizes the two calling conventions for #get_objects into a single
# options hash.  Accepts either the legacy positional form:
#   get_objects(objecttype, get, exactget, regexget, exclude, andget,
#               includes=nil, login=nil)
# (first argument a String) or an options Hash carrying at least a
# String :objecttype.  Ensures :password_callback is always populated,
# defaulting to PasswordCallback.  Returns the normalized hash.
def legacy_getparms(data,moredata)
  case data
  when String
    # Legacy positional style: map the trailing arguments onto keys.
    positional = [:get, :exactget, :regexget, :exclude, :andget, :includes, :login]
    opts = { :objecttype => data }
    positional.each_with_index { |key, idx| opts[key] = moredata[idx] }
    opts[:password_callback] = PasswordCallback
    opts
  when Hash
    raise 'Syntax Error: Missing :objecttype' unless data[:objecttype].kind_of?(String)
    data[:password_callback] = PasswordCallback unless data[:password_callback]
    data
  else
    raise 'Syntax Error'
  end
end
|
|
190
|
+
|
|
191
|
+
# FIXME: get, exactget, regexget, exclude and includes should all merge into
# a single search options hash parameter
#
# Queries the server for objects of the given type and returns a hash of
# results keyed by each object's 'name' (falling back to 'id').  Accepts
# either an options hash or the legacy positional form (both normalized
# by legacy_getparms).  Search criteria hashes (:get, :exactget,
# :regexget, :exclude, :andget) map field name => array of values;
# :includes pulls in associated models; :format may be 'json' (requires
# the json gem) or xml (default).  Raises via Net::HTTPResponse#error!
# on any non-OK response.
def get_objects(data,*moredata)
  parms = legacy_getparms(data,moredata)
  objecttype = parms[:objecttype]
  includes = parms[:includes]
  login = parms[:login]
  password_callback = parms[:password_callback]
  # PS-704 - node_groups controller when format.xml, includes some custom
  # model methods that create a lot of querying joins, so this is a way to
  # 'override' it on cli side - the server will look for that param to skip
  # these def methods when it renders. webparams = {:nodefmeth => 1}
  webparams = parms[:webparams]

  #
  # Package up the search parameters in the format the server expects
  #
  metaget = []
  # Only the plain and exact criteria honor the 'enable_aliases' pseudo
  # parameter (passed through unprefixed when its value is the integer 1).
  append_getparam(metaget, parms[:get],      '',         true)
  append_getparam(metaget, parms[:exactget], 'exact_',   true)
  append_getparam(metaget, parms[:regexget], 'regex_',   false)
  append_getparam(metaget, parms[:exclude],  'exclude_', false)
  append_getparam(metaget, parms[:andget],   'and_',     false)
  if includes
    # includes = ['status', 'rack:datacenter']
    # maps to
    # include[status]=&include[rack]=datacenter
    includes.each do |inc|
      incstring = ''
      if inc.include?(':')
        incparts = inc.split(':')
        lastpart = incparts.pop
        incstring = 'include'
        incparts.each { |part| incstring << "[#{part}]" }
        incstring << "=#{lastpart}"
      else
        incstring = "include[#{inc}]="
      end
      metaget << incstring
    end
  end
  if webparams && webparams.kind_of?(Hash)
    # Extra raw key=value parameters passed straight through to the server.
    webparams.each_pair{|k,v| metaget << "#{k}=#{v}"}
  end

  querystring = metaget.join('&')

  #
  # Send the query to the server
  #
  if parms[:format] == 'json'
    if HAS_JSON_GEM
      uri = URI::join(@server, "#{objecttype}.json?#{querystring}")
    else
      warn "Warning: Cannot use json format because json gem is not installed. Using xml format instead."
      parms[:format] = 'xml'
      uri = URI::join(@server, "#{objecttype}.xml?#{querystring}")
    end
  else
    uri = URI::join(@server, "#{objecttype}.xml?#{querystring}")
  end

  req = Net::HTTP::Get.new(uri.request_uri)
  warn "GET URL: #{uri}" if (@debug)
  response = send_request(req, uri, login, password_callback)
  # Follow permanent redirects from the server.
  while response.kind_of?(Net::HTTPMovedPermanently)
    uri = URI.parse(response['Location'])
    req = Net::HTTP::Get.new(uri.request_uri)
    response = send_request(req, uri, login, password_callback)
  end
  if !response.kind_of?(Net::HTTPOK)
    puts response.body
    response.error!
  end

  if parms[:format] == 'json'
    results = JSON.parse(response.body)
  else
    #
    # Parse the XML data from the server
    # This tries to render the XML into the best possible representation
    # as a Perl hash. It may need to evolve over time.
    puts response.body if (@debug)
    results_xml = REXML::Document.new(response.body)
    results = {}
    if results_xml.root.elements["/#{objecttype}"]
      results_xml.root.elements["/#{objecttype}"].each do |elem|
        # For some reason Elements[] is returning things other than elements,
        # like text nodes
        next if elem.node_type != :element
        data = xml_to_ruby(elem)
        name = data['name'] || data['id']
        if !results[name].nil?
          warn "Duplicate entries for #{name}. Only one will be shown."
        end
        results[name] = data
      end
    end
  end

  #puts results.inspect if (@debug)
  puts YAML.dump(results) if (@debug)
  results
end

# Helper for get_objects: appends one CGI-escaped query term per search
# value to metaget.  criteria is a hash of field name => array of values
# (may be nil, in which case nothing is appended); prefix distinguishes
# the criteria family ('', 'exact_', 'regex_', 'exclude_', 'and_').
# When allow_aliases is true the special 'enable_aliases' => 1 entry is
# passed through unprefixed and unescaped, matching the server contract.
def append_getparam(metaget, criteria, prefix, allow_aliases)
  return unless criteria
  criteria.each_pair do |key,values|
    if allow_aliases && key == 'enable_aliases' && values == 1
      metaget << "#{key}=#{values}"
    elsif values.length > 1
      values.each do |value|
        metaget << "#{prefix}#{key}[]=#{CGI.escape(value)}"
      end
    else
      # This isn't strictly necessary, specifying a single value via
      # 'key[]=[value]' would work fine, but this makes for a cleaner URL
      # and slightly reduced processing on the backend
      metaget << "#{prefix}#{key}=#{CGI.escape(values[0])}"
    end
  end
end
|
|
368
|
+
|
|
369
|
+
# Fetches the list of searchable field names for the given object type
# from the server's "#{objecttype}/field_names.xml" endpoint and returns
# them as an array of strings.  Follows permanent redirects; raises via
# Net::HTTPResponse#error! on any non-OK response.
def get_field_names(objecttype, login=nil, password_callback=PasswordCallback)
  uri = URI::join(@server, "#{objecttype}/field_names.xml")
  req = Net::HTTP::Get.new(uri.request_uri)
  warn "GET URL: #{uri}" if @debug
  response = send_request(req, uri, login, password_callback)
  # Chase any permanent redirects the server hands back.
  while response.kind_of?(Net::HTTPMovedPermanently)
    uri = URI.parse(response['Location'])
    req = Net::HTTP::Get.new(uri.request_uri)
    response = send_request(req, uri, login, password_callback)
  end
  unless response.kind_of?(Net::HTTPOK)
    puts response.body
    response.error!
  end

  puts response.body if @debug
  doc = REXML::Document.new(response.body)
  field_names = []
  doc.root.elements['/field_names'].each do |node|
    # Elements[] can also yield non-element nodes (e.g. text), so filter
    # on node type before collecting.
    field_names << node.text if node.node_type == :element
  end
  field_names
end
|
|
396
|
+
|
|
397
|
+
# Returns the sorted, de-duplicated array of node names belonging to the
# named node group, recursing through any child groups.  An unknown group
# name yields an empty array.
def get_expanded_nodegroup(nodegroup)
  results = get_objects(
    :objecttype => 'node_groups',
    :exactget   => {'name' => [nodegroup]},
    :includes   => ['nodes', 'child_groups'])
  # Collect names as hash keys so duplicates across child groups collapse.
  nodes = {}
  if results.has_key?(nodegroup)
    group = results[nodegroup]
    if group.has_key?('nodes')
      group['nodes'].each { |node| nodes[node['name']] = true }
    end
    if group.has_key?('child_groups')
      group['child_groups'].each do |child|
        get_expanded_nodegroup(child['name']).each { |child_node| nodes[child_node] = true }
      end
    end
  end
  nodes.keys.sort
end
|
|
416
|
+
|
|
417
|
+
# The results argument can be a reference to a hash returned by a
# call to get_objects, in which case the data will be PUT to each object
# there, thus updating them. Or it can be nil, in which case the
# data will be POSTed to create a new entry.
#
# data is a hash of fields to set; keys not already in 'model[field]'
# form are wrapped as 'objecttype[field]'.  Returns the number of
# requests that succeeded.  If @delete is set (deprecated), DELETEs are
# issued instead of PUTs.
def set_objects(objecttypes, results, data, login, password_callback=PasswordCallback)
  # Convert any keys which don't already specify a model
  # from 'foo' to 'objecttype[foo]'
  objecttype = singularize(objecttypes)
  cleandata = {}
  data.each_pair do |key, value|
    if key !~ /\[.+\]/
      cleandata["#{objecttype}[#{key}]"] = value
    else
      cleandata[key] = value
    end
  end

  #puts cleandata.inspect if (@debug)
  puts YAML.dump(cleandata) if (@debug)

  successcount = 0
  if results && !results.empty?
    results.each_pair do |result_name, result|
      if @delete
        # Deprecated delete-via-set path.
        # NOTE(review): unlike the PUT and POST branches below, this
        # branch does not check @dryrun before sending — confirm whether
        # that is intentional.
        warn "Deleting objects via set_objects is deprecated, use delete_objects instead"
        uri = URI::join(@server, "#{objecttypes}/#{result['id']}.xml")
        req = Net::HTTP::Delete.new(uri.request_uri)
        req.set_form_data(cleandata)
        response = send_request(req, uri, login, password_callback)
        # Follow permanent redirects.
        while response.kind_of?(Net::HTTPMovedPermanently)
          uri = URI.parse(response['Location'])
          req = Net::HTTP::Delete.new(uri.request_uri)
          response = send_request(req, uri, login, password_callback)
        end
        if response.kind_of?(Net::HTTPOK)
          successcount += 1
        else
          puts "DELETE to #{uri} failed for #{result_name}:"
          puts response.body
        end
      # PUT to update an existing object
      elsif result['id']
        uri = URI::join(@server, "#{objecttypes}/#{result['id']}.xml")
        req = Net::HTTP::Put.new(uri.request_uri)
        req.set_form_data(cleandata)
        warn "PUT to URL: #{uri}" if (@debug)
        if !@dryrun
          response = send_request(req, uri, login, password_callback)
          # Follow permanent redirects, re-attaching the form data.
          while response.kind_of?(Net::HTTPMovedPermanently)
            uri = URI.parse(response['Location'])
            req = Net::HTTP::Put.new(uri.request_uri)
            req.set_form_data(cleandata)
            response = send_request(req, uri, login, password_callback)
          end
          if response.kind_of?(Net::HTTPOK)
            successcount += 1
          else
            puts "PUT to #{uri} failed for #{result_name}:"
            puts response.body
          end
        end
      else
        warn "set_objects passed a bogus results hash, #{result_name} has no id field"
      end
    end
  else
    # No existing results: POST to create a new object.
    uri = URI::join(@server, "#{objecttypes}.xml")
    req = Net::HTTP::Post.new(uri.request_uri)
    req.set_form_data(cleandata)
    warn "POST to URL: #{uri}" if (@debug)
    if !@dryrun
      response = send_request(req, uri, login, password_callback)
      # Follow permanent redirects, re-attaching the form data.
      while response.kind_of?(Net::HTTPMovedPermanently)
        uri = URI.parse(response['Location'])
        req = Net::HTTP::Post.new(uri.request_uri)
        req.set_form_data(cleandata)
        response = send_request(req, uri, login, password_callback)
      end
      # Creation may legitimately answer 200 or 201.
      if response.kind_of?(Net::HTTPOK) || response.kind_of?(Net::HTTPCreated)
        successcount += 1
      else
        puts "POST to #{uri} failed."
        puts response.body
      end
    end
  end

  successcount
end
|
|
506
|
+
|
|
507
|
+
# The results argument should be a reference to a hash returned by a
# call to get_objects.
#
# Deletes each object in results via an HTTP DELETE to
# "#{objecttypes}/#{id}.xml", following permanent redirects.  Entries
# without an 'id' field are skipped with a warning.  Returns the number
# of successful deletions.
def delete_objects(objecttypes, results, login, password_callback=PasswordCallback)
  successcount = 0
  results.each_pair do |result_name, result|
    if result['id']
      uri = URI::join(@server, "#{objecttypes}/#{result['id']}.xml")
      req = Net::HTTP::Delete.new(uri.request_uri)
      response = send_request(req, uri, login, password_callback)
      # Follow permanent redirects.
      while response.kind_of?(Net::HTTPMovedPermanently)
        uri = URI.parse(response['Location'])
        req = Net::HTTP::Delete.new(uri.request_uri)
        response = send_request(req, uri, login, password_callback)
      end
      if response.kind_of?(Net::HTTPOK)
        # BUG FIX: this previously did 'successcount = 0', resetting the
        # tally on every successful delete so the method always returned 0.
        successcount += 1
      else
        warn "Delete of #{result_name} (#{result['id']}) failed:\n" + response.body
      end
    else
      warn "delete_objects passed a bogus results hash, #{result_name} has no id field"
    end
  end
  successcount
end
|
|
532
|
+
|
|
533
|
+
def register
|
|
534
|
+
data = {}
|
|
535
|
+
|
|
536
|
+
# Tell facter to load everything, otherwise it tries to dynamically
|
|
537
|
+
# load the individual fact libraries using a very broken mechanism
|
|
538
|
+
Facter.loadfacts
|
|
539
|
+
|
|
540
|
+
#
|
|
541
|
+
# Gather software-related information
|
|
542
|
+
#
|
|
543
|
+
data['name'] = Facter['fqdn'].value
|
|
544
|
+
data['updated_at'] = Time.now.strftime("%Y-%m-%d %H:%M:%S")
|
|
545
|
+
if Facter['kernel'] && Facter['kernel'].value == 'Linux' &&
|
|
546
|
+
Facter['lsbdistdescription'] && Facter['lsbdistdescription'].value
|
|
547
|
+
# Strip release version and code name from lsbdistdescription
|
|
548
|
+
lsbdistdesc = Facter['lsbdistdescription'].value
|
|
549
|
+
lsbdistdesc.gsub!(/ release \S+/, '')
|
|
550
|
+
lsbdistdesc.gsub!(/ \([^)]\)/, '')
|
|
551
|
+
data['operating_system[variant]'] = lsbdistdesc
|
|
552
|
+
data['operating_system[version_number]'] = Facter['lsbdistrelease'].value
|
|
553
|
+
elsif Facter['kernel'] && Facter['kernel'].value == 'Darwin' &&
|
|
554
|
+
Facter['macosx_productname'] && Facter['macosx_productname'].value
|
|
555
|
+
data['operating_system[variant]'] = Facter['macosx_productname'].value
|
|
556
|
+
data['operating_system[version_number]'] = Facter['macosx_productversion'].value
|
|
557
|
+
else
|
|
558
|
+
data['operating_system[variant]'] = Facter['operatingsystem'].value
|
|
559
|
+
data['operating_system[version_number]'] = Facter['operatingsystemrelease'].value
|
|
560
|
+
end
|
|
561
|
+
if Facter['architecture'] && Facter['architecture'].value
|
|
562
|
+
data['operating_system[architecture]'] = Facter['architecture'].value
|
|
563
|
+
else
|
|
564
|
+
# Not sure if this is reasonable
|
|
565
|
+
data['operating_system[architecture]'] = Facter['hardwaremodel'].value
|
|
566
|
+
end
|
|
567
|
+
data['kernel_version'] = Facter['kernelrelease'].value
|
|
568
|
+
if Facter.value('memorysize')
|
|
569
|
+
data['os_memory'] = Facter.value('memorysize')
|
|
570
|
+
elsif Facter.value('sp_physical_memory') # Mac OS X
|
|
571
|
+
# More or less a safe bet that OS memory == physical memory on Mac OS X
|
|
572
|
+
data['os_memory'] = Facter.value('sp_physical_memory')
|
|
573
|
+
end
|
|
574
|
+
if Facter['swapsize']
|
|
575
|
+
data['swap'] = Facter['swapsize'].value
|
|
576
|
+
end
|
|
577
|
+
# Currently the processorcount fact doesn't even get defined on most platforms
|
|
578
|
+
if Facter['processorcount'] && Facter['processorcount'].value
|
|
579
|
+
# This is generally a virtual processor count (cores and HTs),
|
|
580
|
+
# not a physical CPU count
|
|
581
|
+
data['os_processor_count'] = Facter['processorcount'].value
|
|
582
|
+
elsif Facter['sp_number_processors'] && Facter['sp_number_processors'].value
|
|
583
|
+
data['os_processor_count'] = Facter['sp_number_processors'].value
|
|
584
|
+
end
|
|
585
|
+
data['timezone'] = Facter['timezone'].value if Facter['timezone']
|
|
586
|
+
|
|
587
|
+
# Need custom facts for these
|
|
588
|
+
#data['virtual_client_ids'] =
|
|
589
|
+
|
|
590
|
+
cpu_percent = getcpupercent
|
|
591
|
+
login_count = getlogincount
|
|
592
|
+
disk_usage = getdiskusage
|
|
593
|
+
# have to round it up because server code only takes integer
|
|
594
|
+
data['utilization_metric[percent_cpu][value]'] = cpu_percent.round if cpu_percent
|
|
595
|
+
data['utilization_metric[login_count][value]'] = login_count if login_count
|
|
596
|
+
data['used_space'] = disk_usage[:used_space] if disk_usage
|
|
597
|
+
data['avail_space'] = disk_usage[:avail_space] if disk_usage
|
|
598
|
+
getvolumes.each do |key, value|
|
|
599
|
+
data[key] = value
|
|
600
|
+
end
|
|
601
|
+
|
|
602
|
+
#
|
|
603
|
+
# Gather hardware-related information
|
|
604
|
+
#
|
|
605
|
+
hardware_profile = NVentory::Client::get_hardware_profile
|
|
606
|
+
data['hardware_profile[manufacturer]'] = hardware_profile[:manufacturer]
|
|
607
|
+
data['hardware_profile[model]'] = hardware_profile[:model]
|
|
608
|
+
if Facter['serialnumber'] && Facter['serialnumber'].value
|
|
609
|
+
data['serial_number'] = Facter['serialnumber'].value
|
|
610
|
+
elsif Facter['sp_serial_number'] && Facter['sp_serial_number'].value # Mac OS X
|
|
611
|
+
data['serial_number'] = Facter['sp_serial_number'].value
|
|
612
|
+
end
|
|
613
|
+
if Facter['processor0'] && Facter['processor0'].value
|
|
614
|
+
# FIXME: Parsing this string is less than ideal, as these things
|
|
615
|
+
# are reported as seperate fields by dmidecode, but facter isn't
|
|
616
|
+
# reading that data.
|
|
617
|
+
# Example: Intel(R) Core(TM)2 Duo CPU T7300 @ 2.00GHz
|
|
618
|
+
# Example: Intel(R) Pentium(R) 4 CPU 3.60GHz
|
|
619
|
+
processor = Facter['processor0'].value
|
|
620
|
+
if processor =~ /(\S+)\s(.+)/
|
|
621
|
+
manufacturer = $1
|
|
622
|
+
model = $2
|
|
623
|
+
speed = nil
|
|
624
|
+
if model =~ /(.+\S)\s+\@\s+([\d\.]+.Hz)/
|
|
625
|
+
model = $1
|
|
626
|
+
speed = $2
|
|
627
|
+
elsif model =~ /(.+\S)\s+([\d\.]+.Hz)/
|
|
628
|
+
model = $1
|
|
629
|
+
speed = $2
|
|
630
|
+
end
|
|
631
|
+
data['processor_manufacturer'] = manufacturer.gsub(/\(R\)/, '')
|
|
632
|
+
data['processor_model'] = model
|
|
633
|
+
data['processor_speed'] = speed
|
|
634
|
+
end
|
|
635
|
+
elsif Facter['sp_cpu_type'] && Facter['sp_cpu_type'].value
|
|
636
|
+
# FIXME: Assuming the manufacturer is the first word is
|
|
637
|
+
# less than ideal
|
|
638
|
+
cpu_type = Facter['sp_cpu_type'].value
|
|
639
|
+
if cpu_type =~ /(\S+)\s(.+)/
|
|
640
|
+
data['processor_manufacturer'] = $1
|
|
641
|
+
data['processor_model'] = $2
|
|
642
|
+
end
|
|
643
|
+
data['processor_speed'] = Facter['sp_current_processor_speed'].value
|
|
644
|
+
# It's not clear if system_profiler is reporting the number
|
|
645
|
+
# of physical CPUs or the number seen by the OS. I'm not
|
|
646
|
+
# sure if there are situations in Mac OS where those two can
|
|
647
|
+
# get out of sync. As such this is identical to what is reported
|
|
648
|
+
# for the os_processor_count above.
|
|
649
|
+
data['processor_count'] = Facter['sp_number_processors'].value
|
|
650
|
+
end
|
|
651
|
+
|
|
652
|
+
if Facter['physicalprocessorcount']
|
|
653
|
+
data['processor_count'] = Facter['physicalprocessorcount'].value
|
|
654
|
+
else
|
|
655
|
+
# need to get from dmidecode
|
|
656
|
+
end
|
|
657
|
+
|
|
658
|
+
data['processor_core_count'] = get_cpu_core_count
|
|
659
|
+
#data['processor_socket_count'] =
|
|
660
|
+
#data['power_supply_count'] =
|
|
661
|
+
#data['physical_memory_sizes'] =
|
|
662
|
+
|
|
663
|
+
physical_memory = get_physical_memory
|
|
664
|
+
data['physical_memory'] = Facter::Memory.scale_number(physical_memory, "MB") if physical_memory
|
|
665
|
+
|
|
666
|
+
nics = []
|
|
667
|
+
if Facter['interfaces'] && Facter['interfaces'].value
|
|
668
|
+
nics = Facter['interfaces'].value.split(',')
|
|
669
|
+
nics.each do |nic|
|
|
670
|
+
data["network_interfaces[#{nic}][name]"] = nic
|
|
671
|
+
data["network_interfaces[#{nic}][hardware_address]"] = Facter["macaddress_#{nic}"].value
|
|
672
|
+
#data["network_interfaces[#{nic}][interface_type]"]
|
|
673
|
+
#data["network_interfaces[#{nic}][physical]"] =
|
|
674
|
+
#data["network_interfaces[#{nic}][up]"] =
|
|
675
|
+
#data["network_interfaces[#{nic}][link]"] =
|
|
676
|
+
#data["network_interfaces[#{nic}][autonegotiate]"] =
|
|
677
|
+
#data["network_interfaces[#{nic}][speed]"] =
|
|
678
|
+
#data["network_interfaces[#{nic}][full_duplex]"] =
|
|
679
|
+
# Facter only captures one address per interface
|
|
680
|
+
data["network_interfaces[#{nic}][ip_addresses][0][address]"] = Facter["ipaddress_#{nic}"].value
|
|
681
|
+
data["network_interfaces[#{nic}][ip_addresses][0][address_type]"] = 'ipv4'
|
|
682
|
+
data["network_interfaces[#{nic}][ip_addresses][0][netmask]"] = Facter["netmask_#{nic}"].value
|
|
683
|
+
#data["network_interfaces[#{nic}][ip_addresses][0][broadcast]"] =
|
|
684
|
+
end
|
|
685
|
+
end
|
|
686
|
+
# get additional nic info that facter doesn't know about
|
|
687
|
+
nic_info = get_nic_info
|
|
688
|
+
nic_info.each do |nic, info|
|
|
689
|
+
next if !nics.include?(nic)
|
|
690
|
+
info.each do |key, value|
|
|
691
|
+
data["network_interfaces[#{nic}][#{key}]"] = value
|
|
692
|
+
end
|
|
693
|
+
end
|
|
694
|
+
|
|
695
|
+
# Mark our NIC data as authoritative so that the server properly
|
|
696
|
+
# updates its records (removing any NICs and IPs we don't specify)
|
|
697
|
+
data['network_interfaces[authoritative]'] = true
|
|
698
|
+
|
|
699
|
+
data['uniqueid'] = NVentory::Client::get_uniqueid
|
|
700
|
+
|
|
701
|
+
# TODO: figure out list of guests if it's a host
|
|
702
|
+
vmstatus = getvmstatus
|
|
703
|
+
if vmstatus == 'xenu'
|
|
704
|
+
data['virtualmode'] = 'guest'
|
|
705
|
+
data['virtualarch'] = 'xen'
|
|
706
|
+
elsif vmstatus == 'xen0'
|
|
707
|
+
data['virtualmode'] = 'host'
|
|
708
|
+
data['virtualarch'] = 'xen'
|
|
709
|
+
elsif vmstatus == 'vmware_server'
|
|
710
|
+
data['virtualmode'] = 'host'
|
|
711
|
+
data['virtualarch'] = 'vmware'
|
|
712
|
+
elsif vmstatus == 'vmware'
|
|
713
|
+
data['virtualmode'] = 'guest'
|
|
714
|
+
data['virtualarch'] = 'vmware'
|
|
715
|
+
elsif vmstatus == 'kvm_host'
|
|
716
|
+
data['virtualmode'] = 'host'
|
|
717
|
+
data['virtualarch'] = 'kvm'
|
|
718
|
+
end
|
|
719
|
+
|
|
720
|
+
if vmstatus == 'kvm_host'
|
|
721
|
+
guests = get_kvm_hostinfo
|
|
722
|
+
guests.each do |vm, vminfo|
|
|
723
|
+
data["vmguest[#{vm}][vmimg_size]"] = vminfo['vmimg_size']
|
|
724
|
+
data["vmguest[#{vm}][vmspace_used]"] = vminfo['vmspace_used']
|
|
725
|
+
end
|
|
726
|
+
end
|
|
727
|
+
|
|
728
|
+
# Looks like this no longer works. virtual_client_ids is not valid
|
|
729
|
+
# field and causes ALL nodes to return....
|
|
730
|
+
# if data['hardware_profile[model]'] == 'VMware Virtual Platform'
|
|
731
|
+
# getdata = {}
|
|
732
|
+
# getdata[:objecttype] = 'nodes'
|
|
733
|
+
# getdata[:exactget] = {'virtual_client_ids' => [data['uniqueid']]}
|
|
734
|
+
# getdata[:login] = 'autoreg'
|
|
735
|
+
# results = get_objects(getdata)
|
|
736
|
+
# if results.length == 1
|
|
737
|
+
# data['virtual_parent_node_id'] = results.values.first['id']
|
|
738
|
+
# elsif results.length > 1
|
|
739
|
+
# warn "Multiple hosts claim this virtual client: #{results.keys.sort.join(',')}"
|
|
740
|
+
# end
|
|
741
|
+
# end
|
|
742
|
+
|
|
743
|
+
# Get console info
|
|
744
|
+
console_type = get_console_type
|
|
745
|
+
if console_type == "Dell DRAC"
|
|
746
|
+
data['console_type'] = "Dell DRAC"
|
|
747
|
+
|
|
748
|
+
drac_info = get_drac_info
|
|
749
|
+
|
|
750
|
+
# Create a NIC for the DRAC and associate it this node
|
|
751
|
+
unless drac_info.empty?
|
|
752
|
+
drac_name = (drac_info[:name] && !drac_info[:name].empty?)? drac_info[:name] : "DRAC"
|
|
753
|
+
data["network_interfaces[#{drac_name}][name]"] = drac_name
|
|
754
|
+
data["network_interfaces[#{drac_name}][hardware_address]"] = drac_info[:mac_address]
|
|
755
|
+
data["network_interfaces[#{drac_name}][ip_addresses][0][address]"] = drac_info[:ip_address]
|
|
756
|
+
data["network_interfaces[#{drac_name}][ip_addresses][0][address_type]"] = "ipv4"
|
|
757
|
+
end
|
|
758
|
+
end
|
|
759
|
+
|
|
760
|
+
# See what chassis/blade enclosure the node is in
|
|
761
|
+
chassis = get_chassis_info
|
|
762
|
+
data["chassis[service_tag]"] = chassis[:service_tag] if !chassis.empty?
|
|
763
|
+
data["chassis[slot_num]"] = chassis[:slot_num] if !chassis.empty?
|
|
764
|
+
|
|
765
|
+
#
|
|
766
|
+
# Report data to server
|
|
767
|
+
#
|
|
768
|
+
|
|
769
|
+
# Check to see if there's an existing entry for this host that matches
|
|
770
|
+
# our unique id. If so we want to update it, even if the hostname
|
|
771
|
+
# doesn't match our current hostname (as it probably indicates this
|
|
772
|
+
# host was renamed).
|
|
773
|
+
results = nil
|
|
774
|
+
if data['uniqueid']
|
|
775
|
+
getdata = {}
|
|
776
|
+
getdata[:objecttype] = 'nodes'
|
|
777
|
+
getdata[:exactget] = {'uniqueid' => [data['uniqueid']]}
|
|
778
|
+
getdata[:login] = 'autoreg'
|
|
779
|
+
results = get_objects(getdata)
|
|
780
|
+
#
|
|
781
|
+
# Check for a match of the reverse uniqueid.
|
|
782
|
+
# Background:
|
|
783
|
+
# Dmidecode versions earlier than 2.10 display
|
|
784
|
+
# the first three fields of the UUID in reverse order
|
|
785
|
+
# due to the use of Big-endian rather than Little-endian
|
|
786
|
+
# byte encoding.
|
|
787
|
+
# Starting with version 2.10, dmidecode uses Little-endian
|
|
788
|
+
# when it finds an SMBIOS >= 2.6. UUID's reported from SMBIOS'
|
|
789
|
+
# earlier than 2.6 are considered "incorrect".
|
|
790
|
+
#
|
|
791
|
+
# After a rebuild/upgrade, rather than creating a new node
|
|
792
|
+
# entry for an existing asset, we'll check for the flipped
|
|
793
|
+
# version of the uniqueid.
|
|
794
|
+
#
|
|
795
|
+
if results.empty? && data['uniqueid'].include?('-')
|
|
796
|
+
reverse_uniqueid = [data['uniqueid'].split('-')[0..2].map { |n| n.split(/(\w\w)/).reverse.join }.join('-'), data['uniqueid'].split('-',4)[3]].join('-')
|
|
797
|
+
getdata[:exactget] = {'uniqueid' => [reverse_uniqueid]}
|
|
798
|
+
results = get_objects(getdata)
|
|
799
|
+
end
|
|
800
|
+
end
|
|
801
|
+
|
|
802
|
+
# If we failed to find an existing entry based on the unique id,
|
|
803
|
+
# fall back to the hostname.
|
|
804
|
+
if results.empty? && data['name']
|
|
805
|
+
getdata = {}
|
|
806
|
+
getdata[:objecttype] = 'nodes'
|
|
807
|
+
getdata[:exactget] = {'name' => [data['name']]}
|
|
808
|
+
getdata[:login] = 'autoreg'
|
|
809
|
+
results = get_objects(getdata)
|
|
810
|
+
end
|
|
811
|
+
|
|
812
|
+
# If we failed to find an existing entry based on the uniqueid and hostname,
|
|
813
|
+
# fall back to using serial number. This may still fail to find an entry,
|
|
814
|
+
# if this is a new host, but that's OK as it will leave %results
|
|
815
|
+
# as undef, which triggers set_nodes to create a new entry on the
|
|
816
|
+
# server.
|
|
817
|
+
if results.empty? && data['serial_number'] &&
|
|
818
|
+
!data['serial_number'].empty? &&
|
|
819
|
+
data['serial_number'] != "Not Specified"
|
|
820
|
+
getdata = {}
|
|
821
|
+
getdata[:objecttype] = 'nodes'
|
|
822
|
+
getdata[:exactget] = {'serial_number' => [data['serial_number']]}
|
|
823
|
+
getdata[:login] = 'autoreg'
|
|
824
|
+
results = get_objects(getdata)
|
|
825
|
+
end
|
|
826
|
+
|
|
827
|
+
setresults = set_objects('nodes', results, data, 'autoreg')
|
|
828
|
+
puts "Command successful" if setresults == 1
|
|
829
|
+
end
|
|
830
|
+
|
|
831
|
+
# Add the given node into the given nodegroup by directly
|
|
832
|
+
# creating the node_group_node_assignment
|
|
833
|
+
# First argument is the id of the node
|
|
834
|
+
# Second argument is the id of the nodegroup
|
|
835
|
+
# Assign a single node to a single node group by directly creating the
# node_group_node_assignment record on the server.
#
# node_id       - id of the node
# node_group_id - id of the node group
# login / password_callback are forwarded to set_objects for authentication.
def add_node_group_node_assignment(node_id, node_group_id, login, password_callback=PasswordCallback)
  assignment = { :node_id => node_id, :node_group_id => node_group_id }
  puts "Adding using the following setdata #{assignment.inspect}"
  set_objects('node_group_node_assignments', nil, assignment, login, password_callback)
end
|
|
840
|
+
|
|
841
|
+
# The first argument is a hash returned by a 'nodes' call to get_objects
|
|
842
|
+
# The second argument is a hash returned by a 'node_groups'
|
|
843
|
+
# call to get_objects
|
|
844
|
+
# This method does the same thing as the add_nodes_to_nodegroups method. However, it
|
|
845
|
+
# will not be susceptible to the race condition mentioned in add_nodes_to_nodegroups method
|
|
846
|
+
# This is because it directly talks to the node_group_node_assignments controller
|
|
847
|
+
# Assign every node in `nodes` to every node group in `nodegroups`.
#
# nodes      - hash returned by a 'nodes' call to get_objects
# nodegroups - hash returned by a 'node_groups' call to get_objects
#
# Unlike add_nodes_to_nodegroups, this talks directly to the
# node_group_node_assignments controller, one assignment at a time, and so
# avoids the read-modify-write race inherent in that method.
def add_node_group_node_assignments(nodes, nodegroups, login, password_callback=PasswordCallback)
  nodegroups.each_value do |nodegroup|
    nodes.each_value do |node|
      add_node_group_node_assignment(node['id'], nodegroup['id'], login, password_callback)
    end
  end
end
|
|
854
|
+
|
|
855
|
+
# The first argument is a hash returned by a 'nodes' call to get_objects
|
|
856
|
+
# The second argument is a hash returned by a 'node_groups'
|
|
857
|
+
# call to get_objects
|
|
858
|
+
# NOTE: For the node groups you must have requested that the server include 'nodes' in the result
|
|
859
|
+
# Add the given nodes to the given node groups.
#
# nodes      - hash returned by a 'nodes' call to get_objects
# nodegroups - hash returned by a 'node_groups' call to get_objects
#              (must have been fetched with 'nodes' included)
#
# The server only supports setting the complete membership of a node group,
# so for each group we merge the current members with the requested
# additions (a hash keyed by node name de-duplicates them) and hand the
# result to set_nodegroup_node_assignments.
# FIXME: this read-modify-write cycle is racy; it should talk directly to
# the node_group_node_assignments controller instead.
def add_nodes_to_nodegroups(nodes, nodegroups, login, password_callback=PasswordCallback)
  nodegroups.each_pair do |nodegroup_name, nodegroup|
    # Clone so we don't mutate the caller's hash while merging.
    combined = nodes.clone
    nodegroup["nodes"].each { |member| combined[member['name']] = member }
    set_nodegroup_node_assignments(combined, {nodegroup_name => nodegroup}, login, password_callback)
  end
end
|
|
879
|
+
# The first argument is a hash returned by a 'nodes' call to get_objects
|
|
880
|
+
# The second argument is a hash returned by a 'node_groups'
|
|
881
|
+
# call to get_objects
|
|
882
|
+
# NOTE: For the node groups you must have requested that the server include 'nodes' in the result
|
|
883
|
+
# Remove the given nodes from the given node groups.
#
# nodes      - hash returned by a 'nodes' call to get_objects
# nodegroups - hash returned by a 'node_groups' call to get_objects
#              (must have been fetched with 'nodes' included)
#
# The server only supports setting the complete membership of a node group,
# so for each group we rebuild the member list minus the nodes to be
# removed and hand it to set_nodegroup_node_assignments.
# FIXME: this read-modify-write cycle is racy; it should talk directly to
# the node_group_node_assignments controller instead.
def remove_nodes_from_nodegroups(nodes, nodegroups, login, password_callback=PasswordCallback)
  nodegroups.each_pair do |nodegroup_name, nodegroup|
    remaining = {}
    nodegroup['nodes'].each do |member|
      remaining[member['name']] = member unless nodes.has_key?(member['name'])
    end
    set_nodegroup_node_assignments(remaining, {nodegroup_name => nodegroup}, login, password_callback)
  end
end
|
|
905
|
+
# The first argument is a hash returned by a 'nodes' call to get_objects
|
|
906
|
+
# The second argument is a hash returned by a 'node_groups'
|
|
907
|
+
# call to get_objects
|
|
908
|
+
# Replace the complete node membership of the given node groups.
#
# nodes      - hash returned by a 'nodes' call to get_objects; its nodes
#              become the full membership of each group
# nodegroups - hash returned by a 'node_groups' call to get_objects
def set_nodegroup_node_assignments(nodes, nodegroups, login, password_callback=PasswordCallback)
  node_ids = []
  nodes.each_pair do |node_name, node|
    if node['id']
      node_ids << node['id']
    else
      warn "set_nodegroup_node_assignments passed a bogus nodes hash, #{node_name} has no id field"
    end
  end

  nodegroupdata = {}
  # Cannot pass an empty list, so append the 'nil' sentinel string, which
  # the server side accommodates. (Fixed to append to the array — matching
  # set_nodegroup_nodegroup_assignments — instead of replacing the array
  # with a bare string, since the '[]'-suffixed form key expects an array.)
  node_ids << 'nil' if node_ids.empty?
  nodegroupdata['node_group_node_assignments[nodes][]'] = node_ids

  set_objects('node_groups', nodegroups, nodegroupdata, login, password_callback)
end
|
|
924
|
+
|
|
925
|
+
# Both arguments are hashes returned by a 'node_groups' call to get_objects
|
|
926
|
+
# NOTE: For the parent groups you must have requested that the server include 'child_groups' in the result
|
|
927
|
+
# Add the given child node groups to the given parent node groups.
# Both arguments are hashes returned by a 'node_groups' call to get_objects.
# NOTE: the parent groups must have been fetched with 'child_groups'
# included in the result.
#
# The server only supports setting a complete list of assignments for a
# node group, so we retrieve the current assignments for each parent,
# merge in the requested additions, and pass the result to
# set_nodegroup_nodegroup_assignments.
# FIXME: this read-modify-write cycle is racy; it should talk directly to
# the node_group_node_group_assignments controller instead.
def add_nodegroups_to_nodegroups(child_groups, parent_groups, login, password_callback=PasswordCallback)
  parent_groups.each_pair do |parent_group_name, parent_group|
    # Use a hash to merge the current and new members and eliminate
    # duplicates. Bug fix: clone the caller's hash (as
    # add_nodes_to_nodegroups does) — the original assigned the hash
    # directly, mutating the caller's child_groups and leaking merged
    # children across parent-group iterations.
    merged_nodegroups = child_groups.clone

    if parent_group['child_groups']
      parent_group['child_groups'].each do |child_group|
        merged_nodegroups[child_group['name']] = child_group
      end
    end

    set_nodegroup_nodegroup_assignments(merged_nodegroups, {parent_group_name => parent_group}, login, password_callback)
  end
end
|
|
951
|
+
# Both arguments are hashes returned by a 'node_groups' call to get_objects
|
|
952
|
+
# NOTE: For the parent groups you must have requested that the server include 'child_groups' in the result
|
|
953
|
+
# Remove the given child node groups from the given parent node groups.
# Both arguments are hashes returned by a 'node_groups' call to get_objects.
# NOTE: the parent groups must have been fetched with 'child_groups'
# included in the result.
#
# The server only supports setting a complete list of assignments for a
# node group, so for each parent we rebuild the child list minus the
# groups to be removed and hand it to set_nodegroup_nodegroup_assignments.
# FIXME: this read-modify-write cycle is racy; it should talk directly to
# the node_group_node_group_assignments controller instead.
def remove_nodegroups_from_nodegroups(child_groups, parent_groups, login, password_callback=PasswordCallback)
  parent_groups.each_pair do |parent_group_name, parent_group|
    remaining_children = {}
    if parent_group['child_groups']
      parent_group['child_groups'].each do |child_group|
        child_name = child_group['name']
        remaining_children[child_name] = child_group unless child_groups.has_key?(child_name)
      end
    end

    set_nodegroup_nodegroup_assignments(remaining_children, {parent_group_name => parent_group}, login, password_callback)
  end
end
|
|
976
|
+
|
|
977
|
+
# Both arguments are hashes returned by a 'node_groups' call to get_objects
|
|
978
|
+
# Replace the complete child-group list of the given parent node groups.
# Both arguments are hashes returned by a 'node_groups' call to get_objects.
def set_nodegroup_nodegroup_assignments(child_groups, parent_groups, login, password_callback=PasswordCallback)
  child_ids = []
  child_groups.each_pair do |child_group_name, child_group|
    if child_group['id'].nil?
      warn "set_nodegroup_nodegroup_assignments passed a bogus child groups hash, #{child_group_name} has no id field"
    else
      child_ids << child_group['id']
    end
  end
  # cannot pass empty hash therefore, add a 'nil' string. nasty hack and accomodated on the server side code
  child_ids << 'nil' if child_ids.empty?
  nodegroupdata = { 'node_group_node_group_assignments[child_groups][]' => child_ids }
  set_objects('node_groups', parent_groups, nodegroupdata, login, password_callback)
end
|
|
993
|
+
|
|
994
|
+
# Add a new or pre-existing tag (by name string) to a node_group (by hash returned from get_objects)
|
|
995
|
+
# Apply a tag (by name, creating it if it doesn't exist yet) to every node
# group in ng_hash (a hash returned by a 'node_groups' call to get_objects).
def add_tag_to_node_group(ng_hash, tag_name, login, password_callback=PasswordCallback)
  lookup = {:objecttype => 'tags', :exactget => {:name => tag_name}}
  tag_found = get_objects(lookup)
  if tag_found.empty?
    # Tag doesn't exist yet; create it, then re-fetch to learn its id.
    set_objects('tags',{},{ :name => tag_name },login, password_callback)
    tag_found = get_objects(lookup)
  end
  # tag_found is a hash even though there's only one result.
  first_tag = tag_found[tag_found.keys.first]
  tag_id = first_tag ? first_tag['id'] : nil
  ng_hash.each_pair do |ng_name,ng_data|
    tagging = { :taggable_type => 'NodeGroup', :taggable_id => ng_data['id'], :tag_id => tag_id }
    set_objects('taggings',{},tagging,login,password_callback)
  end
end
|
|
1009
|
+
|
|
1010
|
+
# Add a new or pre-existing tag (by name string) to a node_group (by hash returned from get_objects)
|
|
1011
|
+
# Remove a tag (by name) from every node group in ng_hash (a hash returned
# by a 'node_groups' call to get_objects). Exits the process if the tag
# doesn't exist at all (preserving the original CLI-oriented behavior).
def remove_tag_from_node_group(ng_hash, tag_name, login, password_callback=PasswordCallback)
  tag_found = get_objects({:objecttype => 'tags', :exactget => {:name => tag_name}})
  if tag_found.empty?
    puts "ERROR: Could not find any tags with the name #{tag_name}"
    exit
  end
  # tag_found is hash, even tho only one result
  (tag_data = tag_found[tag_found.keys.first]) && (tag_id = tag_data['id'])
  taggings_to_del = {}
  ng_hash.each_pair do |ng_name,ng_data|
    # Look up the tagging record joining this node group to the tag.
    get_data = {:objecttype => 'taggings',
                :exactget => { :taggable_type => 'NodeGroup', :taggable_id => ng_data['id'], :tag_id => tag_id } }
    tagging_found = get_objects(get_data)
    taggings_to_del.merge!(tagging_found) unless tagging_found.empty?
  end
  if taggings_to_del.empty?
    puts "ERROR: Could not find any tags \"#{tag_name}\" assigned to those node_groups"
  else
    # Bug fix: forward the caller-supplied password_callback. The original
    # wrote `password_callback=PasswordCallback` here, which reassigned the
    # parameter to the default and discarded whatever the caller passed in.
    delete_objects('taggings', taggings_to_del, login, password_callback)
  end
end
|
|
1034
|
+
|
|
1035
|
+
# Add a new graffiti to given objects. We're assuming that graffiti is a string
|
|
1036
|
+
# of "key:value" format
|
|
1037
|
+
# obj_type is a string that describe the type of the obj (e.g NodeGroup)
|
|
1038
|
+
# obj_hash is the hash returned from calling get_objects
|
|
1039
|
+
# Add a graffiti to each of the given objects. graffiti is assumed to be a
# "key:value" string.
#
# obj_type - string describing the type of the objects (e.g. NodeGroup)
# obj_hash - hash returned from calling get_objects
def add_graffiti(obj_type, obj_hash, graffiti, login, password_callback=PasswordCallback)
  name, value = graffiti.split(':')
  obj_hash.each_value do |obj|
    setdata = {
      :name => name,
      :value => value,
      :graffitiable_id => obj['id'],
      :graffitiable_type => obj_type,
    }
    set_objects('graffitis', nil, setdata, login, password_callback)
  end
end
|
|
1051
|
+
|
|
1052
|
+
# Delete the graffiti (based on the name) from the given objects
|
|
1053
|
+
# obj_type is a string that describe the type of the obj (e.g NodeGroup)
|
|
1054
|
+
# obj_hash is the hash returned from calling get_objects
|
|
1055
|
+
# Delete the graffiti with the given name from each of the given objects.
#
# obj_type - string describing the type of the objects (e.g. NodeGroup)
# obj_hash - hash returned from calling get_objects
def delete_graffiti(obj_type, obj_hash, graffiti_name, login, password_callback=PasswordCallback)
  obj_hash.each_value do |obj|
    # Find the graffiti records attached to this object by name, then
    # delete whatever matched.
    matches = get_objects(:objecttype => 'graffitis',
                          :exactget => {:name => graffiti_name,
                                        :graffitiable_id => obj['id'],
                                        :graffitiable_type => obj_type})
    delete_objects('graffitis', matches, login, password_callback)
  end
end
|
|
1066
|
+
|
|
1067
|
+
# Recursively build the ancestry tree of a service by following its
# parent_services links.
#
# Returns one of three shapes (callers must handle all of them):
#   - {service_name => [subtree, ...]} when the service exists and has
#     parents (each subtree is itself a recursive result),
#   - the bare service_name string when the service exists but has no
#     parents (leaf of the recursion),
#   - {} when no service with that name exists.
# NOTE(review): the mixed return types look intentional (leaves collapse to
# strings inside the parent's array) but are easy to misuse — confirm
# against callers before changing.
def get_service_tree(service_name)
  getdata = {}
  getdata[:objecttype] = 'services'
  getdata[:exactget] = {'name' => [service_name]}
  # Ask the server to embed nodes and parent_services in the result so the
  # recursion can follow the parent links.
  getdata[:includes] = ['nodes', 'parent_services']
  services = {service_name => []}
  results = get_objects(getdata)

  if results.has_key?(service_name)
    if results[service_name].has_key?('parent_services') && !results[service_name]['parent_services'].empty?
      # Recurse into each parent and collect the subtrees.
      results[service_name]['parent_services'].each do |service|
        services[service_name] << get_service_tree(service['name'])
      end
    else
      # Leaf: no parents, return just the name.
      return service_name
    end
  else # no such service
    return {}
  end
  return services
end
|
|
1088
|
+
|
|
1089
|
+
#
|
|
1090
|
+
# Helper methods
|
|
1091
|
+
#
|
|
1092
|
+
# Determine a unique identifier for this host, used by the server to
# recognize the node across renames and re-registrations.
#
# Preference order on Linux/FreeBSD: hardware UUID from dmidecode, falling
# back to the MAC address for Xen guests/hosts, Dell C6100s (which share a
# UUID across sleds) and SeaMicro boxes (which all report one well-known
# bogus UUID). On other platforms falls back to Facter's hostid-based
# 'uniqueid' or, on Mac OS X, the serial number.
# Returns nil if no identifier could be determined.
def self.get_uniqueid
  os = Facter['kernel'].value
  hardware_profile = NVentory::Client::get_hardware_profile
  if os == 'Linux' or os == 'FreeBSD'
    # Xen (any xen* kernel module loaded) — dmidecode UUIDs aren't usable
    # here, so use the MAC address instead. Note the $? check applies to
    # the grep run on the preceding line.
    if File.exist?('/proc/modules') && `grep -q ^xen /proc/modules` && $? == 0
      uuid = Facter['macaddress'].value
    # Dell C6100 don't have unique uuid
    elsif hardware_profile[:manufacturer] =~ /Dell/ && hardware_profile[:model] == 'C6100'
      uuid = Facter['macaddress'].value
    else
      # best to use UUID from dmidecode
      uuid = getuuid
    end
    # Stupid SeaMicro boxes all have the same UUID below. So we won't
    # want to use it, use mac address instead
    if uuid && uuid != "78563412-3412-7856-90AB-CDDEEFAABBCC"
      uniqueid = uuid
    # next best thing to use is macaddress
    else
      uniqueid = Facter['macaddress'].value
    end
  elsif Facter['uniqueid'] && Facter['uniqueid'].value
    # This sucks, it's just using hostid, which is generally tied to an
    # IP address, not the physical hardware
    uniqueid = Facter['uniqueid'].value
  elsif Facter['sp_serial_number'] && Facter['sp_serial_number'].value
    # I imagine Mac serial numbers are unique
    uniqueid = Facter['sp_serial_number'].value
  end
  return uniqueid
end
|
|
1124
|
+
|
|
1125
|
+
# Return the hardware UUID as reported by dmidecode, stripped of
# whitespace, or nil if dmidecode printed no UUID line.
# Raises unless running as root, since dmidecode requires root.
def self.getuuid
  # dmidecode will fail if not run as root
  raise "This must be run as root" if Process.euid != 0
  uuid_entry = `/usr/sbin/dmidecode | grep UUID`
  # Backticks always return a string (possibly empty), so the original
  # `if uuid_entry` check was always true; test for content instead and
  # take the value after the "UUID:" label.
  uuid = uuid_entry.split(":")[1] unless uuid_entry.empty?
  uuid ? uuid.strip : nil
end
|
|
1141
|
+
|
|
1142
|
+
# Report the machine's manufacturer and model as a hash with
# :manufacturer and :model keys, each defaulting to 'Unknown'.
# Uses dmidecode-derived Facter facts where available, falling back to the
# Mac OS X system-profiler machine name (manufacturer assumed Apple).
def self.get_hardware_profile
  profile = {:manufacturer => 'Unknown', :model => 'Unknown'}
  if Facter['manufacturer'] && Facter['manufacturer'].value # dmidecode
    profile[:manufacturer] = Facter['manufacturer'].value.strip
    profile[:model] = Facter['productname'].value.strip
  elsif Facter['sp_machine_name'] && Facter['sp_machine_name'].value # Mac OS X
    # There's a small chance of this not being true...
    profile[:manufacturer] = 'Apple'
    profile[:model] = Facter['sp_machine_name'].value.strip
  end
  profile
end
|
|
1154
|
+
|
|
1155
|
+
#
|
|
1156
|
+
# Private methods
|
|
1157
|
+
#
|
|
1158
|
+
private
|
|
1159
|
+
|
|
1160
|
+
# Build a Net::HTTP connection object for the given URI, honoring the
# configured proxy (@proxy_server) and, for https URIs, the configured
# DH parameters (@dhparams) and CA material (@ca_file / @ca_path).
# Peer verification is only enabled when CA material is present.
def make_http(uri)
  http =
    if @proxy_server
      proxyuri = URI.parse(@proxy_server)
      Net::HTTP::Proxy(proxyuri.host, proxyuri.port).new(uri.host, uri.port)
    else
      Net::HTTP.new(uri.host, uri.port)
    end

  return http unless uri.scheme == "https"

  # Eliminate the OpenSSL "using default DH parameters" warning
  if File.exist?(@dhparams)
    dh = OpenSSL::PKey::DH.new(IO.read(@dhparams))
    Net::HTTP.ssl_context_accessor(:tmp_dh_callback)
    http.tmp_dh_callback = proc { dh }
  end
  http.use_ssl = true
  if @ca_file && File.exist?(@ca_file)
    http.ca_file = @ca_file
    http.verify_mode = OpenSSL::SSL::VERIFY_PEER
  end
  if @ca_path && File.directory?(@ca_path)
    http.ca_path = @ca_path
    http.verify_mode = OpenSSL::SSL::VERIFY_PEER
  end
  http
end
|
|
1188
|
+
|
|
1189
|
+
# Returns the path to the cookiefile to be used.
|
|
1190
|
+
# Create the file and correct permissions on
|
|
1191
|
+
# the cookiefile if needed
|
|
1192
|
+
# Returns the path to the cookiefile to be used, creating the file and
# tightening its permissions if needed. The file holds a session key, so
# it must not be readable by other users.
def get_cookiefile(login=nil)
  # autoreg has a special file under /root (created if /root is missing)
  if login == 'autoreg'
    @cookiefile = '/root/.nventory_cookie_autoreg'
    if ! File.directory?('/root')
      Dir.mkdir('/root')
    end
  end
  # Create the cookie file if it doesn't already exist
  if !File.exist?(@cookiefile)
    warn "Creating #{@cookiefile}"
    File.open(@cookiefile, 'w') { |file| }
  end
  # Ensure the permissions on the cookie file are appropriate,
  # as it will contain a session key that could be used by others
  # to impersonate this user to the server.
  # 07177 flags setuid/setgid/sticky, owner-execute, and any group/other
  # permission bits.
  st = File.stat(@cookiefile)
  if st.mode & 07177 != 0
    warn "Correcting permissions on #{@cookiefile}"
    # NOTE(review): masking with 0600 keeps only the owner rw bits that are
    # already set — it cannot *grant* owner rw. Presumably intended as
    # "reduce to at most 0600"; confirm this is the desired behavior.
    File.chmod(st.mode & 0600, @cookiefile)
  end
  @cookiefile
end
|
|
1215
|
+
|
|
1216
|
+
# Sigh, Ruby doesn't have a library for handling a persistent
|
|
1217
|
+
# cookie store so we have to do the dirty work ourselves. This
|
|
1218
|
+
# is by no means a full implementation, it's just enough to do
|
|
1219
|
+
# what's needed here.
|
|
1220
|
+
|
|
1221
|
+
# Break's a Set-Cookie line up into its constituent parts
|
|
1222
|
+
# Example from http://en.wikipedia.org/wiki/HTTP_cookie:
|
|
1223
|
+
# Set-Cookie: RMID=732423sdfs73242; expires=Fri, 31-Dec-2010 23:59:59 GMT; path=/; domain=.example.net
|
|
1224
|
+
# Break a Set-Cookie line up into its constituent parts.
# Example from http://en.wikipedia.org/wiki/HTTP_cookie:
# Set-Cookie: RMID=732423sdfs73242; expires=Fri, 31-Dec-2010 23:59:59 GMT; path=/; domain=.example.net
#
# Returns a hash with :line (the chomped original), :name, :value and
# :attributes (a hash of the remaining attribute pairs), or nil if the
# line is not a Set-Cookie line.
def parse_cookie(line)
  cookie = nil
  if line =~ /^Set-Cookie\d?: .+=.+/
    cookie = {}
    line.chomp!
    cookie[:line] = line
    # Remove the Set-Cookie portion of the line
    setcookie, rest = line.split(' ', 2)
    # Then break off the name and value from the cookie attributes
    namevalue, rawattributes = rest.split('; ', 2)
    name, value = namevalue.split('=', 2)
    cookie[:name] = name
    cookie[:value] = value
    attributes = {}
    # Bug fix: rawattributes is nil when the cookie carries no attributes
    # at all (e.g. "Set-Cookie: a=b"); the original called nil.split and
    # raised NoMethodError. to_s turns that into an empty iteration.
    rawattributes.to_s.split('; ').each do |attribute|
      attrname, attrvalue = attribute.split('=', 2)
      # The Perl cookie jar uses a non-standard syntax, which seems to
      # include wrapping some fields (particularly path) in quotes. The
      # Perl nVentory library uses the Perl cookie jar code so we need to be
      # compatible with it.
      if attrvalue =~ /^".*"$/
        attrvalue.sub!(/^"/, '')
        attrvalue.sub!(/"$/, '')
      end
      # rfc2965, 3.2.2:
      # If an attribute appears more than once in a cookie, the client
      # SHALL use only the value associated with the first appearance of
      # the attribute; a client MUST ignore values after the first.
      if !attributes[attrname]
        attributes[attrname] = attrvalue
      end
    end
    cookie[:attributes] = attributes
  else
    # Invalid lines in the form of comments and blank lines are to be
    # expected when we're called by read_cookiefile, so don't treat this as
    # a big deal.
    puts "parse_cookie passed invalid line: #{line}" if (@debug)
  end
  cookie
end
|
|
1265
|
+
|
|
1266
|
+
# Returns an array of cookies from the specified cookiefile
|
|
1267
|
+
# Read the given cookiefile and return an array of parsed cookie hashes
# (see parse_cookie). Unparseable lines are skipped, as are cookies whose
# "expires" attribute is in the past.
def read_cookiefile(cookiefile)
  warn "Using cookies from #{cookiefile}" if (@debug)
  cookies = []
  IO.foreach(cookiefile) do |line|
    cookie = parse_cookie(line)
    next unless cookie
    expires = cookie[:attributes] && cookie[:attributes]["expires"]
    if expires && DateTime.parse(expires) < DateTime.now
      warn "Cookie expired: #{cookie[:line]}" if @debug
      next
    end
    cookies << cookie
  end
  cookies
end
|
|
1284
|
+
|
|
1285
|
+
# This returns any cookies in the cookiefile which have domain and path
|
|
1286
|
+
# settings that match the specified uri.
|
|
1287
|
+
# Return the cookies from the cookiefile whose domain and path attributes
# match the given uri. Among the cookies that carry an "expires"
# attribute, only the one with the latest expiration is returned; cookies
# without an expiration are all kept.
# (Removed the unused `counter` local from the original.)
def get_cookies_for_uri(cookiefile, uri)
  cookies = []
  latest_cookie = []
  read_cookiefile(cookiefile).each do |cookie|
    # NOTE(review): the domain/path values are interpolated into regexes
    # unescaped; presumably safe for our own cookie store — confirm.
    next unless uri.host =~ Regexp.new("#{cookie[:attributes]['domain']}$")
    next unless uri.path =~ Regexp.new("^#{cookie[:attributes]['path'].gsub(/,.*/,'')}") # gsub in case didn't parse out comma seperator for new cookie
    # if there are more than 1 cookie , we only want the one w/ latest expiration
    if cookie[:attributes]["expires"]
      cookie_expiration = DateTime.parse(cookie[:attributes]["expires"])
      if latest_cookie.empty? || latest_cookie[0] < cookie_expiration
        latest_cookie = [cookie_expiration, cookie]
      end
    else
      cookies << cookie
    end
  end
  cookies << latest_cookie[1] unless latest_cookie.empty?
  cookies
end
|
|
1309
|
+
|
|
1310
|
+
# Extract cookie from response and save it to the user's cookie store
|
|
1311
|
+
# Extracts the Set-Cookie header (if any) from an HTTP response and merges
# it into the user's cookie store on disk.
#
# response - a Net::HTTPResponse
# uri      - the URI the request was sent to; supplies default domain/path
#            for cookies that omit those attributes
# login    - optional login name used to pick the per-user cookiefile
#
# The file is only rewritten when the new cookie actually adds to or
# replaces what is already stored.
def extract_cookie(response, uri, login=nil)
  if response['set-cookie']
    cookiefile = get_cookiefile(login)
    # It doesn't look like it matters for our purposes at the moment, but
    # according to rfc2965, 3.2.2 the Set-Cookie header can contain more
    # than one cookie, separated by commas.
    puts "extract_cookie processing #{response['set-cookie']}" if (@debug)
    newcookie = parse_cookie('Set-Cookie: ' + response['set-cookie'])
    return if newcookie.nil?

    # Some cookie fields are optional, and should default to the
    # values in the request.  We need to insert these so that we
    # save them properly.
    # http://cgi.netscape.com/newsref/std/cookie_spec.html
    # Note: re-parsing from the amended :line string keeps the parsed hash
    # and its serialized form in sync.
    if !newcookie[:attributes]['domain']
      puts "Adding domain #{uri.host} to cookie" if (@debug)
      newcookie = parse_cookie(newcookie[:line] + "; domain=#{uri.host}")
    end
    if !newcookie[:attributes]['path']
      puts "Adding path #{uri.path} to cookie" if (@debug)
      newcookie = parse_cookie(newcookie[:line] + "; path=#{uri.path}")
    end
    cookies = []
    change = false
    existing_cookie = false
    read_cookiefile(cookiefile).each do |cookie|
      # Remove any existing cookies with the same name, domain and path
      puts "Comparing #{cookie.inspect} to #{newcookie.inspect}" if (@debug)
      if cookie[:name] == newcookie[:name] &&
         cookie[:attributes]['domain'] == newcookie[:attributes]['domain'] &&
         cookie[:attributes]['path'] == newcookie[:attributes]['path']
        existing_cookie = true
        if cookie == newcookie
          # Identical cookie already stored: neither kept in `cookies` nor
          # marked as a change, so the file is left untouched below.
          puts "Existing cookie is identical to new cookie" if (@debug)
        else
          # Cookie removed by virtue of us not saving it here
          puts "Replacing existing but not identical cookie #{cookie.inspect}" if (@debug)
          cookies << newcookie
          change = true
        end
      else
        puts "Keeping non-matching cookie #{cookie.inspect}" if (@debug)
        cookies << cookie
      end
    end
    if !existing_cookie
      puts "No existing cookie matching new cookie, adding new cookie" if (@debug)
      cookies << newcookie
      change = true
    end
    if change
      puts "Updating cookiefile #{cookiefile}" if (@debug)
      File.open(cookiefile, 'w') { |file| file.puts(cookies.collect{|cookie| cookie[:line]}.join("\n")) }
    else
      puts "No cookie changes, leaving cookiefile untouched" if (@debug)
    end
  else
    puts "extract_cookie finds no cookie in response" if (@debug)
  end
end
|
|
1371
|
+
|
|
1372
|
+
# Sends requests to the nVentory server and handles any redirects to
|
|
1373
|
+
# authentication pages or services.
|
|
1374
|
+
# Sends requests to the nVentory server and handles any redirects to
# authentication pages or services.
#
# req               - a prepared Net::HTTPRequest
# uri               - the URI the request targets
# login             - login name ('autoreg' gets a hard-coded password)
# password_callback - either a Module responding to get_password, or the
#                     password itself
# loopcounter/stopflag - recursion guards: after 7 auth round-trips the
#                     cookiefile is cleared and retried once; a second
#                     overflow raises "Infinite loop detected".
#
# Returns the final Net::HTTPResponse.
def send_request(req, uri, login, password_callback=PasswordCallback,loopcounter=0,stopflag=false)
  if loopcounter > 7
    if stopflag
      raise "Infinite loop detected"
    else
      warn "Loop detected. Clearing out cookiefile.."
      loopcounter = 0
      stopflag = true
      # Truncates the cookiefile (write(nil) writes nothing).
      File.open(get_cookiefile(login), 'w') { |file| file.write(nil) }
    end
  end
  cookies = get_cookies_for_uri(get_cookiefile(login), uri)
  if !cookies.empty?
    cookiestring = cookies.collect{|cookie| "#{cookie[:name]}=#{cookie[:value]}" }.join('; ')
    puts "Inserting cookies into request: #{cookiestring}" if (@debug)
    req['Cookie'] = cookiestring
  end
  # NOTE(review): if no cookies matched, cookiestring stays nil and the
  # auth branches below set a nil Cookie header — apparently harmless, but
  # worth confirming against the server.

  response = make_http(uri).request(req)
  extract_cookie(response, uri, login)

  # Check for signs that the server wants us to authenticate
  password = nil
  if login == 'autoreg'
    password = 'mypassword'
  end
  # nVentory will redirect to the login controller if authentication is
  # required.  The scheme and port in the redirect location could be either
  # the standard server or the https variant, depending on whether or not
  # the server administration has turned on the ssl_requirement plugin.
  if response.kind_of?(Net::HTTPFound) &&
     response['Location'] &&
     URI.parse(response['Location']).host == URI.parse(@server).host &&
     URI.parse(response['Location']).path == URI.join(@server, 'login/login').path
    puts "Server responsed with redirect to nVentory login: #{response['Location']}" if (@debug)
    loginuri = URI.parse(response['Location'])
    ####################### Fix by darrendao - force it to use https ##########################
    # This is needed because if you're not usign https, then you will get
    # redirected to https login page, rather than being logged in. So the check down there will
    # will.
    loginuri.scheme = 'https'
    loginuri = URI.parse(loginuri.to_s)
    ############################################################################################
    loginreq = Net::HTTP::Post.new(loginuri.request_uri)
    # password_callback may be a Module with a get_password method, or the
    # password value itself.
    if password_callback.kind_of?(Module)
      password = password_callback.get_password if (!password)
    else
      password = password_callback if !password
    end
    loginreq.set_form_data({'login' => login, 'password' => password})
    # Include the cookies so the server doesn't have to generate another
    # session for us.
    loginreq['Cookie'] = cookiestring
    loginresponse = make_http(loginuri).request(loginreq)
    if @debug
      puts "nVentory auth POST response (#{loginresponse.code}):"
      if loginresponse.body.strip.empty?
        puts '<Body empty>'
      else
        puts loginresponse.body
      end
    end
    # The server always sends back a 302 redirect in response to a login
    # attempt.  You get redirected back to the login page if your login
    # failed, or redirected to your original page or the main page if the
    # login succeeded.
    if loginresponse.kind_of?(Net::HTTPFound) &&
       URI.parse(loginresponse['Location']).path != loginuri.path
      puts "Authentication against nVentory server succeeded" if (@debug)
      extract_cookie(loginresponse, loginuri, login)
      puts "Resending original request now that we've authenticated" if (@debug)
      return send_request(req, uri, login, password_callback)
    else
      puts "Authentication against nVentory server failed" if (@debug)
    end
  end

  # An SSO-enabled app will redirect to SSO if authentication is required
  if response.kind_of?(Net::HTTPFound) && response['Location'] && URI.parse(response['Location']).host == URI.parse(@sso_server).host
    puts "Server responsed with redirect to SSO login: #{response['Location']}" if (@debug)
    if login == 'autoreg'
      loginuri = URI.join(@server, 'login/login')
      puts "** Login user is 'autoreg'. Changing loginuri to #{loginuri.to_s}" if @debug
      unless loginuri.scheme == 'https'
        loginuri.scheme = 'https'
        loginuri = URI.parse(loginuri.to_s)
      end
    else
      loginuri = URI.parse(response['Location'])
    end
    # update the loginuri to the non-redirect uri of sso
    loginuri.path = '/login'
    loginuri.query = 'noredirects=1'
    loginreq = Net::HTTP::Post.new(loginuri.request_uri)
    if password_callback.kind_of?(Module)
      password = password_callback.get_password if (!password)
    else
      password = password_callback if !password
    end
    loginreq.set_form_data({'login' => login, 'password' => password})
    # It probably doesn't matter, but include the cookies again for good
    # measure
    loginreq['Cookie'] = cookiestring
    # Telling the SSO server we want XML back gets responses that are easier
    # to parse.
    loginreq['Accept'] = 'application/xml'
    loginresponse = make_http(loginuri).request(loginreq)
    # if it's a redirect (such as due to NON-fqdn) loop so that it follows until no further redirect
    while [Net::HTTPMovedPermanently, Net::HTTPFound].include?(loginresponse.class)
      if loginresponse.kind_of?(Net::HTTPFound) && loginresponse['Location'] =~ /sso.*\/session\/token.*/
        puts "** Found session token" if @debug
        break
      end
      puts "** Following redirect #{loginresponse.class.to_s} => #{loginresponse['Location'].to_s}" if @debug
      loginuri = URI.parse(loginresponse['Location'])
      loginreq = Net::HTTP::Post.new(loginuri.request_uri)
      loginreq.set_form_data({'login' => login, 'password' => password})
      loginresponse = make_http(loginuri).request(loginreq)
    end # while loginresponse.kind_of?(Net::HTTPMovedPermanently)

    if @debug
      puts "AUTH POST response (#{loginresponse.code}):"
      if loginresponse.body.strip.empty?
        puts '<Body empty>'
      else
        puts loginresponse.body
      end
    end

    # SSO does a number of redirects until you get to the right domain but should just follow once and get the cookie, will become Net::HTTPNotAcceptable (406).
    if loginresponse.kind_of?(Net::HTTPFound) && loginresponse['Location'] =~ /sso.*\/session\/token.*/
      puts "** Following redirect #{loginresponse.class.to_s} => #{loginresponse['Location'].to_s}" if @debug
      loginuri = URI.parse(loginresponse['Location'])
      loginreq = Net::HTTP::Get.new(loginuri.request_uri)
      loginresponse = make_http(loginuri).request(loginreq)
    end

    # The SSO server sends back 200 if authentication succeeds, 401 or 403
    # if it does not.
    if loginresponse.kind_of?(Net::HTTPSuccess) || (loginresponse.kind_of?(Net::HTTPFound) && loginresponse['Location'] =~ /^#{loginuri.scheme}:\/\/#{loginuri.host}\/$/ ) || loginresponse.kind_of?(Net::HTTPNotAcceptable)
      puts "Authentication against server succeeded" if (@debug)
      extract_cookie(loginresponse, loginuri, login)
      puts "Resending original request now that we've authenticated" if (@debug)
      loopcounter += 1
      return send_request(req, uri, login, password_callback, loopcounter,stopflag)
    else
      puts "Authentication against server failed" if (@debug)
    end
  end

  response
end
|
|
1526
|
+
|
|
1527
|
+
# Converts a REXML element tree from the server into plain Ruby data.
# The server hints at arrays via a type="array" attribute; elements with
# at most one child collapse to their text; anything else becomes a hash
# keyed by child element name.
def xml_to_ruby(xmlnode)
  if xmlnode.attributes['type'] == 'array'
    converted = []
    xmlnode.elements.each { |child| converted << xml_to_ruby(child) }
    converted
  elsif xmlnode.size <= 1
    xmlnode.text
  else
    converted = {}
    xmlnode.elements.each do |child|
      converted[child.name] = xml_to_ruby(child)
    end
    converted
  end
end
|
|
1545
|
+
|
|
1546
|
+
# FIXME: Would be really nice to figure out a way to use the Rails inflector
|
|
1547
|
+
# FIXME: Would be really nice to figure out a way to use the Rails inflector
#
# Crude inflector: "statuses" -> "status", "ip_addresses" -> "ip_address",
# "nodes" -> "node", "vips" -> "vip"; words without a trailing "s" pass
# through unchanged.
def singularize(word)
  if (m = /(.*s)es$/.match(word))
    m[1]
  elsif (m = /(.*)s$/.match(word))
    m[1]
  else
    word
  end
end
|
|
1562
|
+
|
|
1563
|
+
# Locates the sar(1) binary by searching PATH plus known sysstat install
# locations. Returns the absolute path to the first executable candidate,
# or nil when none is found.
#
# Bug fix: the original fell off the end of `.each`, returning the search
# array itself (not nil) when sar was absent; now returns nil explicitly,
# matching get_racadm.
def find_sar
  path_env = (ENV['PATH'] || "").split(':')
  other_paths = ["/usr/bin", "/data/svc/sysstat/bin"]
  sarname = 'sar'
  (path_env | other_paths).each do |path|
    candidate = File.join(path, sarname)
    return candidate if File.executable?(candidate)
  end
  return nil
end
|
|
1573
|
+
|
|
1574
|
+
# Runs "sar -u" (optionally against the historical file sa<day> in sar_dir)
# and returns the raw CPU sample lines, filtering out headers, averages and
# blank/banner lines. LC_TIME is forced to POSIX so timestamps parse
# consistently.
def get_sar_data(sar_dir=nil, day = nil)
  sar = find_sar
  ENV['LC_TIME']='POSIX'
  cmd = day ? "#{sar} -u -f #{sar_dir}/sa#{day}" : "#{sar} -u"
  output = `#{cmd}`
  output.split("\n").reject { |line| line =~ /(average|cpu|%|linux)/i }
end
|
|
1590
|
+
|
|
1591
|
+
# I'm sure there's a better way to do all of these. However,
|
|
1592
|
+
# I'm just following the way the code was written in Perl.
|
|
1593
|
+
# I'm sure there's a better way to do all of these. However,
# I'm just following the way the code was written in Perl.
#
# Averages the last ~3 hours of sar CPU samples and returns CPU
# utilization as a percentage (100 - avg idle). Linux only.
# NOTE(review): returns nil for non-Linux/no-data, but false when the sar
# output is empty — callers should treat both as "no value"; confirm
# before unifying.
def getcpupercent
  return nil if !Facter['kernel'] or Facter['kernel'].value != 'Linux'
  sar_dir = "/var/log/sa"
  end_time = Time.now
  start_time = end_time - 60*60*3
  end_date = end_time.strftime("%d")
  start_date = start_time.strftime("%d")

  data_points = []
  # all hours in same day so just make list of all hours to look for
  if end_date == start_date
    today_sar = get_sar_data
    return false if today_sar.empty?

    # We only take avg of last 3 hours
    (start_time.hour..end_time.hour).each do | hour |
      # sar timestamps are zero-padded, e.g. "08:10:01"
      hour = "0#{hour}" if hour < 10
      today_sar.each do |line|
        # last whitespace-separated field on a sar -u line is %idle
        data_points << $1.to_f if line =~ /^#{hour}:.*\s(\S+)$/
      end
    end
  else
    # The 3-hour window straddles midnight: read today's live data plus
    # yesterday's archived file.
    today_sar = get_sar_data
    yesterday_sar = get_sar_data(sar_dir, start_date)
    return false if today_sar.empty? or yesterday_sar.empty?
    # Parse today sar data
    (0..end_time.hour).each do | hour |
      hour = "0#{hour}" if hour < 10
      today_sar.each do |line|
        data_points << $1.to_f if line =~ /^#{hour}:.*\s(\S+)$/
      end
    end

    # Parse yesterday sar data
    (start_time.hour..23).each do | hour |
      hour = "0#{hour}" if hour < 10
      yesterday_sar.each do |line|
        data_points << $1.to_f if line =~ /^#{hour}:.*\s(\S+)$/
      end
    end
  end

  # no data points
  return nil if data_points.empty?

  avg = data_points.inject(0.0) { |sum, el| sum + el } / data_points.size
  # sar reports % idle, so need the opposite
  result = 100 - avg
  return result
end
|
|
1643
|
+
|
|
1644
|
+
# This is based on the perl version in OSInfo.pm
|
|
1645
|
+
# This is based on the perl version in OSInfo.pm
#
# Counts entries in `last` output whose timestamp falls within the last
# few hours. Returns nil if the `last` command cannot be run.
#
# darrendao: Looks like this number has to match up with how often
# nventory-client is run in the crontab, otherwise, nventory server ends up
# miscalculating the sum... bad...
def getlogincount
  # How many hours of data we need to sample, not to exceed 24h
  minus_hours = 3

  # get unix cmd 'last' content
  begin
    last_output = `last`
  rescue
    warn "Failed to run 'last' command"
    return nil
  end

  total = 0
  (0..minus_hours).each do |minus_hour|
    # e.g. "Jan 05 14" — month, day and hour prefix for that sample window
    stamp = (Time.now - 60*60*minus_hour).strftime("%b %d %H")
    last_output.split("\n").each do |line|
      total += 1 if line =~ /#{stamp}/
    end
  end
  total
end
|
|
1673
|
+
|
|
1674
|
+
# This is based on the perl version in OSInfo.pm
|
|
1675
|
+
# This is based on the perl version in OSInfo.pm
#
# Sums used/available KB from `df -k` for the root and /home filesystems.
# Returns {:avail_space => ..., :used_space => ...}, or nil if df fails.
def getdiskusage
  begin
    df_output = `df -k`
  rescue
    warn "Failed to run df command"
    return nil
  end
  used = 0
  avail = 0
  df_output.split("\n").each do |line|
    next unless line =~ /\s+\d+\s+(\d+)\s+(\d+)\s+\d+%\s+\/($|home$)/
    used += $1.to_i
    avail += $2.to_i
  end
  {:avail_space => avail, :used_space => used}
end
|
|
1693
|
+
|
|
1694
|
+
# Combined view of NFS volumes this host mounts and serves.
def getvolumes
  served = getservedvolumes
  getmountedvolumes.merge(served)
end
|
|
1697
|
+
|
|
1698
|
+
# This is based on the perl version in OSInfo.pm
|
|
1699
|
+
# This is based on the perl version in OSInfo.pm
#
# Parses /etc/exports and returns a flat hash of nVentory form fields
# describing each NFS share this host serves. Returns {} on non-Linux
# hosts or when /etc/exports is absent.
def getservedvolumes
  # only support Linux for now
  return {} unless Facter['kernel'] && Facter['kernel'].value == 'Linux'

  # Don't do anything if exports file is not there
  # (File.exists? was removed in Ruby 3.2; File.exist? is the supported form)
  return {} if !File.exist?("/etc/exports")

  served = {}

  IO.foreach("/etc/exports") do |line|
    if line =~ /(\S+)\s+/
      vol = $1
      served["volumes[served][#{vol}][config]"] = "/etc/exports"
      served["volumes[served][#{vol}][type]"] = 'nfs'
    end
  end
  return served
end
|
|
1717
|
+
|
|
1718
|
+
# This is based on the perl version in OSInfo.pm
|
|
1719
|
+
# This is based on the perl version in OSInfo.pm
#
# Returns a flat hash of nVentory form fields describing NFS volumes this
# host mounts, gathered from autofs map files (/etc/auto[._]*) and
# /etc/fstab. Returns {} on non-Linux hosts.
def getmountedvolumes
  # only support Linux for now
  return {} unless Facter['kernel'] && Facter['kernel'].value == 'Linux'

  dir = "/etc"
  mounted = {}

  # AUTOFS - gather only files named auto[._]*
  Dir.glob(File.join(dir, "*")).each do |file|
    # BUG FIX: the original tested the full glob path ("/etc/auto.master")
    # against /^auto[._]/, which can never match, so the autofs branch
    # never parsed anything. Compare against the basename instead, and
    # skip anything that isn't a regular file (e.g. auto.master.d dirs).
    next if File.basename(file) !~ /^auto[._].*/
    next unless File.file?(file)

    # AUTOFS - match only lines that look like nfs syntax such as host:/path
    IO.foreach(file) do |line|
      if line =~ /\w:\S/ && line !~ /^\s*#/
        # Parse it, Example : " nventory_backup -noatime,intr irvnetappbk:/vol/nventory_backup "
        if line =~ /^(\w[\w\S]+)\s+\S+\s+(\w[\w\S]+):(\S+)/
          mnt = $1
          host = $2
          vol = $3
          mounted["volumes[mounted][/mnt/#{mnt}][config]"] = file
          mounted["volumes[mounted][/mnt/#{mnt}][volume_server]"] = host
          mounted["volumes[mounted][/mnt/#{mnt}][volume]"] = vol
          mounted["volumes[mounted][/mnt/#{mnt}][type]"] = 'nfs'
        end
      end
    end # IO.foreach
  end # Dir.glob

  # FSTAB - has diff syntax than AUTOFS. Example: "server:/usr/local/pub /pub nfs rsize=8192,wsize=8192,timeo=14,intr"
  IO.foreach("/etc/fstab") do |line|
    if line =~ /^(\w[\w\S]+):(\S+)\s+(\S+)\s+nfs/
      host = $1
      vol = $2
      mnt = $3
      mounted["volumes[mounted][#{mnt}][config]"] = "/etc/fstab"
      mounted["volumes[mounted][#{mnt}][volume_server]"] = host
      mounted["volumes[mounted][#{mnt}][volume]"] = vol
      mounted["volumes[mounted][#{mnt}][type]"] = 'nfs'
    end
  end # IO.foreach
  return mounted
end
|
|
1761
|
+
|
|
1762
|
+
# Returns the host's virtualization status as reported by Facter, upgraded
# to "kvm_host" when the kvm kernel module is loaded.
def getvmstatus
  # facter virtual makes calls to commands that are under /sbin
  status = Facter.virtual
  status.chomp!

  # extra check to see if we're running kvm hypervisor
  if Facter['kernel'].value == 'Linux'
    begin
      `grep ^kvm /proc/modules`
      status = "kvm_host" if $? == 0
    rescue
      warn "Failed to get modules information"
    end
  end
  status
end
|
|
1779
|
+
|
|
1780
|
+
# This is based on the perl version in HardwareInfo.pm
|
|
1781
|
+
# This is based on the perl version in HardwareInfo.pm
#
# Counts distinct (physical id, core id) pairs from /proc/cpuinfo so that
# hyperthreaded siblings aren't double-counted. Returns nil on non-Linux
# kernels.
def get_cpu_core_count
  # only support Linux for now
  return nil unless Facter['kernel'].value == 'Linux'

  physicalid = nil
  coreid = nil
  cores = {}
  IO.foreach("/proc/cpuinfo") do |line|
    case line
    when /^processor\s*: (\d+)/
      # new logical processor stanza: reset per-stanza ids
      physicalid = nil
      coreid = nil
    when /^physical id\s*: (\d+)/
      physicalid = $1
    when /^core id\s*: (\d+)/
      coreid = $1
    end
    cores["#{physicalid}:#{coreid}"] = 1 if physicalid && coreid
  end
  cores.size
end
|
|
1806
|
+
|
|
1807
|
+
# Returns "Dell DRAC" when a racadm binary is present on a Linux host,
# nil otherwise — DRAC is the only console type currently detected.
def get_console_type
  # only support Linux for now
  if Facter['kernel'].value == 'Linux' && get_racadm
    return "Dell DRAC"
  end
  nil
end
|
|
1818
|
+
|
|
1819
|
+
# Queries the Dell DRAC via "racadm getsysinfo" and returns a hash with
# whatever of :ip_address, :mac_address and :name could be parsed.
# All failures (including a 10s timeout) are warned about and yield the
# partial (possibly empty) hash.
def get_drac_info
  require 'timeout' # local require: this file does not load timeout globally
  info = {}
  result = nil
  racadm = get_racadm
  begin
    # Fix: bare Kernel#timeout was removed in modern Rubies; the supported
    # API is Timeout.timeout.
    Timeout.timeout(10) do
      cmd = "#{racadm} getsysinfo"
      result = `#{cmd}` || ""
    end
    result.split("\n").each do |line|
      if line =~ /^Current IP Address\s*=/i
        info[:ip_address] = line.split("=")[1].strip
      elsif line =~ /^MAC Address\s*=/i
        info[:mac_address] = line.split("=")[1].strip
      elsif line =~ /^DNS RAC Name\s*=/i
        info[:name] = line.split("=")[1].strip
      end
    end
  rescue Timeout::Error
    warn "Timed out when trying to get drac info"
  rescue Exception => e
    # NOTE(review): rescuing Exception (not StandardError) is intentional
    # in the original; it also swallows SystemExit/Interrupt — confirm
    # before narrowing.
    warn e.inspect
    warn "Failed to get DRAC IP"
  end
  return info
end
|
|
1845
|
+
|
|
1846
|
+
# Searches PATH plus Dell's default install locations for the racadm
# binary; returns its full path, or nil when it is not installed.
def get_racadm
  search_dirs = (ENV['PATH'] || "").split(':') | ["/usr/sbin", "/opt/dell/srvadmin/sbin"]
  search_dirs.each do |dir|
    candidate = File.join(dir, 'racadm')
    return candidate if File.executable?(candidate)
  end
  return nil
end
|
|
1856
|
+
|
|
1857
|
+
# Returns chassis info for supported hardware; currently only Dell systems
# are handled (via omreport). Everything else yields {}.
def get_chassis_info
  # Only support Dell hardware for now
  fact = Facter['manufacturer']
  if fact && fact.value && fact.value =~ /Dell/
    return get_dell_chassis_info
  end
  {}
end
|
|
1869
|
+
|
|
1870
|
+
# call Dell's omreport command to get service tag
|
|
1871
|
+
# of the chassis, and the slot value of where the
|
|
1872
|
+
# given node resides in. Result is stored in hash with
|
|
1873
|
+
# service_tag and slot_num as the keys
|
|
1874
|
+
# call Dell's omreport command to get service tag
# of the chassis, and the slot value of where the
# given node resides in. Result is stored in hash with
# service_tag and slot_num as the keys
#
# Returns {} when the host is not actually in a blade enclosure (no slot
# reported, e.g. a PowerEdge 1950) or when omreport fails/times out with
# nothing parsed.
def get_dell_chassis_info
  ENV['PATH'] = "#{ENV['PATH']}:/opt/dell/srvadmin/bin/"
  require 'timeout' # local require: this file does not load timeout globally
  chassis = {}
  result = nil
  begin
    # Fix: bare Kernel#timeout was removed in modern Rubies; the supported
    # API is Timeout.timeout.
    Timeout.timeout(5) do
      result = `omreport chassis info -fmt ssv`
    end
    result.split("\n").each do |line|
      if line =~ /Server Module Location;Slot (\d+)/
        chassis[:slot_num] = $1.to_i
      elsif line =~ /Chassis Service Tag/
        chassis[:service_tag] = line.split(";")[1].strip
      end
    end
    # if no slot_number then the blade isn't really in a chassis/blade enclosure
    # such as the case with Dell PowerEdge 1950
    return {} if chassis[:slot_num].nil?
  rescue Timeout::Error
    warn "Timed out when trying to run omreport"
  rescue
    warn "Failed to run/parse Dell's omreport command"
  end
  return chassis
end
|
|
1906
|
+
|
|
1907
|
+
# Currently, the only info this method gathers is the info
|
|
1908
|
+
# of the guests running on this kvm host
|
|
1909
|
+
# Currently, the only info this method gathers is the info
# of the guests running on this kvm host.
#
# Lists libvirt domains via "virsh list --all" and collects per-guest
# details; returns {guest_hostname => info_hash}. Failures to run or
# parse virsh are warned about and yield whatever was gathered.
def get_kvm_hostinfo
  guests = {}
  begin
    `virsh list --all`.split("\n").each do |line|
      next unless line =~ /(\d+)\s+(\S+)\s+(\S+)/
      hostname = $2
      guests[hostname] = get_kvm_guest_info(hostname)
    end
  rescue
    warn "Failed to run/parse virsh command"
  end
  guests
end
|
|
1924
|
+
|
|
1925
|
+
# Currently, the only info this method gathers is the
|
|
1926
|
+
# image size
|
|
1927
|
+
# Currently, the only info this method gathers is the
# image size
#
# Parses "virsh dumpxml <guest>" for the backing disk image path and
# reports its size under 'vmimg_size' (nVentory expects KB).
def get_kvm_guest_info(guest)
  info = {}
  result = `virsh dumpxml #{guest}`
  result.split("\n").each do |line|
    if line =~ /source file='(.+)'/
      img_path = $1
      # Fix: File.exists? was removed in Ruby 3.2; use File.exist?.
      if File.exist?(img_path)
        # nVentory expects the value to be in KB
        info['vmimg_size'] = File.stat(img_path).size.to_i / 1024
        # how to calculate this?
        # info['vmspace_used'] = ???
      end
    end
  end
  return info.clone
end
|
|
1943
|
+
|
|
1944
|
+
# Most of the code in this method are based on the code of the
|
|
1945
|
+
# perl nVentory client
|
|
1946
|
+
# Most of the code in this method are based on the code of the
# perl nVentory client
#
# Builds a per-interface hash from `/sbin/ifconfig -a` (hardware address,
# type, up state) and enriches Ethernet interfaces with speed/duplex/
# autoneg/link data from `/sbin/ethtool`. Linux only; returns
# {nic_name => attrs_hash}.
def get_nic_info
  info = {}
  os = Facter['kernel'].value
  # only support Linux right now
  return info if os != 'Linux'

  nic = nil
  result = `/sbin/ifconfig -a`
  result.split("\n").each do |line|
    # An unindented token starts a new interface stanza.
    if line =~ /^(\w+\S+)/
      nic = $1
      info[nic] = {}
    end
    # NOTE(review): if a HWaddr/UP line ever preceded the first interface
    # header, nic would still be nil here and info[nic] would raise —
    # ifconfig output presumably never does that, but worth confirming.
    if line =~ /(?:HWaddr|ether) ([\da-fA-F:]+)/
      info[nic][:hardware_address] = $1
      if line =~ /ether/i
        info[nic][:interface_type] = 'Ethernet'
      end
    elsif line =~ /^\s+UP / || line =~ /flags=.*UP,/
      info[nic][:up] = 1
    end
  end

  # Get additional info
  info.each do |nic, nic_info|
    next if nic_info[:interface_type] != 'Ethernet'
    # Skip virtual/bridge interfaces; ethtool data is meaningless for them.
    next if nic =~ /virbr|veth|vif|peth/
    result = `/sbin/ethtool #{nic}`
    result.split("\n").each do |line|
      if line =~ /Speed: (\d+)Mb/
        nic_info[:speed] = $1
      elsif line =~ /Duplex: (\w+)/
        ($1.downcase == 'full')? nic_info[:full_duplex] = 1 : nic_info[:full_duplex] = 0
      elsif line =~ /Advertised auto-negotiation: (.*)/
        ($1.downcase == 'yes')? nic_info[:autonegotiate] = 1 : nic_info[:autonegotiate] = 0
      elsif line =~ /Link detected: (\w+)/
        ($1.downcase == 'yes')? nic_info[:link] = 1: nic_info[:link] = 0
      end
    end
  end
  return info
end
|
|
1988
|
+
|
|
1989
|
+
# Parse dmidecode data and put it into a hash
|
|
1990
|
+
# This method is based on the corresponding method in the perl client
|
|
1991
|
+
# Parse dmidecode data and put it into a hash
# This method is based on the corresponding method in the perl client
#
# Runs dmidecode (or smbios on SunOS) and parses the output into
# {section_name => [section_hash, ...]}; within a section, "Key: value"
# lines become hash entries and "Key:" headers followed by indented lines
# become arrays. The result is memoized in @dmi_data. Returns nil when
# the tool is missing or the OS is unsupported.
#
# Fix: FileTest.exists? was removed in Ruby 3.2; use FileTest.exist?.
def get_dmi_data
  return @dmi_data if @dmi_data

  case Facter.value(:kernel)
  when 'Linux'
    return nil unless FileTest.exist?("/usr/sbin/dmidecode")

    output=%x{/usr/sbin/dmidecode 2>/dev/null}
  when 'FreeBSD'
    return nil unless FileTest.exist?("/usr/local/sbin/dmidecode")

    output=%x{/usr/local/sbin/dmidecode 2>/dev/null}
  when 'NetBSD'
    return nil unless FileTest.exist?("/usr/pkg/sbin/dmidecode")

    output=%x{/usr/pkg/sbin/dmidecode 2>/dev/null}
  when 'SunOS'
    return nil unless FileTest.exist?("/usr/sbin/smbios")

    output=%x{/usr/sbin/smbios 2>/dev/null}
  else
    warn "Can't get dmi_data because of unsupported OS"
    return
  end

  look_for_section_name = false
  dmi_section = nil
  dmi_section_data = {}
  dmi_section_array = nil
  @dmi_data = {}

  output.split("\n").each do |line|
    if line =~ /^Handle/
      # A "Handle" line starts a new record: flush the previous section.
      if dmi_section && !dmi_section_data.empty?
        @dmi_data[dmi_section] ||= []
        @dmi_data[dmi_section] << dmi_section_data
      end
      dmi_section = nil
      dmi_section_data = {}
      dmi_section_array = nil
      look_for_section_name = true
    elsif look_for_section_name
      next if line =~ /^\s*DMI type/
      if line =~ /^\s*(.*)/
        dmi_section = $1
        look_for_section_name = false
      end
    elsif dmi_section && line =~ /^\s*([^:]+):\s*(\S.*)/
      # "Key: value" pair within the current section.
      dmi_section_data[$1] = $2;
      dmi_section_array = nil
    elsif dmi_section && line =~ /^\s*([^:]+):$/
      # "Key:" header — following indented lines accumulate into an array.
      dmi_section_array = $1
    elsif dmi_section && dmi_section_array && line =~ /^\s*(\S.+)$/
      dmi_section_data[dmi_section_array] ||= []
      dmi_section_data[dmi_section_array] << $1
    end
  end
  # NOTE(review): the final record is only flushed if another "Handle"
  # line follows it; dmidecode output appears to end each record with a
  # blank line + next Handle, but the very last section may be dropped —
  # confirm against real output before relying on the last record.
  @dmi_data
end
|
|
2050
|
+
|
|
2051
|
+
# This method is based on the one in the perl client
|
|
2052
|
+
# This is based on the one in the perl client
#
# Sums the sizes (in MB) of all populated, DIMM-like memory devices
# reported by dmidecode. Linux/FreeBSD only; returns nil when dmi data is
# unavailable.
def get_physical_memory
  # only support Linux and FreeBSD right now
  os = Facter['kernel']
  return if os.nil? or (os.value != 'Linux' and os.value != 'FreeBSD')

  dmi_data = get_dmi_data

  return if dmi_data.nil? or dmi_data['Memory Device'].nil?

  total_mb = 0
  dmi_data['Memory Device'].each do |mem_dev|
    size = mem_dev['Size']
    form_factor = mem_dev['Form Factor']
    locator = mem_dev['Locator']
    # Some systems report little chunks of memory other than
    # main system memory as Memory Devices, the 'DIMM' as
    # form factor seems to indicate main system memory.
    # Unfortunately some DIMMs are reported with a form
    # factor of '<OUT OF SPEC>'.  In that case fall back to
    # checking for signs of it being a DIMM in the locator
    # field.
    next if size == 'No Module Installed'
    dimm_like = form_factor == 'DIMM' || form_factor == 'FB-DIMM' ||
                form_factor == 'SODIMM' ||
                (form_factor == '<OUT OF SPEC>' && locator =~ /DIMM/)
    next unless dimm_like
    megs, units = size.split(' ')
    next if units != 'MB'
    total_mb += megs.to_i
  end
  total_mb
end
|
|
2085
|
+
|
|
2086
|
+
end
|