rbvppc 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +17 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +22 -0
- data/README.md +302 -0
- data/Rakefile +8 -0
- data/documentation/hmc.odt +0 -0
- data/documentation/lpar.odt +0 -0
- data/documentation/lun.odt +0 -0
- data/documentation/network.odt +0 -0
- data/documentation/nim.odt +0 -0
- data/documentation/vio.odt +0 -0
- data/documentation/vnic.odt +0 -0
- data/documentation/vscsi.odt +0 -0
- data/examples/add_disk_any_size_to_lpar.rb +46 -0
- data/examples/add_disk_specifing_size_to_lpar.rb +47 -0
- data/examples/remove_disks_and_delete_lpar.rb +35 -0
- data/examples/remove_disks_from_lpar.rb +33 -0
- data/examples/test_lpar_build.rb +83 -0
- data/lib/rbvppc/command_failure.rb +18 -0
- data/lib/rbvppc/connectable_server.rb +95 -0
- data/lib/rbvppc/hmc.rb +892 -0
- data/lib/rbvppc/lpar.rb +1140 -0
- data/lib/rbvppc/lun.rb +23 -0
- data/lib/rbvppc/network.rb +54 -0
- data/lib/rbvppc/nim.rb +442 -0
- data/lib/rbvppc/version.rb +10 -0
- data/lib/rbvppc/vio.rb +720 -0
- data/lib/rbvppc/vnic.rb +34 -0
- data/lib/rbvppc/vscsi.rb +36 -0
- data/lib/rbvppc.rb +28 -0
- data/rbvppc.gemspec +26 -0
- metadata +117 -0
data/lib/rbvppc/hmc.rb
ADDED
|
@@ -0,0 +1,892 @@
|
|
|
1
|
+
#
|
|
2
|
+
# Authors: Christopher M Wood (<woodc@us.ibm.com>)
|
|
3
|
+
# John F Hutchinson (<jfhutchi@us.ibm.com)
|
|
4
|
+
# © Copyright IBM Corporation 2015.
|
|
5
|
+
#
|
|
6
|
+
# LICENSE: MIT (http://opensource.org/licenses/MIT)
|
|
7
|
+
#
|
|
8
|
+
=begin
|
|
9
|
+
TODO:
|
|
10
|
+
1.go through each method and parse output
|
|
11
|
+
|
|
12
|
+
=end
|
|
13
|
+
require_relative 'connectable_server'
|
|
14
|
+
|
|
15
|
+
class Hmc < ConnectableServer
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
#Execute commands on HMC setting language to US English
|
|
19
|
+
# Run a command on the HMC, forcing the locale to US English so that
# downstream output parsing is locale-independent. Echoes the full
# command first when debug is enabled.
def execute_cmd(command)
  full_command = "export LANG=en_US.UTF-8;#{command}"
  puts full_command if debug
  super full_command
end
|
|
23
|
+
|
|
24
|
+
#Execute VIOS commands via HMC
|
|
25
|
+
# Run a VIOS command on +vio+ (managed system +frame+) by tunneling it
# through the HMC's viosvrcmd facility.
def execute_vios_cmd(frame, vio, command)
  tunneled = "viosvrcmd -m #{frame} -p #{vio} -c \" #{command} \""
  execute_cmd tunneled
end
|
|
28
|
+
|
|
29
|
+
#Execute VIOS commands via HMC grepping for a specific item.
|
|
30
|
+
# Run a VIOS command through the HMC and filter its output for lines
# matching +grep_for+ (the grep runs on the HMC side).
def execute_vios_cmd_grep(frame, vio, command, grep_for)
  tunneled = "viosvrcmd -m #{frame} -p #{vio} -c \" #{command} \" | grep #{grep_for}"
  execute_cmd tunneled
end
|
|
33
|
+
|
|
34
|
+
#Get the HMC version
|
|
35
|
+
# Return the HMC version string (the value after "Version:" in
# `lshmc -V` output), with the trailing newline removed.
def get_version
  cmd = "lshmc -V | grep 'Version:'|cut -d':' -f2"
  execute_cmd(cmd).chomp
end
|
|
38
|
+
|
|
39
|
+
#Get the HMC Release
|
|
40
|
+
# Return the HMC release string (the value after "Release:" in
# `lshmc -V` output), with the trailing newline removed.
def get_release
  cmd = "lshmc -V | grep 'Release:'|cut -d':' -f2"
  execute_cmd(cmd).chomp
end
|
|
43
|
+
|
|
44
|
+
#List the Frames managed by HMC
|
|
45
|
+
# List the names of all frames (managed systems) known to this HMC.
# Returns an Array of frame-name Strings.
def list_frames
  execute_cmd("lssyscfg -r sys -F name").each_line.map(&:chomp)
end
|
|
55
|
+
|
|
56
|
+
#List LPARs on a frame
|
|
57
|
+
# List the LPAR names defined on +frame+ (one per profile record).
# Returns an Array of LPAR-name Strings.
def list_lpars_on_frame(frame)
  output = execute_cmd "lssyscfg -r prof -m #{frame} -F lpar_name"
  output.each_line.map(&:chomp)
end
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
#List VLANs on a frame
|
|
69
|
+
# List the VLAN IDs configured on the virtual switches of +frame+.
# Returns an Array of numeric-string VLAN IDs.
#
# Relies on String#numeric? which is not core Ruby — presumably a
# monkey-patch defined elsewhere in this gem; verify before reuse.
def list_vlans_on_frame(frame)
  vlans = []
  result = execute_cmd "lshwres -r virtualio --rsubtype vswitch -m #{frame} -F"
  result.each_line do |line|
    # BUG FIX: was `line = line.chomp!`, which returns nil when the line
    # has no trailing newline (e.g. the final line of command output) and
    # then crashed on the following `delete` call. `chomp` is always safe.
    line = line.chomp.delete "\""
    line.split(',').each do |field|
      vlans.push(field.to_str) if field.numeric?
    end
  end

  return vlans
end
|
|
84
|
+
|
|
85
|
+
#List VIOS on a frame
|
|
86
|
+
# List the names of the VIO servers on +frame+.
# Returns an Array of VIOS LPAR names.
def list_vios_on_frame(frame)
  vios = []
  # BUG FIX: was `exectue_cmd` (typo), which raised NoMethodError on
  # every call; also `lssyscfg -r lpar` (not `-r prof`) is what the
  # original command used, preserved here.
  result = execute_cmd "lssyscfg -r lpar -m #{frame} -F name,lpar_env | grep vioserver"
  result.each_line do |line|
    # BUG FIX: was `line = line.chomp!` — nil when no trailing newline.
    line.chomp.split(',').each do |field|
      # Skip the "vioserver" environment column; keep only the names.
      vios.push(field) unless field == "vioserver"
    end
  end

  return vios
end
|
|
103
|
+
|
|
104
|
+
#Get Frame info- type,model,serial number
|
|
105
|
+
# Fetch identifying information for +frame+: name, type/model and
# serial number. Returns a Hash with keys :name, :type_model and
# :serial_num (values are Strings from the HMC output).
def get_frame_specs(frame)
  wanted = {
    "name"       => :name,
    "type_model" => :type_model,
    "serial_num" => :serial_num
  }
  frame_hash = {}
  raw = execute_cmd("lssyscfg -r sys -m #{frame}")
  raw.chomp.split(",").each do |pair|
    att, val = pair.split("=")
    key = wanted[att]
    frame_hash[key] = val if key
  end
  frame_hash
end
|
|
123
|
+
|
|
124
|
+
#Get Frame info - CPU/vCPU data
|
|
125
|
+
# Fetch system-level processor data for +frame+.
# Returns a Hash keyed by the symbolized attribute names from
# `lshwres -r proc --level sys` (values are Strings).
def get_frame_cpu(frame)
  raw = execute_cmd("lshwres -r proc -m #{frame} --level sys")
  raw.chomp.split(",").each_with_object({}) do |pair, frame_hash|
    att, val = pair.split("=")
    frame_hash[att.to_sym] = val
  end
end
|
|
136
|
+
|
|
137
|
+
#Get Frame info - Memory Data
|
|
138
|
+
# Fetch system-level memory data for +frame+.
#
# Returns a Hash keyed by symbolized attribute names. Plain
# `key=value` attributes map to String values; the quoted attribute
# (e.g. a memory-region ratio list) maps to an Array of Strings. Also
# adds a computed :used_mem key (configurable minus available, as a
# String of MB).
def get_frame_mem(frame)
  info = execute_cmd("lshwres -r mem -m #{frame} --level sys")
  # Capture the FIRST double-quoted substring, then strip ALL quoted
  # substrings from the output so the plain attributes can be split on
  # commas safely.
  # NOTE(review): only the first quoted substring is recovered; if the
  # HMC ever returns more than one quoted attribute, the rest are lost.
  quoted_substrings = info.chomp.match(/"[^"]+"/)
  info.gsub!(/"[^"]+"/, "")

  # Handle the unquoted key=value attributes.
  attributes = info.chomp.split(",")
  frame_hash = {}
  attributes.each do |line|
    att,val = line.split("=")
    frame_hash[att.to_sym]=val
  end

  # Derive used memory: total configurable minus currently available.
  used_mem = frame_hash[:configurable_sys_mem].to_i - frame_hash[:curr_avail_sys_mem].to_i
  frame_hash[:used_mem]=used_mem.to_s

  # Handle the quoted attribute. quoted_substrings is MatchData (or nil);
  # to_s gives the matched text (or "" when there was no quoted attribute).
  quoted_substrings = quoted_substrings.to_s
  # Branch on whether the captured text holds two quoted values ("a","b").
  # NOTE(review): because the capture regex above stops at the first
  # closing quote, the else branch appears unreachable; and when there is
  # no quoted attribute at all, substring[1] is nil and `temp_str.split`
  # below would raise — confirm against real lshwres output.
  unless quoted_substrings.match(/\"(.*)\",\"(.*)\"/)
    substring = quoted_substrings.to_s.chomp.split("=")
    substring.each do |line|
      line.gsub!("\"","")
    end
    temp_str = substring[1]
    ratios = temp_str.split(',')
    frame_hash[:"#{substring[0]}"]=ratios
  else
    quoted = quoted_substrings.chomp.split('","')
    quoted.each do |line|
      line.gsub!("\"","")
      att,val = line.split('=')
      ratios = val.split(',')
      frame_hash[att.to_sym]=ratios
    end
  end

  return frame_hash
end
|
|
179
|
+
|
|
180
|
+
#Get the Current Profile of an LPAR
|
|
181
|
+
# Return the name of the profile an LPAR is currently running with.
# +filter+ selects the lssyscfg filter attribute ("lpar_name" or "lpar_id").
def get_lpar_curr_profile(frame, lpar, filter = "lpar_name")
  execute_cmd("lssyscfg -r lpar -m #{frame} --filter #{filter}s=#{lpar} -F curr_profile").chomp
end
|
|
185
|
+
|
|
186
|
+
#Get the Default Profile of an LPAR
|
|
187
|
+
# Return the name of an LPAR's default profile.
# +filter+ selects the lssyscfg filter attribute ("lpar_name" or "lpar_id").
def get_lpar_def_profile(frame, lpar, filter = "lpar_name")
  execute_cmd("lssyscfg -r lpar -m #{frame} --filter #{filter}s=#{lpar} -F default_profile").chomp
end
|
|
191
|
+
|
|
192
|
+
#Get the general attributes of an lpar by specifying
|
|
193
|
+
#the frame and lpar names as Strings. Returns an options
|
|
194
|
+
#hash representing that LPAR
|
|
195
|
+
# Build an options hash describing an LPAR's current profile, suitable
# for constructing an Lpar object. Reads the profile via lssyscfg and
# maps the HMC attribute names onto this gem's option keys, then adds
# :hmc (this object) and :frame.
def get_lpar_options(frame, lpar, filter = "lpar_name")
  # HMC attribute name -> option hash key. Attributes not listed here
  # are deliberately ignored.
  key_map = {
    "name"               => :current_profile,
    "lpar_name"          => :name,
    "lpar_id"            => :id,
    "min_mem"            => :min_mem,
    "desired_mem"        => :des_mem,
    "max_mem"            => :max_mem,
    "proc_mode"          => :proc_mode,
    "min_proc_units"     => :min_proc,
    "desired_proc_units" => :des_proc,
    "max_proc_units"     => :max_proc,
    "min_procs"          => :min_vcpu,
    "desired_procs"      => :des_vcpu,
    "max_procs"          => :max_vcpu,
    "sharing_mode"       => :sharing_mode,
    "uncap_weight"       => :uncap_weight,
    "max_virtual_slots"  => :max_virt_slots
  }

  profile_name = get_lpar_curr_profile(frame, lpar, filter)
  info = execute_cmd "lssyscfg -r prof -m '#{frame}' --filter profile_names='#{profile_name}',lpar_names='#{lpar}' "

  lpar_hash = {}
  info.chomp.split(",").each do |pair|
    att, val = pair.split("=")
    key = key_map[att]
    lpar_hash[key] = val if key
  end

  lpar_hash[:hmc] = self
  lpar_hash[:frame] = frame

  lpar_hash
end
|
|
244
|
+
|
|
245
|
+
#Reboot the HMC
|
|
246
|
+
# Reboot the HMC itself immediately (hmcshutdown with restart flag).
def reboot_hmc
  cmd = "hmcshutdown -t now -r"
  execute_cmd cmd
end
|
|
249
|
+
|
|
250
|
+
#Show status of lpars on frame (Power 5/6/7)
|
|
251
|
+
#Sample output
|
|
252
|
+
#dwin004:Running
|
|
253
|
+
#rslpl004:Running
|
|
254
|
+
# Show the status of the LPARs on +frame+ as "name:state" lines, e.g.
#   dwin004:Running
# Returns nil when no frame is given (listing across all frames is not
# implemented yet).
def list_status_of_lpars(frame = nil)
  return if frame.nil? # TODO: list LPARs on all frames?
  execute_cmd "lssyscfg -m #{frame} -r lpar -F name:state"
end
|
|
261
|
+
|
|
262
|
+
#Overview DLPAR Status
|
|
263
|
+
# Show the DLPAR (RMC) status overview for all partitions.
def view_dlpar_status
  cmd = "lspartition -dlpar"
  execute_cmd cmd
end
|
|
266
|
+
|
|
267
|
+
#Show available filesystem space on the hmc
|
|
268
|
+
# Show filesystem usage on the HMC (single monhmc disk snapshot).
def view_hmc_filesystem_space
  cmd = "monhmc -r disk -n 0"
  execute_cmd cmd
end
|
|
271
|
+
|
|
272
|
+
#Netboot an lpar
|
|
273
|
+
# Network-boot +lpar+ (an Lpar object) from the NIM server at +nim_ip+.
# Returns true when the HMC output confirms "Network boot proceeding",
# false otherwise.
def lpar_net_boot(nim_ip, lpar_ip, gateway, subnetmask, lpar)
  output = execute_cmd("lpar_netboot -t ent -D -s auto -d auto -A -f -S #{nim_ip} " +
                       "-C #{lpar_ip} -G #{gateway} -K #{subnetmask} \"#{lpar.name}\" " +
                       "\"#{lpar.current_profile}\" \"#{lpar.frame}\" ")
  # Success is signalled by this phrase anywhere in the output.
  output.each_line.any? { |line| line.match(/Network boot proceeding/) }
end
|
|
285
|
+
|
|
286
|
+
#Validate connection to hmc is established
|
|
287
|
+
# True when a command can be executed against the HMC (probed by
# fetching the HMC version and checking the result is non-nil).
def is_connected?
  !get_version.nil?
end
|
|
295
|
+
|
|
296
|
+
# Cobalt: function to find out more information (CPU and Memory) about a frame
|
|
297
|
+
# Fetch system-level hardware info for a frame. +field+ is the lshwres
# resource type (e.g. "proc" or "mem"). Returns a Hash of attribute
# name (String) => value (String).
def get_frame_info frame_id, field
  res = {}
  output = execute_cmd("lshwres -m #{frame_id} -r #{field} --level sys")
  lines = output.nil? ? [] : output.split(",")
  lines.each {|line| key,val=line.split("="); res[key]=val unless key.nil? }
  # BUG FIX: previously the last expression was `lines.each ...`, so the
  # method returned the raw "key=value" array instead of the built hash.
  res
end
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
#########################################################################
|
|
311
|
+
# Deprecated functions
|
|
312
|
+
#########################################################################
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
#Create an LPAR
|
|
316
|
+
# Create an LPAR from an options hash (keys: :frame, :name,
# :profile_name, :max_virtual_slots, :desired_mem, :min_mem, :max_mem,
# :desired_procs, :min_procs, :max_procs, :proc_mode, :sharing_mode,
# :desired_proc_units, :max_proc_units, :min_proc_units, :uncap_weight).
# Deprecated — kept for backward compatibility.
def create_lpar(hash)
  # BUG FIX: removed the stray space after "name=...," — the HMC -i
  # attribute list is comma-separated without spaces, and the leading
  # space made " profile_name" an unrecognized attribute.
  execute_cmd "mksyscfg -r lpar -m #{hash[:frame]} -i name=#{hash[:name]},profile_name=#{hash[:profile_name]},boot_mode=norm," +
    "auto_start=0,lpar_env=aixlinux,max_virtual_slots=#{hash[:max_virtual_slots]},desired_mem=#{hash[:desired_mem]}," +
    "min_mem=#{hash[:min_mem]},max_mem=#{hash[:max_mem]},desired_procs=#{hash[:desired_procs]},min_procs=#{hash[:min_procs]}," +
    "max_procs=#{hash[:max_procs]},proc_mode=#{hash[:proc_mode]},sharing_mode=#{hash[:sharing_mode]},desired_proc_units=#{hash[:desired_proc_units]}," +
    "max_proc_units=#{hash[:max_proc_units]},min_proc_units=#{hash[:min_proc_units]},uncap_weight=#{hash[:uncap_weight]}"
end
|
|
324
|
+
|
|
325
|
+
#Delete an LPAR
|
|
326
|
+
# Remove the LPAR +name+ from +frame+.
def delete_lpar(frame,name)
  cmd = "rmsyscfg -r lpar -m #{frame} -n #{name}"
  execute_cmd cmd
end
|
|
329
|
+
|
|
330
|
+
#Rename an LPAR
|
|
331
|
+
# Rename an LPAR from +oldname+ to +newname+ on +frame+.
def rename_lpar(frame, oldname, newname)
  cmd = "chsyscfg -r lpar -m #{frame} -i 'name=#{oldname},new_name=#{newname}'"
  execute_cmd cmd
end
|
|
334
|
+
|
|
335
|
+
#Active an LPAR using a profile
|
|
336
|
+
# Power on an LPAR using the given profile.
def activate_lpar(frame,name,profile_name)
  cmd = "chsysstate -r lpar -m #{frame} -o on -n #{name} -f #{profile_name}"
  execute_cmd cmd
end
|
|
339
|
+
|
|
340
|
+
#Hard shutdown LPAR
|
|
341
|
+
# Immediately (hard) power off an LPAR.
def hard_shutdown_lpar(frame,name)
  cmd = "chsysstate -r lpar -m #{frame} -o shutdown --immed -n #{name}"
  execute_cmd cmd
end
|
|
344
|
+
|
|
345
|
+
#Soft shutdown an LPAR
|
|
346
|
+
# Request a graceful (delayed) shutdown of an LPAR.
def soft_shutdown_lpar(frame, lpar)
  cmd = "chsysstate -r lpar -m #{frame} -o shutdown -n #{lpar}"
  execute_cmd cmd
end
|
|
349
|
+
|
|
350
|
+
#Get LPAR state
|
|
351
|
+
# Return an LPAR's state string (e.g. "Running", "Not Activated").
def check_lpar_state(frame, lpar)
  cmd = "lssyscfg -r lpar -m #{frame} --filter lpar_names=#{lpar} -F state"
  execute_cmd(cmd).chomp
end
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
#Get the MAC address of an LPAR
|
|
357
|
+
# Return the virtual-ethernet MAC address of +client_lpar+ as a String.
def get_mac_address(frame, client_lpar)
  cmd = "lshwres -r virtualio --rsubtype eth --level lpar -m #{frame} -F mac_addr --filter \"lpar_names=#{client_lpar}\" "
  execute_cmd(cmd).chomp
end
|
|
361
|
+
|
|
362
|
+
# Normalize a virtual-adapter attribute value from HMC output:
# "none" (possibly newline-terminated) becomes "", and a single pair
# of surrounding double quotes is stripped.
def clean_vadapter_string(vadapter_string)
  vadapter_string = "" if vadapter_string.chomp == "none"
  vadapter_string = vadapter_string[1..-1] if vadapter_string.start_with?('"')
  vadapter_string = vadapter_string[0..-2] if vadapter_string.end_with?('"')
  vadapter_string
end
|
|
377
|
+
|
|
378
|
+
# Parse a slash-delimited virtual-NIC definition string into a Hash
# with keys :virtual_slot_num, :is_ieee, :port_vlan_id,
# :additional_vlan_ids, :is_trunk, :is_required.
# Returns nil for an empty string.
def parse_vnic_syntax(vnic_string)
  return if vnic_string.empty?

  fields = [:virtual_slot_num, :is_ieee, :port_vlan_id,
            :additional_vlan_ids, :is_trunk, :is_required]
  parse_slash_delim_string(vnic_string, fields)
end
|
|
401
|
+
|
|
402
|
+
# Easter-egg method: returns "Rich Defeated" for the one magic input,
# nil for anything else.
def defeat_rich_shomo(string)
  return "Rich Defeated" if string == "I have never seen the movie Aliens"
end
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
# Parse a slash-delimited vSCSI adapter definition string into a Hash
# with keys :virtual_slot_num, :client_or_server, :remote_lpar_id,
# :remote_lpar_name, :remote_slot_num, :is_required.
# Returns nil for an empty string.
def parse_vscsi_syntax(vscsi_string)
  return if vscsi_string.empty?

  fields = [:virtual_slot_num, :client_or_server, :remote_lpar_id,
            :remote_lpar_name, :remote_slot_num, :is_required]
  parse_slash_delim_string(vscsi_string, fields)
end
|
|
432
|
+
|
|
433
|
+
# Parse a slash-delimited virtual-serial adapter definition string into
# a Hash (see field list below). Returns nil for an empty string.
def parse_vserial_syntax(vserial_string)
  return if vserial_string.empty?

  fields = [:virtual_slot_num, :client_or_server, :supports_hmc,
            :remote_lpar_id, :remote_lpar_name, :remote_slot_num, :is_required]
  parse_slash_delim_string(vserial_string, fields)
end
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
# Split a slash-delimited HMC attribute string into a Hash.
#   slash_string e.g. "596/1/596//0/1"
#   field_specs  e.g. [:virtual_slot_num, :client_or_server, ...]
# Fields beyond the available values map to nil, matching the original
# index-based behavior.
def parse_slash_delim_string(slash_string, field_specs)
  values = slash_string.split(/\//)
  field_specs.zip(values).to_h
end
|
|
450
|
+
|
|
451
|
+
#Remove a Virtual SCSI Host Adapter
|
|
452
|
+
# Delete a virtual SCSI host adapter device on the VIOS.
def remove_vhost(frame, vio, vhost)
  execute_vios_cmd(frame, vio, "rmdev -dev #{vhost}")
end
|
|
456
|
+
|
|
457
|
+
#Recursively remove a Virtual SCSI Host Adapter
|
|
458
|
+
# Delete a virtual SCSI host adapter and all of its child devices.
def recursive_remove_vhost(frame, vio, vhost)
  execute_vios_cmd(frame, vio, "rmdev -dev #{vhost} -recursive")
end
|
|
462
|
+
|
|
463
|
+
#Assign Disk/Logical Volume to a vSCSI Host Adapter
|
|
464
|
+
# Map a disk/logical volume (+disk+ responds to #name) to a vSCSI host
# adapter, creating the virtual target device +vtd+.
def assign_disk_vhost(frame, vio, disk, vtd, vhost)
  execute_vios_cmd(frame, vio, "mkvdev -vdev #{disk.name} -dev #{vtd} -vadapter #{vhost}")
end
|
|
468
|
+
|
|
469
|
+
#Remove Disk/Logical Volume from vSCSI Host Adapter
|
|
470
|
+
# Remove a virtual target device (by VTD name) from its host adapter.
def remove_vtd_from_vhost(frame, vio, vtd)
  execute_vios_cmd(frame, vio, "rmvdev -vtd #{vtd}")
end
|
|
474
|
+
|
|
475
|
+
#Remove physical disk from vhost adapter
|
|
476
|
+
# Remove a disk mapping (by backing-device name) from its host adapter.
def remove_disk_from_vhost(frame,vio,diskname)
  execute_vios_cmd(frame, vio, "rmvdev -vdev #{diskname}")
end
|
|
480
|
+
|
|
481
|
+
#List Shared Ethernet Adapters on VIOS
|
|
482
|
+
# List the Shared Ethernet Adapter mappings on a VIOS.
def list_shared_eth_adapters(frame,vio)
  execute_vios_cmd(frame, vio, "lsmap -all -net")
end
|
|
486
|
+
|
|
487
|
+
#Get VIOS version
|
|
488
|
+
# Return the VIOS software level (raw `ioslevel` output).
def get_vio_version(frame, vio)
  execute_vios_cmd(frame, vio, "ioslevel")
end
|
|
492
|
+
|
|
493
|
+
#Reboot VIOS
|
|
494
|
+
# Restart a VIOS via its own shutdown command.
def reboot_vio(frame,vio)
  execute_vios_cmd(frame, vio, "shutdown -restart")
end
|
|
498
|
+
|
|
499
|
+
#List unmapped disks on VIOS
|
|
500
|
+
# lspv -free doesn't include disks that have been mapped before and contain a 'VGID'
|
|
501
|
+
# List disks on the VIOS that are not currently mapped to any vhost.
# `lspv -avail` is used rather than `lspv -free` because -free omits
# disks that carry a VGID from a previous mapping; we instead subtract
# the currently mapped disk names ourselves.
# Returns an Array of Lun objects.
def list_available_disks(frame,vio )
  all_disk_output = execute_vios_cmd(frame, vio, "lspv -avail -fmt : -field name pvid size")
  mapped_disks = list_all_mapped_disks(frame,vio)

  all_disk_output.each_line.each_with_object([]) do |line, unmapped|
    disk_name, disk_pvid, disk_size = line.chomp.split(/:/)
    unmapped << Lun.new(disk_name, disk_pvid, disk_size) unless mapped_disks.include?(disk_name)
  end
end
|
|
516
|
+
|
|
517
|
+
#List all Disk Mappings
|
|
518
|
+
# List the names of all disks currently mapped to vhosts on the VIOS.
# Parses the "Backing device <name>" lines of `lsmap -all -type disk`;
# returns unique disk names in first-seen order.
def list_all_mapped_disks(frame, vio)
  backing_lines = execute_vios_cmd_grep(frame, vio, "lsmap -all -type disk", "Backing")
  backing_lines.each_line.map { |line|
    # Column 3 is the disk name ("Backing" / "device" / <name>).
    line.chomp.split(/[[:blank:]]+/)[2]
  }.uniq
end
|
|
532
|
+
|
|
533
|
+
# Pick the first unmapped disk on +vio1+ and locate the same LUN on
# +vio2+ (equality is Lun#==, defined by the Lun class).
# Returns {:on_vio1 => lun, :on_vio2 => lun}, or {} when either VIOS
# has no free disks. Raises when the LUN is not visible on both.
def select_any_avail_disk(frame, vio1, vio2)
  primary_vio_disks = list_available_disks(frame,vio1)
  secondary_vio_disks = list_available_disks(frame,vio2)

  return {} if primary_vio_disks.empty? || secondary_vio_disks.empty?

  vio1_lun = primary_vio_disks.first
  # Keep vio1_lun as the receiver of == to preserve the original
  # comparison direction (Lun#== may not be symmetric).
  vio2_lun = secondary_vio_disks.find { |lun| vio1_lun == lun }

  raise StandardError.new("LUN with PVID #{vio1_lun.pvid} not found on #{vio2}") if vio2_lun.nil?

  {:on_vio1 => vio1_lun, :on_vio2 => vio2_lun}
end
|
|
555
|
+
|
|
556
|
+
|
|
557
|
+
#Find vhost to use when given the vSCSI adapter slot it occupies
|
|
558
|
+
# Find the vhost device occupying vSCSI server slot +server_slot+ on
# +vio+. The physical location code of a virtual adapter always
# contains "V<n>-C<slot>", so we grep the lsmap output for that and
# take the first column (the vhost name).
def find_vhost_given_virtual_slot(frame, vio, server_slot)
  mapping_line = execute_vios_cmd_grep(frame, vio, "lsmap -all", "V.-C#{server_slot}")
  if mapping_line.nil?
    raise StandardError.new("Unable to find vhost on #{vio} for vSCSI adapter in slot #{server_slot}")
  end

  # Columns: vhost name, physical location, client LPAR ID (hex).
  mapping_line.split(/[[:blank:]]+/)[0]
end
|
|
574
|
+
|
|
575
|
+
#Get a list of all disknames attached to a vhost
|
|
576
|
+
# Return the names of all backing disks attached to +vhost+ as an
# Array of Strings (colon-delimited lsmap output, split apart).
def get_attached_disknames(frame,vio,vhost)
  backing = execute_vios_cmd(frame, vio, "lsmap -vadapter #{vhost} -field backing -fmt :")
  backing.chomp.split(/:/)
end
|
|
582
|
+
|
|
583
|
+
#Removes all disks/vhosts/vSCSIs from a client LPAR and it's VIOs
|
|
584
|
+
# Tear down all storage for a client LPAR: for every vSCSI adapter on
# the LPAR, unmap its disks from the owning VIOS, delete the vhost, and
# finally remove the vSCSI adapter pair from both LPAR profiles and
# live hardware. Order matters: disks must be unmapped before the
# vhost is removed, and the vhost before the adapters.
def remove_all_disks_from_lpar(frame,lpar)
  # Get all vscsi adapters on lpar.
  # NOTE(review): get_vscsi_adapters is defined elsewhere in this file/gem.
  vscsi_adapters = get_vscsi_adapters(frame,lpar)

  vscsi_adapters.each do |adapter|
    # Parse the slash-delimited adapter syntax into a hash.
    adapter_hash = parse_vscsi_syntax(adapter)
    # The slot on the VIO side that this client adapter connects to.
    server_slot = adapter_hash[:remote_slot_num]
    # The VIO that serves this adapter.
    vio_name = adapter_hash[:remote_lpar_name]
    # Resolve the vhost device that backs that server slot.
    vhost = find_vhost_given_virtual_slot(frame,vio_name,server_slot)
    # Unmap every disk attached to this vhost.
    disknames = get_attached_disknames(frame,vio_name,vhost)
    disknames.each do |hdisk|
      remove_disk_from_vhost(frame,vio_name,hdisk)
    end
    # Remove the vhost itself.
    remove_vhost(frame,vio_name,vhost)
    # After all disks and the vhost are removed, remove the vSCSI
    # adapter from both the VIO and the client LPAR.
    remove_vscsi(frame,lpar,vio_name,adapter)
  end
end
|
|
610
|
+
|
|
611
|
+
#Remove vSCSI from LPAR
|
|
612
|
+
#Handles removing from the LPAR profiles as well as DLPAR
|
|
613
|
+
#Last parameter is optional and if it isn't specified
|
|
614
|
+
#then it looks for an adapter on lpar that is attached to server_lpar
|
|
615
|
+
#and removes that from the profile/hardware of both the client
|
|
616
|
+
#and server
|
|
617
|
+
# Remove a vSCSI adapter pair between +lpar+ (client) and +server_lpar+
# (VIOS), both from the LPAR profiles and from the running hardware
# (DLPAR). When +adapter_details+ is nil, the first adapter on +lpar+
# whose definition mentions +server_lpar+ is used.
# NOTE(review): if no adapter matches, adapter_details stays nil and
# parse_vscsi_syntax(nil) will raise — confirm callers guarantee a match.
def remove_vscsi(frame,lpar,server_lpar,adapter_details=nil)
  if adapter_details.nil?
    adapters = get_vscsi_adapters(frame,lpar)
    adapters.each do |adapter|
      # Last matching adapter wins if several mention server_lpar.
      adapter_details = adapter if adapter.include?(server_lpar)
    end
  end

  # Parse the adapter details into a hash.
  adapter_hash = parse_vscsi_syntax(adapter_details)

  # Remove this vSCSI from the lpar and server lpar profiles.
  remove_vscsi_from_profile(frame,lpar,server_lpar,adapter_hash)

  # Remove this vSCSI from the actual hardware of lpar and server lpar.
  remove_vscsi_dlpar(frame,lpar,server_lpar,adapter_hash)

end
|
|
635
|
+
|
|
636
|
+
#Remove vSCSI from the LPAR profiles only
|
|
637
|
+
# Remove a vSCSI adapter pair from the client and server LPAR profiles
# only (no DLPAR). +vscsi_hash+ is the output of parse_vscsi_syntax and
# must contain :virtual_slot_num, :remote_lpar_id, :remote_lpar_name,
# :remote_slot_num and :is_required.
def remove_vscsi_from_profile(frame,lpar,server_lpar,vscsi_hash)
  lpar_profile = get_lpar_curr_profile(frame,lpar)
  remote_lpar_profile = get_lpar_curr_profile(frame,server_lpar)
  client_lpar_id = get_lpar_id(frame,lpar)

  #TODO: Add checking of vscsi_hash to make sure it's populated
  # the way it's expected to be

  client_slot = vscsi_hash[:virtual_slot_num]
  server_lpar_id = vscsi_hash[:remote_lpar_id]
  if server_lpar != vscsi_hash[:remote_lpar_name]
    # NOTE(review): mismatch between the server_lpar argument and the
    # adapter's recorded remote LPAR name is silently ignored — decide
    # whether this should raise.
  end
  server_slot = vscsi_hash[:remote_slot_num]
  is_req = vscsi_hash[:is_required]

  # Modify the client LPAR's profile: "-=" removes the matching
  # slot/client/... entry from virtual_scsi_adapters.
  execute_cmd("chsyscfg -r prof -m #{frame} -i \"name=#{lpar_profile},lpar_name=#{lpar}," +
    "virtual_scsi_adapters-=#{client_slot}/client/#{server_lpar_id}/#{server_lpar}/#{server_slot}/#{is_req}\" ")

  # Modify the server LPAR's profile to no longer include the client.
  execute_cmd("chsyscfg -r prof -m #{frame} -i \"name=#{remote_lpar_profile},lpar_name=#{server_lpar}," +
    "virtual_scsi_adapters-=#{server_slot}/server/#{client_lpar_id}/#{lpar}/#{client_slot}/#{is_req}\" ")

end
|
|
665
|
+
|
|
666
|
+
#Remove vSCSI from LPARs via DLPAR
|
|
667
|
+
# Remove a vSCSI adapter pair from the running hardware via DLPAR.
# Currently only the server (VIOS) side is removed; the client-side
# removal is deliberately commented out below.
def remove_vscsi_dlpar(frame,lpar,server_lpar,vscsi_hash)

  client_slot = vscsi_hash[:virtual_slot_num]
  server_slot = vscsi_hash[:remote_slot_num]

  # Client-side DLPAR removal, intentionally disabled.
  #If the client LPAR is running, we have to do DLPAR on it.
  #if check_lpar_state(frame,lpar) == "Running"
  #execute_cmd("chhwres -r virtualio -m #{frame} -p #{lpar} -o r --rsubtype scsi -s #{client_slot}")
  #-a \"adapter_type=client,remote_lpar_name=#{server_lpar},remote_slot_num=#{server_slot}\" ")
  #end

  # DLPAR only works against a running partition; otherwise the profile
  # change alone is sufficient.
  if check_lpar_state(frame,server_lpar) == "Running"
    execute_cmd("chhwres -r virtualio -m #{frame} -p #{server_lpar} -o r --rsubtype scsi -s #{server_slot}")
    #-a \"adapter_type=server,remote_lpar_name=#{lpar},remote_slot_num=#{client_slot}\" ")
  end
end
|
|
684
|
+
|
|
685
|
+
#Add vSCSI to LPAR
|
|
686
|
+
#Handles adding to profile and via DLPAR
|
|
687
|
+
# Create a vSCSI adapter pair between +lpar+ (client) and +server_lpar+
# (VIOS): updates both LPAR profiles, then applies the same change to
# running partitions via DLPAR. Returns [client_slot, server_slot].
def add_vscsi(frame,lpar,server_lpar)
  # Profile change first; remember which slots were allocated.
  client_slot, server_slot = add_vscsi_to_profile(frame, lpar, server_lpar)

  # Then mirror the change onto live hardware where applicable.
  add_vscsi_dlpar(frame, lpar, server_lpar, client_slot, server_slot)

  [client_slot, server_slot]
end
|
|
697
|
+
|
|
698
|
+
#Add vSCSI adapter to LPAR profile
|
|
699
|
+
# Add a matching pair of vSCSI adapters (client on +lpar+, server on
# +server_lpar+) to both LPARs' current profiles. Slots are chosen via
# get_next_slot. Returns [client_slot, server_slot].
# Raises StandardError when either LPAR has no free virtual slot.
def add_vscsi_to_profile(frame,lpar,server_lpar)
  virtual_slot_num = get_next_slot(frame,lpar)
  remote_slot_num = get_next_slot(frame,server_lpar)
  lpar_profile = get_lpar_curr_profile(frame,lpar)
  remote_lpar_profile = get_lpar_curr_profile(frame,server_lpar)

  raise StandardError.new("No available virtual adapter slots on client LPAR #{lpar}") if virtual_slot_num.nil?
  raise StandardError.new("No available virtual adapter slots on server LPAR #{server_lpar}") if remote_slot_num.nil?

  # Modify client LPAR's profile. Adapter syntax:
  # slot/client/<remote_id>/<remote_name>/<remote_slot>/<required>;
  # the remote LPAR id field is left empty (resolved by name).
  execute_cmd("chsyscfg -r prof -m #{frame} -i \"name=#{lpar_profile},lpar_name=#{lpar},virtual_scsi_adapters+=#{virtual_slot_num}/client//#{server_lpar}/#{remote_slot_num}/0\" ")
  # Modify server LPAR's profile (mirror entry pointing back at client).
  execute_cmd("chsyscfg -r prof -m #{frame} -i \"name=#{remote_lpar_profile},lpar_name=#{server_lpar},virtual_scsi_adapters+=#{remote_slot_num}/server//#{lpar}/#{virtual_slot_num}/0\" ")

  # Example invocations:
  #chsyscfg -r prof -m "FrameName" -i "name=ClientLPAR_prof,lpar_name=ClientLPAR,virtual_scsi_adapters+=4/client//ServerLPAR/11/0"
  #chsyscfg -r prof -m "FrameName" -i "name=ServerLPAR_PROFILE,lpar_name=ServerLPAR,virtual_scsi_adapters+=11/server//ClientLPAR/4/0"
  #Return the client slot and server slot used in the LPAR profiles
  return [virtual_slot_num, remote_slot_num]
end
|
|
718
|
+
|
|
719
|
+
#Add vSCSI adapter via DLPAR command
|
|
720
|
+
# Add the vSCSI adapter pair to running partitions via DLPAR. When no
# slots are supplied, fresh ones are picked with get_next_slot (note:
# callers that already updated the profiles should pass the same slots
# returned by add_vscsi_to_profile). Partitions that are not "Running"
# are skipped — their profile change takes effect on next activation.
def add_vscsi_dlpar(frame,lpar,server_lpar,client_slot_to_use = nil, server_slot_to_use = nil)
  if client_slot_to_use.nil? and server_slot_to_use.nil?
    client_slot_to_use = get_next_slot(frame,lpar)
    server_slot_to_use = get_next_slot(frame,server_lpar)
  end

  #If the client LPAR is running, we have to do DLPAR on it.
  if check_lpar_state(frame,lpar) == "Running"
    execute_cmd("chhwres -r virtualio -m #{frame} -p #{lpar} -o a --rsubtype scsi -s #{client_slot_to_use} -a \"adapter_type=client,remote_lpar_name=#{server_lpar},remote_slot_num=#{server_slot_to_use}\" ")
  end

  #If the server LPAR is running, we have to do DLPAR on it.
  if check_lpar_state(frame,server_lpar) == "Running"
    execute_cmd("chhwres -r virtualio -m #{frame} -p #{server_lpar} -o a --rsubtype scsi -s #{server_slot_to_use} -a \"adapter_type=server,remote_lpar_name=#{lpar},remote_slot_num=#{client_slot_to_use}\" ")
  end

  # Example invocation:
  #chhwres -r virtualio -m "FrameName" -p VioName -o a --rsubtype scsi -s 11 -a "adapter_type=server,remote_lpar_name=ClientLPAR,remote_slot_num=5"
end
|
|
738
|
+
|
|
739
|
+
# Show all physical I/O slots on the frame, one line per slot, formatted
# as lpar_name:drc_name:description.
#
# Fix: the previous command contained a bare "--filter" flag with no
# argument ("--filter -F ..."), which the HMC rejects as malformed (the
# original comment noted "Doesn't work malformed command"). Listing every
# slot requires no filter, so the stray flag is simply removed.
def list_all_io_adapters(frame)
  execute_cmd "lshwres -r io -m #{frame} --rsubtype slot -F lpar_name:drc_name:description"
end
|
|
744
|
+
|
|
745
|
+
# Show the physical I/O slots assigned to one LPAR, formatted as
# lpar_name:description (one line per slot).
# NOTE(review): returned no results when tested in the Dublin lab —
# verify the filter syntax against a frame with dedicated I/O.
def list_io_adapters_on_lpar(frame, lpar)
  cmd = "lshwres -r io -m #{frame} --rsubtype slot -F lpar_name:description --filter \"lpar_names=#{lpar}\""
  execute_cmd cmd
end
|
|
750
|
+
|
|
751
|
+
# Look up the partition ID of an LPAR on the given frame.
# Returns the ID as a String with the trailing newline stripped.
def get_lpar_id(frame, lpar)
  execute_cmd("lssyscfg -r lpar -m #{frame} --filter lpar_names=#{lpar} -F lpar_id").chomp
end
|
|
756
|
+
|
|
757
|
+
# Adjust the processing units of an LPAR via DLPAR.
# NOTE(review): "-o a" is the HMC's *add* operation, so this adds the
# given units to the current allocation rather than setting an absolute
# value — confirm callers expect add semantics despite the method name.
def set_lpar_proc_units(frame, lpar, units)
  cmd = "chhwres -r proc -m #{frame} -o a -p #{lpar} --procunits #{units} "
  execute_cmd cmd
end
|
|
761
|
+
|
|
762
|
+
# Read the vSCSI adapter definitions out of the LPAR's current profile.
# Returns an Array of raw adapter strings, one element per adapter
# (a single — possibly empty — entry is wrapped in a one-element array,
# so callers always receive an Array).
def get_vscsi_adapters(frame, lpar)
  # The adapter list lives in the LPAR's current profile.
  lpar_prof = get_lpar_curr_profile(frame,lpar)

  raw = clean_vadapter_string(execute_cmd("lssyscfg -r prof -m #{frame} --filter 'lpar_names=#{lpar},profile_names=#{lpar_prof}' -F virtual_scsi_adapters").chomp)

  # Commas separate individual adapter definitions.
  raw.include?(",") ? raw.split(",") : [raw]
end
|
|
778
|
+
|
|
779
|
+
# Return the maximum number of virtual adapter slots configured in the
# LPAR's current profile, as an Integer.
# (Returned 30 when tested in the Dublin lab on frame rslppc03,
# lpar dwin004.)
def get_max_virtual_slots(frame, lpar)
  profile = get_lpar_curr_profile(frame,lpar)
  output  = execute_cmd "lssyscfg -r prof -m '#{frame}' --filter 'lpar_names=#{lpar},profile_names=#{profile}' -F max_virtual_slots"
  output.chomp.to_i
end
|
|
786
|
+
|
|
787
|
+
# Return an Array of Integers: every virtual adapter slot number already
# in use by the LPAR's current profile, across vSCSI, virtual serial and
# virtual ethernet adapters.
#
# Refactor: the original repeated the same fetch/split/parse sequence
# three times (once per adapter kind). The three kinds now share one
# loop driven by a profile-attribute => parser table; execution order
# (scsi, then serial, then eth) and results are unchanged. Note that
# "".split(",") yields [] while the original produced [""], but empty
# entries were always filtered out before parsing, so behavior is
# identical.
def get_used_virtual_slots(frame, lpar)
  lpar_prof  = get_lpar_curr_profile(frame,lpar)
  used_slots = []

  # Each profile attribute is a comma-separated list of adapter
  # definitions; each adapter kind has its own parser returning a hash
  # that includes :virtual_slot_num.
  {
    'virtual_scsi_adapters'   => :parse_vscsi_syntax,
    'virtual_serial_adapters' => :parse_vserial_syntax,
    'virtual_eth_adapters'    => :parse_vnic_syntax
  }.each do |attribute, parser|
    output = clean_vadapter_string(execute_cmd "lssyscfg -r prof -m '#{frame}' --filter 'lpar_names=#{lpar},profile_names=#{lpar_prof}' -F #{attribute}")
    output.split(",").each do |adapter_line|
      next if adapter_line.empty?
      used_slots.push(send(parser, adapter_line)[:virtual_slot_num].to_i)
    end
  end

  used_slots
end
|
|
847
|
+
|
|
848
|
+
# Find the lowest free virtual adapter slot on an LPAR.
# Ethernet adapters (type == "eth") may use slots from 2 upward; every
# other adapter type starts at slot 11. Returns nil when no usable slot
# exists (all slots up to the profile maximum are taken).
def get_next_slot(frame,lpar, type = nil)
  max_slots  = get_max_virtual_slots(frame,lpar)
  used_slots = get_used_virtual_slots(frame,lpar)
  first_slot = (type == "eth") ? 2 : 11

  # Range#find returns nil when every candidate slot is occupied.
  (first_slot..max_slots).find { |slot| !used_slots.include?(slot) }
end
|
|
865
|
+
|
|
866
|
+
#create vNIC on LPAR profile
#Adds a virtual ethernet adapter to the LPAR's current profile on the
#next free ethernet slot (slots 2+). The "/1/" field hard-codes the
#adapter as IEEE 802.1Q capable.
#NOTE(review): the quoting in the command string closes the inner double
#quote before #{addl_vlan_ids} and reopens it afterwards — verify against
#a live HMC that this produces valid chsyscfg syntax.
def create_vnic(frame,lpar_name,vlan_id,addl_vlan_ids, is_trunk, is_required)
  ##chsyscfg -m Server-9117-MMA-SNxxxxx -r prof -i 'name=server_name,lpar_id=xx,"virtual_eth_adapters=596/1/596//0/1,506/1/506//0/1,"'
  #Adapter field layout:
  #slot_number/is_ieee/port_vlan_id/"additional_vlan_id,additional_vlan_id"/is_trunk(number=priority)/is_required
  lpar_prof = get_lpar_curr_profile(frame,lpar_name)
  slot_number = get_next_slot(frame,lpar_name,"eth")
  #Going to assume adapter will always be ieee
  #For is Trunk how do we determine the number for priority? Do we just let the user pass it?
  result = execute_cmd("chsyscfg -m #{frame} -r prof -i \'name=#{lpar_prof},lpar_name=#{lpar_name},"+
      "\"virtual_eth_adapters+=#{slot_number}/1/#{vlan_id}/\"#{addl_vlan_ids}" +
      "\"/#{is_trunk}/#{is_required} \"\'")
end
|
|
878
|
+
|
|
879
|
+
# Add a virtual ethernet adapter to a running LPAR via DLPAR (chhwres),
# using the next free ethernet slot. As written, ieee_virtual_eth=0
# creates the adapter as NOT IEEE 802.1Q compatible; set that value to 1
# to enable 802.1Q support.
def create_vnic_dlpar(frame, lpar_name,vlan_id)
  slot = get_next_slot(frame,lpar_name, "eth")
  execute_cmd("chhwres -r virtualio -m #{frame} -o a -p #{lpar_name} --rsubtype eth -s #{slot} -a \"ieee_virtual_eth=0,port_vlan_id=#{vlan_id}\"")
end
|
|
885
|
+
|
|
886
|
+
end
|
|
887
|
+
|
|
888
|
+
# Convenience extension used by the rest of the gem.
class String
  # Returns true when the string parses as a Float (e.g. "42", "3.14",
  # "1e6"), false otherwise.
  #
  # Fix: the previous inline-modifier rescue ("true if Float(self)
  # rescue false") silently swallowed *every* StandardError. Float()
  # only raises ArgumentError (bad format) or TypeError, so we rescue
  # exactly those and let anything else propagate.
  def numeric?
    Float(self)
    true
  rescue ArgumentError, TypeError
    false
  end
end
|