rbvppc 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/rbvppc/vio.rb ADDED
@@ -0,0 +1,720 @@
+ #
+ # Authors: Christopher M Wood (<woodc@us.ibm.com>)
+ #          John F Hutchinson (<jfhutchi@us.ibm.com>)
+ # © Copyright IBM Corporation 2015.
+ #
+ # LICENSE: MIT (http://opensource.org/licenses/MIT)
+ #
+ require_relative 'lpar'
+ require_relative 'lun'
+
+ class Vio < Lpar
+
+   attr_reader :available_disks, :used_disks
+
+   def initialize(hmc, frame, name)
+     raise StandardError.new("A VIO cannot be defined without a managing HMC") if hmc.nil?
+     raise StandardError.new("A VIO cannot be defined without a name") if name.nil?
+     raise StandardError.new("A VIO cannot be defined without specifying the frame that it resides on") if frame.nil?
+
+     # Connect to the HMC and pull all of the LPAR attributes required for
+     # the superclass' constructor
+     hmc.connect
+     options_hash = hmc.get_lpar_options(frame, name)
+
+     super(options_hash)
+
+     # Get an initial list of the available (and used) disks
+     list_available_disks
+   end
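+
+   # Example usage (an illustrative sketch; the Hmc object, frame name, and
+   # VIO name below are hypothetical and must match your environment):
+   #   vio = Vio.new(hmc, 'Frame01', 'vios1')
+   #   puts vio.get_vio_version   #=> the VIOS ioslevel string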
+
+   # Get the VIOS version
+   def get_vio_version
+     command = "ioslevel"
+     execute_vios_cmd(command)
+   end
+
+   # Reboot the VIOS
+   def reboot
+     command = "shutdown -restart"
+     execute_vios_cmd(command)
+   end
+
+
+   ####################################
+   # VIO listing functions
+   ####################################
+
+   # List unmapped disks on the VIOS.
+   # `lspv -free` doesn't include disks that have been mapped before and contain
+   # a 'VGID', so instead list all available disks and subtract the mapped ones.
+   def list_available_disks
+     command = "lspv -avail -fmt : -field name pvid size"
+     all_disk_output = execute_vios_cmd(command)
+     mapped_disks = list_mapped_disks
+     unmapped_disks = []
+     all_disk_output.each_line do |line|
+       line.chomp!
+       disk_name, disk_pvid, disk_size = line.split(/:/)
+       temp_lun = Lun.new(disk_name, disk_pvid, disk_size)
+       if !mapped_disks.include?(temp_lun)
+         unmapped_disks.push(temp_lun)
+       end
+     end
+
+     # Update the object's local list of available disks and return it
+     @available_disks = unmapped_disks
+     return unmapped_disks
+   end
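+
+   # Example return value for list_available_disks (hypothetical values; each
+   # entry is a Lun built from one colon-delimited `lspv` line):
+   #   [Lun.new("hdisk3", "00f6050a2b8d3f22", "51200"), ...]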
+
+   # List all disk mappings
+   def list_mapped_disks
+     command = "lsmap -all -type disk"
+     result = execute_vios_cmd_grep(command, "Backing")
+
+     mapped_disknames = []
+     result.each_line do |line|
+       line.chomp!
+       line_elements = line.split(/[[:blank:]]+/)
+       # The 3rd element should be the disk name, since the first is 'Backing'
+       # and the second is 'device'
+       disk_name = line_elements[2]
+       mapped_disknames.push(disk_name) if !mapped_disknames.include?(disk_name)
+     end
+
+     command = "lspv -avail -fmt : -field name pvid size"
+     full_disk_info = execute_vios_cmd_grep(command, "hdisk")
+     mapped_disks = []
+     full_disk_info.each_line do |line|
+       line.chomp!
+       disk_name, disk_pvid, disk_size = line.split(/:/)
+       if mapped_disknames.include?(disk_name)
+         mapped_disks.push(Lun.new(disk_name, disk_pvid, disk_size))
+       end
+     end
+
+     @used_disks = mapped_disks
+     return mapped_disks
+   end
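+
+   # The grepped `lsmap` lines parsed above look roughly like (illustrative):
+   #   Backing device                hdisk4
+   # which is why the disk name is taken from the third whitespace-delimited field.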
+
+   # Get a list of all of the disks (as Lun objects) attached to a vhost
+   def get_attached_disks(vhost)
+     cmd = "lsmap -vadapter #{vhost} -field backing -fmt :"
+     diskname_output = execute_vios_cmd(cmd).chomp
+     disk_names = diskname_output.split(/:/)
+     disks = []
+
+     # After getting the list of disk names, iterate
+     # over the used disks and collect an array of the
+     # Luns found to be used.
+     used_disks.each do |disk|
+       if disk_names.include?(disk.name)
+         disks.push(disk)
+       end
+     end
+     return disks
+   end
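+
+   # Example (illustrative; the vhost name is hypothetical):
+   #   vio.get_attached_disks("vhost0")  #=> array of Lun objects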
+
+   # Find the vhost to use when given the vSCSI adapter slot it occupies
+   def find_vhost_given_virtual_slot(server_slot)
+     command = "lsmap -all"
+
+     # TODO: Save the vhost-to-virtual-slot mapping somewhere in the class
+     # and simply iterate over that, refreshing what the mappings are any
+     # time an adapter is added or removed from this LPAR (???)
+
+     # Execute an lsmap and grep for the line that contains the vhost
+     # by finding the line that contains the physical adapter location.
+     # This will definitely contain 'V#-C<slot number>' in its name.
+     result = execute_vios_cmd_grep(command, "V.-C#{server_slot}")
+     raise StandardError.new("Unable to find vhost on #{name} for vSCSI adapter in slot #{server_slot}") if result.nil?
+
+     # Split the result on whitespace to get the columns:
+     # vhost, physical location, client LPAR ID (in hex)
+     mapping_cols = result.split(/[[:blank:]]+/)
+
+     # The name of the vhost will be in the first column of the command output
+     return mapping_cols[0]
+   end
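+
+   # The matched `lsmap` line looks roughly like (hypothetical location code):
+   #   vhost0   U8205.E6B.069086P-V2-C11   0x00000003
+   # so mapping_cols[0] yields "vhost0".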
+
+   ####################################
+   # Disk Mapping Functions
+   ####################################
+
+   # Map any disk on a pair of VIOs given the respective vhosts to map them to
+   def map_any_disk(vhost, second_vio, second_vhost)
+     # Select a disk on each VIO and return a hash containing
+     # the Lun object from each of the VIOs
+     lun_hash = select_any_avail_disk(second_vio)
+
+     # Generate the vtd name to use on each VIO
+     # TODO: Need to enhance this.
+     vtd1_name = "vtd_" + lun_hash[:on_vio1].name
+     vtd2_name = "vtd_" + lun_hash[:on_vio2].name
+
+     # Assign the disk to the first VIO (self)
+     assign_disk_vhost(lun_hash[:on_vio1], vtd1_name, vhost)
+
+     # Assign the disk to the second VIO
+     second_vio.assign_disk_vhost(lun_hash[:on_vio2], vtd2_name, second_vhost)
+   end
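+
+   # Example dual-VIO mapping (illustrative; all names are hypothetical):
+   #   vio1.map_any_disk("vhost0", vio2, "vhost3")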
+
+   # Map a disk on a pair of VIOs given the respective vhosts to map them to,
+   # as well as the disk's PVID
+   def map_by_pvid!(vhost, second_vhost, second_vio, pvid)
+     # Select the disk on each VIO and return a hash containing
+     # the Lun object from each of the VIOs
+     lun_hash = select_disk_by_pvid(second_vio, pvid)
+
+     # Generate the vtd name to use on each VIO
+     # TODO: Need to enhance this.
+     vtd1_name = "vtd_" + lun_hash[:on_vio1].name
+     vtd2_name = "vtd_" + lun_hash[:on_vio2].name
+
+     # Assign the disk to the first VIO (self)
+     assign_disk_vhost(lun_hash[:on_vio1], vtd1_name, vhost)
+
+     # Assign the disk to the second VIO
+     second_vio.assign_disk_vhost(lun_hash[:on_vio2], vtd2_name, second_vhost)
+   end
+
+   # Maps a group of disks to the specified vhosts on a pair of VIOs
+   # based on a given total size requirement
+   def map_by_size(vhost, second_vio, second_vhost, total_size_in_gb)
+     lun_hash = select_disks_by_size(second_vio, total_size_in_gb)
+
+     # Raise an error if lun_hash is an empty hash
+     raise StandardError.new("VIO pair does not have a subset of available disks to satisfy the requested size of #{total_size_in_gb}") if lun_hash.empty?
+
+     vio1_disks = lun_hash[:on_vio1]
+     vio2_disks = lun_hash[:on_vio2]
+
+     # TODO: Possibly find a way to test that the vhost exists
+     # prior to doing anything (i.e., make sure the client LPAR
+     # that this serves has vSCSIs defined for this)
+
+     # Assign all disks to the first VIO
+     vio1_disks.each do |disk|
+       vtd_name = "vtd_" + disk.name
+       assign_disk_vhost(disk, vtd_name, vhost)
+     end
+
+     # Assign all disks to the second VIO
+     vio2_disks.each do |disk|
+       vtd_name = "vtd_" + disk.name
+       second_vio.assign_disk_vhost(disk, vtd_name, second_vhost)
+     end
+   end
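+
+   # Example (illustrative): map roughly 100 GB of storage across both VIOs:
+   #   vio1.map_by_size("vhost0", vio2, "vhost3", 100)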
+
+   # Maps a single disk to the specified vhosts on a pair of VIOs
+   # based on a given size requirement
+   def map_single_disk_by_size(vhost, second_vio, second_vhost, total_size_in_gb)
+     lun_hash = select_single_disk_by_size(second_vio, total_size_in_gb)
+
+     # Raise an error if lun_hash is an empty hash
+     raise StandardError.new("VIO pair does not have a subset of available disks to satisfy the requested size of #{total_size_in_gb}") if lun_hash.empty?
+
+     vio1_disks = lun_hash[:on_vio1]
+     vio2_disks = lun_hash[:on_vio2]
+
+     # TODO: Possibly find a way to test that the vhost exists
+     # prior to doing anything (i.e., make sure the client LPAR
+     # that this serves has vSCSIs defined for this)
+
+     # Assign the selected disk(s) to the first VIO
+     vio1_disks.each do |disk|
+       vtd_name = "vtd_" + disk.name
+       assign_disk_vhost(disk, vtd_name, vhost)
+     end
+
+     # Assign the selected disk(s) to the second VIO
+     vio2_disks.each do |disk|
+       vtd_name = "vtd_" + disk.name
+       second_vio.assign_disk_vhost(disk, vtd_name, second_vhost)
+     end
+   end
+
+   # Unmap all disks on a given LPAR from this VIO and the given secondary VIO
+   # and remove their associated vSCSI adapters
+   def unmap_all_disks(second_vio, client_lpar)
+     vscsi_adapters = client_lpar.get_vscsi_adapters
+
+     # Repeat for each vSCSI found on the client LPAR
+     # TODO: Add better logic for determining which vSCSIs
+     # to use to avoid cases where multiple vSCSIs to each VIO
+     # exist.
+     vscsi_adapters.each do |vscsi|
+       # Determine if this adapter is attached to the primary VIO (self)
+       # or the secondary VIO (second_vio), and assign that to a temp
+       # variable to prevent rewriting the same procedure for both VIOs.
+       if vscsi.remote_lpar_name == name
+         current_vio = self
+       elsif vscsi.remote_lpar_name == second_vio.name
+         current_vio = second_vio
+       else
+         next
+       end
+
+       # Find the vhost associated with this vSCSI on the current VIO
+       vhost = current_vio.find_vhost_given_virtual_slot(vscsi.remote_slot_num)
+
+       # Use the vhost to find all of the disks attached to it
+       disks = current_vio.get_attached_disks(vhost)
+
+       # Remove all of the disks from that vhost
+       disks.each do |disk|
+         current_vio.remove_disk_from_vhost(disk)
+       end
+
+       # Remove that vhost
+       current_vio.remove_vhost(vhost)
+
+       # Remove the client LPAR's vSCSI now that all the disks are detached from it
+       client_lpar.remove_vscsi(current_vio, vscsi)
+     end
+   end
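+
+   # Example teardown (illustrative; `client_lpar` is a hypothetical Lpar):
+   #   vio1.unmap_all_disks(vio2, client_lpar)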
+
+   # Unmap a disk on the given LPAR from this VIO and the given secondary VIO
+   # by the disk's PVID
+   def unmap_by_pvid(second_vio, pvid)
+     # Iterate over the primary VIO's used disks, find the one
+     # we want to remove by its PVID, find that disk on the secondary VIO,
+     # and unmap this disk from each VIO
+     used_disks.each do |vio1_disk|
+       if vio1_disk.pvid == pvid
+         # Find this disk on second_vio
+         second_vio_disks = second_vio.used_disks
+         i = second_vio_disks.index(vio1_disk)
+         raise StandardError.new("Disk with PVID #{pvid} not mapped on #{second_vio.name}. Please ensure this disk is attached to both VIOs in the pair") if i.nil?
+         vio2_disk = second_vio_disks[i]
+
+         # Unmap the disk on the first VIO
+         remove_disk_from_vhost(vio1_disk)
+
+         # Unmap the disk on the second VIO
+         second_vio.remove_disk_from_vhost(vio2_disk)
+
+         return
+       end
+     end
+     raise StandardError.new("Disk with PVID #{pvid} not mapped on #{name}. Please ensure this disk is attached to the VIO")
+   end
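+
+   # Example (illustrative; the PVID is hypothetical):
+   #   vio1.unmap_by_pvid(vio2, "00f6050a2b8d3f22")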
+
+
+   ############################################
+   # VIO command functions
+   ############################################
+
+   # List the Shared Ethernet Adapters on the VIOS
+   def list_shared_eth_adapters
+     command = "lsmap -all -net"
+     execute_vios_cmd(command)
+   end
+
+   # Assign a Disk/Logical Volume to a vSCSI Host Adapter
+   def assign_disk_vhost(disk, vtd, vhost)
+     command = "mkvdev -vdev #{disk.name} -dev #{vtd} -vadapter #{vhost}"
+     execute_vios_cmd(command)
+
+     # If this succeeds, remove the disk from @available_disks
+     # and add it to @used_disks
+     @available_disks.delete(disk)
+     @used_disks.push(disk)
+   end
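+
+   # For a disk named hdisk5, vtd 'vtd_hdisk5', and vhost 'vhost0' (all
+   # hypothetical), the command built above would be:
+   #   mkvdev -vdev hdisk5 -dev vtd_hdisk5 -vadapter vhost0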
+
+   # Recursively remove a Virtual SCSI Host Adapter
+   def recursive_remove_vhost(vhost)
+     command = "rmdev -dev #{vhost} -recursive"
+     execute_vios_cmd(command)
+   end
+
+   # Remove a Disk/Logical Volume from a vSCSI Host Adapter
+   def remove_vtd_from_vhost(vtd)
+     command = "rmvdev -vtd #{vtd}"
+     execute_vios_cmd(command)
+   end
+
+   # Remove a physical disk from a vhost adapter
+   def remove_disk_from_vhost(disk)
+     command = "rmvdev -vdev #{disk.name}"
+     execute_vios_cmd(command)
+
+     # If this succeeds, remove the disk from @used_disks
+     # and add it to @available_disks
+     @used_disks.delete(disk)
+     @available_disks.push(disk)
+   end
+
+   # Remove a Virtual SCSI Host Adapter
+   def remove_vhost(vhost)
+     command = "rmdev -dev #{vhost}"
+     execute_vios_cmd(command)
+   end
+
+
+   #########################################
+   # Base LPAR function overrides
+   # to prevent VIOs from performing
+   # actions that may destroy/adversely
+   # affect an environment's VIOs
+   # (i.e., we shouldn't be able to delete
+   # or create VIOs, just manage them).
+   #########################################
+   def create
+     warn "Unable to execute create on a VIO"
+   end
+
+   def delete
+     warn "Unable to execute delete on a VIO"
+   end
+
+   def hard_shutdown
+     warn "Unable to execute hard_shutdown on a VIO"
+   end
+
+   def soft_shutdown
+     warn "Unable to execute soft_shutdown on a VIO"
+   end
+
+   def desired_vcpu=(units)
+     warn "Unable to change the vcpu on a VIO"
+   end
+
+   def min_vcpu=(units)
+     warn "Unable to change the vcpu on a VIO"
+   end
+
+   def max_vcpu=(units)
+     warn "Unable to change the vcpu on a VIO"
+   end
+
+   def desired_proc_units=(units)
+     warn "Unable to change the proc units on a VIO"
+   end
+
+   def max_proc_units=(units)
+     warn "Unable to change the proc units on a VIO"
+   end
+
+   def min_proc_units=(units)
+     warn "Unable to change the proc units on a VIO"
+   end
+
+   def desired_memory=(units)
+     warn "Unable to change the memory on a VIO"
+   end
+
+   def min_memory=(units)
+     warn "Unable to change the memory on a VIO"
+   end
+
+   def max_memory=(units)
+     warn "Unable to change the memory on a VIO"
+   end
+
+   def remove_vscsi_from_profile(server_lpar, vscsi)
+     warn "Unable to remove vscsi from VIO object"
+   end
+
+   def remove_vscsi_dlpar(server_lpar, vscsi)
+     warn "Unable to remove vscsi on a VIO"
+   end
+
+   def remove_vscsi(server_lpar, adapter_details = nil)
+     warn "Unable to remove vscsi on a VIO"
+   end
+
+   def add_vscsi(server_lpar)
+     warn "Unable to add vscsi on a VIO"
+   end
+
+   def add_vscsi_to_profile(server_lpar)
+     warn "Unable to add vscsi on a VIO"
+   end
+
+   def add_vscsi_dlpar(server_lpar, client_slot_to_use = nil, server_slot_to_use = nil)
+     warn "Unable to add vscsi on a VIO"
+   end
+
+   def create_vnic(vlan_id, addl_vlan_ids = "")
+     warn "Unable to create vnic on a VIO"
+   end
+
+   def create_vnic_profile(slot_number, vlan_id, addl_vlan_ids, is_trunk, is_required)
+     warn "Unable to create vnic on a VIO"
+   end
+
+   def create_vnic_dlpar(slot_number, vlan_id)
+     warn "Unable to create vnic on a VIO"
+   end
+
+   # All private methods
+   private
+
+   # Use this in coordination with another VIO to find an available disk
+   # shared between both of them.
+   # A hash is returned with the selected Lun object from each VIO.
+   def select_any_avail_disk(second_vio)
+     primary_vio_disks = available_disks
+     secondary_vio_disks = second_vio.available_disks
+
+     return {} if primary_vio_disks.empty? or secondary_vio_disks.empty?
+
+     vio1_lun = primary_vio_disks[0]
+     vio2_lun = nil
+     secondary_vio_disks.each do |lun|
+       if vio1_lun == lun
+         vio2_lun = lun
+         break
+       end
+     end
+
+     if vio2_lun.nil?
+       raise StandardError.new("LUN with PVID #{vio1_lun.pvid} not found on #{second_vio.name}")
+     end
+     return { :on_vio1 => vio1_lun, :on_vio2 => vio2_lun }
+   end
+
+   # Use this in coordination with another VIO to find a disk shared between
+   # both of them via its PVID.
+   # A hash is returned with the selected Lun object from each VIO.
+   def select_disk_by_pvid(second_vio, pvid)
+     primary_vio_disks = @available_disks + @used_disks
+     secondary_vio_disks = second_vio.available_disks + second_vio.used_disks
+     return {} if primary_vio_disks.empty? or secondary_vio_disks.empty?
+
+     # Loop through the disks on each VIO to find the PVID
+     vio1_lun = nil
+     vio2_lun = nil
+     primary_vio_disks.each do |lun|
+       if lun.pvid == pvid
+         vio1_lun = lun
+       end
+     end
+     secondary_vio_disks.each do |lun|
+       if lun.pvid == pvid
+         vio2_lun = lun
+       end
+     end
+     # Guard against the nil case: if neither VIO has the disk, both luns are
+     # nil and would otherwise compare as equal
+     if !vio1_lun.nil? && vio1_lun == vio2_lun
+       return { :on_vio1 => vio1_lun, :on_vio2 => vio2_lun }
+     else
+       raise StandardError.new("LUN with PVID #{pvid} not found on both VIOs")
+     end
+   end
+
+   # Select a subset of available disks on this VIO and the given secondary VIO
+   # that satisfies at least the size requirement provided
+   def select_disks_by_size(second_vio, total_size_in_gb)
+     primary_vio_disks = available_disks
+     secondary_vio_disks = second_vio.available_disks
+
+     # Return an empty hash if one or both of the VIOs have no free disks to use
+     return {} if primary_vio_disks.empty? or secondary_vio_disks.empty?
+
+     # Find a collection of disks that works on one of the VIOs,
+     # considering the largest disks first
+     sorted_disks = primary_vio_disks.sort { |x, y| y.size_in_mb <=> x.size_in_mb }
+
+     # Convert the requested size to MB, as all LUN sizes are in those units,
+     # and initialize a variable to hold the 'currently' allocated amount in MB
+     space_left = total_size_in_gb * 1024
+     space_allocated = 0
+
+     # Let the upper limit of what we can allocate be the total size requested,
+     # plus 64 GB (a typical size of a LUN), converted to MB
+     upper_limit = (total_size_in_gb + 64) * 1024
+
+     # Array that will be built up to hold the selected disks on the primary VIO
+     selected_disks = []
+     disks_found = false
+     last_disk = nil
+     until disks_found
+       if space_left <= 0
+         disks_found = true
+         break
+       end
+       # Save the current space_left to determine later whether it was
+       # decremented in this iteration
+       old_space_left = space_left
+
+       sorted_disks.each do |cur_disk|
+         last_disk = cur_disk if last_disk.nil?
+         cur_disk_size = cur_disk.size_in_mb
+         # Test if this disk is at least as large as what is left to allocate
+         # and still fits under the upper bound on allocation
+         if cur_disk_size >= space_left && space_allocated + cur_disk_size <= upper_limit
+           # Select this disk: record it, adjust the space counters,
+           # and remove it from the candidate list
+           selected_disks.push(cur_disk)
+           space_left -= cur_disk_size
+           space_allocated += cur_disk_size
+           last_disk = cur_disk
+           sorted_disks.delete(cur_disk)
+           # Break out of the loop to select the next disk
+           break
+         end
+
+         # Test if this disk is smaller than what we have left to allocate
+         # and still does not put us over the upper bound
+         if cur_disk_size < space_left && cur_disk_size + space_allocated <= upper_limit
+           if cur_disk_size <= (space_left - cur_disk_size)
+             # Select this disk and move on to the next one
+             selected_disks.push(cur_disk)
+             space_left -= cur_disk_size
+             space_allocated += cur_disk_size
+             last_disk = cur_disk
+             sorted_disks.delete(cur_disk)
+             break
+           else
+             # Only take this disk if another candidate can still fit
+             # under the upper bound alongside it
+             old_poss_next_disk = nil
+             sorted_disks.reverse.each do |poss_next_disk|
+               old_poss_next_disk = poss_next_disk if old_poss_next_disk.nil?
+               poss_next_size = poss_next_disk.size_in_mb
+               if (cur_disk_size + poss_next_size + space_allocated) <= upper_limit
+                 selected_disks.push(cur_disk)
+                 space_left -= cur_disk_size
+                 space_allocated += cur_disk_size
+                 last_disk = cur_disk
+                 sorted_disks.delete(cur_disk)
+                 break
+               end
+             end
+           end
+         end
+       end
+
+       # If after iterating over the entire list of disks we haven't
+       # decremented space_left, then fail out, since it wasn't possible
+       # to find another disk to fit the requested size
+       if old_space_left == space_left
+         warn "Unable to select a subset of disks that fulfills the size requested"
+         return {}
+       end
+     end
+
+     # Iterate over the disks that were found on the first VIO
+     # and generate a list of their counterparts on the second VIO
+     selected_disks_vio1 = selected_disks
+     selected_disks_vio2 = []
+     selected_disks_vio1.each do |disk|
+       i = secondary_vio_disks.index(disk)
+       selected_disks_vio2.push(secondary_vio_disks[i])
+     end
+
+     # Return a hash of two arrays, one of which is a list of Luns on the first VIO
+     # and the other of which is a list of their counterparts on the second VIO
+     return { :on_vio1 => selected_disks_vio1, :on_vio2 => selected_disks_vio2 }
+   end
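+
+   # Worked example (hypothetical sizes): requesting 100 GB when 128 GB and
+   # 64 GB disks are free selects just the 128 GB disk, since it alone covers
+   # the request while staying under the 100 + 64 GB upper limit.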
+
+   # Select a single available disk on this VIO and the given secondary VIO
+   # that satisfies at least the size requirement provided.
+   # This version of select-by-size is used for rootvg disks, as it is best
+   # practice to have rootvg on a single disk.
+   def select_single_disk_by_size(second_vio, total_size_in_gb)
+     primary_vio_disks = available_disks
+     secondary_vio_disks = second_vio.available_disks
+
+     # Return an empty hash if one or both of the VIOs have no free disks to use
+     return {} if primary_vio_disks.empty? or secondary_vio_disks.empty?
+
+     # Sort the disks smallest-first, so the single disk selected is the
+     # smallest one that still covers the request
+     sorted_disks = primary_vio_disks.sort { |x, y| x.size_in_mb <=> y.size_in_mb }
+
+     # Convert the requested size to MB, as all LUN sizes are in those units,
+     # and initialize a variable to hold the 'currently' allocated amount in MB
+     space_left = total_size_in_gb * 1024
+     space_allocated = 0
+
+     # Array that will be built up to hold the selected disk on the primary VIO
+     selected_disks = []
+     disks_found = false
+     last_disk = nil
+     until disks_found
+       if space_left <= 0
+         disks_found = true
+         break
+       end
+       # Save the current space_left to determine later whether it was
+       # decremented in this iteration
+       old_space_left = space_left
+
+       sorted_disks.each do |cur_disk|
+         last_disk = cur_disk if last_disk.nil?
+         cur_disk_size = cur_disk.size_in_mb
+         # Select the first (smallest) disk that is at least as large as what
+         # is left to allocate. (The original condition also tested
+         # `space_allocated + cur_disk_size`, which is always truthy, so it is
+         # dropped here.)
+         if cur_disk_size >= space_left
+           # Select this disk: record it, adjust the space counters,
+           # and remove it from the candidate list
+           selected_disks.push(cur_disk)
+           space_left -= cur_disk_size
+           space_allocated += cur_disk_size
+           last_disk = cur_disk
+           sorted_disks.delete(cur_disk)
+           break
+         end
+       end
+
+       # If after iterating over the entire list of disks we haven't
+       # decremented space_left, then fail out, since it wasn't possible
+       # to find a disk to fit the requested size
+       if old_space_left == space_left
+         warn "Unable to select a disk that fulfills the size requested"
+         return {}
+       end
+     end
+
+     # Iterate over the disks that were found on the first VIO
+     # and generate a list of their counterparts on the second VIO
+     selected_disks_vio1 = selected_disks
+     selected_disks_vio2 = []
+     selected_disks_vio1.each do |disk|
+       i = secondary_vio_disks.index(disk)
+       selected_disks_vio2.push(secondary_vio_disks[i])
+     end
+
+     # Return a hash of two arrays, one of which is a list of Luns on the first VIO
+     # and the other of which is a list of their counterparts on the second VIO
+     return { :on_vio1 => selected_disks_vio1, :on_vio2 => selected_disks_vio2 }
+   end
+
+   ###################################
+   # Execute VIOS commands
+   ###################################
+
+   # Execute a VIOS command via the HMC
+   def execute_vios_cmd(command)
+     hmc.execute_cmd "viosvrcmd -m #{frame} -p #{name} -c \" #{command} \""
+   end
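+
+   # For a VIO named 'vios1' on frame 'Frame01' (hypothetical names), calling
+   # get_vio_version would issue the following on the HMC:
+   #   viosvrcmd -m Frame01 -p vios1 -c " ioslevel "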
+
+   # Execute a VIOS command via the HMC, grepping the output for a specific item
+   def execute_vios_cmd_grep(command, grep_for)
+     hmc.execute_cmd "viosvrcmd -m #{frame} -p #{name} -c \" #{command} \" | grep #{grep_for}"
+   end
+
+ end