opennebula-cli 4.12.1 → 4.13.80.beta1
This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/NOTICE +1 -6
- data/bin/oneacl +1 -1
- data/bin/oneflow +1 -1
- data/bin/onehost +13 -0
- data/bin/oneimage +31 -1
- data/bin/onevcenter +6 -76
- data/bin/onevm +154 -59
- data/lib/one_helper.rb +3 -3
- data/lib/one_helper/oneacct_helper.rb +29 -7
- data/lib/one_helper/onehost_helper.rb +41 -0
- data/lib/one_helper/oneimage_helper.rb +47 -2
- data/lib/one_helper/onequota_helper.rb +3 -3
- data/lib/one_helper/onevm_helper.rb +364 -21
- data/lib/one_helper/onevnet_helper.rb +4 -4
- data/lib/one_helper/onezone_helper.rb +5 -5
- metadata +15 -15
data/lib/one_helper.rb
CHANGED
@@ -649,13 +649,13 @@ user #{self.rname} of the user identified by the username
         EOT
     end
 
-    def self.table_conf
-        path = "#{ENV["HOME"]}/.one/cli/#{
+    def self.table_conf(conf_file=self.conf_file)
+        path = "#{ENV["HOME"]}/.one/cli/#{conf_file}"
 
         if File.exists?(path)
             return path
         else
-            return "#{TABLE_CONF_PATH}/#{
+            return "#{TABLE_CONF_PATH}/#{conf_file}"
         end
     end
 
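The new optional argument lets callers resolve an arbitrary table configuration file instead of the helper's default one. A minimal standalone sketch of the lookup order it applies (user override first, then the system directory; the "/etc/one/cli" path here is an assumption for illustration, not taken from the diff):

    # Sketch of the lookup order: prefer ~/.one/cli/<file>, otherwise fall back
    # to a system-wide table configuration directory.
    def resolve_table_conf(conf_file, system_dir = "/etc/one/cli") # system_dir assumed
      user_path = File.join(ENV["HOME"], ".one", "cli", conf_file)
      File.exist?(user_path) ? user_path : File.join(system_dir, conf_file)
    end

    resolve_table_conf("oneacct.yaml")  # user override wins when it exists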
data/lib/one_helper/oneacct_helper.rb
CHANGED
@@ -114,7 +114,7 @@ class AcctHelper < OpenNebulaHelper::OneHelper
     ACCT_OPTIONS = [START_TIME_ACCT, END_TIME_ACCT, USERFILTER, GROUP, HOST, XPATH, XML, JSON, SPLIT]
     SHOWBACK_OPTIONS = [START_TIME_SHOWBACK, END_TIME_SHOWBACK, USERFILTER, GROUP, XML, JSON]
 
-    ACCT_TABLE = CLIHelper::ShowTable.new("oneacct.yaml", nil) do
+    ACCT_TABLE = CLIHelper::ShowTable.new(self.table_conf("oneacct.yaml"), nil) do
         column :UID, "User ID", :size=>4 do |d|
             d["UID"]
         end
@@ -155,20 +155,42 @@ class AcctHelper < OpenNebulaHelper::OneHelper
             d["VM"]["TEMPLATE"]["CPU"]
         end
 
-        column :
+        column :NETRX, "Data received from the network", :size=>6 do |d|
             # NET is measured in bytes, unit_to_str expects KBytes
-            OpenNebulaHelper.unit_to_str(d["VM"]["
+            OpenNebulaHelper.unit_to_str(d["VM"]["MONITORING/NETRX"].to_i / 1024.0, {})
         end
 
-        column :
+        column :NETTX, "Data sent to the network", :size=>6 do |d|
             # NET is measured in bytes, unit_to_str expects KBytes
-            OpenNebulaHelper.unit_to_str(d["VM"]["
+            OpenNebulaHelper.unit_to_str(d["VM"]["MONITORING/NETTX"].to_i / 1024.0, {})
         end
 
-
+        column :DISK, "Total disk size used", :size=>6 do |d|
+            # DISK size is measured in mb, unit_to_str expects KBytes
+            total_disk_size = 0
+
+            vm_id = d["VM"]["ID"].to_i
+
+            disks_all = [d["VM"]["TEMPLATE"]["DISK"]].flatten.compact rescue []
+            disks_all.each do |disk|
+                total_disk_size += disk["SIZE"].to_i
+            end
+
+            snapshots_all = [d["VM"]["SNAPSHOTS"]].flatten.compact rescue []
+            snapshots_all.each do |snapshot|
+                snapshot_disk = [snapshot["SNAPSHOT"]].flatten.compact rescue []
+                snapshot_disk.each do |snapshot|
+                    total_disk_size += snapshot["SIZE"].to_i
+                end
+            end
+
+            OpenNebulaHelper.unit_to_str(total_disk_size * 1024.0, {})
+        end
+
+        default :VID, :HOSTNAME, :ACTION, :REASON, :START_TIME, :END_TIME, :MEMORY, :CPU, :NETRX, :NETTX, :DISK
     end
 
-    SHOWBACK_TABLE = CLIHelper::ShowTable.new("oneshowback.yaml", nil) do
+    SHOWBACK_TABLE = CLIHelper::ShowTable.new(self.table_conf("oneshowback.yaml"), nil) do
         column :UID, "User ID", :size=>4 do |d|
             d["UID"]
         end
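The [ ... ].flatten.compact idiom used by the new DISK column normalizes the hash produced from the XML, which may be nil, a single element, or an array of elements. A small plain-Ruby illustration, plus a reminder of the unit conventions the columns rely on:

    # nil, a single Hash, or an Array of Hashes all become a flat array of rows.
    [nil].flatten.compact                                  #=> []
    [{"SIZE" => "10240"}].flatten.compact                  #=> [{"SIZE"=>"10240"}]
    [[{"SIZE" => "10240"}, {"SIZE" => "2048"}]].flatten.compact
                                                           #=> two rows

    # Units: NETRX/NETTX arrive in bytes and DISK sizes in MB, while
    # unit_to_str expects KBytes, hence the / 1024.0 and * 1024.0 conversions.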
data/lib/one_helper/onehost_helper.rb
CHANGED
@@ -167,6 +167,11 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
 
     NUM_THREADS = 15
     def sync(host_ids, options)
+        if `id -u`.to_i == 0 || `id -G`.split.collect{|e| e.to_i}.include?(0)
+            STDERR.puts("Cannot run 'onehost sync' as root")
+            exit -1
+        end
+
         begin
             current_version = File.read(REMOTES_LOCATION+'/VERSION').strip
         rescue
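The guard shells out to the id command to refuse running the sync as root or as a member of the root group. For reference, a sketch of an equivalent check using Ruby's process API; the released code uses the backtick calls shown above, not this variant:

    # Roughly equivalent check without spawning a shell (sketch only).
    running_as_root = Process.uid == 0 || Process.groups.include?(0)
    if running_as_root
      STDERR.puts("Cannot run 'onehost sync' as root")
      exit(-1)
    end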
@@ -406,8 +411,44 @@ class OneHostHelper < OpenNebulaHelper::OneHelper
 
         CLIHelper.print_header(str_h1 % "MONITORING INFORMATION", false)
 
+        wilds = host.wilds
+
+        host.delete_element("TEMPLATE/VM")
+        host.delete_element("TEMPLATE_WILDS")
+
         puts host.template_str
 
+        puts
+        CLIHelper.print_header("WILD VIRTUAL MACHINES", false)
+        puts
+
+        format = "%30s %36s %4s %10s"
+        CLIHelper.print_header(format % ["NAME", "IMPORT_ID", "CPU", "MEMORY"],
+                               true)
+
+        wilds.each do |wild|
+            if wild['IMPORT_TEMPLATE']
+                wild_tmplt = Base64::decode64(wild['IMPORT_TEMPLATE']).split("\n")
+                name = wild_tmplt.select { |line|
+                    line[/^NAME/]
+                }[0].split("=")[1].gsub("\"", " ").strip
+                import = wild_tmplt.select { |line|
+                    line[/^IMPORT_VM_ID/]
+                }[0].split("=")[1].gsub("\"", " ").strip
+                memory = wild_tmplt.select { |line|
+                    line[/^MEMORY/]
+                }[0].split("=")[1].gsub("\"", " ").strip
+                cpu = wild_tmplt.select { |line|
+                    line[/^CPU/]
+                }[0].split("=")[1].gsub("\"", " ").strip
+            else
+                name = wild['DEPLOY_ID']
+                import = memory = cpu = "-"
+            end
+
+            puts format % [name, import, cpu, memory]
+        end
+
         puts
         CLIHelper.print_header("VIRTUAL MACHINES", false)
         puts
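Wild VMs may carry a Base64-encoded IMPORT_TEMPLATE whose NAME, IMPORT_VM_ID, MEMORY and CPU attributes are extracted line by line. A self-contained sketch of that parsing with a made-up template body (the attribute names match the code above, the values are illustrative):

    require 'base64'

    raw = Base64.encode64(%(NAME = "wild-vm-01"\nIMPORT_VM_ID = "42"\nMEMORY = "2048"\nCPU = "2"))

    tmplt = Base64.decode64(raw).split("\n")
    name  = tmplt.grep(/^NAME/).first.split("=")[1].gsub('"', ' ').strip  #=> "wild-vm-01"
    cpu   = tmplt.grep(/^CPU/).first.split("=")[1].gsub('"', ' ').strip   #=> "2"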
data/lib/one_helper/oneimage_helper.rb
CHANGED
@@ -86,10 +86,10 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
                 path=Dir.pwd+"/"+o
             end
 
-            if File.
+            if File.readable?(path)
                 [0, path]
             else
-                [-1, "File '#{path}' does not exist."]
+                [-1, "File '#{path}' does not exist or is not readable."]
             end
         end
     },
@@ -292,6 +292,13 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
 
             puts str % [e, mask]
         }
+
+        if image.has_elements?("/IMAGE/SNAPSHOTS")
+            puts
+            CLIHelper.print_header(str_h1 % "IMAGE SNAPSHOTS",false)
+            format_snapshots(image)
+        end
+
         puts
 
         CLIHelper.print_header(str_h1 % "IMAGE TEMPLATE",false)
@@ -311,6 +318,44 @@ class OneImageHelper < OpenNebulaHelper::OneHelper
         end
     end
 
+    def format_snapshots(image)
+        table=CLIHelper::ShowTable.new(nil, self) do
+            column :AC , "Is active", :left, :size => 2 do |d|
+                if d["ACTIVE"] == "YES"
+                    "=>"
+                else
+                    ""
+                end
+            end
+            column :ID, "Snapshot ID", :size=>3 do |d|
+                d["ID"]
+            end
+
+            column :PARENT, "Snapshot Parent ID", :size=>6 do |d|
+                d["PARENT"]
+            end
+
+            column :CHILDREN, "Snapshot Children IDs", :size=>10 do |d|
+                d["CHILDREN"]
+            end
+
+            column :NAME, "Snapshot Name", :left, :size=>45 do |d|
+                d["NAME"]
+            end
+
+            column :DATE, "Snapshot creation date", :size=>15 do |d|
+                OpenNebulaHelper.time_to_str(d["DATE"])
+            end
+
+            default :AC, :ID, :PARENT, :DATE, :CHILDREN, :NAME
+        end
+
+        # Convert snapshot data to an array
+        image_hash = image.to_hash
+        image_snapshots = [image_hash['IMAGE']['SNAPSHOTS']].flatten.first
+        table.show(image_snapshots)
+    end
+
     def self.create_image_variables(options, name)
         if Array===name
             names=name
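format_snapshots feeds the table with the SNAPSHOTS branch of image.to_hash. A standalone sketch (not the gem's code) of turning that nested structure into printable rows, using made-up snapshot data:

    # One SNAPSHOT entry, or a list of them, sits under the SNAPSHOTS key.
    snapshots_hash = { "SNAPSHOT" => [
      { "ID" => "0", "PARENT" => "-1", "ACTIVE" => "YES", "NAME" => "base" },
      { "ID" => "1", "PARENT" => "0", "NAME" => "weekly" }
    ] }

    rows = [snapshots_hash["SNAPSHOT"]].flatten.compact
    rows.each do |r|
      active = r["ACTIVE"] == "YES" ? "=>" : ""
      puts format("%-2s %3s %6s %-20s", active, r["ID"], r["PARENT"], r["NAME"])
    end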
data/lib/one_helper/onequota_helper.rb
CHANGED
(the removed and added comment lines in the three hunks below are identical in visible content; the change is whitespace only)
@@ -74,7 +74,7 @@ class OneQuotaHelper
 
     # Edits the quota template of a resource
     # @param [XMLElement] resource to get the current info from
-    # @param [String] path to the new contents. If nil a editor will be
+    # @param [String] path to the new contents. If nil a editor will be
     # used
     # @param [True|False] is_default To change the help text
     # @return [String] contents of the new quotas
@@ -122,7 +122,7 @@ class OneQuotaHelper
 
     # Retrieves a clean quota template, without any existing resource
     # information
-    # @param path [String] path to the new contents. If nil a editor will be
+    # @param path [String] path to the new contents. If nil a editor will be
     # used
     # @return [String] contents of the new quotas
     def self.get_batch_quota(path)
@@ -159,7 +159,7 @@ class OneQuotaHelper
     # Edits the quota template of a resource, adding the quotas set in str
     # @param resource [PoolElement] to get the current info from
     # @param str [String] quota template, created by get_batch_quota()
-    # @return [String, OpenNebula::Error] merged contents of the new quotas on
+    # @return [String, OpenNebula::Error] merged contents of the new quotas on
     # success, Error if the user info could not be retrieved
     def self.merge_quota(resource, str)
         rc = resource.info
data/lib/one_helper/onevm_helper.rb
CHANGED
@@ -14,8 +14,38 @@
 # limitations under the License. #
 #--------------------------------------------------------------------------- #
 
+if !ONE_LOCATION
+    MAD_LOCATION = "/usr/lib/one/mads"
+    VAR_LOCATION = "/var/lib/one"
+else
+    MAD_LOCATION = ONE_LOCATION + "/lib/mads"
+    VAR_LOCATION = ONE_LOCATION + "/var"
+end
+
+VMS_LOCATION = VAR_LOCATION + "/vms"
+
+$: << MAD_LOCATION
+
 require 'one_helper'
 require 'optparse/time'
+require 'one_tm'
+
+class String
+    def red
+        colorize(31)
+    end
+
+    def green
+        colorize(32)
+    end
+
+    private
+
+    def colorize(color_code)
+        "\e[#{color_code}m#{self}\e[0m"
+    end
+end
+
 
 class OneVMHelper < OpenNebulaHelper::OneHelper
     MULTIPLE={
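The String monkey patch wraps text in ANSI escape sequences (31 is red, 32 is green), which the interactive recovery menu added further below uses for status output. A brief usage sketch with the patch loaded; the message text is illustrative:

    puts "TM Debug Interactive Environment.".green   # printed in green
    puts "FAILURE. Repeat command.".red              # printed in red

    # The escape sequence itself: "\e[32m" switches to green, "\e[0m" resets.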
@@ -123,6 +153,45 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
         return short_state_str
     end
 
+    # Return the IP or several IPs of a VM
+    def self.ip_str(vm)
+        ips = []
+
+        vm_nics = []
+
+        if !vm["TEMPLATE"]["NIC"].nil?
+            vm_nics = [vm["TEMPLATE"]['NIC']].flatten
+        end
+
+        vm_nics.each do |nic|
+            if nic.has_key?("IP")
+                ips.push(nic["IP"])
+            end
+
+            if nic.has_key?("IP6_GLOBAL")
+                ips.push(nic["IP6_GLOBAL"])
+            end
+
+            if nic.has_key?("IP6_ULA")
+                ips.push(nic["IP6_ULA"])
+            end
+        end
+
+        VirtualMachine::EXTERNAL_IP_ATTRS.each do |attr|
+            external_ip = vm["MONITORING"][attr]
+
+            if !external_ip.nil? && !ips.include?(external_ip)
+                ips.push(external_ip)
+            end
+        end
+
+        if ips.empty?
+            return "--"
+        else
+            return ips.join(",")
+        end
+    end
+
     def format_pool(options)
         config_file = self.class.table_conf
 
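ip_str walks the VM's NIC entries and then the monitoring attributes listed in VirtualMachine::EXTERNAL_IP_ATTRS. A simplified standalone sketch of the NIC part, assuming the same hash shape that to_hash produces:

    # Collect IPv4 and IPv6 addresses from a NIC entry, a NIC list, or nil.
    def collect_ips(nics)
      [nics].flatten.compact.flat_map { |nic|
        nic.values_at("IP", "IP6_GLOBAL", "IP6_ULA")
      }.compact
    end

    nics = [{ "IP" => "10.0.0.5" },
            { "IP" => "10.0.1.7", "IP6_GLOBAL" => "2001:db8::7" }]
    collect_ips(nics).join(",")   #=> "10.0.0.5,10.0.1.7,2001:db8::7"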
@@ -208,6 +277,10 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
                 OpenNebulaHelper.period_to_str(dtime, false)
             end
 
+            column :IP, "VM IP addresses", :left, :donottruncate, :size=>15 do |d|
+                OneVMHelper.ip_str(d)
+            end
+
             default :ID, :USER, :GROUP, :NAME, :STAT, :UCPU, :UMEM, :HOST,
                 :TIME
         end
@@ -247,6 +320,123 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
         end
     end
 
+    RECOVER_RETRY_STEPS = {
+        :PROLOG_MIGRATE_FAILURE => :migrate,
+        :PROLOG_MIGRATE_POWEROFF_FAILURE => :migrate,
+        :PROLOG_MIGRATE_SUSPEND_FAILURE => :migrate,
+        :PROLOG_FAILURE => :prolog,
+        :PROLOG_RESUME_FAILURE => :resume,
+        :PROLOG_UNDEPLOY_FAILURE => :resume,
+        :EPILOG_FAILURE => :epilog,
+        :EPILOG_STOP_FAILURE => :stop,
+        :EPILOG_UNDEPLOY_FAILURE => :stop
+    }
+
+    def recover_retry_interactive(vm)
+        # Disable CTRL-C in the menu
+        trap("SIGINT") { }
+
+        if !File.readable?(VAR_LOCATION+"/config")
+            STDERR.puts "Error reading #{VAR_LOCATION+'/config'}. The " <<
+                "TM Debug Interactive Environment must be executed as " <<
+                "oneadmin in the frontend."
+            exit -1
+        end
+
+        rc = vm.info
+        if OpenNebula.is_error?(rc)
+            STDERR.puts rc.message
+            exit -1
+        end
+
+        if !RECOVER_RETRY_STEPS.include?(vm.lcm_state_str.to_sym)
+            STDERR.puts "Current LCM STATE '#{vm.lcm_state_str}' not " <<
+                "compatible with RECOVER RETRY action."
+            exit -1
+        end
+
+        seq = vm['/VM/HISTORY_RECORDS/HISTORY[last()]/SEQ']
+
+        tm_action = RECOVER_RETRY_STEPS[vm.lcm_state_str.to_sym]
+
+        tm_file = "#{VMS_LOCATION}/#{vm.id}/transfer.#{seq}.#{tm_action}"
+
+        if !File.readable?(tm_file)
+            STDERR.puts "Cannot read #{tm_file}"
+            exit -1
+        end
+
+        @tm_action_list = File.read(tm_file)
+
+        puts "TM Debug Interactive Environment.".green
+        puts
+        print_tm_action_list
+
+        @tm = TransferManagerDriver.new(nil)
+        i=0
+        @tm_action_list.lines.each do |tm_command|
+            i+=1
+            success=false
+
+            while !success
+                puts "Current action (#{i}):".green
+                puts tm_command
+                puts
+
+                puts <<-EOF.gsub(/^\s+/,"")
+                    Choose action:
+                    (r) Run action
+                    (n) Skip to next action
+                    (a) Show all actions
+                    (q) Quit
+                EOF
+
+                ans = ""
+                while !%w(n a r q).include?(ans)
+                    printf "> "
+                    ans = STDIN.gets.strip.downcase
+
+                    puts
+
+                    case ans
+                    when "n"
+                        success = true
+                    when "a"
+                        print_tm_action_list
+                    when "q"
+                        exit -1
+                    when "r"
+                        result, result_message = @tm.do_transfer_action(@id, tm_command.split)
+
+                        if result == "SUCCESS"
+                            success = true
+                            puts "#{result}"
+                            puts
+                        else
+                            puts
+                            puts "#{result}. Repeat command.".red
+                            puts
+                        end
+                    end
+                end
+            end
+        end
+
+        puts "If all the TM actions have been successful and you want to"
+        puts "recover the Virtual Machine to the RUNNING state execute this command:"
+        puts "$ onevm recover #{vm.id} --success"
+    end
+
+    def print_tm_action_list
+        puts "TM Action list:".green
+        i=0
+        @tm_action_list.lines.each do |line|
+            i+=1
+            puts "(#{i}) #{line}"
+        end
+        puts
+    end
+
     private
 
     def factory(id=nil)
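RECOVER_RETRY_STEPS maps each recoverable LCM failure state to the transfer-manager step whose manifest has to be replayed, and the manifest is read from transfer.<seq>.<step> under the VM directory. A small usage sketch, relying on the constants defined above and a hypothetical VM 7 with history record 3 on a system-wide installation:

    RECOVER_RETRY_STEPS[:PROLOG_MIGRATE_FAILURE]   #=> :migrate
    RECOVER_RETRY_STEPS[:EPILOG_UNDEPLOY_FAILURE]  #=> :stop

    # Manifest path replayed for a :prolog retry of VM 7, history record 3:
    "#{VMS_LOCATION}/7/transfer.3.prolog"   #=> "/var/lib/one/vms/7/transfer.3.prolog"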
@@ -312,22 +502,37 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
         puts
 
         CLIHelper.print_header(str_h1 % "VIRTUAL MACHINE MONITORING",false)
-        poll_attrs = {
-            "USED MEMORY" => "MEMORY",
-            "USED CPU" => "CPU",
-            "NET_TX" => "NET_TX",
-            "NET_RX" => "NET_RX"
-        }
 
-
-
-
-
-
+        vm_monitoring = vm.to_hash['VM']['MONITORING']
+
+        order_attrs = %w(CPU MEMORY NETTX NETRX)
+
+        vm_monitoring_sort = []
+        order_attrs.each do |key|
+            if (val = vm_monitoring.delete(key))
+                vm_monitoring_sort << [key, val]
+            end
+        end
+
+        vm_monitoring_sort.sort{|a,b| a[0]<=>b[0]}
+
+        filter_attrs = %w(STATE DISK_SIZE SNAPSHOT_SIZE)
+        vm_monitoring.each do |key, val|
+            if !filter_attrs.include?(key)
+                vm_monitoring_sort << [key, val]
+            end
+        end
+
+        vm_monitoring_sort.each do |k,v|
+            if k == "MEMORY"
+                puts str % [k, OpenNebulaHelper.unit_to_str(v.to_i, {})]
+            elsif k =~ /NET.X/
+                puts str % [k, OpenNebulaHelper.unit_to_str(v.to_i/1024, {})]
             else
-                puts str % [k,
+                puts str % [k, v]
             end
-
+        end
+
         puts
 
         CLIHelper.print_header(str_h1 % "PERMISSIONS",false)
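The monitoring section now prints a fixed set of attributes first (CPU, MEMORY, NETTX, NETRX) and then any remaining monitoring keys except STATE, DISK_SIZE and SNAPSHOT_SIZE. A compact standalone sketch of that ordering with made-up data:

    monitoring = { "NETTX" => "1048576", "STATE" => "a",
                   "CPU" => "0.5", "GUEST_IP" => "10.0.0.9" }
    order  = %w(CPU MEMORY NETTX NETRX)
    filter = %w(STATE DISK_SIZE SNAPSHOT_SIZE)

    rows  = order.map { |k| [k, monitoring.delete(k)] }.reject { |_, v| v.nil? }
    rows += monitoring.reject { |k, _| filter.include?(k) }.to_a
    #=> [["CPU", "0.5"], ["NETTX", "1048576"], ["GUEST_IP", "10.0.0.9"]]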
@@ -341,7 +546,33 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
             puts str % [e, mask]
         }
 
+        vm_disks = []
+
         if vm.has_elements?("/VM/TEMPLATE/DISK")
+            vm_disks = [vm.to_hash['VM']['TEMPLATE']['DISK']].flatten
+        end
+
+        if vm.has_elements?("/VM/TEMPLATE/CONTEXT")
+            context_disk = vm.to_hash['VM']['TEMPLATE']['CONTEXT']
+
+            context_disk["IMAGE"] = "CONTEXT"
+            context_disk["DATASTORE"] = "-"
+            context_disk["TYPE"] = "-"
+            context_disk["READONLY"] = "-"
+            context_disk["SAVE"] = "-"
+            context_disk["CLONE"] = "-"
+            context_disk["SAVE_AS"] = "-"
+
+            vm_disks.push(context_disk)
+        end
+
+        # get monitoring data
+        vm_disks.each do |disk|
+            disk_id = disk["DISK_ID"]
+            disk["MONITOR_SIZE"] = vm["MONITORING/DISK_SIZE[ID='#{disk_id}']/SIZE"]
+        end
+
+        if !vm_disks.empty?
             puts
             CLIHelper.print_header(str_h1 % "VM DISKS",false)
             CLIHelper::ShowTable.new(nil, self) do
@@ -374,6 +605,30 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
                     end
                 end
 
+                column :SIZE, "", :left, :size=>9 do |d|
+                    if d["SIZE"]
+                        size = OpenNebulaHelper.unit_to_str(
+                            d['SIZE'].to_i,
+                            {},
+                            "M"
+                        )
+                    else
+                        size = "-"
+                    end
+
+                    if d["MONITOR_SIZE"]
+                        monitor_size = OpenNebulaHelper.unit_to_str(
+                            d['MONITOR_SIZE'].to_i,
+                            {},
+                            "M"
+                        )
+                    else
+                        monitor_size = "-"
+                    end
+
+                    "#{monitor_size}/#{size}"
+                end
+
                 column :TYPE, "", :left, :size=>4 do |d|
                     d["TYPE"].downcase
                 end
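The new SIZE column renders monitored size over allocated size, converting both values from MB with unit_to_str. Roughly, as a sketch that assumes monitor_mb and size_mb hold the MONITOR_SIZE and SIZE values and that the helper classes are loaded:

    # "-" stands in for a missing value, e.g. before the first monitoring cycle.
    used  = monitor_mb ? OpenNebulaHelper.unit_to_str(monitor_mb.to_i, {}, "M") : "-"
    total = size_mb    ? OpenNebulaHelper.unit_to_str(size_mb.to_i,    {}, "M") : "-"
    "#{used}/#{total}"   # e.g. something like "1.2G/10G"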
@@ -390,20 +645,21 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
                     d["CLONE"]
                 end
 
-
-
-
-
-
-                default :ID, :TARGET, :IMAGE, :TYPE,
-                    :SAVE, :SAVE_AS
-            end.show([vm.to_hash['VM']['TEMPLATE']['DISK']].flatten, {})
+                default :ID, :DATASTORE, :TARGET, :IMAGE, :SIZE, :TYPE,
+                    :SAVE
+            end.show(vm_disks, {})
 
             while vm.has_elements?("/VM/TEMPLATE/DISK")
                 vm.delete_element("/VM/TEMPLATE/DISK")
             end if !options[:all]
         end
 
+        if vm.has_elements?("/VM/SNAPSHOTS")
+            puts
+            CLIHelper.print_header(str_h1 % "VM DISK SNAPSHOTS",false)
+            format_snapshots(vm)
+        end
+
         sg_nics = []
 
         if (vm.has_elements?("/VM/TEMPLATE/NIC/SECURITY_GROUPS"))
@@ -730,4 +986,91 @@ class OneVMHelper < OpenNebulaHelper::OneHelper
 
         table.show(history)
     end
+
+    def format_snapshots(vm)
+        table=CLIHelper::ShowTable.new(nil, self) do
+            column :AC , "Is active", :left, :size => 2 do |d|
+                if d["ACTIVE"] == "YES"
+                    "=>"
+                else
+                    ""
+                end
+            end
+            column :ID, "Snapshot ID", :size=>3 do |d|
+                d["ID"]
+            end
+
+            column :DISK, "Disk ID", :size=>4 do |d|
+                d["DISK_ID"]
+            end
+
+            column :PARENT, "Snapshot Parent ID", :size=>6 do |d|
+                d["PARENT"]
+            end
+
+            column :CHILDREN, "Snapshot Children IDs", :size=>10 do |d|
+                d["CHILDREN"]
+            end
+
+            column :SIZE, "", :left, :size=>9 do |d|
+                if d["SIZE"]
+                    size = OpenNebulaHelper.unit_to_str(
+                        d['SIZE'].to_i,
+                        {},
+                        "M"
+                    )
+                else
+                    size = "-"
+                end
+
+                if d["MONITOR_SIZE"]
+                    monitor_size = OpenNebulaHelper.unit_to_str(
+                        d['MONITOR_SIZE'].to_i,
+                        {},
+                        "M"
+                    )
+                else
+                    monitor_size = "-"
+                end
+
+                "#{monitor_size}/#{size}"
+            end
+
+            column :NAME, "Snapshot Name", :left, :size=>26 do |d|
+                d["NAME"]
+            end
+
+            column :DATE, "Snapshot creation date", :size=>10 do |d|
+                OpenNebulaHelper.time_to_str(d["DATE"])
+            end
+
+            default :AC, :ID, :DISK, :PARENT, :DATE, :CHILDREN, :SIZE, :NAME
+        end
+
+        # Convert snapshot data to an array
+        vm_hash = vm.to_hash
+        vm_snapshots = [vm_hash['VM']['SNAPSHOTS']].flatten
+
+        snapshots = []
+
+        vm_snapshots.each do |disk|
+            disk_id = disk['DISK_ID']
+
+            sshots = [disk['SNAPSHOT']].flatten
+            sshots.each do |snapshot|
+                data = snapshot.merge({ 'DISK_ID' => disk_id })
+                snapshots << data
+            end
+        end
+
+        # get monitoring data
+        snapshots.each do |snapshot|
+            disk_id = snapshot["DISK_ID"]
+            snap_id = snapshot["ID"]
+            xpath = "MONITORING/SNAPSHOT_SIZE[ID='#{snap_id}' and DISK_ID='#{disk_id}']/SIZE"
+            snapshot["MONITOR_SIZE"] = vm[xpath]
+        end
+
+        table.show(snapshots)
+    end
 end
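The VM-level format_snapshots flattens the per-disk snapshot lists into a single row set, tagging each row with its DISK_ID before the MONITORING/SNAPSHOT_SIZE lookup. A standalone sketch of that flattening step with made-up snapshot data:

    vm_snapshots = [
      { "DISK_ID" => "0", "SNAPSHOT" => [{ "ID" => "0", "NAME" => "before-resize" },
                                         { "ID" => "1", "NAME" => "weekly" }] },
      { "DISK_ID" => "2", "SNAPSHOT" =>  { "ID" => "0", "NAME" => "base" } }
    ]

    rows = vm_snapshots.flat_map do |disk|
      [disk["SNAPSHOT"]].flatten.map { |s| s.merge("DISK_ID" => disk["DISK_ID"]) }
    end
    rows.map { |r| [r["DISK_ID"], r["ID"], r["NAME"]] }
    #=> [["0", "0", "before-resize"], ["0", "1", "weekly"], ["2", "0", "base"]]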