vagrant-xenserver 0.0.11 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. checksums.yaml +4 -4
  2. data/README.md +103 -25
  3. data/example_box/install_wheezy.sh +128 -0
  4. data/example_box/mkbox.sh +17 -0
  5. data/lib/vagrant-xenserver/action.rb +14 -4
  6. data/lib/vagrant-xenserver/action/clone_disk.rb +1 -1
  7. data/lib/vagrant-xenserver/action/clone_vm.rb +35 -0
  8. data/lib/vagrant-xenserver/action/connect_xs.rb +19 -11
  9. data/lib/vagrant-xenserver/action/create_template.rb +86 -0
  10. data/lib/vagrant-xenserver/action/create_vifs.rb +44 -23
  11. data/lib/vagrant-xenserver/action/create_vm.rb +14 -10
  12. data/lib/vagrant-xenserver/action/destroy_vm.rb +9 -6
  13. data/lib/vagrant-xenserver/action/download_xva.rb +96 -0
  14. data/lib/vagrant-xenserver/action/halt_vm.rb +1 -5
  15. data/lib/vagrant-xenserver/action/prepare_nfs_valid_ids.rb +1 -1
  16. data/lib/vagrant-xenserver/action/read_ssh_info.rb +48 -12
  17. data/lib/vagrant-xenserver/action/read_state.rb +4 -5
  18. data/lib/vagrant-xenserver/action/resume_vm.rb +3 -3
  19. data/lib/vagrant-xenserver/action/set_vm_params.rb +28 -0
  20. data/lib/vagrant-xenserver/action/start_vm.rb +7 -2
  21. data/lib/vagrant-xenserver/action/suspend_vm.rb +3 -3
  22. data/lib/vagrant-xenserver/action/upload_vhd.rb +130 -115
  23. data/lib/vagrant-xenserver/action/upload_xva.rb +63 -50
  24. data/lib/vagrant-xenserver/config.rb +45 -0
  25. data/lib/vagrant-xenserver/errors.rb +20 -0
  26. data/lib/vagrant-xenserver/plugin.rb +8 -0
  27. data/lib/vagrant-xenserver/util/exnhandler.rb +49 -0
  28. data/lib/vagrant-xenserver/version.rb +1 -1
  29. data/locales/en.yml +20 -1
  30. data/vagrant-xenserver.gemspec +1 -0
  31. metadata +24 -5
  32. data/lib/vagrant-xenserver/action/maybe_upload_disk.rb +0 -85
  33. data/test +0 -0
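
Taken together, the action changes below reference several provider settings (xs_host, xs_username, name, memory, pv, use_himn, xva_url) whose plumbing lives in data/lib/vagrant-xenserver/config.rb (+45). The fragment below is a minimal sketch of how a Vagrantfile might set them, assuming the provider is registered as :xenserver; the option names come from the provider_config calls in this diff, while the host name, box name, and values are illustrative only.

Vagrant.configure("2") do |config|
  config.vm.box = "example-box"                    # hypothetical box name

  config.vm.provider :xenserver do |xs|
    xs.xs_host     = "xenserver.example.com"       # host used for the ssh proxy_command
    xs.xs_username = "root"
    xs.name        = "my-vm"                       # overrides the default user/box/version VM name
    xs.memory      = 1024                          # MiB; the actions multiply by 1024*1024 for set_memory_limits
    xs.pv          = false                         # true switches to PV boot via pygrub instead of HVM
    xs.use_himn    = false                         # true attaches a host-internal management network VIF
    xs.xva_url     = "http://example.com/box.xva"  # when set, DownloadXVA imports this XVA as the template
  end
end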

data/lib/vagrant-xenserver/action/create_template.rb
@@ -0,0 +1,86 @@
+require "log4r"
+require "xmlrpc/client"
+require "vagrant-xenserver/util/uploader"
+require "rexml/document"
+require "json"
+require "etc"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class CreateTemplate
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::create_template")
+        end
+
+        def call(env)
+          if env[:template].nil?
+
+            box_name = env[:machine].box.name.to_s
+            box_version = env[:machine].box.version.to_s
+
+            # No template - that means it wasn't a downloaded XVA.
+            # Let's create a VM and attach the uploaded VDI to it.
+            # First see if we've done that already:
+
+            templates = env[:xc].VM.get_all_records_where("field \"is_a_template\"=\"true\"")
+            template = templates.detect { |vm,vmr|
+              vmr["other_config"]["box_name"] == box_name &&
+              vmr["other_config"]["box_version"] == box_version
+            }
+
+            if template.nil?
+              vdi_ref = env[:box_vdi]
+
+              oim = env[:xc].VM.get_by_name_label("Other install media")[0]
+
+              template_name = "#{box_name}.#{box_version}"
+
+              template_ref = env[:xc].VM.clone(oim,template_name)
+
+              vbd_record = {
+                'VM' => template_ref,
+                'VDI' => env[:box_vdi],
+                'userdevice' => '0',
+                'bootable' => true,
+                'mode' => 'RW',
+                'type' => 'Disk',
+                'unpluggable' => false,
+                'empty' => false,
+                'other_config' => {},
+                'qos_algorithm_type' => '',
+                'qos_algorithm_params' => {}
+              }
+
+              vbd_res = env[:xc].VBD.create(vbd_record)
+
+              @logger.info("vbd_res=" + vbd_res.to_s)
+
+              env[:xc].VM.add_to_other_config(template_ref, "box_name", box_name)
+              env[:xc].VM.add_to_other_config(template_ref, "box_version", box_version)
+
+              if env[:machine].provider_config.pv
+                env[:xc].VM.set_HVM_boot_policy(template_ref,"")
+                env[:xc].VM.set_PV_bootloader(template_ref,"pygrub")
+              end
+
+              mem = ((env[:machine].provider_config.memory) * (1024*1024)).to_s
+              env[:xc].VM.set_memory_limits(template_ref,mem,mem,mem,mem)
+
+              env[:template] = template_ref
+
+            else
+              @logger.info("Found pre-existing template for this box")
+              (template_ref, template_rec) = template
+              env[:template] = template_ref
+            end
+
+          end
+
+          @app.call env
+        end
+      end
+    end
+  end
+end

data/lib/vagrant-xenserver/action/create_vifs.rb
@@ -9,40 +9,61 @@ module VagrantPlugins
           @app = app
           @logger = Log4r::Logger.new("vagrant::xenserver::actions::create_vifs")
         end
-
+
+        def create_vif(env, vm, network, mac)
+          vif_devices = env[:xc].VM.get_allowed_VIF_devices(vm)
+
+          vif_record = {
+            'VM' => vm,
+            'network' => network,
+            'device' => vif_devices[0],
+            'MAC' => mac,
+            'MTU' => '1500',
+            'other_config' => {},
+            'qos_algorithm_type' => '',
+            'qos_algorithm_params' => {},
+            'locking_mode' => 'network_default',
+            'ipv4_allowed' => [],
+            'ipv6_allowed' => []
+          }
+
+          vif_res = env[:xc].VIF.create(vif_record)
+
+          return vif_res
+        end
+
         def call(env)
-          myvm = env[:machine].id
+          vm_ref = env[:machine].id
+
+          networks = env[:xc].network.get_all_records
+
+          # Remove all current VIFs
+          current_vifs = env[:xc].VM.get_VIFs(vm_ref)
+          current_vifs.each { |vif| env[:xc].VIF.destroy(vif) }
+
+          # If a HIMN VIF has been asked for, create one
+          if env[:machine].provider_config.use_himn
+            himn = networks.find { |ref,net| net['other_config']['is_host_internal_management_network'] }
+            (himn_ref,himn_rec) = himn
+
+            @logger.debug("himn="+himn.to_s)
+
+            create_vif(env, vm_ref, himn_ref, '')
+          end
+
 
           env[:machine].config.vm.networks.each do |type, options|
-            next if type == :forwarded_port
             @logger.info "got an interface: #{type} #{options}"
 
             if type == :public_network then
               bridge = options[:bridge]
-
-              networks = env[:xc].call("network.get_all_records",env[:session])['Value']
+              mac = options[:mac] || ''
 
               netrefrec = networks.find { |ref,net| net['bridge']==bridge }
               (net_ref,net_rec) = netrefrec
 
-              vif_devices = env[:xc].call("VM.get_allowed_VIF_devices",env[:session],myvm)['Value']
-
-              vif_record = {
-                'VM' => myvm,
-                'network' => net_ref,
-                'device' => vif_devices[0],
-                'MAC' => '',
-                'MTU' => '1500',
-                'other_config' => {},
-                'qos_algorithm_type' => '',
-                'qos_algorithm_params' => {},
-                'locking_mode' => 'network_default',
-                'ipv4_allowed' => [],
-                'ipv6_allowed' => []
-              }
-
-              vif_res = env[:xc].call("VIF.create",env[:session],vif_record)
-
+              vif_res = create_vif(env, vm_ref, net_ref, mac)
+
               @logger.info("vif_res=" + vif_res.to_s)
             end
           end
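
For reference, create_vifs.rb above only acts on :public_network entries: the :bridge option selects the XenServer network whose bridge name matches, and the newly supported :mac option is passed through to VIF.create (an empty string lets the server generate an address). A hypothetical Vagrantfile line exercising it, with an illustrative bridge name and MAC:

config.vm.network :public_network, bridge: "xenbr0", mac: "de:ad:be:ef:12:34"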

data/lib/vagrant-xenserver/action/create_vm.rb
@@ -17,7 +17,7 @@ module VagrantPlugins
         def call(env)
           vdi_ref = env[:my_vdi]
 
-          networks = env[:xc].call("network.get_all_records",env[:session])['Value']
+          networks = env[:xc].network.get_all_records
 
           himn = networks.find { |ref,net| net['other_config']['is_host_internal_management_network'] }
           (himn_ref,himn_rec) = himn
@@ -26,14 +26,18 @@ module VagrantPlugins
 
           username = Etc.getlogin
 
-          oim = env[:xc].call("VM.get_by_name_label",env[:session],"Other install media")['Value'][0]
+          oim = env[:xc].VM.get_by_name_label("Other install media")[0]
 
           box_name = env[:machine].box.name.to_s
           box_version = env[:machine].box.version.to_s
 
-          vm_name = "#{username}/#{box_name}/#{box_version}"
+          if env[:machine].provider_config.name.nil?
+            vm_name = "#{username}/#{box_name}/#{box_version}"
+          else
+            vm_name = env[:machine].provider_config.name
+          end
 
-          vm_ref = env[:xc].call("VM.clone",env[:session],oim,vm_name)['Value']
+          vm_ref = env[:xc].VM.clone(oim,vm_name)
 
           vbd_record = {
             'VM' => vm_ref,
@@ -49,7 +53,7 @@ module VagrantPlugins
             'qos_algorithm_params' => {}
           }
 
-          vbd_res = env[:xc].call("VBD.create",env[:session],vbd_record)
+          vbd_res = env[:xc].VBD.create(vbd_record)
 
           @logger.info("vbd_res=" + vbd_res.to_s)
 
@@ -67,19 +71,19 @@ module VagrantPlugins
             'ipv6_allowed' => []
           }
 
-          vif_res = env[:xc].call("VIF.create",env[:session],vif_record)
+          vif_res = env[:xc].VIF.create(vif_record)
 
           @logger.info("vif_res=" + vif_res.to_s)
 
           if env[:machine].provider_config.pv
-            env[:xc].call("VM.set_HVM_boot_policy",env[:session],vm_ref,"")
-            env[:xc].call("VM.set_PV_bootloader",env[:session],vm_ref,"pygrub")
+            env[:xc].VM.set_HVM_boot_policy(vm_ref,"")
+            env[:xc].VM.set_PV_bootloader(vm_ref,"pygrub")
           end
 
           mem = ((env[:machine].provider_config.memory) * (1024*1024)).to_s
 
-          env[:xc].call("VM.set_memory_limits",env[:session],vm_ref,mem,mem,mem,mem)
-          env[:xc].call("VM.provision",env[:session],vm_ref)
+          env[:xc].VM.set_memory_limits(vm_ref,mem,mem,mem,mem)
+          env[:xc].VM.provision(vm_ref)
 
           env[:machine].id = vm_ref
 

data/lib/vagrant-xenserver/action/destroy_vm.rb
@@ -11,18 +11,21 @@ module VagrantPlugins
         end
 
         def call(env)
-          env[:xc].call("VM.hard_shutdown",env[:session],env[:machine].id)
-
-          vbds = env[:xc].call("VM.get_VBDs",env[:session],env[:machine].id)['Value']
+          begin
+            env[:xc].VM.hard_shutdown(env[:machine].id)
+          rescue
+          end
+
+          vbds = env[:xc].VM.get_VBDs(env[:machine].id)
 
           vbds.each { |vbd|
-            vbd_rec = env[:xc].call("VBD.get_record",env[:session],vbd)['Value']
+            vbd_rec = env[:xc].VBD.get_record(vbd)
             if vbd_rec['type'] == "Disk"
-              env[:xc].call("VDI.destroy",env[:session],vbd_rec['VDI'])
+              env[:xc].VDI.destroy(vbd_rec['VDI'])
             end
           }
 
-          env[:xc].call("VM.destroy",env[:session],env[:machine].id)
+          env[:xc].VM.destroy(env[:machine].id)
 
           env[:machine].id = nil
 

data/lib/vagrant-xenserver/action/download_xva.rb
@@ -0,0 +1,96 @@
+require "log4r"
+require "xmlrpc/client"
+require "vagrant-xenserver/util/uploader"
+require "vagrant-xenserver/util/exnhandler"
+require "rexml/document"
+require "vagrant/util/busy"
+require "vagrant/util/platform"
+require "vagrant/util/subprocess"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class DownloadXVA
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::download_xva")
+        end
+
+        def call(env)
+          xva_url = env[:machine].provider_config.xva_url
+
+          box_name = env[:machine].box.name.to_s
+          box_version = env[:machine].box.version.to_s
+
+          @logger.info("xva_url="+xva_url.to_s)
+          # Check whether we've already downloaded a VM from this URL
+          # When we do, we set an other_config key 'xva_url', so we
+          # can just scan through the VMs looking for it.
+          templates = env[:xc].VM.get_all_records_where("field \"is_a_template\"=\"true\"")
+          template = templates.detect { |vm,vmr|
+            vmr["other_config"]["box_name"] == box_name &&
+            vmr["other_config"]["box_version"] == box_version
+          }
+
+          @logger.info("template="+template.to_s)
+
+          if template.nil? && (not xva_url.nil?)
+            # No template, let's download it.
+            pool=env[:xc].pool.get_all
+            default_sr=env[:xc].pool.get_default_SR(pool[0])
+
+            env[:ui].output("Downloading XVA. This may take some time. Source URL: "+xva_url)
+            task = env[:xc].Async.VM.import(xva_url, default_sr, false, false)
+
+            begin
+              sleep(2.0)
+              task_status = env[:xc].task.get_status(task)
+              task_progress = env[:xc].task.get_progress(task) * 100.0
+              output = "Progress: #{task_progress.round(0)}%"
+              env[:ui].clear_line
+              env[:ui].detail(output, new_line: false)
+            end while task_status == "pending"
+
+            env[:ui].clear_line
+
+            if task_status != "success"
+              # Task failed - let's find out why:
+              error_list = env[:xc].task.get_error_info(task)
+              MyUtil::Exnhandler.handle("Async.VM.import", error_list)
+            end
+
+            task_result = env[:xc].task.get_result(task)
+
+            doc = REXML::Document.new(task_result)
+
+            @logger.debug("task_result=\"#{task_result}\"")
+            template_ref = doc.elements['value/array/data/value'].text
+
+            # Make sure it's really a template, and add the xva_url to other_config:
+            env[:xc].VM.set_is_a_template(template_ref,true)
+            env[:xc].VM.add_to_other_config(template_ref,"xva_url",xva_url)
+            env[:xc].VM.add_to_other_config(template_ref,"box_name",box_name)
+            env[:xc].VM.add_to_other_config(template_ref,"box_version",box_version)
+
+            # Hackity hack: HVM booting guests don't need to set the bootable flag
+            # on their VBDs, but PV do. Let's set bootable=true on VBD device=0
+            # just in case.
+
+            vbds = env[:xc].VM.get_VBDs(template_ref)
+            vbds.each { |vbd|
+              if env[:xc].VBD.get_userdevice(vbd) == "0"
+                env[:xc].VBD.set_bootable(vbd, true)
+              end
+            }
+            env[:template] = template_ref
+          else
+            (template_ref, template_rec) = template
+            env[:template] = template_ref
+          end
+
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
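
The download above follows a simple async-task pattern against the XenAPI proxy held in env[:xc]: start Async.VM.import, poll the task every two seconds, then check the final status. A condensed sketch of that loop, using the same calls as DownloadXVA#call (xc here stands for env[:xc]; this is an illustration, not additional plugin code):

task = xc.Async.VM.import(xva_url, default_sr, false, false)

begin
  sleep(2.0)
  status   = xc.task.get_status(task)
  progress = xc.task.get_progress(task) * 100.0   # fraction -> percent
end while status == "pending"

if status != "success"
  # surface the XenAPI error through the plugin's handler
  MyUtil::Exnhandler.handle("Async.VM.import", xc.task.get_error_info(task))
end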

data/lib/vagrant-xenserver/action/halt_vm.rb
@@ -13,12 +13,8 @@ module VagrantPlugins
         def call(env)
           myvm = env[:machine].id
 
-          shutdown_result = env[:xc].call("VM.clean_shutdown",env[:session],myvm)
+          shutdown_result = env[:xc].VM.clean_shutdown(myvm)
 
-          if shutdown_result["Status"] != "Success"
-            raise Errors::APIError
-          end
-
           @app.call env
         end
       end

data/lib/vagrant-xenserver/action/prepare_nfs_valid_ids.rb
@@ -8,7 +8,7 @@ module VagrantPlugins
         end
 
         def call(env)
-          env[:nfs_valid_ids] = env[:xc].call("VM.get_all",env[:session])['Value']
+          env[:nfs_valid_ids] = env[:xc].VM.get_all
           @app.call(env)
         end
       end

data/lib/vagrant-xenserver/action/read_ssh_info.rb
@@ -12,30 +12,33 @@ module VagrantPlugins
         end
 
         def call(env)
-          env[:machine_ssh_info] = read_ssh_info(env)
+          if env[:machine].provider_config.use_himn
+            env[:machine_ssh_info] = read_ssh_info_himn(env)
+          else
+            env[:machine_ssh_info] = read_ssh_info(env)
+          end
 
           @app.call(env)
         end
 
-        def read_ssh_info(env)
+        def read_ssh_info_himn(env)
           machine = env[:machine]
           return nil if machine.id.nil?
 
           # Find the machine
-          networks = env[:xc].call("network.get_all_records",env[:session])['Value']
-          vif_result = env[:xc].call("VM.get_VIFs",env[:session],machine.id)
+          networks = env[:xc].network.get_all_records
 
-          himn = networks.find { |ref,net| net['other_config']['is_host_internal_management_network'] }
-          (himn_ref,himn_rec) = himn
-
-          if vif_result['Status']=='Failure'
-            # The machine can't be found
+          begin
+            vifs = env[:xc].VM.get_VIFs(machine.id)
+          rescue
             @logger.info("Machine couldn't be found, assuming it got destroyed.")
             machine.id = nil
             return nil
           end
 
-          vifs = vif_result['Value']
+          himn = networks.find { |ref,net| net['other_config']['is_host_internal_management_network'] }
+          (himn_ref,himn_rec) = himn
+
           assigned_ips = himn_rec['assigned_ips']
           (vif,ip) = assigned_ips.find { |vif,ip| vifs.include? vif }
 
@@ -46,15 +49,48 @@ module VagrantPlugins
             :forward_agent => machine.config.ssh.forward_agent,
             :forward_x11 => machine.config.ssh.forward_x11,
           }
-
+
           ssh_info[:proxy_command] = "ssh '#{machine.provider_config.xs_host}' -l '#{machine.provider_config.xs_username}' nc %h %p"
 
           if not ssh_info[:username]
             ssh_info[:username] = machine.config.ssh.default.username
           end
 
-          ssh_info
+          return ssh_info
+        end
+
+        def read_ssh_info(env)
+          machine = env[:machine]
+          return nil if machine.id.nil?
+
+          gm = env[:xc].VM.get_guest_metrics(machine.id)
+
+          begin
+            networks = env[:xc].VM_guest_metrics.get_networks(gm)
+          rescue
+            return nil
+          end
+
+          ip = networks["0/ip"]
+          if ip.nil?
+            return nil
+          end
+
+          ssh_info = {
+            :host => ip,
+            :port => machine.config.ssh.guest_port,
+            :username => machine.config.ssh.username,
+            :forward_agent => machine.config.ssh.forward_agent,
+            :forward_x11 => machine.config.ssh.forward_x11,
+          }
+
+          if not ssh_info[:username]
+            ssh_info[:username] = machine.config.ssh.default.username
+          end
+
+          return ssh_info
         end
+
       end
     end
   end