vagrant-xenserver-jc 0.0.13
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between those package versions.
- checksums.yaml +7 -0
- data/.gitignore +9 -0
- data/CHANGELOG.md +40 -0
- data/Gemfile +14 -0
- data/LICENSE +8 -0
- data/README.md +167 -0
- data/Rakefile +14 -0
- data/example_box/install_wheezy.sh +128 -0
- data/example_box/metadata.json +3 -0
- data/example_box/mkbox.sh +17 -0
- data/lib/vagrant-xenserver/action/clone_disk.rb +30 -0
- data/lib/vagrant-xenserver/action/clone_vm.rb +30 -0
- data/lib/vagrant-xenserver/action/configure_network.rb +61 -0
- data/lib/vagrant-xenserver/action/connect_xs.rb +47 -0
- data/lib/vagrant-xenserver/action/create_template.rb +87 -0
- data/lib/vagrant-xenserver/action/create_vifs.rb +86 -0
- data/lib/vagrant-xenserver/action/create_vm.rb +95 -0
- data/lib/vagrant-xenserver/action/destroy_vm.rb +37 -0
- data/lib/vagrant-xenserver/action/download_xva.rb +101 -0
- data/lib/vagrant-xenserver/action/dummy.rb +16 -0
- data/lib/vagrant-xenserver/action/halt_vm.rb +23 -0
- data/lib/vagrant-xenserver/action/is_created.rb +20 -0
- data/lib/vagrant-xenserver/action/is_running.rb +20 -0
- data/lib/vagrant-xenserver/action/is_suspended.rb +20 -0
- data/lib/vagrant-xenserver/action/prepare_nfs_settings.rb +85 -0
- data/lib/vagrant-xenserver/action/prepare_nfs_valid_ids.rb +17 -0
- data/lib/vagrant-xenserver/action/read_ssh_info.rb +97 -0
- data/lib/vagrant-xenserver/action/read_state.rb +35 -0
- data/lib/vagrant-xenserver/action/resume_vm.rb +30 -0
- data/lib/vagrant-xenserver/action/set_vm_params.rb +28 -0
- data/lib/vagrant-xenserver/action/start_vm.rb +31 -0
- data/lib/vagrant-xenserver/action/suspend_vm.rb +30 -0
- data/lib/vagrant-xenserver/action/upload_vhd.rb +164 -0
- data/lib/vagrant-xenserver/action/upload_xva.rb +100 -0
- data/lib/vagrant-xenserver/action/validate_network.rb +112 -0
- data/lib/vagrant-xenserver/action/wait_himn.rb +58 -0
- data/lib/vagrant-xenserver/action.rb +272 -0
- data/lib/vagrant-xenserver/config.rb +102 -0
- data/lib/vagrant-xenserver/errors.rb +68 -0
- data/lib/vagrant-xenserver/plugin.rb +70 -0
- data/lib/vagrant-xenserver/provider.rb +36 -0
- data/lib/vagrant-xenserver/util/exnhandler.rb +49 -0
- data/lib/vagrant-xenserver/util/uploader.rb +215 -0
- data/lib/vagrant-xenserver/version.rb +6 -0
- data/lib/vagrant-xenserver.rb +17 -0
- data/locales/en.yml +38 -0
- data/vagrant-xenserver.gemspec +27 -0
- metadata +173 -0
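
The hunks below cover the action classes under data/lib/vagrant-xenserver/action/. For orientation, these actions read their settings from the provider configuration defined in data/lib/vagrant-xenserver/config.rb. A minimal Vagrantfile sketch using only the options referenced in the hunks below; the provider symbol :xenserver, the box name, host, and URL are illustrative placeholders, not values taken from this package:

    Vagrant.configure("2") do |config|
      config.vm.box = "some-xenserver-box"               # placeholder box name
      config.vm.provider :xenserver do |xs|
        xs.xs_host     = "xenserver.example.com"         # XenServer host (ReadSSHInfo, PrepareNFSSettings)
        xs.xs_username = "root"                          # used in the SSH proxy_command (ReadSSHInfo)
        xs.xva_url     = "http://example.com/box.xva"    # template source (DownloadXVA)
        xs.use_himn    = true                            # add a host-internal management network VIF (CreateVIFs)
        xs.pv          = false                           # true boots paravirtualized via pygrub (CreateVM)
        xs.memory      = 1024                            # MiB; CreateVM converts this to bytes
        xs.name        = nil                             # optional VM name, defaults to user/box/version (CreateVM)
      end
    end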
data/lib/vagrant-xenserver/action/create_vifs.rb
@@ -0,0 +1,86 @@
+require "log4r"
+require "xmlrpc/client"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class CreateVIFs
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::create_vifs")
+        end
+
+        def create_vif(env, vm, network, mac)
+          vif_devices = env[:xc].VM.get_allowed_VIF_devices(vm)
+
+          vif_record = {
+            'VM' => vm,
+            'network' => network,
+            'device' => vif_devices[0],
+            'MAC' => mac,
+            'MTU' => '1500',
+            'other_config' => {},
+            'qos_algorithm_type' => '',
+            'qos_algorithm_params' => {},
+            'locking_mode' => 'network_default',
+            'ipv4_allowed' => [],
+            'ipv6_allowed' => []
+          }
+
+          vif_res = env[:xc].VIF.create(vif_record)
+
+          return vif_res
+        end
+
+        def call(env)
+          vm_ref = env[:machine].id
+
+          networks = env[:xc].network.get_all_records
+
+          # Remove all current VIFs
+          current_vifs = env[:xc].VM.get_VIFs(vm_ref)
+          current_vifs.each { |vif| env[:xc].VIF.destroy(vif) }
+
+          # If a HIMN VIF has been asked for, create one
+          if env[:machine].provider_config.use_himn
+            himn = networks.find { |ref,net| net['other_config']['is_host_internal_management_network'] }
+            (himn_ref,himn_rec) = himn
+
+            @logger.debug("himn="+himn.to_s)
+
+            create_vif(env, vm_ref, himn_ref, '')
+          end
+
+
+          env[:machine].config.vm.networks.each do |type, options|
+            @logger.info "got an interface: #{type} #{options}"
+
+            if type == :public_network then
+              bridge = options[:bridge]
+              mac = options[:mac] || ''
+              name_label = options[:network] || ''
+
+              if name_label then
+                netrefrec = networks.find { |ref,net| net['name_label']==name_label }
+              else
+                netrefrec = networks.find { |ref,net| net['bridge']==bridge }
+              end
+
+              (net_ref,net_rec) = netrefrec
+              if net_ref.nil? then
+                @logger.error("Error finding bridge #{bridge} on host")
+                raise Errors::NoHostsAvailable
+              end
+
+              vif_res = create_vif(env, vm_ref, net_ref, mac)
+
+              @logger.info("vif_res=" + vif_res.to_s)
+            end
+          end
+
+          @app.call env
+        end
+      end
+    end
+  end
+end
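
CreateVIFs only consumes :public_network entries from the machine's network list. A hedged sketch of the Vagrantfile lines it would iterate over; the bridge and network names are placeholders:

    # Attach the VM to an existing host network, selected by name_label or bridge.
    config.vm.network :public_network, network: "Pool-wide network associated with eth0"  # matched against net['name_label']
    config.vm.network :public_network, bridge: "xenbr0"                                   # matched against net['bridge']

As written, name_label falls back to an empty string when :network is not given, and an empty string is truthy in Ruby, so the lookup always takes the name_label branch; a bridge-only entry therefore finds no network and raises Errors::NoHostsAvailable.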
data/lib/vagrant-xenserver/action/create_vm.rb
@@ -0,0 +1,95 @@
+require "log4r"
+require "xmlrpc/client"
+require "vagrant-xenserver/util/uploader"
+require "rexml/document"
+require "json"
+require "etc"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class CreateVM
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::create_vm")
+        end
+
+        def call(env)
+          vdi_ref = env[:my_vdi]
+
+          networks = env[:xc].network.get_all_records
+
+          himn = networks.find { |ref,net| net['other_config']['is_host_internal_management_network'] }
+          (himn_ref,himn_rec) = himn
+
+          @logger.info("himn_uuid="+himn_rec['uuid'])
+
+          username = Etc.getlogin
+
+          oim = env[:xc].VM.get_by_name_label("Other install media")[0]
+
+          box_name = env[:machine].box.name.to_s
+          box_version = env[:machine].box.version.to_s
+
+          if env[:machine].provider_config.name.nil?
+            vm_name = "#{username}/#{box_name}/#{box_version}"
+          else
+            vm_name = env[:machine].provider_config.name
+          end
+
+          vm_ref = env[:xc].VM.clone(oim,vm_name)
+
+          vbd_record = {
+            'VM' => vm_ref,
+            'VDI' => env[:my_vdi],
+            'userdevice' => '0',
+            'bootable' => true,
+            'mode' => 'RW',
+            'type' => 'Disk',
+            'unpluggable' => false,
+            'empty' => false,
+            'other_config' => {},
+            'qos_algorithm_type' => '',
+            'qos_algorithm_params' => {}
+          }
+
+          vbd_res = env[:xc].VBD.create(vbd_record)
+
+          @logger.info("vbd_res=" + vbd_res.to_s)
+
+          vif_record = {
+            'VM' => vm_ref,
+            'network' => himn_ref,
+            'device' => '0',
+            'MAC' => '',
+            'MTU' => '1500',
+            'other_config' => {},
+            'qos_algorithm_type' => '',
+            'qos_algorithm_params' => {},
+            'locking_mode' => 'network_default',
+            'ipv4_allowed' => [],
+            'ipv6_allowed' => []
+          }
+
+          vif_res = env[:xc].VIF.create(vif_record)
+
+          @logger.info("vif_res=" + vif_res.to_s)
+
+          if env[:machine].provider_config.pv
+            env[:xc].VM.set_HVM_boot_policy(vm_ref,"")
+            env[:xc].VM.set_PV_bootloader(vm_ref,"pygrub")
+          end
+
+          mem = ((env[:machine].provider_config.memory) * (1024*1024)).to_s
+
+          env[:xc].VM.set_memory_limits(vm_ref,mem,mem,mem,mem)
+          env[:xc].VM.provision(vm_ref)
+
+          env[:machine].id = vm_ref
+
+          @app.call env
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/destroy_vm.rb
@@ -0,0 +1,37 @@
+require "log4r"
+require "xmlrpc/client"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class DestroyVM
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::destroy_vm")
+        end
+
+        def call(env)
+          begin
+            env[:xc].VM.hard_shutdown(env[:machine].id)
+          rescue
+          end
+
+          vbds = env[:xc].VM.get_VBDs(env[:machine].id)
+
+          vbds.each { |vbd|
+            vbd_rec = env[:xc].VBD.get_record(vbd)
+            if vbd_rec['type'] == "Disk"
+              env[:xc].VDI.destroy(vbd_rec['VDI'])
+            end
+          }
+
+          env[:xc].VM.destroy(env[:machine].id)
+
+          env[:machine].id = nil
+
+          @app.call env
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/download_xva.rb
@@ -0,0 +1,101 @@
+require "log4r"
+require "xmlrpc/client"
+require "vagrant-xenserver/util/uploader"
+require "vagrant-xenserver/util/exnhandler"
+require "rexml/document"
+require "vagrant/util/busy"
+require "vagrant/util/platform"
+require "vagrant/util/subprocess"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class DownloadXVA
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::download_xva")
+        end
+
+        def call(env)
+          xva_url = env[:machine].provider_config.xva_url
+
+          box_name = env[:machine].box.name.to_s
+          box_version = env[:machine].box.version.to_s
+
+          @logger.info("xva_url="+xva_url.to_s)
+          # Check whether we've already downloaded a VM from this URL
+          # When we do, we set an other_config key 'xva_url', so we
+          # can just scan through the VMs looking for it.
+
+          template = nil
+
+          Action.getlock.synchronize do
+            templates = env[:xc].VM.get_all_records_where("field \"is_a_template\"=\"true\" and field \"is_a_snapshot\"=\"false\"")
+            template = templates.detect { |vm,vmr|
+              vmr["other_config"]["box_name"] == box_name &&
+              vmr["other_config"]["box_version"] == box_version
+            }
+
+            @logger.info("template="+template.to_s)
+
+            if template.nil? && (not xva_url.nil?)
+              # No template, let's download it.
+              pool=env[:xc].pool.get_all
+              default_sr=env[:xc].pool.get_default_SR(pool[0])
+
+              env[:ui].output("Downloading XVA. This may take some time. Source URL: "+xva_url)
+              task = env[:xc].Async.VM.import(xva_url, default_sr, false, false)
+
+              begin
+                sleep(2.0)
+                task_status = env[:xc].task.get_status(task)
+                task_progress = env[:xc].task.get_progress(task) * 100.0
+                output = "Progress: #{task_progress.round(0)}%"
+                env[:ui].clear_line
+                env[:ui].detail(output, new_line: false)
+              end while task_status == "pending"
+
+              env[:ui].clear_line
+
+              if task_status != "success"
+                # Task failed - let's find out why:
+                error_list = env[:xc].task.get_error_info(task)
+                MyUtil::Exnhandler.handle("Async.VM.import", error_list)
+              end
+
+              task_result = env[:xc].task.get_result(task)
+
+              doc = REXML::Document.new(task_result)
+
+              @logger.debug("task_result=\"#{task_result}\"")
+              template_ref = doc.elements['value/array/data/value'].text
+
+              # Make sure it's really a template, and add the xva_url to other_config:
+              env[:xc].VM.set_is_a_template(template_ref,true)
+              env[:xc].VM.add_to_other_config(template_ref,"xva_url",xva_url)
+              env[:xc].VM.add_to_other_config(template_ref,"box_name",box_name)
+              env[:xc].VM.add_to_other_config(template_ref,"box_version",box_version)
+
+              # Hackity hack: HVM booting guests don't need to set the bootable flag
+              # on their VBDs, but PV do. Let's set bootable=true on VBD device=0
+              # just in case.
+
+              vbds = env[:xc].VM.get_VBDs(template_ref)
+              vbds.each { |vbd|
+                if env[:xc].VBD.get_userdevice(vbd) == "0"
+                  env[:xc].VBD.set_bootable(vbd, true)
+                end
+              }
+              env[:template] = template_ref
+            else
+              (template_ref, template_rec) = template
+              env[:template] = template_ref
+            end
+          end
+
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
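
DownloadXVA above and ResumeVM at the end of this diff drive long-running XenAPI calls the same way: start an Async task, poll task.get_status until it leaves "pending", then check the final status. A minimal sketch of that pattern, assuming xc is the connected XML-RPC proxy the plugin stores in env[:xc]; the wait_for_task helper is illustrative and not part of the gem:

    # Illustrative only: poll a XenAPI task handle until it finishes and
    # return its final status ("success", "failure", or "cancelled").
    def wait_for_task(xc, task, interval = 2.0)
      status = xc.task.get_status(task)
      while status == "pending"
        sleep(interval)
        status = xc.task.get_status(task)
      end
      status
    end

    # Usage sketch mirroring the import above:
    # task = xc.Async.VM.import(xva_url, default_sr, false, false)
    # raise "import failed: #{xc.task.get_error_info(task)}" unless wait_for_task(xc, task) == "success"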
data/lib/vagrant-xenserver/action/halt_vm.rb
@@ -0,0 +1,23 @@
+require "log4r"
+require "xmlrpc/client"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class HaltVM
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::halt_vm")
+        end
+
+        def call(env)
+          myvm = env[:machine].id
+
+          shutdown_result = env[:xc].VM.clean_shutdown(myvm)
+
+          @app.call env
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/is_created.rb
@@ -0,0 +1,20 @@
+require 'log4r'
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class IsCreated
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new('vagrant_xenserver::action::is_created')
+        end
+
+        def call(env)
+          env[:result] = env[:machine].state.id != :not_created
+          @logger.info("env[:machine].state.id="+env[:machine].state.id.to_s)
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/is_running.rb
@@ -0,0 +1,20 @@
+require 'log4r'
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class IsRunning
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new('vagrant_xenserver::action::is_running')
+        end
+
+        def call(env)
+          @logger.info("env[:machine].state.id="+env[:machine].state.id.to_s)
+          env[:result] = env[:machine].state.id == 'Running'
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/is_suspended.rb
@@ -0,0 +1,20 @@
+require 'log4r'
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class IsSuspended
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new('vagrant_xenserver::action::is_suspended')
+        end
+
+        def call(env)
+          @logger.info("env[:machine].state.id="+env[:machine].state.id.to_s)
+          env[:result] = env[:machine].state.id == 'Suspended'
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/prepare_nfs_settings.rb
@@ -0,0 +1,85 @@
+require 'nokogiri'
+require 'socket'
+require 'rbconfig'
+
+def os
+  @os ||= (
+    host_os = RbConfig::CONFIG['host_os']
+    case host_os
+    when /mswin|msys|mingw|cygwin|bccwin|wince|emc/
+      :windows
+    when /darwin|mac os/
+      :macosx
+    when /linux/
+      :linux
+    when /solaris|bsd/
+      :unix
+    else
+      raise Vagrant::Errors::UnknownOS # "unknown os: #{host_os.inspect}"
+    end
+  )
+end
+
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class PrepareNFSSettings
+        include Vagrant::Action::Builtin::MixinSyncedFolders
+
+        def initialize(app,env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::action::vm::nfs")
+        end
+
+        def call(env)
+          @machine = env[:machine]
+          @app.call(env)
+
+          if using_nfs?
+            @logger.info("Using NFS, preparing NFS settings by reading host IP and machine IP")
+            env[:nfs_host_ip] = read_host_ip(env[:machine],env)
+            env[:nfs_machine_ip] = env[:xs_host_ip]
+
+            @logger.info("host IP: #{env[:nfs_host_ip]} machine IP: #{env[:nfs_machine_ip]}")
+
+            raise Vagrant::Errors::NFSNoHostonlyNetwork if !env[:nfs_machine_ip] || !env[:nfs_host_ip]
+          end
+        end
+
+        # We're using NFS if we have any synced folder with NFS configured. If
+        # we are not using NFS we don't need to do the extra work to
+        # populate these fields in the environment.
+        def using_nfs?
+          !!synced_folders(@machine)[:nfs]
+        end
+
+        # Returns the IP address of the interface that will route to the xs_host
+        #
+        # @param [Machine] machine
+        # @return [String]
+        def read_host_ip(machine,env)
+          ip = Socket.getaddrinfo(env[:machine].provider_config.xs_host,nil)[0][2]
+          env[:xs_host_ip] = ip
+          def get_local_ip_linux(ip)
+            re = /src ([0-9\.]+)/
+            match = `ip route get to #{ip} | head -n 1`.match re
+            match[1]
+          end
+          def get_local_ip_mac(ip)
+            re = /interface: ([a-z0-9]+)/
+            match = `route get #{ip} | grep interface | head -n 1`.match re
+            interface = match[1]
+            re = /inet ([0-9\.]+)/
+            match = `ifconfig #{interface} inet | tail -1`.match re
+            match[1]
+          end
+          if os == :linux then get_local_ip_linux(ip)
+          elsif os == :macosx then get_local_ip_mac(ip)
+          else raise Vagrant::Errors::UnknownOS # "unknown os: #{host_os.inspect}"
+          end
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/prepare_nfs_valid_ids.rb
@@ -0,0 +1,17 @@
+module VagrantPlugins
+  module XenServer
+    module Action
+      class PrepareNFSValidIds
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::action::vm::nfs")
+        end
+
+        def call(env)
+          env[:nfs_valid_ids] = env[:xc].VM.get_all
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/read_ssh_info.rb
@@ -0,0 +1,97 @@
+require "log4r"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      # This action reads the SSH info for the machine and puts it into the
+      # `:machine_ssh_info` key in the environment.
+      class ReadSSHInfo
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant_xenserver::action::read_ssh_info")
+        end
+
+        def call(env)
+          if env[:machine].provider_config.use_himn
+            env[:machine_ssh_info] = read_ssh_info_himn(env)
+          else
+            env[:machine_ssh_info] = read_ssh_info(env)
+          end
+
+          @app.call(env)
+        end
+
+        def read_ssh_info_himn(env)
+          machine = env[:machine]
+          return nil if machine.id.nil?
+
+          # Find the machine
+          networks = env[:xc].network.get_all_records
+
+          begin
+            vifs = env[:xc].VM.get_VIFs(machine.id)
+          rescue
+            @logger.info("Machine couldn't be found, assuming it got destroyed.")
+            machine.id = nil
+            return nil
+          end
+
+          himn = networks.find { |ref,net| net['other_config']['is_host_internal_management_network'] }
+          (himn_ref,himn_rec) = himn
+
+          assigned_ips = himn_rec['assigned_ips']
+          (vif,ip) = assigned_ips.find { |vif,ip| vifs.include? vif }
+
+          ssh_info = {
+            :host => ip,
+            :port => machine.config.ssh.guest_port,
+            :username => machine.config.ssh.username,
+            :forward_agent => machine.config.ssh.forward_agent,
+            :forward_x11 => machine.config.ssh.forward_x11,
+          }
+
+          ssh_info[:proxy_command] = "ssh '#{machine.provider_config.xs_host}' -l '#{machine.provider_config.xs_username}' -W %h:%p"
+
+          if not ssh_info[:username]
+            ssh_info[:username] = machine.config.ssh.default.username
+          end
+
+          return ssh_info
+        end
+
+        def read_ssh_info(env)
+          machine = env[:machine]
+          return nil if machine.id.nil?
+
+          gm = env[:xc].VM.get_guest_metrics(machine.id)
+
+          begin
+            networks = env[:xc].VM_guest_metrics.get_networks(gm)
+          rescue
+            return nil
+          end
+
+          ip = networks["0/ip"]
+          if ip.nil?
+            return nil
+          end
+
+          ssh_info = {
+            :host => ip,
+            :port => machine.config.ssh.guest_port,
+            :username => machine.config.ssh.username,
+            :forward_agent => machine.config.ssh.forward_agent,
+            :forward_x11 => machine.config.ssh.forward_x11,
+          }
+
+          if not ssh_info[:username]
+            ssh_info[:username] = machine.config.ssh.default.username
+          end
+
+          return ssh_info
+        end
+
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/read_state.rb
@@ -0,0 +1,35 @@
+require "log4r"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class ReadState
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::read_state")
+        end
+
+        def call(env)
+          @logger.debug("XXXXX In ReadState")
+          env[:machine_state_id] = read_state(env[:xc], env[:session], env[:machine])
+          @logger.debug("state="+env[:machine_state_id].to_s)
+          @app.call(env)
+        end
+
+        def read_state(xc, session, machine)
+          return :not_created if machine.id.nil?
+          return :not_created if not machine.id.start_with?("OpaqueRef")
+
+          begin
+            result = xc.VM.get_record(machine.id)
+            return result['power_state']
+          rescue
+            @logger.info("Machine not found. Assuming it has been destroyed.")
+            machine.id = nil
+            return :not_created
+          end
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-xenserver/action/resume_vm.rb
@@ -0,0 +1,30 @@
+require "log4r"
+require "xmlrpc/client"
+
+module VagrantPlugins
+  module XenServer
+    module Action
+      class ResumeVM
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant::xenserver::actions::resume_vm")
+        end
+
+        def call(env)
+          myvm = env[:machine].id
+
+          resume_task = env[:xc].Async.VM.resume(myvm,false,false)
+          while env[:xc].task.get_status(resume_task) == 'pending' do
+            sleep 1
+          end
+          resume_result = env[:xc].task.get_status(resume_task)
+          if resume_result != "success"
+            raise Errors::APIError
+          end
+
+          @app.call env
+        end
+      end
+    end
+  end
+end