vagrant-libvirt 0.8.0 → 0.9.0

This diff shows the changes between these two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/README.md +9 -6
  3. data/lib/vagrant-libvirt/action/create_domain.rb +19 -91
  4. data/lib/vagrant-libvirt/action/destroy_domain.rb +20 -4
  5. data/lib/vagrant-libvirt/action/destroy_networks.rb +1 -1
  6. data/lib/vagrant-libvirt/action/handle_box_image.rb +2 -0
  7. data/lib/vagrant-libvirt/action/resolve_disk_settings.rb +174 -0
  8. data/lib/vagrant-libvirt/action/start_domain.rb +41 -1
  9. data/lib/vagrant-libvirt/action.rb +14 -6
  10. data/lib/vagrant-libvirt/config.rb +12 -4
  11. data/lib/vagrant-libvirt/templates/domain.xml.erb +13 -8
  12. data/lib/vagrant-libvirt/util/domain_flags.rb +15 -0
  13. data/lib/vagrant-libvirt/util.rb +1 -0
  14. data/lib/vagrant-libvirt/version +1 -1
  15. data/locales/en.yml +4 -2
  16. data/spec/support/libvirt_context.rb +4 -0
  17. data/spec/unit/action/cleanup_on_failure_spec.rb +0 -2
  18. data/spec/unit/action/create_domain_spec.rb +113 -147
  19. data/spec/unit/action/create_domain_volume_spec.rb +0 -3
  20. data/spec/unit/action/destroy_domain_spec.rb +43 -10
  21. data/spec/unit/action/handle_box_image_spec.rb +13 -13
  22. data/spec/unit/action/package_domain_spec.rb +0 -5
  23. data/spec/unit/action/resolve_disk_settings_spec/default_domain.xml +43 -0
  24. data/spec/unit/action/resolve_disk_settings_spec/default_no_aliases.xml +42 -0
  25. data/spec/unit/action/{create_domain_spec → resolve_disk_settings_spec}/default_system_storage_pool.xml +0 -0
  26. data/spec/unit/action/resolve_disk_settings_spec/multi_volume_box.xml +55 -0
  27. data/spec/unit/action/resolve_disk_settings_spec/multi_volume_box_additional_and_custom_no_aliases.xml +67 -0
  28. data/spec/unit/action/resolve_disk_settings_spec/multi_volume_box_additional_storage.xml +67 -0
  29. data/spec/unit/action/resolve_disk_settings_spec.rb +361 -0
  30. data/spec/unit/action/start_domain_spec/existing_added_nvram.xml +62 -0
  31. data/spec/unit/action/start_domain_spec/nvram_domain.xml +64 -0
  32. data/spec/unit/action/start_domain_spec/nvram_domain_other_setting.xml +64 -0
  33. data/spec/unit/action/start_domain_spec/nvram_domain_removed.xml +64 -0
  34. data/spec/unit/action/start_domain_spec.rb +64 -6
  35. data/spec/unit/action/wait_till_up_spec.rb +0 -2
  36. data/spec/unit/action_spec.rb +0 -3
  37. data/spec/unit/config_spec.rb +18 -0
  38. data/spec/unit/driver_spec.rb +2 -0
  39. data/spec/unit/templates/domain_all_settings.xml +8 -2
  40. data/spec/unit/templates/domain_spec.rb +3 -2
  41. metadata +38 -16
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: b1bfaefd1dab7cbdb1293e867e29fef05a9eb101fae5226a38203593de84e717
- data.tar.gz: d36d4f487ed2ad0b4f00ba09c263a5cd8deda6c268eef50974376e2796b7e64d
+ metadata.gz: 79027c1097a7e8136b8d56f74fd0b27932bf17c48387de84c57626c2bd348714
+ data.tar.gz: cd6acea3cf183191ea6bbee6e60d7a1fc76a5f0b556f273f60657238283841ce
  SHA512:
- metadata.gz: 510c1b6ffd2fb4eea02ede8d7612c3a41a89e5c50b630370bec6fd70b79986caa222e6ce1cc0f4c2732b310f7c134866aa2283ee449441c92078b148c4734503
- data.tar.gz: cb6ab754dcff508ec150c6ff67cf82b603243596b35ab6a5ef79d4dfe36042a933dde8eebc3630d5a2473c8fcd398111a98d12a64a66950ad0abe942c15bce40
+ metadata.gz: 6d6483a73ce398e6fd057b2fc0503392c628ad1fc228d9d954b9a5e31d9a65ab883c25f30cb9360ce5eff46554492dd010b68d741f3c8935e64228207bf2f210
+ data.tar.gz: 1a083a93899bb48d4d750aa8967bdcb8e6b1b0b9a8fb0d88c912097444cd202c52fe0330f81a1e4803dde184b06e035c8bd8d32397035c186653b978db790e12
data/README.md CHANGED
@@ -353,11 +353,11 @@ into_srpm.sh -d c8s
  cd SRPMS

  # common commands (make sure to adjust verison accordingly)
- rpm2cpio libssh-0.9.0-5.fc30.src.rpm | cpio -imdV
- tar xf libssh-0.9.0.tar.xz
+ rpm2cpio libssh-0.9.4-1c8s.src.rpm | cpio -imdV
+ tar xf libssh-0.9.4.tar.xz
  mkdir build
  cd build
- cmake ../libssh-0.9.0 -DOPENSSL_ROOT_DIR=/opt/vagrant/embedded/
+ cmake ../libssh-0.9.4 -DOPENSSL_ROOT_DIR=/opt/vagrant/embedded/
  make
  sudo cp lib/libssh* /opt/vagrant/embedded/lib64
  ```
@@ -376,18 +376,21 @@ dnf download --source krb5-libs

  # centos 8 stream, doesn't provide source RPMs, so you need to download like so
  git clone https://git.centos.org/centos-git-common
+ # make get_sources.sh executable as it is needed in krb5
+ chmod +x centos-git-common/get_sources.sh
  # centos-git-common needs its tools in PATH
  export PATH=$(readlink -f ./centos-git-common):$PATH
  git clone https://git.centos.org/rpms/krb5
  cd krb5
  git checkout imports/c8s/krb5-1.18.2-8.el8
+ get_sources.sh
  into_srpm.sh -d c8s
  cd SRPMS

  # common commands (make sure to adjust verison accordingly)
- rpm2cpio krb5-1.18-1.fc32.src.rpm | cpio -imdV
- tar xf krb5-1.18.tar.gz
- cd krb5-1.18/src
+ rpm2cpio krb5-1.18.2-8c8s.src.rpm | cpio -imdV
+ tar xf krb5-1.18.2.tar.gz
+ cd krb5-1.18.2/src
  ./configure
  make
  sudo cp -P lib/crypto/libk5crypto.* /opt/vagrant/embedded/lib64/
data/lib/vagrant-libvirt/action/create_domain.rb CHANGED
@@ -16,20 +16,6 @@ module VagrantPlugins
  @app = app
  end

- def _disk_name(name, disk)
- "#{name}-#{disk[:device]}.#{disk[:type]}" # disk name
- end
-
- def _disks_print(disks)
- disks.collect do |x|
- "#{x[:device]}(#{x[:type]}, #{x[:bus]}, #{x[:size]})"
- end.join(', ')
- end
-
- def _cdroms_print(cdroms)
- cdroms.collect { |x| x[:dev] }.join(', ')
- end
-
  def call(env)
  # Get config.
  config = env[:machine].provider_config
@@ -58,8 +44,6 @@ module VagrantPlugins
  @nvram = config.nvram
  @machine_type = config.machine_type
  @machine_arch = config.machine_arch
- @disk_bus = config.disk_bus
- @disk_device = config.disk_device
  @disk_driver_opts = config.disk_driver_opts
  @nested = config.nested
  @memory_size = config.memory.to_i * 1024
@@ -94,9 +78,8 @@ module VagrantPlugins

  # Storage
  @storage_pool_name = config.storage_pool_name
- @snapshot_pool_name = config.snapshot_pool_name
- @domain_volumes = []
- @disks = config.disks
+ @domain_volumes = env[:domain_volumes] || []
+ @disks = env[:disks] || []
  @cdroms = config.cdroms

  # Input
@@ -139,77 +122,19 @@ module VagrantPlugins
  @memballoon_pci_bus = config.memballoon_pci_bus
  @memballoon_pci_slot = config.memballoon_pci_slot

- config = env[:machine].provider_config
  @domain_type = config.driver

  @os_type = 'hvm'

- resolver = ::VagrantPlugins::ProviderLibvirt::Util::DiskDeviceResolver.new(prefix=@disk_device[0..1])
-
- # Get path to domain image from the storage pool selected if we have a box.
- if env[:machine].config.vm.box
- if @snapshot_pool_name != @storage_pool_name
- pool_name = @snapshot_pool_name
- else
- pool_name = @storage_pool_name
- end
-
- # special handling for domain volume
- env[:box_volumes][0][:device] = env[:box_volumes][0].fetch(:device, @disk_device)
-
- resolver.resolve!(env[:box_volumes])
-
- @logger.debug "Search for volumes in pool: #{pool_name}"
- env[:box_volumes].each_index do |index|
- suffix_index = index > 0 ? "_#{index}" : ''
- domain_volume = env[:machine].provider.driver.connection.volumes.all(
- name: "#{@name}#{suffix_index}.img"
- ).find { |x| x.pool_name == pool_name }
- raise Errors::DomainVolumeExists if domain_volume.nil?
-
- @domain_volumes.push({
- :dev => env[:box_volumes][index][:device],
- :cache => @domain_volume_cache,
- :bus => @disk_bus,
- :path => domain_volume.path,
- :virtual_size => env[:box_volumes][index][:virtual_size]
- })
- end
-
+ env[:domain_volumes].each_with_index do |vol, index|
+ suffix_index = index > 0 ? "_#{index}" : ''
+ domain_volume = env[:machine].provider.driver.connection.volumes.all(
+ name: "#{@name}#{suffix_index}.img"
+ ).find { |x| x.pool_name == vol[:pool] }
+ raise Errors::NoDomainVolume if domain_volume.nil?
  end

- # If we have a box, take the path from the domain volume and set our storage_prefix.
- # If not, we dump the storage pool xml to get its defined path.
- # the default storage prefix is typically: /var/lib/libvirt/images/
- if env[:machine].config.vm.box
- storage_prefix = File.dirname(@domain_volumes[0][:path]) + '/' # steal
- else
- storage_prefix = get_disk_storage_prefix(env, @storage_pool_name)
- end
-
- resolver.resolve!(@disks)
-
  @disks.each do |disk|
- disk[:path] ||= _disk_name(@name, disk)
-
- # On volume creation, the <path> element inside <target>
- # is oddly ignored; instead the path is taken from the
- # <name> element:
- # http://www.redhat.com/archives/libvir-list/2008-August/msg00329.html
- disk[:name] = disk[:path]
-
- disk[:absolute_path] = storage_prefix + disk[:path]
-
- if not disk[:pool].nil?
- disk_pool_name = disk[:pool]
- @logger.debug "Overriding pool name with: #{disk_pool_name}"
- disk_storage_prefix = get_disk_storage_prefix(env, disk_pool_name)
- disk[:absolute_path] = disk_storage_prefix + disk[:path]
- @logger.debug "Overriding disk path with: #{disk[:absolute_path]}"
- else
- disk_pool_name = @storage_pool_name
- end
-
  # make the disk. equivalent to:
  # qemu-img create -f qcow2 <path> 5g
  begin
@@ -221,13 +146,13 @@ module VagrantPlugins
  owner: storage_uid(env),
  group: storage_gid(env),
  #:allocation => ?,
- pool_name: disk_pool_name
+ pool_name: disk[:pool],
  )
  rescue Libvirt::Error => e
  # It is hard to believe that e contains just a string
  # and no useful error code!
  msg = "Call to virStorageVolCreateXML failed: " +
- "storage volume '#{disk[:path]}' exists already"
+ "storage volume '#{disk[:absolute_path]}' exists already"
  if e.message == msg and disk[:allow_existing]
  disk[:preexisting] = true
  else
@@ -300,7 +225,7 @@ module VagrantPlugins
  end
  env[:ui].info(" -- Storage pool: #{@storage_pool_name}")
  @domain_volumes.each do |volume|
- env[:ui].info(" -- Image(#{volume[:dev]}): #{volume[:path]}, #{volume[:bus]}, #{volume[:virtual_size].to_GB}G")
+ env[:ui].info(" -- Image(#{volume[:device]}): #{volume[:absolute_path]}, #{volume[:bus]}, #{volume[:virtual_size].to_GB}G")
  end

  if not @disk_driver_opts.empty?
@@ -466,11 +391,14 @@ module VagrantPlugins
  end

  private
- def get_disk_storage_prefix(env, disk_pool_name)
- disk_storage_pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name(disk_pool_name)
- raise Errors::NoStoragePool if disk_storage_pool.nil?
- xml = Nokogiri::XML(disk_storage_pool.xml_desc)
- disk_storage_prefix = xml.xpath('/pool/target/path').inner_text.to_s + '/'
+ def _disks_print(disks)
+ disks.collect do |x|
+ "#{x[:device]}(#{x[:type]}, #{x[:bus]}, #{x[:size]})"
+ end.join(', ')
+ end
+
+ def _cdroms_print(cdroms)
+ cdroms.collect { |x| x[:dev] }.join(', ')
  end
  end
  end
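
The `disk[:allow_existing]` branch retained above corresponds to the provider's per-disk storage option. A minimal Vagrantfile fragment exercising it might look like the following sketch (the box name and size are placeholders, not values taken from this release):

```ruby
Vagrant.configure('2') do |config|
  config.vm.box = 'generic/debian12' # placeholder box name
  config.vm.provider :libvirt do |libvirt|
    # Attach an extra qcow2 disk; allow_existing lets CreateDomain treat an
    # already-present volume as reusable instead of failing on creation.
    libvirt.storage :file, size: '20G', allow_existing: true
  end
end
```
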
data/lib/vagrant-libvirt/action/destroy_domain.rb CHANGED
@@ -1,7 +1,12 @@
  # frozen_string_literal: true

  require 'log4r'
- require 'rexml'
+
+ begin
+ require 'rexml'
+ rescue LoadError
+ require 'rexml/rexml'
+ end

  module VagrantPlugins
  module ProviderLibvirt
@@ -43,21 +48,24 @@ module VagrantPlugins

  domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)

+ undefine_flags = 0
+ undefine_flags |= ProviderLibvirt::Util::DomainFlags::VIR_DOMAIN_UNDEFINE_KEEP_NVRAM if env[:machine].provider_config.nvram
+
  if env[:machine].provider_config.disks.empty? &&
  env[:machine].provider_config.cdroms.empty?
  # if using default configuration of disks and cdroms
  # cdroms are consider volumes, but cannot be destroyed
- domain.destroy(destroy_volumes: true)
+ destroy_domain(domain, destroy_volumes: true, flags: undefine_flags)
  else
  domain_xml = libvirt_domain.xml_desc(1)
  xml_descr = REXML::Document.new(domain_xml)
  disks_xml = REXML::XPath.match(xml_descr, '/domain/devices/disk[@device="disk"]')
  have_aliases = !(REXML::XPath.match(disks_xml, './alias[@name="ua-box-volume-0"]').first).nil?
  if !have_aliases
- env[:ui].warn(I18n.t('vagrant_libvirt.destroy.obsolete_method'))
+ env[:ui].warn(I18n.t('vagrant_libvirt.domain_xml.obsolete_method'))
  end

- domain.destroy(destroy_volumes: false)
+ destroy_domain(domain, destroy_volumes: false, flags: undefine_flags)

  volumes = domain.volumes

@@ -164,6 +172,14 @@ module VagrantPlugins
  libvirt_disk.destroy if libvirt_disk
  end
  end
+
+ def destroy_domain(domain, destroy_volumes:, flags:)
+ if domain.method(:destroy).parameters.first.include?(:flags)
+ domain.destroy(destroy_volumes: destroy_volumes, flags: flags)
+ else
+ domain.destroy(destroy_volumes: destroy_volumes)
+ end
+ end
  end
  end
  end
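
The new `destroy_domain` helper probes whether the fog domain object's `destroy` method accepts a `flags:` keyword before passing one, so that fog-libvirt releases whose `destroy` does not know about `flags` still work. A standalone sketch of that introspection technique, using a hypothetical stand-in class rather than fog-libvirt itself (the `any?` check is a slightly more general variant of the one-line test above):

```ruby
# Method#parameters returns [type, name] pairs, which lets a caller detect
# support for a keyword argument at runtime. Server is a made-up stand-in.
class Server
  def destroy(destroy_volumes: false, flags: 0)
    puts "destroy(destroy_volumes=#{destroy_volumes}, flags=#{flags})"
  end
end

server = Server.new
params = server.method(:destroy).parameters
# => [[:key, :destroy_volumes], [:key, :flags]]

if params.any? { |_type, name| name == :flags }
  server.destroy(destroy_volumes: true, flags: 4)
else
  server.destroy(destroy_volumes: true)
end
```
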
data/lib/vagrant-libvirt/action/destroy_networks.rb CHANGED
@@ -68,7 +68,7 @@ module VagrantPlugins
  # Shutdown network first.
  # Undefine network.
  begin
- libvirt_network.destroy
+ libvirt_network.destroy if libvirt_network.active?
  libvirt_network.undefine
  @logger.info 'Undefined it'
  rescue => e
data/lib/vagrant-libvirt/action/handle_box_image.rb CHANGED
@@ -104,6 +104,8 @@ module VagrantPlugins
  end
  # save for use by later actions
  env[:box_volumes][0][:virtual_size] = box_virtual_size
+ # special handling for domain volume
+ env[:box_volumes][0][:device] ||= config.disk_device

  # while inside the synchronize block take care not to call the next
  # action in the chain, as must exit this block first to prevent
data/lib/vagrant-libvirt/action/resolve_disk_settings.rb ADDED
@@ -0,0 +1,174 @@
+ # frozen_string_literal: true
+
+ require 'log4r'
+ require 'rexml'
+
+ require 'vagrant-libvirt/util/resolvers'
+
+ module VagrantPlugins
+ module ProviderLibvirt
+ module Action
+ class ResolveDiskSettings
+ def initialize(app, _env)
+ @logger = Log4r::Logger.new('vagrant_libvirt::action::resolve_disk_devices')
+ @app = app
+ end
+
+ def call(env)
+ # Get config.
+ config = env[:machine].provider_config
+
+ domain_name = env[:domain_name] # only set on create
+ disk_bus = config.disk_bus
+ disk_device = config.disk_device
+ domain_volume_cache = config.volume_cache || 'default'
+
+ # Storage
+ storage_pool_name = config.storage_pool_name
+ snapshot_pool_name = config.snapshot_pool_name
+ domain_volumes = []
+ disks = config.disks.dup
+
+ resolver = ::VagrantPlugins::ProviderLibvirt::Util::DiskDeviceResolver.new(disk_device[0..1])
+
+ # Get path to domain image from the storage pool selected if we have a box.
+ if env[:machine].config.vm.box
+ pool_name = if snapshot_pool_name == storage_pool_name
+ storage_pool_name
+ else
+ snapshot_pool_name
+ end
+
+ if env[:box_volumes].nil?
+ # domain must be already created, need to read domain volumes from domain XML
+ libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(
+ env[:machine].id
+ )
+ domain_xml = libvirt_domain.xml_desc(1)
+ xml_descr = REXML::Document.new(domain_xml)
+ domain_name = xml_descr.elements['domain'].elements['name'].text
+ disks_xml = REXML::XPath.match(xml_descr, '/domain/devices/disk[@device="disk"]')
+ have_aliases = !REXML::XPath.match(disks_xml, './alias[@name="ua-box-volume-0"]').first.nil?
+ env[:ui].warn(I18n.t('vagrant_libvirt.domain_xml.obsolete_method')) unless have_aliases
+
+ if have_aliases
+ REXML::XPath.match(disks_xml,
+ './alias[contains(@name, "ua-box-volume-")]').each_with_index do |alias_xml, idx|
+ domain_volumes.push(volume_from_xml(alias_xml.parent, domain_name, idx))
+ end
+ else
+ # fallback to try and infer which boxes are box images, as they are listed first
+ # as soon as there is no match, can exit
+ disks_xml.each_with_index do |box_disk_xml, idx|
+ diskname = box_disk_xml.elements['source'].attributes['file'].rpartition('/').last
+
+ break if volume_name(domain_name, idx) != diskname
+
+ domain_volumes.push(volume_from_xml(box_disk_xml, domain_name, idx))
+ end
+ end
+ else
+
+ @logger.debug "Search for volumes in pool: #{pool_name}"
+ env[:box_volumes].each_index do |index|
+ domain_volume = env[:machine].provider.driver.connection.volumes.all(
+ name: volume_name(domain_name, index)
+ ).find { |x| x.pool_name == pool_name }
+ raise Errors::NoDomainVolume if domain_volume.nil?
+
+ domain_volumes.push(
+ {
+ name: volume_name(domain_name, index),
+ device: env[:box_volumes][index][:device],
+ cache: domain_volume_cache,
+ bus: disk_bus,
+ absolute_path: domain_volume.path,
+ virtual_size: env[:box_volumes][index][:virtual_size],
+ pool: pool_name,
+ }
+ )
+ end
+ end
+
+ resolver.resolve!(domain_volumes)
+
+ # If we have a box, take the path from the domain volume and set our storage_prefix.
+ # If not, we dump the storage pool xml to get its defined path.
+ # the default storage prefix is typically: /var/lib/libvirt/images/
+ storage_prefix = "#{File.dirname(domain_volumes[0][:absolute_path])}/" # steal
+ else
+ # Ensure domain name is set for subsequent steps if restarting a machine without a box
+ libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(
+ env[:machine].id
+ )
+ domain_xml = libvirt_domain.xml_desc(1)
+ xml_descr = REXML::Document.new(domain_xml)
+ domain_name = xml_descr.elements['domain'].elements['name'].text
+
+ storage_prefix = get_disk_storage_prefix(env[:machine], storage_pool_name)
+ end
+
+ resolver.resolve!(disks)
+
+ disks.each do |disk|
+ disk[:path] ||= disk_name(domain_name, disk)
+
+ # On volume creation, the <path> element inside <target>
+ # is oddly ignored; instead the path is taken from the
+ # <name> element:
+ # http://www.redhat.com/archives/libvir-list/2008-August/msg00329.html
+ disk[:name] = disk[:path]
+
+ disk[:absolute_path] = storage_prefix + disk[:path]
+
+ if disk[:pool].nil?
+ disk[:pool] = storage_pool_name
+ else
+ @logger.debug "Overriding pool name with: #{disk[:pool]}"
+ disk_storage_prefix = get_disk_storage_prefix(env[:machine], disk[:pool])
+ disk[:absolute_path] = disk_storage_prefix + disk[:path]
+ @logger.debug "Overriding disk path with: #{disk[:absolute_path]}"
+ end
+ end
+
+ env[:domain_volumes] = domain_volumes
+ env[:disks] = disks
+
+ @app.call(env)
+ end
+
+ private
+
+ def disk_name(name, disk)
+ "#{name}-#{disk[:device]}.#{disk[:type]}" # disk name
+ end
+
+ def get_disk_storage_prefix(machine, disk_pool_name)
+ disk_storage_pool = machine.provider.driver.connection.client.lookup_storage_pool_by_name(disk_pool_name)
+ raise Errors::NoStoragePool if disk_storage_pool.nil?
+
+ xml = Nokogiri::XML(disk_storage_pool.xml_desc)
+ "#{xml.xpath('/pool/target/path').inner_text}/"
+ end
+
+ def volume_name(domain_name, index)
+ domain_name + (index.zero? ? '.img' : "_#{index}.img")
+ end
+
+ def volume_from_xml(device_xml, domain_name, index)
+ driver = device_xml.elements['driver']
+ source = device_xml.elements['source']
+ target = device_xml.elements['target']
+
+ {
+ name: volume_name(domain_name, index),
+ device: target.attributes['dev'],
+ cache: driver.attributes['cache'],
+ bus: target.attributes['bus'],
+ absolute_path: source.attributes['file'],
+ }
+ end
+ end
+ end
+ end
+ end
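
The new action concentrates disk resolution in one place and publishes the results through `env[:domain_volumes]` and `env[:disks]` for CreateDomain and later steps. Based on the code above, that data has roughly the following shape; every concrete value in this sketch is invented for illustration:

```ruby
# Illustrative only: the keys mirror what ResolveDiskSettings assembles above,
# the values do not come from any real machine.
env = {}

env[:domain_volumes] = [
  {
    name: 'default_test.img',
    device: 'vda',                   # filled in by DiskDeviceResolver
    cache: 'default',
    bus: 'virtio',
    absolute_path: '/var/lib/libvirt/images/default_test.img',
    virtual_size: 10 * 1024**3,      # present when built from a box image
    pool: 'default',
  },
]

env[:disks] = [
  {
    device: 'vdb',
    type: 'qcow2',
    size: '20G',
    path: 'default_test-vdb.qcow2',  # disk_name(domain_name, disk)
    name: 'default_test-vdb.qcow2',
    absolute_path: '/var/lib/libvirt/images/default_test-vdb.qcow2',
    pool: 'default',
  },
]
```
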
data/lib/vagrant-libvirt/action/start_domain.rb CHANGED
@@ -375,10 +375,50 @@ module VagrantPlugins
  end
  end

+ loader = REXML::XPath.first(xml_descr, '/domain/os/loader')
+ if config.loader
+ if loader.nil?
+ descr_changed = true
+ loader = REXML::Element.new('loader')
+ REXML::XPath.first(xml_descr, '/domain/os').insert_after('//type', loader)
+ loader.text = config.loader
+ else
+ if (loader.text or '').strip != config.loader
+ descr_changed = true
+ loader.text = config.loader
+ end
+ end
+ loader.attributes['type'] = config.nvram ? 'pflash' : 'rom'
+ elsif !loader.nil?
+ descr_changed = true
+ loader.parent.delete_element(loader)
+ end
+
+ undefine_flags = 0
+ nvram = REXML::XPath.first(xml_descr, '/domain/os/nvram')
+ if config.nvram
+ if nvram.nil?
+ descr_changed = true
+ nvram = REXML::Element.new('nvram')
+ REXML::XPath.first(xml_descr, '/domain/os').insert_after(loader, nvram)
+ nvram.text = config.nvram
+ else
+ if (nvram.text or '').strip != config.nvram
+ descr_changed = true
+ nvram.text = config.nvram
+ end
+ undefine_flags |= ProviderLibvirt::Util::DomainFlags::VIR_DOMAIN_UNDEFINE_KEEP_NVRAM
+ end
+ elsif !nvram.nil?
+ descr_changed = true
+ undefine_flags |= ProviderLibvirt::Util::DomainFlags::VIR_DOMAIN_UNDEFINE_NVRAM
+ nvram.parent.delete_element(nvram)
+ end
+
  # Apply
  if descr_changed
  begin
- libvirt_domain.undefine
+ libvirt_domain.undefine(undefine_flags)
  new_descr = String.new
  xml_descr.write new_descr
  env[:machine].provider.driver.connection.servers.create(xml: new_descr)
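
The block added above edits the domain XML with REXML: it finds `<loader>` and `<nvram>` under `<os>`, creates, updates, or deletes them, and accumulates the undefine flags needed to preserve or drop the NVRAM file. A self-contained sketch of the same editing pattern on a toy document (the firmware paths are placeholders, not values shipped by the plugin):

```ruby
require 'rexml/document'

xml = <<~XML
  <domain type='kvm'>
    <os>
      <type arch='x86_64'>hvm</type>
    </os>
  </domain>
XML

doc = REXML::Document.new(xml)
os  = REXML::XPath.first(doc, '/domain/os')

loader = REXML::XPath.first(doc, '/domain/os/loader')
if loader.nil?
  loader = REXML::Element.new('loader')
  os.insert_after(os.elements['type'], loader) # keep it right after <type>
end
loader.text = '/usr/share/OVMF/OVMF_CODE.fd'   # placeholder firmware path
loader.attributes['type'] = 'pflash'           # 'rom' when nvram is not set

nvram = REXML::Element.new('nvram')
nvram.text = '/var/lib/libvirt/qemu/nvram/test_VARS.fd' # placeholder
os.insert_after(loader, nvram)

out = String.new
doc.write(out)
puts out
```
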
data/lib/vagrant-libvirt/action.rb CHANGED
@@ -35,6 +35,7 @@ module VagrantPlugins
  autoload :ReadMacAddresses, action_root.join('read_mac_addresses')
  autoload :RemoveLibvirtImage, action_root.join('remove_libvirt_image')
  autoload :RemoveStaleVolume, action_root.join('remove_stale_volume')
+ autoload :ResolveDiskSettings, action_root.join('resolve_disk_settings')
  autoload :ResumeDomain, action_root.join('resume_domain')
  autoload :SetNameOfDomain, action_root.join('set_name_of_domain')
  autoload :SetBootOrder, action_root.join('set_boot_order')
@@ -48,15 +49,12 @@ module VagrantPlugins
  autoload :TimedProvision, action_root.join('timed_provision')
  autoload :WaitTillUp, action_root.join('wait_till_up')

- autoload :HandleBox, 'vagrant/action/builtin/handle_box'
  autoload :Package, 'vagrant/action/general/package'
  autoload :PackageSetupFiles, 'vagrant/action/general/package_setup_files'
  autoload :PackageSetupFolders, 'vagrant/action/general/package_setup_folders'
  autoload :ProvisionerCleanup, 'vagrant/action/builtin/provisioner_cleanup'
  autoload :SSHRun, 'vagrant/action/builtin/ssh_run'
  autoload :SyncedFolderCleanup, 'vagrant/action/builtin/synced_folder_cleanup'
- autoload :SyncedFolders, 'vagrant/action/builtin/synced_folders'
- autoload :WaitForCommunicator, 'vagrant/action/builtin/wait_for_communicator'

  # Include the built-in & general modules so we can use them as top-level things.
  include Vagrant::Action::Builtin
@@ -77,11 +75,14 @@ module VagrantPlugins
  b.use BoxCheckOutdated
  b.use Call, IsCreated do |env, b2|
  b2.use CleanupOnFailure
+ b2.use Provision

  # Create VM if not yet created.
  if !env[:result]
  b2.use SetNameOfDomain
+
  if !env[:machine].config.vm.box
+ b2.use ResolveDiskSettings
  b2.use CreateDomain
  b2.use CreateNetworks
  b2.use CreateNetworkInterfaces
@@ -91,9 +92,11 @@ module VagrantPlugins
  b2.use SetupComplete
  else
  b2.use HandleStoragePool
+ require 'vagrant/action/builtin/handle_box'
  b2.use HandleBox
  b2.use HandleBoxImage
  b2.use CreateDomainVolume
+ b2.use ResolveDiskSettings
  b2.use CreateDomain
  b2.use CreateNetworks
  b2.use CreateNetworkInterfaces
@@ -104,6 +107,7 @@ module VagrantPlugins
  end
  else
  env[:halt_on_error] = true
+ b2.use ResolveDiskSettings
  b2.use CreateNetworks
  b2.use action_start
  end
@@ -117,7 +121,7 @@ module VagrantPlugins
  # Assuming VM is created, just start it. This action is not called
  # directly by any subcommand. VM can be suspended, already running or in
  # poweroff state.
- def self.action_start
+ private_class_method def self.action_start
  Vagrant::Action::Builder.new.tap do |b|
  b.use ConfigValidate
  b.use Call, IsRunning do |env, b2|
@@ -140,10 +144,9 @@ module VagrantPlugins
  b3.use StartDomain
  else
  # VM is not running or suspended.
- b3.use Provision
-
  b3.use PrepareNFSValidIds
  b3.use SyncedFolderCleanup
+ require 'vagrant/action/builtin/synced_folders'
  b3.use SyncedFolders
  b3.use PrepareNFSSettings
  b3.use ShareFolders
@@ -156,6 +159,7 @@ module VagrantPlugins
  # Machine should gain IP address when comming up,
  # so wait for dhcp lease and store IP into machines data_dir.
  b3.use WaitTillUp
+ require 'vagrant/action/builtin/wait_for_communicator'
  b3.use WaitForCommunicator, [:running]

  b3.use ForwardPorts
@@ -213,6 +217,7 @@ module VagrantPlugins
  end

  b2.use ConfigValidate
+ b2.use Provision
  b2.use action_halt
  b2.use action_start
  end
@@ -352,6 +357,9 @@ module VagrantPlugins
  end
  b3.use CreateNetworks
  b3.use ResumeDomain
+ b3.use Provision
+ require 'vagrant/action/builtin/wait_for_communicator'
+ b3.use WaitForCommunicator, [:running]
  end
  end
  end
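
The chains above are assembled from small middleware classes: each receives the next step in its initializer and hands control on from `call(env)`, which is how `ResolveDiskSettings` slots in ahead of `CreateDomain`. A simplified, self-contained illustration of that pattern follows; the `Builder` here is a stand-in, not Vagrant's `Vagrant::Action::Builder`, and the two middleware bodies are placeholders:

```ruby
class Builder
  def initialize
    @stack = []
  end

  def use(klass)
    @stack << klass
    self
  end

  def call(env)
    # Wrap from the inside out so the first `use`d middleware runs first.
    app = ->(final_env) { final_env }
    @stack.reverse_each { |klass| app = klass.new(app, env) }
    app.call(env)
  end
end

class ResolveDiskSettings
  def initialize(app, _env)
    @app = app
  end

  def call(env)
    env[:disks] = [] # placeholder for the real resolution logic
    @app.call(env)
  end
end

class CreateDomain
  def initialize(app, _env)
    @app = app
  end

  def call(env)
    puts "creating domain with #{env[:disks].length} additional disk(s)"
    @app.call(env)
  end
end

Builder.new.use(ResolveDiskSettings).use(CreateDomain).call({})
```
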
data/lib/vagrant-libvirt/config.rb CHANGED
@@ -528,10 +528,14 @@ module VagrantPlugins
  pci_domain = options[:domain]
  end

- @pcis.push(domain: pci_domain,
- bus: options[:bus],
- slot: options[:slot],
- function: options[:function])
+ @pcis.push(domain: pci_domain,
+ bus: options[:bus],
+ slot: options[:slot],
+ function: options[:function],
+ guest_domain: options[:guest_domain],
+ guest_bus: options[:guest_bus],
+ guest_slot: options[:guest_slot],
+ guest_function: options[:guest_function])
  end

  def watchdog(options = {})
@@ -990,6 +994,10 @@ module VagrantPlugins
  errors << "libvirt.qemu_use_agent must be a boolean."
  end

+ if !@nvram.nil? && @loader.nil?
+ errors << "use of 'nvram' requires a 'loader' to be specified, please add one to the configuration"
+ end
+
  if @qemu_use_agent == true
  # if qemu agent is used to optain domain ip configuration, at least
  # one qemu channel has to be configured. As there are various options,
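
The added validation rejects `nvram` without a matching `loader`. A Vagrantfile that satisfies it pairs the two options, for example (the firmware paths below are illustrative and distribution-specific):

```ruby
Vagrant.configure('2') do |config|
  config.vm.provider :libvirt do |libvirt|
    # A pflash-capable loader must accompany nvram, per the check added above.
    libvirt.loader = '/usr/share/OVMF/OVMF_CODE.fd'
    libvirt.nvram  = '/var/lib/libvirt/qemu/nvram/my_vm_VARS.fd'
  end
end
```
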