vagrant-libvirt 0.7.0 → 0.8.0

Files changed (57)
  1. checksums.yaml +4 -4
  2. data/README.md +25 -9
  3. data/lib/vagrant-libvirt/action/cleanup_on_failure.rb +76 -0
  4. data/lib/vagrant-libvirt/action/create_domain.rb +45 -23
  5. data/lib/vagrant-libvirt/action/create_network_interfaces.rb +5 -1
  6. data/lib/vagrant-libvirt/action/create_networks.rb +13 -0
  7. data/lib/vagrant-libvirt/action/destroy_domain.rb +106 -21
  8. data/lib/vagrant-libvirt/action/destroy_networks.rb +1 -1
  9. data/lib/vagrant-libvirt/action/forward_ports.rb +12 -11
  10. data/lib/vagrant-libvirt/action/wait_till_up.rb +6 -32
  11. data/lib/vagrant-libvirt/action.rb +67 -80
  12. data/lib/vagrant-libvirt/config.rb +45 -33
  13. data/lib/vagrant-libvirt/driver.rb +3 -1
  14. data/lib/vagrant-libvirt/errors.rb +8 -0
  15. data/lib/vagrant-libvirt/templates/domain.xml.erb +223 -226
  16. data/lib/vagrant-libvirt/templates/private_network.xml.erb +4 -1
  17. data/lib/vagrant-libvirt/util/network_util.rb +13 -2
  18. data/lib/vagrant-libvirt/util/resolvers.rb +80 -0
  19. data/lib/vagrant-libvirt/version +1 -1
  20. data/locales/en.yml +13 -0
  21. data/spec/spec_helper.rb +33 -28
  22. data/spec/support/libvirt_context.rb +3 -3
  23. data/spec/unit/action/cleanup_on_failure_spec.rb +131 -0
  24. data/spec/unit/action/create_domain_spec/additional_disks_domain.xml +6 -18
  25. data/spec/unit/action/create_domain_spec/custom_disk_settings.xml +43 -0
  26. data/spec/unit/action/create_domain_spec/default_domain.xml +6 -18
  27. data/spec/unit/action/create_domain_spec/two_disk_settings.xml +49 -0
  28. data/spec/unit/action/create_domain_spec.rb +51 -7
  29. data/spec/unit/action/create_domain_volume_spec.rb +5 -3
  30. data/spec/unit/action/destroy_domain_spec/additional_disks_domain.xml +47 -0
  31. data/spec/unit/action/destroy_domain_spec/box_multiple_disks.xml +55 -0
  32. data/spec/unit/action/destroy_domain_spec/box_multiple_disks_and_additional_and_custom_disks.xml +72 -0
  33. data/spec/unit/action/destroy_domain_spec/box_multiple_disks_and_additional_and_custom_disks_no_aliases.xml +67 -0
  34. data/spec/unit/action/destroy_domain_spec/box_multiple_disks_and_additional_disks.xml +67 -0
  35. data/spec/unit/action/destroy_domain_spec/cdrom_domain.xml +48 -0
  36. data/spec/unit/action/destroy_domain_spec.rb +134 -30
  37. data/spec/unit/action/forward_ports_spec.rb +10 -2
  38. data/spec/unit/action/prepare_nfs_settings_spec.rb +4 -0
  39. data/spec/unit/action/start_domain_spec/clock_timer_rtc.xml +6 -18
  40. data/spec/unit/action/start_domain_spec/default.xml +6 -18
  41. data/spec/unit/action/start_domain_spec/default_added_tpm_path.xml +6 -18
  42. data/spec/unit/action/start_domain_spec/default_added_tpm_version.xml +6 -18
  43. data/spec/unit/action/start_domain_spec/existing.xml +1 -1
  44. data/spec/unit/action/wait_till_up_spec.rb +2 -42
  45. data/spec/unit/action_spec.rb +2 -0
  46. data/spec/unit/config_spec.rb +85 -26
  47. data/spec/unit/driver_spec.rb +17 -8
  48. data/spec/unit/provider_spec.rb +11 -0
  49. data/spec/unit/templates/domain_all_settings.xml +52 -79
  50. data/spec/unit/templates/domain_cpu_mode_passthrough.xml +39 -0
  51. data/spec/unit/templates/domain_custom_cpu_model.xml +6 -18
  52. data/spec/unit/templates/domain_defaults.xml +6 -18
  53. data/spec/unit/templates/domain_spec.rb +36 -13
  54. data/spec/unit/templates/tpm/version_1.2.xml +6 -18
  55. data/spec/unit/templates/tpm/version_2.0.xml +6 -18
  56. data/spec/unit/util/resolvers_spec.rb +116 -0
  57. metadata +62 -64
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 696535927498f1996c3fd02f6e5503bd8ac47e92eb226d03cd28c46329232b93
-  data.tar.gz: 73cfeb05a09dad74122a6f09f562b5d56ac7c43a7ec029f2e8f5c8dcfa2a9413
+  metadata.gz: b1bfaefd1dab7cbdb1293e867e29fef05a9eb101fae5226a38203593de84e717
+  data.tar.gz: d36d4f487ed2ad0b4f00ba09c263a5cd8deda6c268eef50974376e2796b7e64d
 SHA512:
-  metadata.gz: b4237cc046b138f9fbd5efad00bcc80d97b964d6a374e68b94c20cc19fd668e6eda2ea2939d8782ca05b9ea80478907a9728c3b34692b1b2adbb6305f1798cf4
-  data.tar.gz: 5a3e8094b553fd2557be01514f782139bfa52edca8c7dc811138932a84c7f3bffe44e7c0a5a5b322868e648016146efc9b67f5e3a7356e98b2d58b0e6ca2db74
+  metadata.gz: 510c1b6ffd2fb4eea02ede8d7612c3a41a89e5c50b630370bec6fd70b79986caa222e6ce1cc0f4c2732b310f7c134866aa2283ee449441c92078b148c4734503
+  data.tar.gz: cb6ab754dcff508ec150c6ff67cf82b603243596b35ab6a5ef79d4dfe36042a933dde8eebc3630d5a2473c8fcd398111a98d12a64a66950ad0abe942c15bce40
data/README.md CHANGED
@@ -49,8 +49,8 @@ can help a lot :-)
   * [USB Device Passthrough](#usb-device-passthrough)
   * [USB Redirector Devices](#usb-redirector-devices)
   * [Filter for USB Redirector Devices](#filter-for-usb-redirector-devices)
-  * [Random number generator passthrough](#random-number-generator-passthrough)
   * [Serial Console Devices](#serial-console-devices)
+  * [Random number generator passthrough](#random-number-generator-passthrough)
   * [Watchdog device](#watchdog-device)
   * [Smartcard device](#smartcard-device)
   * [Hypervisor Features](#hypervisor-features)
@@ -141,7 +141,7 @@ docker pull vagrantlibvirt/vagrant-libvirt:edge
 
 Running the image:
 ```bash
-docker run -it --rm \
+docker run -i --rm \
   -e LIBVIRT_DEFAULT_URI \
   -v /var/run/libvirt/:/var/run/libvirt/ \
   -v ~/.vagrant.d:/.vagrant.d \
@@ -155,7 +155,7 @@ docker run -it --rm \
 It's possible to define a function in `~/.bashrc`, for example:
 ```bash
 vagrant(){
-  docker run -it --rm \
+  docker run -i --rm \
     -e LIBVIRT_DEFAULT_URI \
     -v /var/run/libvirt/:/var/run/libvirt/ \
     -v ~/.vagrant.d:/.vagrant.d \
@@ -184,7 +184,7 @@ To run with Podman you need to include
   --security-opt label=disable \
   -v ~/.vagrant.d/boxes:/vagrant/boxes \
   -v ~/.vagrant.d/data:/vagrant/data \
-  -v ~/.vagrant.d/data:/vagrant/tmp \
+  -v ~/.vagrant.d/tmp:/vagrant/tmp \
 ```
 
 for example:
@@ -196,7 +196,7 @@ vagrant(){
     -v /var/run/libvirt/:/var/run/libvirt/ \
     -v ~/.vagrant.d/boxes:/vagrant/boxes \
     -v ~/.vagrant.d/data:/vagrant/data \
-    -v ~/.vagrant.d/data:/vagrant/tmp \
+    -v ~/.vagrant.d/tmp:/vagrant/tmp \
     -v $(realpath "${PWD}"):${PWD} \
     -w $(realpath "${PWD}") \
     --network host \
@@ -610,7 +610,7 @@ end
   values](http://libvirt.org/formatdomain.html#elementsVideo) are "vga",
   "cirrus", "vmvga", "xen", "vbox", or "qxl".
 * `video_vram` - Used by some graphics card types to vary the amount of RAM
-  dedicated to video. Defaults to 9216.
+  dedicated to video. Defaults to 16384.
 * `video_accel3d` - Set to `true` to enable 3D acceleration. Defaults to
   `false`.
 * `sound_type` - [Set the virtual sound card](https://libvirt.org/formatdomain.html#elementsSound)
@@ -859,6 +859,7 @@ starts with `libvirt__` string. Here is a list of those options:
 * `:libvirt__dhcp_bootp_server` - The server that runs the DHCP server. Used
   only when dhcp is enabled. By default it is the same host that runs the DHCP
   server.
+* `:libvirt__tftp_root` - Path to the root directory served via TFTP.
 * `:libvirt__adapter` - Number specifying the sequence number of the interface.
 * `:libvirt__forward_mode` - Specify one of `veryisolated`, `none`, `open`, `nat`
   or `route` options. This option is used only when creating new network. Mode
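
As a hedged illustration of the new `:libvirt__tftp_root` option (the machine name, IP, and path below are hypothetical, not taken from this changeset), a PXE-style private network could point the network's dnsmasq at a TFTP root like so:

```ruby
Vagrant.configure("2") do |config|
  config.vm.define :pxeclient do |pxeclient|
    # dnsmasq for this isolated network serves boot files from /srv/tftp
    pxeclient.vm.network :private_network,
      :ip => "172.28.128.5",
      :libvirt__tftp_root => "/srv/tftp"
  end
end
```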
@@ -977,6 +978,8 @@ used by this network are configurable at the provider level.
 * `management_network_domain` - Domain name assigned to the management network.
 * `management_network_mtu` - MTU size of management network. If not specified,
   the Libvirt default (1500) will be used.
+* `management_network_keep` - Starting from version *0.7.0*, *always_destroy* is set to *true* by default
+  for any network. This option allows changing this behaviour for the management network.
 
 You may wonder how vagrant-libvirt knows the IP address a VM received. Libvirt
 doesn't provide a standard way to find out the IP address of a running domain.
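
A minimal sketch of the new `management_network_keep` option in use (illustrative Vagrantfile, assuming the default management network):

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    # keep the management network defined after the last machine
    # attached to it is destroyed
    libvirt.management_network_keep = true
  end
end
```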
@@ -999,6 +1002,19 @@ if it detects an attached channel during boot.
 * `qemu_use_agent` - false by default, if set to true, attempt to extract the configured
   ip address via the qemu agent.
 
+  By default, if `qemu_use_agent` is set to `true`, the code will automatically
+  inject a suitable channel unless there already exists an entry with a
+  `:target_name` matching `'org.qemu.guest_agent.'`.
+  Alternatively, if setting `qemu_use_agent` but needing to disable the addition
+  of the channel, simply use a disabled flag as follows:
+  ```ruby
+  Vagrant.configure(2) do |config|
+    config.vm.provider :libvirt do |libvirt|
+      libvirt.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :disabled => true
+    end
+  end
+  ```
+
 To use the management network interface with an external dhcp service you need
 to set up a bridged host network manually and define it via
 `management_network_name` in your Vagrantfile.
@@ -1196,9 +1212,9 @@ Bus 001 Device 002: ID 1234:abcd Example device
 Vagrant.configure("2") do |config|
   config.vm.provider :libvirt do |libvirt|
     # pass through specific device based on identifying it
-    libvirt.usbdev :vendor => '0x1234', :product => '0xabcd'
+    libvirt.usb :vendor => '0x1234', :product => '0xabcd'
     # pass through a host device where multiple of the same vendor/product exist
-    libvirt.usbdev :bus => '1', :device => '1'
+    libvirt.usb :bus => '1', :device => '1'
   end
 end
 ```
@@ -1279,7 +1295,7 @@ Currently only redirecting to a file is supported.
 Vagrant.configure("2") do |config|
   config.vm.define :test do |test|
     test.vm.provider :libvirt do |domain|
-      domain.serial :type => "file", :source => {:path => "/var/log/vm_consoles/test.log}
+      domain.serial :type => "file", :source => {:path => "/var/log/vm_consoles/test.log"}
     end
   end
 end
data/lib/vagrant-libvirt/action/cleanup_on_failure.rb ADDED
@@ -0,0 +1,76 @@
+# frozen_string_literal: true
+
+
+module VagrantPlugins
+  module ProviderLibvirt
+    module Action
+      class CleanupOnFailure
+        def initialize(app, _env)
+          @logger = Log4r::Logger.new('vagrant_libvirt::action::cleanup_on_failure')
+          @app = app
+          @cleanup = true
+        end
+
+        def call(env)
+          # passing a value doesn't work as the env that is updated may be dupped from
+          # the original meaning the latter action's update is discarded. Instead pass
+          # a reference to the method on this class that will toggle the instance
+          # variable indicating whether cleanup is needed or not.
+          env['vagrant-libvirt.complete'] = method(:completed)
+
+          @app.call(env)
+        end
+
+        def recover(env)
+          return unless env[:machine] && env[:machine].state.id != :not_created
+
+          # only destroy if failed to complete bring up
+          unless @cleanup
+            @logger.debug('VM provider setup was completed, no need to halt/destroy')
+            return
+          end
+
+          # If we're not supposed to destroy on error then just return
+          return unless env[:destroy_on_error]
+
+          if env[:halt_on_error]
+            halt_env = env.dup
+            halt_env.delete(:interrupted)
+            halt_env[:config_validate] = false
+            env[:action_runner].run(Action.action_halt, halt_env)
+          else
+            destroy_env = env.dup
+            destroy_env.delete(:interrupted)
+            destroy_env[:config_validate] = false
+            destroy_env[:force_confirm_destroy] = true
+            env[:action_runner].run(Action.action_destroy, destroy_env)
+          end
+        end
+
+        def completed
+          @cleanup = false
+        end
+      end
+
+      class SetupComplete
+        def initialize(app, _env)
+          @logger = Log4r::Logger.new('vagrant_libvirt::action::setup_complete')
+          @app = app
+        end
+
+        def call(env)
+          if env['vagrant-libvirt.complete'].nil? or !env['vagrant-libvirt.complete'].respond_to? :call
+            raise Errors::CallChainError, require_action: CleanupOnFailure.name, current_action: SetupComplete.name
+          end
+
+          @logger.debug('Marking provider setup as completed')
+          # mark provider as finished setup so that any failure after this
+          # point doesn't result in destroying or shutting down the VM
+          env['vagrant-libvirt.complete'].call
+
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
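
The two middlewares are meant to bracket the provider's bring-up chain: `CleanupOnFailure` runs first so its `recover` sees any failure below it, and `SetupComplete` runs last so the flag only flips once everything succeeded. A simplified sketch of that composition (placeholder actions, not the plugin's real chain from `action.rb`):

```ruby
# Simplified sketch; the actual chain is built in lib/vagrant-libvirt/action.rb.
Vagrant::Action::Builder.new.tap do |b|
  b.use CleanupOnFailure # registers env['vagrant-libvirt.complete']
  b.use CreateNetworks   # placeholder bring-up step
  b.use CreateDomain     # placeholder bring-up step
  b.use SetupComplete    # marks setup finished, disabling the cleanup
end
```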
data/lib/vagrant-libvirt/action/create_domain.rb CHANGED
@@ -2,6 +2,8 @@
 
 require 'log4r'
 
+require 'vagrant-libvirt/util/resolvers'
+
 module VagrantPlugins
   module ProviderLibvirt
     module Action
@@ -20,7 +22,7 @@ module VagrantPlugins
 
        def _disks_print(disks)
          disks.collect do |x|
-            "#{x[:device]}(#{x[:type]},#{x[:size]})"
+            "#{x[:device]}(#{x[:type]}, #{x[:bus]}, #{x[:size]})"
          end.join(', ')
        end
 
@@ -73,11 +75,7 @@ module VagrantPlugins
          @graphics_autoport = config.graphics_autoport
          @graphics_port = config.graphics_port
          @graphics_ip = config.graphics_ip
-          @graphics_passwd = if config.graphics_passwd.to_s.empty?
-                               ''
-                             else
-                               "passwd='#{config.graphics_passwd}'"
-                             end
+          @graphics_passwd = config.graphics_passwd
          @graphics_gl = config.graphics_gl
          @video_type = config.video_type
          @sound_type = config.sound_type
@@ -135,11 +133,19 @@ module VagrantPlugins
          # RNG device passthrough
          @rng = config.rng
 
+          # Memballoon
+          @memballoon_enabled = config.memballoon_enabled
+          @memballoon_model = config.memballoon_model
+          @memballoon_pci_bus = config.memballoon_pci_bus
+          @memballoon_pci_slot = config.memballoon_pci_slot
+
          config = env[:machine].provider_config
          @domain_type = config.driver
 
          @os_type = 'hvm'
 
+          resolver = ::VagrantPlugins::ProviderLibvirt::Util::DiskDeviceResolver.new(prefix=@disk_device[0..1])
+
          # Get path to domain image from the storage pool selected if we have a box.
          if env[:machine].config.vm.box
            if @snapshot_pool_name != @storage_pool_name
@@ -147,6 +153,12 @@ module VagrantPlugins
            else
              pool_name = @storage_pool_name
            end
+
+            # special handling for domain volume
+            env[:box_volumes][0][:device] = env[:box_volumes][0].fetch(:device, @disk_device)
+
+            resolver.resolve!(env[:box_volumes])
+
            @logger.debug "Search for volumes in pool: #{pool_name}"
            env[:box_volumes].each_index do |index|
              suffix_index = index > 0 ? "_#{index}" : ''
@@ -154,14 +166,16 @@ module VagrantPlugins
                name: "#{@name}#{suffix_index}.img"
              ).find { |x| x.pool_name == pool_name }
              raise Errors::DomainVolumeExists if domain_volume.nil?
+
              @domain_volumes.push({
-                :dev => (index+1).vdev.to_s,
+                :dev => env[:box_volumes][index][:device],
                :cache => @domain_volume_cache,
                :bus => @disk_bus,
                :path => domain_volume.path,
                :virtual_size => env[:box_volumes][index][:virtual_size]
              })
-            end
+            end
+
          end
 
@@ -173,19 +187,7 @@ module VagrantPlugins
            storage_prefix = get_disk_storage_prefix(env, @storage_pool_name)
          end
 
-          @serials = config.serials
-
-          @serials.each do |serial|
-            next unless serial[:source] && serial[:source][:path]
-
-            dir = File.dirname(serial[:source][:path])
-            begin
-              FileUtils.mkdir_p(dir)
-            rescue ::Errno::EACCES
-              raise Errors::SerialCannotCreatePathError,
-                    path: dir
-            end
-          end
+          resolver.resolve!(@disks)
 
          @disks.each do |disk|
            disk[:path] ||= _disk_name(@name, disk)
@@ -235,6 +237,20 @@ module VagrantPlugins
            end
          end
 
+          @serials = config.serials
+
+          @serials.each do |serial|
+            next unless serial[:source] && serial[:source][:path]
+
+            dir = File.dirname(serial[:source][:path])
+            begin
+              FileUtils.mkdir_p(dir)
+            rescue ::Errno::EACCES
+              raise Errors::SerialCannotCreatePathError,
+                    path: dir
+            end
+          end
+
          # Output the settings we're going to use to the user
          env[:ui].info(I18n.t('vagrant_libvirt.creating_domain'))
          env[:ui].info(" -- Name: #{@name}")
@@ -284,7 +300,7 @@ module VagrantPlugins
          end
          env[:ui].info(" -- Storage pool: #{@storage_pool_name}")
          @domain_volumes.each do |volume|
-            env[:ui].info(" -- Image(#{volume[:device]}): #{volume[:path]}, #{volume[:virtual_size].to_GB}G")
+            env[:ui].info(" -- Image(#{volume[:dev]}): #{volume[:path]}, #{volume[:bus]}, #{volume[:virtual_size].to_GB}G")
          end
 
          if not @disk_driver_opts.empty?
@@ -298,7 +314,7 @@ module VagrantPlugins
          env[:ui].info(" -- Graphics Type: #{@graphics_type}")
          env[:ui].info(" -- Graphics Port: #{@graphics_port}")
          env[:ui].info(" -- Graphics IP: #{@graphics_ip}")
-          env[:ui].info(" -- Graphics Password: #{@graphics_passwd.empty? ? 'Not defined' : 'Defined'}")
+          env[:ui].info(" -- Graphics Password: #{@graphics_passwd.nil? ? 'Not defined' : 'Defined'}")
          env[:ui].info(" -- Video Type: #{@video_type}")
          env[:ui].info(" -- Video VRAM: #{@video_vram}")
          env[:ui].info(" -- Video 3D accel: #{@video_accel3d}")
@@ -312,6 +328,12 @@ module VagrantPlugins
            env[:ui].info(" -- TPM Path: #{@tpm_path}")
          end
 
+          if @memballoon_enabled
+            env[:ui].info(" -- Memballoon model: #{@memballoon_model}")
+            env[:ui].info(" -- Memballoon bus: #{@memballoon_pci_bus}")
+            env[:ui].info(" -- Memballoon slot: #{@memballoon_pci_slot}")
+          end
+
          @boot_order.each do |device|
            env[:ui].info(" -- Boot device: #{device}")
          end
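
Judging from the calls above, `DiskDeviceResolver` fills in missing `:device` names for box volumes and extra disks while leaving user-pinned names alone; a hedged sketch of the expected behaviour with illustrative values (the authoritative logic lives in `util/resolvers.rb`):

```ruby
require 'vagrant-libvirt/util/resolvers'

# 'vd' is the two-character prefix taken from the disk device ('vda' -> 'vd')
resolver = VagrantPlugins::ProviderLibvirt::Util::DiskDeviceResolver.new('vd')
disks = [
  { :device => 'vdb', :size => '10G' }, # pinned by the user, left untouched
  { :size => '20G' },                   # expected to receive the next free name
]
resolver.resolve!(disks)
```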
data/lib/vagrant-libvirt/action/create_network_interfaces.rb CHANGED
@@ -213,8 +213,12 @@ module VagrantPlugins
            type: :static,
            ip: options[:ip],
            netmask: options[:netmask],
-            gateway: options[:gateway]
+            gateway: options[:gateway],
+            route: options[:route]
          }.merge(network)
+          if IPAddr.new(options[:ip]).ipv6?
+            network[:type] = :static6
+          end
        else
          network[:type] = :dhcp
        end
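
The new address-family check leans on Ruby's stdlib `IPAddr`, for example:

```ruby
require 'ipaddr'

IPAddr.new('192.168.121.10').ipv6?     # => false, interface type stays :static
IPAddr.new('fde4:8dba:82e1::c4').ipv6? # => true, interface type becomes :static6
```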
data/lib/vagrant-libvirt/action/create_networks.rb CHANGED
@@ -30,6 +30,15 @@
 
      def call(env)
        if env[:machine].provider_config.qemu_use_session
+          # Get a list of all (active and inactive) Libvirt networks. This
+          # triggers a side effect to ensure networking is fully available
+          # for VMs using sessions. It is likely that this should be done
+          # to determine the correct virtual device for the management
+          # network for sessions instead of assuming the default of virbr0.
+          @available_networks = libvirt_networks(
+            env[:machine].provider.driver.system_connection
+          )
+
          @app.call(env)
          return
        end
@@ -343,6 +352,10 @@
          @network_dhcp_enabled = false
        end
 
+        if @options[:tftp_root]
+          @tftp_root = @options[:tftp_root]
+        end
+
        @network_domain_name = @options[:domain_name]
 
        begin
data/lib/vagrant-libvirt/action/destroy_domain.rb CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
 require 'log4r'
+require 'rexml'
 
 module VagrantPlugins
  module ProviderLibvirt
@@ -48,37 +49,121 @@ module VagrantPlugins
          # cdroms are consider volumes, but cannot be destroyed
          domain.destroy(destroy_volumes: true)
        else
+          domain_xml = libvirt_domain.xml_desc(1)
+          xml_descr = REXML::Document.new(domain_xml)
+          disks_xml = REXML::XPath.match(xml_descr, '/domain/devices/disk[@device="disk"]')
+          have_aliases = !(REXML::XPath.match(disks_xml, './alias[@name="ua-box-volume-0"]').first).nil?
+          if !have_aliases
+            env[:ui].warn(I18n.t('vagrant_libvirt.destroy.obsolete_method'))
+          end
+
          domain.destroy(destroy_volumes: false)
 
-          env[:machine].provider_config.disks.each do |disk|
-            # shared disks remove only manually or ???
-            next if disk[:allow_existing]
-            diskname = libvirt_domain.name + '-' + disk[:device] + '.' + disk[:type].to_s
-            # diskname is unique
-            libvirt_disk = domain.volumes.select do |x|
-              x.name == diskname
-            end.first
-            if libvirt_disk
-              libvirt_disk.destroy
-            elsif disk[:path]
-              poolname = env[:machine].provider_config.storage_pool_name
-              libvirt_disk = domain.volumes.select do |x|
-                # FIXME: can remove pool/target.img and pool/123/target.img
-                x.path =~ /\/#{disk[:path]}$/ && x.pool_name == poolname
+          volumes = domain.volumes
+
+          # Remove root storage. If no aliases available, perform the removal by name and keep track
+          # of how many matches there are in the volumes. This will provide a fallback offset to where
+          # the additional storage devices are.
+          detected_box_volumes = 0
+          if have_aliases
+            REXML::XPath.match(disks_xml, './alias[contains(@name, "ua-box-volume-")]').each do |box_disk|
+              diskname = box_disk.parent.elements['source'].attributes['file'].rpartition('/').last
+              detected_box_volumes += 1
+
+              destroy_volume(volumes, diskname, env)
+            end
+          else
+            # fallback to try and infer which boxes are box images, as they are listed first
+            # as soon as there is no match, can exit
+            disks_xml.each_with_index do |box_disk, idx|
+              name = libvirt_domain.name + (idx == 0 ? '.img' : "_#{idx}.img")
+              diskname = box_disk.elements['source'].attributes['file'].rpartition('/').last
+
+              break if name != diskname
+              detected_box_volumes += 1
+
+              root_disk = volumes.select do |x|
+                x.name == name if x
              end.first
-              libvirt_disk.destroy if libvirt_disk
+              if root_disk
+                root_disk.destroy
+              end
            end
          end
 
-          # remove root storage
-          root_disk = domain.volumes.select do |x|
-            x.name == libvirt_domain.name + '.img' if x
-          end.first
-          root_disk.destroy if root_disk
+          # work out if there are any custom disks attached that wasn't done by vagrant-libvirt,
+          # and warn there might be unexpected behaviour
+          total_disks = disks_xml.length
+          offset = total_disks - env[:machine].provider_config.disks.length
+          if offset != detected_box_volumes
+            env[:ui].warn(I18n.t('vagrant_libvirt.destroy.unexpected_volumes'))
+          end
+
+          if !have_aliases
+            # if no aliases found, see if it's possible to check the number of box disks
+            # otherwise the destroy could remove the wrong disk by accident.
+            if env[:machine].box != nil
+              box_disks = env[:machine].box.metadata.fetch('disks', [1])
+              offset = box_disks.length
+              if offset != detected_box_volumes
+                env[:ui].warn(I18n.t('vagrant_libvirt.destroy.expected_removal_mismatch'))
+              end
+            else
+              env[:ui].warn(I18n.t('vagrant_libvirt.destroy.box_metadata_unavailable'))
+            end
+
+            # offset only used when no aliases available
+            offset = detected_box_volumes
+          end
+
+          env[:machine].provider_config.disks.each_with_index.each do |disk, index|
+            # shared disks remove only manually or ???
+            next if disk[:allow_existing]
+
+            # look for exact match using aliases which will be used
+            # for subsequent domain creations
+            if have_aliases
+              domain_disk = REXML::XPath.match(disks_xml, './alias[@name="ua-disk-volume-' + index.to_s + '"]').first
+              domain_disk = domain_disk.parent if !domain_disk.nil?
+            else
+              # otherwise fallback to find the disk by device if specified by user
+              # and finally index counting with offset and hope the match is correct
+              if !disk[:device].nil?
+                domain_disk = REXML::XPath.match(disks_xml, './target[@dev="' + disk[:device] + '"]').first
+                domain_disk = domain_disk.parent if !domain_disk.nil?
+              else
+                domain_disk = disks_xml[offset + index]
+              end
+            end
+
+            next if domain_disk.nil?
+
+            diskname = domain_disk.elements['source'].attributes['file'].rpartition('/').last
+            destroy_volume(volumes, diskname, env)
+          end
        end
 
        @app.call(env)
      end
+
+      protected
+
+      def destroy_volume(volumes, diskname, env)
+        # diskname is unique
+        libvirt_disk = volumes.select do |x|
+          x.name == diskname if x
+        end.first
+        if libvirt_disk
+          libvirt_disk.destroy
+        elsif disk[:path]
+          poolname = env[:machine].provider_config.storage_pool_name
+          libvirt_disk = volumes.select do |x|
+            # FIXME: can remove pool/target.img and pool/123/target.img
+            x.path =~ /\/#{disk[:path]}$/ && x.pool_name == poolname
+          end.first
+          libvirt_disk.destroy if libvirt_disk
+        end
+      end
    end
  end
end
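
The `ua-box-volume-N` / `ua-disk-volume-N` names follow libvirt's convention that user-supplied device aliases carry a `ua-` prefix. A self-contained sketch of the lookup the new code performs, against illustrative domain XML:

```ruby
require 'rexml/document'

xml = REXML::Document.new(<<~XML)
  <domain>
    <devices>
      <disk device="disk">
        <source file="/var/lib/libvirt/images/example-0.img"/>
        <alias name="ua-box-volume-0"/>
      </disk>
    </devices>
  </domain>
XML

REXML::XPath.match(xml, '/domain/devices/disk[@device="disk"]').each do |disk|
  # only disks created by vagrant-libvirt 0.8.0+ carry the ua- alias
  next if REXML::XPath.first(disk, './alias[contains(@name, "ua-box-volume-")]').nil?

  puts disk.elements['source'].attributes['file']
  # => /var/lib/libvirt/images/example-0.img
end
```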
data/lib/vagrant-libvirt/action/destroy_networks.rb CHANGED
@@ -47,7 +47,7 @@
          )
        rescue Libvirt::RetrieveError => e
          # this network is already destroyed, so move on
-          if e.message =~ /Network not found/
+          if e.libvirt_code == ProviderLibvirt::Util::ErrorCodes::VIR_ERR_NO_NETWORK
            @logger.info 'It is already undefined'
            next
          # some other error occured, so raise it again
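
Matching on the numeric error code is sturdier than the old message regexp, which broke whenever libvirt reworded or localised the string. Schematically (`VIR_ERR_NO_NETWORK` is 43 in libvirt's virerror.h):

```ruby
begin
  libvirt_network.undefine
rescue Libvirt::RetrieveError => e
  # the network is already gone, which is the desired end state anyway
  raise unless e.libvirt_code == ProviderLibvirt::Util::ErrorCodes::VIR_ERR_NO_NETWORK
end
```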
data/lib/vagrant-libvirt/action/forward_ports.rb CHANGED
@@ -87,12 +87,13 @@
                               gateway_ports)
        ssh_info = machine.ssh_info
        params = %W(
+          -n
          -L
          #{host_ip}:#{host_port}:#{guest_ip}:#{guest_port}
          -N
          #{ssh_info[:host]}
-        ).join(' ')
-        params += ' -g' if gateway_ports
+        )
+        params += '-g' if gateway_ports
 
        options = (%W(
          User=#{ssh_info[:username]}
@@ -105,32 +106,32 @@
          ForwardX11=#{ssh_info[:forward_x11] ? 'yes' : 'no'}
          IdentitiesOnly=#{ssh_info[:keys_only] ? 'yes' : 'no'}
        ) + ssh_info[:private_key_path].map do |pk|
-          "IdentityFile='\"#{pk}\"'"
-        end).map { |s| "-o #{s}" }.join(' ')
+          "IdentityFile=\"#{pk}\""
+        end
+        ).map { |s| ['-o', s] }.flatten
 
-        options += " -o ProxyCommand=\"#{ssh_info[:proxy_command]}\"" if machine.provider_config.proxy_command
+        options += ['-o', "ProxyCommand=\"#{ssh_info[:proxy_command]}\""] if machine.provider_config.proxy_command
+
+        ssh_cmd = ['ssh'] + options + params
 
        # TODO: instead of this, try and lock and get the stdin from spawn...
-        ssh_cmd = ''
        if host_port <= 1024
          @@lock.synchronize do
            # TODO: add i18n
            env[:ui].info 'Requesting sudo for host port(s) <= 1024'
            r = system('sudo -v')
            if r
-              ssh_cmd += 'sudo ' # add sudo prefix
+              ssh_cmd.unshift('sudo') # add sudo prefix
            end
          end
        end
 
-        ssh_cmd += "ssh -n #{options} #{params}"
-
-        @logger.debug "Forwarding port with `#{ssh_cmd}`"
+        @logger.debug "Forwarding port with `#{ssh_cmd.join(' ')}`"
        log_file = ssh_forward_log_file(
          env[:machine], host_ip, host_port, guest_ip, guest_port,
        )
        @logger.info "Logging to #{log_file}"
-        spawn(ssh_cmd, [:out, :err] => [log_file, 'w'], :pgroup => true)
+        spawn(*ssh_cmd, [:out, :err] => [log_file, 'w'], :pgroup => true)
      end
 
      def ssh_forward_log_file(machine, host_ip, host_port, guest_ip, guest_port)
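
Building the command as an argv array and splatting it into `spawn` bypasses the shell entirely, so option values no longer need a layer of shell quoting on top of ssh's own config-style quoting. Roughly (hostname and key path are placeholders):

```ruby
# old: one string, handed to /bin/sh, so the key path needed shell
# quoting *and* ssh option quoting
spawn(%q{ssh -o IdentityFile='"/path with spaces/key"' -N example.host})

# new: argv array, each element reaches ssh verbatim; only the inner
# quotes consumed by ssh's own option parser remain
spawn('ssh', '-o', 'IdentityFile="/path with spaces/key"', '-N', 'example.host')
```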