vagrant-libvirt 0.0.30 → 0.0.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +20 -0
  3. data/Gemfile +6 -1
  4. data/README.md +146 -6
  5. data/example_box/Vagrantfile +1 -1
  6. data/example_box/metadata.json +1 -1
  7. data/lib/vagrant-libvirt.rb +3 -15
  8. data/lib/vagrant-libvirt/action.rb +59 -73
  9. data/lib/vagrant-libvirt/action/create_domain.rb +47 -19
  10. data/lib/vagrant-libvirt/action/create_domain_volume.rb +5 -5
  11. data/lib/vagrant-libvirt/action/create_network_interfaces.rb +82 -36
  12. data/lib/vagrant-libvirt/action/create_networks.rb +99 -54
  13. data/lib/vagrant-libvirt/action/destroy_domain.rb +4 -4
  14. data/lib/vagrant-libvirt/action/destroy_networks.rb +2 -2
  15. data/lib/vagrant-libvirt/action/halt_domain.rb +1 -1
  16. data/lib/vagrant-libvirt/action/handle_box_image.rb +25 -5
  17. data/lib/vagrant-libvirt/action/handle_storage_pool.rb +9 -7
  18. data/lib/vagrant-libvirt/action/is_running.rb +1 -1
  19. data/lib/vagrant-libvirt/action/is_suspended.rb +1 -1
  20. data/lib/vagrant-libvirt/action/package_domain.rb +3 -3
  21. data/lib/vagrant-libvirt/action/prepare_nfs_settings.rb +8 -5
  22. data/lib/vagrant-libvirt/action/prepare_nfs_valid_ids.rb +1 -1
  23. data/lib/vagrant-libvirt/action/prune_nfs_exports.rb +1 -1
  24. data/lib/vagrant-libvirt/action/read_mac_addresses.rb +1 -1
  25. data/lib/vagrant-libvirt/action/remove_libvirt_image.rb +1 -1
  26. data/lib/vagrant-libvirt/action/remove_stale_volume.rb +2 -2
  27. data/lib/vagrant-libvirt/action/resume_domain.rb +1 -1
  28. data/lib/vagrant-libvirt/action/set_boot_order.rb +66 -0
  29. data/lib/vagrant-libvirt/action/set_name_of_domain.rb +3 -2
  30. data/lib/vagrant-libvirt/action/start_domain.rb +1 -1
  31. data/lib/vagrant-libvirt/action/suspend_domain.rb +1 -1
  32. data/lib/vagrant-libvirt/action/wait_till_up.rb +1 -1
  33. data/lib/vagrant-libvirt/cap/mount_p9.rb +2 -1
  34. data/lib/vagrant-libvirt/cap/synced_folder.rb +11 -5
  35. data/lib/vagrant-libvirt/config.rb +44 -5
  36. data/lib/vagrant-libvirt/driver.rb +121 -0
  37. data/lib/vagrant-libvirt/errors.rb +4 -0
  38. data/lib/vagrant-libvirt/plugin.rb +7 -5
  39. data/lib/vagrant-libvirt/provider.rb +54 -12
  40. data/lib/vagrant-libvirt/templates/domain.xml.erb +18 -12
  41. data/lib/vagrant-libvirt/templates/filesystem.xml.erb +1 -1
  42. data/lib/vagrant-libvirt/templates/tunnel_interface.xml.erb +11 -0
  43. data/lib/vagrant-libvirt/util/network_util.rb +11 -1
  44. data/lib/vagrant-libvirt/version.rb +1 -1
  45. data/locales/en.yml +24 -15
  46. data/spec/support/environment_helper.rb +1 -1
  47. data/tools/prepare_redhat_for_box.sh +1 -2
  48. metadata +6 -5
  49. data/lib/vagrant-libvirt/action/connect_libvirt.rb +0 -51
  50. data/lib/vagrant-libvirt/action/read_ssh_info.rb +0 -68
  51. data/lib/vagrant-libvirt/action/read_state.rb +0 -60
@@ -17,7 +17,7 @@ module VagrantPlugins
17
17
  # Fog libvirt currently doesn't support snapshots. Use
18
18
  # ruby-libvirt client directly. Note this is racy, see
19
19
  # http://www.libvirt.org/html/libvirt-libvirt.html#virDomainSnapshotListNames
20
- libvirt_domain = env[:libvirt_compute].client.lookup_domain_by_uuid(
20
+ libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(
21
21
  env[:machine].id)
22
22
  libvirt_domain.list_snapshots.each do |name|
23
23
  @logger.info("Deleting snapshot '#{name}'")
@@ -28,7 +28,7 @@ module VagrantPlugins
28
28
  end
29
29
  end
30
30
 
31
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
31
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
32
32
 
33
33
  if env[:machine].provider_config.disks.empty?
34
34
  # if using default configuration of disks
@@ -37,10 +37,10 @@ module VagrantPlugins
37
37
  domain.destroy(destroy_volumes: false)
38
38
 
39
39
  env[:machine].provider_config.disks.each do |disk|
40
- # shared disks remove only manualy or ???
40
+ # shared disks remove only manually or ???
41
41
  next if disk[:allow_existing]
42
42
  diskname = libvirt_domain.name + '-' + disk[:device] + '.' + disk[:type].to_s
43
- # diskname is uniq
43
+ # diskname is unique
44
44
  libvirt_disk = domain.volumes.select do |x|
45
45
  x.name == diskname
46
46
  end.first
@@ -36,7 +36,7 @@ module VagrantPlugins
36
36
  # lookup_network_by_uuid throws same exception
37
37
  # if there is an error or if the network just doesn't exist
38
38
  begin
39
- libvirt_network = env[:libvirt_compute].client.lookup_network_by_uuid(
39
+ libvirt_network = env[:machine].provider.driver.connection.client.lookup_network_by_uuid(
40
40
  network_uuid)
41
41
  rescue Libvirt::RetrieveError => e
42
42
  # this network is already destroyed, so move on
@@ -66,7 +66,7 @@ module VagrantPlugins
66
66
  libvirt_network.undefine
67
67
  @logger.info "Undefined it"
68
68
  rescue => e
69
- raise Error::DestroyNetworkError,
69
+ raise Errors::DestroyNetworkError,
70
70
  network_name: libvirt_network.name,
71
71
  error_message: e.message
72
72
  end
@@ -13,7 +13,7 @@ module VagrantPlugins
13
13
  def call(env)
14
14
  env[:ui].info(I18n.t("vagrant_libvirt.halt_domain"))
15
15
 
16
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
16
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
17
17
  raise Errors::NoDomainError if domain == nil
18
18
 
19
19
  @logger.info("Trying gracefull shutdown.")
@@ -37,10 +37,30 @@ module VagrantPlugins
37
37
  env[:box_volume_name] = env[:machine].box.name.to_s.dup.gsub("/", "-VAGRANTSLASH-")
38
38
  env[:box_volume_name] << "_vagrant_box_image_#{env[:machine].box.version.to_s rescue ''}.img"
39
39
 
40
+ # Override box_virtual_size
41
+ if config.machine_virtual_size
42
+ if config.machine_virtual_size < box_virtual_size
43
+ # Warn that a virtual size less than the box metadata size
44
+ # is not supported and will be ignored
45
+ env[:ui].warn I18n.t(
46
+ 'vagrant_libvirt.warnings.ignoring_virtual_size_too_small',
47
+ requested: config.machine_virtual_size, minimum: box_virtual_size
48
+ )
49
+ else
50
+ env[:ui].info I18n.t('vagrant_libvirt.manual_resize_required')
51
+ box_virtual_size = config.machine_virtual_size
52
+ end
53
+ end
54
+ # save for use by later actions
55
+ env[:box_virtual_size] = box_virtual_size
56
+
57
+ # while inside the synchronize block take care not to call the next
58
+ # action in the chain, as must exit this block first to prevent
59
+ # locking all subsequent actions as well.
40
60
  @@lock.synchronize do
41
61
  # Don't continue if image already exists in storage pool.
42
- return @app.call(env) if ProviderLibvirt::Util::Collection.find_matching(
43
- env[:libvirt_compute].volumes.all, env[:box_volume_name])
62
+ break if ProviderLibvirt::Util::Collection.find_matching(
63
+ env[:machine].provider.driver.connection.volumes.all, env[:box_volume_name])
44
64
 
45
65
  # Box is not available as a storage pool volume. Create and upload
46
66
  # it as a copy of local box image.
@@ -53,7 +73,7 @@ module VagrantPlugins
53
73
  message << " in storage pool #{config.storage_pool_name}."
54
74
  @logger.info(message)
55
75
  begin
56
- fog_volume = env[:libvirt_compute].volumes.create(
76
+ fog_volume = env[:machine].provider.driver.connection.volumes.create(
57
77
  name: env[:box_volume_name],
58
78
  allocation: "#{box_image_size/1024/1024}M",
59
79
  capacity: "#{box_virtual_size}G",
@@ -97,10 +117,10 @@ module VagrantPlugins
97
117
  image_size = File.size(image_file) # B
98
118
 
99
119
  begin
100
- pool = env[:libvirt_compute].client.lookup_storage_pool_by_name(
120
+ pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name(
101
121
  pool_name)
102
122
  volume = pool.lookup_volume_by_name(volume_name)
103
- stream = env[:libvirt_compute].client.stream
123
+ stream = env[:machine].provider.driver.connection.client.stream
104
124
  volume.upload(stream, offset=0, length=image_size)
105
125
 
106
126
  # Exception ProviderLibvirt::RetrieveError can be raised if buffer is
@@ -14,14 +14,16 @@ module VagrantPlugins
14
14
  end
15
15
 
16
16
  def call(env)
17
- @@lock.synchronize do
18
- # Get config options.
19
- config = env[:machine].provider_config
17
+ # Get config options.
18
+ config = env[:machine].provider_config
20
19
 
20
+ # while inside the synchronize block take care not to call the next
21
+ # action in the chain, as must exit this block first to prevent
22
+ # locking all subsequent actions as well.
23
+ @@lock.synchronize do
21
24
  # Check for storage pool, where box image should be created
22
- fog_pool = ProviderLibvirt::Util::Collection.find_matching(
23
- env[:libvirt_compute].pools.all, config.storage_pool_name)
24
- return @app.call(env) if fog_pool
25
+ break if ProviderLibvirt::Util::Collection.find_matching(
26
+ env[:machine].provider.driver.connection.pools.all, config.storage_pool_name)
25
27
 
26
28
  @logger.info("No storage pool '#{config.storage_pool_name}' is available.")
27
29
 
@@ -34,7 +36,7 @@ module VagrantPlugins
34
36
  # Fog libvirt currently doesn't support creating pools. Use
35
37
  # ruby-libvirt client directly.
36
38
  begin
37
- libvirt_pool = env[:libvirt_compute].client.define_storage_pool_xml(
39
+ libvirt_pool = env[:machine].provider.driver.connection.client.define_storage_pool_xml(
38
40
  to_xml('default_storage_pool'))
39
41
  libvirt_pool.build
40
42
  libvirt_pool.create
@@ -9,7 +9,7 @@ module VagrantPlugins
9
9
  end
10
10
 
11
11
  def call(env)
12
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
12
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
13
13
  raise Errors::NoDomainError if domain == nil
14
14
  env[:result] = domain.state.to_s == 'running'
15
15
 
@@ -9,7 +9,7 @@ module VagrantPlugins
9
9
  end
10
10
 
11
11
  def call(env)
12
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
12
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
13
13
  raise Errors::NoDomainError if domain == nil
14
14
  env[:result] = domain.state.to_s == 'paused'
15
15
 
@@ -14,9 +14,9 @@ module VagrantPlugins
14
14
 
15
15
  def call(env)
16
16
  env[:ui].info(I18n.t('vagrant_libvirt.package_domain'))
17
- libvirt_domain = env[:libvirt_compute].client.lookup_domain_by_uuid(
17
+ libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(
18
18
  env[:machine].id)
19
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
19
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
20
20
  root_disk = domain.volumes.select do |x|
21
21
  x.name == libvirt_domain.name + '.img'
22
22
  end.first
@@ -37,7 +37,7 @@ module VagrantPlugins
37
37
  `qemu-img rebase -p -b "" #{@tmp_img}`
38
38
  # remove hw association with interface
39
39
  # working for centos with lvs default disks
40
- `virt-sysprep --no-logfile -a #{@tmp_img} `
40
+ `virt-sysprep --no-logfile --operations defaults,-ssh-userdir -a #{@tmp_img} `
41
41
  Dir.chdir(@tmp_dir)
42
42
  img_size = `qemu-img info #{@tmp_img} | grep 'virtual size' | awk '{print $3;}' | tr -d 'G'`.chomp
43
43
  File.write(@tmp_dir + '/metadata.json', metadata_content(img_size))
@@ -39,13 +39,16 @@ module VagrantPlugins
39
39
  #
40
40
  # @param [Machine] machine
41
41
  # @return [String]
42
- def read_host_ip(ip)
43
- UDPSocket.open do |s|
42
+ def read_host_ip(ip)
43
+ UDPSocket.open do |s|
44
+ if(ip.kind_of?(Array))
45
+ s.connect(ip.last, 1)
46
+ else
44
47
  s.connect(ip, 1)
45
- s.addr.last
46
- end
47
48
  end
48
-
49
+ s.addr.last
50
+ end
51
+ end
49
52
  # Returns the IP address of the guest
50
53
  #
51
54
  # @param [Machine] machine
@@ -8,7 +8,7 @@ module VagrantPlugins
8
8
  end
9
9
 
10
10
  def call(env)
11
- env[:nfs_valid_ids] = env[:libvirt_compute].servers.all.map(&:id)
11
+ env[:nfs_valid_ids] = env[:machine].provider.driver.connection.servers.all.map(&:id)
12
12
  @app.call(env)
13
13
  end
14
14
  end
@@ -12,7 +12,7 @@ module VagrantPlugins
12
12
  if env[:host]
13
13
  uuid = env[:machine].id
14
14
  # get all uuids
15
- uuids = env[:libvirt_compute].servers.all.map(&:id)
15
+ uuids = env[:machine].provider.driver.connection.servers.all.map(&:id)
16
16
  # not exiisted in array will removed from nfs
17
17
  uuids.delete(uuid)
18
18
  env[:host].capability(
@@ -10,7 +10,7 @@ module VagrantPlugins
10
10
  end
11
11
 
12
12
  def call(env)
13
- env[:machine_mac_addresses] = read_mac_addresses(env[:libvirt_compute], env[:machine])
13
+ env[:machine_mac_addresses] = read_mac_addresses(env[:machine].provider.driver.connection, env[:machine])
14
14
  end
15
15
 
16
16
  def read_mac_addresses(libvirt, machine)
@@ -11,7 +11,7 @@ module VagrantPlugins
11
11
 
12
12
  def call(env)
13
13
  env[:ui].info("Vagrant-libvirt plugin removed box only from you LOCAL ~/.vagrant/boxes directory")
14
- env[:ui].info("From libvirt storage pool you have to delete image manualy(virsh, virt-manager or by any other tool)")
14
+ env[:ui].info("From libvirt storage pool you have to delete image manually(virsh, virt-manager or by any other tool)")
15
15
  @app.call(env)
16
16
  end
17
17
  end
@@ -22,7 +22,7 @@ module VagrantPlugins
22
22
  config = env[:machine].provider_config
23
23
  # Check for storage pool, where box image should be created
24
24
  fog_pool = ProviderLibvirt::Util::Collection.find_matching(
25
- env[:libvirt_compute].pools.all, config.storage_pool_name)
25
+ env[:machine].provider.driver.connection.pools.all, config.storage_pool_name)
26
26
  @logger.debug("**** Pool #{fog_pool.name}")
27
27
 
28
28
  # This is name of newly created image for vm.
@@ -31,7 +31,7 @@ module VagrantPlugins
31
31
 
32
32
  # remove root storage
33
33
  box_volume = ProviderLibvirt::Util::Collection.find_matching(
34
- env[:libvirt_compute].volumes.all, name)
34
+ env[:machine].provider.driver.connection.volumes.all, name)
35
35
  if box_volume && box_volume.pool_name == fog_pool.name
36
36
  @logger.info("Deleting volume #{box_volume.key}")
37
37
  box_volume.destroy
@@ -13,7 +13,7 @@ module VagrantPlugins
13
13
  def call(env)
14
14
  env[:ui].info(I18n.t("vagrant_libvirt.resuming_domain"))
15
15
 
16
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
16
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
17
17
  raise Errors::NoDomainError if domain == nil
18
18
 
19
19
  domain.resume
@@ -0,0 +1,66 @@
1
+ require "log4r"
2
+ require 'nokogiri'
3
+
4
+ module VagrantPlugins
5
+ module ProviderLibvirt
6
+ module Action
7
+ class SetBootOrder
8
+ def initialize(app, env)
9
+ @app = app
10
+ @logger = Log4r::Logger.new("vagrant_libvirt::action::set_boot_order")
11
+ config = env[:machine].provider_config
12
+ @boot_order = config.boot_order
13
+ end
14
+
15
+ def call(env)
16
+ # Get domain first
17
+ begin
18
+ domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(
19
+ env[:machine].id.to_s)
20
+ rescue => e
21
+ raise Errors::NoDomainError,
22
+ :error_message => e.message
23
+ end
24
+
25
+ # Only execute specific boot ordering if this is defined in the Vagrant file
26
+ if @boot_order.count >= 1
27
+
28
+ # If a domain is initially defined with no box or disk or with an explicit boot order, libvirt adds <boot dev="foo">
29
+ # This conflicts with an explicit boot_order configuration, so we need to remove it from the domain xml and feed it back.
30
+ # Also see https://bugzilla.redhat.com/show_bug.cgi?id=1248514 as to why we have to do this after all devices have been defined.
31
+ xml = Nokogiri::XML(domain.xml_desc)
32
+ xml.search("/domain/os/boot").each do |node|
33
+ node.remove
34
+ end
35
+
36
+ # Parse the XML and find each defined drive and network interfacee
37
+ hd = xml.search("/domain/devices/disk[@device='disk']")
38
+ cdrom = xml.search("/domain/devices/disk[@device='cdrom']")
39
+ network = xml.search("/domain/devices/interface[@type='network']")
40
+
41
+ # Generate an array per device group and a flattened array from all of those
42
+ devices = {"hd" => hd, "cdrom" => cdrom, "network" => network}
43
+ final_boot_order = @boot_order.flat_map {|category| devices[category] }
44
+
45
+ # Loop over the entire defined boot order array and create boot order entries in the domain XML
46
+ final_boot_order.each_with_index do |node, index|
47
+ boot = "<boot order='#{index+1}'/>"
48
+ node.add_child(boot)
49
+ if node.name == 'disk'
50
+ @logger.debug "Setting #{node['device']} to boot index #{index+1}"
51
+ elsif node.name == 'interface'
52
+ @logger.debug "Setting #{node.name} to boot index #{index+1}"
53
+ end
54
+ end
55
+
56
+ # Finally redefine the domain XML through libvirt to apply the boot ordering
57
+ env[:machine].provider.driver.connection.client.define_domain_xml(xml.to_s)
58
+ end
59
+
60
+ @app.call(env)
61
+
62
+ end
63
+ end
64
+ end
65
+ end
66
+ end
@@ -14,11 +14,12 @@ module VagrantPlugins
14
14
  env[:domain_name] = build_domain_name(env)
15
15
 
16
16
  begin
17
- @logger.info("Looking for domain #{env[:domain_name]} through list #{env[:libvirt_compute].servers.all}")
17
+ @logger.info("Looking for domain #{env[:domain_name]} through list " +
18
+ "#{env[:machine].provider.driver.connection.servers.all}")
18
19
  # Check if the domain name is not already taken
19
20
 
20
21
  domain = ProviderLibvirt::Util::Collection.find_matching(
21
- env[:libvirt_compute].servers.all, env[:domain_name])
22
+ env[:machine].provider.driver.connection.servers.all, env[:domain_name])
22
23
  rescue Fog::Errors::Error => e
23
24
  @logger.info("#{e}")
24
25
  domain = nil
@@ -14,7 +14,7 @@ module VagrantPlugins
14
14
  def call(env)
15
15
  env[:ui].info(I18n.t("vagrant_libvirt.starting_domain"))
16
16
 
17
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
17
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
18
18
  raise Errors::NoDomainError if domain == nil
19
19
 
20
20
  begin
@@ -14,7 +14,7 @@ module VagrantPlugins
14
14
  def call(env)
15
15
  env[:ui].info(I18n.t("vagrant_libvirt.suspending_domain"))
16
16
 
17
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
17
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
18
18
  raise Errors::NoDomainError if domain == nil
19
19
 
20
20
  domain.suspend
@@ -21,7 +21,7 @@ module VagrantPlugins
21
21
  env[:metrics] ||= {}
22
22
 
23
23
  # Get domain object
24
- domain = env[:libvirt_compute].servers.get(env[:machine].id.to_s)
24
+ domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s)
25
25
  raise NoDomainError if domain == nil
26
26
 
27
27
  # Wait for domain to obtain an ip address. Ip address is searched
@@ -1,3 +1,4 @@
1
+ require "digest/md5"
1
2
  require "vagrant/util/retryable"
2
3
 
3
4
  module VagrantPlugins
@@ -16,7 +17,7 @@ module VagrantPlugins
16
17
  machine.communicate.sudo("mkdir -p #{expanded_guest_path}")
17
18
 
18
19
  # Mount
19
- mount_tag = name.dup
20
+ mount_tag = Digest::MD5.new.update(opts[:hostpath]).to_s[0,31]
20
21
 
21
22
  mount_opts="-o trans=virtio"
22
23
  mount_opts += ",access=#{opts[:owner]}" if opts[:owner]
@@ -1,6 +1,7 @@
1
1
  require 'log4r'
2
2
  require 'ostruct'
3
3
  require 'nokogiri'
4
+ require "digest/md5"
4
5
 
5
6
  require 'vagrant/util/subprocess'
6
7
  require 'vagrant/errors'
@@ -24,13 +25,13 @@ module VagrantPlugins
24
25
 
25
26
  # <filesystem/> support in device attach/detach introduced in 1.2.2
26
27
  # version number format is major * 1,000,000 + minor * 1,000 + release
27
- libvirt_version = ProviderLibvirt.libvirt_connection.client.libversion
28
+ libvirt_version = machine.provider.driver.connection.client.libversion
28
29
  libvirt_version >= 1_002_002
29
30
  end
30
31
 
31
32
  def prepare(machine, folders, _opts)
32
- raise Vagrant::Errors::Error('No libvirt connection') if ProviderLibvirt.libvirt_connection.nil?
33
- @conn = ProviderLibvirt.libvirt_connection.client
33
+ raise Vagrant::Errors::Error('No libvirt connection') if machine.provider.driver.connection.nil?
34
+ @conn = machine.provider.driver.connection.client
34
35
 
35
36
  begin
36
37
  # loop through folders
@@ -38,7 +39,12 @@ module VagrantPlugins
38
39
  folder_opts.merge!({ target: id,
39
40
  accessmode: 'passthrough',
40
41
  readonly: nil }) { |_k, ov, _nv| ov }
42
+
43
+ mount_tag = Digest::MD5.new.update(folder_opts[:hostpath]).to_s[0,31]
44
+ folder_opts[:mount_tag] = mount_tag
45
+
41
46
  machine.ui.info "================\nMachine id: #{machine.id}\nShould be mounting folders\n #{id}, opts: #{folder_opts}"
47
+
42
48
  xml = to_xml('filesystem', folder_opts)
43
49
  # puts "<<<<< XML:\n #{xml}\n >>>>>"
44
50
  @conn.lookup_domain_by_uuid(machine.id).attach_device(xml, 0)
@@ -67,10 +73,10 @@ module VagrantPlugins
67
73
  end
68
74
 
69
75
  def cleanup(machine, _opts)
70
- if ProviderLibvirt.libvirt_connection.nil?
76
+ if machine.provider.driver.connection.nil?
71
77
  raise Vagrant::Errors::Error('No libvirt connection')
72
78
  end
73
- @conn = ProviderLibvirt.libvirt_connection.client
79
+ @conn = machine.provider.driver.connection.client
74
80
  begin
75
81
  if machine.id && machine.id != ''
76
82
  dom = @conn.lookup_domain_by_uuid(machine.id)