vagrant-libvirt 0.0.41 → 0.0.42
- checksums.yaml +4 -4
- data/.coveralls.yml +1 -0
- data/.github/issue_template.md +37 -0
- data/.gitignore +21 -0
- data/.travis.yml +24 -0
- data/Gemfile +26 -0
- data/LICENSE +22 -0
- data/README.md +1380 -0
- data/Rakefile +8 -0
- data/example_box/README.md +29 -0
- data/example_box/Vagrantfile +60 -0
- data/example_box/metadata.json +5 -0
- data/lib/vagrant-libvirt.rb +29 -0
- data/lib/vagrant-libvirt/action.rb +370 -0
- data/lib/vagrant-libvirt/action/create_domain.rb +322 -0
- data/lib/vagrant-libvirt/action/create_domain_volume.rb +87 -0
- data/lib/vagrant-libvirt/action/create_network_interfaces.rb +302 -0
- data/lib/vagrant-libvirt/action/create_networks.rb +361 -0
- data/lib/vagrant-libvirt/action/destroy_domain.rb +83 -0
- data/lib/vagrant-libvirt/action/destroy_networks.rb +95 -0
- data/lib/vagrant-libvirt/action/forward_ports.rb +227 -0
- data/lib/vagrant-libvirt/action/halt_domain.rb +41 -0
- data/lib/vagrant-libvirt/action/handle_box_image.rb +156 -0
- data/lib/vagrant-libvirt/action/handle_storage_pool.rb +57 -0
- data/lib/vagrant-libvirt/action/is_created.rb +18 -0
- data/lib/vagrant-libvirt/action/is_running.rb +21 -0
- data/lib/vagrant-libvirt/action/is_suspended.rb +42 -0
- data/lib/vagrant-libvirt/action/message_already_created.rb +16 -0
- data/lib/vagrant-libvirt/action/message_not_created.rb +16 -0
- data/lib/vagrant-libvirt/action/message_not_running.rb +16 -0
- data/lib/vagrant-libvirt/action/message_not_suspended.rb +16 -0
- data/lib/vagrant-libvirt/action/message_will_not_destroy.rb +17 -0
- data/lib/vagrant-libvirt/action/package_domain.rb +105 -0
- data/lib/vagrant-libvirt/action/prepare_nfs_settings.rb +94 -0
- data/lib/vagrant-libvirt/action/prepare_nfs_valid_ids.rb +17 -0
- data/lib/vagrant-libvirt/action/prune_nfs_exports.rb +27 -0
- data/lib/vagrant-libvirt/action/read_mac_addresses.rb +40 -0
- data/lib/vagrant-libvirt/action/remove_libvirt_image.rb +20 -0
- data/lib/vagrant-libvirt/action/remove_stale_volume.rb +50 -0
- data/lib/vagrant-libvirt/action/resume_domain.rb +34 -0
- data/lib/vagrant-libvirt/action/set_boot_order.rb +109 -0
- data/lib/vagrant-libvirt/action/set_name_of_domain.rb +64 -0
- data/lib/vagrant-libvirt/action/share_folders.rb +71 -0
- data/lib/vagrant-libvirt/action/start_domain.rb +307 -0
- data/lib/vagrant-libvirt/action/suspend_domain.rb +40 -0
- data/lib/vagrant-libvirt/action/wait_till_up.rb +109 -0
- data/lib/vagrant-libvirt/cap/mount_p9.rb +42 -0
- data/lib/vagrant-libvirt/cap/nic_mac_addresses.rb +17 -0
- data/lib/vagrant-libvirt/cap/synced_folder.rb +113 -0
- data/lib/vagrant-libvirt/config.rb +746 -0
- data/lib/vagrant-libvirt/driver.rb +118 -0
- data/lib/vagrant-libvirt/errors.rb +153 -0
- data/lib/vagrant-libvirt/plugin.rb +92 -0
- data/lib/vagrant-libvirt/provider.rb +130 -0
- data/lib/vagrant-libvirt/templates/default_storage_pool.xml.erb +13 -0
- data/lib/vagrant-libvirt/templates/domain.xml.erb +244 -0
- data/lib/vagrant-libvirt/templates/private_network.xml.erb +42 -0
- data/lib/vagrant-libvirt/templates/public_interface.xml.erb +26 -0
- data/lib/vagrant-libvirt/util.rb +11 -0
- data/lib/vagrant-libvirt/util/collection.rb +19 -0
- data/lib/vagrant-libvirt/util/erb_template.rb +22 -0
- data/lib/vagrant-libvirt/util/error_codes.rb +100 -0
- data/lib/vagrant-libvirt/util/network_util.rb +151 -0
- data/lib/vagrant-libvirt/util/timer.rb +17 -0
- data/lib/vagrant-libvirt/version.rb +5 -0
- data/locales/en.yml +162 -0
- data/spec/spec_helper.rb +9 -0
- data/spec/support/environment_helper.rb +46 -0
- data/spec/support/libvirt_context.rb +30 -0
- data/spec/support/sharedcontext.rb +34 -0
- data/spec/unit/action/destroy_domain_spec.rb +97 -0
- data/spec/unit/action/set_name_of_domain_spec.rb +21 -0
- data/spec/unit/action/wait_till_up_spec.rb +127 -0
- data/spec/unit/config_spec.rb +113 -0
- data/spec/unit/templates/domain_all_settings.xml +137 -0
- data/spec/unit/templates/domain_defaults.xml +46 -0
- data/spec/unit/templates/domain_spec.rb +84 -0
- data/tools/create_box.sh +130 -0
- data/tools/prepare_redhat_for_box.sh +119 -0
- data/vagrant-libvirt.gemspec +54 -0
- metadata +93 -3
@@ -0,0 +1,322 @@ data/lib/vagrant-libvirt/action/create_domain.rb (new file)

require 'log4r'

module VagrantPlugins
  module ProviderLibvirt
    module Action
      class CreateDomain
        include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate

        def initialize(app, _env)
          @logger = Log4r::Logger.new('vagrant_libvirt::action::create_domain')
          @app = app
        end

        def _disk_name(name, disk)
          "#{name}-#{disk[:device]}.#{disk[:type]}" # disk name
        end

        def _disks_print(disks)
          disks.collect do |x|
            "#{x[:device]}(#{x[:type]},#{x[:size]})"
          end.join(', ')
        end

        def _cdroms_print(cdroms)
          cdroms.collect { |x| x[:dev] }.join(', ')
        end

        def call(env)
          # Get config.
          config = env[:machine].provider_config

          # Gather some info about domain
          @name = env[:domain_name]
          @uuid = config.uuid
          @cpus = config.cpus.to_i
          @cpu_features = config.cpu_features
          @cpu_topology = config.cpu_topology
          @features = config.features
          @cpu_mode = config.cpu_mode
          @cpu_model = config.cpu_model
          @cpu_fallback = config.cpu_fallback
          @numa_nodes = config.numa_nodes
          @loader = config.loader
          @machine_type = config.machine_type
          @machine_arch = config.machine_arch
          @disk_bus = config.disk_bus
          @disk_device = config.disk_device
          @nested = config.nested
          @memory_size = config.memory.to_i * 1024
          @memory_backing = config.memory_backing
          @management_network_mac = config.management_network_mac
          @domain_volume_cache = config.volume_cache
          @kernel = config.kernel
          @cmd_line = config.cmd_line
          @emulator_path = config.emulator_path
          @initrd = config.initrd
          @dtb = config.dtb
          @graphics_type = config.graphics_type
          @graphics_autoport = config.graphics_autoport
          @graphics_port = config.graphics_port
          @graphics_ip = config.graphics_ip
          @graphics_passwd = if config.graphics_passwd.to_s.empty?
                               ''
                             else
                               "passwd='#{config.graphics_passwd}'"
                             end
          @video_type = config.video_type
          @sound_type = config.sound_type
          @video_vram = config.video_vram
          @keymap = config.keymap
          @kvm_hidden = config.kvm_hidden

          @tpm_model = config.tpm_model
          @tpm_type = config.tpm_type
          @tpm_path = config.tpm_path

          # Boot order
          @boot_order = config.boot_order

          # Storage
          @storage_pool_name = config.storage_pool_name
          @disks = config.disks
          @cdroms = config.cdroms

          # Input
          @inputs = config.inputs

          # Channels
          @channels = config.channels

          # PCI device passthrough
          @pcis = config.pcis

          # Watchdog device
          @watchdog_dev = config.watchdog_dev

          # USB device passthrough
          @usbs = config.usbs

          # Redirected devices
          @redirdevs = config.redirdevs
          @redirfilters = config.redirfilters

          # smartcard device
          @smartcard_dev = config.smartcard_dev

          # RNG device passthrough
          @rng = config.rng

          config = env[:machine].provider_config
          @domain_type = config.driver

          @os_type = 'hvm'

          # Get path to domain image from the storage pool selected if we have a box.
          if env[:machine].config.vm.box
            actual_volumes =
              env[:machine].provider.driver.connection.volumes.all.select do |x|
                x.pool_name == @storage_pool_name
              end
            domain_volume = ProviderLibvirt::Util::Collection.find_matching(
              actual_volumes, "#{@name}.img"
            )
            raise Errors::DomainVolumeExists if domain_volume.nil?
            @domain_volume_path = domain_volume.path
          end

          # If we have a box, take the path from the domain volume and set our storage_prefix.
          # If not, we dump the storage pool xml to get its defined path.
          # the default storage prefix is typically: /var/lib/libvirt/images/
          if env[:machine].config.vm.box
            storage_prefix = File.dirname(@domain_volume_path) + '/' # steal
          else
            storage_pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name(@storage_pool_name)
            raise Errors::NoStoragePool if storage_pool.nil?
            xml = Nokogiri::XML(storage_pool.xml_desc)
            storage_prefix = xml.xpath('/pool/target/path').inner_text.to_s + '/'
          end

          @disks.each do |disk|
            disk[:path] ||= _disk_name(@name, disk)

            # On volume creation, the <path> element inside <target>
            # is oddly ignored; instead the path is taken from the
            # <name> element:
            # http://www.redhat.com/archives/libvir-list/2008-August/msg00329.html
            disk[:name] = disk[:path]

            disk[:absolute_path] = storage_prefix + disk[:path]

            if env[:machine].provider.driver.connection.volumes.select do |x|
                 x.name == disk[:name] && x.pool_name == @storage_pool_name
               end.empty?
              # make the disk. equivalent to:
              # qemu-img create -f qcow2 <path> 5g
              begin
                env[:machine].provider.driver.connection.volumes.create(
                  name: disk[:name],
                  format_type: disk[:type],
                  path: disk[:absolute_path],
                  capacity: disk[:size],
                  #:allocation => ?,
                  pool_name: @storage_pool_name
                )
              rescue Fog::Errors::Error => e
                raise Errors::FogDomainVolumeCreateError,
                      error_message: e.message
              end
            else
              disk[:preexisting] = true
            end
          end

          # Output the settings we're going to use to the user
          env[:ui].info(I18n.t('vagrant_libvirt.creating_domain'))
          env[:ui].info(" -- Name: #{@name}")
          env[:ui].info(" -- Forced UUID: #{@uuid}") if @uuid != ''
          env[:ui].info(" -- Domain type: #{@domain_type}")
          env[:ui].info(" -- Cpus: #{@cpus}")
          if not @cpu_topology.empty?
            env[:ui].info(" -- CPU topology: sockets=#{@cpu_topology[:sockets]}, cores=#{@cpu_topology[:cores]}, threads=#{@cpu_topology[:threads]}")
          end
          env[:ui].info("")
          @cpu_features.each do |cpu_feature|
            env[:ui].info(" -- CPU Feature: name=#{cpu_feature[:name]}, policy=#{cpu_feature[:policy]}")
          end
          @features.each do |feature|
            env[:ui].info(" -- Feature: #{feature}")
          end
          env[:ui].info(" -- Memory: #{@memory_size / 1024}M")
          @memory_backing.each do |backing|
            env[:ui].info(" -- Memory Backing: #{backing[:name]}: #{backing[:config].map { |k,v| "#{k}='#{v}'"}.join(' ')}")
          end
          env[:ui].info(" -- Management MAC: #{@management_network_mac}")
          env[:ui].info(" -- Loader: #{@loader}")
          if env[:machine].config.vm.box
            env[:ui].info(" -- Base box: #{env[:machine].box.name}")
          end
          env[:ui].info(" -- Storage pool: #{@storage_pool_name}")
          env[:ui].info(" -- Image: #{@domain_volume_path} (#{env[:box_virtual_size]}G)")
          env[:ui].info(" -- Volume Cache: #{@domain_volume_cache}")
          env[:ui].info(" -- Kernel: #{@kernel}")
          env[:ui].info(" -- Initrd: #{@initrd}")
          env[:ui].info(" -- Graphics Type: #{@graphics_type}")
          env[:ui].info(" -- Graphics Port: #{@graphics_port}")
          env[:ui].info(" -- Graphics IP: #{@graphics_ip}")
          env[:ui].info(" -- Graphics Password: #{@graphics_passwd.empty? ? 'Not defined' : 'Defined'}")
          env[:ui].info(" -- Video Type: #{@video_type}")
          env[:ui].info(" -- Video VRAM: #{@video_vram}")
          env[:ui].info(" -- Sound Type: #{@sound_type}")
          env[:ui].info(" -- Keymap: #{@keymap}")
          env[:ui].info(" -- TPM Path: #{@tpm_path}")

          @boot_order.each do |device|
            env[:ui].info(" -- Boot device: #{device}")
          end

          unless @disks.empty?
            env[:ui].info(" -- Disks: #{_disks_print(@disks)}")
          end

          @disks.each do |disk|
            msg = " -- Disk(#{disk[:device]}): #{disk[:absolute_path]}"
            msg += ' Shared' if disk[:shareable]
            msg += ' (Remove only manually)' if disk[:allow_existing]
            msg += ' Not created - using existed.' if disk[:preexisting]
            env[:ui].info(msg)
          end

          unless @cdroms.empty?
            env[:ui].info(" -- CDROMS: #{_cdroms_print(@cdroms)}")
          end

          @cdroms.each do |cdrom|
            env[:ui].info(" -- CDROM(#{cdrom[:dev]}): #{cdrom[:path]}")
          end

          @inputs.each do |input|
            env[:ui].info(" -- INPUT: type=#{input[:type]}, bus=#{input[:bus]}")
          end

          @channels.each do |channel|
            env[:ui].info(" -- CHANNEL: type=#{channel[:type]}, mode=#{channel[:source_mode]}")
            env[:ui].info(" -- CHANNEL: target_type=#{channel[:target_type]}, target_name=#{channel[:target_name]}")
          end

          @pcis.each do |pci|
            env[:ui].info(" -- PCI passthrough: #{pci[:bus]}:#{pci[:slot]}.#{pci[:function]}")
          end

          unless @rng[:model].nil?
            env[:ui].info(" -- RNG device model: #{@rng[:model]}")
          end

          if not @watchdog_dev.empty?
            env[:ui].info(" -- Watchdog device: model=#{@watchdog_dev[:model]}, action=#{@watchdog_dev[:action]}")
          end

          @usbs.each do |usb|
            usb_dev = []
            usb_dev.push("bus=#{usb[:bus]}") if usb[:bus]
            usb_dev.push("device=#{usb[:device]}") if usb[:device]
            usb_dev.push("vendor=#{usb[:vendor]}") if usb[:vendor]
            usb_dev.push("product=#{usb[:product]}") if usb[:product]
            env[:ui].info(" -- USB passthrough: #{usb_dev.join(', ')}")
          end

          unless @redirdevs.empty?
            env[:ui].info(' -- Redirected Devices: ')
            @redirdevs.each do |redirdev|
              msg = " -> bus=usb, type=#{redirdev[:type]}"
              env[:ui].info(msg)
            end
          end

          unless @redirfilters.empty?
            env[:ui].info(' -- USB Device filter for Redirected Devices: ')
            @redirfilters.each do |redirfilter|
              msg = " -> class=#{redirfilter[:class]}, "
              msg += "vendor=#{redirfilter[:vendor]}, "
              msg += "product=#{redirfilter[:product]}, "
              msg += "version=#{redirfilter[:version]}, "
              msg += "allow=#{redirfilter[:allow]}"
              env[:ui].info(msg)
            end
          end

          if not @smartcard_dev.empty?
            env[:ui].info(" -- smartcard device: mode=#{@smartcard_dev[:mode]}, type=#{@smartcard_dev[:type]}")
          end

          @qargs = config.qemu_args
          if not @qargs.empty?
            env[:ui].info(' -- Command line args: ')
            @qargs.each do |arg|
              msg = " -> value=#{arg[:value]}, "
              env[:ui].info(msg)
            end
          end

          env[:ui].info(" -- Command line : #{@cmd_line}") unless @cmd_line.empty?

          # Create libvirt domain.
          # Is there a way to tell fog to create new domain with already
          # existing volume? Use domain creation from template..
          begin
            server = env[:machine].provider.driver.connection.servers.create(
              xml: to_xml('domain')
            )
          rescue Fog::Errors::Error => e
            raise Errors::FogCreateServerError, error_message: e.message
          end

          # Immediately save the ID since it is created at this point.
          env[:machine].id = server.id

          @app.call(env)
        end
      end
    end
  end
end
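For orientation, the `@disks` array consumed by the loop above comes from the provider configuration. The following Vagrantfile sketch is illustrative only: the box name, sizes, and device letters are placeholders, and it assumes the `libvirt.storage :file` option exposed by this plugin's config.

```ruby
# Illustrative only: box name, sizes, and device letters are placeholders.
Vagrant.configure('2') do |config|
  config.vm.box = 'generic/ubuntu1804'

  config.vm.provider :libvirt do |libvirt|
    # Each :file entry ends up as one element of config.disks above; with no
    # explicit :path, _disk_name derives a volume name like "<domain>-vdb.qcow2"
    # and the volume is created in the configured storage pool.
    libvirt.storage :file, size: '20G', type: 'qcow2', device: 'vdb'
    libvirt.storage :file, size: '5G', type: 'raw', device: 'vdc'
  end
end
```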
@@ -0,0 +1,87 @@ data/lib/vagrant-libvirt/action/create_domain_volume.rb (new file)

require 'log4r'

module VagrantPlugins
  module ProviderLibvirt
    module Action
      # Create a snapshot of base box image. This new snapshot is just new
      # cow image with backing storage pointing to base box image. Use this
      # image as new domain volume.
      class CreateDomainVolume
        include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate

        def initialize(app, _env)
          @logger = Log4r::Logger.new('vagrant_libvirt::action::create_domain_volume')
          @app = app
        end

        def call(env)
          env[:ui].info(I18n.t('vagrant_libvirt.creating_domain_volume'))

          # Get config options.
          config = env[:machine].provider_config

          # This is name of newly created image for vm.
          @name = "#{env[:domain_name]}.img"

          # Verify the volume doesn't exist already.
          domain_volume = ProviderLibvirt::Util::Collection.find_matching(
            env[:machine].provider.driver.connection.volumes.all, @name
          )
          raise Errors::DomainVolumeExists if domain_volume

          # Get path to backing image - box volume.
          box_volume = ProviderLibvirt::Util::Collection.find_matching(
            env[:machine].provider.driver.connection.volumes.all, env[:box_volume_name]
          )
          @backing_file = box_volume.path

          # Virtual size of image. Take value worked out by HandleBoxImage
          @capacity = env[:box_virtual_size] # G

          # Create new volume from xml template. Fog currently doesn't support
          # volume snapshots directly.
          begin
            xml = Nokogiri::XML::Builder.new do |xml|
              xml.volume do
                xml.name(@name)
                xml.capacity(@capacity, unit: 'G')
                xml.target do
                  xml.format(type: 'qcow2')
                  xml.permissions do
                    xml.owner 0
                    xml.group 0
                    xml.mode '0600'
                    xml.label 'virt_image_t'
                  end
                end
                xml.backingStore do
                  xml.path(@backing_file)
                  xml.format(type: 'qcow2')
                  xml.permissions do
                    xml.owner 0
                    xml.group 0
                    xml.mode '0600'
                    xml.label 'virt_image_t'
                  end
                end
              end
            end.to_xml(
              save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION |
                         Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS |
                         Nokogiri::XML::Node::SaveOptions::FORMAT
            )
            domain_volume = env[:machine].provider.driver.connection.volumes.create(
              xml: xml,
              pool_name: config.storage_pool_name
            )
          rescue Fog::Errors::Error => e
            raise Errors::FogDomainVolumeCreateError,
                  error_message: e.message
          end

          @app.call(env)
        end
      end
    end
  end
end
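As a standalone illustration (not part of the plugin) of what the Nokogiri builder above hands to libvirt, the sketch below rebuilds the same volume XML with hypothetical values; the name, capacity, and backing path are placeholders, and the permissions blocks are omitted for brevity.

```ruby
require 'nokogiri'

name         = 'vagrant_test_default.img'        # hypothetical env[:domain_name] + '.img'
capacity     = 10                                # hypothetical env[:box_virtual_size], in G
backing_file = '/var/lib/libvirt/images/box.img' # hypothetical box volume path

xml = Nokogiri::XML::Builder.new do |vol|
  vol.volume do
    vol.name(name)
    vol.capacity(capacity, unit: 'G')
    vol.target { vol.format(type: 'qcow2') }
    vol.backingStore do
      vol.path(backing_file)
      vol.format(type: 'qcow2')
    end
  end
end.to_xml(save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION |
                      Nokogiri::XML::Node::SaveOptions::FORMAT)

puts xml
# Produces, roughly:
# <volume>
#   <name>vagrant_test_default.img</name>
#   <capacity unit="G">10</capacity>
#   <target>
#     <format type="qcow2"/>
#   </target>
#   <backingStore>
#     <path>/var/lib/libvirt/images/box.img</path>
#     <format type="qcow2"/>
#   </backingStore>
# </volume>
```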
@@ -0,0 +1,302 @@ data/lib/vagrant-libvirt/action/create_network_interfaces.rb (new file)

require 'log4r'
require 'vagrant/util/network_ip'
require 'vagrant/util/scoped_hash_override'

module VagrantPlugins
  module ProviderLibvirt
    module Action
      # Create network interfaces for domain, before domain is running.
      # Networks for connecting those interfaces should be already prepared.
      class CreateNetworkInterfaces
        include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate
        include VagrantPlugins::ProviderLibvirt::Util::NetworkUtil
        include Vagrant::Util::NetworkIP
        include Vagrant::Util::ScopedHashOverride

        def initialize(app, env)
          @logger = Log4r::Logger.new('vagrant_libvirt::action::create_network_interfaces')
          @management_network_name = env[:machine].provider_config.management_network_name
          config = env[:machine].provider_config
          @nic_model_type = config.nic_model_type || 'virtio'
          @nic_adapter_count = config.nic_adapter_count
          @app = app
        end

        def call(env)
          # Get domain first.
          begin
            domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(
              env[:machine].id.to_s
            )
          rescue => e
            raise Errors::NoDomainError,
                  error_message: e.message
          end

          # Setup list of interfaces before creating them.
          adapters = []

          # Vagrant gives you adapter 0 by default
          # Assign interfaces to slots.
          configured_networks(env, @logger).each do |options|
            # dont need to create interface for this type
            next if options[:iface_type] == :forwarded_port

            # TODO: fill first ifaces with adapter option specified.
            if options[:adapter]
              if adapters[options[:adapter]]
                raise Errors::InterfaceSlotNotAvailable
              end

              free_slot = options[:adapter].to_i
              @logger.debug "Using specified adapter slot #{free_slot}"
            else
              free_slot = find_empty(adapters)
              @logger.debug "Adapter not specified so found slot #{free_slot}"
              raise Errors::InterfaceSlotExhausted if free_slot.nil?
            end

            # We have slot for interface, fill it with interface configuration.
            adapters[free_slot] = options
            adapters[free_slot][:network_name] = interface_network(
              env[:machine].provider.driver.connection.client, adapters[free_slot]
            )
          end

          # Create each interface as new domain device.
          @macs_per_network = Hash.new(0)
          adapters.each_with_index do |iface_configuration, slot_number|
            @iface_number = slot_number
            @network_name = iface_configuration[:network_name]
            @source_options = {
              network: @network_name
            }
            @mac = iface_configuration.fetch(:mac, false)
            @model_type = iface_configuration.fetch(:model_type, @nic_model_type)
            @driver_name = iface_configuration.fetch(:driver_name, false)
            @driver_queues = iface_configuration.fetch(:driver_queues, false)
            @device_name = iface_configuration.fetch(:iface_name, false)
            @mtu = iface_configuration.fetch(:mtu, nil)
            @pci_bus = iface_configuration.fetch(:bus, nil)
            @pci_slot = iface_configuration.fetch(:slot, nil)
            template_name = 'interface'
            # Configuration for public interfaces which use the macvtap driver
            if iface_configuration[:iface_type] == :public_network
              @device = iface_configuration.fetch(:dev, 'eth0')
              @mode = iface_configuration.fetch(:mode, 'bridge')
              @type = iface_configuration.fetch(:type, 'direct')
              @model_type = iface_configuration.fetch(:model_type, @nic_model_type)
              @driver_name = iface_configuration.fetch(:driver_name, false)
              @driver_queues = iface_configuration.fetch(:driver_queues, false)
              @portgroup = iface_configuration.fetch(:portgroup, nil)
              @network_name = iface_configuration.fetch(:network_name, @network_name)
              template_name = 'public_interface'
              @logger.info("Setting up public interface using device #{@device} in mode #{@mode}")
              @ovs = iface_configuration.fetch(:ovs, false)
              @trust_guest_rx_filters = iface_configuration.fetch(:trust_guest_rx_filters, false)
            # configuration for udp or tcp tunnel interfaces (p2p conn btwn guest OSes)
            elsif iface_configuration.fetch(:tunnel_type, nil)
              @type = iface_configuration.fetch(:tunnel_type)
              @tunnel_port = iface_configuration.fetch(:tunnel_port, nil)
              raise Errors::TunnelPortNotDefined if @tunnel_port.nil?
              if @type == 'udp'
                # default udp tunnel source to 127.0.0.1
                @udp_tunnel = {
                  address: iface_configuration.fetch(:tunnel_local_ip, '127.0.0.1'),
                  port: iface_configuration.fetch(:tunnel_local_port)
                }
              end
              # default mcast tunnel to 239.255.1.1. Web search says this
              # 239.255.x.x is a safe range to use for general use mcast
              default_ip = if @type == 'mcast'
                             '239.255.1.1'
                           else
                             '127.0.0.1'
                           end
              @source_options = {
                address: iface_configuration.fetch(:tunnel_ip, default_ip),
                port: @tunnel_port
              }
              @tunnel_type = iface_configuration.fetch(:model_type, @nic_model_type)
              @driver_name = iface_configuration.fetch(:driver_name, false)
              @driver_queues = iface_configuration.fetch(:driver_queues, false)
              template_name = 'tunnel_interface'
              @logger.info("Setting up #{@type} tunnel interface using #{@tunnel_ip} port #{@tunnel_port}")
            end

            message = "Creating network interface eth#{@iface_number}"
            message << " connected to network #{@network_name}."
            if @mac
              @mac = @mac.scan(/(\h{2})/).join(':')
              message << " Using MAC address: #{@mac}"
            end
            @logger.info(message)

            begin
              # FIXME: all options for network driver should be hash from Vagrantfile
              driver_options = {}
              driver_options[:name] = @driver_name if @driver_name
              driver_options[:queues] = @driver_queues if @driver_queues
              @udp_tunnel ||= {}
              xml = if template_name == 'interface' or
                       template_name == 'tunnel_interface'
                      interface_xml(@type,
                                    @source_options,
                                    @mac,
                                    @device_name,
                                    @iface_number,
                                    @model_type,
                                    @mtu,
                                    driver_options,
                                    @udp_tunnel,
                                    @pci_bus,
                                    @pci_slot)
                    else
                      to_xml(template_name)
                    end
              domain.attach_device(xml)
            rescue => e
              raise Errors::AttachDeviceError,
                    error_message: e.message
            end

            # Re-read the network configuration and grab the MAC address
            if iface_configuration[:iface_type] == :public_network
              xml = Nokogiri::XML(domain.xml_desc)
              source = "@network='#{@network_name}'"
              if @type == 'direct'
                source = "@dev='#{@device}'"
              elsif @portgroup.nil?
                source = "@bridge='#{@device}'"
              end
              if not @mac
                macs = xml.xpath("/domain/devices/interface[source[#{source}]]/mac/@address")
                @mac = macs[@macs_per_network[source]]
                iface_configuration[:mac] = @mac.to_s
              end
              @macs_per_network[source] += 1
            end
          end

          # Continue the middleware chain.
          @app.call(env)

          if env[:machine].config.vm.box
            # Configure interfaces that user requested. Machine should be up and
            # running now.
            networks_to_configure = []

            adapters.each_with_index do |options, slot_number|
              # Skip configuring the management network, which is on the first interface.
              # It's used for provisioning and it has to be available during provisioning,
              # ifdown command is not acceptable here.
              next if slot_number.zero?
              next if options[:auto_config] === false
              @logger.debug "Configuring interface slot_number #{slot_number} options #{options}"

              network = {
                interface: slot_number,
                use_dhcp_assigned_default_route: options[:use_dhcp_assigned_default_route],
                mac_address: options[:mac]
              }

              if options[:ip]
                network = {
                  type: :static,
                  ip: options[:ip],
                  netmask: options[:netmask],
                  gateway: options[:gateway]
                }.merge(network)
              else
                network[:type] = :dhcp
              end

              # do not run configure_networks for tcp tunnel interfaces
              next if options.fetch(:tunnel_type, nil)

              networks_to_configure << network
            end

            env[:ui].info I18n.t('vagrant.actions.vm.network.configuring')
            env[:machine].guest.capability(
              :configure_networks, networks_to_configure
            )

          end
        end

        private

        def target_dev_name(device_name, type, iface_number)
          if device_name
            device_name
          elsif type == 'network'
            "vnet#{iface_number}"
          else
            # TODO can we use same name vnet#ifnum?
            #"tnet#{iface_number}" FIXME plugin vagrant-libvirt trying to create second tnet0 interface
            "vnet#{iface_number}"
          end
        end

        def interface_xml(type, source_options, mac, device_name,
                          iface_number, model_type, mtu, driver_options,
                          udp_tunnel={}, pci_bus, pci_slot)
          Nokogiri::XML::Builder.new do |xml|
            xml.interface(type: type || 'network') do
              xml.source(source_options) do
                xml.local(udp_tunnel) if type == 'udp'
              end
              xml.mac(address: mac) if mac
              xml.target(dev: target_dev_name(device_name, type, iface_number))
              xml.alias(name: "net#{iface_number}")
              xml.model(type: model_type.to_s)
              xml.mtu(size: Integer(mtu)) if mtu
              xml.driver(driver_options)
              xml.address(type: 'pci', bus: pci_bus, slot: pci_slot) if pci_bus and pci_slot
            end
          end.to_xml(
            save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION |
                       Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS |
                       Nokogiri::XML::Node::SaveOptions::FORMAT
          )
        end

        def find_empty(array, start = 0, stop = @nic_adapter_count)
          (start..stop).each do |i|
            return i unless array[i]
          end
          nil
        end

        # Return network name according to interface options.
        def interface_network(libvirt_client, options)
          # no need to get interface network for tcp tunnel config
          return 'tunnel_interface' if options.fetch(:tunnel_type, nil)

          if options[:network_name]
            @logger.debug 'Found network by name'
            return options[:network_name]
          end

          # Get list of all (active and inactive) libvirt networks.
          available_networks = libvirt_networks(libvirt_client)

          return 'public' if options[:iface_type] == :public_network

          if options[:ip]
            address = network_address(options[:ip], options[:netmask])
            available_networks.each do |network|
              if address == network[:network_address]
                @logger.debug 'Found network by ip'
                return network[:name]
              end
            end
          end

          raise Errors::NetworkNotAvailableError, network_name: options[:ip]
        end
      end
    end
  end
end
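For context on where the adapter slots above come from: slot 0 is the management network, and slots 1 and up map to the machine's `config.vm.network` entries. The Vagrantfile sketch below is illustrative only; the box, addresses, and host NIC name are placeholders, and it assumes the `private_network`/`public_network` options this plugin documents.

```ruby
# Illustrative only: box, addresses, and host NIC name are placeholders.
Vagrant.configure('2') do |config|
  config.vm.box = 'generic/ubuntu1804'

  config.vm.provider :libvirt do |libvirt|
    # Slot 0: management network, used for provisioning and skipped by auto-config.
    libvirt.management_network_name = 'vagrant-libvirt'
  end

  # Slot 1: private network; interface_network matches it to a libvirt network
  # by its network address (rendered with the 'interface' template).
  config.vm.network :private_network, ip: '10.20.30.40'

  # Slot 2: public network via macvtap on the host NIC (rendered with the
  # 'public_interface' template).
  config.vm.network :public_network, dev: 'eth0', mode: 'bridge', type: 'direct'
end
```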