vagrant-zones 0.0.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (67) hide show
  1. checksums.yaml +7 -0
  2. data/.github/ISSUE_TEMPLATE/bug_report.md +27 -0
  3. data/.github/ISSUE_TEMPLATE/feature_request.md +20 -0
  4. data/.github/dependabot.yml +6 -0
  5. data/.github/workflows/codeql-analysis.yml +72 -0
  6. data/.github/workflows/lint-release-and-publish.yml +70 -0
  7. data/.github/workflows/ruby-lint.yml +35 -0
  8. data/.gitignore +35 -0
  9. data/.rspec +2 -0
  10. data/.rubocop.yml +143 -0
  11. data/CHANGELOG.md +0 -0
  12. data/CODE_OF_CONDUCT.md +128 -0
  13. data/CONTRIBUTING.md +96 -0
  14. data/Gemfile +14 -0
  15. data/LICENSE +651 -0
  16. data/PULL_REQUEST_TEMPLATE.md +39 -0
  17. data/README.md +81 -0
  18. data/RELEASE.md +15 -0
  19. data/Rakefile +32 -0
  20. data/SECURITY.md +19 -0
  21. data/docs/CNAME +1 -0
  22. data/docs/_config.yml +1 -0
  23. data/docs/css/main.css +55 -0
  24. data/docs/css/styles.css +8678 -0
  25. data/docs/index.html +127 -0
  26. data/lib/vagrant-zones/action/create.rb +29 -0
  27. data/lib/vagrant-zones/action/destroy.rb +27 -0
  28. data/lib/vagrant-zones/action/halt.rb +24 -0
  29. data/lib/vagrant-zones/action/import.rb +112 -0
  30. data/lib/vagrant-zones/action/is_created.rb +22 -0
  31. data/lib/vagrant-zones/action/network.rb +26 -0
  32. data/lib/vagrant-zones/action/not_created.rb +20 -0
  33. data/lib/vagrant-zones/action/package.rb +134 -0
  34. data/lib/vagrant-zones/action/prepare_nfs_valid_ids.rb +24 -0
  35. data/lib/vagrant-zones/action/restart.rb +53 -0
  36. data/lib/vagrant-zones/action/setup.rb +26 -0
  37. data/lib/vagrant-zones/action/shutdown.rb +47 -0
  38. data/lib/vagrant-zones/action/start.rb +25 -0
  39. data/lib/vagrant-zones/action/wait_till_boot.rb +59 -0
  40. data/lib/vagrant-zones/action/wait_till_up.rb +65 -0
  41. data/lib/vagrant-zones/action.rb +204 -0
  42. data/lib/vagrant-zones/command/configure_snapshots.rb +49 -0
  43. data/lib/vagrant-zones/command/console.rb +63 -0
  44. data/lib/vagrant-zones/command/create_snapshots.rb +46 -0
  45. data/lib/vagrant-zones/command/delete_snapshots.rb +38 -0
  46. data/lib/vagrant-zones/command/guest_power_controls.rb +58 -0
  47. data/lib/vagrant-zones/command/list_snapshots.rb +44 -0
  48. data/lib/vagrant-zones/command/restart_guest.rb +29 -0
  49. data/lib/vagrant-zones/command/shutdown_guest.rb +29 -0
  50. data/lib/vagrant-zones/command/vnc_console.rb +48 -0
  51. data/lib/vagrant-zones/command/webvnc_console.rb +49 -0
  52. data/lib/vagrant-zones/command/zfssnapshot.rb +67 -0
  53. data/lib/vagrant-zones/command/zlogin_console.rb +40 -0
  54. data/lib/vagrant-zones/command/zone.rb +73 -0
  55. data/lib/vagrant-zones/config.rb +78 -0
  56. data/lib/vagrant-zones/driver.rb +1710 -0
  57. data/lib/vagrant-zones/errors.rb +61 -0
  58. data/lib/vagrant-zones/executor.rb +38 -0
  59. data/lib/vagrant-zones/plugin.rb +79 -0
  60. data/lib/vagrant-zones/provider.rb +83 -0
  61. data/lib/vagrant-zones/util/subprocess.rb +31 -0
  62. data/lib/vagrant-zones/util/timer.rb +19 -0
  63. data/lib/vagrant-zones/version.rb +7 -0
  64. data/lib/vagrant-zones.rb +29 -0
  65. data/locales/en.yml +326 -0
  66. data/vagrant-zones.gemspec +51 -0
  67. metadata +412 -0
@@ -0,0 +1,1710 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'log4r'
4
+ require 'fileutils'
5
+ require 'digest/md5'
6
+ require 'io/console'
7
+ require 'ruby_expect'
8
+ require 'netaddr'
9
+ require 'ipaddr'
10
+ require 'vagrant/util/numeric'
11
+ require 'pty'
12
+ require 'expect'
13
+ require 'vagrant'
14
+ require 'resolv'
15
+ require 'vagrant-zones/util/timer'
16
+ require 'vagrant-zones/util/subprocess'
17
+ require 'vagrant/util/retryable'
18
+
19
+ module VagrantPlugins
20
+ module ProviderZone
21
+ # This class does the heavy lifting of the zone provider
22
+ class Driver
23
+ include Vagrant::Util::Retryable
24
+ attr_accessor :executor
25
+
26
# Builds the driver for a single machine: a namespaced logger, the command
# executor, and the privilege-escalation prefix used by every system call.
# @param machine [Vagrant::Machine] the machine this driver manages
def initialize(machine)
  @logger = Log4r::Logger.new('vagrant_zones::driver')
  @machine = machine
  @executor = Executor::Exec.new
  # Root needs no prefix; otherwise prefer sudo when it is usable,
  # falling back to illumos pfexec.
  @pfexec =
    if Process.uid.zero?
      ''
    else
      system('sudo -v') ? 'sudo' : 'pfexec'
    end
end
38
+
39
# Maps the zoneadm state of the given machine onto a Vagrant state symbol.
# @param machine [Vagrant::Machine]
# @return [Symbol] :running, :preparing, :stopped, :incomplete, or
#   :not_created for any unrecognised zoneadm state
def state(machine)
  zone_states = {
    'running'    => :running,
    'configured' => :preparing,
    'installed'  => :stopped,
    'incomplete' => :incomplete
  }
  reported = execute(false, "#{@pfexec} zoneadm -z #{machine.name} list -p | awk -F: '{ print $3 }'")
  zone_states.fetch(reported, :not_created)
end
55
+
56
# Execute System commands
# Thin forwarder: every shell interaction in this driver funnels through the
# executor so command handling lives in one place (Executor::Exec).
# @param cmd [Array] positional arguments passed straight to the executor
# @param opts [Hash] keyword options passed straight to the executor
# @param block [Proc] optional block forwarded to the executor
# @return whatever the executor returns (command output in callers here)
def execute(*cmd, **opts, &block)
  @executor.execute(*cmd, **opts, &block)
end
60
+
61
## Begin installation for zone
# Installs the zone for the configured brand: lx brands install from the
# downloaded box file, bhyve installs directly, kvm/illumos are unsupported.
# @param uii [Vagrant::UI] UI channel used for progress messages
# @raise [Errors::InvalidLXBrand, Errors::InvalidbhyveBrand] when zoneadm
#   reports 'unknown brand' on this host
# @raise [Errors::NotYetImplemented] for the kvm and illumos brands
def install(uii)
  config = @machine.provider_config
  name = @machine.name
  case config.brand
  when 'lx'
    box = "#{@machine.data_dir}/#{@machine.config.vm.box}"
    results = execute(false, "#{@pfexec} zoneadm -z #{name} install -s #{box}")
    raise Errors::InvalidLXBrand if results.include? 'unknown brand'
  when 'bhyve'
    results = execute(false, "#{@pfexec} zoneadm -z #{name} install")
    raise Errors::InvalidbhyveBrand if results.include? 'unknown brand'
  when 'kvm', 'illumos'
    # BUG FIX: `when 'kvm' || 'illumos'` evaluated to just 'kvm' (|| returns
    # its first truthy operand), so an illumos brand silently fell through.
    raise Errors::NotYetImplemented
  end
  uii.info(I18n.t('vagrant_zones.installing_zone'))
  uii.info(" #{config.brand}")
end
79
+
80
## Control the zone from inside the zone OS
# Issues a guest-side power command over SSH. Custom commands from the
# provider config (safe_restart / safe_shutdown) take precedence over the
# built-in defaults; any other control value is just logged.
# @param uii [Vagrant::UI]
# @param control [String] 'restart' or 'shutdown'
def control(uii, control)
  config = @machine.provider_config
  uii.info(I18n.t('vagrant_zones.control')) if config.debug
  case control
  when 'restart'
    restart_cmd = config.safe_restart.nil? ? 'sudo shutdown -r' : config.safe_restart
    ssh_run_command(uii, restart_cmd)
  when 'shutdown'
    shutdown_cmd = config.safe_shutdown.nil? ? 'sudo init 0 || true' : config.safe_shutdown
    ssh_run_command(uii, shutdown_cmd)
  else
    uii.info(I18n.t('vagrant_zones.control_no_cmd'))
  end
end
97
+
98
## Run commands over SSH instead of ZLogin
# Runs a single shell command inside the guest over SSH, retrying (up to 60
# times) until the machine's communicator reports ready.
# @param uii [Vagrant::UI]
# @param command [String] command executed on the guest
# @return [String] output of the SSH invocation
def ssh_run_command(uii, command)
  config = @machine.provider_config
  ip = get_ip_address('runsshcommmand')
  user = user(@machine)
  key = userprivatekeypath(@machine).to_s
  port = sshport(@machine).to_s
  # BUG FIX: the old guard was `port = 22 if sshport(@machine).to_s.nil?`,
  # which could never fire (String#to_s never returns nil), leaving the -p
  # flag empty when no port is configured. Default on empty instead.
  port = '22' if port.empty?
  execute_return = ''
  Util::Timer.time do
    retryable(on: Errors::TimeoutError, tries: 60) do
      # If we're interrupted don't worry about waiting
      ssh_string = "#{@pfexec} ssh -o 'StrictHostKeyChecking=no' -p"
      execute_return = execute(false, %(#{ssh_string} #{port} -i #{key} #{user}@#{ip} "#{command}"))
      uii.info(I18n.t('vagrant_zones.ssh_run_command')) if config.debug
      uii.info(I18n.t('vagrant_zones.ssh_run_command') + command) if config.debug
      # Busy-wait until the communicator is ready before declaring success.
      loop do
        break if @machine.communicate.ready?
      end
    end
  end
  execute_return
end
121
+
122
## Function to provide console, vnc, or webvnc access
## Future To-Do: Should probably split this up
# Opens or manages a console session for the zone:
# - if a session pid file ("<name>.pid") exists, report the session and
#   optionally terminate it (kill == 'yes');
# - otherwise spawn a vnc/webvnc session (optionally detached, recording a
#   pid file) or exec straight into the zlogin console.
# @param uii [Vagrant::UI]
# @param command [String] console type ('vnc', 'webvnc', or 'zlogin')
# @param ip [String, nil] host address to serve on (must look like IPv4)
# @param port [String, Integer, nil] port to serve on (falls back to config)
# @param exit [Hash] session flags :detach and :kill ('yes'/'no')
#   NOTE(review): the parameter name `exit` shadows Kernel#exit inside this
#   method; kept as-is for compatibility.
def console(uii, command, ip, port, exit)
  uii.info(I18n.t('vagrant_zones.console'))
  detach = exit[:detach]
  kill = exit[:kill]
  name = @machine.name
  config = @machine.provider_config
  port = config.consoleport.nil? ? '' : config.consoleport if port.nil?
  ipaddr = '0.0.0.0'
  ipaddr = config.consolehost if config.consolehost =~ Resolv::IPv4::Regex
  ipaddr = ip if ip =~ Resolv::IPv4::Regex
  netport = "#{ipaddr}:#{port}"
  if File.exist?("#{name}.pid")
    # PERF FIX: the session file was previously re-read once per field (five
    # separate File.readlines calls); read it a single time instead.
    pid, ctype, ts, vmname, nport = File.readlines("#{name}.pid").map(&:strip)
    uii.info("Session running with PID: #{pid} since: #{ts} as console type: #{ctype} served at: #{nport}\n") if vmname[name.to_s]
    if kill == 'yes'
      File.delete("#{name}.pid")
      Process.kill 'TERM', pid.to_i
      Process.detach pid.to_i
      uii.info('Session Terminated')
    end
  else
    case command
    when /vnc/
      run = "pfexec zadm #{command} #{netport} #{name}"
      pid = spawn(run)
      Process.wait pid if detach == 'no'
      Process.detach(pid) if detach == 'yes'
      time = Time.new.strftime('%Y-%m-%d-%H:%M:%S')
      File.write("#{name}.pid", "#{pid}\n#{command}\n#{time}\n#{name}\n#{netport}") if detach == 'yes'
      uii.info("Session running with PID: #{pid} as console type: #{command} served at: #{netport}") if detach == 'yes'
    when 'zlogin'
      # exec replaces the current process with the interactive console.
      run = "#{@pfexec} zadm console #{name}"
      exec(run)
    end
  end
end
171
+
172
## Boot the Machine
# Boots the zone with zoneadm, reporting progress on the UI first.
# @param uii [Vagrant::UI]
def boot(uii)
  uii.info(I18n.t('vagrant_zones.starting_zone'))
  execute(false, "#{@pfexec} zoneadm -z #{@machine.name} boot")
end
178
+
179
# This filters the VM usage for VNIC Naming Purposes
# Maps config.vm_type onto the single-digit code embedded in VNIC names.
# An unset vm_type is treated as production ('3').
# @param uii [Vagrant::UI]
# @return [String, nil] '1'..'5', or nil for an unrecognised vm_type
def vtype(uii)
  config = @machine.provider_config
  uii.info(I18n.t('vagrant_zones.vtype')) if config.debug
  case config.vm_type
  when /template/
    '1'
  when /development/
    '2'
  when /production/, nil
    # BUG FIX: `when /production/ || nil` evaluated to just /production/, so
    # a nil vm_type fell through and returned nil instead of the intended
    # production default. Multiple values belong in a comma list.
    '3'
  when /firewall/
    '4'
  when /other/
    '5'
  end
end
196
+
197
# This filters the NIC Types
# Maps opts[:nictype] onto the single-letter code used in VNIC names.
# A missing nictype is treated as external ('e').
# @param opts [Hash] network options from the Vagrantfile
# @return [String, nil] 'e', 'i', 'c', 'm', 'h', or nil if unrecognised
def nictype(opts)
  case opts[:nictype]
  when /external/, nil
    # BUG FIX: `when /external/ || nil` evaluated to just /external/, so a
    # nil nictype fell through and returned nil instead of defaulting to 'e'.
    'e'
  when /internal/
    'i'
  when /carp/
    'c'
  when /management/
    'm'
  when /host/
    'h'
  end
end
212
+
213
# This Sanitizes the DNS Records
# Returns the configured DNS servers, defaulting to the Cloudflare and
# Google resolvers when none are configured.
# @param uii [Vagrant::UI]
# @return [Array<Hash>] entries shaped like { 'nameserver' => '1.1.1.1' }
def dnsservers(uii)
  config = @machine.provider_config
  # BUG FIX: the old code iterated config.dns.each BEFORE the nil check, so
  # an unset dns config raised NoMethodError instead of using the defaults.
  servers = if config.dns.nil?
              [{ 'nameserver' => '1.1.1.1' }, { 'nameserver' => '8.8.8.8' }]
            else
              config.dns.map { |server| server }
            end
  uii.info(I18n.t('vagrant_zones.nsservers') + servers.to_s) if config.debug
  servers
end
224
+
225
# This Sanitizes the Mac Address
# Returns opts[:mac] when it is a valid colon- or hyphen-separated MAC
# address, otherwise the literal 'auto' (letting dladm pick one).
# @param uii [Vagrant::UI]
# @param opts [Hash] network options from the Vagrantfile
# @return [String] a validated MAC address or 'auto'
def macaddress(uii, opts)
  config = @machine.provider_config
  regex = /^(?:[[:xdigit:]]{2}([-:]))(?:[[:xdigit:]]{2}\1){4}[[:xdigit:]]{2}$/
  mac = opts[:mac]
  # BUG FIX: when opts[:mac] was nil the old code left `mac` unset and then
  # called nil.match(regex), raising NoMethodError; treat nil as 'auto'.
  mac = 'auto' if mac.nil? || !mac.match(regex)
  uii.info(I18n.t('vagrant_zones.mac') + mac) if config.debug
  mac
end
234
+
235
# This Sanitizes the IP Address to set
# @param uii [Vagrant::UI]
# @param opts [Hash] network options from the Vagrantfile
# @return [String, nil] the configured IP with tabs stripped, or nil when
#   no IP was provided
def ipaddress(uii, opts)
  config = @machine.provider_config
  # ROBUSTNESS: also tolerate a missing :ip key (previously nil.empty?
  # would raise NoMethodError).
  ip = if opts[:ip].nil? || opts[:ip].empty?
         nil
       else
         opts[:ip].gsub(/\t/, '')
       end
  # BUG FIX: the debug line previously concatenated nil onto the I18n string
  # (TypeError) whenever no IP was configured; only log when an IP exists.
  uii.info(I18n.t('vagrant_zones.ipaddress') + ip) if config.debug && !ip.nil?
  ip
end
246
+
247
# This Sanitizes the AllowedIP Address to set for Cloudinit
# Builds the zone's address in CIDR form ("<ip>/<prefix-length>") from the
# sanitized IP and the netmask option.
# @return [String] e.g. "192.168.1.10/24"
def allowedaddress(uii, opts)
  config = @machine.provider_config
  ip = ipaddress(uii, opts)
  prefix_length = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  cidr = "#{ip}/#{prefix_length}"
  uii.info(I18n.t('vagrant_zones.allowedaddress') + cidr) if config.debug
  cidr
end
256
+
257
# This Sanitizes the VNIC Name
# Composes the deterministic VNIC name used everywhere in this driver:
# vnic<nic-code><vm-code>_<partition-id>_<nic-number>.
# @return [String] the VNIC name
def vname(uii, opts)
  config = @machine.provider_config
  label = format('vnic%s%s_%s_%s', nictype(opts), vtype(uii), config.partition_id, opts[:nic_number])
  uii.info(I18n.t('vagrant_zones.vnic_name') + label) if config.debug
  label
end
264
+
265
## If DHCP and Zlogin, get the IP address
# Resolves the guest IP for the first managed network interface: for a
# DHCP-managed NIC it logs into the zone console (zlogin over a PTY) and
# asks the guest directly; for a statically-addressed managed NIC it
# returns the configured IP. Returns nil when no address is found.
# @param _function [String] caller tag; not used in the method body
# @return [String, nil] the IP address with tabs stripped, or nil
def get_ip_address(_function)
  config = @machine.provider_config
  name = @machine.name
  # uii.info(I18n.t('vagrant_zones.get_ip_address')) if config.debug
  @machine.config.vm.networks.each do |_adaptertype, opts|
    responses = []
    nic_type = nictype(opts)
    if opts[:dhcp] && opts[:managed]
      # NOTE(review): `uii` is not defined in this scope — `vtype(uii)` will
      # raise NameError if this DHCP branch is ever taken. TODO: accept a UI
      # object as a parameter (see vname for the same name construction).
      vnic_name = "vnic#{nic_type}#{vtype(uii)}_#{config.partition_id}_#{opts[:nic_number]}"
      PTY.spawn("pfexec zlogin -C #{name}") do |zlogin_read, zlogin_write, pid|
        # Ask the guest for the first IPv4 address on the VNIC device.
        command = "ip -4 addr show dev #{vnic_name} | head -n -1 | tail -1 | awk '{ print $2 }' | cut -f1 -d\"/\" \n"
        zlogin_read.expect(/\n/) { zlogin_write.printf(command) }
        Timeout.timeout(config.clean_shutdown_time) do
          loop do
            zlogin_read.expect(/\r\n/) { |line| responses.push line }
            if responses[-1].to_s.match(/(?:[0-9]{1,3}\.){3}[0-9]{1,3}/)
              # Strip the bracketed-paste escape sequence the console echoes.
              ip = responses[-1][0].rstrip.gsub(/\e\[\?2004l/, '').lstrip
              return nil if ip.empty?
              return ip.gsub(/\t/, '') unless ip.empty?

              break
            end
            # NOTE(review): `cmd` is undefined here (the sent command lives
            # in `command`) — building this message raises NameError. TODO.
            errormessage = "==> #{name} ==> Command ==> #{cmd} \nFailed with ==> #{responses[-1]}"
            raise errormessage if responses[-1].to_s.match(/Error Code: \b(?!0\b)\d{1,4}\b/)
          end
        end
        Process.kill('HUP', pid)
      end
    elsif (opts[:dhcp] == false || opts[:dhcp].nil?) && opts[:managed]
      # Static/managed NIC: the Vagrantfile IP is authoritative.
      ip = opts[:ip].to_s
      return nil if ip.empty?

      return ip.gsub(/\t/, '')
    end
  end
end
302
+
303
## Manage Network Interfaces
# Dispatches the per-interface lifecycle work for every network declared in
# the Vagrantfile. Public networks get a plain VNIC; private networks get a
# full etherstub + NAT + DHCP stack. `state` selects the phase:
# 'create', 'config', 'setup', or 'delete'.
def network(uii, state)
  config = @machine.provider_config
  uii.info(I18n.t('vagrant_zones.creating_networking_interfaces')) if state == 'create'
  @machine.config.vm.networks.each do |adaptertype, opts|
    case adaptertype.to_s
    when 'public_network'
      case state
      when 'delete' then zonenicdel(uii, opts)
      when 'config' then zonecfgnicconfig(uii, opts)
      when 'create' then zoneniccreate(uii, opts)
      when 'setup'  then zonenicstpzloginsetup(uii, opts, config) if config.setup_method == 'zlogin'
      end
    when 'private_network'
      case state
      when 'delete'
        # Tear down in reverse order of creation.
        zonenicdel(uii, opts)
        zonedhcpentriesrem(uii, opts)
        zonenatclean(uii, opts)
        etherstubdelhvnic(uii, opts)
        etherstubdelete(uii, opts)
      when 'config'
        natnicconfig(uii, opts)
      when 'create'
        etherstub = etherstubcreate(uii, opts)
        zonenatniccreate(uii, opts, etherstub)
        etherstubcreatehvnic(uii, opts, etherstub)
        zonenatforward(uii, opts)
        zonenatentries(uii, opts)
        zonedhcpentries(uii, opts)
      when 'setup'
        zonedhcpcheckaddr(uii, opts)
        zonenicnatsetup(uii, opts)
      end
    end
  end
end
332
+
333
## Delete DHCP entries for Zones
# Removes this zone's subnet and host entries from /etc/dhcpd.conf, drops
# the zone's host VNIC from the dhcp:ipv4 listener list, and bounces the
# DHCP service so the changes take effect.
def zonedhcpentriesrem(uii, opts)
  config = @machine.provider_config

  ip = ipaddress(uii, opts)
  name = @machine.name
  defrouter = opts[:gateway].to_s
  shrtsubnet = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  hvnic_name = "h_vnic_#{config.partition_id}_#{opts[:nic_number]}"
  mac = macaddress(uii, opts)

  ## if mac is auto, then grab NIC from VNIC
  if mac == 'auto'
    mac = ''
    cmd = "#{@pfexec} dladm show-vnic #{hvnic_name} | tail -n +2 | awk '{ print $4 }'"
    vnicmac = execute(false, cmd.to_s)
    vnicmac.split(':').each { |x| mac += "#{format('%02x', x.to_i(16))}:" }
    mac = mac[0..-2]
  end
  uii.info(I18n.t('vagrant_zones.deconfiguring_dhcp'))
  uii.info(" #{hvnic_name}")
  broadcast = IPAddr.new(defrouter).mask(shrtsubnet).to_s
  subnet = %(subnet #{broadcast} netmask #{opts[:netmask]} { option routers #{defrouter}; })
  subnetopts = %(host #{name} { option host-name "#{name}"; hardware ethernet #{mac}; fixed-address #{ip}; })
  File.open('/etc/dhcpd.conf-temp', 'w') do |out_file|
    File.foreach('/etc/dhcpd.conf') do |entry|
      # BUG FIX: the old line was
      #   out_file.puts line unless entry == subnet || subnetopts
      # `line` was undefined, and `(entry == subnet) || subnetopts` is always
      # truthy, so nothing was ever written and dhcpd.conf was truncated to
      # empty. Keep every line except this zone's two entries.
      out_file.puts(entry) unless [subnet, subnetopts].include?(entry.chomp)
    end
  end
  FileUtils.mv('/etc/dhcpd.conf-temp', '/etc/dhcpd.conf')
  subawk = '{ $1=""; $2=""; sub(/^[ \\t]+/, ""); print}'
  awk = %(| awk '#{subawk}' | tr ' ' '\\n' | tr -d '"')
  cmd = 'svccfg -s dhcp:ipv4 listprop config/listen_ifnames '
  nicsused = execute(false, cmd + awk.to_s).split("\n")
  # Rebuild the listener list without this zone's host VNIC.
  newdhcpnics = nicsused.reject { |nic| nic.to_s == hvnic_name.to_s }
  if newdhcpnics.empty?
    dhcpcmdnewstr = '\(\"\"\)'
  else
    dhcpcmdnewstr = '\('
    newdhcpnics.each { |nic| dhcpcmdnewstr += %(\\"#{nic}\\") }
    dhcpcmdnewstr += '\)'
  end
  execute(false, "#{@pfexec} svccfg -s dhcp:ipv4 setprop config/listen_ifnames = #{dhcpcmdnewstr}")
  execute(false, "#{@pfexec} svcadm refresh dhcp:ipv4")
  execute(false, "#{@pfexec} svcadm disable dhcp:ipv4")
  execute(false, "#{@pfexec} svcadm enable dhcp:ipv4")
end
385
+
386
# Removes this zone's two NAT map rules from /etc/ipf/ipnat.conf and
# refreshes the ipfilter service.
def zonenatclean(uii, opts)
  vnic_name = vname(uii, opts)
  defrouter = opts[:gateway].to_s
  shrtsubnet = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  broadcast = IPAddr.new(defrouter).mask(shrtsubnet).to_s
  uii.info(I18n.t('vagrant_zones.deconfiguring_nat'))
  uii.info(" #{vnic_name}")
  line1 = %(map #{opts[:bridge]} #{broadcast}/#{shrtsubnet} -> 0/32 portmap tcp/udp auto)
  line2 = %(map #{opts[:bridge]} #{broadcast}/#{shrtsubnet} -> 0/32)
  File.open('/etc/ipf/ipnat.conf-temp', 'w') do |out_file|
    File.foreach('/etc/ipf/ipnat.conf') do |entry|
      # BUG FIX: previously `out_file.puts line unless entry == line1 || line2`
      # referenced an undefined `line`, and `(entry == line1) || line2` was
      # always truthy, so the rewritten file came out empty. Copy every rule
      # except the two belonging to this zone.
      out_file.puts(entry) unless [line1, line2].include?(entry.chomp)
    end
  end
  FileUtils.mv('/etc/ipf/ipnat.conf-temp', '/etc/ipf/ipnat.conf')
  execute(false, "#{@pfexec} svcadm refresh network/ipfilter")
end
403
+
404
# Deletes the zone's VNIC when dladm reports that it exists; otherwise just
# notes that there is nothing to remove.
def zonenicdel(uii, opts)
  vnic_name = vname(uii, opts)
  vnic_configured = execute(false, "#{@pfexec} dladm show-vnic | grep #{vnic_name} | awk '{ print $1 }' ")
  if vnic_configured == vnic_name.to_s
    uii.info(I18n.t('vagrant_zones.removing_vnic'))
    uii.info(" #{vnic_name}")
    execute(false, "#{@pfexec} dladm delete-vnic #{vnic_name}")
  else
    uii.info(I18n.t('vagrant_zones.no_removing_vnic'))
  end
end
412
+
413
## Delete etherstubs vnic
# Removes the host-side VNIC (interface and link) for this zone when dladm
# reports that it exists.
def etherstubdelhvnic(uii, opts)
  config = @machine.provider_config
  hvnic_name = "h_vnic_#{config.partition_id}_#{opts[:nic_number]}"
  existing = execute(false, "#{@pfexec} dladm show-vnic | grep #{hvnic_name} | awk '{ print $1 }' ")
  if existing == hvnic_name.to_s
    uii.info(I18n.t('vagrant_zones.removing_host_vnic'))
    uii.info(" #{hvnic_name}")
    # Drop the IP interface before deleting the underlying link.
    execute(false, "#{@pfexec} ipadm delete-if #{hvnic_name}")
    execute(false, "#{@pfexec} dladm delete-vnic #{hvnic_name}")
  else
    uii.info(I18n.t('vagrant_zones.no_removing_host_vnic'))
  end
end
424
+
425
## Delete etherstubs
# Removes this zone's etherstub when dladm reports that it exists.
def etherstubdelete(uii, opts)
  config = @machine.provider_config
  ether_name = "stub_#{config.partition_id}_#{opts[:nic_number]}"
  existing = execute(false, "#{@pfexec} dladm show-etherstub | grep #{ether_name} | awk '{ print $1 }' ")
  if existing == ether_name
    uii.info(I18n.t('vagrant_zones.delete_ethervnic'))
    uii.info(" #{ether_name}")
    execute(false, "#{@pfexec} dladm delete-etherstub #{ether_name}")
  else
    uii.info(I18n.t('vagrant_zones.no_delete_ethervnic'))
  end
end
435
+
436
## Create etherstubs for Zones
# Creates this zone's etherstub unless dladm shows it already exists.
# @return [String] the etherstub name (whether created or pre-existing)
def etherstubcreate(uii, opts)
  config = @machine.provider_config
  ether_name = "stub_#{config.partition_id}_#{opts[:nic_number]}"
  existing = execute(false, "#{@pfexec} dladm show-etherstub | grep #{ether_name} | awk '{ print $1 }' ")
  unless existing == ether_name
    uii.info(I18n.t('vagrant_zones.creating_etherstub'))
    uii.info(" #{ether_name}")
    execute(false, "#{@pfexec} dladm create-etherstub #{ether_name}")
  end
  ether_name
end
446
+
447
## Create ethervnics for Zones
# Creates the zone-facing VNIC on top of the given etherstub, using the
# sanitized MAC address.
def zonenatniccreate(uii, opts, etherstub)
  nic = vname(uii, opts)
  mac = macaddress(uii, opts)
  uii.info(I18n.t('vagrant_zones.creating_ethervnic'))
  uii.info(" #{nic}")
  create_cmd = "#{@pfexec} dladm create-vnic -l #{etherstub} -m #{mac} #{nic}"
  execute(false, create_cmd)
end
455
+
456
## Create Host VNIC on etherstubs for IP for Zones DHCP
# Creates the host-side VNIC on the etherstub and assigns it the gateway
# address so the global zone can serve DHCP/NAT for the zone.
def etherstubcreatehvnic(uii, opts, etherstub)
  config = @machine.provider_config
  gateway = opts[:gateway].to_s
  prefix = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  host_vnic = "h_vnic_#{config.partition_id}_#{opts[:nic_number]}"
  uii.info(I18n.t('vagrant_zones.creating_etherhostvnic'))
  uii.info(" #{host_vnic}")
  [
    "#{@pfexec} dladm create-vnic -l #{etherstub} #{host_vnic}",
    "#{@pfexec} ipadm create-if #{host_vnic}",
    "#{@pfexec} ipadm create-addr -T static -a local=#{gateway}/#{prefix} #{host_vnic}/v4"
  ].each { |cmd| execute(false, cmd) }
end
468
+
469
## Setup vnics for Zones using nat/dhcp
# Configures the guest side of a NAT-ed VNIC: resolves the real MAC when
# 'auto' was requested, detects the guest OS over SSH, then delegates to
# the netplan (non-SunOS) or dladm (SunOS) configuration path. No-op when
# cloud-init handles networking.
def zonenicnatsetup(uii, opts)
  config = @machine.provider_config
  return if config.cloud_init_enabled

  vnic_name = vname(uii, opts)
  mac = macaddress(uii, opts)
  ## if mac is auto, then grab NIC from VNIC
  if mac == 'auto'
    lookup = "#{@pfexec} dladm show-vnic #{vnic_name} | tail -n +2 | awk '{ print $4 }'"
    raw_mac = execute(false, lookup)
    # Normalize each octet to zero-padded lowercase hex.
    mac = raw_mac.split(':').map { |octet| format('%02x', octet.to_i(16)) }.join(':')
  end

  ## Code Block to Detect OS
  uii.info(I18n.t('vagrant_zones.os_detect'))
  uii.info("Zone OS configured as: #{config.os_type}")
  os_detected = ssh_run_command(uii, 'uname -a')
  uii.info("Zone OS detected as: #{os_detected}")

  ## Check if Ansible is Installed to enable easier configuration
  uii.info(I18n.t('vagrant_zones.ansible_detect'))
  ansible_detected = ssh_run_command(uii, 'which ansible > /dev/null 2>&1 ; echo $?')
  uii.info('Ansible detected') if ansible_detected == '0'

  # Run Network Configuration
  if os_detected.to_s.match(/SunOS/)
    zonenicnatsetup_dladm(uii, opts, mac)
  else
    zonenicnatsetup_netplan(uii, opts, mac)
  end
end
503
+
504
## Setup vnics for Zones using nat/dhcp using netplan
# Configures a (Linux) guest's NIC over SSH by wiping any existing netplan
# files and writing a fresh /etc/netplan/<vnic>.yaml (match-by-MAC, static
# address, gateway, and DNS servers), then runs `netplan apply`.
# NOTE(review): the YAML is built from \n-joined fragments whose leading
# spaces define the nesting — verify the exact indentation against a
# generated file before changing any of these strings.
def zonenicnatsetup_netplan(uii, opts, mac)
  ssh_run_command(uii, 'sudo rm -rf /etc/netplan/*.yaml')
  ip = ipaddress(uii, opts)
  defrouter = opts[:gateway].to_s
  vnic_name = vname(uii, opts)
  # Netmask converted to a prefix length by counting bits in the mask.
  shrtsubnet = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  servers = dnsservers(uii)
  ## Begin of code block to move to Netplan function
  uii.info(I18n.t('vagrant_zones.configure_interface_using_vnic'))
  uii.info(" #{vnic_name}")

  netplan1 = %(network:\n version: 2\n ethernets:\n #{vnic_name}:\n match:\n macaddress: #{mac}\n)
  netplan2 = %( dhcp-identifier: mac\n dhcp4: #{opts[:dhcp]}\n dhcp6: #{opts[:dhcp6]}\n)
  netplan3 = %( set-name: #{vnic_name}\n addresses: [#{ip}/#{shrtsubnet}]\n gateway4: #{defrouter}\n)
  netplan4 = %( nameservers:\n addresses: [#{servers[0]['nameserver']} , #{servers[1]['nameserver']}] )
  netplan = netplan1 + netplan2 + netplan3 + netplan4
  cmd = "echo -e '#{netplan}' | sudo tee /etc/netplan/#{vnic_name}.yaml"
  infomessage = I18n.t('vagrant_zones.netplan_applied_static') + "/etc/netplan/#{vnic_name}.yaml"
  uii.info(infomessage) if ssh_run_command(uii, cmd)

  ## Apply the Configuration
  uii.info(I18n.t('vagrant_zones.netplan_applied')) if ssh_run_command(uii, 'sudo netplan apply')
  ## End of code block to move to Netplan function
end
529
+
530
## Setup vnics for Zones using nat/dhcp over dladm
# Configures a SunOS guest's NIC over SSH using dladm/ipadm: finds the
# physical link whose MAC matches, renames it to the expected VNIC name,
# assigns the static address, installs the default route, and writes
# /etc/resolv.conf with the sanitized DNS servers.
def zonenicnatsetup_dladm(uii, opts, mac)
  ip = ipaddress(uii, opts)
  defrouter = opts[:gateway].to_s
  vnic_name = vname(uii, opts)
  shrtsubnet = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  servers = dnsservers(uii)
  uii.info(I18n.t('vagrant_zones.configure_interface_using_vnic_dladm'))
  uii.info(" #{vnic_name}")

  # loop through each phys if and run code if physif matches #{mac}
  list_cmd = 'pfexec dladm show-phys -m -o LINK,ADDRESS,CLIENT | tail -n +2'
  device = ''
  ssh_run_command(uii, list_cmd).split("\n").each do |row|
    link, address = row.strip.split
    # Normalize the guest-reported MAC octets before comparing.
    normalized = address.split(':').map { |octet| format('%02x', octet.to_i(16)) }.join(':')
    device = link if normalized.match(/#{mac}/)
  end

  setup_steps = [
    "pfexec ipadm delete-if #{device}",
    "pfexec dladm rename-link #{device} #{vnic_name}",
    "pfexec ipadm create-if #{vnic_name}",
    "pfexec ipadm create-addr -T static -a #{ip}/#{shrtsubnet} #{vnic_name}/v4vagrant"
  ]
  uii.info(I18n.t('vagrant_zones.dladm_applied')) if ssh_run_command(uii, setup_steps.join(' && '))

  uii.info(I18n.t('vagrant_zones.dladm_route_applied')) if ssh_run_command(uii, "pfexec route -p add default #{defrouter}")

  ns_string = "nameserver #{servers[0]['nameserver']}\nnameserver #{servers[1]['nameserver']}"
  dns_cmd = "pfexec echo '#{ns_string}' | pfexec tee /etc/resolv.conf"
  uii.info(I18n.t('vagrant_zones.dladm_dns_applied')) if ssh_run_command(uii, dns_cmd)
end
566
+
567
## zonecfg function for nat Networking
# Applies the zonecfg network settings for a NAT-backed interface. The lx
# brand sets allowed-address/gateway/ips properties; the bhyve brand adds a
# net resource (including allowed-address when cloud-init is enabled).
def natnicconfig(uii, opts)
  config = @machine.provider_config
  allowed_address = allowedaddress(uii, opts)
  defrouter = opts[:gateway].to_s
  vnic_name = vname(uii, opts)
  uii.info(I18n.t('vagrant_zones.nat_vnic_setup'))
  uii.info(" #{vnic_name}")
  strt = "#{@pfexec} zonecfg -z #{@machine.name} "
  cie = config.cloud_init_enabled
  case config.brand
  when 'lx'
    shrtstr1 = %(set allowed-address=#{allowed_address}; add property (name=gateway,value="#{defrouter}"); )
    shrtstr2 = %(add property (name=ips,value="#{allowed_address}"); add property (name=primary,value="true"); end;)
    # BUG FIX: the command previously ended with a lone trailing double quote
    # (%(#{strt}set ... #{shrtstr2}")) and no opening quote, producing an
    # unbalanced shell quote; wrap the zonecfg script in quotes the same way
    # the bhyve branch does.
    execute(false, %(#{strt}"set global-nic=auto; #{shrtstr1} #{shrtstr2}"))
  when 'bhyve'
    execute(false, %(#{strt}"add net; set physical=#{vnic_name}; end;")) unless cie
    execute(false, %(#{strt}"add net; set physical=#{vnic_name}; set allowed-address=#{allowed_address}; end;")) if cie
  end
end
587
+
588
## Set NatForwarding on global interface
# Enables IPv4 forwarding globally (routeadm) and on both the bridge
# interface and this zone's host VNIC.
def zonenatforward(uii, opts)
  config = @machine.provider_config
  host_vnic = "h_vnic_#{config.partition_id}_#{opts[:nic_number]}"
  uii.info(I18n.t('vagrant_zones.forwarding_nat'))
  uii.info(" #{host_vnic}")
  [
    "#{@pfexec} routeadm -u -e ipv4-forwarding",
    "#{@pfexec} ipadm set-ifprop -p forwarding=on -m ipv4 #{opts[:bridge]}",
    "#{@pfexec} ipadm set-ifprop -p forwarding=on -m ipv4 #{host_vnic}"
  ].each { |fwd_cmd| execute(false, fwd_cmd) }
end
598
+
599
## Create nat entries for the zone
# Appends this zone's two ipnat map rules to /etc/ipf/ipnat.conf unless they
# are already present, then bounces ipfilter so the rules are loaded.
def zonenatentries(uii, opts)
  vnic_name = vname(uii, opts)
  defrouter = opts[:gateway].to_s
  shrtsubnet = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  uii.info(I18n.t('vagrant_zones.configuring_nat'))
  uii.info(" #{vnic_name}")
  broadcast = IPAddr.new(defrouter).mask(shrtsubnet).to_s
  ## Read NAT File, Check for these lines, if exist, warn, but continue
  natentries = execute(false, "#{@pfexec} cat /etc/ipf/ipnat.conf").split("\n")
  line1 = %(map #{opts[:bridge]} #{broadcast}/#{shrtsubnet} -> 0/32 portmap tcp/udp auto)
  line2 = %(map #{opts[:bridge]} #{broadcast}/#{shrtsubnet} -> 0/32)
  execute(false, %(#{@pfexec} echo "#{line1}" | #{@pfexec} tee -a /etc/ipf/ipnat.conf)) unless natentries.include?(line1)
  execute(false, %(#{@pfexec} echo "#{line2}" | #{@pfexec} tee -a /etc/ipf/ipnat.conf)) unless natentries.include?(line2)
  execute(false, "#{@pfexec} svcadm refresh network/ipfilter")
  execute(false, "#{@pfexec} svcadm disable network/ipfilter")
  execute(false, "#{@pfexec} svcadm enable network/ipfilter")
end
623
+
624
## Create dhcp entries for the zone
# Appends this zone's subnet declaration and host reservation to
# /etc/dhcpd.conf (skipping entries that already exist), points the
# dhcp:ipv4 listener at the zone's host VNIC, and bounces the service.
def zonedhcpentries(uii, opts)
  config = @machine.provider_config
  ip = ipaddress(uii, opts)
  name = @machine.name
  vnic_name = vname(uii, opts)
  defrouter = opts[:gateway].to_s
  shrtsubnet = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  hvnic_name = "h_vnic_#{config.partition_id}_#{opts[:nic_number]}"
  ## Set Mac address from VNIC
  mac = macaddress(uii, opts)
  if mac == 'auto'
    lookup = "#{@pfexec} dladm show-vnic #{vnic_name} | tail -n +2 | awk '{ print $4 }'"
    # Normalize each reported octet to zero-padded lowercase hex.
    mac = execute(false, lookup).split(':').map { |octet| format('%02x', octet.to_i(16)) }.join(':')
  end
  uii.info(I18n.t('vagrant_zones.configuring_dhcp'))
  broadcast = IPAddr.new(defrouter).mask(shrtsubnet).to_s
  dhcpentries = execute(false, "#{@pfexec} cat /etc/dhcpd.conf").split("\n")
  subnet = %(subnet #{broadcast} netmask #{opts[:netmask]} { option routers #{defrouter}; })
  subnetopts = %(host #{name} { option host-name "#{name}"; hardware ethernet #{mac}; fixed-address #{ip}; })
  execute(false, "#{@pfexec} echo '#{subnet}' | #{@pfexec} tee -a /etc/dhcpd.conf") unless dhcpentries.include?(subnet)
  execute(false, "#{@pfexec} echo '#{subnetopts}' | #{@pfexec} tee -a /etc/dhcpd.conf") unless dhcpentries.include?(subnetopts)
  execute(false, "#{@pfexec} svccfg -s dhcp:ipv4 setprop config/listen_ifnames = #{hvnic_name}")
  execute(false, "#{@pfexec} svcadm refresh dhcp:ipv4")
  execute(false, "#{@pfexec} svcadm disable dhcp:ipv4")
  execute(false, "#{@pfexec} svcadm enable dhcp:ipv4")
end
660
+
661
## Check if Address shows up in lease list /var/db/dhcpd ping address after
# Currently informational only: reports the address the zone is expected to
# receive. The ping-based verification originally sketched here remains
# disabled.
def zonedhcpcheckaddr(uii, opts)
  uii.info(I18n.t('vagrant_zones.chk_dhcp_addr'))
  uii.info(" #{opts[:ip]}")
end
669
+
670
## Create vnics for Zones
# Creates the zone's VNIC on the configured bridge, adding a VLAN tag when
# one is supplied. (The two original branches differed only in the -v flag,
# so they are collapsed into a single command.)
def zoneniccreate(uii, opts)
  mac = macaddress(uii, opts)
  vnic_name = vname(uii, opts)
  uii.info(I18n.t('vagrant_zones.creating_vnic'))
  uii.info(" #{vnic_name}")
  vlan_flag = opts[:vlan].nil? ? '' : "-v #{opts[:vlan]} "
  execute(false, "#{@pfexec} dladm create-vnic -l #{opts[:bridge]} -m #{mac} #{vlan_flag}#{vnic_name}")
end
685
+
686
# This helps us create all the datasets for the zone
# Creates the ZFS hierarchy backing the zone: for lx, a zoned dataset; for
# bhyve, the zone root dataset plus a boot zvol seeded from the box's
# box.zss replication stream; then any configured additional zvols.
# Raises Errors::NotYetImplemented for the illumos/kvm brands and
# Errors::InvalidBrand for anything unrecognized.
def create_dataset(uii)
  config = @machine.provider_config
  name = @machine.name
  bootconfigs = config.boot
  datasetpath = "#{bootconfigs['array']}/#{bootconfigs['dataset']}/#{name}"
  datasetroot = "#{datasetpath}/#{bootconfigs['volume_name']}"
  sparse = '-s '
  sparse = '' unless bootconfigs['sparse']
  refres = "-o refreservation=#{bootconfigs['refreservation']}" unless bootconfigs['refreservation'].nil?
  refres = '-o refreservation=none' if bootconfigs['refreservation'] == 'none' || bootconfigs['refreservation'].nil?
  uii.info(I18n.t('vagrant_zones.begin_create_datasets'))
  ## Create Boot Volume
  case config.brand
  when 'lx'
    uii.info(I18n.t('vagrant_zones.lx_zone_dataset'))
    uii.info(" #{datasetroot}")
    execute(false, "#{@pfexec} zfs create -o zoned=on -p #{datasetroot}")
  when 'bhyve'
    ## Create root dataset
    uii.info(I18n.t('vagrant_zones.bhyve_zone_dataset_root'))
    uii.info(" #{datasetpath}")
    execute(false, "#{@pfexec} zfs create #{datasetpath}")

    # Create boot volume
    cinfo = "#{datasetroot}, #{bootconfigs['size']}"
    uii.info(I18n.t('vagrant_zones.bhyve_zone_dataset_boot'))
    uii.info(" #{cinfo}")
    execute(false, "#{@pfexec} zfs create #{sparse} #{refres} -V #{bootconfigs['size']} #{datasetroot}")

    ## Import template to boot volume
    commandtransfer = "#{@pfexec} pv -n #{@machine.box.directory.join('box.zss')} | #{@pfexec} zfs recv -u -v -F #{datasetroot} "
    uii.info(I18n.t('vagrant_zones.template_import_path'))
    uii.info(" #{@machine.box.directory.join('box.zss')}")
    # pv's progress output arrives on stderr; stream it to the UI.
    Util::Subprocess.new commandtransfer do |_stdout, stderr, _thread|
      uii.rewriting do |uiprogress|
        uiprogress.clear_line
        uiprogress.info(I18n.t('vagrant_zones.importing_box_image_to_disk') + "#{datasetroot} ", new_line: false)
        uiprogress.report_progress(stderr, 100, false)
      end
    end
    uii.clear_line
  when 'illumos', 'kvm'
    # BUG FIX: the original read `when 'illumos' || 'kvm'`, which evaluates
    # the || at parse time to just 'illumos', so brand 'kvm' incorrectly
    # fell through to the InvalidBrand branch.
    raise Errors::NotYetImplemented
  else
    raise Errors::InvalidBrand
  end
  ## Create Additional Disks
  return if config.additional_disks.nil?

  config.additional_disks.each do |disk|
    shrtpath = "#{disk['array']}/#{disk['dataset']}/#{name}"
    dataset = "#{shrtpath}/#{disk['volume_name']}"
    sparse = '-s '
    sparse = '' unless disk['sparse']
    # NOTE(review): refreservation below reads bootconfigs rather than the
    # per-disk hash — looks like a copy/paste slip; kept as-is pending
    # confirmation of the intended behavior.
    refres = "-o refreservation=#{bootconfigs['refreservation']}" unless bootconfigs['refreservation'].nil?
    refres = '-o refreservation=none' if bootconfigs['refreservation'] == 'none' || bootconfigs['refreservation'].nil?
    ## If the root data set doesn't exist create it
    addsrtexists = execute(false, "#{@pfexec} zfs list | grep #{shrtpath} | awk '{ print $1 }' | head -n 1 || true")
    cinfo = shrtpath.to_s
    uii.info(I18n.t('vagrant_zones.bhyve_zone_dataset_additional_volume_root')) unless addsrtexists == shrtpath.to_s
    uii.info(" #{cinfo}") unless addsrtexists == shrtpath.to_s
    ## Create the Additional volume
    execute(false, "#{@pfexec} zfs create #{shrtpath}") unless addsrtexists == shrtpath.to_s
    cinfo = "#{dataset}, #{disk['size']}"
    uii.info(I18n.t('vagrant_zones.bhyve_zone_dataset_additional_volume'))
    uii.info(" #{cinfo}")
    execute(false, "#{@pfexec} zfs create #{sparse} #{refres} -V #{disk['size']} #{dataset}")
  end
end
756
+
757
# This helps us delete any associated datasets of the zone
## Future To-Do: Should probably split this up and clean it up
# Destroys the zone's storage in order: the boot volume, each additional
# disk volume (plus its per-zone parent dataset when that differs from the
# zone root path), and finally the zone root dataset. Existence is probed
# by grepping `zfs list` output and comparing the match to the expected
# name. NOTE(review): a dataset whose name is a prefix of another could
# make grep return the wrong line — confirm names are unambiguous.
def delete_dataset(uii)
  config = @machine.provider_config
  name = @machine.name
  # datadir = machine.data_dir
  bootconfigs = config.boot
  datasetpath = "#{bootconfigs['array']}/#{bootconfigs['dataset']}/#{name}"
  datasetroot = "#{datasetpath}/#{bootconfigs['volume_name']}"
  uii.info(I18n.t('vagrant_zones.delete_disks'))

  ## Check if Boot Dataset exists
  # zp is the zone root path with any leading '/' stripped, as zfs names it.
  zp = datasetpath.delete_prefix('/').to_s
  dataset_boot_exists = execute(false, "#{@pfexec} zfs list | grep #{datasetroot} | awk '{ print $1 }' || true")

  ## Destroy Boot dataset
  uii.info(I18n.t('vagrant_zones.destroy_dataset')) if dataset_boot_exists == datasetroot.to_s
  uii.info(" #{datasetroot}") if dataset_boot_exists == datasetroot.to_s
  execute(false, "#{@pfexec} zfs destroy -r #{datasetroot}") if dataset_boot_exists == datasetroot.to_s
  ## Insert Error Checking Here in case disk is busy
  uii.info(I18n.t('vagrant_zones.boot_dataset_nil')) unless dataset_boot_exists == datasetroot.to_s

  ## Destroy Additional Disks
  unless config.additional_disks.nil?
    disks = config.additional_disks
    disks.each do |disk|
      diskpath = "#{disk['array']}/#{disk['dataset']}/#{name}"
      addataset = "#{diskpath}/#{disk['volume_name']}"
      cinfo = addataset.to_s
      dataset_exists = execute(false, "#{@pfexec} zfs list | grep #{addataset} | awk '{ print $1 }' || true")
      uii.info(I18n.t('vagrant_zones.bhyve_zone_dataset_additional_volume_destroy')) if dataset_exists == addataset
      uii.info(" #{cinfo}") if dataset_exists == addataset
      execute(false, "#{@pfexec} zfs destroy -r #{addataset}") if dataset_exists == addataset
      uii.info(I18n.t('vagrant_zones.additional_dataset_nil')) unless dataset_exists == addataset
      cinfo = diskpath.to_s
      # Remove the per-zone parent of this disk, but only when it is not
      # the zone root path (that one is handled last, below).
      addsrtexists = execute(false, "#{@pfexec} zfs list | grep #{diskpath} | awk '{ print $1 }' | head -n 1 || true")
      uii.info(I18n.t('vagrant_zones.addtl_volume_destroy_root')) if addsrtexists == diskpath && addsrtexists != zp.to_s
      uii.info(" #{cinfo}") if addsrtexists == diskpath && addsrtexists != zp.to_s
      execute(false, "#{@pfexec} zfs destroy -r #{diskpath}") if addsrtexists == diskpath && addsrtexists != zp.to_s
    end
  end

  ## Check if root dataset exists
  # `grep -v path` excludes the zone's .../path child from the match.
  dataset_root_exists = execute(false, "#{@pfexec} zfs list | grep #{zp} | awk '{ print $1 }' | grep -v path || true")
  uii.info(I18n.t('vagrant_zones.destroy_root_dataset')) if dataset_root_exists == zp.to_s
  uii.info(" #{zp}") if dataset_root_exists == zp.to_s
  execute(false, "#{@pfexec} zfs destroy -r #{zp}") if dataset_root_exists == zp.to_s
  uii.info(I18n.t('vagrant_zones.root_dataset_nil')) unless dataset_root_exists == zp.to_s
end
806
+
807
## zonecfg function for bhyve
# Generates the base zonecfg for a bhyve-branded zone: zonepath, brand,
# autoboot, an exclusive IP stack, the bhyve device-model attributes and
# the boot zvol device. No-op for any other brand.
def zonecfgbhyve(uii, name, config, zcfg)
  return unless config.brand == 'bhyve'

  bootconfigs = config.boot
  datasetpath = "#{bootconfigs['array']}/#{bootconfigs['dataset']}/#{name}"
  datasetroot = "#{datasetpath}/#{bootconfigs['volume_name']}"
  # Each stanza is fed to zonecfg as one quoted command string, in order.
  stanzas = [
    "create ; set zonepath=/#{datasetpath}/path",
    "set brand=#{config.brand}",
    "set autoboot=#{config.autoboot}",
    'set ip-type=exclusive',
    "add attr; set name=acpi; set value=#{config.acpi}; set type=string; end;",
    "add attr; set name=ram; set value=#{config.memory}; set type=string; end;",
    "add attr; set name=bootrom; set value=#{firmware(uii)}; set type=string; end;",
    "add attr; set name=hostbridge; set value=#{config.hostbridge}; set type=string; end;",
    "add attr; set name=diskif; set value=#{config.diskif}; set type=string; end;",
    "add attr; set name=netif; set value=#{config.netif}; set type=string; end;",
    "add attr; set name=bootdisk; set value=#{datasetroot.delete_prefix('/')}; set type=string; end;",
    "add attr; set name=type; set value=#{config.os_type}; set type=string; end;",
    "add attr; set name=xhci; set value=#{config.xhci_enabled}; set type=string; end;",
    "add device; set match=/dev/zvol/rdsk/#{datasetroot}; end;"
  ]
  stanzas.each { |stanza| execute(false, %(#{zcfg}"#{stanza}")) }
  uii.info(I18n.t('vagrant_zones.bhyve_zone_config_gen'))
end
830
+
831
## zonecfg function for lx
# Generates the base zonecfg for an lx-branded zone: zonepath, brand,
# autoboot, kernel version, capped memory, the delegated boot dataset and
# an lwp cap. No-op for any other brand.
def zonecfglx(uii, name, config, zcfg)
  return unless config.brand == 'lx'

  datasetpath = "#{config.boot['array']}/#{config.boot['dataset']}/#{name}"
  datasetroot = "#{datasetpath}/#{config.boot['volume_name']}"
  uii.info(I18n.t('vagrant_zones.lx_zone_config_gen'))
  # Capture the public network's IP/netmask/gateway into instance state for
  # later use by the networking setup.
  @machine.config.vm.networks.each do |adaptertype, opts|
    next unless adaptertype.to_s == 'public_network'

    @ip = opts[:ip].to_s
    cinfo = "#{opts[:ip]}/#{opts[:netmask]}"
    @network = NetAddr.parse_net(cinfo)
    @defrouter = opts[:gateway]
  end
  execute(false, %(#{zcfg}"create ; set zonepath=/#{datasetpath}/path"))
  execute(false, %(#{zcfg}"set brand=#{config.brand}"))
  execute(false, %(#{zcfg}"set autoboot=#{config.autoboot}"))
  execute(false, %(#{zcfg}"add attr; set name=kernel-version; set value=#{config.kernel}; set type=string; end;"))
  cmss = ' add capped-memory; set physical='
  # NOTE(review): swap is set from config.kernel here, which looks like a
  # copy/paste slip — swap sizes are usually derived from config.memory.
  # Confirm the intended value before changing behavior.
  execute(false, %(#{zcfg + cmss}"#{config.memory}; set swap=#{config.kernel}; set locked=#{config.memory}; end;"))
  execute(false, %(#{zcfg}"add dataset; set name=#{datasetroot}; end;"))
  execute(false, %(#{zcfg}"set max-lwps=2000"))
end
855
+
856
## zonecfg function for KVM
# Placeholder for KVM zone configuration; currently only echoes the
# computed boot volume path when debugging. No-op for other brands.
def zonecfgkvm(uii, name, config, _zcfg)
  return unless config.brand == 'kvm'

  boot_cfg = config.boot
  config = @machine.provider_config # re-read provider config (kept as-is)
  zvol_path = "#{boot_cfg['array']}/#{boot_cfg['dataset']}/#{name}/#{boot_cfg['volume_name']}"
  uii.info(zvol_path) if config.debug
  ###### RESERVED ######
end
867
+
868
## zonecfg function for Shared Disk Configurations
# Mounts the host's configured shared directory into the zone at /vagrant
# via a lofs filesystem. No-op unless shared disks are enabled.
def zonecfgshareddisks(uii, _name, config, zcfg)
  return unless config.shared_disk_enabled

  # BUG FIX: the original interpolated `path.path`, an undefined local that
  # raised NameError whenever shared disks were enabled; the message is
  # meant to show the configured shared directory.
  uii.info(I18n.t('vagrant_zones.setting_alt_shared_disk_configurations') + config.shared_dir.to_s)
  execute(false, %(#{zcfg}"add fs; set dir=/vagrant; set special=#{config.shared_dir}; set type=lofs; end;"))
end
875
+
876
## zonecfg function for CPU Configurations
# Writes the vcpus attr for bhyve/kvm zones: either a flat count ('simple')
# or a sockets/cores/threads topology string ('complex').
def zonecfgcpu(uii, _name, config, zcfg)
  uii.info(I18n.t('vagrant_zones.zonecfgcpu')) if config.debug
  return unless %w[bhyve kvm].include?(config.brand)

  case config.cpu_configuration
  when 'simple'
    execute(false, %(#{zcfg}"add attr; set name=vcpus; set value=#{config.cpus}; set type=string; end;"))
  when 'complex'
    topology = config.complex_cpu_conf[0]
    cstring = %(sockets=#{topology['sockets']},cores=#{topology['cores']},threads=#{topology['threads']})
    execute(false, %(#{zcfg}'add attr; set name=vcpus; set value="#{cstring}"; set type=string; end;'))
  end
end
887
+
888
## zonecfg function for CDROM Configurations
# Attaches each configured CD-ROM image to the zone: an attr naming the
# device (cdrom, cdrom1, cdrom2, ...) plus a read-only lofs mount of the
# image path. No-op when no cdroms are configured.
def zonecfgcdrom(uii, _name, config, zcfg)
  return if config.cdroms.nil?

  ro_opts = 'set type=lofs; add options nodevices; add options ro; end;'
  config.cdroms.each_with_index do |cdrom, idx|
    uii.info(I18n.t('vagrant_zones.setting_cd_rom_configurations'))
    uii.info(" #{cdrom['path']}")
    devname = idx.zero? ? 'cdrom' : "cdrom#{idx}"
    execute(false, %(#{zcfg}"add attr; set name=#{devname}; set value=#{cdrom['path']}; set type=string; end;"))
    execute(false, %(#{zcfg}"add fs; set dir=#{cdrom['path']}; set special=#{cdrom['path']}; #{ro_opts}"))
  end
end
905
+
906
## zonecfg function for PCI Configurations
# Reserved: PCI passthrough is not implemented yet; in debug mode we only
# announce that this step ran.
def zonecfgpci(uii, _name, config, _zcfg)
  # BUG FIX: the original read `return if config.debug` followed by a log
  # line guarded with `if config.debug`, so the log could never execute and
  # the method bailed out precisely when debugging. Guard is now inverted.
  return unless config.debug

  uii.info(I18n.t('vagrant_zones.pci'))
  ##### RESERVED
end
913
+
914
## zonecfg function for AdditionalDisks
# Exposes each additional zvol to the zone as a device plus a named attr
# (disk, disk1, disk2, ...) recording the backing dataset. No-op when no
# additional disks are configured.
def zonecfgadditionaldisks(uii, name, config, zcfg)
  return if config.additional_disks.nil?

  config.additional_disks.each_with_index do |disk, idx|
    dset = "#{disk['array']}/#{disk['dataset']}/#{name}/#{disk['volume_name']}"
    uii.info(I18n.t('vagrant_zones.setting_additional_disks_configurations'))
    uii.info(" #{dset}, #{disk['size']}")
    devname = idx.zero? ? 'disk' : "disk#{idx}"
    execute(false, %(#{zcfg}"add device; set match=/dev/zvol/rdsk/#{dset}; end;"))
    execute(false, %(#{zcfg}"add attr; set name=#{devname}; set value=#{dset}; set type=string; end;"))
  end
end
931
+
932
## zonecfg function for Console Access
# Configures the zone's console attribute. 'vnc'/'webvnc' collapse to a
# vnc console turned 'on'; a bare 'console' with no explicit port gets a
# unix socket; anything else uses the configured consoleport verbatim.
# No-op when the console is nil or 'disabled'.
def zonecfgconsole(uii, _name, config, zcfg)
  return if config.console.nil? || config.console == 'disabled'

  port = if %w[console].include?(config.console) && config.consoleport.nil?
           'socket,/tmp/vm.com1'
         elsif %w[webvnc].include?(config.console) || %w[vnc].include?(config.console)
           # Note: mutates config.console so later readers see 'vnc'.
           config.console = 'vnc'
           'on'
         else
           config.consoleport
         end
  # NOTE(review): any console type other than 'console'/'vnc'/'webvnc' with
  # a nil consoleport leaves `port` nil, and the += below would raise
  # NoMethodError — confirm whether that combination can occur.
  port += ',wait' if config.console_onboot
  cp = config.consoleport
  ch = config.consolehost
  cb = config.console_onboot
  ct = config.console
  cinfo = "Console type: #{ct}, State: #{port}, Port: #{cp}, Host: #{ch}, Wait: #{cb}"
  uii.info(I18n.t('vagrant_zones.setting_console_access'))
  uii.info(" #{cinfo}")
  execute(false, %(#{zcfg}"add attr; set name=#{ct}; set value=#{port}; set type=string; end;"))
end
954
+
955
## zonecfg function for Cloud-init
# Enables cloud-init for the zone and writes the optional dns-domain,
# password, resolvers and sshkey attrs for any that are configured.
# No-op unless cloud-init is enabled.
def zonecfgcloudinit(uii, _name, config, zcfg)
  return unless config.cloud_init_enabled

  cloudconfig = config.cloud_init_conf.nil? ? 'on' : config.cloud_init_conf.to_s
  uii.info(I18n.t('vagrant_zones.setting_cloud_init_access'))
  execute(false, %(#{zcfg}"add attr; set name=cloud-init; set value=#{cloudconfig}; set type=string; end;"))

  # Shared writer for the optional attrs: announce and set only when the
  # config value is present.
  set_attr = lambda do |i18n_key, attr_name, value|
    next if value.nil?

    uii.info(I18n.t(i18n_key))
    uii.info(" #{value}")
    execute(false, %(#{zcfg}"add attr; set name=#{attr_name}; set value=#{value}; set type=string; end;"))
  end
  set_attr.call('vagrant_zones.setting_cloud_dnsdomain', 'dns-domain', config.cloud_init_dnsdomain)
  set_attr.call('vagrant_zones.setting_cloud_password', 'password', config.cloud_init_password)
  set_attr.call('vagrant_zones.setting_cloud_resolvers', 'resolvers', config.cloud_init_resolvers)
  set_attr.call('vagrant_zones.setting_cloud_ssh_key', 'sshkey', config.cloud_init_sshkey)
end
985
+
986
## zonecfg function for for Networking
# Adds the zone's net resource for the previously-created vnic. For bhyve,
# allowed-address is pinned only when cloud-init is enabled AND an
# allowed_address is configured.
def zonecfgnicconfig(uii, opts)
  allowed_address = allowedaddress(uii, opts)
  defrouter = opts[:gateway].to_s
  vnic_name = vname(uii, opts)
  config = @machine.provider_config
  uii.info(I18n.t('vagrant_zones.vnic_setup'))
  uii.info(" #{vnic_name}")
  strt = "#{@pfexec} zonecfg -z #{@machine.name} "
  cie = config.cloud_init_enabled
  aa = config.allowed_address
  case config.brand
  when 'lx'
    shrtstr1 = %(set allowed-address=#{allowed_address}; add property (name=gateway,value="#{defrouter}"); )
    shrtstr2 = %(add property (name=ips,value="#{allowed_address}"); add property (name=primary,value="true"); end;)
    # BUG FIX: the original command closed a double quote it never opened
    # (unbalanced shell quoting) and issued its `set`/`add property` calls
    # outside any resource scope; wrap everything in one quoted `add net`
    # stanza targeting the vnic. NOTE(review): verify against a live lx
    # zone — the original line could not have executed successfully.
    execute(false, %(#{strt}"add net; set physical=#{vnic_name}; set global-nic=auto; #{shrtstr1} #{shrtstr2}"))
  when 'bhyve'
    execute(false, %(#{strt}"add net; set physical=#{vnic_name}; end;")) unless cie
    execute(false, %(#{strt}"add net; set physical=#{vnic_name}; set allowed-address=#{allowed_address}; end;")) if cie && aa
    execute(false, %(#{strt}"add net; set physical=#{vnic_name}; end;")) if cie && !aa
  end
end
1008
+
1009
# This helps us set the zone configurations for the zone
# Runs every zonecfg generation step in order: the brand-specific base
# config (only the step matching config.brand does anything), then each
# feature area, and finally the NIC configuration phase.
def zonecfg(uii)
  name = @machine.name
  config = @machine.provider_config
  zcfg = "#{@pfexec} zonecfg -z #{name} "
  steps = %i[
    zonecfglx
    zonecfgbhyve
    zonecfgkvm
    zonecfgshareddisks
    zonecfgcpu
    zonecfgcdrom
    zonecfgpci
    zonecfgadditionaldisks
    zonecfgconsole
    zonecfgcloudinit
  ]
  steps.each { |step| send(step, uii, name, config, zcfg) }
  ## Nic Configurations
  network(uii, 'config')
end
1037
+
1038
## Setup vnics for Zones using Zlogin
# Resolves the vnic's MAC (reading it back from dladm when configured as
# 'auto', normalized to two lowercase hex digits per octet), then delegates
# to the Windows or netplan guest-side setup based on os_type.
def zonenicstpzloginsetup(uii, opts, config)
  vnic_name = vname(uii, opts)
  mac = macaddress(uii, opts)
  if mac == 'auto'
    raw = execute(false, "#{@pfexec} dladm show-vnic #{vnic_name} | tail -n +2 | awk '{ print $4 }'")
    mac = raw.split(':').map { |octet| format('%02x', octet.to_i(16)) }.join(':')
  end

  if config.os_type.to_s.match(/windows/)
    zoneniczloginsetup_windows(uii, opts, mac)
  else
    zoneniczloginsetup_netplan(uii, opts, mac)
  end
end
1054
+
1055
## This setups the Netplan based OS Networking via Zlogin
# Writes a netplan YAML for the zone's vnic over the serial console and
# applies it: the interface is matched by MAC and renamed to the vnic name;
# the prefix length is derived by popcounting the dotted netmask.
def zoneniczloginsetup_netplan(uii, opts, mac)
  # Drop any stale netplan configs in the guest first.
  zlogin(uii, 'rm -rf /etc/netplan/*.yaml')
  ip = ipaddress(uii, opts)
  vnic_name = vname(uii, opts)
  servers = dnsservers(uii)
  # Count the 1-bits of the netmask to get the CIDR prefix length.
  shrtsubnet = IPAddr.new(opts[:netmask].to_s).to_i.to_s(2).count('1').to_s
  defrouter = opts[:gateway].to_s
  uii.info(I18n.t('vagrant_zones.configure_interface_using_vnic'))
  uii.info(" #{vnic_name}")
  # NOTE(review): YAML is indentation-sensitive; the space-indents embedded
  # in these literals are exactly what lands in the guest file — verify
  # they nest correctly on a real guest before reformatting.
  netplan1 = %(network:\n version: 2\n ethernets:\n #{vnic_name}:\n match:\n macaddress: #{mac}\n)
  netplan2 = %( dhcp-identifier: mac\n dhcp4: #{opts[:dhcp]}\n dhcp6: #{opts[:dhcp6]}\n)
  netplan3 = %( set-name: #{vnic_name}\n addresses: [#{ip}/#{shrtsubnet}]\n gateway4: #{defrouter}\n)
  netplan4 = %( nameservers:\n addresses: [#{servers[0]['nameserver']} , #{servers[1]['nameserver']}] )
  netplan = netplan1 + netplan2 + netplan3 + netplan4
  cmd = "echo '#{netplan}' > /etc/netplan/#{vnic_name}.yaml"
  infomessage = I18n.t('vagrant_zones.netplan_applied_static') + "/etc/netplan/#{vnic_name}.yaml"
  uii.info(infomessage) if zlogin(uii, cmd)
  ## Apply the Configuration
  uii.info(I18n.t('vagrant_zones.netplan_applied')) if zlogin(uii, 'netplan apply')
end
1076
+
1077
# This ensures the zone is safe to boot
# Preflight checks: refuses to run alongside VirtualBox, then performs
# per-brand sanity checks. For bhyve it also fetches the bhhwcompat tool
# when absent, enforces a minimum OmniOS release, and verifies CPU
# compatibility.
def check_zone_support(uii)
  uii.info(I18n.t('vagrant_zones.preflight_checks'))
  config = @machine.provider_config
  ## Detect if Virtualbox is Running
  ## LX, KVM, and Bhyve cannot run conncurently with Virtualbox:
  ### https://illumos.topicbox-beta.com/groups/omnios-discuss/Tce3bbd08cace5349-M5fc864e9c1a7585b94a7c080
  uii.info(I18n.t('vagrant_zones.vbox_run_check'))
  result = execute(true, "#{@pfexec} VBoxManage list runningvms")
  # NOTE(review): a zero exit status only means VBoxManage ran successfully,
  # not necessarily that any VM is running — confirm the intended condition.
  raise Errors::VirtualBoxRunningConflictDetected if result.zero?

  ## https://man.omnios.org/man5/brands
  case config.brand
  when 'lx'
    uii.info(I18n.t('vagrant_zones.lx_check'))
  when 'ipkg'
    uii.info(I18n.t('vagrant_zones.ipkg_check'))
  when 'lipkg'
    uii.info(I18n.t('vagrant_zones.lipkg_check'))
  when 'pkgsrc'
    uii.info(I18n.t('vagrant_zones.pkgsrc_check'))
  when 'sparse'
    uii.info(I18n.t('vagrant_zones.sparse_check'))
  when 'kvm'
    ## https://man.omnios.org/man5/kvm
    uii.info(I18n.t('vagrant_zones.kvm_check'))
  when 'illumos'
    uii.info(I18n.t('vagrant_zones.illumos_check'))
  when 'bhyve'
    ## https://man.omnios.org/man5/bhyve
    ## Check for bhhwcompat
    result = execute(true, "#{@pfexec} test -f /usr/sbin/bhhwcompat ; echo $?")
    if result == 1
      bhhwcompaturl = 'https://downloads.omnios.org/misc/bhyve/bhhwcompat'
      execute(true, "#{@pfexec} curl -o /usr/sbin/bhhwcompat #{bhhwcompaturl} && #{@pfexec} chmod +x /usr/sbin/bhhwcompat")
      result = execute(true, "#{@pfexec} test -f /usr/sbin/bhhwcompat ; echo $?")
      # NOTE(review): the existence probe above treats `result == 1` as
      # "missing", but this re-check raises when result is ZERO — it looks
      # inverted relative to the probe. Confirm what execute(true, ...)
      # returns for this command chain before changing either comparison.
      raise Errors::MissingCompatCheckTool if result.zero?
    end

    # Check whether OmniOS version is lower than r30
    cutoff_release = '1510380'
    cutoff_release = cutoff_release[0..-2].to_i # drop trailing digit -> 151038
    uii.info(I18n.t('vagrant_zones.bhyve_check'))
    uii.info(" #{cutoff_release}")
    release = File.open('/etc/release', &:readline)
    release = release.scan(/\w+/).values_at(-1) # last word, e.g. "r151038x"
    release = release[0][1..-2].to_i            # strip leading 'r' and trailing letter
    raise Errors::SystemVersionIsTooLow if release < cutoff_release

    # Check Bhyve compatability
    uii.info(I18n.t('vagrant_zones.bhyve_compat_check'))
    result = execute(false, "#{@pfexec} bhhwcompat -s")
    raise Errors::MissingBhyve if result.length == 1
  end
end
1132
+
1133
# This helps us set up the networking of the VM
# Announces network setup for any branded, non-cloud-init zone and runs the
# guest-side 'setup' networking phase for bhyve zones only.
def setup(uii)
  config = @machine.provider_config
  cloud_init = config.cloud_init_enabled
  uii.info(I18n.t('vagrant_zones.network_setup')) if config.brand && !cloud_init
  network(uii, 'setup') if config.brand == 'bhyve' && !cloud_init
end
1139
+
1140
## this allows us a terminal to pass commands and manipulate the VM OS via serial/tty
# Drives the zone's serial console (zlogin -C) through first boot:
# optionally answers a ZFS-root unlock prompt, waits for the configured
# "booted" marker, then logs in as the vagrant user and escalates to root
# before detaching. Raises Timeout::Error if the whole exchange exceeds
# config.setup_wait seconds.
def zloginboot(uii)
  name = @machine.name
  config = @machine.provider_config
  # Prompt markers, each overridable from provider config.
  lcheck = config.lcheck
  lcheck = ':~' if config.lcheck.nil?       # shell-prompt marker after login
  alcheck = config.alcheck
  alcheck = 'login:' if config.alcheck.nil? # login-prompt marker
  bstring = ' OK ' if config.booted_string.nil?
  bstring = config.booted_string unless config.booted_string.nil?
  zunlockboot = 'Importing ZFS root pool'
  zunlockbootkey = config.zunlockbootkey unless config.zunlockbootkey.nil?
  pcheck = 'Password:'
  uii.info(I18n.t('vagrant_zones.automated-zlogin'))
  PTY.spawn("pfexec zlogin -C #{name}") do |zlogin_read, zlogin_write, pid|
    Timeout.timeout(config.setup_wait) do
      rsp = []

      # Read console lines until the boot marker appears; along the way,
      # answer the ZFS unlock prompt if it shows up.
      loop do
        zlogin_read.expect(/\r\n/) { |line| rsp.push line }
        uii.info(rsp[-1]) if config.debug_boot
        sleep(2) if rsp[-1].to_s.match(/#{zunlockboot}/)
        zlogin_write.printf("#{zunlockbootkey}\n") if rsp[-1].to_s.match(/#{zunlockboot}/)
        zlogin_write.printf("\n") if rsp[-1].to_s.match(/#{zunlockboot}/)
        uii.info(I18n.t('vagrant_zones.automated-zbootunlock')) if rsp[-1].to_s.match(/#{zunlockboot}/)
        # Boot marker seen: settle, nudge the console, and move on to login.
        sleep(15) if rsp[-1].to_s.match(/#{bstring}/)
        zlogin_write.printf("\n") if rsp[-1].to_s.match(/#{bstring}/)
        break if rsp[-1].to_s.match(/#{bstring}/)
      end

      # Username prompt -> send the vagrant user.
      if zlogin_read.expect(/#{alcheck}/)
        uii.info(I18n.t('vagrant_zones.automated-zlogin-user'))
        zlogin_write.printf("#{user(@machine)}\n")
        sleep(5)
      end

      # Password prompt -> send the vagrant user's password.
      if zlogin_read.expect(/#{pcheck}/)
        uii.info(I18n.t('vagrant_zones.automated-zlogin-pass'))
        zlogin_write.printf("#{vagrantuserpass(@machine)}\n")
        sleep(10)
      end

      # At a shell prompt: become root, then hang up the console session.
      zlogin_write.printf("\n")
      if zlogin_read.expect(/#{lcheck}/)
        uii.info(I18n.t('vagrant_zones.automated-zlogin-root'))
        zlogin_write.printf("sudo su\n")
        sleep(10)
        Process.kill('HUP', pid)
      end
    end
  end
end
1192
+
1193
# Waits (with retries) until the machine's communicator reports ready when
# booting via DHCP, timing the wait and reporting it to the UI. Returns
# immediately on interruption.
def natloginboot(uii, metrics, interrupted)
  metrics ||= {}
  metrics['instance_dhcp_ssh_time'] = Util::Timer.time do
    retryable(on: Errors::TimeoutError, tries: 60) do
      # If we're interrupted don't worry about waiting
      next if interrupted

      # Busy-wait until the communicator is reachable or we are interrupted.
      loop do
        break if interrupted || @machine.communicate.ready?
      end
    end
  end
  uii.info(I18n.t('vagrant_zones.dhcp_boot_ready') + " in #{metrics['instance_dhcp_ssh_time']} Seconds")
end
1208
+
1209
# This helps up wait for the boot of the vm by using zlogin
# Waits for the zone to finish booting. bhyve zones are driven over the
# serial console (or via DHCP polling) unless cloud-init handles setup;
# lx zones get a vagrant user provisioned over zlogin when missing.
def waitforboot(uii, metrics, interrupted)
  config = @machine.provider_config
  uii.info(I18n.t('vagrant_zones.wait_for_boot'))
  case config.brand
  when 'bhyve'
    return if config.cloud_init_enabled

    zloginboot(uii) if config.setup_method == 'zlogin' && !config.os_type.to_s.match(/windows/)
    zlogin_win_boot(uii) if config.setup_method == 'zlogin' && config.os_type.to_s.match(/windows/)
    natloginboot(uii, metrics, interrupted) if config.setup_method == 'dhcp'
  when 'lx'
    unless user_exists?(uii, config.vagrant_user)
      # zlogincommand(uii, %('echo nameserver 1.1.1.1 >> /etc/resolv.conf'))
      # zlogincommand(uii, %('echo nameserver 1.0.0.1 >> /etc/resolv.conf'))
      zlogincommand(uii, 'useradd -m -s /bin/bash -U vagrant')
      zlogincommand(uii, 'echo "vagrant ALL=(ALL:ALL) NOPASSWD:ALL" \\> /etc/sudoers.d/vagrant')
      zlogincommand(uii, 'mkdir -p /home/vagrant/.ssh')
      key_url = 'https://raw.githubusercontent.com/hashicorp/vagrant/master/keys/vagrant.pub'
      # BUG FIX: curl's -O flag takes no argument (it derives the filename
      # from the URL), so `-O <path>` treated the path as a second URL and
      # never wrote the key; the output path must be given with -o.
      zlogincommand(uii, "curl #{key_url} -o /home/vagrant/.ssh/authorized_keys")

      id_rsa = 'https://raw.githubusercontent.com/hashicorp/vagrant/master/keys/vagrant'
      # BUG FIX: same -O/-o confusion as above.
      command = "#{@pfexec} curl #{id_rsa} -o id_rsa"
      Util::Subprocess.new command do |_stdout, stderr, _thread|
        uii.rewriting do |uisp|
          uisp.clear_line
          uisp.info(I18n.t('vagrant_zones.importing_vagrant_key'), new_line: false)
          uisp.report_progress(stderr, 100, false)
        end
      end
      uii.clear_line
      zlogincommand(uii, 'chown -R vagrant:vagrant /home/vagrant/.ssh')
      zlogincommand(uii, 'chmod 600 /home/vagrant/.ssh/authorized_keys')
    end
  end
end
1245
+
1246
## This setups the Windows Networking via Zlogin
# Pushes static IPv4 settings into a Windows guest over the serial console:
# renames the default adapter to the vnic name, then sets the address,
# gateway and both DNS servers via netsh, reporting each applied step.
def zoneniczloginsetup_windows(uii, opts, _mac)
  ip = ipaddress(uii, opts)
  vnic_name = vname(uii, opts)
  servers = dnsservers(uii)
  defrouter = opts[:gateway].to_s
  uii.info(I18n.t('vagrant_zones.configure_win_interface_using_vnic'))
  sleep(60) # give Windows time to finish booting before poking netsh

  ## Insert code to get the list of interfaces by mac address in order
  ## to set the proper VNIC name if using multiple adapters
  netsh_steps = {
    'vagrant_zones.win_applied_rename_adapter' =>
      %(netsh interface set interface name = "Ethernet" newname = "#{vnic_name}"),
    'vagrant_zones.win_applied_static' =>
      %(netsh interface ipv4 set address name="#{vnic_name}" static #{ip} #{opts[:netmask]} #{defrouter}),
    'vagrant_zones.win_applied_dns1' =>
      %(netsh int ipv4 set dns name="#{vnic_name}" static #{servers[0]['nameserver']} primary validate=no),
    'vagrant_zones.win_applied_dns2' =>
      %(netsh int ipv4 add dns name="#{vnic_name}" #{servers[1]['nameserver']} index=2 validate=no)
  }
  netsh_steps.each { |msg_key, cmd| uii.info(I18n.t(msg_key)) if zlogin(uii, cmd) }
end
1267
+
1268
# Boots a Windows guest via its Special Administration Console (SAC) over
# the zone's serial console: starts a cmd channel, switches into it, logs
# in as Administrator, and detaches once a cmd prompt is reachable.
# Raises Timeout::Error if the exchange exceeds config.setup_wait seconds.
def zlogin_win_boot(uii)
  ## use Windows SAC to setup networking
  name = @machine.name
  config = @machine.provider_config
  # SAC event/prompt markers we expect, in order.
  event_cmd_available = 'EVENT: The CMD command is now available'
  event_channel_created = 'EVENT: A new channel has been created'
  channel_access_prompt = 'Use any other key to view this channel'
  cmd = 'system32>'
  uii.info(I18n.t('vagrant_zones.automated-windows-zlogin'))
  PTY.spawn("pfexec zlogin -C #{name}") do |zlogin_read, zlogin_write, pid|
    Timeout.timeout(config.setup_wait) do
      # The first CMD-available event fires during early boot; skip it.
      uii.info(I18n.t('vagrant_zones.windows_skip_first_boot')) if zlogin_read.expect(/#{event_cmd_available}/)
      sleep(3)
      if zlogin_read.expect(/#{event_cmd_available}/)
        uii.info(I18n.t('vagrant_zones.windows_start_cmd'))
        zlogin_write.printf("cmd\n")
      end
      if zlogin_read.expect(/#{event_channel_created}/)
        uii.info(I18n.t('vagrant_zones.windows_access_session'))
        # ESC+TAB switches SAC to the newly created channel.
        zlogin_write.printf("\e\t")
      end
      if zlogin_read.expect(/#{channel_access_prompt}/)
        uii.info(I18n.t('vagrant_zones.windows_access_session_presskey'))
        zlogin_write.printf('o')
      end
      if zlogin_read.expect(/Username:/)
        uii.info(I18n.t('vagrant_zones.windows_enter_username'))
        zlogin_write.printf("Administrator\n")
      end
      if zlogin_read.expect(/Domain/)
        uii.info(I18n.t('vagrant_zones.windows_enter_domain'))
        zlogin_write.printf("\n")
      end
      if zlogin_read.expect(/Password/)
        uii.info(I18n.t('vagrant_zones.windows_enter_password'))
        # NOTE(review): hard-coded Administrator password baked into the
        # box image, presumably — confirm and consider making configurable.
        zlogin_write.printf("P@ssWord22\n")
      end
      if zlogin_read.expect(/#{cmd}/)
        uii.info(I18n.t('vagrant_zones.windows_cmd_accessible'))
        sleep(5)
        Process.kill('HUP', pid)
      end
    end
  end
end
1313
+
1314
# This gives us a console to the VM to issue commands
# Runs `cmd` inside the guest over the serial console and blocks until an
# echoed "Error Code: 0" confirms success. Raises Errors::ConsoleFailed on
# a non-zero code and Timeout::Error after config.setup_wait seconds.
def zlogin(uii, cmd)
  name = @machine.name
  config = @machine.provider_config
  rsp = []
  PTY.spawn("pfexec zlogin -C #{name}") do |zread, zwrite, pid|
    Timeout.timeout(config.setup_wait) do
      # After the command we echo its exit status so completion is
      # detectable in the console stream.
      error_check = "echo \"Error Code: $?\"\n"
      # NOTE(review): this sends literal %%ERRORLEVEL%% to the guest —
      # interactive cmd.exe expands single-percent %ERRORLEVEL% (%% is the
      # batch-file escape); confirm the guest actually echoes a number.
      error_check = "echo Error Code: %%ERRORLEVEL%% \r\n\r\n" if config.os_type.to_s.match(/windows/)
      runonce = true
      loop do
        zread.expect(/\n/) { |line| rsp.push line }
        puts(rsp[-1].to_s) if config.debug
        # Send the command and the status echo exactly once, after the
        # first console line arrives.
        zwrite.printf("#{cmd}\r\n") if runonce
        zwrite.printf(error_check.to_s) if runonce
        runonce = false
        break if rsp[-1].to_s.match(/Error Code: 0/)

        em = "#{cmd} \nFailed with ==> #{rsp[-1]}"
        uii.info(I18n.t('vagrant_zones.console_failed') + em) if rsp[-1].to_s.match(/Error Code: \b(?!0\b)\d{1,4}\b/)
        raise Errors::ConsoleFailed if rsp[-1].to_s.match(/Error Code: \b(?!0\b)\d{1,4}\b/)
      end
    end
    Process.kill('HUP', pid)
  end
end
1340
+
1341
# This checks if the user exists on the VM, usually for LX zones
# Returns true when `id -u <user>` succeeds inside the zone (exit 0).
def user_exists?(uii, user = 'vagrant')
  name = @machine.name
  config = @machine.provider_config
  ret = execute(true, "#{@pfexec} zlogin #{name} id -u #{user}")
  uii.info(I18n.t('vagrant_zones.userexists')) if config.debug
  # Idiom: the exit-status comparison IS the predicate — the original
  # `return true if ret.zero?; false` said the same thing in three lines.
  ret.zero?
end
1351
+
1352
# This gives the user a terminal console
# Runs a single command inside the zone via non-interactive zlogin.
def zlogincommand(uii, cmd)
  zone = @machine.name
  uii.info(I18n.t('vagrant_zones.zonelogincmd')) if @machine.provider_config.debug
  execute(false, "#{@pfexec} zlogin #{zone} #{cmd}")
end
1359
+
1360
# This filters the vagrantuser
# Returns the configured vagrant user name, defaulting to 'vagrant' when
# none is configured.
def user(machine)
  config = machine.provider_config
  config.vagrant_user.nil? ? 'vagrant' : config.vagrant_user
end
1367
+
1368
# This filters the userprivatekeypath
# Returns the configured private key path as a string. When no key path is
# configured, downloads the well-known insecure vagrant key to ./id_rsa and
# returns that path. The optional `uii` (new, defaulted for backward
# compatibility) receives download progress; the original referenced an
# undefined `uii` local, which raised NameError on this branch.
def userprivatekeypath(machine, uii = nil)
  config = machine.provider_config
  userkey = config.vagrant_user_private_key_path.to_s
  # BUG FIX: the original tested `.to_s.nil?`, which is never true
  # (nil.to_s == ''), so the fallback below was unreachable; test the raw
  # config value instead.
  if config.vagrant_user_private_key_path.nil?
    id_rsa = 'https://raw.githubusercontent.com/hashicorp/vagrant/master/keys/vagrant'
    file = './id_rsa'
    # BUG FIX: curl's -O takes no argument; -o names the output file.
    command = "#{@pfexec} curl #{id_rsa} -o #{file}"
    Util::Subprocess.new command do |_stdout, stderr, _thread|
      next if uii.nil?

      uii.rewriting do |uipkp|
        uipkp.clear_line
        uipkp.info(I18n.t('vagrant_zones.importing_vagrant_key'), new_line: false)
        uipkp.report_progress(stderr, 100, false)
      end
    end
    uii&.clear_line
    userkey = './id_rsa'
  end
  userkey
end
1388
+
1389
# Resolves the SSH port for the machine as a string, defaulting to '22'
# when the configured port is unset or zero.
#
# @param machine [Vagrant::Machine] machine whose provider_config is read
# @return [String] SSH port number
def sshport(machine)
  config = machine.provider_config
  # The original also checked `config.sshport.to_s.nil?`, which is dead
  # code (`to_s` never returns nil); nil is already covered because
  # nil.to_i is zero.
  return '22' if config.sshport.to_i.zero?

  config.sshport.to_s
end
1397
+
1398
# Maps the configured firmware_type string onto the bhyve firmware
# identifier. Patterns are checked in order, matching the original
# case/when regex behaviour; an unrecognised type yields ''.
#
# NOTE(review): the BHYVE_DEBUG -> UEFI_DEBUG and
# BHYVE_RELEASE_CSM -> BIOS_DEBUG rows look as if key and value may be
# swapped — confirm against the upstream firmware documentation. Also
# note 'compatability' is the (misspelled) config value actually matched.
def firmware(uii)
  cfg = @machine.provider_config
  uii.info(I18n.t('vagrant_zones.firmware')) if cfg.debug
  table = [
    [/compatability/, 'BHYVE_RELEASE_CSM'],
    [/UEFI/, 'BHYVE_RELEASE'],
    [/BIOS/, 'BHYVE_CSM'],
    [/BHYVE_DEBUG/, 'UEFI_DEBUG'],
    [/BHYVE_RELEASE_CSM/, 'BIOS_DEBUG']
  ]
  hit = table.find { |pattern, _fw| pattern === cfg.firmware_type }
  hit ? hit[1].to_s : ''
end
1416
+
1417
# Resolves the RDP port for the machine as a string.
#
# @param uii [Vagrant::UI] UI channel for debug output
# @return [String] RDP port ('' when unset)
def rdpport(uii)
  config = @machine.provider_config
  uii.info(I18n.t('vagrant_zones.rdpport')) if config.debug
  # The original guard `unless config.rdpport.to_s.nil?` was dead code:
  # `to_s` never returns nil, so the value was always returned (nil -> "").
  config.rdpport.to_s
end
1423
+
1424
# Resolves the vagrant user's password from the provider config.
#
# @param machine [Vagrant::Machine] machine whose provider_config is read
# @return [String, nil] the configured password (nil when unset)
def vagrantuserpass(machine)
  config = machine.provider_config
  # The original guard `unless config.vagrant_user_pass.to_s.nil?` was dead
  # code (`to_s` never returns nil); the value is always returned as-is.
  config.vagrant_user_pass
end
1430
+
1431
## List ZFS Snapshots, helper function to sort and display
# Pretty-prints one disk's snapshots as an aligned table: the first pass
# measures the widest value in each of the five columns, the second pass
# prints every row using those widths.
def zfssnaplistdisp(zfs_snapshots, uii, index, disk)
  uii.info("\n Disk Number: #{index}\n Disk Path: #{disk}")
  zfssnapshots = zfs_snapshots.split(/\n/).reverse
  zfssnapshots << "Snapshot\t\t\t\tUsed\tAvailable\tRefer\tPath"
  # BUG FIX: the original `pml, rml, aml, uml, sml = 0` assigned 0 only to
  # pml and nil to the other four; the code survived only because of the
  # `.to_i` calls below (nil.to_i == 0). Initialise all five explicitly.
  pml = rml = aml = uml = sml = 0
  zfssnapshots.reverse.each do |snapshot|
    ar = snapshot.gsub(/\s+/m, ' ').strip.split
    sml = ar[0].length.to_i if ar[0].length.to_i > sml.to_i
    uml = ar[1].length.to_i if ar[1].length.to_i > uml.to_i
    aml = ar[2].length.to_i if ar[2].length.to_i > aml.to_i
    rml = ar[3].length.to_i if ar[3].length.to_i > rml.to_i
    pml = ar[4].length.to_i if ar[4].length.to_i > pml.to_i
  end
  zfssnapshots.reverse.each_with_index do |snapshot, si|
    ar = snapshot.gsub(/\s+/m, ' ').strip.split
    strg1 = "%<sym>5s %<s>-#{sml}s %<u>-#{uml}s %<a>-#{aml}s %<r>-#{rml}s %<p>-#{pml}s"
    strg2 = "%<si>5s %<s>-#{sml}s %<u>-#{uml}s %<a>-#{aml}s %<r>-#{rml}s %<p>-#{pml}s"
    if si.zero?
      # Header row ('#' in the index column).
      puts format strg1.to_s, sym: '#', s: ar[0], u: ar[1], a: ar[2], r: ar[3], p: ar[4]
    else
      # NOTE(review): data rows are numbered `si - 2`, so the first data row
      # prints -1 — looks off by one or two; confirm the intended numbering.
      puts format strg2.to_s, si: si - 2, s: ar[0], u: ar[1], a: ar[2], r: ar[3], p: ar[4]
    end
  end
end
1456
+
1457
## List ZFS Snapshots
# Lists snapshots for each of the zone's datasets, filtered by
# opts[:dataset]: a numeric index selects one dataset by position, a path
# selects it by name, nil or 'all' selects every dataset.
def zfssnaplist(datasets, opts, uii)
  uii.info(I18n.t('vagrant_zones.zfs_snapshot_list'))
  filter = opts[:dataset]
  numeric = !filter.nil? && filter.scan(/\D/).empty?
  datasets.each_with_index do |disk, index|
    snapshots = execute(false, "#{@pfexec} zfs list -t snapshot | grep #{disk} || true")
    next if snapshots.nil?

    if numeric
      next unless filter.to_i == index
    else
      next unless [disk, nil, 'all'].include?(filter)
    end
    zfssnaplistdisp(snapshots, uii, index, disk)
  end
end
1475
+
1476
## Create ZFS Snapshots
# Creates a snapshot named opts[:snapshot_name] on the requested
# target(s): 'all' snapshots every dataset, a numeric string selects one
# dataset by index, and an explicit path is used only if it is one of the
# zone's datasets.
def zfssnapcreate(datasets, opts, uii)
  uii.info(I18n.t('vagrant_zones.zfs_snapshot_create'))
  snapname = opts[:snapshot_name]
  target = opts[:dataset]
  if target == 'all'
    datasets.each do |disk|
      uii.info(" - #{disk}@#{snapname}")
      execute(false, "#{@pfexec} zfs snapshot #{disk}@#{snapname}")
    end
  else
    numeric = !target.nil? && target.scan(/\D/).empty?
    if numeric
      datasets.each_with_index do |disk, index|
        next unless target.to_i == index.to_i

        execute(false, "#{@pfexec} zfs snapshot #{disk}@#{snapname}")
        uii.info(" - #{disk}@#{snapname}")
      end
    elsif datasets.include?(target)
      execute(false, "#{@pfexec} zfs snapshot #{target}@#{snapname}")
      uii.info(" - #{target}@#{snapname}")
    end
  end
end
1499
+
1500
## Destroy ZFS Snapshots
# Destroys snapshots either across every dataset (opts[:dataset] == 'all'),
# for one dataset selected by numeric index, or for an explicit
# dataset@snapshot pair. Snapshots within a dataset are addressed by index
# (opts[:snapshot_name] numeric) or all at once (opts[:snapshot_name] == 'all').
def zfssnapdestroy(datasets, opts, uii)
  uii.info(I18n.t('vagrant_zones.zfs_snapshot_destroy'))
  if opts[:dataset].to_s == 'all'
    datasets.each do |disk|
      output = execute(false, "#{@pfexec} zfs list -t snapshot -o name | grep #{disk}")
      ## Never delete the source when doing all
      # drop(1) removes the first (oldest listed) entry so the base snapshot survives.
      output = output.split(/\n/).drop(1)
      ## Delete in Reverse order
      output.reverse.each do |snaps|
        cmd = "#{@pfexec} zfs destroy #{snaps}"
        execute(false, cmd)
        uii.info(" - #{snaps}")
      end
    end
  else
    ## Specify the dataset by number
    # NOTE(review): a non-numeric dataset string is coerced by to_i to 0 and
    # would match index 0 here — confirm that is intended for path input.
    datasets.each_with_index do |disk, dindex|
      next unless dindex.to_i == opts[:dataset].to_i

      output = execute(false, "#{@pfexec} zfs list -t snapshot -o name | grep #{disk}")
      output = output.split(/\n/).drop(1)
      output.each_with_index do |snaps, spindex|
        # Destroy one snapshot addressed by its position in the listing.
        if opts[:snapshot_name].to_i == spindex && opts[:snapshot_name].to_s != 'all'
          uii.info(" - #{snaps}")
          execute(false, "#{@pfexec} zfs destroy #{snaps}")
        end
        # Destroy every remaining snapshot of this dataset.
        if opts[:snapshot_name].to_s == 'all'
          uii.info(" - #{snaps}")
          execute(false, "#{@pfexec} zfs destroy #{snaps}")
        end
      end
    end
    ## Specify the Dataset by path
    # NOTE(review): `datasets` holds plain dataset paths, not
    # "dataset@snapshot" strings, so this include? check looks like it can
    # never be true — confirm against the callers.
    cmd = "#{@pfexec} zfs destroy #{opts[:dataset]}@#{opts[:snapshot_name]}"
    execute(false, cmd) if datasets.include?("#{opts[:dataset]}@#{opts[:snapshot_name]}")
  end
end
1538
+
1539
## This will list Cron Jobs for Snapshots to take place
# Prints the stored cron entry for each frequency selected by opts[:list]
# ('hourly'/'daily'/'weekly'/'monthly' or 'all'), but only when this disk
# is targeted by opts[:dataset] (its path or 'all').
def zfssnapcronlist(uii, disk, opts, cronjobs)
  return unless ['all', disk.to_s].include?(opts[:dataset].to_s)

  uii.info(I18n.t('vagrant_zones.cron_entries'))
  %w[hourly daily weekly monthly].each do |freq|
    next unless [freq, 'all'].include?(opts[:list])

    entry = cronjobs[freq.to_sym]
    uii.info(entry) unless entry.nil?
  end
end
1553
+
1554
## This will delete Cron Jobs for Snapshots to take place
# Removes the snapshot cron entries selected by opts[:delete] (a single
# frequency or 'all') for this disk, by piping `crontab -l` through
# `grep -v` and back into crontab. Asterisks in the stored entry are
# escaped so grep treats them literally.
def zfssnapcrondelete(uii, disk, opts, cronjobs)
  return unless ['all', disk.to_s].include?(opts[:dataset].to_s)

  crontab = "#{@pfexec} crontab"
  strip_cmd = "#{crontab} -l | grep -v "
  uii.info(I18n.t('vagrant_zones.cron_delete'))
  %i[hourly daily weekly monthly].each do |freq|
    next unless [freq.to_s, 'all'].include?(opts[:delete])

    entry = cronjobs[freq]
    next if entry.nil?

    escaped = entry.to_s.gsub(/\*/, '\*')
    uii.info(" - Removing Cron: #{escaped}")
    execute(false, "#{strip_cmd}'#{escaped}' | #{crontab}")
  end
end
1571
+
1572
## This will set Cron Jobs for Snapshots to take place
# Installs crontab entries that run the configured snapshot script for
# this disk at the requested frequency (or all frequencies), skipping any
# frequency that already has an entry in `cronjobs`.
def zfssnapcronset(uii, disk, opts, cronjobs)
  # Only act when this disk is targeted (its path or 'all').
  return unless opts[:dataset].to_s == disk.to_s || opts[:dataset].to_s == 'all'

  config = @machine.provider_config
  name = @machine.name
  uii.info(I18n.t('vagrant_zones.cron_set'))
  snpshtr = config.snapshot_script.to_s
  # Subshell prefix: append the new entry to the existing crontab listing.
  shrtcr = "( #{@pfexec} crontab -l; echo "
  h = {}
  # Requested frequency and retention count from the CLI options.
  sf = { freq: opts[:set_frequency], rtn: opts[:set_frequency_rtn] }
  # Default retention per frequency: 24 hourly, 8 daily, 5 weekly, 1 monthly.
  rtn = { h: 24, d: 8, w: 5, m: 1 }
  # Cron schedule fragments per frequency (note the trailing spaces).
  ct = { h: '0 1-23 * * * ', d: '0 0 * * 0-5 ', w: '0 0 * * 6 ', m: '0 0 1 * * ' }
  h[:hourly] = { rtn: rtn[:h], ct: ct[:h] }
  h[:daily] = { rtn: rtn[:d], ct: ct[:d] }
  h[:weekly] = { rtn: rtn[:w], ct: ct[:w] }
  h[:monthly] = { rtn: rtn[:m], ct: ct[:m] }
  h.each do |k, d|
    # Skip frequencies that were not requested or already have a cron entry.
    next unless (k.to_s == sf[:freq] || sf[:freq] == 'all') && cronjobs[k].nil?

    # Use the explicit retention when given, otherwise the default for k.
    cj = "#{d[:ct]}#{snpshtr} -p #{k} -r -n #{sf[:rtn]} #{disk} # #{name}" unless sf[:rtn].nil?
    cj = "#{d[:ct]}#{snpshtr} -p #{k} -r -n #{d[:rtn]} #{disk} # #{name}" if sf[:rtn].nil?
    # NOTE(review): this always stores the *hourly* rtn/ct values under key
    # k — it looks like d[:rtn]/d[:ct] were intended. The stored value is
    # not read again within this method, so behaviour is unaffected here,
    # but confirm before relying on `h` elsewhere.
    h[k] = { rtn: rtn[:h], ct: ct[:h], cj: cj }
    setcron = "#{shrtcr}'#{cj}' ) | #{@pfexec} crontab"
    uii.info("Setting Cron: #{setcron}")
    execute(false, setcron)
  end
end
1600
+
1601
## Configure ZFS Snapshots Crons
# Reads the host crontab, indexes this machine's snapshot jobs per dataset
# by frequency (hourly/daily/weekly/monthly), then dispatches to the
# list/delete/set helpers according to opts.
def zfssnapcron(datasets, opts, uii)
  name = @machine.name
  crons = execute(false, "#{@pfexec} crontab -l").split("\n")
  # Extracts the frequency from the snapshot script's `-p` argument.
  rtnregex = '-p (weekly|monthly|daily|hourly)'
  opts[:dataset] = 'all' if opts[:dataset].nil?
  datasets.each do |disk|
    cronjobs = {}
    crons.each do |tasks|
      next if tasks.empty? || tasks[/^#/]

      # The original had four identical when-branches (one per frequency)
      # that also assigned unused locals; collapsed into one keyed store.
      freq = tasks[/#{rtnregex}/, 1]
      next if freq.nil?

      # Only record jobs tagged with this machine's name and this disk.
      cronjobs[freq.to_sym] = tasks if tasks[/# #{name}/] && tasks[/#{disk}/]
    end
    zfssnapcronlist(uii, disk, opts, cronjobs) unless opts[:list].nil?
    zfssnapcrondelete(uii, disk, opts, cronjobs) unless opts[:delete].nil?
    zfssnapcronset(uii, disk, opts, cronjobs) unless opts[:set_frequency].nil?
  end
end
1633
+
1634
# Entry point for ZFS snapshot management: builds the list of datasets
# backing this zone (the boot volume plus any additional disks) and
# dispatches `job` to the list/create/destroy/cron helpers.
def zfs(uii, job, opts)
  name = @machine.name
  config = @machine.provider_config
  boot = config.boot
  datasets = ["#{boot['array']}/#{boot['dataset']}/#{name}/#{boot['volume_name']}"]
  config.additional_disks&.each do |disk|
    datasets << "#{disk['array']}/#{disk['dataset']}/#{name}/#{disk['volume_name']}"
  end
  case job
  when 'list' then zfssnaplist(datasets, opts, uii)
  when 'create' then zfssnapcreate(datasets, opts, uii)
  when 'destroy' then zfssnapdestroy(datasets, opts, uii)
  when 'cron' then zfssnapcron(datasets, opts, uii)
  end
end
1657
+
1658
# Halts the zone: tries a graceful `zoneadm shutdown` first (only when the
# zone is running), and when that exceeds config.clean_shutdown_time falls
# back to a hard `zoneadm halt`. Raises Errors::TimeoutHalt if even the
# hard halt times out.
def halt(uii)
  zone = @machine.name
  cfg = @machine.provider_config
  wait = cfg.clean_shutdown_time

  ## Check state in zoneadm
  state = execute(false, "#{@pfexec} zoneadm -z #{zone} list -p | awk -F: '{ print $3 }'")
  uii.info(I18n.t('vagrant_zones.graceful_shutdown'))
  Timeout.timeout(wait) do
    execute(false, "#{@pfexec} zoneadm -z #{zone} shutdown") if state == 'running'
  end
rescue Timeout::Error
  uii.info(I18n.t('vagrant_zones.graceful_shutdown_failed') + wait.to_s)
  begin
    Timeout.timeout(wait) { execute(false, "#{@pfexec} zoneadm -z #{zone} halt") }
  rescue Timeout::Error
    raise Errors::TimeoutHalt
  end
end
1681
+
1682
# Tears down the zone: uninstalls it when installed, removes its zonecfg
# configuration when present, then deletes the associated network
# interfaces. The zoneadm states are mutually exclusive, so the original
# pair of ifs is expressed as a single case.
def destroy(id)
  zone = @machine.name
  id.info(I18n.t('vagrant_zones.leaving'))
  id.info(I18n.t('vagrant_zones.destroy_zone'))
  state = execute(false, "#{@pfexec} zoneadm -z #{zone} list -p | awk -F: '{ print $3 }'")

  case state
  when 'installed'
    ## Uninstall from zoneadm, then drop the zonecfg configuration
    id.info(I18n.t('vagrant_zones.bhyve_zone_config_uninstall'))
    execute(false, "#{@pfexec} zoneadm -z #{zone} uninstall -F")
    id.info(I18n.t('vagrant_zones.bhyve_zone_config_remove'))
    execute(false, "#{@pfexec} zonecfg -z #{zone} delete -F")
  when 'incomplete', 'configured'
    ## Only the zonecfg configuration exists; drop it
    id.info(I18n.t('vagrant_zones.bhyve_zone_config_remove'))
    execute(false, "#{@pfexec} zonecfg -z #{zone} delete -F")
  end

  ### Nic Configurations
  id.info(I18n.t('vagrant_zones.networking_int_remove'))
  network(id, 'delete')
end
1708
+ end
1709
+ end
1710
+ end