chef-metal-vagrant 0.3.1 → 0.4.beta
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +1 -1
- data/lib/chef/provider/vagrant_cluster.rb +3 -5
- data/lib/chef/resource/vagrant_box.rb +1 -1
- data/lib/chef/resource/vagrant_cluster.rb +1 -1
- data/lib/chef_metal/driver_init/vagrant.rb +3 -0
- data/lib/chef_metal_vagrant/vagrant_driver.rb +461 -0
- data/lib/chef_metal_vagrant/version.rb +1 -1
- data/lib/chef_metal_vagrant.rb +13 -11
- metadata +8 -8
- data/lib/chef_metal/provisioner_init/vagrant_cluster_init.rb +0 -4
- data/lib/chef_metal_vagrant/vagrant_provisioner.rb +0 -361
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA1:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 869d2fb17c59772fa9c03e88440319b308bd277d
|
|
4
|
+
data.tar.gz: 1da119611bf939a3f1c633ab971fbd48699b7361
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 22757248dcbc506598c6430e8bba41d15745c98b56fcbcff5beed457b91fa203acf8d105d061c3f8481ddd04809e1ec457e9e629db24e1caed7257c2b35ffac5
|
|
7
|
+
data.tar.gz: 3236ad050f5bac33de4adc8550036902cae02b0678a1b01b7119ec99b8c20d33d5a2e1f8c5e68b2a30d973b573c9fbfd2047fb9e8a0a832a45ac793865738c51
|
data/README.md
CHANGED
|
@@ -1,10 +1,8 @@
|
|
|
1
1
|
require 'chef/provider/lwrp_base'
|
|
2
|
-
require '
|
|
2
|
+
require 'cheffish'
|
|
3
3
|
|
|
4
4
|
class Chef::Provider::VagrantCluster < Chef::Provider::LWRPBase
|
|
5
5
|
|
|
6
|
-
include ChefMetal::ProviderActionHandler
|
|
7
|
-
|
|
8
6
|
use_inline_resources
|
|
9
7
|
|
|
10
8
|
def whyrun_supported?
|
|
@@ -13,7 +11,7 @@ class Chef::Provider::VagrantCluster < Chef::Provider::LWRPBase
|
|
|
13
11
|
|
|
14
12
|
action :create do
|
|
15
13
|
the_base_path = new_resource.path
|
|
16
|
-
|
|
14
|
+
Cheffish.inline_resource(self, :create) do
|
|
17
15
|
directory the_base_path
|
|
18
16
|
file ::File.join(the_base_path, 'Vagrantfile') do
|
|
19
17
|
content <<EOM
|
|
@@ -27,7 +25,7 @@ EOM
|
|
|
27
25
|
|
|
28
26
|
action :delete do
|
|
29
27
|
the_base_path = new_resource.path
|
|
30
|
-
|
|
28
|
+
Cheffish.inline_resource(self, :delete) do
|
|
31
29
|
file ::File.join(the_base_path, 'Vagrantfile') do
|
|
32
30
|
action :delete
|
|
33
31
|
end
|
|
@@ -9,7 +9,7 @@ class Chef::Resource::VagrantBox < Chef::Resource::LWRPBase
|
|
|
9
9
|
|
|
10
10
|
attribute :name, :kind_of => String, :name_attribute => true
|
|
11
11
|
attribute :url, :kind_of => String
|
|
12
|
-
attribute :
|
|
12
|
+
attribute :driver_options, :kind_of => Hash
|
|
13
13
|
|
|
14
14
|
def after_created
|
|
15
15
|
super
|
|
@@ -0,0 +1,461 @@
|
|
|
1
|
+
require 'chef/mixin/shell_out'
|
|
2
|
+
require 'chef_metal/driver'
|
|
3
|
+
require 'chef_metal/machine/windows_machine'
|
|
4
|
+
require 'chef_metal/machine/unix_machine'
|
|
5
|
+
require 'chef_metal/convergence_strategy/install_msi'
|
|
6
|
+
require 'chef_metal/convergence_strategy/install_cached'
|
|
7
|
+
require 'chef_metal/transport/winrm'
|
|
8
|
+
require 'chef_metal/transport/ssh'
|
|
9
|
+
require 'chef_metal_vagrant/version'
|
|
10
|
+
require 'chef/resource/vagrant_cluster'
|
|
11
|
+
require 'chef/provider/vagrant_cluster'
|
|
12
|
+
|
|
13
|
+
module ChefMetalVagrant
|
|
14
|
+
# Provisions machines in vagrant.
|
|
15
|
+
class VagrantDriver < ChefMetal::Driver
|
|
16
|
+
|
|
17
|
+
include Chef::Mixin::ShellOut
|
|
18
|
+
|
|
19
|
+
# Create a new vagrant driver.
|
|
20
|
+
#
|
|
21
|
+
# ## Parameters
|
|
22
|
+
# cluster_path - path to the directory containing the vagrant files, which
|
|
23
|
+
# should have been created with the vagrant_cluster resource.
|
|
24
|
+
def initialize(driver_url, config)
|
|
25
|
+
super
|
|
26
|
+
scheme, cluster_path = driver_url.split(':', 2)
|
|
27
|
+
@cluster_path = cluster_path
|
|
28
|
+
end
|
|
29
|
+
|
|
30
|
+
attr_reader :cluster_path
|
|
31
|
+
|
|
32
|
+
def self.from_url(driver_url, config)
|
|
33
|
+
scheme, cluster_path = driver_url.split(':', 2)
|
|
34
|
+
cluster_path = File.expand_path(cluster_path || File.join(Chef::Config.config_dir, 'vms'))
|
|
35
|
+
VagrantDriver.new("vagrant:#{cluster_path}", config)
|
|
36
|
+
end
|
|
37
|
+
|
|
38
|
+
# Acquire a machine, generally by provisioning it. Returns a Machine
|
|
39
|
+
# object pointing at the machine, allowing useful actions like setup,
|
|
40
|
+
# converge, execute, file and directory.
|
|
41
|
+
def allocate_machine(action_handler, machine_spec, machine_options)
|
|
42
|
+
ensure_vagrant_cluster(action_handler)
|
|
43
|
+
vm_name = machine_spec.name
|
|
44
|
+
vm_file_path = File.join(cluster_path, "#{machine_spec.name}.vm")
|
|
45
|
+
vm_file_updated = create_vm_file(action_handler, vm_name, vm_file_path, machine_options)
|
|
46
|
+
if vm_file_updated || !machine_spec.location
|
|
47
|
+
old_location = machine_spec.location
|
|
48
|
+
machine_spec.location = {
|
|
49
|
+
'driver_url' => driver_url,
|
|
50
|
+
'driver_version' => ChefMetalVagrant::VERSION,
|
|
51
|
+
'vm_name' => vm_name,
|
|
52
|
+
'vm_file_path' => vm_file_path,
|
|
53
|
+
'allocated_at' => Time.now.utc.to_s,
|
|
54
|
+
'host_node' => action_handler.host_node
|
|
55
|
+
}
|
|
56
|
+
machine_spec.location['needs_reload'] = true if vm_file_updated
|
|
57
|
+
if machine_options[:vagrant_options]
|
|
58
|
+
%w(vm.guest winrm.host winrm.port winrm.username winrm.password).each do |key|
|
|
59
|
+
machine_spec.location[key] = machine_options[:vagrant_options][key] if machine_options[:vagrant_options][key]
|
|
60
|
+
end
|
|
61
|
+
end
|
|
62
|
+
machine_spec.location['chef_client_timeout'] = machine_options[:chef_client_timeout] if machine_options[:chef_client_timeout]
|
|
63
|
+
end
|
|
64
|
+
end
|
|
65
|
+
|
|
66
|
+
def ready_machine(action_handler, machine_spec, machine_options)
|
|
67
|
+
start_machine(action_handler, machine_spec, machine_options)
|
|
68
|
+
machine_for(machine_spec, machine_options)
|
|
69
|
+
end
|
|
70
|
+
|
|
71
|
+
# Connect to machine without acquiring it
|
|
72
|
+
def connect_to_machine(machine_spec, machine_options)
|
|
73
|
+
machine_for(machine_spec, machine_options)
|
|
74
|
+
end
|
|
75
|
+
|
|
76
|
+
def destroy_machine(action_handler, machine_spec, machine_options)
|
|
77
|
+
if machine_spec.location
|
|
78
|
+
vm_name = machine_spec.location['vm_name']
|
|
79
|
+
current_status = vagrant_status(vm_name)
|
|
80
|
+
if current_status != 'not created'
|
|
81
|
+
action_handler.perform_action "run vagrant destroy -f #{vm_name} (status was '#{current_status}')" do
|
|
82
|
+
result = shell_out("vagrant destroy -f #{vm_name}", :cwd => cluster_path)
|
|
83
|
+
if result.exitstatus != 0
|
|
84
|
+
raise "vagrant destroy failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
85
|
+
end
|
|
86
|
+
end
|
|
87
|
+
end
|
|
88
|
+
|
|
89
|
+
convergence_strategy_for(machine_spec, machine_options).
|
|
90
|
+
cleanup_convergence(action_handler, machine_spec)
|
|
91
|
+
|
|
92
|
+
vm_file_path = machine_spec.location['vm_file_path']
|
|
93
|
+
ChefMetal.inline_resource(action_handler) do
|
|
94
|
+
file vm_file_path do
|
|
95
|
+
action :delete
|
|
96
|
+
end
|
|
97
|
+
end
|
|
98
|
+
end
|
|
99
|
+
end
|
|
100
|
+
|
|
101
|
+
def stop_machine(action_handler, machine_spec, machine_options)
|
|
102
|
+
if machine_spec.location
|
|
103
|
+
vm_name = machine_spec.location['vm_name']
|
|
104
|
+
current_status = vagrant_status(vm_name)
|
|
105
|
+
if current_status == 'running'
|
|
106
|
+
action_handler.perform_action "run vagrant halt #{vm_name} (status was '#{current_status}')" do
|
|
107
|
+
result = shell_out("vagrant halt #{vm_name}", :cwd => cluster_path)
|
|
108
|
+
if result.exitstatus != 0
|
|
109
|
+
raise "vagrant halt failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
110
|
+
end
|
|
111
|
+
end
|
|
112
|
+
end
|
|
113
|
+
end
|
|
114
|
+
end
|
|
115
|
+
|
|
116
|
+
def ready_machines(action_handler, specs_and_options, parallelizer)
|
|
117
|
+
start_machines(action_handler, specs_and_options)
|
|
118
|
+
machines = []
|
|
119
|
+
specs_and_options.each_pair do |spec, options|
|
|
120
|
+
machines.push(machine_for(spec, options))
|
|
121
|
+
end
|
|
122
|
+
machines
|
|
123
|
+
end
|
|
124
|
+
|
|
125
|
+
def destroy_machines(action_handler, specs_and_options, parallelizer)
|
|
126
|
+
all_names = []
|
|
127
|
+
all_status = []
|
|
128
|
+
all_outputs = {}
|
|
129
|
+
specs_and_options.each_key do |spec|
|
|
130
|
+
if spec.location
|
|
131
|
+
vm_name = spec.location['vm_name']
|
|
132
|
+
current_status = vagrant_status(vm_name)
|
|
133
|
+
if current_status != 'not created'
|
|
134
|
+
all_names.push(vm_name)
|
|
135
|
+
all_status.push(current_status)
|
|
136
|
+
end
|
|
137
|
+
end
|
|
138
|
+
end
|
|
139
|
+
if all_names.length > 0
|
|
140
|
+
names = all_names.join(" ")
|
|
141
|
+
statuses = all_status.join(", ")
|
|
142
|
+
action_handler.perform_action "run vagrant destroy -f #{names} (status was '#{statuses}')" do
|
|
143
|
+
result = shell_out("vagrant destroy -f #{names}", :cwd => cluster_path)
|
|
144
|
+
if result.exitstatus != 0
|
|
145
|
+
raise "vagrant destroy failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
146
|
+
end
|
|
147
|
+
end
|
|
148
|
+
end
|
|
149
|
+
specs_and_options.each_pair do |spec, options|
|
|
150
|
+
convergence_strategy_for(spec, options).
|
|
151
|
+
cleanup_convergence(action_handler, spec)
|
|
152
|
+
|
|
153
|
+
vm_file_path = spec.location['vm_file_path']
|
|
154
|
+
ChefMetal.inline_resource(action_handler) do
|
|
155
|
+
file vm_file_path do
|
|
156
|
+
action :delete
|
|
157
|
+
end
|
|
158
|
+
end
|
|
159
|
+
end
|
|
160
|
+
end
|
|
161
|
+
|
|
162
|
+
def stop_machines(action_handler, specs_and_options, parallelizer)
|
|
163
|
+
all_names = []
|
|
164
|
+
specs_and_options.each_key do |spec|
|
|
165
|
+
if spec.location
|
|
166
|
+
vm_name = spec.location['vm_name']
|
|
167
|
+
current_status = vagrant_status(vm_name)
|
|
168
|
+
if current_status == 'running'
|
|
169
|
+
all_names.push(vm_name)
|
|
170
|
+
end
|
|
171
|
+
end
|
|
172
|
+
end
|
|
173
|
+
if all_names.length > 0
|
|
174
|
+
names = all_names.join(" ")
|
|
175
|
+
action_handler.perform_action "run vagrant halt #{names} (status was 'running')" do
|
|
176
|
+
result = shell_out("vagrant halt #{names}", :cwd => cluster_path)
|
|
177
|
+
if result.exitstatus != 0
|
|
178
|
+
raise "vagrant halt failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
179
|
+
end
|
|
180
|
+
end
|
|
181
|
+
end
|
|
182
|
+
end
|
|
183
|
+
|
|
184
|
+
# Used by vagrant_cluster and machine to get the string used to configure vagrant
|
|
185
|
+
def self.vagrant_config_string(vagrant_config, variable, line_prefix)
|
|
186
|
+
hostname = name.gsub(/[^A-Za-z0-9\-]/, '-')
|
|
187
|
+
|
|
188
|
+
result = ''
|
|
189
|
+
vagrant_config.each_pair do |key, value|
|
|
190
|
+
result += "#{line_prefix}#{variable}.#{key} = #{value.inspect}\n"
|
|
191
|
+
end
|
|
192
|
+
result
|
|
193
|
+
end
|
|
194
|
+
|
|
195
|
+
def driver_url
|
|
196
|
+
"vagrant:#{cluster_path}"
|
|
197
|
+
end
|
|
198
|
+
|
|
199
|
+
protected
|
|
200
|
+
|
|
201
|
+
def ensure_vagrant_cluster(action_handler)
|
|
202
|
+
_cluster_path = cluster_path
|
|
203
|
+
ChefMetal.inline_resource(action_handler) do
|
|
204
|
+
vagrant_cluster _cluster_path
|
|
205
|
+
end
|
|
206
|
+
end
|
|
207
|
+
|
|
208
|
+
def create_vm_file(action_handler, vm_name, vm_file_path, machine_options)
|
|
209
|
+
# Determine contents of vm file
|
|
210
|
+
vm_file_content = "Vagrant.configure('2') do |outer_config|\n"
|
|
211
|
+
vm_file_content << " outer_config.vm.define #{vm_name.inspect} do |config|\n"
|
|
212
|
+
merged_vagrant_options = { 'vm.hostname' => vm_name }
|
|
213
|
+
if machine_options[:vagrant_options]
|
|
214
|
+
merged_vagrant_options = Cheffish::MergedConfig.new(machine_options[:vagrant_options], merged_vagrant_options)
|
|
215
|
+
end
|
|
216
|
+
merged_vagrant_options.each_pair do |key, value|
|
|
217
|
+
vm_file_content << " config.#{key} = #{value.inspect}\n"
|
|
218
|
+
end
|
|
219
|
+
vm_file_content << machine_options[:vagrant_config] if machine_options[:vagrant_config]
|
|
220
|
+
vm_file_content << " end\nend\n"
|
|
221
|
+
|
|
222
|
+
# Set up vagrant file
|
|
223
|
+
ChefMetal.inline_resource(action_handler) do
|
|
224
|
+
file vm_file_path do
|
|
225
|
+
content vm_file_content
|
|
226
|
+
action :create
|
|
227
|
+
end
|
|
228
|
+
end
|
|
229
|
+
end
|
|
230
|
+
|
|
231
|
+
def start_machine(action_handler, machine_spec, machine_options)
|
|
232
|
+
vm_name = machine_spec.location['vm_name']
|
|
233
|
+
up_timeout = machine_options[:up_timeout] || 10*60
|
|
234
|
+
|
|
235
|
+
current_status = vagrant_status(vm_name)
|
|
236
|
+
vm_file_updated = machine_spec.location['needs_reload']
|
|
237
|
+
machine_spec.location['needs_reload'] = false
|
|
238
|
+
if current_status != 'running'
|
|
239
|
+
# Run vagrant up if vm is not running
|
|
240
|
+
action_handler.perform_action "run vagrant up #{vm_name} (status was '#{current_status}')" do
|
|
241
|
+
result = shell_out("vagrant up #{vm_name}", :cwd => cluster_path,
|
|
242
|
+
:timeout => up_timeout)
|
|
243
|
+
if result.exitstatus != 0
|
|
244
|
+
raise "vagrant up #{vm_name} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
245
|
+
end
|
|
246
|
+
parse_vagrant_up(result.stdout, machine_spec)
|
|
247
|
+
end
|
|
248
|
+
elsif vm_file_updated
|
|
249
|
+
# Run vagrant reload if vm is running and vm file changed
|
|
250
|
+
action_handler.perform_action "run vagrant reload #{vm_name}" do
|
|
251
|
+
result = shell_out("vagrant reload #{vm_name}", :cwd => cluster_path,
|
|
252
|
+
:timeout => up_timeout)
|
|
253
|
+
if result.exitstatus != 0
|
|
254
|
+
raise "vagrant reload #{vm_name} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
255
|
+
end
|
|
256
|
+
parse_vagrant_up(result.stdout, machine_spec)
|
|
257
|
+
end
|
|
258
|
+
end
|
|
259
|
+
end
|
|
260
|
+
|
|
261
|
+
def start_machines(action_handler, specs_and_options)
|
|
262
|
+
up_names = []
|
|
263
|
+
up_status = []
|
|
264
|
+
up_specs = {}
|
|
265
|
+
update_names = []
|
|
266
|
+
update_specs = {}
|
|
267
|
+
timeouts = []
|
|
268
|
+
specs_and_options.each_pair do |spec, options|
|
|
269
|
+
vm_name = spec.location['vm_name']
|
|
270
|
+
|
|
271
|
+
vm_file_updated = spec.location['needs_reload']
|
|
272
|
+
spec.location['needs_reload'] = false
|
|
273
|
+
|
|
274
|
+
current_status = vagrant_status(vm_name)
|
|
275
|
+
if current_status != 'running'
|
|
276
|
+
up_names.push(vm_name)
|
|
277
|
+
up_status.push(current_status)
|
|
278
|
+
up_specs[vm_name] = spec
|
|
279
|
+
elsif vm_file_updated
|
|
280
|
+
update_names.push(vm_name)
|
|
281
|
+
update_specs[vm_name] = spec
|
|
282
|
+
end
|
|
283
|
+
timeouts.push(options[:up_timeout])
|
|
284
|
+
end
|
|
285
|
+
# Use the highest timeout, if any exist
|
|
286
|
+
up_timeout = timeouts.compact.max
|
|
287
|
+
up_timeout ||= 10*60
|
|
288
|
+
if up_names.length > 0
|
|
289
|
+
# Run vagrant up if vm is not running
|
|
290
|
+
names = up_names.join(" ")
|
|
291
|
+
statuses = up_status.join(", ")
|
|
292
|
+
action_handler.perform_action "run vagrant up --parallel #{names} (status was '#{statuses}')" do
|
|
293
|
+
result = shell_out("vagrant up --parallel #{names}", :cwd => cluster_path,
|
|
294
|
+
:timeout => up_timeout)
|
|
295
|
+
if result.exitstatus != 0
|
|
296
|
+
raise "vagrant up #{names} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
297
|
+
end
|
|
298
|
+
parse_multi_vagrant_up(result.stdout, up_specs)
|
|
299
|
+
end
|
|
300
|
+
end
|
|
301
|
+
if update_names.length > 0
|
|
302
|
+
names = update_names.join(" ")
|
|
303
|
+
# Run vagrant reload if vm is running and vm file changed
|
|
304
|
+
action_handler.perform_action "run vagrant reload #{names}" do
|
|
305
|
+
result = shell_out("vagrant reload #{names}", :cwd => cluster_path,
|
|
306
|
+
:timeout => up_timeout)
|
|
307
|
+
if result.exitstatus != 0
|
|
308
|
+
raise "vagrant reload #{names} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
|
309
|
+
end
|
|
310
|
+
parse_multi_vagrant_up(result.stdout, update_specs)
|
|
311
|
+
end
|
|
312
|
+
end
|
|
313
|
+
end
|
|
314
|
+
|
|
315
|
+
def parse_vagrant_up(output, machine_spec)
|
|
316
|
+
# Grab forwarded port info
|
|
317
|
+
machine_spec.location['forwarded_ports'] = {}
|
|
318
|
+
in_forwarding_ports = false
|
|
319
|
+
output.lines.each do |line|
|
|
320
|
+
if in_forwarding_ports
|
|
321
|
+
if line =~ /-- (\d+) => (\d+)/
|
|
322
|
+
machine_spec.location['forwarded_ports'][$1] = $2
|
|
323
|
+
else
|
|
324
|
+
in_forwarding_ports = false
|
|
325
|
+
end
|
|
326
|
+
elsif line =~ /Forwarding ports...$/
|
|
327
|
+
in_forwarding_ports = true
|
|
328
|
+
end
|
|
329
|
+
end
|
|
330
|
+
end
|
|
331
|
+
|
|
332
|
+
def parse_multi_vagrant_up(output, all_machine_specs)
|
|
333
|
+
# Grab forwarded port info
|
|
334
|
+
in_forwarding_ports = {}
|
|
335
|
+
all_machine_specs.each_pair do |key, spec|
|
|
336
|
+
spec.location['forwarded_ports'] = {}
|
|
337
|
+
in_forwarding_ports[key] = false
|
|
338
|
+
end
|
|
339
|
+
output.lines.each do |line|
|
|
340
|
+
/^\[(.*?)\]/.match(line)
|
|
341
|
+
node_name = $1
|
|
342
|
+
if in_forwarding_ports[node_name]
|
|
343
|
+
if line =~ /-- (\d+) => (\d+)/
|
|
344
|
+
spec = all_machine_specs[node_name]
|
|
345
|
+
spec.location['forwarded_ports'][$1] = $2
|
|
346
|
+
else
|
|
347
|
+
in_forwarding_ports[node_name] = false
|
|
348
|
+
end
|
|
349
|
+
elsif line =~ /Forwarding ports...$/
|
|
350
|
+
in_forwarding_ports[node_name] = true
|
|
351
|
+
end
|
|
352
|
+
end
|
|
353
|
+
end
|
|
354
|
+
|
|
355
|
+
def machine_for(machine_spec, machine_options)
|
|
356
|
+
if machine_spec.location['vm.guest'].to_s == 'windows'
|
|
357
|
+
ChefMetal::Machine::WindowsMachine.new(machine_spec, transport_for(machine_spec),
|
|
358
|
+
convergence_strategy_for(machine_spec, machine_options))
|
|
359
|
+
else
|
|
360
|
+
ChefMetal::Machine::UnixMachine.new(machine_spec, transport_for(machine_spec),
|
|
361
|
+
convergence_strategy_for(machine_spec, machine_options))
|
|
362
|
+
end
|
|
363
|
+
end
|
|
364
|
+
|
|
365
|
+
def convergence_strategy_for(machine_spec, machine_options)
|
|
366
|
+
if machine_spec.location['vm.guest'].to_s == 'windows'
|
|
367
|
+
@windows_convergence_strategy ||= begin
|
|
368
|
+
ChefMetal::ConvergenceStrategy::InstallMsi.
|
|
369
|
+
new(machine_options[:convergence_options], config)
|
|
370
|
+
end
|
|
371
|
+
else
|
|
372
|
+
@unix_convergence_strategy ||= begin
|
|
373
|
+
ChefMetal::ConvergenceStrategy::InstallCached.
|
|
374
|
+
new(machine_options[:convergence_options], config)
|
|
375
|
+
end
|
|
376
|
+
end
|
|
377
|
+
end
|
|
378
|
+
|
|
379
|
+
def transport_for(machine_spec)
|
|
380
|
+
if machine_spec.location['vm.guest'].to_s == 'windows'
|
|
381
|
+
create_winrm_transport(machine_spec)
|
|
382
|
+
else
|
|
383
|
+
create_ssh_transport(machine_spec)
|
|
384
|
+
end
|
|
385
|
+
end
|
|
386
|
+
|
|
387
|
+
def vagrant_status(name)
|
|
388
|
+
status_output = shell_out("vagrant status #{name}", :cwd => cluster_path).stdout
|
|
389
|
+
if status_output =~ /^#{name}\s+([^\n]+)\s+\(([^\n]+)\)$/m
|
|
390
|
+
$1
|
|
391
|
+
else
|
|
392
|
+
'not created'
|
|
393
|
+
end
|
|
394
|
+
end
|
|
395
|
+
|
|
396
|
+
def create_winrm_transport(machine_spec)
|
|
397
|
+
forwarded_ports = machine_spec.location['forwarded_ports']
|
|
398
|
+
|
|
399
|
+
# TODO IPv6 loopback? What do we do for that?
|
|
400
|
+
hostname = machine_spec.location['winrm.host'] || '127.0.0.1'
|
|
401
|
+
port = machine_spec.location['winrm.port'] || 5985
|
|
402
|
+
port = forwarded_ports[port] if forwarded_ports[port]
|
|
403
|
+
endpoint = "http://#{hostname}:#{port}/wsman"
|
|
404
|
+
type = :plaintext
|
|
405
|
+
options = {
|
|
406
|
+
:user => machine_spec.location['winrm.username'] || 'vagrant',
|
|
407
|
+
:pass => machine_spec.location['winrm.password'] || 'vagrant',
|
|
408
|
+
:disable_sspi => true
|
|
409
|
+
}
|
|
410
|
+
|
|
411
|
+
ChefMetal::Transport::WinRM.new(endpoint, type, options)
|
|
412
|
+
end
|
|
413
|
+
|
|
414
|
+
def create_ssh_transport(machine_spec)
|
|
415
|
+
vagrant_ssh_config = vagrant_ssh_config_for(machine_spec)
|
|
416
|
+
hostname = vagrant_ssh_config['HostName']
|
|
417
|
+
username = vagrant_ssh_config['User']
|
|
418
|
+
ssh_options = {
|
|
419
|
+
:port => vagrant_ssh_config['Port'],
|
|
420
|
+
:auth_methods => ['publickey'],
|
|
421
|
+
:user_known_hosts_file => vagrant_ssh_config['UserKnownHostsFile'],
|
|
422
|
+
:paranoid => yes_or_no(vagrant_ssh_config['StrictHostKeyChecking']),
|
|
423
|
+
:keys => [ strip_quotes(vagrant_ssh_config['IdentityFile']) ],
|
|
424
|
+
:keys_only => yes_or_no(vagrant_ssh_config['IdentitiesOnly'])
|
|
425
|
+
}
|
|
426
|
+
ssh_options[:auth_methods] = %w(password) if yes_or_no(vagrant_ssh_config['PasswordAuthentication'])
|
|
427
|
+
options = {
|
|
428
|
+
:prefix => 'sudo '
|
|
429
|
+
}
|
|
430
|
+
ChefMetal::Transport::SSH.new(hostname, username, ssh_options, options, config)
|
|
431
|
+
end
|
|
432
|
+
|
|
433
|
+
def vagrant_ssh_config_for(machine_spec)
|
|
434
|
+
vagrant_ssh_config = {}
|
|
435
|
+
result = shell_out("vagrant ssh-config #{machine_spec.location['vm_name']}",
|
|
436
|
+
:cwd => cluster_path)
|
|
437
|
+
result.stdout.lines.inject({}) do |result, line|
|
|
438
|
+
line =~ /^\s*(\S+)\s+(.+)/
|
|
439
|
+
vagrant_ssh_config[$1] = $2
|
|
440
|
+
end
|
|
441
|
+
vagrant_ssh_config
|
|
442
|
+
end
|
|
443
|
+
|
|
444
|
+
def yes_or_no(str)
|
|
445
|
+
case str
|
|
446
|
+
when 'yes'
|
|
447
|
+
true
|
|
448
|
+
else
|
|
449
|
+
false
|
|
450
|
+
end
|
|
451
|
+
end
|
|
452
|
+
|
|
453
|
+
def strip_quotes(str)
|
|
454
|
+
if str[0] == '"' && str[-1] == '"' && str.size >= 2
|
|
455
|
+
str[1..-2]
|
|
456
|
+
else
|
|
457
|
+
str
|
|
458
|
+
end
|
|
459
|
+
end
|
|
460
|
+
end
|
|
461
|
+
end
|
data/lib/chef_metal_vagrant.rb
CHANGED
|
@@ -3,29 +3,31 @@ require 'chef/resource/vagrant_cluster'
|
|
|
3
3
|
require 'chef/provider/vagrant_cluster'
|
|
4
4
|
require 'chef/resource/vagrant_box'
|
|
5
5
|
require 'chef/provider/vagrant_box'
|
|
6
|
-
require 'chef_metal_vagrant/
|
|
6
|
+
require 'chef_metal_vagrant/vagrant_driver'
|
|
7
7
|
|
|
8
8
|
module ChefMetalVagrant
|
|
9
9
|
def self.with_vagrant_box(run_context, box_name, vagrant_options = {}, &block)
|
|
10
10
|
if box_name.is_a?(Chef::Resource::VagrantBox)
|
|
11
|
-
new_options = {
|
|
12
|
-
new_options[
|
|
11
|
+
new_options = { :vagrant_options => { 'vm.box' => box_name.name } }
|
|
12
|
+
new_options[:vagrant_options]['vm.box_url'] = box_name.url if box_name.url
|
|
13
13
|
else
|
|
14
|
-
new_options = {
|
|
14
|
+
new_options = { :vagrant_options => { 'vm.box' => box_name } }
|
|
15
15
|
end
|
|
16
16
|
|
|
17
|
-
run_context.chef_metal.
|
|
17
|
+
run_context.chef_metal.add_machine_options(new_options, &block)
|
|
18
18
|
end
|
|
19
19
|
end
|
|
20
20
|
|
|
21
21
|
class Chef
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
22
|
+
module DSL
|
|
23
|
+
module Recipe
|
|
24
|
+
def with_vagrant_cluster(cluster_path, &block)
|
|
25
|
+
with_driver("vagrant:#{cluster_path}", &block)
|
|
26
|
+
end
|
|
26
27
|
|
|
27
|
-
|
|
28
|
-
|
|
28
|
+
def with_vagrant_box(box_name, vagrant_options = {}, &block)
|
|
29
|
+
ChefMetalVagrant.with_vagrant_box(run_context, box_name, vagrant_options, &block)
|
|
30
|
+
end
|
|
29
31
|
end
|
|
30
32
|
end
|
|
31
33
|
end
|
metadata
CHANGED
|
@@ -1,14 +1,14 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: chef-metal-vagrant
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 0.
|
|
4
|
+
version: 0.4.beta
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- John Keiser
|
|
8
8
|
autorequire:
|
|
9
9
|
bindir: bin
|
|
10
10
|
cert_chain: []
|
|
11
|
-
date: 2014-05-
|
|
11
|
+
date: 2014-05-23 00:00:00.000000000 Z
|
|
12
12
|
dependencies:
|
|
13
13
|
- !ruby/object:Gem::Dependency
|
|
14
14
|
name: chef
|
|
@@ -52,7 +52,7 @@ dependencies:
|
|
|
52
52
|
- - '>='
|
|
53
53
|
- !ruby/object:Gem::Version
|
|
54
54
|
version: '0'
|
|
55
|
-
description:
|
|
55
|
+
description: Driver for creating Vagrant instances in Chef Metal.
|
|
56
56
|
email: jkeiser@getchef.com
|
|
57
57
|
executables: []
|
|
58
58
|
extensions: []
|
|
@@ -67,8 +67,8 @@ files:
|
|
|
67
67
|
- lib/chef/provider/vagrant_cluster.rb
|
|
68
68
|
- lib/chef/resource/vagrant_box.rb
|
|
69
69
|
- lib/chef/resource/vagrant_cluster.rb
|
|
70
|
-
- lib/chef_metal/
|
|
71
|
-
- lib/chef_metal_vagrant/
|
|
70
|
+
- lib/chef_metal/driver_init/vagrant.rb
|
|
71
|
+
- lib/chef_metal_vagrant/vagrant_driver.rb
|
|
72
72
|
- lib/chef_metal_vagrant/version.rb
|
|
73
73
|
- lib/chef_metal_vagrant.rb
|
|
74
74
|
homepage: https://github.com/opscode/chef-metal-fog
|
|
@@ -85,14 +85,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
|
|
|
85
85
|
version: '0'
|
|
86
86
|
required_rubygems_version: !ruby/object:Gem::Requirement
|
|
87
87
|
requirements:
|
|
88
|
-
- - '
|
|
88
|
+
- - '>'
|
|
89
89
|
- !ruby/object:Gem::Version
|
|
90
|
-
version:
|
|
90
|
+
version: 1.3.1
|
|
91
91
|
requirements: []
|
|
92
92
|
rubyforge_project:
|
|
93
93
|
rubygems_version: 2.0.3
|
|
94
94
|
signing_key:
|
|
95
95
|
specification_version: 4
|
|
96
|
-
summary:
|
|
96
|
+
summary: Driver for creating Vagrant instances in Chef Metal.
|
|
97
97
|
test_files: []
|
|
98
98
|
has_rdoc:
|
|
@@ -1,361 +0,0 @@
|
|
|
1
|
-
require 'chef/mixin/shell_out'
|
|
2
|
-
require 'chef_metal/provisioner'
|
|
3
|
-
require 'chef_metal/machine/windows_machine'
|
|
4
|
-
require 'chef_metal/machine/unix_machine'
|
|
5
|
-
require 'chef_metal/convergence_strategy/install_msi'
|
|
6
|
-
require 'chef_metal/convergence_strategy/install_cached'
|
|
7
|
-
require 'chef_metal/transport/winrm'
|
|
8
|
-
require 'chef_metal/transport/ssh'
|
|
9
|
-
|
|
10
|
-
module ChefMetalVagrant
|
|
11
|
-
# Provisions machines in vagrant.
|
|
12
|
-
class VagrantProvisioner < ChefMetal::Provisioner
|
|
13
|
-
|
|
14
|
-
include Chef::Mixin::ShellOut
|
|
15
|
-
|
|
16
|
-
# Create a new vagrant provisioner.
|
|
17
|
-
#
|
|
18
|
-
# ## Parameters
|
|
19
|
-
# cluster_path - path to the directory containing the vagrant files, which
|
|
20
|
-
# should have been created with the vagrant_cluster resource.
|
|
21
|
-
def initialize(cluster_path)
|
|
22
|
-
@cluster_path = cluster_path
|
|
23
|
-
end
|
|
24
|
-
|
|
25
|
-
attr_reader :cluster_path
|
|
26
|
-
|
|
27
|
-
# Inflate a provisioner from node information; we don't want to force the
|
|
28
|
-
# driver to figure out what the provisioner really needs, since it varies
|
|
29
|
-
# from provisioner to provisioner.
|
|
30
|
-
#
|
|
31
|
-
# ## Parameters
|
|
32
|
-
# node - node to inflate the provisioner for
|
|
33
|
-
#
|
|
34
|
-
# returns a VagrantProvisioner
|
|
35
|
-
def self.inflate(node)
|
|
36
|
-
node_url = node['normal']['provisioner_output']['provisioner_url']
|
|
37
|
-
cluster_path = node_url.split(':', 2)[1].sub(/^\/\//, "")
|
|
38
|
-
self.new(cluster_path)
|
|
39
|
-
end
|
|
40
|
-
|
|
41
|
-
# Acquire a machine, generally by provisioning it. Returns a Machine
|
|
42
|
-
# object pointing at the machine, allowing useful actions like setup,
|
|
43
|
-
# converge, execute, file and directory. The Machine object will have a
|
|
44
|
-
# "node" property which must be saved to the server (if it is any
|
|
45
|
-
# different from the original node object).
|
|
46
|
-
#
|
|
47
|
-
# ## Parameters
|
|
48
|
-
# action_handler - the action_handler object that is calling this method; this
|
|
49
|
-
# is generally a action_handler, but could be anything that can support the
|
|
50
|
-
# ChefMetal::ActionHandler interface (i.e., in the case of the test
|
|
51
|
-
# kitchen metal driver for acquiring and destroying VMs; see the base
|
|
52
|
-
# class for what needs providing).
|
|
53
|
-
# node - node object (deserialized json) representing this machine. If
|
|
54
|
-
# the node has a provisioner_options hash in it, these will be used
|
|
55
|
-
# instead of options provided by the provisioner. TODO compare and
|
|
56
|
-
# fail if different?
|
|
57
|
-
# node will have node['normal']['provisioner_options'] in it with any options.
|
|
58
|
-
# It is a hash with this format (all keys are strings):
|
|
59
|
-
#
|
|
60
|
-
# -- provisioner_url: vagrant:<cluster_path>
|
|
61
|
-
# -- vagrant_options: hash of properties of the "config"
|
|
62
|
-
# object, i.e. "vm.box" => "ubuntu12" and "vm.box_url"
|
|
63
|
-
# -- vagrant_config: string containing other vagrant config.
|
|
64
|
-
# Should assume the variable "config" represents machine config.
|
|
65
|
-
# Will be written verbatim into the vm's Vagrantfile.
|
|
66
|
-
# -- transport_options: hash of options specifying the transport.
|
|
67
|
-
# :type => :ssh
|
|
68
|
-
# :type => :winrm
|
|
69
|
-
# If not specified, ssh is used unless vm.guest is :windows. If that is
|
|
70
|
-
# the case, the windows options are used and the port forward for 5985
|
|
71
|
-
# is detected.
|
|
72
|
-
# -- up_timeout: maximum time, in seconds, to wait for vagrant
|
|
73
|
-
# to bring up the machine. Defaults to 10 minutes.
|
|
74
|
-
# -- chef_client_timeout: maximum time, in seconds, to wait for chef-client
|
|
75
|
-
# to complete. 0 or nil for no timeout. Defaults to 2 hours.
|
|
76
|
-
#
|
|
77
|
-
# node['normal']['provisioner_output'] will be populated with information
|
|
78
|
-
# about the created machine. For vagrant, it is a hash with this
|
|
79
|
-
# format:
|
|
80
|
-
#
|
|
81
|
-
# -- provisioner_url: vagrant_cluster://<current_node>/<cluster_path>
|
|
82
|
-
# -- vm_name: name of vagrant vm created
|
|
83
|
-
# -- vm_file_path: path to machine-specific vagrant config file
|
|
84
|
-
# on disk
|
|
85
|
-
# -- forwarded_ports: hash with key as guest_port => host_port
|
|
86
|
-
#
|
|
87
|
-
def acquire_machine(action_handler, node)
|
|
88
|
-
# Set up the provisioner output
|
|
89
|
-
provisioner_options = node['normal']['provisioner_options']
|
|
90
|
-
vm_name = node['name']
|
|
91
|
-
old_provisioner_output = node['normal']['provisioner_output']
|
|
92
|
-
node['normal']['provisioner_output'] = provisioner_output = {
|
|
93
|
-
'provisioner_url' => provisioner_url(vm_name),
|
|
94
|
-
'vm_name' => vm_name,
|
|
95
|
-
'vm_file_path' => File.join(cluster_path, "#{vm_name}.vm")
|
|
96
|
-
}
|
|
97
|
-
# Preserve existing forwarded ports
|
|
98
|
-
provisioner_output['forwarded_ports'] = old_provisioner_output['forwarded_ports'] if old_provisioner_output
|
|
99
|
-
|
|
100
|
-
# TODO compare new options to existing and fail if we cannot change it
|
|
101
|
-
# over (perhaps introduce a boolean that will force a delete and recreate
|
|
102
|
-
# in such a case)
|
|
103
|
-
|
|
104
|
-
#
|
|
105
|
-
# This is where the work gets done:
|
|
106
|
-
# Create the .vm file, start the vm, and return the Machine
|
|
107
|
-
#
|
|
108
|
-
vm_file_updated = create_vm_file(action_handler, vm_name, provisioner_output['vm_file_path'], provisioner_options)
|
|
109
|
-
start_machine(action_handler, vm_name, vm_file_updated, provisioner_output, provisioner_options['up_timeout'])
|
|
110
|
-
machine_for(node)
|
|
111
|
-
end
|
|
112
|
-
|
|
113
|
-
# Connect to machine without acquiring it
|
|
114
|
-
def connect_to_machine(node)
|
|
115
|
-
machine_for(node)
|
|
116
|
-
end
|
|
117
|
-
|
|
118
|
-
# Destroy the node's vagrant VM (if it exists), clean up convergence state,
# and delete the per-machine .vm file from the cluster directory.
def delete_machine(action_handler, node)
  provisioner_output =
    if node['normal'] && node['normal']['provisioner_output']
      node['normal']['provisioner_output']
    else
      {}
    end
  vm_name = provisioner_output['vm_name'] || node['name']

  status = vagrant_status(vm_name)
  unless status == 'not created'
    action_handler.perform_action "run vagrant destroy -f #{vm_name} (status was '#{status}')" do
      result = shell_out("vagrant destroy -f #{vm_name}", :cwd => cluster_path)
      raise "vagrant destroy failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}" if result.exitstatus != 0
    end
  end

  convergence_strategy_for(node).cleanup_convergence(action_handler, node)

  # Remove the machine-specific vagrant config file.
  vm_file_path = provisioner_output['vm_file_path'] || File.join(cluster_path, "#{vm_name}.vm")
  ChefMetal.inline_resource(action_handler) do
    file vm_file_path do
      action :delete
    end
  end
end
|
|
144
|
-
|
|
145
|
-
# Halt the node's vagrant VM if it is currently running; no-op otherwise.
def stop_machine(action_handler, node)
  provisioner_output = (node['normal'] && node['normal']['provisioner_output']) || {}
  vm_name = provisioner_output['vm_name'] || node['name']

  status = vagrant_status(vm_name)
  return unless status == 'running'

  action_handler.perform_action "run vagrant halt #{vm_name} (status was '#{status}')" do
    result = shell_out("vagrant halt #{vm_name}", :cwd => cluster_path)
    raise "vagrant halt failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}" if result.exitstatus != 0
  end
end
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
# Used by vagrant_cluster and machine to get the string used to configure vagrant.
#
# Renders one "<line_prefix><variable>.<key> = <value.inspect>\n" line per
# entry in vagrant_config, in the hash's iteration order.
#
# FIX: removed the unused local `hostname = name.gsub(...)` — its result was
# never referenced, and `name` here resolves to Module#name (the class name),
# not a hostname, so the line was both dead and misleading.
def self.vagrant_config_string(vagrant_config, variable, line_prefix)
  vagrant_config.each_pair.map do |key, value|
    "#{line_prefix}#{variable}.#{key} = #{value.inspect}\n"
  end.join
end
|
|
174
|
-
|
|
175
|
-
protected
|
|
176
|
-
|
|
177
|
-
# Canonical URL identifying a machine inside this vagrant cluster.
def provisioner_url(vm_name)
  format('vagrant_cluster://%s%s', vm_name, cluster_path)
end
|
|
180
|
-
|
|
181
|
-
# Render the machine-specific Vagrantfile fragment and write it to
# vm_file_path via an idempotent `file` resource. The return value of the
# inline resource run is used by the caller to decide whether a
# `vagrant reload` is needed.
def create_vm_file(action_handler, vm_name, vm_file_path, provisioner_options)
  vagrant_options = { 'vm.hostname' => vm_name }
  extra_options = provisioner_options['vagrant_options']
  vagrant_options.merge!(extra_options) if extra_options

  pieces = []
  pieces << "Vagrant.configure('2') do |outer_config|\n"
  pieces << "  outer_config.vm.define #{vm_name.inspect} do |config|\n"
  vagrant_options.each_pair do |key, value|
    pieces << "    config.#{key} = #{value.inspect}\n"
  end
  # Raw user-supplied vagrant config is appended verbatim.
  pieces << provisioner_options['vagrant_config'] if provisioner_options['vagrant_config']
  pieces << "  end\nend\n"
  vm_file_content = pieces.join

  ChefMetal.inline_resource(action_handler) do
    file vm_file_path do
      content vm_file_content
      action :create
    end
  end
end
|
|
201
|
-
|
|
202
|
-
# `vagrant up` a non-running VM, or `vagrant reload` a running one whose
# .vm file changed; in both cases harvest forwarded-port info from stdout
# into provisioner_output. Does nothing if the VM is running and unchanged.
def start_machine(action_handler, vm_name, vm_file_updated, provisioner_output, up_timeout)
  status = vagrant_status(vm_name)
  up_timeout ||= 10 * 60 # default to ten minutes

  if status != 'running'
    description = "run vagrant up #{vm_name} (status was '#{status}')"
    command = "vagrant up #{vm_name}"
  elsif vm_file_updated
    description = "run vagrant reload #{vm_name}"
    command = "vagrant reload #{vm_name}"
  else
    return
  end

  action_handler.perform_action description do
    result = shell_out(command, :cwd => cluster_path, :timeout => up_timeout)
    raise "#{command} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}" if result.exitstatus != 0
    parse_vagrant_up(result.stdout, provisioner_output)
  end
end
|
|
227
|
-
|
|
228
|
-
# Scan `vagrant up` / `vagrant reload` output for the "Forwarding ports..."
# section and record each "-- guest => host" mapping (string keys/values)
# into provisioner_output['forwarded_ports'].
def parse_vagrant_up(output, provisioner_output)
  provisioner_output['forwarded_ports'] = {}
  scanning_ports = false
  output.lines.each do |line|
    if scanning_ports
      match = /-- (\d+) => (\d+)/.match(line)
      if match
        provisioner_output['forwarded_ports'][match[1]] = match[2]
      else
        # First non-mapping line ends the forwarded-ports section.
        scanning_ports = false
      end
    elsif line =~ /Forwarding ports...$/
      scanning_ports = true
    end
  end
end
|
|
244
|
-
|
|
245
|
-
# Build the Machine object for a node: Windows guests get a WindowsMachine,
# everything else is treated as a Unix machine.
def machine_for(node)
  machine_class =
    if vagrant_option(node, 'vm.guest').to_s == 'windows'
      ChefMetal::Machine::WindowsMachine
    else
      ChefMetal::Machine::UnixMachine
    end
  machine_class.new(node, transport_for(node), convergence_strategy_for(node))
end
|
|
252
|
-
|
|
253
|
-
# Lazily build (and memoize per guest OS) the convergence strategy for a
# node, passing through chef_client_timeout when the user supplied one.
def convergence_strategy_for(node)
  provisioner_options = node['normal']['provisioner_options'] || {}
  options = {}
  if provisioner_options.has_key?('chef_client_timeout')
    options[:chef_client_timeout] = provisioner_options['chef_client_timeout']
  end

  if vagrant_option(node, 'vm.guest').to_s == 'windows'
    @windows_convergence_strategy ||= ChefMetal::ConvergenceStrategy::InstallMsi.new(options)
  else
    @unix_convergence_strategy ||= ChefMetal::ConvergenceStrategy::InstallCached.new(options)
  end
end
|
|
270
|
-
|
|
271
|
-
# Pick the transport for a node: WinRM for Windows guests, SSH otherwise.
def transport_for(node)
  windows_guest = vagrant_option(node, 'vm.guest').to_s == 'windows'
  windows_guest ? create_winrm_transport(node) : create_ssh_transport(node)
end
|
|
278
|
-
|
|
279
|
-
# Look up a single key from the node's provisioner vagrant_options hash,
# returning nil when either level of the hash is absent.
def vagrant_option(node, option)
  options = node['normal']['provisioner_options']
  vagrant_options = options && options['vagrant_options']
  vagrant_options ? vagrant_options[option] : nil
end
|
|
287
|
-
|
|
288
|
-
# Parse `vagrant status <name>` output and return the status word (e.g.
# 'running'); returns 'not created' when the VM is unknown or the output
# cannot be parsed.
def vagrant_status(name)
  output = shell_out("vagrant status #{name}", :cwd => cluster_path).stdout
  match = /^#{name}\s+([^\n]+)\s+\(([^\n]+)\)$/m.match(output)
  match ? match[1] : 'not created'
end
|
|
296
|
-
|
|
297
|
-
# Build a WinRM transport for a Windows guest, preferring explicit
# winrm.* vagrant options and falling back to the forwarded 5985 port
# (or the default) on loopback.
#
# FIX: previously read node['default']['provisioner_output']; provisioner
# output is written under node['normal'] everywhere else in this file, so
# the forwarded-ports lookup could never see real data.
def create_winrm_transport(node)
  provisioner_output = node['normal']['provisioner_output'] || {}
  forwarded_ports = provisioner_output['forwarded_ports'] || {}

  # TODO IPv6 loopback? What do we do for that?
  hostname = vagrant_option(node, 'winrm.host') || '127.0.0.1'
  # parse_vagrant_up records forwarded ports with *string* keys; also accept
  # the legacy integer key for backward compatibility.
  port = vagrant_option(node, 'winrm.port') || forwarded_ports['5985'] || forwarded_ports[5985] || 5985
  endpoint = "http://#{hostname}:#{port}/wsman"
  type = :plaintext
  options = {
    :user => vagrant_option(node, 'winrm.username') || 'vagrant',
    :pass => vagrant_option(node, 'winrm.password') || 'vagrant',
    :disable_sspi => true
  }

  ChefMetal::Transport::WinRM.new(endpoint, type, options)
end
|
|
314
|
-
|
|
315
|
-
# Translate `vagrant ssh-config` output for this node into the arguments
# ChefMetal's SSH transport expects; commands run through sudo.
def create_ssh_transport(node)
  ssh_config = vagrant_ssh_config_for(node)
  ssh_options = {
    :port => ssh_config['Port'],
    :auth_methods => ['publickey'],
    :user_known_hosts_file => ssh_config['UserKnownHostsFile'],
    :paranoid => yes_or_no(ssh_config['StrictHostKeyChecking']),
    :keys => [ strip_quotes(ssh_config['IdentityFile']) ],
    :keys_only => yes_or_no(ssh_config['IdentitiesOnly'])
  }
  # Password auth replaces publickey entirely when vagrant says so.
  ssh_options[:auth_methods] = %w(password) if yes_or_no(ssh_config['PasswordAuthentication'])
  ChefMetal::Transport::SSH.new(ssh_config['HostName'], ssh_config['User'], ssh_options, { :prefix => 'sudo ' })
end
|
|
333
|
-
|
|
334
|
-
# Run `vagrant ssh-config` for the node's VM and parse its "Key value"
# lines into a hash (e.g. 'HostName' => '127.0.0.1').
#
# FIX: the original misused `inject` — the accumulator was shadowed by the
# outer `result` variable and the block's return value was silently used as
# the next accumulator; worse, non-matching lines reassigned using stale or
# nil `$1`/`$2` captures, storing a bogus nil => nil entry. Use a guarded
# `each` and only record lines that actually match.
def vagrant_ssh_config_for(node)
  vagrant_ssh_config = {}
  result = shell_out("vagrant ssh-config #{node['normal']['provisioner_output']['vm_name']}", :cwd => cluster_path)
  result.stdout.lines.each do |line|
    match = /^\s*(\S+)\s+(.+)/.match(line)
    vagrant_ssh_config[match[1]] = match[2] if match
  end
  vagrant_ssh_config
end
|
|
343
|
-
|
|
344
|
-
# True only for the literal string 'yes' (ssh-config style booleans);
# anything else — including nil — is false.
def yes_or_no(str)
  str == 'yes'
end
|
|
352
|
-
|
|
353
|
-
# Remove a single pair of surrounding double quotes, if present.
# A lone '"' or an unquoted string is returned unchanged.
def strip_quotes(str)
  quoted = str.size >= 2 && str.start_with?('"') && str.end_with?('"')
  quoted ? str[1..-2] : str
end
|
|
360
|
-
end
|
|
361
|
-
end
|