chef-provisioning-vagrant 0.8.3 → 0.9.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/README.md +57 -0
- data/lib/chef/provider/vagrant_box.rb +22 -8
- data/lib/chef/provisioning/vagrant_driver.rb +1 -0
- data/lib/chef/provisioning/vagrant_driver/driver.rb +393 -394
- data/lib/chef/provisioning/vagrant_driver/version.rb +5 -5
- data/lib/chef/resource/vagrant_box.rb +1 -0
- data/spec/spec_helper.rb +21 -0
- data/spec/vagrant_spec.rb +13 -0
- data/spec/vagrant_support.rb +53 -0
- metadata +7 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: c414846467021e7b6d77af954a82e0125e097d79
|
4
|
+
data.tar.gz: 107449a704ecbb8178ba46a865c74328c3687391
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 363a9472afae1cc7209175709a7a60b5656b7b2c1c8b8ebefa5a149b2355e484370db17645d5dfbf3a6be262a56eb117f01649d46cbe24f83c46a4bfa54df576
|
7
|
+
data.tar.gz: 4c29e4fc70988c8036f1dbd1d1b44852e1fd1c1d24c426a2a8bff017d6aeff1e26a0fd8e43295acf591e2f3dc5cc3162136ec0177ede888df04ce3c13a7e82c5
|
data/README.md
CHANGED
@@ -1,3 +1,60 @@
|
|
1
1
|
# chef-provisioning-vagrant
|
2
2
|
|
3
3
|
This is the Vagrant driver for chef-provisioning.
|
4
|
+
|
5
|
+
# Resources
|
6
|
+
|
7
|
+
## vagrant_box
|
8
|
+
|
9
|
+
Specifying this resource will verify that the correct vagrant box is downloaded to the box directory (default of ```~/.chefdk/vms``` when using the chefdk) for the correct vagrant provider. The vagrant provider defaults to virtualbox.
|
10
|
+
|
11
|
+
This example will ```vagrant box add``` the box if it is not currently on your system.
|
12
|
+
```
|
13
|
+
vagrant_box 'opscode-centos-6.4' do
|
14
|
+
url 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_centos-6.4_chef-provisionerless.box'
|
15
|
+
vagrant_provider 'vmware_desktop'
|
16
|
+
end
|
17
|
+
```
|
18
|
+
This example will ensure that the vmware_desktop/fusion based box exists on your system, and will fail if the box does not exist. If this fails, you can always add the URL source as per the previous example. **Note: since bento boxes appear as 'vmware_desktop', 'vmware_fusion' will not work here**
|
19
|
+
```
|
20
|
+
vagrant_box 'custom_box' do
|
21
|
+
vagrant_provider 'vmware_desktop'
|
22
|
+
end
|
23
|
+
```
|
24
|
+
This example will ensure that the virtualbox based box already exists on your system, and will fail if the box does not exist. As before, adding the URL will cause it to download the box.
|
25
|
+
```
|
26
|
+
vagrant_box 'custom_box'
|
27
|
+
```
|
28
|
+
# Machine Options
|
29
|
+
|
30
|
+
An example of machine options would be as follows:
|
31
|
+
```
|
32
|
+
options = {
|
33
|
+
vagrant_options: {
|
34
|
+
'vm.box' => 'opscode-centos-6.4',
|
35
|
+
},
|
36
|
+
}
|
37
|
+
|
38
|
+
machine 'marley' do
|
39
|
+
machine_options options
|
40
|
+
converge true
|
41
|
+
end
|
42
|
+
```
|
43
|
+
You can also add a ```vagrant_provider``` attribute to override the default virtualbox provider:
|
44
|
+
```
|
45
|
+
options = {
|
46
|
+
vagrant_provider: 'vmware_fusion'
|
47
|
+
vagrant_options: {
|
48
|
+
'vm.box' => 'opscode-centos-6.4',
|
49
|
+
},
|
50
|
+
}
|
51
|
+
|
52
|
+
machine 'marley' do
|
53
|
+
machine_options options
|
54
|
+
converge true
|
55
|
+
end
|
56
|
+
```
|
57
|
+
**Note: even though the bento based boxes appear as 'vmware_desktop', 'vmware_fusion' is required here, as it is the name of the vagrant provider**
|
58
|
+
|
59
|
+
# Known Issues
|
60
|
+
It would be really nice to do some magic to make the vmware_fusion vs vmware_desktop providers match in the machine_options and the vagrant_box resource, but some magic would happen there...
|
@@ -12,10 +12,10 @@ class Chef::Provider::VagrantBox < Chef::Provider::LWRPBase
|
|
12
12
|
end
|
13
13
|
|
14
14
|
action :create do
|
15
|
-
if !
|
15
|
+
if !box_exists?(new_resource)
|
16
16
|
if new_resource.url
|
17
|
-
converge_by "run 'vagrant box add #{new_resource.name} #{new_resource.url}'" do
|
18
|
-
shell_out("vagrant box add #{new_resource.name} #{new_resource.url}").error!
|
17
|
+
converge_by "run 'vagrant box add #{new_resource.name} #{new_resource.url} --provider #{new_resource.vagrant_provider}'" do
|
18
|
+
shell_out("vagrant box add #{new_resource.name} #{new_resource.url} --provider #{new_resource.vagrant_provider}").error!
|
19
19
|
end
|
20
20
|
else
|
21
21
|
raise "Box #{new_resource.name} does not exist"
|
@@ -24,21 +24,35 @@ class Chef::Provider::VagrantBox < Chef::Provider::LWRPBase
|
|
24
24
|
end
|
25
25
|
|
26
26
|
action :delete do
|
27
|
-
if
|
28
|
-
converge_by "run 'vagrant box remove #{new_resource.name} #{list_boxes[new_resource.name]}'" do
|
29
|
-
shell_out("vagrant box remove #{new_resource.name} #{list_boxes[new_resource.name]}").error!
|
27
|
+
if box_exists?(new_resource.name)
|
28
|
+
converge_by "run 'vagrant box remove #{new_resource.name} #{list_boxes[new_resource.name]} --provider #{new_resource.vagrant_provider}'" do
|
29
|
+
shell_out("vagrant box remove #{new_resource.name} #{list_boxes[new_resource.name]} --provider #{new_resource.vagrant_provider}").error!
|
30
30
|
end
|
31
31
|
end
|
32
32
|
end
|
33
33
|
|
34
|
+
# Since all box names must be unique for a particular vagrant provider, this hash now
|
35
|
+
# keys off the provider name, as opposed to the box name. The version is not currently
|
36
|
+
# used, but is collected as metadata for future consumption
|
34
37
|
def list_boxes
|
35
38
|
@list_boxes ||= shell_out("vagrant box list").stdout.lines.inject({}) do |result, line|
|
36
|
-
line =~ /^(\S+)\s+\((.+)\)\s*$/
|
37
|
-
result
|
39
|
+
line =~ /^(\S+)\s+\((.+),(.+)\)\s*$/
|
40
|
+
if result.has_key?($2)
|
41
|
+
result[$2][$1] = $3
|
42
|
+
else
|
43
|
+
result[$2] = { $1 => $3 }
|
44
|
+
end
|
38
45
|
result
|
39
46
|
end
|
40
47
|
end
|
41
48
|
|
49
|
+
# In some rather strained logic, we hook into the vagrant provider, then
|
50
|
+
# the box name to make sure we have the correct box already installed.
|
51
|
+
def box_exists?(new_resource)
|
52
|
+
boxes = list_boxes
|
53
|
+
boxes[new_resource.vagrant_provider].has_key?(new_resource.name)
|
54
|
+
end
|
55
|
+
|
42
56
|
def load_current_resource
|
43
57
|
end
|
44
58
|
end
|
@@ -12,6 +12,7 @@ class Chef
|
|
12
12
|
if box_name.is_a?(Chef::Resource::VagrantBox)
|
13
13
|
new_options = { :vagrant_options => { 'vm.box' => box_name.name } }
|
14
14
|
new_options[:vagrant_options]['vm.box_url'] = box_name.url if box_name.url
|
15
|
+
new_options[:vagrant_provider] = box_name.vagrant_provider
|
15
16
|
else
|
16
17
|
new_options = { :vagrant_options => { 'vm.box' => box_name } }
|
17
18
|
end
|
@@ -11,461 +11,460 @@ require 'chef/resource/vagrant_cluster'
|
|
11
11
|
require 'chef/provider/vagrant_cluster'
|
12
12
|
|
13
13
|
class Chef
|
14
|
-
module Provisioning
|
15
|
-
module VagrantDriver
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
28
|
-
|
29
|
-
|
30
|
-
|
14
|
+
module Provisioning
|
15
|
+
module VagrantDriver
|
16
|
+
# Provisions machines in vagrant.
|
17
|
+
class Driver < Chef::Provisioning::Driver
|
18
|
+
|
19
|
+
include Chef::Mixin::ShellOut
|
20
|
+
|
21
|
+
# Create a new vagrant driver.
|
22
|
+
#
|
23
|
+
# ## Parameters
|
24
|
+
# cluster_path - path to the directory containing the vagrant files, which
|
25
|
+
# should have been created with the vagrant_cluster resource.
|
26
|
+
def initialize(driver_url, config)
|
27
|
+
super
|
28
|
+
scheme, cluster_path = driver_url.split(':', 2)
|
29
|
+
@cluster_path = cluster_path
|
30
|
+
end
|
31
31
|
|
32
|
-
|
32
|
+
attr_reader :cluster_path
|
33
33
|
|
34
|
-
|
35
|
-
|
36
|
-
|
34
|
+
def self.from_url(driver_url, config)
|
35
|
+
Driver.new(driver_url, config)
|
36
|
+
end
|
37
37
|
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
38
|
+
def self.canonicalize_url(driver_url, config)
|
39
|
+
scheme, cluster_path = driver_url.split(':', 2)
|
40
|
+
cluster_path = File.expand_path(cluster_path || File.join(Chef::Config.config_dir, 'vms'))
|
41
|
+
"vagrant:#{cluster_path}"
|
42
|
+
end
|
43
43
|
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
52
|
-
|
53
|
-
|
54
|
-
|
55
|
-
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
|
60
|
-
|
61
|
-
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
44
|
+
# Acquire a machine, generally by provisioning it. Returns a Machine
|
45
|
+
# object pointing at the machine, allowing useful actions like setup,
|
46
|
+
# converge, execute, file and directory.
|
47
|
+
def allocate_machine(action_handler, machine_spec, machine_options)
|
48
|
+
ensure_vagrant_cluster(action_handler)
|
49
|
+
vm_name = machine_spec.name
|
50
|
+
vm_file_path = File.join(cluster_path, "#{machine_spec.name}.vm")
|
51
|
+
vm_file_updated = create_vm_file(action_handler, vm_name, vm_file_path, machine_options)
|
52
|
+
if vm_file_updated || !machine_spec.location
|
53
|
+
old_location = machine_spec.location
|
54
|
+
machine_spec.location = {
|
55
|
+
'driver_url' => driver_url,
|
56
|
+
'driver_version' => Chef::Provisioning::VagrantDriver::VERSION,
|
57
|
+
'vm_name' => vm_name,
|
58
|
+
'vm_file_path' => vm_file_path,
|
59
|
+
'allocated_at' => Time.now.utc.to_s,
|
60
|
+
'host_node' => action_handler.host_node
|
61
|
+
}
|
62
|
+
machine_spec.location['needs_reload'] = true if vm_file_updated
|
63
|
+
if machine_options[:vagrant_options]
|
64
|
+
%w(vm.guest winrm.host winrm.port winrm.username winrm.password).each do |key|
|
65
|
+
machine_spec.location[key] = machine_options[:vagrant_options][key] if machine_options[:vagrant_options][key]
|
66
|
+
end
|
67
|
+
end
|
68
|
+
machine_spec.location['chef_client_timeout'] = machine_options[:chef_client_timeout] if machine_options[:chef_client_timeout]
|
66
69
|
end
|
67
70
|
end
|
68
|
-
machine_spec.location['chef_client_timeout'] = machine_options[:chef_client_timeout] if machine_options[:chef_client_timeout]
|
69
|
-
end
|
70
|
-
end
|
71
71
|
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
|
72
|
+
def ready_machine(action_handler, machine_spec, machine_options)
|
73
|
+
start_machine(action_handler, machine_spec, machine_options)
|
74
|
+
machine_for(machine_spec, machine_options)
|
75
|
+
end
|
76
76
|
|
77
|
-
|
78
|
-
|
79
|
-
|
80
|
-
|
77
|
+
# Connect to machine without acquiring it
|
78
|
+
def connect_to_machine(machine_spec, machine_options)
|
79
|
+
machine_for(machine_spec, machine_options)
|
80
|
+
end
|
81
81
|
|
82
|
-
|
83
|
-
|
84
|
-
|
85
|
-
|
86
|
-
|
87
|
-
|
88
|
-
|
89
|
-
|
90
|
-
|
82
|
+
def destroy_machine(action_handler, machine_spec, machine_options)
|
83
|
+
if machine_spec.location
|
84
|
+
vm_name = machine_spec.location['vm_name']
|
85
|
+
current_status = vagrant_status(vm_name)
|
86
|
+
if current_status != 'not created'
|
87
|
+
action_handler.perform_action "run vagrant destroy -f #{vm_name} (status was '#{current_status}')" do
|
88
|
+
result = shell_out("vagrant destroy -f #{vm_name}", :cwd => cluster_path)
|
89
|
+
if result.exitstatus != 0
|
90
|
+
raise "vagrant destroy failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
91
|
+
end
|
92
|
+
end
|
91
93
|
end
|
92
|
-
end
|
93
|
-
end
|
94
94
|
|
95
|
-
|
96
|
-
|
95
|
+
convergence_strategy_for(machine_spec, machine_options).
|
96
|
+
cleanup_convergence(action_handler, machine_spec)
|
97
97
|
|
98
|
-
|
99
|
-
|
100
|
-
|
101
|
-
|
98
|
+
vm_file_path = machine_spec.location['vm_file_path']
|
99
|
+
Chef::Provisioning.inline_resource(action_handler) do
|
100
|
+
file vm_file_path do
|
101
|
+
action :delete
|
102
|
+
end
|
103
|
+
end
|
102
104
|
end
|
103
105
|
end
|
104
|
-
end
|
105
|
-
end
|
106
106
|
|
107
|
-
|
108
|
-
|
109
|
-
|
110
|
-
|
111
|
-
|
112
|
-
|
113
|
-
|
114
|
-
|
115
|
-
|
107
|
+
def stop_machine(action_handler, machine_spec, machine_options)
|
108
|
+
if machine_spec.location
|
109
|
+
vm_name = machine_spec.location['vm_name']
|
110
|
+
current_status = vagrant_status(vm_name)
|
111
|
+
if current_status == 'running'
|
112
|
+
action_handler.perform_action "run vagrant halt #{vm_name} (status was '#{current_status}')" do
|
113
|
+
result = shell_out("vagrant halt #{vm_name}", :cwd => cluster_path)
|
114
|
+
if result.exitstatus != 0
|
115
|
+
raise "vagrant halt failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
116
|
+
end
|
117
|
+
end
|
116
118
|
end
|
117
119
|
end
|
118
120
|
end
|
119
|
-
end
|
120
|
-
end
|
121
121
|
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
end
|
130
|
-
machines
|
131
|
-
end
|
132
|
-
|
133
|
-
def destroy_machines(action_handler, specs_and_options, parallelizer)
|
134
|
-
all_names = []
|
135
|
-
all_status = []
|
136
|
-
all_outputs = {}
|
137
|
-
specs_and_options.each_key do |spec|
|
138
|
-
if spec.location
|
139
|
-
vm_name = spec.location['vm_name']
|
140
|
-
current_status = vagrant_status(vm_name)
|
141
|
-
if current_status != 'not created'
|
142
|
-
all_names.push(vm_name)
|
143
|
-
all_status.push(current_status)
|
122
|
+
def ready_machines(action_handler, specs_and_options, parallelizer)
|
123
|
+
start_machines(action_handler, specs_and_options)
|
124
|
+
machines = []
|
125
|
+
specs_and_options.each_pair do |spec, options|
|
126
|
+
machine = machine_for(spec, options)
|
127
|
+
machines << machine
|
128
|
+
yield machine if block_given?
|
144
129
|
end
|
130
|
+
machines
|
145
131
|
end
|
146
|
-
end
|
147
|
-
if all_names.length > 0
|
148
|
-
names = all_names.join(" ")
|
149
|
-
statuses = all_status.join(", ")
|
150
|
-
action_handler.perform_action "run vagrant destroy -f #{names} (status was '#{statuses}')" do
|
151
|
-
result = shell_out("vagrant destroy -f #{names}", :cwd => cluster_path)
|
152
|
-
if result.exitstatus != 0
|
153
|
-
raise "vagrant destroy failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
154
|
-
end
|
155
|
-
end
|
156
|
-
end
|
157
|
-
specs_and_options.each_pair do |spec, options|
|
158
|
-
convergence_strategy_for(spec, options).cleanup_convergence(action_handler, spec)
|
159
132
|
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
133
|
+
def destroy_machines(action_handler, specs_and_options, parallelizer)
|
134
|
+
all_names = []
|
135
|
+
all_status = []
|
136
|
+
all_outputs = {}
|
137
|
+
specs_and_options.each_key do |spec|
|
138
|
+
if spec.location
|
139
|
+
vm_name = spec.location['vm_name']
|
140
|
+
current_status = vagrant_status(vm_name)
|
141
|
+
if current_status != 'not created'
|
142
|
+
all_names.push(vm_name)
|
143
|
+
all_status.push(current_status)
|
144
|
+
end
|
145
|
+
end
|
146
|
+
end
|
147
|
+
if all_names.length > 0
|
148
|
+
names = all_names.join(" ")
|
149
|
+
statuses = all_status.join(", ")
|
150
|
+
action_handler.perform_action "run vagrant destroy -f #{names} (status was '#{statuses}')" do
|
151
|
+
result = shell_out("vagrant destroy -f #{names}", :cwd => cluster_path)
|
152
|
+
if result.exitstatus != 0
|
153
|
+
raise "vagrant destroy failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
154
|
+
end
|
155
|
+
end
|
156
|
+
end
|
157
|
+
specs_and_options.each_pair do |spec, options|
|
158
|
+
convergence_strategy_for(spec, options).cleanup_convergence(action_handler, spec)
|
159
|
+
|
160
|
+
vm_file_path = spec.location['vm_file_path']
|
161
|
+
Chef::Provisioning.inline_resource(action_handler) do
|
162
|
+
file vm_file_path do
|
163
|
+
action :delete
|
164
|
+
end
|
165
|
+
end
|
166
|
+
yield spec if block_given?
|
164
167
|
end
|
165
168
|
end
|
166
|
-
yield spec if block_given?
|
167
|
-
end
|
168
|
-
end
|
169
169
|
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
170
|
+
def stop_machines(action_handler, specs_and_options, parallelizer)
|
171
|
+
all_names = []
|
172
|
+
specs_and_options.each_key do |spec|
|
173
|
+
if spec.location
|
174
|
+
vm_name = spec.location['vm_name']
|
175
|
+
current_status = vagrant_status(vm_name)
|
176
|
+
if current_status == 'running'
|
177
|
+
all_names.push(vm_name)
|
178
|
+
end
|
179
|
+
end
|
178
180
|
end
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
181
|
+
if all_names.length > 0
|
182
|
+
names = all_names.join(" ")
|
183
|
+
action_handler.perform_action "run vagrant halt #{names} (status was 'running')" do
|
184
|
+
result = shell_out("vagrant halt #{names}", :cwd => cluster_path)
|
185
|
+
if result.exitstatus != 0
|
186
|
+
raise "vagrant halt failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
187
|
+
end
|
188
|
+
end
|
187
189
|
end
|
188
190
|
end
|
189
|
-
end
|
190
|
-
end
|
191
|
-
|
192
|
-
# Used by vagrant_cluster and machine to get the string used to configure vagrant
|
193
|
-
def self.vagrant_config_string(vagrant_config, variable, line_prefix)
|
194
|
-
hostname = name.gsub(/[^A-Za-z0-9\-]/, '-')
|
195
|
-
|
196
|
-
result = ''
|
197
|
-
vagrant_config.each_pair do |key, value|
|
198
|
-
result += "#{line_prefix}#{variable}.#{key} = #{value.inspect}\n"
|
199
|
-
end
|
200
|
-
result
|
201
|
-
end
|
202
191
|
|
203
|
-
|
204
|
-
|
205
|
-
|
192
|
+
# Used by vagrant_cluster and machine to get the string used to configure vagrant
|
193
|
+
def self.vagrant_config_string(vagrant_config, variable, line_prefix)
|
194
|
+
hostname = name.gsub(/[^A-Za-z0-9\-]/, '-')
|
195
|
+
result = ''
|
196
|
+
vagrant_config.each_pair do |key, value|
|
197
|
+
result += "#{line_prefix}#{variable}.#{key} = #{value.inspect}\n"
|
198
|
+
end
|
199
|
+
result
|
200
|
+
end
|
206
201
|
|
207
|
-
|
202
|
+
def driver_url
|
203
|
+
"vagrant:#{cluster_path}"
|
204
|
+
end
|
208
205
|
|
209
|
-
|
210
|
-
_cluster_path = cluster_path
|
211
|
-
Chef::Provisioning.inline_resource(action_handler) do
|
212
|
-
vagrant_cluster _cluster_path
|
213
|
-
end
|
214
|
-
end
|
206
|
+
protected
|
215
207
|
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
if machine_options[:vagrant_options]
|
222
|
-
merged_vagrant_options = Cheffish::MergedConfig.new(machine_options[:vagrant_options], merged_vagrant_options)
|
223
|
-
end
|
224
|
-
merged_vagrant_options.each_pair do |key, value|
|
225
|
-
vm_file_content << " config.#{key} = #{value.inspect}\n"
|
226
|
-
end
|
227
|
-
vm_file_content << machine_options[:vagrant_config] if machine_options[:vagrant_config]
|
228
|
-
vm_file_content << " end\nend\n"
|
229
|
-
|
230
|
-
# Set up vagrant file
|
231
|
-
Chef::Provisioning.inline_resource(action_handler) do
|
232
|
-
file vm_file_path do
|
233
|
-
content vm_file_content
|
234
|
-
action :create
|
208
|
+
def ensure_vagrant_cluster(action_handler)
|
209
|
+
_cluster_path = cluster_path
|
210
|
+
Chef::Provisioning.inline_resource(action_handler) do
|
211
|
+
vagrant_cluster _cluster_path
|
212
|
+
end
|
235
213
|
end
|
236
|
-
end
|
237
|
-
end
|
238
214
|
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
|
252
|
-
|
215
|
+
def create_vm_file(action_handler, vm_name, vm_file_path, machine_options)
|
216
|
+
# Determine contents of vm file
|
217
|
+
vm_file_content = "Vagrant.configure('2') do |outer_config|\n"
|
218
|
+
vm_file_content << " outer_config.vm.define #{vm_name.inspect} do |config|\n"
|
219
|
+
merged_vagrant_options = { 'vm.hostname' => vm_name }
|
220
|
+
if machine_options[:vagrant_options]
|
221
|
+
merged_vagrant_options = Cheffish::MergedConfig.new(machine_options[:vagrant_options], merged_vagrant_options)
|
222
|
+
end
|
223
|
+
merged_vagrant_options.each_pair do |key, value|
|
224
|
+
if key == 'vm.network'
|
225
|
+
vm_file_content << " config.#{key}(#{value})\n"
|
226
|
+
else
|
227
|
+
vm_file_content << " config.#{key} = #{value.inspect}\n"
|
228
|
+
end
|
229
|
+
end
|
230
|
+
vm_file_content << machine_options[:vagrant_config] if machine_options[:vagrant_config]
|
231
|
+
vm_file_content << " end\nend\n"
|
232
|
+
|
233
|
+
# Set up vagrant file
|
234
|
+
Chef::Provisioning.inline_resource(action_handler) do
|
235
|
+
file vm_file_path do
|
236
|
+
content vm_file_content
|
237
|
+
action :create
|
238
|
+
end
|
253
239
|
end
|
254
|
-
parse_vagrant_up(result.stdout, machine_spec)
|
255
240
|
end
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
|
241
|
+
|
242
|
+
def start_machine(action_handler, machine_spec, machine_options)
|
243
|
+
vm_name = machine_spec.location['vm_name']
|
244
|
+
vm_provider = machine_options.has_key?(:vagrant_provider) ? machine_options[:vagrant_provider] : 'virtualbox'
|
245
|
+
up_timeout = machine_options[:up_timeout] || 10*60
|
246
|
+
current_status = vagrant_status(vm_name)
|
247
|
+
vm_file_updated = machine_spec.location['needs_reload']
|
248
|
+
machine_spec.location['needs_reload'] = false
|
249
|
+
if current_status != 'running'
|
250
|
+
# Run vagrant up if vm is not running
|
251
|
+
action_handler.perform_action "run vagrant up #{vm_name} --provider #{vm_provider} (status was '#{current_status}')" do
|
252
|
+
result = shell_out("vagrant up #{vm_name} --provider #{vm_provider}", :cwd => cluster_path,
|
253
|
+
:timeout => up_timeout)
|
254
|
+
if result.exitstatus != 0
|
255
|
+
raise "vagrant up #{vm_name} --provider #{vm_provider} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
256
|
+
end
|
257
|
+
parse_vagrant_up(result.stdout, machine_spec)
|
258
|
+
end
|
259
|
+
elsif vm_file_updated
|
260
|
+
# Run vagrant reload if vm is running and vm file changed
|
261
|
+
action_handler.perform_action "run vagrant reload #{vm_name}" do
|
262
|
+
result = shell_out("vagrant reload #{vm_name}", :cwd => cluster_path,
|
263
|
+
:timeout => up_timeout)
|
264
|
+
if result.exitstatus != 0
|
265
|
+
raise "vagrant reload #{vm_name} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
266
|
+
end
|
267
|
+
parse_vagrant_up(result.stdout, machine_spec)
|
268
|
+
end
|
263
269
|
end
|
264
|
-
parse_vagrant_up(result.stdout, machine_spec)
|
265
270
|
end
|
266
|
-
end
|
267
|
-
end
|
268
271
|
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
|
285
|
-
|
286
|
-
|
287
|
-
|
288
|
-
|
289
|
-
|
272
|
+
def start_machines(action_handler, specs_and_options)
|
273
|
+
up_names = []
|
274
|
+
up_status = []
|
275
|
+
up_specs = {}
|
276
|
+
update_names = []
|
277
|
+
update_specs = {}
|
278
|
+
timeouts = []
|
279
|
+
specs_and_options.each_pair do |spec, options|
|
280
|
+
vm_name = spec.location['vm_name']
|
281
|
+
|
282
|
+
vm_file_updated = spec.location['needs_reload']
|
283
|
+
spec.location['needs_reload'] = false
|
284
|
+
|
285
|
+
current_status = vagrant_status(vm_name)
|
286
|
+
if current_status != 'running'
|
287
|
+
up_names.push(vm_name)
|
288
|
+
up_status.push(current_status)
|
289
|
+
up_specs[vm_name] = spec
|
290
|
+
elsif vm_file_updated
|
291
|
+
update_names.push(vm_name)
|
292
|
+
update_specs[vm_name] = spec
|
293
|
+
end
|
294
|
+
timeouts.push(options[:up_timeout])
|
295
|
+
end
|
296
|
+
# Use the highest timeout, if any exist
|
297
|
+
up_timeout = timeouts.compact.max
|
298
|
+
up_timeout ||= 10*60
|
299
|
+
if up_names.length > 0
|
300
|
+
# Run vagrant up if vm is not running
|
301
|
+
names = up_names.join(" ")
|
302
|
+
statuses = up_status.join(", ")
|
303
|
+
action_handler.perform_action "run vagrant up --parallel #{names} (status was '#{statuses}')" do
|
304
|
+
result = shell_out("vagrant up --parallel #{names}", :cwd => cluster_path,
|
305
|
+
:timeout => up_timeout)
|
306
|
+
if result.exitstatus != 0
|
307
|
+
raise "vagrant up #{names} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
308
|
+
end
|
309
|
+
parse_multi_vagrant_up(result.stdout, up_specs)
|
310
|
+
end
|
311
|
+
end
|
312
|
+
if update_names.length > 0
|
313
|
+
names = update_names.join(" ")
|
314
|
+
# Run vagrant reload if vm is running and vm file changed
|
315
|
+
action_handler.perform_action "run vagrant reload #{names}" do
|
316
|
+
result = shell_out("vagrant reload #{names}", :cwd => cluster_path,
|
317
|
+
:timeout => up_timeout)
|
318
|
+
if result.exitstatus != 0
|
319
|
+
raise "vagrant reload #{names} failed!\nSTDOUT:#{result.stdout}\nSTDERR:#{result.stderr}"
|
320
|
+
end
|
321
|
+
parse_multi_vagrant_up(result.stdout, update_specs)
|
322
|
+
end
|
323
|
+
end
|
290
324
|
end
|
291
|
-
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
325
|
+
|
326
|
+
def parse_vagrant_up(output, machine_spec)
|
327
|
+
# Grab forwarded port info
|
328
|
+
machine_spec.location['forwarded_ports'] = {}
|
329
|
+
in_forwarding_ports = false
|
330
|
+
output.lines.each do |line|
|
331
|
+
if in_forwarding_ports
|
332
|
+
if line =~ /-- (\d+) => (\d+)/
|
333
|
+
machine_spec.location['forwarded_ports'][$1] = $2
|
334
|
+
else
|
335
|
+
in_forwarding_ports = false
|
336
|
+
end
|
337
|
+
elsif line =~ /Forwarding ports...$/
|
338
|
+
in_forwarding_ports = true
|
339
|
+
end
|
305
340
|
end
|
306
|
-
parse_multi_vagrant_up(result.stdout, up_specs)
|
307
341
|
end
|
308
|
-
|
309
|
-
|
310
|
-
|
311
|
-
|
312
|
-
|
313
|
-
|
314
|
-
|
315
|
-
|
316
|
-
|
342
|
+
|
343
|
+
def parse_multi_vagrant_up(output, all_machine_specs)
|
344
|
+
# Grab forwarded port info
|
345
|
+
in_forwarding_ports = {}
|
346
|
+
all_machine_specs.each_pair do |key, spec|
|
347
|
+
spec.location['forwarded_ports'] = {}
|
348
|
+
in_forwarding_ports[key] = false
|
349
|
+
end
|
350
|
+
output.lines.each do |line|
|
351
|
+
/^\[(.*?)\]/.match(line)
|
352
|
+
node_name = $1
|
353
|
+
if in_forwarding_ports[node_name]
|
354
|
+
if line =~ /-- (\d+) => (\d+)/
|
355
|
+
spec = all_machine_specs[node_name]
|
356
|
+
spec.location['forwarded_ports'][$1] = $2
|
357
|
+
else
|
358
|
+
in_forwarding_ports[node_name] = false
|
359
|
+
end
|
360
|
+
elsif line =~ /Forwarding ports...$/
|
361
|
+
in_forwarding_ports[node_name] = true
|
362
|
+
end
|
317
363
|
end
|
318
|
-
parse_multi_vagrant_up(result.stdout, update_specs)
|
319
364
|
end
|
320
|
-
end
|
321
|
-
end
|
322
365
|
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
output.lines.each do |line|
|
328
|
-
if in_forwarding_ports
|
329
|
-
if line =~ /-- (\d+) => (\d+)/
|
330
|
-
machine_spec.location['forwarded_ports'][$1] = $2
|
366
|
+
def machine_for(machine_spec, machine_options)
|
367
|
+
if machine_spec.location['vm.guest'].to_s == 'windows'
|
368
|
+
Chef::Provisioning::Machine::WindowsMachine.new(machine_spec, transport_for(machine_spec),
|
369
|
+
convergence_strategy_for(machine_spec, machine_options))
|
331
370
|
else
|
332
|
-
|
371
|
+
Chef::Provisioning::Machine::UnixMachine.new(machine_spec, transport_for(machine_spec),
|
372
|
+
convergence_strategy_for(machine_spec, machine_options))
|
333
373
|
end
|
334
|
-
elsif line =~ /Forwarding ports...$/
|
335
|
-
in_forwarding_ports = true
|
336
374
|
end
|
337
|
-
end
|
338
|
-
end
|
339
375
|
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
spec.location['forwarded_ports'] = {}
|
345
|
-
in_forwarding_ports[key] = false
|
346
|
-
end
|
347
|
-
output.lines.each do |line|
|
348
|
-
/^\[(.*?)\]/.match(line)
|
349
|
-
node_name = $1
|
350
|
-
if in_forwarding_ports[node_name]
|
351
|
-
if line =~ /-- (\d+) => (\d+)/
|
352
|
-
spec = all_machine_specs[node_name]
|
353
|
-
spec.location['forwarded_ports'][$1] = $2
|
376
|
+
def convergence_strategy_for(machine_spec, machine_options)
|
377
|
+
if machine_spec.location['vm.guest'].to_s == 'windows'
|
378
|
+
Chef::Provisioning::ConvergenceStrategy::InstallMsi.
|
379
|
+
new(machine_options[:convergence_options], config)
|
354
380
|
else
|
355
|
-
|
381
|
+
Chef::Provisioning::ConvergenceStrategy::InstallCached.
|
382
|
+
new(machine_options[:convergence_options], config)
|
356
383
|
end
|
357
|
-
elsif line =~ /Forwarding ports...$/
|
358
|
-
in_forwarding_ports[node_name] = true
|
359
384
|
end
|
360
|
-
end
|
361
|
-
end
|
362
385
|
|
363
|
-
|
364
|
-
|
365
|
-
|
366
|
-
|
367
|
-
|
368
|
-
|
369
|
-
convergence_strategy_for(machine_spec, machine_options))
|
370
|
-
end
|
371
|
-
end
|
372
|
-
|
373
|
-
def convergence_strategy_for(machine_spec, machine_options)
|
374
|
-
if machine_spec.location['vm.guest'].to_s == 'windows'
|
375
|
-
@windows_convergence_strategy ||= begin
|
376
|
-
Chef::Provisioning::ConvergenceStrategy::InstallMsi.
|
377
|
-
new(machine_options[:convergence_options], config)
|
378
|
-
end
|
379
|
-
else
|
380
|
-
@unix_convergence_strategy ||= begin
|
381
|
-
Chef::Provisioning::ConvergenceStrategy::InstallCached.
|
382
|
-
new(machine_options[:convergence_options], config)
|
386
|
+
def transport_for(machine_spec)
|
387
|
+
if machine_spec.location['vm.guest'].to_s == 'windows'
|
388
|
+
create_winrm_transport(machine_spec)
|
389
|
+
else
|
390
|
+
create_ssh_transport(machine_spec)
|
391
|
+
end
|
383
392
|
end
|
384
|
-
end
|
385
|
-
end
|
386
393
|
|
387
|
-
|
388
|
-
|
389
|
-
|
390
|
-
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
def vagrant_status(name)
|
396
|
-
status_output = shell_out("vagrant status #{name}", :cwd => cluster_path).stdout
|
397
|
-
if status_output =~ /^#{name}\s+(.+)\s+\((.+)\)/
|
398
|
-
$1
|
399
|
-
else
|
400
|
-
'not created'
|
401
|
-
end
|
402
|
-
end
|
394
|
+
def vagrant_status(name)
|
395
|
+
status_output = shell_out("vagrant status #{name}", :cwd => cluster_path).stdout
|
396
|
+
if status_output =~ /^#{name}\s+(.+)\s+\((.+)\)/
|
397
|
+
$1
|
398
|
+
else
|
399
|
+
'not created'
|
400
|
+
end
|
401
|
+
end
|
403
402
|
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
|
408
|
-
|
409
|
-
|
410
|
-
|
411
|
-
|
412
|
-
|
413
|
-
|
414
|
-
|
415
|
-
|
416
|
-
|
417
|
-
|
418
|
-
|
419
|
-
|
420
|
-
|
403
|
+
def create_winrm_transport(machine_spec)
|
404
|
+
forwarded_ports = machine_spec.location['forwarded_ports']
|
405
|
+
|
406
|
+
# TODO IPv6 loopback? What do we do for that?
|
407
|
+
hostname = machine_spec.location['winrm.host'] || '127.0.0.1'
|
408
|
+
port = machine_spec.location['winrm.port'] || 5985
|
409
|
+
port = forwarded_ports[port] if forwarded_ports[port]
|
410
|
+
endpoint = "http://#{hostname}:#{port}/wsman"
|
411
|
+
type = :plaintext
|
412
|
+
options = {
|
413
|
+
:user => machine_spec.location['winrm.username'] || 'vagrant',
|
414
|
+
:pass => machine_spec.location['winrm.password'] || 'vagrant',
|
415
|
+
:disable_sspi => true
|
416
|
+
}
|
417
|
+
|
418
|
+
Chef::Provisioning::Transport::WinRM.new(endpoint, type, options)
|
419
|
+
end
|
421
420
|
|
422
|
-
|
423
|
-
|
424
|
-
|
425
|
-
|
426
|
-
|
427
|
-
|
428
|
-
|
429
|
-
|
430
|
-
|
431
|
-
|
432
|
-
|
433
|
-
|
434
|
-
|
435
|
-
|
436
|
-
|
437
|
-
|
438
|
-
|
439
|
-
|
421
|
+
def create_ssh_transport(machine_spec)
|
422
|
+
vagrant_ssh_config = vagrant_ssh_config_for(machine_spec)
|
423
|
+
hostname = vagrant_ssh_config['HostName']
|
424
|
+
username = vagrant_ssh_config['User']
|
425
|
+
ssh_options = {
|
426
|
+
:port => vagrant_ssh_config['Port'],
|
427
|
+
:auth_methods => ['publickey'],
|
428
|
+
:user_known_hosts_file => vagrant_ssh_config['UserKnownHostsFile'],
|
429
|
+
:paranoid => yes_or_no(vagrant_ssh_config['StrictHostKeyChecking']),
|
430
|
+
:keys => [ strip_quotes(vagrant_ssh_config['IdentityFile']) ],
|
431
|
+
:keys_only => yes_or_no(vagrant_ssh_config['IdentitiesOnly'])
|
432
|
+
}
|
433
|
+
ssh_options[:auth_methods] = %w(password) if yes_or_no(vagrant_ssh_config['PasswordAuthentication'])
|
434
|
+
options = {
|
435
|
+
:prefix => 'sudo '
|
436
|
+
}
|
437
|
+
Chef::Provisioning::Transport::SSH.new(hostname, username, ssh_options, options, config)
|
438
|
+
end
|
440
439
|
|
441
|
-
|
442
|
-
|
443
|
-
|
444
|
-
|
445
|
-
|
446
|
-
|
447
|
-
|
448
|
-
|
449
|
-
|
450
|
-
|
440
|
+
# Runs `vagrant ssh-config` for the machine and parses its output into a
# Hash of directive name => value (e.g. 'HostName' => '127.0.0.1').
#
# @param machine_spec [Object] spec whose +location+ hash contains 'vm_name'
# @return [Hash<String, String>] parsed ssh-config directives
def vagrant_ssh_config_for(machine_spec)
  vagrant_ssh_config = {}
  result = shell_out("vagrant ssh-config #{machine_spec.location['vm_name']}",
                     :cwd => cluster_path)
  # The original used `inject({})` but ignored the accumulator (its block
  # param also shadowed `result`); a plain `each` says what is meant.
  result.stdout.lines.each do |line|
    # Only record lines that look like "Directive value"; previously a
    # non-matching line (e.g. a blank line) inserted a nil => nil entry.
    if line =~ /^\s*(\S+)\s+(.+?)(\r\n|\r|\n|\z)/
      vagrant_ssh_config[$1] = $2
    end
  end
  vagrant_ssh_config
end
|
451
450
|
|
452
|
-
|
453
|
-
|
454
|
-
|
455
|
-
|
456
|
-
|
457
|
-
|
458
|
-
|
459
|
-
|
451
|
+
# Converts an OpenSSH-style "yes"/"no" directive value into a boolean.
# Only the exact string 'yes' is true; any other value (including nil,
# 'no', or unexpected input) is false.
def yes_or_no(str)
  str == 'yes'
end
|
460
459
|
|
461
|
-
|
462
|
-
|
463
|
-
|
464
|
-
|
465
|
-
|
460
|
+
# Removes one pair of surrounding double quotes from +str+ if present
# (ssh-config quotes paths containing spaces, e.g. IdentityFile).
# Returns the string unchanged when it is not fully quoted; a lone '"'
# is not treated as quoted.
def strip_quotes(str)
  quoted = str.size >= 2 && str.start_with?('"') && str.end_with?('"')
  quoted ? str[1..-2] : str
end
|
466
467
|
end
|
467
468
|
end
|
468
469
|
end
|
469
470
|
end
|
470
|
-
end
|
471
|
-
end
|
@@ -9,6 +9,7 @@ class Chef::Resource::VagrantBox < Chef::Resource::LWRPBase
|
|
9
9
|
|
10
10
|
attribute :name, :kind_of => String, :name_attribute => true
|
11
11
|
attribute :url, :kind_of => String
|
12
|
+
attribute :vagrant_provider, :kind_of => String, :default => 'virtualbox'
|
12
13
|
attribute :driver_options, :kind_of => Hash
|
13
14
|
|
14
15
|
def after_created
|
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
# Spec bootstrap: loads the shared Vagrant spec helpers and applies the
# project's RSpec expectation/mock settings.
require 'vagrant_support'

RSpec.configure do |config|
  config.expect_with :rspec do |expectations|
    # This option will default to `true` in RSpec 4. It makes the `description`
    # and `failure_message` of custom matchers include text for helper methods
    # defined using `chain`, e.g.:
    # be_bigger_than(2).and_smaller_than(4).description
    # # => "be bigger than 2 and smaller than 4"
    # ...rather than:
    # # => "be bigger than 2"
    expectations.include_chain_clauses_in_custom_matcher_descriptions = true
  end

  config.mock_with :rspec do |mocks|
    # Partial doubles may only stub methods that actually exist on the object.
    mocks.verify_partial_doubles = true
  end

  # Run only examples tagged :focus when any exist; otherwise run the full suite.
  config.filter_run :focus
  config.run_all_when_everything_filtered = true
end
|
@@ -0,0 +1,13 @@
|
|
1
|
+
# Top-level integration spec scaffold: spins up a Chef 12 server (chef-zero on
# a port in 8900..9000) and loads the Vagrant driver via `with_vagrant`, but
# currently contains only a placeholder example.
describe "Chef::Provisioning::Vagrant" do
  extend VagrantSupport
  # include VagrantConfig # uncomment to get `chef_config` or to mix in code.

  when_the_chef_12_server "exists", server_scope: :context, port: 8900..9000 do
    with_vagrant "integration tests" do
      context "machine resource" do
        it "doesn't run any tests" do
        end
      end
    end
  end
end
|
@@ -0,0 +1,53 @@
|
|
1
|
+
# Spec-support mixin: wires RSpec example groups up to Cheffish's chef-zero
# helpers and the Vagrant driver.
module VagrantSupport

  # your top-level context blocks will use this file like so:
  # require "vagrant_support"
  #
  # describe "Chef::Provisioning::Vagrant" do
  #   extend VagrantSupport
  #   include VagrantConfig # optional, gives you a `chef_config` object.

  require 'cheffish/rspec/chef_run_support'

  # when you `extend VagrantSupport`, your RSpec-context-extending-`VagrantSupport` will then
  # further `extend ChefRunSupport` to acquire all of the latter's Lucky Charms
  # (`when_the_repository`, etc.).
  def self.extended(other)
    other.extend Cheffish::RSpec::ChefRunSupport
  end

  # this creates a `with_vagrant` block method that saves you the repetition of having to load the
  # driver code, and gives you a common place to put any other driver setup for your specs.
  #
  # subtle stuff here. it looks weird because you're taking a block and putting that inside a new block and
  # then giving *that* to a Cheffish method which will run it for you in the context of a local chef-zero.

  def with_vagrant(description, *tags, &block)

    # take the block you just passed in, and make a new Proc that will call it after loading the driver...
    context_block = proc do
      vagrant_driver = Chef::Provisioning.driver_for_url("vagrant")

      # NOTE(review): @@driver is a class variable on whichever example group
      # `module_eval`s this proc, shared down its inheritance tree — presumably
      # intentional so nested contexts see the same driver; confirm.
      @@driver = vagrant_driver
      def self.driver
        @@driver
      end

      # when this Proc runs, this will run the block you just passed to `with_vagrant`...
      module_eval(&block)
    end

    # ...now pass that Proc to `Cheffish::RSpec::ChefRunSupport#when_the_repository`, which will:
    # 1. start up a chef-zero with `*tags` as the parameters, and
    # 2. run your `context_block` Proc (which contains your original `&block`) using that chef-zero.
    when_the_repository "exists and #{description}", *tags, &context_block
  end
end
|
45
|
+
|
46
|
+
# Optional mixin providing a memoized chef_config hash for specs.
module VagrantConfig
  # Returns (and caches) a config hash whose :driver is the Vagrant driver.
  def chef_config
    return @chef_config if @chef_config
    @chef_config = {
      driver: Chef::Provisioning.driver_for_url("vagrant"),
    }
  end
end
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: chef-provisioning-vagrant
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.
|
4
|
+
version: 0.9.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- John Keiser
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2015-
|
11
|
+
date: 2015-08-05 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: chef
|
@@ -85,7 +85,10 @@ files:
|
|
85
85
|
- lib/chef/provisioning/vagrant_driver/version.rb
|
86
86
|
- lib/chef/resource/vagrant_box.rb
|
87
87
|
- lib/chef/resource/vagrant_cluster.rb
|
88
|
-
|
88
|
+
- spec/spec_helper.rb
|
89
|
+
- spec/vagrant_spec.rb
|
90
|
+
- spec/vagrant_support.rb
|
91
|
+
homepage: https://github.com/chef/chef-provisioning-vagrant
|
89
92
|
licenses: []
|
90
93
|
metadata: {}
|
91
94
|
post_install_message:
|
@@ -104,9 +107,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
|
|
104
107
|
version: '0'
|
105
108
|
requirements: []
|
106
109
|
rubyforge_project:
|
107
|
-
rubygems_version: 2.4.
|
110
|
+
rubygems_version: 2.4.7
|
108
111
|
signing_key:
|
109
112
|
specification_version: 4
|
110
113
|
summary: Driver for creating Vagrant instances in Chef Provisioning.
|
111
114
|
test_files: []
|
112
|
-
has_rdoc:
|