vagrant-mcs 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +21 -0
- data/.rspec +1 -0
- data/CHANGELOG.md +92 -0
- data/Gemfile +12 -0
- data/LICENSE +8 -0
- data/README.md +292 -0
- data/Rakefile +21 -0
- data/dummy.box +0 -0
- data/example_box/README.md +13 -0
- data/example_box/mcs.box +0 -0
- data/example_box/metadata.json +3 -0
- data/lib/vagrant-mcs/action/connect_mcs.rb +51 -0
- data/lib/vagrant-mcs/action/elb_deregister_instance.rb +24 -0
- data/lib/vagrant-mcs/action/elb_register_instance.rb +24 -0
- data/lib/vagrant-mcs/action/is_created.rb +18 -0
- data/lib/vagrant-mcs/action/is_stopped.rb +18 -0
- data/lib/vagrant-mcs/action/message_already_created.rb +16 -0
- data/lib/vagrant-mcs/action/message_not_created.rb +16 -0
- data/lib/vagrant-mcs/action/message_will_not_destroy.rb +16 -0
- data/lib/vagrant-mcs/action/package_instance.rb +192 -0
- data/lib/vagrant-mcs/action/read_ssh_info.rb +53 -0
- data/lib/vagrant-mcs/action/read_state.rb +38 -0
- data/lib/vagrant-mcs/action/run_instance.rb +274 -0
- data/lib/vagrant-mcs/action/start_instance.rb +81 -0
- data/lib/vagrant-mcs/action/stop_instance.rb +28 -0
- data/lib/vagrant-mcs/action/terminate_instance.rb +51 -0
- data/lib/vagrant-mcs/action/timed_provision.rb +21 -0
- data/lib/vagrant-mcs/action/wait_for_state.rb +41 -0
- data/lib/vagrant-mcs/action/warn_networks.rb +19 -0
- data/lib/vagrant-mcs/action.rb +210 -0
- data/lib/vagrant-mcs/config.rb +405 -0
- data/lib/vagrant-mcs/errors.rb +43 -0
- data/lib/vagrant-mcs/plugin.rb +73 -0
- data/lib/vagrant-mcs/provider.rb +50 -0
- data/lib/vagrant-mcs/util/elb.rb +56 -0
- data/lib/vagrant-mcs/util/timer.rb +17 -0
- data/lib/vagrant-mcs/version.rb +5 -0
- data/lib/vagrant-mcs.rb +18 -0
- data/locales/en.yml +153 -0
- data/mcs.box +0 -0
- data/spec/spec_helper.rb +1 -0
- data/spec/vagrant-aws/config_spec.rb +225 -0
- data/templates/metadata.json.erb +3 -0
- data/templates/vagrant-aws_package_Vagrantfile.erb +5 -0
- data/vagrant-mcs.gemspec +60 -0
- metadata +172 -0
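
These action classes follow the vagrant-aws provider layout (the spec and template files above still carry the vagrant-aws name): config.rb holds per-region options such as ami, instance_type, keypair_name and security_groups, and the actions below drive them through the Fog compute API. As a rough orientation only, a Vagrantfile for this provider might look like the sketch below; the :mcs provider name and exact option names are assumptions inferred from run_instance.rb, so data/README.md in the package remains the authoritative reference.

# Illustrative sketch, not part of the packaged files. Provider name and
# option names are inferred from run_instance.rb (ami, region, instance_type,
# keypair_name, security_groups) rather than taken from the README.
Vagrant.configure("2") do |config|
  config.vm.box = "dummy"

  config.vm.provider :mcs do |mcs, override|
    mcs.region          = "some-region"      # placeholder region name
    mcs.ami             = "ami-xxxxxxxx"     # placeholder image ID
    mcs.instance_type   = "m1.small"         # passed to Fog as :flavor_id
    mcs.keypair_name    = "my-keypair"       # keypair registered with the cloud
    mcs.security_groups = ["default"]

    override.ssh.username         = "ubuntu"                 # assumed guest user
    override.ssh.private_key_path = "~/.ssh/my-keypair.pem"  # assumed key path
  end
end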
data/lib/vagrant-mcs/action/read_state.rb
@@ -0,0 +1,38 @@
+require "log4r"
+
+module VagrantPlugins
+  module MCS
+    module Action
+      # This action reads the state of the machine and puts it in the
+      # `:machine_state_id` key in the environment.
+      class ReadState
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant_mcs::action::read_state")
+        end
+
+        def call(env)
+          env[:machine_state_id] = read_state(env[:mcs_compute], env[:machine])
+
+          @app.call(env)
+        end
+
+        def read_state(mcs, machine)
+          return :not_created if machine.id.nil?
+
+          # Find the machine
+          server = mcs.servers.get(machine.id)
+          if server.nil? || [:"shutting-down", :terminated].include?(server.state.to_sym)
+            # The machine can't be found
+            @logger.info("Machine not found or terminated, assuming it got destroyed.")
+            machine.id = nil
+            return :not_created
+          end
+
+          # Return the state
+          return server.state.to_sym
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-mcs/action/run_instance.rb
@@ -0,0 +1,274 @@
+require "log4r"
+require 'json'
+
+require 'vagrant/util/retryable'
+
+require 'vagrant-mcs/util/timer'
+
+module VagrantPlugins
+  module MCS
+    module Action
+      # This runs the configured instance.
+      class RunInstance
+        include Vagrant::Util::Retryable
+
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant_mcs::action::run_instance")
+        end
+
+        def call(env)
+          # Initialize metrics if they haven't been
+          env[:metrics] ||= {}
+
+          # Get the region we're going to booting up in
+          region = env[:machine].provider_config.region
+
+          # Get the configs
+          region_config = env[:machine].provider_config.get_region_config(region)
+          ami = region_config.ami
+          availability_zone = region_config.availability_zone
+          instance_type = region_config.instance_type
+          keypair = region_config.keypair_name
+          private_ip_address = region_config.private_ip_address
+          security_groups = region_config.security_groups
+          subnet_id = region_config.subnet_id
+          tags = region_config.tags
+          user_data = region_config.user_data
+          block_device_mapping = region_config.block_device_mapping
+          elastic_ip = region_config.elastic_ip
+          terminate_on_shutdown = region_config.terminate_on_shutdown
+          iam_instance_profile_arn = region_config.iam_instance_profile_arn
+          iam_instance_profile_name = region_config.iam_instance_profile_name
+          monitoring = region_config.monitoring
+          ebs_optimized = region_config.ebs_optimized
+          associate_public_ip = region_config.associate_public_ip
+
+          # If there is no keypair then warn the user
+          if !keypair
+            env[:ui].warn(I18n.t("vagrant_mcs.launch_no_keypair"))
+          end
+
+          # If there is a subnet ID then warn the user
+          if subnet_id && !elastic_ip
+            env[:ui].warn(I18n.t("vagrant_mcs.launch_vpc_warning"))
+          end
+
+          # Launch!
+          env[:ui].info(I18n.t("vagrant_mcs.launching_instance"))
+          env[:ui].info(" -- Type: #{instance_type}")
+          env[:ui].info(" -- AMI: #{ami}")
+          env[:ui].info(" -- Region: #{region}")
+          env[:ui].info(" -- Availability Zone: #{availability_zone}") if availability_zone
+          env[:ui].info(" -- Keypair: #{keypair}") if keypair
+          env[:ui].info(" -- Subnet ID: #{subnet_id}") if subnet_id
+          env[:ui].info(" -- IAM Instance Profile ARN: #{iam_instance_profile_arn}") if iam_instance_profile_arn
+          env[:ui].info(" -- IAM Instance Profile Name: #{iam_instance_profile_name}") if iam_instance_profile_name
+          env[:ui].info(" -- Private IP: #{private_ip_address}") if private_ip_address
+          env[:ui].info(" -- Elastic IP: #{elastic_ip}") if elastic_ip
+          env[:ui].info(" -- User Data: yes") if user_data
+          env[:ui].info(" -- Security Groups: #{security_groups.inspect}") if !security_groups.empty?
+          env[:ui].info(" -- User Data: #{user_data}") if user_data
+          env[:ui].info(" -- Block Device Mapping: #{block_device_mapping}") if block_device_mapping
+          env[:ui].info(" -- Terminate On Shutdown: #{terminate_on_shutdown}")
+          env[:ui].info(" -- Monitoring: #{monitoring}")
+          env[:ui].info(" -- EBS optimized: #{ebs_optimized}")
+          env[:ui].info(" -- Assigning a public IP address in a VPC: #{associate_public_ip}")
+
+          options = {
+            :availability_zone => availability_zone,
+            :flavor_id => instance_type,
+            :image_id => ami,
+            :key_name => keypair,
+            :private_ip_address => private_ip_address,
+            :subnet_id => subnet_id,
+            :iam_instance_profile_arn => iam_instance_profile_arn,
+            :iam_instance_profile_name => iam_instance_profile_name,
+            :tags => tags,
+            :user_data => user_data,
+            :block_device_mapping => block_device_mapping,
+            :instance_initiated_shutdown_behavior => terminate_on_shutdown == true ? "terminate" : nil,
+            :monitoring => monitoring,
+            :ebs_optimized => ebs_optimized,
+            :associate_public_ip => associate_public_ip
+          }
+          if !security_groups.empty?
+            security_group_key = options[:subnet_id].nil? ? :groups : :security_group_ids
+            options[security_group_key] = security_groups
+            env[:ui].warn(I18n.t("vagrant_mcs.warn_ssh_access")) unless allows_ssh_port?(env, security_groups, subnet_id)
+          end
+
+          begin
+            server = env[:mcs_compute].servers.create(options)
+          rescue Fog::Compute::MCS::NotFound => e
+            # Invalid subnet doesn't have its own error so we catch and
+            # check the error message here.
+            if e.message =~ /subnet ID/
+              raise Errors::FogError,
+                :message => "Subnet ID not found: #{subnet_id}"
+            end
+
+            raise
+          rescue Fog::Compute::MCS::Error => e
+            raise Errors::FogError, :message => e.message
+          rescue Excon::Errors::HTTPStatusError => e
+            raise Errors::InternalFogError,
+              :error => e.message,
+              :response => e.response.body
+          end
+
+          # Immediately save the ID since it is created at this point.
+          env[:machine].id = server.id
+
+          # Wait for the instance to be ready first
+          env[:metrics]["instance_ready_time"] = Util::Timer.time do
+            tries = region_config.instance_ready_timeout / 2
+
+            env[:ui].info(I18n.t("vagrant_mcs.waiting_for_ready"))
+            begin
+              retryable(:on => Fog::Errors::TimeoutError, :tries => tries) do
+                # If we're interrupted don't worry about waiting
+                next if env[:interrupted]
+
+                # Wait for the server to be ready
+                server.wait_for(2, 5) { ready? }
+              end
+            rescue Fog::Errors::TimeoutError
+              # Delete the instance
+              terminate(env)
+
+              # Notify the user
+              raise Errors::InstanceReadyTimeout,
+                timeout: region_config.instance_ready_timeout
+            end
+          end
+
+          @logger.info("Time to instance ready: #{env[:metrics]["instance_ready_time"]}")
+
+          # Allocate and associate an elastic IP if requested
+          if elastic_ip
+            domain = subnet_id ? 'vpc' : 'standard'
+            do_elastic_ip(env, domain, server, elastic_ip)
+          end
+
+          if !env[:interrupted]
+            env[:metrics]["instance_ssh_time"] = Util::Timer.time do
+              # Wait for SSH to be ready.
+              env[:ui].info(I18n.t("vagrant_mcs.waiting_for_ssh"))
+              while true
+                # If we're interrupted then just back out
+                break if env[:interrupted]
+                break if env[:machine].communicate.ready?
+                sleep 2
+              end
+            end
+
+            @logger.info("Time for SSH ready: #{env[:metrics]["instance_ssh_time"]}")
+
+            # Ready and booted!
+            env[:ui].info(I18n.t("vagrant_mcs.ready"))
+          end
+
+          # Terminate the instance if we were interrupted
+          terminate(env) if env[:interrupted]
+
+          @app.call(env)
+        end
+
+        def recover(env)
+          return if env["vagrant.error"].is_a?(Vagrant::Errors::VagrantError)
+
+          if env[:machine].provider.state.id != :not_created
+            # Undo the import
+            terminate(env)
+          end
+        end
+
+        def allows_ssh_port?(env, test_sec_groups, is_vpc)
+          port = 22 # TODO get ssh_info port
+          test_sec_groups = [ "default" ] if test_sec_groups.empty? # MCS default security group
+          # filter groups by name or group_id (vpc)
+          groups = test_sec_groups.map do |tsg|
+            env[:mcs_compute].security_groups.all.select { |sg| tsg == (is_vpc ? sg.group_id : sg.name) }
+          end.flatten
+          # filter TCP rules
+          rules = groups.map { |sg| sg.ip_permissions.select { |r| r["ipProtocol"] == "tcp" } }.flatten
+          # test if any range includes port
+          !rules.select { |r| (r["fromPort"]..r["toPort"]).include?(port) }.empty?
+        end
+
+        def do_elastic_ip(env, domain, server, elastic_ip)
+          if elastic_ip =~ /\d+\.\d+\.\d+\.\d+/
+            begin
+              address = env[:mcs_compute].addresses.get(elastic_ip)
+            rescue
+              handle_elastic_ip_error(env, "Could not retrieve Elastic IP: #{elastic_ip}")
+            end
+            if address.nil?
+              handle_elastic_ip_error(env, "Elastic IP not available: #{elastic_ip}")
+            end
+            @logger.debug("Public IP #{address.public_ip}")
+          else
+            begin
+              allocation = env[:mcs_compute].allocate_address(domain)
+            rescue
+              handle_elastic_ip_error(env, "Could not allocate Elastic IP.")
+            end
+            @logger.debug("Public IP #{allocation.body['publicIp']}")
+          end
+
+          # Associate the address and save the metadata to a hash
+          h = nil
+          if domain == 'vpc'
+            # VPC requires an allocation ID to assign an IP
+            if address
+              association = env[:mcs_compute].associate_address(server.id, nil, nil, address.allocation_id)
+            else
+              association = env[:mcs_compute].associate_address(server.id, nil, nil, allocation.body['allocationId'])
+              # Only store release data for an allocated address
+              h = { :allocation_id => allocation.body['allocationId'], :association_id => association.body['associationId'], :public_ip => allocation.body['publicIp'] }
+            end
+          else
+            # Standard EC2 instances only need the allocated IP address
+            if address
+              association = env[:mcs_compute].associate_address(server.id, address.public_ip)
+            else
+              association = env[:mcs_compute].associate_address(server.id, allocation.body['publicIp'])
+              h = { :public_ip => allocation.body['publicIp'] }
+            end
+          end
+
+          unless association.body['return']
+            @logger.debug("Could not associate Elastic IP.")
+            terminate(env)
+            raise Errors::FogError,
+              :message => "Could not allocate Elastic IP."
+          end
+
+          # Save this IP to the data dir so it can be released when the instance is destroyed
+          if h
+            ip_file = env[:machine].data_dir.join('elastic_ip')
+            ip_file.open('w+') do |f|
+              f.write(h.to_json)
+            end
+          end
+        end
+
+        def handle_elastic_ip_error(env, message)
+          @logger.debug(message)
+          terminate(env)
+          raise Errors::FogError,
+            :message => message
+        end
+
+        def terminate(env)
+          destroy_env = env.dup
+          destroy_env.delete(:interrupted)
+          destroy_env[:config_validate] = false
+          destroy_env[:force_confirm_destroy] = true
+          env[:action_runner].run(Action.action_destroy, destroy_env)
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-mcs/action/start_instance.rb
@@ -0,0 +1,81 @@
+require "log4r"
+
+require 'vagrant/util/retryable'
+
+require 'vagrant-mcs/util/timer'
+
+module VagrantPlugins
+  module MCS
+    module Action
+      # This starts a stopped instance.
+      class StartInstance
+        include Vagrant::Util::Retryable
+
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant_mcs::action::start_instance")
+        end
+
+        def call(env)
+          # Initialize metrics if they haven't been
+          env[:metrics] ||= {}
+
+          server = env[:mcs_compute].servers.get(env[:machine].id)
+
+          env[:ui].info(I18n.t("vagrant_mcs.starting"))
+
+          begin
+            server.start
+
+            region = env[:machine].provider_config.region
+            region_config = env[:machine].provider_config.get_region_config(region)
+
+            # Wait for the instance to be ready first
+            env[:metrics]["instance_ready_time"] = Util::Timer.time do
+              tries = region_config.instance_ready_timeout / 2
+
+              env[:ui].info(I18n.t("vagrant_mcs.waiting_for_ready"))
+              begin
+                retryable(:on => Fog::Errors::TimeoutError, :tries => tries) do
+                  # If we're interrupted don't worry about waiting
+                  next if env[:interrupted]
+
+                  # Wait for the server to be ready
+                  server.wait_for(2) { ready? }
+                end
+              rescue Fog::Errors::TimeoutError
+                # Notify the user
+                raise Errors::InstanceReadyTimeout,
+                  timeout: region_config.instance_ready_timeout
+              end
+            end
+          rescue Fog::Compute::MCS::Error => e
+            raise Errors::FogError, :message => e.message
+          end
+
+          @logger.info("Time to instance ready: #{env[:metrics]["instance_ready_time"]}")
+
+          if !env[:interrupted]
+            env[:metrics]["instance_ssh_time"] = Util::Timer.time do
+              # Wait for SSH to be ready.
+              env[:ui].info(I18n.t("vagrant_mcs.waiting_for_ssh"))
+              while true
+                # If we're interrupted then just back out
+                break if env[:interrupted]
+                break if env[:machine].communicate.ready?
+                sleep 2
+              end
+            end
+
+            @logger.info("Time for SSH ready: #{env[:metrics]["instance_ssh_time"]}")
+
+            # Ready and booted!
+            env[:ui].info(I18n.t("vagrant_mcs.ready"))
+          end
+
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-mcs/action/stop_instance.rb
@@ -0,0 +1,28 @@
+require "log4r"
+
+module VagrantPlugins
+  module MCS
+    module Action
+      # This stops the running instance.
+      class StopInstance
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant_mcs::action::stop_instance")
+        end
+
+        def call(env)
+          server = env[:mcs_compute].servers.get(env[:machine].id)
+
+          if env[:machine].state.id == :stopped
+            env[:ui].info(I18n.t("vagrant_mcs.already_status", :status => env[:machine].state.id))
+          else
+            env[:ui].info(I18n.t("vagrant_mcs.stopping"))
+            server.stop(!!env[:force_halt])
+          end
+
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
data/lib/vagrant-mcs/action/terminate_instance.rb
@@ -0,0 +1,51 @@
+require "log4r"
+require "json"
+
+module VagrantPlugins
+  module MCS
+    module Action
+      # This terminates the running instance.
+      class TerminateInstance
+        def initialize(app, env)
+          @app = app
+          @logger = Log4r::Logger.new("vagrant_mcs::action::terminate_instance")
+        end
+
+        def call(env)
+          server = env[:mcs_compute].servers.get(env[:machine].id)
+          region = env[:machine].provider_config.region
+          region_config = env[:machine].provider_config.get_region_config(region)
+
+          elastic_ip = region_config.elastic_ip
+
+          # Release the elastic IP
+          ip_file = env[:machine].data_dir.join('elastic_ip')
+          if ip_file.file?
+            release_address(env,ip_file.read)
+            ip_file.delete
+          end
+
+          # Destroy the server and remove the tracking ID
+          env[:ui].info(I18n.t("vagrant_mcs.terminating"))
+          server.destroy
+          env[:machine].id = nil
+
+          @app.call(env)
+        end
+
+        # Release an elastic IP address
+        def release_address(env,eip)
+          h = JSON.parse(eip)
+          # Use association_id and allocation_id for VPC, use public IP for EC2
+          if h['association_id']
+            env[:mcs_compute].disassociate_address(nil,h['association_id'])
+            env[:mcs_compute].release_address(h['allocation_id'])
+          else
+            env[:mcs_compute].disassociate_address(h['public_ip'])
+            env[:mcs_compute].release_address(h['public_ip'])
+          end
+        end
+      end
+    end
+  end
+end