appscale-tools 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +37 -0
- data/README +17 -0
- data/bin/appscale-add-keypair +15 -0
- data/bin/appscale-describe-instances +16 -0
- data/bin/appscale-remove-app +13 -0
- data/bin/appscale-reset-pwd +13 -0
- data/bin/appscale-run-instances +15 -0
- data/bin/appscale-terminate-instances +14 -0
- data/bin/appscale-upload-app +13 -0
- data/doc/AdvancedNode.html +163 -0
- data/doc/AppControllerClient.html +831 -0
- data/doc/AppEngineConfigException.html +165 -0
- data/doc/AppScaleException.html +165 -0
- data/doc/AppScaleTools.html +768 -0
- data/doc/BadCommandLineArgException.html +166 -0
- data/doc/BadConfigurationException.html +166 -0
- data/doc/CommonFunctions.html +2559 -0
- data/doc/EncryptionHelper.html +332 -0
- data/doc/GodInterface.html +443 -0
- data/doc/InfrastructureException.html +166 -0
- data/doc/Node.html +470 -0
- data/doc/NodeLayout.html +1297 -0
- data/doc/Object.html +539 -0
- data/doc/ParseArgs.html +268 -0
- data/doc/RemoteLogging.html +268 -0
- data/doc/SimpleNode.html +163 -0
- data/doc/UsageText.html +1204 -0
- data/doc/UserAppClient.html +993 -0
- data/doc/VMTools.html +1365 -0
- data/doc/bin/appscale-add-keypair.html +56 -0
- data/doc/bin/appscale-describe-instances.html +56 -0
- data/doc/bin/appscale-remove-app.html +56 -0
- data/doc/bin/appscale-reset-pwd.html +56 -0
- data/doc/bin/appscale-run-instances.html +56 -0
- data/doc/bin/appscale-terminate-instances.html +56 -0
- data/doc/bin/appscale-upload-app.html +56 -0
- data/doc/created.rid +21 -0
- data/doc/images/add.png +0 -0
- data/doc/images/brick.png +0 -0
- data/doc/images/brick_link.png +0 -0
- data/doc/images/bug.png +0 -0
- data/doc/images/bullet_black.png +0 -0
- data/doc/images/bullet_toggle_minus.png +0 -0
- data/doc/images/bullet_toggle_plus.png +0 -0
- data/doc/images/date.png +0 -0
- data/doc/images/delete.png +0 -0
- data/doc/images/find.png +0 -0
- data/doc/images/loadingAnimation.gif +0 -0
- data/doc/images/macFFBgHack.png +0 -0
- data/doc/images/package.png +0 -0
- data/doc/images/page_green.png +0 -0
- data/doc/images/page_white_text.png +0 -0
- data/doc/images/page_white_width.png +0 -0
- data/doc/images/plugin.png +0 -0
- data/doc/images/ruby.png +0 -0
- data/doc/images/tag_blue.png +0 -0
- data/doc/images/tag_green.png +0 -0
- data/doc/images/transparent.png +0 -0
- data/doc/images/wrench.png +0 -0
- data/doc/images/wrench_orange.png +0 -0
- data/doc/images/zoom.png +0 -0
- data/doc/index.html +116 -0
- data/doc/js/darkfish.js +153 -0
- data/doc/js/jquery.js +18 -0
- data/doc/js/navigation.js +142 -0
- data/doc/js/quicksearch.js +114 -0
- data/doc/js/search.js +94 -0
- data/doc/js/search_index.js +1 -0
- data/doc/js/searcher.js +228 -0
- data/doc/js/thickbox-compressed.js +10 -0
- data/doc/lib/app_controller_client_rb.html +60 -0
- data/doc/lib/appscale_tools_rb.html +88 -0
- data/doc/lib/common_functions_rb.html +78 -0
- data/doc/lib/custom_exceptions_rb.html +54 -0
- data/doc/lib/encryption_helper_rb.html +60 -0
- data/doc/lib/godinterface_rb.html +52 -0
- data/doc/lib/node_layout_rb.html +55 -0
- data/doc/lib/parse_args_rb.html +58 -0
- data/doc/lib/remote_log_rb.html +58 -0
- data/doc/lib/sshcopyid.html +174 -0
- data/doc/lib/usage_text_rb.html +58 -0
- data/doc/lib/user_app_client_rb.html +62 -0
- data/doc/lib/vm_tools_rb.html +62 -0
- data/doc/table_of_contents.html +496 -0
- data/lib/app_controller_client.rb +181 -0
- data/lib/appscale_tools.rb +403 -0
- data/lib/common_functions.rb +1467 -0
- data/lib/custom_exceptions.rb +25 -0
- data/lib/encryption_helper.rb +86 -0
- data/lib/godinterface.rb +152 -0
- data/lib/node_layout.rb +665 -0
- data/lib/parse_args.rb +415 -0
- data/lib/remote_log.rb +46 -0
- data/lib/sshcopyid +65 -0
- data/lib/usage_text.rb +144 -0
- data/lib/user_app_client.rb +245 -0
- data/lib/vm_tools.rb +549 -0
- data/test/tc_app_controller_client.rb +10 -0
- data/test/tc_appscale_add_keypair.rb +44 -0
- data/test/tc_appscale_describe_instances.rb +69 -0
- data/test/tc_appscale_remove_app.rb +128 -0
- data/test/tc_appscale_reset_pwd.rb +156 -0
- data/test/tc_appscale_run_instances.rb +48 -0
- data/test/tc_appscale_terminate_instances.rb +104 -0
- data/test/tc_appscale_upload_app.rb +166 -0
- data/test/tc_common_functions.rb +56 -0
- data/test/tc_encryption_helper.rb +10 -0
- data/test/tc_god_interface.rb +10 -0
- data/test/tc_node_layout.rb +93 -0
- data/test/tc_parse_args.rb +160 -0
- data/test/tc_user_app_client.rb +10 -0
- data/test/tc_vm_tools.rb +10 -0
- data/test/ts_all.rb +20 -0
- metadata +211 -0
data/lib/vm_tools.rb
ADDED
|
@@ -0,0 +1,549 @@
|
|
|
1
|
+
#!/usr/bin/ruby -w
|
|
2
|
+
# Programmer: Chris Bunch
|
|
3
|
+
|
|
4
|
+
require 'base64'
|
|
5
|
+
require 'socket'
|
|
6
|
+
|
|
7
|
+
require 'common_functions'
|
|
8
|
+
require 'custom_exceptions'
|
|
9
|
+
|
|
10
|
+
# The maximum amount of time we should wait for the first node in the system to
# go from 'pending' to 'running' and acquire a public IP. Our current VM size
# is about 10GB, so this means that in Eucalyptus it could take up to 30 minutes
# to start if the image wasn't cached.
# Value is in seconds (2700s == 45 minutes).
MAX_VM_CREATION_TIME = 2700

# The amount of time to sleep between invocations of ec2-describe-instances when
# starting up the first node in the system. This should definitely not be lower
# than 5 seconds, as Eucalyptus 2.0.3 or newer will interpret it as a possible
# replay attack.
SLEEP_TIME = 20

# The regular expressions to use to find the public and private IPs (or FQDNs in
# case of EC2).
# NOTE(review): FQDN_REGEX is very permissive (any run of word chars, dots and
# hyphens), so IP_OR_FQDN effectively matches almost any hostname-like token;
# the IP alternative is tried first.
IP_REGEX = /\d+\.\d+\.\d+\.\d+/
FQDN_REGEX = /[\w\d\.\-]+/
IP_OR_FQDN = /#{IP_REGEX}|#{FQDN_REGEX}/

# When running over a cloud infrastructure, the user must set the following
# environment variables
EC2_ENVIRONMENT_VARIABLES = ["EC2_PRIVATE_KEY", "EC2_CERT", "EC2_SECRET_KEY"] +
["EC2_ACCESS_KEY"]

# When using Eucalyptus in a hybrid cloud deployment, the user must set the
# following environment variables (prefixed by CLOUDX_, where X is an integer)
REQUIRED_EUCA_CREDS_FOR_HYBRID = ["TYPE", "EMI", "S3_URL", "EC2_URL"] +
["EC2_JVM_ARGS", "EUCALYPTUS_CERT", "EC2_USER_ID"] + EC2_ENVIRONMENT_VARIABLES

# When using EC2 in a hybrid cloud deployment, the user must set the
# following environment variables (prefixed by CLOUDX_, where X is an integer)
REQUIRED_EC2_CREDS_FOR_HYBRID = ["TYPE", "AMI", "S3_URL", "EC2_URL"] +
["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] + EC2_ENVIRONMENT_VARIABLES
|
|
42
|
+
|
|
43
|
+
module VMTools
|
|
44
|
+
# Builds the colon-separated role string for the initial node of a deployment.
# Only the "head node, not separate" configuration is implemented: the head
# node always runs shadow, load_balancer and db_master, and additionally
# appengine when it is the only node in the system.
#
# head_node:: true when laying out the first/controller node.
# separate:: unimplemented flag for running roles on separate machines.
# num_of_nodes:: currently unused by this method.
# total_nodes:: total machine count; 1 means the head node also serves apps.
#
# Raises NotImplementedError for the unimplemented configurations.
def self.get_initial_layout(head_node, separate, num_of_nodes, total_nodes)
  unless head_node
    raise NotImplementedError.new("haven't done slave nodes, separate yet")
  end
  if separate
    raise NotImplementedError.new("haven't done head node, separate yet")
  end

  roles = "shadow:load_balancer:db_master"
  roles << ":appengine" if total_nodes == 1
  return roles
end
|
|
58
|
+
|
|
59
|
+
# Code for local_ip taken from
# http://coderrr.wordpress.com/2008/05/28/get-your-local-ip-address/
#
# Determines this machine's outbound IP address by "connecting" a UDP socket
# to a remote address. No packet is actually sent: connect() on a datagram
# socket only selects the local interface, whose address is the last element
# of Socket#addr.
def self.local_ip
  UDPSocket.open { |sock|
    sock.connect("64.233.187.99", 1)
    sock.addr.last
  }
end
|
|
64
|
+
|
|
65
|
+
# Reads the EC2-style credentials AppScale needs out of the caller's
# environment.
#
# Returns a Hash whose keys are the lowercase credential names
# ("ec2_url", "ec2_access_key", "ec2_secret_key"), with "ec2_url"
# defaulting to the us-east-1 endpoint when EC2_URL is unset.
# Raises BadConfigurationException when a required variable is missing.
def self.get_creds_from_env
  creds = {}
  optional = {"ec2_url" => "https://us-east-1.ec2.amazonaws.com"}
  required = ["ec2_access_key", "ec2_secret_key"]

  optional.each { |k, v|
    # BUG FIX: the original `creds[k] = ENV[k.upcase] or v` parses as
    # `(creds[k] = ENV[k.upcase]) or v` because `or` binds looser than `=`,
    # so the default was never stored and creds[k] ended up nil. Use ||.
    creds[k] = ENV[k.upcase] || v
  }

  required.each { |var|
    if ENV[var.upcase]
      creds[var] = ENV[var.upcase]
    else
      raise BadConfigurationException.new("The required " +
        "environment variable #{var.upcase} was not set. Please set it " +
        "and try running AppScale again.")
    end
  }

  return creds
end
|
|
86
|
+
|
|
87
|
+
# Verifies that the cloud command-line tools (e.g. ec2-run-instances or
# euca-run-instances) that the rest of this module shells out to are present
# on the user's PATH, raising BadConfigurationException on the first one
# that is missing.
def self.ensure_tools_are_installed(infrastructure)
  ["-add-group", "-authorize", "-describe-instances", "-run-instances", "-terminate-instances"].each { |suffix|
    tool = "#{infrastructure}#{suffix}"
    next if CommonFunctions.user_has_cmd?(tool)
    raise BadConfigurationException.new("You do not appear to " +
      "have the command '#{tool}'. Please put it in your PATH and " +
      "try again.")
  }
end
|
|
98
|
+
|
|
99
|
+
# Authorizes all TCP and UDP ports plus all ICMP traffic from anywhere for
# the given security group, via the infrastructure's `-authorize` tool.
# Each tool invocation's output is echoed when verbose is truthy.
def self.open_ports_in_cloud(infrastructure, group, verbose)
  rules = [
    "-p 1-65535 -s 0.0.0.0/0 -P udp",
    "-p 1-65535 -s 0.0.0.0/0 -P tcp",
    "-s 0.0.0.0/0 -P icmp -t -1:-1"
  ]
  rules.each { |rule|
    output = CommonFunctions.shell("#{infrastructure}-authorize #{group} #{rule} 2>&1")
    puts output if verbose
  }
end
|
|
107
|
+
|
|
108
|
+
# Raises InfrastructureException if any currently running instance (as
# reported by the infrastructure's describe-instances tool) already uses the
# given keyname, since AppScale needs the keyname to uniquely identify its
# own machines.
def self.ensure_keyname_not_in_use(keyname, infrastructure)
  instances = CommonFunctions.shell("#{infrastructure}-describe-instances 2>&1")
  return unless instances =~ /\s#{keyname}\s/
  raise InfrastructureException.new("The keyname you chose is " +
    "already in use. Please choose another keyname and try again.")
end
|
|
115
|
+
|
|
116
|
+
# Checks (Eucalyptus only) that at least min_vms_needed instances of the
# given instance_type are free in the cloud, by scraping the output of
# `euca-describe-availability-zones verbose`. Raises
# InfrastructureException when the count cannot be parsed or is too low.
def self.ensure_min_vms_available(min_vms_needed, instance_type, infrastructure)
  puts "warn: this doesn't work on ec2 - euca only"
  availability_zones = CommonFunctions.shell("#{infrastructure}-describe-availability-zones verbose 2>&1")
  # check for errors from last command
  # NOTE(review): `.flatten.to_s` relies on Ruby 1.8 Array#to_s semantics
  # (joining elements); on Ruby >= 1.9 this yields an inspect-style string
  # like '["30"]' and Integer() below would raise — confirm target Ruby.
  vms_str = availability_zones.scan(/#{instance_type}\t([\d]+)/).flatten.to_s
  if vms_str.nil? or vms_str.empty?
    raise InfrastructureException.new("There was a problem seeing how many " +
      "virtual machines were available. We saw [#{availability_zones}].")
  end

  # Integer() raises ArgumentError on non-numeric input, unlike to_i.
  free_vms = Integer(vms_str)
  if free_vms < min_vms_needed
    raise InfrastructureException.new("Not enough VMs were free of the " +
      "type #{instance_type}. Needed #{min_vms_needed} but only #{free_vms}" +
      " were available.")
  end
end
|
|
133
|
+
|
|
134
|
+
# Confirms that the disk image id the user specified looks like an AMI/EMI
# and is actually registered with the target cloud, by scraping the output
# of the infrastructure's describe-images tool.
#
# Raises InfrastructureException when the id is malformed, when the cloud
# tools themselves report an error, or when the image is not found.
def self.verify_ids(disk, infrastructure)
  ec2_images = CommonFunctions.shell("#{infrastructure}-describe-images 2>&1")

  # BUG FIX: the original pattern /[a|e]mi/ is a character class that also
  # matched a literal '|' (e.g. "|mi"). [ae]mi matches only 'ami' or 'emi'.
  if disk !~ /[ae]mi/
    raise InfrastructureException.new("The disk image you specified was " +
      "not in the proper format. Please correct this and try again.")
  end

  # if the tools are not configured properly an error message will show up
  # be sure to catch it and die if so
  if ec2_images =~ /\AServer:/
    raise InfrastructureException.new("Problem with " +
      "#{infrastructure}-tools: " + ec2_images)
  end

  id = "disk"
  # BUG FIX: the original used eval(id) to resolve the local variable named
  # by the string — needless and unsafe. Use the variable directly.
  id_value = disk
  if ec2_images !~ /IMAGE\t#{id_value}/
    raise InfrastructureException.new("The #{id} image you specified, " +
      "#{id_value}, was not found when querying " +
      "#{infrastructure}-describe-images. Please specify a #{id} image in " +
      "the database and try again.")
  end
end
|
|
158
|
+
|
|
159
|
+
# Splits a flat, alternating [public, private, public, private, ...] address
# list into two parallel arrays, dropping any pair in which either address is
# the placeholder "0.0.0.0" (an instance that has no address yet).
#
# Returns [public_ips, private_ips]. Raises InfrastructureException when the
# input list has odd length. Prints both reported lists when verbose.
def self.get_ips(ips, verbose)
  if ips.length % 2 != 0
    raise InfrastructureException.new("ips not even length array")
  end

  reported_public = []
  reported_private = []
  ips.each_slice(2) { |pub, pri|
    reported_public << pub
    reported_private << pri
  }

  if verbose
    puts "Reported Public IPs: [#{reported_public.join(', ')}]"
    puts "Reported Private IPs: [#{reported_private.join(', ')}]"
  end

  actual_public = []
  actual_private = []
  reported_public.zip(reported_private) { |pub, pri|
    next if pub == "0.0.0.0" or pri == "0.0.0.0"
    actual_public << pub
    actual_private << pri
  }

  return actual_public, actual_private
end
|
|
192
|
+
|
|
193
|
+
# Extracts one reachable address per instance from a flat, alternating
# [public, private, public, private, ...] address list: the public address
# when it is real, otherwise the private address, otherwise nothing (both
# "0.0.0.0" means the instance has no address yet).
#
# Returns the flattened array of chosen addresses. Raises
# InfrastructureException for an odd-length input. Prints both reported
# lists when verbose.
def self.get_public_ips(ips, verbose)
  if ips.length % 2 != 0
    raise InfrastructureException.new("ips not even length array")
  end

  reported_public = []
  reported_private = []
  ips.each_slice(2) { |pub, pri|
    reported_public << pub
    reported_private << pri
  }

  if verbose
    puts "Reported Public IPs: [#{reported_public.join(', ')}]"
    puts "Reported Private IPs: [#{reported_private.join(', ')}]"
  end

  public_ips = []
  reported_public.zip(reported_private) { |pub, pri|
    if pub != "0.0.0.0"
      public_ips << pub
    elsif pri != "0.0.0.0"
      public_ips << pri
    end
  }

  return public_ips.flatten
end
|
|
222
|
+
|
|
223
|
+
# Starts num_of_vms_to_spawn machines in the given cloud, assigning them all
# the given role string (job), then polls describe-instances until every new
# machine is running with a real IP or MAX_VM_CREATION_TIME elapses.
#
# Returns an array of "public:private:job:instance_id:cloud1" strings, one
# per machine. Raises BadConfigurationException for an unknown
# infrastructure and InfrastructureException for cloud-side failures.
def self.spawn_vms(num_of_vms_to_spawn, job, image_id, instance_type, keyname,
  infrastructure, group, verbose)
  #adding check first so that we don't do any of this if the infrastructure setting is wrong
  if !VALID_CLOUD_TYPES.include?(infrastructure)
    raise BadConfigurationException.new("Infrastructure must be " +
      "iaas, ec2, or euca, but instead was #{infrastructure}")
  end

  # The two tool families spell "describe group" differently and report a
  # missing group differently (error string vs empty output).
  # NOTE(review): if infrastructure is neither "ec2" nor "euca" (e.g. the
  # "iaas" value mentioned above), make_group stays nil and the else branch
  # below raises a misleading "group exists" error — confirm intended.
  if infrastructure == "ec2"
    check_group = CommonFunctions.shell("#{infrastructure}-describe-group #{group} 2>&1")
    make_group = check_group.include? 'InvalidGroup.NotFound'
  elsif infrastructure == "euca"
    check_group = CommonFunctions.shell("#{infrastructure}-describe-groups #{group} 2>&1")
    make_group = check_group.empty?
  end
  if make_group
    puts "Creating security group #{group}" if verbose
    create_sec_group = CommonFunctions.shell("#{infrastructure}-add-group #{group} -d #{group} 2>&1")
    puts create_sec_group if verbose
  else # security group exists
    raise InfrastructureException.new("Security group #{group} exists, " +
      "delete this group via #{infrastructure}-delete-group #{group}, " +
      "prior to starting an AppScale cloud")
  end
  puts "Security group #{group} in place" if verbose
  VMTools.open_ports_in_cloud(infrastructure, group, verbose)
  puts "Ports set for security group #{group}" if verbose

  # Count machines already running under this keyname so we know how many
  # public IPs to wait for in total.
  describe_instances = CommonFunctions.shell("#{infrastructure}-describe-instances 2>&1")
  puts describe_instances if verbose
  all_ip_addrs = describe_instances.scan(/\s+(#{IP_OR_FQDN})\s+(#{IP_OR_FQDN})\s+running\s+#{keyname}\s/).flatten
  ips_up_already = VMTools.get_public_ips(all_ip_addrs, verbose)
  vms_up_already = ips_up_already.length

  command_to_run = "#{infrastructure}-run-instances -k #{keyname} -n #{num_of_vms_to_spawn} --instance-type #{instance_type} --group #{group} #{image_id}"

  puts command_to_run if verbose
  run_instances = ""
  # Retry run-instances until the cloud accepts it; transient "try again
  # later" errors loop, an addressing hint appends --addressing private,
  # and a hard PROBLEM aborts. Any other output is treated as success.
  loop {
    run_instances = CommonFunctions.shell("#{command_to_run} 2>&1")
    puts "run_instances: [#{run_instances}]" if verbose
    if run_instances =~ /Please try again later./
      puts "Error with run_instances: #{run_instances}. Will try again in a moment."
    elsif run_instances =~ /try --addressing private/
      puts "Need to retry with addressing private. Will try again in a moment."
      command_to_run << " --addressing private"
    elsif run_instances =~ /PROBLEM/
      raise InfrastructureException.new("Saw the following error message " +
        "from iaas tools. Please resolve the issue and try again:\n" +
        "#{run_instances}")
    else
      puts "Run instances message sent successfully. Waiting for the image to start up."
      break
    end
  }

  instance_ids = run_instances.scan(/INSTANCE\s+(i-\w+)\s+[\w\-\s\.]+#{keyname}\s/).flatten

  # Poll until every expected machine is running or we time out. The body
  # always runs at least once, which is what defines public_ips/private_ips
  # for the checks after the loop.
  end_time = Time.now + MAX_VM_CREATION_TIME
  while (now = Time.now) < end_time
    describe_instances = CommonFunctions.shell("#{infrastructure}-describe-instances 2>&1")
    puts "[#{Time.now}] #{end_time - now} seconds left until timeout..."
    puts describe_instances if verbose

    if describe_instances =~ /terminated\s#{keyname}\s/
      raise InfrastructureException.new("An instance was unexpectedly " +
        "terminated. Please contact your cloud administrator to determine " +
        "why and try again. \n#{describe_instances}")
    end

    all_ip_addrs = describe_instances.scan(/\s+(#{IP_OR_FQDN})\s+(#{IP_OR_FQDN})\s+running\s+#{keyname}\s/).flatten
    instance_ids = describe_instances.scan(/INSTANCE\s+(i-\w+)\s+[\w\-\s\.]+#{keyname}\s/).flatten
    public_ips, private_ips = VMTools.get_ips(all_ip_addrs, verbose)
    break if public_ips.length == num_of_vms_to_spawn + vms_up_already
    sleep(SLEEP_TIME)
  end

  if public_ips.length.zero?
    raise InfrastructureException.new("No public IPs were able to be " +
      "procured within the time limit.")
  end

  # instance_ids includes pending machines while public_ips only counts
  # running ones, so a mismatch means something never came up properly.
  if public_ips.length != instance_ids.length
    puts "Public IPs: #{public_ips.join(', ')}, Instance ids: #{instance_ids.join(', ')}"
    raise InfrastructureException.new("Public IPs size didn't match " +
      "instance names size")
  end

  instances_created = []
  public_ips.each_index { |index|
    instances_created << "#{public_ips[index]}:#{private_ips[index]}:#{job}:#{instance_ids[index]}:cloud1"
  }

  return instances_created
end
|
|
318
|
+
|
|
319
|
+
# Terminates every instance in the cloud whose describe-instances record
# carries the given keyname, echoing the terminate tool's output. Returns
# the number of instances asked to terminate (0 when none matched).
def self.terminate_all_vms(keyname, infrastructure)
  described = CommonFunctions.shell("#{infrastructure}-describe-instances 2>&1")
  to_kill = described.scan(/INSTANCE\s+(i-\w+)\s+[\w\-\s\.]+#{keyname}\s/).flatten
  return 0 if to_kill.length == 0
  puts CommonFunctions.shell("#{infrastructure}-terminate-instances #{to_kill.join(' ')} 2>&1")
  return to_kill.length
end
|
|
326
|
+
|
|
327
|
+
# Starts the head node, dispatching on the infrastructure: a recognized
# cloud type goes through the cloud tools, anything else is treated as a
# virtualized (Xen/KVM-style) deployment. The node's roles are joined into
# the colon-separated job string both paths expect.
def self.spawn_head_node(head_node, infrastructure, keyname,
  ssh_key_location, ssh_keys, force, machine, instance_type, group, verbose)

  head_node_jobs = head_node.roles.join(":")
  unless VALID_CLOUD_TYPES.include?(infrastructure)
    return VMTools.spawn_head_node_via_vmm(head_node, keyname, head_node_jobs)
  end
  VMTools.spawn_head_node_via_cloud(infrastructure, keyname,
    ssh_key_location, ssh_keys, force, head_node_jobs, machine,
    instance_type, group, verbose)
end
|
|
339
|
+
|
|
340
|
+
# Boots the head node in a cloud: validates tools and keyname, generates the
# AppScale SSH key, spawns one VM running head_node_jobs, then pushes SSH
# keys onto the new machine so later steps can log in as root.
#
# Returns the "locations" string for the machine (see spawn_vms for its
# format; NOTE(review): `.flatten.to_s` at the end relies on Ruby 1.8
# Array#to_s join semantics — confirm target Ruby).
def self.spawn_head_node_via_cloud(infrastructure, keyname,
  ssh_key_location, ssh_keys, force, head_node_jobs, machine,
  instance_type, group, verbose)

  VMTools.ensure_tools_are_installed(infrastructure)
  #VMTools.verify_ids(machine, infrastructure)
  VMTools.ensure_keyname_not_in_use(keyname, infrastructure)
  #VMTools.ensure_min_vms_available(min_images, instance_type, infrastructure)
  EncryptionHelper.generate_ssh_key(verbose, ssh_keys, keyname, infrastructure, force)
  locations = VMTools.spawn_vms(1, head_node_jobs, machine, instance_type, keyname, infrastructure, group, verbose)
  puts "Please wait for your instance to complete the bootup process."
  # locations[0] is "public:private:job:id:cloud1"; take the public address.
  head_node_ip = locations[0].split(":")[0]
  CommonFunctions.sleep_until_port_is_open(head_node_ip, AppScaleTools::SSH_PORT)
  # Give sshd a moment after the port opens before logging in.
  sleep(10)
  options = "-o StrictHostkeyChecking=no -o NumberOfPasswordPrompts=0"
  # EC2 Ubuntu images only allow login as 'ubuntu'; copy its authorized_keys
  # to root so the remaining scp/ssh calls can run as root.
  enable_root_login = "sudo cp /home/ubuntu/.ssh/authorized_keys /root/.ssh/"
  `ssh -i #{ssh_keys.join(" -i ")} #{options} 2>&1 ubuntu@#{head_node_ip} '#{enable_root_login}'` # kloogy ec2 fix
  CommonFunctions.scp_file(ssh_key_location, "/root/.ssh/id_dsa", head_node_ip, ssh_keys) # kloogy ec2 fix
  CommonFunctions.scp_file(ssh_key_location, "/root/.ssh/id_rsa", head_node_ip, ssh_keys)
  # Pull the head node's authorized_keys locally, extract its ssh-rsa public
  # key, and push it back as /root/.ssh/id_rsa.pub.
  CommonFunctions.shell("scp -i #{ssh_keys.join(" -i ")} -o StrictHostkeyChecking=no 2>&1 root@#{head_node_ip}:/root/.ssh/authorized_keys /tmp/remote_keys")
  remote_keys = (File.open("/tmp/remote_keys") { |f| f.read }).chomp
  public_key_contents = remote_keys.scan(/ssh-rsa [\w+\/=]+ [\w@]+/).flatten.to_s
  File.open("/tmp/id_rsa.pub", "w+") { |file| file.write(public_key_contents) }
  CommonFunctions.scp_file("/tmp/id_rsa.pub", "/root/.ssh/id_rsa.pub", head_node_ip, ssh_keys)
  locations = locations.flatten.to_s
  return locations
end
|
|
367
|
+
|
|
368
|
+
# Fabricates the "locations" string for a head node running on a virtualized
# deployment (Xen/KVM), where no cloud tools are involved.
# keyname is accepted for signature parity with the cloud path but unused.
def self.spawn_head_node_via_vmm(node, keyname, head_node_jobs)
  # No real instance id exists outside the cloud tools, and public ip equals
  # private ip here, so both address fields use the node's id.
  entry = [node.id, node.id, head_node_jobs, "i-ZFOOBARZ", "cloud1"].join(":")
  return [entry].flatten.to_s
end
|
|
375
|
+
|
|
376
|
+
# Assembles the credential hash handed to the AppController for a
# single-cloud deployment: deployment parameters from val_hash and the node
# layout, the CLOUD1_-prefixed EC2 environment variables (plus EC2_URL and
# S3_URL for Eucalyptus), and the lowercase creds from the environment.
def self.get_cloud_creds(node_layout, val_hash)
  # Every deployment parameter is coerced to a String ("" when nil).
  creds = {
    "machine" => val_hash['machine'].to_s,
    "instance_type" => val_hash['instance_type'].to_s,
    "infrastructure" => val_hash['infrastructure'].to_s,
    "min_images" => node_layout.min_images.to_s,
    "max_images" => node_layout.max_images.to_s
  }

  env_vars = EC2_ENVIRONMENT_VARIABLES
  env_vars = env_vars + ["EC2_URL", "S3_URL"] if creds["infrastructure"] == "euca"
  env_vars.each { |var|
    creds["CLOUD1_#{var}"] = ENV[var]
  }

  creds.merge!(VMTools.get_creds_from_env)
  return creds
end
|
|
398
|
+
|
|
399
|
+
# Generates the keyname's PEM certificate and private key and returns their
# paths as [cert, key].
def self.get_vmm_keys(val_hash)
  puts "Generating certificate and private key"
  # NOTE(review): the values returned by generate_pem_files are immediately
  # overwritten below — the call appears to be kept only for its side effect
  # of writing the files; confirm it writes to these ~/.appscale paths.
  key, cert = EncryptionHelper.generate_pem_files(val_hash['keyname'])
  key = File.expand_path("~/.appscale/#{val_hash['keyname']}-key.pem")
  cert = File.expand_path("~/.appscale/#{val_hash['keyname']}-cert.pem")

  return cert, key
end
|
|
407
|
+
|
|
408
|
+
# Looks up the TYPE environment variable for the named cloud (e.g. "cloud1"
# maps to ENV["CLOUD1_TYPE"]) and returns its value.
# Raises BadConfigurationException when the variable is unset.
def self.lookup_cloud_env(cloud)
  var_name = "#{cloud.upcase}_TYPE"
  value = ENV[var_name]
  return value unless value.nil?
  raise BadConfigurationException.new("The environment variable " +
    "#{var_name} was not set. Please set it and try again.")
end
|
|
419
|
+
|
|
420
|
+
# Builds the credential hash for a hybrid deployment by walking the
# CLOUD1_TYPE, CLOUD2_TYPE, ... environment variables until one is unset,
# merging in each cloud's prefixed credentials. When set_head_node_creds is
# true, cloud 1's credentials are also exported into this process's
# environment (see get_hybrid_env_vars).
def self.get_hybrid_creds(node_layout, set_head_node_creds=false)
  creds = {
    "infrastructure" => "hybrid",
    "min_images" => node_layout.min_images.to_s,
    "max_images" => node_layout.max_images.to_s
  }

  cloud_num = 1
  while (cloud_type = ENV["CLOUD#{cloud_num}_TYPE"])
    export_head = (cloud_num == 1 and set_head_node_creds)
    creds.merge!(self.get_hybrid_env_vars(cloud_type, cloud_num, export_head))
    cloud_num += 1
  end

  return creds
end
|
|
444
|
+
|
|
445
|
+
# Convenience wrapper around get_hybrid_creds that also exports the first
# cloud's credentials into this process's environment.
def self.set_hybrid_creds(node_layout)
  self.get_hybrid_creds(node_layout, true)
end
|
|
448
|
+
|
|
449
|
+
# Collects the CLOUD<cloud_num>_-prefixed credentials required for one cloud
# in a hybrid deployment, raising BadConfigurationException when any is
# missing. When set_vars is true the unprefixed variable is also exported
# into this process's environment (used for the head node's cloud).
#
# Returns a Hash of the prefixed variable names to their values.
def self.get_hybrid_env_vars(cloud_type, cloud_num, set_vars=false)
  creds = {}

  if cloud_type == "euca"
    required = REQUIRED_EUCA_CREDS_FOR_HYBRID
  elsif cloud_type == "ec2"
    required = REQUIRED_EC2_CREDS_FOR_HYBRID
  else
    # BUG FIX: the original printed the message and then called a bare
    # `fail` (a RuntimeError with no context). Raise
    # BadConfigurationException carrying the message, consistent with every
    # other configuration error in this module.
    raise BadConfigurationException.new("Incorrect cloud type of #{cloud_type}")
  end

  required.each { |cred|
    key = "CLOUD#{cloud_num}_#{cred}"
    val = ENV[key]

    if val.nil?
      raise BadConfigurationException.new("The required " +
        "environment variable #{key} was not set. Please set it and " +
        "try again.")
    end

    if set_vars
      puts "Setting #{cred} to #{val}"
      ENV[cred] = val
    end

    creds[key] = val
  }

  return creds
end
|
|
481
|
+
|
|
482
|
+
# Returns the disk image id registered for the numbered cloud in a hybrid
# deployment: ENV["CLOUD<n>_EMI"] for Eucalyptus, ENV["CLOUD<n>_AMI"] for
# EC2. Raises BadConfigurationException for any other infrastructure.
def self.get_hybrid_machine(infra, cloud_num)
  suffix = case infra
           when "euca" then "EMI"
           when "ec2" then "AMI"
           else
             raise BadConfigurationException.new("infrastructure #{infra} " +
               "is not a supported value.")
           end
  ENV["CLOUD#{cloud_num}_#{suffix}"]
end
|
|
494
|
+
|
|
495
|
+
# Validates the user's cloud credentials for a non-hybrid deployment: every
# EC2 environment variable must be set, the key/cert files must exist, and
# Eucalyptus users must additionally set EC2_URL and S3_URL explicitly
# (euca2ools otherwise silently default them to localhost).
# Raises BadConfigurationException on the first problem found.
def self.verify_credentials_are_set_correctly(infrastructure)
  # In non-hybrid cloud environments the user has to provide their EC2
  # credentials up front.
  EC2_ENVIRONMENT_VARIABLES.each { |var|
    next unless ENV[var].nil?
    raise BadConfigurationException.new("The required " +
      "environment variable #{var} was not set. Please set it and try " +
      "again.")
  }

  VMTools.verify_credentials_exist()

  if infrastructure == "euca"
    ['EC2_URL', 'S3_URL'].each { |var|
      next unless ENV[var].nil?
      raise BadConfigurationException.new("When running over " +
        "Eucalyptus, the environment variable #{var} must be set.")
    }
  end
end
|
|
519
|
+
|
|
520
|
+
# Validates that the private key and certificate for use with EC2 or
# Eucalyptus both refer to files that exist, aborting if either do not exist.
#
# Assumes EC2_PRIVATE_KEY and EC2_CERT are set (callers check presence via
# verify_credentials_are_set_correctly first — File.expand_path(nil) would
# raise TypeError otherwise).
def self.verify_credentials_exist()
  ["EC2_PRIVATE_KEY", "EC2_CERT"].each { |var|
    file_path = File.expand_path(ENV[var])
    # BUG FIX: File.exists? was long deprecated and removed in Ruby 3.2;
    # File.exist? is the supported spelling.
    if !File.exist?(file_path)
      raise BadConfigurationException.new("The environment variable" +
        " #{var} pointed to the file #{file_path}, which didn't exist.")
    end
  }
end
|
|
531
|
+
|
|
532
|
+
# Last-resort shutdown used when the shadow node cannot be reached: pipes
# describe-instances through grep/awk/xargs to terminate every instance
# matching the keyname, then deletes the "appscale" security group.
# Calls Kernel#abort (exits the process) for the keyname "appscale", since
# grepping for it would match unrelated lines.
def self.terminate_infrastructure_machines(infrastructure, keyname)
  # TODO: if we know all the other ips in the system, contact one
  # of them instead

  if keyname == "appscale" # appscale keyname messes up the next command
    abort("Error seen trying to terminate your machines - please do so manually.")
  end

  # for now, just kill them the hard way
  # NOTE(review): keyname is interpolated unquoted into a shell pipeline —
  # an attacker-controlled keyname would allow command injection; confirm
  # keynames are validated upstream.
  desc = "#{infrastructure}-describe-instances"
  term = "#{infrastructure}-terminate-instances"
  cmd = "#{desc} | grep #{keyname} | awk '{print $2}' | xargs #{term}"
  puts "Unable to contact shadow node, shutting down via tools..."
  puts `#{cmd}`
  cmd = "#{infrastructure}-delete-group appscale"
  puts `#{cmd}`
end
|
|
549
|
+
end
|