apcera 0.1.6.2 → 0.1.6.3
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/apcera/version.rb +1 -1
- data/lib/apcera_api_helper.rb +1077 -0
- metadata +2 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: e504cd0ce27ad6fb761226d1d57a6b4a6e31c2a5
|
4
|
+
data.tar.gz: 61225f96f94e4a97366f74dd5952f02d4b30ccdd
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 52a6cd2d72bf0fcd07c8e18eaf9cf6248724e4076d9f4e8d3ddfab24a25da9ba4d50224cfac7cd540b3bcda4bad742f6885d1ff4214d8a4c8a99bd3e20b2a19b
|
7
|
+
data.tar.gz: a30bb99ab6c4ee76af2d12f631dec2d4ada84d41ef9dbcfb046685fb28fdbd73a2cfe48022443e46f84dc90fbfba001afa082c6223fb99f7c87e5e647116e756
|
data/lib/apcera/version.rb
CHANGED
@@ -0,0 +1,1077 @@
|
|
1
|
+
#!/usr/bin/env ruby
|
2
|
+
|
3
|
+
require 'rubygems'

# Standard library
require 'base64'
require 'digest/sha1'
require 'fileutils'
require 'getoptlong'
require 'json'
require 'open-uri'
require 'securerandom'
require 'time'
require 'tmpdir'
require 'yaml'

# Third-party / project
require 'apcera'
require 'rest-client'
|
13
|
+
|
14
|
+
# Presence predicates on String, mirroring ActiveSupport's blank?/present?
# without pulling in the dependency.
class String
  # True when the string contains no characters.
  def blank?
    nil? || empty?
  end

  # True when the string contains at least one character
  # (De Morgan of blank?).
  def nonblank?
    !blank?
  end
end
|
23
|
+
|
24
|
+
# Resolves credentials for talking to an Apcera cluster.
#
# When a target/bearer pair is passed in, it is used as-is. Otherwise the
# class walks a cached-credentials flow: read ~/.apcera_api (or
# /tmp/.apcera_api when $HOME is unwritable), prompt for / read the target
# from the environment, run the Google OAuth device flow via the cluster's
# auth host to obtain a refresh token, and refresh the bearer token when
# missing or expired. Resolved fields are written back to the cache file.
class ApceraAuth

  # These are the bare minimum
  #
  attr_reader :target       # cluster URL, e.g. "http://demo.proveapcera.io"
  attr_reader :bearer       # full bearer string, e.g. "Bearer <token>"

  # These are calculated from the above
  #
  attr_reader :token        # token part of the bearer string
  attr_reader :token_prefix # prefix part ("Bearer")
  attr_reader :domain       # last path/host component of target
  attr_reader :scheme       # first component of target ("http"/"https")
  attr_reader :api_host     # "api.<domain>"
  attr_reader :auth_host    # "auth.<domain>"
  attr_reader :base_api_url
  attr_reader :base_auth_url

  # This is situational
  #
  attr_reader :cache_file_path # set only when the cached-credentials flow ran

  # target/bearer: pass both to skip the interactive/cache flow entirely.
  def initialize(target = "", bearer = "")
    # If a target or bearer is passed in, use that, otherwise, try to figure it out
    #
    cache_credentials = false

    if (target.nonblank? || bearer.nonblank?)
      @target = target
      @bearer = bearer
    else
      cache_credentials = true

      @cache_file_path = File.expand_path("~/.apcera_api")

      if !File.writable?(File.expand_path("~/"))
        puts "Cache File #{@cache_file_path} not writable, falling back to tmp"
        @cache_file_path = "/tmp/.apcera_api"
      end

      # NOTE(review): File.exists? was removed in Ruby 3.2 — File.exist?
      # is the supported spelling; confirm target Ruby version.
      cached_fields = {}
      if (File.exists?(@cache_file_path))
        cached_fields = JSON.parse(File.read(@cache_file_path))
      end

      # First, get the target. If it isn't in the cache or env, prompt for it
      #
      if (!cached_fields.has_key?("target") || cached_fields["target"] == "")

        default = "http://demo.proveapcera.io"

        # This can also be set in the environment
        #
        if ENV.key?("APC_TARGET")
          puts "WARNING: Using APC_TARGET from ENV"
          value = ENV["APC_TARGET"]
        else
          # NOTE(review): delete! returns nil when no "\n" was removed
          # (e.g. EOF on stdin) — `value` can be nil here; non-bang
          # delete would be safer.
          value = prompt_for_value("Enter the domain name for the cluster [#{default}] : ").delete!("\n")
        end

        if value == ""
          value = default
        end
        cached_fields["target"] = value
      end

      # Just need to make sure that these are set when done
      #
      @target = cached_fields["target"]
    end

    # Build the paths
    #
    # Splitting on ':' and '/' makes "http://demo.example.com" yield
    # scheme=first part, domain=last part.
    parts = @target.split(/[:\/]/)

    @scheme = parts.first
    @domain = parts.last

    @api_host = "api.#{@domain}"
    @auth_host = "auth.#{@domain}"
    @base_api_url = "#{scheme}://#{@api_host}"
    @base_auth_url = "#{scheme}://#{@auth_host}"

    if (target.nonblank? || bearer.nonblank?)
      @bearer = bearer
    else
      # We need to find it.
      #
      if (ENV.key?("APC_REFRESH_TOKEN"))
        puts "WARNING: Using refresh token from environment"
        cached_fields["refresh_token"] = ENV["APC_REFRESH_TOKEN"]
      end

      # No refresh token: run the Google OAuth device flow interactively.
      if (!cached_fields.has_key?("refresh_token") || cached_fields["refresh_token"] == "")
        response = RestClient.get("#{@base_auth_url}/v1/oauth2/device/google/getcode")
        code = JSON.parse(response)

        ApceraApiHelper.notice "go to \n\n#{code['verification_url']}\n\n and enter code #{code['user_code']}\n\n"

        # This stuff only works on the mac
        #
        system("echo #{code['user_code']} | pbcopy")
        system("open #{code['verification_url']}")

        value = prompt_for_value "Press Y when completed: "

        # NOTE(review): same delete! nil hazard as above — casecmp on nil
        # raises NoMethodError when input lacks a trailing newline.
        if value.delete!("\n").casecmp("Y") != 0
          ApceraApiHelper.notice "Error, giving up."
          exit
        end

        device_code = code['device_code']

        redeemed_url = "#{@base_auth_url}/v1/oauth2/device/google/redeemed"

        obj = {:device_code => device_code}

        refresh_token_wrapper = JSON.parse(RestClient.post(redeemed_url, obj.to_json, {:content_type => :json}))
        cached_fields["refresh_token"] = refresh_token_wrapper["refresh_token"]
      end

      # If the token isn't there, or is expired, refresh it
      #
      # NOTE(review): Time.parse needs `require 'time'`; also raises if
      # "expires" is absent while "token" is present — confirm cache shape.
      if !cached_fields.has_key?("token") || cached_fields["token"] == "" || Time.parse(cached_fields["expires"]) < Time.now
        refresh_url = "#{@base_auth_url}/v1/oauth2/device/google/refresh"

        refresh_token_wrapper = {:refresh_token => cached_fields["refresh_token"], :token_type => "GOOGLE_REFRESH" }
        refresh_resp = RestClient.post(refresh_url, refresh_token_wrapper.to_json, {:content_type => :json})
        refreshed_token = JSON.parse(refresh_resp)

        cached_fields["token"] = "Bearer #{refreshed_token['access_token']}"
        cached_fields["expires"] = Time.now + refreshed_token["expires_in"].to_i
      end
      @bearer = cached_fields["token"]
    end

    # "Bearer <token>" -> prefix/token split used by the SDK configuration.
    @token_prefix = @bearer.split.first
    @token = @bearer.split.last

    # Persist everything we learned for the next run.
    if (cache_credentials)
      File.write(cache_file_path, JSON.pretty_generate(cached_fields))
    end

  end

  # Prints a prompt and returns the raw line read from stdin
  # (including the trailing newline; nil on EOF).
  def prompt_for_value(*args)
    print(*args)
    gets
  end

  # Multi-line dump of every resolved field; returns the built string.
  def to_s
    body = "target [#{@target}]\n"
    body += "bearer [#{@bearer}]\n"
    body += "token [#{@token}]\n"
    body += "token_prefix [#{@token_prefix}]\n"
    body += "domain [#{@domain}]\n"
    body += "scheme [#{@scheme}]\n"
    body += "api_host [#{@api_host}]\n"
    body += "auth_host [#{@auth_host}]\n"
    body += "base_api_url [#{@base_api_url}]\n"
    body += "base_auth_url [#{@base_auth_url}]\n"
    body += "cache_file_path [#{@cache_file_path}]\n"
  end
end
|
188
|
+
|
189
|
+
|
190
|
+
class ApceraApiHelper
|
191
|
+
attr_reader :docker_layer_hash
|
192
|
+
attr_reader :sandbox
|
193
|
+
attr_reader :target
|
194
|
+
attr_reader :api_host
|
195
|
+
|
196
|
+
# cache_file_path = File.expand_path("~/.apcera_api")
|
197
|
+
|
198
|
+
# Configures the Apcera SDK from a resolved ApceraAuth session and builds
# the per-resource API clients used by the helper methods below.
#
# enable_debugging - truthy enables SDK request tracing and dumps the
#                    resolved auth settings.
# target/bearer    - optional credentials forwarded to ApceraAuth; when
#                    blank they are resolved from cache/env/interactively.
def initialize(enable_debugging, target = "", bearer = "")
  self.debug_mode = enable_debugging

  apc_env = ApceraAuth.new(target, bearer)
  if (self.debug_mode)
    puts apc_env.to_s
  end

  Apcera.configure do |config|
    config.scheme = apc_env.scheme
    config.api_key['authorization'] = apc_env.token
    config.api_key_prefix['authorization'] = apc_env.token_prefix

    config.host = apc_env.api_host
    config.base_path = '/v1'

    config.debugging = enable_debugging
    config.verify_ssl = false
  end

  # Fresh instance, so plain assignment: the original's ||= was redundant
  # and it instantiated StagingPipelinesApi on a duplicated line.
  @_bindings_api = Apcera::ServicesAndBindingsApi.new
  @_instances_api = Apcera::InstancesApi.new
  @_jobs_api = Apcera::JobsApi.new
  @_logs_api = Apcera::LogsApi.new
  @_metrics_api = Apcera::MetricsApi.new
  @_packages_api = Apcera::PackagesApi.new
  @_stagingpipeline_api = Apcera::StagingPipelinesApi.new
  @_utilities_api = Apcera::UtilitiesApi.new

  # Remember the caller's default namespace for binding FQNs.
  sb = self.get_sandbox()
  @sandbox = sb.namespace
end
|
231
|
+
|
232
|
+
# Sets the debug flag on both this helper and the Apcera SDK configuration.
def debug_mode=(mode)
  @debug_mode = mode
  Apcera::Configuration.debugging = mode
end

# Predicate form of the debug flag.
def debug_mode?()
  @debug_mode
end

# Reader form of the debug flag.
def debug_mode()
  @debug_mode
end

# Cluster info from the utilities API.
def get_info
  @_utilities_api.info_get()
end

# Lists the cluster's instance managers.
def get_instance_managers()
  @_instances_api.instance_managers_get()
end

# Runtime operations
#
# Lists the runtimes known to the cluster (used for staging-pipeline
# pattern matching in get_matching_staging_pipeline).
def runtimes_get()
  @_utilities_api.runtimes_get()
end

# Returns the caller's default namespace ("sandbox") object; its
# `namespace` attribute is cached by initialize.
def get_sandbox()
  @_utilities_api.namespace_default_get()
end
|
262
|
+
|
263
|
+
# Staging Pipeline operations
#
# Lists staging pipelines, forwarding any filter options (e.g. :fqn)
# to the API.
#
# Fix: the original passed `{:fqn => staging_pipeline}` where
# `staging_pipeline` is an undefined local, raising NameError on every
# call; the `opts` parameter is now forwarded as intended.
def stagingpipelines_get(opts = {})
  @_stagingpipeline_api.stagingpipelines_get(opts)
end
|
268
|
+
|
269
|
+
# Looks up a single staging pipeline by FQN; nil when not found.
def get_staging_pipeline_by_name(name)
  ret = @_stagingpipeline_api.stagingpipelines_get({:fqn => name})
  ret.length > 0 ? ret[0] : nil
end

# Finds the staging pipeline whose runtime file patterns match `name`.
# A name of "*" matches against the current directory's contents instead
# (whole-directory deploys). Returns the pipeline object, or nil when no
# runtime pattern matched.
#
# NOTE(review): only the inner pattern loop breaks on a match; a later
# runtime can overwrite `staging_pipeline` — confirm last-match-wins is
# the intended behavior.
def get_matching_staging_pipeline(name)
  ApceraApiHelper.notice "get_matching_staging_pipeline for #{name}"
  runtimes = self.runtimes_get()
  staging_pipeline = ""

  # First find the matching runtime
  #
  runtimes.each do | runtime |
    runtime.patterns.each do | pattern |
      # If the name is a star, we are doing the whole directory
      # (&& binds tighter than ||, so this reads as
      #  (star && dir-match) || (non-star && fnmatch)).
      if ((name == "*") && (!Dir.glob(pattern).empty?) ||
          (name != "*") && (File.fnmatch(pattern, name)))
        staging_pipeline = "stagpipe::/apcera::#{runtime.runtime}"
        break
      end
    end
  end

  pipeline = self.get_staging_pipeline_by_name(staging_pipeline)
  return pipeline
end
|
296
|
+
|
297
|
+
# Metrics operations
#
# Thin delegators to the metrics API; `opts` is forwarded unchanged
# (filters, time ranges, etc.).
def get_job_metrics(opts = {})
  @_metrics_api.metrics_jobs_get(opts)
end

# Instance-manager-level metrics.
def get_instance_manager_metrics(opts = {})
  @_metrics_api.metrics_instance_managers_get(opts)
end

# Cluster-wide metrics.
def get_cluster_metrics(opts = {})
  @_metrics_api.metrics_cluster_get(opts)
end

# Per-route traffic counters.
def get_route_metrics(opts = {})
  @_metrics_api.metrics_route_counters_get(opts)
end

# Per-namespace metrics.
def get_namespace_metrics(opts = {})
  @_metrics_api.metrics_namespace_get(opts)
end
|
318
|
+
|
319
|
+
# Package operations
#
# Lists packages; `opt` is forwarded to the packages API unchanged.
def find_packages(opt = {})
  @_packages_api.packages_get(opt)
end

# True when a package with the given FQN exists.
def package_exist?(fqn)
  !get_package_by_name(fqn).nil?
end

# Looks up a single package by FQN; nil when not found.
def get_package_by_name(fqn)
  ret = @_packages_api.packages_get({:fqn => fqn})
  ret.length > 0 ? ret[0] : nil
end

# Finds the package imported for a given Docker layer id inside
# `docker_namespace` (partial-FQN match on the namespace plus a
# "docker_layer_id,<id>" tag). Returns the package or nil.
def get_package_for_docker_layer(docker_namespace, layer_id)
  # ret = @_packages_api.find_packages({:fqn => docker_namespace,
  #                                     :matchPartialFQN => true,
  #                                     :tag => "docker_layer_id,#{layer_id}" })
  ret = @_packages_api.packages_get({:fqn => "package::#{docker_namespace}", :match_partial_fqn => "true", :tag => "docker_layer_id,#{layer_id}" })

  ret.length > 0 ? ret[0] : nil
end
|
342
|
+
|
343
|
+
# Convenience wrapper: returns only the UUID of the package imported for
# the given Docker layer, or "" when no such package exists.
def get_package_uuid_for_docker_layer(docker_namespace, layer_id)
  pkg = self.get_package_for_docker_layer(docker_namespace, layer_id)
  pkg.nil? ? "" : pkg.uuid
end
|
348
|
+
|
349
|
+
# Finds the first package carrying the given tag; nil when none match.
#
# Fix: the original called `find_packages` on the PackagesApi client —
# `find_packages` is a helper on this class, while the client exposes
# `packages_get` (the call every sibling lookup uses).
def get_package_by_tag(tag)
  ret = @_packages_api.packages_get({:tag => tag})
  ret.length > 0 ? ret[0] : nil
end
|
353
|
+
|
354
|
+
# Looks up a package by UUID.
#
# NOTE(review): `packages_uuid_get` looks like a single-object endpoint,
# yet the result is treated as a list (`length`/`[0]`) — confirm the SDK
# really returns an array here.
def get_package_by_uuid(uuid)
  ret = @_packages_api.packages_uuid_get(uuid)
  ret.length > 0 ? ret[0] : nil
end

# Creates a new package record; returns the created package.
def create_package(package)
  @_packages_api.packages_post(package)
end

# Uploads/replaces the resource payload of a package.
def update_package_resources_by_uuid(uuid, payload)
  @_packages_api.packages_resources_uuid_put(uuid, payload)
end

# Replaces the package record with the given UUID.
def update_package_by_uuid(uuid, package)
  @_packages_api.packages_uuid_put(uuid, package)
end
|
370
|
+
|
371
|
+
# Deletes the package with the given FQN.
#
# Fix: no-ops with a notice when the package does not exist — the
# original called `.uuid` on nil and raised NoMethodError.
def delete_package(package_name)
  package = self.get_package_by_name(package_name)
  if package.nil?
    ApceraApiHelper.notice("Package #{package_name} not found, nothing to delete")
    return nil
  end
  ApceraApiHelper.notice("Trying to delete package uuid #{package.uuid}")
  @_packages_api.packages_uuid_delete(package.uuid)
end
|
376
|
+
|
377
|
+
# Blocks until the named package is out of the staging/uploading states
# (delegating the actual polling to spin_until_package_staged).
#
# Fix: the SDK debug flag is now saved and restored (mirroring
# spin_until_package_staged) instead of being forced off permanently.
def wait_until_package_staged(package_name)
  save_debug = Apcera::Configuration.debugging
  Apcera::Configuration.debugging = false
  ApceraApiHelper.notice("Checking for #{package_name}...")

  pkg = get_package_by_name(package_name)

  if (['staging', 'uploading'].include?(pkg.state))
    self.spin_until_package_staged(package_name)
  end
ensure
  Apcera::Configuration.debugging = save_debug
end
|
387
|
+
|
388
|
+
# Polls every 5 seconds (with a spinner) until the named package leaves
# the staging/uploading states, then returns the final package object.
# SDK debug tracing is silenced while polling and restored afterwards.
#
# NOTE(review): `show_wait_spinner` is not defined in the visible portion
# of this file — presumably defined elsewhere in the class; confirm.
def spin_until_package_staged(package_name)
  save_debug = Apcera::Configuration.debugging
  Apcera::Configuration.debugging = false

  pkg = get_package_by_name(package_name)

  show_wait_spinner{
    while (['staging', 'uploading'].include?(pkg.state))
      pkg = get_package_by_name(package_name)
      sleep 5
    end
  }

  Apcera::Configuration.debugging = save_debug

  # return the package
  #
  pkg
end
|
407
|
+
|
408
|
+
|
409
|
+
# Clones a git repo into a temp dir, cds into `clone_to/repo_subdir`, and
# stages the whole tree as a package via create_and_stage_package.
#
# Fix: the original leaked the temp clone and left the process in the
# temp directory when cloning or staging raised; cleanup now runs in an
# ensure block.
def create_and_stage_git_repo(repo_name, clone_to, repo_subdir, namespace, job_name, change_to = "", pipeline = nil, tags = {})
  save_dir = Dir.pwd
  puts "save_dir is #{save_dir}"
  tmp_dir = Dir.mktmpdir("git-")
  puts "tmp_dir is #{tmp_dir}"

  begin
    Dir.chdir(tmp_dir)
    puts "now in #{Dir.pwd}"

    system("git clone #{repo_name} #{clone_to}")
    Dir.chdir("#{clone_to}/#{repo_subdir}")

    create_and_stage_package("*", namespace, job_name, change_to, pipeline, tags)
  ensure
    # Always restore the caller's cwd and remove the scratch clone.
    Dir.chdir(save_dir)
    FileUtils.rm_rf(tmp_dir)
  end
end
|
426
|
+
|
427
|
+
# Tars up `filename` ("*" means the whole current directory), creates a
# package record, uploads the tarball, and blocks until staging finishes.
# Example:
#   create_and_stage_package("*", "/sandbox/fred.flintstone", "my_new_job")
#
# change_to - optional directory to tar from (passed to tar -C)
# pipeline  - staging pipeline object; matched from runtime file patterns
#             when nil
# Returns the staged package.
#
# Fixes: the two interpolations garbled in extraction are restored to
# `#{filename}`; the create/rescue path no longer falls through with
# `created_pkg` undefined (it re-raises the original failure); message
# typo "presense" corrected.
def create_and_stage_package(filename, namespace, job_name, change_to = "", pipeline = nil, tags = {})
  file_list = filename

  # Single-file deploys also carry a start.sh when one is present.
  if ((filename != "*") && File.exist?("start.sh"))
    file_list = "#{filename} start.sh"
  end

  if pipeline.nil?
    pipeline = self.get_matching_staging_pipeline(filename)
  end

  if (pipeline.nil?)
    ApceraApiHelper.notice "Fatal ERROR, could not determine staging pipeline for #{filename}"
    exit
  end

  # when we stage, we only need the uuid
  #
  sp = Apcera::StagingPipeline.new({ :uuid => pipeline.uuid })

  self.banner("Tarring up the files")
  tarball_path = "/tmp/pkg-#{job_name.gsub("/", "-")}.tgz"
  base_dir_opt = ""
  if change_to.nonblank?
    base_dir_opt = "-C #{change_to}"
  end
  # COPYFILE_DISABLE keeps macOS from adding ._* resource-fork entries.
  tar_command = "COPYFILE_DISABLE=1 tar #{base_dir_opt} -czf #{tarball_path} #{file_list}"
  system(tar_command)

  contents = File.binread(tarball_path)
  hex_digest = Digest::SHA1.hexdigest(contents.to_s)
  sha256_digest = Digest::SHA256.hexdigest(contents.to_s)
  length = File.size(tarball_path)

  # We are now done with the file
  #
  File.delete(tarball_path)

  # At some point will need to switch to the sha256 digest. Looks like that is now.
  #
  # resource = Apcera::PackageResource.new({:length => length, :digest => sha256_digest})
  #
  resource = Apcera::PackageResource.new()
  package_fqn = "package::#{namespace}::#{job_name}"

  new_pkg = Apcera::Package.new({
    :name => job_name,
    :fqn => package_fqn,
    :staging_pipeline => sp
  })

  new_pkg.resources = [resource]
  new_pkg.tags = tags

  if self.debug_mode?
    self.banner("Package Is")
    ApceraApiHelper.notice new_pkg.to_yaml
  end

  ApceraApiHelper.notice("Creating a placeholder for the new package #{new_pkg.fqn}")

  created_pkg = nil
  begin
    created_pkg = self.create_package(new_pkg)
  rescue
    # Handle the race condition where the package has been created since
    # this function started.
    ApceraApiHelper.notice "post failed, validate presence"
    check_pkg = self.get_package_by_name(new_pkg.fqn)
    if !check_pkg.nil?
      ApceraApiHelper.notice "Package State is #{check_pkg.state}"
      return ### SHOULD I REALLY??
    end
    # Not a race: surface the original create failure instead of crashing
    # below with an undefined created_pkg.
    raise
  end

  ApceraApiHelper.notice("Uploading the new package... #{created_pkg.uuid} resource #{created_pkg.resource.uuid}" )

  @_packages_api.packages_package_uuid_resources_resource_uuid_put(created_pkg.uuid, created_pkg.resource.uuid, contents, "sha256:#{sha256_digest}")

  # bar = self.update_package_resources_by_uuid(created_pkg.uuid, contents)

  # ApceraApiHelper.notice self.get_logs("staging.#{created_pkg.uuid}")

  ApceraApiHelper.notice("Waiting for package #{job_name} to stage (might take some time)...")
  pkg = self.spin_until_package_staged(package_fqn)

  ApceraApiHelper.notice "Package (#{job_name}) staging status: #{pkg.state}"
  pkg
end
|
520
|
+
|
521
|
+
# Stages `filename` into a brand-new package, refusing to proceed when a
# job or package with that FQN already exists. Returns nil on refusal.
#
# NOTE(review): the `port` parameter is accepted but never used here —
# presumably kept for call-site compatibility; confirm.
def package_from_files(filename, namespace, job_name, port = 0)
  # 1) Make sure that a job with that name isn't running
  #
  if self.job_exist?("job::#{namespace}::#{job_name}")
    ApceraApiHelper.notice "Error, job #{namespace}::#{job_name} already exists"
    return nil
  end

  # 2) Make sure that a package with that name isn't running
  #
  if self.package_exist?("package::#{namespace}::#{job_name}")
    ApceraApiHelper.notice "Error, package #{namespace}::#{job_name} already exists"
    return nil
  end

  create_and_stage_package(filename, namespace, job_name)
end
|
538
|
+
|
539
|
+
# Builds and creates a job from an already-staged package
# (package::<namespace>::<job_name>), wires an HTTP route on `port`, tags
# the package back to the job, and optionally posts an allow-egress
# binding. Returns the created job (re-fetched), or nil when a job with
# that FQN already exists. Exits the process when the package is missing.
#
# NOTE(review): the route endpoint interpolates `@domain`, but this class
# never assigns @domain (ApceraAuth has one; this helper does not) — the
# endpoint likely comes out as "name.route." — confirm/fix upstream.
def job_from_package(namespace, job_name, allow_egress, port = 0, environment = {}, start_command = "")

  fqn_base = "#{namespace}::#{job_name}"
  package_fqn = "package::#{fqn_base}"
  job_fqn = "job::#{fqn_base}"

  if self.job_exist?(job_fqn)
    ApceraApiHelper.notice "Job #{job_fqn} already exists, aborting"
    return nil
  end

  self.banner("Getting package uuid for package #{package_fqn}")
  pkg = self.get_package_by_name(package_fqn)

  if pkg.nil?
    ApceraApiHelper.notice "Error, package #{package_fqn} not found, aborting"
    exit
  end

  # Need to build the route name: "/a/b" -> "b.a"
  #
  route = namespace.split("/").reverse.join(".").chomp(".")

  port_route = Apcera::Port.new({
    :optional => false,
    :number => port,
    :routes => [{:type => "http", :endpoint => "#{job_name}.#{route}.#{@domain}", :weight => 0}]
  })

  app = Apcera::ProcessObject.new({
    # :start_command_raw => "",
    :start_command => start_command,
    :start_command_timeout => 30,
    # :stop_command_raw => "",
    :stop_command => "",
    :stop_timeout => 5
  })

  app.environment = environment

  # Fixed 256MB RAM / 256MB disk / 5Mbps network allocation.
  resource = Apcera::Resource.new({
    :cpu => 0,
    :memory => 256 * 1024 * 1024,
    :disk => 256 * 1024 * 1024,
    :network => 5 * 1024 * 1024,
    :netmax => 0
  })
  rollout = Apcera::RolloutConfig.new({
    :force_stop_old_instances_after => 0,
    :flapping_minimum_restarts => 0,
    :flapping_percent => 0,
    :flapping_window => 0,
    :errored_state_window => 0
  })

  process = Apcera::Process.new({ :app => app})

  restart = Apcera::RestartConfig.new({
    :restart_mode => "always",
    :maximum_attempts => 0
  })

  # Gather up all of the above
  #
  job_proto = Apcera::Job.new({
    :uuid => "",
    :ports => [port_route],
    :name => job_name,
    :fqn => job_fqn,
    :num_instances => 1,
    :packages => [{:uuid => pkg.uuid}],
    :processes => process,
    :resources => resource,
    :rollout => rollout,
    :restart => restart,
    :state => "unknown"
  })
  # Ruby doesn't seem to like setting the tags in-line
  #
  job_proto.tags = {"app" => job_name}

  if self.debug_mode?
    self.banner("NEW JOB #{job_proto.to_yaml}")
  end

  new_job = self.create_job(job_proto)
  self.banner("CREATED JOB #{job_name}")

  # Tag the package with the job that now owns it.
  tmp_pkg = self.get_package_by_name(package_fqn)

  tmp_pkg.tags = {"linked-job" =>"#{new_job.uuid}"}

  self.update_package_by_uuid(tmp_pkg.uuid, tmp_pkg)

  # Then post the bindings
  #
  if allow_egress
    self.banner("Posting the Allow-Egress binding for #{job_name}")
    ename = fqn_base.gsub("/", "_").gsub("::", "_").gsub(".", "_")
    binding = Apcera::Binding.new({
      :name => "egress_for#{ename}",
      :fqn => "binding::#{namespace}::#{job_name}",
      :job_fqn => job_fqn,
      :service_fqn => "service::/apcera::outside"
    })
    bound = self.create_binding(binding)
  end

  # return the new job
  #
  self.get_job_by_name(job_fqn)
end
|
651
|
+
|
652
|
+
# Job operations
#
# Lists jobs; `opt` is forwarded to the jobs API unchanged.
def jobs_get(opt = {})
  @_jobs_api.jobs_get(opt)
end

# True when a job with the given FQN exists.
def job_exist?(name)
  !self.get_job_by_name(name).nil?
end

# True when a service with the given FQN exists.
def service_exist?(service)
  !self.get_service_by_name(service).nil?
end
|
665
|
+
|
666
|
+
# Submits an asynchronous "create job from Docker image" request and
# returns the task object (poll it with check_task_status).
#
# exposed_port - when non-zero, the port is exposed and an HTTP route
#                derived from the namespace is attached.
# ram_mb/disk_mb - instance resource allocation in megabytes.
#
# NOTE(review): `self.get_base_domain` is not defined in the visible part
# of this file — presumably defined later in the class; confirm.
def create_docker_job_new_api(image_url, job_namespace, job_name, start_command, exposed_port = 0, environment = {}, allow_egress = false, ram_mb = 128, disk_mb = 16)

  job_fqn = "job::#{job_namespace}::#{job_name}"

  # The API wants the start command as an argv-style array.
  sc = start_command.split(" ")
  puts "Trying to do job #{job_fqn}"

  resource = Apcera::Resource.new({
    :cpu => 0,
    :memory => ram_mb * 1024 * 1024,
    :disk => disk_mb * 1024 * 1024,
    :network => 1 * 1024 * 1024 * 1024,
    :netmax => 0
  })

  restart = Apcera::RestartConfig.new({
    :restart_mode => "no",
    :maximum_attempts => 0
  })

  docker_job = Apcera::CreateDockerJobRequest.new({
    :'allow_egress' => allow_egress,
    :'image_url' => image_url,
    :'job_fqn' => job_fqn,
    :'start_command' => sc,
    :'resources' => resource,
    :'restart' => restart
  })

  docker_job.env = environment

  # Set up the port
  #
  if (!exposed_port.nil? && exposed_port != 0)
    docker_job.exposed_ports = [exposed_port]

    # need to also set the route also: "/a/b" -> "b.a"
    #
    sub_route = job_namespace.split("/").reverse.join(".").chomp(".")
    route_name = "http://#{job_name}.#{sub_route}.#{self.get_base_domain}"
    docker_job.routes = {route_name => exposed_port}
  end

  task = @_jobs_api.docker_jobs_post(docker_job)
  task
end
|
712
|
+
|
713
|
+
# Fetches the current state of an asynchronous task (e.g. a Docker job
# creation submitted by create_docker_job_new_api).
def check_task_status(task_uuid)
  @_jobs_api.tasks_uuid_get(task_uuid)
end

# Creates a job from a fully-populated Apcera::Job; returns the created job.
def create_job(job)
  @_jobs_api.jobs_post(job)
end
|
720
|
+
|
721
|
+
# Sets the instance count of the named job; silently returns when the job
# does not exist.
def scale_job(job_fqn, number_of_instances)
  job = get_job_by_name(job_fqn)
  return if job.nil?

  puts "Scaling #{job_fqn} from #{job.num_instances} to #{number_of_instances} "
  job.num_instances = number_of_instances
  self.update_job(job)
end

# Replaces the job record with the given job's UUID.
def update_job(job)
  @_jobs_api.jobs_uuid_put(job.uuid, job)
end
|
733
|
+
|
734
|
+
# Fetches the job with the given FQN; nil when the cluster has no match.
def get_job_by_name(name)
  matches = @_jobs_api.jobs_get({:fqn => name})
  matches.empty? ? nil : matches.first
end
|
738
|
+
|
739
|
+
# Fetches the service with the given FQN; nil when not found.
#
# Fix: the unconditional `puts ret.to_yaml` debug dump is now gated on
# debug mode, matching every other method in this helper.
def get_service_by_name(name)
  ret = @_bindings_api.services_get({:fqn => name})
  puts ret.to_yaml if self.debug_mode?
  ret.length > 0 ? ret[0] : nil
end
|
744
|
+
|
745
|
+
# Returns the state string of the named job, or "JOB_NOT_FOUND" when no
# job with that FQN exists.
def get_job_state_by_name(name)
  jobs = @_jobs_api.jobs_get({:fqn => name})
  jobs.empty? ? "JOB_NOT_FOUND" : jobs.first.state
end
|
750
|
+
|
751
|
+
# Starts the named job with `num_instances` instances and returns the
# updated job. Returns nil when the job does not exist; returns the job
# unchanged when it is already started at the requested scale.
#
# Fix: corrected the "instanes" typo in the already-running notice.
def start_job(job_name, num_instances = 1)
  job = self.get_job_by_name(job_name)

  if self.debug_mode?
    ApceraApiHelper.notice job.to_yaml
  end

  if job.nil?
    ApceraApiHelper.notice "Error, job #{job_name} was not found"
    return nil
  elsif (job.state == "started" && job.num_instances == num_instances)
    ApceraApiHelper.notice "#{job_name} is already running with requested number of instances (#{num_instances})"
    return job
  end
  job.state = "started"
  job.num_instances = num_instances
  job = self.update_job(job)

  ApceraApiHelper.notice "#{job_name} state is now #{job.state} with #{job.num_instances}"
  job
end
|
772
|
+
|
773
|
+
# Deletes the named job.
#
# Fix: no-ops with a notice when the job does not exist — the original
# called `.uuid` on nil and raised NoMethodError.
def delete_job(job_name)
  job = self.get_job_by_name(job_name)
  if job.nil?
    ApceraApiHelper.notice("Job #{job_name} not found, nothing to delete")
    return nil
  end
  ApceraApiHelper.notice("Trying to delete job w/ uuid #{job.uuid}")
  @_jobs_api.jobs_uuid_delete(job.uuid)
end
|
778
|
+
|
779
|
+
# Stops the named job. Returns early (with the job, possibly nil) when
# the job is missing or already stopped; otherwise marks it stopped and
# pushes the update.
def stop_job(job_name)
  job = self.get_job_by_name(job_name)

  if self.debug_mode?
    ApceraApiHelper.notice "JOB is\n\n"
    ApceraApiHelper.notice job.to_yaml
  end

  if job.nil? || job.state == "stopped"
    ApceraApiHelper.notice "Error, job #{job_name} is either already stopped or not found"
    return job
  end
  job.state = "stopped"
  self.update_job(job)

  ApceraApiHelper.notice "Job #{job_name} state is now " + self.get_job_state_by_name(job_name)
end
|
796
|
+
|
797
|
+
# Returns the first advertised route of a job as "type://endpoint", or ""
# when the job has no routed ports.
#
# Fix: also returns "" when the job itself does not exist — the original
# raised NoMethodError on nil.
def get_job_route(namespace, job_name)
  job = self.get_job_by_name("job::#{namespace}::#{job_name}")

  url = ""

  if !job.nil? && !job.ports.nil?
    job.ports.each do | port |
      if !port.routes.nil?
        # Only the first route of the first routed port is reported.
        route = port.routes[0]
        url = "#{route.type}://#{route.endpoint}"
        break
      end
    end
  end
  url
end
|
813
|
+
|
814
|
+
# Binding operations
#
# Posts a fully-populated Apcera::Binding; returns the created binding.
def create_binding(binding)
  @_bindings_api.bindings_post(binding)
end
|
819
|
+
|
820
|
+
# Creates a job-to-job binding so `source_job` can reach `target_job` on
# `port` under the alias `name`. The binding FQN is generated in the
# caller's sandbox namespace with a random UUID.
def link_jobs(source_job, target_job, name, port)
  self.banner("Linking #{source_job} to #{target_job} via #{name} on port #{port}")
  binding = Apcera::Binding.new({
    :fqn => "binding::#{self.sandbox}::#{SecureRandom.uuid}",
    :job_fqn => source_job,
    :name => name,
    :target_job_fqn => target_job,
    :target_job_port => port
  })

  if self.debug_mode?
    ApceraApiHelper.notice binding.to_yaml
  end

  self.create_binding(binding)
end
|
836
|
+
|
837
|
+
# Binds an existing service to a job under the alias `name`.
#
# Fixes: the provider lookup is nil-guarded (the original raised
# NoMethodError when the service was missing), and the provider dump is
# gated on debug mode instead of printing unconditionally.
def add_service_to_job(job_fqn, service_fqn, name)
  self.banner("Trying to bind service #{service_fqn} to #{job_fqn} as #{name}")

  # first we need the provider for the service
  #
  provider = self.get_service_by_name(service_fqn)
  if provider.nil?
    ApceraApiHelper.notice "Error, service #{service_fqn} was not found"
    return nil
  end
  puts ">>>>Provider is #{provider.to_yaml}" if self.debug_mode?

  binding = Apcera::Binding.new({
    :fqn => "binding::#{self.sandbox}::#{SecureRandom.uuid}",
    :job_fqn => job_fqn,
    :name => name,
    :provider_fqn => provider.provider_fqn,
    :service_fqn => service_fqn
  })

  if self.debug_mode?
    ApceraApiHelper.notice binding.to_yaml
  end

  self.create_binding(binding)
end
|
859
|
+
|
860
|
+
# get job logs
#
# Fetches up to `lines` recent log lines for the job with the given uuid.
#
# Fix: dropped the pointless `opts =` assignment embedded in the argument
# list; the options hash is passed directly.
def get_job_logs(uuid, lines)
  @_jobs_api.jobs_uuid_logs_get(uuid, {:lines => lines})
end
|
867
|
+
|
868
|
+
# Log Operations
#
# Fetches the log channel with the given key and returns it rendered as
# YAML (handy for printing).
def get_logs(key)
  @_logs_api.logs_channel_get(key).to_yaml
end
|
873
|
+
|
874
|
+
# Docker Operations
#
# Builds a Docker job prototype and submits it to the (legacy) job-check
# endpoint. Returns the prototype so the caller can reuse it.
def initiate_docker_job(namespace, job_name, image_url, start_command = "" )
  job_proto = self.build_docker_job_prototype(namespace, job_name, image_url, start_command)
  @_jobs_api.docker_job_check(job_proto)

  # return the prototype so we can use it later
  #
  job_proto
end
|
884
|
+
|
885
|
+
# End-to-end Docker deploy: builds the registry image URL (optionally
# with basic-auth userinfo), submits an async Docker job, polls the task
# until it leaves the "running" state, and returns the resulting job.
# Exits the process when a job with the target FQN already exists.
#
# NOTE(review): URI::encode was removed in Ruby 3.0 — this path breaks on
# modern Rubies when credentials are supplied; confirm target Ruby and
# consider URI.encode_www_form_component per part.
def job_from_docker(job_name, job_namespace, registry, image_name, image_tag, start_command = "", allow_egress = false, port = 0, environment = {}, user_name = "", password = "")

  # Make sure that the job isn't running
  #
  base_fqn = "#{job_namespace}::#{job_name}"
  job_fqn = "job::#{base_fqn}"

  # begin
  if self.job_exist?(job_fqn)
    ApceraApiHelper.notice "error, job #{job_fqn} exists already, aborting"
    exit
  end
  # rescue
  #   puts "crapped out checking #{job_name}"
  #   exit
  # end

  # This is the stuff from the scratchpad
  #
  parts = registry.split(/[:\/]/)
  scheme = parts.first
  domain = parts.last

  # Optional basic-auth userinfo embedded in the registry URL.
  userinfo = ""
  if user_name.nonblank? && password.nonblank?
    userinfo = URI::encode("#{user_name}:#{password}@")
  end

  image_suffix = ""
  if image_tag.nonblank?
    image_suffix = ":#{image_tag}"
  end
  endpoint_scheme = "#{scheme}://#{userinfo}"
  registry_url = "#{endpoint_scheme}#{domain}"
  image_url = "#{registry_url}/#{image_name}#{image_suffix}"

  djt = self.create_docker_job_new_api(image_url, job_namespace, job_name, start_command, port, environment, allow_egress)
  puts "docker job task location for #{job_name} is #{djt.location}"
  location = djt.location

  # The task UUID is the last segment of the task's Location.
  uuid = location.split("/").last
  task = self.check_task_status(uuid)
  if task.nil?
    puts "YES, the task is nil for #{job_name}"
  end
  while task.state == "running"
    # sleep a random time between 5 and 10 seconds
    sleep rand(5..10)
    task = self.check_task_status(uuid)
    puts "Staging process (#{job_name}): *** #{task.state} ***"
  end

  return self.get_job_by_name(job_fqn)
end
|
939
|
+
|
940
|
+
def build_docker_job_prototype(namespace, job_name, image_url, start_command)
  # Assemble (without submitting) an Apcera::Job prototype describing a
  # Docker-backed job: image origin, one optional SSH port, the process
  # configuration, resource limits, restart/rollout policies and default
  # tags. The caller decides what to do with the returned prototype.

  origin = Apcera::DockerOrigin.new(
    "ImageName" => "",
    "ImageTag" => "",
    "RegistryURL" => image_url,
    "Volumes" => nil
  )

  # Port 222 is the SSH port for these jobs; marked optional so the job
  # can still start when it cannot be bound.
  ssh_port = Apcera::Port.new({
    :optional => true,
    :number => 222
  })

  app_config = Apcera::ProcessObject.new({
    :start_command => start_command,
    :start_command_timeout => 30,
    :stop_command_raw => [],
    :stop_command => "",
    :stop_timeout => 5,
    :user => "root"
  })
  process_config = Apcera::Process.new({ :app => app_config })

  limits = Apcera::Resource.new({
    :cpu => 0,
    :memory => 768 * 1024 * 1024,       # 768 MiB
    :disk => 256 * 1024 * 1024,         # 256 MiB
    :network => 1 * 1024 * 1024 * 1024, # 1 GiB
    :netmax => 0
  })

  restart_policy = Apcera::RestartConfig.new({
    :restart_mode => "no",
    :maximum_attempts => 0
  })

  rollout_policy = Apcera::RolloutConfig.new({
    :force_stop_old_instances_after => 0,
    :flapping_minimum_restarts => 0,
    :flapping_percent => 0,
    :flapping_window => 0,
    :errored_state_window => 0
  })

  prototype = Apcera::Job.new({
    :docker_origin => origin,
    :fqn => "job::#{namespace}::#{job_name}",
    :name => job_name,
    :num_instances => 1,
    :packages => nil,
    :ports => [ssh_port],
    :processes => process_config,
    :resources => limits,
    :restart => restart_policy,
    :rollout => rollout_policy,
    :state => "unknown",
    :uuid => "",
    :version_id => 0,
    :weight => 0
  })

  prototype.tags = { "app" => job_name, "docker" => job_name, "ssh" => "true" }

  prototype
end
|
1008
|
+
|
1009
|
+
def _get_job_preferences_by_name(namespace, job_name)
  # Fetch the preferences object for the job "job::<namespace>::<job_name>".
  fqn = "job::#{namespace}::#{job_name}"
  @_jobs_api.preferences_job_get(fqn)
end
|
1012
|
+
|
1013
|
+
def get_docker_namespace(job_name)
  # Look up the docker-cache namespace preference for a job that lives in
  # this helper's sandbox namespace.
  job_prefs = self._get_job_preferences_by_name(self.sandbox, job_name)
  job_prefs.docker_cache_namespace
end
|
1017
|
+
|
1018
|
+
# Some route operations
|
1019
|
+
#
|
1020
|
+
def jobs_routes
  # Return every route known to the cluster's jobs API.
  @_jobs_api.jobs_routes_get
end
|
1023
|
+
|
1024
|
+
def jobs_routes_for_endpoint(endpoint_name)
  # Return the routes attached to the given endpoint. The API expects the
  # endpoint name URL-safe-Base64 encoded in the request path.
  encoded = Base64.urlsafe_encode64(endpoint_name)
  ApceraApiHelper.notice encoded
  # Fix: the original encoded endpoint_name, printed it, but then queried
  # a hard-coded hostname ("zoom.jamie.smith.sandbox.demo.proveapcera.io"),
  # ignoring its argument entirely.
  @_jobs_api.jobs_routes_endpoint_get(encoded)
end
|
1029
|
+
|
1030
|
+
# Utility Operations
|
1031
|
+
#
|
1032
|
+
def show_wait_spinner(fps=10)
  # Run the given block while animating a | / - \ spinner on stdout at
  # roughly `fps` frames per second; returns the block's return value.
  glyphs = %w[| / - \\]
  pause = 1.0 / fps
  tick = 0
  worker = Thread.new do
    # Spin until `tick` is set to false below. Each frame: draw the next
    # glyph, wait one frame period, then backspace over it.
    while tick
      print glyphs[(tick += 1) % glyphs.length]
      sleep pause
      print "\b"
    end
  end
  result = yield   # run the caller's work while the spinner animates
  tick = false     # signal the spinner thread to exit…
  worker.join      # …and wait for it to finish cleaning up
  result
end
|
1048
|
+
|
1049
|
+
def get_base_domain()
  # Derive and memoize the cluster's base domain from the API info URL,
  # e.g. "https://api.cluster.example.com" -> "cluster.example.com".
  if @_base_domain.nil?
    info = @_utilities_api.info_get()
    # The host is the last token after splitting on ":" and "/".
    domain = info.url.split(/[:\/]/).last

    # Fix: strip only the LEADING "api." label. The original used
    # gsub!("api.", ""), which deletes "api." anywhere in the hostname
    # (e.g. "myapi.example.com" would become "myexample.com").
    domain = domain.sub(/\Aapi\./, "")
    @_base_domain = domain
  end

  return @_base_domain
end
|
1060
|
+
|
1061
|
+
def get_server_info()
  # Return the raw cluster info object reported by the utilities API.
  @_utilities_api.info_get()
end
|
1065
|
+
|
1066
|
+
def banner(text)
  # Print `text` framed by two 80-character "#" rules, preceded by two
  # blank lines, to visually separate sections of console output.
  rule = "#" * 80
  puts "\n\n"
  puts rule
  puts "# #{text}"
  puts rule
end
|
1072
|
+
|
1073
|
+
def self.notice(text)
  # Lightweight logging hook: helper output funnels through here so it can
  # be redirected or silenced in one place.
  $stdout.puts text
end
|
1076
|
+
|
1077
|
+
end
|
metadata
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: apcera
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.1.6.2
|
4
|
+
version: 0.1.6.3
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Jamie Smith
|
@@ -273,6 +273,7 @@ files:
|
|
273
273
|
- lib/apcera/models/virtual_network_endpoint_interface.rb
|
274
274
|
- lib/apcera/version.rb
|
275
275
|
- lib/apcera_api_gem.rb
|
276
|
+
- lib/apcera_api_helper.rb
|
276
277
|
homepage: http://apcera.com
|
277
278
|
licenses:
|
278
279
|
- Apache-2.0
|