kubernetes-deploy 0.1.1 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile +2 -0
- data/exe/kubernetes-deploy +16 -645
- data/lib/kubernetes-deploy.rb +42 -0
- data/lib/kubernetes-deploy/kubernetes_resource.rb +102 -0
- data/lib/kubernetes-deploy/kubernetes_resource/config_map.rb +28 -0
- data/lib/kubernetes-deploy/kubernetes_resource/deployment.rb +62 -0
- data/lib/kubernetes-deploy/kubernetes_resource/ingress.rb +32 -0
- data/lib/kubernetes-deploy/kubernetes_resource/persistent_volume_claim.rb +27 -0
- data/lib/kubernetes-deploy/kubernetes_resource/pod.rb +84 -0
- data/lib/kubernetes-deploy/kubernetes_resource/service.rb +34 -0
- data/lib/kubernetes-deploy/runner.rb +285 -0
- data/lib/kubernetes-deploy/version.rb +1 -1
- metadata +11 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fe05ac43ce5668b78154fd7cc4e5c62d44513de0
+  data.tar.gz: 67cf8f77d37ec6b3c9ba1cbf73bc4c7185e4e7a0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9a9503f71c75a1640c46e42ef007ca942218553c68a05718c4892e162daaf7b18eb5a05b216337a2cdaced51a1ecdcad20a5845820348af58576cbb8375e630e
+  data.tar.gz: 70d15573a44990b7b2e819569165f553a120f90bf7f57277118e7df63f678646a2c7afa77b92043597eb20f823b546a47e1ea5060a99259a7fdf75f4486a1aae
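For reference, the SHA512 values above can be recomputed from a locally downloaded copy of the gem. The sketch below is illustrative and not part of the diff: the local file name kubernetes-deploy-0.1.2.gem is an assumption, and it relies only on the fact that a .gem file is a tar archive containing metadata.gz and data.tar.gz.

# Sketch only: recompute the digests that appear in checksums.yaml.
# Assumes kubernetes-deploy-0.1.2.gem has been downloaded to the current directory.
require 'digest'
require 'rubygems/package'

File.open('kubernetes-deploy-0.1.2.gem', 'rb') do |io|
  Gem::Package::TarReader.new(io).each do |entry|
    next unless %w(metadata.gz data.tar.gz).include?(entry.full_name)
    puts "#{entry.full_name} SHA512: #{Digest::SHA512.hexdigest(entry.read)}"
  end
end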
data/Gemfile
CHANGED
data/exe/kubernetes-deploy
CHANGED
@@ -10,653 +10,24 @@
 # Optionally, the following variables can be used to override script defaults:
 # - ENV['K8S_TEMPLATE_FOLDER']: Location of Kubernetes files to deploy. Default is config/deploy/#{environment}.

-require 'open3'
-require 'securerandom'
-require 'erb'
-require 'json'
-require 'yaml'
-require 'shellwords'
-require 'tempfile'
-require 'logger'
-require 'active_support/core_ext/object/blank'
-require 'active_support/descendants_tracker'
-require 'active_support/core_ext/hash/slice'
-require 'active_support/core_ext/numeric/time'
+require 'kubernetes-deploy'

-
-class FatalDeploymentError < StandardError; end
+require 'optparse'

-
-
-
-
-
-  # core/v1/ReplicationController -- superseded by deployments/replicasets
-  # extensions/v1beta1/ReplicaSet -- managed by deployments
-  # core/v1/Secret -- should not committed / managed by shipit
-  PRUNE_WHITELIST = %w(
-    core/v1/ConfigMap
-    core/v1/Pod
-    core/v1/Service
-    batch/v1/Job
-    extensions/v1beta1/DaemonSet
-    extensions/v1beta1/Deployment
-    extensions/v1beta1/HorizontalPodAutoscaler
-    extensions/v1beta1/Ingress
-    apps/v1beta1/StatefulSet
-  ).freeze
+skip_wait = false
+ARGV.options do |opts|
+  opts.on("-w", "--skip-wait") { skip_wait = true }
+  opts.parse!
+end

-
-
-
-
+KubernetesDeploy::Runner.with_friendly_errors do
+  runner = KubernetesDeploy::Runner.new(
+    namespace: ARGV[0],
+    context: ARGV[1],
+    environment: ENV['ENVIRONMENT'],
+    current_sha: ENV['REVISION'],
+    template_folder: ENV['K8S_TEMPLATE_FOLDER'],
+    wait_for_completion: !skip_wait,
   )
-
-  def initialize(namespace:, environment:, current_sha:, template_folder: nil, context:)
-    @namespace = namespace
-    @context = context
-    @current_sha = current_sha
-    @template_path = File.expand_path('./' + (template_folder || "config/deploy/#{environment}"))
-    # Max length of podname is only 63chars so try to save some room by truncating sha to 8 chars
-    @id = current_sha[0...8] + "-#{SecureRandom.hex(4)}" if current_sha
-  end
-
-  def run
-    @current_phase = 0
-    phase_heading("Validating configuration")
-    validate_configuration
-
-    phase_heading("Configuring kubectl")
-    set_kubectl_context
-    validate_namespace
-
-    phase_heading("Parsing deploy content")
-    resources = discover_resources
-
-    phase_heading("Checking initial resource statuses")
-    resources.each(&:sync)
-
-    phase_heading("Predeploying priority resources")
-    predeploy_priority_resources(resources)
-
-    phase_heading("Deploying all resources")
-    deploy_resources(resources, prune: true)
-    wait_for_completion(resources)
-
-    report_final_status(resources)
-  rescue FatalDeploymentError => error
-    KubernetesDeploy.logger.fatal(error.message)
-    exit 1
-  end
-
-  def template_variables
-    {
-      'current_sha' => @current_sha,
-      'deployment_id' => @id,
-    }
-  end
-
-  private
-
-  def predeploy_priority_resources(resource_list)
-    PREDEPLOY_SEQUENCE.each do |resource_type|
-      matching_resources = resource_list.select { |r| r.type == resource_type }
-      next if matching_resources.empty?
-      deploy_resources(matching_resources)
-      wait_for_completion(matching_resources)
-      fail_count = matching_resources.count { |r| r.deploy_failed? || r.deploy_timed_out? }
-      if fail_count > 0
-        raise FatalDeploymentError, "#{fail_count} priority resources failed to deploy"
-      end
-    end
-  end
-
-  def discover_resources
-    resources = []
-    Dir.foreach(@template_path) do |filename|
-      next unless filename.end_with?(".yml.erb", ".yml")
-
-      split_templates(filename) do |tempfile|
-        resource_id = discover_resource_via_dry_run(tempfile)
-        type, name = resource_id.split("/", 2) # e.g. "pod/web-198612918-dzvfb"
-        resources << KubernetesResource.for_type(type, name, @namespace, tempfile)
-        KubernetesDeploy.logger.info "Discovered template for #{resource_id}"
-      end
-    end
-    resources
-  end
-
-  def discover_resource_via_dry_run(tempfile)
-    resource_id, err, st = run_kubectl("apply", "-f", tempfile.path, "--dry-run", "--output=name")
-    raise FatalDeploymentError, "Dry run failed for template #{File.basename(tempfile.path)}." unless st.success?
-    resource_id
-  end
-
-  def split_templates(filename)
-    file_content = File.read(File.join(@template_path, filename))
-    rendered_content = render_template(filename, file_content)
-    YAML.load_stream(rendered_content) do |doc|
-      f = Tempfile.new(filename)
-      f.write(YAML.dump(doc))
-      f.close
-      yield f
-    end
-  rescue Psych::SyntaxError => e
-    KubernetesDeploy.logger.error(rendered_content)
-    raise FatalDeploymentError, "Template #{filename} cannot be parsed: #{e.message}"
-  end
-
-  def report_final_status(resources)
-    if resources.all?(&:deploy_succeeded?)
-      log_green("Deploy succeeded!")
-    else
-      fail_list = resources.select { |r| r.deploy_failed? || r.deploy_timed_out? }.map(&:id)
-      KubernetesDeploy.logger.error("The following resources failed to deploy: #{fail_list.join(", ")}")
-      raise FatalDeploymentError, "#{fail_list.length} resources failed to deploy"
-    end
-  end
-
-  def wait_for_completion(watched_resources)
-    delay_sync_until = Time.now.utc
-    while watched_resources.present?
-      if Time.now.utc < delay_sync_until
-        sleep (delay_sync_until - Time.now.utc)
-      end
-      delay_sync_until = Time.now.utc + 3 # don't pummel the API if the sync is fast
-      watched_resources.each(&:sync)
-      newly_finished_resources, watched_resources = watched_resources.partition(&:deploy_finished?)
-      newly_finished_resources.each do |resource|
-        next unless resource.deploy_failed? || resource.deploy_timed_out?
-        KubernetesDeploy.logger.error("#{resource.id} failed to deploy with status '#{resource.status}'.")
-        KubernetesDeploy.logger.error("This script will continue to poll until the status of all resources deployed in this phase is resolved, but the deploy is now doomed and you may wish abort it.")
-        KubernetesDeploy.logger.error(resource.status_data)
-      end
-    end
-  end
-
-  def render_template(filename, raw_template)
-    return raw_template unless File.extname(filename) == ".erb"
-
-    erb_template = ERB.new(raw_template)
-    erb_binding = binding
-    template_variables.each do |var_name, value|
-      erb_binding.local_variable_set(var_name, value)
-    end
-    erb_template.result(erb_binding)
-  end
-
-  def validate_configuration
-    errors = []
-    if ENV["KUBECONFIG"].blank? || !File.file?(ENV["KUBECONFIG"])
-      errors << "Kube config not found at #{ENV["KUBECONFIG"]}"
-    end
-
-    if @current_sha.blank?
-      errors << "Current SHA must be specified"
-    end
-
-    if !File.directory?(@template_path)
-      errors << "Template path #{@template_path} doesn't exist"
-    elsif Dir.entries(@template_path).none? { |file| file =~ /\.yml(\.erb)?$/ }
-      errors << "#{@template_path} doesn't contain valid templates (postfix .yml or .yml.erb)"
-    end
-
-    if @namespace.blank?
-      errors << "Namespace must be specified"
-    end
-
-    if @context.blank?
-      errors << "Context must be specified"
-    end
-
-    raise FatalDeploymentError, "Configuration invalid: #{errors.join(", ")}" unless errors.empty?
-    KubernetesDeploy.logger.info("All required parameters and files are present")
-  end
-
-  def deploy_resources(resources, prune: false)
-    command = ["apply", "--namespace=#{@namespace}"]
-    KubernetesDeploy.logger.info("Deploying resources:")
-
-    resources.each do |r|
-      KubernetesDeploy.logger.info("- #{r.id}")
-      command.push("-f", r.file.path)
-      r.deploy_started = Time.now.utc
-    end
-
-    if prune
-      command.push("--prune", "--all")
-      PRUNE_WHITELIST.each { |type| command.push("--prune-whitelist=#{type}") }
-    end
-
-    run_kubectl(*command)
-  end
-
-  def set_kubectl_context
-    out, err, st = run_kubectl("config", "get-contexts", "-o", "name", namespaced: false)
-    available_contexts = out.split("\n")
-    if !st.success?
-      raise FatalDeploymentError, err
-    elsif !available_contexts.include?(@context)
-      raise FatalDeploymentError, "Context #{@context} is not available. Valid contexts: #{available_contexts}"
-    end
-
-    _, err, st = run_kubectl("config", "use-context", @context, namespaced: false)
-    raise FatalDeploymentError, "Kubectl config is not valid: #{err}" unless st.success?
-    KubernetesDeploy.logger.info("Kubectl configured to use context #{@context}")
-  end
-
-  def validate_namespace
-    _, _, st = run_kubectl("get", "namespace", @namespace, namespaced: false)
-    raise FatalDeploymentError, "Failed to validate namespace #{@namespace}" unless st.success?
-    KubernetesDeploy.logger.info("Namespace #{@namespace} validated")
-  end
-
-  def run_kubectl(*args, namespaced: true)
-    args = args.unshift("kubectl")
-    if namespaced
-      raise FatalDeploymentError, "Namespace missing for namespaced command" unless @namespace
-      args.push("--namespace=#{@namespace}")
-    end
-    KubernetesDeploy.logger.debug Shellwords.join(args)
-    out, err, st = Open3.capture3(*args)
-    KubernetesDeploy.logger.debug(out.shellescape)
-    KubernetesDeploy.logger.warn(err) unless st.success?
-    [out.chomp, err.chomp, st]
-  end
-
-  def phase_heading(phase_name)
-    @current_phase += 1
-    heading = "Phase #{@current_phase}: #{phase_name}"
-    padding = (100.0 - heading.length)/2
-    KubernetesDeploy.logger.info("")
-    KubernetesDeploy.logger.info("#{'-' * padding.floor}#{heading}#{'-' * padding.ceil}")
-  end
-
-  def log_green(msg)
-    STDOUT.puts "\033[0;32m#{msg}\x1b[0m\n" # green
-  end
-
-  def self.logger
-    @logger ||= begin
-      l = Logger.new(STDOUT)
-      l.level = ENV["DEBUG"] ? Logger::DEBUG : Logger::INFO
-      l.formatter = proc do |severity, _datetime, _progname, msg|
-        case severity
-        when "FATAL", "ERROR" then "\033[0;31m[#{severity}]\t#{msg}\x1b[0m\n" # red
-        when "WARN" then "\033[0;33m[#{severity}]\t#{msg}\x1b[0m\n" # yellow
-        when "INFO" then "\033[0;36m#{msg}\x1b[0m\n" # blue
-        else "[#{severity}]\t#{msg}\n"
-        end
-      end
-      l
-    end
-  end
-
-  class KubernetesResource
-    extend ActiveSupport::DescendantsTracker
-
-    attr_reader :name, :namespace, :file
-    attr_writer :type, :deploy_started
-
-    TIMEOUT = 5.minutes
-
-    def self.handled_type
-      name.split('::').last
-    end
-
-    def self.for_type(type, name, namespace, file)
-      if subclass = descendants.find { |subclass| subclass.handled_type.downcase == type }
-        subclass.new(name, namespace, file)
-      else
-        self.new(name, namespace, file).tap { |r| r.type = type }
-      end
-    end
-
-    def initialize(name, namespace, file)
-      # subclasses must also set these
-      @name, @namespace, @file = name, namespace, file
-    end
-
-    def id
-      "#{type}/#{name}"
-    end
-
-    def sync
-      log_status
-    end
-
-    def deploy_failed?
-      false
-    end
-
-    def deploy_succeeded?
-      if @deploy_started && !@success_assumption_warning_shown
-        KubernetesDeploy.logger.warn("Don't know how to monitor resources of type #{type}. Assuming #{id} deployed successfully.")
-        @success_assumption_warning_shown = true
-      end
-      true
-    end
-
-    def exists?
-      nil
-    end
-
-    def status
-      @status ||= "Unknown"
-      deploy_timed_out? ? "Timed out with status #{@status}" : @status
-    end
-
-    def type
-      @type || self.class.handled_type
-    end
-
-    def deploy_finished?
-      deploy_failed? || deploy_succeeded? || deploy_timed_out?
-    end
-
-    def deploy_timed_out?
-      return false unless @deploy_started
-      !deploy_succeeded? && !deploy_failed? && (Time.now.utc - @deploy_started > self.class::TIMEOUT)
-    end
-
-    def status_data
-      {
-        group: group_name,
-        name: name,
-        status_string: status,
-        exists: exists?,
-        succeeded: deploy_succeeded?,
-        failed: deploy_failed?,
-        timed_out: deploy_timed_out?
-      }
-    end
-
-    def group_name
-      type + "s"
-    end
-
-    def run_kubectl(*args)
-      raise FatalDeploymentError, "Namespace missing for namespaced command" if namespace.blank?
-      args = args.unshift("kubectl").push("--namespace=#{namespace}")
-      KubernetesDeploy.logger.debug Shellwords.join(args)
-      out, err, st = Open3.capture3(*args)
-      KubernetesDeploy.logger.debug(out.shellescape)
-      KubernetesDeploy.logger.debug("[ERROR] #{err.shellescape}") unless st.success?
-      [out.chomp, st]
-    end
-
-    def log_status
-      STDOUT.puts "[KUBESTATUS] #{JSON.dump(status_data)}"
-    end
-  end
-
-  class ConfigMap < KubernetesResource
-    TIMEOUT = 30.seconds
-
-    def initialize(name, namespace, file)
-      @name, @namespace, @file = name, namespace, file
-    end
-
-    def sync
-      _, st = run_kubectl("get", type, @name)
-      @status = st.success? ? "Available" : "Unknown"
-      @found = st.success?
-      log_status
-    end
-
-    def deploy_succeeded?
-      exists?
-    end
-
-    def deploy_failed?
-      false
-    end
-
-    def exists?
-      @found
-    end
-  end
-
-  class PersistentVolumeClaim < KubernetesResource
-    TIMEOUT = 5.minutes
-
-    def initialize(name, namespace, file)
-      @name, @namespace, @file = name, namespace, file
-    end
-
-    def sync
-      @status, st = run_kubectl("get", type, @name, "--output=jsonpath={.status.phase}")
-      @found = st.success?
-      log_status
-    end
-
-    def deploy_succeeded?
-      @status == "Bound"
-    end
-
-    def deploy_failed?
-      @status == "Lost"
-    end
-
-    def exists?
-      @found
-    end
-  end
-
-  class Ingress < KubernetesResource
-    TIMEOUT = 30.seconds
-
-    def initialize(name, namespace, file)
-      @name, @namespace, @file = name, namespace, file
-    end
-
-    def sync
-      _, st = run_kubectl("get", type, @name)
-      @status = st.success? ? "Created" : "Unknown"
-      @found = st.success?
-      log_status
-    end
-
-    def deploy_succeeded?
-      exists?
-    end
-
-    def deploy_failed?
-      false
-    end
-
-    def exists?
-      @found
-    end
-
-    def group_name
-      "Ingresses"
-    end
-  end
-
-  class Service < KubernetesResource
-    TIMEOUT = 15.minutes
-
-    def initialize(name, namespace, file)
-      @name, @namespace, @file = name, namespace, file
-    end
-
-    def sync
-      _, st = run_kubectl("get", type, @name)
-      @found = st.success?
-      if @found
-        endpoints, st = run_kubectl("get", "endpoints", @name, "--output=jsonpath={.subsets[*].addresses[*].ip}")
-        @num_endpoints = (st.success? ? endpoints.split.length : 0)
-      else
-        @num_endpoints = 0
-      end
-      @status = "#{@num_endpoints} endpoints"
-      log_status
-    end
-
-    def deploy_succeeded?
-      @num_endpoints > 0
-    end
-
-    def deploy_failed?
-      false
-    end
-
-    def exists?
-      @found
-    end
-  end
-
-  class Pod < KubernetesResource
-    TIMEOUT = 15.minutes
-    SUSPICIOUS_CONTAINER_STATES = %w(ImagePullBackOff RunContainerError).freeze
-
-    def initialize(name, namespace, file, parent: nil)
-      @name, @namespace, @file, @parent = name, namespace, file, parent
-      @bare = !@parent
-    end
-
-    def sync
-      out, st = run_kubectl("get", type, @name, "-a", "--output=json")
-      if @found = st.success?
-        pod_data = JSON.parse(out)
-        interpret_json_data(pod_data)
-      else # reset
-        @status = @phase = nil
-        @ready = false
-        @containers = []
-      end
-      display_logs if @bare && deploy_finished?
-      log_status
-    end
-
-    def interpret_json_data(pod_data)
-      @phase = (pod_data["metadata"]["deletionTimestamp"] ? "Terminating" : pod_data["status"]["phase"])
-      @containers = pod_data["spec"]["containers"].map { |c| c["name"] }
-
-      if @deploy_started && pod_data["status"]["containerStatuses"]
-        pod_data["status"]["containerStatuses"].each do |status|
-          waiting_state = status["state"]["waiting"] if status["state"]
-          reason = waiting_state["reason"] if waiting_state
-          next unless SUSPICIOUS_CONTAINER_STATES.include?(reason)
-          KubernetesDeploy.logger.warn("#{id} has container in state #{reason} (#{waiting_state["message"]})")
-        end
-      end
-
-      if @phase == "Failed"
-        @status = "#{@phase} (Reason: #{pod_data["status"]["reason"]})"
-      elsif @phase == "Terminating"
-        @status = @phase
-      else
-        ready_condition = pod_data["status"]["conditions"].find { |condition| condition["type"] == "Ready" }
-        @ready = ready_condition.present? && (ready_condition["status"] == "True")
-        @status = "#{@phase} (Ready: #{@ready})"
-      end
-    end
-
-    def deploy_succeeded?
-      if @bare
-        @phase == "Succeeded"
-      else
-        @phase == "Running" && @ready
-      end
-    end
-
-    def deploy_failed?
-      @phase == "Failed"
-    end
-
-    def exists?
-      @bare ? @found : true
-    end
-
-    def group_name
-      @bare ? "Bare pods" : @parent
-    end
-
-    private
-
-    def display_logs
-      return {} unless exists? && @containers.present? && !@already_displayed
-
-      @containers.each do |container_name|
-        out, st = run_kubectl("logs", @name, "--timestamps=true", "--since-time=#{@deploy_started.to_datetime.rfc3339}")
-        next unless st.success? && out.present?

-        KubernetesDeploy.logger.info "Logs from #{id} container #{container_name}:"
-        STDOUT.puts "#{out}"
-        @already_displayed = true
-      end
-    end
-  end
-
-  class Deployment < KubernetesResource
-    TIMEOUT = 15.minutes
-
-    def initialize(name, namespace, file)
-      @name, @namespace, @file = name, namespace, file
-    end
-
-    def sync
-      json_data, st = run_kubectl("get", type, @name, "--output=json")
-      @found = st.success?
-      @rollout_data = {}
-      @status = nil
-      @pods = []
-
-      if @found
-        @rollout_data = JSON.parse(json_data)["status"].slice("updatedReplicas", "replicas", "availableReplicas", "unavailableReplicas")
-        @status, _ = run_kubectl("rollout", "status", type, @name, "--watch=false") if @deploy_started
-
-        pod_list, st = run_kubectl("get", "pods", "-a", "-l", "name=#{name}", "--output=json")
-        if st.success?
-          pods_json = JSON.parse(pod_list)["items"]
-          pods_json.each do |pod_json|
-            pod_name = pod_json["metadata"]["name"]
-            pod = Pod.new(pod_name, namespace, nil, parent: "#{@name.capitalize} deployment")
-            pod.deploy_started = @deploy_started
-            pod.interpret_json_data(pod_json)
-            pod.log_status
-            @pods << pod
-          end
-        end
-      end
-
-      log_status
-    end
-
-    def deploy_succeeded?
-      return false unless @rollout_data.key?("availableReplicas")
-      # TODO: this should look at the current replica set's pods too
-      @rollout_data["availableReplicas"].to_i == @pods.length &&
-        @rollout_data.values.uniq.length == 1 # num desired, current, up-to-date and available are equal
-    end
-
-    def deploy_failed?
-      # TODO: this should look at the current replica set's pods only or it'll never be true for rolling updates
-      @pods.present? && @pods.all?(&:deploy_failed?)
-    end
-
-    def deploy_timed_out?
-      # TODO: this should look at the current replica set's pods only or it'll never be true for rolling updates
-      super || @pods.present? && @pods.all?(&:deploy_timed_out?)
-    end
-
-    def exists?
-      @found
-    end
-
-    def status_data
-      super.merge(replicas: @rollout_data, num_pods: @pods.length)
-    end
-  end
+  runner.run
 end
-
-deployment = KubernetesDeploy.new(
-  namespace: ARGV[0],
-  context: ARGV[1],
-  environment: ENV['ENVIRONMENT'],
-  current_sha: ENV['REVISION'],
-  template_folder: ENV['K8S_TEMPLATE_FOLDER']
-)
-deployment.run
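The executable is now a thin wrapper around code extracted into lib/kubernetes-deploy (see the new runner.rb and kubernetes_resource files listed above). A minimal sketch of invoking the same entry point programmatically, with placeholder values standing in for the ARGV/ENV inputs the binary reads:

# Sketch only: drives KubernetesDeploy::Runner the same way the new executable does.
# The namespace, context, sha, and template folder below are placeholder values.
require 'kubernetes-deploy'

KubernetesDeploy::Runner.with_friendly_errors do
  runner = KubernetesDeploy::Runner.new(
    namespace: 'my-app-staging',              # ARGV[0] in the executable
    context: 'my-cluster',                    # ARGV[1]
    environment: 'staging',                   # ENV['ENVIRONMENT']
    current_sha: 'abc12345',                  # ENV['REVISION']
    template_folder: 'config/deploy/staging', # ENV['K8S_TEMPLATE_FOLDER']
    wait_for_completion: true                 # false when --skip-wait is passed
  )
  runner.run
end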