kubernetes-deploy 0.6.6 → 0.7.0
This diff shows the changes between publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the packages as they appear in that registry.
- checksums.yaml +4 -4
- data/exe/kubernetes-deploy +21 -13
- data/exe/kubernetes-restart +7 -4
- data/exe/kubernetes-run +14 -10
- data/kubernetes-deploy.gemspec +1 -0
- data/lib/kubernetes-deploy.rb +3 -2
- data/lib/kubernetes-deploy/deferred_summary_logging.rb +87 -0
- data/lib/kubernetes-deploy/ejson_secret_provisioner.rb +18 -20
- data/lib/kubernetes-deploy/formatted_logger.rb +42 -0
- data/lib/kubernetes-deploy/kubectl.rb +21 -8
- data/lib/kubernetes-deploy/kubernetes_resource.rb +111 -52
- data/lib/kubernetes-deploy/kubernetes_resource/bugsnag.rb +3 -11
- data/lib/kubernetes-deploy/kubernetes_resource/cloudsql.rb +7 -14
- data/lib/kubernetes-deploy/kubernetes_resource/config_map.rb +5 -9
- data/lib/kubernetes-deploy/kubernetes_resource/deployment.rb +31 -14
- data/lib/kubernetes-deploy/kubernetes_resource/ingress.rb +1 -13
- data/lib/kubernetes-deploy/kubernetes_resource/persistent_volume_claim.rb +2 -9
- data/lib/kubernetes-deploy/kubernetes_resource/pod.rb +48 -22
- data/lib/kubernetes-deploy/kubernetes_resource/pod_disruption_budget.rb +5 -9
- data/lib/kubernetes-deploy/kubernetes_resource/pod_template.rb +5 -9
- data/lib/kubernetes-deploy/kubernetes_resource/redis.rb +9 -15
- data/lib/kubernetes-deploy/kubernetes_resource/service.rb +9 -10
- data/lib/kubernetes-deploy/resource_watcher.rb +22 -10
- data/lib/kubernetes-deploy/restart_task.rb +12 -7
- data/lib/kubernetes-deploy/runner.rb +163 -110
- data/lib/kubernetes-deploy/runner_task.rb +22 -19
- data/lib/kubernetes-deploy/version.rb +1 -1
- metadata +18 -4
- data/lib/kubernetes-deploy/logger.rb +0 -45
- data/lib/kubernetes-deploy/ui_helpers.rb +0 -19
data/lib/kubernetes-deploy/kubernetes_resource/service.rb
@@ -3,24 +3,16 @@ module KubernetesDeploy
   class Service < KubernetesResource
     TIMEOUT = 5.minutes

-    def initialize(name, namespace, context, file)
-      @name = name
-      @namespace = namespace
-      @context = context
-      @file = file
-    end
-
     def sync
-      _, _err, st =
+      _, _err, st = kubectl.run("get", type, @name)
       @found = st.success?
       if @found
-        endpoints, _err, st =
+        endpoints, _err, st = kubectl.run("get", "endpoints", @name, "--output=jsonpath={.subsets[*].addresses[*].ip}")
         @num_endpoints = (st.success? ? endpoints.split.length : 0)
       else
         @num_endpoints = 0
       end
       @status = "#{@num_endpoints} endpoints"
-      log_status
     end

     def deploy_succeeded?
@@ -31,6 +23,13 @@ module KubernetesDeploy
       false
     end

+    def timeout_message
+      <<-MSG.strip_heredoc.strip
+        This service does not have any endpoints. If the related pods are failing, fixing them will solve this as well.
+        If the related pods are up, this service's selector is probably incorrect.
+      MSG
+    end
+
     def exists?
       @found
     end
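The rewritten `sync` derives the endpoint count from kubectl's jsonpath output, which prints the ready addresses space-separated, so a whitespace split counts them. A minimal sketch of that parsing (the IPs are illustrative, not real output):

```ruby
# Illustrative parsing of:
#   kubectl get endpoints NAME --output=jsonpath={.subsets[*].addresses[*].ip}
# Ready IPs come back space-separated, so splitting counts the endpoints.
endpoints = "10.0.0.4 10.0.0.7 10.0.0.9" # example output, not real data
num_endpoints = endpoints.split.length    # => 3
status = "#{num_endpoints} endpoints"
```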
data/lib/kubernetes-deploy/resource_watcher.rb
@@ -1,37 +1,49 @@
 # frozen_string_literal: true
 module KubernetesDeploy
   class ResourceWatcher
-    def initialize(resources)
+    def initialize(resources, logger:)
       unless resources.is_a?(Enumerable)
         raise ArgumentError, <<-MSG.strip
           ResourceWatcher expects Enumerable collection, got `#{resources.class}` instead
         MSG
       end
       @resources = resources
+      @logger = logger
     end

-    def run(delay_sync: 3.seconds
+    def run(delay_sync: 3.seconds)
       delay_sync_until = Time.now.utc
       started_at = delay_sync_until
-      human_resources = @resources.map(&:id).join(", ")
-      max_wait_time = @resources.map(&:timeout).max
-      logger.info("Waiting for #{human_resources} with #{max_wait_time}s timeout")

       while @resources.present?
         if Time.now.utc < delay_sync_until
           sleep(delay_sync_until - Time.now.utc)
         end
+        watch_time = (Time.now.utc - started_at).round(1)
         delay_sync_until = Time.now.utc + delay_sync # don't pummel the API if the sync is fast
         @resources.each(&:sync)
         newly_finished_resources, @resources = @resources.partition(&:deploy_finished?)
+
+        new_success_list = []
         newly_finished_resources.each do |resource|
-
-
+          if resource.deploy_failed?
+            @logger.error("#{resource.id} failed to deploy after #{watch_time}s")
+          elsif resource.deploy_timed_out?
+            @logger.error("#{resource.id} deployment timed out")
+          else
+            new_success_list << resource.id
+          end
+        end
+
+        unless new_success_list.empty?
+          success_string = ColorizedString.new("Successfully deployed in #{watch_time}s:").green
+          @logger.info("#{success_string} #{new_success_list.join(', ')}")
         end
-      end

-
-
+        if newly_finished_resources.present? && @resources.present? # something happened this cycle, more to go
+          @logger.info("Continuing to wait for: #{@resources.map(&:id).join(', ')}")
+        end
+      end
     end
   end
 end
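Every caller must now inject its logger rather than relying on a global one. A minimal sketch of the updated call-site pattern, matching how `Runner` and `RestartTask` use it below (`resources` and `logger` are assumed to be built elsewhere):

```ruby
# Sketch: the watcher polls #sync and partitions on #deploy_finished?
# until every resource succeeds, fails, or times out.
watcher = KubernetesDeploy::ResourceWatcher.new(resources, logger: logger)
watcher.run(delay_sync: 3.seconds) # delay_sync throttles the polling loop
```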
data/lib/kubernetes-deploy/restart_task.rb
@@ -1,11 +1,9 @@
 # frozen_string_literal: true
 require 'kubernetes-deploy/kubeclient_builder'
-require 'kubernetes-deploy/ui_helpers'
 require 'kubernetes-deploy/resource_watcher'

 module KubernetesDeploy
   class RestartTask
-    include UIHelpers
     include KubernetesDeploy::KubeclientBuilder

     class DeploymentNotFoundError < FatalDeploymentError
@@ -25,7 +23,7 @@ module KubernetesDeploy
     HTTP_OK_RANGE = 200..299
     ANNOTATION = "shipit.shopify.io/restart"

-    def initialize(context:, namespace:, logger:
+    def initialize(context:, namespace:, logger:)
       @context = context
       @namespace = namespace
       @logger = logger
@@ -35,6 +33,7 @@ module KubernetesDeploy
     end

     def perform(deployments_names = nil)
+      @logger.reset
       verify_namespace

       if deployments_names
@@ -53,21 +52,27 @@ module KubernetesDeploy
         end
       end

-      phase_heading("Triggering restart by touching ENV[RESTARTED_AT]")
+      @logger.phase_heading("Triggering restart by touching ENV[RESTARTED_AT]")
       patch_kubeclient_deployments(deployments)

-      phase_heading("Waiting for rollout")
+      @logger.phase_heading("Waiting for rollout")
       wait_for_rollout(deployments)

       names = deployments.map { |d| "`#{d.metadata.name}`" }
       @logger.info "Restart of #{names.sort.join(', ')} deployments succeeded"
+      true
+    rescue FatalDeploymentError => error
+      @logger.fatal "#{error.class}: #{error.message}"
+      false
     end

     private

     def wait_for_rollout(kubeclient_resources)
-      resources = kubeclient_resources.map
-
+      resources = kubeclient_resources.map do |d|
+        Deployment.new(name: d.metadata.name, namespace: @namespace, context: @context, file: nil, logger: @logger)
+      end
+      watcher = ResourceWatcher.new(resources, logger: @logger)
       watcher.run
     end
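`perform` now reports the outcome as a boolean instead of letting `FatalDeploymentError` escape, so callers branch on the return value rather than rescuing. A sketch of the resulting caller pattern (context, namespace, and exit handling are illustrative):

```ruby
# Sketch: restart outcomes are now a true/false return, not an exception.
task = KubernetesDeploy::RestartTask.new(
  context: "minikube",  # illustrative context name
  namespace: "staging", # illustrative namespace
  logger: logger        # assumed to be built elsewhere
)
success = task.perform(%w(web worker))
exit(1) unless success
```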
data/lib/kubernetes-deploy/runner.rb
@@ -22,14 +22,12 @@ require 'kubernetes-deploy/kubernetes_resource'
   require "kubernetes-deploy/kubernetes_resource/#{subresource}"
 end
 require 'kubernetes-deploy/resource_watcher'
-require "kubernetes-deploy/ui_helpers"
 require 'kubernetes-deploy/kubectl'
 require 'kubernetes-deploy/kubeclient_builder'
 require 'kubernetes-deploy/ejson_secret_provisioner'

 module KubernetesDeploy
   class Runner
-    include UIHelpers
     include KubeclientBuilder

     PREDEPLOY_SEQUENCE = %w(
@@ -68,71 +66,63 @@
     PRUNE_WHITELIST_V_1_5 = %w(extensions/v1beta1/HorizontalPodAutoscaler).freeze
     PRUNE_WHITELIST_V_1_6 = %w(autoscaling/v1/HorizontalPodAutoscaler).freeze

-    def
-      yield
-    rescue FatalDeploymentError => error
-      KubernetesDeploy.logger.fatal <<-MSG
-        #{error.class}: #{error.message}
-        #{error.backtrace && error.backtrace.join("\n ")}
-      MSG
-      exit 1
-    end
-
-    def initialize(namespace:, current_sha:, context:, template_dir:,
-      wait_for_completion:, allow_protected_ns: false, prune: true, bindings: {})
+    def initialize(namespace:, context:, current_sha:, template_dir:, logger:, bindings: {})
       @namespace = namespace
       @context = context
       @current_sha = current_sha
       @template_dir = File.expand_path(template_dir)
+      @logger = logger
+      @bindings = bindings
       # Max length of podname is only 63chars so try to save some room by truncating sha to 8 chars
       @id = current_sha[0...8] + "-#{SecureRandom.hex(4)}" if current_sha
-      @wait_for_completion = wait_for_completion
-      @allow_protected_ns = allow_protected_ns
-      @prune = prune
-      @bindings = bindings
     end

-    def
-      @
-    end
+    def run(verify_result: true, allow_protected_ns: false, prune: true)
+      @logger.reset

-
-
-    end
-
-    def run
-      phase_heading("Validating configuration")
-      validate_configuration
-
-      phase_heading("Identifying deployment target")
+      @logger.phase_heading("Initializing deploy")
+      validate_configuration(allow_protected_ns: allow_protected_ns, prune: prune)
       confirm_context_exists
       confirm_namespace_exists
-
-      phase_heading("Parsing deploy content")
       resources = discover_resources

-      phase_heading("Checking initial resource statuses")
+      @logger.phase_heading("Checking initial resource statuses")
       resources.each(&:sync)
-
-
+      resources.each { |r| @logger.info(r.pretty_status) }
+
+      ejson = EjsonSecretProvisioner.new(
+        namespace: @namespace,
+        context: @context,
+        template_dir: @template_dir,
+        logger: @logger
+      )
       if ejson.secret_changes_required?
-        phase_heading("Deploying kubernetes secrets from #{EjsonSecretProvisioner::EJSON_SECRETS_FILE}")
+        @logger.phase_heading("Deploying kubernetes secrets from #{EjsonSecretProvisioner::EJSON_SECRETS_FILE}")
         ejson.run
       end

-
-
+      if deploy_has_priority_resources?(resources)
+        @logger.phase_heading("Predeploying priority resources")
+        predeploy_priority_resources(resources)
+      end

-      phase_heading("Deploying all resources")
-      if PROTECTED_NAMESPACES.include?(@namespace) &&
+      @logger.phase_heading("Deploying all resources")
+      if PROTECTED_NAMESPACES.include?(@namespace) && prune
         raise FatalDeploymentError, "Refusing to deploy to protected namespace '#{@namespace}' with pruning enabled"
       end

-      deploy_resources(resources, prune:
+      deploy_resources(resources, prune: prune)

-      return unless
+      return true unless verify_result
       wait_for_completion(resources)
-
+      record_statuses(resources)
+      success = resources.all?(&:deploy_succeeded?)
+    rescue FatalDeploymentError => error
+      @logger.summary.add_action(error.message)
+      success = false
+    ensure
+      @logger.print_summary(success)
+      success
     end

     def template_variables
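The per-run options `wait_for_completion`, `allow_protected_ns`, and `prune` move off the constructor onto `run`, which now returns a boolean and prints a deferred summary instead of exiting the process on failure. A sketch of the 0.7.0 entry point (all values illustrative; the logger is assumed to come from the new `formatted_logger.rb`):

```ruby
# Sketch: construct once with identity/config, pass per-run flags to #run.
runner = KubernetesDeploy::Runner.new(
  namespace: "staging",
  context: "minikube",
  current_sha: ENV["REVISION"],
  template_dir: "config/deploy/staging",
  logger: logger,
  bindings: { "replicas" => 3 } # exposed as local variables to .yml.erb templates
)
success = runner.run(verify_result: true, allow_protected_ns: false, prune: true)
exit(1) unless success
```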
data/lib/kubernetes-deploy/runner.rb (continued)
@@ -144,6 +134,23 @@ MSG

     private

+    def record_statuses(resources)
+      successful_resources, failed_resources = resources.partition(&:deploy_succeeded?)
+      fail_count = failed_resources.length
+      success_count = successful_resources.length
+
+      if success_count > 0
+        @logger.summary.add_action("successfully deployed #{success_count} #{'resource'.pluralize(success_count)}")
+        final_statuses = successful_resources.map(&:pretty_status).join("\n")
+        @logger.summary.add_paragraph("#{ColorizedString.new('Successful resources').green}\n#{final_statuses}")
+      end
+
+      if fail_count > 0
+        @logger.summary.add_action("failed to deploy #{fail_count} #{'resource'.pluralize(fail_count)}")
+        failed_resources.each { |r| @logger.summary.add_paragraph(r.debug_message) }
+      end
+    end
+
     def versioned_prune_whitelist
       if server_major_version == "1.5"
         BASE_PRUNE_WHITELIST + PRUNE_WHITELIST_V_1_5
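`record_statuses` writes to the deferred summary introduced by `deferred_summary_logging.rb`: actions and paragraphs are buffered during the run and flushed at the end. The sketch below strings together only the calls visible in this diff (`reset`, `summary.add_action`, `summary.add_paragraph`, `print_summary`); their exact signatures are assumptions:

```ruby
# Sketch of the deferred-summary pattern as used throughout this diff.
logger.reset                                      # clear buffered state at the start of a run
logger.summary.add_action("deployed 3 resources") # short outcome line
logger.summary.add_paragraph("Successful resources\n...") # longer detail block
logger.print_summary(success)                     # flush the buffer once the run ends
```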
data/lib/kubernetes-deploy/runner.rb (continued)
@@ -154,7 +161,7 @@ MSG

     def server_major_version
       @server_major_version ||= begin
-        out, _, _ =
+        out, _, _ = kubectl.run('version', '--short')
         matchdata = /Server Version: v(?<version>\d\.\d)/.match(out)
         raise "Could not determine server version" unless matchdata[:version]
         matchdata[:version]
@@ -163,7 +170,7 @@ MSG

     # Inspect the file referenced in the kubectl stderr
     # to make it easier for developer to understand what's going on
-    def
+    def find_bad_file_from_kubectl_output(stderr)
       # Output example:
       # Error from server (BadRequest): error when creating "/path/to/configmap-gqq5oh.yml20170411-33615-t0t3m":
       match = stderr.match(%r{BadRequest.*"(?<path>\/\S+\.yml\S+)"})
@@ -172,12 +179,12 @@ MSG
       path = match[:path]
       if path.present? && File.file?(path)
         suspicious_file = File.read(path)
-        KubernetesDeploy.logger.warn("Inspecting the file mentioned in the error message (#{path})")
-        KubernetesDeploy.logger.warn(suspicious_file)
-      else
-        KubernetesDeploy.logger.warn("Detected a file (#{path.inspect}) referenced in the kubectl stderr " \
-          "but was unable to inspect it")
       end
+      [File.basename(path, ".*"), suspicious_file]
+    end
+
+    def deploy_has_priority_resources?(resources)
+      resources.any? { |r| PREDEPLOY_SEQUENCE.include?(r.type) }
     end

     def predeploy_priority_resources(resource_list)
@@ -186,31 +193,52 @@ MSG
         next if matching_resources.empty?
         deploy_resources(matching_resources)
         wait_for_completion(matching_resources)
-
-
-
+
+        failed_resources = matching_resources.reject(&:deploy_succeeded?)
+        fail_count = failed_resources.length
+        if fail_count > 0
+          failed_resources.each { |r| @logger.summary.add_paragraph(r.debug_message) }
+          raise FatalDeploymentError, "Failed to deploy #{fail_count} priority #{'resource'.pluralize(fail_count)}"
         end
+        @logger.blank_line
       end
     end

     def discover_resources
       resources = []
+      @logger.info("Discovering templates:")
       Dir.foreach(@template_dir) do |filename|
         next unless filename.end_with?(".yml.erb", ".yml")

         split_templates(filename) do |tempfile|
           resource_id = discover_resource_via_dry_run(tempfile)
           type, name = resource_id.split("/", 2) # e.g. "pod/web-198612918-dzvfb"
-          resources << KubernetesResource.for_type(type, name, @namespace, @context,
-
+          resources << KubernetesResource.for_type(type: type, name: name, namespace: @namespace, context: @context,
+            file: tempfile, logger: @logger)
+          @logger.info " - #{resource_id}"
         end
       end
       resources
     end

     def discover_resource_via_dry_run(tempfile)
-
-
+      command = ["create", "-f", tempfile.path, "--dry-run", "--output=name"]
+      resource_id, err, st = kubectl.run(*command, log_failure: false)
+
+      unless st.success?
+        debug_msg = <<-DEBUG_MSG.strip_heredoc
+          This usually means template '#{File.basename(tempfile.path, '.*')}' is not a valid Kubernetes template.
+
+          Error from kubectl:
+          #{err}
+
+          Rendered template content:
+        DEBUG_MSG
+        debug_msg += File.read(tempfile.path)
+        @logger.summary.add_paragraph(debug_msg)
+
+        raise FatalDeploymentError, "Kubectl dry run failed (command: #{Shellwords.join(command)})"
+      end
       resource_id
     end
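Resource discovery still works by dry-running each rendered template through kubectl and splitting the `type/name` identifier it prints; what changes is that a failed dry run now attaches the rendered content to the deferred summary before raising. A sketch of the identifier parsing (output value illustrative):

```ruby
# Illustrative: `kubectl create -f TEMPLATE --dry-run --output=name`
# prints one identifier per resource, e.g. "deployment/web".
resource_id = "deployment/web"          # example dry-run output
type, name = resource_id.split("/", 2)  # => ["deployment", "web"]
```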
data/lib/kubernetes-deploy/runner.rb (continued)
@@ -226,21 +254,42 @@ MSG
         yield f
       end
     rescue Psych::SyntaxError => e
-
-
+      debug_msg = <<-INFO.strip_heredoc
+        Error message: #{e}
+
+        Template content:
+        ---
+      INFO
+      debug_msg += rendered_content
+      @logger.summary.add_paragraph(debug_msg)
+      raise FatalDeploymentError, "Template '#{filename}' cannot be parsed"
     end

-    def
-
-
+    def record_apply_failure(err)
+      file_name, file_content = find_bad_file_from_kubectl_output(err)
+      if file_name
+        debug_msg = <<-HELPFUL_MESSAGE.strip_heredoc
+          This usually means your template named '#{file_name}' is invalid.
+
+          Error from kubectl:
+          #{err}
+
+          Rendered template content:
+        HELPFUL_MESSAGE
+        debug_msg += file_content || "Failed to read file"
       else
-
-
+        debug_msg = <<-FALLBACK_MSG
+          This usually means one of your templates is invalid, but we were unable to automatically identify which one.
+          Please inspect the error message from kubectl:
+          #{err}
+        FALLBACK_MSG
       end
+
+      @logger.summary.add_paragraph(debug_msg)
     end

     def wait_for_completion(watched_resources)
-      watcher = ResourceWatcher.new(watched_resources)
+      watcher = ResourceWatcher.new(watched_resources, logger: @logger)
       watcher.run
     end
@@ -253,9 +302,12 @@ MSG
         erb_binding.local_variable_set(var_name, value)
       end
       erb_template.result(erb_binding)
+    rescue NameError => e
+      @logger.summary.add_paragraph("Error from renderer:\n #{e.message.tr("\n", ' ')}")
+      raise FatalDeploymentError, "Template '#{filename}' cannot be rendered"
     end

-    def validate_configuration
+    def validate_configuration(allow_protected_ns:, prune:)
       errors = []
       if ENV["KUBECONFIG"].blank? || !File.file?(ENV["KUBECONFIG"])
         errors << "Kube config not found at #{ENV['KUBECONFIG']}"
@@ -274,15 +326,13 @@ MSG
       if @namespace.blank?
         errors << "Namespace must be specified"
       elsif PROTECTED_NAMESPACES.include?(@namespace)
-        if allow_protected_ns
+        if allow_protected_ns && prune
           errors << "Refusing to deploy to protected namespace '#{@namespace}' with pruning enabled"
-        elsif allow_protected_ns
-
-
-
-          ***Please do not deploy to #{@namespace} unless you really know what you are doing.***
-          WARNING
-          KubernetesDeploy.logger.warn(warning)
+        elsif allow_protected_ns
+          @logger.warn("You're deploying to protected namespace #{@namespace}, which cannot be pruned.")
+          @logger.warn("Existing resources can only be removed manually with kubectl. " \
+            "Removing templates from the set deployed will have no effect.")
+          @logger.warn("***Please do not deploy to #{@namespace} unless you really know what you are doing.***")
         else
           errors << "Refusing to deploy to protected namespace '#{@namespace}'"
         end
@@ -292,38 +342,41 @@ MSG
         errors << "Context must be specified"
       end

-
-
+      unless errors.empty?
+        @logger.summary.add_paragraph(errors.map { |err| "- #{err}" }.join("\n"))
+        raise FatalDeploymentError, "Configuration invalid"
+      end
+
+      @logger.info("All required parameters and files are present")
     end

     def deploy_resources(resources, prune: false)
-
+      @logger.info("Deploying resources:")

       # Apply can be done in one large batch, the rest have to be done individually
       applyables, individuals = resources.partition { |r| r.deploy_method == :apply }

       individuals.each do |r|
-
+        @logger.info("- #{r.id}")
         r.deploy_started = Time.now.utc
         case r.deploy_method
         when :replace
-          _, _, st =
+          _, _, st = kubectl.run("replace", "-f", r.file.path, log_failure: false)
         when :replace_force
-          _, _, st =
+          _, _, st = kubectl.run("replace", "--force", "-f", r.file.path, log_failure: false)
         else
           # Fail Fast! This is a programmer mistake.
           raise ArgumentError, "Unexpected deploy method! (#{r.deploy_method.inspect})"
         end

+        next if st.success?
+        # it doesn't exist so we can't replace it
+        _, err, st = kubectl.run("create", "-f", r.file.path, log_failure: false)
         unless st.success?
-
-
-
-
-          Failed to replace or create resource: #{r.id}
-          #{err}
-          MSG
-        end
+          raise FatalDeploymentError, <<-MSG.strip_heredoc
+            Failed to replace or create resource: #{r.id}
+            #{err}
+          MSG
         end
       end

@@ -335,7 +388,7 @@ MSG

       command = ["apply"]
       resources.each do |r|
-
+        @logger.info("- #{r.id} (timeout: #{r.timeout}s)")
         command.push("-f", r.file.path)
         r.deploy_started = Time.now.utc
       end
@@ -345,43 +398,43 @@ MSG
       versioned_prune_whitelist.each { |type| command.push("--prune-whitelist=#{type}") }
     end

-
-
-
-
-
-      #{
-      MSG
+      out, err, st = kubectl.run(*command, log_failure: false)
+      if st.success?
+        log_pruning(out) if prune
+      else
+        record_apply_failure(err)
+        raise FatalDeploymentError, "Command failed: #{Shellwords.join(command)}"
       end
     end

+    def log_pruning(kubectl_output)
+      pruned = kubectl_output.scan(/^(.*) pruned$/)
+      return unless pruned.present?
+
+      @logger.info("The following resources were pruned: #{pruned.join(', ')}")
+      @logger.summary.add_action("pruned #{pruned.length} resources")
+    end
+
     def confirm_context_exists
-      out, err, st =
+      out, err, st = kubectl.run("config", "get-contexts", "-o", "name",
+        use_namespace: false, use_context: false, log_failure: false)
       available_contexts = out.split("\n")
       if !st.success?
         raise FatalDeploymentError, err
       elsif !available_contexts.include?(@context)
         raise FatalDeploymentError, "Context #{@context} is not available. Valid contexts: #{available_contexts}"
       end
-
+      @logger.info("Context #{@context} found")
     end

     def confirm_namespace_exists
-      _, _, st =
+      _, _, st = kubectl.run("get", "namespace", @namespace, use_namespace: false, log_failure: false)
       raise FatalDeploymentError, "Namespace #{@namespace} not found" unless st.success?
-
+      @logger.info("Namespace #{@namespace} found")
     end

-    def
-
-      raise KubectlError, "Namespace missing for namespaced command" if @namespace.blank?
-    end
-
-    if with_context
-      raise KubectlError, "Explicit context is required to run this command" if @context.blank?
-    end
-
-    Kubectl.run_kubectl(*args, namespace: @namespace, context: @context)
+    def kubectl
+      @kubectl ||= Kubectl.new(namespace: @namespace, context: @context, logger: @logger, log_failure_by_default: true)
     end
   end
 end
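The scattered `Kubectl.run_kubectl` class-method calls give way to a memoized `Kubectl` instance. A minimal sketch of the calling convention as it appears throughout this diff (the return triple and keywords are taken from the call sites above; everything else about `Kubectl`'s internals is an assumption):

```ruby
# Sketch of the new instance-based kubectl wrapper.
kubectl = KubernetesDeploy::Kubectl.new(
  namespace: "staging", context: "minikube", # illustrative values
  logger: logger, log_failure_by_default: true
)
out, err, st = kubectl.run("get", "pods", "--output=name") # [stdout, stderr, status]
puts "#{out.split("\n").length} pods" if st.success?
# Per-call keywords seen above: log_failure:, use_namespace:, use_context:
```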