kubernetes-deploy 0.30.0 → 0.31.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/.rubocop.yml +1 -1
- data/CHANGELOG.md +15 -0
- data/CONTRIBUTING.md +2 -2
- data/README.md +1 -1
- data/dev.yml +1 -1
- data/dev/flamegraph-from-tests +1 -1
- data/exe/kubernetes-deploy +11 -9
- data/exe/kubernetes-render +9 -7
- data/exe/kubernetes-restart +3 -3
- data/exe/kubernetes-run +1 -1
- data/kubernetes-deploy.gemspec +3 -3
- data/lib/krane.rb +5 -3
- data/lib/{kubernetes-deploy → krane}/bindings_parser.rb +1 -1
- data/lib/krane/cli/deploy_command.rb +14 -11
- data/lib/krane/cli/global_deploy_command.rb +47 -0
- data/lib/krane/cli/krane.rb +12 -3
- data/lib/krane/cli/render_command.rb +11 -9
- data/lib/krane/cli/restart_command.rb +4 -4
- data/lib/krane/cli/run_command.rb +3 -3
- data/lib/krane/cli/version_command.rb +1 -1
- data/lib/krane/cluster_resource_discovery.rb +102 -0
- data/lib/{kubernetes-deploy → krane}/common.rb +8 -9
- data/lib/krane/concerns/template_reporting.rb +29 -0
- data/lib/{kubernetes-deploy → krane}/concurrency.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/container_logs.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/deferred_summary_logging.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/delayed_exceptions.rb +0 -0
- data/lib/krane/deploy_task.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/deploy_task_config_validator.rb +1 -1
- data/lib/krane/deprecated_deploy_task.rb +404 -0
- data/lib/{kubernetes-deploy → krane}/duration_parser.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/ejson_secret_provisioner.rb +3 -3
- data/lib/krane/errors.rb +28 -0
- data/lib/{kubernetes-deploy → krane}/formatted_logger.rb +2 -2
- data/lib/krane/global_deploy_task.rb +210 -0
- data/lib/krane/global_deploy_task_config_validator.rb +12 -0
- data/lib/{kubernetes-deploy → krane}/kubeclient_builder.rb +11 -3
- data/lib/{kubernetes-deploy → krane}/kubectl.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource.rb +54 -22
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/cloudsql.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/config_map.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/cron_job.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/custom_resource.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/custom_resource_definition.rb +1 -5
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/daemon_set.rb +7 -4
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/deployment.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/horizontal_pod_autoscaler.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/ingress.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/job.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/network_policy.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/persistent_volume_claim.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/pod.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/pod_disruption_budget.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/pod_set_base.rb +3 -3
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/pod_template.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/replica_set.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/resource_quota.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/role.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/role_binding.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/secret.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/service.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/service_account.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/kubernetes_resource/stateful_set.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/label_selector.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/oj.rb +0 -0
- data/lib/{kubernetes-deploy → krane}/options_helper.rb +2 -2
- data/lib/{kubernetes-deploy → krane}/remote_logs.rb +2 -2
- data/lib/krane/render_task.rb +149 -0
- data/lib/{kubernetes-deploy → krane}/renderer.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/resource_cache.rb +4 -3
- data/lib/krane/resource_deployer.rb +265 -0
- data/lib/{kubernetes-deploy → krane}/resource_watcher.rb +6 -6
- data/lib/krane/restart_task.rb +224 -0
- data/lib/{kubernetes-deploy → krane}/rollout_conditions.rb +1 -1
- data/lib/krane/runner_task.rb +212 -0
- data/lib/{kubernetes-deploy → krane}/runner_task_config_validator.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/statsd.rb +13 -27
- data/lib/krane/task_config.rb +19 -0
- data/lib/{kubernetes-deploy → krane}/task_config_validator.rb +1 -1
- data/lib/{kubernetes-deploy → krane}/template_sets.rb +5 -5
- data/lib/krane/version.rb +4 -0
- data/lib/kubernetes-deploy/deploy_task.rb +6 -603
- data/lib/kubernetes-deploy/errors.rb +1 -26
- data/lib/kubernetes-deploy/render_task.rb +5 -139
- data/lib/kubernetes-deploy/rescue_krane_exceptions.rb +18 -0
- data/lib/kubernetes-deploy/restart_task.rb +6 -215
- data/lib/kubernetes-deploy/runner_task.rb +6 -203
- metadata +75 -58
- data/lib/kubernetes-deploy/cluster_resource_discovery.rb +0 -57
- data/lib/kubernetes-deploy/task_config.rb +0 -16
- data/lib/kubernetes-deploy/version.rb +0 -4
@@ -0,0 +1,212 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
require 'tempfile'
|
3
|
+
|
4
|
+
require 'krane/common'
|
5
|
+
require 'krane/kubeclient_builder'
|
6
|
+
require 'krane/kubectl'
|
7
|
+
require 'krane/resource_cache'
|
8
|
+
require 'krane/resource_watcher'
|
9
|
+
require 'krane/kubernetes_resource'
|
10
|
+
require 'krane/kubernetes_resource/pod'
|
11
|
+
require 'krane/runner_task_config_validator'
|
12
|
+
|
13
|
+
module Krane
  # Runs a single pod that is expected to exit upon completing its task.
  # The pod definition is derived from an existing PodTemplate in the target
  # namespace; its 'task-runner' container is overridden with the supplied
  # entrypoint, args, and environment variables.
  class RunnerTask
    # Raised when the named PodTemplate does not exist in the namespace
    class TaskTemplateMissingError < TaskConfigurationError; end

    attr_reader :pod_name

    # Initializes the runner task
    #
    # @param namespace [String] Kubernetes namespace
    # @param context [String] Kubernetes context / cluster
    # @param logger [Object] Logger object (defaults to an instance of Krane::FormattedLogger)
    # @param max_watch_seconds [Integer] Timeout in seconds
    def initialize(namespace:, context:, logger: nil, max_watch_seconds: nil)
      @logger = logger || Krane::FormattedLogger.build(namespace, context)
      @task_config = Krane::TaskConfig.new(context, namespace, @logger)
      @namespace = namespace
      @context = context
      @max_watch_seconds = max_watch_seconds
    end

    # Runs the task, returning a boolean representing success or failure
    #
    # @return [Boolean]
    def run(*args, **kwargs)
      # Forward positionals and keywords separately: run! takes keyword
      # arguments only, and `run!(*args)` stops converting a trailing hash to
      # keywords under Ruby 3's keyword-argument separation.
      run!(*args, **kwargs)
      true
    rescue DeploymentTimeoutError, FatalDeploymentError
      false
    end

    # Runs the task, raising exceptions in case of issues
    #
    # @param task_template [String] The template file you'll be rendering
    # @param entrypoint [Array<String>] Override the default command in the container image
    # @param args [Array<String>] Override the default arguments for the command
    # @param env_vars [Array<String>] List of env vars
    # @param verify_result [Boolean] Wait for completion and verify pod success
    #
    # @return [nil]
    def run!(task_template:, entrypoint:, args:, env_vars: [], verify_result: true)
      start = Time.now.utc
      @logger.reset

      @logger.phase_heading("Initializing task")

      @logger.info("Validating configuration")
      verify_config!(task_template, args)
      @logger.info("Using namespace '#{@namespace}' in context '#{@context}'")

      pod = build_pod(task_template, entrypoint, args, env_vars, verify_result)
      validate_pod(pod)

      @logger.phase_heading("Running pod")
      create_pod(pod)

      if verify_result
        @logger.phase_heading("Streaming logs")
        watch_pod(pod)
      else
        record_status_once(pod)
      end
      StatsD.client.distribution('task_runner.duration', StatsD.duration(start), tags: statsd_tags('success'))
      @logger.print_summary(:success)
    rescue DeploymentTimeoutError
      StatsD.client.distribution('task_runner.duration', StatsD.duration(start), tags: statsd_tags('timeout'))
      @logger.print_summary(:timed_out)
      raise
    rescue FatalDeploymentError
      StatsD.client.distribution('task_runner.duration', StatsD.duration(start), tags: statsd_tags('failure'))
      @logger.print_summary(:failure)
      raise
    end

    private

    # Submits the pod to the API server, converting API failures into a
    # FatalDeploymentError with a summarized message.
    def create_pod(pod)
      @logger.info("Creating pod '#{pod.name}'")
      pod.deploy_started_at = Time.now.utc
      kubeclient.create_pod(pod.to_kubeclient_resource)
      @pod_name = pod.name
      @logger.info("Pod creation succeeded")
    rescue Kubeclient::HttpError => e
      msg = "Failed to create pod: #{e.class.name}: #{e.message}"
      @logger.summary.add_paragraph(msg)
      raise FatalDeploymentError, msg
    end

    # Builds a Krane::Pod model from the named PodTemplate with the requested
    # container overrides applied.
    def build_pod(template_name, entrypoint, args, env_vars, verify_result)
      task_template = get_template(template_name)
      @logger.info("Using template '#{template_name}'")
      pod_template = build_pod_definition(task_template)
      set_container_overrides!(pod_template, entrypoint, args, env_vars)
      ensure_valid_restart_policy!(pod_template, verify_result)
      Pod.new(namespace: @namespace, context: @context, logger: @logger, stream_logs: true,
        definition: pod_template.to_hash.deep_stringify_keys, statsd_tags: [])
    end

    def validate_pod(pod)
      pod.validate_definition(kubectl)
    end

    # Watches the pod until it succeeds, fails, or times out, raising the
    # corresponding error for run! to translate into a summary.
    def watch_pod(pod)
      rw = ResourceWatcher.new(resources: [pod], timeout: @max_watch_seconds,
        operation_name: "run", task_config: @task_config)
      rw.run(delay_sync: 1, reminder_interval: 30.seconds)
      raise DeploymentTimeoutError if pod.deploy_timed_out?
      raise FatalDeploymentError if pod.deploy_failed?
    end

    # Used when result verification is disabled: capture the pod's status one
    # time right after creation and surface it in the summary.
    def record_status_once(pod)
      cache = ResourceCache.new(@task_config)
      pod.sync(cache)
      warning = <<~STRING
        #{ColorizedString.new('Result verification is disabled for this task.').yellow}
        The following status was observed immediately after pod creation:
        #{pod.pretty_status}
      STRING
      @logger.summary.add_paragraph(warning)
    end

    # Validates the task configuration, raising TaskConfigurationError with a
    # summarized list of problems when it is invalid.
    def verify_config!(task_template, args)
      task_config_validator = RunnerTaskConfigValidator.new(task_template, args, @task_config, kubectl,
        kubeclient_builder)
      unless task_config_validator.valid?
        @logger.summary.add_action("Configuration invalid")
        # `errors` is already an array; wrapping it in another array rendered a
        # single bullet containing the whole array's #to_s instead of one
        # bullet per error.
        @logger.summary.add_paragraph(task_config_validator.errors.map { |err| "- #{err}" }.join("\n"))
        raise Krane::TaskConfigurationError
      end
    end

    # Fetches the PodTemplate resource and returns its embedded pod spec.
    def get_template(template_name)
      pod_template = kubeclient.get_pod_template(template_name, @namespace)
      pod_template.template
    rescue Kubeclient::ResourceNotFoundError
      msg = "Pod template `#{template_name}` not found in namespace `#{@namespace}`, context `#{@context}`"
      @logger.summary.add_paragraph(msg)
      raise TaskTemplateMissingError, msg
    rescue Kubeclient::HttpError => error
      raise FatalKubeAPIError, "Error retrieving pod template: #{error.class.name}: #{error.message}"
    end

    # Turns the template's pod spec into a standalone Pod definition with a
    # unique (and length-capped) name.
    def build_pod_definition(base_template)
      pod_definition = base_template.dup
      pod_definition.kind = 'Pod'
      pod_definition.apiVersion = 'v1'
      pod_definition.metadata.namespace = @namespace

      unique_name = pod_definition.metadata.name + "-" + SecureRandom.hex(8)
      # Kubernetes object names are capped at 63 characters
      @logger.warn("Name is too long, using '#{unique_name[0..62]}'") if unique_name.length > 63
      pod_definition.metadata.name = unique_name[0..62]

      pod_definition
    end

    # Applies entrypoint/args/env overrides to the 'task-runner' container.
    # Raises TaskConfigurationError when the template has no such container.
    def set_container_overrides!(pod_definition, entrypoint, args, env_vars)
      container = pod_definition.spec.containers.find { |cont| cont.name == 'task-runner' }
      if container.nil?
        message = "Pod spec does not contain a template container called 'task-runner'"
        @logger.summary.add_paragraph(message)
        raise TaskConfigurationError, message
      end

      container.command = entrypoint if entrypoint
      container.args = args if args

      # env_vars entries are "KEY=VALUE" strings; split on the first '=' only
      # so values may themselves contain '='.
      env_args = env_vars.map do |env|
        key, value = env.split('=', 2)
        { name: key, value: value }
      end
      container.env ||= []
      container.env = container.env.map(&:to_h) + env_args
    end

    # A pod that should be verified must not restart, or "success" could never
    # be determined; coerce restartPolicy to "Never" (with a warning).
    def ensure_valid_restart_policy!(template, verify)
      restart_policy = template.spec.restartPolicy
      if verify && restart_policy != "Never"
        @logger.warn("Changed Pod RestartPolicy from '#{restart_policy}' to 'Never'. Disable "\
          "result verification to use '#{restart_policy}'.")
        template.spec.restartPolicy = "Never"
      end
    end

    def kubectl
      @kubectl ||= Kubectl.new(task_config: @task_config, log_failure_by_default: true)
    end

    def kubeclient
      @kubeclient ||= kubeclient_builder.build_v1_kubeclient(@context)
    end

    def kubeclient_builder
      @kubeclient_builder ||= KubeclientBuilder.new
    end

    def statsd_tags(status)
      %W(namespace:#{@namespace} context:#{@context} status:#{status})
    end
  end
end
|
@@ -2,41 +2,27 @@
|
|
2
2
|
require 'statsd-instrument'
|
3
3
|
require 'logger'
|
4
4
|
|
5
|
-
module
|
5
|
+
module Krane
|
6
6
|
class StatsD
|
7
|
-
extend ::StatsD
|
8
|
-
|
9
7
|
PREFIX = "KubernetesDeploy"
|
10
8
|
|
11
9
|
def self.duration(start_time)
|
12
10
|
(Time.now.utc - start_time).round(1)
|
13
11
|
end
|
14
12
|
|
15
|
-
def self.
|
16
|
-
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
|
13
|
+
def self.client
|
14
|
+
@client ||= begin
|
15
|
+
sink = if ::StatsD::Instrument::Environment.current.env.fetch('STATSD_ENV', nil) == 'development'
|
16
|
+
::StatsD::Instrument::LogSink.new(Logger.new($stderr))
|
17
|
+
elsif (addr = ::StatsD::Instrument::Environment.current.env.fetch('STATSD_ADDR', nil))
|
18
|
+
::StatsD::Instrument::UDPSink.for_addr(addr)
|
19
|
+
else
|
20
|
+
::StatsD::Instrument::NullSink.new
|
21
|
+
end
|
22
|
+
::StatsD::Instrument::Client.new(prefix: PREFIX, sink: sink, default_sample_rate: 1.0)
|
23
23
|
end
|
24
24
|
end
|
25
25
|
|
26
|
-
# It is not sufficient to set the prefix field on the KubernetesDeploy::StatsD singleton itself, since its value
|
27
|
-
# is overridden in the underlying calls to the ::StatsD library, hence the need to pass it in as a custom prefix
|
28
|
-
# via the metric_options hash. This is done since KubernetesDeploy may be included as a library and should not
|
29
|
-
# change the global StatsD configuration of the importing application.
|
30
|
-
def self.increment(key, value = 1, **metric_options)
|
31
|
-
metric_options[:prefix] = PREFIX
|
32
|
-
super
|
33
|
-
end
|
34
|
-
|
35
|
-
def self.distribution(key, value = nil, **metric_options, &block)
|
36
|
-
metric_options[:prefix] = PREFIX
|
37
|
-
super
|
38
|
-
end
|
39
|
-
|
40
26
|
module MeasureMethods
|
41
27
|
def measure_method(method_name, metric = nil)
|
42
28
|
unless method_defined?(method_name) || private_method_defined?(method_name)
|
@@ -64,9 +50,9 @@ module KubernetesDeploy
|
|
64
50
|
dynamic_tags << "error:#{error}" if dynamic_tags.is_a?(Array)
|
65
51
|
end
|
66
52
|
|
67
|
-
StatsD.distribution(
|
53
|
+
Krane::StatsD.client.distribution(
|
68
54
|
metric,
|
69
|
-
|
55
|
+
Krane::StatsD.duration(start_time),
|
70
56
|
tags: dynamic_tags
|
71
57
|
)
|
72
58
|
end
|
@@ -0,0 +1,19 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
module Krane
  # Small value object bundling the context, namespace, and logger that Krane
  # tasks share, plus lazily discovered cluster-scoped resource kinds.
  class TaskConfig
    attr_reader :context, :namespace, :logger

    # @param context [String] Kubernetes context / cluster
    # @param namespace [String] Kubernetes namespace
    # @param logger [Object] optional; a FormattedLogger is built when omitted
    def initialize(context, namespace, logger = nil)
      @context = context
      @namespace = namespace
      @logger = logger || FormattedLogger.build(namespace, context)
    end

    # Returns the cluster-scoped (non-namespaced) resource kinds available in
    # the target cluster, memoized after the first discovery.
    def global_kinds
      @global_kinds ||= ClusterResourceDiscovery.new(task_config: self).global_resource_kinds
    end
  end
end
|
@@ -1,8 +1,8 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
|
-
require '
|
3
|
-
require '
|
2
|
+
require 'krane/delayed_exceptions'
|
3
|
+
require 'krane/ejson_secret_provisioner'
|
4
4
|
|
5
|
-
module
|
5
|
+
module Krane
|
6
6
|
class TemplateSets
|
7
7
|
include DelayedExceptions
|
8
8
|
VALID_TEMPLATES = %w(.yml.erb .yml .yaml .yaml.erb)
|
@@ -24,7 +24,7 @@ module KubernetesDeploy
|
|
24
24
|
bindings: bindings,
|
25
25
|
)
|
26
26
|
end
|
27
|
-
with_delayed_exceptions(@files,
|
27
|
+
with_delayed_exceptions(@files, Krane::InvalidTemplateError) do |filename|
|
28
28
|
next if filename.end_with?(EjsonSecretProvisioner::EJSON_SECRETS_FILE)
|
29
29
|
templates(filename: filename, raw: raw) { |r_def| yield r_def, filename }
|
30
30
|
end
|
@@ -110,7 +110,7 @@ module KubernetesDeploy
|
|
110
110
|
end
|
111
111
|
|
112
112
|
def with_resource_definitions_and_filename(render_erb: false, current_sha: nil, bindings: nil, raw: false)
|
113
|
-
with_delayed_exceptions(@template_sets,
|
113
|
+
with_delayed_exceptions(@template_sets, Krane::InvalidTemplateError) do |template_set|
|
114
114
|
template_set.with_resource_definitions_and_filename(
|
115
115
|
render_erb: render_erb,
|
116
116
|
current_sha: current_sha,
|
@@ -1,612 +1,15 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
|
-
require '
|
3
|
-
require '
|
4
|
-
require 'tempfile'
|
5
|
-
require 'fileutils'
|
6
|
-
|
7
|
-
require 'kubernetes-deploy/common'
|
8
|
-
require 'kubernetes-deploy/concurrency'
|
9
|
-
require 'kubernetes-deploy/resource_cache'
|
10
|
-
require 'kubernetes-deploy/kubernetes_resource'
|
11
|
-
%w(
|
12
|
-
custom_resource
|
13
|
-
cloudsql
|
14
|
-
config_map
|
15
|
-
deployment
|
16
|
-
ingress
|
17
|
-
persistent_volume_claim
|
18
|
-
pod
|
19
|
-
network_policy
|
20
|
-
service
|
21
|
-
pod_template
|
22
|
-
pod_disruption_budget
|
23
|
-
replica_set
|
24
|
-
service_account
|
25
|
-
daemon_set
|
26
|
-
resource_quota
|
27
|
-
stateful_set
|
28
|
-
cron_job
|
29
|
-
job
|
30
|
-
custom_resource_definition
|
31
|
-
horizontal_pod_autoscaler
|
32
|
-
secret
|
33
|
-
).each do |subresource|
|
34
|
-
require "kubernetes-deploy/kubernetes_resource/#{subresource}"
|
35
|
-
end
|
36
|
-
require 'kubernetes-deploy/resource_watcher'
|
37
|
-
require 'kubernetes-deploy/kubectl'
|
38
|
-
require 'kubernetes-deploy/kubeclient_builder'
|
39
|
-
require 'kubernetes-deploy/ejson_secret_provisioner'
|
40
|
-
require 'kubernetes-deploy/renderer'
|
41
|
-
require 'kubernetes-deploy/cluster_resource_discovery'
|
42
|
-
require 'kubernetes-deploy/template_sets'
|
43
|
-
require 'kubernetes-deploy/deploy_task_config_validator'
|
2
|
+
require 'krane/deprecated_deploy_task'
|
3
|
+
require 'kubernetes-deploy/rescue_krane_exceptions'
|
44
4
|
|
45
5
|
module KubernetesDeploy
|
46
|
-
|
47
|
-
|
48
|
-
extend KubernetesDeploy::StatsD::MeasureMethods
|
49
|
-
|
50
|
-
PROTECTED_NAMESPACES = %w(
|
51
|
-
default
|
52
|
-
kube-system
|
53
|
-
kube-public
|
54
|
-
)
|
55
|
-
# Things removed from default prune whitelist at https://github.com/kubernetes/kubernetes/blob/0dff56b4d88ec7551084bf89028dbeebf569620e/pkg/kubectl/cmd/apply.go#L411:
|
56
|
-
# core/v1/Namespace -- not namespaced
|
57
|
-
# core/v1/PersistentVolume -- not namespaced
|
58
|
-
# core/v1/Endpoints -- managed by services
|
59
|
-
# core/v1/PersistentVolumeClaim -- would delete data
|
60
|
-
# core/v1/ReplicationController -- superseded by deployments/replicasets
|
61
|
-
|
62
|
-
def predeploy_sequence
|
63
|
-
before_crs = %w(
|
64
|
-
ResourceQuota
|
65
|
-
NetworkPolicy
|
66
|
-
)
|
67
|
-
after_crs = %w(
|
68
|
-
ConfigMap
|
69
|
-
PersistentVolumeClaim
|
70
|
-
ServiceAccount
|
71
|
-
Role
|
72
|
-
RoleBinding
|
73
|
-
Secret
|
74
|
-
Pod
|
75
|
-
)
|
76
|
-
|
77
|
-
before_crs + cluster_resource_discoverer.crds.select(&:predeployed?).map(&:kind) + after_crs
|
78
|
-
end
|
79
|
-
|
80
|
-
def prune_whitelist
|
81
|
-
wl = %w(
|
82
|
-
core/v1/ConfigMap
|
83
|
-
core/v1/Pod
|
84
|
-
core/v1/Service
|
85
|
-
core/v1/ResourceQuota
|
86
|
-
core/v1/Secret
|
87
|
-
core/v1/ServiceAccount
|
88
|
-
core/v1/PodTemplate
|
89
|
-
core/v1/PersistentVolumeClaim
|
90
|
-
batch/v1/Job
|
91
|
-
apps/v1/ReplicaSet
|
92
|
-
apps/v1/DaemonSet
|
93
|
-
apps/v1/Deployment
|
94
|
-
extensions/v1beta1/Ingress
|
95
|
-
networking.k8s.io/v1/NetworkPolicy
|
96
|
-
apps/v1/StatefulSet
|
97
|
-
autoscaling/v1/HorizontalPodAutoscaler
|
98
|
-
policy/v1beta1/PodDisruptionBudget
|
99
|
-
batch/v1beta1/CronJob
|
100
|
-
rbac.authorization.k8s.io/v1/Role
|
101
|
-
rbac.authorization.k8s.io/v1/RoleBinding
|
102
|
-
)
|
103
|
-
wl + cluster_resource_discoverer.crds.select(&:prunable?).map(&:group_version_kind)
|
104
|
-
end
|
105
|
-
|
106
|
-
def server_version
|
107
|
-
kubectl.server_version
|
108
|
-
end
|
109
|
-
|
110
|
-
# Initializes the deploy task
|
111
|
-
#
|
112
|
-
# @param namespace [String] Kubernetes namespace
|
113
|
-
# @param context [String] Kubernetes context
|
114
|
-
# @param current_sha [String] The SHA of the commit
|
115
|
-
# @param logger [Object] Logger object (defaults to an instance of KubernetesDeploy::FormattedLogger)
|
116
|
-
# @param kubectl_instance [Kubectl] Kubectl instance
|
117
|
-
# @param bindings [Hash] Bindings parsed by KubernetesDeploy::BindingsParser
|
118
|
-
# @param max_watch_seconds [Integer] Timeout in seconds
|
119
|
-
# @param selector [Hash] Selector(s) parsed by KubernetesDeploy::LabelSelector
|
120
|
-
# @param template_paths [Array<String>] An array of template paths
|
121
|
-
# @param template_dir [String] Path to a directory with templates (deprecated)
|
122
|
-
# @param protected_namespaces [Array<String>] Array of protected Kubernetes namespaces (defaults
|
123
|
-
# to KubernetesDeploy::DeployTask::PROTECTED_NAMESPACES)
|
124
|
-
# @param render_erb [Boolean] Enable ERB rendering
|
125
|
-
def initialize(namespace:, context:, current_sha:, logger: nil, kubectl_instance: nil, bindings: {},
|
126
|
-
max_watch_seconds: nil, selector: nil, template_paths: [], template_dir: nil, protected_namespaces: nil,
|
127
|
-
render_erb: true, allow_globals: false)
|
128
|
-
template_dir = File.expand_path(template_dir) if template_dir
|
129
|
-
template_paths = (template_paths.map { |path| File.expand_path(path) } << template_dir).compact
|
130
|
-
|
131
|
-
@logger = logger || KubernetesDeploy::FormattedLogger.build(namespace, context)
|
132
|
-
@template_sets = TemplateSets.from_dirs_and_files(paths: template_paths, logger: @logger)
|
133
|
-
@task_config = KubernetesDeploy::TaskConfig.new(context, namespace, @logger)
|
134
|
-
@bindings = bindings
|
135
|
-
@namespace = namespace
|
136
|
-
@namespace_tags = []
|
137
|
-
@context = context
|
138
|
-
@current_sha = current_sha
|
139
|
-
@kubectl = kubectl_instance
|
140
|
-
@max_watch_seconds = max_watch_seconds
|
141
|
-
@selector = selector
|
142
|
-
@protected_namespaces = protected_namespaces || PROTECTED_NAMESPACES
|
143
|
-
@render_erb = render_erb
|
144
|
-
@allow_globals = allow_globals
|
145
|
-
end
|
6
|
+
class DeployTask < ::Krane::DeprecatedDeployTask
|
7
|
+
include RescueKraneExceptions
|
146
8
|
|
147
|
-
# Runs the task, returning a boolean representing success or failure
|
148
|
-
#
|
149
|
-
# @return [Boolean]
|
150
9
|
def run(*args)
|
151
|
-
|
152
|
-
|
153
|
-
rescue FatalDeploymentError
|
10
|
+
super(*args)
|
11
|
+
rescue KubernetesDeploy::FatalDeploymentError
|
154
12
|
false
|
155
13
|
end
|
156
|
-
|
157
|
-
# Runs the task, raising exceptions in case of issues
|
158
|
-
#
|
159
|
-
# @param verify_result [Boolean] Wait for completion and verify success
|
160
|
-
# @param allow_protected_ns [Boolean] Enable deploying to protected namespaces
|
161
|
-
# @param prune [Boolean] Enable deletion of resources that do not appear in the template dir
|
162
|
-
#
|
163
|
-
# @return [nil]
|
164
|
-
def run!(verify_result: true, allow_protected_ns: false, prune: true)
|
165
|
-
start = Time.now.utc
|
166
|
-
@logger.reset
|
167
|
-
|
168
|
-
@logger.phase_heading("Initializing deploy")
|
169
|
-
validate_configuration(allow_protected_ns: allow_protected_ns, prune: prune)
|
170
|
-
resources = discover_resources
|
171
|
-
validate_resources(resources)
|
172
|
-
|
173
|
-
@logger.phase_heading("Checking initial resource statuses")
|
174
|
-
check_initial_status(resources)
|
175
|
-
|
176
|
-
if deploy_has_priority_resources?(resources)
|
177
|
-
@logger.phase_heading("Predeploying priority resources")
|
178
|
-
predeploy_priority_resources(resources)
|
179
|
-
end
|
180
|
-
|
181
|
-
@logger.phase_heading("Deploying all resources")
|
182
|
-
if @protected_namespaces.include?(@namespace) && prune
|
183
|
-
raise FatalDeploymentError, "Refusing to deploy to protected namespace '#{@namespace}' with pruning enabled"
|
184
|
-
end
|
185
|
-
|
186
|
-
if verify_result
|
187
|
-
deploy_all_resources(resources, prune: prune, verify: true)
|
188
|
-
failed_resources = resources.reject(&:deploy_succeeded?)
|
189
|
-
success = failed_resources.empty?
|
190
|
-
if !success && failed_resources.all?(&:deploy_timed_out?)
|
191
|
-
raise DeploymentTimeoutError
|
192
|
-
end
|
193
|
-
raise FatalDeploymentError unless success
|
194
|
-
else
|
195
|
-
deploy_all_resources(resources, prune: prune, verify: false)
|
196
|
-
@logger.summary.add_action("deployed #{resources.length} #{'resource'.pluralize(resources.length)}")
|
197
|
-
warning = <<~MSG
|
198
|
-
Deploy result verification is disabled for this deploy.
|
199
|
-
This means the desired changes were communicated to Kubernetes, but the deploy did not make sure they actually succeeded.
|
200
|
-
MSG
|
201
|
-
@logger.summary.add_paragraph(ColorizedString.new(warning).yellow)
|
202
|
-
end
|
203
|
-
StatsD.event("Deployment of #{@namespace} succeeded",
|
204
|
-
"Successfully deployed all #{@namespace} resources to #{@context}",
|
205
|
-
alert_type: "success", tags: statsd_tags << "status:success")
|
206
|
-
StatsD.distribution('all_resources.duration', StatsD.duration(start), tags: statsd_tags << "status:success")
|
207
|
-
@logger.print_summary(:success)
|
208
|
-
rescue DeploymentTimeoutError
|
209
|
-
@logger.print_summary(:timed_out)
|
210
|
-
StatsD.event("Deployment of #{@namespace} timed out",
|
211
|
-
"One or more #{@namespace} resources failed to deploy to #{@context} in time",
|
212
|
-
alert_type: "error", tags: statsd_tags << "status:timeout")
|
213
|
-
StatsD.distribution('all_resources.duration', StatsD.duration(start), tags: statsd_tags << "status:timeout")
|
214
|
-
raise
|
215
|
-
rescue FatalDeploymentError => error
|
216
|
-
@logger.summary.add_action(error.message) if error.message != error.class.to_s
|
217
|
-
@logger.print_summary(:failure)
|
218
|
-
StatsD.event("Deployment of #{@namespace} failed",
|
219
|
-
"One or more #{@namespace} resources failed to deploy to #{@context}",
|
220
|
-
alert_type: "error", tags: statsd_tags << "status:failed")
|
221
|
-
StatsD.distribution('all_resources.duration', StatsD.duration(start), tags: statsd_tags << "status:failed")
|
222
|
-
raise
|
223
|
-
end
|
224
|
-
|
225
|
-
private
|
226
|
-
|
227
|
-
def global_resource_names
|
228
|
-
cluster_resource_discoverer.global_resource_kinds
|
229
|
-
end
|
230
|
-
|
231
|
-
def kubeclient_builder
|
232
|
-
@kubeclient_builder ||= KubeclientBuilder.new
|
233
|
-
end
|
234
|
-
|
235
|
-
def cluster_resource_discoverer
|
236
|
-
@cluster_resource_discoverer ||= ClusterResourceDiscovery.new(
|
237
|
-
task_config: @task_config,
|
238
|
-
namespace_tags: @namespace_tags
|
239
|
-
)
|
240
|
-
end
|
241
|
-
|
242
|
-
def ejson_provisioners
|
243
|
-
@ejson_provisoners ||= @template_sets.ejson_secrets_files.map do |ejson_secret_file|
|
244
|
-
EjsonSecretProvisioner.new(
|
245
|
-
task_config: @task_config,
|
246
|
-
ejson_keys_secret: ejson_keys_secret,
|
247
|
-
ejson_file: ejson_secret_file,
|
248
|
-
statsd_tags: @namespace_tags,
|
249
|
-
selector: @selector,
|
250
|
-
)
|
251
|
-
end
|
252
|
-
end
|
253
|
-
|
254
|
-
def deploy_has_priority_resources?(resources)
|
255
|
-
resources.any? { |r| predeploy_sequence.include?(r.type) }
|
256
|
-
end
|
257
|
-
|
258
|
-
def predeploy_priority_resources(resource_list)
|
259
|
-
bare_pods = resource_list.select { |resource| resource.is_a?(Pod) }
|
260
|
-
if bare_pods.count == 1
|
261
|
-
bare_pods.first.stream_logs = true
|
262
|
-
end
|
263
|
-
|
264
|
-
predeploy_sequence.each do |resource_type|
|
265
|
-
matching_resources = resource_list.select { |r| r.type == resource_type }
|
266
|
-
next if matching_resources.empty?
|
267
|
-
deploy_resources(matching_resources, verify: true, record_summary: false)
|
268
|
-
|
269
|
-
failed_resources = matching_resources.reject(&:deploy_succeeded?)
|
270
|
-
fail_count = failed_resources.length
|
271
|
-
if fail_count > 0
|
272
|
-
KubernetesDeploy::Concurrency.split_across_threads(failed_resources) do |r|
|
273
|
-
r.sync_debug_info(kubectl)
|
274
|
-
end
|
275
|
-
failed_resources.each { |r| @logger.summary.add_paragraph(r.debug_message) }
|
276
|
-
raise FatalDeploymentError, "Failed to deploy #{fail_count} priority #{'resource'.pluralize(fail_count)}"
|
277
|
-
end
|
278
|
-
@logger.blank_line
|
279
|
-
end
|
280
|
-
end
|
281
|
-
measure_method(:predeploy_priority_resources, 'priority_resources.duration')
|
282
|
-
|
283
|
-
def validate_resources(resources)
|
284
|
-
KubernetesDeploy::Concurrency.split_across_threads(resources) do |r|
|
285
|
-
r.validate_definition(kubectl, selector: @selector)
|
286
|
-
end
|
287
|
-
|
288
|
-
resources.select(&:has_warnings?).each do |resource|
|
289
|
-
record_warnings(warning: resource.validation_warning_msg, filename: File.basename(resource.file_path))
|
290
|
-
end
|
291
|
-
|
292
|
-
failed_resources = resources.select(&:validation_failed?)
|
293
|
-
if failed_resources.present?
|
294
|
-
|
295
|
-
failed_resources.each do |r|
|
296
|
-
content = File.read(r.file_path) if File.file?(r.file_path) && !r.sensitive_template_content?
|
297
|
-
record_invalid_template(err: r.validation_error_msg, filename: File.basename(r.file_path), content: content)
|
298
|
-
end
|
299
|
-
raise FatalDeploymentError, "Template validation failed"
|
300
|
-
end
|
301
|
-
validate_globals(resources)
|
302
|
-
end
|
303
|
-
measure_method(:validate_resources)
|
304
|
-
|
305
|
-
# Rejects (or, when @allow_globals is set, merely warns about) cluster-scoped
# resources, since this task is namespaced.
def validate_globals(resources)
  global_resources = resources.select(&:global?)
  return if global_resources.empty?

  descriptions = global_resources.map do |resource|
    "#{resource.name} (#{resource.type}) in #{File.basename(resource.file_path)}"
  end
  indented_list = FormattedLogger.indent_four(descriptions.join("\n"))

  unless @allow_globals
    @logger.summary.add_paragraph(ColorizedString.new("Global resources:\n#{indented_list}").yellow)
    raise FatalDeploymentError, "This command is namespaced and cannot be used to deploy global resources."
  end

  # Deprecation path: still allowed for now, but loudly flagged in the summary
  deprecation_msg = "The ability for this task to deploy global resources will be removed in the next version,"\
    " which will affect the following resources:"
  deprecation_msg += "\n#{indented_list}"
  @logger.summary.add_paragraph(ColorizedString.new(deprecation_msg).yellow)
end
|
322
|
-
|
323
|
-
# Syncs each resource's live state from the cluster (concurrently, through a
# shared cache) and logs its pre-deploy status.
def check_initial_status(resources)
  shared_cache = ResourceCache.new(@task_config)
  KubernetesDeploy::Concurrency.split_across_threads(resources) do |resource|
    resource.sync(shared_cache)
  end
  resources.each do |resource|
    @logger.info(resource.pretty_status)
  end
end
measure_method(:check_initial_status, "initial_status.duration")
|
329
|
-
|
330
|
-
# All Secret resources produced by the ejson provisioners, flattened into one list.
def secrets_from_ejson
  ejson_provisioners.map(&:resources).flatten(1)
end
|
333
|
-
|
334
|
-
# Builds the full list of KubernetesResource objects for this deploy:
# every definition yielded by the template sets (matched against discovered
# CRDs so custom resources get the right wrapper class), plus any Secrets
# generated from ejson. Returns the resources sorted (deploy order relies on
# the resources' own <=> — defined elsewhere).
#
# Raises FatalDeploymentError if any template fails to render or parse.
def discover_resources
  @logger.info("Discovering resources:")
  resources = []
  # Index CRDs by the kind they define so each definition can be matched below
  crds_by_kind = cluster_resource_discoverer.crds.group_by(&:kind)
  @template_sets.with_resource_definitions(render_erb: @render_erb,
      current_sha: @current_sha, bindings: @bindings) do |r_def|
    crd = crds_by_kind[r_def["kind"]]&.first
    r = KubernetesResource.build(namespace: @namespace, context: @context, logger: @logger, definition: r_def,
      statsd_tags: @namespace_tags, crd: crd, global_names: global_resource_names)
    resources << r
    @logger.info(" - #{r.id}")
  end

  # ejson-derived secrets are appended after file-based resources
  secrets_from_ejson.each do |secret|
    resources << secret
    @logger.info(" - #{secret.id} (from ejson)")
  end

  resources.sort
rescue InvalidTemplateError => e
  record_invalid_template(err: e.message, filename: e.filename, content: e.content)
  raise FatalDeploymentError, "Failed to render and parse template"
end
measure_method(:discover_resources)
|
358
|
-
|
359
|
-
# Adds a red "Invalid template" paragraph to the deploy summary for the given
# file, including the error message and (when safe) the template content.
#
# @param err [String] the validation/apply error to display
# @param filename [String] basename of the offending template
# @param content [String, nil] raw template body, or nil to omit it
def record_invalid_template(err:, filename:, content: nil)
  # Fix: the filename must be interpolated into the heading — the extracted
  # source had a broken "#(unknown)" placeholder where "#{filename}" belongs.
  debug_msg = ColorizedString.new("Invalid template: #{filename}\n").red
  debug_msg += "> Error message:\n#{FormattedLogger.indent_four(err)}"
  if content
    # Never echo content that might embed a Secret's data
    debug_msg += if content =~ /kind:\s*Secret/
      "\n> Template content: Suppressed because it may contain a Secret"
    else
      "\n> Template content:\n#{FormattedLogger.indent_four(content)}"
    end
  end
  @logger.summary.add_paragraph(debug_msg)
end
|
371
|
-
|
372
|
-
# Adds a yellow "Template warning" paragraph to the deploy summary for the
# given file.
#
# @param warning [String] warning text produced during validation
# @param filename [String] basename of the template that triggered it
def record_warnings(warning:, filename:)
  # Fix: interpolate the filename — the extracted source had a broken
  # "#(unknown)" placeholder where "#{filename}" belongs.
  warn_msg = "Template warning: #{filename}\n"
  warn_msg += "> Warning message:\n#{FormattedLogger.indent_four(warning)}"
  @logger.summary.add_paragraph(ColorizedString.new(warn_msg).yellow)
end
|
377
|
-
|
378
|
-
# Validates the task configuration (protected namespaces, kubectl/kubeclient
# connectivity via DeployTaskConfigValidator, and the template sets) before
# any cluster mutation happens. Also guards the ejson-keys secret against
# pruning and merges namespace labels into the statsd tags.
#
# Raises KubernetesDeploy::TaskConfigurationError when any check fails.
def validate_configuration(allow_protected_ns:, prune:)
  task_config_validator = DeployTaskConfigValidator.new(@protected_namespaces, allow_protected_ns, prune,
    @task_config, kubectl, kubeclient_builder)
  errors = []
  errors += task_config_validator.errors
  errors += @template_sets.validate
  unless errors.empty?
    @logger.summary.add_action("Configuration invalid")
    @logger.summary.add_paragraph(errors.map { |err| "- #{err}" }.join("\n"))
    raise KubernetesDeploy::TaskConfigurationError
  end

  # Pruning could delete the ejson-keys secret; refuse to proceed if it would
  confirm_ejson_keys_not_prunable if prune
  @logger.info("Using resource selector #{@selector}") if @selector
  # Union, so repeated validation runs don't duplicate tags
  @namespace_tags |= tags_from_namespace_labels
  @logger.info("All required parameters and files are present")
end
measure_method(:validate_configuration)
|
396
|
-
|
397
|
-
# Deploys the given resources to the cluster and (optionally) waits for them
# to become ready.
#
# Resources whose deploy_method is :apply are applied in a single kubectl
# batch; :replace / :replace_force resources are handled one at a time with a
# create fallback for resources that don't exist yet.
#
# @param resources [Array<KubernetesResource>] what to deploy
# @param prune [Boolean] pass kubectl's prune flags to delete unmanaged resources
# @param verify [Boolean] watch the rollout via ResourceWatcher when true
# @param record_summary [Boolean] forwarded to the watcher's summary output
def deploy_resources(resources, prune: false, verify:, record_summary: true)
  return if resources.empty?
  deploy_started_at = Time.now.utc

  if resources.length > 1
    @logger.info("Deploying resources:")
    resources.each do |r|
      @logger.info("- #{r.id} (#{r.pretty_timeout_type})")
    end
  else
    resource = resources.first
    @logger.info("Deploying #{resource.id} (#{resource.pretty_timeout_type})")
  end

  # Apply can be done in one large batch, the rest have to be done individually
  applyables, individuals = resources.partition { |r| r.deploy_method == :apply }
  # Prunable resources should also applied so that they can be pruned
  pruneable_types = prune_whitelist.map { |t| t.split("/").last }
  applyables += individuals.select { |r| pruneable_types.include?(r.type) }

  individuals.each do |r|
    # Record the start time per resource so timeout accounting is accurate
    r.deploy_started_at = Time.now.utc
    case r.deploy_method
    when :replace
      _, _, replace_st = kubectl.run("replace", "-f", r.file_path, log_failure: false)
    when :replace_force
      _, _, replace_st = kubectl.run("replace", "--force", "--cascade", "-f", r.file_path,
        log_failure: false)
    else
      # Fail Fast! This is a programmer mistake.
      raise ArgumentError, "Unexpected deploy method! (#{r.deploy_method.inspect})"
    end

    next if replace_st.success?
    # it doesn't exist so we can't replace it
    _, err, create_st = kubectl.run("create", "-f", r.file_path, log_failure: false)

    next if create_st.success?
    raise FatalDeploymentError, <<~MSG
      Failed to replace or create resource: #{r.id}
      #{err}
    MSG
  end

  apply_all(applyables, prune)

  if verify
    watcher = ResourceWatcher.new(resources: resources, deploy_started_at: deploy_started_at,
      timeout: @max_watch_seconds, task_config: @task_config, sha: @current_sha)
    watcher.run(record_summary: record_summary)
  end
end
|
449
|
-
|
450
|
-
# Thin delegator around deploy_resources; it exists so the non-priority phase
# of the deploy gets its own duration metric (see measure_method below).
def deploy_all_resources(resources, prune: false, verify:, record_summary: true)
  deploy_resources(resources, verify: verify, record_summary: record_summary, prune: prune)
end
measure_method(:deploy_all_resources, 'normal_resources.duration')
|
454
|
-
|
455
|
-
# Applies all given resources in one `kubectl apply` invocation. Templates are
# symlinked into a temp dir so kubectl can consume them as a single -f target.
# When prune is true, the prune whitelist and either the deploy selector or
# --all are appended so kubectl deletes unmanaged objects of those types.
#
# Raises FatalDeploymentError when kubectl apply fails (after recording the
# failure details in the summary).
def apply_all(resources, prune)
  return unless resources.present?
  command = %w(apply)

  Dir.mktmpdir do |tmp_dir|
    resources.each do |r|
      FileUtils.symlink(r.file_path, tmp_dir)
      # Start each resource's timeout clock as it is handed to kubectl
      r.deploy_started_at = Time.now.utc
    end
    command.push("-f", tmp_dir)

    if prune
      command.push("--prune")
      if @selector
        command.push("--selector", @selector.to_s)
      else
        command.push("--all")
      end
      prune_whitelist.each { |type| command.push("--prune-whitelist=#{type}") }
    end

    # If any template may contain secrets, suppress kubectl's raw output
    output_is_sensitive = resources.any?(&:sensitive_template_content?)
    out, err, st = kubectl.run(*command, log_failure: false, output_is_sensitive: output_is_sensitive)

    if st.success?
      log_pruning(out) if prune
    else
      record_apply_failure(err, resources: resources)
      raise FatalDeploymentError, "Command failed: #{Shellwords.join(command)}"
    end
  end
end
measure_method(:apply_all)
|
488
|
-
|
489
|
-
# Parses kubectl apply output for "<resource> pruned" lines and reports what
# was deleted, both in the live log and the deploy summary.
def log_pruning(kubectl_output)
  pruned_ids = kubectl_output.scan(/^(.*) pruned$/)
  return if pruned_ids.empty?

  @logger.info("The following resources were pruned: #{pruned_ids.join(', ')}")
  @logger.summary.add_action("pruned #{pruned_ids.length} #{'resource'.pluralize(pruned_ids.length)}")
end
|
496
|
-
|
497
|
-
# Turns a failed `kubectl apply`'s stderr into actionable summary entries.
# Lines that reference a template file become per-file "invalid template"
# records; anything else is collected as an unidentified error. Error text
# for templates flagged sensitive is suppressed unless the resource already
# passed a server dry-run (in which case the server has seen it anyway).
#
# @param err [String] stderr captured from kubectl apply
# @param resources [Array<KubernetesResource>] resources that were applied
def record_apply_failure(err, resources: [])
  warn_msg = "WARNING: Any resources not mentioned in the error(s) below were likely created/updated. " \
    "You may wish to roll back this deploy."
  @logger.summary.add_paragraph(ColorizedString.new(warn_msg).yellow)

  unidentified_errors = []
  # Basenames of templates whose contents/errors must not be echoed verbatim
  filenames_with_sensitive_content = resources
    .select(&:sensitive_template_content?)
    .map { |r| File.basename(r.file_path) }

  # Basenames already validated via server dry-run (safe to show errors for)
  server_dry_run_validated_resource = resources
    .select(&:server_dry_run_validated?)
    .map { |r| File.basename(r.file_path) }

  err.each_line do |line|
    bad_files = find_bad_files_from_kubectl_output(line)
    unless bad_files.present?
      unidentified_errors << line
      next
    end

    bad_files.each do |f|
      err_msg = f[:err]
      if filenames_with_sensitive_content.include?(f[:filename])
        # Hide the error and template contents in case it has sensitive information
        # we display full error messages as we assume there's no sensitive info leak after server-dry-run
        err_msg = "SUPPRESSED FOR SECURITY" unless server_dry_run_validated_resource.include?(f[:filename])
        record_invalid_template(err: err_msg, filename: f[:filename], content: nil)
      else
        record_invalid_template(err: err_msg, filename: f[:filename], content: f[:content])
      end
    end
  end
  return unless unidentified_errors.any?

  # Raw unattributed output may still leak sensitive data if any sensitive
  # template has not been through a server dry-run
  if (filenames_with_sensitive_content - server_dry_run_validated_resource).present?
    warn_msg = "WARNING: There was an error applying some or all resources. The raw output may be sensitive and " \
      "so cannot be displayed."
    @logger.summary.add_paragraph(ColorizedString.new(warn_msg).yellow)
  else
    heading = ColorizedString.new('Unidentified error(s):').red
    msg = FormattedLogger.indent_four(unidentified_errors.join)
    @logger.summary.add_paragraph("#{heading}\n#{msg}")
  end
end
|
542
|
-
|
543
|
-
# Extracts the template file(s) referenced in one line of kubectl stderr so we
# can show the developer which template caused the failure.
def find_bad_files_from_kubectl_output(line)
  # stderr often contains one or more lines like the following, from which we can extract the file path(s):
  # Error from server (TypeOfError): error when creating "/path/to/service-gqq5oh.yml": Service "web" is invalid:
  bad_files = []
  line.scan(%r{"(/\S+\.ya?ml\S*)"}) do |capture_groups|
    capture_groups.each do |path|
      template_body = File.read(path) if File.file?(path)
      bad_files << { filename: File.basename(path), err: line, content: template_body }
    end
  end
  bad_files
end
|
556
|
-
|
557
|
-
# Fetches and memoizes the target namespace's definition as a symbol-keyed
# hash. Returns nil if the namespace cannot be found or fetched.
def namespace_definition
  @namespace_definition ||= begin
    raw_json, _err, status = kubectl.run("get", "namespace", @namespace, use_namespace: false,
      log_failure: true, raise_if_not_found: true, attempts: 3, output: 'json')
    JSON.parse(raw_json, symbolize_names: true) if status.success?
  end
rescue Kubectl::ResourceNotFoundError
  nil
end
|
566
|
-
|
567
|
-
# make sure to never prune the ejson-keys secret
# If the ejson-keys secret carries kubectl's last-applied annotation, a pruning
# apply would delete it — abort before that can happen. A missing secret is
# fine (nothing to protect).
def confirm_ejson_keys_not_prunable
  last_applied = ejson_keys_secret.dig("metadata", "annotations", KubernetesResource::LAST_APPLIED_ANNOTATION)
  return unless last_applied

  @logger.error("Deploy cannot proceed because protected resource " \
    "Secret/#{EjsonSecretProvisioner::EJSON_KEYS_SECRET} would be pruned.")
  raise EjsonPrunableError
rescue Kubectl::ResourceNotFoundError => e
  @logger.debug("Secret/#{EjsonSecretProvisioner::EJSON_KEYS_SECRET} does not exist: #{e}")
end
|
577
|
-
|
578
|
-
# Converts the namespace's labels into "key:value" statsd tags.
# Returns [] when the namespace definition is unavailable.
def tags_from_namespace_labels
  definition = namespace_definition
  return [] if definition.blank?

  labels = definition.fetch(:metadata, {}).fetch(:labels, {})
  labels.map { |label_key, label_value| "#{label_key}:#{label_value}" }
end
|
583
|
-
|
584
|
-
# Lazily-constructed kubectl wrapper shared across the task.
def kubectl
  return @kubectl if @kubectl
  @kubectl = Kubectl.new(task_config: @task_config, log_failure_by_default: true)
end
|
587
|
-
|
588
|
-
# Fetches and memoizes the ejson-keys Secret as a parsed hash.
# Raises EjsonSecretError when the secret cannot be retrieved.
def ejson_keys_secret
  @ejson_keys_secret ||= begin
    raw_json, stderr, status = kubectl.run("get", "secret", EjsonSecretProvisioner::EJSON_KEYS_SECRET, output: "json",
      raise_if_not_found: true, attempts: 3, output_is_sensitive: true, log_failure: true)
    if status.success?
      JSON.parse(raw_json)
    else
      raise EjsonSecretError, "Error retrieving Secret/#{EjsonSecretProvisioner::EJSON_KEYS_SECRET}: #{stderr}"
    end
  end
end
|
598
|
-
|
599
|
-
# Base statsd tags for this deploy, unioned with any namespace-derived tags
# (duplicates removed, order preserved).
def statsd_tags
  base_tags = ["namespace:#{@namespace}", "sha:#{@current_sha}", "context:#{@context}"]
  base_tags | @namespace_tags
end
|
602
|
-
|
603
|
-
# Yields up to limit+1 times, stopping early as soon as the block returns a
# truthy value. Returns nil either way.
def with_retries(limit)
  attempts_remaining = limit + 1
  while attempts_remaining > 0
    break if yield
    attempts_remaining -= 1
  end
end
|
611
14
|
end
|
612
15
|
end
|