kubernetes-deploy 0.13.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.buildkite/pipeline.nightly.yml +28 -0
- data/.buildkite/pipeline.yml +16 -3
- data/CHANGELOG.md +10 -0
- data/bin/ci +4 -4
- data/exe/kubernetes-deploy +2 -2
- data/lib/kubernetes-deploy.rb +2 -2
- data/lib/kubernetes-deploy/{runner.rb → deploy_task.rb} +6 -6
- data/lib/kubernetes-deploy/ejson_secret_provisioner.rb +7 -1
- data/lib/kubernetes-deploy/kubectl.rb +9 -3
- data/lib/kubernetes-deploy/kubernetes_resource.rb +22 -13
- data/lib/kubernetes-deploy/kubernetes_resource/bucket.rb +1 -1
- data/lib/kubernetes-deploy/kubernetes_resource/deployment.rb +38 -12
- data/lib/kubernetes-deploy/kubernetes_resource/elasticsearch.rb +1 -1
- data/lib/kubernetes-deploy/kubernetes_resource/pod.rb +21 -16
- data/lib/kubernetes-deploy/kubernetes_resource/pod_set_base.rb +3 -3
- data/lib/kubernetes-deploy/kubernetes_resource/replica_set.rb +7 -4
- data/lib/kubernetes-deploy/kubernetes_resource/statefulservice.rb +1 -1
- data/lib/kubernetes-deploy/kubernetes_resource/topic.rb +1 -1
- data/lib/kubernetes-deploy/resource_watcher.rb +15 -13
- data/lib/kubernetes-deploy/restart_task.rb +1 -1
- data/lib/kubernetes-deploy/version.rb +1 -1
- data/shipit.yml +4 -0
- metadata +5 -3
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b83794f813d2a4f1b2b3aba98a9181fc2c041f85
+  data.tar.gz: 1d0dc0a38cdd5a44adb1e914751cea41dd8dbce1
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 82153cb31bd8108540cdbe92fe25d25e6c4d09e3f9781c00f74f664ca2ea1a5fa65311bd940565ea5b7fefa9ec4bcae36f533855cc6f068b4970c3ecf9e06478
+  data.tar.gz: 30fbefaae8d80f503ec2e31664a4ee29097ee80a92f9d398d6ce4e1e76d0f350855fd85048ae6ae7d1fb8932abf6be48b754d4e569626ae0a52e3fb716f54e6b
data/.buildkite/pipeline.nightly.yml ADDED
@@ -0,0 +1,28 @@
+- name: 'Run Test Suite (:kubernetes: 1.9-latest)'
+  command: bin/ci
+  agents:
+    queue: minikube-ci
+  env:
+    LOGGING_LEVEL: 4
+    KUBERNETES_VERSION: v1.9-latest
+- name: 'Run Test Suite (:kubernetes: 1.8-latest)'
+  command: bin/ci
+  agents:
+    queue: minikube-ci
+  env:
+    LOGGING_LEVEL: 4
+    KUBERNETES_VERSION: v1.8-latest
+- name: 'Run Test Suite (:kubernetes: 1.7-latest)'
+  command: bin/ci
+  agents:
+    queue: minikube-ci
+  env:
+    LOGGING_LEVEL: 4
+    KUBERNETES_VERSION: v1.7-latest
+- name: 'Run Test Suite (:kubernetes: 1.6.4)'
+  command: bin/ci
+  agents:
+    queue: minikube-ci
+  env:
+    LOGGING_LEVEL: 4
+    KUBERNETES_VERSION: v1.6.4
data/.buildkite/pipeline.yml CHANGED
@@ -1,8 +1,21 @@
-- name: 'Run Test Suite (:kubernetes: 1.
+- name: 'Run Test Suite (:kubernetes: 1.8-latest)'
   command: bin/ci
   agents:
-    queue:
+    queue: minikube-ci
+  env:
+    LOGGING_LEVEL: 4
+    KUBERNETES_VERSION: v1.8-latest
+- name: 'Run Test Suite (:kubernetes: 1.7-latest)'
+  command: bin/ci
+  agents:
+    queue: minikube-ci
+  env:
+    LOGGING_LEVEL: 4
+    KUBERNETES_VERSION: v1.7-latest
 - name: 'Run Test Suite (:kubernetes: 1.6.4)'
   command: bin/ci
   agents:
-    queue:
+    queue: minikube-ci
+  env:
+    LOGGING_LEVEL: 4
+    KUBERNETES_VERSION: v1.6.4
data/CHANGELOG.md CHANGED
@@ -1,3 +1,13 @@
+### 0.14.0
+*Bug Fixes*
+- Fix incorrect timeouts occasionally observed on deployments using progressDeadlineSeconds in Kubernetes <1.7.7
+
+*Enhancements*
+- Renamed `KubernetesDeploy::Runner` (which powers `exe/kubernetes-deploy`) to `KubernetesDeploy::DeployTask`. This increases consistency between our primary class names and avoids confusion with `KubernetesDeploy::RunnerTask` (which powers `exe/kubernetes-run`).
+- Improved output related to timeouts. For deployments, both failure and timeout output now mentions the referenced replica set.
+- Small improvements to the reliability of the success polling.
+- EjsonSecretProvisioner no longer logs kubectl command output (which may contain secret data) when debug-level logging is enabled.
+
 ### 0.13.0
 *Features*
 - Added support for StatefulSets for kubernetes 1.7+ using RollingUpdate
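For the rename above, call sites swap only the constant, since DeployTask keeps Runner's public interface. A minimal hypothetical sketch of the renamed entry point; the literal values and the template_dir argument are placeholders here, mirroring what exe/kubernetes-deploy passes further down in this diff:

```ruby
require 'kubernetes-deploy'

logger = KubernetesDeploy::FormattedLogger.build("my-namespace", "minikube")

# Previously KubernetesDeploy::Runner.new(...); only the constant changes.
deploy = KubernetesDeploy::DeployTask.new(
  namespace: "my-namespace",              # target namespace (placeholder)
  context: "minikube",                    # kubeconfig context (placeholder)
  current_sha: ENV["REVISION"],           # used for template substitution
  template_dir: "config/deploy/production", # assumed argument, per the CLI default
  logger: logger
)
deploy.run
```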
data/bin/ci CHANGED
@@ -6,11 +6,11 @@ if [[ -n "${DEBUG:+set}" ]]; then
 fi
 
 docker run --rm \
-
-
-  -v "$HOME/.minikube"
+  --net=host \
+  -v "$HOME/.kube":"/root/.kube" \
+  -v "$HOME/.minikube":"$HOME/.minikube" \
   -v "$PWD":/usr/src/app \
-  -v "/usr/bin/
+  -v "/usr/bin/kubectl":"/usr/bin/kubectl" \
   -e CI=1 \
   -e CODECOV_TOKEN=$CODECOV_TOKEN \
   -e COVERAGE=1 \
data/exe/kubernetes-deploy CHANGED
@@ -22,7 +22,7 @@ ARGV.options do |opts|
   end
 
   opts.on("--skip-wait", "Skip verification of non-priority-resource success (not recommended)") { skip_wait = true }
-  prot_ns = KubernetesDeploy::Runner::PROTECTED_NAMESPACES.join(', ')
+  prot_ns = KubernetesDeploy::DeployTask::PROTECTED_NAMESPACES.join(', ')
   opts.on("--allow-protected-ns", "Enable deploys to #{prot_ns}; requires --no-prune") { allow_protected_ns = true }
   opts.on("--no-prune", "Disable deletion of resources that do not appear in the template dir") { prune = false }
   opts.on("--template-dir=DIR", "Set the template dir (default: config/deploy/$ENVIRONMENT)") { |v| template_dir = v }
@@ -58,7 +58,7 @@ namespace = ARGV[0]
 context = ARGV[1]
 logger = KubernetesDeploy::FormattedLogger.build(namespace, context, verbose_prefix: verbose_log_prefix)
 
-runner = KubernetesDeploy::Runner.new(
+runner = KubernetesDeploy::DeployTask.new(
   namespace: namespace,
   context: context,
   current_sha: revision,
data/lib/kubernetes-deploy.rb CHANGED
@@ -1,4 +1,4 @@
-# rubocop:disable
+# rubocop:disable Naming/FileName
 # frozen_string_literal: true
 
 require 'active_support/core_ext/object/blank'
@@ -13,7 +13,7 @@ require 'colorized_string'
 require 'kubernetes-deploy/version'
 require 'kubernetes-deploy/errors'
 require 'kubernetes-deploy/formatted_logger'
-require 'kubernetes-deploy/runner'
+require 'kubernetes-deploy/deploy_task'
 require 'kubernetes-deploy/statsd'
 require 'kubernetes-deploy/concurrency'
 
data/lib/kubernetes-deploy/{runner.rb → deploy_task.rb} RENAMED
@@ -37,7 +37,7 @@ require 'kubernetes-deploy/kubeclient_builder'
 require 'kubernetes-deploy/ejson_secret_provisioner'
 
 module KubernetesDeploy
-  class Runner
+  class DeployTask
     include KubeclientBuilder
 
     PREDEPLOY_SEQUENCE = %w(
@@ -326,15 +326,15 @@ module KubernetesDeploy
         @logger.info("Deploying resources:")
       else
         resource = resources.first
-        @logger.info("Deploying #{resource.id} (
+        @logger.info("Deploying #{resource.id} (#{resource.pretty_timeout_type})")
       end
 
       # Apply can be done in one large batch, the rest have to be done individually
       applyables, individuals = resources.partition { |r| r.deploy_method == :apply }
 
       individuals.each do |r|
-        @logger.info("- #{r.id} (
-        r.
+        @logger.info("- #{r.id} (#{r.pretty_timeout_type})") if resources.length > 1
+        r.deploy_started_at = Time.now.utc
         case r.deploy_method
         when :replace
           _, _, replace_st = kubectl.run("replace", "-f", r.file_path, log_failure: false)
@@ -369,9 +369,9 @@ module KubernetesDeploy
 
       command = ["apply"]
       resources.each do |r|
-        @logger.info("- #{r.id} (
+        @logger.info("- #{r.id} (#{r.pretty_timeout_type})") if resources.length > 1
         command.push("-f", r.file_path)
-        r.
+        r.deploy_started_at = Time.now.utc
       end
 
       if prune
data/lib/kubernetes-deploy/ejson_secret_provisioner.rb CHANGED
@@ -23,7 +23,13 @@ module KubernetesDeploy
       @ejson_file = "#{template_dir}/#{EJSON_SECRETS_FILE}"
       @logger = logger
       @prune = prune
-      @kubectl = Kubectl.new(
+      @kubectl = Kubectl.new(
+        namespace: @namespace,
+        context: @context,
+        logger: @logger,
+        log_failure_by_default: false,
+        output_is_sensitive: true # output may contain ejson secrets
+      )
     end
 
     def secret_changes_required?
data/lib/kubernetes-deploy/kubectl.rb CHANGED
@@ -2,12 +2,14 @@
 
 module KubernetesDeploy
   class Kubectl
-    def initialize(namespace:, context:, logger:, log_failure_by_default:, default_timeout: '30s'
+    def initialize(namespace:, context:, logger:, log_failure_by_default:, default_timeout: '30s',
+        output_is_sensitive: false)
       @namespace = namespace
       @context = context
       @logger = logger
       @log_failure_by_default = log_failure_by_default
       @default_timeout = default_timeout
+      @output_is_sensitive = output_is_sensitive
 
       raise ArgumentError, "namespace is required" if namespace.blank?
       raise ArgumentError, "context is required" if context.blank?
@@ -23,11 +25,11 @@ module KubernetesDeploy
 
       @logger.debug Shellwords.join(args)
       out, err, st = Open3.capture3(*args)
-      @logger.debug(out.shellescape)
+      @logger.debug(out.shellescape) unless output_is_sensitive?
 
       if !st.success? && log_failure
         @logger.warn("The following command failed: #{Shellwords.join(args)}")
-        @logger.warn(err)
+        @logger.warn(err) unless output_is_sensitive?
       end
       [out.chomp, err.chomp, st]
     end
@@ -51,6 +53,10 @@ module KubernetesDeploy
 
     private
 
+    def output_is_sensitive?
+      @output_is_sensitive
+    end
+
     def extract_version_info_from_kubectl_response(response)
       info = {}
       response.each_line do |l|
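The effect of the new flag, restated outside the class: when output_is_sensitive is true, stdout is never echoed at debug level and stderr is never echoed on failure. A standalone sketch of the same guard, simplified and not the gem's actual code:

```ruby
require 'open3'
require 'logger'

# Illustrative restatement of the guard Kubectl#run now applies.
def run_logged(*args, logger:, output_is_sensitive: false, log_failure: true)
  logger.debug(args.join(" "))
  out, err, st = Open3.capture3(*args)
  logger.debug(out) unless output_is_sensitive # e.g. decrypted ejson never hits the log

  if !st.success? && log_failure
    logger.warn("The following command failed: #{args.join(' ')}")
    logger.warn(err) unless output_is_sensitive
  end
  [out.chomp, err.chomp, st]
end

logger = Logger.new($stdout, level: Logger::DEBUG)
run_logged("echo", "super-secret", logger: logger, output_is_sensitive: true)
# The command runs, but its output is never written to the log.
```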
data/lib/kubernetes-deploy/kubernetes_resource.rb CHANGED
@@ -6,8 +6,8 @@ require 'kubernetes-deploy/kubectl'
 
 module KubernetesDeploy
   class KubernetesResource
-    attr_reader :name, :namespace, :
-    attr_writer :type, :
+    attr_reader :name, :namespace, :context, :validation_error_msg
+    attr_writer :type, :deploy_started_at
 
     TIMEOUT = 5.minutes
     LOG_LINE_COUNT = 250
@@ -42,6 +42,10 @@ module KubernetesDeploy
       self.class.timeout
     end
 
+    def pretty_timeout_type
+      "timeout: #{timeout}s"
+    end
+
     def initialize(namespace:, context:, definition:, logger:)
       # subclasses must also set these if they define their own initializer
       @name = definition.dig("metadata", "name")
@@ -86,8 +90,12 @@ module KubernetesDeploy
       false
     end
 
+    def deploy_started?
+      @deploy_started_at.present?
+    end
+
     def deploy_succeeded?
-      if
+      if deploy_started? && !@success_assumption_warning_shown
         @logger.warn("Don't know how to monitor resources of type #{type}. Assuming #{id} deployed successfully.")
         @success_assumption_warning_shown = true
       end
@@ -103,16 +111,12 @@ module KubernetesDeploy
     end
 
     def type
-      @type || self.class.name.
-    end
-
-    def deploy_finished?
-      deploy_failed? || deploy_succeeded? || deploy_timed_out?
+      @type || self.class.name.demodulize
     end
 
     def deploy_timed_out?
-      return false unless
-      !deploy_succeeded? && !deploy_failed? && (Time.now.utc - @
+      return false unless deploy_started?
+      !deploy_succeeded? && !deploy_failed? && (Time.now.utc - @deploy_started_at > timeout)
     end
 
     # Expected values: :apply, :replace, :replace_force
@@ -125,9 +129,14 @@ module KubernetesDeploy
       if deploy_failed?
         helpful_info << ColorizedString.new("#{id}: FAILED").red
         helpful_info << failure_message if failure_message.present?
-
-        helpful_info << ColorizedString.new("#{id}: TIMED OUT
+      elsif deploy_timed_out?
+        helpful_info << ColorizedString.new("#{id}: TIMED OUT (#{pretty_timeout_type})").yellow
         helpful_info << timeout_message if timeout_message.present?
+      else
+        # Arriving in debug_message when we neither failed nor timed out is very unexpected. Dump all available info.
+        helpful_info << ColorizedString.new("#{id}: MONITORING ERROR").red
+        helpful_info << failure_message if failure_message.present?
+        helpful_info << timeout_message if timeout_message.present? && timeout_message != STANDARD_TIMEOUT_MESSAGE
       end
       helpful_info << "  - Final status: #{status}"
 
@@ -178,7 +187,7 @@ module KubernetesDeploy
 
       event_collector = Hash.new { |hash, key| hash[key] = [] }
       Event.extract_all_from_go_template_blob(out).each_with_object(event_collector) do |candidate, events|
-        events[id] << candidate.to_s if candidate.seen_since?(@
+        events[id] << candidate.to_s if candidate.seen_since?(@deploy_started_at - 5.seconds)
      end
     end
 
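The new pretty_timeout_type hook is what the "Deploying ..." and "TIMED OUT" lines interpolate; subclasses can override it, as Deployment does below for progress deadlines. A minimal sketch of the hook with stand-in classes, not the gem's real hierarchy:

```ruby
# Stand-in classes illustrating the pretty_timeout_type override (hypothetical names).
class GenericResource
  TIMEOUT = 300 # seconds; the gem uses ActiveSupport durations

  def timeout
    self.class::TIMEOUT
  end

  def pretty_timeout_type
    "timeout: #{timeout}s" # default label for the hard timeout
  end
end

class DeploymentResource < GenericResource
  def initialize(progress_deadline: nil)
    @progress_deadline = progress_deadline
  end

  def pretty_timeout_type
    # Prefer the deployment's own progressDeadlineSeconds when one is set
    @progress_deadline ? "progress deadline: #{@progress_deadline}s" : super
  end
end

puts GenericResource.new.pretty_timeout_type                            # => timeout: 300s
puts DeploymentResource.new(progress_deadline: 120).pretty_timeout_type # => progress deadline: 120s
```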
data/lib/kubernetes-deploy/kubernetes_resource/bucket.rb CHANGED
@@ -7,7 +7,7 @@ module KubernetesDeploy
     end
 
     def deploy_succeeded?
-      return false unless
+      return false unless deploy_started?
 
       unless @success_assumption_warning_shown
         @logger.warn("Don't know how to monitor resources of type #{type}. Assuming #{id} deployed successfully.")
data/lib/kubernetes-deploy/kubernetes_resource/deployment.rb CHANGED
@@ -11,15 +11,21 @@ module KubernetesDeploy
       deployment_data = JSON.parse(raw_json)
       @desired_replicas = deployment_data["spec"]["replicas"].to_i
       @latest_rs = find_latest_rs(deployment_data)
+
       @rollout_data = { "replicas" => 0 }.merge(deployment_data["status"]
         .slice("replicas", "updatedReplicas", "availableReplicas", "unavailableReplicas"))
       @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas.chop.pluralize(num)}" }.join(", ")
+
       conditions = deployment_data.fetch("status", {}).fetch("conditions", [])
-      @
+      @progress_condition = conditions.find { |condition| condition['type'] == 'Progressing' }
+      @progress_deadline = deployment_data['spec']['progressDeadlineSeconds']
     else # reset
       @latest_rs = nil
       @rollout_data = { "replicas" => 0 }
       @status = nil
+      @progress_condition = nil
+      @progress_deadline = @definition['spec']['progressDeadlineSeconds']
+      @desired_replicas = -1
     end
   end
 
@@ -35,7 +41,7 @@ module KubernetesDeploy
     end
 
     def deploy_succeeded?
-      return false unless @latest_rs
+      return false unless @latest_rs.present?
 
       @latest_rs.deploy_succeeded? &&
       @latest_rs.desired_replicas == @desired_replicas && # latest RS fully scaled up
@@ -44,26 +50,31 @@ module KubernetesDeploy
     end
 
     def deploy_failed?
-      @latest_rs
+      @latest_rs&.deploy_failed?
     end
 
     def failure_message
-      @latest_rs
+      return unless @latest_rs.present?
+      "Latest ReplicaSet: #{@latest_rs.name}\n\n#{@latest_rs.failure_message}"
     end
 
     def timeout_message
-
-
-      "Deploy timed out due to progressDeadlineSeconds of #{progress_seconds} seconds, "\
-      " reason: #{@progress['reason']}\n"\
-      "#{@latest_rs&.timeout_message}"
+      reason_msg = if @progress_condition.present?
+        "Timeout reason: #{@progress_condition['reason']}"
       else
-
+        "Timeout reason: hard deadline for #{type}"
       end
+      return reason_msg unless @latest_rs.present?
+      "#{reason_msg}\nLatest ReplicaSet: #{@latest_rs.name}\n\n#{@latest_rs.timeout_message}"
+    end
+
+    def pretty_timeout_type
+      @progress_deadline.present? ? "progress deadline: #{@progress_deadline}s" : super
     end
 
     def deploy_timed_out?
-
+      # Do not use the hard timeout if progress deadline is set
+      @progress_condition.present? ? deploy_failing_to_progress? : super
     end
 
     def exists?
@@ -72,6 +83,21 @@ module KubernetesDeploy
 
     private
 
+    def deploy_failing_to_progress?
+      return false unless @progress_condition.present?
+
+      if kubectl.server_version < Gem::Version.new("1.7.7")
+        # Deployments were being updated prematurely with incorrect progress information
+        # https://github.com/kubernetes/kubernetes/issues/49637
+        return false unless Time.now.utc - @deploy_started_at >= @progress_deadline.to_i
+      else
+        return false unless deploy_started?
+      end
+
+      @progress_condition["status"] == 'False' &&
+        Time.parse(@progress_condition["lastUpdateTime"]).to_i >= (@deploy_started_at - 5.seconds).to_i
+    end
+
     def find_latest_rs(deployment_data)
       label_string = deployment_data["spec"]["selector"]["matchLabels"].map { |k, v| "#{k}=#{v}" }.join(",")
       raw_json, _err, st = kubectl.run("get", "replicasets", "--output=json", "--selector=#{label_string}")
@@ -92,7 +118,7 @@ module KubernetesDeploy
         definition: latest_rs_data,
         logger: @logger,
         parent: "#{@name.capitalize} deployment",
-
+        deploy_started_at: @deploy_started_at
       )
       rs.sync(latest_rs_data)
       rs
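The changelog's bug fix lives in deploy_failing_to_progress? above: on servers older than 1.7.7 the Progressing condition could be stamped prematurely (kubernetes/kubernetes#49637), so the check additionally requires that the progress deadline has genuinely elapsed since the deploy began. A standalone restatement under assumed inputs; the names are illustrative, not the gem's objects:

```ruby
require 'time'

# Illustrative restatement of the progress-deadline decision.
def failing_to_progress?(condition, deploy_started_at:, progress_deadline:, server_version:)
  return false unless condition

  if server_version < Gem::Version.new("1.7.7")
    # Old servers could write a stale Progressing condition too early,
    # so do not trust it until the deadline itself has elapsed.
    return false unless Time.now.utc - deploy_started_at >= progress_deadline
  end

  condition["status"] == "False" &&
    Time.parse(condition["lastUpdateTime"]) >= deploy_started_at - 5 # clock-skew buffer
end

condition = { "status" => "False", "lastUpdateTime" => Time.now.utc.iso8601 }
puts failing_to_progress?(condition,
  deploy_started_at: Time.now.utc - 30, progress_deadline: 60,
  server_version: Gem::Version.new("1.7.6"))
# => false on 1.7.6 (only 30s of a 60s deadline elapsed); true on >= 1.7.7
```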
data/lib/kubernetes-deploy/kubernetes_resource/elasticsearch.rb CHANGED
@@ -7,7 +7,7 @@ module KubernetesDeploy
     end
 
     def deploy_succeeded?
-      return false unless
+      return false unless deploy_started?
 
       unless @success_assumption_warning_shown
         @logger.warn("Don't know how to monitor resources of type #{type}. Assuming #{id} deployed successfully.")
data/lib/kubernetes-deploy/kubernetes_resource/pod.rb CHANGED
@@ -3,9 +3,11 @@ module KubernetesDeploy
   class Pod < KubernetesResource
     TIMEOUT = 10.minutes
 
-
+    FAILED_PHASE_NAME = "Failed"
+
+    def initialize(namespace:, context:, definition:, logger:, parent: nil, deploy_started_at: nil)
       @parent = parent
-      @
+      @deploy_started_at = deploy_started_at
       @containers = definition.fetch("spec", {}).fetch("containers", []).map { |c| Container.new(c) }
       unless @containers.present?
         logger.summary.add_paragraph("Rendered template content:\n#{definition.to_yaml}")
@@ -19,7 +21,7 @@ module KubernetesDeploy
       if pod_data.blank?
         raw_json, _err, st = kubectl.run("get", type, @name, "-a", "--output=json")
         pod_data = JSON.parse(raw_json) if st.success?
-        raise_predates_deploy_error if pod_data.present? && unmanaged? &&
+        raise_predates_deploy_error if pod_data.present? && unmanaged? && !deploy_started?
       end
 
       if pod_data.present?
@@ -46,8 +48,7 @@ module KubernetesDeploy
     end
 
     def deploy_failed?
-
-      @containers.any?(&:doomed?)
+      failure_message.present?
     end
 
     def exists?
@@ -62,19 +63,23 @@ module KubernetesDeploy
     end
 
     def failure_message
-
-
-      container_messages = doomed_containers.map do |c|
-        red_name = ColorizedString.new(c.name).red
-        "> #{red_name}: #{c.doom_reason}"
+      if @phase == FAILED_PHASE_NAME
+        phase_problem = "Pod status: #{@status}. "
       end
 
-
-
-
-
+      doomed_containers = @containers.select(&:doomed?)
+      if doomed_containers.present?
+        container_problems = if unmanaged?
+          "The following containers encountered errors:\n"
+        else
+          "The following containers are in a state that is unlikely to be recoverable:\n"
+        end
+        doomed_containers.each do |c|
+          red_name = ColorizedString.new(c.name).red
+          container_problems += "> #{red_name}: #{c.doom_reason}\n"
+        end
       end
-
+      "#{phase_problem}#{container_problems}".presence
     end
 
     # Returns a hash in the following format:
@@ -89,7 +94,7 @@ module KubernetesDeploy
         "logs",
         @name,
         "--container=#{container.name}",
-        "--since-time=#{@
+        "--since-time=#{@deploy_started_at.to_datetime.rfc3339}",
       ]
       cmd << "--tail=#{LOG_LINE_COUNT}" unless unmanaged?
       out, _err, _st = kubectl.run(*cmd)
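deploy_failed? above now simply asks whether a failure message can be composed, which keeps the failure predicate and the printed diagnosis in lockstep. A simplified restatement of that composition, using stand-in structs rather than the gem's Pod (the gem also words the container preamble differently for unmanaged pods):

```ruby
# Stand-in for the gem's container objects; names are illustrative.
FAILED_PHASE_NAME = "Failed"
DoomedContainer = Struct.new(:name, :doom_reason)

def failure_message(phase:, status:, doomed_containers:)
  phase_problem = ("Pod status: #{status}. " if phase == FAILED_PHASE_NAME)

  container_problems = nil
  unless doomed_containers.empty?
    container_problems = "The following containers are in a state that is unlikely to be recoverable:\n"
    doomed_containers.each { |c| container_problems += "> #{c.name}: #{c.doom_reason}\n" }
  end

  msg = "#{phase_problem}#{container_problems}"
  msg.empty? ? nil : msg # mirrors ActiveSupport's String#presence
end

def deploy_failed?(**kwargs)
  !failure_message(**kwargs).nil? # failed exactly when a message exists
end

puts deploy_failed?(phase: "Running", status: "1/2 containers ready",
                    doomed_containers: [DoomedContainer.new("app", "CrashLoopBackOff")])
# => true
```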
data/lib/kubernetes-deploy/kubernetes_resource/pod_set_base.rb CHANGED
@@ -23,7 +23,7 @@ module KubernetesDeploy
         "logs",
         id,
         "--container=#{container_name}",
-        "--since-time=#{@
+        "--since-time=#{@deploy_started_at.to_datetime.rfc3339}",
         "--tail=#{LOG_LINE_COUNT}"
       )
       container_logs[container_name] = out.split("\n")
@@ -59,8 +59,8 @@ module KubernetesDeploy
         context: context,
         definition: pod_data,
         logger: @logger,
-        parent: "#{name.capitalize} #{
-
+        parent: "#{name.capitalize} #{type}",
+        deploy_started_at: @deploy_started_at
       )
       pod.sync(pod_data)
       relevant_pods << pod
data/lib/kubernetes-deploy/kubernetes_resource/replica_set.rb CHANGED
@@ -5,10 +5,11 @@ module KubernetesDeploy
     TIMEOUT = 5.minutes
     attr_reader :desired_replicas, :pods
 
-    def initialize(namespace:, context:, definition:, logger:, parent: nil,
+    def initialize(namespace:, context:, definition:, logger:, parent: nil, deploy_started_at: nil)
       @parent = parent
-      @
+      @deploy_started_at = deploy_started_at
       @rollout_data = { "replicas" => 0 }
+      @desired_replicas = -1
       @pods = []
       super(namespace: namespace, context: context, definition: definition, logger: logger)
     end
@@ -22,8 +23,9 @@ module KubernetesDeploy
       if rs_data.present?
         @found = true
         @desired_replicas = rs_data["spec"]["replicas"].to_i
-        @rollout_data = { "replicas" => 0 }.merge(
-          .slice("replicas", "availableReplicas", "readyReplicas")
+        @rollout_data = { "replicas" => 0 }.merge(
+          rs_data["status"].slice("replicas", "availableReplicas", "readyReplicas")
+        )
         @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas.chop.pluralize(num)}" }.join(", ")
         @pods = find_pods(rs_data)
       else # reset
@@ -31,6 +33,7 @@ module KubernetesDeploy
         @rollout_data = { "replicas" => 0 }
         @status = nil
         @pods = []
+        @desired_replicas = -1
       end
     end
 
data/lib/kubernetes-deploy/kubernetes_resource/statefulservice.rb CHANGED
@@ -7,7 +7,7 @@ module KubernetesDeploy
     end
 
     def deploy_succeeded?
-      return false unless
+      return false unless deploy_started?
 
       unless @success_assumption_warning_shown
         @logger.warn("Don't know how to monitor resources of type #{type}. Assuming #{id} deployed successfully.")
data/lib/kubernetes-deploy/kubernetes_resource/topic.rb CHANGED
@@ -7,7 +7,7 @@ module KubernetesDeploy
     end
 
     def deploy_succeeded?
-      return false unless
+      return false unless deploy_started?
 
       unless @success_assumption_warning_shown
         @logger.warn("Don't know how to monitor resources of type #{type}. Assuming #{id} deployed successfully.")
data/lib/kubernetes-deploy/resource_watcher.rb CHANGED
@@ -24,11 +24,12 @@ module KubernetesDeploy
       delay_sync_until = Time.now.utc + delay_sync # don't pummel the API if the sync is fast
 
       KubernetesDeploy::Concurrency.split_across_threads(remainder, &:sync)
-
+      new_successes, remainder = remainder.partition(&:deploy_succeeded?)
+      new_failures, remainder = remainder.partition(&:deploy_failed?)
+      new_timeouts, remainder = remainder.partition(&:deploy_timed_out?)
 
-      if
-
-        report_what_just_happened(newly_finished_resources, watch_time)
+      if new_successes.present? || new_failures.present? || new_timeouts.present?
+        report_what_just_happened(new_successes, new_failures, new_timeouts)
         report_what_is_left(remainder, reminder: false)
         last_message_logged_at = Time.now.utc
       elsif due_for_reminder?(last_message_logged_at, reminder_interval)
@@ -41,19 +42,20 @@ module KubernetesDeploy
 
     private
 
-    def report_what_just_happened(
-
-
-      new_successes, new_failures = resources.partition(&:deploy_succeeded?)
+    def report_what_just_happened(new_successes, new_failures, new_timeouts)
+      watch_time = (Time.now.utc - @deploy_started_at).round(1)
       new_failures.each do |resource|
-
-
-
-
-
+        resource.report_status_to_statsd(watch_time)
+        @logger.error("#{resource.id} failed to #{@operation_name} after #{watch_time}s")
+      end
+
+      new_timeouts.each do |resource|
+        resource.report_status_to_statsd(watch_time)
+        @logger.error("#{resource.id} rollout timed out after #{watch_time}s")
       end
 
       if new_successes.present?
+        new_successes.each { |r| r.report_status_to_statsd(watch_time) }
         success_string = ColorizedString.new("Successfully #{@operation_name}ed in #{watch_time}s:").green
         @logger.info("#{success_string} #{new_successes.map(&:id).join(', ')}")
       end
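The polling loop now partitions the remaining resources into successes, failures, and timeouts on every tick, so timeouts get their own log line and each finished resource reports to statsd exactly once. A small sketch of the partition cascade with plain structs standing in for resources:

```ruby
# Plain structs stand in for KubernetesResource objects (illustrative only).
Resource = Struct.new(:id, :state) do
  def deploy_succeeded?; state == :success; end
  def deploy_failed?;    state == :failed;  end
  def deploy_timed_out?; state == :timeout; end
end

remainder = [Resource.new("web", :success), Resource.new("worker", :timeout),
             Resource.new("cron", :pending), Resource.new("db", :failed)]

# Each partition shrinks the remainder, so a resource lands in exactly one bucket.
new_successes, remainder = remainder.partition(&:deploy_succeeded?)
new_failures,  remainder = remainder.partition(&:deploy_failed?)
new_timeouts,  remainder = remainder.partition(&:deploy_timed_out?)

p new_successes.map(&:id) # => ["web"]
p new_failures.map(&:id)  # => ["db"]
p new_timeouts.map(&:id)  # => ["worker"]
p remainder.map(&:id)     # => ["cron"] (still being watched next tick)
```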
data/lib/kubernetes-deploy/restart_task.rb CHANGED
@@ -80,7 +80,7 @@ module KubernetesDeploy
       kubeclient_resources.map do |d|
         definition = d.to_h.deep_stringify_keys
         r = Deployment.new(namespace: @namespace, context: @context, definition: definition, logger: @logger)
-        r.
+        r.deploy_started_at = started # we don't care what happened to the resource before the restart cmd ran
         r
       end
     end
data/shipit.yml ADDED
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: kubernetes-deploy
 version: !ruby/object:Gem::Version
-  version: 0.13.0
+  version: 0.14.0
 platform: ruby
 authors:
 - Katrina Verey
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2017-11-
+date: 2017-11-30 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -203,6 +203,7 @@ executables:
 extensions: []
 extra_rdoc_files: []
 files:
+- ".buildkite/pipeline.nightly.yml"
 - ".buildkite/pipeline.yml"
 - ".gitignore"
 - ".rubocop.yml"
@@ -224,6 +225,7 @@ files:
 - lib/kubernetes-deploy.rb
 - lib/kubernetes-deploy/concurrency.rb
 - lib/kubernetes-deploy/deferred_summary_logging.rb
+- lib/kubernetes-deploy/deploy_task.rb
 - lib/kubernetes-deploy/ejson_secret_provisioner.rb
 - lib/kubernetes-deploy/errors.rb
 - lib/kubernetes-deploy/formatted_logger.rb
@@ -255,7 +257,6 @@ files:
 - lib/kubernetes-deploy/kubernetes_resource/topic.rb
 - lib/kubernetes-deploy/resource_watcher.rb
 - lib/kubernetes-deploy/restart_task.rb
-- lib/kubernetes-deploy/runner.rb
 - lib/kubernetes-deploy/runner_task.rb
 - lib/kubernetes-deploy/statsd.rb
 - lib/kubernetes-deploy/version.rb
@@ -264,6 +265,7 @@ files:
 - screenshots/missing-secret-fail.png
 - screenshots/success.png
 - screenshots/test-output.png
+- shipit.yml
 homepage: https://github.com/Shopify/kubernetes-deploy
 licenses:
 - MIT