kubernetes-deploy 0.7.8 → 0.7.9
- checksums.yaml +4 -4
- data/lib/kubernetes-deploy/kubernetes_resource.rb +38 -26
- data/lib/kubernetes-deploy/kubernetes_resource/deployment.rb +51 -47
- data/lib/kubernetes-deploy/kubernetes_resource/pod.rb +51 -48
- data/lib/kubernetes-deploy/kubernetes_resource/replica_set.rb +105 -0
- data/lib/kubernetes-deploy/kubernetes_resource/service.rb +23 -1
- data/lib/kubernetes-deploy/restart_task.rb +2 -1
- data/lib/kubernetes-deploy/runner.rb +34 -53
- data/lib/kubernetes-deploy/version.rb +1 -1
- metadata +3 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 13dbeb21aaaa5ab8c43c1f7fb6737ad1a9c2f735
+  data.tar.gz: 817ac9680c47685d2ecc9257952830baab8ee4ef
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a613afd9f8697c843223f20e70b7c22ec174af2d1f0394e6978d3644900b9713f6d6480c7897074c9cf77d594cdd37ee8dfa5ef55f05d240c69e0e226b61a709
+  data.tar.gz: 8247c8ca0ac3fdc7ef2a867e0bb2207741fc86894ad5b7ae80e12f71c29bbecc898be64a94175ca4e57a5f3344ad03976eb36d34f5ee284b52598a8caef81b54
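These checksums are plain SHA1 and SHA512 hex digests of the two archives packed inside the .gem file. A minimal sketch of how such values are produced (the local file name is an assumption):

    require "digest"

    # Digest the gem's data archive the way RubyGems records it in checksums.yaml.
    data = File.binread("data.tar.gz") # assumed to exist in the working directory
    puts Digest::SHA1.hexdigest(data)
    puts Digest::SHA512.hexdigest(data)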
data/lib/kubernetes-deploy/kubernetes_resource.rb CHANGED
@@ -22,27 +22,15 @@ module KubernetesDeploy
       If you have reason to believe it will succeed, retry the deploy to continue to monitor the rollout.
       MSG
 
-    def self.for_type(type:, name:, namespace:, context:, file:, logger:)
-      subclass = case type
-      when 'cloudsql' then Cloudsql
-      when 'configmap' then ConfigMap
-      when 'deployment' then Deployment
-      when 'pod' then Pod
-      when 'redis' then Redis
-      when 'bugsnag' then Bugsnag
-      when 'ingress' then Ingress
-      when 'persistentvolumeclaim' then PersistentVolumeClaim
-      when 'service' then Service
-      when 'podtemplate' then PodTemplate
-      when 'poddisruptionbudget' then PodDisruptionBudget
-      end
-
-      opts = { name: name, namespace: namespace, context: context, file: file, logger: logger }
-      if subclass
-        subclass.new(**opts)
+    def self.build(namespace:, context:, definition:, logger:)
+      opts = { namespace: namespace, context: context, definition: definition, logger: logger }
+      if KubernetesDeploy.const_defined?(definition["kind"])
+        klass = KubernetesDeploy.const_get(definition["kind"])
+        klass.new(**opts)
       else
         inst = new(**opts)
-        inst.tap { |r| r.type = type }
+        inst.type = definition["kind"]
+        inst
       end
     end
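The new build factory replaces the hand-maintained type map: if the template's "kind" names a constant in the KubernetesDeploy namespace, that class is instantiated; anything else falls back to the generic base class. A standalone sketch of the same pattern, with illustrative names that are not the gem's:

    # Dispatch on a "kind" string via const_defined?/const_get instead of a
    # case statement. Module and class names below are hypothetical.
    module Resources
      class Base; end
      class Deployment < Base; end
    end

    def build(kind)
      if Resources.const_defined?(kind)
        Resources.const_get(kind).new
      else
        Resources::Base.new # unknown kinds get the generic wrapper
      end
    end

    puts build("Deployment").class # => Resources::Deployment
    puts build("CronJob").class    # => Resources::Base

One consequence of this design is that a newly added resource class is discovered automatically, at the cost of trusting the template's "kind" to match a constant name.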
@@ -54,19 +42,28 @@ module KubernetesDeploy
       self.class.timeout
     end
 
-    def initialize(name:, namespace:, context:, file:, logger:)
+    def initialize(namespace:, context:, definition:, logger:)
       # subclasses must also set these if they define their own initializer
-      @name = name
+      @name = definition.fetch("metadata", {})["name"]
+      unless @name.present?
+        logger.summary.add_paragraph("Rendered template content:\n#{definition.to_yaml}")
+        raise FatalDeploymentError, "Template is missing required field metadata.name"
+      end
+
       @namespace = namespace
       @context = context
-      @file = file
       @logger = logger
+      @definition = definition
     end
 
     def id
       "#{type}/#{name}"
     end
 
+    def file_path
+      file.path
+    end
+
     def sync
     end
@@ -139,10 +136,11 @@ module KubernetesDeploy
       if container_logs.blank? || container_logs.values.all?(&:blank?)
         helpful_info << " - Logs: #{DEBUG_RESOURCE_NOT_FOUND_MESSAGE}"
       else
-
-
-
-
+        sorted_logs = container_logs.sort_by { |_, log_lines| log_lines.length }
+        sorted_logs.each do |identifier, log_lines|
+          helpful_info << " - Logs from container '#{identifier}' (last #{LOG_LINE_COUNT} lines shown):"
+          log_lines.each do |line|
+            helpful_info << "      #{line}"
           end
         end
       end
@@ -241,5 +239,19 @@ module KubernetesDeploy
         "#{@reason}: #{@message} (#{@count} events)"
       end
     end
+
+    private
+
+    def file
+      @file ||= create_definition_tempfile
+    end
+
+    def create_definition_tempfile
+      file = Tempfile.new(["#{type}-#{name}", ".yml"])
+      file.write(YAML.dump(@definition))
+      file
+    ensure
+      file.close if file
+    end
   end
 end
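Resources are now built from in-memory definitions, so the base class only materializes a tempfile the first time a file path is actually needed for kubectl. A self-contained sketch of the pattern (names are illustrative):

    require "tempfile"
    require "yaml"

    # Write a definition hash to a tempfile on first use; close flushes the
    # buffer but leaves the path readable until the object is finalized.
    definition = { "kind" => "ConfigMap", "metadata" => { "name" => "example" } }
    file = Tempfile.new(["ConfigMap-example", ".yml"])
    file.write(YAML.dump(definition))
    file.close
    puts File.read(file.path)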
data/lib/kubernetes-deploy/kubernetes_resource/deployment.rb CHANGED
@@ -4,80 +4,84 @@ module KubernetesDeploy
     TIMEOUT = 5.minutes
 
     def sync
-
+      raw_json, _err, st = kubectl.run("get", type, @name, "--output=json")
       @found = st.success?
-      @rollout_data = {}
-      @status = nil
-      @representative_pod = nil
-      @pods = []
 
       if @found
-
-
-        @
-
-
-
-
-
-
-        pod = Pod.new(
-          name: pod_name,
-          namespace: namespace,
-          context: context,
-          file: nil,
-          parent: "#{@name.capitalize} deployment",
-          logger: @logger
-        )
-        pod.deploy_started = @deploy_started
-        pod.interpret_json_data(pod_json)
-
-        if !@representative_pod && pod_probably_new?(pod_json)
-          @representative_pod = pod
-        end
-        @pods << pod
-      end
-      end
+        deployment_data = JSON.parse(raw_json)
+        @latest_rs = find_latest_rs(deployment_data)
+        @rollout_data = { "replicas" => 0 }.merge(deployment_data["status"]
+          .slice("replicas", "updatedReplicas", "availableReplicas", "unavailableReplicas"))
+        @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas.chop.pluralize(num)}" }.join(", ")
+      else # reset
+        @latest_rs = nil
+        @rollout_data = { "replicas" => 0 }
+        @status = nil
       end
     end
 
-    def fetch_logs
-      @representative_pod ? @representative_pod.fetch_logs : {}
-    end
-
     def fetch_events
       own_events = super
-      return own_events unless @representative_pod
-      own_events.merge(@representative_pod.fetch_events)
+      return own_events unless @latest_rs.present?
+      own_events.merge(@latest_rs.fetch_events)
+    end
+
+    def fetch_logs
+      return {} unless @latest_rs.present?
+      @latest_rs.fetch_logs
     end
 
     def deploy_succeeded?
-      return false unless @representative_pod
-
+      return false unless @latest_rs
+
+      @latest_rs.deploy_succeeded? &&
+      @latest_rs.desired_replicas == desired_replicas && # latest RS fully scaled up
       @rollout_data["updatedReplicas"].to_i == @rollout_data["replicas"].to_i &&
       @rollout_data["updatedReplicas"].to_i == @rollout_data["availableReplicas"].to_i
     end
 
     def deploy_failed?
-
-      @pods.present? && @pods.all?(&:deploy_failed?)
+      @latest_rs && @latest_rs.deploy_failed?
    end
 
     def deploy_timed_out?
-
-      super || @pods.present? && @pods.all?(&:deploy_timed_out?)
+      super || @latest_rs && @latest_rs.deploy_timed_out?
     end
 
     def exists?
       @found
     end
 
+    def desired_replicas
+      @definition["spec"]["replicas"].to_i
+    end
+
     private
 
-    def pod_probably_new?(pod_json)
-
-
-
+    def find_latest_rs(deployment_data)
+      label_string = deployment_data["spec"]["selector"]["matchLabels"].map { |k, v| "#{k}=#{v}" }.join(",")
+      raw_json, _err, st = kubectl.run("get", "replicasets", "--output=json", "--selector=#{label_string}")
+      return unless st.success?
+
+      all_rs_data = JSON.parse(raw_json)["items"]
+      current_revision = deployment_data["metadata"]["annotations"]["deployment.kubernetes.io/revision"]
+
+      latest_rs_data = all_rs_data.find do |rs|
+        rs["metadata"]["ownerReferences"].any? { |ref| ref["uid"] == deployment_data["metadata"]["uid"] } &&
+          rs["metadata"]["annotations"]["deployment.kubernetes.io/revision"] == current_revision
+      end
+      return unless latest_rs_data.present?
+
+      rs = ReplicaSet.new(
+        namespace: namespace,
+        context: context,
+        definition: latest_rs_data,
+        logger: @logger,
+        parent: "#{@name.capitalize} deployment",
+        deploy_started: @deploy_started
+      )
+      rs.sync(latest_rs_data)
+      rs
    end
  end
end
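The deployment now delegates progress tracking to its newest ReplicaSet, identified by matching owner references plus the deployment's revision annotation. A sketch of that selection logic over assumed input shapes:

    # Pick the ReplicaSet owned by the deployment whose revision annotation
    # equals the deployment's current revision (sample hashes, not live data).
    REVISION_KEY = "deployment.kubernetes.io/revision"

    def latest_rs(deployment, replica_sets)
      current = deployment["metadata"]["annotations"][REVISION_KEY]
      replica_sets.find do |rs|
        rs["metadata"]["ownerReferences"].any? { |ref| ref["uid"] == deployment["metadata"]["uid"] } &&
          rs["metadata"]["annotations"][REVISION_KEY] == current
      end
    end

    deployment = { "metadata" => { "uid" => "d1", "annotations" => { REVISION_KEY => "2" } } }
    replica_sets = [
      { "metadata" => { "uid" => "rs1", "annotations" => { REVISION_KEY => "1" }, "ownerReferences" => [{ "uid" => "d1" }] } },
      { "metadata" => { "uid" => "rs2", "annotations" => { REVISION_KEY => "2" }, "ownerReferences" => [{ "uid" => "d1" }] } },
    ]
    puts latest_rs(deployment, replica_sets)["metadata"]["uid"] # => rs2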
data/lib/kubernetes-deploy/kubernetes_resource/pod.rb CHANGED
@@ -2,52 +2,37 @@
 module KubernetesDeploy
   class Pod < KubernetesResource
     TIMEOUT = 10.minutes
-    SUSPICIOUS_CONTAINER_STATES = %w(ImagePullBackOff RunContainerError ErrImagePull).freeze
+    SUSPICIOUS_CONTAINER_STATES = %w(ImagePullBackOff RunContainerError ErrImagePull CrashLoopBackOff).freeze
 
-    def initialize(name:, namespace:, context:, file:, parent: nil, logger:)
-      @name = name
-      @namespace = namespace
-      @context = context
-      @file = file
+    def initialize(namespace:, context:, definition:, logger:, parent: nil, deploy_started: nil)
       @parent = parent
-      @
-
-
-
-
-      if @found = st.success?
-        pod_data = JSON.parse(out)
-        interpret_json_data(pod_data)
-      else # reset
-        @status = @phase = nil
-        @ready = false
-        @containers = []
+      @deploy_started = deploy_started
+      @containers = definition.fetch("spec", {}).fetch("containers", {}).map { |c| c["name"] }
+      unless @containers.present?
+        logger.summary.add_paragraph("Rendered template content:\n#{definition.to_yaml}")
+        raise FatalDeploymentError, "Template is missing required field spec.containers"
       end
-
+      super(namespace: namespace, context: context, definition: definition, logger: logger)
     end
 
-    def interpret_json_data(pod_data)
-
-
-
-      if @deploy_started && pod_data["status"]["containerStatuses"]
-        pod_data["status"]["containerStatuses"].each do |status|
-          waiting_state = status["state"]["waiting"] if status["state"]
-          reason = waiting_state["reason"] if waiting_state
-          next unless SUSPICIOUS_CONTAINER_STATES.include?(reason)
-          @logger.warn("#{id} has container in state #{reason} (#{waiting_state['message']})")
-        end
-      end
+    def sync(pod_data = nil)
+      if pod_data.blank?
+        raw_json, _err, st = kubectl.run("get", type, @name, "-a", "--output=json")
+        pod_data = JSON.parse(raw_json) if st.success?
+      end
 
-      if
-      @
-
-
-
-
-
-      @
+      if pod_data.present?
+        @found = true
+        interpret_pod_status_data(pod_data["status"], pod_data["metadata"]) # sets @phase, @status and @ready
+        if @deploy_started
+          log_suspicious_states(pod_data["status"].fetch("containerStatuses", []))
+        end
+      else # reset
+        @found = false
+        @phase = @status = nil
+        @ready = false
       end
+      display_logs if unmanaged? && deploy_succeeded?
     end
 
     def deploy_succeeded?
@@ -63,17 +48,16 @@ module KubernetesDeploy
     end
 
     def exists?
-
+      @found
     end
 
     # Returns a hash in the following format:
     # {
-    #   "
-    #   "
+    #   "app" => ["array of log lines", "received from app container"],
+    #   "nginx" => ["array of log lines", "received from nginx container"]
     # }
     def fetch_logs
       return {} unless exists? && @containers.present?
-
       @containers.each_with_object({}) do |container_name, container_logs|
         cmd = [
          "logs",
@@ -83,12 +67,33 @@ module KubernetesDeploy
        ]
        cmd << "--tail=#{LOG_LINE_COUNT}" unless unmanaged?
        out, _err, _st = kubectl.run(*cmd)
-        container_logs[
+        container_logs[container_name] = out.split("\n")
      end
    end
 
    private
 
+    def interpret_pod_status_data(status_data, metadata)
+      @status = @phase = (metadata["deletionTimestamp"] ? "Terminating" : status_data["phase"])
+
+      if @phase == "Failed" && status_data['reason'].present?
+        @status += " (Reason: #{status_data['reason']})"
+      elsif @phase != "Terminating"
+        ready_condition = status_data.fetch("conditions", []).find { |condition| condition["type"] == "Ready" }
+        @ready = ready_condition.present? && (ready_condition["status"] == "True")
+        @status += " (Ready: #{@ready})"
+      end
+    end
+
+    def log_suspicious_states(container_statuses)
+      container_statuses.each do |status|
+        waiting_state = status["state"]["waiting"] if status["state"]
+        reason = waiting_state["reason"] if waiting_state
+        next unless SUSPICIOUS_CONTAINER_STATES.include?(reason)
+        @logger.warn("#{id} has container in state #{reason} (#{waiting_state['message']})")
+      end
+    end
+
    def unmanaged?
      @parent.blank?
    end
@@ -104,13 +109,11 @@ module KubernetesDeploy
 
      container_logs.each do |container_identifier, logs|
        if logs.blank?
-          @logger.warn("No logs found for #{container_identifier}")
+          @logger.warn("No logs found for container '#{container_identifier}'")
        else
          @logger.blank_line
-          @logger.info("Logs from #{container_identifier}:")
-          logs.each do |line|
-            @logger.info("[#{container_identifier}]\t#{line}")
-          end
+          @logger.info("Logs from #{id} container '#{container_identifier}':")
+          logs.each { |line| @logger.info("\t#{line}") }
          @logger.blank_line
        end
      end
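Pod status interpretation now lives in interpret_pod_status_data; its core is the Ready-condition check. A standalone sketch, using plain nil checks where the gem uses ActiveSupport's present?:

    # A pod counts as ready only when its "Ready" condition reports "True".
    def pod_ready?(status_data)
      ready = status_data.fetch("conditions", []).find { |c| c["type"] == "Ready" }
      !ready.nil? && ready["status"] == "True"
    end

    puts pod_ready?("conditions" => [{ "type" => "Ready", "status" => "True" }])  # => true
    puts pod_ready?("conditions" => [{ "type" => "Ready", "status" => "False" }]) # => false
    puts pod_ready?({})                                                           # => false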
data/lib/kubernetes-deploy/kubernetes_resource/replica_set.rb ADDED
@@ -0,0 +1,105 @@
+# frozen_string_literal: true
+module KubernetesDeploy
+  class ReplicaSet < KubernetesResource
+    TIMEOUT = 5.minutes
+
+    def initialize(namespace:, context:, definition:, logger:, parent: nil, deploy_started: nil)
+      @parent = parent
+      @deploy_started = deploy_started
+      @rollout_data = { "replicas" => 0 }
+      @pods = []
+      super(namespace: namespace, context: context, definition: definition, logger: logger)
+    end
+
+    def sync(rs_data = nil)
+      if rs_data.blank?
+        raw_json, _err, st = kubectl.run("get", type, @name, "--output=json")
+        rs_data = JSON.parse(raw_json) if st.success?
+      end
+
+      if rs_data.present?
+        @found = true
+        @rollout_data = { "replicas" => 0 }.merge(rs_data["status"]
+          .slice("replicas", "availableReplicas", "readyReplicas"))
+        @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas.chop.pluralize(num)}" }.join(", ")
+        @pods = find_pods(rs_data)
+      else # reset
+        @found = false
+        @rollout_data = { "replicas" => 0 }
+        @status = nil
+        @pods = []
+      end
+    end
+
+    def deploy_succeeded?
+      @rollout_data["replicas"].to_i == @rollout_data["availableReplicas"].to_i &&
+      @rollout_data["replicas"].to_i == @rollout_data["readyReplicas"].to_i
+    end
+
+    def deploy_failed?
+      @pods.present? && @pods.all?(&:deploy_failed?)
+    end
+
+    def deploy_timed_out?
+      super || @pods.present? && @pods.all?(&:deploy_timed_out?)
+    end
+
+    def exists?
+      @found
+    end
+
+    def desired_replicas
+      @definition["spec"]["replicas"].to_i
+    end
+
+    def fetch_events
+      own_events = super
+      return own_events unless @pods.present?
+      own_events.merge(@pods.first.fetch_events)
+    end
+
+    def fetch_logs
+      container_names.each_with_object({}) do |container_name, container_logs|
+        out, _err, _st = kubectl.run(
+          "logs",
+          id,
+          "--container=#{container_name}",
+          "--since-time=#{@deploy_started.to_datetime.rfc3339}",
+          "--tail=#{LOG_LINE_COUNT}"
+        )
+        container_logs[container_name] = out.split("\n")
+      end
+    end
+
+    private
+
+    def unmanaged?
+      @parent.blank?
+    end
+
+    def container_names
+      @definition["spec"]["template"]["spec"]["containers"].map { |c| c["name"] }
+    end
+
+    def find_pods(rs_data)
+      label_string = rs_data["spec"]["selector"]["matchLabels"].map { |k, v| "#{k}=#{v}" }.join(",")
+      raw_json, _err, st = kubectl.run("get", "pods", "-a", "--output=json", "--selector=#{label_string}")
+      return [] unless st.success?
+
+      all_pods = JSON.parse(raw_json)["items"]
+      all_pods.each_with_object([]) do |pod_data, relevant_pods|
+        next unless pod_data["metadata"]["ownerReferences"].any? { |ref| ref["uid"] == rs_data["metadata"]["uid"] }
+        pod = Pod.new(
+          namespace: namespace,
+          context: context,
+          definition: pod_data,
+          logger: @logger,
+          parent: "#{@name.capitalize} replica set",
+          deploy_started: @deploy_started
+        )
+        pod.sync(pod_data)
+        relevant_pods << pod
+      end
+    end
+  end
+end
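The human-readable status built in sync relies on ActiveSupport inflections: each counter key is singularized with chop and re-pluralized by count. A quick demonstration:

    require "active_support/core_ext/string"

    # "replicas".chop => "replica"; "replica".pluralize(2) => "replicas".
    rollout_data = { "replicas" => 2, "availableReplicas" => 1 }
    status = rollout_data.map { |key, num| "#{num} #{key.chop.pluralize(num)}" }.join(", ")
    puts status # => "2 replicas, 1 availableReplica"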
data/lib/kubernetes-deploy/kubernetes_resource/service.rb CHANGED
@@ -16,7 +16,11 @@ module KubernetesDeploy
     end
 
     def deploy_succeeded?
-
+      if exposes_zero_replica_deployment?
+        @num_endpoints == 0
+      else
+        @num_endpoints > 0
+      end
     end
 
     def deploy_failed?
@@ -33,5 +37,23 @@ module KubernetesDeploy
     def exists?
       @found
     end
+
+    private
+
+    def exposes_zero_replica_deployment?
+      related_deployment_replicas && related_deployment_replicas == 0
+    end
+
+    def related_deployment_replicas
+      @related_deployment_replicas ||= begin
+        selector = @definition["spec"]["selector"].map { |k, v| "#{k}=#{v}" }.join(",")
+        raw_json, _err, st = kubectl.run("get", "deployments", "--selector=#{selector}", "--output=json")
+        return unless st.success?
+
+        deployments = JSON.parse(raw_json)["items"]
+        return unless deployments.length == 1
+        deployments.first["spec"]["replicas"].to_i
+      end
+    end
   end
 end
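The new rule handles services that front a deployment deliberately scaled to zero: such services succeed with zero endpoints, while all others still need at least one. A sketch of the decision in isolation:

    # Assumed inputs: the service's endpoint count and the replica count of
    # the single deployment matched by its selector (nil if lookup failed).
    def service_deploy_succeeded?(num_endpoints, related_deployment_replicas)
      if related_deployment_replicas == 0
        num_endpoints == 0 # intentionally scaled down: no endpoints expected
      else
        num_endpoints > 0
      end
    end

    puts service_deploy_succeeded?(0, 0) # => true
    puts service_deploy_succeeded?(0, 2) # => false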
data/lib/kubernetes-deploy/restart_task.rb CHANGED
@@ -70,7 +70,8 @@ module KubernetesDeploy
 
     def wait_for_rollout(kubeclient_resources)
       resources = kubeclient_resources.map do |d|
-
+        definition = d.to_h.deep_stringify_keys
+        Deployment.new(namespace: @namespace, context: @context, definition: definition, logger: @logger)
       end
       watcher = ResourceWatcher.new(resources, logger: @logger)
       watcher.run
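kubeclient objects convert to symbol-keyed hashes, so the restart task normalizes them with ActiveSupport's deep_stringify_keys before handing them to the resource classes, which index with string keys. For example:

    require "active_support/core_ext/hash"

    # Symbol-keyed hash (as a kubeclient to_h would produce) to string keys.
    definition = { metadata: { name: "web" }, spec: { replicas: 2 } }.deep_stringify_keys
    puts definition["metadata"]["name"] # => web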
data/lib/kubernetes-deploy/runner.rb CHANGED
@@ -18,6 +18,7 @@ require 'kubernetes-deploy/kubernetes_resource'
   pod_template
   bugsnag
   pod_disruption_budget
+  replica_set
 ).each do |subresource|
   require "kubernetes-deploy/kubernetes_resource/#{subresource}"
 end
@@ -181,14 +182,13 @@ module KubernetesDeploy
     def find_bad_file_from_kubectl_output(stderr)
       # Output example:
       # Error from server (BadRequest): error when creating "/path/to/configmap-gqq5oh.yml20170411-33615-t0t3m":
-      match = stderr.match(%r{BadRequest.*"(?<path>\/\S+\.ya?ml\S
+      match = stderr.match(%r{BadRequest.*"(?<path>\/\S+\.ya?ml\S*)"})
       return unless match
 
       path = match[:path]
       if path.present? && File.file?(path)
-
+        File.read(path)
       end
-      [File.basename(path, ".*"), suspicious_file]
     end
 
     def deploy_has_priority_resources?(resources)
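The new pattern can be exercised against the example in the comment; the trailing \S* lets it capture tempfile suffixes appended after the .yml extension:

    stderr = 'Error from server (BadRequest): error when creating ' \
             '"/path/to/configmap-gqq5oh.yml20170411-33615-t0t3m": ...'
    match = stderr.match(%r{BadRequest.*"(?<path>\/\S+\.ya?ml\S*)"})
    puts match[:path] # => /path/to/configmap-gqq5oh.yml20170411-33615-t0t3m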
@@ -217,48 +217,38 @@ module KubernetesDeploy
       Dir.foreach(@template_dir) do |filename|
         next unless filename.end_with?(".yml.erb", ".yml", ".yaml", ".yaml.erb")
 
-        split_templates(filename) do |tempfile|
-
-
-          resources <<
-
-          @logger.info " - #{resource_id}"
+        split_templates(filename) do |r_def|
+          r = KubernetesResource.build(namespace: @namespace, context: @context, logger: @logger, definition: r_def)
+          validate_template_via_dry_run(r.file_path, filename)
+          resources << r
+          @logger.info " - #{r.id}"
         end
       end
       resources
     end
 
-    def
-      command = ["create", "-f",
-
+    def validate_template_via_dry_run(file_path, original_filename)
+      command = ["create", "-f", file_path, "--dry-run", "--output=name"]
+      _, err, st = kubectl.run(*command, log_failure: false)
+      return if st.success?
 
-
-
-
-
-
-
-
-
-      DEBUG_MSG
-      debug_msg += File.read(tempfile.path)
-      @logger.summary.add_paragraph(debug_msg)
+      debug_msg = <<-DEBUG_MSG.strip_heredoc
+        This usually means template '#{original_filename}' is not a valid Kubernetes template.
+        Error from kubectl:
+          #{err}
+        Rendered template content:
+      DEBUG_MSG
+      debug_msg += File.read(file_path)
+      @logger.summary.add_paragraph(debug_msg)
 
-
-      end
-      resource_id
+      raise FatalDeploymentError, "Kubectl dry run failed (command: #{Shellwords.join(command)})"
     end
 
     def split_templates(filename)
       file_content = File.read(File.join(@template_dir, filename))
       rendered_content = render_template(filename, file_content)
       YAML.load_stream(rendered_content) do |doc|
-
-
-        f = Tempfile.new(filename)
-        f.write(YAML.dump(doc))
-        f.close
-        yield f
+        yield doc unless doc.blank?
       end
     rescue Psych::SyntaxError => e
       debug_msg = <<-INFO.strip_heredoc
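split_templates now yields parsed documents instead of writing a tempfile per document; YAML.load_stream does the multi-document splitting. In isolation:

    require "yaml"

    # Each "---" document in a rendered template is yielded separately.
    rendered = "---\nkind: ConfigMap\n---\nkind: Service\n"
    YAML.load_stream(rendered) do |doc|
      puts doc["kind"] unless doc.nil? # the gem uses ActiveSupport's blank?
    end
    # => ConfigMap, then Service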
@@ -273,23 +263,14 @@ module KubernetesDeploy
     end
 
     def record_apply_failure(err)
-
-
-
-
-
-
-
-
-      Rendered template content:
-      HELPFUL_MESSAGE
-      debug_msg += file_content || "Failed to read file"
-      else
-        debug_msg = <<-FALLBACK_MSG
-          This usually means one of your templates is invalid, but we were unable to automatically identify which one.
-          Please inspect the error message from kubectl:
-          #{err}
-        FALLBACK_MSG
+      file_content = find_bad_file_from_kubectl_output(err)
+      debug_msg = <<-HELPFUL_MESSAGE.strip_heredoc
+        This usually means one of your templates is invalid.
+        Error from kubectl:
+          #{err}
+      HELPFUL_MESSAGE
+      if file_content
+        debug_msg += "Rendered template content:\n#{file_content}"
       end
 
       @logger.summary.add_paragraph(debug_msg)
@@ -376,9 +357,9 @@ module KubernetesDeploy
       r.deploy_started = Time.now.utc
       case r.deploy_method
       when :replace
-        _, _, replace_st = kubectl.run("replace", "-f", r.
+        _, _, replace_st = kubectl.run("replace", "-f", r.file_path, log_failure: false)
       when :replace_force
-        _, _, replace_st = kubectl.run("replace", "--force", "-f", r.
+        _, _, replace_st = kubectl.run("replace", "--force", "-f", r.file_path, log_failure: false)
       else
         # Fail Fast! This is a programmer mistake.
         raise ArgumentError, "Unexpected deploy method! (#{r.deploy_method.inspect})"
@@ -386,7 +367,7 @@ module KubernetesDeploy
 
       next if replace_st.success?
       # it doesn't exist so we can't replace it
-      _, err, create_st = kubectl.run("create", "-f", r.
+      _, err, create_st = kubectl.run("create", "-f", r.file_path, log_failure: false)
 
       next if create_st.success?
       raise FatalDeploymentError, <<-MSG.strip_heredoc
@@ -405,7 +386,7 @@ module KubernetesDeploy
       command = ["apply"]
       resources.each do |r|
         @logger.info("- #{r.id} (timeout: #{r.timeout}s)") if resources.length > 1
-        command.push("-f", r.
+        command.push("-f", r.file_path)
         r.deploy_started = Time.now.utc
       end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: kubernetes-deploy
 version: !ruby/object:Gem::Version
-  version: 0.7.8
+  version: 0.7.9
 platform: ruby
 authors:
 - Katrina Verey
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2017-
+date: 2017-07-04 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -206,6 +206,7 @@ files:
 - lib/kubernetes-deploy/kubernetes_resource/pod_disruption_budget.rb
 - lib/kubernetes-deploy/kubernetes_resource/pod_template.rb
 - lib/kubernetes-deploy/kubernetes_resource/redis.rb
+- lib/kubernetes-deploy/kubernetes_resource/replica_set.rb
 - lib/kubernetes-deploy/kubernetes_resource/service.rb
 - lib/kubernetes-deploy/resource_watcher.rb
 - lib/kubernetes-deploy/restart_task.rb