dapp 0.13.12 → 0.13.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: ef826e2fa06f5f2d5a3a805f2d1c1899606e77f6
-  data.tar.gz: fc77c87ea858e86b0c258539529cc081bee15b35
+  metadata.gz: 02f03bab43041fee2897358ca3262e4d3c712d69
+  data.tar.gz: 68a67d7bc551e1a4b27a40579dad44fd2c5509ec
 SHA512:
-  metadata.gz: 28ccc7920b25918c3ad23483ec7dd1ee81e4c187b4734ed6b0e4d31a911fe0fccd20decba500262fa1162692dace01fc0a53468b0c6e4f18cd5d5481290d7952
-  data.tar.gz: 26154a43ce123ca6f883701d49ddc6ba0fb1a930ef8fe61cb68612490029d3a5c2177a86fa562f2f8136a17bcb62e73e01be319e78328d96a6a0706b559c64d4
+  metadata.gz: 324d9d5c00cb1f4fe580ac4a0ea403559513e61287e9828b358223a7c8594f4717946c3baa6a8b832fc15c023c406bddb65d49a4a7673e96d8e1771c576c87d2
+  data.tar.gz: 494400490d133e1fc264c8cfd2352026a52c39a5f5644c6f7d917fc65d05c1d87fcf1a0c15e2148d76b175b1e1fe14678060100f9b344d8805f67f5f7a8339ff
@@ -8,6 +8,8 @@ en:
   dimg_not_run: "Dimg run failed!"
   git_branch_without_name: "Dimg has specific revision that isn't associated with a branch name!"
   ci_environment_required: 'CI environment required (Travis or GitLab CI)!'
+  kube:
+    deploy_timeout: "Deploy timeout!"
   dappfile:
     incorrect: "Dappfile with `%{error}`:\n%{message}"
   build:
data/lib/dapp.rb CHANGED
@@ -113,10 +113,16 @@ require 'dapp/kube/kubernetes/client/error'
 require 'dapp/kube/kubernetes/client/resource/base'
 require 'dapp/kube/kubernetes/client/resource/pod'
 require 'dapp/kube/kubernetes/client/resource/job'
+require 'dapp/kube/kubernetes/client/resource/deployment'
+require 'dapp/kube/kubernetes/client/resource/replicaset'
+require 'dapp/kube/kubernetes/client/resource/event'
 require 'dapp/kube/kubernetes/manager/base'
 require 'dapp/kube/kubernetes/manager/pod'
 require 'dapp/kube/kubernetes/manager/container'
 require 'dapp/kube/kubernetes/manager/job'
+require 'dapp/kube/kubernetes/manager/deployment'
+require 'dapp/kube/helm'
+require 'dapp/kube/helm/release'
 require 'dapp/kube/cli/command/base'
 require 'dapp/kube/cli/command/kube'
 require 'dapp/kube/cli/command/kube/deploy'
@@ -109,133 +109,6 @@ module Dapp
         _wait_for_deployment(d, old_d_revision: old_d_revision)
       end

-      # NOTICE: old_d_revision is currently printed only as debug information.
-      # NOTICE: deployment.kubernetes.io/revision does not change when the number of replicas
-      # NOTICE: changes, so waiting for a revision change is not a correct criterion by itself.
-      # NOTICE: However, the revision is reset when the deployment is updated, and waiting for
-      # NOTICE: this revision to be set again is currently one of the criteria for finishing the wait.
-      def _wait_for_deployment(d, old_d_revision: nil)
-        app.deployment.dapp.log_process("Waiting for kubernetes Deployment #{d['metadata']['name']} readiness") do
-          known_events_by_pod = {}
-
-          loop do
-            d_revision = d.fetch('metadata', {}).fetch('annotations', {}).fetch('deployment.kubernetes.io/revision', nil)
-
-            app.deployment.dapp.log_step("[#{Time.now}] Poll kubernetes Deployment status")
-            app.deployment.dapp.with_log_indent do
-              app.deployment.dapp.log_info("Target replicas: #{_field_value_for_log(d['spec']['replicas'])}")
-              app.deployment.dapp.log_info("Updated replicas: #{_field_value_for_log(d['status']['updatedReplicas'])} / #{_field_value_for_log(d['spec']['replicas'])}")
-              app.deployment.dapp.log_info("Available replicas: #{_field_value_for_log(d['status']['availableReplicas'])} / #{_field_value_for_log(d['spec']['replicas'])}")
-              app.deployment.dapp.log_info("Ready replicas: #{_field_value_for_log(d['status']['readyReplicas'])} / #{_field_value_for_log(d['spec']['replicas'])}")
-              app.deployment.dapp.log_info("Old deployment.kubernetes.io/revision: #{_field_value_for_log(old_d_revision)}")
-              app.deployment.dapp.log_info("Current deployment.kubernetes.io/revision: #{_field_value_for_log(d_revision)}")
-            end
-
-            rs = nil
-            if d_revision
-              # Find the actual, current ReplicaSet.
-              # If several ReplicaSets match this revision, take the oldest by creation date.
-              # kubectl does the same: https://github.com/kubernetes/kubernetes/blob/d86a01570ba243e8d75057415113a0ff4d68c96b/pkg/controller/deployment/util/deployment_util.go#L664
-              rs = app.deployment.kubernetes.replicaset_list['items']
-                .select do |_rs|
-                  Array(_rs['metadata']['ownerReferences']).any? do |owner_reference|
-                    owner_reference['uid'] == d['metadata']['uid']
-                  end
-                end
-                .select do |_rs|
-                  rs_revision = _rs.fetch('metadata', {}).fetch('annotations', {}).fetch('deployment.kubernetes.io/revision', nil)
-                  (rs_revision and (d_revision == rs_revision))
-                end
-                .sort_by do |_rs|
-                  Time.parse _rs['metadata']['creationTimestamp']
-                end.first
-            end
-
-            if rs
-              # Pods associated with the active ReplicaSet
-              rs_pods = app.deployment.kubernetes
-                .pod_list(labelSelector: labels.map{|k, v| "#{k}=#{v}"}.join(','))['items']
-                .select do |pod|
-                  Array(pod['metadata']['ownerReferences']).any? do |owner_reference|
-                    owner_reference['uid'] == rs['metadata']['uid']
-                  end
-                end
-
-              app.deployment.dapp.with_log_indent do
-                app.deployment.dapp.log_info("Pods:") if rs_pods.any?
-
-                rs_pods.each do |pod|
-                  app.deployment.dapp.with_log_indent do
-                    app.deployment.dapp.log_info("* #{pod['metadata']['name']}")
-
-                    known_events_by_pod[pod['metadata']['name']] ||= []
-                    pod_events = app.deployment.kubernetes
-                      .event_list(fieldSelector: "involvedObject.uid=#{pod['metadata']['uid']}")['items']
-                      .reject do |event|
-                        known_events_by_pod[pod['metadata']['name']].include? event['metadata']['uid']
-                      end
-
-                    if pod_events.any?
-                      pod_events.each do |event|
-                        app.deployment.dapp.with_log_indent do
-                          app.deployment.dapp.log_info("[#{event['metadata']['creationTimestamp']}] #{event['message']}")
-                        end
-                        known_events_by_pod[pod['metadata']['name']] << event['metadata']['uid']
-                      end
-                    end
-
-                    ready_condition = pod['status'].fetch('conditions', {}).find {|condition| condition['type'] == 'Ready'}
-                    next if (not ready_condition) or (ready_condition['status'] == 'True')
-
-                    if ready_condition['reason'] == 'ContainersNotReady'
-                      Array(pod['status']['containerStatuses']).each do |container_status|
-                        next if container_status['ready']
-
-                        waiting_reason = container_status.fetch('state', {}).fetch('waiting', {}).fetch('reason', nil)
-                        case waiting_reason
-                        when 'ImagePullBackOff', 'ErrImagePull'
-                          raise Error::Base,
-                                code: :image_not_found,
-                                data: {app: app.name,
-                                       pod_name: pod['metadata']['name'],
-                                       reason: container_status['state']['waiting']['reason'],
-                                       message: container_status['state']['waiting']['message']}
-                        when 'CrashLoopBackOff'
-                          raise Error::Base,
-                                code: :container_crash,
-                                data: {app: app.name,
-                                       pod_name: pod['metadata']['name'],
-                                       reason: container_status['state']['waiting']['reason'],
-                                       message: container_status['state']['waiting']['message']}
-                        end
-                      end
-                    else
-                      app.deployment.dapp.with_log_indent do
-                        app.deployment.dapp.log_warning("Unknown pod readiness condition reason '#{ready_condition['reason']}': #{ready_condition}")
-                      end
-                    end
-                  end # with_log_indent
-                end # rs_pods.each
-              end # with_log_indent
-            end
-
-            break if begin
-              d_revision and
-                d['spec']['replicas'] and
-                d['status']['updatedReplicas'] and
-                d['status']['availableReplicas'] and
-                d['status']['readyReplicas'] and
-                (d['status']['updatedReplicas'] >= d['spec']['replicas']) and
-                (d['status']['availableReplicas'] >= d['spec']['replicas']) and
-                (d['status']['readyReplicas'] >= d['spec']['replicas'])
-            end
-
-            sleep 1
-            d = app.deployment.kubernetes.deployment(d['metadata']['name'])
-          end
-        end
-      end
-
       def _field_value_for_log(value)
         value ? value : '-'
       end
@@ -35,6 +35,12 @@ BANNER
              default: [],
              proc: proc { |v| composite_options(:helm_secret_values) << v }

+      option :timeout,
+             long: '--timeout INTEGER_SECONDS',
+             default: nil,
+             description: 'Default timeout to wait for resources to become ready, 300 seconds by default.',
+             proc: proc {|v| Integer(v)}
+
       def run(argv = ARGV)
         self.class.parse_options(self, argv)
         repo = self.class.required_argument(self, 'repo')
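The option value lands in `self.options[:timeout]`, which the deploy command below hands to `Helm::Release` and to the `::Timeout` wrapper around the watch phase, falling back to 300 seconds when the flag is omitted. A hypothetical invocation (only the required `repo` argument and the new flag shown; other options omitted):

    dapp kube deploy registry.example.org/myapp --timeout 600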
@@ -13,27 +13,27 @@ module Dapp
         validate_repo_name!(repo)
         validate_tag_name!(image_version)

+        # TODO: Move the rollout process code into Helm::Manager
+
         with_kube_tmp_chart_dir do
           kube_copy_chart
           kube_helm_decode_secrets
           kube_generate_helm_chart_tpl

-          additional_values = [].tap do |options|
-            options.concat((kube_values_paths + kube_tmp_chart_secret_values_paths).map { |p| "--values #{p}" })
-          end
-
-          set_options = [].tap do |options|
-            options << "--set global.dapp.repo=#{repo}"
-            options << "--set global.dapp.image_version=#{image_version}"
-            options << "--set global.namespace=#{kube_namespace}"
-            options.concat(self.options[:helm_set_options].map { |opt| "--set #{opt}" })
-          end
-
-          hooks_jobs = kube_helm_hooks_jobs(additional_values, set_options)
-
-          kube_flush_hooks_jobs(hooks_jobs)
-
-          kube_run_deploy(additional_values, set_options, hooks_jobs: hooks_jobs)
+          release = Helm::Release.new(
+            self,
+            name: kube_release_name,
+            repo: repo,
+            image_version: image_version,
+            namespace: kube_namespace,
+            chart_path: kube_tmp_chart_path,
+            set: self.options[:helm_set_options],
+            values: [*kube_values_paths, *kube_tmp_chart_secret_values_paths],
+            deploy_timeout: self.options[:timeout] || 300
+          )
+
+          kube_flush_hooks_jobs(release)
+          kube_run_deploy(release)
         end
       end
@@ -94,34 +94,13 @@ module Dapp
       kube_tmp_chart_path('templates/_dapp_helpers.tpl').write(cont)
     end

-    def kube_flush_hooks_jobs(hooks_jobs)
-      return if (config_jobs_names = hooks_jobs.keys).empty?
-      config_jobs_names.select { |name| kube_job_list.include? name }.each do |name|
-        log_process("Delete hooks job `#{name}` for release #{kube_release_name} ", short: true) { kube_delete_job!(name) }
-      end
-    end
-
-    def kube_helm_hooks_jobs(additional_values, set_options)
-      generator = proc do |text|
-        text.split(/# Source.*|---/).reject {|c| c.strip.empty? }.map {|c| yaml_load(c) }.reduce({}) do |objects, c|
-          objects[c['kind']] ||= {}
-          objects[c['kind']][(c['metadata'] || {})['name']] = c
-          objects
+    def kube_flush_hooks_jobs(release)
+      release.hooks.values
+        .reject { |job| ['0', 'false'].include? job.annotations["dapp/recreate"].to_s }
+        .select { |job| kube_job_list.include? job.name }
+        .each do |job|
+          log_process("Delete hooks job `#{job.name}` for release #{release.name}", short: true) { kube_delete_job!(job.name) }
         end
-      end
-
-      args = [kube_release_name, kube_tmp_chart_path, additional_values, set_options, kube_helm_extra_options(dry_run: true)].flatten
-      output = shellout!("helm upgrade #{args.join(' ')}").stdout
-
-      manifest_start_index = output.lines.index("MANIFEST:\n") + 1
-      hook_start_index = output.lines.index("HOOKS:\n") + 1
-      configs = generator.call(output.lines[hook_start_index..manifest_start_index-2].join)
-
-      (configs['Job'] || {}).reject do |_, c|
-        c['metadata'] ||= {}
-        c['metadata']['annotations'] ||= {}
-        c['metadata']['annotations']['helm.sh/resource-policy'] == 'keep'
-      end
     end

     def kube_job_list
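The new `dapp/recreate` annotation gives charts a way to opt a hook job out of this pre-deploy deletion: only the string values '0' and 'false' skip it, everything else (including no annotation at all) keeps the delete-and-recreate behavior. A minimal sketch of the filter using the Resource::Job wrapper from this diff (the job spec itself is made up; constant paths are as referenced inside the Dapp::Kube namespace):

    job = Kubernetes::Client::Resource::Job.new(
      'kind' => 'Job',
      'metadata' => {
        'name' => 'db-migrate',
        'annotations' => {'helm.sh/hook' => 'pre-upgrade', 'dapp/recreate' => 'false'}
      }
    )

    # kube_flush_hooks_jobs rejects this job, so it is left in place before deploy:
    ['0', 'false'].include?(job.annotations['dapp/recreate'].to_s) # => true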
@@ -136,14 +115,12 @@ module Dapp
         end
       end

-      def kube_run_deploy(additional_values, set_options, hooks_jobs: {})
-        log_process("Deploy release #{kube_release_name}") do
-          release_exists = shellout("helm status #{kube_release_name}").status.success?
+      def kube_run_deploy(release)
+        log_process("Deploy release #{release.name}") do
+          release_exists = shellout("helm status #{release.name}").status.success?

-          hooks_jobs_by_type = hooks_jobs
-            .reduce({}) do |res, (job_name, job_spec)|
-              job = Kubernetes::Client::Resource::Job.new(job_spec)
+          watch_hooks_by_type = release.jobs.values
+            .reduce({}) do |res, job|
               if job.annotations['dapp/watch-logs'].to_s == 'true'
                 job.annotations['helm.sh/hook'].to_s.split(',').each do |hook_type|
                   res[hook_type] ||= []
@@ -159,44 +136,42 @@ module Dapp
             end
           end

-          watch_jobs = if release_exists
-            hooks_jobs_by_type['pre-upgrade'].to_a + hooks_jobs_by_type['post-upgrade'].to_a
+          watch_hooks = if release_exists
+            watch_hooks_by_type['pre-upgrade'].to_a + watch_hooks_by_type['post-upgrade'].to_a
           else
-            hooks_jobs_by_type['pre-install'].to_a + hooks_jobs_by_type['post-install'].to_a
+            watch_hooks_by_type['pre-install'].to_a + watch_hooks_by_type['post-install'].to_a
           end

-          watch_thr = Thread.new do
-            watch_jobs.each {|job| Kubernetes::Manager::Job.new(self, job.name).watch_till_done!}
+          watch_hooks_thr = Thread.new do
+            watch_hooks.each {|job| Kubernetes::Manager::Job.new(self, job.name).watch_till_done!}
+            puts "DONE!"
           end

-          args = [kube_release_name, kube_tmp_chart_path, additional_values, set_options, kube_helm_extra_options].flatten
-          kubernetes.create_namespace!(kube_namespace) unless kubernetes.namespace?(kube_namespace)
-          shellout! "helm upgrade #{args.join(' ')}", verbose: true
+          deployment_managers = release.deployments.values
+            .map {|deployment| Kubernetes::Manager::Deployment.new(self, deployment.name)}

-          watch_thr.join
-        end
-      end
+          deployment_managers.each(&:before_deploy)

-      def kube_check_helm_chart!
-        raise Error::Command, code: :project_helm_chart_not_found, data: { path: kube_chart_path } unless kube_chart_path.exist?
-      end
+          release.deploy!

-      def kube_helm_extra_options(dry_run: dry_run?)
-        [].tap do |options|
-          options << "--namespace #{kube_namespace}"
-          options << '--install'
+          deployment_managers.each(&:after_deploy)

-          unless ['1', 'true'].include? ENV['DAPP_HELM_WAIT_DISABLED'].to_s
-            options << '--wait'
-            timeout = (ENV['DAPP_HELM_WAIT_TIMEOUT'] || 120).to_i
-            options << "--timeout #{timeout}"
+          begin
+            ::Timeout::timeout(self.options[:timeout] || 300) do
+              watch_hooks_thr.join
+              deployment_managers.each {|deployment_manager| deployment_manager.watch_till_ready!}
+            end
+          rescue ::Timeout::Error
+            watch_hooks_thr.kill if watch_hooks_thr.alive?
+            raise Error::Base, code: :deploy_timeout
           end
-
-          options << '--dry-run' if dry_run
-          options << '--debug' if dry_run || log_verbose?
         end
       end

+      def kube_check_helm_chart!
+        raise Error::Command, code: :project_helm_chart_not_found, data: { path: kube_chart_path } unless kube_chart_path.exist?
+      end
+
       def kube_tmp_chart_secret_path(*path)
         kube_tmp_chart_path('decoded-secret', *path).tap { |p| p.parent.mkpath }
       end
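Two details of the hunk above are easy to miss: only hook jobs annotated `dapp/watch-logs: "true"` get the log-watching thread, and which hook types are watched depends on whether `helm status` reports an existing release. A sketch with a hypothetical hook job (annotation names are the ones the code reads):

    job = Kubernetes::Client::Resource::Job.new(
      'kind' => 'Job',
      'metadata' => {
        'name' => 'seed-data',
        'annotations' => {
          'helm.sh/hook'    => 'pre-install,pre-upgrade', # hypothetical hook list
          'dapp/watch-logs' => 'true'                     # opts in to log watching
        }
      }
    )
    # kube_run_deploy files this job under both 'pre-install' and 'pre-upgrade';
    # a first install watches pre-install/post-install hooks, while an upgrade
    # of an existing release watches pre-upgrade/post-upgrade hooks.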
@@ -223,6 +198,10 @@ module Dapp
       def kube_chart_secret_values_path
         kube_chart_path('secret-values.yaml').expand_path
       end
+
+      def kube_helm_manager
+        @kube_helm_manager ||= Helm::Manager.new(self)
+      end
     end
   end
 end
@@ -1,7 +1,11 @@
 module Dapp
   module Kube
     module Error
-      class Base < ::Dapp::Error::Base; end
+      class Base < ::Dapp::Error::Base
+        def initialize(net_status = {})
+          super({context: 'kube'}.merge(net_status))
+        end
+      end
     end
   end
 end
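Merging `context: 'kube'` into every kube error's net status is what ties `raise Error::Base, code: :deploy_timeout` (in kube_run_deploy above) to the `kube: deploy_timeout:` message added to the locale file at the top of this diff. A minimal sketch of the intended mapping, assuming dapp's usual net-status convention that the context plus the code select the translation key (here `en.kube.deploy_timeout`):

    begin
      raise Dapp::Kube::Error::Base, code: :deploy_timeout
    rescue Dapp::Kube::Error::Base => e
      # e now carries context: 'kube' (merged in by the initializer above) and
      # code: :deploy_timeout, which renders as the "Deploy timeout!" message.
    end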
@@ -0,0 +1,5 @@
+module Dapp
+  module Kube
+    module Helm ; end
+  end # Kube
+end # Dapp
@@ -0,0 +1,120 @@
+module Dapp
+  module Kube
+    class Helm::Release
+      include Helper::YAML
+
+      attr_reader :dapp
+
+      attr_reader :name
+      attr_reader :repo
+      attr_reader :image_version
+      attr_reader :namespace
+      attr_reader :chart_path
+      attr_reader :set
+      attr_reader :values
+      attr_reader :deploy_timeout
+
+      def initialize(dapp,
+                     name:, repo:, image_version:, namespace:, chart_path:,
+                     set: [], values: [], deploy_timeout: nil)
+        @dapp = dapp
+
+        @name = name
+        @repo = repo
+        @image_version = image_version
+        @namespace = namespace
+        @chart_path = chart_path
+        @set = set
+        @values = values
+        @deploy_timeout = deploy_timeout
+      end
+
+      def jobs
+        (resources_specs['Job'] || {}).map do |name, spec|
+          [name, Kubernetes::Client::Resource::Job.new(spec)]
+        end.to_h
+      end
+
+      def hooks
+        jobs.select do |_, spec|
+          spec.annotations.key? "helm.sh/hook"
+        end
+      end
+
+      def deployments
+        (resources_specs['Deployment'] || {}).map do |name, spec|
+          [name, Kubernetes::Client::Resource::Deployment.new(spec)]
+        end.to_h
+      end
+
+      def deploy!
+        args = [
+          name, chart_path, additional_values,
+          set_options, extra_options
+        ].flatten
+
+        dapp.kubernetes.create_namespace!(namespace) unless dapp.kubernetes.namespace?(namespace)
+
+        dapp.shellout! "helm upgrade #{args.join(' ')}", verbose: true
+      end
+
+      protected
+
+      def evaluation_output
+        @evaluation_output ||= begin
+          args = [
+            name, chart_path, additional_values,
+            set_options, extra_options(dry_run: true)
+          ].flatten
+
+          dapp.shellout!("helm upgrade #{args.join(' ')}").stdout
+        end
+      end
+
+      def resources_specs
+        @resources_specs ||= {}.tap do |specs|
+          generator = proc do |text|
+            text.split(/# Source.*|---/).reject {|c| c.strip.empty? }.map {|c| yaml_load(c) }.each do |spec|
+              specs[spec['kind']] ||= {}
+              specs[spec['kind']][(spec['metadata'] || {})['name']] = spec
+            end
+          end
+
+          manifest_start_index = evaluation_output.lines.index("MANIFEST:\n") + 1
+          hook_start_index = evaluation_output.lines.index("HOOKS:\n") + 1
+          manifest_end_index = evaluation_output.lines.index("Release \"#{name}\" has been upgraded. Happy Helming!\n")
+
+          generator.call(evaluation_output.lines[hook_start_index..manifest_start_index-2].join)
+          generator.call(evaluation_output.lines[manifest_start_index..manifest_end_index-2].join)
+        end
+      end
+
+      def additional_values
+        [].tap do |options|
+          options.concat(values.map { |p| "--values #{p}" })
+        end
+      end
+
+      def set_options
+        [].tap do |options|
+          options << "--set global.dapp.repo=#{repo}"
+          options << "--set global.dapp.image_version=#{image_version}"
+          options << "--set global.namespace=#{namespace}"
+          options.concat(set.map { |opt| "--set #{opt}" })
+        end
+      end
+
+      def extra_options(dry_run: nil)
+        dry_run = dapp.dry_run? if dry_run.nil?
+
+        [].tap do |options|
+          options << "--namespace #{namespace}"
+          options << '--install'
+          options << '--dry-run' if dry_run
+          options << '--debug' if dry_run || dapp.log_verbose?
+          options << "--timeout #{deploy_timeout}" if deploy_timeout
+        end
+      end
+    end # Helm::Release
+  end # Kube
+end # Dapp
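Helm::Release never queries the Kubernetes API to discover resources; `resources_specs` slices the plain-text stdout of a dry-run `helm upgrade`. For orientation, a trimmed and entirely hypothetical example of the Helm output being sliced (the section markers are the exact lines the code indexes on; the chart contents are invented):

    HOOKS:
    ---
    # Source: mychart/templates/migrate-job.yaml
    apiVersion: batch/v1
    kind: Job
    metadata:
      name: db-migrate
      annotations:
        "helm.sh/hook": pre-upgrade
    MANIFEST:
    ---
    # Source: mychart/templates/deployment.yaml
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: backend
    Release "myrelease" has been upgraded. Happy Helming!

The lines between HOOKS: and MANIFEST: become hook specs, the lines between MANIFEST: and the closing Release line become manifest specs, and each slice is split on `---`/`# Source` boundaries and YAML-parsed into the `kind => name => spec` map that `jobs`, `hooks`, and `deployments` read. Note that the closing marker matches the upgrade wording, so the slicing assumes an upgrade-style dry-run output.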
@@ -8,12 +8,24 @@ module Dapp
           @spec = spec
         end

+        def metadata
+          spec.fetch('metadata', {})
+        end
+
         def name
-          spec.fetch('metadata', {})['name']
+          metadata['name']
+        end
+
+        def uid
+          metadata['uid']
         end

         def annotations
-          spec.fetch('metadata', {}).fetch('annotations', {})
+          metadata.fetch('annotations', {})
+        end
+
+        def status
+          spec.fetch('status', {})
         end
       end # Base
     end # Kubernetes::Client::Resource
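A quick sketch of the widened wrapper interface (the pod spec hash is invented; the constant path is as referenced inside the Dapp::Kube namespace):

    pod = Kubernetes::Client::Resource::Pod.new(
      'metadata' => {'name' => 'backend-1', 'uid' => 'abc-123'},
      'status'   => {'phase' => 'Running'}
    )
    pod.name            # => "backend-1"
    pod.uid             # => "abc-123"  (new accessor)
    pod.status['phase'] # => "Running"  (new accessor, now used by Pod#phase below)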
@@ -0,0 +1,11 @@
+module Dapp
+  module Kube
+    module Kubernetes::Client::Resource
+      class Deployment < Base
+        def replicas
+          spec.fetch('spec', {}).fetch('replicas', nil)
+        end
+      end # Deployment
+    end # Kubernetes::Client::Resource
+  end # Kube
+end # Dapp
@@ -0,0 +1,8 @@
+module Dapp
+  module Kube
+    module Kubernetes::Client::Resource
+      class Event < Base
+      end # Event
+    end # Kubernetes::Client::Resource
+  end # Kube
+end # Dapp
@@ -15,8 +15,7 @@ module Dapp
         end

         def container_state(container_name)
-          container_status = spec
-            .fetch('status', {})
+          container_status = status
             .fetch('containerStatuses', [])
             .find {|cs| cs['name'] == container_name}

@@ -29,7 +28,7 @@ module Dapp
         end

         def phase
-          spec.fetch('status', {}).fetch('phase', nil)
+          status.fetch('phase', nil)
         end

         def containers_names
@@ -0,0 +1,8 @@
+module Dapp
+  module Kube
+    module Kubernetes::Client::Resource
+      class Replicaset < Base
+      end # Replicaset
+    end # Kubernetes::Client::Resource
+  end # Kube
+end # Dapp
@@ -40,6 +40,7 @@ module Dapp

           chunk_lines_by_time = dapp.kubernetes.pod_log(pod_manager.name, container: name, timestamps: true, sinceTime: @processed_log_till_time)
             .lines
+            .map(&:strip)
             .map do |line|
               timestamp, _, data = line.partition(' ')
               [timestamp, data]
@@ -47,14 +48,14 @@ module Dapp
             .reject {|timestamp, _| @processed_log_timestamps.include? timestamp}

           chunk_lines_by_time.each do |timestamp, data|
-            puts data
+            dapp.log("[#{timestamp}] #{data}")
             @processed_log_timestamps.add timestamp
           end

           if container_state == 'terminated'
             failed = (container_state_data['exitCode'].to_i != 0)

-            warn("".tap do |msg|
+            dapp.log_warning("".tap do |msg|
               msg << "Pod's '#{pod_manager.name}' container '#{name}' has been terminated unsuccessfuly: "
               msg << container_state_data.to_s
             end) if failed
@@ -0,0 +1,202 @@
+module Dapp
+  module Kube
+    module Kubernetes::Manager
+      class Deployment < Base
+        # NOTICE: @revision_before_deploy is currently printed only as debug information.
+        # NOTICE: deployment.kubernetes.io/revision does not change when the number of replicas
+        # NOTICE: changes, so waiting for a revision change is not a correct criterion by itself.
+        # NOTICE: However, the revision is reset when the deployment is updated, and waiting for
+        # NOTICE: this revision to be set again is currently one of the criteria for finishing the wait.
+
+        def before_deploy
+          if dapp.kubernetes.deployment? name
+            d = Kubernetes::Client::Resource::Deployment.new(dapp.kubernetes.deployment(name))
+
+            @revision_before_deploy = d.annotations['deployment.kubernetes.io/revision']
+
+            unless @revision_before_deploy.nil?
+              new_spec = Marshal.load(Marshal.dump(d.spec))
+              new_spec.delete('status')
+              new_spec.fetch('metadata', {}).fetch('annotations', {}).delete('deployment.kubernetes.io/revision')
+
+              @deployment_before_deploy = Kubernetes::Client::Resource::Deployment.new(dapp.kubernetes.replace_deployment!(name, new_spec))
+            end
+          end
+        end
+
+        def after_deploy
+          @deployed_at = Time.now
+        end
+
+        def watch_till_ready!
+          dapp.log_process("Watch deployment '#{name}' till ready") do
+            known_events_by_pod = {}
+            known_log_timestamps_by_pod_and_container = {}
+
+            d = @deployment_before_deploy || Kubernetes::Client::Resource::Deployment.new(dapp.kubernetes.deployment(name))
+
+            loop do
+              d_revision = d.annotations['deployment.kubernetes.io/revision']
+
+              dapp.log_step("[#{Time.now}] Poll deployment '#{d.name}' status")
+              dapp.with_log_indent do
+                dapp.log_info("Target replicas: #{_field_value_for_log(d.replicas)}")
+                dapp.log_info("Updated replicas: #{_field_value_for_log(d.status['updatedReplicas'])} / #{_field_value_for_log(d.replicas)}")
+                dapp.log_info("Available replicas: #{_field_value_for_log(d.status['availableReplicas'])} / #{_field_value_for_log(d.replicas)}")
+                dapp.log_info("Ready replicas: #{_field_value_for_log(d.status['readyReplicas'])} / #{_field_value_for_log(d.replicas)}")
+                dapp.log_info("Old deployment.kubernetes.io/revision: #{_field_value_for_log(@revision_before_deploy)}")
+                dapp.log_info("Current deployment.kubernetes.io/revision: #{_field_value_for_log(d_revision)}")
+              end
+
+              rs = nil
+              if d_revision
+                # Find the actual, current ReplicaSet.
+                # If several ReplicaSets match this revision, take the oldest by creation date.
+                # kubectl does the same: https://github.com/kubernetes/kubernetes/blob/d86a01570ba243e8d75057415113a0ff4d68c96b/pkg/controller/deployment/util/deployment_util.go#L664
+                rs = dapp.kubernetes.replicaset_list['items']
+                  .map {|spec| Kubernetes::Client::Resource::Replicaset.new(spec)}
+                  .select do |_rs|
+                    Array(_rs.metadata['ownerReferences']).any? do |owner_reference|
+                      owner_reference['uid'] == d.metadata['uid']
+                    end
+                  end
+                  .select do |_rs|
+                    rs_revision = _rs.annotations['deployment.kubernetes.io/revision']
+                    (rs_revision and (d_revision == rs_revision))
+                  end
+                  .sort_by do |_rs|
+                    if creation_timestamp = _rs.metadata['creationTimestamp']
+                      Time.parse(creation_timestamp)
+                    else
+                      Time.now
+                    end
+                  end.first
+              end
+
+              if rs
+                # Pods associated with the active ReplicaSet
+                rs_pods = dapp.kubernetes.pod_list['items']
+                  .map {|spec| Kubernetes::Client::Resource::Pod.new(spec)}
+                  .select do |pod|
+                    Array(pod.metadata['ownerReferences']).any? do |owner_reference|
+                      owner_reference['uid'] == rs.metadata['uid']
+                    end
+                  end
+
+                dapp.with_log_indent do
+                  dapp.log_step("Pods:") if rs_pods.any?
+
+                  rs_pods.each do |pod|
+                    dapp.with_log_indent do
+                      dapp.log_step(pod.name)
+
+                      known_events_by_pod[pod.name] ||= []
+                      pod_events = dapp.kubernetes
+                        .event_list(fieldSelector: "involvedObject.uid=#{pod.uid}")['items']
+                        .map {|spec| Kubernetes::Client::Resource::Event.new(spec)}
+                        .reject do |event|
+                          known_events_by_pod[pod.name].include? event.uid
+                        end
+
+                      if pod_events.any?
+                        dapp.with_log_indent do
+                          dapp.log_step("Last events:")
+                          pod_events.each do |event|
+                            dapp.with_log_indent do
+                              dapp.log_info("[#{event.metadata['creationTimestamp']}] #{event.spec['message']}")
+                            end
+                            known_events_by_pod[pod.name] << event.uid
+                          end
+                        end
+                      end
+
+                      dapp.with_log_indent do
+                        pod.containers_names.each do |container_name|
+                          next if pod.container_state(name).first == 'waiting'
+
+                          known_log_timestamps_by_pod_and_container[pod.name] ||= {}
+                          known_log_timestamps_by_pod_and_container[pod.name][container_name] ||= Set.new
+
+                          since_time = nil
+                          since_time = @deployed_at.utc.iso8601(9) if @deployed_at
+
+                          log_lines_by_time = dapp.kubernetes.pod_log(pod.name, container: container_name, timestamps: true, sinceTime: since_time)
+                            .lines.map(&:strip)
+                            .map {|line|
+                              timestamp, _, data = line.partition(' ')
+                              unless known_log_timestamps_by_pod_and_container[pod.name][container_name].include? timestamp
+                                known_log_timestamps_by_pod_and_container[pod.name][container_name].add timestamp
+                                [timestamp, data]
+                              end
+                            }.compact
+
+                          if log_lines_by_time.any?
+                            dapp.log_step("Last container '#{container_name}' log:")
+                            dapp.with_log_indent do
+                              log_lines_by_time.each do |timestamp, line|
+                                dapp.log("[#{timestamp}] #{line}")
+                              end
+                            end
+                          end
+                        end
+                      end
+
+                      ready_condition = pod.status.fetch('conditions', {}).find {|condition| condition['type'] == 'Ready'}
+                      next if (not ready_condition) or (ready_condition['status'] == 'True')
+
+                      if ready_condition['reason'] == 'ContainersNotReady'
+                        Array(pod.status['containerStatuses']).each do |container_status|
+                          next if container_status['ready']
+
+                          waiting_reason = container_status.fetch('state', {}).fetch('waiting', {}).fetch('reason', nil)
+                          case waiting_reason
+                          when 'ImagePullBackOff', 'ErrImagePull'
+                            raise Error::Base,
+                                  code: :image_not_found,
+                                  data: {pod_name: pod.name,
+                                         reason: waiting_reason,
+                                         message: container_status['state']['waiting']['message']}
+                          when 'CrashLoopBackOff'
+                            raise Error::Base,
+                                  code: :container_crash,
+                                  data: {pod_name: pod.name,
+                                         reason: waiting_reason,
+                                         message: container_status['state']['waiting']['message']}
+                          end
+                        end
+                      else
+                        dapp.with_log_indent do
+                          dapp.log_warning("Unknown pod readiness condition reason '#{ready_condition['reason']}': #{ready_condition}")
+                        end
+                      end
+                    end # with_log_indent
+                  end # rs_pods.each
+                end # with_log_indent
+              end
+
+              break if begin
+                (d_revision and
+                  d.replicas and
+                  d.status['updatedReplicas'] and
+                  d.status['availableReplicas'] and
+                  d.status['readyReplicas'] and
+                  (d.status['updatedReplicas'] >= d.replicas) and
+                  (d.status['availableReplicas'] >= d.replicas) and
+                  (d.status['readyReplicas'] >= d.replicas))
+              end
+
+              sleep 5
+              d = Kubernetes::Client::Resource::Deployment.new(dapp.kubernetes.deployment(d.name))
+            end
+          end
+        end
+
+        private
+
+        def _field_value_for_log(value)
+          value ? value : '-'
+        end
+      end # Deployment
+    end # Kubernetes::Manager
+  end # Kube
+end # Dapp
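Taken together with kube_run_deploy above, each Deployment in the release goes through the same four-step lifecycle (a schematic sketch using names from this diff; `dapp` and `release` stand in for the live command object and the Helm::Release instance):

    manager = Kubernetes::Manager::Deployment.new(dapp, 'backend')

    manager.before_deploy     # snapshot and reset deployment.kubernetes.io/revision
    release.deploy!           # helm upgrade --install ...
    manager.after_deploy      # record @deployed_at, later used as sinceTime for pod logs
    manager.watch_till_ready! # poll replicas and revision, streaming events and logs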
data/lib/dapp/version.rb CHANGED
@@ -1,4 +1,4 @@
 module Dapp
-  VERSION = '0.13.12'.freeze
+  VERSION = '0.13.13'.freeze
   BUILD_CACHE_VERSION = 15
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: dapp
 version: !ruby/object:Gem::Version
-  version: 0.13.12
+  version: 0.13.13
 platform: ruby
 authors:
 - Dmitry Stolyarov
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-08-01 00:00:00.000000000 Z
+date: 2017-08-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: mixlib-shellout
@@ -647,14 +647,20 @@ files:
 - lib/dapp/kube/error/base.rb
 - lib/dapp/kube/error/command.rb
 - lib/dapp/kube/error/kubernetes.rb
+- lib/dapp/kube/helm.rb
+- lib/dapp/kube/helm/release.rb
 - lib/dapp/kube/kubernetes.rb
 - lib/dapp/kube/kubernetes/client.rb
 - lib/dapp/kube/kubernetes/client/error.rb
 - lib/dapp/kube/kubernetes/client/resource/base.rb
+- lib/dapp/kube/kubernetes/client/resource/deployment.rb
+- lib/dapp/kube/kubernetes/client/resource/event.rb
 - lib/dapp/kube/kubernetes/client/resource/job.rb
 - lib/dapp/kube/kubernetes/client/resource/pod.rb
+- lib/dapp/kube/kubernetes/client/resource/replicaset.rb
 - lib/dapp/kube/kubernetes/manager/base.rb
 - lib/dapp/kube/kubernetes/manager/container.rb
+- lib/dapp/kube/kubernetes/manager/deployment.rb
 - lib/dapp/kube/kubernetes/manager/job.rb
 - lib/dapp/kube/kubernetes/manager/pod.rb
 - lib/dapp/kube/secret.rb