kubernetes-deploy 0.10.1 → 0.11.0

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: cfefc3bd897ba4b3a56a3ad94309571bb2a5ba8f
-  data.tar.gz: 959b6beaf80885a427c3e35b8b18ddd897784bd6
+  metadata.gz: 1fcb28df3ae43f68d38b30177c5131f59c45c602
+  data.tar.gz: 26f3440846640400f726ef6fe9acc95460ddef3d
 SHA512:
-  metadata.gz: 867e36156d825450b7812e164d167facd2a6a4e150235eaec4bfe5ed1a8a1cb8e07a8ece2d74a5e82af240a93b833367599918104c1dffd8768e7d90c43724d9
-  data.tar.gz: abcc6c7d04d0cadfb4c63ffdb28ffe17adb639bdd362835a8a4c3a7badbf34b056e43ce8e31915032d925a9227e800a729493abbbd9b79882674141a978cc24d
+  metadata.gz: 6e6d10b5aea6ffacfacf466d7d3255e7f22709432ea49924cee5ef173f371b13b9f7d04cfb86b9182610e7cb3207e2f526fefca37cd864bc83ffdd1fc559869a
+  data.tar.gz: 7909b9c4475a6151b4b1d286601b49188d4797e0d0174f9526ade8afa67134ecfff07e793b42392d004c2eb145b075b986556dd7f35e1f812e9414737c89924f
@@ -15,6 +15,7 @@ require 'kubernetes-deploy/errors'
 require 'kubernetes-deploy/formatted_logger'
 require 'kubernetes-deploy/runner'
 require 'kubernetes-deploy/statsd'
+require 'kubernetes-deploy/concurrency'
 
 module KubernetesDeploy
   KubernetesDeploy::StatsD.build
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+module KubernetesDeploy
+  module Concurrency
+    MAX_THREADS = 8
+
+    def self.split_across_threads(all_work, &block)
+      return if all_work.empty?
+      raise ArgumentError, "Block of work is required" unless block_given?
+
+      slice_size = ((all_work.length + MAX_THREADS - 1) / MAX_THREADS)
+      threads = []
+      all_work.each_slice(slice_size) do |work_group|
+        threads << Thread.new { work_group.each(&block) }
+      end
+      threads.each(&:join)
+    end
+  end
+end
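
The new Concurrency helper splits the work into at most MAX_THREADS roughly even slices, runs each slice on its own thread, and joins them all before returning. A minimal usage sketch, assuming the gem is loaded (the integer work items and the block below are purely illustrative; in the deploy flow the items are KubernetesResource instances and the block is :sync or :validate_definition):

    require 'kubernetes-deploy'

    work = (1..20).to_a

    # Each slice of `work` is processed on one of up to 8 threads;
    # the call blocks until every thread has finished.
    KubernetesDeploy::Concurrency.split_across_threads(work) do |item|
      puts "item #{item} handled on thread #{Thread.current.object_id}"
    end
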
@@ -6,7 +6,7 @@ require 'kubernetes-deploy/kubectl'
 
 module KubernetesDeploy
   class KubernetesResource
-    attr_reader :name, :namespace, :file, :context
+    attr_reader :name, :namespace, :file, :context, :validation_error_msg
     attr_writer :type, :deploy_started
 
     TIMEOUT = 5.minutes
@@ -55,6 +55,20 @@ module KubernetesDeploy
       @logger = logger
       @definition = definition
       @statsd_report_done = false
+      @validation_error_msg = nil
+    end
+
+    def validate_definition
+      @validation_error_msg = nil
+      command = ["create", "-f", file_path, "--dry-run", "--output=name"]
+      _, err, st = kubectl.run(*command, log_failure: false)
+      return true if st.success?
+      @validation_error_msg = err
+      false
+    end
+
+    def validation_failed?
+      @validation_error_msg.present?
     end
 
     def id
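
The dry-run validation that used to live in the deploy runner now lives on each resource, so failures can be collected instead of aborting on the first one. A sketch of how a caller might drive the new methods (the namespace, context, logger, and definition values are placeholders, not from the gem):

    resource = KubernetesDeploy::KubernetesResource.build(
      namespace: "example-ns", context: "example-context",
      logger: logger, definition: definition
    )

    # validate_definition shells out to `kubectl create --dry-run` and stores any
    # stderr in validation_error_msg rather than raising immediately.
    unless resource.validate_definition
      logger.error("Template invalid: #{resource.validation_error_msg}")
    end
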
@@ -14,7 +14,8 @@ module KubernetesDeploy
         @rollout_data = { "replicas" => 0 }.merge(deployment_data["status"]
           .slice("replicas", "updatedReplicas", "availableReplicas", "unavailableReplicas"))
         @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas.chop.pluralize(num)}" }.join(", ")
-        @progress = deployment_data["status"]["conditions"].find { |condition| condition['type'] == 'Progressing' }
+        conditions = deployment_data.fetch("status", {}).fetch("conditions", [])
+        @progress = conditions.find { |condition| condition['type'] == 'Progressing' }
       else # reset
         @latest_rs = nil
         @rollout_data = { "replicas" => 0 }
@@ -21,7 +21,7 @@ module KubernetesDeploy
         end
         delay_sync_until = Time.now.utc + delay_sync # don't pummel the API if the sync is fast
 
-        @resources.each(&:sync)
+        KubernetesDeploy::Concurrency.split_across_threads(@resources, &:sync)
         newly_finished_resources, @resources = @resources.partition(&:deploy_finished?)
 
         if newly_finished_resources.present?
@@ -20,7 +20,6 @@ require 'kubernetes-deploy/kubernetes_resource'
   pod_disruption_budget
   replica_set
   service_account
-  daemon_set
 ).each do |subresource|
   require "kubernetes-deploy/kubernetes_resource/#{subresource}"
 end
@@ -89,9 +88,10 @@ module KubernetesDeploy
       confirm_context_exists
       confirm_namespace_exists
       resources = discover_resources
+      validate_definitions(resources)
 
       @logger.phase_heading("Checking initial resource statuses")
-      resources.each(&:sync)
+      KubernetesDeploy::Concurrency.split_across_threads(resources, &:sync)
       resources.each { |r| @logger.info(r.pretty_status) }
 
       ejson = EjsonSecretProvisioner.new(
@@ -192,7 +192,7 @@ module KubernetesDeploy
       # stderr often contains one or more lines like the following, from which we can extract the file path(s):
       # Error from server (TypeOfError): error when creating "/path/to/service-gqq5oh.yml": Service "web" is invalid:
       matches = stderr.scan(%r{"(/\S+\.ya?ml\S*)"})
-      matches.flatten if matches
+      matches&.flatten
     end
 
     def deploy_has_priority_resources?(resources)
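
matches&.flatten uses Ruby's safe-navigation operator: it returns nil when the receiver is nil and otherwise behaves like a plain call, which is equivalent to the old `matches.flatten if matches` guard (String#scan always returns an array, so the guard is defensive either way). For example:

    matches = nil
    matches&.flatten    # => nil, no NoMethodError

    matches = [["/tmp/a.yml"], ["/tmp/b.yaml"]]
    matches&.flatten    # => ["/tmp/a.yml", "/tmp/b.yaml"]
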
@@ -215,15 +215,26 @@ module KubernetesDeploy
       end
     end
 
+    def validate_definitions(resources)
+      KubernetesDeploy::Concurrency.split_across_threads(resources, &:validate_definition)
+      failed_resources = resources.select(&:validation_failed?)
+      return unless failed_resources.present?
+
+      failed_resources.each do |r|
+        record_invalid_template(r.validation_error_msg, file_paths: [r.file_path])
+      end
+      raise FatalDeploymentError, "Template validation failed"
+    end
+
     def discover_resources
       resources = []
       @logger.info("Discovering templates:")
+
       Dir.foreach(@template_dir) do |filename|
         next unless filename.end_with?(".yml.erb", ".yml", ".yaml", ".yaml.erb")
 
         split_templates(filename) do |r_def|
           r = KubernetesResource.build(namespace: @namespace, context: @context, logger: @logger, definition: r_def)
-          validate_template_via_dry_run(r.file_path, filename)
           resources << r
           @logger.info " - #{r.id}"
         end
@@ -231,14 +242,6 @@ module KubernetesDeploy
       resources
     end
 
-    def validate_template_via_dry_run(file_path, original_filename)
-      command = ["create", "-f", file_path, "--dry-run", "--output=name"]
-      _, err, st = kubectl.run(*command, log_failure: false)
-      return if st.success?
-      record_invalid_template(err, file_paths: [file_path], original_filenames: [original_filename])
-      raise FatalDeploymentError, "Template validation failed (command: #{Shellwords.join(command)})"
-    end
-
     def split_templates(filename)
       file_content = File.read(File.join(@template_dir, filename))
       rendered_content = render_template(filename, file_content)
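
Taken together, these runner changes move validation from a per-template kubectl dry-run that aborted on the first bad file to a single concurrent pass over all discovered resources that logs every invalid template before raising once. A rough sketch of the resulting order of operations inside the run method (simplified; the method names come from the diff above):

    resources = discover_resources   # parse templates and build every KubernetesResource
    validate_definitions(resources)  # dry-run them all across threads, report each failure,
                                     # then raise FatalDeploymentError if any template was invalid
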
@@ -271,7 +274,6 @@ module KubernetesDeploy
       if file_content.present?
         debug_msg += "\n> Rendered template content:\n#{indent_four(file_content)}"
       end
-
       @logger.summary.add_paragraph(debug_msg)
     end
 
@@ -1,4 +1,4 @@
 # frozen_string_literal: true
 module KubernetesDeploy
-  VERSION = "0.10.1"
+  VERSION = "0.11.0"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: kubernetes-deploy
 version: !ruby/object:Gem::Version
-  version: 0.10.1
+  version: 0.11.0
 platform: ruby
 authors:
 - Katrina Verey
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2017-07-26 00:00:00.000000000 Z
+date: 2017-07-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport
@@ -202,6 +202,7 @@ files:
 - exe/kubernetes-run
 - kubernetes-deploy.gemspec
 - lib/kubernetes-deploy.rb
+- lib/kubernetes-deploy/concurrency.rb
 - lib/kubernetes-deploy/deferred_summary_logging.rb
 - lib/kubernetes-deploy/ejson_secret_provisioner.rb
 - lib/kubernetes-deploy/errors.rb
@@ -213,7 +214,6 @@ files:
 - lib/kubernetes-deploy/kubernetes_resource/bugsnag.rb
 - lib/kubernetes-deploy/kubernetes_resource/cloudsql.rb
 - lib/kubernetes-deploy/kubernetes_resource/config_map.rb
-- lib/kubernetes-deploy/kubernetes_resource/daemon_set.rb
 - lib/kubernetes-deploy/kubernetes_resource/deployment.rb
 - lib/kubernetes-deploy/kubernetes_resource/ingress.rb
 - lib/kubernetes-deploy/kubernetes_resource/persistent_volume_claim.rb
@@ -1,95 +0,0 @@
-# frozen_string_literal: true
-module KubernetesDeploy
-  class DaemonSet < KubernetesResource
-    TIMEOUT = 5.minutes
-
-    def sync
-      raw_json, _err, st = kubectl.run("get", type, @name, "--output=json")
-      @found = st.success?
-
-      if @found
-        daemonset_data = JSON.parse(raw_json)
-        @current_generation = daemonset_data["metadata"]["generation"]
-        @observed_generation = daemonset_data["status"]["observedGeneration"]
-        @rollout_data = daemonset_data["status"]
-          .slice("currentNumberScheduled", "desiredNumberScheduled", "numberReady", "numberAvailable")
-        @status = @rollout_data.map { |state_replicas, num| "#{num} #{state_replicas}" }.join(", ")
-        @pods = find_pods(daemonset_data)
-      else # reset
-        @rollout_data = { "currentNumberScheduled" => 0 }
-        @current_generation = 1 # to make sure the current and observed generations are different
-        @observed_generation = 0
-        @status = nil
-        @pods = []
-      end
-    end
-
-    def deploy_succeeded?
-      @rollout_data["desiredNumberScheduled"].to_i == @rollout_data["currentNumberScheduled"].to_i &&
-        @rollout_data["desiredNumberScheduled"].to_i == @rollout_data["numberAvailable"].to_i &&
-        @current_generation == @observed_generation
-    end
-
-    def deploy_failed?
-      @pods.present? && @pods.any?(&:deploy_failed?)
-    end
-
-    def failure_message
-      @pods.map(&:failure_message).compact.uniq.join("\n")
-    end
-
-    def timeout_message
-      @pods.map(&:timeout_message).compact.uniq.join("\n")
-    end
-
-    def deploy_timed_out?
-      super || @pods.present? && @pods.any?(&:deploy_timed_out?)
-    end
-
-    def exists?
-      @found
-    end
-
-    def fetch_events
-      own_events = super
-      return own_events unless @pods.present?
-      most_useful_pod = @pods.find(&:deploy_failed?) || @pods.find(&:deploy_timed_out?) || @pods.first
-      own_events.merge(most_useful_pod.fetch_events)
-    end
-
-    def fetch_logs
-      most_useful_pod = @pods.find(&:deploy_failed?) || @pods.find(&:deploy_timed_out?) || @pods.first
-      most_useful_pod.fetch_logs
-    end
-
-    private
-
-    def find_pods(ds_data)
-      label_string = ds_data["spec"]["selector"]["matchLabels"].map { |k, v| "#{k}=#{v}" }.join(",")
-      raw_json, _err, st = kubectl.run("get", "pods", "-a", "--output=json", "--selector=#{label_string}")
-      return [] unless st.success?
-
-      all_pods = JSON.parse(raw_json)["items"]
-      current_generation = ds_data["metadata"]["generation"]
-
-      latest_pods = all_pods.find_all do |pods|
-        pods["metadata"]["ownerReferences"].any? { |ref| ref["uid"] == ds_data["metadata"]["uid"] } &&
-          pods["metadata"]["labels"]["pod-template-generation"].to_i == current_generation.to_i
-      end
-      return unless latest_pods.present?
-
-      latest_pods.each_with_object([]) do |pod_data, relevant_pods|
-        pod = Pod.new(
-          namespace: namespace,
-          context: context,
-          definition: pod_data,
-          logger: @logger,
-          parent: "#{@name.capitalize} daemon set",
-          deploy_started: @deploy_started
-        )
-        pod.sync(pod_data)
-        relevant_pods << pod
-      end
-    end
-  end
-end