kube_deploy_tools 3.0.5
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +177 -0
- data/README.md +142 -0
- data/bin/deploy +60 -0
- data/bin/generate +28 -0
- data/bin/kdt +17 -0
- data/bin/make_configmap +20 -0
- data/bin/publish +28 -0
- data/bin/push +57 -0
- data/bin/render_deploys_hook +18 -0
- data/bin/templater +34 -0
- data/bin/upgrade +23 -0
- data/lib/kube_deploy_tools.rb +17 -0
- data/lib/kube_deploy_tools/artifact_registry.rb +30 -0
- data/lib/kube_deploy_tools/artifact_registry/driver.rb +13 -0
- data/lib/kube_deploy_tools/artifact_registry/driver_artifactory.rb +155 -0
- data/lib/kube_deploy_tools/artifact_registry/driver_base.rb +37 -0
- data/lib/kube_deploy_tools/artifact_registry/driver_gcs.rb +120 -0
- data/lib/kube_deploy_tools/built_artifacts_file.rb +28 -0
- data/lib/kube_deploy_tools/concurrency.rb +18 -0
- data/lib/kube_deploy_tools/deferred_summary_logging.rb +69 -0
- data/lib/kube_deploy_tools/deploy.rb +215 -0
- data/lib/kube_deploy_tools/deploy/options.rb +114 -0
- data/lib/kube_deploy_tools/deploy_config_file.rb +286 -0
- data/lib/kube_deploy_tools/deploy_config_file/deep_merge.rb +38 -0
- data/lib/kube_deploy_tools/deploy_config_file/util.rb +39 -0
- data/lib/kube_deploy_tools/errors.rb +5 -0
- data/lib/kube_deploy_tools/file_filter.rb +43 -0
- data/lib/kube_deploy_tools/formatted_logger.rb +59 -0
- data/lib/kube_deploy_tools/generate.rb +145 -0
- data/lib/kube_deploy_tools/generate/options.rb +66 -0
- data/lib/kube_deploy_tools/image_registry.rb +30 -0
- data/lib/kube_deploy_tools/image_registry/driver.rb +18 -0
- data/lib/kube_deploy_tools/image_registry/driver/aws.rb +121 -0
- data/lib/kube_deploy_tools/image_registry/driver/base.rb +50 -0
- data/lib/kube_deploy_tools/image_registry/driver/gcp.rb +71 -0
- data/lib/kube_deploy_tools/image_registry/driver/login.rb +26 -0
- data/lib/kube_deploy_tools/image_registry/driver/noop.rb +15 -0
- data/lib/kube_deploy_tools/image_registry/image.rb +17 -0
- data/lib/kube_deploy_tools/kdt.rb +52 -0
- data/lib/kube_deploy_tools/kubectl.rb +25 -0
- data/lib/kube_deploy_tools/kubernetes_resource.rb +57 -0
- data/lib/kube_deploy_tools/kubernetes_resource/deployment.rb +56 -0
- data/lib/kube_deploy_tools/make_configmap.rb +51 -0
- data/lib/kube_deploy_tools/make_configmap/options.rb +39 -0
- data/lib/kube_deploy_tools/object.rb +11 -0
- data/lib/kube_deploy_tools/publish.rb +40 -0
- data/lib/kube_deploy_tools/publish/options.rb +34 -0
- data/lib/kube_deploy_tools/push.rb +129 -0
- data/lib/kube_deploy_tools/push/options.rb +46 -0
- data/lib/kube_deploy_tools/render_deploys_hook.rb +95 -0
- data/lib/kube_deploy_tools/shellrunner.rb +46 -0
- data/lib/kube_deploy_tools/tag.rb +33 -0
- data/lib/kube_deploy_tools/templater.rb +63 -0
- data/lib/kube_deploy_tools/templater/options.rb +74 -0
- data/lib/kube_deploy_tools/version.rb +3 -0
- metadata +191 -0
data/lib/kube_deploy_tools/built_artifacts_file.rb
@@ -0,0 +1,28 @@
+require 'set'
+require 'yaml'
+
+module KubeDeployTools
+  class BuiltArtifactsFile
+    attr_accessor :build_id, :images, :extra_files
+
+    def initialize(file)
+      config = {}
+      if File.exist? file and YAML.load_file file
+        config = YAML.load_file(file)
+      end
+
+      @images = config.fetch('images', []).to_set
+      @extra_files = config.fetch('extra_files', []).to_set
+      @build_id = config['build_id'] # ok to be nil
+    end
+
+    def write(file)
+      config = {
+        'build_id' => build_id,
+        'extra_files' => extra_files.to_a,
+        'images' => images.to_a
+      }
+      file.write(config.to_yaml)
+    end
+  end
+end
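For orientation, a minimal usage sketch of `BuiltArtifactsFile` as defined above; the file path and image name are placeholders, not part of the gem:

```ruby
require 'kube_deploy_tools/built_artifacts_file'

# A missing or empty file yields empty images/extra_files sets and a nil build_id.
artifacts = KubeDeployTools::BuiltArtifactsFile.new('build/images.yaml')
artifacts.images << 'registry.example.com/my-app:123' # placeholder image name

# Note: write takes an IO object, not a path.
File.open('build/images.yaml', 'w') { |f| artifacts.write(f) }
```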
data/lib/kube_deploy_tools/concurrency.rb
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+module KubernetesDeploy
+  module Concurrency
+    MAX_THREADS = 8
+
+    def self.split_across_threads(all_work, &block)
+      return if all_work.empty?
+      raise ArgumentError, "Block of work is required" unless block_given?
+
+      slice_size = ((all_work.length + MAX_THREADS - 1) / MAX_THREADS)
+      threads = []
+      all_work.each_slice(slice_size) do |work_group|
+        threads << Thread.new { work_group.each(&block) }
+      end
+      threads.each(&:join)
+    end
+  end
+end
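Note that `slice_size` is a ceiling division, so the work is split into at most `MAX_THREADS` groups, each running on its own thread. A small sketch with placeholder work items:

```ruby
items = (1..20).to_a

# 20 items with MAX_THREADS = 8 gives slice_size 3, i.e. 7 threads of up to 3 items.
KubernetesDeploy::Concurrency.split_across_threads(items) do |item|
  puts "processed #{item}"
end
```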
data/lib/kube_deploy_tools/deferred_summary_logging.rb
@@ -0,0 +1,69 @@
+require 'colorized_string'
+
+module KubeDeployTools
+  # Adds the methods to your Logger class.
+  # These methods include helpers for logging consistent headings, as well as facilities for
+  # displaying key information later, in a summary section, rather than when it occurred.
+  module DeferredSummaryLogging
+    attr_reader :summary
+    def initialize(*args)
+      reset
+      super
+    end
+
+    def reset
+      @summary = DeferredSummary.new
+      @current_phase = 0
+    end
+
+    def blank_line(level = :info)
+      public_send(level, "")
+    end
+
+    def phase_heading(phase_name)
+      @current_phase += 1
+      heading("Phase #{@current_phase}: #{phase_name}")
+    end
+
+    def heading(text, secondary_msg = '', secondary_msg_color = :cyan)
+      padding = (100.0 - (text.length + secondary_msg.length)) / 2
+      blank_line
+      part1 = ColorizedString.new("#{'-' * padding.floor}#{text}").cyan
+      part2 = ColorizedString.new(secondary_msg).colorize(secondary_msg_color)
+      part3 = ColorizedString.new('-' * padding.ceil).cyan
+      info(part1 + part2 + part3)
+    end
+
+    # Outputs the deferred summary information saved via @logger.summary.add_paragraph
+    def print_summary(success)
+      if success
+        heading("Result: ", "SUCCESS", :green)
+        level = :info
+      else
+        heading("Result: ", "FAILURE", :red)
+        level = :fatal
+      end
+
+      summary.paragraphs.each do |para|
+        blank_line(level)
+        msg_lines = para.split("\n")
+        msg_lines.each { |line| public_send(level, line) }
+      end
+    end
+
+    class DeferredSummary
+      attr_reader :paragraphs
+
+      def initialize
+        @paragraphs = []
+      end
+
+      # Adds a paragraph to be displayed in the summary section
+      # Paragraphs will be printed in the order they were added, separated by a blank line
+      # This can be used to log a block of data on a particular topic, e.g. debug info for a particular failed resource
+      def add_paragraph(paragraph)
+        paragraphs << paragraph
+      end
+    end
+  end
+end
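`DeferredSummaryLogging` is meant to be mixed into a Logger class (it overrides `initialize` and dispatches to `info`/`fatal` via `public_send`). A minimal sketch, assuming the stdlib `Logger` as the base class:

```ruby
require 'logger'
require 'kube_deploy_tools/deferred_summary_logging'

class SummaryLogger < Logger
  include KubeDeployTools::DeferredSummaryLogging
end

logger = SummaryLogger.new($stderr)
logger.phase_heading('Initializing deploy')           # "Phase 1: Initializing deploy"
logger.summary.add_paragraph('3 resources deployed')  # deferred until print_summary
logger.print_summary(true)                            # "Result: SUCCESS" + paragraphs
```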
data/lib/kube_deploy_tools/deploy.rb
@@ -0,0 +1,215 @@
+# frozen_string_literal: true
+
+require 'json'
+require 'set'
+require 'yaml'
+require 'date'
+require 'kube_deploy_tools/errors'
+require 'kube_deploy_tools/formatted_logger'
+require 'kube_deploy_tools/kubernetes_resource'
+require 'kube_deploy_tools/kubernetes_resource/deployment'
+require 'kube_deploy_tools/concurrency'
+require 'kube_deploy_tools/file_filter'
+
+# NOTE(jmodes): the order matters, and predeploy resources will be deployed
+# in order.
+# e.g. Namespaces will be deployed before Services and ConfigMaps, which
+# are namespaced resources that may depend on deploying Namespaces first.
+PREDEPLOY_RESOURCES = %w[
+  Namespace
+  StorageClass
+  ServiceAccount
+  ClusterRole
+  Role
+  ClusterRoleBinding
+  RoleBinding
+  CustomResourceDefinition
+  ThirdPartyResource
+  ConfigMap
+  Service
+].freeze
+
+module KubeDeployTools
+  class Deploy
+    def initialize(
+      kubectl:,
+      namespace: nil,
+      input_path:,
+      glob_files: [],
+      max_retries: 3,
+      retry_delay: 1
+    )
+      @kubectl = kubectl
+      @namespace = namespace
+      @input_path = input_path
+
+      if !File.exists?(@input_path)
+        Logger.error("Path doesn't exist: #{@input_path}")
+        raise ArgumentError, "Path doesn't exist #{@input_path}"
+      elsif File.directory?(@input_path)
+        @glob_files = glob_files
+        @filtered_files = FileFilter
+          .filter_files(filters: @glob_files, files_path: @input_path)
+          .select { |f| f.end_with?('.yml', '.yaml') }
+      elsif File.file?(@input_path)
+        @filtered_files = [@input_path]
+        if !@glob_files.nil? && @glob_files.length > 0
+          Logger.error("Single-file artifacts do not support glob exclusions: #{@input_path}")
+          raise ArgumentError
+        end
+      end
+
+      @max_retries = max_retries.nil? ? 3 : max_retries.to_i
+      @retry_delay = retry_delay.to_i
+    end
+
+    def do_deploy(dry_run)
+      success = false
+      Logger.reset
+      Logger.phase_heading('Initializing deploy')
+      Logger.warn('Running in dry-run mode') if dry_run
+
+      if !@namespace.nil? && @namespace != 'default'
+        Logger.warn("Deploying to non-default Namespace: #{@namespace}")
+      end
+
+      resources = read_resources(@filtered_files)
+
+      Logger.phase_heading('Checking initial resource statuses')
+      KubernetesDeploy::Concurrency.split_across_threads(resources, &:sync)
+
+      Logger.phase_heading('Checking deployment replicas match')
+      deployments = resources
+        .select { |resource| resource.definition['kind'] == 'Deployment' }
+      KubernetesDeploy::Concurrency.split_across_threads(deployments, &:warn_replicas_mismatch)
+
+      Logger.phase_heading('Deploying all resources')
+      # Deploy predeploy resources first, in order.
+      # Then deploy the remaining resources in any order.
+      deploy_resources = resources.sort_by do |r|
+        PREDEPLOY_RESOURCES.index(r.definition['kind']) || PREDEPLOY_RESOURCES.length
+      end
+
+      kubectl_apply(deploy_resources, dry_run: dry_run)
+
+      success = true
+    ensure
+      Logger.print_summary(success)
+      success
+    end
+
+    def run(dry_run: true)
+      do_deploy(dry_run)
+    end
+
+    def project_info
+      git_commit, git_project = git_annotations
+      # send a notification about the deployed code
+      {
+        'git_commit': git_commit,
+        'git_project': git_project,
+        'kubernetes-cluster': kubectl_cluster_server,
+        'kubernetes-cluster-name': kubectl_cluster_name,
+        'time': DateTime.now,
+        'user': current_user
+      }
+    end
+
+    def read_resources(filtered_files = Dir[File.join(@input_path, '**', '*')])
+      resources = []
+      filtered_files.each do |filepath|
+        resource_definition(filepath) do |resource|
+          resources << resource
+        end
+      end
+      resources
+    end
+
+    def resource_definition(filepath)
+      read_resource_definition(filepath) do |resource_definition|
+        yield KubeDeployTools::KubernetesResource.build(
+          definition: resource_definition,
+          kubectl: @kubectl
+        )
+      end
+    end
+
+    def git_annotations
+      resource_definition(@filtered_files.first) do |resource|
+        if resource.annotations
+          git_commit = resource.annotations['git_commit']
+          git_project = resource.annotations['git_project']
+          return [git_commit, git_project]
+        end
+      end
+      [nil, nil]
+    end
+
+    def read_resource_definition(filepath)
+      file_content = File.read(filepath)
+      YAML.load_stream(file_content) do |doc|
+        yield doc if !doc.nil? && !doc.empty?
+      end
+    rescue Psych::SyntaxError => e
+      debug_msg = <<~INFO
+        Error message: #{e}
+        Template content:
+        ---
+      INFO
+      debug_msg += file_content
+      Logger.debug(debug_msg)
+      raise FatalDeploymentError, "Template '#{filepath}' cannot be parsed"
+    end
+
+    def kubectl_apply(resources, dry_run: true)
+      resources.each do |resource|
+        @max_retries.times do |try|
+          args = ['apply', '-f', resource.filepath, "--dry-run=#{dry_run}"]
+          out, _, status = @kubectl.run(*args)
+          if status.success?
+            Logger.info(out)
+            break
+          elsif try < @max_retries - 1
+            sleep(@retry_delay)
+            next
+          end
+          raise FatalDeploymentError, "Failed to apply resource '#{resource.filepath}'"
+        end
+      end
+    end
+
+    def kubectl_cluster_name
+      args = ['config', 'view', '--minify', '--output=jsonpath={..clusters[0].name}']
+      name, _, status = @kubectl.run(*args)
+      unless status.success?
+        raise FatalDeploymentError, 'Failed to determine cluster name'
+      end
+      name
+    end
+
+    def kubectl_cluster_server
+      args = ['config', 'view', '--minify', '--output=jsonpath={..cluster.server}']
+      server, _, status = @kubectl.run(*args)
+      unless status.success?
+        raise FatalDeploymentError, 'Failed to determine cluster server'
+      end
+      server
+    end
+
+    def self.kube_namespace(context:, kubeconfig: nil)
+      args = [
+        'kubectl', 'config', 'view', '--minify', '--output=jsonpath={..namespace}',
+        "--context=#{context}"
+      ]
+      args.push("--kubeconfig=#{kubeconfig}") if kubeconfig.present?
+      namespace, = Shellrunner.check_call(*args)
+      namespace = 'default' if namespace.to_s.empty?
+
+      namespace
+    end
+
+    def current_user
+      Shellrunner.run_call('gcloud', 'config', 'list', 'account', '--format', 'value(core.account)')[0]
+    end
+  end
+end
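Two details worth noting: resources whose kind is not listed in `PREDEPLOY_RESOURCES` sort after all predeploy kinds (their index falls back to `PREDEPLOY_RESOURCES.length`), and `run` defaults to dry-run mode. A sketch of driving the class directly; the `Kubectl` constructor arguments here are assumptions, not taken from this diff:

```ruby
require 'kube_deploy_tools/deploy'
require 'kube_deploy_tools/kubectl'

# Assumption: Kubectl wraps kubectl invocations for a given context;
# see data/lib/kube_deploy_tools/kubectl.rb for the actual signature.
kubectl = KubeDeployTools::Kubectl.new(context: 'my-cluster', kubeconfig: nil)

deploy = KubeDeployTools::Deploy.new(
  kubectl: kubectl,
  input_path: 'manifests/', # directory of rendered .yml/.yaml files
  namespace: 'default'
)
deploy.run(dry_run: true) # validate against the apiserver without applying
```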
data/lib/kube_deploy_tools/deploy/options.rb
@@ -0,0 +1,114 @@
+require 'optparse'
+
+require 'kube_deploy_tools/object'
+
+module KubeDeployTools
+  class Deploy::Optparser
+    class Options
+      attr_accessor :kubeconfig,
+        :context,
+        :from_files,
+        :project,
+        :flavor,
+        :artifact,
+        :build_number,
+        :dry_run,
+        :glob_files,
+        :pre_apply_hook,
+        :max_retries,
+        :retry_delay
+
+      def initialize
+        self.project = File.basename(`git config remote.origin.url`.chomp, '.git')
+        self.flavor = 'default'
+        self.dry_run = true
+        self.glob_files = []
+      end
+
+      def define_options(parser)
+        parser.on('-fPATH', '--from-files FILEPATH', 'Filename, directory, or artifact URL that contains the Kubernetes manifests to apply') do |p|
+          self.from_files = p
+        end
+
+        parser.on('--kubeconfig FILEPATH', 'Path to the kubeconfig file to use for kubectl requests') do |p|
+          self.kubeconfig = p
+        end
+
+        parser.on('--context CONTEXT', 'The kubeconfig context to use') do |p|
+          self.context = p
+        end
+
+        parser.on('--project PROJECT', "The project to deploy. Default is '#{project}'.") do |p|
+          self.project = p
+        end
+
+        parser.on('--flavor FLAVOR', "The flavor to deploy. Default is '#{flavor}'") do |p|
+          self.flavor = p
+        end
+
+        parser.on('--artifact ARTIFACT', 'The artifact name to deploy') do |p|
+          self.artifact = p
+        end
+
+        parser.on('--build BUILD', 'The Jenkins build number to deploy') do |p|
+          self.build_number = p
+        end
+
+        parser.on('--dry-run DRY_RUN', TrueClass, "If true, will only dry-run apply Kubernetes manifests without sending them to the apiserver. Default is dry-run mode: #{dry_run}.") do |p|
+          self.dry_run = p
+        end
+
+        parser.on('--include INCLUDE', "Include glob pattern. Example: --include=**/* will include every file. Default is ''.") do |p|
+          self.glob_files.push(["include_files", p])
+        end
+
+        parser.on('--exclude EXCLUDE', "Exclude glob pattern. Example: --exclude=**/gazette/* will exclude every file in gazette folder. Default is ''.") do |p|
+          self.glob_files.push(["exclude_files", p])
+        end
+
+        parser.on('--include-dir INCLUDE', "Recursively include all files in a directory and its subdirectories. Example: --include-dir=gazette/ (equivalent of --include=**/gazette/**/*)") do |p|
+          self.glob_files.push(["include_dir", p])
+        end
+
+        parser.on('--exclude-dir EXCLUDE', "Recursively exclude all files in a directory and its subdirectories. Example: --exclude-dir=gazette/ (equivalent of --exclude=**/gazette/**/*)") do |p|
+          self.glob_files.push(["exclude_dir", p])
+        end
+
+        parser.on("--pre-apply-hook CMD", "Shell command to run with the output directory before applying files") do |p|
+          self.pre_apply_hook = p
+        end
+
+        parser.on('--retry NUM', 'Maximum number of times to retry') do |p|
+          self.max_retries = p
+        end
+
+        parser.on('--retry-delay NUM', 'Delay in seconds between retries') do |p|
+          self.retry_delay = p
+        end
+
+        parser.on('-')
+      end
+
+      def require_options
+        raise ArgumentError, 'Expect --context to be provided' if context.blank?
+
+        files_mode = from_files.present? && (artifact.blank? && build_number.blank?)
+        deploy_artifact_mode = from_files.blank? && (artifact.present? && flavor.present? && build_number.present?)
+
+        if !files_mode && !deploy_artifact_mode
+          raise ArgumentError, 'Expect either --from-files or all of [--artifact, --flavor, --build] to be provided'
+        end
+      end
+    end
+
+    def parse(args)
+      @options = Options.new
+      OptionParser.new do |parser|
+        @options.define_options(parser)
+        parser.parse(args)
+        @options.require_options
+      end
+      @options
+    end
+  end
+end
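A sketch of how the parser is driven (flag values are placeholders); `require_options` enforces that callers pass either `--from-files` or the full `--artifact`/`--flavor`/`--build` triple:

```ruby
require 'kube_deploy_tools/deploy'          # defines Deploy, reopened by Optparser
require 'kube_deploy_tools/deploy/options'

options = KubeDeployTools::Deploy::Optparser.new.parse(
  %w[--context my-cluster --from-files manifests/ --dry-run true]
)
options.context  # => "my-cluster"
options.dry_run  # => true
```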
data/lib/kube_deploy_tools/deploy_config_file.rb
@@ -0,0 +1,286 @@
+require 'pathname'
+require 'set'
+require 'yaml'
+
+require 'kube_deploy_tools/deploy_config_file/util'
+require 'kube_deploy_tools/deploy_config_file/deep_merge'
+require 'kube_deploy_tools/formatted_logger'
+require 'kube_deploy_tools/image_registry'
+require 'kube_deploy_tools/shellrunner'
+require 'kube_deploy_tools/artifact_registry'
+
+DEPLOY_YAML = 'deploy.yaml'
+
+module KubeDeployTools
+  PROJECT = ENV['JOB_NAME'] || File.basename(`git config remote.origin.url`.chomp, '.git')
+  BUILD_NUMBER = ENV.fetch('BUILD_ID', 'dev')
+
+  # Read-only model for the deploy.yaml configuration file.
+  class DeployConfigFile
+    attr_accessor :artifacts, :default_flags, :flavors, :hooks, :image_registries, :valid_image_registries, :expiration, :artifact_registries, :artifact_registry
+
+    include DeployConfigFileUtil
+
+    # TODO(joshk): Refactor into initialize(fp) which takes a file-like object;
+    # after this, auto discovery should go into DeployConfigFile.locate
+    # classmethod. This would require erasing auto-upgrade capability, which
+    # should be possible if we major version bump.
+    def initialize(filename)
+      config = nil
+      if !filename.nil? && Pathname.new(filename).absolute?
+        config = YAML.load_file(filename)
+      else
+        original_dir = Dir.pwd
+        changed_dir = false
+        until Dir.pwd == '/'
+          # Try looking for filename specified by user.
+          # If no filename was specified by the user, then look for
+          # deploy.yml or deploy.yaml.
+          if !filename.nil? && File.exist?(filename)
+            config = YAML.load_file(filename)
+            break
+          elsif filename.nil? && File.exist?(DEPLOY_YAML)
+            filename = DEPLOY_YAML
+            config = YAML.load_file(filename)
+            break
+          end
+
+          # KDT should run in the directory containing the deploy config file.
+          changed_dir = true
+          Dir.chdir('..')
+        end
+        if config.nil?
+          Dir.chdir(original_dir)
+          if ! filename.nil?
+            raise "Could not locate file: config file '#{filename}' in any directory"
+          else
+            raise "Could not locate file: config file '#{DEPLOY_YAML}' in any directory"
+          end
+        end
+        if changed_dir
+          Logger.warn "Changed directory to #{Dir.pwd} (location of #{filename})"
+        end
+      end
+      @filename = filename
+      @original_config = config
+
+      version = config.fetch('version', 1)
+      check_and_warn(
+        config.has_key?('version'),
+        'Expected .version to be specified, but .version is missing. Falling back to version 1 config schema')
+      check_and_err([1, 2].include?(version), "Expected valid version, but received unsupported version '#{version}'")
+
+      case version
+      when 2
+        fetch_and_parse_version2_config!
+      else
+        raise "Unsupported version #{version}"
+      end
+    end
+
+    def fetch_and_parse_version2_config!
+      # The literal contents of your deploy.yaml are now populated into |self|.
+      config = @original_config
+      @image_registries = parse_image_registries(config.fetch('image_registries', []))
+      @default_flags = config.fetch('default_flags', {})
+      @artifacts = config.fetch('artifacts', [])
+      @flavors = config.fetch('flavors', {})
+      @hooks = config.fetch('hooks', ['default'])
+      @expiration = config.fetch('expiration', [])
+      @artifact_registries = parse_artifact_registries(config.fetch('artifact_registries', []))
+      @artifact_registry = parse_artifact_registry(config.fetch('artifact_registry', ''), @artifact_registries)
+
+      validate_default_flags
+      validate_flavors
+      validate_hooks
+      validate_expiration
+
+      # Augment these literal contents by resolving all libraries.
+      # extend! typically gives the current file precedence when merge conflicts occur,
+      # but the expected precedence of library inclusion is the reverse (library 2 should
+      # overwrite what library 1 specifies), so reverse the libraries list first.
+      config.fetch('libraries', []).reverse.each do |libfn|
+        extend!(load_library(libfn))
+      end
+
+      # Now that we have a complete list of image registries, validation is now possible.
+      # Note that this also populates @valid_image_registries.
+      validate_artifacts!
+    end
+
+    def parse_image_registries(image_registries)
+      check_and_err(image_registries.is_a?(Array), '.image_registries is not an Array')
+      image_registries = image_registries.map { |i| ImageRegistry.new(i) }
+
+      # Validate that only one instance of each driver is registered
+      duplicates = select_duplicates(image_registries.map { |i| i.name })
+      check_and_err(
+        duplicates.count == 0,
+        "Expected .image_registries names to be unique, but found duplicates: #{duplicates}"
+      )
+
+      image_registries
+        .map { |i| [i.name, i] }
+        .to_h
+    end
+
+    def map_image_registry(image_registries)
+      valid_image_registries = {}
+      image_registries.each do |reg_name, reg_info|
+        valid_image_registries[reg_name] = reg_info.prefix
+      end
+      valid_image_registries
+    end
+
+    # .artifacts depends on .default_flags and .image_registries
+    def validate_artifacts!
+      check_and_err(artifacts.is_a?(Array), '.artifacts is not an Array')
+
+      duplicates = select_duplicates(artifacts.map { |i| i.fetch('name') })
+      check_and_err(
+        duplicates.count == 0,
+        "Expected .artifacts names to be unique, but found duplicates: #{duplicates}"
+      )
+
+      @valid_image_registries = map_image_registry(@image_registries)
+
+      artifacts.each_with_index { |artifact, index|
+        check_and_err(
+          artifact.key?('name'),
+          "Expected .artifacts[#{index}].name key to exist, but .name is missing"
+        )
+        name = artifact.fetch('name')
+        check_and_err(
+          artifact.key?('image_registry'),
+          "Expected .artifacts[#{index}].image_registry key to exist, but .image_registry is missing"
+        )
+
+        image_registry = artifact.fetch('image_registry')
+        check_and_err(
+          @valid_image_registries.key?(image_registry),
+          "#{image_registry} is not a valid Image Registry. Has to be one of #{@valid_image_registries.keys}"
+        )
+
+        check_and_err(
+          artifact.key?('flags'),
+          "Expected .artifacts.#{name}.flags key to exist, but .flags is missing"
+        )
+      }
+    end
+
+    def validate_default_flags
+      check_and_err(@default_flags.is_a?(Hash), '.default_flags is not a Hash')
+    end
+
+    def validate_flavors
+      check_and_err(@flavors.is_a?(Hash), '.flavors is not a Hash')
+    end
+
+    def validate_hooks
+      check_and_err(@hooks.is_a?(Array), '.hooks is not an Array')
+    end
+
+    def validate_expiration
+      check_and_err(@expiration.is_a?(Array), '.expiration is not an Array')
+    end
+
+    def parse_artifact_registries(artifact_registries)
+      check_and_err(artifact_registries.is_a?(Array), '.artifact_registries is not an Array')
+      artifact_registries = artifact_registries.map { |r| ArtifactRegistry.new(r) }
+
+      # Validate that each artifact registry is named uniquely
+      duplicates = select_duplicates(artifact_registries.map { |r| r.name })
+      check_and_err(
+        duplicates.count == 0,
+        "Expected .artifact_registries names to be unique, but found duplicates: #{duplicates}"
+      )
+
+      unsupported_drivers = artifact_registries.
+        select { |r| !ArtifactRegistry::Driver::MAPPINGS.key? r.driver_name }.
+        map { |r| r.driver_name }
+      check_and_err(
+        unsupported_drivers.count == 0,
+        "Expected .artifact_registries drivers to be valid, but found unsupported drivers: #{unsupported_drivers}. Must be a driver in: #{ArtifactRegistry::Driver::MAPPINGS.keys}",
+      )
+
+      artifact_registries
+        .select { |r| r.driver_name == "gcs" }
+        .select { |r| !r.config.has_key? "bucket" }
+        .each { |r| check_and_err(false, "Expected .artifact_registries['#{r.config.name}'].config.bucket to exist, but no GCS bucket is specified") }
+
+      artifact_registries
+        .map { |r| [r.name, r] }
+        .to_h
+    end
+
+    def parse_artifact_registry(artifact_registry, artifact_registries)
+      check_and_err(artifact_registry.is_a?(String), '.artifact_registry is not a String')
+      check_and_err(
+        artifact_registry.empty? || artifact_registries.key?(artifact_registry),
+        "#{artifact_registry} is not a valid Artifact Registry. Has to be one of #{artifact_registries.keys}"
+      )
+
+      artifact_registry
+    end
+
+    # upgrade! converts the config to a YAML string in the format
+    # of the latest supported version
+    # e.g. with the latest supported version as v2,
+    # to_yaml will always print a valid v2 YAML
+    def upgrade!
+      version = @original_config.fetch('version', 1)
+      case version
+      when 2
+        # TODO(joshk): Any required updates to v3 or remove this entire method
+        true
+      end
+    end
+
+    def select_duplicates(array)
+      array.select { |n| array.count(n) > 1 }.uniq
+    end
+
+    # Extend this DeployConfigFile with another instance.
+    def extend!(other)
+      # Any image_registries entry in |self| should take precedence
+      # over any identical key in |other|. The behavior of merge is that
+      # the 'other' hash wins.
+      @image_registries = other.image_registries.merge(@image_registries)
+
+      # Same behavior as above for #default_flags.
+      @default_flags = other.default_flags.merge(@default_flags)
+
+      # artifacts should be merged by 'name'. In other words, if |self| and |other|
+      # specify the same 'name' of a registry, self's config for that registry
+      # should win wholesale (no merging of flags.)
+      @artifacts = (@artifacts + other.artifacts).uniq { |h| h.fetch('name') }
+
+      # Same behavior as for flags and registries, but the flags within the flavor
+      # are in a Hash, so we need a deep merge.
+      @flavors = other.flavors.deep_merge(@flavors)
+
+      # A break from the preceding merging logic - Dependent hooks have to come
+      # first and a given named hook can only be run once. But seriously, you
+      # probably don't want to make a library that specifies hooks.
+      @hooks = (other.hooks + @hooks).uniq
+
+      @expiration = (@expiration + other.expiration).uniq { |h| h.fetch('repository') }
+    end
+
+    def to_h
+      {
+        'image_registries' => @image_registries.values.map(&:to_h),
+        'default_flags' => @default_flags,
+        'artifacts' => @artifacts,
+        'flavors' => @flavors,
+        'hooks' => @hooks,
+        'expiration' => @expiration,
+      }
+    end
+
+    def self.deep_merge(h, other)
+
+    end
+  end
+end
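To make the version 2 schema concrete, a hypothetical minimal `deploy.yaml` sketched from the keys this class fetches and validates; the registry name, `driver`, and `prefix` values are invented placeholders (a `noop` image-registry driver does ship in this gem), so treat the commented YAML as an assumption, not the documented schema:

```ruby
require 'kube_deploy_tools/deploy_config_file'

# Hypothetical deploy.yaml, matching fetch_and_parse_version2_config!:
#
#   version: 2
#   image_registries:
#     - name: local             # names must be unique
#       driver: noop            # assumed key; see image_registry/driver/noop.rb
#       prefix: local-registry  # assumed key; exposed as ImageRegistry#prefix
#   artifacts:
#     - name: default           # required; must be unique
#       image_registry: local   # required; must name a registry above
#       flags: {}               # required
#   flavors:
#     default: {}
#
# Passing nil searches upward from the current directory for deploy.yaml.
config = KubeDeployTools::DeployConfigFile.new(nil)
puts config.to_h.to_yaml
```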