minfra-cli 0.1.0

Files changed (60)
  1. checksums.yaml +7 -0
  2. data/.dockerignore +12 -0
  3. data/.gitignore +16 -0
  4. data/.rspec +2 -0
  5. data/CHANGELOG.md +2 -0
  6. data/Dockerfile +12 -0
  7. data/bin/build +20 -0
  8. data/bin/console +16 -0
  9. data/bin/container_exec +9 -0
  10. data/bin/run_tests +74 -0
  11. data/bin/setup.sh +22 -0
  12. data/exe/minfra +6 -0
  13. data/lib/deep_merge.rb +149 -0
  14. data/lib/hash.rb +28 -0
  15. data/lib/minfra/cli/ask.rb +43 -0
  16. data/lib/minfra/cli/command.rb +35 -0
  17. data/lib/minfra/cli/commands/dev.rb +54 -0
  18. data/lib/minfra/cli/commands/kube.rb +279 -0
  19. data/lib/minfra/cli/commands/project/branch.rb +17 -0
  20. data/lib/minfra/cli/commands/project/tag.rb +40 -0
  21. data/lib/minfra/cli/commands/project.rb +113 -0
  22. data/lib/minfra/cli/commands/setup.rb +49 -0
  23. data/lib/minfra/cli/commands/stack/app_template.rb +65 -0
  24. data/lib/minfra/cli/commands/stack/client_template.rb +36 -0
  25. data/lib/minfra/cli/commands/stack/kube_stack_template.rb +94 -0
  26. data/lib/minfra/cli/commands/stack.rb +120 -0
  27. data/lib/minfra/cli/commands/tag.rb +86 -0
  28. data/lib/minfra/cli/common.rb +41 -0
  29. data/lib/minfra/cli/config.rb +111 -0
  30. data/lib/minfra/cli/document.rb +19 -0
  31. data/lib/minfra/cli/hook.rb +65 -0
  32. data/lib/minfra/cli/logging.rb +26 -0
  33. data/lib/minfra/cli/main_command.rb +32 -0
  34. data/lib/minfra/cli/plugins.rb +34 -0
  35. data/lib/minfra/cli/runner.rb +59 -0
  36. data/lib/minfra/cli/templater.rb +63 -0
  37. data/lib/minfra/cli/version.rb +5 -0
  38. data/lib/minfra/cli.rb +80 -0
  39. data/lib/orchparty/ast.rb +53 -0
  40. data/lib/orchparty/cli.rb +69 -0
  41. data/lib/orchparty/context.rb +22 -0
  42. data/lib/orchparty/dsl_parser.rb +229 -0
  43. data/lib/orchparty/dsl_parser_kubernetes.rb +361 -0
  44. data/lib/orchparty/kubernetes_application.rb +305 -0
  45. data/lib/orchparty/plugin.rb +24 -0
  46. data/lib/orchparty/plugins/env.rb +41 -0
  47. data/lib/orchparty/transformations/all.rb +18 -0
  48. data/lib/orchparty/transformations/mixin.rb +73 -0
  49. data/lib/orchparty/transformations/remove_internal.rb +16 -0
  50. data/lib/orchparty/transformations/sort.rb +10 -0
  51. data/lib/orchparty/transformations/variable.rb +56 -0
  52. data/lib/orchparty/transformations.rb +24 -0
  53. data/lib/orchparty/version.rb +3 -0
  54. data/lib/orchparty.rb +59 -0
  55. data/minfra-cli.gemspec +40 -0
  56. data/project.json +7 -0
  57. data/templates/kind.yaml.erb +33 -0
  58. data/templates/kube_config.yaml.erb +7 -0
  59. data/templates/minfra_config.json.erb +26 -0
  60. metadata +196 -0
data/lib/minfra/cli/commands/kube.rb
@@ -0,0 +1,279 @@
+ require 'yaml'
+ module Minfra
+   module Cli
+     class Kube
+       include Minfra::Cli::Common
+       include Logging
+
+       attr_reader :options, :env_config, :config
+
+       def initialize(options, config)
+         @options = options
+         @config = config
+         @env_config = config.orch_env_config
+       end
+
+       def dashboard(stack_name, env, deployment, cluster)
+         stack = init(stack_name, env, deployment, cluster)
+         exec("k9s --kubeconfig #{kube_config_path} --context #{stack.cluster_name} --namespace #{stack_name} --command pod")
+       end
+
+       def restart
+         run %{docker start #{kind_name}-control-plane}
+         run %{docker exec #{kind_name}-control-plane bash -c "sed -e '/nameserver 127.0.0.11/ s/^#*/#/' /etc/resolv.conf | cat - >> /etc/resolv.conf"}
+         run %{docker exec #{kind_name}-control-plane bash -c "echo nameserver 8.8.8.8 >> /etc/resolv.conf"}
+         run %{docker exec #{kind_name}-control-plane bash -c "echo nameserver 8.8.4.4 >> /etc/resolv.conf"}
+       end
+
+       def create
+         STDOUT.sync = true
+
+         network_mask = @config.project.kind.network.mask
+         gateway_ip = @config.project.kind.network.gateway
+         panel_ip = @config.project.kind.panel.ip
+
+         info "step: creating network #{kind_name} #{network_mask} gw #{gateway_ip}"
+         # run(%{docker network inspect kind | grep "Subnet"}, exit_on_error: false).success?
+         run(%{docker network rm #{kind_name}}, exit_on_error: false)
+
+         run(%{docker network create --gateway #{gateway_ip} --subnet=#{network_mask} #{kind_name}}, exit_on_error: true)
+
+         info "step: creating '#{kind_name}' kind cluster (can take some minutes)"
+         kind_kube_path = Runner.run('echo $KUBECONFIG').to_s.strip
+         info run(%{KIND_EXPERIMENTAL_DOCKER_NETWORK=#{kind_name} kind create cluster --name "#{kind_name}" --config #{@config.kind_config_path}})
+
+         info "step: configuring kind"
+         run %{docker exec #{kind_name}-control-plane bash -c "sed -e '/nameserver 127.0.0.11/ s/^#*/#/' /etc/resolv.conf | cat - >> /etc/resolv.conf"}
+         run %{docker exec #{kind_name}-control-plane bash -c "echo nameserver 8.8.8.8 >> /etc/resolv.conf"}
+         run %{docker exec #{kind_name}-control-plane bash -c "echo nameserver 8.8.4.4 >> /etc/resolv.conf"}
+
+         configs = [YAML.load(File.read(kind_kube_path))]
+
+         existing_config = YAML.load(File.read(kube_config_path))
+
+         existing_config["clusters"] = existing_config["clusters"].reject { |c| configs.map { |k| k["clusters"] }.flatten.map { |n| n["name"] }.include?(c["name"]) }.concat(configs.map { |k| k["clusters"] }.flatten)
+         existing_config["users"] = existing_config["users"].reject { |c| configs.map { |k| k["users"] }.flatten.map { |n| n["name"] }.include?(c["name"]) }.concat(configs.map { |k| k["users"] }.flatten).uniq { |k| k["name"] }
+         existing_config["contexts"] = existing_config["contexts"].reject { |c| configs.map { |k| k["contexts"] }.flatten.map { |n| n["name"] }.include?(c["name"]) }.concat(configs.map { |k| k["contexts"] }.flatten)
+         File.write(@config.kube_config_path, YAML.dump(existing_config))
+
+         info "step: starting kind"
+         run_kubectl %{ config use-context kind-#{kind_name} }
+
+         run_kubectl %{create clusterrolebinding default-admin --serviceaccount=kube-system:default --clusterrole=cluster-admin}
+
+         # info "step: attaching newly created kind cluster to its own docker network"
+         # info run(%{docker network connect #{kind_name} #{kind_name}-control-plane --ip #{panel_ip}})
+       end
+
+       def push(image)
+         run %{kind load docker-image --name #{kind_name} #{image}}
+       end
+
+       def destroy_dev_cluster
+         run %(kind delete cluster --name #{kind_name})
+         run(%(docker rm -f #{kind_name}-control-plane), exit_on_error: false)
+         run(%(docker network rm #{kind_name}), exit_on_error: false)
+       end
+
+       def deploy(stack_name, reason_message)
+         # TBD: options is global, avoid it!
+
+         test = options[:test]
+         stack = init(stack_name,
+                      options[:environment],
+                      options[:deployment],
+                      options[:cluster])
+         cluster = stack.cluster_name
+
+         method = options["install"] ? "install" : "upgrade"
+         File.open(stack.compose_path(blank: true), "w") do |f|
+           Orchparty.print(cluster_name: cluster,
+                           application_name: stack.name,
+                           force_variable_definition: false,
+                           file_name: stack.stack_rb_path.to_s,
+                           method: method,
+                           options: options,
+                           out_io: f)
+         end
+         # run_cmd(generate_cmd, :bash)
+         bash_cmd = ["cd #{stack.release_path}"]
+         bash_cmd << "cp #{stack.compose_path(blank: true)} #{stack.compose_path}"
+         run_cmd(bash_cmd, :bash)
+
+         run_cmd(["cd #{stack.release_path}",
+                  "git --no-pager diff #{stack.compose_path}",
+                 ], :bash, silence: true)
+
+         errors = stack.check_plan
+         unless errors.empty?
+           if config['force_mem']
+             exit_error(errors.join("\n"))
+           else
+             warn(errors.join("\n"))
+           end
+         end
+
+         unless test
+           unless @config.dev?
+             unless Ask.boolean("Are the changes ok?")
+               exit_error("Deployment aborted!")
+             end
+           end
+
+           # deploy_cmd = bash_cmd
+           # deploy_cmd << "#{env_cmd} orchparty #{method} -c #{cluster} -f #{stack.stack_rb_path} -a #{stack.name}"
+
+           reason_message = tag_changed_to(release_path: stack.release_path) if reason_message.blank?
+
+           message = "deploying stack #{stack.name}: #{reason_message}."
+           Minfra::Cli::Document.document(@config, "started #{message}")
+           Orchparty.send(method, cluster_name: cluster, application_name: stack.name, force_variable_definition: false, file_name: stack.stack_rb_path.to_s, options: options)
+           Minfra::Cli::Document.document(@config, "finished #{message}")
+         end
+       end
+
+       def rollback(stack_name, env, deployment, cluster)
+         stack = init(stack_name,
+                      env,
+                      deployment,
+                      cluster)
+
+         cluster = stack.cluster_name
+
+         extra_args = args.dup
+         extra_args.delete("rollback")
+
+         extra_args = extra_args.join(' ')
+
+         cmd = "helm --kube-context #{cluster} rollback #{options[:stack]} #{extra_args}"
+         # puts cmd
+         run_cmd(cmd, :exec)
+       end
+
+       def list
+         puts run_helm(%{list --all-namespaces})
+       end
+
+       def destroy(stack_name)
+         stack = init(stack_name,
+                      options[:environment],
+                      options[:deployment],
+                      options[:cluster])
+         # Our convention is that the stack name is the namespace in helm.
+         # Sometimes the helm release name is the same as the stack name (usually in the normal stacks).
+         # However, stacks that combine multiple helm charts have release names different from the stack name, which is why we list them all in a sub-command.
+         run_helm(%{uninstall --namespace #{stack_name} --kube-context #{stack.cluster_name} $(helm list --namespace #{stack_name} --kube-context #{stack.cluster_name} --short)})
+       end
+
+       def kubectl_command(args)
+         unless options['stack']
+           exit_error("You must specify a stack name (--stack).")
+         end
+
+         subcommand = args.shift
+
+         if ['exec', 'logs'].include?(subcommand)
+           resource = nil
+           implicit_resource = 'pod'
+         else
+           resource = args.shift
+         end
+
+         stack = init(options[:stack],
+                      options[:environment],
+                      options[:deployment],
+                      options[:cluster])
+
+         cluster = stack.cluster_name
+
+         if [resource, implicit_resource].include?('pod') && ['delete', 'describe', 'exec', 'logs'].include?(subcommand)
+           cmd_get_pods = "kubectl --kubeconfig #{kube_config_path} --context #{cluster} --namespace #{options[:stack]} get pod -o jsonpath='{range .items[*]}{.metadata.name}{\"\\n\"}'"
+
+           pods_list = run_cmd(cmd_get_pods).split("\n")
+
+           fuzzy_pod_name = args.shift
+
+           matching_pods = pods_list.select { |p| p.include?(fuzzy_pod_name) }
+
+           if matching_pods.empty?
+             exit_error("Could not find any pods that have '#{fuzzy_pod_name}' in their name.")
+           end
+
+           position = 0
+           if options[:position]
+             p = options[:position].to_i
+             if p <= matching_pods.size
+               position = p - 1
+             else
+               exit_error("You specified '--position #{options[:position]}' but only #{matching_pods.size} pods matched the name.")
+             end
+           end
+
+           pod_name = matching_pods[position]
+         end
+
+         extra_args = args.dup
+
+         if subcommand == 'exec'
+           subcommand = 'exec -ti'
+           extra_args << '--' << 'bash'
+         end
+
+         extra_args = extra_args.join(' ')
+
+         cmd = "kubectl --kubeconfig #{kube_config_path} --context #{cluster} --namespace #{options[:stack]} #{subcommand} #{resource} #{pod_name} #{extra_args}"
+         # puts cmd
+         run_cmd(cmd, :exec)
+       end
+
+       private
+
+       def tag_changed_to(release_path:)
+         return '' if @config.dev? # We don't use messages in dev
+
+         diff = run_cmd("cd #{release_path} && git --no-pager diff --unified=0 tags.json").split %r{(\d{4}_\d{2}_\d{2}T\d{2}_\d{2}_\d{2}Z)}
+
+         raise ArgumentError.new "#{release_path}/tags.json has not changed - supply message" if diff.empty?
+
+         diff[3]
+       end
+
+       def init(stack_name, env, deployment, explicit_cluster)
+         template = Minfra::Cli::StackM::KubeStackTemplate.new(stack_name,
+                                                               config,
+                                                               deployment: deployment,
+                                                               cluster: explicit_cluster)
+
+         # exit_error(template.error_message) unless template.valid?
+         template
+       end
+
+       def kube_config_path
+         @config.kube_config_path
+       end
+
+       def run_kubectl(cmd)
+         # run(%{kubectl --kubeconfig #{kube_config_path} #{cmd}})
+         run(%{kubectl #{cmd}})
+       end
+
+       def run_helm(cmd)
+         # run(%{helm --kubeconfig #{kube_config_path} --home #{helm_path} #{cmd}})
+         run(%{helm #{cmd}})
+       end
+
+       def helm_path
+         @config.me_path.join('helm')
+       end
+
+       def kind_name
+         @config.name
+       end
+
+       def run(cmd, **args)
+         Runner.run(cmd, **args)
+       end
+     end
+   end
+ end
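
The kubeconfig handling in Kube#create above merges the kind-generated config into the existing file by dropping entries that share a name with the new ones and appending the fresh entries. A minimal, self-contained sketch of that merge-by-name idea, assuming plain Ruby hashes with hypothetical entry names:

require 'yaml'

existing = { "clusters" => [{ "name" => "old-cluster" }, { "name" => "kind-dev" }] }
incoming = [{ "clusters" => [{ "name" => "kind-dev", "cluster" => { "server" => "https://127.0.0.1:6443" } }] }]

incoming_names = incoming.flat_map { |c| c["clusters"] }.map { |c| c["name"] }
kept = existing["clusters"].reject { |c| incoming_names.include?(c["name"]) } # drop entries being replaced
existing["clusters"] = kept + incoming.flat_map { |c| c["clusters"] }         # append the freshly generated ones

puts YAML.dump(existing) # "kind-dev" is replaced, "old-cluster" is kept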
data/lib/minfra/cli/commands/project/branch.rb
@@ -0,0 +1,17 @@
+ module Minfra
+   module Cli
+     class Project < Command
+       class Branch < Command
+
+         desc "create 'story desc'", 'create branch'
+         def create(story_desc)
+           story_desc = story_desc.gsub(/[^0-9a-z]/i, '_')
+           email = `git config user.email`
+           fullname = email.split('@').first
+           name = fullname[0] + fullname.split('.').last
+           Runner.run("git checkout -b #{name}_#{story_desc}_$(date +%Y%m%d)")
+         end
+       end
+     end
+   end
+ end
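
For reference, a worked example of the branch-name derivation in Branch#create, using a hypothetical git user and story description:

email      = "jane.doe@example.com\n"                 # what `git config user.email` returns (note the newline)
story_desc = "Fix login #42".gsub(/[^0-9a-z]/i, '_')  # => "Fix_login__42"
fullname   = email.split('@').first                   # => "jane.doe"
name       = fullname[0] + fullname.split('.').last   # => "jdoe"
puts "git checkout -b #{name}_#{story_desc}_#{Time.now.strftime('%Y%m%d')}"
# => git checkout -b jdoe_Fix_login__42_<YYYYMMDD>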
data/lib/minfra/cli/commands/project/tag.rb
@@ -0,0 +1,40 @@
+ module Minfra
+   module Cli
+     class Project < Command
+       class Tag < Command
+
+         desc "update", 'update stack tag file'
+         option "environment", aliases: ['-e'], required: true
+         def update(domain, new_tag)
+           tags = JSON.parse(File.read(tags_path))
+
+           raise ArgumentError.new "#{tags_path} doesn't contain #{domain}" unless tags[options[:environment]].has_key?(domain)
+
+           tags[options[:environment]][domain] = new_tag
+           pretty_tags = JSON.pretty_unparse(tags)
+           File.write(tags_path, pretty_tags + "\n")
+           puts "#{tags_path} - UPDATED"
+           puts pretty_tags
+         end
+
+         private
+
+         def tags_path
+           apps_path.join('stacks', stack_name, 'tags.json')
+         end
+
+         def apps_path
+           minfra_config.base_path
+         end
+
+         def stack_name
+           current_directory.split('/').last.gsub('_', '-')
+         end
+
+         def current_directory
+           Dir.getwd
+         end
+       end
+     end
+   end
+ end
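
Tag#update assumes a tags.json keyed first by environment, then by domain. A hypothetical layout and the effect of one update (names are illustrative; the timestamp format follows the pattern matched by tag_changed_to above):

require 'json'

tags = {
  "staging"    => { "api" => "2021_01_01T10_00_00Z", "frontend" => "2021_01_01T10_00_00Z" },
  "production" => { "api" => "2020_12_24T12_00_00Z", "frontend" => "2020_12_24T12_00_00Z" }
}

# e.g. update('api', '2021_02_02T08_30_00Z') with -e staging boils down to:
tags["staging"]["api"] = "2021_02_02T08_30_00Z"
puts JSON.pretty_generate(tags)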
data/lib/minfra/cli/commands/project.rb
@@ -0,0 +1,113 @@
+ require_relative 'project/branch'
+ require_relative 'project/tag'
+
+ module Minfra
+   module Cli
+     class Project < Command
+       class ProjectInfo
+         def self.load(app_dir)
+           new(app_dir)
+         end
+         attr_reader :app_dir
+
+         def initialize(app_dir)
+           @app_dir = app_dir
+           @project_file_path = app_dir.join('project.json')
+           @info = Hashie::Mash.new(JSON.parse(File.read(@project_file_path)))
+         end
+         def repo_name
+           "#{docker.repo}/#{docker.name}"
+         end
+         def name
+           @info['project']
+         end
+         def method_missing(method)
+           @info.send(method)
+         end
+         def inspect
+           @info.inspect
+         end
+       end
+
+       desc 'branch', 'manage branches'
+       subcommand 'branch', Branch
+
+       desc 'tag', 'manage tags'
+       subcommand 'tag', Tag
+
+       desc "test", 'run tests'
+       def test
+         ARGV.delete('project') # ARGV is passed along to `rspec` call
+         ARGV.delete('test')
+
+         if File.exist?('./bin/run_tests')
+           # config = Config.load('staging')
+           project = ProjectInfo.load(Pathname.pwd)
+           # Minfra::Cli::Document.document(config, "Using project specific ./bin/run_tests in #{project.name}")
+           debug "Using project specific ./bin/run_tests in #{project.name}"
+           system('./bin/run_tests', out: $stdout, err: :out)
+         else
+           require_relative '../../generic/bin/run_tests'
+         end
+       end
+
+       desc "build", "build a local build"
+       option "noload", aliases: ['-n']
+       option "target", aliases: ['-t']
+       def build
+         p = ProjectInfo.load(Pathname.pwd)
+         run_pre_repo
+         if options[:target]
+           target = options[:target]
+         else
+           target = p.docker.dev_target
+         end
+
+         cmd = %{docker build #{"--target #{target}" if target} -t #{p.repo_name}:latest #{p.app_dir}}
+         # Runner.run(cmd) # this gives us no output ... don't like that
+         puts "running: #{cmd}"
+         system(cmd) || exit(1) # backticks always return a (truthy) string, so `...` || exit(1) never aborted on failure
+         unless options[:noload]
+           debug("loading into KIND")
+           Runner.run(%{kind load docker-image #{p.repo_name}:latest --name #{@minfra_config.name}})
+         end
+       end
+
+       desc "exec", "execute a command (bash is default in the container)"
+       def exec(cmd = '/bin/bash')
+         p = ProjectInfo.load(Pathname.pwd)
+         run_pre_repo
+         Kernel.exec(%{docker run -ti --rm -v #{p.app_dir}:/code #{p.repo_name}:latest #{cmd}})
+       end
+
+       desc "push", "push directly to the repo"
+       option 'tag', aliases: ['-t']
+       option 'registry', aliases: ['-r']
+       def push
+         tag = options[:tag] || `date +%Y%m%d%H%M`.strip # strip the trailing newline from the shell output
+         p = ProjectInfo.load(Pathname.pwd)
+
+         repo_name = if options[:registry]
+                       "#{options[:registry]}/#{p.repo_name}"
+                     else
+                       p.repo_name
+                     end
+
+         Runner.run(%{docker build -t #{p.repo_name}:latest #{p.app_dir}})
+         # Runner.run(%{docker push #{p.repo_name}})
+         Runner.run(%{docker tag #{p.repo_name}:latest #{repo_name}:#{tag}})
+         Runner.run(%{docker push #{repo_name}:#{tag}})
+       end
+
+       private
+
+       def run_pre_repo
+         Runner.run(%{#{@minfra_config.base_path.join('hooks', 'pre_repo.sh')}})
+       end
+     end
+   end
+ end
+
+ Minfra::Cli.register("project", "dealing with projects", Minfra::Cli::Project)
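
ProjectInfo reads a project.json next to the code; the keys used above are 'project', docker.repo, docker.name and docker.dev_target. A hypothetical file that satisfies them (the actual data/project.json in this gem is not shown here; all values below are made up):

require 'json'

project_json = {
  "project" => "example-service",
  "docker"  => {
    "repo"       => "example-registry/example-team",
    "name"       => "example-service",
    "dev_target" => "builder"
  }
}
puts JSON.pretty_generate(project_json)
# ProjectInfo#repo_name would then be "example-registry/example-team/example-service",
# and `build` without --target would use the "builder" stage.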
data/lib/minfra/cli/commands/setup.rb
@@ -0,0 +1,49 @@
+ require 'fileutils'
+ module Minfra
+   module Cli
+     class Setup < Thor # inherits from Thor, not from Command!
+       include Logging
+       include Hook
+
+       desc "setup", "creates a default config file on the host"
+       def dev
+         setup_config
+       end
+
+       private
+
+       def setup_config
+         config = Minfra::Cli.config
+         ensure_path_and_template(config.config_path, config.base_path.join('config', 'me_config.json.erb'))
+         config.init!
+         config.load('dev')
+         templates_path = Minfra::Cli.root_path.join("templates")
+         ensure_path_and_template(config.kube_config_path, templates_path.join('kube_config.yaml.erb'))
+         ensure_path_and_template(config.kind_config_path, config.base_path.join('config', 'kind.yaml.erb'),
+                                  { ip: config.project.kind.panel.ip,
+                                    name: config.project.name,
+                                    host_path: config.base_path })
+       end
+
+       def ensure_path_and_template(dest_path, template_path, params = {})
+         unless dest_path.exist?
+           if Ask.boolean("missing configuration at: #{dest_path}, should I create it?")
+             unless dest_path.dirname.exist?
+               info "Generating directory '#{dest_path.dirname}'"
+               FileUtils.mkdir_p(dest_path.dirname)
+             end
+             Ask.interactive_template(template_path, dest_path, params)
+           else
+             error "Leaving your filesystem untouched! But I have to stop!"
+             exit 1
+           end
+         else
+           info "SETUP CONFIG: checked #{dest_path}"
+         end
+       end
+     end
+   end
+ end
+
+ Minfra::Cli.register("setup", "Manage your dev setup.", Minfra::Cli::Setup)
data/lib/minfra/cli/commands/stack/app_template.rb
@@ -0,0 +1,65 @@
+ require 'pathname'
+
+ module Minfra
+   module Cli
+     module StackM
+       class AppTemplate
+         include ::Minfra::Cli::Logging
+
+         attr_reader :name, :env, :deployment, :app_path
+         def initialize(name, config)
+           @name = name
+           @path = config.stacks_path.join(name)
+           @app_path = @path.join('fxnet-app.json')
+           @errors = []
+           @config = config
+           @env = config.orch_env
+           @content = {}
+         end
+
+         def cluster_name
+           return @cluster_name if defined?(@cluster_name)
+           @cluster_name = @cluster
+           @cluster_name ||= "kind-#{@config.name}" if @config.dev?
+           if cluster_path.exist? && (@cluster_name.nil? || @cluster_name.empty?)
+             @cluster_name = YAML.load(File.read(cluster_path))[env.to_s]
+           end
+           unless @cluster_name
+             error "Cluster name unknown (not given explicitly and '#{cluster_path}' missing)"
+             exit 1
+           end
+           @cluster_name
+         end
+
+         def valid?
+           unless @path.exist?
+             @errors << "stack path #{@path} doesn't exist"
+           end
+
+           unless @app_path.exist?
+             @errors << "app file #{@app_path} doesn't exist"
+           end
+           @errors.empty?
+         end
+
+         def read
+           t = Minfra::Cli::Templater.new(File.read(@app_path))
+           @content = Hashie::Mash.new(JSON.parse(t.render({})))
+         end
+
+         def app
+           @content.app
+         end
+
+         def client
+           @content.client
+         end
+
+         def to_s
+           JSON.generate(@content, {indent: " ", object_nl: "\n"})
+         end
+       end
+     end
+   end
+ end
data/lib/minfra/cli/commands/stack/client_template.rb
@@ -0,0 +1,36 @@
+ require 'pathname'
+
+ module Minfra
+   module Cli
+     module StackM
+       class ClientTemplate
+         include ::Minfra::Cli::Logging
+
+         attr_reader :content, :path
+         def initialize(stack_name, client_name, config)
+           @stack_name = stack_name
+           @client_name = client_name
+           @config = OpenStruct.new
+           @path = config.stacks_path.join(stack_name, "fxnet-client-#{client_name}-#{config.orch_env}.json")
+           read
+         end
+
+         def exist?
+           @path.exist?
+         end
+
+         def read
+           if exist?
+             t = Minfra::Cli::Templater.new(File.read(@path))
+             @content = Hashie::Mash.new(JSON.parse(t.render({})))
+           end
+         end
+
+         def to_s
+           JSON.generate(@content, {indent: " ", object_nl: "\n"})
+         end
+       end
+     end
+   end
+ end
data/lib/minfra/cli/commands/stack/kube_stack_template.rb
@@ -0,0 +1,94 @@
+ require 'pathname'
+ module Minfra
+   module Cli
+     module StackM
+       class KubeStackTemplate
+         include ::Minfra::Cli::Logging
+
+         attr_reader :name, :env, :deployment
+         def initialize(name, config, deployment: '', cluster:)
+           @name = name
+           @path = config.stacks_path.join(name)
+           @errors = []
+           @config = config
+           @env = config.orch_env
+           @deployment = deployment.freeze
+           @cluster = cluster.freeze
+           puts "Stack selection: #{@name}, #{@path}, #{@cluster}"
+         end
+
+         def cluster_name
+           return @cluster_name if defined?(@cluster_name)
+           @cluster_name = @cluster
+           @cluster_name ||= "kind-#{@config.name}" if @config.dev?
+           if cluster_path.exist? && (@cluster_name.nil? || @cluster_name.empty?)
+             @cluster_name = YAML.load(File.read(cluster_path))[env.to_s]
+           end
+           @cluster_name ||= env
+           @cluster_name
+         end
+
+         def mixin_env
+           "#{@env}#{dashed(@deployment)}"
+         end
+
+         def valid?
+           unless @path.exist?
+             @errors << "stack path #{@path} doesn't exist"
+           end
+
+           unless stack_rb_path.exist?
+             @errors << "stack.rb file #{stack_rb_path} doesn't exist"
+           end
+           @errors.empty?
+         end
+
+         def stack_rb_path
+           release_path.join('stack.rb')
+         end
+
+         def cluster_path
+           release_path.join("cluster.yaml")
+         end
+
+         def compose_path(blank: false)
+           if blank
+             release_path.join("compose.yaml")
+           elsif @cluster
+             release_path.join("compose#{dashed(@cluster)}.yaml")
+           else
+             release_path.join("compose#{dashed(@env)}#{dashed(@deployment)}.yaml")
+           end
+         end
+
+         def error_message
+           @errors.join(";\n")
+         end
+
+         # we use a special file to flag that this stack is releasable to an environment
+         def releasable?
+           switch_path.exist?
+         end
+
+         def switch_path
+           release_path.join("#{@env}_#{rancher_stack_name}.sh")
+         end
+
+         def release_path
+           @path
+         end
+
+         def check_plan
+           errors = []
+           errors
+         end
+
+         private
+
+         def dashed(sth, set = nil)
+           sth.nil? || sth.empty? ? '' : "-#{set || sth}"
+         end
+       end
+     end
+   end
+ end
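
The compose file names produced by compose_path above come down to the dashed helper; a small sketch of the resulting names, with illustrative env/deployment/cluster values:

def dashed(sth, set = nil)
  sth.nil? || sth.empty? ? '' : "-#{set || sth}"
end

env, deployment = 'staging', 'eu'
puts "compose.yaml"                                     # compose_path(blank: true)
puts "compose#{dashed('shared-cluster')}.yaml"          # explicit cluster  => compose-shared-cluster.yaml
puts "compose#{dashed(env)}#{dashed(deployment)}.yaml"  # env + deployment  => compose-staging-eu.yaml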