kube-platform 3.3.1.gk.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (41) hide show
  1. checksums.yaml +7 -0
  2. data/Gemfile +31 -0
  3. data/README.md +192 -0
  4. data/bin/kube-platform +37 -0
  5. data/lib/kube-platform/application.rb +203 -0
  6. data/lib/kube-platform/cli.rb +114 -0
  7. data/lib/kube-platform/client.rb +217 -0
  8. data/lib/kube-platform/cluster.rb +224 -0
  9. data/lib/kube-platform/cluster_definition.rb +115 -0
  10. data/lib/kube-platform/configuration.rb +145 -0
  11. data/lib/kube-platform/exceptions.rb +9 -0
  12. data/lib/kube-platform/handlers/dockerhub_secret_copy.rb +52 -0
  13. data/lib/kube-platform/handlers/ebs_from_snapshot.rb +108 -0
  14. data/lib/kube-platform/handlers/handler.rb +36 -0
  15. data/lib/kube-platform/handlers/recreate_resource.rb +11 -0
  16. data/lib/kube-platform/handlers/secret_copy.rb +43 -0
  17. data/lib/kube-platform/handlers/wait_for_job_completion.rb +69 -0
  18. data/lib/kube-platform/handlers/wait_for_termination.rb +47 -0
  19. data/lib/kube-platform/health_check.rb +19 -0
  20. data/lib/kube-platform/health_checks/pods_ready.rb +188 -0
  21. data/lib/kube-platform/health_checks/r53_records.rb +82 -0
  22. data/lib/kube-platform/helpers/retry.rb +20 -0
  23. data/lib/kube-platform/images/descriptor.rb +49 -0
  24. data/lib/kube-platform/images/docker_hub_image.rb +49 -0
  25. data/lib/kube-platform/images/dockerhub_image_factory.rb +64 -0
  26. data/lib/kube-platform/images/kubernetes_docker_hub_secret_provider.rb +44 -0
  27. data/lib/kube-platform/images/repository.rb +77 -0
  28. data/lib/kube-platform/images/tag_associator.rb +80 -0
  29. data/lib/kube-platform/images/tagged_dockerhub_image.rb +36 -0
  30. data/lib/kube-platform/logger.rb +32 -0
  31. data/lib/kube-platform/manifest.rb +61 -0
  32. data/lib/kube-platform/pre_checks/r53_records.rb +66 -0
  33. data/lib/kube-platform/pre_checks/valid_platform_dependencies.rb +52 -0
  34. data/lib/kube-platform/pre_checks.rb +19 -0
  35. data/lib/kube-platform/resource.rb +152 -0
  36. data/lib/kube-platform/resource_repository.rb +73 -0
  37. data/lib/kube-platform/thor/descriptor_to_option_adapter.rb +33 -0
  38. data/lib/kube-platform/update_checker.rb +39 -0
  39. data/lib/kube-platform/version.rb +5 -0
  40. data/lib/kube-platform.rb +40 -0
  41. metadata +179 -0
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: f0fa30c6eab08a09c802b4448ddebbc4280426b5bc55f4ac6aaad4af29f90d2f
4
+ data.tar.gz: 974dcbd7dbf25549f1f05ddebb6d8f00b81a7fc89bc4721f5bb28ebcdd57fefe
5
+ SHA512:
6
+ metadata.gz: fa7223e94c5afe62c5fd00ee0212f3bbb6da855514827a0fa5a7b129208dcc95eb1af2bdcc4fb99fa9686878a65953decc8ef11b75e07ab0c59e3a26ebbf59a0
7
+ data.tar.gz: 76af5469a7b1098779d503ea0ae0c481e88a386d158e05366c42fc3f40361006b0e072d1cfeaadb93bde96fee9ef8fc2e2248475c0d311b254a78df286e4d5b0
data/Gemfile ADDED
@@ -0,0 +1,31 @@
# frozen_string_literal: true

PRIVATE_GEM_SERVER = "https://gem.fury.io/invoca"
PUBLIC_GEM_SERVER = "https://rubygems.org"

# Bundler requires a global default source even though every gem below is
# pinned to an explicit source block.
source PUBLIC_GEM_SERVER

source PUBLIC_GEM_SERVER do
  gem "openid_connect", "~> 2.2"
  gem "rack", ">= 2.2.6.1"
  gemspec
end

# Gems hosted on Invoca's private gem server.
source PRIVATE_GEM_SERVER do
  gem "invocaops_docker-render", "~> 1.0", ">= 1.0.1"
end

group :development do
  source PUBLIC_GEM_SERVER do
    gem "rspec", "~> 3.11"
    # Unversioned development tooling.
    %w[pry pry-byebug rake rspec_junit_formatter rubocop simplecov vcr webmock invoca-utils].each do |dev_gem|
      gem dev_gem
    end
  end
end
data/README.md ADDED
@@ -0,0 +1,192 @@
1
+ # kube-platform
2
+ A tool for managing Invoca developer platforms.
3
+
4
+ `kube-platform` can be run from any location that has a properly configured `kubectl` (configuring `kubectl` is outside the scope of this README). The currently selected context inside `$HOME/.kube/config` will be used by default, but can be overridden with the `--context` flag.
5
+
6
+ See the #dev-qa-infrastructure slack channel for further discussion.
7
+
8
+ ## Installation
9
+
10
+ 1. Follow the instructions for configuring `kubectl` on your local workstation: [kubectl configuration instructions](https://ringrevenue.atlassian.net/wiki/spaces/DEV/pages/86717012/Kubectl+Configuration).
11
+ 2. Install the Invoca tools Homebrew tap.
12
+ ```bash
13
+ $ brew tap invoca/tools git@github.com:Invoca/homebrew-tools.git
14
+ ```
15
+ 3. Install `kube-platform`. The initial install builds a dedicated Ruby, which adds 3-5 minutes to the install time.
16
+ ```
17
+ $ brew install kube-platform
18
+ ```
19
+ 4. [Configure Development AWS IAM account Credentials](https://ringrevenue.atlassian.net/wiki/spaces/DEV/pages/54001806/AWS+Dev+Environment+IAM+User+Guide#AWSDevEnvironmentIAMUserGuide-AWSCredentialsFile) on your local workstation
20
+
21
+ ### Upgrading
22
+
23
+ At times, changes will require that `kube-platform` be upgraded. When necessary you'll receive a message like this:
24
+
25
+ ```bash
26
+ This version of kube-platform is out-of-date: 1.1.0 is required, but running 1.0.0.
27
+ Please update kube-platform: brew update && brew upgrade kube-platform
28
+ ```
29
+
30
+ To upgrade, run
31
+
32
+ ```bash
33
+ brew update && brew upgrade kube-platform
34
+ ```
35
+
36
+ ## Platform Creation
37
+ To create a platform, run:
38
+
39
+ ```
40
+ $ kube-platform create <platform-name> [image tags] [options]
41
+ ```
42
+
43
+ `<platform-name>` corresponds to the Kubernetes namespace that the platform will be started in. A full list of available options can be viewed by running `kube-platform help create`. The [developer mode](#developer-mode), and [continuous testing](#continuous-testing) options are explained below.
44
+
45
+ Image tags for various containers can be provided on the command line. If an image tag is not provided for a container, `latest` is assumed, with one exception: if a tag is provided for `invocaops/web_app`, it will be the default tag for all containers started from `invocaops/web_app`.
46
+
47
+ To list the available tags, run `kube-platform help create`.
48
+
49
+ Platform creation failure? See the troubleshooting runbook in [confluence](https://ringrevenue.atlassian.net/wiki/spaces/DEV/pages/861831619/Cluster+Creation+Failure+-+Troubleshooting+Runbook)
50
+
51
+ ## Platform Update
52
+ To update the image tags of a running platform, run:
53
+
54
+ ```
55
+ $ kube-platform update <platform-name> [image tags]
56
+ ```
57
+
58
+ Any database migrations contained in the new `web` image will be applied before the containers are restarted.
59
+
60
+ ## Platform Destruction
61
+ To tear down your platform once you're done with it, run:
62
+
63
+ ```
64
+ $ kube-platform destroy <platform-name>
65
+ ```
66
+
67
+ ## Developer Mode
68
+
69
+ Applications within containers run as PID 1. When PID 1 exits, it signals to the container manager that the container has stopped, which results in it being torn down and restarted. When the container is restarted, any local changes made inside the container will be lost.
70
+
71
+ If there's a need to edit and restart the code inside a container, the application must not be running as PID 1. The `--dev-mode` flag runs the application inside a simple bash loop, which allows the process to be killed and restarted while maintaining the container's state.
72
+
73
+ Dev mode can be enabled during platform creation with the `--dev-mode` option.
74
+
75
+ ```
76
+ $ kube-platform create <namespace> --dev-mode [image tags] [options]
77
+ ```
78
+
79
+ After enabling dev mode, perform the following steps to restart a process:
80
+
81
+ Login to the container (eg. frontend)
82
+
83
+ ```
84
+ $ kubectl --namespace=<namespace> exec -it frontend-rc-<id> -c unicorn bash
85
+ ```
86
+
87
+ Verify the process is not running as PID 1. See the `unicorn master` PID in this example:
88
+
89
+ ```
90
+ root@frontend-rc-jx467:/srv/app# ps auxw
91
+ USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
92
+ root 1 0.0 0.0 18096 1656 ? Ss 23:40 0:00 /bin/bash /invoca_run.sh frontend
93
+ root 75 84.1 2.5 829512 385848 ? Sl 23:42 0:41 unicorn master -c /srv/app/config/unicorn_docker.rb -E staging
94
+ root 127 0.1 2.4 829512 380168 ? Sl 23:43 0:00 unicorn worker[0] -c /srv/app/config/unicorn_docker.rb -E staging
95
+ root 130 0.1 2.4 829512 380160 ? Sl 23:43 0:00 unicorn worker[1] -c /srv/app/config/unicorn_docker.rb -E staging
96
+ root 133 0.0 2.4 829512 380160 ? Sl 23:43 0:00 unicorn worker[2] -c /srv/app/config/unicorn_docker.rb -E staging
97
+ root 136 0.0 2.4 829512 380160 ? Sl 23:43 0:00 unicorn worker[3] -c /srv/app/config/unicorn_docker.rb -E staging
98
+ root 139 0.1 2.4 829512 380160 ? Sl 23:43 0:00 unicorn worker[4] -c /srv/app/config/unicorn_docker.rb -E staging
99
+ root 142 0.1 2.4 829512 380164 ? Sl 23:43 0:00 unicorn worker[5] -c /srv/app/config/unicorn_docker.rb -E staging
100
+ ```
101
+
102
+ Make any necessary changes to the code, configuration files, etc.
103
+
104
+ Kill the PID of the application, `unicorn master` in this example.
105
+
106
+ ```
107
+ root@frontend-rc-jx467:/srv/app# kill 75
108
+ ```
109
+
110
+ Observe that the unicorn master and its worker processes have restarted after a few seconds and that the container remains up.
111
+
112
+ ```
113
+ root@frontend-rc-jx467:/srv/app# ps auxw
114
+ USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
115
+ root 1 0.0 0.0 18096 1656 ? Ss 23:40 0:00 /bin/bash /invoca_run.sh frontend
116
+ root 157 0.0 0.0 18232 2092 ? Ss 23:43 0:00 bash
117
+ root 242 45.3 2.4 694728 384656 ? Sl 23:50 0:39 unicorn master -c /srv/app/config/unicorn_docker.rb -E staging
118
+ root 291 0.0 2.4 694728 378716 ? Sl 23:51 0:00 unicorn worker[0] -c /srv/app/config/unicorn_docker.rb -E staging
119
+ root 294 0.0 2.4 694728 378700 ? Sl 23:51 0:00 unicorn worker[1] -c /srv/app/config/unicorn_docker.rb -E staging
120
+ root 297 0.0 2.4 694728 378700 ? Sl 23:51 0:00 unicorn worker[2] -c /srv/app/config/unicorn_docker.rb -E staging
121
+ root 300 0.0 2.4 694728 378700 ? Sl 23:51 0:00 unicorn worker[3] -c /srv/app/config/unicorn_docker.rb -E staging
122
+ root 303 0.0 2.4 694728 378700 ? Sl 23:51 0:00 unicorn worker[4] -c /srv/app/config/unicorn_docker.rb -E staging
123
+ root 306 0.0 2.4 694728 378704 ? Sl 23:51 0:00 unicorn worker[5] -c /srv/app/config/unicorn_docker.rb -E staging
124
+ ```
125
+
126
+ ## Continuous Testing Mode
127
+ A normal developer platform requires access from the outside world. Frontends and PNAPI endpoints need to be accessible, calls need to be routed into the platform, and so on. Most of the ingress points into a platform are provided by ELBs. This can cause problems during periods of heavy Autobahn usage; AWS rate limits the number of ELBs that can be created at once.
128
+
129
+ Since Autobahn runs inside the platform, the ingress points aren't necessary. Continuous testing mode eliminates the ELBs from the platform and routes all traffic to internal platform IP addresses. This mode will also create an autobahn pod within the platform. **This option should only be used for Autobahn runs**.
130
+
131
+ To create a platform in continuous testing mode, run:
132
+
133
+ ```
134
+ $ kube-platform create <platform-name> --ct [image tags] [options]
135
+ ```
136
+
137
+ ## Autobahn Development Mode
138
+ In this mode, an autobahn pod will be created within the platform, to be used for Autobahn development and testing. Normal platforms created without this option do not include an autobahn pod.
139
+
140
+ To create a platform in autobahn developer mode, run:
141
+
142
+ ```
143
+ $ kube-platform create <platform-name> --autobahn-dev [image tags] [options]
144
+ ```
145
+
146
+ ## FAKETIME
147
+ With `--faketime` option enabled, the date/time presented to select running application can be changed by editing `/srv/app/FAKETIME` file. The running application does not need to be restarted when `/srv/app/FAKETIME` is modified.
148
+
149
+ NOTE: the `--faketime` option requires `--dev-mode`, therefore dev mode is automatically enabled when the `--faketime` option is specified.
150
+
151
+ For example, by putting "-30d" (without quotes) into `/srv/app/FAKETIME`, then killing the master unicorn and allowing the unicorns to come back up, the application will see date as 30 days before today.
152
+
153
+ See https://github.com/wolfcw/libfaketime for more info.
154
+
155
+ Frontend (**unicorn**), **job** and **task** applications will honor changes to `/srv/app/FAKETIME` file when `--faketime` option is enabled.
156
+
157
+ To create a platform with FAKETIME support, include `--faketime` option during platform creation:
158
+
159
+ ```
160
+ $ kube-platform create <platform-name> [image tags] --faketime [options]
161
+ ```
162
+ To include scope to script/con:
163
+
164
+ LD_PRELOAD=/usr/local/lib/libfaketime.so.1 script/console + bundle exec rails console
165
+
166
+ # CONTRIBUTING
167
+ ## The Rough Steps
168
+ - Update kube-platform itself
169
+ - Manually release the new version via Github
170
+ - Seed the new version into Homebrew
171
+ - Make an announcement
172
+ - Update tooling (if required)
173
+
174
+ ## The Detailed Steps
175
+ - Merge or push new kube-platform functionality to mainline.
176
+ - Tag the kube-platform release manually here, bumping the version according to semantic versioning.
177
+ - Clone invoca/homebrew-tools (Invoca’s custom Homebrew tap) and update the kube-platform Homebrew formula with the new version.
178
+ - Open PR and after review, merge to mainline.
179
+ - After merging to mainline, running brew update will pull in the new formula, meaning that a subsequent brew upgrade will update kube-platform.
180
+ - Make an announcement/post in #dev-announcements about the new version
181
+ - If you plan to continue below, make sure to call out that an upgrade will be required in some timeframe (perhaps give a day or two for engineers to brew upgrade).
182
+
183
+ **The below steps are for when you want to force all previous versions of kube-platform to be upgraded to the new version.**
184
+
185
+ - Clone invoca/invocaops_docker (which contains Invoca's Kubernetes resources) and change the version field in `kube-platform/manifest.jsonnet` to match the new version number. Doing so will cause any kube-platform commands run from a previous version to fail with a prompt asking the user to upgrade.
186
+ - Open PR and after review, merge to mainline.
187
+ - You do not have to tag this “as a release” since kube-platform pulls the manifest.yaml.erb from the repository with every command.
188
+ - Update Thanatos with the new release of kube-platform since Thanatos uses kube-platform as a gem to handle platform deletions.
189
+ - Run `bundle update --conservative kube-platform` in Invoca/thanatos
190
+ - Open a PR with the Gemfile.lock changes, and after review merge to mainline.
191
+ - Note! Someone will need to redeploy Thanatos by deleting the Pod when the mainline build completes so the new SHA can come up.
192
+ - kubectl delete pod --context <color>.staging.us-east-1 --namespace=ops-tools -l app=thanatos
data/bin/kube-platform ADDED
@@ -0,0 +1,37 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

require "bundler/setup"
require "kube-platform"
require_relative "../lib/kube-platform/logger.rb"

# Exceptions that are anticipated in normal operation; these are logged
# without a backtrace.
EXPECTED_EXCEPTIONS = [
  KubePlatform::KubePlatformException
].freeze
# Walk the exception's `cause` chain and render each link as a
# "---caused by---" section. Returns an empty (mutable) string when the
# exception has no cause.
def exception_cause_suffix(ex)
  suffix = +""
  current = ex.cause
  while current
    suffix << "\n---caused by---\n#{current.class.name}: #{current.message}"
    current = current.cause
  end
  suffix
end
begin
  puts "" if STDOUT.isatty
  KubePlatform::CLI.start("#{__dir__}/../config/images.yaml", ARGV)
rescue Interrupt
  # User pressed ^C: acknowledge and exit with a distinct status.
  KubePlatform::Logger.logger.info("kube-platform interrupted by ^C")
  exit(2)
rescue *EXPECTED_EXCEPTIONS => error
  # Anticipated failures: log the message and cause chain, no backtrace.
  KubePlatform::Logger.logger.error("kube-platform exception: #{error.class.name}: #{error.message}#{exception_cause_suffix(error)}")
  exit(1)
rescue Exception => error # rubocop:disable Lint/RescueException -- deliberate top-level handler: log anything, then exit non-zero
  puts "" if STDOUT.isatty # this will leave the last log line visible on the screen, which may be helpful to understand this unexpected exception
  KubePlatform::Logger.logger.error("kube-platform exception: #{error.class.name}: #{error.message}#{exception_cause_suffix(error)}\n#{error.backtrace.join("\n")}")
  exit(1)
end
@@ -0,0 +1,203 @@
# frozen_string_literal: true

module KubePlatform
  # Orchestrates the lifecycle of a developer platform: wires CLI options into
  # a Configuration, resolves Docker images, checks out the Kubernetes resource
  # repository, and drives Cluster create/update/delete operations.
  class Application
    include Logger

    ImageMissingException = Class.new(KubePlatformException)
    UpdateRequiredException = Class.new(KubePlatformException)
    ImageAvailabilityCheckFailed = Class.new(KubePlatformException)
    InvalidPlatformNameException = Class.new(KubePlatformException)

    # CLI mode flags mapped to the cluster definition names in the manifest.
    CLUSTER_DEFINITIONS = {
      standard: "standard",
      continuous_testing: "continuous-testing",
      titan_e2e_testing: "titan-testing",
      autobahn_dev: "autobahn-dev"
    }.freeze

    # Lowercase RFC 1123 label: alphanumerics and '-', starting and ending with
    # an alphanumeric. The optional group permits single-character names, which
    # RFC 1123 allows (the previous regex wrongly required at least two characters).
    VALID_PLATFORM_NAME = /\A[a-z0-9]([a-z0-9-]*[a-z0-9])?\z/.freeze

    # @param cluster_name [String] Kubernetes namespace the platform runs in
    # @param image_descriptors [Array] descriptors of the images the platform can run
    # @param options [Hash] parsed CLI options
    def initialize(cluster_name:, image_descriptors:, options:)
      @cluster_name = cluster_name
      @image_descriptors = image_descriptors
      @options = options
      configure_logger
    end

    # Create a new platform. Fails fast on an invalid name or missing images.
    def create
      raise_if_platform_name_invalid!
      raise_if_images_unavailable!
      build_cluster(&:create)
    end

    # Update a running platform. Tags the platform was launched with are carried
    # over for any image not explicitly re-tagged on the command line.
    def update
      execute_cluster_operation(:update) do |cluster|
        existing_key_tag_pairs = cluster.image_key_tag_pairs
        image_repository.add_key_tag_pairs(existing_key_tag_pairs)
        raise_if_images_unavailable!
      end
    end

    # Tear the platform down.
    def destroy
      execute_cluster_operation(:delete)
    end

    # Memoized configuration derived from the CLI options.
    def config
      @config ||= config_from_options(@options, @cluster_name, image_repository)
    end

    private

    def config_from_options(options, cluster_name, image_repository)
      KubePlatform::Configuration.new do |c|
        c.cluster_definition = cluster_definition_from_options
        c.cluster_name = cluster_name
        c.ct_mode = options[:ct] || options[:titan_e2e_testing]
        c.dev_mode = options[:dev_mode]
        if options[:faketime]
          c.faketime = options[:faketime]
          c.dev_mode = true # FAKETIME requires dev mode, so enable it implicitly
        end
        c.dockerhub_secret_name = options[:dockerhub_secret_name]
        c.dockerhub_secret_namespace = options[:dockerhub_secret_namespace]
        c.domain_name = options[:domain_name]
        c.image_repository = image_repository
        c.git_branch = options[:git_branch]
        c.git_resource_path = options[:git_resource_path]
        c.git_url = options[:git_url]
        c.kubectl_context = options[:context]
        c.manifest_path = options[:manifest_path]
        c.user = options[:user]
        c.force_api_calls = options[:force]
        c.api_endpoint = client.api_endpoint_from_context
      end
    end

    # --verbose wins over --log_level and enables debug logging.
    def configure_logger
      if @options[:verbose]
        KubePlatform::Logger.verbose = true
        KubePlatform::Logger.logger.level = ::Logger::DEBUG
      else
        KubePlatform::Logger.logger.level = @options[:log_level]
      end
    end

    # @raise [InvalidPlatformNameException] unless the name is a valid lowercase RFC 1123 label
    def raise_if_platform_name_invalid!
      unless VALID_PLATFORM_NAME.match?(@cluster_name)
        raise InvalidPlatformNameException, "Invalid lowercase RFC 1123 platform name '#{@cluster_name}'. "\
              "Platform name may include only lowercase alphanumeric characters and '-' "\
              "(and must start and end with lowercase alphanumeric characters)."
      end
    end

    # @raise [ImageMissingException] when any requested image cannot be found
    def raise_if_images_unavailable!
      logger.info("Checking for image availability")
      missing = all_check_for_missing_images.uniq
      if missing.empty?
        logger.info("Successfully located all images")
      else
        raise ImageMissingException, "Missing images: #{missing.map(&:name)}"
      end
    end

    # Wraps the availability check so any failure surfaces a hint about the
    # most common cause: the wrong kubectl context being selected.
    def all_check_for_missing_images
      image_repository.missing_images
    rescue
      raise ImageAvailabilityCheckFailed, <<~EOS
        Failed to check image availability.
        Perhaps you are running in the wrong kubectx context?

        `kubectx --current` = #{kubectx_current}
      EOS
    end

    # Best-effort shell-out; old kubectx versions print usage instead of the
    # current context, and any command failure is folded into the message.
    def kubectx_current
      kubectx_output = `kubectx --current`.chomp
      if kubectx_output["USAGE:\n"]
        "Note: you must upgrade kubectx to version 0.7.1 or greater to support the --current option"
      else
        kubectx_output
      end
    rescue => ex
      "Command failed: #{ex.class.name}: #{ex.message}"
    end

    def image_repository
      @image_repository ||= KubePlatform::Images::Repository.new(image_descriptors: @image_descriptors,
                                                                 image_tags: image_tags_from_options,
                                                                 image_factory: docker_hub_image_factory)
    end

    def execute_cluster_operation(action)
      build_cluster_with_definition_switching(cluster_definition_from_options) do |c|
        yield c if block_given?
        c.send(action)
      end
    end

    # If the platform was launched with a different definition than the current
    # operation requested, retry with the definition it was launched with.
    def build_cluster_with_definition_switching(cluster_definition_name, &block)
      build_cluster(cluster_definition_name: cluster_definition_name, &block)
    rescue Cluster::PlatformDefinitionMismatchException => ex
      logger.warn("Platform #{@cluster_name} was launched with definition #{ex.expected_definition}, "\
                  "but the operation was started with #{ex.received_definition}")
      logger.warn("Switching to platform definition #{ex.expected_definition}")

      build_cluster_with_definition_switching(ex.expected_definition, &block)
    end

    # Checks out the resource repository, enforces the minimum-version check,
    # and yields a Cluster built from the requested definition.
    def build_cluster(cluster_definition_name: cluster_definition_from_options)
      resource_repository.retrieve do |repository_path|
        manifest = manifest(repository_path)

        update_checker(manifest).update_required? and raise UpdateRequiredException
        cluster_definition = manifest.cluster_definition_by_name(cluster_definition_name)
        yield Cluster.new(name: config.cluster_name, cluster_definition: cluster_definition, client: client, config: config)
      end
    end

    def resource_repository
      @resource_repository ||= ResourceRepository.new(url: config.git_url, branch: config.git_branch)
    end

    def manifest(resource_repository_dir)
      manifest_filename = File.join(resource_repository_dir, config.manifest_path)
      resource_dir = File.join(resource_repository_dir, config.git_resource_path)
      Manifest.new(filename: manifest_filename, resource_dir: resource_dir, config: config)
    end

    def update_checker(manifest)
      UpdateChecker.new(manifest: manifest)
    end

    # Resolve the mode flags into a cluster definition name; --ct wins over
    # --autobahn_dev, which wins over --titan_e2e_testing.
    def cluster_definition_from_options
      key = if @options[:ct]
              :continuous_testing
            elsif @options[:autobahn_dev]
              :autobahn_dev
            elsif @options[:titan_e2e_testing]
              :titan_e2e_testing
            else
              :standard
            end
      CLUSTER_DEFINITIONS[key]
    end

    # Hash of image key => tag from the CLI (nil when not supplied).
    def image_tags_from_options
      @image_tags_from_options ||= @image_descriptors.map(&:key).each_with_object({}) { |key, tags| tags[key] = @options[key] }
    end

    def docker_hub_image_factory
      @docker_hub_image_factory ||= KubePlatform::Images::DockerHubImageFactory.new(secret_provider: docker_hub_auth_provider)
    end

    def docker_hub_auth_provider
      @docker_hub_auth_provider ||= KubePlatform::Images::KubernetesDockerHubSecretProvider.new(
        client: client, secret_name: @options[:dockerhub_secret_name], secret_namespace: @options[:dockerhub_secret_namespace]
      )
    end

    def client
      @client ||= Client.new(context_name: @options[:context])
    end
  end
end
@@ -0,0 +1,114 @@
# frozen_string_literal: true

require "thor"
require_relative "logger"

module KubePlatform
  # Thor-based command-line interface for kube-platform. Per-image tag options
  # are generated dynamically from the image descriptor config before Thor
  # dispatches the command.
  class CLI < Thor
    include Logger

    # Commands that accept per-image tag options on the command line.
    COMMANDS_WITH_TAG_OPTIONS = ["create", "update"].freeze

    # Entry point: loads the image descriptors, registers one tag option per
    # descriptor on each tag-accepting command, then hands off to Thor.
    def self.start(image_config_path, *args)
      @image_descriptors = KubePlatform::Images::Descriptor.array_from_yaml_file(filename: image_config_path)
      add_image_tag_options
      super(*args)
    end

    def self.add_image_tag_options
      adapters = image_descriptors.map { |descriptor| Thor::DescriptorToOptionAdapter.new(descriptor: descriptor) }
      COMMANDS_WITH_TAG_OPTIONS.product(adapters).each do |command, adapter|
        add_image_tag_option(adapter, command)
      end
    end

    def self.add_image_tag_option(option, command)
      # `self` is already the CLI class here, so Thor's `method_option` can be
      # called directly rather than through class_exec.
      method_option(option.key, aliases: option.aliases, desc: option.desc, banner: "tag", type: :string, for: command)
    end

    class << self
      attr_reader :image_descriptors

      # Default value for the --user option.
      def username
        ENV["USER"] || ""
      end
    end

    class_option :ct, desc: "Continuous testing mode", type: :boolean, default: false
    class_option :autobahn_dev, desc: "Autobahn development mode", type: :boolean, default: false
    class_option :titan_e2e_testing, desc: "Continuous testing mode specifically for Titan E2E testing", type: :boolean, default: false
    class_option :context, desc: "kubectl context to use to connect to Kubernetes", type: :string, default: ENV["KUBERNETES_CONTEXT"]
    class_option :log_level, aliases: ["-l"], desc: "Log level (defaults to info when interactive, error if not)",
                 type: :string, default: (STDOUT.isatty && !defined?(RSpec) ? "info" : "error"), enum: ["debug", "info", "warn", "error", "fatal"]

    class_option :verbose, desc: "Verbose logging used for debugging. Automatically sets log level to debug, and disables interactive mode logging",
                 type: :boolean, default: false

    class_option :git_branch, desc: "git branch to use when checking out the Kubernetes resource repository",
                 type: :string, default: "master"

    class_option :git_url, desc: "URL to the git repository containing the Kubernetes resources",
                 type: :string, default: "git@github.com:Invoca/invocaops_docker.git"

    class_option :manifest_path, desc: "Path to the manifest, relative to the root of the resource repository",
                 hide: true, type: :string, default: "kube-platform/manifest.jsonnet"

    class_option :git_resource_path, desc: "Directory inside the resource repository that contains the Kubernetes resources",
                 hide: true, type: :string, default: "/"

    option :user, hide: true, desc: "The username to associate with the platform", type: :string, default: username
    option :dockerhub_secret_name, hide: true, type: :string, default: "dockerhub"
    option :dockerhub_secret_namespace, hide: true, type: :string, default: "default"
    option :domain_name, hide: true, type: :string, default: "invocadev.com"
    option :dev_mode, aliases: ["-d"], desc: "Allows restarting of processes inside containers", type: :boolean, default: false
    option :faketime, desc: "Set date/time by editing FAKETIME", type: :boolean, default: false

    desc "create PLATFORM_NAME", "Create a new platform"

    def create(cluster_name)
      create_application(cluster_name).create
    end

    option :user, hide: true, desc: "The username to associate with the platforms", type: :string, default: username
    option :dockerhub_secret_name, hide: true, type: :string, default: "dockerhub"
    option :dockerhub_secret_namespace, hide: true, type: :string, default: "default"
    option :domain_name, hide: true, type: :string, default: "invocadev.com"
    option :dev_mode, aliases: ["-d"], desc: "Allows restarting of processes inside containers", type: :boolean, default: false
    option :faketime, desc: "Set date/time by editing FAKETIME", type: :boolean, default: false

    desc "update PLATFORM_NAME", "Update a platform"

    def update(cluster_name)
      create_application(cluster_name).update
    end

    desc "destroy PLATFORM_NAME", "Destroy a platform"

    option :force, aliases: ["--force"], desc: "Ignores checks when deleting platforms", type: :boolean, default: false

    def destroy(cluster_name)
      create_application(cluster_name).destroy
    end

    desc "version", "Show the kube-platform version"

    def version
      puts "kube-platform version #{KubePlatform::VERSION}"
    end

    private

    # Helpers below are excluded from Thor's command list.
    no_tasks do
      def create_application(cluster_name)
        Application.new(cluster_name: cluster_name, image_descriptors: image_descriptors, options: options)
      end

      def image_descriptors
        self.class.image_descriptors
      end
    end
  end
end