cpflow 3.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.github/workflows/check_cpln_links.yml +19 -0
- data/.github/workflows/command_docs.yml +24 -0
- data/.github/workflows/rspec-shared.yml +56 -0
- data/.github/workflows/rspec.yml +28 -0
- data/.github/workflows/rubocop.yml +24 -0
- data/.gitignore +18 -0
- data/.overcommit.yml +16 -0
- data/.rubocop.yml +22 -0
- data/.simplecov_spawn.rb +10 -0
- data/CHANGELOG.md +259 -0
- data/CONTRIBUTING.md +73 -0
- data/Gemfile +7 -0
- data/Gemfile.lock +126 -0
- data/LICENSE +21 -0
- data/README.md +546 -0
- data/Rakefile +21 -0
- data/bin/cpflow +6 -0
- data/cpflow +6 -0
- data/cpflow.gemspec +41 -0
- data/docs/assets/grafana-alert.png +0 -0
- data/docs/assets/memcached.png +0 -0
- data/docs/assets/sidekiq-pre-stop-hook.png +0 -0
- data/docs/commands.md +454 -0
- data/docs/dns.md +15 -0
- data/docs/migrating.md +262 -0
- data/docs/postgres.md +436 -0
- data/docs/redis.md +128 -0
- data/docs/secrets-and-env-values.md +42 -0
- data/docs/tips.md +150 -0
- data/docs/troubleshooting.md +6 -0
- data/examples/circleci.yml +104 -0
- data/examples/controlplane.yml +159 -0
- data/lib/command/apply_template.rb +209 -0
- data/lib/command/base.rb +540 -0
- data/lib/command/build_image.rb +49 -0
- data/lib/command/cleanup_images.rb +136 -0
- data/lib/command/cleanup_stale_apps.rb +79 -0
- data/lib/command/config.rb +48 -0
- data/lib/command/copy_image_from_upstream.rb +108 -0
- data/lib/command/delete.rb +149 -0
- data/lib/command/deploy_image.rb +56 -0
- data/lib/command/doctor.rb +47 -0
- data/lib/command/env.rb +22 -0
- data/lib/command/exists.rb +23 -0
- data/lib/command/generate.rb +45 -0
- data/lib/command/info.rb +222 -0
- data/lib/command/latest_image.rb +19 -0
- data/lib/command/logs.rb +49 -0
- data/lib/command/maintenance.rb +42 -0
- data/lib/command/maintenance_off.rb +62 -0
- data/lib/command/maintenance_on.rb +62 -0
- data/lib/command/maintenance_set_page.rb +34 -0
- data/lib/command/no_command.rb +23 -0
- data/lib/command/open.rb +33 -0
- data/lib/command/open_console.rb +26 -0
- data/lib/command/promote_app_from_upstream.rb +38 -0
- data/lib/command/ps.rb +41 -0
- data/lib/command/ps_restart.rb +37 -0
- data/lib/command/ps_start.rb +51 -0
- data/lib/command/ps_stop.rb +82 -0
- data/lib/command/ps_wait.rb +40 -0
- data/lib/command/run.rb +573 -0
- data/lib/command/setup_app.rb +113 -0
- data/lib/command/test.rb +23 -0
- data/lib/command/version.rb +18 -0
- data/lib/constants/exit_code.rb +7 -0
- data/lib/core/config.rb +316 -0
- data/lib/core/controlplane.rb +552 -0
- data/lib/core/controlplane_api.rb +170 -0
- data/lib/core/controlplane_api_direct.rb +112 -0
- data/lib/core/doctor_service.rb +104 -0
- data/lib/core/helpers.rb +26 -0
- data/lib/core/shell.rb +100 -0
- data/lib/core/template_parser.rb +76 -0
- data/lib/cpflow/version.rb +6 -0
- data/lib/cpflow.rb +288 -0
- data/lib/deprecated_commands.json +9 -0
- data/lib/generator_templates/Dockerfile +27 -0
- data/lib/generator_templates/controlplane.yml +62 -0
- data/lib/generator_templates/entrypoint.sh +8 -0
- data/lib/generator_templates/templates/app.yml +21 -0
- data/lib/generator_templates/templates/postgres.yml +176 -0
- data/lib/generator_templates/templates/rails.yml +36 -0
- data/rakelib/create_release.rake +81 -0
- data/script/add_command +37 -0
- data/script/check_command_docs +3 -0
- data/script/check_cpln_links +45 -0
- data/script/rename_command +43 -0
- data/script/update_command_docs +62 -0
- data/templates/app.yml +13 -0
- data/templates/daily-task.yml +32 -0
- data/templates/maintenance.yml +25 -0
- data/templates/memcached.yml +24 -0
- data/templates/postgres.yml +32 -0
- data/templates/rails.yml +27 -0
- data/templates/redis.yml +21 -0
- data/templates/redis2.yml +37 -0
- data/templates/sidekiq.yml +38 -0
- metadata +341 -0
data/lib/command/run.rb
ADDED
@@ -0,0 +1,573 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Command
|
4
|
+
class Run < Base # rubocop:disable Metrics/ClassLength
|
5
|
+
INTERACTIVE_COMMANDS = [
|
6
|
+
"bash",
|
7
|
+
"rails console",
|
8
|
+
"rails c",
|
9
|
+
"rails dbconsole",
|
10
|
+
"rails db"
|
11
|
+
].freeze
|
12
|
+
|
13
|
+
NAME = "run"
|
14
|
+
USAGE = "run COMMAND"
|
15
|
+
REQUIRES_ARGS = true
|
16
|
+
DEFAULT_ARGS = ["bash"].freeze
|
17
|
+
OPTIONS = [
|
18
|
+
app_option(required: true),
|
19
|
+
image_option,
|
20
|
+
log_method_option,
|
21
|
+
workload_option,
|
22
|
+
location_option,
|
23
|
+
use_local_token_option,
|
24
|
+
terminal_size_option,
|
25
|
+
interactive_option,
|
26
|
+
detached_option,
|
27
|
+
cpu_option,
|
28
|
+
memory_option,
|
29
|
+
entrypoint_option
|
30
|
+
].freeze
|
31
|
+
DESCRIPTION = "Runs one-off interactive or non-interactive replicas (analog of `heroku run`)"
|
32
|
+
LONG_DESCRIPTION = <<~DESC
|
33
|
+
- Runs one-off interactive or non-interactive replicas (analog of `heroku run`)
|
34
|
+
- Uses `Cron` workload type and either:
|
35
|
+
- - `cpln workload exec` for interactive mode, with CLI streaming
|
36
|
+
- - log async fetching for non-interactive mode
|
37
|
+
- The Dockerfile entrypoint is used as the command by default, which assumes `exec "${@}"` to be present,
|
38
|
+
and the args ["bash", "-c", cmd_to_run] are passed
|
39
|
+
- The entrypoint can be overridden through `--entrypoint`, which must be a single command or a script path that exists in the container,
|
40
|
+
and the args ["bash", "-c", cmd_to_run] are passed,
|
41
|
+
unless the entrypoint is `bash`, in which case the args ["-c", cmd_to_run] are passed
|
42
|
+
- Providing `--entrypoint none` sets the entrypoint to `bash` by default
|
43
|
+
- If `fix_terminal_size` is `true` in the `.controlplane/controlplane.yml` file,
|
44
|
+
the remote terminal size will be fixed to match the local terminal size (may also be overridden through `--terminal-size`)
|
45
|
+
- By default, all jobs use a CPU size of 1 (1 core) and a memory size of 2Gi (2 gibibytes)
|
46
|
+
(can be configured through `runner_job_default_cpu` and `runner_job_default_memory` in `controlplane.yml`,
|
47
|
+
and also overridden per job through `--cpu` and `--memory`)
|
48
|
+
- By default, the job is stopped if it takes longer than 6 hours to finish
|
49
|
+
(can be configured through `runner_job_timeout` in `controlplane.yml`)
|
50
|
+
DESC
|
51
|
+
EXAMPLES = <<~EX
|
52
|
+
```sh
|
53
|
+
# Opens shell (bash by default).
|
54
|
+
cpflow run -a $APP_NAME
|
55
|
+
|
56
|
+
# Runs interactive command, keeps shell open, and stops job when exiting.
|
57
|
+
cpflow run -a $APP_NAME --interactive -- rails c
|
58
|
+
|
59
|
+
# Some commands are automatically detected as interactive, so no need to pass `--interactive`.
|
60
|
+
#{INTERACTIVE_COMMANDS.map { |cmd| "cpflow run -a $APP_NAME -- #{cmd}" }.join("\n ")}
|
61
|
+
|
62
|
+
# Runs non-interactive command, outputs logs, exits with the exit code of the command and stops job.
|
63
|
+
cpflow run -a $APP_NAME -- rails db:migrate
|
64
|
+
|
65
|
+
# Runs non-interactive command, detaches, exits with 0, and prints commands to:
|
66
|
+
# - see logs from the job
|
67
|
+
# - stop the job
|
68
|
+
cpflow run -a $APP_NAME --detached -- rails db:migrate
|
69
|
+
|
70
|
+
# The command needs to be quoted if setting an env variable or passing args.
|
71
|
+
cpflow run -a $APP_NAME -- 'SOME_ENV_VAR=some_value rails db:migrate'
|
72
|
+
|
73
|
+
# Uses a different image (which may not be promoted yet).
|
74
|
+
cpflow run -a $APP_NAME --image appimage:123 -- rails db:migrate # Exact image name
|
75
|
+
cpflow run -a $APP_NAME --image latest -- rails db:migrate # Latest sequential image
|
76
|
+
|
77
|
+
# Uses a different workload than `one_off_workload` from `.controlplane/controlplane.yml`.
|
78
|
+
cpflow run -a $APP_NAME -w other-workload -- bash
|
79
|
+
|
80
|
+
# Overrides remote CPLN_TOKEN env variable with local token.
|
81
|
+
# Useful when superuser rights are needed in remote container.
|
82
|
+
cpflow run -a $APP_NAME --use-local-token -- bash
|
83
|
+
|
84
|
+
# Replaces the existing Dockerfile entrypoint with `bash`.
|
85
|
+
cpflow run -a $APP_NAME --entrypoint none -- rails db:migrate
|
86
|
+
|
87
|
+
# Replaces the existing Dockerfile entrypoint.
|
88
|
+
cpflow run -a $APP_NAME --entrypoint /app/alternative-entrypoint.sh -- rails db:migrate
|
89
|
+
```
|
90
|
+
EX
|
91
|
+
|
92
|
+
DEFAULT_JOB_CPU = "1"
|
93
|
+
DEFAULT_JOB_MEMORY = "2Gi"
|
94
|
+
DEFAULT_JOB_TIMEOUT = 21_600 # 6 hours
|
95
|
+
DEFAULT_JOB_HISTORY_LIMIT = 10
|
96
|
+
MAGIC_END = "---cpflow run command finished---"
|
97
|
+
|
98
|
+
attr_reader :interactive, :detached, :location, :original_workload, :runner_workload,
|
99
|
+
:default_image, :default_cpu, :default_memory, :job_timeout, :job_history_limit,
|
100
|
+
:container, :expected_deployed_version, :job, :replica, :command
|
101
|
+
|
102
|
+
def call # rubocop:disable Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity
|
103
|
+
@interactive = config.options[:interactive] || interactive_command?
|
104
|
+
@detached = config.options[:detached]
|
105
|
+
@log_method = config.options[:log_method]
|
106
|
+
|
107
|
+
@location = config.location
|
108
|
+
@original_workload = config.options[:workload] || config[:one_off_workload]
|
109
|
+
@runner_workload = "#{original_workload}-runner"
|
110
|
+
@default_image = "#{config.app}:#{Controlplane::NO_IMAGE_AVAILABLE}"
|
111
|
+
@default_cpu = config.current[:runner_job_default_cpu] || DEFAULT_JOB_CPU
|
112
|
+
@default_memory = config.current[:runner_job_default_memory] || DEFAULT_JOB_MEMORY
|
113
|
+
@job_timeout = config.current[:runner_job_timeout] || DEFAULT_JOB_TIMEOUT
|
114
|
+
@job_history_limit = DEFAULT_JOB_HISTORY_LIMIT
|
115
|
+
|
116
|
+
unless interactive
|
117
|
+
@internal_sigint = false
|
118
|
+
|
119
|
+
# Catch Ctrl+C in the main process
|
120
|
+
trap("SIGINT") do
|
121
|
+
unless @internal_sigint
|
122
|
+
print_detached_commands
|
123
|
+
exit(ExitCode::INTERRUPT)
|
124
|
+
end
|
125
|
+
end
|
126
|
+
end
|
127
|
+
|
128
|
+
create_runner_workload if cp.fetch_workload(runner_workload).nil?
|
129
|
+
wait_for_runner_workload_deploy
|
130
|
+
update_runner_workload
|
131
|
+
wait_for_runner_workload_update if expected_deployed_version
|
132
|
+
|
133
|
+
start_job
|
134
|
+
wait_for_replica_for_job
|
135
|
+
|
136
|
+
progress.puts
|
137
|
+
if interactive
|
138
|
+
run_interactive
|
139
|
+
else
|
140
|
+
run_non_interactive
|
141
|
+
end
|
142
|
+
end
|
143
|
+
|
144
|
+
private
|
145
|
+
|
146
|
+
def interactive_command?
|
147
|
+
INTERACTIVE_COMMANDS.include?(args_join(config.args))
|
148
|
+
end
|
149
|
+
|
150
|
+
def app_workload_replica_args
|
151
|
+
["-a", config.app, "--workload", runner_workload, "--replica", replica]
|
152
|
+
end
|
153
|
+
|
154
|
+
def create_runner_workload # rubocop:disable Metrics/MethodLength
|
155
|
+
step("Creating runner workload '#{runner_workload}' based on '#{original_workload}'") do
|
156
|
+
spec, container_spec = base_workload_specs(original_workload)
|
157
|
+
|
158
|
+
# Remove other containers if any
|
159
|
+
spec["containers"] = [container_spec]
|
160
|
+
|
161
|
+
# Default to using existing Dockerfile entrypoint
|
162
|
+
container_spec.delete("command")
|
163
|
+
|
164
|
+
# Remove props that conflict with job
|
165
|
+
container_spec.delete("ports")
|
166
|
+
container_spec.delete("lifecycle")
|
167
|
+
container_spec.delete("livenessProbe")
|
168
|
+
container_spec.delete("readinessProbe")
|
169
|
+
|
170
|
+
# Set image, CPU, and memory to default values
|
171
|
+
container_spec["image"] = default_image
|
172
|
+
container_spec["cpu"] = default_cpu
|
173
|
+
container_spec["memory"] = default_memory
|
174
|
+
|
175
|
+
# Ensure cron workload won't run per schedule
|
176
|
+
spec["defaultOptions"]["suspend"] = true
|
177
|
+
|
178
|
+
# Ensure no scaling
|
179
|
+
spec["defaultOptions"]["autoscaling"] = {}
|
180
|
+
spec["defaultOptions"]["capacityAI"] = false
|
181
|
+
|
182
|
+
# Set cron job props
|
183
|
+
spec["type"] = "cron"
|
184
|
+
spec["job"] = {
|
185
|
+
# Next job set to run on January 1st, 2029
|
186
|
+
"schedule" => "0 0 1 1 1",
|
187
|
+
|
188
|
+
"restartPolicy" => "Never",
|
189
|
+
"activeDeadlineSeconds" => job_timeout,
|
190
|
+
"historyLimit" => job_history_limit
|
191
|
+
}
|
192
|
+
|
193
|
+
# Create runner workload
|
194
|
+
cp.apply_hash("kind" => "workload", "name" => runner_workload, "spec" => spec)
|
195
|
+
end
|
196
|
+
end
|
197
|
+
|
198
|
+
def update_runner_workload # rubocop:disable Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity
|
199
|
+
should_update = false
|
200
|
+
spec = nil
|
201
|
+
|
202
|
+
step("Checking if runner workload '#{runner_workload}' needs to be updated") do # rubocop:disable Metrics/BlockLength
|
203
|
+
_, original_container_spec = base_workload_specs(original_workload)
|
204
|
+
spec, container_spec = base_workload_specs(runner_workload)
|
205
|
+
|
206
|
+
# Keep ENV synced between original and runner workloads
|
207
|
+
original_env_str = original_container_spec["env"]&.sort_by { |env| env["name"] }.to_s
|
208
|
+
env_str = container_spec["env"]&.sort_by { |env| env["name"] }.to_s
|
209
|
+
if original_env_str != env_str
|
210
|
+
container_spec["env"] = original_container_spec["env"]
|
211
|
+
should_update = true
|
212
|
+
end
|
213
|
+
|
214
|
+
if container_spec["image"] != default_image
|
215
|
+
container_spec["image"] = default_image
|
216
|
+
should_update = true
|
217
|
+
end
|
218
|
+
|
219
|
+
if container_spec["cpu"] != default_cpu
|
220
|
+
container_spec["cpu"] = default_cpu
|
221
|
+
should_update = true
|
222
|
+
end
|
223
|
+
|
224
|
+
if container_spec["memory"] != default_memory
|
225
|
+
container_spec["memory"] = default_memory
|
226
|
+
should_update = true
|
227
|
+
end
|
228
|
+
|
229
|
+
if spec["job"]["activeDeadlineSeconds"] != job_timeout
|
230
|
+
spec["job"]["activeDeadlineSeconds"] = job_timeout
|
231
|
+
should_update = true
|
232
|
+
end
|
233
|
+
|
234
|
+
if spec["job"]["historyLimit"] != job_history_limit
|
235
|
+
spec["job"]["historyLimit"] = job_history_limit
|
236
|
+
should_update = true
|
237
|
+
end
|
238
|
+
|
239
|
+
true
|
240
|
+
end
|
241
|
+
|
242
|
+
return unless should_update
|
243
|
+
|
244
|
+
step("Updating runner workload '#{runner_workload}'") do
|
245
|
+
# Update runner workload
|
246
|
+
@expected_deployed_version = cp.cron_workload_deployed_version(runner_workload) + 1
|
247
|
+
cp.apply_hash("kind" => "workload", "name" => runner_workload, "spec" => spec)
|
248
|
+
end
|
249
|
+
end
|
250
|
+
|
251
|
+
def wait_for_runner_workload_deploy
|
252
|
+
step("Waiting for runner workload '#{runner_workload}' to be deployed", retry_on_failure: true) do
|
253
|
+
!cp.cron_workload_deployed_version(runner_workload).nil?
|
254
|
+
end
|
255
|
+
end
|
256
|
+
|
257
|
+
def wait_for_runner_workload_update
|
258
|
+
step("Waiting for runner workload '#{runner_workload}' to be updated", retry_on_failure: true) do
|
259
|
+
cp.cron_workload_deployed_version(runner_workload) >= expected_deployed_version
|
260
|
+
end
|
261
|
+
end
|
262
|
+
|
263
|
+
def start_job
|
264
|
+
job_start_yaml = build_job_start_yaml
|
265
|
+
|
266
|
+
step("Starting job for runner workload '#{runner_workload}'", retry_on_failure: true) do
|
267
|
+
result = cp.start_cron_workload(runner_workload, job_start_yaml, location: location)
|
268
|
+
@job = result&.dig("items", 0, "id")
|
269
|
+
|
270
|
+
job || false
|
271
|
+
end
|
272
|
+
end
|
273
|
+
|
274
|
+
def wait_for_replica_for_job
|
275
|
+
step("Waiting for replica to start, which runs job '#{job}'", retry_on_failure: true) do
|
276
|
+
result = cp.fetch_workload_replicas(runner_workload, location: location)
|
277
|
+
@replica = result["items"].find { |item| item.include?(job) }
|
278
|
+
|
279
|
+
replica || false
|
280
|
+
end
|
281
|
+
end
|
282
|
+
|
283
|
+
def run_interactive
|
284
|
+
progress.puts("Connecting to replica '#{replica}'...\n\n")
|
285
|
+
cp.workload_exec(runner_workload, replica, location: location, container: container, command: command)
|
286
|
+
end
|
287
|
+
|
288
|
+
def run_non_interactive
|
289
|
+
if detached
|
290
|
+
print_detached_commands
|
291
|
+
exit(ExitCode::SUCCESS)
|
292
|
+
end
|
293
|
+
|
294
|
+
case @log_method
|
295
|
+
when 1 then run_non_interactive_v1
|
296
|
+
when 2 then run_non_interactive_v2
|
297
|
+
when 3 then run_non_interactive_v3
|
298
|
+
else raise "Invalid log method: #{@log_method}"
|
299
|
+
end
|
300
|
+
end
|
301
|
+
|
302
|
+
def run_non_interactive_v1 # rubocop:disable Metrics/MethodLength
|
303
|
+
logs_pid = Process.fork do
|
304
|
+
# Catch Ctrl+C in the forked process
|
305
|
+
trap("SIGINT") do
|
306
|
+
exit(ExitCode::SUCCESS)
|
307
|
+
end
|
308
|
+
|
309
|
+
Cpflow::Cli.start(["logs", *app_workload_replica_args])
|
310
|
+
end
|
311
|
+
Process.detach(logs_pid)
|
312
|
+
|
313
|
+
exit_status = wait_for_job_status
|
314
|
+
|
315
|
+
# We need to wait a bit for the logs to appear,
|
316
|
+
# otherwise it may exit without showing them
|
317
|
+
Kernel.sleep(30)
|
318
|
+
|
319
|
+
@internal_sigint = true
|
320
|
+
Process.kill("INT", logs_pid)
|
321
|
+
exit(exit_status)
|
322
|
+
end
|
323
|
+
|
324
|
+
def run_non_interactive_v2
|
325
|
+
current_cpflow = File.expand_path("cpflow", "#{__dir__}/../..")
|
326
|
+
logs_pipe = IO.popen([current_cpflow, "logs", *app_workload_replica_args])
|
327
|
+
|
328
|
+
exit_status = wait_for_job_status_and_log(logs_pipe)
|
329
|
+
|
330
|
+
@internal_sigint = true
|
331
|
+
Process.kill("INT", logs_pipe.pid)
|
332
|
+
exit(exit_status)
|
333
|
+
end
|
334
|
+
|
335
|
+
def run_non_interactive_v3
|
336
|
+
exit(show_logs_waiting)
|
337
|
+
end
|
338
|
+
|
339
|
+
def base_workload_specs(workload)
|
340
|
+
spec = cp.fetch_workload!(workload).fetch("spec")
|
341
|
+
container_spec = spec["containers"].detect { _1["name"] == original_workload } || spec["containers"].first
|
342
|
+
|
343
|
+
[spec, container_spec]
|
344
|
+
end
|
345
|
+
|
346
|
+
def build_job_start_yaml # rubocop:disable Metrics/CyclomaticComplexity, Metrics/MethodLength, Metrics/PerceivedComplexity
|
347
|
+
_, original_container_spec = base_workload_specs(original_workload)
|
348
|
+
@container = original_container_spec["name"]
|
349
|
+
|
350
|
+
job_start_hash = { "name" => container }
|
351
|
+
|
352
|
+
if config.options[:use_local_token]
|
353
|
+
job_start_hash["env"] ||= []
|
354
|
+
job_start_hash["env"].push({ "name" => "CPFLOW_TOKEN", "value" => ControlplaneApiDirect.new.api_token[:token] })
|
355
|
+
end
|
356
|
+
|
357
|
+
entrypoint = nil
|
358
|
+
if config.options[:entrypoint]
|
359
|
+
entrypoint = config.options[:entrypoint] == "none" ? "bash" : config.options[:entrypoint]
|
360
|
+
end
|
361
|
+
|
362
|
+
job_start_hash["command"] = entrypoint if entrypoint
|
363
|
+
job_start_hash["args"] ||= []
|
364
|
+
job_start_hash["args"].push("bash") unless entrypoint == "bash"
|
365
|
+
job_start_hash["args"].push("-c")
|
366
|
+
job_start_hash["env"] ||= []
|
367
|
+
job_start_hash["env"].push({ "name" => "CPFLOW_RUNNER_SCRIPT", "value" => runner_script })
|
368
|
+
if interactive
|
369
|
+
job_start_hash["env"].push({ "name" => "CPFLOW_MONITORING_SCRIPT", "value" => interactive_monitoring_script })
|
370
|
+
|
371
|
+
job_start_hash["args"].push('eval "$CPFLOW_MONITORING_SCRIPT"')
|
372
|
+
@command = %(bash -c 'eval "$CPFLOW_RUNNER_SCRIPT"')
|
373
|
+
else
|
374
|
+
job_start_hash["args"].push('eval "$CPFLOW_RUNNER_SCRIPT"')
|
375
|
+
end
|
376
|
+
|
377
|
+
image = config.options[:image]
|
378
|
+
image_link = if image
|
379
|
+
image = cp.latest_image if image == "latest"
|
380
|
+
"/org/#{config.org}/image/#{image}"
|
381
|
+
else
|
382
|
+
original_container_spec["image"]
|
383
|
+
end
|
384
|
+
|
385
|
+
job_start_hash["image"] = image_link
|
386
|
+
job_start_hash["cpu"] = config.options[:cpu] if config.options[:cpu]
|
387
|
+
job_start_hash["memory"] = config.options[:memory] if config.options[:memory]
|
388
|
+
|
389
|
+
job_start_hash.to_yaml
|
390
|
+
end
|
391
|
+
|
392
|
+
def interactive_monitoring_script
|
393
|
+
<<~SCRIPT
|
394
|
+
primary_pid=""
|
395
|
+
|
396
|
+
check_primary() {
|
397
|
+
if ! kill -0 $primary_pid 2>/dev/null; then
|
398
|
+
echo "Primary process has exited. Shutting down."
|
399
|
+
exit 0
|
400
|
+
fi
|
401
|
+
}
|
402
|
+
|
403
|
+
while true; do
|
404
|
+
if [[ -z "$primary_pid" ]]; then
|
405
|
+
primary_pid=$(ps -eo pid,etime,cmd --sort=etime | grep -v "$$" | grep -v 'ps -eo' | grep -v 'grep' | grep 'CPFLOW_RUNNER_SCRIPT' | head -n 1 | awk '{print $1}')
|
406
|
+
if [[ ! -z "$primary_pid" ]]; then
|
407
|
+
echo "Primary process set with PID: $primary_pid"
|
408
|
+
fi
|
409
|
+
else
|
410
|
+
check_primary
|
411
|
+
fi
|
412
|
+
|
413
|
+
sleep 1
|
414
|
+
done
|
415
|
+
SCRIPT
|
416
|
+
end
|
417
|
+
|
418
|
+
def interactive_runner_script
|
419
|
+
script = ""
|
420
|
+
|
421
|
+
# NOTE: fixes terminal size to match local terminal
|
422
|
+
if config.current[:fix_terminal_size] || config.options[:terminal_size]
|
423
|
+
if config.options[:terminal_size]
|
424
|
+
rows, cols = config.options[:terminal_size].split(",")
|
425
|
+
else
|
426
|
+
# NOTE: cannot use `Shell.cmd` here, as `stty size` has to run in a terminal environment
|
427
|
+
rows, cols = `stty size`.split(/\s+/)
|
428
|
+
end
|
429
|
+
script += "stty rows #{rows}\nstty cols #{cols}\n"
|
430
|
+
end
|
431
|
+
|
432
|
+
script
|
433
|
+
end
|
434
|
+
|
435
|
+
def runner_script # rubocop:disable Metrics/MethodLength
|
436
|
+
script = <<~SCRIPT
|
437
|
+
unset CPFLOW_RUNNER_SCRIPT
|
438
|
+
unset CPFLOW_MONITORING_SCRIPT
|
439
|
+
|
440
|
+
if [ -n "$CPFLOW_TOKEN" ]; then
|
441
|
+
CPLN_TOKEN=$CPFLOW_TOKEN
|
442
|
+
unset CPFLOW_TOKEN
|
443
|
+
fi
|
444
|
+
SCRIPT
|
445
|
+
|
446
|
+
script += interactive_runner_script if interactive
|
447
|
+
|
448
|
+
script +=
|
449
|
+
if @log_method == 1 || @interactive
|
450
|
+
args_join(config.args)
|
451
|
+
else
|
452
|
+
<<~SCRIPT
|
453
|
+
( #{args_join(config.args)} )
|
454
|
+
CPFLOW_EXIT_CODE=$?
|
455
|
+
echo '#{MAGIC_END}'
|
456
|
+
exit $CPFLOW_EXIT_CODE
|
457
|
+
SCRIPT
|
458
|
+
end
|
459
|
+
|
460
|
+
script
|
461
|
+
end
|
462
|
+
|
463
|
+
def wait_for_job_status
|
464
|
+
Kernel.sleep(1) until (exit_code = resolve_job_status)
|
465
|
+
exit_code
|
466
|
+
end
|
467
|
+
|
468
|
+
def wait_for_job_status_and_log(logs_pipe) # rubocop:disable Metrics/MethodLength
|
469
|
+
no_logs_counter = 0
|
470
|
+
|
471
|
+
loop do
|
472
|
+
no_logs_counter += 1
|
473
|
+
break if no_logs_counter > 60 # 30s
|
474
|
+
break if logs_pipe.eof?
|
475
|
+
next Kernel.sleep(0.5) unless logs_pipe.ready?
|
476
|
+
|
477
|
+
no_logs_counter = 0
|
478
|
+
line = logs_pipe.gets
|
479
|
+
break if line.chomp == MAGIC_END
|
480
|
+
|
481
|
+
puts(line)
|
482
|
+
end
|
483
|
+
|
484
|
+
resolve_job_status
|
485
|
+
end
|
486
|
+
|
487
|
+
def print_detached_commands
|
488
|
+
return unless replica
|
489
|
+
|
490
|
+
app_workload_replica_config = app_workload_replica_args.join(" ")
|
491
|
+
progress.puts(
|
492
|
+
"\n\n" \
|
493
|
+
"- To view logs from the job, run:\n `cpflow logs #{app_workload_replica_config}`\n" \
|
494
|
+
"- To stop the job, run:\n `cpflow ps:stop #{app_workload_replica_config}`\n"
|
495
|
+
)
|
496
|
+
end
|
497
|
+
|
498
|
+
def resolve_job_status # rubocop:disable Metrics/MethodLength
|
499
|
+
loop do
|
500
|
+
result = cp.fetch_cron_workload(runner_workload, location: location)
|
501
|
+
job_details = result&.dig("items")&.find { |item| item["id"] == job }
|
502
|
+
status = job_details&.dig("status")
|
503
|
+
|
504
|
+
Shell.debug("JOB STATUS", status)
|
505
|
+
|
506
|
+
case status
|
507
|
+
when "active", "pending"
|
508
|
+
sleep 1
|
509
|
+
when "successful"
|
510
|
+
break ExitCode::SUCCESS
|
511
|
+
else
|
512
|
+
break ExitCode::ERROR_DEFAULT
|
513
|
+
end
|
514
|
+
end
|
515
|
+
end
|
516
|
+
|
517
|
+
###########################################
|
518
|
+
### temporary extraction from run:detached
|
519
|
+
###########################################
|
520
|
+
def show_logs_waiting # rubocop:disable Metrics/MethodLength
|
521
|
+
retries = 0
|
522
|
+
begin
|
523
|
+
job_finished_count = 0
|
524
|
+
loop do
|
525
|
+
case print_uniq_logs
|
526
|
+
when :finished
|
527
|
+
break
|
528
|
+
when :changed
|
529
|
+
next
|
530
|
+
else
|
531
|
+
job_finished_count += 1 if resolve_job_status
|
532
|
+
break if job_finished_count > 5
|
533
|
+
|
534
|
+
sleep(1)
|
535
|
+
end
|
536
|
+
end
|
537
|
+
|
538
|
+
resolve_job_status
|
539
|
+
rescue RuntimeError => e
|
540
|
+
raise "#{e} Exiting..." unless retries < 10 # MAX_RETRIES
|
541
|
+
|
542
|
+
progress.puts(Shell.color("ERROR: #{e} Retrying...", :red))
|
543
|
+
retries += 1
|
544
|
+
retry
|
545
|
+
end
|
546
|
+
end
|
547
|
+
|
548
|
+
def print_uniq_logs
|
549
|
+
status = nil
|
550
|
+
|
551
|
+
@printed_log_entries ||= []
|
552
|
+
ts = Time.now.to_i
|
553
|
+
entries = normalized_log_entries(from: ts - 60, to: ts)
|
554
|
+
|
555
|
+
(entries - @printed_log_entries).sort.each do |(_ts, val)|
|
556
|
+
status ||= :changed
|
557
|
+
val.chomp == MAGIC_END ? status = :finished : progress.puts(val)
|
558
|
+
end
|
559
|
+
|
560
|
+
@printed_log_entries = entries # as well truncate old entries if any
|
561
|
+
|
562
|
+
status || :unchanged
|
563
|
+
end
|
564
|
+
|
565
|
+
def normalized_log_entries(from:, to:)
|
566
|
+
log = cp.log_get(workload: runner_workload, from: from, to: to, replica: replica)
|
567
|
+
|
568
|
+
log["data"]["result"]
|
569
|
+
.each_with_object([]) { |obj, result| result.concat(obj["values"]) }
|
570
|
+
.select { |ts, _val| ts[..-10].to_i > from }
|
571
|
+
end
|
572
|
+
end
|
573
|
+
end
|
@@ -0,0 +1,113 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module Command
|
4
|
+
class SetupApp < Base
|
5
|
+
NAME = "setup-app"
|
6
|
+
OPTIONS = [
|
7
|
+
app_option(required: true),
|
8
|
+
skip_secret_access_binding_option,
|
9
|
+
skip_secrets_setup_option,
|
10
|
+
skip_post_creation_hook_option
|
11
|
+
].freeze
|
12
|
+
DESCRIPTION = "Creates an app and all its workloads"
|
13
|
+
LONG_DESCRIPTION = <<~DESC
|
14
|
+
- Creates an app and all its workloads
|
15
|
+
- Specify the templates for the app and workloads through `setup_app_templates` in the `.controlplane/controlplane.yml` file
|
16
|
+
- This should only be used for temporary apps like review apps, never for persistent apps like production or staging (to update workloads for those, use 'cpflow apply-template' instead)
|
17
|
+
- Configures app to have org-level secrets with default name "{APP_PREFIX}-secrets"
|
18
|
+
using org-level policy with default name "{APP_PREFIX}-secrets-policy" (names can be customized, see docs)
|
19
|
+
- Creates identity for secrets if it does not exist
|
20
|
+
- Use `--skip-secrets-setup` to prevent the automatic setup of secrets,
|
21
|
+
or set it through `skip_secrets_setup` in the `.controlplane/controlplane.yml` file
|
22
|
+
- Runs a post-creation hook after the app is created if `hooks.post_creation` is specified in the `.controlplane/controlplane.yml` file
|
23
|
+
- If the hook exits with a non-zero code, the command will stop executing and also exit with a non-zero code
|
24
|
+
- Use `--skip-post-creation-hook` to skip the hook if specified in `controlplane.yml`
|
25
|
+
DESC
|
26
|
+
VALIDATIONS = %w[config templates].freeze
|
27
|
+
|
28
|
+
def call # rubocop:disable Metrics/CyclomaticComplexity, Metrics/MethodLength
|
29
|
+
templates = config[:setup_app_templates]
|
30
|
+
|
31
|
+
app = cp.fetch_gvc
|
32
|
+
if app
|
33
|
+
raise "App '#{config.app}' already exists. If you want to update this app, " \
|
34
|
+
"either run 'cpflow delete -a #{config.app}' and then re-run this command, " \
|
35
|
+
"or run 'cpflow apply-template #{templates.join(' ')} -a #{config.app}'."
|
36
|
+
end
|
37
|
+
|
38
|
+
skip_secrets_setup = config.options[:skip_secret_access_binding] ||
|
39
|
+
config.options[:skip_secrets_setup] || config.current[:skip_secrets_setup]
|
40
|
+
|
41
|
+
create_secret_and_policy_if_not_exist unless skip_secrets_setup
|
42
|
+
|
43
|
+
args = []
|
44
|
+
args.push("--add-app-identity") unless skip_secrets_setup
|
45
|
+
Cpflow::Cli.start(["apply-template", *templates, "-a", config.app, *args])
|
46
|
+
|
47
|
+
bind_identity_to_policy unless skip_secrets_setup
|
48
|
+
run_post_creation_hook unless config.options[:skip_post_creation_hook]
|
49
|
+
end
|
50
|
+
|
51
|
+
private
|
52
|
+
|
53
|
+
def create_secret_and_policy_if_not_exist
|
54
|
+
create_secret_if_not_exists
|
55
|
+
create_policy_if_not_exists
|
56
|
+
|
57
|
+
progress.puts
|
58
|
+
end
|
59
|
+
|
60
|
+
def create_secret_if_not_exists
|
61
|
+
if cp.fetch_secret(config.secrets)
|
62
|
+
progress.puts("Secret '#{config.secrets}' already exists. Skipping creation...")
|
63
|
+
else
|
64
|
+
step("Creating secret '#{config.secrets}'") do
|
65
|
+
cp.apply_hash(build_secret_hash)
|
66
|
+
end
|
67
|
+
end
|
68
|
+
end
|
69
|
+
|
70
|
+
def create_policy_if_not_exists
|
71
|
+
if cp.fetch_policy(config.secrets_policy)
|
72
|
+
progress.puts("Policy '#{config.secrets_policy}' already exists. Skipping creation...")
|
73
|
+
else
|
74
|
+
step("Creating policy '#{config.secrets_policy}'") do
|
75
|
+
cp.apply_hash(build_policy_hash)
|
76
|
+
end
|
77
|
+
end
|
78
|
+
end
|
79
|
+
|
80
|
+
def build_secret_hash
|
81
|
+
{
|
82
|
+
"kind" => "secret",
|
83
|
+
"name" => config.secrets,
|
84
|
+
"type" => "dictionary",
|
85
|
+
"data" => {}
|
86
|
+
}
|
87
|
+
end
|
88
|
+
|
89
|
+
def build_policy_hash
|
90
|
+
{
|
91
|
+
"kind" => "policy",
|
92
|
+
"name" => config.secrets_policy,
|
93
|
+
"targetKind" => "secret",
|
94
|
+
"targetLinks" => ["//secret/#{config.secrets}"]
|
95
|
+
}
|
96
|
+
end
|
97
|
+
|
98
|
+
def bind_identity_to_policy
|
99
|
+
progress.puts
|
100
|
+
|
101
|
+
step("Binding identity '#{config.identity}' to policy '#{config.secrets_policy}'") do
|
102
|
+
cp.bind_identity_to_policy(config.identity_link, config.secrets_policy)
|
103
|
+
end
|
104
|
+
end
|
105
|
+
|
106
|
+
def run_post_creation_hook
|
107
|
+
post_creation_hook = config.current.dig(:hooks, :post_creation)
|
108
|
+
return unless post_creation_hook
|
109
|
+
|
110
|
+
run_command_in_latest_image(post_creation_hook, title: "post-creation hook")
|
111
|
+
end
|
112
|
+
end
|
113
|
+
end
|