floe 0.10.0 → 0.11.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +18 -1
- data/exe/floe +35 -28
- data/lib/floe/container_runner/docker.rb +225 -0
- data/lib/floe/container_runner/docker_mixin.rb +32 -0
- data/lib/floe/container_runner/kubernetes.rb +329 -0
- data/lib/floe/container_runner/podman.rb +104 -0
- data/lib/floe/container_runner.rb +61 -0
- data/lib/floe/runner.rb +82 -0
- data/lib/floe/version.rb +1 -1
- data/lib/floe/workflow/context.rb +3 -1
- data/lib/floe/workflow/states/task.rb +2 -2
- data/lib/floe.rb +2 -18
- metadata +8 -7
- data/lib/floe/workflow/runner/docker.rb +0 -227
- data/lib/floe/workflow/runner/docker_mixin.rb +0 -32
- data/lib/floe/workflow/runner/kubernetes.rb +0 -331
- data/lib/floe/workflow/runner/podman.rb +0 -106
- data/lib/floe/workflow/runner.rb +0 -77
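
The headline change in this release is the move of the container runners out of the Floe::Workflow namespace: lib/floe/workflow/runner/*.rb is replaced by lib/floe/container_runner/*.rb plus a new generic lib/floe/runner.rb base, with the removed 0.10.0 sources shown in full further down. A minimal migration sketch, assuming the new constant names simply mirror the new file paths (Floe::ContainerRunner::Docker and friends; verify against the 0.11.0 source):

require "floe"

# floe 0.10.0 and earlier (removed below):
#   runner = Floe::Workflow::Runner::Docker.new("network" => "bridge")
# floe 0.11.0, assuming the constants mirror the new file paths:
runner = Floe::ContainerRunner::Docker.new("network" => "bridge")
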
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: floe
 version: !ruby/object:Gem::Version
-  version: 0.10.0
+  version: 0.11.0
 platform: ruby
 authors:
 - ManageIQ Developers
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-05-02 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: awesome_spawn
@@ -185,8 +185,14 @@ files:
 - exe/floe
 - floe.gemspec
 - lib/floe.rb
+- lib/floe/container_runner.rb
+- lib/floe/container_runner/docker.rb
+- lib/floe/container_runner/docker_mixin.rb
+- lib/floe/container_runner/kubernetes.rb
+- lib/floe/container_runner/podman.rb
 - lib/floe/logging.rb
 - lib/floe/null_logger.rb
+- lib/floe/runner.rb
 - lib/floe/version.rb
 - lib/floe/workflow.rb
 - lib/floe/workflow/catcher.rb
@@ -200,11 +206,6 @@ files:
 - lib/floe/workflow/payload_template.rb
 - lib/floe/workflow/reference_path.rb
 - lib/floe/workflow/retrier.rb
-- lib/floe/workflow/runner.rb
-- lib/floe/workflow/runner/docker.rb
-- lib/floe/workflow/runner/docker_mixin.rb
-- lib/floe/workflow/runner/kubernetes.rb
-- lib/floe/workflow/runner/podman.rb
 - lib/floe/workflow/state.rb
 - lib/floe/workflow/states/choice.rb
 - lib/floe/workflow/states/fail.rb
data/lib/floe/workflow/runner/docker.rb
DELETED
@@ -1,227 +0,0 @@
-# frozen_string_literal: true
-
-module Floe
-  class Workflow
-    class Runner
-      class Docker < Floe::Workflow::Runner
-        include DockerMixin
-
-        DOCKER_COMMAND = "docker"
-
-        def initialize(options = {})
-          require "awesome_spawn"
-          require "io/wait"
-          require "tempfile"
-
-          super
-
-          @network = options.fetch("network", "bridge")
-          @pull_policy = options["pull-policy"]
-        end
-
-        def run_async!(resource, env = {}, secrets = {})
-          raise ArgumentError, "Invalid resource" unless resource&.start_with?("docker://")
-
-          image = resource.sub("docker://", "")
-
-          runner_context = {}
-
-          if secrets && !secrets.empty?
-            runner_context["secrets_ref"] = create_secret(secrets)
-          end
-
-          begin
-            runner_context["container_ref"] = run_container(image, env, runner_context["secrets_ref"])
-            runner_context
-          rescue AwesomeSpawn::CommandResultError => err
-            cleanup(runner_context)
-            {"Error" => "States.TaskFailed", "Cause" => err.to_s}
-          end
-        end
-
-        def cleanup(runner_context)
-          container_id, secrets_file = runner_context.values_at("container_ref", "secrets_ref")
-
-          delete_container(container_id) if container_id
-          delete_secret(secrets_file) if secrets_file
-        end
-
-        def wait(timeout: nil, events: %i[create update delete], &block)
-          until_timestamp = Time.now.utc + timeout if timeout
-
-          r, w = IO.pipe
-
-          pid = AwesomeSpawn.run_detached(
-            self.class::DOCKER_COMMAND, :err => :out, :out => w, :params => wait_params(until_timestamp)
-          )
-
-          w.close
-
-          loop do
-            readable_timeout = until_timestamp - Time.now.utc if until_timestamp
-
-            # Wait for our end of the pipe to be readable and if it didn't timeout
-            # get the events from stdout
-            next if r.wait_readable(readable_timeout).nil?
-
-            # Get all events while the pipe is readable
-            notices = []
-            while r.ready?
-              notice = r.gets
-
-              # If the process has exited `r.gets` returns `nil` and the pipe is
-              # always `ready?`
-              break if notice.nil?
-
-              event, runner_context = parse_notice(notice)
-              next if event.nil? || !events.include?(event)
-
-              notices << [event, runner_context]
-            end
-
-            # If we're given a block yield the events otherwise return them
-            if block
-              notices.each(&block)
-            else
-              # Terminate the `docker events` process before returning the events
-              sigterm(pid)
-
-              return notices
-            end
-
-            # Check that the `docker events` process is still alive
-            Process.kill(0, pid)
-          rescue Errno::ESRCH
-            # Break out of the loop if the `docker events` process has exited
-            break
-          end
-        ensure
-          r.close
-        end
-
-        def status!(runner_context)
-          return if runner_context.key?("Error")
-
-          runner_context["container_state"] = inspect_container(runner_context["container_ref"])&.dig("State")
-        end
-
-        def running?(runner_context)
-          !!runner_context.dig("container_state", "Running")
-        end
-
-        def success?(runner_context)
-          runner_context.dig("container_state", "ExitCode") == 0
-        end
-
-        def output(runner_context)
-          return runner_context.slice("Error", "Cause") if runner_context.key?("Error")
-
-          output = docker!("logs", runner_context["container_ref"], :combined_output => true).output
-          runner_context["output"] = output
-        end
-
-        private
-
-        attr_reader :network
-
-        def run_container(image, env, secrets_file)
-          params = run_container_params(image, env, secrets_file)
-
-          logger.debug("Running #{AwesomeSpawn.build_command_line(self.class::DOCKER_COMMAND, params)}")
-
-          result = docker!(*params)
-          result.output
-        end
-
-        def run_container_params(image, env, secrets_file)
-          params = ["run"]
-          params << :detach
-          params += env.map { |k, v| [:e, "#{k}=#{v}"] }
-          params << [:e, "_CREDENTIALS=/run/secrets"] if secrets_file
-          params << [:pull, @pull_policy] if @pull_policy
-          params << [:net, "host"] if @network == "host"
-          params << [:v, "#{secrets_file}:/run/secrets:z"] if secrets_file
-          params << [:name, container_name(image)]
-          params << image
-        end
-
-        def wait_params(until_timestamp)
-          params = ["events", [:format, "{{json .}}"], [:filter, "type=container"], [:since, Time.now.utc.to_i]]
-          params << [:until, until_timestamp.to_i] if until_timestamp
-          params
-        end
-
-        def parse_notice(notice)
-          notice = JSON.parse(notice)
-
-          status = notice["status"]
-          event = docker_event_status_to_event(status)
-          running = event != :delete
-
-          name, exit_code = notice.dig("Actor", "Attributes")&.values_at("name", "exitCode")
-
-          runner_context = {"container_ref" => name, "container_state" => {"Running" => running, "ExitCode" => exit_code.to_i}}
-
-          [event, runner_context]
-        rescue JSON::ParserError
-          []
-        end
-
-        def docker_event_status_to_event(status)
-          case status
-          when "create"
-            :create
-          when "start"
-            :update
-          when "die", "destroy"
-            :delete
-          else
-            :unkonwn
-          end
-        end
-
-        def inspect_container(container_id)
-          JSON.parse(docker!("inspect", container_id).output).first
-        rescue
-          nil
-        end
-
-        def delete_container(container_id)
-          docker!("rm", container_id)
-        rescue
-          nil
-        end
-
-        def delete_secret(secrets_file)
-          return unless File.exist?(secrets_file)
-
-          File.unlink(secrets_file)
-        rescue
-          nil
-        end
-
-        def create_secret(secrets)
-          secrets_file = Tempfile.new
-          secrets_file.write(secrets.to_json)
-          secrets_file.close
-          secrets_file.path
-        end
-
-        def sigterm(pid)
-          Process.kill("TERM", pid)
-        rescue Errno::ESRCH
-          nil
-        end
-
-        def global_docker_options
-          []
-        end
-
-        def docker!(*args, **kwargs)
-          params = global_docker_options + args
-          AwesomeSpawn.run!(self.class::DOCKER_COMMAND, :params => params, **kwargs)
-        end
-      end
-    end
-  end
-end
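
For orientation, the removed Floe::Workflow::Runner::Docker above drove each task through the docker CLI via AwesomeSpawn. A hypothetical lifecycle sketch pieced together only from the methods shown in this diff; the image, env, and secrets values are made up, and run_async! returns an error context rather than raising when docker run fails:

runner = Floe::Workflow::Runner::Docker.new("network" => "bridge", "pull-policy" => "missing")

# Start the container; on failure this returns {"Error" => "States.TaskFailed", ...}
context = runner.run_async!("docker://alpine:3.19", {"FOO" => "bar"}, {"password" => "s3cr3t"})

loop do
  runner.status!(context)              # refresh context["container_state"] via `docker inspect`
  break unless runner.running?(context)
  sleep(1)
end

puts runner.success?(context)          # true when ExitCode == 0
puts runner.output(context)            # `docker logs` output
runner.cleanup(context)                # remove the container and the secrets tempfile
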
data/lib/floe/workflow/runner/docker_mixin.rb
DELETED
@@ -1,32 +0,0 @@
-module Floe
-  class Workflow
-    class Runner
-      module DockerMixin
-        def image_name(image)
-          image.match(%r{^(?<repository>.+/)?(?<image>.+):(?<tag>.+)$})&.named_captures&.dig("image")
-        end
-
-        # 63 is the max kubernetes pod name length
-        # -5 for the "floe-" prefix
-        # -9 for the random hex suffix and leading hyphen
-        MAX_CONTAINER_NAME_SIZE = 63 - 5 - 9
-
-        def container_name(image)
-          name = image_name(image)
-          raise ArgumentError, "Invalid docker image [#{image}]" if name.nil?
-
-          # Normalize the image name to be used in the container name.
-          # This follows RFC 1123 Label names in Kubernetes as they are the most restrictive
-          # See https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
-          # and https://github.com/kubernetes/kubernetes/blob/952a9cb0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L178-L184
-          #
-          # This does not follow the leading and trailing character restriction because we will embed it
-          # below with a prefix and suffix that already conform to the RFC.
-          normalized_name = name.downcase.gsub(/[^a-z0-9-]/, "-")[0, MAX_CONTAINER_NAME_SIZE]
-
-          "floe-#{normalized_name}-#{SecureRandom.hex(4)}"
-        end
-      end
-    end
-  end
-end
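
The removed DockerMixin holds the container/pod naming logic shared by the Docker and Kubernetes runners in this diff. A worked example of that logic, runnable against a 0.10.x install where the module still exists; the image string is illustrative:

require "securerandom"
require "floe"

include Floe::Workflow::Runner::DockerMixin

image_name("quay.io/my_org/My.Image:v1.2")      # => "My.Image"
container_name("quay.io/my_org/My.Image:v1.2")  # => e.g. "floe-my-image-1a2b3c4d"
# Downcased, characters outside [a-z0-9-] replaced with "-", truncated to
# MAX_CONTAINER_NAME_SIZE (49), then wrapped with the "floe-" prefix and a
# SecureRandom.hex(4) suffix to stay within the 63-character pod name limit.
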
data/lib/floe/workflow/runner/kubernetes.rb
DELETED
@@ -1,331 +0,0 @@
-# frozen_string_literal: true
-
-module Floe
-  class Workflow
-    class Runner
-      class Kubernetes < Floe::Workflow::Runner
-        include DockerMixin
-
-        TOKEN_FILE = "/run/secrets/kubernetes.io/serviceaccount/token"
-        CA_CERT_FILE = "/run/secrets/kubernetes.io/serviceaccount/ca.crt"
-        RUNNING_PHASES = %w[Pending Running].freeze
-        FAILURE_REASONS = %w[CrashLoopBackOff ImagePullBackOff ErrImagePull].freeze
-
-        def initialize(options = {})
-          require "active_support/core_ext/hash/keys"
-          require "awesome_spawn"
-          require "securerandom"
-          require "base64"
-          require "kubeclient"
-          require "yaml"
-
-          @kubeconfig_file = ENV.fetch("KUBECONFIG", nil) || options.fetch("kubeconfig", File.join(Dir.home, ".kube", "config"))
-          @kubeconfig_context = options["kubeconfig_context"]
-
-          @token = options["token"]
-          @token ||= File.read(options["token_file"]) if options.key?("token_file")
-          @token ||= File.read(TOKEN_FILE) if File.exist?(TOKEN_FILE)
-
-          @server = options["server"]
-          @server ||= URI::HTTPS.build(:host => ENV.fetch("KUBERNETES_SERVICE_HOST"), :port => ENV.fetch("KUBERNETES_SERVICE_PORT", 6443)) if ENV.key?("KUBERNETES_SERVICE_HOST")
-
-          @ca_file = options["ca_file"]
-          @ca_file ||= CA_CERT_FILE if File.exist?(CA_CERT_FILE)
-
-          @verify_ssl = options["verify_ssl"] == "false" ? OpenSSL::SSL::VERIFY_NONE : OpenSSL::SSL::VERIFY_PEER
-
-          if server.nil? && token.nil? && !File.exist?(kubeconfig_file)
-            raise ArgumentError, "Missing connections options, provide a kubeconfig file or pass server and token via --docker-runner-options"
-          end
-
-          @namespace = options.fetch("namespace", "default")
-
-          @pull_policy = options["pull-policy"]
-          @task_service_account = options["task_service_account"]
-
-          super
-        end
-
-        def run_async!(resource, env = {}, secrets = {})
-          raise ArgumentError, "Invalid resource" unless resource&.start_with?("docker://")
-
-          image = resource.sub("docker://", "")
-          name = container_name(image)
-          secret = create_secret!(secrets) if secrets && !secrets.empty?
-
-          runner_context = {"container_ref" => name, "container_state" => {"phase" => "Pending"}, "secrets_ref" => secret}
-
-          begin
-            create_pod!(name, image, env, secret)
-            runner_context
-          rescue Kubeclient::HttpError => err
-            cleanup(runner_context)
-            {"Error" => "States.TaskFailed", "Cause" => err.to_s}
-          end
-        end
-
-        def status!(runner_context)
-          return if runner_context.key?("Error")
-
-          runner_context["container_state"] = pod_info(runner_context["container_ref"]).to_h.deep_stringify_keys["status"]
-        end
-
-        def running?(runner_context)
-          return false unless pod_running?(runner_context)
-          # If a pod is Pending and the containers are waiting with a failure
-          # reason such as ImagePullBackOff or CrashLoopBackOff then the pod
-          # will never be run.
-          return false if container_failed?(runner_context)
-
-          true
-        end
-
-        def success?(runner_context)
-          runner_context.dig("container_state", "phase") == "Succeeded"
-        end
-
-        def output(runner_context)
-          if runner_context.key?("Error")
-            runner_context.slice("Error", "Cause")
-          elsif container_failed?(runner_context)
-            failed_state = failed_container_states(runner_context).first
-            {"Error" => failed_state["reason"], "Cause" => failed_state["message"]}
-          else
-            runner_context["output"] = kubeclient.get_pod_log(runner_context["container_ref"], namespace).body
-          end
-        end
-
-        def cleanup(runner_context)
-          pod, secret = runner_context.values_at("container_ref", "secrets_ref")
-
-          delete_pod(pod) if pod
-          delete_secret(secret) if secret
-        end
-
-        def wait(timeout: nil, events: %i[create update delete])
-          retry_connection = true
-
-          begin
-            watcher = kubeclient.watch_pods(:namespace => namespace)
-
-            retry_connection = true
-
-            if timeout.to_i > 0
-              timeout_thread = Thread.new do
-                sleep(timeout)
-                watcher.finish
-              end
-            end
-
-            watcher.each do |notice|
-              break if error_notice?(notice)
-
-              event = kube_notice_type_to_event(notice.type)
-              next unless events.include?(event)
-
-              runner_context = parse_notice(notice)
-              next if runner_context.nil?
-
-              if block_given?
-                yield [event, runner_context]
-              else
-                timeout_thread&.kill # If we break out before the timeout, kill the timeout thread
-                return [[event, runner_context]]
-              end
-            end
-          rescue Kubeclient::HttpError => err
-            raise unless err.error_code == 401 && retry_connection
-
-            @kubeclient = nil
-            retry_connection = false
-            retry
-          ensure
-            begin
-              watch&.finish
-            rescue
-              nil
-            end
-
-            timeout_thread&.join(0)
-          end
-        end
-
-        private
-
-        attr_reader :ca_file, :kubeconfig_file, :kubeconfig_context, :namespace, :server, :token, :verify_ssl
-
-        def pod_info(pod_name)
-          kubeclient.get_pod(pod_name, namespace)
-        end
-
-        def pod_running?(context)
-          RUNNING_PHASES.include?(context.dig("container_state", "phase"))
-        end
-
-        def failed_container_states(context)
-          container_statuses = context.dig("container_state", "containerStatuses") || []
-          container_statuses.filter_map { |status| status["state"]&.values&.first }
-                            .select { |state| FAILURE_REASONS.include?(state["reason"]) }
-        end
-
-        def container_failed?(context)
-          failed_container_states(context).any?
-        end
-
-        def pod_spec(name, image, env, secret = nil)
-          spec = {
-            :kind => "Pod",
-            :apiVersion => "v1",
-            :metadata => {
-              :name => name,
-              :namespace => namespace
-            },
-            :spec => {
-              :containers => [
-                {
-                  :name => name[0...-9], # remove the random suffix and its leading hyphen
-                  :image => image,
-                  :env => env.map { |k, v| {:name => k, :value => v.to_s} }
-                }
-              ],
-              :restartPolicy => "Never"
-            }
-          }
-
-          spec[:spec][:imagePullPolicy] = @pull_policy if @pull_policy
-          spec[:spec][:serviceAccountName] = @task_service_account if @task_service_account
-
-          if secret
-            spec[:spec][:volumes] = [
-              {
-                :name => "secret-volume",
-                :secret => {:secretName => secret}
-              }
-            ]
-
-            spec[:spec][:containers][0][:env] << {
-              :name => "_CREDENTIALS",
-              :value => "/run/secrets/#{secret}/secret"
-            }
-
-            spec[:spec][:containers][0][:volumeMounts] = [
-              {
-                :name => "secret-volume",
-                :mountPath => "/run/secrets/#{secret}",
-                :readOnly => true
-              }
-            ]
-          end
-
-          spec
-        end
-
-        def create_pod!(name, image, env, secret = nil)
-          kubeclient.create_pod(pod_spec(name, image, env, secret))
-        end
-
-        def delete_pod!(name)
-          kubeclient.delete_pod(name, namespace)
-        end
-
-        def delete_pod(name)
-          delete_pod!(name)
-        rescue
-          nil
-        end
-
-        def create_secret!(secrets)
-          secret_name = SecureRandom.uuid
-
-          secret_config = {
-            :kind => "Secret",
-            :apiVersion => "v1",
-            :metadata => {
-              :name => secret_name,
-              :namespace => namespace
-            },
-            :data => {
-              :secret => Base64.urlsafe_encode64(secrets.to_json)
-            },
-            :type => "Opaque"
-          }
-
-          kubeclient.create_secret(secret_config)
-
-          secret_name
-        end
-
-        def delete_secret!(secret_name)
-          kubeclient.delete_secret(secret_name, namespace)
-        end
-
-        def delete_secret(name)
-          delete_secret!(name)
-        rescue
-          nil
-        end
-
-        def kube_notice_type_to_event(type)
-          case type
-          when "ADDED"
-            :create
-          when "MODIFIED"
-            :update
-          when "DELETED"
-            :delete
-          else
-            :unknown
-          end
-        end
-
-        def error_notice?(notice)
-          return false unless notice.type == "ERROR"
-
-          message = notice.object&.message
-          code = notice.object&.code
-          reason = notice.object&.reason
-
-          logger.warn("Received [#{code} #{reason}], [#{message}]")
-
-          true
-        end
-
-        def parse_notice(notice)
-          return if notice.object.nil?
-
-          pod = notice.object
-          container_ref = pod.metadata.name
-          container_state = pod.to_h[:status].deep_stringify_keys
-
-          {"container_ref" => container_ref, "container_state" => container_state}
-        end
-
-        def kubeclient
-          return @kubeclient unless @kubeclient.nil?
-
-          if server && token
-            api_endpoint = server
-            auth_options = {:bearer_token => token}
-            ssl_options = {:verify_ssl => verify_ssl}
-            ssl_options[:ca_file] = ca_file if ca_file
-          else
-            context = kubeconfig&.context(kubeconfig_context)
-            raise ArgumentError, "Missing connections options, provide a kubeconfig file or pass server and token via --docker-runner-options" if context.nil?
-
-            api_endpoint = context.api_endpoint
-            auth_options = context.auth_options
-            ssl_options = context.ssl_options
-          end
-
-          @kubeclient = Kubeclient::Client.new(api_endpoint, "v1", :ssl_options => ssl_options, :auth_options => auth_options).tap(&:discover)
-        end
-
-        def kubeconfig
-          return if kubeconfig_file.nil? || !File.exist?(kubeconfig_file)
-
-          Kubeclient::Config.read(kubeconfig_file)
-        end
-      end
-    end
-  end
-end
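
For reference, below is roughly the manifest the removed pod_spec method builds for a task with one environment variable and a secret, expressed as the Ruby hash it returns; the pod name, image, and secret UUID are illustrative placeholders:

# pod_spec("floe-hello-world-1a2b3c4d", "docker.io/hello-world:latest",
#          {"FOO" => "bar"}, "<secret-uuid>") would return approximately:
{
  :kind       => "Pod",
  :apiVersion => "v1",
  :metadata   => {:name => "floe-hello-world-1a2b3c4d", :namespace => "default"},
  :spec       => {
    :containers    => [
      {
        :name         => "floe-hello-world", # random suffix stripped for the container name
        :image        => "docker.io/hello-world:latest",
        :env          => [
          {:name => "FOO", :value => "bar"},
          {:name => "_CREDENTIALS", :value => "/run/secrets/<secret-uuid>/secret"}
        ],
        :volumeMounts => [
          {:name => "secret-volume", :mountPath => "/run/secrets/<secret-uuid>", :readOnly => true}
        ]
      }
    ],
    :restartPolicy => "Never",
    :volumes       => [{:name => "secret-volume", :secret => {:secretName => "<secret-uuid>"}}]
  }
}
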