ood_core 0.13.0 → 0.16.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +30 -0
- data/CHANGELOG.md +64 -1
- data/README.md +2 -2
- data/lib/ood_core/cluster.rb +11 -5
- data/lib/ood_core/job/adapters/ccq.rb +19 -12
- data/lib/ood_core/job/adapters/kubernetes.rb +193 -0
- data/lib/ood_core/job/adapters/kubernetes/batch.rb +372 -0
- data/lib/ood_core/job/adapters/kubernetes/helper.rb +299 -0
- data/lib/ood_core/job/adapters/kubernetes/k8s_job_info.rb +9 -0
- data/lib/ood_core/job/adapters/kubernetes/resources.rb +82 -0
- data/lib/ood_core/job/adapters/kubernetes/templates/pod.yml.erb +188 -0
- data/lib/ood_core/job/adapters/linux_host/launcher.rb +25 -10
- data/lib/ood_core/job/adapters/linux_host/templates/script_wrapper.erb.sh +3 -14
- data/lib/ood_core/job/adapters/slurm.rb +18 -1
- data/lib/ood_core/version.rb +1 -1
- data/ood_core.gemspec +2 -1
- metadata +32 -6
- data/.travis.yml +0 -9
@@ -0,0 +1,299 @@
|
|
1
|
+
# frozen_string_literal: true

# Extract info from json data. The data is expected to be from the kubectl
# command and conform to kubernetes' datatype structures.
class OodCore::Job::Adapters::Kubernetes::Helper

  require_relative 'resources'
  require_relative 'k8s_job_info'
  require 'resolv'
  require 'base64'
  require 'date'        # DateTime.parse is used for all timestamp handling below
  require 'shellwords'  # Shellwords.split is used in parse_command (was loaded implicitly before)
  require 'active_support/core_ext/hash'

  # Raised when kubectl's JSON output cannot be read in the expected shape.
  class K8sDataError < StandardError; end

  # Combine the pod, service and secret JSON returned by kubectl into a
  # single job-info object.
  #
  # Returns K8sJobInfo in the in lieu of writing a connection.yml
  #
  # @param pod_json [#to_h]
  #   the pod data returned from 'kubectl get pod abc-123'
  # @param service_json [#to_h]
  #   the service data returned from 'kubectl get service abc-123-service'
  # @param secret_json [#to_h]
  #   the secret data returned from 'kubectl get secret abc-123-secret'
  # @param ns_prefix [#to_s]
  #   the namespace prefix so that namespaces can be converted back to usernames
  # @raise [K8sDataError] when the JSON lacks the expected structure
  # @return [OodCore::Job::Adapters::Kubernetes::K8sJobInfo]
  def info_from_json(pod_json: nil, service_json: nil, secret_json: nil, ns_prefix: nil)
    pod_hash = pod_info_from_json(pod_json, ns_prefix: ns_prefix)
    service_hash = service_info_from_json(service_json)
    secret_hash = secret_info_from_json(secret_json)

    pod_hash.deep_merge!(service_hash)
    pod_hash.deep_merge!(secret_hash)
    # double-splat so K8sJobInfo#initialize receives keyword arguments
    # (passing the bare hash positionally breaks under Ruby 3 kwargs separation)
    OodCore::Job::Adapters::Kubernetes::K8sJobInfo.new(**pod_hash)
  rescue NoMethodError
    raise K8sDataError, "unable to read data correctly from json"
  end

  # Turn a container hash into a Kubernetes::Resources::Container
  #
  # @param container [#to_h]
  #   the input container hash
  # @param default_env [#to_h]
  #   Default env to merge with defined env
  # @return [OodCore::Job::Adapters::Kubernetes::Resources::Container]
  def container_from_native(container, default_env)
    # explicitly defined env entries win over the adapter-wide defaults
    env = container.fetch(:env, {}).to_h.symbolize_keys
    OodCore::Job::Adapters::Kubernetes::Resources::Container.new(
      container[:name],
      container[:image],
      command: parse_command(container[:command]),
      port: container[:port],
      env: default_env.merge(env),
      memory: container[:memory],
      cpu: container[:cpu],
      working_dir: container[:working_dir],
      restart_policy: container[:restart_policy],
      image_pull_secret: container[:image_pull_secret]
    )
  end

  # Parse a command string given from a user and return an array.
  # If given an array, the input is simply returned back.
  #
  # @param cmd [#to_s]
  #   the command to parse
  # @return [Array<#to_s>]
  #   the command parsed into an array of arguments
  def parse_command(cmd)
    # nil is never an Array, so the former `cmd&.is_a?(Array)` safe
    # navigation was redundant; nil falls through to Shellwords below.
    if cmd.is_a?(Array)
      cmd
    else
      Shellwords.split(cmd.to_s)
    end
  end

  # Turn a configmap hash into a Kubernetes::Resources::ConfigMap
  # that can be used in templates. Needs an id so that the resulting
  # configmap has a known name.
  #
  # @param native [#to_h]
  #   the input configmap hash
  # @param id [#to_s]
  #   the id to use for giving the configmap a name
  # @return [OodCore::Job::Adapters::Kubernetes::Resources::ConfigMap, nil]
  def configmap_from_native(native, id)
    configmap = native.fetch(:configmap, nil)
    return nil if configmap.nil?

    OodCore::Job::Adapters::Kubernetes::Resources::ConfigMap.new(
      configmap_name(id),
      (configmap[:files] || [])
    )
  end

  # parse initialization containers from native data
  #
  # @param ctrs [Array<#to_h>]
  #   the raw init container hashes to parse (may be nil)
  # @param default_env [#to_h]
  #   Default env to merge with defined env
  # @return [Array<OodCore::Job::Adapters::Kubernetes::Resources::Container>]
  #   the array of init containers
  def init_ctrs_from_native(ctrs, default_env)
    init_ctrs = []

    ctrs&.each do |ctr_raw|
      ctr = container_from_native(ctr_raw, default_env)
      init_ctrs.push(ctr)
    end

    init_ctrs
  end

  # Conventional name of the Service resource for a given job id.
  def service_name(id)
    id + '-service'
  end

  # Conventional name of the Secret resource for a given job id.
  def secret_name(id)
    id + '-secret'
  end

  # Conventional name of the ConfigMap resource for a given job id.
  def configmap_name(id)
    id + '-configmap'
  end

  # Format a number of seconds as "HHhMMmSSs" (e.g. 3661 -> "01h01m01s").
  def seconds_to_duration(s)
    "%02dh%02dm%02ds" % [s / 3600, s / 60 % 60, s % 60]
  end

  # Extract pod info from json data. The data is expected to be from the kubectl
  # command and conform to kubernetes' datatype structures.
  #
  # @param json_data [#to_h]
  #   the pod data returned from 'kubectl get pod abc-123'
  # @param ns_prefix [#to_s]
  #   the namespace prefix so that namespaces can be converted back to usernames
  # @raise [K8sDataError] when the JSON lacks the expected structure
  # @return [#to_h]
  #   the hash of info expected from adapters
  def pod_info_from_json(json_data, ns_prefix: nil)
    {
      id: json_data.dig(:metadata, :name).to_s,
      job_name: name_from_metadata(json_data.dig(:metadata)),
      status: pod_status_from_json(json_data),
      job_owner: job_owner_from_json(json_data, ns_prefix),
      submission_time: submission_time(json_data),
      dispatch_time: dispatch_time(json_data),
      wallclock_time: wallclock_time(json_data),
      ood_connection_info: { host: get_host(json_data.dig(:status, :hostIP)) },
      procs: procs_from_json(json_data)
    }
  rescue NoMethodError
    # gotta raise an error because Info.new will throw an error if id is undefined
    raise K8sDataError, "unable to read data correctly from json"
  end

  private

  # Reverse-resolve a node IP to a hostname; fall back to the raw IP when
  # DNS has no PTR record for it.
  def get_host(ip)
    Resolv.getname(ip)
  rescue Resolv::ResolvError
    ip
  end

  # Best-effort job name: prefer the app.kubernetes.io/name label, then the
  # k8s-app label, then the pod's own name.
  def name_from_metadata(metadata)
    name = metadata.dig(:labels, :'app.kubernetes.io/name')
    name = metadata.dig(:labels, :'k8s-app') if name.nil?
    name = metadata.dig(:name) if name.nil? # pod-id but better than nil?
    name
  end

  # Pull the NodePort out of the service JSON; returns {} when the service
  # data is missing or malformed.
  def service_info_from_json(json_data)
    # all we need is the port - .spec.ports[0].nodePort
    ports = json_data.dig(:spec, :ports)
    { ood_connection_info: { port: ports[0].dig(:nodePort) } }
  rescue StandardError
    {}
  end

  # Decode the base64 password out of the secret JSON; returns {} when the
  # secret data is missing or malformed.
  def secret_info_from_json(json_data)
    raw = json_data.dig(:data, :password)
    { ood_connection_info: { password: Base64.decode64(raw) } }
  rescue StandardError
    {}
  end

  # Unix timestamp for when the first container actually started, or nil if
  # the pod has not started (only meaningful for running/completed pods).
  def dispatch_time(json_data)
    status = pod_status_from_json(json_data)
    container_statuses = json_data.dig(:status, :containerStatuses)
    return nil if container_statuses.nil?

    state_data = container_statuses[0].dig(:state)
    date_string = nil

    if status == 'completed'
      date_string = state_data.dig(:terminated, :startedAt)
    elsif status == 'running'
      date_string = state_data.dig(:running, :startedAt)
    end

    date_string.nil? ? nil : DateTime.parse(date_string).to_time.to_i
  end

  # Elapsed seconds between the container's start and its end (or now, for a
  # running pod); nil when the pod never started or has no end time.
  def wallclock_time(json_data)
    status = pod_status_from_json(json_data)
    container_statuses = json_data.dig(:status, :containerStatuses)
    return nil if container_statuses.nil?

    state_data = container_statuses[0].dig(:state)
    start_time = dispatch_time(json_data)
    return nil if start_time.nil?

    et = end_time(status, state_data)

    et.nil? ? nil : et - start_time
  end

  # Unix timestamp marking the end of a completed pod, "now" for a running
  # pod, or nil for any other state.
  def end_time(status, state_data)
    if status == 'completed'
      end_time_string = state_data.dig(:terminated, :finishedAt)
      et = DateTime.parse(end_time_string).to_time.to_i
    elsif status == 'running'
      et = DateTime.now.to_time.to_i
    else
      et = nil
    end

    et
  end

  # Unix timestamp for when the pod was submitted. Falls back to the first
  # condition's transition time while the pod is still pending.
  def submission_time(json_data)
    status = json_data.dig(:status)
    start = status.dig(:startTime)

    if start.nil?
      # the pod is in some pending state limbo
      conditions = status.dig(:conditions)
      # best guess to start time is just the first condition's
      # transition time
      str = conditions[0].dig(:lastTransitionTime)
    else
      str = start
    end

    DateTime.parse(str).to_time.to_i
  end

  # Map a Kubernetes pod phase onto an OodCore::Job::Status.
  def pod_status_from_json(json_data)
    phase = json_data.dig(:status, :phase)
    state = case phase
            when "Running"
              "running"
            when "Pending"
              "queued"
            when "Failed"
              "suspended"
            when "Succeeded"
              "completed"
            else # includes "Unknown"
              "undetermined"
            end

    OodCore::Job::Status.new(state: state)
  end

  # Map a terminated container's reason onto an adapter state string.
  # NOTE(review): not referenced anywhere in this file; kept for
  # backward compatibility — confirm before removing.
  def terminated_state(status)
    reason = status.dig(:terminated, :reason)
    if reason == 'Error'
      'suspended'
    else
      'completed'
    end
  end

  # Derive a proc count from the first container's cpu limit.
  #
  # @param json_data [#to_h] the pod data
  # @return [String, nil] the proc count as a string (or the raw cpu value)
  def procs_from_json(json_data)
    containers = json_data.dig(:spec, :containers)
    resources = containers[0].dig(:resources)

    cpu = resources.dig(:limits, :cpu)
    millicores_rex = /(\d+)m/

    # ok to return string bc nil.to_i == 0 and we'd rather return
    # nil (undefined) than 0 which is confusing.
    if millicores_rex.match?(cpu)
      millicores = millicores_rex.match(cpu)[1].to_i

      # Ceiling-divide so fractional requests (e.g. 200m) report at least
      # 1 proc. Was (millicores + 1000) / 1000, which overcounted exact
      # multiples: a 1000m (one core) limit reported 2 procs.
      ((millicores + 999) / 1000).to_s
    else
      cpu
    end
  end

  # Recover the job owner's username by stripping the configured namespace
  # prefix off the pod's namespace.
  def job_owner_from_json(json_data = {}, ns_prefix = nil)
    namespace = json_data.dig(:metadata, :namespace).to_s
    namespace.delete_prefix(ns_prefix.to_s)
  end
end
|
@@ -0,0 +1,9 @@
|
|
1
|
+
# An object that describes a submitted kubernetes job with extended information
class OodCore::Job::Adapters::Kubernetes::K8sJobInfo < OodCore::Job::Info
  # Connectivity details (host/port/password) for reaching the job's pod
  attr_reader :ood_connection_info

  # @param ood_connection_info [#to_h] host/port/password details for the pod
  # @param options [Hash] remaining attributes forwarded to OodCore::Job::Info
  def initialize(ood_connection_info: {}, **options)
    # Double-splat so the parent receives keyword arguments: `super(options)`
    # passed the hash positionally, which breaks under Ruby 3's keyword
    # argument separation.
    super(**options)
    @ood_connection_info = ood_connection_info
  end
end
|
@@ -0,0 +1,82 @@
|
|
1
|
+
# Plain-Ruby value objects describing the Kubernetes resources that the
# adapter renders into its pod template (configmaps, containers, pod spec).
module OodCore::Job::Adapters::Kubernetes::Resources

  # A ConfigMap and the files it carries.
  class ConfigMap
    attr_accessor :name, :files

    # @param name [String] the configmap's resource name
    # @param files [Array<#to_h>] raw file hashes, wrapped into ConfigMapFile
    def initialize(name, files)
      @name = name
      @files = []
      files.each do |f|
        @files << ConfigMapFile.new(f)
      end
    end

    # Is any file mounted into the main container?
    def mounts?
      @files.any? { |f| f.mount_path }
    end

    # Is any file mounted into an init container?
    def init_mounts?
      @files.any? { |f| f.init_mount_path }
    end
  end

  # A single file within a ConfigMap, with optional mount locations for the
  # main container (mount_path/sub_path) and init containers (init_*).
  class ConfigMapFile
    attr_accessor :filename, :data, :mount_path, :sub_path, :init_mount_path, :init_sub_path

    # @param data [#to_h] raw file hash with the keys read below
    def initialize(data)
      @filename = data[:filename]
      @data = data[:data]
      @mount_path = data[:mount_path]
      @sub_path = data[:sub_path]
      @init_mount_path = data[:init_mount_path]
      @init_sub_path = data[:init_sub_path]
    end
  end

  # A container definition destined for the pod template.
  class Container
    attr_accessor :name, :image, :command, :port, :env, :memory, :cpu, :working_dir,
                  :restart_policy, :image_pull_secret, :supplemental_groups

    # @raise [ArgumentError] when name or image is missing
    def initialize(
      name, image, command: [], port: nil, env: {}, memory: "4Gi", cpu: "1",
      working_dir: "", restart_policy: "Never", image_pull_secret: nil, supplemental_groups: []
    )
      raise ArgumentError, "containers need valid names and images" unless name && image

      # callers may explicitly pass nil for any keyword, so defaults are
      # re-applied here instead of relying on the signature alone
      @name = name
      @image = image
      @command = command.nil? ? [] : command
      @port = port&.to_i
      @env = env.nil? ? {} : env
      @memory = memory.nil? ? "4Gi" : memory
      @cpu = cpu.nil? ? "1" : cpu
      @working_dir = working_dir.nil? ? "" : working_dir
      @restart_policy = restart_policy.nil? ? "Never" : restart_policy
      @image_pull_secret = image_pull_secret
      @supplemental_groups = supplemental_groups.nil? ? [] : supplemental_groups
    end

    # Field-by-field value equality. Returns false — instead of raising
    # NoMethodError as before — when compared against nil or any
    # non-Container object, per Ruby's `==` convention.
    def ==(other)
      other.is_a?(Container) &&
        name == other.name &&
        image == other.image &&
        command == other.command &&
        port == other.port &&
        env == other.env &&
        memory == other.memory &&
        cpu == other.cpu &&
        working_dir == other.working_dir &&
        restart_policy == other.restart_policy &&
        image_pull_secret == other.image_pull_secret &&
        supplemental_groups == other.supplemental_groups
    end
  end

  # The pod's main container plus any init containers.
  class PodSpec
    attr_accessor :container, :init_containers

    # @param container [Container] the main container
    # @param init_containers [Array<Container>, nil] optional init containers
    def initialize(container, init_containers: nil)
      @container = container
      @init_containers = init_containers
    end
  end

end
|
@@ -0,0 +1,188 @@
|
|
1
|
+
<%# Rendered by the Kubernetes adapter with ERB trim mode, so these comment -%>
<%# tags emit nothing. Produces up to three YAML documents for one job: -%>
<%# a Pod, an optional NodePort Service, and an optional ConfigMap. -%>
apiVersion: v1
kind: Pod
metadata:
  namespace: <%= namespace %>
  name: <%= id %>
  labels:
    job: <%= id %>
    app.kubernetes.io/name: <%= container.name %>
    app.kubernetes.io/managed-by: open-ondemand
    <%- if !script.accounting_id.nil? && script.accounting_id != "" -%>
    account: <%= script.accounting_id %>
    <%- end -%>
  annotations:
    <%- unless script.wall_time.nil? -%>
    pod.kubernetes.io/lifetime: <%= helper.seconds_to_duration(script.wall_time) %>
    <%- end -%>
spec:
  restartPolicy: <%= spec.container.restart_policy %>
  securityContext:
    runAsUser: <%= run_as_user %>
    runAsGroup: <%= run_as_group %>
    runAsNonRoot: true
    <%- if spec.container.supplemental_groups.empty? -%>
    supplementalGroups: []
    <%- else -%>
    supplementalGroups:
    <%- spec.container.supplemental_groups.each do |supplemental_group| -%>
      - "<%= supplemental_group %>"
    <%- end -%>
    <%- end -%>
    fsGroup: <%= fs_group %>
  hostNetwork: false
  hostIPC: false
  hostPID: false
  <%- unless spec.container.image_pull_secret.nil? -%>
  imagePullSecrets:
  - name: <%= spec.container.image_pull_secret %>
  <%- end -%>
  containers:
  - name: "<%= spec.container.name %>"
    image: <%= spec.container.image %>
    imagePullPolicy: IfNotPresent
    <%- unless spec.container.working_dir.empty? -%>
    workingDir: "<%= spec.container.working_dir %>"
    <%- end -%>
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    <%- spec.container.env.each_pair do |name, value| -%>
    - name: <%= name %>
      value: "<%= value %>"
    <%- end # for each env -%>
    <%- unless spec.container.command.empty? -%>
    command:
    <%- spec.container.command.each do |cmd| -%>
    - "<%= cmd %>"
    <%- end # for each command -%>
    <%- end # unless command is nil -%>
    <%- unless spec.container.port.nil? -%>
    ports:
    - containerPort: <%= spec.container.port %>
    <%- end -%>
    <%- if configmap.mounts? || !all_mounts.empty? -%>
    volumeMounts:
    <%- configmap.files.each do |file| -%>
    <%- next if file.mount_path.nil? -%>
    - name: configmap-volume
      mountPath: <%= file.mount_path %>
      <%- unless file.sub_path.nil? -%>
      subPath: <%= file.sub_path %>
      <%- end # end unless file.sub_path.nil? -%>
    <%- end # end configmap.files.each -%>
    <%- all_mounts.each do |mount| -%>
    - name: <%= mount[:name] %>
      mountPath: <%= mount[:destination_path] %>
    <%- end # for each mount -%>
    <%- end # configmap mounts? and all_mounts not empty -%>
    resources:
      limits:
        memory: "<%= spec.container.memory %>"
        cpu: "<%= spec.container.cpu %>"
      requests:
        memory: "<%= spec.container.memory %>"
        cpu: "<%= spec.container.cpu %>"
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - all
      privileged: false
  <%- unless spec.init_containers.nil? -%>
  initContainers:
  <%- spec.init_containers.each do |ctr| -%>
  - name: "<%= ctr.name %>"
    image: "<%= ctr.image %>"
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    <%- ctr.env.each_pair do |name, value| -%>
    - name: <%= name %>
      value: "<%= value %>"
    <%- end # for each env -%>
    command:
    <%- ctr.command.each do |cmd| -%>
    - "<%= cmd %>"
    <%- end # command loop -%>
    <%- if configmap.init_mounts? || !all_mounts.empty? -%>
    volumeMounts:
    <%- configmap.files.each do |file| -%>
    <%- next if file.init_mount_path.nil? -%>
    - name: configmap-volume
      mountPath: <%= file.init_mount_path %>
      <%- unless file.init_sub_path.nil? -%>
      subPath: <%= file.init_sub_path %>
      <%- end # end unless file.sub_path.nil? -%>
    <%- end # end configmap.files.each -%>
    <%- all_mounts.each do |mount| -%>
    - name: <%= mount[:name] %>
      mountPath: <%= mount[:destination_path] %>
    <%- end # for each mount -%>
    <%- end # if config_map init mounts and all_mounts not empty -%>
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - all
      privileged: false
  <%- end # init container loop -%>
  <%- end # if init containers -%>
  <%# configmap.to_s.empty? doubles as a nil check (nil.to_s == "") -%>
  <%- unless (configmap.to_s.empty? && all_mounts.empty?) -%>
  volumes:
  <%- unless configmap.to_s.empty? -%>
  - name: configmap-volume
    configMap:
      name: <%= configmap_name(id) %>
  <%- end -%>
  <%- all_mounts.each do |mount| -%>
  <%- if mount[:type] == 'nfs' -%>
  - name: <%= mount[:name] %>
    nfs:
      server: <%= mount[:host] %>
      path: <%= mount[:path] %>
  <%- elsif mount[:type] == 'host' -%>
  - name: <%= mount[:name] %>
    hostPath:
      path: <%= mount[:path] %>
      type: <%= mount[:host_type] %>
  <%- end # if mount is [host,nfs] -%>
  <%- end # for each mount -%>
  <%- end # (configmap.to_s.empty? || all_mounts.empty?) -%>
---
<%# NodePort Service exposing the container's port, only when one is set -%>
<%- unless spec.container.port.nil? -%>
apiVersion: v1
kind: Service
metadata:
  name: <%= service_name(id) %>
  namespace: <%= namespace %>
  labels:
    job: <%= id %>
spec:
  selector:
    job: <%= id %>
  ports:
  - protocol: TCP
    port: 80
    targetPort: <%= spec.container.port %>
  type: NodePort
<%- end # end for service -%>
---
<%# ConfigMap holding the job's mounted files, only when one is defined -%>
<%- unless configmap.nil? -%>
apiVersion: v1
kind: ConfigMap
metadata:
  name: <%= configmap_name(id) %>
  namespace: <%= namespace %>
  labels:
    job: <%= id %>
data:
  <%- configmap.files.each do |file| -%>
  <%- next if file.data.nil? || file.filename.nil? -%>
  <%= file.filename %>: |
    <% config_data_lines(file.data).each do |line| %><%= line %><% end %>
  <%- end # end for configmap files -%>
<%- end # end configmap.nil? %>
|