teraslice 2.10.0 → 2.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/interfaces.js +12 -0
- package/dist/src/lib/cluster/cluster_master.js +246 -0
- package/dist/src/lib/cluster/node_master.js +355 -0
- package/dist/src/lib/cluster/services/api.js +663 -0
- package/dist/src/lib/cluster/services/assets.js +226 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetes/index.js +192 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetes/k8s.js +481 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetes/k8sResource.js +414 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetes/k8sState.js +59 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetes/utils.js +43 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/index.js +192 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/interfaces.js +2 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8s.js +423 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sDeploymentResource.js +60 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sJobResource.js +55 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sResource.js +359 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sServiceResource.js +37 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sState.js +60 -0
- package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/utils.js +170 -0
- package/dist/src/lib/cluster/services/cluster/backends/native/dispatch.js +13 -0
- package/dist/src/lib/cluster/services/cluster/backends/native/index.js +526 -0
- package/dist/src/lib/cluster/services/cluster/backends/native/messaging.js +547 -0
- package/dist/src/lib/cluster/services/cluster/backends/state-utils.js +26 -0
- package/dist/src/lib/cluster/services/cluster/index.js +17 -0
- package/dist/src/lib/cluster/services/execution.js +435 -0
- package/dist/src/lib/cluster/services/index.js +6 -0
- package/dist/src/lib/cluster/services/interfaces.js +2 -0
- package/dist/src/lib/cluster/services/jobs.js +454 -0
- package/dist/src/lib/config/default-sysconfig.js +26 -0
- package/dist/src/lib/config/index.js +22 -0
- package/dist/src/lib/config/schemas/system.js +360 -0
- package/dist/src/lib/storage/analytics.js +86 -0
- package/dist/src/lib/storage/assets.js +401 -0
- package/dist/src/lib/storage/backends/elasticsearch_store.js +494 -0
- package/dist/src/lib/storage/backends/mappings/analytics.js +50 -0
- package/dist/src/lib/storage/backends/mappings/asset.js +41 -0
- package/dist/src/lib/storage/backends/mappings/ex.js +62 -0
- package/dist/src/lib/storage/backends/mappings/job.js +38 -0
- package/dist/src/lib/storage/backends/mappings/state.js +38 -0
- package/dist/src/lib/storage/backends/s3_store.js +237 -0
- package/dist/src/lib/storage/execution.js +300 -0
- package/dist/src/lib/storage/index.js +7 -0
- package/dist/src/lib/storage/jobs.js +81 -0
- package/dist/src/lib/storage/state.js +255 -0
- package/dist/src/lib/utils/api_utils.js +157 -0
- package/dist/src/lib/utils/asset_utils.js +94 -0
- package/dist/src/lib/utils/date_utils.js +52 -0
- package/dist/src/lib/utils/encoding_utils.js +27 -0
- package/dist/src/lib/utils/events.js +4 -0
- package/dist/src/lib/utils/file_utils.js +124 -0
- package/dist/src/lib/utils/id_utils.js +15 -0
- package/dist/src/lib/utils/port_utils.js +32 -0
- package/dist/src/lib/workers/assets/index.js +3 -0
- package/dist/src/lib/workers/assets/loader-executable.js +40 -0
- package/dist/src/lib/workers/assets/loader.js +73 -0
- package/dist/src/lib/workers/assets/spawn.js +55 -0
- package/dist/src/lib/workers/context/execution-context.js +12 -0
- package/dist/src/lib/workers/context/terafoundation-context.js +8 -0
- package/dist/src/lib/workers/execution-controller/execution-analytics.js +188 -0
- package/dist/src/lib/workers/execution-controller/index.js +1024 -0
- package/dist/src/lib/workers/execution-controller/recovery.js +151 -0
- package/dist/src/lib/workers/execution-controller/scheduler.js +390 -0
- package/dist/src/lib/workers/execution-controller/slice-analytics.js +96 -0
- package/dist/src/lib/workers/helpers/job.js +80 -0
- package/dist/src/lib/workers/helpers/op-analytics.js +22 -0
- package/dist/src/lib/workers/helpers/terafoundation.js +34 -0
- package/dist/src/lib/workers/helpers/worker-shutdown.js +169 -0
- package/dist/src/lib/workers/metrics/index.js +108 -0
- package/dist/src/lib/workers/worker/index.js +378 -0
- package/dist/src/lib/workers/worker/slice.js +122 -0
- package/dist/test/config/schemas/system_schema-spec.js +37 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/k8s-spec.js +316 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/k8sResource-spec.js +795 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/k8sState-multicluster-spec.js +67 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/k8sState-spec.js +84 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/utils-spec.js +132 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/v2/k8s-v2-spec.js +455 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/v2/k8sResource-v2-spec.js +818 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/v2/k8sState-multicluster-v2-spec.js +67 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/v2/k8sState-v2-spec.js +84 -0
- package/dist/test/lib/cluster/services/cluster/backends/kubernetes/v2/utils-v2-spec.js +320 -0
- package/dist/test/lib/cluster/services/cluster/backends/state-utils-spec.js +37 -0
- package/dist/test/node_master-spec.js +188 -0
- package/dist/test/services/api-spec.js +80 -0
- package/dist/test/services/assets-spec.js +158 -0
- package/dist/test/services/messaging-spec.js +440 -0
- package/dist/test/storage/assets_storage-spec.js +95 -0
- package/dist/test/storage/s3_store-spec.js +138 -0
- package/dist/test/test.config.js +8 -0
- package/dist/test/test.setup.js +6 -0
- package/dist/test/utils/api_utils-spec.js +86 -0
- package/dist/test/utils/asset_utils-spec.js +141 -0
- package/dist/test/utils/elastic_utils-spec.js +25 -0
- package/dist/test/workers/execution-controller/execution-controller-spec.js +371 -0
- package/dist/test/workers/execution-controller/execution-special-test-cases-spec.js +520 -0
- package/dist/test/workers/execution-controller/execution-test-cases-spec.js +338 -0
- package/dist/test/workers/execution-controller/recovery-spec.js +160 -0
- package/dist/test/workers/execution-controller/scheduler-spec.js +249 -0
- package/dist/test/workers/execution-controller/slice-analytics-spec.js +121 -0
- package/dist/test/workers/fixtures/ops/example-op/processor.js +20 -0
- package/dist/test/workers/fixtures/ops/example-op/schema.js +19 -0
- package/dist/test/workers/fixtures/ops/example-reader/fetcher.js +20 -0
- package/dist/test/workers/fixtures/ops/example-reader/schema.js +41 -0
- package/dist/test/workers/fixtures/ops/example-reader/slicer.js +37 -0
- package/dist/test/workers/fixtures/ops/new-op/processor.js +29 -0
- package/dist/test/workers/fixtures/ops/new-op/schema.js +18 -0
- package/dist/test/workers/fixtures/ops/new-reader/fetcher.js +19 -0
- package/dist/test/workers/fixtures/ops/new-reader/schema.js +23 -0
- package/dist/test/workers/fixtures/ops/new-reader/slicer.js +13 -0
- package/dist/test/workers/helpers/configs.js +130 -0
- package/dist/test/workers/helpers/execution-controller-helper.js +49 -0
- package/dist/test/workers/helpers/index.js +5 -0
- package/dist/test/workers/helpers/test-context.js +210 -0
- package/dist/test/workers/helpers/zip-directory.js +25 -0
- package/dist/test/workers/worker/slice-spec.js +333 -0
- package/dist/test/workers/worker/worker-spec.js +356 -0
- package/package.json +94 -93
- package/service.js +0 -0
package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sJobResource.js

@@ -0,0 +1,55 @@
```js
import { V1Job } from '@kubernetes/client-node';
import { convertToTSResource, makeTemplate } from './utils.js';
import { K8sResource } from './k8sResource.js';
export class K8sJobResource extends K8sResource {
    nodeType = 'execution_controller';
    nameInfix = 'exc';
    templateGenerator;
    templateConfig;
    resource;
    /**
     * K8sJobResource allows the generation of a k8s job based on a template.
     * After creating the object, the k8s job is accessible on the objects
     * .resource property.
     *
     * @param {Object} terasliceConfig - teraslice cluster config from context
     * @param {Object} execution - teraslice execution
     * @param {Logger} logger - teraslice logger
     */
    constructor(terasliceConfig, execution, logger) {
        super(terasliceConfig, execution, logger);
        this.templateGenerator = makeTemplate('jobs', this.nodeType);
        this.templateConfig = this._makeConfig(this.nameInfix);
        const k8sJob = new V1Job();
        Object.assign(k8sJob, this.templateGenerator(this.templateConfig));
        this.resource = convertToTSResource(k8sJob);
        this._setJobLabels(this.resource);
        // Apply job `targets` setting as k8s nodeAffinity
        // We assume that multiple targets require both to match ...
        // NOTE: If you specify multiple `matchExpressions` associated with
        // `nodeSelectorTerms`, then the pod can be scheduled onto a node
        // only if *all* `matchExpressions` can be satisfied.
        this._setTargets(this.resource);
        this._setResources(this.resource);
        this._setVolumes(this.resource);
        if (process.env.MOUNT_LOCAL_TERASLICE !== undefined) {
            this._mountLocalTeraslice(this.resource);
        }
        this._setEnvVariables();
        this._setAssetsVolume(this.resource);
        this._setImagePullSecret(this.resource);
        this._setEphemeralStorage(this.resource);
        this._setExternalPorts(this.resource);
        this._setPriorityClassName(this.resource);
        // Execution controller targets are required nodeAffinities, if
        // required job targets are also supplied, then *all* of the matches
        // will have to be satisfied for the job to be scheduled. This also
        // adds tolerations for any specified targets
        this._setExecutionControllerTargets(this.resource);
        // override must happen last
        if (this.terasliceConfig.kubernetes_overrides_enabled) {
            this._mergePodSpecOverlay(this.resource);
        }
    }
}
//# sourceMappingURL=k8sJobResource.js.map
```
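
For reference, here is a minimal sketch (not part of the diff) of how this class would be driven. The config and execution values below are hypothetical, and it assumes the process runs from a teraslice checkout so `makeTemplate()` can locate the `.hbs` templates under the `resourcePath` defined in `utils.js`; `debugLogger` is assumed to be available from `@terascope/utils`.

```js
// Hedged usage sketch: constructing the execution-controller Job manifest.
// All ids and config values here are made up for illustration.
import { debugLogger } from '@terascope/utils';
import { K8sJobResource } from './k8sJobResource.js';

const terasliceConfig = {
    name: 'ts-dev1',                                   // cluster name
    kubernetes_image: 'terascope/teraslice:v2.12.0',
    kubernetes_namespace: 'ts-dev1',
    shutdown_timeout: 60000,
    kubernetes_overrides_enabled: false
};

const execution = {
    name: 'example-job',
    job_id: '7e6dfa3c-0000-4d76-bf3b-f185e6bf7ada',    // hypothetical ids
    ex_id: 'e76a0278-0000-4d78-bf14-431bcd97528c',
    workers: 2,
    targets: [{ key: 'zone', value: 'west' }]
};

const jobResource = new K8sJobResource(terasliceConfig, execution, debugLogger('k8s-v2'));
// jobResource.resource is the V1Job manifest the backend would submit to k8s
console.log(JSON.stringify(jobResource.resource.spec?.template.spec, null, 2));
```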
package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sResource.js

@@ -0,0 +1,359 @@
```js
import { isNumber, get, set, has, isEmpty, merge } from '@terascope/utils';
import { safeEncode } from '../../../../../utils/encoding_utils.js';
import { setMaxOldSpaceViaEnv } from './utils.js';
export class K8sResource {
    execution;
    jobLabelPrefix;
    jobPropertyLabelPrefix;
    logger;
    terasliceConfig;
    /**
     * K8sResource allows the generation of k8s resources based on templates.
     * After creating the object, the k8s resource is accessible on the objects
     * .resource property.
     *
     * @param {Object} terasliceConfig - teraslice cluster config from context
     * @param {Object} execution - teraslice execution
     * @param {Logger} logger - teraslice logger
     */
    constructor(terasliceConfig, execution, logger) {
        this.execution = execution;
        this.jobLabelPrefix = 'job.teraslice.terascope.io';
        this.jobPropertyLabelPrefix = 'job-property.teraslice.terascope.io';
        this.logger = logger;
        this.terasliceConfig = terasliceConfig;
    }
    _setEnvVariables() {
    }
    _mountLocalTeraslice(resource) {
        const devMounts = JSON.parse(process.env.MOUNT_LOCAL_TERASLICE);
        resource.spec.template.spec.containers[0].volumeMounts.push(...devMounts.volumeMounts);
        resource.spec.template.spec.volumes.push(...devMounts.volumes);
        if (resource.spec.template.spec.containers[0]) {
            resource.spec.template.spec.containers[0].args = [
                'node',
                'service.js'
            ];
        }
    }
    _makeConfig(nameInfix, exName, exUid) {
        const clusterName = get(this.terasliceConfig, 'name');
        const clusterNameLabel = clusterName.replace(/[^a-zA-Z0-9_\-.]/g, '_').substring(0, 63);
        const configMapName = get(this.terasliceConfig, 'kubernetes_config_map_name', `${this.terasliceConfig.name}-worker`);
        const dockerImage = this.execution.kubernetes_image
            || this.terasliceConfig.kubernetes_image;
        // name needs to be a valid DNS name since it is used in the svc name,
        // so we can only permit alphanumeric and - characters. _ is forbidden.
        // -> regex used for validation is '[a-z]([-a-z0-9]*[a-z0-9])?'
        const jobNameLabel = this.execution.name
            .toLowerCase()
            .replace(/[^a-zA-Z0-9\-.]/g, '-')
            .replace(/^[^a-z]/, 'a')
            .replace(/[^a-z0-9]$/, '0')
            .substring(0, 63);
        const name = `ts-${nameInfix}-${jobNameLabel.substring(0, 35)}-${this.execution.job_id.substring(0, 13)}`;
        const shutdownTimeoutMs = get(this.terasliceConfig, 'shutdown_timeout', 60000);
        const shutdownTimeoutSeconds = Math.round(shutdownTimeoutMs / 1000);
        const config = {
            // assetsDirectory: get(this.terasliceConfig, 'assets_directory', ''),
            // assetsVolume: get(this.terasliceConfig, 'assets_volume', ''),
            clusterName,
            clusterNameLabel,
            configMapName,
            dockerImage,
            execution: safeEncode(this.execution),
            exId: this.execution.ex_id,
            exName: exName,
            exUid: exUid,
            jobId: this.execution.job_id,
            jobNameLabel,
            name,
            namespace: get(this.terasliceConfig, 'kubernetes_namespace', 'default'),
            nodeType: this.nodeType,
            replicas: this.execution.workers,
            shutdownTimeout: shutdownTimeoutSeconds
        };
        return config;
    }
    _setWorkerAntiAffinity(resource) {
        if (this.terasliceConfig.kubernetes_worker_antiaffinity) {
            const targetKey = 'spec.template.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution';
            if (!has(this.resource, targetKey)) {
                set(this.resource, targetKey, []);
            }
            resource.spec.template.spec.affinity?.podAntiAffinity
                ?.preferredDuringSchedulingIgnoredDuringExecution?.push({
                    weight: 1,
                    podAffinityTerm: {
                        labelSelector: {
                            matchExpressions: [
                                {
                                    key: 'app.kubernetes.io/name',
                                    operator: 'In',
                                    values: [
                                        'teraslice'
                                    ]
                                },
                                {
                                    key: 'app.kubernetes.io/instance',
                                    operator: 'In',
                                    values: [
                                        this.templateConfig.clusterNameLabel
                                    ]
                                }
                            ]
                        },
                        topologyKey: 'kubernetes.io/hostname'
                    }
                });
        }
    }
    /**
     * Execution Controllers get tolerations and required affinities
     *
     * NOTE: We considered changing `execution_controller_targets` to be an
     * object but the inconsistency with `targets` made this awkward. See the
     * `teraslice config with execution_controller_targets and job targets set`
     * test for an example. If the syntax for this were to change, we should
     * also consider changing `execution.targets`, which is a change on the job.
     */
    _setExecutionControllerTargets(resource) {
        if (this.terasliceConfig.execution_controller_targets) {
            this.terasliceConfig.execution_controller_targets.forEach((target) => {
                this._setTargetRequired(target, resource);
                this._setTargetAccepted(target, resource);
            });
        }
    }
    _setEphemeralStorage(resource) {
        if (this.execution.ephemeral_storage) {
            resource.spec.template.spec.containers[0].volumeMounts.push({
                name: 'ephemeral-volume',
                mountPath: '/ephemeral0'
            });
            resource.spec.template.spec.volumes.push({
                name: 'ephemeral-volume',
                emptyDir: {}
            });
        }
    }
    _setExternalPorts(resource) {
        if (this.execution.external_ports) {
            this.execution.external_ports.forEach((portValue) => {
                if (isNumber(portValue)) {
                    resource.spec.template.spec.containers[0].ports
                        .push({ containerPort: portValue });
                }
                else {
                    resource.spec.template.spec.containers[0].ports
                        .push({
                            name: portValue.name,
                            containerPort: portValue.port
                        });
                }
            });
        }
    }
    _setImagePullSecret(resource) {
        if (this.terasliceConfig.kubernetes_image_pull_secret) {
            if (resource.spec.template.spec.imagePullSecrets) {
                resource.spec.template.spec.imagePullSecrets.push({ name: this.terasliceConfig.kubernetes_image_pull_secret });
            }
            else {
                resource.spec.template.spec.imagePullSecrets = [
                    { name: this.terasliceConfig.kubernetes_image_pull_secret }
                ];
            }
        }
    }
    _setPriorityClassName(resource) {
        if (this.terasliceConfig.kubernetes_priority_class_name) {
            const className = this.terasliceConfig.kubernetes_priority_class_name;
            if (this.nodeType === 'execution_controller') {
                resource.spec.template.spec.priorityClassName = className;
                if (this.execution.stateful) {
                    resource.spec.template.metadata.labels[`${this.jobPropertyLabelPrefix}/stateful`] = 'true';
                }
            }
            if (this.nodeType === 'worker' && this.execution.stateful) {
                resource.spec.template.spec.priorityClassName = className;
                resource.spec.template.metadata.labels[`${this.jobPropertyLabelPrefix}/stateful`] = 'true';
            }
        }
    }
    _setAssetsVolume(resource) {
        if (this.terasliceConfig.assets_volume
            && this.terasliceConfig.assets_directory
            && typeof this.terasliceConfig.assets_directory === 'string') {
            resource.spec.template.spec.volumes.push({
                name: this.terasliceConfig.assets_volume,
                persistentVolumeClaim: { claimName: this.terasliceConfig.assets_volume }
            });
            resource.spec.template.spec.containers[0].volumeMounts.push({
                name: this.terasliceConfig.assets_volume,
                mountPath: this.terasliceConfig.assets_directory
            });
        }
    }
    _setJobLabels(resource) {
        if (this.execution.labels != null) {
            Object.entries(this.execution.labels).forEach(([k, v]) => {
                const key = `${this.jobLabelPrefix}/${k.replace(/[^a-zA-Z0-9\-._]/g, '-').substring(0, 63)}`;
                const value = v.replace(/[^a-zA-Z0-9\-._]/g, '-').substring(0, 63);
                resource.metadata.labels[key] = value;
                resource.spec.template.metadata.labels[key] = value;
            });
        }
    }
    _setVolumes(resource) {
        if (this.execution.volumes != null) {
            this.execution.volumes.forEach((volume) => {
                resource.spec.template.spec.volumes.push({
                    name: volume.name,
                    persistentVolumeClaim: { claimName: volume.name }
                });
                resource.spec.template.spec.containers[0].volumeMounts.push({
                    name: volume.name,
                    mountPath: volume.path
                });
            });
        }
    }
    _setResources(resource) {
        let cpu;
        let memory;
        let maxMemory;
        const container = resource.spec.template.spec.containers[0];
        // use teraslice config as defaults and execution config will override it
        const envVars = Object.assign({}, this.terasliceConfig.env_vars, this.execution.env_vars);
        if (this.nodeType === 'worker') {
            if (this.execution.resources_requests_cpu
                || this.execution.resources_limits_cpu) {
                if (this.execution.resources_requests_cpu) {
                    set(container, 'resources.requests.cpu', this.execution.resources_requests_cpu);
                }
                if (this.execution.resources_limits_cpu) {
                    set(container, 'resources.limits.cpu', this.execution.resources_limits_cpu);
                }
            }
            else if (this.execution.cpu || this.terasliceConfig.cpu) {
                // The settings on the executions override the cluster configs
                cpu = this.execution.cpu || this.terasliceConfig.cpu || -1;
                set(container, 'resources.requests.cpu', cpu);
                set(container, 'resources.limits.cpu', cpu);
            }
            if (this.execution.resources_requests_memory
                || this.execution.resources_limits_memory) {
                set(container, 'resources.requests.memory', this.execution.resources_requests_memory);
                set(container, 'resources.limits.memory', this.execution.resources_limits_memory);
                maxMemory = this.execution.resources_limits_memory;
            }
            else if (this.execution.memory || this.terasliceConfig.memory) {
                // The settings on the executions override the cluster configs
                memory = this.execution.memory || this.terasliceConfig.memory || -1;
                set(container, 'resources.requests.memory', memory);
                set(container, 'resources.limits.memory', memory);
                maxMemory = memory;
            }
        }
        if (this.nodeType === 'execution_controller') {
            // The settings on the executions override the cluster configs
            cpu = this.execution.cpu_execution_controller
                || this.terasliceConfig.cpu_execution_controller || -1;
            memory = this.execution.memory_execution_controller
                || this.terasliceConfig.memory_execution_controller || -1;
            set(container, 'resources.requests.cpu', cpu);
            set(container, 'resources.limits.cpu', cpu);
            set(container, 'resources.requests.memory', memory);
            set(container, 'resources.limits.memory', memory);
            maxMemory = memory;
        }
        // NOTE: This sucks, this manages the memory env var but it ALSO is
        // responsible for doing the config and execution env var merge, which
        // should NOT be in this function
        if (container.env === undefined) {
            throw new Error('Resource container V1EnvVar[] undefined while setting resources.');
        }
        setMaxOldSpaceViaEnv(container.env, envVars, maxMemory);
    }
    _setTargets(resource) {
        if (this.execution.targets && !isEmpty(this.execution.targets)) {
            this.execution.targets?.forEach((target) => {
                // `required` is the default if no `constraint` is provided for
                // backwards compatibility and as the most likely case
                if (target.constraint === 'required' || !has(target, 'constraint')) {
                    this._setTargetRequired(target, resource);
                }
                if (target.constraint === 'preferred') {
                    this._setTargetPreferred(target, resource);
                }
                if (target.constraint === 'accepted') {
                    this._setTargetAccepted(target, resource);
                }
            });
        }
    }
    _setTargetRequired(target, resource) {
        const targetKey = 'spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution';
        if (!has(this.resource, targetKey)) {
            const nodeSelectorObj = {
                nodeSelectorTerms: [{ matchExpressions: [] }]
            };
            set(this.resource, targetKey, nodeSelectorObj);
        }
        resource.spec.template.spec.affinity?.nodeAffinity
            ?.requiredDuringSchedulingIgnoredDuringExecution
            ?.nodeSelectorTerms[0].matchExpressions?.push({
                key: target.key,
                operator: 'In',
                values: [target.value]
            });
    }
    _setTargetPreferred(target, resource) {
        const targetKey = 'spec.template.spec.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution';
        if (!has(this.resource, targetKey)) {
            set(this.resource, targetKey, []);
        }
        resource.spec.template.spec.affinity?.nodeAffinity
            ?.preferredDuringSchedulingIgnoredDuringExecution?.push({
                weight: 1,
                preference: {
                    matchExpressions: [{
                        key: target.key,
                        operator: 'In',
                        values: [target.value]
                    }]
                }
            });
    }
    _setTargetAccepted(target, resource) {
        const targetKey = 'spec.template.spec.tolerations';
        if (!has(this.resource, targetKey)) {
            set(this.resource, targetKey, []);
        }
        resource.spec.template.spec.tolerations?.push({
            key: target.key,
            operator: 'Equal',
            value: target.value,
            effect: 'NoSchedule'
        });
    }
    /**
     * _mergePodSpecOverlay - allows the author of the job to override anything
     * in the pod .spec for both the execution controller and the worker pods
     * created in Kubernetes. This can be useful in many ways including these:
     *
     * * add `initContainers` to the pods
     * * add `hostAliases` to the pods
     *
     * Note that this happens at the end of the process, so anything added by
     * this overlay will overwrite any other setting set on the job or by the
     * config.
     *
     * Job setting: `pod_spec_override`
     */
    _mergePodSpecOverlay(resource) {
        resource.spec.template.spec = merge(resource.spec.template.spec, this.execution.pod_spec_override);
    }
}
//# sourceMappingURL=k8sResource.js.map
```
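
For readers unfamiliar with the scheduling knobs above, the following illustration (not taken from the diff) shows how a hypothetical job `targets` array maps onto the generated pod spec via `_setTargets()`, `_setTargetRequired()`, `_setTargetPreferred()`, and `_setTargetAccepted()`.

```js
// Hypothetical job `targets` entries and the pod-spec fragments the methods
// above would append for each constraint type.
const targets = [
    { key: 'zone', value: 'west' },                               // no constraint => required
    { key: 'disktype', value: 'ssd', constraint: 'preferred' },
    { key: 'dedicated', value: 'teraslice', constraint: 'accepted' }
];

const resultingPodSpecFragments = {
    affinity: {
        nodeAffinity: {
            // from _setTargetRequired(): all required matchExpressions must be satisfied
            requiredDuringSchedulingIgnoredDuringExecution: {
                nodeSelectorTerms: [{
                    matchExpressions: [{ key: 'zone', operator: 'In', values: ['west'] }]
                }]
            },
            // from _setTargetPreferred(): soft preference with weight 1
            preferredDuringSchedulingIgnoredDuringExecution: [{
                weight: 1,
                preference: {
                    matchExpressions: [{ key: 'disktype', operator: 'In', values: ['ssd'] }]
                }
            }]
        }
    },
    // from _setTargetAccepted(): 'accepted' targets become tolerations, not affinities
    tolerations: [{ key: 'dedicated', operator: 'Equal', value: 'teraslice', effect: 'NoSchedule' }]
};
```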
package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sServiceResource.js

@@ -0,0 +1,37 @@
```js
import { V1Service } from '@kubernetes/client-node';
import { convertToTSResource, makeTemplate } from './utils.js';
import { K8sResource } from './k8sResource.js';
export class K8sServiceResource extends K8sResource {
    nodeType = 'execution_controller';
    nameInfix = 'exc';
    templateGenerator;
    templateConfig;
    resource;
    exName;
    exUid;
    /**
     * K8sServiceResource allows the generation of a k8s service based on a template.
     * After creating the object, the k8s service is accessible on the objects
     * .resource property.
     *
     * @param {Object} terasliceConfig - teraslice cluster config from context
     * @param {Object} execution - teraslice execution
     * @param {Logger} logger - teraslice logger
     * @param {String} exName - name from execution resource
     * @param {String} exUid - uid from execution resource
     */
    constructor(terasliceConfig, execution, logger, exName, exUid) {
        super(terasliceConfig, execution, logger);
        this.execution = execution;
        this.logger = logger;
        this.terasliceConfig = terasliceConfig;
        this.exName = exName;
        this.exUid = exUid;
        this.templateGenerator = makeTemplate('services', this.nodeType);
        this.templateConfig = this._makeConfig(this.nameInfix, exName, exUid);
        const k8sService = new V1Service();
        Object.assign(k8sService, this.templateGenerator(this.templateConfig));
        this.resource = convertToTSResource(k8sService);
    }
}
//# sourceMappingURL=k8sServiceResource.js.map
```
package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/k8sState.js

@@ -0,0 +1,60 @@
```js
import { get, has, uniq, difference } from '@terascope/utils';
/**
 * Given the k8s Pods API output generates the appropriate Teraslice cluster
 * state. NOTE: This assumes the pods have already been filtered to ensure they
 * are teraslice pods and match the cluster in question.
 * @param {TSPodList} k8sPods k8s pods API object (k8s v1.10+)
 * @param {Object} clusterState Teraslice Cluster State
 * @param {String} clusterNameLabel k8s label containing clusterName
 * @param {Logger} logger Teraslice logger
 */
export function gen(k8sPods, clusterState) {
    // Make sure we clean up the old
    const hostIPs = uniq(k8sPods.items.map((item) => get(item, 'status.hostIP')));
    const oldHostIps = difference(Object.keys(clusterState), hostIPs);
    oldHostIps.forEach((ip) => {
        delete clusterState[ip];
    });
    // Loop over the nodes in clusterState and set active = [] so we can append
    // later
    Object.keys(clusterState).forEach((nodeId) => {
        clusterState[nodeId].active = [];
    });
    // add a worker for each pod
    k8sPods.items.forEach((pod) => {
        if (!has(clusterState, pod.status.hostIP)) {
            // If the node isn't in clusterState, add it
            clusterState[pod.status.hostIP] = {
                node_id: pod.status.hostIP,
                hostname: pod.status.hostIP,
                pid: 'N/A',
                node_version: 'N/A',
                teraslice_version: 'N/A',
                total: 'N/A',
                state: 'connected',
                available: 'N/A',
                active: []
            };
        }
        const worker = {
            assets: [],
            assignment: pod.metadata.labels['app.kubernetes.io/component'],
            ex_id: pod.metadata.labels['teraslice.terascope.io/exId'],
            // WARNING: This makes the assumption that the first container
            // in the pod is the teraslice container. Currently it is the
            // only container, so this assumption is safe for now.
            image: pod.spec.containers[0].image,
            job_id: pod.metadata.labels['teraslice.terascope.io/jobId'],
            pod_name: pod.metadata.name,
            pod_ip: pod.status.podIP,
            worker_id: pod.metadata.name,
        };
        // k8s pods can have status.phase = `Pending`, `Running`, `Succeeded`,
        // `Failed`, `Unknown`. We will only add `Running` pods to the
        // Teraslice cluster state.
        if (pod.status.phase === 'Running') {
            clusterState[pod.status.hostIP].active.push(worker);
        }
    });
}
//# sourceMappingURL=k8sState.js.map
```
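
A minimal sketch (not part of the diff) of `gen()` in action follows. The `PodList` item is hypothetical and pared down to only the fields the function reads; the ids and addresses are made up.

```js
import { gen } from './k8sState.js';

const k8sPods = {
    kind: 'PodList',
    items: [{
        metadata: {
            name: 'ts-wkr-example-job-7e6dfa3c-0000-abcde',
            labels: {
                'app.kubernetes.io/component': 'worker',
                'teraslice.terascope.io/exId': 'e76a0278-0000-4d78-bf14-431bcd97528c',
                'teraslice.terascope.io/jobId': '7e6dfa3c-0000-4d76-bf3b-f185e6bf7ada'
            }
        },
        spec: { containers: [{ image: 'terascope/teraslice:v2.12.0' }] },
        status: { phase: 'Running', hostIP: '10.0.0.5', podIP: '10.244.1.7' }
    }]
};

const clusterState = {};
gen(k8sPods, clusterState);
// clusterState['10.0.0.5'] is created with the 'N/A' placeholders and its
// `active` array holds one worker entry built from the pod above; pods whose
// status.phase is not 'Running' would be skipped.
console.log(clusterState['10.0.0.5'].active.length); // 1
```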
package/dist/src/lib/cluster/services/cluster/backends/kubernetesV2/utils.js

@@ -0,0 +1,170 @@
```js
import fs from 'node:fs';
import path from 'node:path';
// @ts-expect-error
import barbe from 'barbe';
import * as k8s from '@kubernetes/client-node';
import { isTest } from '@terascope/utils';
const MAX_RETRIES = isTest ? 2 : 3;
const RETRY_DELAY = isTest ? 50 : 1000; // time in ms
const resourcePath = path.join(process.cwd(), './packages/teraslice/src/lib/cluster/services/cluster/backends/kubernetesV2/');
export function makeTemplate(folder, fileName) {
    const filePath = path.join(resourcePath, folder, `${fileName}.hbs`);
    const templateData = fs.readFileSync(filePath, 'utf-8');
    const templateKeys = ['{{', '}}'];
    return (config) => {
        if (folder !== 'jobs' && (config.exName === undefined || config.exUid === undefined)) {
            throw new Error(`K8s config requires ${config.exName === undefined ? 'exName' : 'exUid'} to create a ${folder} template`);
        }
        if (folder !== 'services' && config.dockerImage === undefined) {
            throw new Error(`K8s config requires a dockerImage to create a ${folder} template`);
        }
        const templated = barbe(templateData, templateKeys, config);
        return JSON.parse(templated);
    };
}
// Convert bytes to MB and reduce by 10%
export function getMaxOldSpace(memory) {
    return Math.round(0.9 * (memory / 1024 / 1024));
}
export function setMaxOldSpaceViaEnv(envArr, jobEnv, memory) {
    const envObj = {};
    if (memory && memory > -1) {
        // Set NODE_OPTIONS to override max-old-space-size
        const maxOldSpace = getMaxOldSpace(memory);
        envObj.NODE_OPTIONS = `--max-old-space-size=${maxOldSpace}`;
    }
    Object.assign(envObj, jobEnv);
    Object.entries(envObj).forEach(([name, value]) => {
        envArr.push({
            name,
            value
        });
    });
}
export function getRetryConfig() {
    return {
        retries: MAX_RETRIES,
        delay: RETRY_DELAY
    };
}
export function isDeployment(resource) {
    return resource instanceof k8s.V1Deployment;
}
export function isJob(resource) {
    return resource instanceof k8s.V1Job;
}
export function isPod(resource) {
    return resource instanceof k8s.V1Pod;
}
export function isReplicaSet(resource) {
    return resource instanceof k8s.V1ReplicaSet;
}
export function isService(resource) {
    return resource instanceof k8s.V1Service;
}
export function isTSDeployment(manifest) {
    return manifest instanceof k8s.V1Deployment
        && manifest.metadata?.labels !== undefined
        && manifest.metadata.name !== undefined
        && manifest.spec?.replicas !== undefined
        && manifest.spec.template.metadata?.labels !== undefined
        && manifest.spec.template.spec?.containers[0].volumeMounts !== undefined
        && manifest.spec.template.spec.volumes !== undefined;
}
export function isTSJob(manifest) {
    return manifest instanceof k8s.V1Job
        && manifest.metadata?.labels !== undefined
        && manifest.metadata.name !== undefined
        && manifest.spec?.template.metadata?.labels !== undefined
        && manifest.spec.template.spec?.containers[0].volumeMounts !== undefined
        && manifest.spec.template.spec.volumes !== undefined;
}
export function isTSPod(manifest) {
    return manifest instanceof k8s.V1Pod
        && manifest.metadata?.name !== undefined
        && manifest.status !== undefined;
}
export function isTSReplicaSet(manifest) {
    return manifest instanceof k8s.V1ReplicaSet
        && manifest.metadata?.name !== undefined
        && manifest.status !== undefined;
}
export function isTSService(manifest) {
    return manifest instanceof k8s.V1Service
        && manifest.metadata?.name !== undefined
        && manifest.spec?.selector !== undefined
        && manifest.spec.ports !== undefined;
}
export function convertToTSResource(resource) {
    if (isDeployment(resource) && isTSDeployment(resource)) {
        return resource;
    }
    if (isJob(resource) && isTSJob(resource)) {
        return resource;
    }
    if (isPod(resource) && isTSPod(resource)) {
        return resource;
    }
    if (isReplicaSet(resource) && isTSReplicaSet(resource)) {
        return resource;
    }
    if (isService(resource) && isTSService(resource)) {
        return resource;
    }
    throw new Error('K8sResource missing required field(s) to be converted to TSResource.');
}
export function isDeploymentList(manifest) {
    return manifest.kind === 'DeploymentList';
}
export function isJobList(manifest) {
    return manifest.kind === 'JobList';
}
export function isPodList(manifest) {
    return manifest.kind === 'PodList';
}
export function isReplicaSetList(manifest) {
    return manifest.kind === 'ReplicaSetList';
}
export function isServiceList(manifest) {
    return manifest.kind === 'ServiceList';
}
export function isTSDeploymentList(manifest) {
    return manifest.kind === 'DeploymentList'
        && (manifest.items[0] ? isTSDeployment(manifest.items[0]) : true);
}
export function isTSJobList(manifest) {
    return manifest.kind === 'JobList'
        && (manifest.items[0] ? isTSJob(manifest.items[0]) : true);
}
export function isTSPodList(manifest) {
    return manifest.kind === 'PodList'
        && (manifest.items[0] ? isTSPod(manifest.items[0]) : true);
}
export function isTSReplicaSetList(manifest) {
    return manifest.kind === 'ReplicaSetList'
        && (manifest.items[0] ? isTSReplicaSet(manifest.items[0]) : true);
}
export function isTSServiceList(manifest) {
    return manifest.kind === 'ServiceList'
        && (manifest.items[0] ? isTSService(manifest.items[0]) : true);
}
export function convertToTSResourceList(resourceList) {
    resourceList.items.map((resource) => convertToTSResource(resource));
    if (isDeploymentList(resourceList) && isTSDeploymentList(resourceList)) {
        return resourceList;
    }
    if (isJobList(resourceList) && isTSJobList(resourceList)) {
        return resourceList;
    }
    if (isPodList(resourceList) && isTSPodList(resourceList)) {
        return resourceList;
    }
    if (isReplicaSetList(resourceList) && isTSReplicaSetList(resourceList)) {
        return resourceList;
    }
    if (isServiceList(resourceList) && isTSServiceList(resourceList)) {
        return resourceList;
    }
    throw new Error('K8sResource missing required field(s) to be converted to TSResourceList.');
}
//# sourceMappingURL=utils.js.map
```
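
A worked example (not part of the diff) of the memory-to-`NODE_OPTIONS` conversion implemented by `getMaxOldSpace()` and `setMaxOldSpaceViaEnv()` above; the values are hypothetical.

```js
import { getMaxOldSpace, setMaxOldSpaceViaEnv } from './utils.js';

// A 2 GiB memory limit: 2147483648 bytes / 1024 / 1024 = 2048 MB, reduced by 10%
getMaxOldSpace(2 * 1024 * 1024 * 1024); // Math.round(0.9 * 2048) === 1843

// setMaxOldSpaceViaEnv() appends to an existing V1EnvVar[]; job env vars are
// merged over the computed value, so a job-level NODE_OPTIONS would win.
const containerEnv = [];
setMaxOldSpaceViaEnv(containerEnv, { FOO: 'bar' }, 2 * 1024 * 1024 * 1024);
// containerEnv now contains:
//   { name: 'NODE_OPTIONS', value: '--max-old-space-size=1843' }
//   { name: 'FOO', value: 'bar' }
```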