@highstate/k8s 0.19.1 → 0.21.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-23vn2rdc.js +11 -0
- package/dist/chunk-2pfx13ay.js +11 -0
- package/dist/chunk-46ntav0c.js +299 -0
- package/dist/chunk-556pc9e6.js +155 -0
- package/dist/chunk-7kgjgcft.js +170 -0
- package/dist/{chunk-LGHFSXNT.js → chunk-9hs97f1q.js} +23 -17
- package/dist/chunk-aame3x1b.js +11 -0
- package/dist/chunk-b05q6fm2.js +37 -0
- package/dist/chunk-bmvc9d2d.js +11 -0
- package/dist/chunk-de82bbp2.js +7 -0
- package/dist/chunk-facs31cb.js +624 -0
- package/dist/chunk-h1b79v66.js +1425 -0
- package/dist/chunk-k4w9zpn5.js +215 -0
- package/dist/chunk-pqc6w52f.js +352 -0
- package/dist/chunk-qyshvz32.js +176 -0
- package/dist/chunk-tpfyj6fe.js +199 -0
- package/dist/chunk-z6bmpnm7.js +180 -0
- package/dist/highstate.manifest.json +3 -2
- package/dist/impl/dynamic-endpoint-resolver.js +91 -0
- package/dist/impl/gateway-route.js +226 -166
- package/dist/impl/tls-certificate.js +31 -31
- package/dist/index.js +293 -166
- package/dist/units/cert-manager/index.js +19 -14
- package/dist/units/cluster-patch/index.js +14 -13
- package/dist/units/dns01-issuer/index.js +82 -42
- package/dist/units/existing-cluster/index.js +59 -26
- package/dist/units/gateway-api/index.js +15 -16
- package/dist/units/reduced-access-cluster/index.js +32 -36
- package/package.json +23 -21
- package/src/cluster.ts +12 -8
- package/src/config-map.ts +15 -5
- package/src/container.ts +4 -2
- package/src/cron-job.ts +51 -5
- package/src/deployment.ts +49 -18
- package/src/gateway/backend.ts +3 -3
- package/src/gateway/gateway.ts +12 -56
- package/src/helm.ts +354 -22
- package/src/impl/dynamic-endpoint-resolver.ts +109 -0
- package/src/impl/gateway-route.ts +231 -57
- package/src/impl/tls-certificate.ts +8 -3
- package/src/index.ts +1 -0
- package/src/job.ts +38 -6
- package/src/kubectl.ts +166 -0
- package/src/namespace.ts +47 -3
- package/src/network-policy.ts +1 -1
- package/src/pvc.ts +12 -2
- package/src/rbac.ts +28 -5
- package/src/scripting/bundle.ts +21 -98
- package/src/scripting/environment.ts +4 -10
- package/src/secret.ts +15 -5
- package/src/service.ts +28 -6
- package/src/shared.ts +31 -3
- package/src/stateful-set.ts +49 -18
- package/src/tls.ts +31 -5
- package/src/units/cluster-patch/index.ts +5 -5
- package/src/units/dns01-issuer/index.ts +56 -12
- package/src/units/existing-cluster/index.ts +36 -15
- package/src/units/reduced-access-cluster/index.ts +6 -3
- package/src/worker.ts +4 -2
- package/src/workload.ts +474 -217
- package/LICENSE +0 -21
- package/dist/chunk-4G6LLC2X.js +0 -240
- package/dist/chunk-4G6LLC2X.js.map +0 -1
- package/dist/chunk-BR2CLUUD.js +0 -230
- package/dist/chunk-BR2CLUUD.js.map +0 -1
- package/dist/chunk-DCUMJSO6.js +0 -427
- package/dist/chunk-DCUMJSO6.js.map +0 -1
- package/dist/chunk-FE4SHRAJ.js +0 -286
- package/dist/chunk-FE4SHRAJ.js.map +0 -1
- package/dist/chunk-HH2JJELM.js +0 -13
- package/dist/chunk-HH2JJELM.js.map +0 -1
- package/dist/chunk-KMLRI5UZ.js +0 -155
- package/dist/chunk-KMLRI5UZ.js.map +0 -1
- package/dist/chunk-LGHFSXNT.js.map +0 -1
- package/dist/chunk-MIC2BHGS.js +0 -301
- package/dist/chunk-MIC2BHGS.js.map +0 -1
- package/dist/chunk-OBDQONMV.js +0 -401
- package/dist/chunk-OBDQONMV.js.map +0 -1
- package/dist/chunk-P2VOUU7E.js +0 -1626
- package/dist/chunk-P2VOUU7E.js.map +0 -1
- package/dist/chunk-PZ5AY32C.js +0 -9
- package/dist/chunk-PZ5AY32C.js.map +0 -1
- package/dist/chunk-RVB4WWZZ.js +0 -267
- package/dist/chunk-RVB4WWZZ.js.map +0 -1
- package/dist/chunk-TWBMG6TD.js +0 -315
- package/dist/chunk-TWBMG6TD.js.map +0 -1
- package/dist/chunk-VCXWCZ43.js +0 -279
- package/dist/chunk-VCXWCZ43.js.map +0 -1
- package/dist/chunk-YIJUVPU2.js +0 -297
- package/dist/chunk-YIJUVPU2.js.map +0 -1
- package/dist/cron-job-NX4HD4FI.js +0 -8
- package/dist/cron-job-NX4HD4FI.js.map +0 -1
- package/dist/deployment-O2LJ5WR5.js +0 -8
- package/dist/deployment-O2LJ5WR5.js.map +0 -1
- package/dist/impl/gateway-route.js.map +0 -1
- package/dist/impl/tls-certificate.js.map +0 -1
- package/dist/index.js.map +0 -1
- package/dist/job-SYME6Y43.js +0 -8
- package/dist/job-SYME6Y43.js.map +0 -1
- package/dist/stateful-set-VJYKTQ72.js +0 -8
- package/dist/stateful-set-VJYKTQ72.js.map +0 -1
- package/dist/units/cert-manager/index.js.map +0 -1
- package/dist/units/cluster-patch/index.js.map +0 -1
- package/dist/units/dns01-issuer/index.js.map +0 -1
- package/dist/units/existing-cluster/index.js.map +0 -1
- package/dist/units/gateway-api/index.js.map +0 -1
- package/dist/units/reduced-access-cluster/index.js.map +0 -1
|
@@ -0,0 +1,1425 @@
|
|
|
1
|
+
// @bun
|
|
2
|
+
import {
|
|
3
|
+
Service,
|
|
4
|
+
isEndpointFromCluster,
|
|
5
|
+
mapContainerPortToServicePort,
|
|
6
|
+
mapServiceToLabelSelector
|
|
7
|
+
} from "./chunk-k4w9zpn5.js";
|
|
8
|
+
import {
|
|
9
|
+
Namespace,
|
|
10
|
+
NamespacedResource,
|
|
11
|
+
Secret,
|
|
12
|
+
commonExtraArgs,
|
|
13
|
+
getClusterKubeconfigContent,
|
|
14
|
+
getNamespaceName,
|
|
15
|
+
getProvider,
|
|
16
|
+
getProviderAsync,
|
|
17
|
+
images_default,
|
|
18
|
+
mapMetadata,
|
|
19
|
+
mapNamespaceNameToSelector,
|
|
20
|
+
mapSelectorLikeToSelector
|
|
21
|
+
} from "./chunk-facs31cb.js";
|
|
22
|
+
import {
|
|
23
|
+
__require
|
|
24
|
+
} from "./chunk-b05q6fm2.js";
|
|
25
|
+
|
|
26
|
+
// src/config-map.ts
|
|
27
|
+
import { getOrCreate } from "@highstate/contract";
|
|
28
|
+
import { k8s } from "@highstate/library";
|
|
29
|
+
import {
|
|
30
|
+
interpolate,
|
|
31
|
+
makeEntityOutput,
|
|
32
|
+
output,
|
|
33
|
+
toPromise
|
|
34
|
+
} from "@highstate/pulumi";
|
|
35
|
+
import { core } from "@pulumi/kubernetes";
|
|
36
|
+
/**
 * Highstate wrapper around the Kubernetes `v1/ConfigMap` resource.
 *
 * Provides factories to create, patch, wrap, or look up ConfigMaps, plus a
 * per-process cache (`for`/`forAsync`) keyed by cluster + namespace + name so
 * the same entity maps to a single resource instance.
 */
class ConfigMap extends NamespacedResource {
  // Pulumi output with the ConfigMap's `data` field.
  data;
  static apiVersion = "v1";
  static kind = "ConfigMap";

  constructor(type, name, args, opts, metadata, namespace, data) {
    super(type, name, args, opts, metadata, namespace);
    this.data = data;
  }

  // Highstate entity output describing this ConfigMap.
  get entity() {
    return makeEntityOutput({
      entity: k8s.configMapEntity,
      identity: this.metadata.uid,
      meta: {
        title: this.metadata.name
      },
      value: {
        ...this.entityBase
      }
    });
  }

  // Always creates a new ConfigMap.
  static create(name, args, opts) {
    return new CreatedConfigMap(name, args, opts);
  }

  // Patches `args.existing` when provided, otherwise creates a new ConfigMap.
  static createOrPatch(name, args, opts) {
    if (args.existing) {
      // Fix: forward `opts` on the patch path; previously it was dropped,
      // silently ignoring caller-supplied resource options (cf. `patch()`).
      return new ConfigMapPatch(name, {
        ...args,
        name: output(args.existing).metadata.name
      }, opts);
    }
    return new CreatedConfigMap(name, args, opts);
  }

  // Returns the existing ConfigMap when provided, otherwise creates one.
  static async createOrGet(name, args, opts) {
    if (args.existing) {
      return await ConfigMap.forAsync(args.existing, output(args.namespace).cluster);
    }
    return new CreatedConfigMap(name, args, opts);
  }

  static patch(name, args, opts) {
    return new ConfigMapPatch(name, args, opts);
  }

  static wrap(name, args, opts) {
    return new WrappedConfigMap(name, args, opts);
  }

  static get(name, args, opts) {
    return new ExternalConfigMap(name, args, opts);
  }

  // Cache of resolved ConfigMaps keyed by cluster + namespace + name.
  static configMapCache = new Map;

  static for(entity, cluster) {
    return getOrCreate(ConfigMap.configMapCache, `${entity.clusterName}.${entity.metadata.namespace}.${entity.metadata.name}.${entity.clusterId}`, (name) => {
      return ConfigMap.get(name, {
        name: entity.metadata.name,
        namespace: Namespace.forResource(entity, cluster)
      });
    });
  }

  // Promise-friendly variant of `for` that resolves the entity output first.
  static async forAsync(entity, cluster) {
    const resolvedEntity = await toPromise(entity);
    return ConfigMap.for(resolvedEntity, cluster);
  }
}
|
|
97
|
+
|
|
98
|
+
// Creates a brand-new ConfigMap resource in the cluster referenced by
// `args.namespace`.
class CreatedConfigMap extends ConfigMap {
  constructor(name, args, opts) {
    // The provider depends on the namespace's cluster, so the underlying
    // resource is constructed inside `apply`. NOTE(review): `this` is
    // captured (as `parent`) before `super()` runs; this relies on the
    // `apply` callback executing only after the constructor completes —
    // confirm against Pulumi output semantics.
    const configMap = output(args.namespace).cluster.apply((cluster) => {
      return new core.v1.ConfigMap(name, {
        metadata: mapMetadata(args, name),
        data: args.data
      }, {
        ...opts,
        parent: this,
        provider: getProvider(cluster)
      });
    });
    super("highstate:k8s:ConfigMap", name, args, opts, configMap.metadata, output(args.namespace), configMap.data);
  }
}
|
|
113
|
+
|
|
114
|
+
// Patches an existing ConfigMap (server-side patch) instead of creating one.
class ConfigMapPatch extends ConfigMap {
  constructor(name, args, opts) {
    // Same deferred-provider pattern as CreatedConfigMap: the cluster (and
    // thus the provider) is only known once the namespace output resolves.
    const configMap = output(args.namespace).cluster.apply((cluster) => {
      return new core.v1.ConfigMapPatch(name, {
        metadata: mapMetadata(args, name),
        data: args.data
      }, {
        ...opts,
        parent: this,
        provider: getProvider(cluster)
      });
    });
    super("highstate:k8s:ConfigMapPatch", name, args, opts, configMap.metadata, output(args.namespace), configMap.data);
  }
}
|
|
129
|
+
|
|
130
|
+
// Wraps an already constructed ConfigMap-like object (`args.configMap`)
// without creating any cluster resources.
class WrappedConfigMap extends ConfigMap {
  constructor(name, args, opts) {
    super("highstate:k8s:WrappedConfigMap", name, args, opts, output(args.configMap).metadata, output(args.namespace), output(args.configMap).data);
  }
}
|
|
135
|
+
|
|
136
|
+
// Reads an existing ConfigMap from the cluster by `<namespace>/<name>`.
class ExternalConfigMap extends ConfigMap {
  constructor(name, args, opts) {
    // The lookup is deferred until the namespace's cluster is known, since
    // the provider is derived from it.
    const configMap = output(args.namespace).cluster.apply((cluster) => {
      return core.v1.ConfigMap.get(name, interpolate`${output(args.namespace).metadata.name}/${args.name}`, { ...opts, parent: this, provider: getProvider(cluster) });
    });
    super("highstate:k8s:ExternalConfigMap", name, args, opts, configMap.metadata, output(args.namespace), configMap.data);
  }
}
|
|
144
|
+
|
|
145
|
+
// src/pvc.ts
|
|
146
|
+
import { getOrCreate as getOrCreate2 } from "@highstate/contract";
|
|
147
|
+
import { k8s as k8s2 } from "@highstate/library";
|
|
148
|
+
import {
|
|
149
|
+
interpolate as interpolate2,
|
|
150
|
+
makeEntityOutput as makeEntityOutput2,
|
|
151
|
+
output as output2,
|
|
152
|
+
toPromise as toPromise2
|
|
153
|
+
} from "@highstate/pulumi";
|
|
154
|
+
import { core as core2 } from "@pulumi/kubernetes";
|
|
155
|
+
import { deepmerge } from "deepmerge-ts";
|
|
156
|
+
import { omit } from "remeda";
|
|
157
|
+
var extraPersistentVolumeClaimArgs = [...commonExtraArgs, "size"];
|
|
158
|
+
|
|
159
|
+
/**
 * Highstate wrapper around the Kubernetes `v1/PersistentVolumeClaim`
 * resource. Mirrors the ConfigMap wrapper: factories for create / patch /
 * wrap / get plus a per-process cache keyed by cluster + namespace + name.
 */
class PersistentVolumeClaim extends NamespacedResource {
  // Pulumi outputs with the PVC's `spec` and `status` fields.
  spec;
  status;
  static apiVersion = "v1";
  static kind = "PersistentVolumeClaim";

  constructor(type, name, args, opts, metadata, namespace, spec, status) {
    super(type, name, args, opts, metadata, namespace);
    this.spec = spec;
    this.status = status;
  }

  // Highstate entity output describing this PVC.
  get entity() {
    return makeEntityOutput2({
      entity: k8s2.persistentVolumeClaimEntity,
      identity: this.metadata.uid,
      meta: {
        title: this.metadata.name
      },
      value: {
        ...this.entityBase
      }
    });
  }

  // Always creates a new PVC.
  static create(name, args, opts) {
    return new CreatedPersistentVolumeClaim(name, args, opts);
  }

  // Patches `args.existing` when provided, otherwise creates a new PVC.
  static createOrPatch(name, args, opts) {
    if (args.existing) {
      // Fix: forward `opts` on the patch path; previously it was dropped,
      // silently ignoring caller-supplied resource options (cf. `patch()`).
      return new PersistentVolumeClaimPatch(name, {
        ...args,
        name: output2(args.existing).metadata.name
      }, opts);
    }
    return new CreatedPersistentVolumeClaim(name, args, opts);
  }

  // Returns the existing PVC when provided, otherwise creates one.
  static async createOrGet(name, args, opts) {
    if (args.existing) {
      return await PersistentVolumeClaim.forAsync(args.existing, output2(args.namespace).cluster);
    }
    return new CreatedPersistentVolumeClaim(name, args, opts);
  }

  static patch(name, args, opts) {
    return new PersistentVolumeClaimPatch(name, args, opts);
  }

  static wrap(name, args, opts) {
    return new WrappedPersistentVolumeClaim(name, args, opts);
  }

  static get(name, args, opts) {
    return new ExternalPersistentVolumeClaim(name, args, opts);
  }

  // Cache of resolved PVCs keyed by cluster + namespace + name.
  static pvcCache = new Map;

  static for(entity, cluster) {
    return getOrCreate2(PersistentVolumeClaim.pvcCache, `${entity.clusterName}.${entity.metadata.namespace}.${entity.metadata.name}.${entity.clusterId}`, (name) => {
      return PersistentVolumeClaim.get(name, {
        name: entity.metadata.name,
        namespace: Namespace.forResource(entity, cluster)
      });
    });
  }

  // Promise-friendly variant of `for` that resolves the entity output first.
  static async forAsync(entity, cluster) {
    const resolvedEntity = await toPromise2(entity);
    return PersistentVolumeClaim.for(resolvedEntity, cluster);
  }
}
|
|
222
|
+
|
|
223
|
+
// Creates a new PersistentVolumeClaim in the namespace's cluster.
class CreatedPersistentVolumeClaim extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    // Provider derivation is deferred until the namespace's cluster resolves.
    const pvc = output2(args.namespace).cluster.apply((cluster) => {
      return new core2.v1.PersistentVolumeClaim(name, {
        metadata: mapMetadata(args, name),
        // Defaults: single-node read-write access and a 100Mi storage
        // request; user-supplied spec fields (minus the Highstate-only
        // extras like `size`) take precedence via deepmerge.
        spec: output2(args).apply((args2) => {
          return deepmerge({
            accessModes: ["ReadWriteOnce"],
            resources: {
              requests: {
                storage: args2.size ?? "100Mi"
              }
            }
          }, omit(args2, extraPersistentVolumeClaimArgs));
        })
      }, {
        ...opts,
        parent: this,
        provider: getProvider(cluster)
      });
    });
    super("highstate:k8s:PersistentVolumeClaim", name, args, opts, pvc.metadata, output2(args.namespace), pvc.spec, pvc.status);
  }
}
|
|
247
|
+
|
|
248
|
+
// Patches an existing PersistentVolumeClaim instead of creating one.
// Applies the same defaults as CreatedPersistentVolumeClaim.
class PersistentVolumeClaimPatch extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    const pvc = output2(args.namespace).cluster.apply((cluster) => {
      return new core2.v1.PersistentVolumeClaimPatch(name, {
        metadata: mapMetadata(args, name),
        // Defaults merged under the user-supplied spec, as in the create path.
        spec: output2(args).apply((args2) => {
          return deepmerge({
            accessModes: ["ReadWriteOnce"],
            resources: {
              requests: {
                storage: args2.size ?? "100Mi"
              }
            }
          }, omit(args2, extraPersistentVolumeClaimArgs));
        })
      }, {
        ...opts,
        parent: this,
        provider: getProvider(cluster)
      });
    });
    super("highstate:k8s:PersistentVolumeClaimPatch", name, args, opts, pvc.metadata, output2(args.namespace), pvc.spec, pvc.status);
  }
}
|
|
272
|
+
|
|
273
|
+
// Wraps an already constructed PVC-like object (`args.pvc`) without
// creating any cluster resources.
class WrappedPersistentVolumeClaim extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    super("highstate:k8s:WrappedPersistentVolumeClaim", name, args, opts, output2(args.pvc).metadata, output2(args.namespace), output2(args.pvc).spec, output2(args.pvc).status);
  }
}
|
|
278
|
+
|
|
279
|
+
// Reads an existing PersistentVolumeClaim from the cluster by
// `<namespace>/<name>`.
class ExternalPersistentVolumeClaim extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    // Lookup deferred until the namespace's cluster (hence provider) is known.
    const pvc = output2(args.namespace).cluster.apply((cluster) => {
      return core2.v1.PersistentVolumeClaim.get(name, interpolate2`${output2(args.namespace).metadata.name}/${args.name}`, { ...opts, parent: this, provider: getProvider(cluster) });
    });
    super("highstate:k8s:ExternalPersistentVolumeClaim", name, args, opts, pvc.metadata, output2(args.namespace), pvc.spec, pvc.status);
  }
}
|
|
287
|
+
/**
 * Derives the name of an automatically created data volume for a workload.
 * The first volume is `<workloadName>-data`; later ones carry the index as
 * a numeric suffix.
 */
function getAutoVolumeName(workloadName, index) {
  const suffix = index === 0 ? "" : `-${index}`;
  return `${workloadName}-data${suffix}`;
}
|
|
293
|
+
|
|
294
|
+
// src/container.ts
|
|
295
|
+
import {
|
|
296
|
+
normalize,
|
|
297
|
+
output as output3
|
|
298
|
+
} from "@highstate/pulumi";
|
|
299
|
+
import { core as core3 } from "@pulumi/kubernetes";
|
|
300
|
+
import { concat, map, omit as omit2 } from "remeda";
|
|
301
|
+
var containerExtraArgs = [
|
|
302
|
+
"port",
|
|
303
|
+
"volumeMount",
|
|
304
|
+
"volume",
|
|
305
|
+
"environment",
|
|
306
|
+
"environmentSource",
|
|
307
|
+
"environmentSources"
|
|
308
|
+
];
|
|
309
|
+
/**
 * Fallback container name when a container spec has no explicit `name`:
 * the workload name itself for the first container, `<name>-<index>` after.
 */
function getFallbackContainerName(name, index) {
  return index === 0 ? name : `${name}-${index}`;
}
|
|
315
|
+
// Converts a Highstate container spec into a raw Kubernetes container spec.
// `fallbackName` is used when the container does not define its own name.
function mapContainerToRaw(container, cluster, fallbackName) {
  const containerName = container.name ?? fallbackName;
  const spec = {
    // Copy everything except the Highstate-specific sugar fields.
    ...omit2(container, containerExtraArgs),
    name: containerName,
    // `normalize` merges the singular/plural forms of each sugar field.
    ports: normalize(container.port, container.ports),
    volumeMounts: map(normalize(container.volumeMount, container.volumeMounts), mapVolumeMount),
    env: concat(container.environment ? mapContainerEnvironment(container.environment) : [], container.env ?? []),
    envFrom: concat(map(normalize(container.environmentSource, container.environmentSources), mapEnvironmentSource), container.envFrom ?? [])
  };
  if (container.enableTun) {
    // TUN support: grant NET_ADMIN and expose /dev/net/tun; note the
    // capability list is overwritten, not appended to.
    spec.securityContext ??= {};
    spec.securityContext.capabilities ??= {};
    spec.securityContext.capabilities.add = ["NET_ADMIN"];
    if (cluster.quirks?.tunDevicePolicy?.type === "plugin") {
      // Cluster exposes the TUN device through a device-plugin resource.
      spec.resources ??= {};
      spec.resources.limits ??= {};
      spec.resources.limits[cluster.quirks.tunDevicePolicy.resourceName] = cluster.quirks.tunDevicePolicy.resourceValue;
    } else {
      // Otherwise mount the host TUN device directly; the matching
      // "tun-device" volume is presumably added by the workload builder —
      // TODO confirm.
      spec.volumeMounts ??= [];
      spec.volumeMounts.push({
        name: "tun-device",
        mountPath: "/dev/net/tun",
        readOnly: false
      });
    }
  }
  return spec;
}
|
|
344
|
+
/**
 * Expands a Highstate environment map into Kubernetes `env` entries.
 * Values may be plain strings, `{ secret, key }` refs, `{ configMap, key }`
 * refs, or raw `valueFrom` sources. Falsy values are skipped.
 */
function mapContainerEnvironment(environment) {
  const envVars = [];
  for (const [name, value] of Object.entries(environment)) {
    if (!value) {
      // Unset/empty entries are intentionally omitted.
      continue;
    }
    if (typeof value === "string") {
      envVars.push({ name, value });
    } else if ("secret" in value) {
      envVars.push({
        name,
        valueFrom: {
          secretKeyRef: {
            name: value.secret.metadata.name,
            key: value.key
          }
        }
      });
    } else if ("configMap" in value) {
      envVars.push({
        name,
        valueFrom: {
          configMapKeyRef: {
            name: value.configMap.metadata.name,
            key: value.key
          }
        }
      });
    } else {
      // Anything else is treated as a raw Kubernetes EnvVarSource.
      envVars.push({ name, valueFrom: value });
    }
  }
  return envVars;
}
|
|
382
|
+
/**
 * Normalizes a Highstate volume mount. A mount carrying an inline `volume`
 * is resolved through `mapWorkloadVolume` to obtain the volume's name and
 * the `volume` key is stripped; a plain mount is passed through.
 */
function mapVolumeMount(volumeMount) {
  if (!("volume" in volumeMount)) {
    return {
      ...volumeMount,
      name: volumeMount.name
    };
  }
  const resolvedName = output3(volumeMount.volume)
    .apply(mapWorkloadVolume)
    .apply((volume) => output3(volume.name));
  return omit2({ ...volumeMount, name: resolvedName }, ["volume"]);
}
|
|
394
|
+
/**
 * Maps an environment source (Highstate or raw Pulumi ConfigMap/Secret)
 * to a Kubernetes `envFrom` entry; unknown values pass through unchanged.
 */
function mapEnvironmentSource(envFrom) {
  const isConfigMapLike = envFrom instanceof core3.v1.ConfigMap || envFrom instanceof ConfigMap;
  if (isConfigMapLike) {
    return {
      configMapRef: {
        name: envFrom.metadata.name
      }
    };
  }
  const isSecretLike = envFrom instanceof core3.v1.Secret || envFrom instanceof Secret;
  if (isSecretLike) {
    return {
      secretRef: {
        name: envFrom.metadata.name
      }
    };
  }
  // Assumed to already be a valid EnvFromSource.
  return envFrom;
}
|
|
411
|
+
/**
 * Maps a workload volume value (Highstate PVC/Secret/ConfigMap wrapper or
 * raw Pulumi resource) to a Kubernetes pod volume definition. Values that
 * match none of the known types are assumed to already be valid volume
 * specs and pass through unchanged.
 *
 * The original six duplicated branches are collapsed into three, one per
 * resulting volume kind; the Highstate-wrapper checks still run before the
 * raw-resource checks, preserving the original short-circuit order.
 */
function mapWorkloadVolume(volume) {
  if (volume instanceof PersistentVolumeClaim || core3.v1.PersistentVolumeClaim.isInstance(volume)) {
    return {
      name: volume.metadata.name,
      persistentVolumeClaim: {
        claimName: volume.metadata.name
      }
    };
  }
  if (volume instanceof Secret || core3.v1.Secret.isInstance(volume)) {
    return {
      name: volume.metadata.name,
      secret: {
        secretName: volume.metadata.name
      }
    };
  }
  if (volume instanceof ConfigMap || core3.v1.ConfigMap.isInstance(volume)) {
    return {
      name: volume.metadata.name,
      configMap: {
        name: volume.metadata.name
      }
    };
  }
  return volume;
}
|
|
462
|
+
/**
 * Returns the `metadata.uid` of a known volume resource (Highstate wrapper
 * or raw Pulumi PVC/ConfigMap/Secret), or `output3(undefined)` for anything
 * else. The original six identical branches all returned
 * `volume.metadata.uid`; they are collapsed into a single guard.
 */
function getWorkloadVolumeResourceUuid(volume) {
  const isKnownResource =
    volume instanceof PersistentVolumeClaim ||
    volume instanceof Secret ||
    volume instanceof ConfigMap ||
    core3.v1.PersistentVolumeClaim.isInstance(volume) ||
    core3.v1.ConfigMap.isInstance(volume) ||
    core3.v1.Secret.isInstance(volume);
  return isKnownResource ? volume.metadata.uid : output3(undefined);
}
|
|
483
|
+
|
|
484
|
+
// src/network.ts
|
|
485
|
+
/**
 * Picks the most suitable endpoint from a list, preferring (in order):
 * an internal endpoint of the given cluster, then a globally scoped
 * endpoint, then the first entry. Returns undefined for an empty list.
 */
function getBestEndpoint(endpoints, cluster) {
  if (endpoints.length === 0) {
    return undefined;
  }
  if (endpoints.length === 1) {
    return endpoints[0];
  }
  if (cluster) {
    const internalEndpoint = endpoints.find(
      (endpoint) => isEndpointFromCluster(endpoint, cluster) && endpoint.metadata["k8s.service"].isInternal
    );
    if (internalEndpoint) {
      return internalEndpoint;
    }
  }
  const globalEndpoint = endpoints.find((endpoint) => endpoint.metadata["iana.scope"] === "global");
  return globalEndpoint ?? endpoints[0];
}
|
|
504
|
+
/**
 * Like `getBestEndpoint`, but throws when no endpoint can be selected.
 *
 * @throws {Error} when `endpoints` yields no usable endpoint.
 */
function requireBestEndpoint(endpoints, cluster) {
  const endpoint = getBestEndpoint(endpoints, cluster);
  if (!endpoint) {
    // Fix: `cluster` may be absent (getBestEndpoint explicitly tolerates a
    // falsy cluster), so guard the dereference — previously this path threw
    // a TypeError instead of the intended descriptive Error.
    throw new Error(`No best endpoint found for cluster "${cluster?.name}" (${cluster?.id})`);
  }
  return endpoint;
}
|
|
511
|
+
|
|
512
|
+
// src/network-policy.ts
|
|
513
|
+
import {
|
|
514
|
+
addressToCidr,
|
|
515
|
+
endpointToString,
|
|
516
|
+
ImplementationMediator,
|
|
517
|
+
l3EndpointToCidr,
|
|
518
|
+
parseEndpoint
|
|
519
|
+
} from "@highstate/common";
|
|
520
|
+
import { z } from "@highstate/contract";
|
|
521
|
+
import {
|
|
522
|
+
ComponentResource,
|
|
523
|
+
interpolate as interpolate3,
|
|
524
|
+
normalize as normalize2,
|
|
525
|
+
output as output4,
|
|
526
|
+
toPromise as toPromise3
|
|
527
|
+
} from "@highstate/pulumi";
|
|
528
|
+
import { networking } from "@pulumi/kubernetes";
|
|
529
|
+
import { flat, groupBy, isNonNullish, merge, mergeDeep, uniqueBy } from "remeda";
|
|
530
|
+
var networkPolicyMediator = new ImplementationMediator("network-policy", z.object({ name: z.string(), args: z.custom() }), z.instanceof(ComponentResource));
|
|
531
|
+
|
|
532
|
+
class NetworkPolicy extends ComponentResource {
|
|
533
|
+
networkPolicy;
|
|
534
|
+
constructor(name, args, opts) {
|
|
535
|
+
super("k8s:network-policy", name, args, opts);
|
|
536
|
+
const normalizedArgs = output4(args).apply(async (args2) => {
|
|
537
|
+
const ingressRules = normalize2(args2.ingressRule, args2.ingressRules);
|
|
538
|
+
const egressRules = normalize2(args2.egressRule, args2.egressRules);
|
|
539
|
+
const cluster = await toPromise3(args2.namespace.cluster);
|
|
540
|
+
const extraEgressRules = [];
|
|
541
|
+
if (args2.allowKubeDns) {
|
|
542
|
+
extraEgressRules.push({
|
|
543
|
+
namespaces: ["kube-system"],
|
|
544
|
+
namespaceSelectors: [],
|
|
545
|
+
selectors: [{ matchLabels: { "k8s-app": "kube-dns" } }],
|
|
546
|
+
ports: [{ port: 53, protocol: "UDP" }],
|
|
547
|
+
all: false,
|
|
548
|
+
clusterPods: false,
|
|
549
|
+
cidrs: [],
|
|
550
|
+
fqdns: [],
|
|
551
|
+
services: []
|
|
552
|
+
});
|
|
553
|
+
}
|
|
554
|
+
return {
|
|
555
|
+
...args2,
|
|
556
|
+
podSelector: args2.selector ? mapSelectorLikeToSelector(args2.selector) : {},
|
|
557
|
+
cluster,
|
|
558
|
+
isolateEgress: args2.isolateEgress ?? false,
|
|
559
|
+
isolateIngress: args2.isolateIngress ?? false,
|
|
560
|
+
allowKubeApiServer: args2.allowKubeApiServer ?? false,
|
|
561
|
+
ingressRules: ingressRules.flatMap((rule) => {
|
|
562
|
+
const endpoints = normalize2(rule?.fromEndpoint, rule?.fromEndpoints);
|
|
563
|
+
const parsedEndpoints = endpoints.map((endpoint) => parseEndpoint(endpoint));
|
|
564
|
+
const endpointsNamespaces = groupBy(parsedEndpoints, (endpoint) => {
|
|
565
|
+
const namespace = isEndpointFromCluster(endpoint, cluster) ? endpoint.metadata["k8s.service"].namespace : "";
|
|
566
|
+
return namespace;
|
|
567
|
+
});
|
|
568
|
+
const l3OnlyRule = endpointsNamespaces[""] ? NetworkPolicy.getRuleFromEndpoint(undefined, endpointsNamespaces[""], cluster) : undefined;
|
|
569
|
+
const otherRules = Object.entries(endpointsNamespaces).filter(([key]) => key !== "").map(([, endpoints2]) => {
|
|
570
|
+
return NetworkPolicy.getRuleFromEndpoint(undefined, endpoints2, cluster);
|
|
571
|
+
});
|
|
572
|
+
return [
|
|
573
|
+
{
|
|
574
|
+
all: rule.fromAll ?? false,
|
|
575
|
+
clusterPods: rule.fromClusterPods ?? false,
|
|
576
|
+
cidrs: normalize2(rule.fromCidr, rule.fromCidrs).concat(l3OnlyRule?.cidrs ?? []),
|
|
577
|
+
fqdns: [],
|
|
578
|
+
services: normalize2(rule.fromService, rule.fromServices),
|
|
579
|
+
namespaces: normalize2(rule.fromNamespace, rule.fromNamespaces),
|
|
580
|
+
namespaceSelectors: normalize2(rule.fromNamespaceSelector, rule.fromNamespaceSelectors),
|
|
581
|
+
selectors: normalize2(rule.fromSelector, rule.fromSelectors),
|
|
582
|
+
ports: normalize2(rule.toPort, rule.toPorts)
|
|
583
|
+
},
|
|
584
|
+
...otherRules
|
|
585
|
+
].filter((rule2) => !NetworkPolicy.isEmptyRule(rule2));
|
|
586
|
+
}),
|
|
587
|
+
egressRules: egressRules.flatMap((rule) => {
|
|
588
|
+
const endpoints = normalize2(rule?.toEndpoint, rule?.toEndpoints);
|
|
589
|
+
const parsedEndpoints = endpoints.map((endpoint) => parseEndpoint(endpoint));
|
|
590
|
+
const endpointsByPortsAnsNamespaces = groupBy(parsedEndpoints, (endpoint) => {
|
|
591
|
+
const namespace = isEndpointFromCluster(endpoint, cluster) ? endpoint.metadata["k8s.service"].namespace : "";
|
|
592
|
+
const port = isEndpointFromCluster(endpoint, cluster) ? endpoint.metadata["k8s.service"].targetPort : endpoint.level !== 3 ? endpoint.port : undefined;
|
|
593
|
+
return `${port ?? "0"}:${namespace}`;
|
|
594
|
+
});
|
|
595
|
+
const l3OnlyRule = endpointsByPortsAnsNamespaces["0:"] ? NetworkPolicy.getRuleFromEndpoint(undefined, endpointsByPortsAnsNamespaces["0:"], cluster) : undefined;
|
|
596
|
+
const otherRules = Object.entries(endpointsByPortsAnsNamespaces).filter(([key]) => key !== "0:").map(([key, endpoints2]) => {
|
|
597
|
+
const [port] = key.split(":");
|
|
598
|
+
const portNumber = parseInt(port, 10);
|
|
599
|
+
const portValue = Number.isNaN(portNumber) ? port : portNumber;
|
|
600
|
+
return NetworkPolicy.getRuleFromEndpoint(portValue, endpoints2, cluster);
|
|
601
|
+
});
|
|
602
|
+
return [
|
|
603
|
+
{
|
|
604
|
+
all: rule.toAll ?? false,
|
|
605
|
+
clusterPods: rule.toClusterPods ?? false,
|
|
606
|
+
cidrs: normalize2(rule.toCidr, rule.toCidrs).concat(l3OnlyRule?.cidrs ?? []),
|
|
607
|
+
fqdns: normalize2(rule.toFqdn, rule.toFqdns).concat(l3OnlyRule?.fqdns ?? []),
|
|
608
|
+
services: normalize2(rule.toService, rule.toServices),
|
|
609
|
+
namespaces: normalize2(rule.toNamespace, rule.toNamespaces),
|
|
610
|
+
namespaceSelectors: normalize2(rule.toNamespaceSelector, rule.toNamespaceSelectors),
|
|
611
|
+
selectors: normalize2(rule.toSelector, rule.toSelectors),
|
|
612
|
+
ports: normalize2(rule.toPort, rule.toPorts)
|
|
613
|
+
},
|
|
614
|
+
...otherRules
|
|
615
|
+
].filter((rule2) => !NetworkPolicy.isEmptyRule(rule2));
|
|
616
|
+
}).concat(extraEgressRules)
|
|
617
|
+
};
|
|
618
|
+
});
|
|
619
|
+
this.networkPolicy = output4(normalizedArgs.apply(async (args2) => {
|
|
620
|
+
const cluster = args2.cluster;
|
|
621
|
+
if (cluster.networkPolicyImplRef) {
|
|
622
|
+
return networkPolicyMediator.call(cluster.networkPolicyImplRef, {
|
|
623
|
+
name,
|
|
624
|
+
args: args2
|
|
625
|
+
});
|
|
626
|
+
}
|
|
627
|
+
const nativePolicy = new NativeNetworkPolicy(name, args2, {
|
|
628
|
+
...opts,
|
|
629
|
+
parent: this,
|
|
630
|
+
provider: await getProviderAsync(output4(args2.namespace).cluster)
|
|
631
|
+
});
|
|
632
|
+
return nativePolicy.networkPolicy;
|
|
633
|
+
}));
|
|
634
|
+
}
|
|
635
|
+
static getRuleFromEndpoint(port, endpoints, cluster) {
|
|
636
|
+
const ports = port ? [
|
|
637
|
+
{
|
|
638
|
+
port,
|
|
639
|
+
protocol: endpoints[0].level !== 3 ? endpoints[0].protocol?.toUpperCase() : undefined
|
|
640
|
+
}
|
|
641
|
+
] : [];
|
|
642
|
+
const cidrs = endpoints.filter((endpoint) => !isEndpointFromCluster(endpoint, cluster)).map((endpoint) => endpoint.address ? addressToCidr(endpoint.address) : null).filter(isNonNullish);
|
|
643
|
+
const fqdns = endpoints.filter((endpoint) => endpoint.type === "hostname").map((endpoint) => endpoint.hostname);
|
|
644
|
+
const selectors = endpoints.filter((endpoint) => isEndpointFromCluster(endpoint, cluster)).map((endpoint) => endpoint.metadata["k8s.service"].selector);
|
|
645
|
+
const namespace = endpoints.filter((endpoint) => isEndpointFromCluster(endpoint, cluster)).map((endpoint) => endpoint.metadata["k8s.service"].namespace)[0];
|
|
646
|
+
return {
|
|
647
|
+
all: false,
|
|
648
|
+
clusterPods: false,
|
|
649
|
+
cidrs,
|
|
650
|
+
fqdns,
|
|
651
|
+
services: [],
|
|
652
|
+
namespaces: namespace ? [namespace] : [],
|
|
653
|
+
namespaceSelectors: [],
|
|
654
|
+
selectors,
|
|
655
|
+
ports
|
|
656
|
+
};
|
|
657
|
+
}
|
|
658
|
+
static isEmptyRule(rule) {
|
|
659
|
+
return !rule.all && !rule.clusterPods && rule.cidrs.length === 0 && rule.fqdns.length === 0 && rule.services.length === 0 && rule.namespaces.length === 0 && rule.namespaceSelectors.length === 0 && rule.selectors.length === 0 && rule.ports.length === 0;
|
|
660
|
+
}
|
|
661
|
+
static async isolateNamespace(namespace, opts) {
|
|
662
|
+
const name = await toPromise3(output4(namespace).metadata.name);
|
|
663
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
664
|
+
return new NetworkPolicy(`isolate-namespace.${cluster.name}.${name}.${cluster.id}`, {
|
|
665
|
+
namespace,
|
|
666
|
+
description: "By default, deny all traffic to/from the namespace.",
|
|
667
|
+
isolateEgress: true,
|
|
668
|
+
isolateIngress: true
|
|
669
|
+
}, opts);
|
|
670
|
+
}
|
|
671
|
+
static async allowInsideNamespace(namespace, opts) {
|
|
672
|
+
const nsName = await toPromise3(output4(namespace).metadata.name);
|
|
673
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
674
|
+
return new NetworkPolicy(`allow-inside-namespace.${cluster.name}.${nsName}.${cluster.id}`, {
|
|
675
|
+
namespace,
|
|
676
|
+
description: "Allow all traffic inside the namespace.",
|
|
677
|
+
ingressRule: { fromNamespace: namespace },
|
|
678
|
+
egressRule: { toNamespace: namespace }
|
|
679
|
+
}, opts);
|
|
680
|
+
}
|
|
681
|
+
static async allowKubeApiServer(namespace, opts) {
|
|
682
|
+
const nsName = await toPromise3(output4(namespace).metadata.name);
|
|
683
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
684
|
+
return new NetworkPolicy(`allow-kube-api-server.${cluster.name}.${nsName}.${cluster.id}`, {
|
|
685
|
+
namespace,
|
|
686
|
+
description: "Allow all traffic to the Kubernetes API server from the namespace.",
|
|
687
|
+
allowKubeApiServer: true
|
|
688
|
+
}, opts);
|
|
689
|
+
}
|
|
690
|
+
static async allowKubeDns(namespace, opts) {
|
|
691
|
+
const nsName = await toPromise3(output4(namespace).metadata.name);
|
|
692
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
693
|
+
return new NetworkPolicy(`allow-kube-dns.${cluster.name}.${nsName}.${cluster.id}`, {
|
|
694
|
+
namespace,
|
|
695
|
+
description: "Allow all traffic to the Kubernetes DNS server from the namespace.",
|
|
696
|
+
allowKubeDns: true
|
|
697
|
+
}, opts);
|
|
698
|
+
}
|
|
699
|
+
static async allowAllEgress(namespace, opts) {
|
|
700
|
+
const nsName = await toPromise3(output4(namespace).metadata.name);
|
|
701
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
702
|
+
return new NetworkPolicy(`allow-all-egress.${cluster.name}.${nsName}.${cluster.id}`, {
|
|
703
|
+
namespace,
|
|
704
|
+
description: "Allow all egress traffic from the namespace.",
|
|
705
|
+
egressRule: { toAll: true }
|
|
706
|
+
}, opts);
|
|
707
|
+
}
|
|
708
|
+
static async allowAllIngress(namespace, opts) {
|
|
709
|
+
const nsName = await toPromise3(output4(namespace).metadata.name);
|
|
710
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
711
|
+
return new NetworkPolicy(`allow-all-ingress.${cluster.name}.${nsName}.${cluster.id}`, {
|
|
712
|
+
namespace,
|
|
713
|
+
description: "Allow all ingress traffic to the namespace.",
|
|
714
|
+
ingressRule: { fromAll: true }
|
|
715
|
+
}, opts);
|
|
716
|
+
}
|
|
717
|
+
static async allowEgressToEndpoint(namespace, endpoint, opts) {
|
|
718
|
+
const parsedEndpoint = parseEndpoint(endpoint);
|
|
719
|
+
const endpointStr = endpointToString(parsedEndpoint).replace(/:/g, "-");
|
|
720
|
+
const nsName = await toPromise3(output4(namespace).metadata.name);
|
|
721
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
722
|
+
return new NetworkPolicy(`allow-egress-to-${endpointStr}.${cluster.name}.${nsName}.${cluster.id}`, {
|
|
723
|
+
namespace,
|
|
724
|
+
description: `Allow egress traffic to "${endpointToString(parsedEndpoint)}" from the namespace.`,
|
|
725
|
+
egressRule: { toEndpoint: endpoint }
|
|
726
|
+
}, opts);
|
|
727
|
+
}
|
|
728
|
+
static async allowEgressToBestEndpoint(namespace, endpoints, opts) {
|
|
729
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
730
|
+
const resolvedEndpoints = await toPromise3(output4(endpoints));
|
|
731
|
+
const bestEndpoint = requireBestEndpoint(resolvedEndpoints.map((endpoint) => parseEndpoint(endpoint)), cluster);
|
|
732
|
+
return await NetworkPolicy.allowEgressToEndpoint(namespace, bestEndpoint, opts);
|
|
733
|
+
}
|
|
734
|
+
static async allowIngressFromEndpoint(namespace, endpoint, opts) {
|
|
735
|
+
const parsedEndpoint = parseEndpoint(endpoint);
|
|
736
|
+
const endpointStr = endpointToString(parsedEndpoint).replace(/:/g, "-");
|
|
737
|
+
const nsName = await toPromise3(output4(namespace).metadata.name);
|
|
738
|
+
const cluster = await toPromise3(output4(namespace).cluster);
|
|
739
|
+
return new NetworkPolicy(`allow-ingress-from-${endpointStr}.${cluster.name}.${nsName}.${cluster.id}`, {
|
|
740
|
+
namespace,
|
|
741
|
+
description: interpolate3`Allow ingress traffic from "${endpointToString(parsedEndpoint)}" to the namespace.`,
|
|
742
|
+
ingressRule: { fromEndpoint: endpoint }
|
|
743
|
+
}, opts);
|
|
744
|
+
}
|
|
745
|
+
}
|
|
746
|
+
|
|
747
|
+
/**
 * Implements the normalized NetworkPolicy args using native Kubernetes
 * `networking.k8s.io/v1` NetworkPolicy resources (no CNI-specific CRDs).
 * FQDN rules cannot be expressed natively and are approximated — see
 * createEgressRules.
 */
class NativeNetworkPolicy extends ComponentResource {
  // The underlying Kubernetes NetworkPolicy resource created by this component.
  networkPolicy;
  constructor(name, args, opts) {
    super("k8s:native-network-policy", name, args, opts);
    const ingress = NativeNetworkPolicy.createIngressRules(args);
    const egress = NativeNetworkPolicy.createEgressRules(args);
    // Only declare a policyType when there are rules for that direction or
    // the caller explicitly requested isolation of that direction.
    const policyTypes = [];
    if (ingress.length > 0 || args.isolateIngress) {
      policyTypes.push("Ingress");
    }
    if (egress.length > 0 || args.isolateEgress) {
      policyTypes.push("Egress");
    }
    this.networkPolicy = new networking.v1.NetworkPolicy(name, {
      metadata: mergeDeep(mapMetadata(args, name), {
        // Surface the human-readable description as a standard annotation.
        annotations: args.description ? { "kubernetes.io/description": args.description } : undefined
      }),
      spec: {
        podSelector: args.podSelector,
        ingress,
        egress,
        policyTypes
      }
    }, { ...opts, parent: this });
  }
  // "Any external address": everything except the RFC 1918 private ranges.
  static fallbackIpBlock = {
    cidr: "0.0.0.0/0",
    except: ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
  };
  // Egress rule allowing UDP/53 to kube-dns in kube-system; needed because
  // FQDN-based rules require DNS resolution to remain reachable.
  static fallbackDnsRule = {
    to: [
      {
        namespaceSelector: { matchLabels: { "kubernetes.io/metadata.name": "kube-system" } },
        podSelector: { matchLabels: { "k8s-app": "kube-dns" } }
      }
    ],
    ports: [{ port: 53, protocol: "UDP" }]
  };
  // Maps normalized ingress rules to native form, deduplicated by JSON identity.
  // `all` => empty `from` (match everything); `clusterPods` => any namespace.
  static createIngressRules(args) {
    return uniqueBy(args.ingressRules.map((rule) => ({
      from: rule.all ? [] : rule.clusterPods ? [{ namespaceSelector: {} }] : NativeNetworkPolicy.createRulePeers(rule),
      ports: NativeNetworkPolicy.mapPorts(rule.ports)
    })), (rule) => JSON.stringify(rule));
  }
  // Maps normalized egress rules to native form, appending synthesized rules:
  // DNS access when any rule uses FQDNs, a public-IP fallback for non-cluster
  // FQDNs (native policies cannot match hostnames), and API-server access.
  static createEgressRules(args) {
    const extraRules = [];
    const needKubeDns = args.egressRules.some((rule) => rule.fqdns.length > 0);
    if (needKubeDns) {
      extraRules.push(NativeNetworkPolicy.fallbackDnsRule);
    }
    const needFallback = args.egressRules.some((rule) => rule.fqdns.some((fqdn) => !fqdn.endsWith(".cluster.local")));
    if (needFallback) {
      extraRules.push({ to: [{ ipBlock: NativeNetworkPolicy.fallbackIpBlock }] });
    }
    if (args.allowKubeApiServer) {
      const { quirks, apiEndpoints } = args.cluster;
      if (quirks?.fallbackKubeApiAccess) {
        // Cluster quirk: a single well-known IP/port reaches the API server.
        extraRules.push({
          to: [{ ipBlock: { cidr: `${quirks?.fallbackKubeApiAccess.serverIp}/32` } }],
          ports: [{ port: quirks?.fallbackKubeApiAccess.serverPort, protocol: "TCP" }]
        });
      } else {
        // Allow each non-hostname API endpoint directly by CIDR.
        const rules = apiEndpoints.filter((endpoint) => endpoint.type !== "hostname").map((endpoint) => ({
          to: [{ ipBlock: { cidr: l3EndpointToCidr(endpoint) } }],
          ports: [{ port: endpoint.port, protocol: "TCP" }]
        }));
        extraRules.push(...rules);
      }
    }
    // Rules whose peers resolved to `undefined` (no concrete peers) are dropped.
    return uniqueBy(args.egressRules.map((rule) => {
      return {
        to: rule.all ? [] : rule.clusterPods ? [{ namespaceSelector: {} }] : NativeNetworkPolicy.createRulePeers(rule),
        ports: NativeNetworkPolicy.mapPorts(rule.ports)
      };
    }).filter((rule) => rule.to !== undefined).concat(extraRules), (rule) => JSON.stringify(rule));
  }
  // Builds the deduplicated peer list for a rule; returns undefined when the
  // rule yields no concrete peers so the caller can discard it.
  static createRulePeers(args) {
    const peers = uniqueBy([
      ...NativeNetworkPolicy.createCidrPeers(args),
      ...NativeNetworkPolicy.createServicePeers(args),
      ...NativeNetworkPolicy.createSelectorPeers(args)
    ], (peer) => JSON.stringify(peer));
    return peers.length > 0 ? peers : undefined;
  }
  static createCidrPeers(args) {
    return args.cidrs.map((cidr) => ({ ipBlock: { cidr } }));
  }
  // One peer per service: the service's pods, scoped to the service namespace.
  static createServicePeers(args) {
    return args.services.map((service) => {
      const selector = mapServiceToLabelSelector(service);
      return {
        namespaceSelector: mapNamespaceNameToSelector(service.metadata.namespace),
        podSelector: selector
      };
    });
  }
  // Combines pod selectors with namespace constraints. With both present,
  // the cross-product is produced so each pod selector is scoped to each
  // namespace peer (native peers AND namespaceSelector with podSelector).
  static createSelectorPeers(args) {
    const selectorPeers = args.selectors.map((selector) => ({
      podSelector: mapSelectorLikeToSelector(selector)
    }));
    const namespacePeers = [
      ...args.namespaces.map(NativeNetworkPolicy.createNamespacePeer),
      ...args.namespaceSelectors.map(NativeNetworkPolicy.createNamespaceSelectorPeer)
    ];
    if (namespacePeers.length === 0) {
      return selectorPeers;
    }
    if (selectorPeers.length === 0) {
      return namespacePeers;
    }
    return flat(selectorPeers.map((selectorPeer) => {
      return namespacePeers.map((namespacePeer) => merge(selectorPeer, namespacePeer));
    }));
  }
  static createNamespacePeer(namespace) {
    const namespaceName = getNamespaceName(namespace);
    const namespaceSelector = mapNamespaceNameToSelector(namespaceName);
    return { namespaceSelector };
  }
  static createNamespaceSelectorPeer(selector) {
    return {
      namespaceSelector: mapSelectorLikeToSelector(selector)
    };
  }
  // Maps normalized port entries to native form: single `port` or a
  // [start, end] `range` (emitted as port/endPort). Protocol defaults to TCP.
  static mapPorts(ports) {
    return ports.map((port) => {
      if ("port" in port) {
        return {
          port: port.port,
          protocol: port.protocol ?? "TCP"
        };
      }
      return {
        port: port.range[0],
        endPort: port.range[1],
        protocol: port.protocol ?? "TCP"
      };
    });
  }
}
|
|
887
|
+
|
|
888
|
+
// src/pod.ts
// Defaults merged into every generated pod spec. Service-account tokens are
// not mounted unless a workload explicitly opts in (least-privilege default).
var podSpecDefaults = {
  automountServiceAccountToken: false
};
|
|
892
|
+
|
|
893
|
+
// src/workload.ts
|
|
894
|
+
import {
|
|
895
|
+
AccessPointRoute,
|
|
896
|
+
mergeEndpoints
|
|
897
|
+
} from "@highstate/common";
|
|
898
|
+
import { trimIndentation } from "@highstate/contract";
|
|
899
|
+
import {
|
|
900
|
+
makeFileOutput,
|
|
901
|
+
normalize as normalize3,
|
|
902
|
+
normalizeInputs,
|
|
903
|
+
toPromise as toPromise4
|
|
904
|
+
} from "@highstate/pulumi";
|
|
905
|
+
import {
|
|
906
|
+
interpolate as interpolate4,
|
|
907
|
+
output as output5
|
|
908
|
+
} from "@pulumi/pulumi";
|
|
909
|
+
import { sha256 } from "crypto-hash";
|
|
910
|
+
import { deepmerge as deepmerge2 } from "deepmerge-ts";
|
|
911
|
+
import { filter, flat as flat2, isNonNullish as isNonNullish2, omit as omit3, unique, uniqueBy as uniqueBy2 } from "remeda";
|
|
912
|
+
// Argument keys consumed by the workload layer itself and therefore stripped
// before args are forwarded to the underlying Kubernetes resource.
var workloadExtraArgs = [...commonExtraArgs, "container", "containers"];
|
|
913
|
+
/**
 * Restricts a pod template used for a patch to only the (init)containers that
 * the patch "owns" (i.e. that appear, by name, in `ownedTemplate`). Container
 * lists that end up empty are omitted entirely so the patch does not clobber
 * containers managed elsewhere.
 */
function filterPatchOwnedContainersInTemplate(template, ownedTemplate) {
  // Collect the distinct, non-null names of containers the patch owns.
  const collectOwnedNames = (list) => unique((list ?? []).map((container) => container.name).filter(isNonNullish2));
  const ownedNames = collectOwnedNames(ownedTemplate.spec?.containers);
  const ownedInitNames = collectOwnedNames(ownedTemplate.spec?.initContainers);
  // Keep only containers whose name is owned; undefined when nothing survives.
  const keepOwned = (source, names) => {
    if (!source || source.length === 0 || names.length === 0) {
      return undefined;
    }
    const kept = source.filter((container) => (container.name ? names.includes(container.name) : false));
    return kept.length > 0 ? kept : undefined;
  };
  const containers = keepOwned(template.spec?.containers, ownedNames);
  const initContainers = keepOwned(template.spec?.initContainers, ownedInitNames);
  // Rebuild the spec without the original container lists, then re-add only
  // the filtered lists that are non-empty.
  const { containers: _droppedContainers, initContainers: _droppedInitContainers, ...restSpec } = template.spec ?? {};
  const spec = { ...restSpec };
  if (containers) {
    spec.containers = containers;
  }
  if (initContainers) {
    spec.initContainers = initContainers;
  }
  return { ...template, spec };
}
|
|
940
|
+
// Keys handled by the service-aware workload layer (service/route wiring),
// stripped before forwarding args to the underlying resource.
var workloadServiceExtraArgs = [
  ...workloadExtraArgs,
  "service",
  "route",
  "routes"
];
// Keys specific to the generic create-or-patch entry point, which dispatches
// to Deployment / StatefulSet / Job / CronJob implementations.
var genericWorkloadExtraArgs = [
  "defaultType",
  "existing",
  "deployment",
  "statefulSet",
  "job",
  "cronJob"
];
|
|
954
|
+
/**
 * Derives the shared building blocks of a workload from its args:
 * labels, normalized containers, volumes, the pod spec/template, and an
 * optional NetworkPolicy. Everything is plumbed through Pulumi Outputs.
 *
 * @param name       workload name; also used for labels and fallback container names
 * @param args       workload args (containers, namespace, networkPolicy, ...)
 * @param parent     lazy accessor returning the parent resource (called only
 *                   when a NetworkPolicy is actually created)
 * @param opts       Pulumi resource options forwarded to created resources
 * @param isForPatch when true, skip labels and the NetworkPolicy — a patch
 *                   must not introduce new selectors or policies
 */
function getWorkloadComponents(name, args, parent, opts, isForPatch) {
  const labels = isForPatch ? undefined : { "app.kubernetes.io/name": name };
  const containers = output5(args).apply((args2) => normalize3(args2.container, args2.containers));
  const initContainers = output5(args).apply((args2) => normalize3(args2.initContainer, args2.initContainers));
  // Volumes come from two places: explicit container.volume(s) and volumes
  // embedded in volume mounts ({ volume, mountPath } form).
  const rawVolumes = output5({ containers, initContainers }).apply(({ containers: containers2, initContainers: initContainers2 }) => {
    const containerVolumes = [...containers2, ...initContainers2].flatMap((container) => normalize3(container.volume, container.volumes));
    const containerVolumeMounts = containers2.flatMap((container) => {
      return normalize3(container.volumeMount, container.volumeMounts).map((volumeMount) => {
        return "volume" in volumeMount ? volumeMount.volume : undefined;
      }).filter(Boolean);
    });
    return output5([...containerVolumes, ...containerVolumeMounts]);
  });
  // Deduplicate by volume name after mapping to the raw Kubernetes shape.
  const volumes = rawVolumes.apply((rawVolumes2) => {
    return output5(rawVolumes2.map(mapWorkloadVolume)).apply(uniqueBy2((volume) => volume.name));
  });
  const podSpec = output5({
    cluster: output5(args.namespace).cluster,
    containers,
    initContainers,
    volumes
  }).apply(({ cluster, containers: containers2, initContainers: initContainers2, volumes: volumes2 }) => {
    const spec = {
      volumes: volumes2,
      containers: containers2.map((container, index) => mapContainerToRaw(container, cluster, getFallbackContainerName(name, index))),
      initContainers: initContainers2.map((container, index) => mapContainerToRaw(container, cluster, getFallbackContainerName(`init-${name}`, index))),
      ...podSpecDefaults
    };
    // TUN access without a device plugin: mount /dev/net/tun from the host.
    if (containers2.some((container) => container.enableTun) && cluster.quirks?.tunDevicePolicy?.type !== "plugin") {
      spec.volumes = output5(spec.volumes).apply((volumes3) => [
        ...volumes3 ?? [],
        {
          name: "tun-device",
          hostPath: {
            path: "/dev/net/tun"
          }
        }
      ]);
    }
    return spec;
  });
  // Stable hash over the resource UUIDs of referenced volumes; changes when a
  // referenced ConfigMap/Secret/etc. changes, forcing a pod template rollout.
  const dependencyHash = rawVolumes.apply((rawVolumes2) => {
    return output5(rawVolumes2.map(getWorkloadVolumeResourceUuid)).apply(filter(isNonNullish2)).apply(unique()).apply((ids) => sha256(ids.join(",")));
  });
  const podTemplate = output5({ podSpec, dependencyHash }).apply(({ podSpec: podSpec2, dependencyHash: dependencyHash2 }) => {
    return {
      metadata: {
        labels,
        annotations: {
          // NOTE(review): slice(32) keeps the second half of the sha256 hex
          // digest as the annotation-key suffix — confirm this (vs slice(0, 32))
          // matches the intent in the TypeScript source.
          [`highstate.io/dependency-hash-${dependencyHash2.slice(32)}`]: "1"
        }
      },
      spec: podSpec2
    };
  });
  // Create a NetworkPolicy only when needed: never for patches, and only when
  // containers declare allowedEndpoints or explicit policy args are given.
  const networkPolicy = output5({ containers }).apply(({ containers: containers2 }) => {
    if (isForPatch) {
      return output5(undefined);
    }
    const allowedEndpoints = containers2.flatMap((container) => container.allowedEndpoints ?? []);
    if (allowedEndpoints.length === 0 && !args.networkPolicy) {
      return output5(undefined);
    }
    return output5(new NetworkPolicy(name, {
      namespace: args.namespace,
      selector: labels,
      description: `Network policy for "${name}"`,
      ...args.networkPolicy,
      // Merge container-declared endpoints into any user-supplied egress rules.
      egressRules: output5(args.networkPolicy?.egressRules).apply((egressRules) => [
        ...egressRules ?? [],
        ...allowedEndpoints.length > 0 ? [{ toEndpoints: allowedEndpoints }] : []
      ])
    }, { ...opts, parent: parent() }));
  });
  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy };
}
|
|
1030
|
+
/**
 * Extends getWorkloadComponents with an optional Service and AccessPointRoutes.
 * A Service is created (or wrapped, for existing workloads) only when service
 * or route args are present; routes resolve their backends to concrete
 * service endpoints before the AccessPointRoute resources are created.
 */
function getWorkloadServiceComponents(name, args, parent, opts, isForPatch) {
  const { labels, containers, volumes, podSpec, podTemplate, networkPolicy } = getWorkloadComponents(name, args, parent, opts, isForPatch);
  const service = output5({
    existing: args.existing,
    serviceArgs: args.service,
    containers
  }).apply(({ existing, serviceArgs, containers: containers2 }) => {
    // No service/route args at all => no service.
    if (!args.service && !args.route && !args.routes) {
      return;
    }
    // Adopting an existing workload with a known service: wrap it, don't create.
    if (existing?.service) {
      return Service.for(existing.service, output5(args.namespace).cluster);
    }
    // Existing workload without a service reference: nothing to create.
    if (existing) {
      return;
    }
    const ports = containers2.flatMap((container) => normalize3(container.port, container.ports));
    return Service.create(name, {
      ...serviceArgs,
      selector: labels,
      namespace: args.namespace,
      // Derive service ports from container ports unless explicitly provided.
      ports: !serviceArgs?.port && !serviceArgs?.ports ? ports.map(mapContainerPortToServicePort) : serviceArgs?.ports
    });
  });
  const routes = output5({
    routesArgs: normalizeInputs(args.route, args.routes),
    service,
    namespace: output5(args.namespace)
  }).apply(async ({ routesArgs, service: service2, namespace }) => {
    if (!routesArgs.length || !service2) {
      return [];
    }
    // Routes are not managed for adopted (existing) workloads.
    if (args.existing) {
      return [];
    }
    const serviceEndpoints = await toPromise4(service2.endpoints);
    const servicePorts = await toPromise4(service2.spec.ports);
    // Resolves the endpoints backing a route (or route rule): accepts a named
    // port, a numeric port, or — when unset — falls back to the first
    // endpoint's port. Throws with a route-identifying message on failure.
    const resolveServiceEndpoints = async (servicePort, routeName) => {
      if (serviceEndpoints.length === 0) {
        throw new Error(`No endpoints found for workload service in route "${routeName}"`);
      }
      let resolvedServicePort;
      if (servicePort != null) {
        const requestedServicePort = await toPromise4(servicePort);
        if (typeof requestedServicePort === "string") {
          // Named port: translate to its numeric value via the service spec.
          const namedPort = servicePorts?.find((port) => port.name === requestedServicePort);
          if (!namedPort) {
            throw new Error(`Named port "${requestedServicePort}" not found for workload service in route "${routeName}"`);
          }
          resolvedServicePort = namedPort.port;
        } else {
          resolvedServicePort = requestedServicePort;
        }
      } else {
        resolvedServicePort = serviceEndpoints[0]?.port;
      }
      if (resolvedServicePort == null) {
        throw new Error(`Unable to resolve service port for workload service in route "${routeName}"`);
      }
      const filteredEndpoints = serviceEndpoints.filter((endpoint) => endpoint.port === resolvedServicePort);
      if (filteredEndpoints.length === 0) {
        throw new Error(`No endpoints with port ${resolvedServicePort} found for workload service in route "${routeName}"`);
      }
      return filteredEndpoints;
    };
    return await Promise.all(routesArgs.map(async (routeArgs, index) => {
      const routeName = `${name}.${index}`;
      const routeRules = await toPromise4(routeArgs.rules);
      const routeRuleValues = Object.values(routeRules ?? {});
      // A route-level default backend is needed when there are no rules, or
      // when at least one rule relies on the route-level servicePort.
      const needsDefaultBackend = routeRuleValues.length === 0 || routeRuleValues.some((rule) => rule.servicePort == null);
      const defaultServiceEndpoints = needsDefaultBackend ? await resolveServiceEndpoints(routeArgs.servicePort, routeName) : undefined;
      // Resolve each rule's backend; rule-level servicePort wins over route-level.
      const resolvedRules = routeRules ? await Promise.all(Object.entries(routeRules).map(async ([ruleName, rule]) => {
        const ruleServiceEndpoints = await resolveServiceEndpoints(rule.servicePort ?? routeArgs.servicePort, `${routeName}:${ruleName}`);
        return [
          ruleName,
          {
            ...omit3(rule, ["servicePort"]),
            backend: {
              endpoints: ruleServiceEndpoints
            }
          }
        ];
      })) : undefined;
      const resolvedRulesInput = resolvedRules ? Object.fromEntries(resolvedRules) : undefined;
      return new AccessPointRoute(routeName, {
        ...omit3(routeArgs, ["servicePort", "rules"]),
        ...defaultServiceEndpoints ? {
          backend: {
            endpoints: defaultServiceEndpoints
          }
        } : {},
        rules: resolvedRulesInput,
        metadata: {
          ...routeArgs.metadata ?? {},
          // Tag the route with its namespace for downstream consumers.
          "k8s.namespace": namespace
        }
      });
    }));
  });
  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy, service, routes };
}
|
|
1131
|
+
|
|
1132
|
+
class Workload extends NamespacedResource {
|
|
1133
|
+
// Logical workload name (also used in error messages and route names).
name;
// Terminal configuration args (shell selection etc.).
terminalArgs;
// Normalized container list (Output).
containers;
// Rendered pod template (Output).
podTemplate;
// Optional NetworkPolicy created for this workload (Output, may be undefined).
networkPolicy;
// Optional backing Service (Output, may be undefined) — see optionalService.
_service;
// AccessPointRoutes created for this workload (Output of array).
routes;
constructor(type, name, args, opts, metadata, namespace, terminalArgs, containers, podTemplate, networkPolicy, _service = output5(undefined), routes = output5([])) {
  super(type, name, args, opts, metadata, namespace);
  this.name = name;
  this.terminalArgs = terminalArgs;
  this.containers = containers;
  this.podTemplate = podTemplate;
  this.networkPolicy = networkPolicy;
  this._service = _service;
  this.routes = routes;
}
|
|
1150
|
+
// No-op setters paired with the computed getters below: they make assignment
// of these properties harmless (values are always derived, never stored).
// NOTE(review): presumed intent — confirm against the TypeScript source.
set terminal(_value) {}
set logsTerminal(_value) {}
set terminals(_value) {}
set optionalService(_value) {}
set service(_value) {}
set selector(_value) {}
|
|
1156
|
+
// The workload's Service as an Output that may resolve to undefined when no
// service was created. Use `service` to require one.
get optionalService() {
  return this._service;
}
|
|
1159
|
+
get service() {
|
|
1160
|
+
return this._service.apply((service) => {
|
|
1161
|
+
if (!service) {
|
|
1162
|
+
throw new Error(`The service of the workload "${this.name}" is not available.`);
|
|
1163
|
+
}
|
|
1164
|
+
return service;
|
|
1165
|
+
});
|
|
1166
|
+
}
|
|
1167
|
+
get endpoints() {
|
|
1168
|
+
return this.routes.apply((routes) => output5(routes.map((route) => route.route.endpoints)).apply((endpoints) => flat2(endpoints)).apply(mergeEndpoints));
|
|
1169
|
+
}
|
|
1170
|
+
get selector() {
|
|
1171
|
+
return this.podTemplate.apply((template) => ({
|
|
1172
|
+
matchLabels: template.metadata?.labels
|
|
1173
|
+
}));
|
|
1174
|
+
}
|
|
1175
|
+
// Builds an interactive terminal definition: a kubectl-equipped container that
// locates this workload's pods by label selector (fzf-prompting when there is
// more than one) and execs an interactive shell into the first container.
get terminal() {
  // First container of the pod template is the exec target.
  const containerName = this.podTemplate.spec.containers.apply((containers) => containers[0].name);
  const shell = this.terminalArgs.apply((args) => args.shell ?? "bash");
  // "k=v,k2=v2" selector string built from the template labels.
  const podLabelSelector = this.templateMetadata.apply((meta) => meta.labels ?? {}).apply((labels) => Object.entries(labels).map(([key, value]) => `${key}=${value}`).join(","));
  return output5({
    name: this.metadata.name,
    meta: this.getTerminalMeta(),
    spec: {
      image: images_default["terminal-kubectl"].image,
      command: ["bash", "/welcome.sh"],
      files: {
        // Kubeconfig for the target cluster, mounted as a secret file.
        "/kubeconfig": makeFileOutput({
          name: "kubeconfig",
          content: getClusterKubeconfigContent(this.cluster),
          isSecret: true
        }),
        "/welcome.sh": makeFileOutput({
          name: "welcome.sh",
          content: interpolate4`
            #!/bin/bash
            set -euo pipefail

            NAMESPACE="${this.metadata.namespace}"
            RESOURCE_TYPE="${this.kind.toLowerCase()}"
            RESOURCE_NAME="${this.metadata.name}"
            CONTAINER_NAME="${containerName}"
            SHELL="${shell}"
            LABEL_SELECTOR="${podLabelSelector}"

            echo "Connecting to $RESOURCE_TYPE \\"$RESOURCE_NAME\\" in namespace \\"$NAMESPACE\\""

            # get all pods for this workload
            PODS=$(kubectl get pods -n "$NAMESPACE" -l "$LABEL_SELECTOR" -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "")

            if [ -z "$PODS" ]; then
            echo "No pods found"
            exit 1
            fi

            # convert space-separated string to array
            read -ra POD_ARRAY <<< "$PODS"

            if [ \${#POD_ARRAY[@]} -eq 1 ]; then
            # single pod found, connect directly
            SELECTED_POD="\${POD_ARRAY[0]}"
            echo "Found single pod: $SELECTED_POD"
            else
            # multiple pods found, use fzf for selection
            echo "Found \${#POD_ARRAY[@]} pods. Please select one."

            SELECTED_POD=$(printf '%s\n' "\${POD_ARRAY[@]}" | fzf --prompt="Select pod: " --height 10 --border --info=inline)

            if [ -z "$SELECTED_POD" ]; then
            echo "No pod selected"
            exit 1
            fi

            echo "Selected pod: $SELECTED_POD"
            fi

            # execute into the selected pod
            exec kubectl exec -it -n "$NAMESPACE" "$SELECTED_POD" -c "$CONTAINER_NAME" -- "$SHELL"
          `.apply(trimIndentation)
        })
      },
      env: {
        KUBECONFIG: "/kubeconfig"
      }
    }
  });
}
|
|
1246
|
+
// Builds a logs terminal definition: same pod-discovery script as `terminal`,
// but streams `kubectl logs -f` for the selected pod instead of exec-ing a
// shell. NOTE(review): the echo line here uses \" where `terminal` uses \\" —
// presumably a quoting inconsistency in the original source; confirm before
// normalizing either.
get logsTerminal() {
  // First container of the pod template is the log source.
  const containerName = this.podTemplate.spec.containers.apply((containers) => containers[0].name);
  const podLabelSelector = this.templateMetadata.apply((meta) => meta.labels ?? {}).apply((labels) => Object.entries(labels).map(([key, value]) => `${key}=${value}`).join(","));
  return output5({
    name: interpolate4`${this.metadata.name}.logs`,
    // Derive meta from the base terminal meta, re-titled for logs.
    meta: output5(this.getTerminalMeta()).apply((meta) => ({
      ...meta,
      title: `${meta.title} Logs`,
      globalTitle: `${meta.globalTitle} | Logs`,
      description: `The logs of ${meta.title.toLowerCase()}.`
    })),
    spec: {
      image: images_default["terminal-kubectl"].image,
      command: ["bash", "/welcome.sh"],
      files: {
        "/kubeconfig": makeFileOutput({
          name: "kubeconfig",
          content: getClusterKubeconfigContent(this.cluster),
          isSecret: true
        }),
        "/welcome.sh": makeFileOutput({
          name: "welcome.sh",
          content: interpolate4`
            #!/bin/bash
            set -euo pipefail

            NAMESPACE="${this.metadata.namespace}"
            RESOURCE_TYPE="${this.kind.toLowerCase()}"
            RESOURCE_NAME="${this.metadata.name}"
            CONTAINER_NAME="${containerName}"
            LABEL_SELECTOR="${podLabelSelector}"

            echo "Connecting to logs of $RESOURCE_TYPE \"$RESOURCE_NAME\" in namespace \"$NAMESPACE\""

            # get all pods for this workload
            PODS=$(kubectl get pods -n "$NAMESPACE" -l "$LABEL_SELECTOR" -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "")

            if [ -z "$PODS" ]; then
            echo "No pods found"
            exit 1
            fi

            # convert space-separated string to array
            read -ra POD_ARRAY <<< "$PODS"

            if [ \${#POD_ARRAY[@]} -eq 1 ]; then
            # single pod found, connect directly
            SELECTED_POD="\${POD_ARRAY[0]}"
            echo "Found single pod: $SELECTED_POD"
            else
            # multiple pods found, use fzf for selection
            echo "Found \${#POD_ARRAY[@]} pods. Please select one."

            SELECTED_POD=$(printf '%s\n' "\${POD_ARRAY[@]}" | fzf --prompt="Select pod: " --height 10 --border --info=inline)

            if [ -z "$SELECTED_POD" ]; then
            echo "No pod selected"
            exit 1
            fi

            echo "Selected pod: $SELECTED_POD"
            fi

            # stream logs for the selected pod
            exec kubectl logs -f -n "$NAMESPACE" "$SELECTED_POD" -c "$CONTAINER_NAME"
          `.apply(trimIndentation)
        })
      },
      env: {
        KUBECONFIG: "/kubeconfig"
      }
    }
  });
}
|
|
1320
|
+
get terminals() {
|
|
1321
|
+
return [this.logsTerminal, this.terminal];
|
|
1322
|
+
}
|
|
1323
|
+
// Builds a custom terminal definition that execs `command` inside the first
// container of this workload (addressed as <kind>/<name> rather than by pod),
// merging caller-provided meta, files, and env over the defaults.
createTerminal(name, meta, command, spec) {
  const containerName = this.podTemplate.spec.containers.apply((containers) => containers[0].name);
  return output5({
    name,
    // Caller meta overrides the workload's default terminal meta.
    meta: output5(this.getTerminalMeta()).apply((currentMeta) => ({
      ...currentMeta,
      ...meta
    })),
    spec: {
      image: images_default["terminal-kubectl"].image,
      // `exec` replaces the terminal shell with the kubectl process.
      command: output5(command).apply((command2) => [
        "exec",
        "kubectl",
        "exec",
        "-it",
        "-n",
        this.metadata.namespace,
        interpolate4`${this.kind.toLowerCase()}/${this.metadata.name}`,
        "-c",
        containerName,
        "--",
        ...command2
      ]),
      files: {
        "/kubeconfig": makeFileOutput({
          name: "kubeconfig",
          content: getClusterKubeconfigContent(this.cluster),
          isSecret: true
        }),
        ...spec?.files
      },
      env: {
        KUBECONFIG: "/kubeconfig",
        ...spec?.env
      }
    }
  });
}
|
|
1361
|
+
static createOrPatchGeneric(name, args, opts) {
|
|
1362
|
+
return output5(args).apply(async (args2) => {
|
|
1363
|
+
const baseArgs = omit3(args2, genericWorkloadExtraArgs);
|
|
1364
|
+
if (args2.existing?.kind === "Deployment") {
|
|
1365
|
+
const { Deployment } = await import("./chunk-aame3x1b.js");
|
|
1366
|
+
const deploymentArgs = deepmerge2(baseArgs, args2.deployment ?? {});
|
|
1367
|
+
return Deployment.patch(name, {
|
|
1368
|
+
...deploymentArgs,
|
|
1369
|
+
name: args2.existing.metadata.name,
|
|
1370
|
+
namespace: Namespace.forResourceAsync(args2.existing, output5(args2.namespace).cluster)
|
|
1371
|
+
}, opts);
|
|
1372
|
+
}
|
|
1373
|
+
if (args2.existing?.kind === "StatefulSet") {
|
|
1374
|
+
const { StatefulSet } = await import("./chunk-23vn2rdc.js");
|
|
1375
|
+
const statefulSetArgs = deepmerge2(baseArgs, args2.statefulSet ?? {});
|
|
1376
|
+
return StatefulSet.patch(name, {
|
|
1377
|
+
...statefulSetArgs,
|
|
1378
|
+
name: args2.existing.metadata.name,
|
|
1379
|
+
namespace: Namespace.forResourceAsync(args2.existing, output5(args2.namespace).cluster)
|
|
1380
|
+
}, opts);
|
|
1381
|
+
}
|
|
1382
|
+
if (args2.existing?.kind === "Job") {
|
|
1383
|
+
const { Job } = await import("./chunk-2pfx13ay.js");
|
|
1384
|
+
const jobArgs = deepmerge2(baseArgs, args2.job ?? {});
|
|
1385
|
+
return Job.patch(name, {
|
|
1386
|
+
...jobArgs,
|
|
1387
|
+
name: args2.existing.metadata.name,
|
|
1388
|
+
namespace: Namespace.forResourceAsync(args2.existing, output5(args2.namespace).cluster)
|
|
1389
|
+
}, opts);
|
|
1390
|
+
}
|
|
1391
|
+
if (args2.existing?.kind === "CronJob") {
|
|
1392
|
+
const { CronJob } = await import("./chunk-bmvc9d2d.js");
|
|
1393
|
+
const cronJobArgs = deepmerge2(baseArgs, args2.cronJob ?? {});
|
|
1394
|
+
return CronJob.patch(name, {
|
|
1395
|
+
...cronJobArgs,
|
|
1396
|
+
name: args2.existing.metadata.name,
|
|
1397
|
+
namespace: Namespace.forResourceAsync(args2.existing, output5(args2.namespace).cluster)
|
|
1398
|
+
}, opts);
|
|
1399
|
+
}
|
|
1400
|
+
if (args2.defaultType === "Deployment") {
|
|
1401
|
+
const { Deployment } = await import("./chunk-aame3x1b.js");
|
|
1402
|
+
const deploymentArgs = deepmerge2(baseArgs, args2.deployment ?? {});
|
|
1403
|
+
return Deployment.create(name, deploymentArgs, opts);
|
|
1404
|
+
}
|
|
1405
|
+
if (args2.defaultType === "StatefulSet") {
|
|
1406
|
+
const { StatefulSet } = await import("./chunk-23vn2rdc.js");
|
|
1407
|
+
const statefulSetArgs = deepmerge2(baseArgs, args2.statefulSet ?? {});
|
|
1408
|
+
return StatefulSet.create(name, statefulSetArgs, opts);
|
|
1409
|
+
}
|
|
1410
|
+
if (args2.defaultType === "Job") {
|
|
1411
|
+
const { Job } = await import("./chunk-2pfx13ay.js");
|
|
1412
|
+
const jobArgs = deepmerge2(baseArgs, args2.job ?? {});
|
|
1413
|
+
return Job.create(name, jobArgs, opts);
|
|
1414
|
+
}
|
|
1415
|
+
if (args2.defaultType === "CronJob") {
|
|
1416
|
+
const { CronJob } = await import("./chunk-bmvc9d2d.js");
|
|
1417
|
+
const cronJobArgs = deepmerge2(baseArgs, args2.cronJob ?? {});
|
|
1418
|
+
return CronJob.create(name, cronJobArgs, opts);
|
|
1419
|
+
}
|
|
1420
|
+
throw new Error(`Unknown workload type: ${args2.defaultType}`);
|
|
1421
|
+
});
|
|
1422
|
+
}
|
|
1423
|
+
}
|
|
1424
|
+
|
|
1425
|
+
// Public API of this chunk: config/volume primitives, container mapping helpers,
// endpoint selection, network-policy classes, and the generic Workload base.
export { ConfigMap, PersistentVolumeClaim, getAutoVolumeName, getFallbackContainerName, mapContainerToRaw, mapContainerEnvironment, mapVolumeMount, mapEnvironmentSource, mapWorkloadVolume, getWorkloadVolumeResourceUuid, getBestEndpoint, requireBestEndpoint, networkPolicyMediator, NetworkPolicy, NativeNetworkPolicy, podSpecDefaults, workloadExtraArgs, filterPatchOwnedContainersInTemplate, workloadServiceExtraArgs, getWorkloadComponents, getWorkloadServiceComponents, Workload };
|