@highstate/k8s 0.9.16 → 0.9.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-2EEHJZPD.js +13 -0
- package/dist/chunk-2EEHJZPD.js.map +1 -0
- package/dist/{chunk-OFFSHGC6.js → chunk-4JGXGN2L.js} +66 -48
- package/dist/chunk-4JGXGN2L.js.map +1 -0
- package/dist/chunk-A3XGSDIW.js +306 -0
- package/dist/chunk-A3XGSDIW.js.map +1 -0
- package/dist/chunk-IMTXUK2U.js +244 -0
- package/dist/chunk-IMTXUK2U.js.map +1 -0
- package/dist/chunk-JYNXQ3I3.js +287 -0
- package/dist/chunk-JYNXQ3I3.js.map +1 -0
- package/dist/{chunk-5C2BJGES.js → chunk-KDD6XUWM.js} +30 -23
- package/dist/chunk-KDD6XUWM.js.map +1 -0
- package/dist/chunk-NOFJC3EM.js +236 -0
- package/dist/chunk-NOFJC3EM.js.map +1 -0
- package/dist/chunk-NXSYCA3V.js +337 -0
- package/dist/chunk-NXSYCA3V.js.map +1 -0
- package/dist/chunk-SBC3TUIN.js +1513 -0
- package/dist/chunk-SBC3TUIN.js.map +1 -0
- package/dist/chunk-SI7X6N46.js +338 -0
- package/dist/chunk-SI7X6N46.js.map +1 -0
- package/dist/chunk-WGMJCZSK.js +360 -0
- package/dist/chunk-WGMJCZSK.js.map +1 -0
- package/dist/deployment-752P6JIT.js +8 -0
- package/dist/{deployment-XK3CDJOE.js.map → deployment-752P6JIT.js.map} +1 -1
- package/dist/highstate.manifest.json +8 -7
- package/dist/impl/gateway-route.js +123 -0
- package/dist/impl/gateway-route.js.map +1 -0
- package/dist/impl/tls-certificate.js +32 -0
- package/dist/impl/tls-certificate.js.map +1 -0
- package/dist/index.js +736 -208
- package/dist/index.js.map +1 -1
- package/dist/stateful-set-N64YVKR7.js +8 -0
- package/dist/{stateful-set-7CAQWTV2.js.map → stateful-set-N64YVKR7.js.map} +1 -1
- package/dist/units/cert-manager/index.js +11 -10
- package/dist/units/cert-manager/index.js.map +1 -1
- package/dist/units/cluster-dns/index.js.map +1 -1
- package/dist/units/cluster-patch/index.js.map +1 -1
- package/dist/units/dns01-issuer/index.js +27 -23
- package/dist/units/dns01-issuer/index.js.map +1 -1
- package/dist/units/existing-cluster/index.js +11 -8
- package/dist/units/existing-cluster/index.js.map +1 -1
- package/dist/units/gateway-api/index.js +2 -2
- package/dist/units/gateway-api/index.js.map +1 -1
- package/package.json +40 -14
- package/src/cluster.ts +30 -22
- package/src/config-map.ts +195 -57
- package/src/container.ts +5 -5
- package/src/cron-job.ts +403 -31
- package/src/deployment.ts +260 -120
- package/src/dns01-solver.ts +10 -0
- package/src/gateway/backend.ts +2 -2
- package/src/gateway/gateway.ts +383 -0
- package/src/gateway/http-route.ts +17 -24
- package/src/gateway/index.ts +1 -0
- package/src/helm.ts +83 -53
- package/src/impl/gateway-route.ts +155 -0
- package/src/impl/tls-certificate.ts +33 -0
- package/src/index.ts +22 -67
- package/src/job.ts +393 -28
- package/src/namespace.ts +236 -99
- package/src/network-policy.ts +216 -165
- package/src/network.ts +2 -2
- package/src/pvc.ts +266 -65
- package/src/rbac.ts +218 -0
- package/src/scripting/bundle.ts +9 -20
- package/src/scripting/container.ts +1 -1
- package/src/scripting/environment.ts +5 -5
- package/src/secret.ts +200 -62
- package/src/service.ts +288 -158
- package/src/shared.ts +94 -67
- package/src/stateful-set.ts +270 -117
- package/src/tls.ts +344 -0
- package/src/units/cert-manager/index.ts +2 -3
- package/src/units/dns01-issuer/index.ts +30 -14
- package/src/units/existing-cluster/index.ts +10 -7
- package/src/units/gateway-api/index.ts +2 -2
- package/src/worker.ts +26 -0
- package/src/workload.ts +275 -171
- package/dist/chunk-5C2BJGES.js.map +0 -1
- package/dist/chunk-5TLC5BXR.js +0 -256
- package/dist/chunk-5TLC5BXR.js.map +0 -1
- package/dist/chunk-BBIY3KUN.js +0 -1557
- package/dist/chunk-BBIY3KUN.js.map +0 -1
- package/dist/chunk-OFFSHGC6.js.map +0 -1
- package/dist/chunk-TZHOUJRC.js +0 -202
- package/dist/chunk-TZHOUJRC.js.map +0 -1
- package/dist/chunk-YWRJ4EZM.js +0 -192
- package/dist/chunk-YWRJ4EZM.js.map +0 -1
- package/dist/deployment-XK3CDJOE.js +0 -6
- package/dist/stateful-set-7CAQWTV2.js +0 -6
- package/dist/units/access-point/index.js +0 -21
- package/dist/units/access-point/index.js.map +0 -1
- package/src/access-point.ts +0 -191
- package/src/units/access-point/index.ts +0 -19
- package/src/units/dns01-issuer/solver.ts +0 -23
@@ -0,0 +1,1513 @@
|
|
1
|
+
import { isEndpointFromCluster, mapServiceToLabelSelector, Service, mapContainerPortToServicePort } from './chunk-SI7X6N46.js';
|
2
|
+
import { Secret } from './chunk-IMTXUK2U.js';
|
3
|
+
import { commonExtraArgs, ScopedResource, Namespace, mapMetadata, getProvider, mapSelectorLikeToSelector, getProviderAsync, mapNamespaceNameToSelector, getNamespaceName, images_exports } from './chunk-WGMJCZSK.js';
|
4
|
+
import { z, getOrCreate, trimIndentation } from '@highstate/contract';
|
5
|
+
import { ComponentResource, toPromise, output as output$1, interpolate as interpolate$1, normalize, normalizeInputs, fileFromString } from '@highstate/pulumi';
|
6
|
+
import { core, networking } from '@pulumi/kubernetes';
|
7
|
+
import { output, interpolate } from '@pulumi/pulumi';
|
8
|
+
import { deepmerge } from 'deepmerge-ts';
|
9
|
+
import { omit, concat, map, groupBy, mergeDeep, uniqueBy, flat, merge, filter, isNonNullish, unique } from 'remeda';
|
10
|
+
import { ImplementationMediator, filterEndpoints, parseL34Endpoint, l34EndpointToString, l3EndpointToCidr, AccessPointRoute } from '@highstate/common';
|
11
|
+
import { sha256 } from 'crypto-hash';
|
12
|
+
|
13
|
+
var ConfigMap = class _ConfigMap extends ScopedResource {
  /**
   * Highstate wrapper around a Kubernetes ConfigMap, scoped to a namespace
   * and exposing the map's `data` as a Pulumi output.
   */
  constructor(type, name, args, opts, apiVersion, kind, namespace, metadata, data) {
    super(type, name, args, opts, apiVersion, kind, namespace, metadata);
    this.data = data;
  }
  /**
   * The Highstate config map entity.
   */
  get entity() {
    return output({
      type: "config-map",
      clusterId: this.cluster.id,
      clusterName: this.cluster.name,
      metadata: this.metadata
    });
  }
  /**
   * Creates a new config map.
   */
  static create(name, args, opts) {
    return new CreatedConfigMap(name, args, opts);
  }
  /**
   * Creates a new config map or patches an existing one.
   *
   * @param name The name of the resource. May not be the same as the config map name.
   * @param args The arguments to create or patch the config map with.
   * @param opts Optional resource options.
   */
  static createOrPatch(name, args, opts) {
    if (args.existing) {
      // BUG FIX: `opts` was previously dropped on this path, so resource
      // options (aliases, dependsOn, providers, ...) silently did not apply
      // to the patch. Forward them like every other factory does.
      return new ConfigMapPatch(
        name,
        {
          ...args,
          name: output(args.existing).metadata.name
        },
        opts
      );
    }
    return new CreatedConfigMap(name, args, opts);
  }
  /**
   * Creates a new config map or gets an existing one.
   *
   * @param name The name of the resource. May not be the same as the config map name. Will not be used when existing config map is retrieved.
   * @param args The arguments to create or get the config map with.
   * @param opts Optional resource options.
   */
  static async createOrGet(name, args, opts) {
    if (args.existing) {
      return await _ConfigMap.forAsync(args.existing, output(args.namespace).cluster);
    }
    return new CreatedConfigMap(name, args, opts);
  }
  /**
   * Patches an existing config map.
   *
   * Will throw an error if the config map does not exist.
   *
   * @param name The name of the resource. May not be the same as the config map name.
   * @param args The arguments to patch the config map with.
   * @param opts Optional resource options.
   */
  static patch(name, args, opts) {
    return new ConfigMapPatch(name, args, opts);
  }
  /**
   * Wraps an existing Kubernetes config map.
   */
  static wrap(name, args, opts) {
    return new WrappedConfigMap(name, args, opts);
  }
  /**
   * Gets an existing config map.
   *
   * Will throw an error if the config map does not exist.
   */
  static get(name, args, opts) {
    return new ExternalConfigMap(name, args, opts);
  }
  // Shared cache backing `for`/`forAsync` idempotency (keyed by cluster + namespace + name).
  static configMapCache = /* @__PURE__ */ new Map();
  /**
   * Gets an existing config map for a given entity.
   * Prefer this method over `get` when possible.
   *
   * It automatically names the resource with the following format: `{clusterName}.{namespace}.{name}.{clusterId}`.
   *
   * This method is idempotent and will return the same instance for the same entity.
   *
   * @param entity The entity to get the config map for.
   * @param cluster The cluster where the config map is located.
   */
  static for(entity, cluster) {
    return getOrCreate(
      _ConfigMap.configMapCache,
      `${entity.clusterName}.${entity.metadata.namespace}.${entity.metadata.name}.${entity.clusterId}`,
      (name) => {
        return _ConfigMap.get(name, {
          name: entity.metadata.name,
          namespace: Namespace.forResource(entity, cluster)
        });
      }
    );
  }
  /**
   * Gets an existing config map for a given entity.
   * Prefer this method over `get` when possible.
   *
   * It automatically names the resource with the following format: `{clusterName}.{namespace}.{name}.{clusterId}`.
   *
   * This method is idempotent and will return the same instance for the same entity.
   *
   * @param entity The entity to get the config map for.
   * @param cluster The cluster where the config map is located.
   */
  static async forAsync(entity, cluster) {
    const resolvedEntity = await toPromise(entity);
    return _ConfigMap.for(resolvedEntity, cluster);
  }
};
|
130
|
+
// Creates a brand-new ConfigMap resource in the namespace's cluster.
var CreatedConfigMap = class extends ConfigMap {
  constructor(name, args, opts) {
    // The underlying resource is constructed lazily inside apply() so the
    // cluster (and thus the provider) resolves first. The arrow function
    // captures `this` lexically before super() runs, but it is only
    // *evaluated* after construction completes, so the reference is safe.
    const configMap = output(args.namespace).cluster.apply((cluster) => {
      return new core.v1.ConfigMap(
        name,
        {
          metadata: mapMetadata(args, name),
          data: args.data
        },
        {
          ...opts,
          parent: this,
          provider: getProvider(cluster)
        }
      );
    });
    super(
      "highstate:k8s:ConfigMap",
      name,
      args,
      opts,
      configMap.apiVersion,
      configMap.kind,
      output(args.namespace),
      configMap.metadata,
      configMap.data
    );
  }
};
|
159
|
+
// Patches an existing ConfigMap (server-side apply patch); fails at deploy
// time if the target config map does not exist.
var ConfigMapPatch = class extends ConfigMap {
  constructor(name, args, opts) {
    // Lazy construction inside apply(): provider depends on the resolved
    // cluster; the arrow's `this` is only read after super() has run.
    const configMap = output(args.namespace).cluster.apply((cluster) => {
      return new core.v1.ConfigMapPatch(
        name,
        {
          metadata: mapMetadata(args, name),
          data: args.data
        },
        {
          ...opts,
          parent: this,
          provider: getProvider(cluster)
        }
      );
    });
    super(
      "highstate:k8s:ConfigMapPatch",
      name,
      args,
      opts,
      configMap.apiVersion,
      configMap.kind,
      output(args.namespace),
      configMap.metadata,
      configMap.data
    );
  }
};
|
188
|
+
// Adapts an already-constructed Kubernetes ConfigMap resource
// (`args.configMap`) into the Highstate ConfigMap interface without
// creating or fetching anything.
var WrappedConfigMap = class extends ConfigMap {
  constructor(name, args, opts) {
    super(
      "highstate:k8s:WrappedConfigMap",
      name,
      args,
      opts,
      output(args.configMap).apiVersion,
      output(args.configMap).kind,
      output(args.namespace),
      output(args.configMap).metadata,
      output(args.configMap).data
    );
  }
};
|
203
|
+
// Looks up an existing ConfigMap by "<namespace>/<name>" in the namespace's
// cluster; the lookup fails at deploy time if the config map does not exist.
var ExternalConfigMap = class extends ConfigMap {
  constructor(name, args, opts) {
    // Lazy lookup inside apply() so the provider can be derived from the
    // resolved cluster; `this` in the arrow is only read after super().
    const configMap = output(args.namespace).cluster.apply((cluster) => {
      return core.v1.ConfigMap.get(
        name,
        interpolate`${output(args.namespace).metadata.name}/${args.name}`,
        { ...opts, parent: this, provider: getProvider(cluster) }
      );
    });
    super(
      "highstate:k8s:ExternalConfigMap",
      name,
      args,
      opts,
      configMap.apiVersion,
      configMap.kind,
      output(args.namespace),
      configMap.metadata,
      configMap.data
    );
  }
};
|
225
|
+
// PVC-specific convenience args handled by the Highstate layer itself
// ("size" becomes spec.resources.requests.storage); these are stripped
// from the args before they are forwarded to Kubernetes.
var extraPersistentVolumeClaimArgs = [...commonExtraArgs, "size"];
|
226
|
+
var PersistentVolumeClaim = class _PersistentVolumeClaim extends ScopedResource {
  /**
   * Highstate wrapper around a Kubernetes PersistentVolumeClaim, scoped to
   * a namespace and exposing the claim's `spec` and `status` as outputs.
   */
  constructor(type, name, args, opts, apiVersion, kind, namespace, metadata, spec, status) {
    super(type, name, args, opts, apiVersion, kind, namespace, metadata);
    this.spec = spec;
    this.status = status;
  }
  /**
   * The Highstate PVC entity.
   */
  get entity() {
    return output$1({
      type: "persistent-volume-claim",
      clusterId: this.cluster.id,
      clusterName: this.cluster.name,
      metadata: this.metadata
    });
  }
  /**
   * Creates a new PVC.
   */
  static create(name, args, opts) {
    return new CreatedPersistentVolumeClaim(name, args, opts);
  }
  /**
   * Creates a new PVC or patches an existing one.
   *
   * @param name The name of the resource. May not be the same as the PVC name.
   * @param args The arguments to create or patch the PVC with.
   * @param opts Optional resource options.
   */
  static createOrPatch(name, args, opts) {
    if (args.existing) {
      // BUG FIX: `opts` was previously dropped on this path, so resource
      // options (aliases, dependsOn, providers, ...) silently did not apply
      // to the patch. Forward them like every other factory does.
      return new PersistentVolumeClaimPatch(
        name,
        {
          ...args,
          name: output$1(args.existing).metadata.name
        },
        opts
      );
    }
    return new CreatedPersistentVolumeClaim(name, args, opts);
  }
  /**
   * Creates a new PVC or gets an existing one.
   *
   * @param name The name of the resource. May not be the same as the PVC name. Will not be used when existing PVC is retrieved.
   * @param args The arguments to create or get the PVC with.
   * @param opts Optional resource options.
   */
  static async createOrGet(name, args, opts) {
    if (args.existing) {
      return await _PersistentVolumeClaim.forAsync(args.existing, output$1(args.namespace).cluster);
    }
    return new CreatedPersistentVolumeClaim(name, args, opts);
  }
  /**
   * Patches an existing PVC.
   *
   * Will throw an error if the PVC does not exist.
   *
   * @param name The name of the resource. May not be the same as the PVC name.
   * @param args The arguments to patch the PVC with.
   * @param opts Optional resource options.
   */
  static patch(name, args, opts) {
    return new PersistentVolumeClaimPatch(name, args, opts);
  }
  /**
   * Wraps an existing Kubernetes PVC.
   */
  static wrap(name, args, opts) {
    return new WrappedPersistentVolumeClaim(name, args, opts);
  }
  /**
   * Gets an existing PVC.
   *
   * Will throw an error if the PVC does not exist.
   */
  static get(name, args, opts) {
    return new ExternalPersistentVolumeClaim(name, args, opts);
  }
  // Shared cache backing `for`/`forAsync` idempotency (keyed by cluster + namespace + name).
  static pvcCache = /* @__PURE__ */ new Map();
  /**
   * Gets an existing PVC for a given entity.
   * Prefer this method over `get` when possible.
   *
   * It automatically names the resource with the following format: `{clusterName}.{namespace}.{name}.{clusterId}`.
   *
   * This method is idempotent and will return the same instance for the same entity.
   *
   * @param entity The entity to get the PVC for.
   * @param cluster The cluster where the PVC is located.
   */
  static for(entity, cluster) {
    return getOrCreate(
      _PersistentVolumeClaim.pvcCache,
      `${entity.clusterName}.${entity.metadata.namespace}.${entity.metadata.name}.${entity.clusterId}`,
      (name) => {
        return _PersistentVolumeClaim.get(name, {
          name: entity.metadata.name,
          namespace: Namespace.forResource(entity, cluster)
        });
      }
    );
  }
  /**
   * Gets an existing PVC for a given entity.
   * Prefer this method over `get` when possible.
   *
   * It automatically names the resource with the following format: `{clusterName}.{namespace}.{name}.{clusterId}`.
   *
   * This method is idempotent and will return the same instance for the same entity.
   *
   * @param entity The entity to get the PVC for.
   * @param cluster The cluster where the PVC is located.
   */
  static async forAsync(entity, cluster) {
    const resolvedEntity = await toPromise(entity);
    return _PersistentVolumeClaim.for(resolvedEntity, cluster);
  }
};
|
344
|
+
// Creates a brand-new PersistentVolumeClaim resource in the namespace's cluster.
var CreatedPersistentVolumeClaim = class extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    // Resource creation is deferred into apply() so the cluster/provider can
    // resolve first; the arrow callback's lexical `this` is only evaluated
    // after super() has completed, so using it as `parent` here is safe.
    const pvc = output$1(args.namespace).cluster.apply((cluster) => {
      return new core.v1.PersistentVolumeClaim(
        name,
        {
          metadata: mapMetadata(args, name),
          // Defaults (ReadWriteOnce, 100Mi via `size`) are deep-merged under
          // any caller-provided spec fields; Highstate-only args are stripped.
          spec: output$1(args).apply((args2) => {
            return deepmerge(
              {
                accessModes: ["ReadWriteOnce"],
                resources: {
                  requests: {
                    storage: args2.size ?? "100Mi"
                  }
                }
              },
              omit(args2, extraPersistentVolumeClaimArgs)
            );
          })
        },
        {
          ...opts,
          parent: this,
          provider: getProvider(cluster)
        }
      );
    });
    super(
      "highstate:k8s:PersistentVolumeClaim",
      name,
      args,
      opts,
      pvc.apiVersion,
      pvc.kind,
      output$1(args.namespace),
      pvc.metadata,
      pvc.spec,
      pvc.status
    );
  }
};
|
386
|
+
// Patches an existing PersistentVolumeClaim; fails at deploy time if the
// target PVC does not exist.
var PersistentVolumeClaimPatch = class extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    // Lazy construction inside apply(): provider depends on the resolved
    // cluster; the arrow's `this` is only read after super() has run.
    const pvc = output$1(args.namespace).cluster.apply((cluster) => {
      return new core.v1.PersistentVolumeClaimPatch(
        name,
        {
          metadata: mapMetadata(args, name),
          // Same default merging as creation: ReadWriteOnce + `size`
          // (default "100Mi") merged under caller-provided spec fields.
          spec: output$1(args).apply((args2) => {
            return deepmerge(
              {
                accessModes: ["ReadWriteOnce"],
                resources: {
                  requests: {
                    storage: args2.size ?? "100Mi"
                  }
                }
              },
              omit(args2, extraPersistentVolumeClaimArgs)
            );
          })
        },
        {
          ...opts,
          parent: this,
          provider: getProvider(cluster)
        }
      );
    });
    super(
      "highstate:k8s:PersistentVolumeClaimPatch",
      name,
      args,
      opts,
      pvc.apiVersion,
      pvc.kind,
      output$1(args.namespace),
      pvc.metadata,
      pvc.spec,
      pvc.status
    );
  }
};
|
428
|
+
// Adapts an already-constructed Kubernetes PVC resource (`args.pvc`) into
// the Highstate PersistentVolumeClaim interface without creating anything.
var WrappedPersistentVolumeClaim = class extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    super(
      "highstate:k8s:WrappedPersistentVolumeClaim",
      name,
      args,
      opts,
      output$1(args.pvc).apiVersion,
      output$1(args.pvc).kind,
      output$1(args.namespace),
      output$1(args.pvc).metadata,
      output$1(args.pvc).spec,
      output$1(args.pvc).status
    );
  }
};
|
444
|
+
// Looks up an existing PVC by "<namespace>/<name>" in the namespace's
// cluster; the lookup fails at deploy time if the PVC does not exist.
var ExternalPersistentVolumeClaim = class extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    // Lazy lookup inside apply() so the provider can be derived from the
    // resolved cluster; `this` in the arrow is only read after super().
    const pvc = output$1(args.namespace).cluster.apply((cluster) => {
      return core.v1.PersistentVolumeClaim.get(
        name,
        interpolate$1`${output$1(args.namespace).metadata.name}/${args.name}`,
        { ...opts, parent: this, provider: getProvider(cluster) }
      );
    });
    super(
      "highstate:k8s:ExternalPersistentVolumeClaim",
      name,
      args,
      opts,
      pvc.apiVersion,
      pvc.kind,
      output$1(args.namespace),
      pvc.metadata,
      pvc.spec,
      pvc.status
    );
  }
};
|
467
|
+
// Derives an automatic volume name for a workload: the first volume keeps
// the short "<workload>-data" form, subsequent ones get an index suffix.
function getAutoVolumeName(workloadName, index) {
  return index === 0 ? `${workloadName}-data` : `${workloadName}-data-${index}`;
}
|
473
|
+
// Container args consumed by the Highstate abstraction itself; they are
// stripped from the container definition before the raw Kubernetes spec
// is produced (see mapContainerToRaw).
var containerExtraArgs = ["port", "volumeMount", "volume", "environment", "environmentSource", "environmentSources"];
|
481
|
+
// Builds the raw Kubernetes container spec from a Highstate container
// definition, merging the singular/plural convenience fields (port/ports,
// volumeMount/volumeMounts, environmentSource(s)) and environment maps.
function mapContainerToRaw(container, cluster, fallbackName) {
  const spec = {
    ...omit(container, containerExtraArgs),
    name: container.name ?? fallbackName,
    ports: normalize(container.port, container.ports),
    volumeMounts: map(normalize(container.volumeMount, container.volumeMounts), mapVolumeMount),
    env: concat(
      container.environment ? mapContainerEnvironment(container.environment) : [],
      container.env ?? []
    ),
    envFrom: concat(
      map(normalize(container.environmentSource, container.environmentSources), mapEnvironmentSource),
      container.envFrom ?? []
    )
  };
  if (!container.enableTun) {
    return spec;
  }
  // TUN support: grant NET_ADMIN (note: this replaces any explicit
  // capabilities.add list supplied by the caller).
  spec.securityContext ??= {};
  spec.securityContext.capabilities ??= {};
  spec.securityContext.capabilities.add = ["NET_ADMIN"];
  if (cluster.quirks?.tunDevicePolicy?.type === "plugin") {
    // Cluster exposes /dev/net/tun via a device-plugin resource limit.
    spec.resources ??= {};
    spec.resources.limits ??= {};
    spec.resources.limits[cluster.quirks.tunDevicePolicy.resourceName] = cluster.quirks.tunDevicePolicy.resourceValue;
  } else {
    // Otherwise mount the host tun device into the container directly.
    spec.volumeMounts ??= [];
    spec.volumeMounts.push({
      name: "tun-device",
      mountPath: "/dev/net/tun",
      readOnly: false
    });
  }
  return spec;
}
|
519
|
+
// Translates the Highstate environment map into Kubernetes EnvVar entries.
// Plain strings become literal values; { secret, key } / { configMap, key }
// become secretKeyRef / configMapKeyRef; any other object is passed through
// as a raw `valueFrom` source. Falsy entries are skipped.
function mapContainerEnvironment(environment) {
  const result = [];
  for (const [name, value] of Object.entries(environment)) {
    if (!value) {
      continue;
    }
    if (typeof value === "string") {
      result.push({ name, value });
    } else if ("secret" in value) {
      result.push({
        name,
        valueFrom: {
          secretKeyRef: {
            name: value.secret.metadata.name,
            key: value.key
          }
        }
      });
    } else if ("configMap" in value) {
      result.push({
        name,
        valueFrom: {
          configMapKeyRef: {
            name: value.configMap.metadata.name,
            key: value.key
          }
        }
      });
    } else {
      result.push({ name, valueFrom: value });
    }
  }
  return result;
}
|
557
|
+
// Normalizes a Highstate volume mount to a raw Kubernetes VolumeMount.
// A mount may reference a volume object instead of a name; in that case
// the volume is resolved and its generated name substituted, and the
// "volume" key is dropped from the result.
function mapVolumeMount(volumeMount) {
  if (!("volume" in volumeMount)) {
    return { ...volumeMount, name: volumeMount.name };
  }
  const resolvedName = output$1(volumeMount.volume)
    .apply(mapWorkloadVolume)
    .apply((volume) => output$1(volume.name));
  return omit({ ...volumeMount, name: resolvedName }, ["volume"]);
}
|
572
|
+
// Converts a raw Kubernetes ConfigMap/Secret resource into an EnvFromSource
// reference; anything else is assumed to already be a valid EnvFromSource
// and is returned unchanged.
function mapEnvironmentSource(envFrom) {
  if (envFrom instanceof core.v1.ConfigMap) {
    return { configMapRef: { name: envFrom.metadata.name } };
  }
  if (envFrom instanceof core.v1.Secret) {
    return { secretRef: { name: envFrom.metadata.name } };
  }
  return envFrom;
}
|
589
|
+
// Resolves a workload volume reference into a pod volume spec. Both the
// Highstate wrappers (PersistentVolumeClaim/Secret/ConfigMap) and the raw
// Pulumi Kubernetes resources are supported; the wrapper and raw classes
// are disjoint, so each pair can be checked together. Anything else is
// assumed to already be a valid volume spec and is returned unchanged.
function mapWorkloadVolume(volume) {
  if (volume instanceof PersistentVolumeClaim || core.v1.PersistentVolumeClaim.isInstance(volume)) {
    return {
      name: volume.metadata.name,
      persistentVolumeClaim: { claimName: volume.metadata.name }
    };
  }
  if (volume instanceof Secret || core.v1.Secret.isInstance(volume)) {
    return {
      name: volume.metadata.name,
      secret: { secretName: volume.metadata.name }
    };
  }
  if (volume instanceof ConfigMap || core.v1.ConfigMap.isInstance(volume)) {
    return {
      name: volume.metadata.name,
      configMap: { name: volume.metadata.name }
    };
  }
  return volume;
}
|
640
|
+
// Returns the Kubernetes uid of a known volume resource (Highstate wrapper
// or raw Pulumi resource); unknown volume specs resolve to undefined.
function getWorkloadVolumeResourceUuid(volume) {
  const isKnownResource =
    volume instanceof PersistentVolumeClaim ||
    volume instanceof Secret ||
    volume instanceof ConfigMap ||
    core.v1.PersistentVolumeClaim.isInstance(volume) ||
    core.v1.ConfigMap.isInstance(volume) ||
    core.v1.Secret.isInstance(volume);
  return isKnownResource ? volume.metadata.uid : output$1(void 0);
}
|
661
|
+
// Picks the most suitable endpoint: a single candidate wins outright; when
// a cluster is given, an endpoint originating from that cluster is
// preferred; otherwise the first endpoint surviving the generic
// filterEndpoints() pass is used. Returns undefined for an empty list.
function getBestEndpoint(endpoints, cluster) {
  if (endpoints.length === 0) {
    return void 0;
  }
  if (endpoints.length === 1) {
    return endpoints[0];
  }
  if (cluster) {
    const fromCluster = endpoints.find((endpoint) => isEndpointFromCluster(endpoint, cluster));
    if (fromCluster) {
      return fromCluster;
    }
  }
  return filterEndpoints(endpoints)[0];
}
|
677
|
+
// Like getBestEndpoint, but a missing endpoint is a hard error.
function requireBestEndpoint(endpoints, cluster) {
  const best = getBestEndpoint(endpoints, cluster);
  if (best) {
    return best;
  }
  throw new Error(`No best endpoint found for cluster "${cluster.name}" (${cluster.id})`);
}
|
684
|
+
// Mediator that dispatches network-policy creation to a cluster-specific
// implementation when the cluster references one (see
// cluster.networkPolicyImplRef below); input is { name, args }, output
// must be a Pulumi ComponentResource.
var networkPolicyMediator = new ImplementationMediator(
  "network-policy",
  z.object({ name: z.string(), args: z.custom() }),
  z.instanceof(ComponentResource)
);
|
689
|
+
var NetworkPolicy = class _NetworkPolicy extends ComponentResource {
|
690
|
+
/**
|
691
|
+
* The underlying network policy resource.
|
692
|
+
*/
|
693
|
+
networkPolicy;
|
694
|
+
constructor(name, args, opts) {
|
695
|
+
super("k8s:network-policy", name, args, opts);
|
696
|
+
const normalizedArgs = output$1(args).apply(async (args2) => {
|
697
|
+
const ingressRules = normalize(args2.ingressRule, args2.ingressRules);
|
698
|
+
const egressRules = normalize(args2.egressRule, args2.egressRules);
|
699
|
+
const cluster = await toPromise(args2.namespace.cluster);
|
700
|
+
const extraEgressRules = [];
|
701
|
+
if (args2.allowKubeDns) {
|
702
|
+
extraEgressRules.push({
|
703
|
+
namespaces: ["kube-system"],
|
704
|
+
selectors: [{ matchLabels: { "k8s-app": "kube-dns" } }],
|
705
|
+
ports: [{ port: 53, protocol: "UDP" }],
|
706
|
+
all: false,
|
707
|
+
cidrs: [],
|
708
|
+
fqdns: [],
|
709
|
+
services: []
|
710
|
+
});
|
711
|
+
}
|
712
|
+
return {
|
713
|
+
...args2,
|
714
|
+
podSelector: args2.selector ? mapSelectorLikeToSelector(args2.selector) : {},
|
715
|
+
cluster,
|
716
|
+
isolateEgress: args2.isolateEgress ?? false,
|
717
|
+
isolateIngress: args2.isolateIngress ?? false,
|
718
|
+
allowKubeApiServer: args2.allowKubeApiServer ?? false,
|
719
|
+
ingressRules: ingressRules.flatMap((rule) => {
|
720
|
+
const endpoints = normalize(rule?.fromEndpoint, rule?.fromEndpoints);
|
721
|
+
const parsedEndpoints = endpoints.map(parseL34Endpoint);
|
722
|
+
const endpointsNamespaces = groupBy(parsedEndpoints, (endpoint) => {
|
723
|
+
const namespace = isEndpointFromCluster(endpoint, cluster) ? endpoint.metadata["k8s.service"].namespace : "";
|
724
|
+
return namespace;
|
725
|
+
});
|
726
|
+
const l3OnlyRule = endpointsNamespaces[""] ? _NetworkPolicy.getRuleFromEndpoint(void 0, endpointsNamespaces[""], cluster) : void 0;
|
727
|
+
const otherRules = Object.entries(endpointsNamespaces).filter(([key]) => key !== "").map(([, endpoints2]) => {
|
728
|
+
return _NetworkPolicy.getRuleFromEndpoint(void 0, endpoints2, cluster);
|
729
|
+
});
|
730
|
+
return [
|
731
|
+
{
|
732
|
+
all: rule.fromAll ?? false,
|
733
|
+
cidrs: normalize(rule.fromCidr, rule.fromCidrs).concat(l3OnlyRule?.cidrs ?? []),
|
734
|
+
fqdns: [],
|
735
|
+
services: normalize(rule.fromService, rule.fromServices),
|
736
|
+
namespaces: normalize(rule.fromNamespace, rule.fromNamespaces),
|
737
|
+
selectors: normalize(rule.fromSelector, rule.fromSelectors),
|
738
|
+
ports: normalize(rule.toPort, rule.toPorts)
|
739
|
+
},
|
740
|
+
...otherRules
|
741
|
+
].filter((rule2) => !_NetworkPolicy.isEmptyRule(rule2));
|
742
|
+
}),
|
743
|
+
egressRules: egressRules.flatMap((rule) => {
|
744
|
+
const endpoints = normalize(rule?.toEndpoint, rule?.toEndpoints);
|
745
|
+
const parsedEndpoints = endpoints.map(parseL34Endpoint);
|
746
|
+
const endpointsByPortsAnsNamespaces = groupBy(parsedEndpoints, (endpoint) => {
|
747
|
+
const namespace = isEndpointFromCluster(endpoint, cluster) ? endpoint.metadata["k8s.service"].namespace : "";
|
748
|
+
const port = isEndpointFromCluster(endpoint, cluster) ? endpoint.metadata["k8s.service"].targetPort : endpoint.port;
|
749
|
+
return `${port ?? "0"}:${namespace}`;
|
750
|
+
});
|
751
|
+
const l3OnlyRule = endpointsByPortsAnsNamespaces["0:"] ? _NetworkPolicy.getRuleFromEndpoint(
|
752
|
+
void 0,
|
753
|
+
endpointsByPortsAnsNamespaces["0:"],
|
754
|
+
cluster
|
755
|
+
) : void 0;
|
756
|
+
const otherRules = Object.entries(endpointsByPortsAnsNamespaces).filter(([key]) => key !== "0:").map(([key, endpoints2]) => {
|
757
|
+
const [port] = key.split(":");
|
758
|
+
const portNumber = parseInt(port, 10);
|
759
|
+
const portValue = Number.isNaN(portNumber) ? port : portNumber;
|
760
|
+
return _NetworkPolicy.getRuleFromEndpoint(portValue, endpoints2, cluster);
|
761
|
+
});
|
762
|
+
return [
|
763
|
+
{
|
764
|
+
all: rule.toAll ?? false,
|
765
|
+
cidrs: normalize(rule.toCidr, rule.toCidrs).concat(l3OnlyRule?.cidrs ?? []),
|
766
|
+
fqdns: normalize(rule.toFqdn, rule.toFqdns).concat(l3OnlyRule?.fqdns ?? []),
|
767
|
+
services: normalize(rule.toService, rule.toServices),
|
768
|
+
namespaces: normalize(rule.toNamespace, rule.toNamespaces),
|
769
|
+
selectors: normalize(rule.toSelector, rule.toSelectors),
|
770
|
+
ports: normalize(rule.toPort, rule.toPorts)
|
771
|
+
},
|
772
|
+
...otherRules
|
773
|
+
].filter((rule2) => !_NetworkPolicy.isEmptyRule(rule2));
|
774
|
+
}).concat(extraEgressRules)
|
775
|
+
};
|
776
|
+
});
|
777
|
+
this.networkPolicy = output$1(
|
778
|
+
normalizedArgs.apply(async (args2) => {
|
779
|
+
const cluster = args2.cluster;
|
780
|
+
if (cluster.networkPolicyImplRef) {
|
781
|
+
return networkPolicyMediator.call(cluster.networkPolicyImplRef, {
|
782
|
+
name,
|
783
|
+
args: args2
|
784
|
+
});
|
785
|
+
}
|
786
|
+
const nativePolicy = new NativeNetworkPolicy(name, args2, {
|
787
|
+
...opts,
|
788
|
+
parent: this,
|
789
|
+
provider: await getProviderAsync(output$1(args2.namespace).cluster)
|
790
|
+
});
|
791
|
+
return nativePolicy.networkPolicy;
|
792
|
+
})
|
793
|
+
);
|
794
|
+
}
|
795
|
+
static mapCidrFromEndpoint(result) {
|
796
|
+
if (result.type === "ipv4") {
|
797
|
+
return `${result.address}/32`;
|
798
|
+
}
|
799
|
+
return `${result.address}/128`;
|
800
|
+
}
|
801
|
+
static getRuleFromEndpoint(port, endpoints, cluster) {
|
802
|
+
const ports = port ? [{ port, protocol: endpoints[0].protocol?.toUpperCase() }] : [];
|
803
|
+
const cidrs = endpoints.filter((endpoint) => !isEndpointFromCluster(endpoint, cluster)).filter((endpoint) => endpoint.type === "ipv4" || endpoint.type === "ipv6").map(_NetworkPolicy.mapCidrFromEndpoint);
|
804
|
+
const fqdns = endpoints.filter((endpoint) => endpoint.type === "hostname").map((endpoint) => endpoint.hostname);
|
805
|
+
const selectors = endpoints.filter((endpoint) => isEndpointFromCluster(endpoint, cluster)).map((endpoint) => endpoint.metadata["k8s.service"].selector);
|
806
|
+
const namespace = endpoints.filter((endpoint) => isEndpointFromCluster(endpoint, cluster)).map((endpoint) => endpoint.metadata["k8s.service"].namespace)[0];
|
807
|
+
return {
|
808
|
+
all: false,
|
809
|
+
cidrs,
|
810
|
+
fqdns,
|
811
|
+
services: [],
|
812
|
+
namespaces: namespace ? [namespace] : [],
|
813
|
+
selectors,
|
814
|
+
ports
|
815
|
+
};
|
816
|
+
}
|
817
|
+
static isEmptyRule(rule) {
|
818
|
+
return !rule.all && rule.cidrs.length === 0 && rule.fqdns.length === 0 && rule.services.length === 0 && rule.namespaces.length === 0 && rule.selectors.length === 0 && rule.ports.length === 0;
|
819
|
+
}
|
820
|
+
/**
|
821
|
+
* Creates network policy to isolate the namespace by denying all traffic to/from it.
|
822
|
+
*
|
823
|
+
* Automatically names the policy as: `isolate-namespace.{clusterName}.{namespace}.{clusterId}`.
|
824
|
+
*
|
825
|
+
* @param namespace The namespace to isolate.
|
826
|
+
* @param opts Optional resource options.
|
827
|
+
*/
|
828
|
+
static async isolateNamespace(namespace, opts) {
|
829
|
+
const name = await toPromise(output$1(namespace).metadata.name);
|
830
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
831
|
+
return new _NetworkPolicy(
|
832
|
+
`isolate-namespace.${cluster.name}.${name}.${cluster.id}`,
|
833
|
+
{
|
834
|
+
namespace,
|
835
|
+
description: "By default, deny all traffic to/from the namespace.",
|
836
|
+
isolateEgress: true,
|
837
|
+
isolateIngress: true
|
838
|
+
},
|
839
|
+
opts
|
840
|
+
);
|
841
|
+
}
|
842
|
+
/**
|
843
|
+
* Creates network policy to allow all traffic inside the namespace (pod to pod within same namespace).
|
844
|
+
*
|
845
|
+
* Automatically names the policy as: `allow-inside-namespace.{clusterName}.{namespace}.{clusterId}`.
|
846
|
+
*
|
847
|
+
* @param namespace The namespace to create the policy in.
|
848
|
+
* @param opts Optional resource options.
|
849
|
+
*/
|
850
|
+
static async allowInsideNamespace(namespace, opts) {
|
851
|
+
const nsName = await toPromise(output$1(namespace).metadata.name);
|
852
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
853
|
+
return new _NetworkPolicy(
|
854
|
+
`allow-inside-namespace.${cluster.name}.${nsName}.${cluster.id}`,
|
855
|
+
{
|
856
|
+
namespace,
|
857
|
+
description: "Allow all traffic inside the namespace.",
|
858
|
+
ingressRule: { fromNamespace: namespace },
|
859
|
+
egressRule: { toNamespace: namespace }
|
860
|
+
},
|
861
|
+
opts
|
862
|
+
);
|
863
|
+
}
|
864
|
+
/**
|
865
|
+
* Creates network policy to allow traffic from the namespace to the Kubernetes API server.
|
866
|
+
*
|
867
|
+
* Automatically names the policy as: `allow-kube-api-server.{clusterName}.{namespace}.{clusterId}`.
|
868
|
+
*
|
869
|
+
* @param namespace The namespace to create the policy in.
|
870
|
+
* @param opts Optional resource options.
|
871
|
+
*/
|
872
|
+
static async allowKubeApiServer(namespace, opts) {
|
873
|
+
const nsName = await toPromise(output$1(namespace).metadata.name);
|
874
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
875
|
+
return new _NetworkPolicy(
|
876
|
+
`allow-kube-api-server.${cluster.name}.${nsName}.${cluster.id}`,
|
877
|
+
{
|
878
|
+
namespace,
|
879
|
+
description: "Allow all traffic to the Kubernetes API server from the namespace.",
|
880
|
+
allowKubeApiServer: true
|
881
|
+
},
|
882
|
+
opts
|
883
|
+
);
|
884
|
+
}
|
885
|
+
/**
|
886
|
+
* Creates network policy to allow egress DNS traffic (UDP 53) required for name resolution.
|
887
|
+
*
|
888
|
+
* Automatically names the policy as: `allow-kube-dns.{clusterName}.{namespace}.{clusterId}`.
|
889
|
+
*
|
890
|
+
* @param namespace The namespace to create the policy in.
|
891
|
+
* @param opts Optional resource options.
|
892
|
+
*/
|
893
|
+
static async allowKubeDns(namespace, opts) {
|
894
|
+
const nsName = await toPromise(output$1(namespace).metadata.name);
|
895
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
896
|
+
return new _NetworkPolicy(
|
897
|
+
`allow-kube-dns.${cluster.name}.${nsName}.${cluster.id}`,
|
898
|
+
{
|
899
|
+
namespace,
|
900
|
+
description: "Allow all traffic to the Kubernetes DNS server from the namespace.",
|
901
|
+
allowKubeDns: true
|
902
|
+
},
|
903
|
+
opts
|
904
|
+
);
|
905
|
+
}
|
906
|
+
/**
|
907
|
+
* Creates network policy to allow all egress traffic from the namespace.
|
908
|
+
*
|
909
|
+
* Automatically names the policy as: `allow-all-egress.{clusterName}.{namespace}.{clusterId}`.
|
910
|
+
*
|
911
|
+
* @param namespace The namespace to create the policy in.
|
912
|
+
* @param opts Optional resource options.
|
913
|
+
*/
|
914
|
+
static async allowAllEgress(namespace, opts) {
|
915
|
+
const nsName = await toPromise(output$1(namespace).metadata.name);
|
916
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
917
|
+
return new _NetworkPolicy(
|
918
|
+
`allow-all-egress.${cluster.name}.${nsName}.${cluster.id}`,
|
919
|
+
{
|
920
|
+
namespace,
|
921
|
+
description: "Allow all egress traffic from the namespace.",
|
922
|
+
egressRule: { toAll: true }
|
923
|
+
},
|
924
|
+
opts
|
925
|
+
);
|
926
|
+
}
|
927
|
+
/**
|
928
|
+
* Creates network policy to allow all ingress traffic to the namespace.
|
929
|
+
*
|
930
|
+
* Automatically names the policy as: `allow-all-ingress.{clusterName}.{namespace}.{clusterId}`.
|
931
|
+
*
|
932
|
+
* @param namespace The namespace to create the policy in.
|
933
|
+
* @param opts Optional resource options.
|
934
|
+
*/
|
935
|
+
static async allowAllIngress(namespace, opts) {
|
936
|
+
const nsName = await toPromise(output$1(namespace).metadata.name);
|
937
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
938
|
+
return new _NetworkPolicy(
|
939
|
+
`allow-all-ingress.${cluster.name}.${nsName}.${cluster.id}`,
|
940
|
+
{
|
941
|
+
namespace,
|
942
|
+
description: "Allow all ingress traffic to the namespace.",
|
943
|
+
ingressRule: { fromAll: true }
|
944
|
+
},
|
945
|
+
opts
|
946
|
+
);
|
947
|
+
}
|
948
|
+
/**
|
949
|
+
* Creates network policy to allow egress traffic to a specific L3/L4 endpoint.
|
950
|
+
*
|
951
|
+
* Automatically names the policy as: `allow-egress-to-<endpoint>.{clusterName}.{namespace}.{clusterId}`.
|
952
|
+
*
|
953
|
+
* @param namespace The namespace to create the policy in.
|
954
|
+
* @param endpoint The endpoint to allow egress to.
|
955
|
+
* @param opts Optional resource options.
|
956
|
+
*/
|
957
|
+
static async allowEgressToEndpoint(namespace, endpoint, opts) {
|
958
|
+
const parsedEndpoint = parseL34Endpoint(endpoint);
|
959
|
+
const endpointStr = l34EndpointToString(parsedEndpoint).replace(/:/g, "-");
|
960
|
+
const nsName = await toPromise(output$1(namespace).metadata.name);
|
961
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
962
|
+
return new _NetworkPolicy(
|
963
|
+
`allow-egress-to-${endpointStr}.${cluster.name}.${nsName}.${cluster.id}`,
|
964
|
+
{
|
965
|
+
namespace,
|
966
|
+
description: `Allow egress traffic to "${l34EndpointToString(parsedEndpoint)}" from the namespace.`,
|
967
|
+
egressRule: { toEndpoint: endpoint }
|
968
|
+
},
|
969
|
+
opts
|
970
|
+
);
|
971
|
+
}
|
972
|
+
/**
|
973
|
+
* Creates network policy to allow egress traffic to the best endpoint among provided candidates.
|
974
|
+
*
|
975
|
+
* Automatically names the policy as: `allow-egress-to-<bestEndpoint>.{clusterName}.{namespace}.{clusterId}`.
|
976
|
+
*
|
977
|
+
* @param namespace The namespace to create the policy in.
|
978
|
+
* @param endpoints The candidate endpoints to select from.
|
979
|
+
* @param opts Optional resource options.
|
980
|
+
*/
|
981
|
+
static async allowEgressToBestEndpoint(namespace, endpoints, opts) {
|
982
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
983
|
+
const resolvedEndpoints = await toPromise(output$1(endpoints));
|
984
|
+
const bestEndpoint = requireBestEndpoint(resolvedEndpoints.map(parseL34Endpoint), cluster);
|
985
|
+
return await _NetworkPolicy.allowEgressToEndpoint(namespace, bestEndpoint, opts);
|
986
|
+
}
|
987
|
+
/**
|
988
|
+
* Creates network policy to allow ingress traffic from a specific L3/L4 endpoint.
|
989
|
+
*
|
990
|
+
* Automatically names the policy as: `allow-ingress-from-<endpoint>.{clusterName}.{namespace}.{clusterId}`.
|
991
|
+
*
|
992
|
+
* @param namespace The namespace to create the policy in.
|
993
|
+
* @param endpoint The endpoint to allow ingress from.
|
994
|
+
* @param opts Optional resource options.
|
995
|
+
*/
|
996
|
+
static async allowIngressFromEndpoint(namespace, endpoint, opts) {
|
997
|
+
const parsedEndpoint = parseL34Endpoint(endpoint);
|
998
|
+
const endpointStr = l34EndpointToString(parsedEndpoint).replace(/:/g, "-");
|
999
|
+
const nsName = await toPromise(output$1(namespace).metadata.name);
|
1000
|
+
const cluster = await toPromise(output$1(namespace).cluster);
|
1001
|
+
return new _NetworkPolicy(
|
1002
|
+
`allow-ingress-from-${endpointStr}.${cluster.name}.${nsName}.${cluster.id}`,
|
1003
|
+
{
|
1004
|
+
namespace,
|
1005
|
+
description: interpolate$1`Allow ingress traffic from "${l34EndpointToString(parsedEndpoint)}" to the namespace.`,
|
1006
|
+
ingressRule: { fromEndpoint: endpoint }
|
1007
|
+
},
|
1008
|
+
opts
|
1009
|
+
);
|
1010
|
+
}
|
1011
|
+
};
|
1012
|
+
// Renders a normalized NetworkPolicy description into a native Kubernetes
// `networking.k8s.io/v1` NetworkPolicy resource.
var NativeNetworkPolicy = class _NativeNetworkPolicy extends ComponentResource {
  /**
   * The underlying native network policy resource.
   */
  networkPolicy;
  constructor(name, args, opts) {
    super("k8s:native-network-policy", name, args, opts);
    const ingress = _NativeNetworkPolicy.createIngressRules(args);
    const egress = _NativeNetworkPolicy.createEgressRules(args);
    // policyTypes must include a direction even when its rule list is empty,
    // otherwise Kubernetes would not isolate that direction at all
    const policyTypes = [];
    if (ingress.length > 0 || args.isolateIngress) {
      policyTypes.push("Ingress");
    }
    if (egress.length > 0 || args.isolateEgress) {
      policyTypes.push("Egress");
    }
    this.networkPolicy = new networking.v1.NetworkPolicy(
      name,
      {
        metadata: mergeDeep(mapMetadata(args, name), {
          // surface the human-readable description via the well-known annotation
          annotations: args.description ? { "kubernetes.io/description": args.description } : void 0
        }),
        spec: {
          podSelector: args.podSelector,
          ingress,
          egress,
          policyTypes
        }
      },
      { ...opts, parent: this }
    );
  }
  // Catch-all egress block used when FQDN rules target addresses outside the
  // cluster: everything except the RFC 1918 private ranges.
  static fallbackIpBlock = {
    cidr: "0.0.0.0/0",
    except: ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
  };
  // Egress rule allowing DNS queries (UDP 53) to kube-dns in kube-system;
  // required for any FQDN-based rule to be resolvable.
  static fallbackDnsRule = {
    to: [
      {
        namespaceSelector: { matchLabels: { "kubernetes.io/metadata.name": "kube-system" } },
        podSelector: { matchLabels: { "k8s-app": "kube-dns" } }
      }
    ],
    ports: [{ port: 53, protocol: "UDP" }]
  };
  /**
   * Maps normalized ingress rules to native spec entries, deduplicated by
   * JSON representation. A rule with `all` set produces an empty `from`
   * (match everything).
   */
  static createIngressRules(args) {
    return uniqueBy(
      args.ingressRules.map((rule) => ({
        from: rule.all ? [] : _NativeNetworkPolicy.createRulePeers(rule),
        ports: _NativeNetworkPolicy.mapPorts(rule.ports)
      })),
      (rule) => JSON.stringify(rule)
    );
  }
  /**
   * Maps normalized egress rules to native spec entries and appends the
   * implicit extras: kube-dns access when FQDNs are used, the public-internet
   * fallback block for non-cluster-local FQDNs (native policies cannot match
   * DNS names directly), and kube-apiserver access when requested.
   */
  static createEgressRules(args) {
    const extraRules = [];
    const needKubeDns = args.egressRules.some((rule) => rule.fqdns.length > 0);
    if (needKubeDns) {
      extraRules.push(_NativeNetworkPolicy.fallbackDnsRule);
    }
    const needFallback = args.egressRules.some(
      (rule) => rule.fqdns.some((fqdn) => !fqdn.endsWith(".cluster.local"))
    );
    if (needFallback) {
      extraRules.push({ to: [{ ipBlock: _NativeNetworkPolicy.fallbackIpBlock }] });
    }
    if (args.allowKubeApiServer) {
      const { quirks, apiEndpoints } = args.cluster;
      if (quirks?.fallbackKubeApiAccess) {
        // cluster quirk: route API access through a single known IP:port
        extraRules.push({
          to: [{ ipBlock: { cidr: `${quirks?.fallbackKubeApiAccess.serverIp}/32` } }],
          ports: [{ port: quirks?.fallbackKubeApiAccess.serverPort, protocol: "TCP" }]
        });
      } else {
        // otherwise allow each non-hostname API endpoint individually
        const rules = apiEndpoints.filter((endpoint) => endpoint.type !== "hostname").map((endpoint) => ({
          to: [{ ipBlock: { cidr: l3EndpointToCidr(endpoint) } }],
          ports: [{ port: endpoint.port, protocol: "TCP" }]
        }));
        extraRules.push(...rules);
      }
    }
    // NOTE(review): rules whose peers resolve to `undefined` are dropped here,
    // but `createIngressRules` keeps them — confirm the asymmetry is intended,
    // since `to: undefined` would match all destinations.
    return uniqueBy(
      args.egressRules.map((rule) => {
        return {
          to: rule.all ? [] : _NativeNetworkPolicy.createRulePeers(rule),
          ports: _NativeNetworkPolicy.mapPorts(rule.ports)
        };
      }).filter((rule) => rule.to !== void 0).concat(extraRules),
      (rule) => JSON.stringify(rule)
    );
  }
  /**
   * Collects the peers of a rule (CIDRs, services, selectors/namespaces),
   * deduplicated. Returns `undefined` (not `[]`) when there are no peers so
   * callers can distinguish "no peers" from "match everything".
   */
  static createRulePeers(args) {
    const peers = uniqueBy(
      [
        ..._NativeNetworkPolicy.createCidrPeers(args),
        ..._NativeNetworkPolicy.createServicePeers(args),
        ..._NativeNetworkPolicy.createSelectorPeers(args)
      ],
      (peer) => JSON.stringify(peer)
    );
    return peers.length > 0 ? peers : void 0;
  }
  // One ipBlock peer per CIDR.
  static createCidrPeers(args) {
    return args.cidrs.map((cidr) => ({ ipBlock: { cidr } }));
  }
  // One namespace+pod selector peer per referenced service.
  static createServicePeers(args) {
    return args.services.map((service) => {
      const selector = mapServiceToLabelSelector(service);
      return {
        namespaceSelector: mapNamespaceNameToSelector(service.metadata.namespace),
        podSelector: selector
      };
    });
  }
  /**
   * Combines pod selectors and namespaces into peers. With both present,
   * produces the cross product so every selector applies in every namespace.
   */
  static createSelectorPeers(args) {
    const selectorPeers = args.selectors.map((selector) => ({
      podSelector: mapSelectorLikeToSelector(selector)
    }));
    const namespacePeers = args.namespaces.map(_NativeNetworkPolicy.createNamespacePeer);
    if (namespacePeers.length === 0) {
      return selectorPeers;
    }
    if (selectorPeers.length === 0) {
      return namespacePeers;
    }
    return flat(
      selectorPeers.map((selectorPeer) => {
        return namespacePeers.map((namespacePeer) => merge(selectorPeer, namespacePeer));
      })
    );
  }
  // Peer matching all pods of a single namespace by its name label.
  static createNamespacePeer(namespace) {
    const namespaceName = getNamespaceName(namespace);
    const namespaceSelector = mapNamespaceNameToSelector(namespaceName);
    return { namespaceSelector };
  }
  /**
   * Maps normalized ports to native entries: single ports as-is, ranges via
   * `port`/`endPort`. Protocol defaults to TCP.
   */
  static mapPorts(ports) {
    return ports.map((port) => {
      if ("port" in port) {
        return {
          port: port.port,
          protocol: port.protocol ?? "TCP"
        };
      }
      return {
        port: port.range[0],
        endPort: port.range[1],
        protocol: port.protocol ?? "TCP"
      };
    });
  }
};
|
1164
|
+
|
1165
|
+
// src/pod.ts

// Defaults merged into every generated pod spec; service account tokens are
// not mounted unless a workload explicitly opts in.
var podSpecDefaults = {
  automountServiceAccountToken: false
};

// Argument keys handled by the workload wrappers themselves and therefore
// stripped before passing the rest through to the underlying resource.
var workloadExtraArgs = [...commonExtraArgs, "container", "containers"];

// Same, extended with the service/route arguments of exposable workloads.
var exposableWorkloadExtraArgs = [
  ...workloadExtraArgs,
  "service",
  "route",
  "routes"
];
|
1176
|
+
/**
 * Derives the shared building blocks of a workload (labels, containers,
 * volumes, pod spec/template and an optional NetworkPolicy) from its arguments.
 *
 * @param name The workload name, also used as the `app.kubernetes.io/name` label.
 * @param args The workload arguments (containers, namespace, networkPolicy, ...).
 * @param parent A thunk returning the parent resource for created children.
 * @param opts Resource options forwarded to created children.
 * @returns `{ labels, containers, volumes, podSpec, podTemplate, networkPolicy }`,
 *   all lazily computed Outputs.
 */
function getWorkloadComponents(name, args, parent, opts) {
  const labels = {
    "app.kubernetes.io/name": name
  };
  // flatten the container/containers shorthand into one list
  const containers = output(args).apply((args2) => normalize(args2.container, args2.containers));
  // gather volumes declared directly on containers and via volume mounts
  const rawVolumes = containers.apply((containers2) => {
    const containerVolumes = containers2.flatMap(
      (container) => normalize(container.volume, container.volumes)
    );
    const containerVolumeMounts = containers2.flatMap((container) => {
      return normalize(container.volumeMount, container.volumeMounts).map((volumeMount) => {
        return "volume" in volumeMount ? volumeMount.volume : void 0;
      }).filter(Boolean);
    });
    return output([...containerVolumes, ...containerVolumeMounts]);
  });
  // map to pod-spec volume entries, deduplicated by volume name
  const volumes = rawVolumes.apply((rawVolumes2) => {
    return output(rawVolumes2.map(mapWorkloadVolume)).apply(uniqueBy((volume) => volume.name));
  });
  const podSpec = output({
    cluster: output(args.namespace).cluster,
    containers,
    volumes
  }).apply(({ cluster, containers: containers2, volumes: volumes2 }) => {
    const spec = {
      volumes: volumes2,
      containers: containers2.map((container) => mapContainerToRaw(container, cluster, name)),
      ...podSpecDefaults
    };
    // when a container needs a TUN device and the cluster has no device
    // plugin for it, fall back to host-path mounting /dev/net/tun
    if (containers2.some((container) => container.enableTun) && cluster.quirks?.tunDevicePolicy?.type !== "plugin") {
      spec.volumes = output(spec.volumes).apply((volumes3) => [
        ...volumes3 ?? [],
        {
          name: "tun-device",
          hostPath: {
            path: "/dev/net/tun"
          }
        }
      ]);
    }
    return spec;
  });
  // stable hash over the set of volume resource UUIDs; changes whenever the
  // backing resources change
  const dependencyHash = rawVolumes.apply((rawVolumes2) => {
    return output(rawVolumes2.map(getWorkloadVolumeResourceUuid)).apply(filter(isNonNullish)).apply(unique()).apply((ids) => sha256(ids.join(",")));
  });
  const podTemplate = output({ podSpec, dependencyHash }).apply(({ podSpec: podSpec2, dependencyHash: dependencyHash2 }) => {
    return {
      metadata: {
        labels,
        annotations: {
          // to trigger a redeployment when the volumes change
          "highstate.io/dependency-hash": dependencyHash2
        }
      },
      spec: podSpec2
    };
  });
  // create a NetworkPolicy only when containers declare allowed endpoints or
  // an explicit networkPolicy argument is given; otherwise resolve to undefined
  const networkPolicy = output({ containers }).apply(({ containers: containers2 }) => {
    const allowedEndpoints = containers2.flatMap((container) => container.allowedEndpoints ?? []);
    if (allowedEndpoints.length === 0 && !args.networkPolicy) {
      return output(void 0);
    }
    return output(
      new NetworkPolicy(
        name,
        {
          namespace: args.namespace,
          selector: labels,
          description: `Network policy for "${name}"`,
          ...args.networkPolicy,
          // merge container-level allowed endpoints into the explicit egress rules
          egressRules: output(args.networkPolicy?.egressRules).apply((egressRules) => [
            ...egressRules ?? [],
            ...allowedEndpoints.length > 0 ? [{ toEndpoints: allowedEndpoints }] : []
          ])
        },
        { ...opts, parent: parent() }
      )
    );
  });
  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy };
}
|
1257
|
+
/**
 * Extends {@link getWorkloadComponents} with the optional Service and
 * access-point routes of an exposable workload.
 *
 * @param name The workload name.
 * @param args The workload arguments (service, route/routes, existing, ...).
 * @param parent A thunk returning the parent resource for created children.
 * @param opts Resource options forwarded to created children.
 * @returns The base components plus `service` and `routes` Outputs.
 */
function getExposableWorkloadComponents(name, args, parent, opts) {
  const { labels, containers, volumes, podSpec, podTemplate, networkPolicy } = getWorkloadComponents(name, args, parent, opts);
  const service = output({
    existing: args.existing,
    serviceArgs: args.service,
    containers
  }).apply(({ existing, serviceArgs, containers: containers2 }) => {
    // no service requested at all
    if (!args.service && !args.route) {
      return void 0;
    }
    // reuse the service of an existing workload when one is referenced
    if (existing?.service) {
      return Service.for(existing.service, output(args.namespace).cluster);
    }
    // an existing workload without a service: nothing to create
    if (existing) {
      return void 0;
    }
    const ports = containers2.flatMap((container) => normalize(container.port, container.ports));
    return Service.create(name, {
      ...serviceArgs,
      selector: labels,
      namespace: args.namespace,
      ports: (
        // allow to completely override the ports
        !serviceArgs?.port && !serviceArgs?.ports ? ports.map(mapContainerPortToServicePort) : serviceArgs?.ports
      )
    });
  });
  const routes = output({
    routesArgs: normalizeInputs(args.route, args.routes),
    service,
    namespace: output(args.namespace)
  }).apply(({ routesArgs, service: service2, namespace }) => {
    // routes require both route arguments and a service to point at
    if (!routesArgs.length || !service2) {
      return [];
    }
    // never create routes for an adopted (existing) workload
    if (args.existing) {
      return [];
    }
    return routesArgs.map((routeArgs) => {
      return new AccessPointRoute(name, {
        ...routeArgs,
        endpoints: service2.endpoints,
        // pass the native data to the route to allow implementation to use it
        gatewayNativeData: service2,
        tlsCertificateNativeData: namespace
      });
    });
  });
  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy, service, routes };
}
|
1307
|
+
// Base class for Kubernetes workloads (Deployment, StatefulSet, ...) that can
// expose interactive kubectl-based terminals.
var Workload = class extends ScopedResource {
  /**
   * @param terminalArgs Output of terminal options (e.g. the shell to use).
   * @param containers Output of the workload's normalized containers.
   * @param networkPolicy Output of the associated NetworkPolicy, if any.
   * Remaining parameters are forwarded to {@link ScopedResource}.
   */
  constructor(type, name, args, opts, apiVersion, kind, terminalArgs, containers, namespace, metadata, networkPolicy) {
    super(type, name, args, opts, apiVersion, kind, namespace, metadata);
    this.name = name;
    this.terminalArgs = terminalArgs;
    this.containers = containers;
    this.networkPolicy = networkPolicy;
  }
  /**
   * The instance terminal to interact with the deployment.
   *
   * Builds a kubectl terminal spec that lists the workload's pods by label,
   * lets the user pick one via fzf when several exist, and execs a shell into
   * the first container (or the container named after the workload).
   */
  get terminal() {
    // default to the first container's name; fall back to the workload name
    const containerName = output(this.containers).apply((containers) => {
      return containers[0]?.name ?? this.name;
    });
    const shell = this.terminalArgs.apply((args) => args.shell ?? "bash");
    // "key=value,key=value" selector string built from the workload's labels
    const workloadLabelQuery = output(this.metadata).apply((meta) => meta.labels ?? {}).apply(
      (labels) => Object.entries(labels).map(([key, value]) => `${key}=${value}`).join(",")
    );
    return output({
      name: this.metadata.name,
      meta: this.getTerminalMeta(),
      spec: {
        image: images_exports["terminal-kubectl"].image,
        command: ["bash", "/welcome.sh"],
        files: {
          "/kubeconfig": fileFromString("kubeconfig", this.cluster.kubeconfig, { isSecret: true }),
          "/welcome.sh": fileFromString(
            "welcome.sh",
            interpolate`
            #!/bin/bash
            set -euo pipefail

            NAMESPACE="${this.metadata.namespace}"
            RESOURCE_TYPE="${this.kind.apply((k) => k.toLowerCase())}"
            RESOURCE_NAME="${this.metadata.name}"
            CONTAINER_NAME="${containerName}"
            SHELL="${shell}"

            echo "Connecting to $RESOURCE_TYPE \\"$RESOURCE_NAME\\" in namespace \\"$NAMESPACE\\""

            # get all pods for this workload
            PODS=$(kubectl get pods -n "$NAMESPACE" -l "${workloadLabelQuery}" -o jsonpath='{.items[*].metadata.name}' 2>/dev/null || echo "")

            if [ -z "$PODS" ]; then
            echo "No pods found"
            exit 1
            fi

            # convert space-separated string to array
            read -ra POD_ARRAY <<< "$PODS"

            if [ \${#POD_ARRAY[@]} -eq 1 ]; then
            # single pod found, connect directly
            SELECTED_POD="\${POD_ARRAY[0]}"
            echo "Found single pod: $SELECTED_POD"
            else
            # multiple pods found, use fzf for selection
            echo "Found \${#POD_ARRAY[@]} pods. Please select one."

            SELECTED_POD=$(printf '%s\n' "\${POD_ARRAY[@]}" | fzf --prompt="Select pod: " --height 10 --border --info=inline)

            if [ -z "$SELECTED_POD" ]; then
            echo "No pod selected"
            exit 1
            fi

            echo "Selected pod: $SELECTED_POD"
            fi

            # execute into the selected pod
            exec kubectl exec -it -n "$NAMESPACE" "$SELECTED_POD" -c "$CONTAINER_NAME" -- "$SHELL"
            `.apply(trimIndentation)
          )
        },
        env: {
          KUBECONFIG: "/kubeconfig"
        }
      }
    });
  }
  /**
   * Creates a terminal with a custom command.
   *
   * @param meta The metadata for the terminal.
   * @param command The command to run in the terminal.
   * @param spec Additional spec options for the terminal.
   */
  createTerminal(name, meta, command, spec) {
    // same container resolution as in `terminal`
    const containerName = output(this.containers).apply((containers) => {
      return containers[0]?.name ?? this.name;
    });
    return output({
      name,
      meta: output(this.getTerminalMeta()).apply((currentMeta) => ({
        ...currentMeta,
        ...meta
      })),
      spec: {
        image: images_exports["terminal-kubectl"].image,
        // wrap the user command in `kubectl exec` against <kind>/<name>
        command: output(command).apply((command2) => [
          "exec",
          "kubectl",
          "exec",
          "-it",
          "-n",
          this.metadata.namespace,
          interpolate`${this.kind.apply((k) => k.toLowerCase())}/${this.metadata.name}`,
          "-c",
          containerName,
          "--",
          ...command2
        ]),
        files: {
          "/kubeconfig": fileFromString("kubeconfig", this.cluster.kubeconfig, { isSecret: true }),
          ...spec?.files
        },
        env: {
          KUBECONFIG: "/kubeconfig",
          ...spec?.env
        }
      }
    });
  }
};
|
1432
|
+
// Workload that may additionally own a Service and access-point routes.
var ExposableWorkload = class extends Workload {
  /**
   * @param _service Output of the optional Service backing the workload.
   * @param routes Output of the created access-point routes.
   * Remaining parameters are forwarded to {@link Workload}.
   */
  constructor(type, name, args, opts, apiVersion, kind, terminalArgs, containers, namespace, metadata, networkPolicy, _service, routes) {
    super(
      type,
      name,
      args,
      opts,
      apiVersion,
      kind,
      terminalArgs,
      containers,
      namespace,
      metadata,
      networkPolicy
    );
    this._service = _service;
    this.routes = routes;
  }
  /**
   * The service associated with the workload.
   */
  get optionalService() {
    return this._service;
  }
  /**
   * The service associated with the workload.
   *
   * Will throw an error if the service is not available.
   */
  get service() {
    return this._service.apply((service) => {
      if (!service) {
        throw new Error(`The service of the workload "${this.name}" is not available.`);
      }
      return service;
    });
  }
  /**
   * Creates a generic workload or patches the existing one.
   *
   * Dispatches on `args.existing?.type` ("deployment"/"stateful-set", patch
   * path) and otherwise on `args.type` ("Deployment"/"StatefulSet", create
   * path). The concrete classes are loaded lazily from their own chunks to
   * avoid a circular import.
   */
  static createOrPatchGeneric(name, args, opts) {
    return output(args).apply(async (args2) => {
      if (args2.existing?.type === "deployment") {
        const { Deployment } = await import('./deployment-752P6JIT.js');
        return Deployment.patch(
          name,
          {
            ...deepmerge(args2, args2.deployment),
            name: args2.existing.metadata.name,
            namespace: Namespace.forResourceAsync(args2.existing, output(args2.namespace).cluster)
          },
          opts
        );
      }
      if (args2.existing?.type === "stateful-set") {
        const { StatefulSet } = await import('./stateful-set-N64YVKR7.js');
        return StatefulSet.patch(
          name,
          {
            ...deepmerge(args2, args2.statefulSet),
            name: args2.existing.metadata.name,
            namespace: Namespace.forResourceAsync(args2.existing, output(args2.namespace).cluster)
          },
          opts
        );
      }
      // NOTE(review): the casing differs intentionally from the `existing`
      // branch above ("Deployment" vs "deployment") — they are different
      // fields; confirm against the args schema if touching this.
      if (args2.type === "Deployment") {
        const { Deployment } = await import('./deployment-752P6JIT.js');
        return Deployment.create(name, deepmerge(args2, args2.deployment), opts);
      }
      if (args2.type === "StatefulSet") {
        const { StatefulSet } = await import('./stateful-set-N64YVKR7.js');
        return StatefulSet.create(name, deepmerge(args2, args2.statefulSet), opts);
      }
      throw new Error(`Unknown workload type: ${args2.type}`);
    });
  }
};
|
1510
|
+
|
1511
|
+
export { ConfigMap, ExposableWorkload, NativeNetworkPolicy, NetworkPolicy, PersistentVolumeClaim, Workload, exposableWorkloadExtraArgs, getAutoVolumeName, getBestEndpoint, getExposableWorkloadComponents, getWorkloadComponents, getWorkloadVolumeResourceUuid, mapContainerEnvironment, mapContainerToRaw, mapEnvironmentSource, mapVolumeMount, mapWorkloadVolume, networkPolicyMediator, podSpecDefaults, requireBestEndpoint, workloadExtraArgs };
|
1512
|
+
//# sourceMappingURL=chunk-SBC3TUIN.js.map
|