@highstate/k8s 0.9.14 → 0.9.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{chunk-QLQ3QVGT.js → chunk-KBWGED2C.js} +15 -10
- package/dist/chunk-KBWGED2C.js.map +1 -0
- package/dist/{chunk-5S4JPM4M.js → chunk-MKFBWTVZ.js} +36 -4
- package/dist/chunk-MKFBWTVZ.js.map +1 -0
- package/dist/chunk-P2UABKGA.js +1664 -0
- package/dist/chunk-P2UABKGA.js.map +1 -0
- package/dist/chunk-PZ5AY32C.js +10 -0
- package/dist/{chunk-VL7Z5FJQ.js → chunk-QMWFPJQB.js} +23 -3
- package/dist/chunk-QMWFPJQB.js.map +1 -0
- package/dist/{chunk-6L67WIZW.js → chunk-YRC7EI6X.js} +23 -3
- package/dist/chunk-YRC7EI6X.js.map +1 -0
- package/dist/{chunk-Y3LZSX7I.js → chunk-YUMBUWA4.js} +23 -1
- package/dist/chunk-YUMBUWA4.js.map +1 -0
- package/dist/deployment-KOZNZXJA.js +10 -0
- package/dist/highstate.manifest.json +8 -8
- package/dist/index.js +17 -18
- package/dist/index.js.map +1 -1
- package/dist/stateful-set-H5BR3H5D.js +10 -0
- package/dist/stateful-set-H5BR3H5D.js.map +1 -0
- package/dist/units/access-point/index.js +2 -0
- package/dist/units/access-point/index.js.map +1 -1
- package/dist/units/cert-manager/index.js +6 -3
- package/dist/units/cert-manager/index.js.map +1 -1
- package/dist/units/cluster-dns/index.js +2 -0
- package/dist/units/cluster-dns/index.js.map +1 -1
- package/dist/units/cluster-patch/index.js +2 -0
- package/dist/units/cluster-patch/index.js.map +1 -1
- package/dist/units/dns01-issuer/index.js +2 -1
- package/dist/units/dns01-issuer/index.js.map +1 -1
- package/dist/units/existing-cluster/index.js +3 -1
- package/dist/units/existing-cluster/index.js.map +1 -1
- package/dist/units/gateway-api/index.js +2 -1
- package/dist/units/gateway-api/index.js.map +1 -1
- package/package.json +10 -9
- package/src/cluster.ts +14 -9
- package/src/deployment.ts +34 -0
- package/src/helm.ts +38 -2
- package/src/network-policy.ts +2 -5
- package/src/shared.ts +3 -0
- package/src/stateful-set.ts +34 -0
- package/src/workload.ts +22 -3
- package/dist/chunk-5S4JPM4M.js.map +0 -1
- package/dist/chunk-6L67WIZW.js.map +0 -1
- package/dist/chunk-QLQ3QVGT.js.map +0 -1
- package/dist/chunk-SARVLQZY.js +0 -876
- package/dist/chunk-SARVLQZY.js.map +0 -1
- package/dist/chunk-VL7Z5FJQ.js.map +0 -1
- package/dist/chunk-WEKIQRCZ.js +0 -792
- package/dist/chunk-WEKIQRCZ.js.map +0 -1
- package/dist/chunk-Y3LZSX7I.js.map +0 -1
- package/dist/deployment-QTPBNKO5.js +0 -10
- package/dist/stateful-set-K4GV7ZTK.js +0 -10
- package/src/custom.ts +0 -104
- /package/dist/{deployment-QTPBNKO5.js.map → chunk-PZ5AY32C.js.map} +0 -0
- /package/dist/{stateful-set-K4GV7ZTK.js.map → deployment-KOZNZXJA.js.map} +0 -0
@@ -0,0 +1,1664 @@
|
|
1
|
+
import {
|
2
|
+
commonExtraArgs,
|
3
|
+
getProvider,
|
4
|
+
images_exports,
|
5
|
+
mapMetadata,
|
6
|
+
mapNamespaceLikeToNamespaceName,
|
7
|
+
mapNamespaceNameToSelector,
|
8
|
+
mapSelectorLikeToSelector,
|
9
|
+
resourceIdToString,
|
10
|
+
withPatchName
|
11
|
+
} from "./chunk-YUMBUWA4.js";
|
12
|
+
|
13
|
+
// src/service.ts
|
14
|
+
import { core } from "@pulumi/kubernetes";
|
15
|
+
import {
|
16
|
+
ComponentResource,
|
17
|
+
normalize,
|
18
|
+
output
|
19
|
+
} from "@highstate/pulumi";
|
20
|
+
import { omit, uniqueBy } from "remeda";
|
21
|
+
import { deepmerge } from "deepmerge-ts";
|
22
|
+
import { filterEndpoints, l4EndpointToString, parseL3Endpoint } from "@highstate/common";
|
23
|
+
// Highstate-specific args stripped from the raw core/v1 Service spec before merging.
var serviceExtraArgs = [...commonExtraArgs, "port", "ports", "external"];
|
24
|
+
/**
 * Checks whether the endpoint carries attached Kubernetes service metadata.
 */
function hasServiceMetadata(endpoint) {
  const serviceMeta = endpoint.metadata?.k8sService;
  return serviceMeta !== void 0;
}
|
27
|
+
/**
 * Returns the Kubernetes service metadata attached to the endpoint, if any.
 */
function getServiceMetadata(endpoint) {
  const { metadata } = endpoint;
  return metadata?.k8sService;
}
|
30
|
+
/**
 * Returns a copy of the endpoint with the given Kubernetes service metadata
 * attached under `metadata.k8sService` (other metadata keys are preserved).
 */
function withServiceMetadata(endpoint, metadata) {
  const mergedMetadata = { ...endpoint.metadata, k8sService: metadata };
  return { ...endpoint, metadata: mergedMetadata };
}
|
39
|
+
/**
 * Tells whether the endpoint originates from a service in the given cluster,
 * by comparing the attached service metadata's cluster id with `cluster.id`.
 */
function isFromCluster(endpoint, cluster) {
  const serviceMeta = endpoint.metadata?.k8sService;
  return serviceMeta?.clusterId === cluster.id;
}
|
42
|
+
// Base wrapper around a Kubernetes core/v1 Service that exposes Highstate
// metadata and endpoint resolution. Concrete subclasses decide how the
// underlying resource is obtained (created, wrapped, or looked up).
var Service = class extends ComponentResource {
  constructor(type, name, args, opts, cluster, metadata, spec, status) {
    super(type, name, args, opts);
    this.cluster = cluster;
    this.metadata = metadata;
    this.spec = spec;
    this.status = status;
  }
  /**
   * The Highstate service entity.
   */
  get entity() {
    return output({
      type: "k8s.service",
      clusterId: this.cluster.id,
      metadata: this.metadata,
      endpoints: this.endpoints
    });
  }
  // Factory: creates a new core/v1 Service resource.
  static create(name, args, opts) {
    return new CreatedService(name, args, opts);
  }
  // Factory: adopts an already-constructed service output without creating a resource.
  static wrap(name, service, cluster, opts) {
    return new WrappedService(name, service, cluster, opts);
  }
  // Factory: looks up an existing service in the cluster by resource id.
  static get(name, id, cluster, opts) {
    return new ExternalService(name, id, cluster, opts);
  }
  // Factory: resolves a service from a Highstate entity, verifying that the
  // entity actually belongs to the given cluster before using it.
  static of(name, entity, cluster, opts) {
    return new ExternalService(
      name,
      output(entity).metadata,
      output({ cluster, entity }).apply(({ cluster: cluster2, entity: entity2 }) => {
        if (cluster2.id !== entity2.clusterId) {
          throw new Error(
            `Cluster mismatch when wrapping service "${name}": "${cluster2.id}" != "${entity2.clusterId}"`
          );
        }
        return cluster2;
      }),
      opts
    );
  }
  /**
   * Returns the endpoints of the service applying the given filter.
   *
   * If no filter is specified, the default behavior of `filterEndpoints` is used.
   *
   * @param filter If specified, the endpoints are filtered based on the given filter.
   * @returns The endpoints of the service.
   */
  filterEndpoints(filter2) {
    return output({ endpoints: this.endpoints }).apply(({ endpoints }) => {
      return filterEndpoints(endpoints, filter2);
    });
  }
  /**
   * Returns the endpoints of the service including both internal and external endpoints.
   */
  get endpoints() {
    return output({
      cluster: this.cluster,
      metadata: this.metadata,
      spec: this.spec,
      status: this.status
    }).apply(({ cluster, metadata, spec, status }) => {
      // Attached to every produced endpoint so consumers can trace it back to
      // this service.
      // NOTE(review): only spec.ports[0] is used throughout; assumes a
      // non-empty ports list — confirm upstream guarantees this.
      const endpointMetadata = {
        k8sService: {
          clusterId: cluster.id,
          name: metadata.name,
          namespace: metadata.namespace,
          selector: spec.selector,
          targetPort: spec.ports[0].targetPort ?? spec.ports[0].port
        }
      };
      const clusterIpEndpoints = spec.clusterIPs?.map((ip) => ({
        ...parseL3Endpoint(ip),
        visibility: "internal",
        port: spec.ports[0].port,
        protocol: spec.ports[0].protocol?.toLowerCase(),
        metadata: endpointMetadata
      }));
      // NOTE(review): clusterIpEndpoints is undefined when spec.clusterIPs is
      // absent, which would make this .length access throw — confirm
      // clusterIPs is always populated for the services reaching this code.
      if (clusterIpEndpoints.length > 0) {
        // Prepend the stable in-cluster DNS name alongside the raw cluster IPs.
        clusterIpEndpoints.unshift({
          type: "hostname",
          visibility: "internal",
          hostname: `${metadata.name}.${metadata.namespace}.svc.cluster.local`,
          port: spec.ports[0].port,
          protocol: spec.ports[0].protocol?.toLowerCase(),
          metadata: endpointMetadata
        });
      }
      // NodePort services are reachable through every node endpoint of the cluster.
      const nodePortEndpoints = spec.type === "NodePort" ? cluster.endpoints.map((endpoint) => ({
        ...endpoint,
        port: spec.ports[0].nodePort,
        protocol: spec.ports[0].protocol?.toLowerCase(),
        metadata: endpointMetadata
      })) : [];
      // LoadBalancer services expose whatever ingress addresses the cloud reports.
      const loadBalancerEndpoints = spec.type === "LoadBalancer" ? status.loadBalancer?.ingress?.map((endpoint) => ({
        ...parseL3Endpoint(endpoint.ip ?? endpoint.hostname),
        port: spec.ports[0].port,
        protocol: spec.ports[0].protocol?.toLowerCase(),
        metadata: endpointMetadata
      })) : [];
      // Deduplicate by the string form of each L4 endpoint.
      return uniqueBy(
        [
          ...clusterIpEndpoints ?? [],
          ...loadBalancerEndpoints ?? [],
          ...nodePortEndpoints ?? []
        ],
        (endpoint) => l4EndpointToString(endpoint)
      );
    });
  }
};
|
157
|
+
// Service implementation that creates a new core/v1 Service resource from
// Highstate-style args (port/ports, external flag, cluster info).
var CreatedService = class extends Service {
  constructor(name, args, opts) {
    const service = output(args).apply((args2) => {
      return new core.v1.Service(
        name,
        {
          metadata: mapMetadata(args2, name),
          spec: deepmerge(
            {
              ports: normalize(args2.port, args2.ports),
              // NOTE(review): both branches fall back to cluster.externalIps;
              // confirm the non-external branch is meant to set externalIPs at all.
              externalIPs: args2.external ? args2.externalIPs ?? args2.cluster.externalIps : args2.cluster.externalIps,
              type: getServiceType(args2, args2.cluster)
            },
            // Remaining args are passed through verbatim as spec fields.
            omit(args2, serviceExtraArgs)
          )
        },
        // NOTE(review): `parent` is placed before the opts spread, so
        // opts.parent can override it — other resources in this module use
        // { ...opts, parent: this }; confirm which order is intended.
        { parent: this, ...opts }
      );
    });
    super(
      "highstate:k8s:Service",
      name,
      args,
      opts,
      output(args.cluster),
      service.metadata,
      service.spec,
      service.status
    );
  }
};
|
188
|
+
// Service implementation that adopts an existing service output as-is; no
// Kubernetes resource is created or fetched.
var WrappedService = class extends Service {
  constructor(name, service, cluster, opts) {
    super(
      "highstate:k8s:WrappedService",
      name,
      { service, clusterInfo: cluster },
      opts,
      output(cluster),
      output(service).metadata,
      output(service).spec,
      output(service).status
    );
  }
};
|
202
|
+
// Service implementation that reads an existing service from the cluster by
// its resource id.
var ExternalService = class extends Service {
  constructor(name, id, cluster, opts) {
    const service = output(id).apply((id2) => {
      return core.v1.Service.get(
        //
        name,
        resourceIdToString(id2),
        // NOTE(review): unlike the other External* resources in this module,
        // no provider derived from `cluster` is passed here — confirm the
        // default provider is the intended target.
        { ...opts, parent: this }
      );
    });
    super(
      "highstate:k8s:ExternalService",
      name,
      { id, cluster },
      opts,
      output(cluster),
      service.metadata,
      service.spec,
      service.status
    );
  }
};
|
224
|
+
/**
 * Derives a service port entry from a container port: the service listens on
 * the container port and targets that same port.
 */
function mapContainerPortToServicePort(port) {
  const { name, containerPort, protocol } = port;
  return {
    name,
    port: containerPort,
    targetPort: containerPort,
    protocol
  };
}
|
232
|
+
/**
 * Builds a label selector that matches the pods selected by the service.
 */
function mapServiceToLabelSelector(service) {
  const { selector } = service.spec;
  return { matchLabels: selector };
}
|
237
|
+
/**
 * Resolves the Kubernetes service type:
 * - an explicitly requested type always wins;
 * - non-external services are ClusterIP;
 * - external services use LoadBalancer only when the cluster quirk asks for
 *   it, otherwise NodePort.
 */
function getServiceType(service, cluster) {
  const explicitType = service?.type;
  if (explicitType) {
    return explicitType;
  }
  if (!service?.external) {
    return "ClusterIP";
  }
  if (cluster.quirks?.externalServiceType === "LoadBalancer") {
    return "LoadBalancer";
  }
  return "NodePort";
}
|
246
|
+
|
247
|
+
// src/gateway/http-route.ts
|
248
|
+
import {
|
249
|
+
ComponentResource as ComponentResource2,
|
250
|
+
normalize as normalize2,
|
251
|
+
output as output3
|
252
|
+
} from "@highstate/pulumi";
|
253
|
+
import { gateway } from "@highstate/gateway-api";
|
254
|
+
import { map, pipe } from "remeda";
|
255
|
+
|
256
|
+
// src/gateway/backend.ts
|
257
|
+
import "@pulumi/kubernetes";
|
258
|
+
import { output as output2 } from "@highstate/pulumi";
|
259
|
+
/**
 * Normalizes any supported backend reference shape into an Output of
 * { name, namespace, port } for use in HTTPRoute backendRefs.
 */
function resolveBackendRef(ref) {
  // A wrapped Service: take its own metadata and first declared port.
  if (Service.isInstance(ref)) {
    return output2({
      name: ref.metadata.name,
      namespace: ref.metadata.namespace,
      port: ref.spec.ports[0].port
    });
  }
  // A { service, port } pair: service supplies name/namespace, ref supplies the port.
  if ("service" in ref) {
    const serviceOutput = output2(ref.service);
    return output2({
      name: serviceOutput.metadata.name,
      namespace: serviceOutput.metadata.namespace,
      port: ref.port
    });
  }
  // Already a plain { name, namespace, port } reference.
  const { name, namespace, port } = ref;
  return output2({ name, namespace, port });
}
|
281
|
+
|
282
|
+
// src/gateway/http-route.ts
|
283
|
+
// ComponentResource wrapping a Gateway API HTTPRoute. The route is created in
// the namespace of the referenced gateway and attached to it via parentRefs.
var HttpRoute = class extends ComponentResource2 {
  /**
   * The underlying Kubernetes resource.
   */
  route;
  constructor(name, args, opts) {
    super("highstate:k8s:HttpRoute", name, args, opts);
    this.route = output3({
      args,
      gatewayNamespace: output3(args.gateway).metadata.namespace
    }).apply(async ({ args: args2, gatewayNamespace }) => {
      return new gateway.v1.HTTPRoute(
        name,
        {
          // The route must live in the same namespace as its parent gateway.
          metadata: mapMetadata(
            {
              ...args2,
              namespace: gatewayNamespace
            },
            name
          ),
          spec: {
            hostnames: normalize2(args2.hostname, args2.hostnames),
            parentRefs: [
              {
                name: args2.gateway.metadata.name
              }
            ],
            rules: normalize2(args2.rule, args2.rules).map((rule) => ({
              timeouts: rule.timeouts,
              // Matches are normalized, mapped (string shorthand -> PathPrefix),
              // and default to a catch-all "/" prefix when none are given.
              matches: pipe(
                normalize2(rule.match, rule.matches),
                map(mapHttpRouteRuleMatch),
                addDefaultPathMatch
              ),
              filters: normalize2(rule.filter, rule.filters),
              backendRefs: rule.backend ? [resolveBackendRef(rule.backend)] : void 0
            }))
          }
        },
        {
          ...opts,
          parent: this,
          provider: await getProvider(args2.cluster)
        }
      );
    });
  }
};
|
332
|
+
/**
 * Ensures a rule has at least one match: an empty list becomes a single
 * catch-all PathPrefix "/" match; otherwise the list is returned unchanged.
 */
function addDefaultPathMatch(matches) {
  if (matches.length) {
    return matches;
  }
  return [{ path: { type: "PathPrefix", value: "/" } }];
}
|
335
|
+
/**
 * Expands the string shorthand for a rule match into a PathPrefix match;
 * structured matches pass through untouched.
 */
function mapHttpRouteRuleMatch(match) {
  if (typeof match !== "string") {
    return match;
  }
  return { path: { type: "PathPrefix", value: match } };
}
|
341
|
+
|
342
|
+
// src/pvc.ts
|
343
|
+
import { core as core3 } from "@pulumi/kubernetes";
|
344
|
+
import {
|
345
|
+
ComponentResource as ComponentResource3,
|
346
|
+
output as output4
|
347
|
+
} from "@highstate/pulumi";
|
348
|
+
import { deepmerge as deepmerge2 } from "deepmerge-ts";
|
349
|
+
import { omit as omit2 } from "remeda";
|
350
|
+
// Highstate-specific PVC args stripped from the raw spec ("size" is mapped to resources.requests.storage).
var extraPersistentVolumeClaimArgs = [...commonExtraArgs, "size"];
|
351
|
+
// Base wrapper around a Kubernetes PersistentVolumeClaim exposing the
// Highstate entity view; subclasses decide how the claim is obtained.
var PersistentVolumeClaim = class extends ComponentResource3 {
  constructor(type, name, args, opts, cluster, metadata, spec, status) {
    super(type, name, args, opts);
    this.cluster = cluster;
    this.metadata = metadata;
    this.spec = spec;
    this.status = status;
  }
  /**
   * The Highstate PVC entity.
   */
  get entity() {
    return output4({
      type: "k8s.persistent-volume-claim",
      clusterId: this.cluster.id,
      metadata: this.metadata
    });
  }
  // Factory: creates a new PVC resource.
  static create(name, args, opts) {
    return new CreatedPersistentVolumeClaim(name, args, opts);
  }
  // Factory: adopts an existing PVC from a Highstate entity.
  static of(name, entity, cluster, opts) {
    return new ExternalPersistentVolumeClaim(name, output4(entity).metadata, cluster, opts);
  }
  // Factory: creates a new PVC unless args.existing refers to one to reuse.
  static createOrGet(name, args, opts) {
    if (!args.existing) {
      return new CreatedPersistentVolumeClaim(name, args, opts);
    }
    return new ExternalPersistentVolumeClaim(
      name,
      output4(args.existing).metadata,
      args.cluster,
      opts
    );
  }
};
|
387
|
+
// PVC implementation that creates a new core/v1 PersistentVolumeClaim with
// sensible defaults (ReadWriteOnce, 100Mi) merged under the caller's spec.
var CreatedPersistentVolumeClaim = class extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    const pvc = output4(args).apply(async (args2) => {
      return new core3.v1.PersistentVolumeClaim(
        name,
        {
          metadata: mapMetadata(args2, name),
          spec: deepmerge2(
            {
              accessModes: ["ReadWriteOnce"],
              resources: {
                requests: {
                  // Default size when none requested.
                  storage: args2.size ?? "100Mi"
                }
              }
            },
            omit2(args2, extraPersistentVolumeClaimArgs)
          )
        },
        {
          ...opts,
          parent: this,
          provider: await getProvider(args2.cluster)
        }
      );
    });
    super(
      // NOTE(review): this type string lacks the "highstate:" prefix used by
      // every other component in this module — confirm whether that is
      // intentional (changing it would re-register existing Pulumi resources).
      "k8s:PersistentVolumeClaim",
      name,
      args,
      opts,
      output4(args.cluster),
      pvc.metadata,
      pvc.spec,
      pvc.status
    );
  }
};
|
425
|
+
// PVC implementation that reads an existing claim from the cluster by id.
var ExternalPersistentVolumeClaim = class extends PersistentVolumeClaim {
  constructor(name, id, cluster, opts) {
    const pvc = output4(id).apply(async (id2) => {
      return core3.v1.PersistentVolumeClaim.get(
        //
        name,
        resourceIdToString(id2),
        {
          ...opts,
          parent: this,
          provider: await getProvider(cluster)
        }
      );
    });
    super(
      "highstate:k8s:ExternalPersistentVolumeClaim",
      name,
      { id, cluster },
      opts,
      output4(cluster),
      pvc.metadata,
      pvc.spec,
      pvc.status
    );
  }
};
|
451
|
+
|
452
|
+
// src/secret.ts
|
453
|
+
import { core as core4 } from "@pulumi/kubernetes";
|
454
|
+
import {
|
455
|
+
ComponentResource as ComponentResource4,
|
456
|
+
output as output5
|
457
|
+
} from "@pulumi/pulumi";
|
458
|
+
// Base wrapper around a Kubernetes core/v1 Secret; subclasses decide whether
// the secret is created, patched, or fetched.
var Secret = class extends ComponentResource4 {
  constructor(type, name, args, opts, cluster, metadata, data, stringData) {
    super(type, name, args, opts);
    this.cluster = cluster;
    this.metadata = metadata;
    this.data = data;
    this.stringData = stringData;
  }
  /**
   * Creates a new secret.
   */
  static create(name, args, opts) {
    return new CreatedSecret(name, args, opts);
  }
  /**
   * Creates a new secret or patches an existing one.
   *
   * Will throw an error if the secret does not exist when `args.resource` is provided.
   */
  static createOrPatch(name, args, opts) {
    if (!args.existing) {
      return new CreatedSecret(name, args, opts);
    }
    // Patch targets the existing secret's derived patch name and namespace.
    return new SecretPatch(
      name,
      {
        ...args,
        name: withPatchName("secret", args.existing, args.cluster),
        namespace: output5(args.existing).metadata.namespace
      },
      opts
    );
  }
  /**
   * Gets an existing secret.
   *
   * Will throw an error if the secret does not exist.
   */
  static get(name, id, cluster, opts) {
    return new ExternalSecret(name, id, cluster, opts);
  }
};
|
500
|
+
// Secret implementation that creates a new core/v1 Secret resource.
var CreatedSecret = class extends Secret {
  constructor(name, args, opts) {
    const secret = output5(args).apply(async (args2) => {
      return new core4.v1.Secret(
        name,
        {
          metadata: mapMetadata(args2, name),
          data: args2.data,
          stringData: args2.stringData,
          type: args2.type,
          immutable: args2.immutable
        },
        {
          ...opts,
          parent: this,
          provider: await getProvider(args2.cluster)
        }
      );
    });
    super(
      "highstate:k8s:Secret",
      name,
      args,
      opts,
      output5(args.cluster),
      secret.metadata,
      secret.data,
      secret.stringData
    );
  }
};
|
531
|
+
// Secret implementation that patches an existing secret in place via a
// core/v1 SecretPatch resource.
var SecretPatch = class extends Secret {
  constructor(name, args, opts) {
    const secret = output5(args).apply(async (args2) => {
      return new core4.v1.SecretPatch(
        name,
        {
          metadata: mapMetadata(args2, name),
          data: args2.data,
          stringData: args2.stringData,
          type: args2.type,
          immutable: args2.immutable
        },
        {
          ...opts,
          parent: this,
          provider: await getProvider(args2.cluster)
        }
      );
    });
    super(
      "highstate:k8s:SecretPatch",
      name,
      args,
      opts,
      output5(args.cluster),
      secret.metadata,
      secret.data,
      secret.stringData
    );
  }
};
|
562
|
+
// Secret implementation that fetches an existing secret from the cluster.
var ExternalSecret = class extends Secret {
  constructor(name, id, cluster, opts) {
    const secret = output5(id).apply(async (realName) => {
      return core4.v1.Secret.get(
        //
        name,
        realName,
        {
          ...opts,
          parent: this,
          provider: await getProvider(cluster)
        }
      );
    });
    super(
      "highstate:k8s:ExternalSecret",
      name,
      { id, cluster },
      opts,
      output5(cluster),
      secret.metadata,
      secret.data,
      secret.stringData
    );
  }
};
|
588
|
+
|
589
|
+
// src/config-map.ts
|
590
|
+
import { core as core5 } from "@pulumi/kubernetes";
|
591
|
+
import {
|
592
|
+
ComponentResource as ComponentResource5,
|
593
|
+
output as output6
|
594
|
+
} from "@pulumi/pulumi";
|
595
|
+
// Base wrapper around a Kubernetes core/v1 ConfigMap; mirrors the Secret
// hierarchy (create / createOrPatch / get).
var ConfigMap = class extends ComponentResource5 {
  constructor(type, name, args, opts, cluster, metadata, data) {
    super(type, name, args, opts);
    this.cluster = cluster;
    this.metadata = metadata;
    this.data = data;
  }
  /**
   * Creates a new config map.
   */
  static create(name, args, opts) {
    return new CreatedConfigMap(name, args, opts);
  }
  /**
   * Creates a new config map or patches an existing one.
   *
   * Will throw an error if the config map does not exist when `args.resource` is provided.
   */
  static createOrPatch(name, args, opts) {
    if (!args.existing) {
      return new CreatedConfigMap(name, args, opts);
    }
    // Patch targets the existing config map's derived patch name and namespace.
    return new ConfigMapPatch(
      name,
      {
        ...args,
        name: withPatchName("configmap", args.existing, args.cluster),
        namespace: output6(args.existing).metadata.namespace
      },
      opts
    );
  }
  /**
   * Gets an existing config map.
   *
   * Will throw an error if the config map does not exist.
   */
  static get(name, id, cluster, opts) {
    return new ExternalConfigMap(name, id, cluster, opts);
  }
};
|
636
|
+
// ConfigMap implementation that creates a new core/v1 ConfigMap resource.
var CreatedConfigMap = class extends ConfigMap {
  constructor(name, args, opts) {
    const configMap = output6(args).apply(async (args2) => {
      return new core5.v1.ConfigMap(
        name,
        {
          metadata: mapMetadata(args2, name),
          data: args2.data
        },
        {
          ...opts,
          parent: this,
          provider: await getProvider(args2.cluster)
        }
      );
    });
    super(
      "highstate:k8s:ConfigMap",
      name,
      args,
      opts,
      output6(args.cluster),
      configMap.metadata,
      configMap.data
    );
  }
};
|
663
|
+
// ConfigMap implementation that patches an existing config map in place.
var ConfigMapPatch = class extends ConfigMap {
  constructor(name, args, opts) {
    const configMap = output6(args).apply(async (args2) => {
      return new core5.v1.ConfigMapPatch(
        name,
        {
          metadata: mapMetadata(args2, name),
          data: args2.data
        },
        {
          ...opts,
          parent: this,
          provider: await getProvider(args2.cluster)
        }
      );
    });
    super(
      "highstate:k8s:ConfigMapPatch",
      name,
      args,
      opts,
      output6(args.cluster),
      configMap.metadata,
      configMap.data
    );
  }
};
|
690
|
+
// ConfigMap implementation that fetches an existing config map from the cluster.
var ExternalConfigMap = class extends ConfigMap {
  constructor(name, id, cluster, opts) {
    const configMap = output6(id).apply(async (realName) => {
      return core5.v1.ConfigMap.get(name, realName, {
        ...opts,
        parent: this,
        provider: await getProvider(cluster)
      });
    });
    super(
      "highstate:k8s:ExternalConfigMap",
      name,
      { id, cluster },
      opts,
      output6(cluster),
      configMap.metadata,
      configMap.data
    );
  }
};
|
710
|
+
|
711
|
+
// src/container.ts
|
712
|
+
import { core as core6 } from "@pulumi/kubernetes";
|
713
|
+
import {
|
714
|
+
normalize as normalize3,
|
715
|
+
output as output7
|
716
|
+
} from "@highstate/pulumi";
|
717
|
+
import { concat, map as map2, omit as omit3 } from "remeda";
|
718
|
+
// Highstate-specific container args stripped before building the raw spec;
// they are re-expressed as ports / volumeMounts / env / envFrom in
// mapContainerToRaw.
var containerExtraArgs = [
  "port",
  "volumeMount",
  "volume",
  "environment",
  "environmentSource",
  "environmentSources"
];
|
726
|
+
/**
 * Converts a Highstate container definition into a raw Kubernetes container
 * spec: normalizes port/volumeMount/environment shorthands, merges them with
 * any raw env/envFrom the caller provided, and applies cluster-specific
 * handling for TUN device access when `enableTun` is set.
 */
function mapContainerToRaw(container, cluster, fallbackName) {
  const containerName = container.name ?? fallbackName;
  const spec = {
    ...omit3(container, containerExtraArgs),
    name: containerName,
    ports: normalize3(container.port, container.ports),
    volumeMounts: map2(normalize3(container.volumeMount, container.volumeMounts), mapVolumeMount),
    env: concat(
      container.environment ? mapContainerEnvironment(container.environment) : [],
      container.env ?? []
    ),
    envFrom: concat(
      map2(
        normalize3(container.environmentSource, container.environmentSources),
        mapEnvironmentSource
      ),
      container.envFrom ?? []
    )
  };
  if (container.enableTun) {
    spec.securityContext ??= {};
    spec.securityContext.capabilities ??= {};
    // NOTE(review): this overwrites any capabilities.add the caller provided
    // instead of appending NET_ADMIN — confirm that is intended.
    spec.securityContext.capabilities.add = ["NET_ADMIN"];
    if (cluster.quirks?.tunDevicePolicy?.type === "plugin") {
      // Cluster exposes /dev/net/tun via a device plugin: request it as a
      // resource limit instead of mounting the host device.
      spec.resources ??= {};
      spec.resources.limits ??= {};
      spec.resources.limits[cluster.quirks.tunDevicePolicy.resourceName] = cluster.quirks.tunDevicePolicy.resourceValue;
    } else {
      // Fallback: mount the host tun device directly into the container.
      spec.volumeMounts ??= [];
      spec.volumeMounts.push({
        name: "tun-device",
        mountPath: "/dev/net/tun",
        readOnly: false
      });
    }
  }
  return spec;
}
|
764
|
+
/**
 * Converts an environment record into Kubernetes EnvVar entries.
 *
 * Each value may be a plain string, a { secret, key } pair, a
 * { configMap, key } pair, or a raw `valueFrom` source. Falsy values
 * (including empty strings) are dropped.
 */
function mapContainerEnvironment(environment) {
  const envVars = [];
  for (const [name, value] of Object.entries(environment)) {
    if (!value) {
      // Falsy entries are skipped entirely.
      continue;
    }
    if (typeof value === "string") {
      envVars.push({ name, value });
    } else if ("secret" in value) {
      envVars.push({
        name,
        valueFrom: {
          secretKeyRef: { name: value.secret.metadata.name, key: value.key }
        }
      });
    } else if ("configMap" in value) {
      envVars.push({
        name,
        valueFrom: {
          configMapKeyRef: { name: value.configMap.metadata.name, key: value.key }
        }
      });
    } else {
      // Anything else is treated as a raw EnvVarSource.
      envVars.push({ name, valueFrom: value });
    }
  }
  return envVars;
}
|
802
|
+
/**
 * Normalizes a volume mount: when a `volume` reference is embedded, its name
 * is resolved asynchronously via mapWorkloadVolume and the `volume` key is
 * dropped; plain mounts pass through with their own name.
 */
function mapVolumeMount(volumeMount) {
  if (!("volume" in volumeMount)) {
    return {
      ...volumeMount,
      name: volumeMount.name
    };
  }
  const resolvedName = output7(volumeMount.volume)
    .apply(mapWorkloadVolume)
    .apply((volume) => output7(volume.name));
  return omit3({ ...volumeMount, name: resolvedName }, ["volume"]);
}
|
817
|
+
/**
 * Maps a raw ConfigMap or Secret resource to the matching envFrom source
 * reference; anything else is assumed to already be an EnvFromSource.
 */
function mapEnvironmentSource(envFrom) {
  if (envFrom instanceof core6.v1.ConfigMap) {
    return { configMapRef: { name: envFrom.metadata.name } };
  }
  if (envFrom instanceof core6.v1.Secret) {
    return { secretRef: { name: envFrom.metadata.name } };
  }
  return envFrom;
}
|
834
|
+
/**
 * Maps any supported volume-like value (wrapped Highstate resources, raw
 * Pulumi resources, or an already-shaped volume) to a Kubernetes pod volume.
 * Unrecognized values pass through unchanged.
 */
function mapWorkloadVolume(volume) {
  const asPvcVolume = (name) => ({ name, persistentVolumeClaim: { claimName: name } });
  const asSecretVolume = (name) => ({ name, secret: { secretName: name } });
  const asConfigMapVolume = (name) => ({ name, configMap: { name } });
  // Highstate wrapper classes first.
  if (volume instanceof PersistentVolumeClaim) {
    return asPvcVolume(volume.metadata.name);
  }
  if (volume instanceof Secret) {
    return asSecretVolume(volume.metadata.name);
  }
  if (volume instanceof ConfigMap) {
    return asConfigMapVolume(volume.metadata.name);
  }
  // Then raw Pulumi Kubernetes resources.
  if (core6.v1.PersistentVolumeClaim.isInstance(volume)) {
    return asPvcVolume(volume.metadata.name);
  }
  if (core6.v1.ConfigMap.isInstance(volume)) {
    return asConfigMapVolume(volume.metadata.name);
  }
  if (core6.v1.Secret.isInstance(volume)) {
    return asSecretVolume(volume.metadata.name);
  }
  return volume;
}
|
885
|
+
/**
 * Returns the Kubernetes UID of the resource behind a volume-like value, or
 * an Output of undefined when the value is not a recognized resource.
 */
function getWorkloadVolumeResourceUuid(volume) {
  // Highstate wrapper classes all expose metadata.uid.
  if (
    volume instanceof PersistentVolumeClaim ||
    volume instanceof Secret ||
    volume instanceof ConfigMap
  ) {
    return volume.metadata.uid;
  }
  // Raw Pulumi Kubernetes resources do as well.
  if (
    core6.v1.PersistentVolumeClaim.isInstance(volume) ||
    core6.v1.ConfigMap.isInstance(volume) ||
    core6.v1.Secret.isInstance(volume)
  ) {
    return volume.metadata.uid;
  }
  return output7(void 0);
}
|
906
|
+
|
907
|
+
// src/network.ts
|
908
|
+
import { filterEndpoints as filterEndpoints2 } from "@highstate/common";
|
909
|
+
/**
 * Picks the most suitable endpoint: a single candidate wins outright; with a
 * cluster given, an endpoint from that same cluster is preferred; otherwise
 * the first endpoint surviving the default filter is used.
 */
function getBestEndpoint(endpoints, cluster) {
  if (endpoints.length === 0) {
    return void 0;
  }
  if (endpoints.length === 1) {
    return endpoints[0];
  }
  if (cluster) {
    const sameClusterEndpoint = endpoints.find((endpoint) => isFromCluster(endpoint, cluster));
    if (sameClusterEndpoint) {
      return sameClusterEndpoint;
    }
  }
  return filterEndpoints2(endpoints)[0];
}
|
925
|
+
/**
 * Like getBestEndpoint, but throws when no endpoint can be determined.
 *
 * @param endpoints The candidate endpoints.
 * @param cluster The cluster used to prefer same-cluster endpoints; may be
 *   absent, as getBestEndpoint explicitly supports that.
 * @returns The selected endpoint.
 * @throws Error when no endpoint is available.
 */
function requireBestEndpoint(endpoints, cluster) {
  const endpoint = getBestEndpoint(endpoints, cluster);
  if (!endpoint) {
    // Optional chaining keeps the intended Error when cluster is undefined
    // (previously this threw a TypeError on cluster.name instead).
    throw new Error(`No best endpoint found for cluster "${cluster?.name}" (${cluster?.id})`);
  }
  return endpoint;
}
|
932
|
+
|
933
|
+
// src/network-policy.ts
|
934
|
+
import { networking } from "@pulumi/kubernetes";
|
935
|
+
import {
|
936
|
+
ComponentResource as ComponentResource6,
|
937
|
+
interpolate,
|
938
|
+
normalize as normalize4,
|
939
|
+
output as output8
|
940
|
+
} from "@highstate/pulumi";
|
941
|
+
import { capitalize, flat, groupBy, merge, mergeDeep, uniqueBy as uniqueBy2 } from "remeda";
|
942
|
+
import "@highstate/library";
|
943
|
+
import {
|
944
|
+
l34EndpointToString,
|
945
|
+
l3EndpointToCidr,
|
946
|
+
parseL34Endpoint
|
947
|
+
} from "@highstate/common";
|
948
|
+
/**
 * CNI-agnostic network policy component.
 *
 * The constructor normalizes the loosely-typed user args (singular/plural
 * rule fields, endpoint references, selector shorthands) into a canonical
 * rule shape, then delegates resource creation to `this.create` — overridden
 * per CNI implementation (see NativeNetworkPolicy and the dynamic import in
 * the static `create`).
 */
var NetworkPolicy = class _NetworkPolicy extends ComponentResource6 {
  /**
   * The underlying network policy resource.
   */
  networkPolicy;
  constructor(name, args, opts) {
    super("k8s:network-policy", name, args, opts);
    // Normalize args inside an Output so that wrapped (Output-valued)
    // inputs are resolved before rule construction.
    const normalizedArgs = output8(args).apply((args2) => {
      const ingressRules = normalize4(args2.ingressRule, args2.ingressRules);
      const egressRules = normalize4(args2.egressRule, args2.egressRules);
      const extraEgressRules = [];
      if (args2.allowKubeDns) {
        // Canonical rule allowing UDP/53 to the kube-dns pods.
        extraEgressRules.push({
          namespaces: ["kube-system"],
          selectors: [{ matchLabels: { "k8s-app": "kube-dns" } }],
          ports: [{ port: 53, protocol: "UDP" }],
          all: false,
          cidrs: [],
          fqdns: [],
          services: []
        });
      }
      return {
        ...args2,
        podSelector: args2.selector ? mapSelectorLikeToSelector(args2.selector) : {},
        isolateEgress: args2.isolateEgress ?? false,
        isolateIngress: args2.isolateIngress ?? false,
        allowKubeApiServer: args2.allowKubeApiServer ?? false,
        ingressRules: ingressRules.flatMap((rule) => {
          // Endpoint references are parsed, then grouped by the source
          // namespace; endpoints not from this cluster group under "".
          const endpoints = normalize4(rule?.fromEndpoint, rule?.fromEndpoints);
          const parsedEndpoints = endpoints.map(parseL34Endpoint);
          const endpointsNamespaces = groupBy(parsedEndpoints, (endpoint) => {
            const namespace = isFromCluster(endpoint, args2.cluster) ? endpoint.metadata.k8sService.namespace : "";
            return namespace;
          });
          // The "" group (external endpoints) is folded into the main rule
          // as plain CIDRs; per-namespace groups become separate rules.
          const l3OnlyRule = endpointsNamespaces[""] ? _NetworkPolicy.getRuleFromEndpoint(void 0, endpointsNamespaces[""], args2.cluster) : void 0;
          const otherRules = Object.entries(endpointsNamespaces).filter(([key]) => key !== "").map(([, endpoints2]) => {
            return _NetworkPolicy.getRuleFromEndpoint(void 0, endpoints2, args2.cluster);
          });
          return [
            {
              all: rule.fromAll ?? false,
              cidrs: normalize4(rule.fromCidr, rule.fromCidrs).concat(l3OnlyRule?.cidrs ?? []),
              fqdns: [],
              services: normalize4(rule.fromService, rule.fromServices),
              namespaces: normalize4(rule.fromNamespace, rule.fromNamespaces),
              selectors: normalize4(rule.fromSelector, rule.fromSelectors),
              ports: normalize4(rule.toPort, rule.toPorts)
            },
            ...otherRules
          ].filter((rule2) => !_NetworkPolicy.isEmptyRule(rule2));
        }),
        egressRules: egressRules.flatMap((rule) => {
          const endpoints = normalize4(rule?.toEndpoint, rule?.toEndpoints);
          const parsedEndpoints = endpoints.map(parseL34Endpoint);
          // Egress additionally groups by target port ("<port>:<namespace>"),
          // since each distinct port needs its own rule entry.
          const endpointsByPortsAnsNamespaces = groupBy(parsedEndpoints, (endpoint) => {
            const namespace = isFromCluster(endpoint, args2.cluster) ? endpoint.metadata.k8sService.namespace : "";
            const port = isFromCluster(endpoint, args2.cluster) ? endpoint.metadata.k8sService.targetPort : endpoint.port;
            return `${port ?? "0"}:${namespace}`;
          });
          // "0:" = external endpoints with no specific port; merged into the
          // base rule's cidrs/fqdns rather than emitted separately.
          const l3OnlyRule = endpointsByPortsAnsNamespaces["0:"] ? _NetworkPolicy.getRuleFromEndpoint(
            void 0,
            endpointsByPortsAnsNamespaces["0:"],
            args2.cluster
          ) : void 0;
          const otherRules = Object.entries(endpointsByPortsAnsNamespaces).filter(([key]) => key !== "0:").map(([key, endpoints2]) => {
            const [port] = key.split(":");
            const portNumber = parseInt(port, 10);
            // Keep named (non-numeric) ports as strings.
            const portValue = isNaN(portNumber) ? port : portNumber;
            return _NetworkPolicy.getRuleFromEndpoint(portValue, endpoints2, args2.cluster);
          });
          return [
            {
              all: rule.toAll ?? false,
              cidrs: normalize4(rule.toCidr, rule.toCidrs).concat(l3OnlyRule?.cidrs ?? []),
              fqdns: normalize4(rule.toFqdn, rule.toFqdns).concat(l3OnlyRule?.fqdns ?? []),
              services: normalize4(rule.toService, rule.toServices),
              namespaces: normalize4(rule.toNamespace, rule.toNamespaces),
              selectors: normalize4(rule.toSelector, rule.toSelectors),
              ports: normalize4(rule.toPort, rule.toPorts)
            },
            ...otherRules
          ].filter((rule2) => !_NetworkPolicy.isEmptyRule(rule2));
        }).concat(extraEgressRules)
      };
    });
    // Instantiate the concrete policy once the provider for the target
    // cluster is resolved; `this.create` is the per-CNI hook.
    this.networkPolicy = output8(
      normalizedArgs.apply(async (args2) => {
        return output8(
          this.create(name, args2, {
            ...opts,
            parent: this,
            provider: await getProvider(args2.cluster)
          })
        );
      })
    );
  }
  // Converts a parsed L3 endpoint to a single-host CIDR.
  static mapCidrFromEndpoint(result) {
    if (result.type === "ipv4") {
      return `${result.address}/32`;
    }
    return `${result.address}/128`;
  }
  /**
   * Builds one canonical rule from a group of parsed endpoints:
   * external endpoints become CIDRs/FQDNs, in-cluster endpoints become
   * pod selectors scoped to their (single) namespace.
   * NOTE(review): only the first in-cluster endpoint's namespace is used —
   * callers group by namespace beforehand, so groups are homogeneous.
   */
  static getRuleFromEndpoint(port, endpoints, cluster) {
    const ports = port ? [{ port, protocol: endpoints[0].protocol?.toUpperCase() }] : [];
    const cidrs = endpoints.filter((endpoint) => !isFromCluster(endpoint, cluster)).filter((endpoint) => endpoint.type === "ipv4" || endpoint.type === "ipv6").map(_NetworkPolicy.mapCidrFromEndpoint);
    const fqdns = endpoints.filter((endpoint) => endpoint.type === "hostname").map((endpoint) => endpoint.hostname);
    const selectors = endpoints.filter((endpoint) => isFromCluster(endpoint, cluster)).map((endpoint) => endpoint.metadata.k8sService.selector);
    const namespace = endpoints.filter((endpoint) => isFromCluster(endpoint, cluster)).map((endpoint) => getServiceMetadata(endpoint)?.namespace)[0];
    return {
      all: false,
      cidrs,
      fqdns,
      services: [],
      namespaces: namespace ? [namespace] : [],
      selectors,
      ports
    };
  }
  // A rule that matches nothing is dropped to avoid emitting no-op entries.
  static isEmptyRule(rule) {
    return !rule.all && rule.cidrs.length === 0 && rule.fqdns.length === 0 && rule.services.length === 0 && rule.namespaces.length === 0 && rule.selectors.length === 0 && rule.ports.length === 0;
  }
  /**
   * Factory selecting the CNI-specific implementation:
   * "other" -> native Kubernetes NetworkPolicy; otherwise the class
   * `<Cni>NetworkPolicy` is loaded dynamically from `@highstate/<cni>`.
   */
  static create(name, args, opts) {
    return output8(args).apply(async (args2) => {
      const cni = args2.cluster.cni;
      if (cni === "other") {
        return new NativeNetworkPolicy(name, args2, opts);
      }
      const implName = `${capitalize(cni)}NetworkPolicy`;
      const implModule = await import(`@highstate/${cni}`);
      const implClass = implModule[implName];
      if (!implClass) {
        throw new Error(`No implementation found for ${cni}`);
      }
      return new implClass(name, args2, opts);
    });
  }
  // Convenience constructor: default-deny all ingress and egress.
  static isolate(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "isolate",
      {
        namespace,
        cluster,
        description: "By default, deny all traffic to/from the namespace.",
        isolateEgress: true,
        isolateIngress: true
      },
      opts
    );
  }
  // Convenience constructor: allow all intra-namespace traffic.
  static allowInsideNamespace(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-inside-namespace",
      {
        namespace,
        cluster,
        description: "Allow all traffic inside the namespace.",
        selector: {},
        ingressRule: { fromNamespace: namespace },
        egressRule: { toNamespace: namespace }
      },
      opts
    );
  }
  // Convenience constructor: allow egress to the Kubernetes API server.
  static allowKubeApiServer(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-kube-api-server",
      {
        namespace,
        cluster,
        description: "Allow all traffic to the Kubernetes API server from the namespace.",
        allowKubeApiServer: true
      },
      opts
    );
  }
  // Convenience constructor: allow egress to kube-dns.
  static allowKubeDns(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-kube-dns",
      {
        namespace,
        cluster,
        description: "Allow all traffic to the Kubernetes DNS server from the namespace.",
        allowKubeDns: true
      },
      opts
    );
  }
  // Convenience constructor: allow all egress from the namespace.
  static allowAllEgress(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-all-egress",
      {
        namespace,
        cluster,
        description: "Allow all egress traffic from the namespace.",
        egressRule: { toAll: true }
      },
      opts
    );
  }
  // Convenience constructor: allow all ingress to the namespace.
  static allowAllIngress(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-all-ingress",
      {
        namespace,
        cluster,
        description: "Allow all ingress traffic to the namespace.",
        ingressRule: { fromAll: true }
      },
      opts
    );
  }
  // Convenience constructor: allow egress to a single endpoint.
  // The ":" in "host:port" is replaced to keep the resource name valid.
  static allowEgressToEndpoint(endpoint, namespace, cluster, opts) {
    const parsedEndpoint = parseL34Endpoint(endpoint);
    return _NetworkPolicy.create(
      `allow-egress-to-${l34EndpointToString(parsedEndpoint).replace(":", "-")}`,
      {
        namespace,
        cluster,
        description: interpolate`Allow egress traffic to "${l34EndpointToString(parsedEndpoint)}" from the namespace.`,
        egressRule: { toEndpoint: endpoint }
      },
      opts
    );
  }
  // Convenience constructor: pick the best endpoint, then allow egress to it.
  static allowEgressToBestEndpoint(endpoints, namespace, cluster, opts) {
    return output8({ endpoints, cluster }).apply(({ endpoints: endpoints2, cluster: cluster2 }) => {
      const bestEndpoint = requireBestEndpoint(endpoints2.map(parseL34Endpoint), cluster2);
      return _NetworkPolicy.allowEgressToEndpoint(bestEndpoint, namespace, cluster2, opts);
    });
  }
  // Convenience constructor: allow ingress from a single endpoint.
  static allowIngressFromEndpoint(endpoint, namespace, cluster, opts) {
    const parsedEndpoint = parseL34Endpoint(endpoint);
    return _NetworkPolicy.create(
      `allow-ingress-from-${l34EndpointToString(parsedEndpoint)}`,
      {
        namespace,
        cluster,
        description: interpolate`Allow ingress traffic from "${l34EndpointToString(parsedEndpoint)}" to the namespace.`,
        ingressRule: { fromEndpoint: endpoint }
      },
      opts
    );
  }
};
|
1194
|
+
/**
 * NetworkPolicy implementation backed by the native Kubernetes
 * `networking.k8s.io/v1` NetworkPolicy resource (used when the cluster's
 * CNI has no dedicated implementation).
 *
 * Native policies cannot express FQDN rules; FQDN-based egress is
 * approximated with a broad fallback IP block (see createEgressRules).
 */
var NativeNetworkPolicy = class _NativeNetworkPolicy extends NetworkPolicy {
  create(name, args, opts) {
    const ingress = _NativeNetworkPolicy.createIngressRules(args);
    const egress = _NativeNetworkPolicy.createEgressRules(args);
    // policyTypes must list a direction for isolation to apply even when
    // that direction has zero rules.
    const policyTypes = [];
    if (ingress.length > 0 || args.isolateIngress) {
      policyTypes.push("Ingress");
    }
    if (egress.length > 0 || args.isolateEgress) {
      policyTypes.push("Egress");
    }
    return new networking.v1.NetworkPolicy(
      name,
      {
        metadata: mergeDeep(mapMetadata(args, name), {
          annotations: args.description ? { "kubernetes.io/description": args.description } : void 0
        }),
        spec: {
          podSelector: args.podSelector,
          ingress,
          egress,
          policyTypes
        }
      },
      opts
    );
  }
  // Broad non-RFC1918 egress allowance used when FQDN rules target hosts
  // outside the cluster (native policies cannot match on DNS names).
  static fallbackIpBlock = {
    cidr: "0.0.0.0/0",
    except: ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
  };
  // Egress to kube-dns on UDP/53, required for resolving FQDN rules.
  static fallbackDnsRule = {
    to: [
      {
        namespaceSelector: { matchLabels: { "kubernetes.io/metadata.name": "kube-system" } },
        podSelector: { matchLabels: { "k8s-app": "kube-dns" } }
      }
    ],
    ports: [{ port: 53, protocol: "UDP" }]
  };
  // Maps canonical ingress rules to native spec entries, deduplicated by
  // their JSON representation. `all: true` rules emit an empty `from`
  // (match everything).
  static createIngressRules(args) {
    return uniqueBy2(
      args.ingressRules.map((rule) => ({
        from: rule.all ? [] : _NativeNetworkPolicy.createRulePeers(rule),
        ports: _NativeNetworkPolicy.mapPorts(rule.ports)
      })),
      (rule) => JSON.stringify(rule)
    );
  }
  // Maps canonical egress rules to native spec entries, appending the
  // DNS/FQDN fallbacks and kube-apiserver rules as needed.
  static createEgressRules(args) {
    const extraRules = [];
    // Any FQDN rule requires DNS egress to resolve names at runtime.
    const needKubeDns = args.egressRules.some((rule) => rule.fqdns.length > 0);
    if (needKubeDns) {
      extraRules.push(_NativeNetworkPolicy.fallbackDnsRule);
    }
    // FQDNs outside the cluster cannot be matched natively — fall back to
    // allowing all public (non-private) destinations.
    const needFallback = args.egressRules.some(
      (rule) => rule.fqdns.some((fqdn) => !fqdn.endsWith(".cluster.local"))
    );
    if (needFallback) {
      extraRules.push({ to: [{ ipBlock: _NativeNetworkPolicy.fallbackIpBlock }] });
    }
    if (args.allowKubeApiServer) {
      const { quirks, apiEndpoints } = args.cluster;
      if (quirks?.fallbackKubeApiAccess) {
        // Cluster quirk: API server only reachable via a fixed IP:port.
        extraRules.push({
          to: [{ ipBlock: { cidr: `${quirks?.fallbackKubeApiAccess.serverIp}/32` } }],
          ports: [{ port: quirks?.fallbackKubeApiAccess.serverPort, protocol: "TCP" }]
        });
      } else {
        // One rule per IP-based API endpoint; hostnames are not expressible.
        const rules = apiEndpoints.filter((endpoint) => endpoint.type !== "hostname").map((endpoint) => ({
          to: [{ ipBlock: { cidr: l3EndpointToCidr(endpoint) } }],
          ports: [{ port: endpoint.port, protocol: "TCP" }]
        }));
        extraRules.push(...rules);
      }
    }
    return uniqueBy2(
      args.egressRules.map((rule) => {
        return {
          to: rule.all ? [] : _NativeNetworkPolicy.createRulePeers(rule),
          ports: _NativeNetworkPolicy.mapPorts(rule.ports)
        };
      }).filter((rule) => rule.to !== void 0).concat(extraRules),
      (rule) => JSON.stringify(rule)
    );
  }
  // Collects all peer entries for one rule; returns undefined (not []) when
  // empty so the caller can drop the rule instead of matching everything.
  static createRulePeers(args) {
    const peers = uniqueBy2(
      [
        ..._NativeNetworkPolicy.createCidrPeers(args),
        ..._NativeNetworkPolicy.createServicePeers(args),
        ..._NativeNetworkPolicy.createSelectorPeers(args)
      ],
      (peer) => JSON.stringify(peer)
    );
    return peers.length > 0 ? peers : void 0;
  }
  static createCidrPeers(args) {
    return args.cidrs.map((cidr) => ({ ipBlock: { cidr } }));
  }
  // A service peer targets the service's pods in its own namespace.
  static createServicePeers(args) {
    return args.services.map((service) => {
      const selector = mapServiceToLabelSelector(service);
      return {
        namespaceSelector: mapNamespaceNameToSelector(service.metadata.namespace),
        podSelector: selector
      };
    });
  }
  // Combines pod selectors and namespaces: with both present, emits the
  // cross product so each selector applies in each namespace.
  static createSelectorPeers(args) {
    const selectorPeers = args.selectors.map((selector) => ({
      podSelector: mapSelectorLikeToSelector(selector)
    }));
    const namespacePeers = args.namespaces.map(_NativeNetworkPolicy.createNamespacePeer);
    if (namespacePeers.length === 0) {
      return selectorPeers;
    }
    if (selectorPeers.length === 0) {
      return namespacePeers;
    }
    return flat(
      selectorPeers.map((selectorPeer) => {
        return namespacePeers.map((namespacePeer) => merge(selectorPeer, namespacePeer));
      })
    );
  }
  static createNamespacePeer(namespace) {
    const namespaceName = mapNamespaceLikeToNamespaceName(namespace);
    const namespaceSelector = mapNamespaceNameToSelector(namespaceName);
    return { namespaceSelector };
  }
  // Maps canonical port entries (single port or [start, end] range) to
  // native NetworkPolicyPort objects; protocol defaults to TCP.
  static mapPorts(ports) {
    return ports.map((port) => {
      if ("port" in port) {
        return {
          port: port.port,
          protocol: port.protocol ?? "TCP"
        };
      }
      return {
        port: port.range[0],
        endPort: port.range[1],
        protocol: port.protocol ?? "TCP"
      };
    });
  }
};
|
1341
|
+
|
1342
|
+
// src/workload.ts
|
1343
|
+
import {
|
1344
|
+
normalize as normalize5
|
1345
|
+
} from "@highstate/pulumi";
|
1346
|
+
import {
|
1347
|
+
ComponentResource as ComponentResource7,
|
1348
|
+
interpolate as interpolate2,
|
1349
|
+
output as output9
|
1350
|
+
} from "@pulumi/pulumi";
|
1351
|
+
import { filter, isNonNullish, unique, uniqueBy as uniqueBy3 } from "remeda";
|
1352
|
+
import { deepmerge as deepmerge3 } from "deepmerge-ts";
|
1353
|
+
import { sha256 } from "crypto-hash";
|
1354
|
+
|
1355
|
+
// src/pod.ts
// Defaults merged into every generated pod spec; service-account token
// mounting is opt-in for security.
var podSpecDefaults = {
  automountServiceAccountToken: false
};

// src/workload.ts
// Arg keys consumed by the workload factories themselves and therefore
// stripped before mapping the rest to raw Kubernetes fields.
var workloadExtraArgs = [...commonExtraArgs, "container", "containers"];
var exposableWorkloadExtraArgs = [...workloadExtraArgs, "service", "httpRoute"];
|
1363
|
+
/**
 * Builds the shared pieces of a workload (Deployment/StatefulSet/etc.):
 * selector labels, normalized container list, deduplicated volumes, pod
 * spec/template, and an optional NetworkPolicy.
 *
 * @param name   workload name; also used as the `app.kubernetes.io/name` label.
 * @param parent lazy thunk returning the parent resource — deferred because
 *               the parent may not exist yet at call time.
 * @returns { labels, containers, volumes, podSpec, podTemplate, networkPolicy }
 *          where all but `labels` are Pulumi Outputs.
 */
function getWorkloadComponents(name, args, parent, opts) {
  const labels = {
    "app.kubernetes.io/name": name
  };
  const containers = output9(args).apply((args2) => normalize5(args2.container, args2.containers));
  // Gather every volume referenced either directly on a container or via a
  // `volumeMount.volume` shorthand.
  const rawVolumes = containers.apply((containers2) => {
    const containerVolumes = containers2.flatMap(
      (container) => normalize5(container.volume, container.volumes)
    );
    const containerVolumeMounts = containers2.flatMap((container) => {
      return normalize5(container.volumeMount, container.volumeMounts).map((volumeMount) => {
        return "volume" in volumeMount ? volumeMount.volume : void 0;
      }).filter(Boolean);
    });
    return output9([...containerVolumes, ...containerVolumeMounts]);
  });
  // Map to raw volume specs and deduplicate by name (the same volume may be
  // referenced by several containers/mounts).
  const volumes = rawVolumes.apply((rawVolumes2) => {
    return output9(rawVolumes2.map(mapWorkloadVolume)).apply(uniqueBy3((volume) => volume.name));
  });
  const podSpec = output9({ args, containers, volumes }).apply(({ args: args2, containers: containers2, volumes: volumes2 }) => {
    const spec = {
      volumes: volumes2,
      containers: containers2.map((container) => mapContainerToRaw(container, args2.cluster, name)),
      ...podSpecDefaults
    };
    // Clusters without a tun device plugin get /dev/net/tun host-mounted
    // for containers that request TUN access.
    if (containers2.some((container) => container.enableTun) && args2.cluster.quirks?.tunDevicePolicy?.type !== "plugin") {
      spec.volumes = output9(spec.volumes).apply((volumes3) => [
        ...volumes3 ?? [],
        {
          name: "tun-device",
          hostPath: {
            path: "/dev/net/tun"
          }
        }
      ]);
    }
    return spec;
  });
  // Hash of the UIDs of referenced PVCs/Secrets/ConfigMaps; stamped as an
  // annotation so pods roll when any backing resource is replaced.
  const dependencyHash = rawVolumes.apply((rawVolumes2) => {
    return output9(rawVolumes2.map(getWorkloadVolumeResourceUuid)).apply(filter(isNonNullish)).apply(unique()).apply((ids) => sha256(ids.join(",")));
  });
  const podTemplate = output9({ podSpec, dependencyHash }).apply(({ podSpec: podSpec2, dependencyHash: dependencyHash2 }) => {
    return {
      metadata: {
        labels,
        annotations: {
          "highstate.io/dependency-hash": dependencyHash2
        }
      },
      spec: podSpec2
    };
  });
  // Create a NetworkPolicy only when the caller configured one or a
  // container declared allowedEndpoints.
  const networkPolicy = output9({ args, containers }).apply(({ args: args2, containers: containers2 }) => {
    const allowedEndpoints = containers2.flatMap((container) => container.allowedEndpoints ?? []);
    if (allowedEndpoints.length === 0 && !args2.networkPolicy) {
      return output9(void 0);
    }
    return NetworkPolicy.create(
      name,
      {
        cluster: args2.cluster,
        namespace: args2.namespace,
        selector: labels,
        ...args2.networkPolicy,
        egressRules: [
          ...args2.networkPolicy?.egressRules ?? [],
          ...allowedEndpoints.length > 0 ? [{ toEndpoints: allowedEndpoints }] : []
        ]
      },
      { ...opts, parent: parent() }
    );
  });
  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy };
}
|
1437
|
+
/**
 * Extends getWorkloadComponents with an optional Service and HttpRoute for
 * workloads that can be exposed.
 *
 * Service resolution: adopt an existing service when `args.existing.service`
 * is given; create nothing for other `existing` workloads; otherwise create
 * a Service whose ports default to the containers' declared ports.
 *
 * @returns the base components plus `service` and `httpRoute` Outputs
 *          (each may resolve to undefined).
 */
function getExposableWorkloadComponents(name, args, parent, opts) {
  const { labels, containers, volumes, podSpec, podTemplate, networkPolicy } = getWorkloadComponents(name, args, parent, opts);
  const service = output9({ args, containers }).apply(async ({ args: args2, containers: containers2 }) => {
    // No service requested in any form.
    if (!args2.service && !args2.httpRoute) {
      return void 0;
    }
    // Wrap the service of an existing (externally managed) workload.
    if (args2.existing?.service) {
      return Service.of(name, args2.existing.service, args2.cluster, { ...opts, parent: parent() });
    }
    if (args2.existing) {
      return void 0;
    }
    const ports = containers2.flatMap((container) => normalize5(container.port, container.ports));
    return Service.create(
      name,
      {
        ...args2.service,
        selector: labels,
        cluster: args2.cluster,
        namespace: args2.namespace,
        ports: (
          // allow to completely override the ports
          !args2.service?.port && !args2.service?.ports ? ports.map(mapContainerPortToServicePort) : args2.service?.ports
        )
      },
      {
        ...opts,
        parent: parent(),
        provider: await getProvider(args2.cluster)
      }
    );
  });
  // An HttpRoute is only created for newly managed workloads that both
  // requested one and got a service to back it.
  const httpRoute = output9({
    args,
    service
  }).apply(async ({ args: args2, service: service2 }) => {
    if (!args2.httpRoute || !service2) {
      return void 0;
    }
    if (args2.existing) {
      return void 0;
    }
    return new HttpRoute(
      name,
      {
        ...args2.httpRoute,
        cluster: args2.cluster,
        rule: {
          backend: service2
        }
      },
      {
        ...opts,
        parent: parent(),
        provider: await getProvider(args2.cluster)
      }
    );
  });
  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy, service, httpRoute };
}
|
1497
|
+
/**
 * Base component for Kubernetes workloads (deployment, statefulset, ...).
 * Stores the resolved args/metadata and derives an interactive terminal
 * definition for `kubectl exec` access.
 */
var Workload = class _Workload extends ComponentResource7 {
  constructor(type, name, args, opts, resourceType, cluster, metadata, networkPolicy) {
    super(type, name, args, opts);
    this.name = name;
    this.args = args;
    // e.g. "deployment" | "statefulset" | "daemonset"
    this.resourceType = resourceType;
    this.cluster = cluster;
    this.metadata = metadata;
    this.networkPolicy = networkPolicy;
  }
  /**
   * The instance terminal to interact with the deployment.
   */
  get terminal() {
    // Target the first declared container, falling back to the workload name.
    const containerName = output9(this.args).apply((args) => {
      const containers = normalize5(args.container, args.containers);
      return containers[0]?.name ?? this.name;
    });
    return output9({
      name: this.metadata.name,
      title: interpolate2`${_Workload.getResourceDisplayType(this.resourceType)} (${this.metadata.name})`,
      description: "Connect to the container of the workload",
      icon: "devicon:kubernetes",
      image: images_exports["terminal-kubectl"].image,
      command: [
        "exec",
        "kubectl",
        "exec",
        "-it",
        "-n",
        this.metadata.namespace,
        // kubectl accepts "<resourceType>/<name>" to exec into a workload's pod.
        interpolate2`${this.resourceType}/${this.metadata.name}`,
        "-c",
        containerName,
        "--",
        this.args.terminalShell ?? "bash"
      ],
      files: {
        "/kubeconfig": this.cluster.kubeconfig
      },
      env: {
        KUBECONFIG: "/kubeconfig"
      }
    });
  }
  // Human-readable label for a resource type ("deployment" -> "Deployment").
  static getResourceDisplayType(resourceType) {
    switch (resourceType) {
      case "deployment":
        return "Deployment";
      case "statefulset":
        return "StatefulSet";
      case "daemonset":
        return "DaemonSet";
      default:
        return resourceType.charAt(0).toUpperCase() + resourceType.slice(1);
    }
  }
};
|
1555
|
+
/**
 * Workload that may additionally carry a Service and an HttpRoute.
 * The `service`/`httpRoute` getters throw when absent; use the
 * `optional*` getters when absence is expected.
 */
var ExposableWorkload = class extends Workload {
  constructor(type, name, args, opts, resourceType, cluster, metadata, networkPolicy, _service, _httpRoute) {
    super(type, name, args, opts, resourceType, cluster, metadata, networkPolicy);
    this.name = name;
    // Outputs that may resolve to undefined.
    this._service = _service;
    this._httpRoute = _httpRoute;
  }
  /**
   * The service associated with the workload.
   */
  get optionalService() {
    return this._service;
  }
  /**
   * The HTTP route associated with the workload.
   */
  get optionalHttpRoute() {
    return this._httpRoute;
  }
  /**
   * The service associated with the workload.
   *
   * Will throw an error if the service is not available.
   */
  get service() {
    return this._service.apply((service) => {
      if (!service) {
        throw new Error(`The service of the workload "${this.name}" is not available.`);
      }
      return service;
    });
  }
  /**
   * The HTTP route associated with the workload.
   *
   * Will throw an error if the HTTP route is not available.
   */
  get httpRoute() {
    return this._httpRoute.apply((httpRoute) => {
      if (!httpRoute) {
        throw new Error(`The HTTP route of the workload "${this.name}" is not available.`);
      }
      return httpRoute;
    });
  }
  /**
   * Creates a generic workload or patches the existing one.
   */
  static createOrPatchGeneric(name, args, opts) {
    return output9(args).apply(async (args2) => {
      // Patch path: an existing deployment/stateful-set is adopted in place,
      // keeping its original name/namespace. Implementations are loaded
      // lazily to avoid pulling both into every bundle.
      if (args2.existing?.type === "k8s.deployment") {
        const { Deployment } = await import("./deployment-KOZNZXJA.js");
        return Deployment.patch(
          name,
          {
            ...deepmerge3(args2, args2.deployment),
            name: args2.existing.metadata.name,
            namespace: args2.existing.metadata.namespace
          },
          opts
        );
      }
      if (args2.existing?.type === "k8s.stateful-set") {
        const { StatefulSet } = await import("./stateful-set-H5BR3H5D.js");
        return StatefulSet.patch(
          name,
          {
            ...deepmerge3(args2, args2.statefulSet),
            name: args2.existing.metadata.name,
            namespace: args2.existing.metadata.namespace
          },
          opts
        );
      }
      // Create path: dispatch on the requested workload type, merging
      // type-specific overrides (args2.deployment / args2.statefulSet).
      if (args2.type === "Deployment") {
        const { Deployment } = await import("./deployment-KOZNZXJA.js");
        return Deployment.create(name, deepmerge3(args2, args2.deployment), opts);
      }
      if (args2.type === "StatefulSet") {
        const { StatefulSet } = await import("./stateful-set-H5BR3H5D.js");
        return StatefulSet.create(name, deepmerge3(args2, args2.statefulSet), opts);
      }
      throw new Error(`Unknown workload type: ${args2.type}`);
    });
  }
};
|
1641
|
+
|
1642
|
+
export {
|
1643
|
+
hasServiceMetadata,
|
1644
|
+
getServiceMetadata,
|
1645
|
+
withServiceMetadata,
|
1646
|
+
isFromCluster,
|
1647
|
+
Service,
|
1648
|
+
mapContainerPortToServicePort,
|
1649
|
+
mapServiceToLabelSelector,
|
1650
|
+
getServiceType,
|
1651
|
+
HttpRoute,
|
1652
|
+
PersistentVolumeClaim,
|
1653
|
+
Secret,
|
1654
|
+
ConfigMap,
|
1655
|
+
getBestEndpoint,
|
1656
|
+
requireBestEndpoint,
|
1657
|
+
NetworkPolicy,
|
1658
|
+
exposableWorkloadExtraArgs,
|
1659
|
+
getWorkloadComponents,
|
1660
|
+
getExposableWorkloadComponents,
|
1661
|
+
Workload,
|
1662
|
+
ExposableWorkload
|
1663
|
+
};
|
1664
|
+
//# sourceMappingURL=chunk-P2UABKGA.js.map
|