@highstate/k8s 0.9.4 → 0.9.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/dist/chunk-DQSCJM5S.js +183 -0
  2. package/dist/chunk-DQSCJM5S.js.map +1 -0
  3. package/dist/chunk-FKNHHKOL.js +260 -0
  4. package/dist/chunk-FKNHHKOL.js.map +1 -0
  5. package/dist/chunk-HW3NS3MC.js +347 -0
  6. package/dist/chunk-HW3NS3MC.js.map +1 -0
  7. package/dist/chunk-OQ7UXASD.js +193 -0
  8. package/dist/chunk-OQ7UXASD.js.map +1 -0
  9. package/dist/chunk-QGHMLKTW.js +1123 -0
  10. package/dist/chunk-QGHMLKTW.js.map +1 -0
  11. package/dist/chunk-UNVSWG6D.js +214 -0
  12. package/dist/chunk-UNVSWG6D.js.map +1 -0
  13. package/dist/deployment-ZP3ASKPT.js +10 -0
  14. package/dist/deployment-ZP3ASKPT.js.map +1 -0
  15. package/dist/highstate.manifest.json +8 -6
  16. package/dist/index.js +291 -954
  17. package/dist/index.js.map +1 -1
  18. package/dist/stateful-set-2AH7RAF7.js +10 -0
  19. package/dist/stateful-set-2AH7RAF7.js.map +1 -0
  20. package/dist/units/access-point/index.js +6 -1
  21. package/dist/units/access-point/index.js.map +1 -1
  22. package/dist/units/cert-manager/index.js +19 -24
  23. package/dist/units/cert-manager/index.js.map +1 -1
  24. package/dist/units/cluster-dns/index.js +36 -0
  25. package/dist/units/cluster-dns/index.js.map +1 -0
  26. package/dist/units/cluster-patch/index.js +34 -0
  27. package/dist/units/cluster-patch/index.js.map +1 -0
  28. package/dist/units/dns01-issuer/index.js +2 -2
  29. package/dist/units/dns01-issuer/index.js.map +1 -1
  30. package/dist/units/existing-cluster/index.js +22 -14
  31. package/dist/units/existing-cluster/index.js.map +1 -1
  32. package/dist/units/gateway-api/index.js +1 -1
  33. package/package.json +12 -10
  34. package/src/access-point.ts +44 -39
  35. package/src/container.ts +54 -5
  36. package/src/cron-job.ts +14 -30
  37. package/src/deployment.ts +170 -127
  38. package/src/gateway/http-route.ts +7 -5
  39. package/src/helm.ts +57 -8
  40. package/src/index.ts +11 -4
  41. package/src/job.ts +14 -32
  42. package/src/namespace.ts +241 -0
  43. package/src/network-policy.ts +371 -87
  44. package/src/network.ts +41 -0
  45. package/src/pvc.ts +43 -25
  46. package/src/scripting/bundle.ts +125 -22
  47. package/src/scripting/container.ts +16 -11
  48. package/src/scripting/environment.ts +56 -6
  49. package/src/secret.ts +195 -0
  50. package/src/service.ts +209 -89
  51. package/src/shared.ts +42 -51
  52. package/src/stateful-set.ts +193 -88
  53. package/src/units/access-point/index.ts +8 -1
  54. package/src/units/cert-manager/index.ts +15 -20
  55. package/src/units/cluster-dns/index.ts +37 -0
  56. package/src/units/cluster-patch/index.ts +35 -0
  57. package/src/units/dns01-issuer/index.ts +1 -1
  58. package/src/units/existing-cluster/index.ts +24 -14
  59. package/src/workload.ts +342 -44
  60. package/dist/chunk-K4WKJ4L5.js +0 -455
  61. package/dist/chunk-K4WKJ4L5.js.map +0 -1
  62. package/dist/chunk-T5Z2M4JE.js +0 -103
  63. package/dist/chunk-T5Z2M4JE.js.map +0 -1
@@ -0,0 +1,1123 @@
1
+ import {
2
+ HttpRoute,
3
+ Service,
4
+ getServiceMetadata,
5
+ isFromCluster,
6
+ mapContainerPortToServicePort,
7
+ mapServiceToLabelSelector
8
+ } from "./chunk-HW3NS3MC.js";
9
+ import {
10
+ commonExtraArgs,
11
+ getProvider,
12
+ mapMetadata,
13
+ mapNamespaceLikeToNamespaceName,
14
+ mapNamespaceNameToSelector,
15
+ mapSelectorLikeToSelector,
16
+ resourceIdToString,
17
+ withPatchName
18
+ } from "./chunk-FKNHHKOL.js";
19
+
20
+ // src/pvc.ts
21
+ import { core } from "@pulumi/kubernetes";
22
+ import {
23
+ ComponentResource,
24
+ output
25
+ } from "@highstate/pulumi";
26
+ import { deepmerge } from "deepmerge-ts";
27
+ import { omit } from "remeda";
28
+ var extraPersistentVolumeClaimArgs = [...commonExtraArgs, "size"];
29
+ var PersistentVolumeClaim = class extends ComponentResource {
30
+ constructor(type, name, args, opts, cluster, metadata, spec, status) {
31
+ super(type, name, args, opts);
32
+ this.cluster = cluster;
33
+ this.metadata = metadata;
34
+ this.spec = spec;
35
+ this.status = status;
36
+ }
37
+ /**
38
+ * The Highstate PVC entity.
39
+ */
40
+ get entity() {
41
+ return output({
42
+ type: "k8s.persistent-volume-claim",
43
+ clusterId: this.cluster.id,
44
+ metadata: this.metadata
45
+ });
46
+ }
47
+ static create(name, args, opts) {
48
+ return new CreatedPersistentVolumeClaim(name, args, opts);
49
+ }
50
+ static of(name, entity, cluster, opts) {
51
+ return new ExternalPersistentVolumeClaim(name, output(entity).metadata, cluster, opts);
52
+ }
53
+ static createOrGet(name, args, opts) {
54
+ if (!args.existing) {
55
+ return new CreatedPersistentVolumeClaim(name, args, opts);
56
+ }
57
+ return new ExternalPersistentVolumeClaim(
58
+ name,
59
+ output(args.existing).metadata,
60
+ args.cluster,
61
+ opts
62
+ );
63
+ }
64
+ };
65
/**
 * A PVC created from scratch.
 *
 * Defaults: access mode `ReadWriteOnce` and a 100Mi storage request; any
 * user-supplied spec fields (minus the wrapper-only args) are deep-merged
 * on top of those defaults.
 */
var CreatedPersistentVolumeClaim = class extends PersistentVolumeClaim {
  constructor(name, args, opts) {
    const pvc = output(args).apply(async (resolvedArgs) => {
      const defaults = {
        accessModes: ["ReadWriteOnce"],
        resources: {
          requests: {
            storage: resolvedArgs.size ?? "100Mi"
          }
        }
      };

      return new core.v1.PersistentVolumeClaim(
        name,
        {
          metadata: mapMetadata(resolvedArgs, name),
          spec: deepmerge(defaults, omit(resolvedArgs, extraPersistentVolumeClaimArgs))
        },
        {
          ...opts,
          parent: this,
          provider: await getProvider(resolvedArgs.cluster)
        }
      );
    });

    // NOTE(review): type token lacks the "highstate:" prefix used by sibling
    // components — confirm whether this is intentional before changing it.
    super(
      "k8s:PersistentVolumeClaim",
      name,
      args,
      opts,
      output(args.cluster),
      pvc.metadata,
      pvc.spec,
      pvc.status
    );
  }
};
103
/**
 * A PVC that already exists in the cluster and is looked up by its resource id
 * (`namespace/name`) rather than created.
 */
var ExternalPersistentVolumeClaim = class extends PersistentVolumeClaim {
  constructor(name, id, cluster, opts) {
    const pvc = output(id).apply(async (resolvedId) => {
      const provider = await getProvider(cluster);

      return core.v1.PersistentVolumeClaim.get(name, resourceIdToString(resolvedId), {
        ...opts,
        parent: this,
        provider
      });
    });

    super(
      "highstate:k8s:ExternalPersistentVolumeClaim",
      name,
      { id, cluster },
      opts,
      output(cluster),
      pvc.metadata,
      pvc.spec,
      pvc.status
    );
  }
};
129
+
130
+ // src/secret.ts
131
+ import { core as core2 } from "@pulumi/kubernetes";
132
+ import {
133
+ ComponentResource as ComponentResource2,
134
+ output as output2
135
+ } from "@pulumi/pulumi";
136
/**
 * Base component wrapping a Kubernetes Secret.
 *
 * Concrete behavior lives in `CreatedSecret`, `SecretPatch` and
 * `ExternalSecret`; use the static factories to obtain instances.
 */
var Secret = class extends ComponentResource2 {
  constructor(type, name, args, opts, cluster, metadata, data, stringData) {
    super(type, name, args, opts);
    this.cluster = cluster;
    this.metadata = metadata;
    this.data = data;
    this.stringData = stringData;
  }

  /**
   * Creates a new secret.
   */
  static create(name, args, opts) {
    return new CreatedSecret(name, args, opts);
  }

  /**
   * Creates a new secret, or patches the one referenced by `args.existing`.
   *
   * Will throw an error if the referenced secret does not exist.
   */
  static createOrPatch(name, args, opts) {
    if (!args.existing) {
      return new CreatedSecret(name, args, opts);
    }

    const patchArgs = {
      ...args,
      name: withPatchName("secret", args.existing, args.cluster),
      namespace: output2(args.existing).metadata.namespace
    };

    return new SecretPatch(name, patchArgs, opts);
  }

  /**
   * Gets an existing secret.
   *
   * Will throw an error if the secret does not exist.
   */
  static get(name, id, cluster, opts) {
    return new ExternalSecret(name, id, cluster, opts);
  }
};
178
/**
 * A secret created from scratch from `data` (base64) and/or `stringData`.
 */
var CreatedSecret = class extends Secret {
  constructor(name, args, opts) {
    const secret = output2(args).apply(async (resolvedArgs) => {
      const provider = await getProvider(resolvedArgs.cluster);

      return new core2.v1.Secret(
        name,
        {
          metadata: mapMetadata(resolvedArgs, name),
          data: resolvedArgs.data,
          stringData: resolvedArgs.stringData
        },
        { ...opts, parent: this, provider }
      );
    });

    super(
      "highstate:k8s:Secret",
      name,
      args,
      opts,
      output2(args.cluster),
      secret.metadata,
      secret.data,
      secret.stringData
    );
  }
};
207
/**
 * A server-side patch applied to an already existing secret.
 */
var SecretPatch = class extends Secret {
  constructor(name, args, opts) {
    const secret = output2(args).apply(async (resolvedArgs) => {
      const provider = await getProvider(resolvedArgs.cluster);

      return new core2.v1.SecretPatch(
        name,
        {
          metadata: mapMetadata(resolvedArgs, name),
          data: resolvedArgs.data,
          stringData: resolvedArgs.stringData
        },
        { ...opts, parent: this, provider }
      );
    });

    super(
      "highstate:k8s:SecretPatch",
      name,
      args,
      opts,
      output2(args.cluster),
      secret.metadata,
      secret.data,
      secret.stringData
    );
  }
};
236
/**
 * A secret that already exists in the cluster and is looked up by id.
 */
var ExternalSecret = class extends Secret {
  constructor(name, id, cluster, opts) {
    const secret = output2(id).apply(async (resolvedId) => {
      const provider = await getProvider(cluster);

      return core2.v1.Secret.get(name, resolvedId, {
        ...opts,
        parent: this,
        provider
      });
    });

    super(
      "highstate:k8s:ExternalSecret",
      name,
      { id, cluster },
      opts,
      output2(cluster),
      secret.metadata,
      secret.data,
      secret.stringData
    );
  }
};
262
+
263
+ // src/container.ts
264
+ import { core as core3 } from "@pulumi/kubernetes";
265
+ import { normalize, output as output3 } from "@highstate/pulumi";
266
+ import { concat, map, omit as omit2 } from "remeda";
267
// Wrapper-level container arguments stripped before building the raw spec.
var containerExtraArgs = [
  "port",
  "volumeMount",
  "volume",
  "environment",
  "environmentSource",
  "environmentSources"
];

/**
 * Converts a Highstate container definition into a raw Kubernetes container
 * spec: normalizes singular/plural fields, maps volume mounts and environment
 * sources, and applies the cluster's TUN-device quirk when `enableTun` is set.
 */
function mapContainerToRaw(container, cluster, fallbackName) {
  const spec = {
    ...omit2(container, containerExtraArgs),
    name: container.name ?? fallbackName,
    ports: normalize(container.port, container.ports),
    volumeMounts: map(normalize(container.volumeMount, container.volumeMounts), mapVolumeMount),
    env: concat(
      container.environment ? mapContainerEnvironment(container.environment) : [],
      container.env ?? []
    ),
    envFrom: concat(
      map(normalize(container.environmentSource, container.environmentSources), mapEnvironmentSource),
      container.envFrom ?? []
    )
  };

  if (container.enableTun) {
    // TUN always requires NET_ADMIN.
    spec.securityContext ??= {};
    spec.securityContext.capabilities ??= {};
    spec.securityContext.capabilities.add = ["NET_ADMIN"];

    const tunPolicy = cluster.quirks?.tunDevicePolicy;
    if (tunPolicy?.type === "plugin") {
      // Device-plugin clusters expose /dev/net/tun via a resource limit.
      spec.resources ??= {};
      spec.resources.limits ??= {};
      spec.resources.limits[tunPolicy.resourceName] = tunPolicy.resourceValue;
    } else {
      // Otherwise the device is host-mounted (see getWorkloadComponents).
      spec.volumeMounts ??= [];
      spec.volumeMounts.push({
        name: "tun-device",
        mountPath: "/dev/net/tun",
        readOnly: false
      });
    }
  }

  return spec;
}
313
/**
 * Converts an environment map into a list of Kubernetes `EnvVar` objects.
 *
 * Values may be plain strings, `{ secret, key }` references, `{ configMap,
 * key }` references, or raw `valueFrom` sources. Falsy values are skipped
 * entirely (note: this also drops empty strings).
 */
function mapContainerEnvironment(environment) {
  const envVars = [];

  for (const [name, value] of Object.entries(environment)) {
    if (!value) {
      continue;
    }

    if (typeof value === "string") {
      envVars.push({ name, value });
    } else if ("secret" in value) {
      envVars.push({
        name,
        valueFrom: {
          secretKeyRef: {
            name: value.secret.metadata.name,
            key: value.key
          }
        }
      });
    } else if ("configMap" in value) {
      envVars.push({
        name,
        valueFrom: {
          configMapKeyRef: {
            name: value.configMap.metadata.name,
            key: value.key
          }
        }
      });
    } else {
      // Anything else is assumed to already be a raw `valueFrom` source.
      envVars.push({ name, valueFrom: value });
    }
  }

  return envVars;
}
351
/**
 * Maps a volume mount definition to a raw `VolumeMount`.
 *
 * When the mount embeds a `volume`, the mount name is resolved from the
 * workload volume it maps to and the `volume` key is dropped.
 */
function mapVolumeMount(volumeMount) {
  if (!("volume" in volumeMount)) {
    return {
      ...volumeMount,
      name: volumeMount.name
    };
  }

  const resolvedName = output3(volumeMount.volume)
    .apply(mapWorkloadVolume)
    .apply((volume) => output3(volume.name));

  return omit2({ ...volumeMount, name: resolvedName }, ["volume"]);
}
366
/**
 * Maps an environment source to a raw `EnvFromSource`.
 *
 * Pulumi ConfigMap/Secret resources are converted to the corresponding ref;
 * anything else is passed through unchanged.
 */
function mapEnvironmentSource(envFrom) {
  if (envFrom instanceof core3.v1.ConfigMap) {
    return { configMapRef: { name: envFrom.metadata.name } };
  }

  if (envFrom instanceof core3.v1.Secret) {
    return { secretRef: { name: envFrom.metadata.name } };
  }

  return envFrom;
}
383
/**
 * Maps a workload volume definition to a raw pod `Volume`.
 *
 * Accepts Highstate PVC/Secret wrappers, raw Pulumi PVC/ConfigMap/Secret
 * resources, or an already-raw volume (returned unchanged).
 */
function mapWorkloadVolume(volume) {
  const isClaim =
    volume instanceof PersistentVolumeClaim || core3.v1.PersistentVolumeClaim.isInstance(volume);

  if (isClaim) {
    return {
      name: volume.metadata.name,
      persistentVolumeClaim: {
        claimName: volume.metadata.name
      }
    };
  }

  const isSecret = volume instanceof Secret || core3.v1.Secret.isInstance(volume);

  if (isSecret) {
    return {
      name: volume.metadata.name,
      secret: {
        secretName: volume.metadata.name
      }
    };
  }

  if (core3.v1.ConfigMap.isInstance(volume)) {
    return {
      name: volume.metadata.name,
      configMap: {
        name: volume.metadata.name
      }
    };
  }

  return volume;
}
426
+
427
+ // src/network-policy.ts
428
+ import { networking } from "@pulumi/kubernetes";
429
+ import {
430
+ ComponentResource as ComponentResource3,
431
+ interpolate,
432
+ normalize as normalize2,
433
+ output as output4
434
+ } from "@highstate/pulumi";
435
+ import { capitalize, flat, groupBy, merge, mergeDeep, uniqueBy } from "remeda";
436
+ import "@highstate/library";
437
+ import {
438
+ l34EndpointToString,
439
+ l3EndpointToCidr,
440
+ parseL34Endpoint
441
+ } from "@highstate/common";
442
/**
 * CNI-agnostic network policy component.
 *
 * Normalizes Highstate ingress/egress rule definitions (singular/plural
 * fields, endpoint references, kube-dns/kube-apiserver conveniences) and
 * delegates the actual resource creation to a CNI-specific subclass via
 * the abstract instance method `create(name, args, opts)`.
 */
var NetworkPolicy = class _NetworkPolicy extends ComponentResource3 {
  /**
   * The underlying network policy resource created by the CNI-specific
   * implementation.
   */
  networkPolicy;

  constructor(name, args, opts) {
    super("k8s:network-policy", name, args, opts);

    const normalizedArgs = output4(args).apply((args2) => {
      const ingressRules = normalize2(args2.ingressRule, args2.ingressRules);
      const egressRules = normalize2(args2.egressRule, args2.egressRules);

      const extraEgressRules = [];

      if (args2.allowKubeDns) {
        extraEgressRules.push({
          namespaces: ["kube-system"],
          selectors: [{ matchLabels: { "k8s-app": "kube-dns" } }],
          ports: [{ port: 53, protocol: "UDP" }],
          all: false,
          cidrs: [],
          fqdns: [],
          services: []
        });
      }

      return {
        ...args2,
        podSelector: args2.selector ? mapSelectorLikeToSelector(args2.selector) : {},
        isolateEgress: args2.isolateEgress ?? false,
        isolateIngress: args2.isolateIngress ?? false,
        allowKubeApiServer: args2.allowKubeApiServer ?? false,

        ingressRules: ingressRules.flatMap((rule) => {
          // FIX: endpoints must come from the rule currently being mapped.
          // Previously this read `args2.ingressRule?.fromEndpoint(s)`, so
          // endpoints of rules passed via the plural `ingressRules` were
          // ignored and the singular rule's endpoints were duplicated into
          // every rule.
          const endpoints = normalize2(rule.fromEndpoint, rule.fromEndpoints);
          const { l3OnlyRule, otherRules } = _NetworkPolicy.groupEndpointRules(
            endpoints,
            args2.cluster
          );

          return [
            {
              all: rule.fromAll ?? false,
              cidrs: normalize2(rule.fromCidr, rule.fromCidrs).concat(l3OnlyRule?.cidrs ?? []),
              // FQDN peers are not expressible for ingress.
              fqdns: [],
              services: normalize2(rule.fromService, rule.fromServices),
              namespaces: normalize2(rule.fromNamespace, rule.fromNamespaces),
              selectors: normalize2(rule.fromSelector, rule.fromSelectors),
              ports: normalize2(rule.toPort, rule.toPorts)
            },
            ...otherRules
          ].filter((candidate) => !_NetworkPolicy.isEmptyRule(candidate));
        }),

        egressRules: egressRules
          .flatMap((rule) => {
            // FIX: same per-rule endpoint handling as for ingress above
            // (previously read `args2.egressRule?.toEndpoint(s)`).
            const endpoints = normalize2(rule.toEndpoint, rule.toEndpoints);
            const { l3OnlyRule, otherRules } = _NetworkPolicy.groupEndpointRules(
              endpoints,
              args2.cluster
            );

            return [
              {
                all: rule.toAll ?? false,
                cidrs: normalize2(rule.toCidr, rule.toCidrs).concat(l3OnlyRule?.cidrs ?? []),
                fqdns: normalize2(rule.toFqdn, rule.toFqdns).concat(l3OnlyRule?.fqdns ?? []),
                services: normalize2(rule.toService, rule.toServices),
                namespaces: normalize2(rule.toNamespace, rule.toNamespaces),
                selectors: normalize2(rule.toSelector, rule.toSelectors),
                ports: normalize2(rule.toPort, rule.toPorts)
              },
              ...otherRules
            ].filter((candidate) => !_NetworkPolicy.isEmptyRule(candidate));
          })
          .concat(extraEgressRules)
      };
    });

    this.networkPolicy = output4(
      normalizedArgs.apply(async (args2) => {
        return output4(
          this.create(name, args2, {
            ...opts,
            parent: this,
            provider: await getProvider(args2.cluster)
          })
        );
      })
    );
  }

  /**
   * Groups parsed L3/L4 endpoints by `${port}:${namespace}` and converts each
   * group into a rule. The `"0:"` group (no port, not from the cluster)
   * carries only L3 information and is returned separately so its
   * cidrs/fqdns can be merged into the caller's base rule.
   */
  static groupEndpointRules(endpoints, cluster) {
    const parsedEndpoints = endpoints.map(parseL34Endpoint);

    const groups = groupBy(parsedEndpoints, (endpoint) => {
      const namespace = isFromCluster(endpoint, cluster)
        ? endpoint.metadata.k8sService.namespace
        : "";
      const port = isFromCluster(endpoint, cluster)
        ? endpoint.metadata.k8sService.targetPort
        : endpoint.port;
      return `${port ?? "0"}:${namespace}`;
    });

    const l3OnlyRule = groups["0:"]
      ? _NetworkPolicy.getRuleFromEndpoint(void 0, groups["0:"], cluster)
      : void 0;

    const otherRules = Object.entries(groups)
      .filter(([key]) => key !== "0:")
      .map(([key, groupedEndpoints]) => {
        const [port] = key.split(":");
        const portNumber = parseInt(port, 10);
        const portValue = isNaN(portNumber) ? port : portNumber;
        return _NetworkPolicy.getRuleFromEndpoint(portValue, groupedEndpoints, cluster);
      });

    return { l3OnlyRule, otherRules };
  }

  /** Converts a parsed IP endpoint to a single-address CIDR. */
  static mapCidrFromEndpoint(result) {
    if (result.type === "ipv4") {
      return `${result.address}/32`;
    }
    return `${result.address}/128`;
  }

  /**
   * Builds a single normalized rule from a group of endpoints that share the
   * same port (or no port at all).
   */
  static getRuleFromEndpoint(port, endpoints, cluster) {
    const ports = port ? [{ port, protocol: endpoints[0].protocol?.toUpperCase() }] : [];

    const cidrs = endpoints
      .filter((endpoint) => !isFromCluster(endpoint, cluster))
      .filter((endpoint) => endpoint.type === "ipv4" || endpoint.type === "ipv6")
      .map(_NetworkPolicy.mapCidrFromEndpoint);

    const fqdns = endpoints
      .filter((endpoint) => endpoint.type === "hostname")
      .map((endpoint) => endpoint.hostname);

    const selectors = endpoints
      .filter((endpoint) => isFromCluster(endpoint, cluster))
      .map((endpoint) => endpoint.metadata.k8sService.selector);

    const namespace = endpoints
      .filter((endpoint) => isFromCluster(endpoint, cluster))
      .map((endpoint) => getServiceMetadata(endpoint)?.namespace)[0];

    return {
      all: false,
      cidrs,
      fqdns,
      services: [],
      namespaces: namespace ? [namespace] : [],
      selectors,
      ports
    };
  }

  /** A rule with no criteria at all matches nothing and is dropped. */
  static isEmptyRule(rule) {
    return (
      !rule.all &&
      rule.cidrs.length === 0 &&
      rule.fqdns.length === 0 &&
      rule.services.length === 0 &&
      rule.namespaces.length === 0 &&
      rule.selectors.length === 0 &&
      rule.ports.length === 0
    );
  }

  /**
   * Creates a network policy using the implementation matching the cluster's
   * CNI: "other" falls back to native Kubernetes NetworkPolicy, anything else
   * is dynamically imported from `@highstate/<cni>`.
   */
  static create(name, args, opts) {
    return output4(args).apply(async (args2) => {
      const cni = args2.cluster.cni;

      if (cni === "other") {
        return new NativeNetworkPolicy(name, args2, opts);
      }

      const implName = `${capitalize(cni)}NetworkPolicy`;
      const implModule = await import(`@highstate/${cni}`);
      const implClass = implModule[implName];

      if (!implClass) {
        throw new Error(`No implementation found for ${cni}`);
      }

      return new implClass(name, args2, opts);
    });
  }

  /** Denies all ingress and egress traffic for the namespace. */
  static isolate(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "isolate",
      {
        namespace,
        cluster,
        description: "By default, deny all traffic to/from the namespace.",
        isolateEgress: true,
        isolateIngress: true
      },
      opts
    );
  }

  /** Allows all traffic between pods of the same namespace. */
  static allowInsideNamespace(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-inside-namespace",
      {
        namespace,
        cluster,
        description: "Allow all traffic inside the namespace.",
        selector: {},
        ingressRule: { fromNamespace: namespace },
        egressRule: { toNamespace: namespace }
      },
      opts
    );
  }

  /** Allows egress from the namespace to the Kubernetes API server. */
  static allowKubeApiServer(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-kube-api-server",
      {
        namespace,
        cluster,
        description: "Allow all traffic to the Kubernetes API server from the namespace.",
        allowKubeApiServer: true
      },
      opts
    );
  }

  /** Allows egress from the namespace to kube-dns. */
  static allowKubeDns(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-kube-dns",
      {
        namespace,
        cluster,
        description: "Allow all traffic to the Kubernetes DNS server from the namespace.",
        allowKubeDns: true
      },
      opts
    );
  }

  /** Allows all egress traffic from the namespace. */
  static allowAllEgress(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-all-egress",
      {
        namespace,
        cluster,
        description: "Allow all egress traffic from the namespace.",
        egressRule: { toAll: true }
      },
      opts
    );
  }

  /** Allows all ingress traffic to the namespace. */
  static allowAllIngress(namespace, cluster, opts) {
    return _NetworkPolicy.create(
      "allow-all-ingress",
      {
        namespace,
        cluster,
        description: "Allow all ingress traffic to the namespace.",
        ingressRule: { fromAll: true }
      },
      opts
    );
  }

  /** Allows egress from the namespace to a single L3/L4 endpoint. */
  static allowEgressToEndpoint(endpoint, namespace, cluster, opts) {
    const parsedEndpoint = parseL34Endpoint(endpoint);

    return _NetworkPolicy.create(
      `allow-egress-to-${l34EndpointToString(parsedEndpoint)}`,
      {
        namespace,
        cluster,
        description: interpolate`Allow egress traffic to "${l34EndpointToString(parsedEndpoint)}" from the namespace.`,
        egressRule: { toEndpoint: endpoint }
      },
      opts
    );
  }

  /** Allows ingress to the namespace from a single L3/L4 endpoint. */
  static allowIngressFromEndpoint(endpoint, namespace, cluster, opts) {
    const parsedEndpoint = parseL34Endpoint(endpoint);

    return _NetworkPolicy.create(
      `allow-ingress-from-${l34EndpointToString(parsedEndpoint)}`,
      {
        namespace,
        cluster,
        description: interpolate`Allow ingress traffic from "${l34EndpointToString(parsedEndpoint)}" to the namespace.`,
        ingressRule: { fromEndpoint: endpoint }
      },
      opts
    );
  }
};
693
/**
 * Network policy implementation backed by the native Kubernetes
 * `networking.k8s.io/v1` NetworkPolicy resource.
 *
 * FQDN rules cannot be expressed natively, so they are approximated by
 * allowing kube-dns plus (for non-cluster-local names) all non-private
 * address space.
 */
var NativeNetworkPolicy = class _NativeNetworkPolicy extends NetworkPolicy {
  // Matches any non-private address; coarse fallback for external FQDNs.
  static fallbackIpBlock = {
    cidr: "0.0.0.0/0",
    except: ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]
  };

  // Always allow UDP/53 to kube-dns when FQDN-based egress is requested.
  static fallbackDnsRule = {
    to: [
      {
        namespaceSelector: { matchLabels: { "kubernetes.io/metadata.name": "kube-system" } },
        podSelector: { matchLabels: { "k8s-app": "kube-dns" } }
      }
    ],
    ports: [{ port: 53, protocol: "UDP" }]
  };

  create(name, args, opts) {
    const ingress = _NativeNetworkPolicy.createIngressRules(args);
    const egress = _NativeNetworkPolicy.createEgressRules(args);

    const policyTypes = [];
    if (ingress.length > 0 || args.isolateIngress) {
      policyTypes.push("Ingress");
    }
    if (egress.length > 0 || args.isolateEgress) {
      policyTypes.push("Egress");
    }

    const annotations = args.description
      ? { "kubernetes.io/description": args.description }
      : void 0;

    return new networking.v1.NetworkPolicy(
      name,
      {
        metadata: mergeDeep(mapMetadata(args, name), { annotations }),
        spec: {
          podSelector: args.podSelector,
          ingress,
          egress,
          policyTypes
        }
      },
      opts
    );
  }

  static createIngressRules(args) {
    const mapped = args.ingressRules.map((rule) => ({
      from: rule.all ? [] : _NativeNetworkPolicy.createRulePeers(rule),
      ports: _NativeNetworkPolicy.mapPorts(rule.ports)
    }));

    return uniqueBy(mapped, (rule) => JSON.stringify(rule));
  }

  static createEgressRules(args) {
    const extraRules = [];

    // FQDN egress requires DNS resolution to work at all.
    if (args.egressRules.some((rule) => rule.fqdns.length > 0)) {
      extraRules.push(_NativeNetworkPolicy.fallbackDnsRule);
    }

    // Non-cluster-local FQDNs cannot be pinned to addresses natively;
    // fall back to allowing all public address space.
    const hasExternalFqdn = args.egressRules.some((rule) =>
      rule.fqdns.some((fqdn) => !fqdn.endsWith(".cluster.local"))
    );
    if (hasExternalFqdn) {
      extraRules.push({ to: [{ ipBlock: _NativeNetworkPolicy.fallbackIpBlock }] });
    }

    if (args.allowKubeApiServer) {
      const { quirks, apiEndpoints } = args.cluster;

      if (quirks?.fallbackKubeApiAccess) {
        extraRules.push({
          to: [{ ipBlock: { cidr: `${quirks?.fallbackKubeApiAccess.serverIp}/32` } }],
          ports: [{ port: quirks?.fallbackKubeApiAccess.serverPort, protocol: "TCP" }]
        });
      } else {
        const apiRules = apiEndpoints
          .filter((endpoint) => endpoint.type !== "hostname")
          .map((endpoint) => ({
            to: [{ ipBlock: { cidr: l3EndpointToCidr(endpoint) } }],
            ports: [{ port: endpoint.port, protocol: "TCP" }]
          }));
        extraRules.push(...apiRules);
      }
    }

    const mapped = args.egressRules
      .map((rule) => ({
        to: rule.all ? [] : _NativeNetworkPolicy.createRulePeers(rule),
        ports: _NativeNetworkPolicy.mapPorts(rule.ports)
      }))
      .filter((rule) => rule.to !== void 0)
      .concat(extraRules);

    return uniqueBy(mapped, (rule) => JSON.stringify(rule));
  }

  // Returns undefined (not []) when nothing matched, so the caller can drop
  // the rule entirely instead of emitting an allow-all peer list.
  static createRulePeers(args) {
    const peers = uniqueBy(
      [
        ..._NativeNetworkPolicy.createCidrPeers(args),
        ..._NativeNetworkPolicy.createServicePeers(args),
        ..._NativeNetworkPolicy.createSelectorPeers(args)
      ],
      (peer) => JSON.stringify(peer)
    );

    return peers.length > 0 ? peers : void 0;
  }

  static createCidrPeers(args) {
    return args.cidrs.map((cidr) => ({ ipBlock: { cidr } }));
  }

  static createServicePeers(args) {
    return args.services.map((service) => ({
      namespaceSelector: mapNamespaceNameToSelector(service.metadata.namespace),
      podSelector: mapServiceToLabelSelector(service)
    }));
  }

  // Selector peers and namespace peers are combined as a cross product so a
  // pod must match both a selector and a namespace when both are given.
  static createSelectorPeers(args) {
    const selectorPeers = args.selectors.map((selector) => ({
      podSelector: mapSelectorLikeToSelector(selector)
    }));

    const namespacePeers = args.namespaces.map(_NativeNetworkPolicy.createNamespacePeer);

    if (namespacePeers.length === 0) {
      return selectorPeers;
    }
    if (selectorPeers.length === 0) {
      return namespacePeers;
    }

    return flat(
      selectorPeers.map((selectorPeer) =>
        namespacePeers.map((namespacePeer) => merge(selectorPeer, namespacePeer))
      )
    );
  }

  static createNamespacePeer(namespace) {
    const namespaceName = mapNamespaceLikeToNamespaceName(namespace);
    return { namespaceSelector: mapNamespaceNameToSelector(namespaceName) };
  }

  static mapPorts(ports) {
    return ports.map((port) => {
      if ("port" in port) {
        return {
          port: port.port,
          protocol: port.protocol ?? "TCP"
        };
      }

      const [start, end] = port.range;
      return {
        port: start,
        endPort: end,
        protocol: port.protocol ?? "TCP"
      };
    });
  }
};
840
+
841
+ // src/workload.ts
842
+ import {
843
+ normalize as normalize3
844
+ } from "@highstate/pulumi";
845
+ import {
846
+ ComponentResource as ComponentResource4,
847
+ interpolate as interpolate2,
848
+ output as output5
849
+ } from "@pulumi/pulumi";
850
+ import { uniqueBy as uniqueBy2 } from "remeda";
851
+ import { deepmerge as deepmerge2 } from "deepmerge-ts";
852
+
853
// src/pod.ts
/**
 * Defaults merged into every generated pod spec. Service-account tokens are
 * not mounted unless a workload explicitly opts in.
 */
var podSpecDefaults = {
  automountServiceAccountToken: false
};
857
+
858
+ // src/workload.ts
859
// Wrapper-level arguments stripped before building raw workload specs.
var workloadExtraArgs = [...commonExtraArgs, "container", "containers"];
var exposableWorkloadExtraArgs = [...workloadExtraArgs, "service", "httpRoute"];
861
/**
 * Builds the shared pieces of a workload: labels, normalized containers,
 * deduplicated volumes, the pod spec/template, and an optional egress
 * network policy derived from container `allowedEndpoints`.
 *
 * `parent` is a thunk so child resources can be parented lazily.
 */
function getWorkloadComponents(name, args, parent, opts) {
  // Label set tying together pod template, service selector and network policy.
  const labels = {
    "app.kubernetes.io/name": name
  };

  const containers = output5(args).apply((resolved) =>
    normalize3(resolved.container, resolved.containers)
  );

  const volumes = containers.apply((resolvedContainers) => {
    // Volumes declared directly on containers...
    const explicitVolumes = resolvedContainers
      .flatMap((container) => normalize3(container.volume, container.volumes))
      .map(mapWorkloadVolume);

    // ...plus volumes embedded in volume mounts.
    const mountedVolumes = resolvedContainers
      .flatMap((container) => {
        return normalize3(container.volumeMount, container.volumeMounts)
          .map((volumeMount) => ("volume" in volumeMount ? volumeMount.volume : void 0))
          .filter(Boolean);
      })
      .map(mapWorkloadVolume);

    return output5([...explicitVolumes, ...mountedVolumes]).apply(
      uniqueBy2((volume) => volume.name)
    );
  });

  const podSpec = output5({ args, containers, volumes }).apply(
    ({ args: resolved, containers: resolvedContainers, volumes: resolvedVolumes }) => {
      const spec = {
        volumes: resolvedVolumes,
        containers: resolvedContainers.map((container) =>
          mapContainerToRaw(container, resolved.cluster, name)
        ),
        ...podSpecDefaults
      };

      // Host-mount /dev/net/tun unless the cluster provides it via a device plugin.
      const needsTunDevice =
        resolvedContainers.some((container) => container.enableTun) &&
        resolved.cluster.quirks?.tunDevicePolicy?.type !== "plugin";

      if (needsTunDevice) {
        spec.volumes = output5(spec.volumes).apply((current) => [
          ...(current ?? []),
          {
            name: "tun-device",
            hostPath: {
              path: "/dev/net/tun"
            }
          }
        ]);
      }

      return spec;
    }
  );

  const podTemplate = podSpec.apply((spec) => ({
    metadata: { labels },
    spec
  }));

  const networkPolicy = containers.apply((resolvedContainers) => {
    const allowedEndpoints = resolvedContainers.flatMap(
      (container) => container.allowedEndpoints ?? []
    );

    if (allowedEndpoints.length === 0) {
      return void 0;
    }

    return NetworkPolicy.create(
      name,
      {
        cluster: args.cluster,
        namespace: args.namespace,
        selector: labels,
        egressRule: {
          toEndpoints: allowedEndpoints
        }
      },
      { ...opts, parent: parent() }
    );
  });

  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy };
}
922
/**
 * Builds the full component set for a workload that may be exposed via a
 * Service and/or HTTP route, on top of the base workload components.
 *
 * NOTE(review): `output5(...).apply` looks like a Pulumi Output pipeline —
 * the returned `service`/`httpRoute` are Outputs that may resolve to
 * `undefined` when exposure is not requested or an existing workload is
 * being patched. Confirm against the @highstate/pulumi wrapper.
 *
 * @param name   Resource name shared by all created components.
 * @param args   Workload arguments (cluster, namespace, service, httpRoute, existing, ...).
 * @param parent Thunk returning the parent resource for created children.
 * @param opts   Extra resource options forwarded to every child resource.
 * @returns Base components plus `service` and `httpRoute` Outputs.
 */
function getExposableWorkloadComponents(name, args, parent, opts) {
  const base = getWorkloadComponents(name, args, parent, opts);
  const { labels, containers, volumes, podSpec, podTemplate, networkPolicy } = base;

  const service = output5({ args, containers }).apply(async (resolved) => {
    const workloadArgs = resolved.args;
    const resolvedContainers = resolved.containers;

    // No exposure requested at all — nothing to create.
    if (!workloadArgs.service && !workloadArgs.httpRoute) {
      return void 0;
    }

    // Wrap a pre-existing service instead of creating a new one.
    if (workloadArgs.existing?.service) {
      return Service.of(name, workloadArgs.existing.service, workloadArgs.cluster, {
        ...opts,
        parent: parent()
      });
    }

    // Patching an existing workload without a known service: skip creation.
    if (workloadArgs.existing) {
      return void 0;
    }

    const containerPorts = resolvedContainers.flatMap((container) =>
      normalize3(container.port, container.ports)
    );

    // allow to completely override the ports
    const hasExplicitPorts = Boolean(workloadArgs.service?.port) || Boolean(workloadArgs.service?.ports);
    const servicePorts = hasExplicitPorts
      ? workloadArgs.service?.ports
      : containerPorts.map(mapContainerPortToServicePort);

    return Service.create(
      name,
      {
        ...workloadArgs.service,
        selector: labels,
        cluster: workloadArgs.cluster,
        namespace: workloadArgs.namespace,
        ports: servicePorts
      },
      {
        ...opts,
        parent: parent(),
        provider: await getProvider(workloadArgs.cluster)
      }
    );
  });

  const httpRoute = output5({ args, service }).apply(async (resolved) => {
    const workloadArgs = resolved.args;
    const backendService = resolved.service;

    // A route needs both an httpRoute config and a backing service,
    // and is never created when patching an existing workload.
    if (!workloadArgs.httpRoute || !backendService) {
      return void 0;
    }
    if (workloadArgs.existing) {
      return void 0;
    }

    return new HttpRoute(
      name,
      {
        ...workloadArgs.httpRoute,
        cluster: workloadArgs.cluster,
        rule: {
          backend: backendService
        }
      },
      {
        ...opts,
        parent: parent(),
        provider: await getProvider(workloadArgs.cluster)
      }
    );
  });

  return { labels, containers, volumes, podSpec, podTemplate, networkPolicy, service, httpRoute };
}
982
var Workload = class extends ComponentResource4 {
  /**
   * Base class for Kubernetes workload components (Deployment, StatefulSet, ...).
   *
   * @param type          Pulumi-style component type token.
   * @param name          Logical resource name.
   * @param args          Raw workload arguments (kept for later resolution).
   * @param opts          Resource options.
   * @param resourceType  Kubernetes kind used in `kubectl` commands (e.g. "deployment").
   * @param cluster       Cluster descriptor providing the kubeconfig.
   * @param metadata      Resolved workload metadata (name/namespace).
   * @param networkPolicy Optional NetworkPolicy created for the workload.
   */
  constructor(type, name, args, opts, resourceType, cluster, metadata, networkPolicy) {
    super(type, name, args, opts);
    this.name = name;
    this.args = args;
    this.resourceType = resourceType;
    this.cluster = cluster;
    this.metadata = metadata;
    this.networkPolicy = networkPolicy;
  }
  /**
   * The instance terminal to interact with the deployment.
   */
  get terminal() {
    // Pick the first declared container's name; fall back to the workload
    // name when no containers are declared.
    const targetContainer = output5(this.args).apply((resolvedArgs) => {
      const declared = normalize3(resolvedArgs.container, resolvedArgs.containers);
      return declared[0]?.name ?? this.name;
    });
    const execCommand = [
      "exec",
      "kubectl",
      "exec",
      "-it",
      "-n",
      this.metadata.namespace,
      interpolate2`${this.resourceType}/${this.metadata.name}`,
      "-c",
      targetContainer,
      "--",
      this.args.terminalShell ?? "bash"
    ];
    return output5({
      name: this.metadata.name,
      title: this.metadata.name,
      image: "ghcr.io/exeteres/highstate/terminal-kubectl",
      command: execCommand,
      // Mount the cluster kubeconfig and point kubectl at it.
      files: {
        "/kubeconfig": this.cluster.kubeconfig
      },
      env: {
        KUBECONFIG: "/kubeconfig"
      }
    });
  }
};
1026
var ExposableWorkload = class extends Workload {
  /**
   * Workload that may additionally expose a Service and an HTTP route.
   *
   * @param _service   Output resolving to the Service, or undefined.
   * @param _httpRoute Output resolving to the HttpRoute, or undefined.
   */
  constructor(type, name, args, opts, resourceType, cluster, metadata, networkPolicy, _service, _httpRoute) {
    super(type, name, args, opts, resourceType, cluster, metadata, networkPolicy);
    this.name = name;
    this._service = _service;
    this._httpRoute = _httpRoute;
  }
  /**
   * The service associated with the workload.
   */
  get optionalService() {
    return this._service;
  }
  /**
   * The HTTP route associated with the workload.
   */
  get optionalHttpRoute() {
    return this._httpRoute;
  }
  /**
   * The service associated with the workload.
   *
   * Will throw an error if the service is not available.
   */
  get service() {
    return this._service.apply((resolvedService) => {
      if (resolvedService) {
        return resolvedService;
      }
      throw new Error(`The service of the workload "${this.name}" is not available.`);
    });
  }
  /**
   * The HTTP route associated with the workload.
   *
   * Will throw an error if the HTTP route is not available.
   */
  get httpRoute() {
    return this._httpRoute.apply((resolvedRoute) => {
      if (resolvedRoute) {
        return resolvedRoute;
      }
      throw new Error(`The HTTP route of the workload "${this.name}" is not available.`);
    });
  }
  /**
   * Creates a generic workload or patches the existing one.
   */
  static createOrPatchGeneric(name, args, opts) {
    return output5(args).apply(async (resolved) => {
      // Patch path: an existing workload of a known kind takes precedence
      // over creating a new one.
      switch (resolved.existing?.type) {
        case "k8s.deployment": {
          const { Deployment } = await import("./deployment-ZP3ASKPT.js");
          return Deployment.patch(
            name,
            {
              ...deepmerge2(resolved, resolved.deployment),
              name: resolved.existing.metadata.name,
              namespace: resolved.existing.metadata.namespace
            },
            opts
          );
        }
        case "k8s.stateful-set": {
          const { StatefulSet } = await import("./stateful-set-2AH7RAF7.js");
          return StatefulSet.patch(
            name,
            {
              ...deepmerge2(resolved, resolved.statefulSet),
              name: resolved.existing.metadata.name,
              namespace: resolved.existing.metadata.namespace
            },
            opts
          );
        }
      }
      // Create path: dispatch on the requested workload kind.
      switch (resolved.type) {
        case "Deployment": {
          const { Deployment } = await import("./deployment-ZP3ASKPT.js");
          return Deployment.create(name, deepmerge2(resolved, resolved.deployment), opts);
        }
        case "StatefulSet": {
          const { StatefulSet } = await import("./stateful-set-2AH7RAF7.js");
          return StatefulSet.create(name, deepmerge2(resolved, resolved.statefulSet), opts);
        }
      }
      throw new Error(`Unknown workload type: ${resolved.type}`);
    });
  }
};
1112
+
1113
+ export {
1114
+ PersistentVolumeClaim,
1115
+ Secret,
1116
+ NetworkPolicy,
1117
+ exposableWorkloadExtraArgs,
1118
+ getWorkloadComponents,
1119
+ getExposableWorkloadComponents,
1120
+ Workload,
1121
+ ExposableWorkload
1122
+ };
1123
+ //# sourceMappingURL=chunk-QGHMLKTW.js.map