@meshxdata/fops 0.1.45 → 0.1.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. package/CHANGELOG.md +202 -17
  2. package/package.json +1 -1
  3. package/src/commands/lifecycle.js +81 -5
  4. package/src/commands/setup.js +45 -4
  5. package/src/plugins/bundled/fops-plugin-azure/index.js +29 -0
  6. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-core.js +1185 -0
  7. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-flux.js +1180 -0
  8. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-ingress.js +393 -0
  9. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-naming.js +104 -0
  10. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-network.js +296 -0
  11. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-postgres.js +768 -0
  12. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-reconcilers.js +538 -0
  13. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-secrets.js +849 -0
  14. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-stacks.js +643 -0
  15. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-state.js +145 -0
  16. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-storage.js +496 -0
  17. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks-terraform.js +1032 -0
  18. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks.js +155 -4245
  19. package/src/plugins/bundled/fops-plugin-azure/lib/azure-keyvault.js +186 -0
  20. package/src/plugins/bundled/fops-plugin-azure/lib/azure-results.js +5 -0
  21. package/src/plugins/bundled/fops-plugin-azure/lib/commands/infra-cmds.js +758 -0
  22. package/src/plugins/bundled/fops-plugin-azure/lib/commands/registry-cmds.js +250 -0
  23. package/src/plugins/bundled/fops-plugin-azure/lib/commands/test-cmds.js +2 -1
  24. package/src/plugins/bundled/fops-plugin-foundation/lib/apply.js +3 -2
  25. package/src/plugins/bundled/fops-plugin-foundation/lib/helpers.js +21 -0
  26. package/src/plugins/bundled/fops-plugin-foundation/lib/tools-read.js +3 -5
  27. package/src/ui/tui/App.js +13 -13
  28. package/src/web/dist/assets/index-NXC8Hvnp.css +1 -0
  29. package/src/web/dist/assets/index-QH1N4ejK.js +112 -0
  30. package/src/web/dist/index.html +2 -2
  31. package/src/web/server.js +4 -4
  32. package/src/web/dist/assets/index-BphVaAUd.css +0 -1
  33. package/src/web/dist/assets/index-CSckLzuG.js +0 -129
@@ -0,0 +1,145 @@
1
+ /**
2
+ * azure-aks-state.js - Cluster and stack state management
3
+ *
4
+ * Depends on: azure-aks-naming.js
5
+ */
6
+
7
+ import fs from "node:fs";
8
+ import path from "node:path";
9
+ import { ERR, hint, readState, saveState } from "./azure.js";
10
+ import { AKS_DEFAULTS } from "./azure-aks-naming.js";
11
+
12
+ // ── Cluster state ─────────────────────────────────────────────────────────────
13
+ // State layout: { azure: { ..., activeCluster: "<name>", clusters: { ... } } }
14
+
15
/**
 * Read the tracked AKS cluster inventory from saved state.
 * @returns {{activeCluster: (string|undefined), clusters: object}} The active
 *   cluster name (if any) and the map of tracked clusters (never null).
 */
export function readAksClusters() {
  const azure = readState().azure || {};
  return {
    activeCluster: azure.activeCluster,
    clusters: azure.clusters || {},
  };
}
23
+
24
/**
 * Look up one tracked cluster's record.
 * @param {string} [name] - Cluster to look up; when omitted, falls back to the
 *   active cluster.
 * @returns {object|null} The cluster record, or null when not tracked.
 */
export function readClusterState(name) {
  const { activeCluster, clusters } = readAksClusters();
  if (name) return clusters[name] || null;
  return activeCluster ? clusters[activeCluster] || null : null;
}
30
+
31
/**
 * Upsert a cluster record (shallow-merging `patch`) and mark it as the
 * active cluster, then persist the whole state.
 * @param {string} name - Cluster name; always written back as `clusterName`.
 * @param {object} patch - Fields to merge into the existing record.
 */
export function writeClusterState(name, patch) {
  const state = readState();
  const az = state.azure || {};
  const clusters = az.clusters || {};
  const previous = clusters[name];
  clusters[name] = { ...previous, ...patch, clusterName: name };
  az.clusters = clusters;
  az.activeCluster = name; // writing a cluster always makes it active
  state.azure = az;
  saveState(state);
}
41
+
42
/**
 * Remove a cluster from tracked state. If it was the active cluster, promote
 * an arbitrary remaining cluster (or unset activeCluster when none remain).
 * @param {string} name - Cluster to forget.
 */
export function clearClusterState(name) {
  const state = readState();
  const az = state.azure || {};
  const clusters = az.clusters || {};
  delete clusters[name];
  if (az.activeCluster === name) {
    const [next] = Object.keys(clusters); // undefined when no clusters remain
    az.activeCluster = next;
  }
  az.clusters = clusters;
  state.azure = az;
  saveState(state);
}
55
+
56
+ // ── Stack state management ────────────────────────────────────────────────────
57
+
58
/**
 * Read one stack record stored under a cluster.
 * @param {string} [clusterName] - Cluster to read; defaults to the active one.
 * @param {string} namespace - Stack namespace key.
 * @returns {object|null} The stack record, or null when cluster/stack unknown.
 */
export function readStackState(clusterName, namespace) {
  const cluster = readClusterState(clusterName);
  if (!cluster) return null;
  return (cluster.stacks || {})[namespace] || null;
}
64
+
65
/**
 * Upsert a stack record under a cluster, stamping `updatedAt` on every write
 * and `createdAt` on the first, then persist via writeClusterState.
 * NOTE(review): assumes `clusterName` is provided — an undefined name would be
 * stored under the literal key "undefined"; verify callers always pass it.
 * @param {string} clusterName - Owning cluster.
 * @param {string} namespace - Stack namespace key.
 * @param {object} patch - Fields to merge into the stack record.
 */
export function writeStackState(clusterName, namespace, patch) {
  const cluster = readClusterState(clusterName) || {};
  const stacks = cluster.stacks || {};
  const now = new Date().toISOString();
  const merged = { ...stacks[namespace], ...patch, namespace, updatedAt: now };
  if (!merged.createdAt) {
    merged.createdAt = merged.updatedAt; // first write: createdAt == updatedAt
  }
  stacks[namespace] = merged;
  writeClusterState(clusterName, { stacks });
}
79
+
80
/**
 * Delete a stack record from a cluster's state.
 * No-op when the cluster itself is not tracked.
 * @param {string} clusterName - Owning cluster.
 * @param {string} namespace - Stack namespace key to remove.
 */
export function deleteStackState(clusterName, namespace) {
  const cluster = readClusterState(clusterName);
  if (!cluster) return;
  const stacks = cluster.stacks || {};
  delete stacks[namespace];
  writeClusterState(clusterName, { stacks });
}
87
+
88
/**
 * List all stack records tracked under a cluster.
 * @param {string} [clusterName] - Cluster to list; defaults to the active one.
 * @returns {object[]} Stack records; empty array when the cluster is unknown.
 */
export function listStacks(clusterName) {
  const cluster = readClusterState(clusterName);
  return cluster ? Object.values(cluster.stacks || {}) : [];
}
94
+
95
+ // ── Flux config resolution ────────────────────────────────────────────────────
96
+
97
/**
 * Read flux defaults from a project root, checking `.fops.json` first and
 * then `config/azure-flux.json`. Values may live under an `azure` key or at
 * the top level of either file. Unreadable or invalid JSON is treated as
 * "no config" rather than an error.
 * @param {string} [projectRoot] - Directory to search; may be absent/missing.
 * @returns {object|null} An object with any of fluxOwner/fluxRepo/fluxPath/
 *   fluxBranch that were present, or null when nothing was found.
 */
export function readProjectFluxConfig(projectRoot) {
  if (!projectRoot || !fs.existsSync(projectRoot)) return null;

  const FLUX_KEYS = ["fluxOwner", "fluxRepo", "fluxPath", "fluxBranch"];
  const readOne = (file) => {
    if (!fs.existsSync(file)) return null;
    try {
      const parsed = JSON.parse(fs.readFileSync(file, "utf8"));
      const section = parsed?.azure || parsed;
      const found = {};
      for (const key of FLUX_KEYS) {
        if (section[key] != null) found[key] = section[key];
      }
      return Object.keys(found).length > 0 ? found : null;
    } catch {
      return null; // parse failure ⇒ behave as if the file had no config
    }
  };

  return (
    readOne(path.join(projectRoot, ".fops.json")) ||
    readOne(path.join(projectRoot, "config", "azure-flux.json")) ||
    null
  );
}
115
+
116
/**
 * Resolve the effective Flux repo configuration for a cluster.
 * Precedence: CLI opts > tracked cluster state > global azure state >
 * project config file > AKS_DEFAULTS.
 * @param {string} [clusterName] - Cluster whose tracked flux settings to use.
 * @param {object} [opts] - CLI overrides (fluxRepo/fluxOwner/fluxPath/fluxBranch).
 * @returns {{fluxRepo, fluxOwner, fluxPath, fluxBranch}} Resolved settings.
 */
export function resolveFluxConfig(clusterName, opts) {
  const state = readState();
  const az = state.azure || {};
  const tracked = readClusterState(clusterName);
  const project = readProjectFluxConfig(az.projectRoot || state.projectRoot);

  // First non-nullish value wins (empty strings are kept, matching `??`).
  const pick = (key, stateKey) =>
    opts?.[key] ?? tracked?.flux?.[stateKey] ?? az[key] ?? project?.[key] ?? AKS_DEFAULTS[key];

  return {
    fluxRepo: pick("fluxRepo", "repo"),
    fluxOwner: pick("fluxOwner", "owner"),
    // fluxPath deliberately uses `||`: an empty-string path also falls through.
    fluxPath: opts?.fluxPath || tracked?.flux?.path || az.fluxPath || project?.fluxPath || AKS_DEFAULTS.fluxPath,
    fluxBranch: pick("fluxBranch", "branch"),
  };
}
129
+
130
+ // ── Cluster requirement check ─────────────────────────────────────────────────
131
+
132
/**
 * Resolve a tracked cluster or abort the process with guidance.
 * Exits with code 1 (after printing hints) when no cluster is tracked under
 * `name` (or as the active cluster).
 * @param {string} [name] - Cluster name; defaults to the active cluster.
 * @returns {object} The cluster record with `resourceGroup` defaulted.
 */
export function requireCluster(name) {
  const cluster = readClusterState(name);
  if (cluster?.clusterName) {
    return {
      ...cluster,
      resourceGroup: cluster.resourceGroup ?? AKS_DEFAULTS.resourceGroup,
    };
  }
  const label = name ? `"${name}"` : "(none active)";
  console.error(ERR(`\n No AKS cluster tracked: ${label}`));
  hint("Create one: fops azure aks up <name>");
  hint("List: fops azure aks list\n");
  process.exit(1);
}
@@ -0,0 +1,496 @@
1
+ /**
2
+ * azure-aks-storage.js - Storage account, Helm repos, and storage engine
3
+ *
4
+ * Depends on: azure-aks-naming.js, azure-aks-state.js
5
+ */
6
+
7
+ import crypto from "node:crypto";
8
+ import { OK, WARN, hint, subArgs } from "./azure.js";
9
+ import { pgServerName } from "./azure-aks-naming.js";
10
+ import { readClusterState } from "./azure-aks-state.js";
11
+
12
+ // ── Helm Repositories ─────────────────────────────────────────────────────────
13
+
14
// Flux HelmRepository sources reconciled into the cluster by reconcileHelmRepos.
// Each `spec` maps directly onto the HelmRepository CRD spec; `secretRef`
// names the ACR pull secret replicated into flux-system.
export const HELM_REPOS = [
  {
    // OCI chart registry for foundation charts (authenticated via ACR helm secret).
    name: "foundation", namespace: "flux-system",
    spec: { type: "oci", interval: "1m0s", url: "oci://meshxregistry.azurecr.io/foundation-charts", secretRef: { name: "meshxregistry-helm-secret" } },
  },
  {
    // Same OCI registry registered under a second source name for hive-metastore.
    name: "hive-metastore", namespace: "flux-system",
    spec: { type: "oci", interval: "1m0s", url: "oci://meshxregistry.azurecr.io/foundation-charts", secretRef: { name: "meshxregistry-helm-secret" } },
  },
  {
    // Public Trino community Helm chart repository (no auth required).
    name: "trinodb", namespace: "flux-system",
    spec: { interval: "30m", url: "https://trinodb.github.io/charts" },
  },
];
28
+
29
+ // ── Old Postgres hosts for migration ──────────────────────────────────────────
30
+
31
// Legacy Azure Postgres hostnames that may still appear in HelmRelease values;
// reconcileHelmValues rewrites any occurrence to the current per-cluster host.
export const OLD_PG_HOSTS = [
  "az-vel-app-data-demo-uaen-psql.postgres.database.azure.com",
];
34
+
35
+ // ── Storage Account reconciliation ────────────────────────────────────────────
36
+
37
/**
 * Reconcile the per-cluster Azure Storage Account:
 *   1. create the account if missing (Standard_LRS, StorageV2, TLS 1.2, no public blob access),
 *   2. fetch an account key,
 *   3. ensure the "foundation" and "vault" blob containers exist,
 *   4. generate storage-engine auth credentials,
 *   5. store account + auth secrets in the cluster Key Vault,
 *   6. grant the AKS kubelet identity "Storage Blob Data Contributor" on the account.
 *
 * Best-effort: creation/permission failures log a WARN and stop the remaining
 * steps rather than throwing (the key lookup in step 2 propagates az failures,
 * as it has no `reject: false`).
 *
 * @param {object} ctx - { execa, clusterName, rg: resource group, sub: subscription }
 */
export async function reconcileStorageAccount(ctx) {
  const { execa, clusterName, rg, sub } = ctx;
  // Storage account names must be lowercase alphanumeric, max 24 chars.
  const storageAccountName = `fops${clusterName.replace(/-/g, "")}`.toLowerCase().slice(0, 24);
  // NOTE(review): Key Vault names are also length-limited — a long clusterName
  // could make this invalid; verify against the vault-creation code path.
  const vaultName = `fops-${clusterName}-kv`;
  const containers = ["foundation", "vault"];

  hint(`Reconciling Azure Storage Account "${storageAccountName}"…`);

  // 1. Check if storage account exists
  const { exitCode: saExists } = await execa("az", [
    "storage", "account", "show",
    "--name", storageAccountName,
    "--resource-group", rg,
    "--output", "none",
    ...subArgs(sub),
  ], { reject: false, timeout: 30000 });

  if (saExists !== 0) {
    // Create storage account
    hint(`Creating Storage Account "${storageAccountName}"…`);
    const { exitCode, stderr } = await execa("az", [
      "storage", "account", "create",
      "--name", storageAccountName,
      "--resource-group", rg,
      "--sku", "Standard_LRS",
      "--kind", "StorageV2",
      "--https-only", "true",
      "--min-tls-version", "TLS1_2",
      "--allow-blob-public-access", "false",
      "--output", "none",
      ...subArgs(sub),
    ], { reject: false, timeout: 120000 });

    if (exitCode !== 0) {
      console.log(WARN(` ⚠ Storage Account creation failed: ${(stderr || "").split("\n")[0]}`));
      return;
    }
    console.log(OK(` ✓ Storage Account "${storageAccountName}" created`));
  } else {
    console.log(OK(` ✓ Storage Account "${storageAccountName}" exists`));
  }

  // 2. Get storage account key
  const { stdout: keyOut } = await execa("az", [
    "storage", "account", "keys", "list",
    "--account-name", storageAccountName,
    "--resource-group", rg,
    "--query", "[0].value",
    "--output", "tsv",
    ...subArgs(sub),
  ], { timeout: 30000 });
  const storageKey = keyOut?.trim();

  if (!storageKey) {
    console.log(WARN(" ⚠ Could not retrieve storage account key"));
    return;
  }

  // 3. Create containers (idempotent: probe first, create only when missing)
  for (const container of containers) {
    const { exitCode: containerExists } = await execa("az", [
      "storage", "container", "show",
      "--name", container,
      "--account-name", storageAccountName,
      "--account-key", storageKey,
      "--output", "none",
    ], { reject: false, timeout: 30000 });

    if (containerExists !== 0) {
      await execa("az", [
        "storage", "container", "create",
        "--name", container,
        "--account-name", storageAccountName,
        "--account-key", storageKey,
        "--output", "none",
      ], { reject: false, timeout: 30000 });
      console.log(OK(` ✓ Container "${container}" created`));
    } else {
      console.log(OK(` ✓ Container "${container}" exists`));
    }
  }

  // 4. Generate storage-engine auth credentials.
  // NOTE: regenerated on every reconcile — the Key Vault value below is
  // overwritten each run, so consumers must re-read it from the vault.
  const authIdentity = "storage-engine";
  const authCredential = crypto.randomBytes(24).toString("base64").replace(/[^a-zA-Z0-9]/g, "");

  // 5. Store credentials in Key Vault
  const secrets = [
    { name: "AZURE-STORAGE-ACCOUNT", value: storageAccountName },
    { name: "AZURE-STORAGE-KEY", value: storageKey },
    // storage-engine secrets as JSON
    { name: "foundation-storage-engine-secrets", value: JSON.stringify({
      AZURE_STORAGE_ACCOUNT_NAME: storageAccountName,
      AZURE_STORAGE_ACCOUNT_KEY: storageKey,
    })},
    { name: "foundation-storage-engine-auth", value: JSON.stringify({
      AUTH_IDENTITY: authIdentity,
      AUTH_CREDENTIAL: authCredential,
    })},
  ];

  for (const { name, value } of secrets) {
    const { exitCode } = await execa("az", [
      "keyvault", "secret", "set",
      "--vault-name", vaultName,
      "--name", name,
      "--value", value,
      "--output", "none",
      ...subArgs(sub),
    ], { reject: false, timeout: 30000 });

    if (exitCode === 0) {
      console.log(OK(` ✓ Secret "${name}" stored in Key Vault`));
    }
  }

  // 6. Grant AKS kubelet identity Storage Blob Data Contributor role
  try {
    const { stdout: kubeletOut } = await execa("az", [
      "aks", "show", "-g", rg, "-n", clusterName,
      "--query", "identityProfile.kubeletidentity.objectId",
      "-o", "tsv",
      ...subArgs(sub),
    ], { timeout: 30000 });
    const kubeletOid = kubeletOut?.trim();

    if (kubeletOid) {
      const { stdout: saIdOut } = await execa("az", [
        "storage", "account", "show",
        "--name", storageAccountName,
        "--resource-group", rg,
        "--query", "id",
        "-o", "tsv",
        ...subArgs(sub),
      ], { timeout: 30000 });
      const saId = saIdOut?.trim();

      if (!saId) {
        // Without a scope we can neither check nor create the assignment.
        console.log(WARN(" ⚠ Could not grant AKS identity blob access: storage account id not found"));
        return;
      }

      // FIX: `az role assignment list` exits 0 even when nothing matches, so
      // the previous exit-code check always claimed the role existed and the
      // assignment was never created. Detect existence from the OUTPUT:
      // `--query "[0]" -o tsv` prints nothing when the list is empty.
      const { stdout: roleOut } = await execa("az", [
        "role", "assignment", "list",
        "--assignee", kubeletOid,
        "--scope", saId,
        "--role", "Storage Blob Data Contributor",
        "--query", "[0]",
        "-o", "tsv",
        ...subArgs(sub),
      ], { reject: false, timeout: 30000 });

      if (!(roleOut || "").trim()) {
        await execa("az", [
          "role", "assignment", "create",
          "--assignee-object-id", kubeletOid,
          "--assignee-principal-type", "ServicePrincipal",
          "--role", "Storage Blob Data Contributor",
          "--scope", saId,
          ...subArgs(sub),
        ], { reject: false, timeout: 60000 });
        console.log(OK(" ✓ AKS kubelet identity granted Storage Blob Data Contributor"));
      } else {
        console.log(OK(" ✓ AKS kubelet identity already has Storage Blob access"));
      }
    }
  } catch (err) {
    console.log(WARN(` ⚠ Could not grant AKS identity blob access: ${err.message?.split("\n")[0]}`));
  }
}
201
+
202
+ // ── Storage engine deployment ─────────────────────────────────────────────────
203
+
204
/**
 * Deploy the foundation-storage-engine (MinIO) into the "foundation"
 * namespace if it is not already running: Deployment + two Services, then a
 * one-shot `mc` Job that creates the "vault" bucket.
 *
 * Idempotent at the deployment level only: if the Deployment exists, the
 * function returns immediately without re-applying Services or re-running
 * the bucket Job.
 *
 * SECURITY NOTE(review): MinIO root credentials are hard-coded here
 * ("minio"/"minio123") and must match the `mc alias set` command in the Job
 * below — they are NOT sourced from the Key Vault secrets written by
 * reconcileStorageAccount; confirm whether this is intentional for dev-only use.
 *
 * @param {object} ctx - { execa, clusterName } (clusterName doubles as kube context)
 */
export async function reconcileStorageEngine(ctx) {
  const { execa, clusterName } = ctx;
  // All kubectl calls target the cluster's kube context; failures are
  // non-throwing (reject: false) and surface via exit codes.
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 30000, reject: false, ...opts });

  // Check if deployment already exists
  const { exitCode } = await kubectl([
    "get", "deployment", "foundation-storage-engine", "-n", "foundation",
  ]);
  if (exitCode === 0) {
    console.log(OK(" ✓ Storage engine deployment exists"));
    return;
  }

  hint("Creating foundation-storage-engine deployment…");
  // Single-replica MinIO with an emptyDir volume — data does NOT survive pod
  // rescheduling (ephemeral storage by design here).
  const manifest = JSON.stringify({
    apiVersion: "apps/v1", kind: "Deployment",
    metadata: { name: "foundation-storage-engine", namespace: "foundation", labels: { app: "foundation-storage-engine" } },
    spec: {
      replicas: 1,
      selector: { matchLabels: { app: "foundation-storage-engine" } },
      template: {
        metadata: { labels: { app: "foundation-storage-engine" } },
        spec: {
          containers: [{
            name: "storage-engine",
            image: "minio/minio:RELEASE.2024-11-07T00-52-20Z",
            args: ["server", "/data", "--console-address", ":9001"],
            env: [
              { name: "MINIO_ROOT_USER", value: "minio" },
              { name: "MINIO_ROOT_PASSWORD", value: "minio123" },
            ],
            ports: [
              { containerPort: 9000, name: "api" },
              { containerPort: 9001, name: "console" },
            ],
            volumeMounts: [{ name: "data", mountPath: "/data" }],
            resources: { requests: { cpu: "100m", memory: "256Mi" }, limits: { cpu: "500m", memory: "512Mi" } },
            readinessProbe: { httpGet: { path: "/minio/health/ready", port: 9000 }, initialDelaySeconds: 5, periodSeconds: 10 },
          }],
          volumes: [{ name: "data", emptyDir: {} }],
        },
      },
    },
  });
  await kubectl(["apply", "-f", "-"], { input: manifest });

  // Service: foundation-storage-engine (port 8080 → 9000)
  const svcManifest = JSON.stringify({
    apiVersion: "v1", kind: "Service",
    metadata: { name: "foundation-storage-engine", namespace: "foundation" },
    spec: {
      selector: { app: "foundation-storage-engine" },
      ports: [
        { port: 8080, targetPort: 9000, name: "api" },
        { port: 9000, targetPort: 9000, name: "s3" },
      ],
    },
  });
  await kubectl(["apply", "-f", "-"], { input: svcManifest });

  // Also patch the existing "minio" service to point here (Vault uses minio.foundation.svc)
  const minioSvcManifest = JSON.stringify({
    apiVersion: "v1", kind: "Service",
    metadata: { name: "minio", namespace: "foundation" },
    spec: {
      selector: { app: "foundation-storage-engine" },
      ports: [
        { port: 80, targetPort: 9000, name: "http" },
        { port: 9000, targetPort: 9000, name: "s3" },
        { port: 8080, targetPort: 9000, name: "api" },
      ],
    },
  });
  await kubectl(["apply", "-f", "-"], { input: minioSvcManifest });

  // Wait for the deployment to be ready, then create the vault bucket
  await kubectl(["rollout", "status", "deployment/foundation-storage-engine", "-n", "foundation", "--timeout=60s"], { timeout: 70000 });

  // Create the vault bucket via a one-shot mc pod
  // (Job is JSON, despite the variable name; kubectl accepts JSON on stdin.)
  const mcJobYaml = JSON.stringify({
    apiVersion: "batch/v1", kind: "Job",
    metadata: { name: "fops-mc-init", namespace: "foundation" },
    spec: {
      backoffLimit: 3, ttlSecondsAfterFinished: 60,
      template: {
        spec: {
          restartPolicy: "Never",
          containers: [{
            name: "mc",
            image: "minio/mc:latest",
            command: ["sh", "-c", "mc alias set local http://foundation-storage-engine:8080 minio minio123 && mc mb local/vault --ignore-existing"],
          }],
        },
      },
    },
  });
  // Delete any stale Job first — Job specs are immutable, re-apply would fail.
  await kubectl(["delete", "job", "fops-mc-init", "-n", "foundation", "--ignore-not-found"]);
  await kubectl(["apply", "-f", "-"], { input: mcJobYaml });
  await kubectl(["wait", "--for=condition=complete", "job/fops-mc-init", "-n", "foundation", "--timeout=60s"], { timeout: 70000 });

  console.log(OK(" ✓ Storage engine deployed with vault bucket"));
}
307
+
308
+ // ── Helm repos reconciliation ─────────────────────────────────────────────────
309
+
310
/**
 * Reconcile the Flux HelmRepository sources (HELM_REPOS) into flux-system.
 * Skips clusters where Flux has not been bootstrapped (no tracked flux state).
 * Also replicates the ACR helm pull secret into flux-system when it exists
 * only in another namespace, detects the served HelmRepository API version,
 * applies each repo, and verifies what actually exists afterwards.
 *
 * @param {object} ctx - { execa, clusterName, opts } (clusterName doubles as kube context)
 */
export async function reconcileHelmRepos(ctx) {
  const { execa, clusterName } = ctx;
  const tracked = readClusterState(clusterName);
  if (!tracked?.flux) return; // Flux not bootstrapped — nothing to reconcile

  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { reject: false, timeout: 30000, ...opts });

  // Detect which API version the cluster's HelmRepository CRD serves.
  const { stdout: crdVersionsOut } = await kubectl([
    "get", "crd", "helmrepositories.source.toolkit.fluxcd.io",
    "-o", "jsonpath={.spec.versions[*].name}",
  ]);
  const versions = (crdVersionsOut || "").split(/\s+/).filter(Boolean);
  const apiVersion = versions.includes("v1")
    ? "source.toolkit.fluxcd.io/v1"
    : versions.includes("v1beta2")
      ? "source.toolkit.fluxcd.io/v1beta2"
      : "source.toolkit.fluxcd.io/v1";

  // Ensure the ACR helm secret exists in flux-system for HelmRepository auth
  const { exitCode: secretExists } = await kubectl([
    "get", "secret", "meshxregistry-helm-secret", "-n", "flux-system",
  ]);
  if (secretExists !== 0) {
    hint("Creating ACR helm secret in flux-system…");
    // FIX: the original used Array.prototype.find with an ASYNC callback — the
    // callback returns a Promise, which is always truthy, so the first
    // namespace was always "found" even when the secret only existed in the
    // second one. Probe the namespaces sequentially with awaited calls.
    let secretNs = null;
    for (const ns of ["acr-cache-system", "foundation"]) {
      const { exitCode } = await kubectl(["get", "secret", "meshxregistry-helm-secret", "-n", ns]);
      if (exitCode === 0) {
        secretNs = ns;
        break;
      }
    }
    if (secretNs) {
      const { stdout: secretJson } = await kubectl([
        "get", "secret", "meshxregistry-helm-secret", "-n", secretNs, "-o", "json",
      ]);
      if (secretJson) {
        try {
          const secret = JSON.parse(secretJson);
          // Strip namespace + server-managed metadata so it re-applies cleanly.
          delete secret.metadata.namespace;
          delete secret.metadata.resourceVersion;
          delete secret.metadata.uid;
          delete secret.metadata.creationTimestamp;
          if (secret.metadata.annotations) {
            delete secret.metadata.annotations["kubectl.kubernetes.io/last-applied-configuration"];
          }
          const clean = JSON.stringify(secret);
          await execa("kubectl", [
            "--context", clusterName, "apply", "-n", "flux-system", "-f", "-",
          ], { input: clean, reject: false, timeout: 10000 });
          console.log(OK(" ✓ ACR helm secret replicated to flux-system"));
        } catch { /* reflector should handle this eventually */ }
      }
    }
  }

  let updated = 0;
  let unchanged = 0;
  let failed = 0;

  // NOTE(review): no HELM_REPOS entry currently sets `daiOnly`, so this filter
  // is a no-op unless such entries are added later.
  const repos = ctx.opts?.dai ? HELM_REPOS : HELM_REPOS.filter(r => !r.daiOnly);
  for (const repo of repos) {
    // Render the CRD spec as YAML; secretRef is the only nested field used.
    const specLines = [];
    for (const [key, value] of Object.entries(repo.spec)) {
      if (key === "secretRef") {
        specLines.push(`  secretRef:`);
        specLines.push(`    name: ${value.name}`);
      } else {
        specLines.push(`  ${key}: "${value}"`);
      }
    }

    const yaml = [
      `apiVersion: ${apiVersion}`,
      `kind: HelmRepository`,
      `metadata:`,
      `  name: ${repo.name}`,
      `  namespace: ${repo.namespace}`,
      `spec:`,
      ...specLines,
    ].join("\n");

    const applyResult = await execa("kubectl", [
      "--context", clusterName, "apply", "-f", "-",
    ], { input: yaml, reject: false, timeout: 15000 });

    if (applyResult.exitCode === 0) {
      // kubectl prints "... configured" on change, "... unchanged" otherwise.
      const out = (applyResult.stdout || "").trim();
      if (out.includes("configured")) updated++;
      else unchanged++;
    } else {
      failed++;
      const errMsg = (applyResult.stderr || applyResult.stdout || "unknown error").trim().split("\n")[0];
      console.log(WARN(` ⚠ HelmRepository ${repo.namespace}/${repo.name}: ${errMsg}`));
    }
  }

  // Verify they actually exist
  const { stdout: verify } = await kubectl([
    "get", "helmrepository.source.toolkit.fluxcd.io", "-n", "flux-system",
    "--no-headers", "-o", "custom-columns=NAME:.metadata.name",
  ]);
  const actual = (verify || "").trim().split("\n").filter(Boolean);
  if (updated > 0) {
    console.log(OK(` ✓ ${actual.length} HelmRepository source(s) in flux-system (${updated} updated, ${unchanged} unchanged)`));
  } else if (failed === 0) {
    console.log(OK(` ✓ All ${actual.length} HelmRepository sources up to date`));
  }
  if (actual.length < HELM_REPOS.length) {
    const missing = HELM_REPOS.filter(r => !actual.includes(r.name));
    for (const m of missing) {
      console.log(WARN(` ⚠ Missing: ${m.name}`));
    }
  }
}
423
+
424
+ // ── Helm values reconciliation ────────────────────────────────────────────────
425
+
426
/**
 * Rewrite legacy Postgres hostnames (OLD_PG_HOSTS) found inside HelmRelease
 * `.spec.values` in the "foundation" namespace to this cluster's current
 * Postgres server host, using `kubectl patch --type merge`.
 *
 * @param {object} ctx - { execa, clusterName } (clusterName doubles as kube context)
 */
export async function reconcileHelmValues(ctx) {
  const { execa, clusterName } = ctx;
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 30000, reject: false, ...opts });

  const pgServer = pgServerName(clusterName);
  const correctHost = `${pgServer}.postgres.database.azure.com`;

  // List HelmReleases in foundation namespace (empty output ⇒ nothing to do).
  const { stdout: hrJson } = await kubectl(["get", "helmrelease", "-n", "foundation", "-o", "json"]);
  if (!hrJson) return;

  const hrs = JSON.parse(hrJson).items || [];
  let patched = 0;

  // FIX: hoisted out of the per-release loop — the original re-ran
  // `await import("node:fs")` on every iteration. Also use os.tmpdir()
  // instead of a hard-coded "/tmp" so this works off Linux too.
  const { writeFileSync, unlinkSync } = await import("node:fs");
  const { tmpdir } = await import("node:os");

  for (const hr of hrs) {
    const name = hr.metadata?.name;
    // Serialize values so a plain substring scan/replace covers any nesting.
    const valsStr = JSON.stringify(hr.spec?.values || {});

    // Only patch releases that still reference a legacy host.
    if (!OLD_PG_HOSTS.some((oldHost) => valsStr.includes(oldHost))) continue;

    let newVals = valsStr;
    for (const oldHost of OLD_PG_HOSTS) {
      newVals = newVals.replaceAll(oldHost, correctHost);
    }

    // Merge-patch via a temp file (kubectl --patch-file).
    const patch = JSON.stringify({ spec: { values: JSON.parse(newVals) } });
    const tmpFile = `${tmpdir()}/fops-hr-patch-${name}.json`;
    writeFileSync(tmpFile, patch);

    const { exitCode } = await kubectl([
      "patch", "helmrelease", name, "-n", "foundation",
      "--type", "merge", "--patch-file", tmpFile,
    ]);
    try { unlinkSync(tmpFile); } catch {} // best-effort temp cleanup

    if (exitCode === 0) patched++;
  }

  if (patched > 0) {
    console.log(OK(` ✓ Patched postgres host in ${patched} HelmRelease(s) → ${correctHost}`));
  } else {
    console.log(OK(" ✓ HelmRelease postgres hosts are correct"));
  }
}
479
+
480
+ // ── ACR webhooks cleanup ──────────────────────────────────────────────────────
481
+
482
/**
 * Delete orphaned ACR mutating webhook configurations left on the cluster
 * (acr-pod-webhook / acr-helm-webhook), when present.
 * @param {object} ctx - { execa, clusterName } (clusterName doubles as kube context)
 */
export async function reconcileAcrWebhooks(ctx) {
  const { execa, clusterName } = ctx;
  const kubectl = (args, opts = {}) =>
    execa("kubectl", ["--context", clusterName, ...args], { timeout: 15000, reject: false, ...opts });

  const webhooks = ["acr-pod-webhook", "acr-helm-webhook"];
  for (const webhook of webhooks) {
    const probe = await kubectl(["get", "mutatingwebhookconfiguration", webhook]);
    if (probe.exitCode !== 0) continue; // not present — nothing to clean up
    await kubectl(["delete", "mutatingwebhookconfiguration", webhook]);
    console.log(OK(` ✓ Removed orphaned ${webhook}`));
  }
}