@meshxdata/fops 0.1.37 → 0.1.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +374 -0
  2. package/package.json +1 -1
  3. package/src/agent/llm.js +2 -0
  4. package/src/auth/azure.js +92 -0
  5. package/src/auth/cloudflare.js +125 -0
  6. package/src/auth/index.js +2 -0
  7. package/src/commands/index.js +8 -4
  8. package/src/commands/lifecycle.js +31 -10
  9. package/src/doctor.js +27 -5
  10. package/src/plugins/bundled/fops-plugin-1password/index.js +13 -1
  11. package/src/plugins/bundled/fops-plugin-azure/index.js +4 -2
  12. package/src/plugins/bundled/fops-plugin-azure/lib/azure-aks.js +130 -2
  13. package/src/plugins/bundled/fops-plugin-azure/lib/azure-auth.js +66 -6
  14. package/src/plugins/bundled/fops-plugin-azure/lib/azure-helpers.js +64 -2
  15. package/src/plugins/bundled/fops-plugin-azure/lib/azure-ops.js +36 -28
  16. package/src/plugins/bundled/fops-plugin-azure/lib/azure-provision.js +66 -26
  17. package/src/plugins/bundled/fops-plugin-azure/lib/azure-shared-cache.js +1 -1
  18. package/src/plugins/bundled/fops-plugin-azure/lib/azure-sync.js +4 -4
  19. package/src/plugins/bundled/fops-plugin-azure/lib/commands/infra-cmds.js +4 -0
  20. package/src/plugins/bundled/fops-plugin-azure/lib/commands/test-cmds.js +22 -0
  21. package/src/plugins/bundled/fops-plugin-azure/lib/commands/vm-cmds.js +4 -3
  22. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/dai-backend.yaml +13 -0
  23. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/dai-frontend.yaml +13 -0
  24. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-backend.yaml +13 -0
  25. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-frontend.yaml +13 -0
  26. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-hive.yaml +13 -0
  27. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-kafka.yaml +13 -0
  28. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-meltano.yaml +13 -0
  29. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-mlflow.yaml +13 -0
  30. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-opa.yaml +13 -0
  31. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-processor.yaml +13 -0
  32. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-scheduler.yaml +13 -0
  33. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-storage-engine.yaml +13 -0
  34. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-trino.yaml +13 -0
  35. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/apps/foundation-watcher.yaml +13 -0
  36. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/config/repository.yaml +66 -0
  37. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/kustomization.yaml +30 -0
  38. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/acr-webhook-controller.yaml +63 -0
  39. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/externalsecrets.yaml +15 -0
  40. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/istio.yaml +42 -0
  41. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/kafka.yaml +15 -0
  42. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/kube-reflector.yaml +33 -0
  43. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/kubecost.yaml +12 -0
  44. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/nats-server.yaml +15 -0
  45. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/prometheus-agent.yaml +34 -0
  46. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/reloader.yaml +12 -0
  47. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/spark.yaml +112 -0
  48. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/tailscale.yaml +67 -0
  49. package/src/plugins/bundled/fops-plugin-azure/templates/cluster/operator/vertical-pod-autoscaler.yaml +15 -0
  50. package/src/plugins/bundled/fops-plugin-foundation/index.js +44 -7
  51. package/src/plugins/loader.js +23 -6
@@ -527,21 +527,21 @@ async function runUp(program, registry, opts) {
527
527
 
528
528
  if (opts.url) {
529
529
  publicUrl = opts.url.replace(/\/+$/, "");
530
- if (/^FOUNDATION_PUBLIC_URL=/m.test(envContent)) {
531
- envContent = envContent.replace(/^FOUNDATION_PUBLIC_URL=.*/m, `FOUNDATION_PUBLIC_URL=${publicUrl}`);
532
- } else {
530
+ // Only write if not already set in .env — leave the repo's value untouched
531
+ if (!/^FOUNDATION_PUBLIC_URL=/m.test(envContent)) {
533
532
  envContent = envContent.trimEnd() + `\nFOUNDATION_PUBLIC_URL=${publicUrl}\n`;
533
+ fs.writeFileSync(envPath, envContent);
534
+ console.log(chalk.dim(` FOUNDATION_PUBLIC_URL=${publicUrl} written to .env`));
534
535
  }
535
- fs.writeFileSync(envPath, envContent);
536
- console.log(chalk.dim(` FOUNDATION_PUBLIC_URL=${publicUrl} written to .env`));
537
536
  } else {
538
- // Local: ensure compose uses localhost and no remote URL / no traefik
539
- if (/^FOUNDATION_PUBLIC_URL=/m.test(envContent)) {
540
- envContent = envContent.replace(/^FOUNDATION_PUBLIC_URL=.*/m, `FOUNDATION_PUBLIC_URL=${localBaseUrl}`);
541
- } else {
537
+ // Local: only inject localhost fallback if not already set
538
+ if (!/^FOUNDATION_PUBLIC_URL=/m.test(envContent)) {
542
539
  envContent = envContent.trimEnd() + `\nFOUNDATION_PUBLIC_URL=${localBaseUrl}\n`;
540
+ fs.writeFileSync(envPath, envContent);
543
541
  }
544
- fs.writeFileSync(envPath, envContent);
542
+ // Read the actual value from .env for profile resolution below
543
+ const existingUrl = envContent.match(/^FOUNDATION_PUBLIC_URL=(.+)$/m)?.[1]?.trim();
544
+ if (existingUrl && existingUrl !== localBaseUrl) publicUrl = existingUrl;
545
545
  }
546
546
 
547
547
  // Resolve profiles: local up = no traefik; --url with 443 = traefik
@@ -1284,7 +1284,28 @@ async function runUp(program, registry, opts) {
1284
1284
  const forwardOut = createBufferWritable("out");
1285
1285
  const forwardErr = createBufferWritable("err");
1286
1286
 
1287
+ // Detect env misalignment: if FOUNDATION_PUBLIC_URL in .env differs from what's
1288
+ // running in containers, force-recreate so services pick up the new value.
1289
+ let forceRecreate = false;
1290
+ try {
1291
+ const envPublicUrl = (envContent.match(/^FOUNDATION_PUBLIC_URL=(.+)$/m)?.[1] || "").trim();
1292
+ if (envPublicUrl) {
1293
+ const { stdout: inspectOut } = await execa("docker", [
1294
+ "inspect", "--format", "{{range .Config.Env}}{{println .}}{{end}}",
1295
+ "foundation-compose-foundation-frontend-1",
1296
+ ], { reject: false, cwd: root });
1297
+ const runningUrl = (inspectOut || "").split("\n")
1298
+ .find(l => l.startsWith("FOUNDATION_PUBLIC_URL="))
1299
+ ?.slice("FOUNDATION_PUBLIC_URL=".length).trim();
1300
+ if (runningUrl && runningUrl !== envPublicUrl) {
1301
+ console.error(chalk.dim(` ↻ FOUNDATION_PUBLIC_URL mismatch (${runningUrl} → ${envPublicUrl}) — recreating affected services`));
1302
+ forceRecreate = true;
1303
+ }
1304
+ }
1305
+ } catch { /* best-effort */ }
1306
+
1287
1307
  const upArgs = ["compose", ...effectiveProfileArgs, "--progress", "plain", "up", "-d", "--remove-orphans"];
1308
+ if (forceRecreate) upArgs.push("--force-recreate");
1288
1309
  if (serviceList.length > 0) upArgs.push(...serviceList);
1289
1310
  const upProc = execa("docker", upArgs, {
1290
1311
  cwd: root,
package/src/doctor.js CHANGED
@@ -11,6 +11,20 @@ import { rootDir } from "./project.js";
11
11
  import { wslExec, wslHomedir, wslFileExists, wslReadFile, wslCmdVersion } from "./wsl.js";
12
12
  import { getInquirer } from "./lazy.js";
13
13
 
14
// Determine whether sudo works without prompting for a password
// (cached credentials or a NOPASSWD rule). Result is memoized per process.
let _sudoOk = null;
async function canSudo() {
  if (_sudoOk !== null) return _sudoOk;
  // Native Windows has no sudo; WSL paths go through wslExec instead.
  if (process.platform === "win32") {
    _sudoOk = false;
    return false;
  }
  try {
    // `sudo -n true` fails fast instead of prompting when no cached credentials exist.
    const res = await execa("sudo", ["-n", "true"], { reject: false, timeout: 5000 });
    _sudoOk = res.exitCode === 0;
  } catch {
    // execa itself failed (e.g. sudo binary missing) — treat as unavailable.
    _sudoOk = false;
  }
  return _sudoOk;
}
25
+
26
+
27
+
14
28
  const KEY_PORTS = {
15
29
  5432: "Postgres",
16
30
  9092: "Kafka",
@@ -246,6 +260,7 @@ export async function runDoctor(opts = {}, registry = null) {
246
260
  console.log(chalk.green(` ✓ Removed ${b}`));
247
261
  } catch (err) {
248
262
  if (err.code === "EACCES") {
263
+ if (!(await canSudo())) throw new Error(`Permission denied removing ${b} — sudo not available`);
249
264
  console.log(chalk.cyan(` ▶ sudo rm ${b}`));
250
265
  await execa("sudo", ["rm", b], { stdio: "inherit", timeout: 10000 });
251
266
  } else {
@@ -379,6 +394,7 @@ export async function runDoctor(opts = {}, registry = null) {
379
394
  console.log(chalk.cyan(' ▶ start "" "Docker Desktop"'));
380
395
  await execa("cmd", ["/c", "start", "", "Docker Desktop"], { timeout: 10000 });
381
396
  } else {
397
+ if (!(await canSudo())) throw new Error("sudo not available — start Docker manually: sudo systemctl start docker");
382
398
  console.log(chalk.cyan(" ▶ sudo systemctl start docker"));
383
399
  await execa("sudo", ["systemctl", "start", "docker"], { stdio: "inherit", timeout: 30000 });
384
400
  return;
@@ -447,6 +463,7 @@ export async function runDoctor(opts = {}, registry = null) {
447
463
  console.log(chalk.cyan(' ▶ start "" "Docker Desktop"'));
448
464
  await execa("cmd", ["/c", "start", "", "Docker Desktop"], { timeout: 10000 });
449
465
  } else {
466
+ if (!(await canSudo())) throw new Error("sudo not available — install Docker manually: curl -fsSL https://get.docker.com | sudo sh");
450
467
  console.log(chalk.cyan(" ▶ curl -fsSL https://get.docker.com | sudo sh"));
451
468
  await execa("sh", ["-c", "curl -fsSL https://get.docker.com | sudo sh"], {
452
469
  stdio: "inherit", timeout: 300_000,
@@ -497,6 +514,7 @@ export async function runDoctor(opts = {}, registry = null) {
497
514
  await execa("winget", ["install", "Git.Git", "--accept-source-agreements", "--accept-package-agreements"], { stdio: "inherit", timeout: 300_000 });
498
515
  }
499
516
  } else {
517
+ if (!(await canSudo())) throw new Error("sudo not available — install git manually");
500
518
  console.log(chalk.cyan(" ▶ sudo apt-get install -y git"));
501
519
  await execa("sudo", ["apt-get", "install", "-y", "git"], { stdio: "inherit", timeout: 300_000 });
502
520
  }
@@ -540,6 +558,7 @@ export async function runDoctor(opts = {}, registry = null) {
540
558
  console.log(chalk.cyan(" ▶ brew install npm"));
541
559
  await execa("brew", ["install", "npm"], { stdio: "inherit", timeout: 300_000 });
542
560
  } else if (process.platform === "linux" || (process.platform === "win32" && useWsl)) {
561
+ if (!(await canSudo())) throw new Error("sudo not available — install npm manually");
543
562
  console.log(chalk.cyan(" ▶ sudo apt-get install -y npm"));
544
563
  await run("sudo", ["apt-get", "install", "-y", "npm"], { stdio: "inherit", timeout: 300_000 });
545
564
  } else {
@@ -573,9 +592,8 @@ export async function runDoctor(opts = {}, registry = null) {
573
592
  await execa("brew", ["install", "--cask", "1password-cli"], { stdio: "inherit", timeout: 300_000 });
574
593
  } else if (process.platform === "win32") {
575
594
  if (useWsl) {
595
+ if (!(await canSudo())) throw new Error("sudo not available — install 1Password CLI manually");
576
596
  console.log(chalk.cyan(" ▶ [WSL] Installing 1Password CLI via apt…"));
577
- // Authenticate sudo upfront so the long install chain doesn't hang at a password prompt
578
- await run("sudo", ["-v"], { stdio: "inherit", timeout: 30_000 });
579
597
  await run("sh", ["-c", [
580
598
  "curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --batch --yes --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg",
581
599
  '&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/$(dpkg --print-architecture) stable main" | sudo tee /etc/apt/sources.list.d/1password.list',
@@ -591,9 +609,8 @@ export async function runDoctor(opts = {}, registry = null) {
591
609
  }
592
610
  } else {
593
611
  // Linux — install via 1Password apt repository
594
- // Authenticate sudo upfront so the long install chain doesn't hang at a password prompt
612
+ if (!(await canSudo())) throw new Error("sudo not available install 1Password CLI manually");
595
613
  console.log(chalk.cyan(" ▶ Installing 1Password CLI via apt…"));
596
- await execa("sudo", ["-v"], { stdio: "inherit", timeout: 30_000 });
597
614
  await execa("sh", ["-c", [
598
615
  "curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --batch --yes --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg",
599
616
  '&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/$(dpkg --print-architecture) stable main" | sudo tee /etc/apt/sources.list.d/1password.list',
@@ -607,6 +624,7 @@ export async function runDoctor(opts = {}, registry = null) {
607
624
 
608
625
  // GitHub CLI
609
626
  const installGhLinux = async (runner = execa) => {
627
+ if (!(await canSudo())) throw new Error("sudo not available — install gh manually: https://cli.github.com");
610
628
  console.log(chalk.cyan(" ▶ Installing gh via apt…"));
611
629
  await runner("sh", ["-c", [
612
630
  "type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y)",
@@ -948,7 +966,11 @@ export async function runDoctor(opts = {}, registry = null) {
948
966
  }
949
967
  }
950
968
  } else if (ghcrLoggedIn) {
951
- fail("Docker logged into ghcr.io but pull access denied", "token may lack read:packages or write:packages scope", ghcrFixFn);
969
+ fail(
970
+ "Docker logged into ghcr.io but pull access denied",
971
+ "Docker has a stale token — fix: echo $(gh auth token) | docker login ghcr.io -u x-access-token --password-stdin",
972
+ ghcrFixFn,
973
+ );
952
974
  } else {
953
975
  fail("Docker not logged into ghcr.io", "needed to pull/push private images", ghcrFixFn);
954
976
  }
@@ -99,6 +99,10 @@ export function register(api) {
99
99
  } else if (synced > 0) {
100
100
  const files = synced === 1 ? ".env" : `${synced} .env files`;
101
101
  console.log(chalk.green(` ✓ ${totalSecrets} secret(s) synced → ${files}`));
102
+ // Update one-shot marker so auto-sync on next `fops up` is skipped
103
+ const markerDir = path.join(root, ".fops");
104
+ if (!fs.existsSync(markerDir)) fs.mkdirSync(markerDir, { recursive: true });
105
+ fs.writeFileSync(path.join(markerDir, ".1p-synced"), new Date().toISOString() + "\n");
102
106
  } else {
103
107
  console.log(chalk.dim(" Nothing to sync."));
104
108
  }
@@ -181,13 +185,18 @@ export function register(api) {
181
185
  },
182
186
  });
183
187
 
184
- // ── Hook: before:up — auto-sync secrets ────────────
188
+ // ── Hook: before:up — one-shot auto-sync secrets ───
185
189
  api.registerHook("before:up", async () => {
186
190
  if (!config.autoSync) return;
187
191
 
188
192
  const root = findRoot();
189
193
  if (!root) return;
190
194
 
195
+ // One-shot: skip if secrets were already synced once
196
+ const markerDir = path.join(root, ".fops");
197
+ const markerPath = path.join(markerDir, ".1p-synced");
198
+ if (fs.existsSync(markerPath)) return;
199
+
191
200
  const templates = discoverTemplates(root);
192
201
  if (templates.length === 0) return;
193
202
 
@@ -212,6 +221,9 @@ export function register(api) {
212
221
  } else if (synced > 0) {
213
222
  const files = synced === 1 ? ".env" : `${synced} .env files`;
214
223
  console.log(chalk.green(` ✓ ${totalSecrets} secret(s) synced → ${files}`));
224
+ // Mark as done so subsequent `fops up` calls skip auto-sync
225
+ if (!fs.existsSync(markerDir)) fs.mkdirSync(markerDir, { recursive: true });
226
+ fs.writeFileSync(markerPath, new Date().toISOString() + "\n");
215
227
  }
216
228
  });
217
229
 
@@ -138,8 +138,10 @@ export async function register(api) {
138
138
  } else if (process.platform === "linux") {
139
139
  const { exitCode } = await execa("which", ["apt-get"], { reject: false });
140
140
  if (exitCode !== 0) throw new Error("apt-get not found — use https://learn.microsoft.com/en-us/cli/azure/install-azure-cli for your distro");
141
- await execa("sudo", ["apt-get", "update"], { stdio: "inherit", timeout: 120000 });
142
- await execa("sudo", ["apt-get", "install", "-y", "azure-cli"], { stdio: "inherit", timeout: 120000 });
141
+ // Check sudo is available before attempting install
142
+ const sudoCheck = await execa("sudo", ["-n", "true"], { reject: false, timeout: 5000 });
143
+ if (sudoCheck.exitCode !== 0) throw new Error("sudo not available — install Azure CLI manually: curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash");
144
+ await execa("sudo", ["sh", "-c", "curl -sL https://aka.ms/InstallAzureCLIDeb | bash"], { stdio: "inherit", timeout: 300000 });
143
145
  } else {
144
146
  throw new Error("Auto-install only on macOS and Linux — use the Microsoft install link");
145
147
  }
@@ -148,7 +148,7 @@ function resolveFluxConfig(clusterName, opts) {
148
148
  return {
149
149
  fluxRepo: opts?.fluxRepo ?? tracked?.flux?.repo ?? az.fluxRepo ?? project?.fluxRepo ?? AKS_DEFAULTS.fluxRepo,
150
150
  fluxOwner: opts?.fluxOwner ?? tracked?.flux?.owner ?? az.fluxOwner ?? project?.fluxOwner ?? AKS_DEFAULTS.fluxOwner,
151
- fluxPath: opts?.fluxPath || tracked?.flux?.path || az.fluxPath || project?.fluxPath || AKS_DEFAULTS.fluxPath,
151
+ fluxPath: opts?.fluxPath || tracked?.flux?.path || az.fluxPath || project?.fluxPath || `clusters/${clusterName}`,
152
152
  fluxBranch: opts?.fluxBranch ?? tracked?.flux?.branch ?? az.fluxBranch ?? project?.fluxBranch ?? AKS_DEFAULTS.fluxBranch,
153
153
  };
154
154
  }
@@ -182,6 +182,107 @@ function requireCluster(name) {
182
182
  };
183
183
  }
184
184
 
185
+ // ── Flux local-repo scaffolding ───────────────────────────────────────────────
186
+
187
/**
 * Auto-detect the local flux repo clone.
 * Searches common relative paths from the project root (read from tracked
 * state) and from the current working directory.
 * @returns {string|null} Absolute path of a directory containing `clusters/`, or null.
 */
function findFluxLocalRepo() {
  const state = readState();
  const projectRoot = state.azure?.projectRoot || state.projectRoot;

  const candidates = [];
  if (projectRoot) {
    candidates.push(
      path.resolve(projectRoot, "..", "flux"),
      path.resolve(projectRoot, "flux"),
    );
  }
  candidates.push(path.resolve("../flux"), path.resolve("../../flux"));

  // A valid flux clone is recognized by its top-level `clusters/` directory.
  const hit = candidates.find((dir) => fs.existsSync(path.join(dir, "clusters")));
  return hit ?? null;
}
208
+
209
/**
 * Resolve the bundled cluster template directory shipped with the CLI.
 * Located relative to this module: `../templates/cluster`.
 * @returns {string} Absolute path to the template directory.
 */
function resolveClusterTemplate() {
  const hereDir = path.dirname(fileURLToPath(import.meta.url));
  return path.join(hereDir, "..", "templates", "cluster");
}
216
+
217
/**
 * Scaffold a new cluster directory in the local flux repo from the bundled
 * template, substituting {{CLUSTER_NAME}} and {{OVERLAY}} placeholders,
 * then commit and push the change (best-effort).
 *
 * @param {Function} execa - process runner (injected by caller)
 * @param {object} params
 * @param {string} params.clusterName - target cluster name
 * @param {string} params.fluxLocalRepo - absolute path to the local flux clone
 * @param {string} [params.overlay] - overlay name; defaults to "demo-azure"
 * @returns {Promise<boolean>} true when the cluster dir exists (created or pre-existing),
 *   false when the bundled template is missing.
 */
async function scaffoldFluxCluster(execa, { clusterName, fluxLocalRepo, overlay }) {
  const templateDir = resolveClusterTemplate();
  const destDir = path.join(fluxLocalRepo, "clusters", clusterName);

  if (!fs.existsSync(templateDir)) {
    console.log(WARN(` ⚠ Cluster template not found at ${templateDir}`));
    return false;
  }

  // Idempotent: never clobber an existing cluster directory.
  if (fs.existsSync(destDir)) {
    console.log(OK(` ✓ Cluster directory already exists: clusters/${clusterName}`));
    return true;
  }

  const vars = {
    "{{CLUSTER_NAME}}": clusterName,
    "{{OVERLAY}}": overlay || "demo-azure",
  };

  hint("Scaffolding Flux cluster manifests…");

  // Recursively copy the template tree, substituting placeholders in every file.
  function copyDir(src, dest) {
    fs.mkdirSync(dest, { recursive: true });
    for (const entry of fs.readdirSync(src, { withFileTypes: true })) {
      const srcPath = path.join(src, entry.name);
      const destPath = path.join(dest, entry.name);
      if (entry.isDirectory()) {
        copyDir(srcPath, destPath);
      } else {
        let content = fs.readFileSync(srcPath, "utf8");
        for (const [k, v] of Object.entries(vars)) {
          content = content.replaceAll(k, v);
        }
        fs.writeFileSync(destPath, content);
      }
    }
  }

  copyDir(templateDir, destDir);
  console.log(OK(` ✓ Cluster directory created: clusters/${clusterName}`));

  // Surface any placeholders the substitution didn't cover.
  // (Bug fix: the original collected these but never reported them.)
  const remaining = [];
  for (const line of fs.readFileSync(path.join(destDir, "kustomization.yaml"), "utf8").split("\n")) {
    const m = line.match(/\{\{(\w+)\}\}/g);
    if (m) remaining.push(...m);
  }
  if (remaining.length > 0) {
    console.log(WARN(` ⚠ Unsubstituted placeholders in kustomization.yaml: ${[...new Set(remaining)].join(", ")}`));
    hint(`Edit clusters/${clusterName}/kustomization.yaml before bootstrapping`);
  }

  // Git add + commit + push — best-effort: a failure leaves the scaffold in
  // place and tells the operator to push manually.
  hint("Committing and pushing to flux repo…");
  try {
    await execa("git", ["-C", fluxLocalRepo, "add", `clusters/${clusterName}`], { timeout: 15000 });
    await execa("git", ["-C", fluxLocalRepo, "commit", "-m", `Add cluster ${clusterName}`], { timeout: 15000 });
    await execa("git", ["-C", fluxLocalRepo, "push"], { timeout: 60000 });
    console.log(OK(` ✓ Pushed clusters/${clusterName} to flux repo`));
  } catch (err) {
    const msg = (err.stderr || err.message || "").split("\n")[0];
    console.log(WARN(` ⚠ Git push failed: ${msg}`));
    hint(`Manually commit and push clusters/${clusterName} in the flux repo`);
  }

  return true;
}
285
+
185
286
  // ── Flux helpers ──────────────────────────────────────────────────────────────
186
287
 
187
288
  async function ensureFluxCli(execa) {
@@ -244,6 +345,18 @@ export async function aksUp(opts = {}) {
244
345
  if (exists === 0) {
245
346
  console.log(WARN(`\n Cluster "${clusterName}" already exists — reconciling…`));
246
347
 
348
+ // Scaffold cluster directory if it doesn't exist yet
349
+ if (!opts.noFlux) {
350
+ const fluxLocalRepo = opts.fluxLocalRepo || findFluxLocalRepo();
351
+ if (fluxLocalRepo) {
352
+ await scaffoldFluxCluster(execa, {
353
+ clusterName,
354
+ fluxLocalRepo,
355
+ overlay: opts.overlay,
356
+ });
357
+ }
358
+ }
359
+
247
360
  const maxPods = opts.maxPods || 110;
248
361
  const ctx = { execa, clusterName, rg, sub, opts, minCount, maxCount, maxPods };
249
362
  await reconcileCluster(ctx);
@@ -347,7 +460,22 @@ export async function aksUp(opts = {}) {
347
460
  const fluxRepo = opts.fluxRepo ?? AKS_DEFAULTS.fluxRepo;
348
461
  const fluxOwner = opts.fluxOwner ?? AKS_DEFAULTS.fluxOwner;
349
462
  const fluxBranch = opts.fluxBranch ?? AKS_DEFAULTS.fluxBranch;
350
- const fluxPath = opts.fluxPath || AKS_DEFAULTS.fluxPath;
463
+ const fluxPath = opts.fluxPath || `clusters/${clusterName}`;
464
+
465
+ // Scaffold cluster directory in the flux repo before bootstrapping
466
+ if (!opts.noFlux) {
467
+ const fluxLocalRepo = opts.fluxLocalRepo || findFluxLocalRepo();
468
+ if (fluxLocalRepo) {
469
+ await scaffoldFluxCluster(execa, {
470
+ clusterName,
471
+ fluxLocalRepo,
472
+ templateCluster: opts.templateCluster,
473
+ });
474
+ } else {
475
+ console.log(WARN(" ⚠ Local flux repo not found — skipping cluster scaffolding."));
476
+ hint("Pass --flux-local-repo <path> or clone meshxdata/flux next to foundation-compose.");
477
+ }
478
+ }
351
479
 
352
480
  if (opts.noFlux) {
353
481
  console.log("");
@@ -13,7 +13,7 @@ export function hashContent(text) {
13
13
  }
14
14
 
15
15
  /**
16
- * Resolve Foundation credentials from env → .env~/.fops.json.
16
+ * Resolve Foundation credentials from env → ~/.fops.json → .env files.
17
17
  * Returns { bearerToken } or { user, password } or null.
18
18
  */
19
19
  export function resolveFoundationCreds() {
@@ -26,6 +26,25 @@ export function resolveFoundationCreds() {
26
26
  if (cfg.bearerToken?.trim()) return { bearerToken: cfg.bearerToken.trim() };
27
27
  if (cfg.user?.trim() && cfg.password) return { user: cfg.user.trim(), password: cfg.password };
28
28
  } catch { /* no fops.json */ }
29
+
30
+ // Fall back to .env files for credentials
31
+ const envCandidates = [pathMod.resolve(".env"), pathMod.resolve("..", ".env")];
32
+ try {
33
+ const raw = JSON.parse(fs.readFileSync(pathMod.join(os.homedir(), ".fops.json"), "utf8"));
34
+ if (raw?.projectRoot) envCandidates.unshift(pathMod.join(raw.projectRoot, ".env"));
35
+ } catch { /* ignore */ }
36
+ for (const ep of envCandidates) {
37
+ try {
38
+ const lines = fs.readFileSync(ep, "utf8").split("\n");
39
+ const get = (k) => {
40
+ const ln = lines.find((l) => l.startsWith(`${k}=`));
41
+ return ln ? ln.slice(k.length + 1).trim().replace(/^["']|["']$/g, "") : "";
42
+ };
43
+ const user = get("QA_USERNAME") || get("FOUNDATION_USERNAME");
44
+ const pass = get("QA_PASSWORD") || get("FOUNDATION_PASSWORD");
45
+ if (user && pass) return { user, password: pass };
46
+ } catch { /* try next */ }
47
+ }
29
48
  return null;
30
49
  }
31
50
 
@@ -41,12 +60,44 @@ export function suppressTlsWarning() {
41
60
  };
42
61
  }
43
62
 
63
+ /**
64
+ * Resolve Cloudflare Access service-token headers from env or .env files.
65
+ * Returns { "CF-Access-Client-Id": ..., "CF-Access-Client-Secret": ... } or {}.
66
+ */
67
+ let _cfAccessHeaders;
68
+ export function resolveCfAccessHeaders() {
69
+ if (_cfAccessHeaders !== undefined) return _cfAccessHeaders;
70
+ let id = process.env.CF_ACCESS_CLIENT_ID || "";
71
+ let secret = process.env.CF_ACCESS_CLIENT_SECRET || "";
72
+ if (!id) {
73
+ // Try .env files
74
+ const candidates = [pathMod.resolve(".env"), pathMod.resolve("..", ".env")];
75
+ try {
76
+ const raw = JSON.parse(fs.readFileSync(pathMod.join(os.homedir(), ".fops.json"), "utf8"));
77
+ if (raw?.projectRoot) candidates.unshift(pathMod.join(raw.projectRoot, ".env"));
78
+ } catch {}
79
+ for (const ep of candidates) {
80
+ try {
81
+ const lines = fs.readFileSync(ep, "utf8").split("\n");
82
+ const get = (k) => { const ln = lines.find((l) => l.startsWith(`${k}=`)); return ln ? ln.slice(k.length + 1).trim().replace(/^["']|["']$/g, "") : ""; };
83
+ id = id || get("CF_ACCESS_CLIENT_ID");
84
+ secret = secret || get("CF_ACCESS_CLIENT_SECRET");
85
+ if (id && secret) break;
86
+ } catch {}
87
+ }
88
+ }
89
+ _cfAccessHeaders = id && secret ? { "CF-Access-Client-Id": id, "CF-Access-Client-Secret": secret } : {};
90
+ return _cfAccessHeaders;
91
+ }
92
+
44
93
  export async function vmFetch(url, opts = {}) {
45
94
  suppressTlsWarning();
46
95
  const prev = process.env.NODE_TLS_REJECT_UNAUTHORIZED;
47
96
  process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";
48
97
  try {
49
- return await fetch(url, { signal: AbortSignal.timeout(10_000), ...opts });
98
+ const cfHeaders = resolveCfAccessHeaders();
99
+ const headers = { ...cfHeaders, ...(opts.headers || {}) };
100
+ return await fetch(url, { signal: AbortSignal.timeout(10_000), ...opts, headers });
50
101
  } finally {
51
102
  if (prev === undefined) delete process.env.NODE_TLS_REJECT_UNAUTHORIZED;
52
103
  else process.env.NODE_TLS_REJECT_UNAUTHORIZED = prev;
@@ -162,7 +213,7 @@ export async function resolveRemoteAuth(opts = {}) {
162
213
 
163
214
  const creds = resolveFoundationCreds();
164
215
  let qaUser = creds?.user || process.env.QA_USERNAME || process.env.FOUNDATION_USERNAME || "operator@local";
165
- let qaPass = creds?.password || process.env.QA_PASSWORD || "";
216
+ let qaPass = creds?.password || process.env.QA_PASSWORD || process.env.FOUNDATION_PASSWORD || "";
166
217
  let bearerToken = creds?.bearerToken || "";
167
218
 
168
219
  // 1) Use local bearer if it's a valid JWT
@@ -172,6 +223,13 @@ export async function resolveRemoteAuth(opts = {}) {
172
223
  bearerToken = "";
173
224
 
174
225
  // 2) Pre-auth against the backend /iam/login
226
+ const cfHeaders = resolveCfAccessHeaders();
227
+ const cfKeys = Object.keys(cfHeaders);
228
+ if (cfKeys.length) {
229
+ log(chalk.dim(` CF Access headers: ${cfKeys.join(", ")} (id=${cfHeaders["CF-Access-Client-Id"]?.slice(0, 8)}…)`));
230
+ } else {
231
+ log(chalk.yellow(" ⚠ No CF Access service token found (set CF_ACCESS_CLIENT_ID + CF_ACCESS_CLIENT_SECRET)"));
232
+ }
175
233
  if (qaUser && qaPass && apiUrl) {
176
234
  try {
177
235
  if (suppressTls) suppressTls();
@@ -180,8 +238,8 @@ export async function resolveRemoteAuth(opts = {}) {
180
238
  try {
181
239
  const resp = await fetch(`${apiUrl}/iam/login`, {
182
240
  method: "POST",
183
- headers: { "Content-Type": "application/json" },
184
- body: JSON.stringify({ username: qaUser, password: qaPass }),
241
+ headers: { "Content-Type": "application/json", ...cfHeaders },
242
+ body: JSON.stringify({ user: qaUser, password: qaPass }),
185
243
  signal: AbortSignal.timeout(10_000),
186
244
  });
187
245
  if (resp.ok) {
@@ -192,7 +250,9 @@ export async function resolveRemoteAuth(opts = {}) {
192
250
  return { bearerToken, qaUser, qaPass, useTokenMode: true };
193
251
  }
194
252
  } else {
195
- log(chalk.dim(` Local creds rejected: HTTP ${resp.status}`));
253
+ const body = await resp.text().catch(() => "");
254
+ log(chalk.dim(` Local creds rejected: HTTP ${resp.status} (user=${qaUser})`));
255
+ if (body) log(chalk.dim(` Response: ${body.slice(0, 200)}`));
196
256
  }
197
257
  } finally {
198
258
  if (prev === undefined) delete process.env.NODE_TLS_REJECT_UNAUTHORIZED;
@@ -275,7 +275,15 @@ export async function ensureAzAuth(execa, { subscription, throwOnMissing = false
275
275
  if (subscription) args.push("--subscription", subscription);
276
276
  const { stdout } = await execa("az", args, { timeout: 15000 });
277
277
  return JSON.parse(stdout);
278
- } catch {
278
+ } catch (err) {
279
+ if (isAzSessionExpiredError(err)) {
280
+ const { suggested } = parseAzReloginHint(err);
281
+ const msg = `Azure session expired (MFA). Run:\n ${suggested.replace(/\n/g, "\n ")}`;
282
+ if (throwOnMissing) throw new Error(msg);
283
+ console.error(chalk.yellow(`\n Azure session expired (MFA or token refresh required).`));
284
+ console.error(chalk.cyan(` Run: ${suggested.split("\n")[0]}\n`));
285
+ process.exit(1);
286
+ }
279
287
  const msg = "Not logged in to Azure. Run: az login";
280
288
  if (throwOnMissing) throw new Error(msg);
281
289
  console.error(chalk.red("\n Not logged in to Azure. Run: az login\n"));
@@ -447,7 +455,61 @@ async function refreshTokenViaGh(execa, missingScopes) {
447
455
  }
448
456
 
449
457
  export async function verifyGithubToken(token) {
450
- if (!token) return { token, login: undefined };
458
+ if (!token) {
459
+ // No token anywhere — try gh CLI auth
460
+ const execa = await lazyExeca();
461
+ try {
462
+ const { stdout: ghToken, exitCode } = await execa("gh", ["auth", "token", "-h", "github.com"], { timeout: 10000, reject: false });
463
+ const existing = (ghToken || "").trim();
464
+ if (exitCode === 0 && existing) {
465
+ console.log(chalk.cyan(" No token in env/netrc — using gh CLI token"));
466
+ token = existing;
467
+ }
468
+ } catch { /* gh not installed or not authed */ }
469
+
470
+ if (!token) {
471
+ // Still no token — offer interactive gh auth login
472
+ console.log(chalk.yellow("\n ⚠ No GitHub token found (checked --github-token, $GITHUB_TOKEN, ~/.netrc, gh CLI)"));
473
+ try {
474
+ const { exitCode: ghExists } = await execa("which", ["gh"], { reject: false, timeout: 5000 });
475
+ if (ghExists === 0) {
476
+ console.log(chalk.cyan(" ▶ Running gh auth login…\n"));
477
+ const { exitCode: loginExit } = await execa("gh", ["auth", "login", "-h", "github.com", "-s", "write:packages,repo"], { stdio: "inherit", reject: false, timeout: 300000 });
478
+ if (loginExit === 0) {
479
+ const { stdout: newToken } = await execa("gh", ["auth", "token", "-h", "github.com"], { timeout: 10000 });
480
+ token = (newToken || "").trim();
481
+ if (token) {
482
+ // Sync to .netrc for future use
483
+ const netrcPath = path.join(os.homedir(), ".netrc");
484
+ const entry = `machine github.com login x-access-token password ${token}`;
485
+ try {
486
+ let content = "";
487
+ try { content = fs.readFileSync(netrcPath, "utf8"); } catch {}
488
+ if (/^machine\s+github\.com\b/m.test(content)) {
489
+ content = content.replace(
490
+ /machine\s+github\.com\b[^\n]*(\n\s*(login|password)\s+[^\n]*)*/gm,
491
+ entry,
492
+ );
493
+ } else {
494
+ content = content.trimEnd() + (content ? "\n" : "") + entry + "\n";
495
+ }
496
+ fs.writeFileSync(netrcPath, content, { mode: 0o600 });
497
+ console.log(chalk.green(" ✓ ~/.netrc updated"));
498
+ } catch {}
499
+ }
500
+ }
501
+ } else {
502
+ console.log(chalk.dim(" Install gh CLI to authenticate: https://cli.github.com"));
503
+ }
504
+ } catch {}
505
+
506
+ if (!token) {
507
+ console.error(chalk.red(" ✗ GitHub authentication required — GHCR pulls will fail without a token."));
508
+ console.error(chalk.dim(" Set $GITHUB_TOKEN, run gh auth login, or pass --github-token.\n"));
509
+ process.exit(1);
510
+ }
511
+ }
512
+ }
451
513
  const execa = await lazyExeca();
452
514
  try {
453
515
  let res = await fetch("https://api.github.com/user", {