@meshxdata/fops 0.1.55 → 0.1.57

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +4 -4
  2. package/package.json +1 -2
  3. package/src/commands/index.js +2 -0
  4. package/src/commands/k3s-cmd.js +124 -0
  5. package/src/commands/lifecycle.js +7 -0
  6. package/src/plugins/builtins/docker-compose.js +17 -35
  7. package/src/plugins/bundled/fops-plugin-azure/lib/azure-openai.js +0 -3
  8. package/src/plugins/bundled/fops-plugin-azure/lib/commands/vm-cmds.js +5 -2
  9. package/src/plugins/bundled/fops-plugin-cloud/api.js +14 -0
  10. package/src/project.js +12 -7
  11. package/src/plugins/bundled/fops-plugin-cloud/ui/postcss.config.cjs +0 -5
  12. package/src/plugins/bundled/fops-plugin-cloud/ui/src/App.jsx +0 -32
  13. package/src/plugins/bundled/fops-plugin-cloud/ui/src/api/client.js +0 -114
  14. package/src/plugins/bundled/fops-plugin-cloud/ui/src/api/queries.js +0 -111
  15. package/src/plugins/bundled/fops-plugin-cloud/ui/src/components/LogPanel.jsx +0 -162
  16. package/src/plugins/bundled/fops-plugin-cloud/ui/src/components/ThemeToggle.jsx +0 -46
  17. package/src/plugins/bundled/fops-plugin-cloud/ui/src/css/additional-styles/utility-patterns.css +0 -147
  18. package/src/plugins/bundled/fops-plugin-cloud/ui/src/css/style.css +0 -138
  19. package/src/plugins/bundled/fops-plugin-cloud/ui/src/favicon.svg +0 -15
  20. package/src/plugins/bundled/fops-plugin-cloud/ui/src/lib/utils.ts +0 -19
  21. package/src/plugins/bundled/fops-plugin-cloud/ui/src/main.jsx +0 -25
  22. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Audit.jsx +0 -164
  23. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Costs.jsx +0 -305
  24. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/CreateResource.jsx +0 -285
  25. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Fleet.jsx +0 -307
  26. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Resources.jsx +0 -229
  27. package/src/plugins/bundled/fops-plugin-cloud/ui/src/partials/Header.jsx +0 -132
  28. package/src/plugins/bundled/fops-plugin-cloud/ui/src/partials/Sidebar.jsx +0 -174
  29. package/src/plugins/bundled/fops-plugin-cloud/ui/src/partials/SidebarLinkGroup.jsx +0 -21
  30. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/AuthContext.jsx +0 -170
  31. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/Info.jsx +0 -49
  32. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/ThemeContext.jsx +0 -37
  33. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/Transition.jsx +0 -116
  34. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/Utils.js +0 -63
package/CHANGELOG.md CHANGED
@@ -1,5 +1,7 @@
1
- ## [0.1.55] - 2026-03-26
1
+ ## [0.1.57] - 2026-03-26
2
2
 
3
+ - restore all missing services (pgpool, exporters, grafana, etc), add loki to k3s profile, always activate loki profile in fops up (4e2744a)
4
+ - fix: grafana alert-rules provisioning, ENVIRONMENT_NAME from --url, k3s secret sync, vm-sizes endpoint, project root resolution (9839052)
3
5
  - feat(azure): add 'fops azure reconcile <name>' command for VM drift fix (79ba6e2)
4
6
  - fix(otel,loki): remove duplicate spanmetrics dimensions, use .env for loki S3 creds (e3d1def)
5
7
  - fix(loki): pass S3 credentials from .env so loki works without vault-init (c57906d)
@@ -178,14 +180,12 @@
178
180
  - azure stack index.js split (de12272)
179
181
  - Bump ajv from 8.17.1 to 8.18.0 in /operator-cli (76da21f)
180
182
  - packer (9665fbc)
181
- - remove stack api (db0fd4d)
182
- - packer cleanup (fe1bf14)
183
183
 
184
184
  # Changelog
185
185
 
186
186
  All notable changes to @meshxdata/fops (Foundation Operator CLI) are documented here.
187
187
 
188
- ## [0.1.54] - 2026-03-26
188
+ ## [0.1.56] - 2026-03-26
189
189
 
190
190
  - feat(azure): add 'fops azure reconcile <name>' command for VM drift fix (79ba6e2)
191
191
  - fix(otel,loki): remove duplicate spanmetrics dimensions, use .env for loki S3 creds (e3d1def)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@meshxdata/fops",
3
- "version": "0.1.55",
3
+ "version": "0.1.57",
4
4
  "description": "CLI to install and manage data mesh platforms",
5
5
  "keywords": [
6
6
  "fops",
@@ -17,7 +17,6 @@
17
17
  "fops.mjs",
18
18
  "src/",
19
19
  "!src/**/*.test.js",
20
- "!src/**/node_modules",
21
20
  "!scripts/",
22
21
  "README.md",
23
22
  "CHANGELOG.md"
@@ -7,6 +7,7 @@ import { registerPluginCommands } from "./plugin-cmd.js";
7
7
  import { registerIntegrationCommands } from "./integration-cmd.js";
8
8
  import { registerCompletionCommand } from "./completion.js";
9
9
  import { registerEditCommands } from "./edit-cmd.js";
10
+ import { registerK3sCommands } from "./k3s-cmd.js";
10
11
  import { configureColorHelp } from "./help.js";
11
12
 
12
13
  export function registerCommands(program, registry) {
@@ -43,5 +44,6 @@ export function registerCommands(program, registry) {
43
44
  registerPluginCommands(program, registry);
44
45
  registerIntegrationCommands(program, registry);
45
46
  registerEditCommands(program);
47
+ registerK3sCommands(program);
46
48
  registerCompletionCommand(program);
47
49
  }
@@ -0,0 +1,124 @@
1
+ import path from "node:path";
2
+ import chalk from "chalk";
3
+ import { requireRoot } from "../project.js";
4
+
5
+ const DIM = chalk.dim;
6
+ const OK = chalk.green;
7
+ const ERR = chalk.red;
8
+ const WARN = chalk.yellow;
9
+
10
+ const K3S_KUBECTL = ["exec", "-e", "KUBECONFIG=/etc/rancher/k3s/k3s.yaml", "k3s-server", "kubectl"];
11
+ const K3S_KUBECTL_I = ["exec", "-i", "-e", "KUBECONFIG=/etc/rancher/k3s/k3s.yaml", "k3s-server", "kubectl"];
12
+
13
+ async function kubectlApply(execa, args) {
14
+ const create = await execa("docker", [
15
+ ...K3S_KUBECTL, "create", ...args, "--dry-run=client", "-o", "yaml",
16
+ ], { timeout: 15000, reject: false });
17
+ if (create.exitCode !== 0) throw new Error(`kubectl create failed: ${create.stderr}`);
18
+ const apply = await execa("docker", [
19
+ ...K3S_KUBECTL_I, "apply", "-f", "-",
20
+ ], { input: create.stdout, timeout: 15000, reject: false });
21
+ if (apply.exitCode !== 0) throw new Error(`kubectl apply failed: ${apply.stderr}`);
22
+ }
23
+
24
+ async function syncSecrets(root) {
25
+ const { execa } = await import("execa");
26
+ const { loadEnvFromFile } = await import("../utils/load-env.js");
27
+
28
+ // Check k3s is running
29
+ const { exitCode, stdout } = await execa("docker", [
30
+ ...K3S_KUBECTL, "get", "nodes",
31
+ ], { timeout: 10000, reject: false });
32
+ if (exitCode !== 0 || !/Ready/.test(stdout)) {
33
+ console.error(ERR(" k3s cluster is not reachable. Is it running?"));
34
+ console.error(DIM(" Start it with: fops up --k3s"));
35
+ return;
36
+ }
37
+
38
+ // Load credentials from .env (same resolution as setup-kubernetes.sh)
39
+ const env = loadEnvFromFile(path.join(root, ".env"));
40
+ const s3Id = env.BOOTSTRAP_STORAGE_ACCESS_KEY || env.AUTH_IDENTITY || "minio";
41
+ const s3Pw = env.BOOTSTRAP_STORAGE_SECRET_KEY || env.AUTH_CREDENTIAL || "minio123";
42
+
43
+ console.log(DIM(` Storage credentials: ${s3Id} / ${"*".repeat(Math.min(s3Pw.length, 8))}`));
44
+
45
+ // 1. storage-secret in foundation namespace
46
+ console.log(DIM(" Creating storage-secret (foundation)..."));
47
+ await kubectlApply(execa, [
48
+ "secret", "generic", "storage-secret",
49
+ `--from-literal=ACCESS_KEY=${s3Id}`,
50
+ `--from-literal=SECRET_KEY=${s3Pw}`,
51
+ "--namespace=foundation",
52
+ ]);
53
+ console.log(OK(" ✓ storage-secret"));
54
+
55
+ // 2. foundation-storage-engine-auth in foundation namespace
56
+ console.log(DIM(" Creating foundation-storage-engine-auth (foundation)..."));
57
+ await kubectlApply(execa, [
58
+ "secret", "generic", "foundation-storage-engine-auth",
59
+ `--from-literal=AUTH_IDENTITY=${s3Id}`,
60
+ `--from-literal=AUTH_CREDENTIAL=${s3Pw}`,
61
+ "--namespace=foundation",
62
+ ]);
63
+ console.log(OK(" ✓ foundation-storage-engine-auth"));
64
+
65
+ // 3. Default storage system secret for Spark jobs
66
+ const sparkSecretName = "00000000-0000-0000-0000-000000000001-secret";
67
+ console.log(DIM(` Creating ${sparkSecretName} (spark-jobs)...`));
68
+ await kubectlApply(execa, [
69
+ "secret", "generic", sparkSecretName,
70
+ `--from-literal=AWS_ACCESS_KEY_ID=${s3Id}`,
71
+ `--from-literal=AWS_SECRET_ACCESS_KEY=${s3Pw}`,
72
+ `--from-literal=AWS_SECRET_ACCESS_KEY_ID=${s3Pw}`,
73
+ "--from-literal=AWS_ENDPOINT_LOCATION=http://foundation-storage-engine:8080",
74
+ "--from-literal=AWS_REGION=me-central-1",
75
+ "--from-literal=MLFLOW_S3_ENDPOINT_URL=http://foundation-storage-engine:8080",
76
+ "--namespace=spark-jobs",
77
+ ]);
78
+
79
+ // Re-apply label
80
+ await execa("docker", [
81
+ ...K3S_KUBECTL, "label", "secret", sparkSecretName,
82
+ "foundation.io/data-system=storage",
83
+ "--namespace=spark-jobs", "--overwrite",
84
+ ], { timeout: 10000, reject: false });
85
+ console.log(OK(` ✓ ${sparkSecretName} (with label)`));
86
+
87
+ console.log(OK("\n ✓ All storage secrets synced to k3s"));
88
+ }
89
+
90
+ export function registerK3sCommands(program) {
91
+ const k3s = program
92
+ .command("k3s")
93
+ .description("Manage local k3s Kubernetes cluster");
94
+
95
+ k3s
96
+ .command("sync-secrets")
97
+ .description("Sync storage secrets from .env into k3s (fixes S3 AccessDenied)")
98
+ .action(async () => {
99
+ const root = requireRoot(program);
100
+ try {
101
+ await syncSecrets(root);
102
+ } catch (err) {
103
+ console.error(ERR(` ✗ ${err.message}`));
104
+ process.exitCode = 1;
105
+ }
106
+ });
107
+
108
+ k3s
109
+ .command("exec [cmd...]")
110
+ .description("Run a command inside k3s (default: interactive shell)")
111
+ .option("-n, --namespace <ns>", "Kubernetes namespace", "spark-jobs")
112
+ .action(async (cmd, opts) => {
113
+ const { execaNode } = await import("execa");
114
+ const { execSync } = await import("node:child_process");
115
+ const args = cmd.length
116
+ ? [...K3S_KUBECTL, "-n", opts.namespace, ...cmd]
117
+ : ["exec", "-it", "-e", "KUBECONFIG=/etc/rancher/k3s/k3s.yaml", "k3s-server", "/bin/sh"];
118
+ try {
119
+ execSync(`docker ${args.map(a => a.includes(" ") ? `"${a}"` : a).join(" ")}`, { stdio: "inherit" });
120
+ } catch (err) {
121
+ if (err.status) process.exitCode = err.status;
122
+ }
123
+ });
124
+ }
@@ -562,6 +562,12 @@ async function runUp(program, registry, opts) {
562
562
  fs.writeFileSync(envPath, envContent);
563
563
  console.log(chalk.dim(` FOUNDATION_PUBLIC_URL=${publicUrl} written to .env`));
564
564
  }
565
+ // Derive and set ENVIRONMENT_NAME from URL (e.g. https://staging.meshx.app → Staging)
566
+ const envName = publicUrl.replace(/https?:\/\//, "").split(".")[0] || "Local";
567
+ const environmentName = envName.charAt(0).toUpperCase() + envName.slice(1);
568
+ envContent = envContent.replace(/^ENVIRONMENT_NAME=.*\n?/m, "");
569
+ envContent = envContent.trimEnd() + `\nENVIRONMENT_NAME=${environmentName}\n`;
570
+ fs.writeFileSync(envPath, envContent);
565
571
  } else {
566
572
  // Local: only inject localhost fallback if not already set
567
573
  if (!/^FOUNDATION_PUBLIC_URL=/m.test(envContent)) {
@@ -577,6 +583,7 @@ async function runUp(program, registry, opts) {
577
583
  const envProfiles = (process.env.COMPOSE_PROFILES || "").split(",").map(s => s.trim()).filter(Boolean);
578
584
  const activeProfiles = new Set(envProfiles);
579
585
  if (opts.k3s) activeProfiles.add("k3s");
586
+ activeProfiles.add("loki");
580
587
  if (publicUrl) {
581
588
  if (opts.traefik) activeProfiles.add("traefik");
582
589
  try {
@@ -486,60 +486,42 @@ You manage Docker Compose stacks: inspect containers, read logs, restart service
486
486
  ## Role
487
487
  You investigate alerts, diagnose service failures, and suggest fixes. You have direct access to Docker containers, logs, and system metrics. You are called by the Glue bot when monitoring alerts fire.
488
488
 
489
- ## CRITICAL: Always Use Tools — Never Guess
490
- You MUST use your tools to investigate. NEVER give generic checklists or ask the user to check things manually.
491
- - Don't say "check the logs for X" — run compose_logs and find X yourself.
492
- - Don't say "verify auth config" — run compose_inspect or compose_exec to read the actual config.
493
- - Don't say "correlate with metrics" — run compose_stats and report the numbers.
494
- - If you need to grep logs, use compose_exec with grep/jq inside the container.
495
- - Every finding in your response must be backed by tool output, not speculation.
496
-
497
489
  ## Tools Available
498
490
  - **compose_ps**: List all containers and their status (start here)
499
- - **compose_logs**: Read container logs (check for errors, crashes, OOM). Use the tail parameter to get recent logs, and grep for specific patterns.
491
+ - **compose_logs**: Read container logs (check for errors, crashes, OOM)
500
492
  - **compose_inspect**: Get container details (health checks, env vars, mounts, restarts)
501
493
  - **compose_stats**: CPU/memory/network usage per container
502
- - **compose_exec**: Run commands inside containers (e.g. grep logs, check disk, curl endpoints, read config files, test connectivity)
494
+ - **compose_exec**: Run commands inside containers (e.g. check disk, network, processes)
503
495
  - **compose_images**: List images and versions
504
- - **compose_restart**: Restart specific services (only after diagnosing the issue)
496
+ - **compose_restart**: Restart specific services
505
497
  - **embeddings_search**: Search docs, configs, and past knowledge for context
506
498
 
507
499
  ## Investigation Approach
508
500
  1. **Triage**: Run compose_ps to see overall stack health. Identify unhealthy/restarting containers.
509
- 2. **Deep dive**: For each affected container, use MULTIPLE tools:
510
- - compose_logs with tail=200 to find errors, then grep for specific patterns (4xx, 5xx, OOM, connection refused, timeout)
511
- - compose_exec to grep logs for specific status codes: e.g. grep -c "HTTP/1.1 4" or check config files
512
- - compose_inspect for health check failures, restart count, resource limits, env vars
513
- - compose_stats for CPU/memory report actual numbers (e.g. "backend: 450MB/512MB, 85% memory")
514
- 3. **Correlate**: If you see errors, trace them to the root service. Check dependencies (postgres, kafka, storage-engine).
515
- 4. **Root cause**: State the specific cause with evidence from tool output.
516
- 5. **Fix**: Take action if safe (restart a crashed container) or give a specific command to run.
501
+ 2. **Diagnose**: For each affected container:
502
+ - compose_logs to find errors, exceptions, OOM kills, crash traces
503
+ - compose_inspect for health check failures, restart count, resource limits
504
+ - compose_stats for CPU/memory spikes
505
+ 3. **Context**: Use embeddings_search to find relevant docs or known issues.
506
+ 4. **Root cause**: Correlate findings is it a code bug, resource exhaustion, dependency failure, config issue?
507
+ 5. **Fix**: Suggest specific actions (restart, config change, scale, rollback).
517
508
 
518
509
  ## Output Format
519
- Structure your response with blank lines between each section:
520
-
521
- **Status:** One-line summary (e.g. "Processor container restarting due to OOM")
522
-
523
- **Findings:** Specific evidence from tools (include actual log lines, numbers, status codes)
524
-
525
- **Root Cause:** Most likely cause, backed by evidence
526
-
527
- **Actions:** Specific steps to fix (commands, not vague suggestions)
528
-
529
- **Prevention:** How to avoid this in the future
510
+ Structure your response as:
511
+ - **Status**: One-line summary (e.g. "Processor container restarting due to OOM")
512
+ - **Findings**: What you discovered from each tool
513
+ - **Root Cause**: Most likely cause
514
+ - **Actions**: Specific steps to fix
515
+ - **Prevention**: How to avoid this in the future
530
516
 
531
517
  ## Rules
532
518
  - Always check compose_ps first.
533
- - USE TOOLS AGGRESSIVELY. Run 5-10 tool calls per investigation, not 1-2.
534
519
  - Check logs BEFORE suggesting restarts.
535
- - When investigating HTTP errors: grep the actual logs for status codes and show the top error endpoints.
536
- - When investigating performance: show actual CPU/memory numbers from compose_stats.
537
520
  - Look for patterns: repeated restarts, OOM kills, connection refused, timeout errors.
538
521
  - If a dependency is down (postgres, kafka), flag it — fixing the dependency fixes the dependent.
539
522
  - Be concise — this output goes into a Glue chat thread.
540
523
  - Never suggest 'docker compose down' — prefer targeted restarts.
541
- - After restarting, verify with compose_ps.
542
- - IMPORTANT: Always put a blank line between sections in your response so they render as separate paragraphs.`,
524
+ - After restarting, verify with compose_ps.`,
543
525
  });
544
526
 
545
527
  // ── Doctor check: Trivy ───────────────────────────────────────────
@@ -55,9 +55,6 @@ async function ensureAzAuth(execa, { subscription } = {}) {
55
55
  /** Resolve Foundation project root (directory with docker-compose). Exported for use by azure agent sync. */
56
56
  export function findProjectRoot() {
57
57
  const cwd = process.cwd();
58
- if (process.env.FOUNDATION_ROOT && fs.existsSync(process.env.FOUNDATION_ROOT)) {
59
- return path.resolve(process.env.FOUNDATION_ROOT);
60
- }
61
58
  try {
62
59
  const cfgPath = path.join(process.env.HOME || process.env.USERPROFILE || "", ".fops.json");
63
60
  if (fs.existsSync(cfgPath)) {
@@ -263,7 +263,7 @@ export function registerVmCommands(azure, api, registry) {
263
263
  if (opts.dai) opts.k3s = true;
264
264
  const {
265
265
  lazyExeca, ensureAzCli, ensureAzAuth, resolveGithubToken, verifyGithubToken,
266
- reconcileVm, DEFAULTS,
266
+ reconcileVm, DEFAULTS, buildDefaultUrl,
267
267
  } = await import("../azure.js");
268
268
  const { resolveCfToken } = await import("../cloudflare.js");
269
269
  const { readVmState, writeVmState } = await import("../azure-state.js");
@@ -279,7 +279,10 @@ export function registerVmCommands(azure, api, registry) {
279
279
  process.exit(1);
280
280
  }
281
281
  const rg = tracked.resourceGroup;
282
- const desiredUrl = opts.url || tracked.publicUrl;
282
+ const storedUrl = tracked.publicUrl;
283
+ // If the stored URL is IP-based, prefer the default domain-based URL
284
+ const isIpUrl = storedUrl && /^https?:\/\/\d+\.\d+\.\d+\.\d+/.test(storedUrl);
285
+ const desiredUrl = opts.url || (isIpUrl ? buildDefaultUrl(name) : storedUrl);
283
286
  const cfToken = resolveCfToken(opts.cfToken);
284
287
  const { publicIp, publicUrl, rg: actualRg } = await reconcileVm(execa, {
285
288
  vmName: name, rg, sub, subId, location: tracked.location || DEFAULTS.location,
@@ -333,6 +333,20 @@ export function createCloudApi(registry) {
333
333
  return c.json(providers);
334
334
  });
335
335
 
336
+ // ── VM Sizes ────────────────────────────────────────────
337
+
338
+ app.get("/vm-sizes", (c) => {
339
+ return c.json([
340
+ "Standard_D4s_v5",
341
+ "Standard_D8s_v5",
342
+ "Standard_D16s_v5",
343
+ "Standard_D32s_v5",
344
+ "Standard_D48s_v5",
345
+ "Standard_D64s_v5",
346
+ "Standard_D96s_v5",
347
+ ]);
348
+ });
349
+
336
350
  // ── Resources ───────────────────────────────────────────
337
351
 
338
352
  app.get("/resources", async (c) => {
package/src/project.js CHANGED
@@ -32,8 +32,8 @@ function saveProjectRoot(root) {
32
32
  const configPath = path.join(os.homedir(), ".fops.json");
33
33
  let config = {};
34
34
  try { config = JSON.parse(fs.readFileSync(configPath, "utf8")); } catch {}
35
- if (config.projectRoot === root) return; // already saved
36
- config.projectRoot = root;
35
+ if (config.foundationRoot === root || config.projectRoot === root) return; // already saved
36
+ config.foundationRoot = root;
37
37
  try {
38
38
  fs.mkdirSync(path.dirname(configPath), { recursive: true });
39
39
  fs.writeFileSync(configPath, JSON.stringify(config, null, 2) + "\n");
@@ -41,14 +41,19 @@ function saveProjectRoot(root) {
41
41
  }
42
42
 
43
43
  export function rootDir(cwd = process.cwd()) {
44
+
45
+ // Check FOUNDATION_ROOT env var first (explicit override)
44
46
  const envRoot = process.env.FOUNDATION_ROOT;
45
- if (envRoot && fs.existsSync(envRoot)) return path.resolve(envRoot);
47
+ if (envRoot && isFoundationRoot(envRoot)) {
48
+ return path.resolve(envRoot);
49
+ }
46
50
 
47
- // Check ~/.fops.json for saved project root
51
+ // Check ~/.fops.json for saved project root (projectRoot or foundationRoot)
48
52
  try {
49
53
  const fopsConfig = JSON.parse(fs.readFileSync(path.join(os.homedir(), ".fops.json"), "utf8"));
50
- if (fopsConfig.projectRoot && isFoundationRoot(fopsConfig.projectRoot)) {
51
- return path.resolve(fopsConfig.projectRoot);
54
+ const configRoot = fopsConfig.foundationRoot || fopsConfig.projectRoot;
55
+ if (configRoot && isFoundationRoot(configRoot)) {
56
+ return path.resolve(configRoot);
52
57
  }
53
58
  } catch {}
54
59
 
@@ -104,7 +109,7 @@ export function requireRoot(program) {
104
109
  console.error(
105
110
  chalk.red("Not a Foundation project (no docker-compose + Makefile).")
106
111
  );
107
- console.error(chalk.dim(" Run `fops init` to set up, or set FOUNDATION_ROOT."));
112
+ console.error(chalk.dim(" Run `fops init` to set up, or run from the foundation-compose directory."));
108
113
  program.error("", { exitCode: 1 });
109
114
  }
110
115
  return r;
@@ -1,5 +0,0 @@
1
- module.exports = {
2
- plugins: {
3
- "@tailwindcss/postcss": {},
4
- },
5
- };
@@ -1,32 +0,0 @@
1
- import React, { useEffect } from "react";
2
- import { Routes, Route, useLocation } from "react-router-dom";
3
-
4
- import "./css/style.css";
5
-
6
- import Resources from "./pages/Resources";
7
- import CreateResource from "./pages/CreateResource";
8
- import Fleet from "./pages/Fleet";
9
- import Costs from "./pages/Costs";
10
- import Audit from "./pages/Audit";
11
-
12
- function App() {
13
- const location = useLocation();
14
-
15
- useEffect(() => {
16
- document.querySelector("html").style.scrollBehavior = "auto";
17
- window.scroll({ top: 0 });
18
- document.querySelector("html").style.scrollBehavior = "";
19
- }, [location.pathname]);
20
-
21
- return (
22
- <Routes>
23
- <Route exact path="/" element={<Resources />} />
24
- <Route path="/resources/new" element={<CreateResource />} />
25
- <Route path="/fleet" element={<Fleet />} />
26
- <Route path="/costs" element={<Costs />} />
27
- <Route path="/audit" element={<Audit />} />
28
- </Routes>
29
- );
30
- }
31
-
32
- export default App;
@@ -1,114 +0,0 @@
1
- const BASE = "/cloud/api";
2
-
3
- // Token getter — set by AuthContext once Auth0 is initialized
4
- let _getToken = null;
5
-
6
- export function setTokenGetter(fn) {
7
- _getToken = fn;
8
- }
9
-
10
- async function authHeaders(extra = {}) {
11
- if (!_getToken) return extra;
12
- try {
13
- const token = await _getToken();
14
- return { ...extra, Authorization: `Bearer ${token}` };
15
- } catch {
16
- return extra;
17
- }
18
- }
19
-
20
- export async function apiFetch(path, opts = {}) {
21
- const headers = await authHeaders({ "Content-Type": "application/json", ...opts.headers });
22
- const res = await fetch(`${BASE}${path}`, { ...opts, headers });
23
- if (!res.ok) {
24
- const body = await res.json().catch(() => ({}));
25
- throw new Error(body.error || `HTTP ${res.status}`);
26
- }
27
- return res.json();
28
- }
29
-
30
- /**
31
- * Make a streaming POST/DELETE request and call onLine for each SSE event.
32
- * Returns the final result from the "done" event, or throws on "error".
33
- */
34
- export async function apiStream(path, { method = "POST", body, onLine, onJobId } = {}) {
35
- const headers = await authHeaders({ "Content-Type": "application/json" });
36
- const res = await fetch(`${BASE}${path}`, {
37
- method,
38
- headers,
39
- body: body ? JSON.stringify(body) : undefined,
40
- });
41
-
42
- if (!res.ok) {
43
- const err = await res.json().catch(() => ({}));
44
- throw new Error(err.error || `HTTP ${res.status}`);
45
- }
46
-
47
- const reader = res.body.getReader();
48
- const decoder = new TextDecoder();
49
- let buffer = "";
50
- let finalResult = null;
51
- let finalError = null;
52
-
53
- while (true) {
54
- const { done, value } = await reader.read();
55
- if (done) break;
56
- buffer += decoder.decode(value, { stream: true });
57
-
58
- const lines = buffer.split("\n");
59
- buffer = lines.pop();
60
-
61
- for (const line of lines) {
62
- if (!line.startsWith("data: ")) continue;
63
- try {
64
- const evt = JSON.parse(line.slice(6));
65
- if (evt.type === "job") {
66
- onJobId?.(evt.jobId);
67
- } else if (evt.type === "done") {
68
- finalResult = evt.result;
69
- onLine?.("\u2713 Operation complete", "done");
70
- } else if (evt.type === "error" && !evt.text?.startsWith(" ")) {
71
- finalError = evt.text;
72
- } else if (evt.type === "log" || evt.type === "error") {
73
- onLine?.(evt.text, evt.type);
74
- }
75
- } catch { /* ignore */ }
76
- }
77
- }
78
-
79
- if (finalError) throw new Error(finalError);
80
- return finalResult;
81
- }
82
-
83
- /**
84
- * Poll a job's buffered logs for reconnection after page reload.
85
- */
86
- export async function pollJob(jobId, onLine) {
87
- let offset = 0;
88
-
89
- while (true) {
90
- const headers = await authHeaders();
91
- const res = await fetch(`${BASE}/jobs/${jobId}?since=${offset}`, { headers });
92
- if (!res.ok) {
93
- if (res.status === 404) throw new Error("Job not found \u2014 it may have expired");
94
- throw new Error(`HTTP ${res.status}`);
95
- }
96
-
97
- const data = await res.json();
98
-
99
- for (const log of data.logs) {
100
- onLine?.(log.text, log.type);
101
- }
102
- offset = data.offset + data.logs.length;
103
-
104
- if (data.status === "done") {
105
- onLine?.("\u2713 Operation complete", "done");
106
- return { status: "done", result: data.result };
107
- }
108
- if (data.status === "error") {
109
- return { status: "error", error: data.error };
110
- }
111
-
112
- await new Promise((r) => setTimeout(r, 1000));
113
- }
114
- }
@@ -1,111 +0,0 @@
1
- import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query";
2
- import { apiFetch, apiStream } from "./client";
3
-
4
- export function useResources() {
5
- return useQuery({ queryKey: ["resources"], queryFn: () => apiFetch("/resources") });
6
- }
7
-
8
- export function useHealth() {
9
- return useQuery({ queryKey: ["health"], queryFn: () => apiFetch("/health") });
10
- }
11
-
12
- export function useCosts(days = 30) {
13
- return useQuery({
14
- queryKey: ["costs", days],
15
- queryFn: () => apiFetch(`/costs?days=${days}`),
16
- staleTime: 5 * 60 * 1000, // cache for 5 minutes — cost queries are slow
17
- });
18
- }
19
-
20
- export function useFleet() {
21
- return useQuery({ queryKey: ["fleet"], queryFn: () => apiFetch("/fleet") });
22
- }
23
-
24
- export function useAudit() {
25
- return useQuery({ queryKey: ["audit"], queryFn: () => apiFetch("/audit"), staleTime: 300_000 });
26
- }
27
-
28
- /**
29
- * Streaming mutations — accept an onLine callback to show live output.
30
- */
31
-
32
- export function useSyncResources() {
33
- const qc = useQueryClient();
34
- return useMutation({
35
- mutationFn: ({ onLine } = {}) =>
36
- apiStream("/sync", { onLine }),
37
- onSuccess: () => {
38
- qc.invalidateQueries({ queryKey: ["resources"] });
39
- qc.invalidateQueries({ queryKey: ["fleet"] });
40
- qc.invalidateQueries({ queryKey: ["health"] });
41
- },
42
- });
43
- }
44
-
45
- export function useResourceAction() {
46
- const qc = useQueryClient();
47
- return useMutation({
48
- mutationFn: ({ type, name, action, onLine }) =>
49
- apiStream(`/resources/${type}/${name}/${action}`, { onLine }),
50
- onSuccess: () => qc.invalidateQueries({ queryKey: ["resources"] }),
51
- });
52
- }
53
-
54
- export function useDeleteResource() {
55
- const qc = useQueryClient();
56
- return useMutation({
57
- mutationFn: ({ type, name, onLine }) =>
58
- apiStream(`/resources/${type}/${name}`, { method: "DELETE", onLine }),
59
- onSuccess: () => qc.invalidateQueries({ queryKey: ["resources"] }),
60
- });
61
- }
62
-
63
- export function useFeatureFlags(vmName) {
64
- return useQuery({
65
- queryKey: ["flags", vmName],
66
- queryFn: () => apiFetch(`/flags/${vmName}`),
67
- enabled: !!vmName,
68
- });
69
- }
70
-
71
- export function useSetFeatureFlags() {
72
- const qc = useQueryClient();
73
- return useMutation({
74
- mutationFn: ({ vmName, flags, onLine }) =>
75
- apiStream(`/flags/${vmName}`, { body: { flags }, onLine }),
76
- onSuccess: () => qc.invalidateQueries({ queryKey: ["flags"] }),
77
- });
78
- }
79
-
80
- export function useDeploy() {
81
- const qc = useQueryClient();
82
- return useMutation({
83
- mutationFn: ({ vmName, opts = {}, onLine }) =>
84
- apiStream(`/deploy/${vmName}`, { body: opts, onLine }),
85
- onSuccess: () => {
86
- qc.invalidateQueries({ queryKey: ["resources"] });
87
- qc.invalidateQueries({ queryKey: ["fleet"] });
88
- },
89
- });
90
- }
91
-
92
- export function useGrantAdmin() {
93
- const qc = useQueryClient();
94
- return useMutation({
95
- mutationFn: ({ vmName, username, onLine }) =>
96
- apiStream(`/resources/vm/${vmName}/grant-admin`, {
97
- body: username ? { username } : {},
98
- onLine,
99
- }),
100
- onSuccess: () => qc.invalidateQueries({ queryKey: ["fleet"] }),
101
- });
102
- }
103
-
104
- export function useCreateResource() {
105
- const qc = useQueryClient();
106
- return useMutation({
107
- mutationFn: ({ body, onLine, onJobId }) =>
108
- apiStream(`/resources/${body.type}`, { body, onLine, onJobId }),
109
- onSuccess: () => qc.invalidateQueries({ queryKey: ["resources"] }),
110
- });
111
- }