@meshxdata/fops 0.1.54 → 0.1.57

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/CHANGELOG.md +184 -1
  2. package/package.json +1 -2
  3. package/src/commands/index.js +2 -0
  4. package/src/commands/k3s-cmd.js +124 -0
  5. package/src/commands/lifecycle.js +7 -0
  6. package/src/plugins/builtins/docker-compose.js +7 -13
  7. package/src/plugins/bundled/fops-plugin-azure/lib/azure-openai.js +0 -3
  8. package/src/plugins/bundled/fops-plugin-azure/lib/commands/vm-cmds.js +5 -2
  9. package/src/plugins/bundled/fops-plugin-cloud/api.js +14 -0
  10. package/src/project.js +12 -7
  11. package/src/plugins/bundled/fops-plugin-cloud/ui/postcss.config.cjs +0 -5
  12. package/src/plugins/bundled/fops-plugin-cloud/ui/src/App.jsx +0 -32
  13. package/src/plugins/bundled/fops-plugin-cloud/ui/src/api/client.js +0 -114
  14. package/src/plugins/bundled/fops-plugin-cloud/ui/src/api/queries.js +0 -111
  15. package/src/plugins/bundled/fops-plugin-cloud/ui/src/components/LogPanel.jsx +0 -162
  16. package/src/plugins/bundled/fops-plugin-cloud/ui/src/components/ThemeToggle.jsx +0 -46
  17. package/src/plugins/bundled/fops-plugin-cloud/ui/src/css/additional-styles/utility-patterns.css +0 -147
  18. package/src/plugins/bundled/fops-plugin-cloud/ui/src/css/style.css +0 -138
  19. package/src/plugins/bundled/fops-plugin-cloud/ui/src/favicon.svg +0 -15
  20. package/src/plugins/bundled/fops-plugin-cloud/ui/src/lib/utils.ts +0 -19
  21. package/src/plugins/bundled/fops-plugin-cloud/ui/src/main.jsx +0 -25
  22. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Audit.jsx +0 -164
  23. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Costs.jsx +0 -305
  24. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/CreateResource.jsx +0 -285
  25. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Fleet.jsx +0 -307
  26. package/src/plugins/bundled/fops-plugin-cloud/ui/src/pages/Resources.jsx +0 -229
  27. package/src/plugins/bundled/fops-plugin-cloud/ui/src/partials/Header.jsx +0 -132
  28. package/src/plugins/bundled/fops-plugin-cloud/ui/src/partials/Sidebar.jsx +0 -174
  29. package/src/plugins/bundled/fops-plugin-cloud/ui/src/partials/SidebarLinkGroup.jsx +0 -21
  30. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/AuthContext.jsx +0 -170
  31. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/Info.jsx +0 -49
  32. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/ThemeContext.jsx +0 -37
  33. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/Transition.jsx +0 -116
  34. package/src/plugins/bundled/fops-plugin-cloud/ui/src/utils/Utils.js +0 -63
package/CHANGELOG.md CHANGED
@@ -1,8 +1,191 @@
1
+ ## [0.1.57] - 2026-03-26
2
+
3
+ - restore all missing services (pgpool, exporters, grafana, etc), add loki to k3s profile, always activate loki profile in fops up (4e2744a)
4
+ - fix: grafana alert-rules provisioning, ENVIRONMENT_NAME from --url, k3s secret sync, vm-sizes endpoint, project root resolution (9839052)
5
+ - feat(azure): add 'fops azure reconcile <name>' command for VM drift fix (79ba6e2)
6
+ - fix(otel,loki): remove duplicate spanmetrics dimensions, use .env for loki S3 creds (e3d1def)
7
+ - fix(loki): pass S3 credentials from .env so loki works without vault-init (c57906d)
8
+ - fix(azure): improve VM provisioning reliability (2ddd669)
9
+ - cluster discovery (009257d)
10
+ - feat(storage): add loki container to provisioning (898c544)
11
+ - feat(azure): add ping command to check backend health (8336825)
12
+ - operator cli bump 0.1.52 (f052cb5)
13
+ - fix(doctor): set KUBECONFIG for k3s kubectl commands (db9359b)
14
+ - fix(azure): move --landscape to test run command, not separate subcommand (4b9b089)
15
+ - feat(azure): add test integration command with landscape support (b2990a0)
16
+ - fix(fleet): skip VMs without public IPs in fleet exec (39acbaa)
17
+ - feat(azure): detect and fix External Secrets identity issues (f907d11)
18
+ - operator cli bump 0.1.51 (db55bdc)
19
+ - feat: add postgres-exporter and Azure tray menu improvements (2a337ac)
20
+ - operator cli plugin fix (4dae908)
21
+ - operator cli plugin fix (25620cc)
22
+ - operator cli test fixes (1d1c18f)
23
+ - feat(test): add setup-users command for QA test user creation (b929507)
24
+ - feat(aks): show HA standby clusters with visual grouping (8fb640c)
25
+ - refactor(provision): extract VM provisioning to dedicated module (af321a7)
26
+ - refactor(provision): extract post-start health checks to dedicated module (6ed5f2d)
27
+ - fix: ping timeout 15s, fix prometheus sed escaping (d11ac14)
28
+ - refactor(vm): extract terraform HCL generation to dedicated module (896a64b)
29
+ - refactor(keyvault): extract key operations to dedicated module (716bbe4)
30
+ - refactor(azure): extract swarm functions to azure-fleet-swarm.js (4690e34)
31
+ - refactor(azure): extract SSH/remote functions to azure-ops-ssh.js (e62b8f0)
32
+ - refactor(azure): split azure-ops.js into smaller modules (4515425)
33
+ - feat(aks): add --ha flag for full cross-region HA setup (ece68c5)
34
+ - feat(fops): inject ENVIRONMENT_NAME on VM provisioning (6ef2a27)
35
+ - fix(postgres): disable SSL mode to fix connection issues (c789ae9)
36
+ - feat(trino): add caching configuration for docker-compose (3668224)
37
+ - fix(fops-azure): run pytest directly instead of missing scripts (29f8410)
38
+ - add -d detach option for local frontend dev, remove hive cpu limits (3306667)
39
+ - release 0.1.49 (dcca32b)
40
+ - release 0.1.48 (9b195e5)
41
+ - stash on updates (2916c01)
42
+ - stash on updates (b5c14df)
43
+ - stash on updates (d0453d1)
44
+ - frontend dev fixes (0ca7b00)
45
+ - fix: update azure test commands (77c81da)
46
+ - default locust to CLI mode, add --web for UI (ca35bff)
47
+ - add locust command for load testing AKS clusters (1278722)
48
+ - update spot node pool default autoscaling to 1-20 (617c182)
49
+ - module for aks (3dd1a61)
50
+ - add hive to PG_SERVICE_DBS for fops pg-setup (afccb16)
51
+ - feat(azure): enhance aks doctor with ExternalSecrets and PGSSLMODE checks (8b14861)
52
+ - add foundation-postgres ExternalName service to reconciler (ea88e11)
53
+ - new flux templates (0e2e372)
54
+ - feat(azure): add storage-engine secrets to Key Vault (a4f488e)
55
+ - feat(azure-aks): add AUTH0_DOMAIN to template rendering variables (216c37e)
56
+ - feat(azure): add storage account creation per cluster (aa1b138)
57
+ - bump watcher (ab24473)
58
+ - fix: concurrent compute calls (#66) (03e2edf)
59
+ - bump backend version (5058ff5)
60
+ - bump fops to 0.1.44 (8c0ef5d)
61
+ - Mlflow and azure plugin fix (176881f)
62
+ - fix lifecycle (a2cb9e7)
63
+ - callback url for localhost (821fb94)
64
+ - disable 4 scaffolding plugin by default. (bfb2b76)
65
+ - jaccard improvements (b7494a0)
66
+ - refactor azure plugin (68dfef4)
67
+ - refactor azure plugin (b24a008)
68
+ - fix trino catalog missing (4928a55)
69
+ - v36 bump and changelog generation on openai (37a0440)
70
+ - v36 bump and changelog generation on openai (a3b02d9)
71
+ - bump (a990058)
72
+ - status bar fix and new plugin for ttyd (27dde1e)
73
+ - file demo and tray (1a3e704)
74
+ - electron app (59ad0bb)
75
+ - compose and fops file plugin (1cf0e81)
76
+ - bump (346ffc1)
77
+ - localhost replaced by 127.0.0.1 (82b9f30)
78
+ - .29 (587b0e1)
79
+ - improve up down and bootstrap script (b79ebaf)
80
+ - checksum (22c8086)
81
+ - checksum (96b434f)
82
+ - checksum (15ed3c0)
83
+ - checksum (8a6543a)
84
+ - bump embed trino links (8440504)
85
+ - bump data (765ffd9)
86
+ - bump (cb8b232)
87
+ - broken tests (c532229)
88
+ - release 0.1.18, preflight checks (d902249)
89
+ - fix compute display bug (d10f5d9)
90
+ - cleanup packer files (6330f18)
91
+ - plan mode (cb36a8a)
92
+ - bump to 0.1.16 - agent ui (41ac1a2)
93
+ - bump to 0.1.15 - agent ui (4ebe2e1)
94
+ - bump to 0.1.14 (6c3a7fa)
95
+ - bump to 0.1.13 (8db570f)
96
+ - release 0.1.12 (c1c79e5)
97
+ - bump (11aa3b0)
98
+ - git keep and bump tui (be1678e)
99
+ - skills, index, rrf, compacted context (100k > 10k) (7b2fffd)
100
+ - cloudflare and token consumption, graphs indexing (0ad9eec)
101
+ - bump storage default (22c83ba)
102
+ - storage fix (68a22a0)
103
+ - skills update (7f56500)
104
+ - v9 bump (3864446)
105
+ - bump (c95eedc)
106
+ - rrf (dbf8c95)
107
+ - feat: warning when running predictions (95e8c52)
108
+ - feat: support for local predictions (45cf26b)
109
+ - feat: wip support for predictions + mlflow (3457052)
110
+ - add Reciprocal Rank Fusion (RRF) to knowledge and skill retrieval (61549bc)
111
+ - validate CSV headers in compute_run readiness check (a8c7a43)
112
+ - fix corrupted Iceberg metadata: probe tables + force cleanup on re-apply (50578af)
113
+ - enforce: never use foundation_apply to fix broken products (2e049bf)
114
+ - update SKILL.md with complete tool reference for knowledge retrieval (30b1924)
115
+ - add storage read, input DP table probe, and compute_run improvements (34e6c4c)
116
+ - skills update (1220385)
117
+ - skills update (bb66958)
118
+ - some tui improvement and tools apply overwrite (e90c35c)
119
+ - skills update (e9227a1)
120
+ - skills update (669c4b3)
121
+ - fix plugin pre-flight checks (f741743)
122
+ - increase agent context (6479aaa)
123
+ - skills and init sql fixes (5fce35e)
124
+ - checksum (3518b56)
125
+ - pending job limit (a139861)
126
+ - checksum (575d28c)
127
+ - bump (92049ba)
128
+ - fix bug per tab status (0a33657)
129
+ - fix bug per tab status (50457c6)
130
+ - checksumming (0ad842e)
131
+ - shot af mardkwon overlapping (51f63b9)
132
+ - add spark dockerfile for multiarch builds (95abbd1)
133
+ - fix plugin initialization (16b9782)
134
+ - split index.js (50902a2)
135
+ - cloudflare cidr (cc4e021)
136
+ - cloudflare restrictions (2f6ba2d)
137
+ - sequential start (86b496e)
138
+ - sequential start (4930fe1)
139
+ - sequential start (353f014)
140
+ - qa tests (2dc6a1a)
141
+ - bump sha for .85 (dc2edfe)
142
+ - preserve env on sudo (7831227)
143
+ - bump sha for .84 (6c052f9)
144
+ - non interactive for azure vms (0aa8a2f)
145
+ - keep .env if present (d072450)
146
+ - bump (7a8e732)
147
+ - ensure opa is on compose if not set (f4a5228)
148
+ - checksum bump (a2ccc20)
149
+ - netrc defensive checks (a0b0ccc)
150
+ - netrc defensive checks (ae37403)
151
+ - checksum (ec45d11)
152
+ - update sync and fix up (7f9af72)
153
+ - expand test for azure and add new per app tag support (388a168)
154
+ - checksum on update (44005fc)
155
+ - cleanup for later (15e5313)
156
+ - cleanup for later (11c9597)
157
+ - switch branch feature (822fecc)
158
+ - add pull (d1c19ab)
159
+ - Bump hono from 4.11.9 to 4.12.0 in /operator-cli (ad25144)
160
+ - tests (f180a9a)
161
+ - cleanup (39c49a3)
162
+ - registry (7b7126a)
163
+ - reconcile kafka (832d0db)
164
+ - gh login bug (025886c)
165
+ - cleanup (bb96cab)
166
+ - strip envs from process (2421180)
167
+ - force use of gh creds not tokens in envs var (fff7787)
168
+ - resolve import between npm installs and npm link (79522e1)
169
+ - fix gh scope and azure states (afd846c)
170
+ - refactoring (da50352)
171
+ - split fops repo (d447638)
172
+ - aks (b791f8f)
173
+ - refactor azure (67d3bad)
174
+ - wildcard (391f023)
175
+ - azure plugin (c074074)
176
+ - zap (d7e6e7f)
177
+ - fix knock (cf89c05)
178
+ - azure (4adec98)
179
+ - Bump tar from 7.5.7 to 7.5.9 in /operator-cli (e41e98e)
180
+ - azure stack index.js split (de12272)
181
+ - Bump ajv from 8.17.1 to 8.18.0 in /operator-cli (76da21f)
182
+ - packer (9665fbc)
183
+
1
184
  # Changelog
2
185
 
3
186
  All notable changes to @meshxdata/fops (Foundation Operator CLI) are documented here.
4
187
 
5
- ## [0.1.54] - 2026-03-26
188
+ ## [0.1.56] - 2026-03-26
6
189
 
7
190
  - feat(azure): add 'fops azure reconcile <name>' command for VM drift fix (79ba6e2)
8
191
  - fix(otel,loki): remove duplicate spanmetrics dimensions, use .env for loki S3 creds (e3d1def)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@meshxdata/fops",
3
- "version": "0.1.54",
3
+ "version": "0.1.57",
4
4
  "description": "CLI to install and manage data mesh platforms",
5
5
  "keywords": [
6
6
  "fops",
@@ -17,7 +17,6 @@
17
17
  "fops.mjs",
18
18
  "src/",
19
19
  "!src/**/*.test.js",
20
- "!src/**/node_modules",
21
20
  "!scripts/",
22
21
  "README.md",
23
22
  "CHANGELOG.md"
@@ -7,6 +7,7 @@ import { registerPluginCommands } from "./plugin-cmd.js";
7
7
  import { registerIntegrationCommands } from "./integration-cmd.js";
8
8
  import { registerCompletionCommand } from "./completion.js";
9
9
  import { registerEditCommands } from "./edit-cmd.js";
10
+ import { registerK3sCommands } from "./k3s-cmd.js";
10
11
  import { configureColorHelp } from "./help.js";
11
12
 
12
13
  export function registerCommands(program, registry) {
@@ -43,5 +44,6 @@ export function registerCommands(program, registry) {
43
44
  registerPluginCommands(program, registry);
44
45
  registerIntegrationCommands(program, registry);
45
46
  registerEditCommands(program);
47
+ registerK3sCommands(program);
46
48
  registerCompletionCommand(program);
47
49
  }
@@ -0,0 +1,124 @@
1
+ import path from "node:path";
2
+ import chalk from "chalk";
3
+ import { requireRoot } from "../project.js";
4
+
5
+ const DIM = chalk.dim;
6
+ const OK = chalk.green;
7
+ const ERR = chalk.red;
8
+ const WARN = chalk.yellow;
9
+
10
+ const K3S_KUBECTL = ["exec", "-e", "KUBECONFIG=/etc/rancher/k3s/k3s.yaml", "k3s-server", "kubectl"];
11
+ const K3S_KUBECTL_I = ["exec", "-i", "-e", "KUBECONFIG=/etc/rancher/k3s/k3s.yaml", "k3s-server", "kubectl"];
12
+
13
+ async function kubectlApply(execa, args) {
14
+ const create = await execa("docker", [
15
+ ...K3S_KUBECTL, "create", ...args, "--dry-run=client", "-o", "yaml",
16
+ ], { timeout: 15000, reject: false });
17
+ if (create.exitCode !== 0) throw new Error(`kubectl create failed: ${create.stderr}`);
18
+ const apply = await execa("docker", [
19
+ ...K3S_KUBECTL_I, "apply", "-f", "-",
20
+ ], { input: create.stdout, timeout: 15000, reject: false });
21
+ if (apply.exitCode !== 0) throw new Error(`kubectl apply failed: ${apply.stderr}`);
22
+ }
23
+
24
+ async function syncSecrets(root) {
25
+ const { execa } = await import("execa");
26
+ const { loadEnvFromFile } = await import("../utils/load-env.js");
27
+
28
+ // Check k3s is running
29
+ const { exitCode, stdout } = await execa("docker", [
30
+ ...K3S_KUBECTL, "get", "nodes",
31
+ ], { timeout: 10000, reject: false });
32
+ if (exitCode !== 0 || !/Ready/.test(stdout)) {
33
+ console.error(ERR(" k3s cluster is not reachable. Is it running?"));
34
+ console.error(DIM(" Start it with: fops up --k3s"));
35
+ return;
36
+ }
37
+
38
+ // Load credentials from .env (same resolution as setup-kubernetes.sh)
39
+ const env = loadEnvFromFile(path.join(root, ".env"));
40
+ const s3Id = env.BOOTSTRAP_STORAGE_ACCESS_KEY || env.AUTH_IDENTITY || "minio";
41
+ const s3Pw = env.BOOTSTRAP_STORAGE_SECRET_KEY || env.AUTH_CREDENTIAL || "minio123";
42
+
43
+ console.log(DIM(` Storage credentials: ${s3Id} / ${"*".repeat(Math.min(s3Pw.length, 8))}`));
44
+
45
+ // 1. storage-secret in foundation namespace
46
+ console.log(DIM(" Creating storage-secret (foundation)..."));
47
+ await kubectlApply(execa, [
48
+ "secret", "generic", "storage-secret",
49
+ `--from-literal=ACCESS_KEY=${s3Id}`,
50
+ `--from-literal=SECRET_KEY=${s3Pw}`,
51
+ "--namespace=foundation",
52
+ ]);
53
+ console.log(OK(" ✓ storage-secret"));
54
+
55
+ // 2. foundation-storage-engine-auth in foundation namespace
56
+ console.log(DIM(" Creating foundation-storage-engine-auth (foundation)..."));
57
+ await kubectlApply(execa, [
58
+ "secret", "generic", "foundation-storage-engine-auth",
59
+ `--from-literal=AUTH_IDENTITY=${s3Id}`,
60
+ `--from-literal=AUTH_CREDENTIAL=${s3Pw}`,
61
+ "--namespace=foundation",
62
+ ]);
63
+ console.log(OK(" ✓ foundation-storage-engine-auth"));
64
+
65
+ // 3. Default storage system secret for Spark jobs
66
+ const sparkSecretName = "00000000-0000-0000-0000-000000000001-secret";
67
+ console.log(DIM(` Creating ${sparkSecretName} (spark-jobs)...`));
68
+ await kubectlApply(execa, [
69
+ "secret", "generic", sparkSecretName,
70
+ `--from-literal=AWS_ACCESS_KEY_ID=${s3Id}`,
71
+ `--from-literal=AWS_SECRET_ACCESS_KEY=${s3Pw}`,
72
+ `--from-literal=AWS_SECRET_ACCESS_KEY_ID=${s3Pw}`,
73
+ "--from-literal=AWS_ENDPOINT_LOCATION=http://foundation-storage-engine:8080",
74
+ "--from-literal=AWS_REGION=me-central-1",
75
+ "--from-literal=MLFLOW_S3_ENDPOINT_URL=http://foundation-storage-engine:8080",
76
+ "--namespace=spark-jobs",
77
+ ]);
78
+
79
+ // Re-apply label
80
+ await execa("docker", [
81
+ ...K3S_KUBECTL, "label", "secret", sparkSecretName,
82
+ "foundation.io/data-system=storage",
83
+ "--namespace=spark-jobs", "--overwrite",
84
+ ], { timeout: 10000, reject: false });
85
+ console.log(OK(` ✓ ${sparkSecretName} (with label)`));
86
+
87
+ console.log(OK("\n ✓ All storage secrets synced to k3s"));
88
+ }
89
+
90
+ export function registerK3sCommands(program) {
91
+ const k3s = program
92
+ .command("k3s")
93
+ .description("Manage local k3s Kubernetes cluster");
94
+
95
+ k3s
96
+ .command("sync-secrets")
97
+ .description("Sync storage secrets from .env into k3s (fixes S3 AccessDenied)")
98
+ .action(async () => {
99
+ const root = requireRoot(program);
100
+ try {
101
+ await syncSecrets(root);
102
+ } catch (err) {
103
+ console.error(ERR(` ✗ ${err.message}`));
104
+ process.exitCode = 1;
105
+ }
106
+ });
107
+
108
+ k3s
109
+ .command("exec [cmd...]")
110
+ .description("Run a command inside k3s (default: interactive shell)")
111
+ .option("-n, --namespace <ns>", "Kubernetes namespace", "spark-jobs")
112
+ .action(async (cmd, opts) => {
113
+ const { execaNode } = await import("execa");
114
+ const { execSync } = await import("node:child_process");
115
+ const args = cmd.length
116
+ ? [...K3S_KUBECTL, "-n", opts.namespace, ...cmd]
117
+ : ["exec", "-it", "-e", "KUBECONFIG=/etc/rancher/k3s/k3s.yaml", "k3s-server", "/bin/sh"];
118
+ try {
119
+ execSync(`docker ${args.map(a => a.includes(" ") ? `"${a}"` : a).join(" ")}`, { stdio: "inherit" });
120
+ } catch (err) {
121
+ if (err.status) process.exitCode = err.status;
122
+ }
123
+ });
124
+ }
@@ -562,6 +562,12 @@ async function runUp(program, registry, opts) {
562
562
  fs.writeFileSync(envPath, envContent);
563
563
  console.log(chalk.dim(` FOUNDATION_PUBLIC_URL=${publicUrl} written to .env`));
564
564
  }
565
+ // Derive and set ENVIRONMENT_NAME from URL (e.g. https://staging.meshx.app → Staging)
566
+ const envName = publicUrl.replace(/https?:\/\//, "").split(".")[0] || "Local";
567
+ const environmentName = envName.charAt(0).toUpperCase() + envName.slice(1);
568
+ envContent = envContent.replace(/^ENVIRONMENT_NAME=.*\n?/m, "");
569
+ envContent = envContent.trimEnd() + `\nENVIRONMENT_NAME=${environmentName}\n`;
570
+ fs.writeFileSync(envPath, envContent);
565
571
  } else {
566
572
  // Local: only inject localhost fallback if not already set
567
573
  if (!/^FOUNDATION_PUBLIC_URL=/m.test(envContent)) {
@@ -577,6 +583,7 @@ async function runUp(program, registry, opts) {
577
583
  const envProfiles = (process.env.COMPOSE_PROFILES || "").split(",").map(s => s.trim()).filter(Boolean);
578
584
  const activeProfiles = new Set(envProfiles);
579
585
  if (opts.k3s) activeProfiles.add("k3s");
586
+ activeProfiles.add("loki");
580
587
  if (publicUrl) {
581
588
  if (opts.traefik) activeProfiles.add("traefik");
582
589
  try {
@@ -507,17 +507,12 @@ You investigate alerts, diagnose service failures, and suggest fixes. You have d
507
507
  5. **Fix**: Suggest specific actions (restart, config change, scale, rollback).
508
508
 
509
509
  ## Output Format
510
- Structure your response with blank lines between each section:
511
-
512
- **Status:** One-line summary (e.g. "Processor container restarting due to OOM")
513
-
514
- **Findings:** What you discovered from each tool
515
-
516
- **Root Cause:** Most likely cause
517
-
518
- **Actions:** Specific steps to fix
519
-
520
- **Prevention:** How to avoid this in the future
510
+ Structure your response as:
511
+ - **Status**: One-line summary (e.g. "Processor container restarting due to OOM")
512
+ - **Findings**: What you discovered from each tool
513
+ - **Root Cause**: Most likely cause
514
+ - **Actions**: Specific steps to fix
515
+ - **Prevention**: How to avoid this in the future
521
516
 
522
517
  ## Rules
523
518
  - Always check compose_ps first.
@@ -526,8 +521,7 @@ Structure your response with blank lines between each section:
526
521
  - If a dependency is down (postgres, kafka), flag it — fixing the dependency fixes the dependent.
527
522
  - Be concise — this output goes into a Glue chat thread.
528
523
  - Never suggest 'docker compose down' — prefer targeted restarts.
529
- - After restarting, verify with compose_ps.
530
- - IMPORTANT: Always put a blank line between sections in your response so they render as separate paragraphs.`,
524
+ - After restarting, verify with compose_ps.`,
531
525
  });
532
526
 
533
527
  // ── Doctor check: Trivy ───────────────────────────────────────────
@@ -55,9 +55,6 @@ async function ensureAzAuth(execa, { subscription } = {}) {
55
55
  /** Resolve Foundation project root (directory with docker-compose). Exported for use by azure agent sync. */
56
56
  export function findProjectRoot() {
57
57
  const cwd = process.cwd();
58
- if (process.env.FOUNDATION_ROOT && fs.existsSync(process.env.FOUNDATION_ROOT)) {
59
- return path.resolve(process.env.FOUNDATION_ROOT);
60
- }
61
58
  try {
62
59
  const cfgPath = path.join(process.env.HOME || process.env.USERPROFILE || "", ".fops.json");
63
60
  if (fs.existsSync(cfgPath)) {
@@ -263,7 +263,7 @@ export function registerVmCommands(azure, api, registry) {
263
263
  if (opts.dai) opts.k3s = true;
264
264
  const {
265
265
  lazyExeca, ensureAzCli, ensureAzAuth, resolveGithubToken, verifyGithubToken,
266
- reconcileVm, DEFAULTS,
266
+ reconcileVm, DEFAULTS, buildDefaultUrl,
267
267
  } = await import("../azure.js");
268
268
  const { resolveCfToken } = await import("../cloudflare.js");
269
269
  const { readVmState, writeVmState } = await import("../azure-state.js");
@@ -279,7 +279,10 @@ export function registerVmCommands(azure, api, registry) {
279
279
  process.exit(1);
280
280
  }
281
281
  const rg = tracked.resourceGroup;
282
- const desiredUrl = opts.url || tracked.publicUrl;
282
+ const storedUrl = tracked.publicUrl;
283
+ // If the stored URL is IP-based, prefer the default domain-based URL
284
+ const isIpUrl = storedUrl && /^https?:\/\/\d+\.\d+\.\d+\.\d+/.test(storedUrl);
285
+ const desiredUrl = opts.url || (isIpUrl ? buildDefaultUrl(name) : storedUrl);
283
286
  const cfToken = resolveCfToken(opts.cfToken);
284
287
  const { publicIp, publicUrl, rg: actualRg } = await reconcileVm(execa, {
285
288
  vmName: name, rg, sub, subId, location: tracked.location || DEFAULTS.location,
@@ -333,6 +333,20 @@ export function createCloudApi(registry) {
333
333
  return c.json(providers);
334
334
  });
335
335
 
336
+ // ── VM Sizes ────────────────────────────────────────────
337
+
338
+ app.get("/vm-sizes", (c) => {
339
+ return c.json([
340
+ "Standard_D4s_v5",
341
+ "Standard_D8s_v5",
342
+ "Standard_D16s_v5",
343
+ "Standard_D32s_v5",
344
+ "Standard_D48s_v5",
345
+ "Standard_D64s_v5",
346
+ "Standard_D96s_v5",
347
+ ]);
348
+ });
349
+
336
350
  // ── Resources ───────────────────────────────────────────
337
351
 
338
352
  app.get("/resources", async (c) => {
package/src/project.js CHANGED
@@ -32,8 +32,8 @@ function saveProjectRoot(root) {
32
32
  const configPath = path.join(os.homedir(), ".fops.json");
33
33
  let config = {};
34
34
  try { config = JSON.parse(fs.readFileSync(configPath, "utf8")); } catch {}
35
- if (config.projectRoot === root) return; // already saved
36
- config.projectRoot = root;
35
+ if (config.foundationRoot === root || config.projectRoot === root) return; // already saved
36
+ config.foundationRoot = root;
37
37
  try {
38
38
  fs.mkdirSync(path.dirname(configPath), { recursive: true });
39
39
  fs.writeFileSync(configPath, JSON.stringify(config, null, 2) + "\n");
@@ -41,14 +41,19 @@ function saveProjectRoot(root) {
41
41
  }
42
42
 
43
43
  export function rootDir(cwd = process.cwd()) {
44
+
45
+ // Check FOUNDATION_ROOT env var first (explicit override)
44
46
  const envRoot = process.env.FOUNDATION_ROOT;
45
- if (envRoot && fs.existsSync(envRoot)) return path.resolve(envRoot);
47
+ if (envRoot && isFoundationRoot(envRoot)) {
48
+ return path.resolve(envRoot);
49
+ }
46
50
 
47
- // Check ~/.fops.json for saved project root
51
+ // Check ~/.fops.json for saved project root (projectRoot or foundationRoot)
48
52
  try {
49
53
  const fopsConfig = JSON.parse(fs.readFileSync(path.join(os.homedir(), ".fops.json"), "utf8"));
50
- if (fopsConfig.projectRoot && isFoundationRoot(fopsConfig.projectRoot)) {
51
- return path.resolve(fopsConfig.projectRoot);
54
+ const configRoot = fopsConfig.foundationRoot || fopsConfig.projectRoot;
55
+ if (configRoot && isFoundationRoot(configRoot)) {
56
+ return path.resolve(configRoot);
52
57
  }
53
58
  } catch {}
54
59
 
@@ -104,7 +109,7 @@ export function requireRoot(program) {
104
109
  console.error(
105
110
  chalk.red("Not a Foundation project (no docker-compose + Makefile).")
106
111
  );
107
- console.error(chalk.dim(" Run `fops init` to set up, or set FOUNDATION_ROOT."));
112
+ console.error(chalk.dim(" Run `fops init` to set up, or run from the foundation-compose directory."));
108
113
  program.error("", { exitCode: 1 });
109
114
  }
110
115
  return r;
@@ -1,5 +0,0 @@
1
- module.exports = {
2
- plugins: {
3
- "@tailwindcss/postcss": {},
4
- },
5
- };
@@ -1,32 +0,0 @@
1
- import React, { useEffect } from "react";
2
- import { Routes, Route, useLocation } from "react-router-dom";
3
-
4
- import "./css/style.css";
5
-
6
- import Resources from "./pages/Resources";
7
- import CreateResource from "./pages/CreateResource";
8
- import Fleet from "./pages/Fleet";
9
- import Costs from "./pages/Costs";
10
- import Audit from "./pages/Audit";
11
-
12
- function App() {
13
- const location = useLocation();
14
-
15
- useEffect(() => {
16
- document.querySelector("html").style.scrollBehavior = "auto";
17
- window.scroll({ top: 0 });
18
- document.querySelector("html").style.scrollBehavior = "";
19
- }, [location.pathname]);
20
-
21
- return (
22
- <Routes>
23
- <Route exact path="/" element={<Resources />} />
24
- <Route path="/resources/new" element={<CreateResource />} />
25
- <Route path="/fleet" element={<Fleet />} />
26
- <Route path="/costs" element={<Costs />} />
27
- <Route path="/audit" element={<Audit />} />
28
- </Routes>
29
- );
30
- }
31
-
32
- export default App;
@@ -1,114 +0,0 @@
1
- const BASE = "/cloud/api";
2
-
3
- // Token getter — set by AuthContext once Auth0 is initialized
4
- let _getToken = null;
5
-
6
- export function setTokenGetter(fn) {
7
- _getToken = fn;
8
- }
9
-
10
- async function authHeaders(extra = {}) {
11
- if (!_getToken) return extra;
12
- try {
13
- const token = await _getToken();
14
- return { ...extra, Authorization: `Bearer ${token}` };
15
- } catch {
16
- return extra;
17
- }
18
- }
19
-
20
- export async function apiFetch(path, opts = {}) {
21
- const headers = await authHeaders({ "Content-Type": "application/json", ...opts.headers });
22
- const res = await fetch(`${BASE}${path}`, { ...opts, headers });
23
- if (!res.ok) {
24
- const body = await res.json().catch(() => ({}));
25
- throw new Error(body.error || `HTTP ${res.status}`);
26
- }
27
- return res.json();
28
- }
29
-
30
- /**
31
- * Make a streaming POST/DELETE request and call onLine for each SSE event.
32
- * Returns the final result from the "done" event, or throws on "error".
33
- */
34
- export async function apiStream(path, { method = "POST", body, onLine, onJobId } = {}) {
35
- const headers = await authHeaders({ "Content-Type": "application/json" });
36
- const res = await fetch(`${BASE}${path}`, {
37
- method,
38
- headers,
39
- body: body ? JSON.stringify(body) : undefined,
40
- });
41
-
42
- if (!res.ok) {
43
- const err = await res.json().catch(() => ({}));
44
- throw new Error(err.error || `HTTP ${res.status}`);
45
- }
46
-
47
- const reader = res.body.getReader();
48
- const decoder = new TextDecoder();
49
- let buffer = "";
50
- let finalResult = null;
51
- let finalError = null;
52
-
53
- while (true) {
54
- const { done, value } = await reader.read();
55
- if (done) break;
56
- buffer += decoder.decode(value, { stream: true });
57
-
58
- const lines = buffer.split("\n");
59
- buffer = lines.pop();
60
-
61
- for (const line of lines) {
62
- if (!line.startsWith("data: ")) continue;
63
- try {
64
- const evt = JSON.parse(line.slice(6));
65
- if (evt.type === "job") {
66
- onJobId?.(evt.jobId);
67
- } else if (evt.type === "done") {
68
- finalResult = evt.result;
69
- onLine?.("\u2713 Operation complete", "done");
70
- } else if (evt.type === "error" && !evt.text?.startsWith(" ")) {
71
- finalError = evt.text;
72
- } else if (evt.type === "log" || evt.type === "error") {
73
- onLine?.(evt.text, evt.type);
74
- }
75
- } catch { /* ignore */ }
76
- }
77
- }
78
-
79
- if (finalError) throw new Error(finalError);
80
- return finalResult;
81
- }
82
-
83
- /**
84
- * Poll a job's buffered logs for reconnection after page reload.
85
- */
86
- export async function pollJob(jobId, onLine) {
87
- let offset = 0;
88
-
89
- while (true) {
90
- const headers = await authHeaders();
91
- const res = await fetch(`${BASE}/jobs/${jobId}?since=${offset}`, { headers });
92
- if (!res.ok) {
93
- if (res.status === 404) throw new Error("Job not found \u2014 it may have expired");
94
- throw new Error(`HTTP ${res.status}`);
95
- }
96
-
97
- const data = await res.json();
98
-
99
- for (const log of data.logs) {
100
- onLine?.(log.text, log.type);
101
- }
102
- offset = data.offset + data.logs.length;
103
-
104
- if (data.status === "done") {
105
- onLine?.("\u2713 Operation complete", "done");
106
- return { status: "done", result: data.result };
107
- }
108
- if (data.status === "error") {
109
- return { status: "error", error: data.error };
110
- }
111
-
112
- await new Promise((r) => setTimeout(r, 1000));
113
- }
114
- }