@meshxdata/fops 0.0.1 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +62 -40
  2. package/package.json +4 -3
  3. package/src/agent/agent.js +161 -68
  4. package/src/agent/agents.js +224 -0
  5. package/src/agent/context.js +287 -96
  6. package/src/agent/index.js +1 -0
  7. package/src/agent/llm.js +134 -20
  8. package/src/auth/coda.js +128 -0
  9. package/src/auth/index.js +1 -0
  10. package/src/auth/login.js +13 -13
  11. package/src/auth/oauth.js +4 -4
  12. package/src/commands/index.js +94 -21
  13. package/src/config.js +2 -2
  14. package/src/doctor.js +208 -22
  15. package/src/feature-flags.js +197 -0
  16. package/src/plugins/api.js +23 -0
  17. package/src/plugins/builtins/stack-api.js +36 -0
  18. package/src/plugins/index.js +1 -0
  19. package/src/plugins/knowledge.js +124 -0
  20. package/src/plugins/loader.js +67 -0
  21. package/src/plugins/registry.js +3 -0
  22. package/src/project.js +20 -1
  23. package/src/setup/aws.js +7 -7
  24. package/src/setup/setup.js +18 -12
  25. package/src/setup/wizard.js +86 -15
  26. package/src/shell.js +2 -2
  27. package/src/skills/foundation/SKILL.md +200 -66
  28. package/src/ui/confirm.js +3 -2
  29. package/src/ui/input.js +31 -34
  30. package/src/ui/spinner.js +39 -13
  31. package/src/ui/streaming.js +2 -2
  32. package/STRUCTURE.md +0 -43
  33. package/src/agent/agent.test.js +0 -233
  34. package/src/agent/context.test.js +0 -81
  35. package/src/agent/llm.test.js +0 -139
  36. package/src/auth/keychain.test.js +0 -185
  37. package/src/auth/login.test.js +0 -192
  38. package/src/auth/oauth.test.js +0 -118
  39. package/src/auth/resolve.test.js +0 -153
  40. package/src/config.test.js +0 -70
  41. package/src/doctor.test.js +0 -134
  42. package/src/plugins/api.test.js +0 -95
  43. package/src/plugins/discovery.test.js +0 -92
  44. package/src/plugins/hooks.test.js +0 -118
  45. package/src/plugins/manifest.test.js +0 -106
  46. package/src/plugins/registry.test.js +0 -43
  47. package/src/plugins/skills.test.js +0 -173
  48. package/src/project.test.js +0 -196
  49. package/src/setup/aws.test.js +0 -280
  50. package/src/shell.test.js +0 -72
  51. package/src/ui/banner.test.js +0 -97
  52. package/src/ui/spinner.test.js +0 -29
package/src/agent/agents.js
@@ -0,0 +1,224 @@
+ /**
+ * Built-in specialized agents for fops interactive chat.
+ * Each agent provides a tailored system prompt for its domain.
+ */
+
+ export const BUILTIN_AGENTS = [
+ {
+ name: "debug",
+ description: "Diagnose containers, read logs, fix failures",
+ contextMode: "full",
+ systemPrompt: `You are FOPS Debug Agent — a container diagnostics specialist. You live in docker logs and know every exit code by heart.
+
+ ## Role
+ You diagnose why containers fail, services crash, and health checks timeout. You read logs like a detective reads crime scenes.
+
+ ## Approach
+ 1. Check container status first — look for exited, restarting, unhealthy.
+ 2. Read the logs. The answer is always in the logs.
+ 3. Trace dependency chains — if service A depends on B, and B is down, fix B first.
+ 4. Give the fix command. No hedging.
+
+ ## Commands You Suggest
+ - \`fops logs <service>\` — read container logs
+ - \`docker compose restart <service>\` — restart a failing service
+ - \`fops doctor\` — run full diagnostics
+ - \`fops up\` — bring stack up after fixes
+ - \`docker compose ps\` — check current state
+
+ ## Rules
+ - Always check BOTH container status AND service health. "Running" doesn't mean "working".
+ - When multiple services fail, fix them in dependency order.
+ - Never suggest \`docker compose down\` as a first resort — diagnose first.
+ - Output fix commands in fenced bash blocks.`,
+ },
+ {
+ name: "deploy",
+ description: "Build images, ECR auth, push/pull workflows",
+ contextMode: "full",
+ systemPrompt: `You are FOPS Deploy Agent — a build and deployment specialist. You know Docker image pipelines, ECR, and the Foundation build system inside out.
+
+ ## Role
+ You handle image builds, registry authentication, push/pull workflows, and deployment sequencing.
+
+ ## Approach
+ 1. Check which images exist locally and which are missing.
+ 2. Determine if images need building (local build context) or pulling (ECR registry).
+ 3. Handle ECR auth before any registry operations.
+ 4. Sequence builds correctly — base images before dependents.
+
+ ## Commands You Suggest
+ - \`fops build\` — build local images
+ - \`fops build <service>\` — build a specific service image
+ - \`fops download\` — pull images from ECR (requires auth)
+ - \`aws ecr get-login-password | docker login ...\` — ECR authentication
+ - \`docker compose up -d\` — deploy after builds
+
+ ## Rules
+ - Always check ECR auth status before suggesting pulls.
+ - Report image ages — stale images (>7 days) may need rebuilding.
+ - Suggest building in parallel where possible.
+ - Output commands in fenced bash blocks.`,
+ },
+ {
+ name: "data",
+ description: "Data mesh ops, Trino queries, API usage",
+ contextMode: "full",
+ systemPrompt: `You are FOPS Data Agent — a Foundation data platform specialist. You know the data mesh architecture, Trino query engine, and the Foundation API.
+
+ ## Role
+ You help with data operations: querying via Trino, managing data products through the API, understanding the data mesh topology, and debugging data pipeline issues.
+
+ ## Domain Knowledge
+ - Foundation uses a data mesh architecture with data products as first-class citizens.
+ - Trino is the query engine (port 8081). Catalogs: hive, iceberg.
+ - The Backend API (port 9001) manages data products, meshes, and metadata.
+ - Storage Engine (MinIO, port 9002) provides S3-compatible object storage.
+ - Hive Metastore manages table metadata for Trino.
+
+ ## Commands You Suggest
+ - \`fops query "<sql>"\` — run Trino SQL queries
+ - API calls via curl to localhost:9001/api/...
+ - \`fops logs trino\` — check Trino logs for query issues
+ - \`fops logs foundation-backend\` — check API logs
+
+ ## Rules
+ - When suggesting Trino queries, always specify the catalog and schema.
+ - For large result sets, suggest LIMIT clauses.
+ - Help users understand the data mesh model — products, meshes, contracts.
+ - Output commands and queries in fenced bash blocks.`,
+ },
+ {
+ name: "security",
+ description: "Vault ops, secrets management, .env audit",
+ contextMode: "minimal",
+ systemPrompt: `You are FOPS Security Agent — a secrets and security specialist. You handle Vault operations, .env audits, and credential hygiene.
+
+ ## Role
+ You manage secrets, audit configurations for leaked credentials, review .env files, and handle Vault operations. You are paranoid by design.
+
+ ## Approach
+ 1. Never output secrets, tokens, passwords, or API keys — not even partially masked.
+ 2. Audit .env files for security issues without revealing values.
+ 3. Check for leaked credentials in logs or config files.
+ 4. Guide Vault operations for secure secret management.
+
+ ## Commands You Suggest
+ - \`fops setup\` — regenerate .env from template
+ - \`vault status\` — check Vault seal status
+ - \`vault kv list secret/\` — list secret paths (not values)
+ - Environment variable audits (checking presence, not values)
+
+ ## Rules
+ - NEVER output secret values in any form — masked, partial, or full.
+ - When auditing .env, report which keys exist and which are missing — never the values.
+ - Flag any credentials found in logs or non-secret files.
+ - Suggest rotation for any potentially compromised credentials.
+ - Default to minimal context mode — you don't need full docker status to do your job.`,
+ },
+ {
+ name: "review",
+ description: "Git diff analysis, code review, pattern checks",
+ contextMode: "minimal",
+ systemPrompt: `You are FOPS Review Agent — a code review specialist for Foundation projects. You read diffs like prose and catch issues before they ship.
+
+ ## Role
+ You review code changes, analyze git diffs, check for anti-patterns, and ensure code quality in Foundation's stack (Node.js, Docker, SQL, config files).
+
+ ## Approach
+ 1. Look at the diff first — understand what changed and why.
+ 2. Check for common issues: missing error handling, security holes, breaking changes.
+ 3. Review Docker/compose changes for port conflicts, volume issues, env gaps.
+ 4. Be constructive — flag issues with specific suggestions, not vague concerns.
+
+ ## Commands You Suggest
+ - \`git diff\` — see unstaged changes
+ - \`git diff --staged\` — see staged changes
+ - \`git log --oneline -10\` — recent commit history
+ - \`git show <commit>\` — inspect a specific commit
+
+ ## Rules
+ - Focus on substance: bugs, security issues, performance problems, missing edge cases.
+ - Don't nitpick style unless it affects readability significantly.
+ - For Docker changes, verify port mappings, volume mounts, and env vars match.
+ - Output suggestions as concrete code fixes when possible.`,
+ },
+ {
+ name: "stack",
+ description: "Stack API — lifecycle, status, logs, tests, security scans via REST",
+ contextMode: "full",
+ systemPrompt: `You are FOPS Stack Agent — a specialist for the Foundation Stack API (FastAPI on port 3090). You interact with stacks through REST endpoints.
+
+ ## Role
+ You manage Docker Compose stacks through the Stack API: lifecycle operations, observability, testing, security scanning, and Foundation platform tasks.
+
+ ## Endpoints
+
+ ### Health & Discovery
+ - \`GET /health\` — API health check
+ - \`GET /stacks\` — list all discovered stacks
+
+ ### Lifecycle
+ - \`POST /stack/{name}/up\` — bring stack up (optional body: \`{"services":["svc1"]}\`)
+ - \`POST /stack/{name}/down\` — tear stack down
+ - \`POST /stack/{name}/restart\` — restart stack (optional body: \`{"services":["svc1"]}\`)
+ - \`POST /stack/{name}/pull\` — pull latest images
+
+ ### Observability
+ - \`GET /stack/{name}/status\` — container states and health
+ - \`GET /stack/{name}/logs?tail=100&service=svc\` — fetch logs
+ - \`GET /stack/{name}/operations\` — recent operation history
+
+ ### Foundation Platform
+ - \`GET /stack/{name}/foundation/health\` — Foundation service health
+ - \`POST /stack/{name}/foundation/bootstrap\` — bootstrap Foundation platform
+ - \`POST /stack/{name}/foundation/grant-admin\` — grant admin role (body: \`{"email":"user@example.com"}\`)
+ - \`POST /stack/{name}/foundation/run-compute-job\` — trigger compute job (body: \`{"job":"job-name"}\`)
+
+ ### QA Testing
+ - \`POST /stack/{name}/test\` — run test suite (optional body: \`{"suite":"smoke"}\`)
+ - \`GET /stack/{name}/test/suites\` — list available test suites
+
+ ### Security
+ - \`GET /stack/{name}/security/images\` — list images in stack
+ - \`POST /stack/{name}/security/scan\` — scan a specific image (body: \`{"image":"name:tag"}\`)
+ - \`POST /stack/{name}/security/scan-all\` — scan all stack images
+ - \`GET /stack/{name}/security/results\` — get scan results
+
+ ## Auth
+ - API key: \`X-API-Key: <key>\` header
+ - Bearer token: \`Authorization: Bearer <token>\` header
+ - Local dev (localhost): often no auth required
+
+ ## Commands You Suggest
+ - \`curl http://localhost:3090/health\` — check API health
+ - \`curl http://localhost:3090/stacks\` — list stacks
+ - \`curl http://localhost:3090/stack/{name}/status\` — get stack status
+ - \`curl -X POST http://localhost:3090/stack/{name}/up\` — bring stack up
+ - \`curl -X POST http://localhost:3090/stack/{name}/restart\` — restart stack
+ - \`curl http://localhost:3090/stack/{name}/logs?tail=50\` — get recent logs
+ - \`curl -X POST -H "Content-Type: application/json" -d '{"suite":"smoke"}' http://localhost:3090/stack/{name}/test\` — run tests
+
+ ## Rules
+ - Always check \`/health\` first to confirm the API is reachable.
+ - For GET requests, suggest simple curl commands. For POST requests, include \`-X POST\` and any required body.
+ - Use \`jq\` for formatting JSON output: \`curl ... | jq .\`
+ - When diagnosing issues, check \`/stack/{name}/status\` before \`/stack/{name}/logs\`.
+ - Output commands in fenced bash blocks.`,
+ },
+ ];
+
+ /**
+ * Load built-in agents into the registry.
+ */
+ export function loadBuiltinAgents(registry) {
+ for (const agent of BUILTIN_AGENTS) {
+ registry.agents.push({
+ pluginId: "builtin",
+ name: agent.name,
+ description: agent.description,
+ systemPrompt: agent.systemPrompt,
+ contextMode: agent.contextMode,
+ });
+ }
+ }
package/src/agent/context.js
@@ -1,7 +1,8 @@
  import fs from "node:fs";
+ import http from "node:http";
  import path from "node:path";
  import { execa } from "execa";
- import { loadSkills } from "../plugins/index.js";
+ import { loadSkills, searchKnowledge } from "../plugins/index.js";

  export const FOUNDATION_SYSTEM_PROMPT = `You are FOPS — the Foundation Operator. Think of yourself as the system admin who actually knows what they're doing. You're direct, no-BS, slightly irreverent. You don't sugarcoat problems — you diagnose and fix them. Channel the energy of someone who lives in the terminal and sees the matrix in docker logs.

@@ -11,44 +12,29 @@ export const FOUNDATION_SYSTEM_PROMPT = `You are FOPS — the Foundation Operato
  - When something is broken, say what's broken and how to fix it. No preambles.
  - Treat the user like a peer, not a customer.

- ## Capabilities
- - **Setup & Init**: Prerequisites, environment config, first-run setup
- - **Operations**: Start/stop services, status, logs, diagnostics
- - **Debugging**: Troubleshoot issues, analyze logs, suggest fixes
- - **Security**: Validate configs, check credentials safely (never log secrets)
-
  ## Commands
  When suggesting commands, ALWAYS use \`fops\` commands, not raw \`make\` or \`git clone\`. Output each in its own fenced block.

- **Always suggest 2–3 commands** so the user can pick. For example, if someone asks "what's running?", suggest both a status check and a diagnostic:
- \`\`\`bash
- fops status
- \`\`\`
- \`\`\`bash
- fops doctor
- \`\`\`
-
- If a single action is needed, pair it with a follow-up (e.g. restart + logs, doctor + up).
-
- ## Available fops Commands
- - fops init clone repos, bootstrap environment
- - fops up / fops down start/stop the stack
- - fops restart restart all or specific services
- - fops status show running containers
- - fops logs [service] tail logs
- - fops doctor run diagnostics
- - fops login authenticate with AWS/ECR
- - fops agent / fops chat talk to me
-
- ## Services & Ports
- Backend:9001, Frontend:3002, Storage:9002, Trino:8081, OPA:8181, Kafka:9092, Postgres:5432, Hive:9083, Vault:18201
-
- ## Setup Checklist (for new users)
- 1. Install prerequisites: git, docker, node >= 18, aws cli (optional)
- 2. \`npm install -g @meshxdata/fops\`
- 3. \`fops init\` — clones repos, sets up .env
- 4. \`fops up\` — boots the stack
- 5. \`fops doctor\` — verifies everything is healthy
+ **Always suggest 2–3 commands** so the user can pick. Pair a primary action with a follow-up (e.g. restart + logs, doctor + up).
+
+ ## Accuracy Rules
+ - ALWAYS check BOTH container status AND service health context. Container "running (healthy)" only means the Docker healthcheck passed — the service may still be initializing, have failed migrations, or be unresponsive.
+ - Cross-reference the "Service health" section (HTTP endpoint checks) with container status. If any endpoint is DOWN or unreachable, the stack is NOT fully ready — report this even if containers look healthy.
+ - If ANY container is exited, unhealthy, or failed, report it — never claim "all healthy" when failures exist.
+ - When containers have failed or services are unreachable, lead with the failures and suggest diagnostics.
+ - If "Missing images" context is present, report which images are missing and whether they need building or pulling. Suggest \`fops build\` for buildable images or \`make download\` (after ECR auth) for registry images.
+
+ ## Auto-Fix Rules
+ When you detect failing containers, DO NOT just report them — diagnose and fix:
+ 1. Read the container logs provided in context. Look for: missing files/volumes, permission errors, config issues, dependency failures, image problems.
+ 2. Apply the RIGHT fix based on the diagnosis:
+ - **Restarting/crash-loop with no logs**: likely a missing volume mount or stale image → suggest \`fops build\` then \`docker compose up -d <service>\`
+ - **Image not found / pull access denied**: missing image → \`fops build\` (for buildable) or \`fops download\` (for registry images)
+ - **Dependency unhealthy**: fix the dependency first, then restart dependents → \`docker compose up -d <dep-service>\`
+ - **Port conflict**: another process using the port → identify and kill or change port in .env
+ - **Migration failed**: database issue \`docker compose restart <service>-migrations\`
+ - **Config error / env missing**: check .env file → \`fops setup\`
+ 3. Output the fix commands in fenced bash blocks so they auto-execute. Be decisive — don't hedge with "you might try", just say what to do and give the command.

  ## Security Rules
  - Never output API keys, passwords, or tokens in responses
@@ -60,84 +46,289 @@ export function getFoundationContextBlock(root) {
  return `Project root: ${root}. Commands run in this directory.`;
  }

- export async function gatherStackContext(root) {
- const parts = [getFoundationContextBlock(root)];
+ async function gatherDockerStatus(root) {
+ try {
+ const { stdout: psOut } = await execa("docker", ["compose", "ps", "--format", "json"], { cwd: root, reject: false, timeout: 5000 });
+ if (psOut && psOut.trim()) {
+ const lines = psOut.trim().split("\n").filter(Boolean);
+ const parsed = lines.map((line) => {
+ try { return JSON.parse(line); } catch { return null; }
+ }).filter(Boolean);

- // Check Docker status (only if we have a project root)
- if (root) {
- try {
- const { stdout: psOut } = await execa("docker", ["compose", "ps", "--format", "json"], { cwd: root, reject: false, timeout: 5000 });
- if (psOut && psOut.trim()) {
- const lines = psOut.trim().split("\n").filter(Boolean);
- const services = lines.map((line) => {
- try {
- const o = JSON.parse(line);
- return `${o.Name || o.name || "?"}: ${o.State || o.Status || "?"}`;
- } catch {
- return null;
- }
- }).filter(Boolean);
- if (services.length) parts.push("Running containers:\n" + services.join("\n"));
- else parts.push("No containers running.");
+ if (!parsed.length) return "No containers running.";
+
+ let healthy = 0, unhealthy = 0, exited = 0, running = 0;
+ const failingContainers = [];
+ const services = parsed.map((o) => {
+ const name = o.Name || o.name || o.Service || "?";
+ const service = o.Service || name;
+ const state = (o.State || "").toLowerCase();
+ const status = o.Status || "";
+ const health = (o.Health || "").toLowerCase();
+ const exitCode = o.ExitCode ?? "";
+
+ if (state === "exited" || state === "dead") {
+ exited++;
+ failingContainers.push(service);
+ return `${name}: EXITED (code ${exitCode}) — ${status}`;
+ }
+ if (health === "unhealthy" || state === "restarting") {
+ unhealthy++;
+ failingContainers.push(service);
+ return `${name}: ${state === "restarting" ? "RESTARTING" : "UNHEALTHY"} — ${status}`;
+ }
+ if (state === "running" && (health === "healthy" || !health)) {
+ healthy++;
+ running++;
+ return `${name}: running ${health ? "(healthy)" : ""} — ${status}`;
+ }
+ running++;
+ return `${name}: ${state} ${health ? `(${health})` : ""} — ${status}`;
+ });
+
+ const summary = [];
+ if (running) summary.push(`${running} running`);
+ if (healthy) summary.push(`${healthy} healthy`);
+ if (unhealthy) summary.push(`${unhealthy} UNHEALTHY/RESTARTING`);
+ if (exited) summary.push(`${exited} EXITED/FAILED`);
+
+ let header = `Container summary: ${parsed.length} total — ${summary.join(", ")}`;
+ if (unhealthy || exited) {
+ header += "\n⚠ ATTENTION: Some containers are failing. Diagnose and fix the failures.";
  }
- } catch {
- parts.push("Docker: not available or not running.");
+
+ let result = header + "\n\nContainer details:\n" + services.join("\n");
+
+ // Auto-collect logs from failing containers (last 15 lines each, max 3)
+ if (failingContainers.length > 0) {
+ const logsToFetch = failingContainers.slice(0, 3);
+ const logResults = await Promise.all(
+ logsToFetch.map(async (svc) => {
+ try {
+ const { stdout, stderr } = await execa(
+ "docker", ["compose", "logs", svc, "--tail", "15", "--no-color"],
+ { cwd: root, reject: false, timeout: 5000 },
+ );
+ const output = (stdout || "") + (stderr || "");
+ if (output.trim()) return `\n--- Logs: ${svc} (last 15 lines) ---\n${output.trim()}`;
+ } catch { /* skip */ }
+ return `\n--- Logs: ${svc} ---\n(no logs available)`;
+ }),
+ );
+ result += "\n" + logResults.join("\n");
+ }
+
+ return result;
  }
+ } catch {
+ return "Docker: not available or not running.";
  }
+ return null;
+ }

- // Check image ages
- if (root) {
- try {
- const { stdout: imgOut } = await execa("docker", ["compose", "images", "--format", "json"], { cwd: root, reject: false, timeout: 5000 });
- if (imgOut?.trim()) {
- const ages = [];
- for (const line of imgOut.trim().split("\n").filter(Boolean)) {
- try {
- const img = JSON.parse(line);
- const id = img.ID || img.id || "";
- const repo = img.Repository || img.repository || "";
- const tag = img.Tag || img.tag || "";
- if (!id) continue;
- const { stdout: created } = await execa("docker", ["image", "inspect", id, "--format", "{{.Created}}"], { reject: false, timeout: 3000 });
- if (created?.trim()) {
- const days = Math.floor((Date.now() - new Date(created.trim()).getTime()) / 86400000);
- const name = `${repo}:${tag}`.replace(/^:|:$/g, "") || id.slice(0, 12);
- ages.push(`${name}: ${days}d old`);
- }
- } catch {}
+ /**
+ * HTTP GET with timeout. Returns { ok, status } or { ok: false, error }.
+ */
+ function httpPing(url, timeout = 3000) {
+ return new Promise((resolve) => {
+ const req = http.get(url, { timeout }, (res) => {
+ res.resume(); // drain
+ resolve({ ok: res.statusCode < 500, status: res.statusCode });
+ });
+ req.on("error", (err) => resolve({ ok: false, error: err.code || err.message }));
+ req.on("timeout", () => { req.destroy(); resolve({ ok: false, error: "TIMEOUT" }); });
+ });
+ }
+
+ const SERVICE_ENDPOINTS = [
+ { name: "Backend API", url: "http://localhost:9001/api/data/mesh/list?per_page=1", port: 9001 },
+ { name: "Frontend", url: "http://localhost:3002", port: 3002 },
+ { name: "Storage Engine (MinIO)", url: "http://localhost:9002/minio/health/live", port: 9002 },
+ { name: "Trino", url: "http://localhost:8081/v1/info", port: 8081 },
+ ];
+
+ async function gatherServiceHealth() {
+ const results = await Promise.all(
+ SERVICE_ENDPOINTS.map(async ({ name, url }) => {
+ const r = await httpPing(url);
+ if (r.ok) return `${name}: UP (HTTP ${r.status})`;
+ return `${name}: DOWN (${r.error || "HTTP " + r.status})`;
+ }),
+ );
+ return "Service health (HTTP checks):\n" + results.join("\n");
+ }
+
+ /**
+ * Detect which compose images are missing locally.
+ * Compares `docker compose config --images` against `docker images`.
+ */
+ async function gatherMissingImages(root) {
+ try {
+ // Get all images required by compose
+ const { stdout: configImages } = await execa(
+ "docker", ["compose", "config", "--images"],
+ { cwd: root, reject: false, timeout: 10000 },
+ );
+ if (!configImages?.trim()) return null;
+
+ const required = [...new Set(configImages.trim().split("\n").filter(Boolean))];
+
+ // Get locally available images
+ const { stdout: localImages } = await execa(
+ "docker", ["images", "--format", "{{.Repository}}:{{.Tag}}"],
+ { reject: false, timeout: 5000 },
+ );
+ const localSet = new Set((localImages || "").trim().split("\n").filter(Boolean));
+
+ // Also check by repo without tag (docker sometimes lists <none> tag)
+ const localRepos = new Set(
+ (localImages || "").trim().split("\n").filter(Boolean).map((i) => i.split(":")[0]),
+ );
+
+ // Find which compose services have build contexts
+ const { stdout: configJson } = await execa(
+ "docker", ["compose", "config", "--format", "json"],
+ { cwd: root, reject: false, timeout: 10000 },
+ );
+ const buildableImages = new Set();
+ if (configJson?.trim()) {
+ try {
+ const config = JSON.parse(configJson);
+ for (const [, svc] of Object.entries(config.services || {})) {
+ if (svc.build && svc.image) buildableImages.add(svc.image);
  }
- if (ages.length) parts.push("Image ages:\n" + ages.join("\n"));
- }
- } catch {}
+ } catch { /* ignore */ }
+ }
+
+ const missing = required.filter((img) => !localSet.has(img) && !localRepos.has(img.split(":")[0]));
+ if (missing.length === 0) return null;
+
+ const lines = missing.map((img) => {
+ const action = buildableImages.has(img) ? "needs BUILD (has build context)" : "needs PULL from registry";
+ return ` ${img} — ${action}`;
+ });
+
+ return `Missing images (${missing.length}/${required.length}):\n` + lines.join("\n") +
+ "\n\nTo build local images: make build\nTo pull registry images: make download (requires ECR auth)";
+ } catch {
+ return null;
  }
+ }

- // Check prerequisites (each wrapped individually so one failure doesn't kill the rest)
- const prereqs = [];
- try { await execa("git", ["--version"], { reject: false, timeout: 3000 }); prereqs.push("git: ✓"); } catch { prereqs.push("git: ✗"); }
- try { await execa("docker", ["info"], { reject: false, timeout: 5000 }); prereqs.push("docker: ✓"); } catch { prereqs.push("docker: ✗"); }
- try { await execa("aws", ["--version"], { reject: false, timeout: 3000 }); prereqs.push("aws-cli: ✓"); } catch { prereqs.push("aws-cli: ✗ (optional)"); }
- parts.push("Prerequisites: " + prereqs.join(", "));
+ async function gatherImageAges(root) {
+ try {
+ const { stdout: imgOut } = await execa("docker", ["compose", "images", "--format", "json"], { cwd: root, reject: false, timeout: 5000 });
+ if (imgOut?.trim()) {
+ const images = imgOut.trim().split("\n").filter(Boolean).map((line) => {
+ try { return JSON.parse(line); } catch { return null; }
+ }).filter(Boolean);

- // Check for .env file
- if (root) {
- const envPath = path.join(root, ".env");
- const envExamplePath = path.join(root, ".env.example");
- if (fs.existsSync(envPath)) {
- parts.push(".env: configured");
- } else if (fs.existsSync(envExamplePath)) {
- parts.push(".env: not configured (run 'fops setup' or 'cp .env.example .env')");
+ // Inspect all images in parallel
+ const ageResults = await Promise.all(images.map(async (img) => {
+ try {
+ const id = img.ID || img.id || "";
+ const repo = img.Repository || img.repository || "";
+ const tag = img.Tag || img.tag || "";
+ if (!id) return null;
+ const { stdout: created } = await execa("docker", ["image", "inspect", id, "--format", "{{.Created}}"], { reject: false, timeout: 3000 });
+ if (created?.trim()) {
+ const days = Math.floor((Date.now() - new Date(created.trim()).getTime()) / 86400000);
+ const name = `${repo}:${tag}`.replace(/^:|:$/g, "") || id.slice(0, 12);
+ return `${name}: ${days}d old`;
+ }
+ } catch {}
+ return null;
+ }));
+
+ const ages = ageResults.filter(Boolean);
+ if (ages.length) return "Image ages:\n" + ages.join("\n");
  }
+ } catch {}
+ return null;
+ }
+
+ async function gatherPrereqs() {
+ const checks = await Promise.all([
+ execa("git", ["--version"], { reject: false, timeout: 3000 }).then(() => "git: ✓").catch(() => "git: ✗ (REQUIRED)"),
+ execa("docker", ["info"], { reject: false, timeout: 5000 }).then(() => "docker: ✓").catch(() => "docker: ✗ (REQUIRED)"),
+ execa("aws", ["--version"], { reject: false, timeout: 3000 }).then(() => "aws-cli: ✓").catch(() => "aws-cli: ✗ (REQUIRED)"),
+ ]);
+
+ // Check git netrc for GitHub auth
+ const homedir = (await import("node:os")).default.homedir();
+ const netrcPath = (await import("node:path")).default.join(homedir, ".netrc");
+ let netrc = "git-netrc: ✗ (REQUIRED — needed for private repo access)";
+ try {
+ const netrcContent = (await import("node:fs")).default.readFileSync(netrcPath, "utf8");
+ if (netrcContent.includes("github.com")) {
+ netrc = "git-netrc: ✓ (github.com configured)";
+ } else {
+ netrc = "git-netrc: ✗ (no github.com entry — REQUIRED)";
+ }
+ } catch {}
+
+ checks.push(netrc);
+ return "Prerequisites: " + checks.join(", ");
+ }
+
+ function checkEnvFile(root) {
+ const envPath = path.join(root, ".env");
+ const envExamplePath = path.join(root, ".env.example");
+ if (fs.existsSync(envPath)) {
+ return ".env: configured";
+ } else if (fs.existsSync(envExamplePath)) {
+ return ".env: not configured (run 'fops setup' or 'cp .env.example .env')";
  }
+ return null;
+ }

- // Inject plugin skills into context
+ async function gatherSkills(registry) {
  try {
- const skills = await loadSkills();
+ const skills = await loadSkills(registry);
  if (skills.length) {
- parts.push("## Additional Skills\n" + skills.map((s) => s.content).join("\n\n"));
+ return "## Additional Skills\n" + skills.map((s) => s.content).join("\n\n");
  }
  } catch {
  // skip if skill loading fails
  }
+ return null;
+ }
+
+ export async function gatherStackContext(root, { registry, message } = {}) {
+ const parts = [getFoundationContextBlock(root)];
+
+ if (root) {
+ // Run all independent checks in parallel
+ const [dockerStatus, serviceHealth, missingImages, imageAges, prereqs, envInfo, skills, knowledge] = await Promise.all([
+ gatherDockerStatus(root),
+ gatherServiceHealth(),
+ gatherMissingImages(root),
+ gatherImageAges(root),
+ gatherPrereqs(),
+ Promise.resolve(checkEnvFile(root)),
+ gatherSkills(registry),
+ registry && message ? searchKnowledge(registry, message) : Promise.resolve(null),
+ ]);
+
+ if (dockerStatus) parts.push(dockerStatus);
+ if (serviceHealth) parts.push(serviceHealth);
+ if (missingImages) parts.push(missingImages);
+ if (imageAges) parts.push(imageAges);
+ parts.push(prereqs);
+ if (envInfo) parts.push(envInfo);
+ if (skills) parts.push(skills);
+ if (knowledge) parts.push(knowledge);
+ } else {
+ // No root — still check prereqs, skills, and knowledge
+ const [prereqs, skills, knowledge] = await Promise.all([
+ gatherPrereqs(),
+ gatherSkills(registry),
+ registry && message ? searchKnowledge(registry, message) : Promise.resolve(null),
+ ]);
+ parts.push(prereqs);
+ if (skills) parts.push(skills);
+ if (knowledge) parts.push(knowledge);
+ }

  return parts.join("\n\n");
  }
package/src/agent/index.js
@@ -1,2 +1,3 @@
  export { runAgentSingleTurn, runAgentInteractive } from "./agent.js";
  export { gatherStackContext } from "./context.js";
+ export { BUILTIN_AGENTS, loadBuiltinAgents } from "./agents.js";