wolverine-ai 4.5.3 → 4.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bin/wolverine.js CHANGED
@@ -27,6 +27,7 @@ ${chalk.bold("Options:")}
27
27
  --single Force single-worker mode (no clustering)
28
28
  --workers <n> Force specific worker count
29
29
  --info Show system info and exit
30
+ --init Scan server/ and build context map (routes, DB, config, deps)
30
31
 
31
32
  ${chalk.bold("Configuration:")}
32
33
  server/config/settings.json Models, telemetry, limits, health checks
@@ -54,6 +55,25 @@ if (args.includes("--info")) {
54
55
  process.exit(0);
55
56
  }
56
57
 
58
+ // --init: scan server/ and build context map
59
+ if (args.includes("--init")) {
60
+ const { scan } = require("../src/core/server-context");
61
+ console.log(chalk.blue("\n 🔍 Scanning server/ directory...\n"));
62
+ const ctx = scan(process.cwd());
63
+ if (!ctx) {
64
+ console.log(chalk.yellow(" No server/ directory found."));
65
+ process.exit(1);
66
+ }
67
+ console.log(chalk.green(` ✅ Server context built:`));
68
+ console.log(chalk.gray(` Routes: ${ctx.routes.reduce((s, r) => s + r.endpoints.length, 0)}`));
69
+ console.log(chalk.gray(` Middleware: ${ctx.middleware.length}`));
70
+ console.log(chalk.gray(` Database: ${ctx.database.type || "none"}${ctx.database.tables.length > 0 ? ` (${ctx.database.tables.length} tables)` : ""}`));
71
+ console.log(chalk.gray(` Env vars: ${ctx.envVars.length}`));
72
+ console.log(chalk.gray(` Files: ${ctx.structure.length}`));
73
+ console.log(chalk.gray(` Saved to: .wolverine/server-context.json\n`));
74
+ process.exit(0);
75
+ }
76
+
57
77
  // --update: safe framework update
58
78
  if (args.includes("--update")) {
59
79
  const { safeUpdate } = require("../src/skills/update");
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "wolverine-ai",
3
- "version": "4.5.3",
3
+ "version": "4.6.0",
4
4
  "description": "Self-healing Node.js server framework powered by AI. Catches crashes, diagnoses errors, generates fixes, verifies, and restarts — automatically.",
5
5
  "main": "src/index.js",
6
6
  "bin": {
@@ -42,7 +42,7 @@ const SEED_DOCS = [
42
42
  metadata: { topic: "backup-system" },
43
43
  },
44
44
  {
45
- text: "Wolverine security: file sandbox restricts all reads/writes to the project directory. Prompt injection detection runs on every error (regex layer + AI audit via AUDIT_MODEL). Rate limiter prevents error explosion cost with sliding window, min gap, hourly token budget, and exponential backoff for error loops.",
45
+ text: "Wolverine security: multi-layer defense. (1) File sandbox restricts reads/writes to project dir, vault paths blocked. (2) Injection detector ~50 regex patterns scan errors BEFORE AI sees them: prompt override, role hijack, code exec, env leak, bash escape, reverse shell, data exfiltration, destructive commands, framework source writes. Blocks heal entirely on detection. (3) Bash sandbox escape — _detectSandboxEscape checks every bash_exec: blocks cd outside project, writes to absolute paths, subshell escapes, curl @file exfil, pipe to tee outside project. (4) BLOCKED_COMMANDS 18 patterns: rm -rf, mkfs, dd, shutdown, git push --force, npm publish, curl|bash, wget|sh, curl $(), cat .env, writes to src/. (5) Secret redactor scrubs API keys, hex keys, vault paths from all output. (6) Rate limiter — sliding window + exponential backoff. (7) Adaptive rate limiter — auto-throttles server at 70%/85% CPU/memory thresholds.",
46
46
  metadata: { topic: "security" },
47
47
  },
48
48
  {
@@ -86,7 +86,7 @@ const SEED_DOCS = [
86
86
  metadata: { topic: "server-best-practices" },
87
87
  },
88
88
  {
89
- text: "Wolverine editable scope: only files inside server/ can be modified by the agent. src/, bin/, tests/, .env, package.json, node_modules/ are all protected. The agent's _isProtectedPath guard blocks writes to anything outside server/. Direct edits target the script wolverine was launched with (server/index.js by default).",
89
+ text: "Wolverine editable scope: only files inside server/ can be modified by the agent. src/, bin/, tests/, .env, package.json, node_modules/, .wolverine/vault/ are all protected. The agent's _isProtectedPath guard blocks write_file/edit_file/move_file to protected paths. bash_exec additionally runs through _detectSandboxEscape which blocks shell commands that write outside the project via redirection, cp, mv, tee, or subshell. Vault files (master.key, eth.vault) are blocked by both sandbox and _isProtectedPath.",
90
90
  metadata: { topic: "editable-scope" },
91
91
  },
92
92
  {
@@ -110,7 +110,7 @@ const SEED_DOCS = [
110
110
  metadata: { topic: "sub-agent-workflow" },
111
111
  },
112
112
  {
113
- text: "Sub-agent tool restrictions: explore gets read_file/glob/grep/git_log/git_diff/list_dir/check_env/check_port/check_memory/check_network/list_processes/inspect_db/audit_deps. plan gets read_file/glob/grep/list_dir/inspect_db/check_env/inspect_env/audit_deps/check_migration. fix gets read_file/write_file/edit_file/glob/grep/bash_exec/move_file/run_db_fix/audit_deps/restart_service. verify gets read_file/glob/grep/bash_exec/inspect_db/check_port/check_memory/check_logs. research gets read_file/grep/web_fetch/check_logs. security gets read_file/glob/grep/inspect_db/inspect_env. database gets read_file/write_file/edit_file/glob/grep/bash_exec/inspect_db/run_db_fix. 24 tools total, each sub-agent type gets tools relevant to its role.",
113
+ text: "Sub-agent tool restrictions: explore gets read_file/glob/grep/git_log/git_diff/list_dir/check_env/check_port/check_memory/check_network/list_processes/inspect_db/audit_deps/check_file_descriptors/inspect_cache. plan gets read_file/glob/grep/list_dir/inspect_db/check_env/inspect_env/audit_deps/check_migration/check_event_loop. fix gets read_file/write_file/edit_file/glob/grep/bash_exec/move_file/run_db_fix/audit_deps/restart_service/verify_node_modules/disk_cleanup. verify gets read_file/glob/grep/bash_exec/inspect_db/check_port/check_memory/check_logs/check_websocket/inspect_certificate. research gets read_file/grep/web_fetch/check_logs. security gets read_file/glob/grep/inspect_db/inspect_env/check_event_loop/inspect_certificate. database gets read_file/write_file/edit_file/glob/grep/bash_exec/inspect_db/run_db_fix/inspect_cache. 31 tools total across 9 categories.",
114
114
  metadata: { topic: "sub-agent-tools" },
115
115
  },
116
116
  {
@@ -138,9 +138,13 @@ const SEED_DOCS = [
138
138
  metadata: { topic: "prompt-caching" },
139
139
  },
140
140
  {
141
- text: "Platform telemetry: lightweight background process, zero-config. Default platform: api.wolverinenode.xyz. Auto-registers on first run (retries every 60s until platform responds), saves key to .wolverine/platform-key. Heartbeat payload matches PLATFORM.md spec: instanceId, server (name/port/uptime/status/pid), process (memoryMB/cpuPercent), routes, repairs, usage (tokens/cost/calls/byCategory), brain, backups. Offline-resilient: queues up to 1440 heartbeats locally, drains on reconnect. No chalk dependency, cached version/key in memory, minimal IO. Opt out: WOLVERINE_TELEMETRY=false. Override URL: WOLVERINE_PLATFORM_URL.",
141
+ text: "Platform telemetry: lightweight background process, zero-config. Default platform: api.wolverinenode.xyz. Auto-registers on first run, heartbeats every 60s. Offline-resilient: queues up to 1440 heartbeats locally, drains on reconnect. Opt out: WOLVERINE_TELEMETRY=false.",
142
142
  metadata: { topic: "platform-telemetry" },
143
143
  },
144
+ {
145
+ text: "Server context scanner (wolverine --init): scans server/ directory on every startup to build .wolverine/server-context.json. Extracts routes (HTTP methods + paths from fastify/express), middleware stack, database type + tables, config structure, dependencies, env vars used (process.env.X patterns), and full file tree. Context summary auto-injected into agent's heal prompt so it knows the server's route map, DB schema, and dependencies without re-scanning. Manual scan: wolverine --init. Auto-scan: runs silently on every boot. The context is read-only — never modified by the agent.",
146
+ metadata: { topic: "server-context" },
147
+ },
144
148
  {
145
149
  text: "Telemetry architecture: 4 files, ~250 lines total. heartbeat.js sends one HTTP POST every 60s (5s timeout, non-blocking). register.js auto-registers and caches key in memory + disk. queue.js appends to JSONL file only on failure, trims lazily. telemetry.js collects from subsystems using optional chaining (no crashes if subsystem missing). All secrets redacted before sending. Response bodies drained immediately (res.resume). No blocking, no delays, no busy waits.",
146
150
  metadata: { topic: "telemetry-architecture" },
@@ -226,11 +230,11 @@ const SEED_DOCS = [
226
230
  metadata: { topic: "error-monitor" },
227
231
  },
228
232
  {
229
- text: "Agent tool details: read_file supports offset/limit for large files. edit_file does surgical find-and-replace (preferred for small fixes). glob_files discovers files by pattern (**/*.js). grep_code does regex search with context lines. list_dir shows directory contents with file sizes. move_file relocates/renames files. bash_exec runs shell commands (30s default timeout, 60s hard cap, dangerous commands blocked: rm -rf /, git push --force, npm publish). inspect_db reads SQLite: action=tables (list), action=schema (CREATE statements), action=query (SELECT/PRAGMA only). run_db_fix writes SQLite with SAFETY: auto-snapshots affected rows BEFORE write (SELECT WHERE matching the UPDATE/DELETE), executes the fix, snapshots AFTER, returns before/after comparison so agent can verify. Always backs up the DB file. Agent MUST inspect_db before run_db_fix never write blind. For NaN/null data errors: prefer fixing code to handle edge cases over modifying production data. check_port finds what process is using a port (netstat/lsof). check_env lists environment variables with values redacted. audit_deps runs full npm health check. check_migration returns known upgrade paths. web_fetch retrieves URL content.",
233
+ text: "Agent tool details — FILE: read_file (offset/limit for large files), edit_file (surgical find-and-replace, single match), glob_files (pattern discovery, **/*.js), grep_code (regex search with context, sandbox-enforced reads), list_dir (directory listing with sizes, sandbox-checked), move_file (relocate/rename). SHELL: bash_exec (30s default, 60s cap, 18 blocked command patterns, sandbox escape detection for writes outside project, SSRF-protected). DATABASE: inspect_db (SQLite: tables/schema/query, no stacked queries via ; block), run_db_fix (auto-snapshots before/after, existence check before backup). DIAGNOSTICS: check_port (platform-aware, netstat/lsof), check_env (values redacted, secrets always show SET only), check_memory (RSS/heap/system, OOM warning), list_processes (platform-aware node processes), check_logs (journalctl/logfile with grep filter, capped 1-1000 lines), check_network (DNS/port/URL with sanitized inputs), inspect_env (names only, grouped by category). SERVER: restart_service (flag-file, not direct restart). DEPS: audit_deps (npm health score), check_migration (known upgrade paths). RESEARCH: web_fetch (SSRF blocklist for private IPs/metadata). ADVANCED: verify_node_modules (integrity vs package-lock, broken .bin, cross-platform), inspect_certificate (TLS connect, expiry/SAN/chain/self-signed), inspect_cache (Redis PING/INFO via raw TCP), disk_cleanup (safe targets: old backups + npm cache, dry-run default), check_file_descriptors (Linux /proc/fd count vs ulimit), check_event_loop (static scan for readFileSync/execSync/pbkdf2Sync patterns), check_websocket (real WS upgrade handshake test).",
230
234
  metadata: { topic: "agent-tools-detail" },
231
235
  },
232
236
  {
233
- text: "Server problem categories the agent can fix: CODE BUGS (SyntaxError, TypeError, ReferenceError → edit_file), DEPENDENCIES (Cannot find module → npm install, corrupted node_modules → rm + reinstall), DATABASE (invalid entries → run_db_fix UPDATE, missing table → CREATE TABLE, schema mismatch → ALTER TABLE, constraint violationfix data or schema), CONFIG (invalid JSON → edit_file, missing env vars → write .env, wrong port → edit config), FILESYSTEM (misplaced files → move_file, missing directories → bash_exec mkdir, wrong permissions → chmod), NETWORK (port conflict → check_port + kill, service down → restart, connection refused → check config), STATE (corrupted cache delete + restart, stale locks remove lock file, git conflicts resolve markers), IDEMPOTENCY (double-fireadd idempotencyGuard middleware, missing idempotency keyadd X-Idempotency-Key header support, duplicate DB entries add UNIQUE constraint or use db.idempotent()). The agent investigates before fixing — reads files, checks directories, inspects databases, never guesses.",
237
+ text: "Server problem categories the agent can fix: CODE BUGS (SyntaxError/TypeError/ReferenceError → edit_file). DEPENDENCIES (Cannot find module → npm install, corrupted node_modules → verify_node_modules then rm + reinstall). DATABASE (invalid entries → inspect_db then run_db_fix, schema issues → ALTER TABLE, pool exhaustion → inspect_cache for Redis). CONFIG (invalid JSON → edit_file, missing env vars → inspect_env to check then write .env, wrong port → edit config). FILESYSTEM (misplaced files → move_file, missing dirs → bash_exec mkdir, EACCES → chmod, ENOSPC → disk_cleanup). NETWORK (port conflict → check_port + kill, ECONNREFUSED → check_network for DNS/connectivity, Redis down → inspect_cache). SSL/TLS (CERT_EXPIRED → inspect_certificate for details, self-signed → check chain). MEMORY (OOM/SIGKILL → check_memory for pressure, EMFILE → check_file_descriptors, event loop block → check_event_loop scan). WEBSOCKET (1006/disconnect → check_websocket handshake test). STATE (corrupted cache → delete + restart_service, stale locks → remove lock file). The agent investigates before fixing — reads files, checks diagnostics, inspects databases, never guesses."
234
238
  metadata: { topic: "server-problems" },
235
239
  },
236
240
  {
@@ -211,6 +211,16 @@ class WolverineRunner {
211
211
  console.log(chalk.yellow(` ⚠️ Vault init failed (non-fatal): ${err.message}`));
212
212
  }
213
213
 
214
+ // Scan server context (routes, DB, config, deps) for agent knowledge
215
+ try {
216
+ const { scan, load } = require("./server-context");
217
+ const ctx = scan(this.cwd);
218
+ if (ctx) {
219
+ const routes = ctx.routes.reduce((s, r) => s + r.endpoints.length, 0);
220
+ console.log(chalk.gray(` 🗺️ Server context: ${routes} routes, ${ctx.structure.length} files, ${ctx.envVars.length} env vars`));
221
+ }
222
+ } catch {}
223
+
214
224
  // Log redactor stats
215
225
  const redactorStats = this.redactor.getStats();
216
226
  console.log(chalk.gray(` 🔐 Secret redactor: ${redactorStats.trackedSecrets} secrets tracked from ${redactorStats.envFiles} env file(s)`));
@@ -0,0 +1,229 @@
1
+ const fs = require("fs");
2
+ const path = require("path");
3
+
4
+ /**
5
+ * Server Context Scanner — builds a structured map of the server/ directory.
6
+ *
7
+ * Scans routes, middleware, config, database, dependencies, and file structure
8
+ * to give the AI agent full context when diagnosing errors.
9
+ *
10
+ * Runs automatically on startup (stored in .wolverine/server-context.json).
11
+ * Can be triggered manually: wolverine --init or require('./server-context').scan(cwd)
12
+ *
13
+ * The context is injected into the agent's system prompt so it knows the
14
+ * server's structure without re-scanning on every heal.
15
+ */
16
+
17
+ const CONTEXT_PATH = ".wolverine/server-context.json";
18
+ const MAX_FILE_SCAN = 200; // don't scan more than 200 files
19
+ const SKIP_DIRS = new Set(["node_modules", ".git", ".wolverine", "dist", ".next", ".cache", "coverage", "__pycache__"]);
20
+
21
+ function scan(cwd) {
22
+ const serverDir = path.join(cwd, "server");
23
+ if (!fs.existsSync(serverDir)) return null;
24
+
25
+ const context = {
26
+ scannedAt: new Date().toISOString(),
27
+ routes: [],
28
+ middleware: [],
29
+ database: { tables: [], config: null },
30
+ config: {},
31
+ dependencies: {},
32
+ structure: [],
33
+ exports: [],
34
+ envVars: [],
35
+ };
36
+
37
+ // 1. Scan routes
38
+ const routesDir = path.join(serverDir, "routes");
39
+ if (fs.existsSync(routesDir)) {
40
+ for (const file of _listFiles(routesDir, ".js")) {
41
+ try {
42
+ const code = fs.readFileSync(file, "utf-8");
43
+ const relPath = path.relative(cwd, file);
44
+ const methods = [];
45
+ // Match fastify.get/post/put/delete/patch or app.get/post etc
46
+ const routeRegex = /(?:fastify|app|router)\.(get|post|put|delete|patch|options|head)\s*\(\s*['"`]([^'"`]+)/gi;
47
+ let m;
48
+ while ((m = routeRegex.exec(code)) !== null) {
49
+ methods.push({ method: m[1].toUpperCase(), path: m[2] });
50
+ }
51
+ // Match fastify.register with prefix
52
+ const registerRegex = /register\s*\(.*?prefix\s*:\s*['"`]([^'"`]+)/gi;
53
+ while ((m = registerRegex.exec(code)) !== null) {
54
+ methods.push({ method: "REGISTER", path: m[1] });
55
+ }
56
+ if (methods.length > 0) {
57
+ context.routes.push({ file: relPath, endpoints: methods });
58
+ }
59
+ } catch {}
60
+ }
61
+ }
62
+
63
+ // 2. Scan middleware
64
+ const indexFile = path.join(serverDir, "index.js");
65
+ if (fs.existsSync(indexFile)) {
66
+ try {
67
+ const code = fs.readFileSync(indexFile, "utf-8");
68
+ // Find app.use() or fastify.register() calls
69
+ const mwRegex = /(?:app\.use|fastify\.register)\s*\(\s*(?:require\s*\(\s*['"`]([^'"`]+)['"`]\s*\)|(\w+))/gi;
70
+ let m;
71
+ while ((m = mwRegex.exec(code)) !== null) {
72
+ context.middleware.push(m[1] || m[2]);
73
+ }
74
+ } catch {}
75
+ }
76
+
77
+ // 3. Scan database
78
+ const dbFiles = [
79
+ path.join(serverDir, "lib", "db.js"),
80
+ path.join(serverDir, "db.js"),
81
+ path.join(serverDir, "models", "index.js"),
82
+ path.join(serverDir, "database.js"),
83
+ ];
84
+ for (const dbFile of dbFiles) {
85
+ if (!fs.existsSync(dbFile)) continue;
86
+ try {
87
+ const code = fs.readFileSync(dbFile, "utf-8");
88
+ // Detect database type
89
+ if (/require\s*\(\s*['"]pg['"]/.test(code)) context.database.type = "postgresql";
90
+ else if (/require\s*\(\s*['"]better-sqlite3['"]/.test(code)) context.database.type = "sqlite";
91
+ else if (/require\s*\(\s*['"]mysql/.test(code)) context.database.type = "mysql";
92
+ else if (/require\s*\(\s*['"]mongoose['"]/.test(code)) context.database.type = "mongodb";
93
+ else if (/require\s*\(\s*['"]ioredis['"]/.test(code)) context.database.hasRedis = true;
94
+ // Find CREATE TABLE statements
95
+ const tableRegex = /CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?["`]?(\w+)/gi;
96
+ let m;
97
+ while ((m = tableRegex.exec(code)) !== null) {
98
+ context.database.tables.push(m[1]);
99
+ }
100
+ context.database.config = path.relative(cwd, dbFile);
101
+ } catch {}
102
+ }
103
+
104
+ // 4. Scan config
105
+ const settingsPath = path.join(serverDir, "config", "settings.json");
106
+ if (fs.existsSync(settingsPath)) {
107
+ try {
108
+ const settings = JSON.parse(fs.readFileSync(settingsPath, "utf-8"));
109
+ // Only include non-secret top-level keys
110
+ context.config = Object.keys(settings).reduce((acc, k) => {
111
+ if (typeof settings[k] === "object") acc[k] = Object.keys(settings[k]);
112
+ else acc[k] = typeof settings[k];
113
+ return acc;
114
+ }, {});
115
+ } catch {}
116
+ }
117
+
118
+ // 5. Dependencies
119
+ const pkgPath = path.join(cwd, "package.json");
120
+ if (fs.existsSync(pkgPath)) {
121
+ try {
122
+ const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf-8"));
123
+ context.dependencies = {
124
+ production: Object.keys(pkg.dependencies || {}),
125
+ dev: Object.keys(pkg.devDependencies || {}),
126
+ optional: Object.keys(pkg.optionalDependencies || {}),
127
+ };
128
+ context.nodeVersion = pkg.engines?.node || "unknown";
129
+ context.version = pkg.version;
130
+ } catch {}
131
+ }
132
+
133
+ // 6. File structure (server/ tree)
134
+ const tree = [];
135
+ let fileCount = 0;
136
+ const walk = (dir, depth) => {
137
+ if (depth > 4 || fileCount > MAX_FILE_SCAN) return;
138
+ try {
139
+ for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
140
+ if (SKIP_DIRS.has(entry.name)) continue;
141
+ const rel = path.relative(cwd, path.join(dir, entry.name));
142
+ if (entry.isDirectory()) {
143
+ tree.push(rel + "/");
144
+ walk(path.join(dir, entry.name), depth + 1);
145
+ } else {
146
+ tree.push(rel);
147
+ fileCount++;
148
+ }
149
+ }
150
+ } catch {}
151
+ };
152
+ walk(serverDir, 0);
153
+ context.structure = tree;
154
+
155
+ // 7. Env vars used (scan for process.env.X patterns)
156
+ const envVars = new Set();
157
+ const scanForEnv = (dir) => {
158
+ if (!fs.existsSync(dir)) return;
159
+ for (const file of _listFiles(dir, ".js")) {
160
+ try {
161
+ const code = fs.readFileSync(file, "utf-8");
162
+ const envRegex = /process\.env\.([A-Z_][A-Z0-9_]*)/g;
163
+ let m;
164
+ while ((m = envRegex.exec(code)) !== null) envVars.add(m[1]);
165
+ } catch {}
166
+ }
167
+ };
168
+ scanForEnv(serverDir);
169
+ context.envVars = [...envVars].sort();
170
+
171
+ // Save
172
+ const outPath = path.join(cwd, CONTEXT_PATH);
173
+ fs.mkdirSync(path.dirname(outPath), { recursive: true });
174
+ fs.writeFileSync(outPath, JSON.stringify(context, null, 2), "utf-8");
175
+
176
+ return context;
177
+ }
178
+
179
+ /**
180
+ * Load cached context (fast — no rescan).
181
+ */
182
+ function load(cwd) {
183
+ const ctxPath = path.join(cwd, CONTEXT_PATH);
184
+ if (!fs.existsSync(ctxPath)) return null;
185
+ try {
186
+ return JSON.parse(fs.readFileSync(ctxPath, "utf-8"));
187
+ } catch { return null; }
188
+ }
189
+
190
+ /**
191
+ * Get a compact text summary for injecting into AI prompts.
192
+ */
193
+ function getSummary(cwd) {
194
+ const ctx = load(cwd);
195
+ if (!ctx) return "";
196
+
197
+ const lines = [];
198
+ if (ctx.routes.length > 0) {
199
+ const allEndpoints = ctx.routes.flatMap(r => r.endpoints.map(e => `${e.method} ${e.path}`));
200
+ lines.push(`Routes (${allEndpoints.length}): ${allEndpoints.slice(0, 20).join(", ")}${allEndpoints.length > 20 ? ` +${allEndpoints.length - 20} more` : ""}`);
201
+ }
202
+ if (ctx.middleware.length > 0) lines.push(`Middleware: ${ctx.middleware.join(", ")}`);
203
+ if (ctx.database.type) lines.push(`Database: ${ctx.database.type}${ctx.database.tables.length > 0 ? ` (tables: ${ctx.database.tables.join(", ")})` : ""}${ctx.database.hasRedis ? " + Redis" : ""}`);
204
+ if (ctx.envVars.length > 0) lines.push(`Env vars used: ${ctx.envVars.slice(0, 15).join(", ")}${ctx.envVars.length > 15 ? ` +${ctx.envVars.length - 15} more` : ""}`);
205
+ if (ctx.structure.length > 0) lines.push(`Server files: ${ctx.structure.length}`);
206
+ if (ctx.version) lines.push(`Version: ${ctx.version}`);
207
+
208
+ return lines.length > 0 ? "SERVER CONTEXT:\n" + lines.join("\n") : "";
209
+ }
210
+
211
+ function _listFiles(dir, ext) {
212
+ const results = [];
213
+ let count = 0;
214
+ const walk = (d) => {
215
+ if (count > MAX_FILE_SCAN) return;
216
+ try {
217
+ for (const entry of fs.readdirSync(d, { withFileTypes: true })) {
218
+ if (SKIP_DIRS.has(entry.name)) continue;
219
+ const full = path.join(d, entry.name);
220
+ if (entry.isDirectory()) walk(full);
221
+ else if (!ext || entry.name.endsWith(ext)) { results.push(full); count++; }
222
+ }
223
+ } catch {}
224
+ };
225
+ walk(dir);
226
+ return results;
227
+ }
228
+
229
+ module.exports = { scan, load, getSummary, CONTEXT_PATH };
@@ -208,6 +208,12 @@ async function _healImpl({ stderr, cwd, sandbox, notifier, rateLimiter, backupMa
208
208
  }
209
209
 
210
210
  let brainContext = "";
211
+ // Inject server context (routes, DB, config, deps) if available
212
+ try {
213
+ const { getSummary } = require("./server-context");
214
+ const serverCtx = getSummary(cwd);
215
+ if (serverCtx) brainContext += serverCtx + "\n\n";
216
+ } catch {}
211
217
  // Inject relevant skill context (claw-code: pre-enrich prompt with matched tools)
212
218
  if (skills) {
213
219
  const skillCtx = skills.buildContext(parsed.errorMessage);