@toolbaux/guardian 0.1.10 → 0.1.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -79,6 +79,58 @@ Guardian auto-injects architecture context into `CLAUDE.md` so your AI tool read
79
79
 
80
80
  The block between markers is replaced on every save (VSCode extension) and every commit (pre-commit hook). Your manual content outside the markers is never touched.
81
81
 
82
+ ## MCP Server — AI Tools Connect Directly
83
+
84
+ Guardian includes an MCP server that Claude Code and Cursor connect to automatically. The VSCode extension sets this up on first activation — no manual config needed.
85
+
86
+ **6 compact tools available to AI:**
87
+
88
+ | Tool | Tokens | Purpose |
89
+ |------|--------|---------|
90
+ | `guardian_orient` | ~100 | Project summary at session start |
91
+ | `guardian_context` | ~50-80 | File or endpoint dependencies before editing |
92
+ | `guardian_impact` | ~30 | What breaks if you change a file |
93
+ | `guardian_search` | ~70 | Find endpoints, models, modules by keyword |
94
+ | `guardian_model` | ~90 | Full field details (only when needed) |
95
+ | `guardian_metrics` | ~50 | Session usage stats |
96
+
97
+ All responses are compact JSON — no pretty-printing, no verbose keys. Repeated calls are cached (30s TTL). Usage metrics are tracked per session.
98
+
99
+ **Manual setup** (if the extension doesn't auto-configure):
100
+
101
+ Create `.mcp.json` at your project root:
102
+ ```json
103
+ {
104
+ "mcpServers": {
105
+ "guardian": {
106
+ "command": "guardian",
107
+ "args": ["mcp-serve", "--specs", ".specs"]
108
+ }
109
+ }
110
+ }
111
+ ```
112
+
113
+ ## VSCode Extension
114
+
115
+ Install from [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=toolbaux.toolbaux-guardian):
116
+
117
+ Search "ToolBaux Guardian" in the Extensions view, or install manually from a VSIX file:
118
+ ```
119
+ Cmd+Shift+P → "Extensions: Install from VSIX"
120
+ ```
121
+
122
+ **What it does automatically:**
123
+ - Creates `.specs/`, config, and pre-commit hook on first activation
124
+ - Configures MCP server for Claude Code and Cursor (`.mcp.json`)
125
+ - Extracts architecture on every file save (5s debounce)
126
+ - Shows drift status in status bar: `✓ Guardian: stable · 35 ep · 8 pg`
127
+
128
+ **Commands** (Cmd+Shift+P):
129
+ - Guardian: Initialize Project
130
+ - Guardian: Generate AI Context
131
+ - Guardian: Drift Check
132
+ - Guardian: Generate Constraints
133
+
82
134
  ## Key Commands
83
135
 
84
136
  ```bash
@@ -17,6 +17,65 @@
17
17
  import fs from "node:fs/promises";
18
18
  import path from "node:path";
19
19
  import readline from "node:readline";
20
+ const metrics = {
21
+ session_start: Date.now(),
22
+ calls: [],
23
+ intel_reloads: 0,
24
+ cache_hits: 0,
25
+ record(tool, args, responseText, cacheHit) {
26
+ const chars = responseText.length;
27
+ const estimatedTokens = Math.ceil(chars / 3.5); // rough token estimate
28
+ this.calls.push({
29
+ tool,
30
+ args,
31
+ timestamp: Date.now(),
32
+ response_chars: chars,
33
+ estimated_tokens: estimatedTokens,
34
+ cache_hit: cacheHit,
35
+ });
36
+ if (cacheHit)
37
+ this.cache_hits++;
38
+ },
39
+ summary() {
40
+ const duration = Math.round((Date.now() - this.session_start) / 1000);
41
+ const totalCalls = this.calls.length;
42
+ const totalTokensSpent = this.calls.reduce((s, c) => s + c.estimated_tokens, 0);
43
+ // Estimate tokens saved: each guardian call replaces ~3 Read/Grep calls (~400 tokens each)
44
+ const estimatedTokensSaved = totalCalls * 400 - totalTokensSpent;
45
+ const toolBreakdown = {};
46
+ for (const c of this.calls) {
47
+ if (!toolBreakdown[c.tool])
48
+ toolBreakdown[c.tool] = { calls: 0, tokens: 0 };
49
+ toolBreakdown[c.tool].calls++;
50
+ toolBreakdown[c.tool].tokens += c.estimated_tokens;
51
+ }
52
+ return {
53
+ session_duration_seconds: duration,
54
+ total_mcp_calls: totalCalls,
55
+ total_tokens_spent: totalTokensSpent,
56
+ estimated_tokens_saved: Math.max(0, estimatedTokensSaved),
57
+ savings_ratio: totalCalls > 0
58
+ ? `${Math.round((estimatedTokensSaved / (totalCalls * 400)) * 100)}%`
59
+ : "n/a",
60
+ cache_hits: this.cache_hits,
61
+ intel_reloads: this.intel_reloads,
62
+ tool_breakdown: toolBreakdown,
63
+ avg_tokens_per_call: totalCalls > 0 ? Math.round(totalTokensSpent / totalCalls) : 0,
64
+ };
65
+ },
66
+ };
67
+ // ── Response cache (dedup repeated queries) ──
68
+ const responseCache = new Map();
69
+ const CACHE_TTL = 30_000; // 30s cache
70
+ function getCached(key) {
71
+ const entry = responseCache.get(key);
72
+ if (entry && Date.now() - entry.time < CACHE_TTL)
73
+ return entry.text;
74
+ return null;
75
+ }
76
+ function setCache(key, text) {
77
+ responseCache.set(key, { text, time: Date.now() });
78
+ }
20
79
  // ── Intelligence loader ──
21
80
  let intel = null;
22
81
  let intelPath = "";
@@ -39,199 +98,181 @@ async function loadIntel() {
39
98
  }
40
99
  return intel;
41
100
  }
42
- // ── Tool implementations ──
43
- async function fileContext(args) {
44
- const data = await loadIntel();
45
- const file = args.file.replace(/^\.\//, "");
46
- // Find which module this file belongs to
47
- const module = data.service_map?.find((m) => m.path && file.startsWith(m.path.replace(/^\.\//, "")));
48
- // Find endpoints in this file
49
- const endpoints = Object.values(data.api_registry || {}).filter((ep) => ep.file && file.includes(ep.file.replace(/^\.\//, "")));
50
- // Find models in this file
51
- const models = Object.values(data.model_registry || {}).filter((m) => m.file && file.includes(m.file.replace(/^\.\//, "")));
52
- // Find which endpoints call services defined in this file
53
- const calledBy = [];
101
+ // ── Helpers ──
102
+ const SKIP_SERVICES = new Set(["str", "dict", "int", "len", "float", "max", "join", "getattr", "lower", "open", "params.append", "updates.append"]);
103
+ function compact(obj) {
104
+ return JSON.stringify(obj);
105
+ }
106
+ function findModule(data, file) {
107
+ return data.service_map?.find((m) => m.path && file.replace(/^\.\//, "").startsWith(m.path.replace(/^\.\//, "")));
108
+ }
109
+ function findEndpointsInFile(data, file) {
110
+ const f = file.replace(/^\.\//, "");
111
+ return Object.values(data.api_registry || {}).filter((ep) => ep.file && f.includes(ep.file.replace(/^\.\//, "")));
112
+ }
113
+ function findModelsInFile(data, file) {
114
+ const f = file.replace(/^\.\//, "");
115
+ return Object.values(data.model_registry || {}).filter((m) => m.file && f.includes(m.file.replace(/^\.\//, "")));
116
+ }
117
+ // ── Tool implementations (compact JSON, no redundancy) ──
118
+ async function orient() {
119
+ const d = await loadIntel();
120
+ const c = d.meta?.counts || {};
121
+ const mods = (d.service_map || []).filter((m) => m.file_count > 0);
122
+ const topMods = mods.sort((a, b) => (b.endpoint_count || 0) - (a.endpoint_count || 0)).slice(0, 6);
123
+ return compact({
124
+ p: d.meta?.project,
125
+ ep: c.endpoints, mod: c.models, pg: c.pages, m: c.modules,
126
+ top: topMods.map((m) => [m.id, m.endpoint_count, m.layer]),
127
+ pages: (d.frontend_pages || []).map((p) => p.path),
128
+ });
129
+ }
130
+ async function context(args) {
131
+ const d = await loadIntel();
132
+ const t = args.target;
133
+ // Check if target is an endpoint (e.g. "POST /sessions/start")
134
+ const epMatch = t.match(/^(GET|POST|PUT|DELETE|PATCH)\s+(.+)$/i);
135
+ if (epMatch) {
136
+ const ep = d.api_registry?.[`${epMatch[1].toUpperCase()} ${epMatch[2]}`]
137
+ || Object.values(d.api_registry || {}).find((e) => e.method === epMatch[1].toUpperCase() && e.path === epMatch[2]);
138
+ if (!ep)
139
+ return compact({ err: "not found" });
140
+ const svcs = (ep.service_calls || []).filter((s) => !SKIP_SERVICES.has(s));
141
+ return compact({
142
+ ep: `${ep.method} ${ep.path}`, h: ep.handler, f: ep.file, m: ep.module,
143
+ req: ep.request_schema, res: ep.response_schema,
144
+ calls: svcs, ai: ep.ai_operations?.length || 0,
145
+ });
146
+ }
147
+ // Otherwise treat as file path
148
+ const file = t.replace(/^\.\//, "");
149
+ const mod = findModule(d, file);
150
+ const eps = findEndpointsInFile(d, file);
151
+ const models = findModelsInFile(d, file);
54
152
  const fileName = path.basename(file, path.extname(file));
55
- for (const [key, ep] of Object.entries(data.api_registry || {})) {
56
- const e = ep;
57
- if (e.service_calls?.some((s) => s.toLowerCase().includes(fileName.toLowerCase()))) {
58
- calledBy.push(`${e.method} ${e.path} (${e.handler})`);
153
+ const calledBy = [];
154
+ for (const ep of Object.values(d.api_registry || {})) {
155
+ if (ep.service_calls?.some((s) => s.toLowerCase().includes(fileName.toLowerCase()))) {
156
+ calledBy.push(`${ep.method} ${ep.path}`);
59
157
  }
60
158
  }
61
- // Find what this file's endpoints call
62
- const calls = endpoints.flatMap((ep) => (ep.service_calls || []).filter((s) => !["str", "dict", "int", "len", "float", "max", "join", "getattr"].includes(s)));
63
- // Find frontend pages that use APIs from this module
64
- const pages = (data.frontend_pages || []).filter((p) => p.api_calls?.some((call) => endpoints.some((ep) => call.includes(ep.path?.split("{")[0]))));
65
- return JSON.stringify({
66
- file,
67
- module: module ? { id: module.id, layer: module.layer, file_count: module.file_count, imports: module.imports } : null,
68
- endpoints_in_file: endpoints.map((ep) => `${ep.method} ${ep.path} → ${ep.handler}`),
69
- models_in_file: models.map((m) => `${m.name} (${m.framework}, ${m.fields?.length || 0} fields)`),
70
- calls_downstream: [...new Set(calls)],
71
- called_by_upstream: calledBy.slice(0, 10),
72
- frontend_pages_using: pages.map((p) => p.path),
73
- coupling: module?.coupling_score ?? null,
74
- }, null, 2);
159
+ const calls = eps.flatMap((ep) => (ep.service_calls || []).filter((s) => !SKIP_SERVICES.has(s)));
160
+ return compact({
161
+ f: file,
162
+ mod: mod ? [mod.id, mod.layer] : null,
163
+ ep: eps.map((e) => `${e.method} ${e.path}`),
164
+ models: models.map((m) => [m.name, m.fields?.length || 0]),
165
+ calls: [...new Set(calls)],
166
+ calledBy: calledBy.slice(0, 8),
167
+ });
168
+ }
169
+ async function impact(args) {
170
+ const d = await loadIntel();
171
+ const file = args.target.replace(/^\.\//, "");
172
+ const eps = findEndpointsInFile(d, file);
173
+ const models = findModelsInFile(d, file);
174
+ const modelNames = new Set(models.map((m) => m.name));
175
+ const affectedEps = Object.values(d.api_registry || {}).filter((ep) => (ep.request_schema && modelNames.has(ep.request_schema)) ||
176
+ (ep.response_schema && modelNames.has(ep.response_schema)));
177
+ const mod = findModule(d, file);
178
+ const depMods = mod ? (d.service_map || []).filter((m) => m.imports?.includes(mod.id)) : [];
179
+ const affectedPages = (d.frontend_pages || []).filter((p) => p.api_calls?.some((call) => eps.some((ep) => call.includes(ep.path?.split("{")[0]))));
180
+ const total = eps.length + affectedEps.length + depMods.length + affectedPages.length;
181
+ return compact({
182
+ f: file,
183
+ risk: total > 5 ? "HIGH" : total > 2 ? "MED" : "LOW",
184
+ ep: eps.map((e) => `${e.method} ${e.path}`),
185
+ models: models.map((m) => m.name),
186
+ affectedEp: affectedEps.map((e) => `${e.method} ${e.path}`),
187
+ depMods: depMods.map((m) => m.id),
188
+ pages: affectedPages.map((p) => p.path),
189
+ });
75
190
  }
76
191
  async function search(args) {
77
- const data = await loadIntel();
192
+ const d = await loadIntel();
78
193
  const q = args.query.toLowerCase();
79
- const types = (args.types || "models,endpoints,modules").split(",").map((t) => t.trim());
80
- const results = {};
81
- if (types.includes("endpoints")) {
82
- results.endpoints = Object.values(data.api_registry || {})
83
- .filter((ep) => ep.path?.toLowerCase().includes(q) ||
84
- ep.handler?.toLowerCase().includes(q) ||
85
- ep.service_calls?.some((s) => s.toLowerCase().includes(q)))
86
- .slice(0, 10)
87
- .map((ep) => `${ep.method} ${ep.path} → ${ep.handler} [${ep.module}]`);
88
- }
89
- if (types.includes("models")) {
90
- results.models = Object.values(data.model_registry || {})
91
- .filter((m) => m.name?.toLowerCase().includes(q) ||
92
- m.fields?.some((f) => f.toLowerCase().includes(q)))
93
- .slice(0, 10)
94
- .map((m) => `${m.name} (${m.framework}, ${m.fields?.length} fields, ${m.file})`);
95
- }
96
- if (types.includes("modules")) {
97
- results.modules = (data.service_map || [])
98
- .filter((m) => m.id?.toLowerCase().includes(q) ||
99
- m.path?.toLowerCase().includes(q))
100
- .slice(0, 10)
101
- .map((m) => `${m.id} (${m.type}, ${m.endpoint_count} eps, ${m.file_count} files, imports: ${m.imports?.join(",") || "none"})`);
102
- }
103
- return JSON.stringify(results, null, 2);
104
- }
105
- async function endpointTrace(args) {
106
- const data = await loadIntel();
107
- const key = `${args.method.toUpperCase()} ${args.path}`;
108
- const ep = data.api_registry?.[key] || Object.values(data.api_registry || {}).find((e) => e.method === args.method.toUpperCase() && e.path === args.path);
109
- if (!ep)
110
- return JSON.stringify({ error: `Endpoint ${key} not found` });
111
- // Find which frontend pages call this endpoint
112
- const frontendCallers = (data.frontend_pages || []).filter((p) => p.api_calls?.some((call) => call.includes(args.path.split("{")[0])));
113
- // Find what models this endpoint uses
114
- const models = Object.values(data.model_registry || {}).filter((m) => ep.request_schema === m.name || ep.response_schema === m.name);
115
- return JSON.stringify({
116
- endpoint: `${ep.method} ${ep.path}`,
117
- handler: ep.handler,
118
- file: ep.file,
119
- module: ep.module,
120
- request_schema: ep.request_schema,
121
- response_schema: ep.response_schema,
122
- service_calls: ep.service_calls,
123
- ai_operations: ep.ai_operations,
124
- patterns: ep.patterns,
125
- models_used: models.map((m) => ({ name: m.name, fields: m.fields })),
126
- frontend_callers: frontendCallers.map((p) => p.path),
127
- }, null, 2);
128
- }
129
- async function impactCheck(args) {
130
- const data = await loadIntel();
131
- const file = args.file.replace(/^\.\//, "");
132
- // Find all endpoints in this file
133
- const endpoints = Object.values(data.api_registry || {}).filter((ep) => ep.file && file.includes(ep.file.replace(/^\.\//, "")));
134
- // Find all models in this file
135
- const models = Object.values(data.model_registry || {}).filter((m) => m.file && file.includes(m.file.replace(/^\.\//, "")));
136
- // Find endpoints that USE these models
137
- const modelNames = new Set(models.map((m) => m.name));
138
- const affectedEndpoints = Object.values(data.api_registry || {}).filter((ep) => ep.request_schema && modelNames.has(ep.request_schema) ||
139
- ep.response_schema && modelNames.has(ep.response_schema));
140
- // Find modules that import from this file's module
141
- const fileModule = data.service_map?.find((m) => m.path && file.startsWith(m.path.replace(/^\.\//, "")));
142
- const dependentModules = fileModule
143
- ? (data.service_map || []).filter((m) => m.imports?.includes(fileModule.id))
144
- : [];
145
- // Find frontend pages affected
146
- const affectedPages = (data.frontend_pages || []).filter((p) => p.api_calls?.some((call) => endpoints.some((ep) => call.includes(ep.path?.split("{")[0]))));
147
- return JSON.stringify({
148
- file,
149
- direct_endpoints: endpoints.map((ep) => `${ep.method} ${ep.path}`),
150
- models_defined: models.map((m) => m.name),
151
- endpoints_using_these_models: affectedEndpoints.map((ep) => `${ep.method} ${ep.path}`),
152
- dependent_modules: dependentModules.map((m) => m.id),
153
- affected_frontend_pages: affectedPages.map((p) => p.path),
154
- risk: endpoints.length + affectedEndpoints.length + dependentModules.length > 5 ? "HIGH" : "LOW",
155
- }, null, 2);
194
+ const eps = Object.values(d.api_registry || {}).filter((ep) => ep.path?.toLowerCase().includes(q) || ep.handler?.toLowerCase().includes(q) ||
195
+ ep.service_calls?.some((s) => s.toLowerCase().includes(q))).slice(0, 8).map((ep) => `${ep.method} ${ep.path} [${ep.module}]`);
196
+ const models = Object.values(d.model_registry || {}).filter((m) => m.name?.toLowerCase().includes(q) || m.fields?.some((f) => f.toLowerCase().includes(q))).slice(0, 8).map((m) => `${m.name}:${m.fields?.length}f`);
197
+ const mods = (d.service_map || []).filter((m) => m.id?.toLowerCase().includes(q)).slice(0, 5).map((m) => `${m.id}:${m.endpoint_count}ep`);
198
+ return compact({ ep: eps, mod: models, m: mods });
156
199
  }
157
- async function overview() {
158
- const data = await loadIntel();
159
- return JSON.stringify({
160
- project: data.meta?.project,
161
- counts: data.meta?.counts,
162
- modules: (data.service_map || [])
163
- .filter((m) => m.file_count > 0)
164
- .map((m) => ({ id: m.id, type: m.type, layer: m.layer, endpoints: m.endpoint_count, files: m.file_count, imports: m.imports })),
165
- pages: (data.frontend_pages || []).map((p) => ({ route: p.path, component: p.component })),
166
- top_endpoints: Object.values(data.api_registry || {})
167
- .sort((a, b) => (b.service_calls?.length || 0) - (a.service_calls?.length || 0))
168
- .slice(0, 5)
169
- .map((ep) => `${ep.method} ${ep.path} (${ep.service_calls?.length || 0} service calls)`),
170
- }, null, 2);
200
+ async function model(args) {
201
+ const d = await loadIntel();
202
+ const m = d.model_registry?.[args.name];
203
+ if (!m)
204
+ return compact({ err: "not found" });
205
+ const usedBy = Object.values(d.api_registry || {}).filter((ep) => ep.request_schema === args.name || ep.response_schema === args.name).map((ep) => `${ep.method} ${ep.path}`);
206
+ return compact({
207
+ name: m.name, fw: m.framework, f: m.file,
208
+ fields: m.fields, rels: m.relationships,
209
+ usedBy,
210
+ });
171
211
  }
172
212
  // ── MCP protocol ──
173
213
  const TOOLS = [
174
214
  {
175
- name: "guardian_file_context",
176
- description: "Get upstream/downstream dependencies, endpoints, models, and coupling for a file. Call this BEFORE editing any file.",
215
+ name: "guardian_orient",
216
+ description: "Compact project summary. Call at session start. Returns: project name, counts, top modules, page routes.",
217
+ inputSchema: { type: "object", properties: {} },
218
+ },
219
+ {
220
+ name: "guardian_context",
221
+ description: "Get dependencies for a file or endpoint. Pass a file path (e.g. 'backend/service-conversation/engine.py') or an endpoint (e.g. 'POST /sessions/start').",
177
222
  inputSchema: {
178
223
  type: "object",
179
224
  properties: {
180
- file: { type: "string", description: "File path relative to project root (e.g. 'backend/service-conversation/engine.py')" },
225
+ target: { type: "string", description: "File path or 'METHOD /path' endpoint" },
181
226
  },
182
- required: ["file"],
227
+ required: ["target"],
183
228
  },
184
229
  },
185
230
  {
186
- name: "guardian_search",
187
- description: "Search the codebase for endpoints, models, or modules matching a keyword.",
231
+ name: "guardian_impact",
232
+ description: "What breaks if you change this file? Returns affected endpoints, models, modules, pages, and risk level.",
188
233
  inputSchema: {
189
234
  type: "object",
190
235
  properties: {
191
- query: { type: "string", description: "Search keyword (e.g. 'session', 'auth', 'TTS')" },
192
- types: { type: "string", description: "Comma-separated: models,endpoints,modules (default: all)" },
236
+ target: { type: "string", description: "File path to check" },
193
237
  },
194
- required: ["query"],
238
+ required: ["target"],
195
239
  },
196
240
  },
197
241
  {
198
- name: "guardian_endpoint_trace",
199
- description: "Trace an API endpoint's full chain: frontend callers, handler, service calls, models, AI operations.",
242
+ name: "guardian_search",
243
+ description: "Find endpoints, models, modules by keyword. Returns compact one-line results.",
200
244
  inputSchema: {
201
245
  type: "object",
202
246
  properties: {
203
- method: { type: "string", description: "HTTP method (GET, POST, PUT, DELETE)" },
204
- path: { type: "string", description: "Endpoint path (e.g. '/sessions/start')" },
247
+ query: { type: "string", description: "Search keyword" },
205
248
  },
206
- required: ["method", "path"],
249
+ required: ["query"],
207
250
  },
208
251
  },
209
252
  {
210
- name: "guardian_impact_check",
211
- description: "Check what endpoints, models, modules, and pages are affected if you change a file. Call this BEFORE making changes to high-coupling files.",
253
+ name: "guardian_model",
254
+ description: "Get full field list and usage for a specific model. Only call when you need field details.",
212
255
  inputSchema: {
213
256
  type: "object",
214
257
  properties: {
215
- file: { type: "string", description: "File path to check impact for" },
258
+ name: { type: "string", description: "Model name (e.g. 'StartSessionRequest')" },
216
259
  },
217
- required: ["file"],
260
+ required: ["name"],
218
261
  },
219
262
  },
220
263
  {
221
- name: "guardian_overview",
222
- description: "Get project summary: modules, pages, top endpoints, counts. Call this at session start for orientation.",
223
- inputSchema: {
224
- type: "object",
225
- properties: {},
226
- },
264
+ name: "guardian_metrics",
265
+ description: "MCP usage stats for this session. Call at end to evaluate guardian's usefulness.",
266
+ inputSchema: { type: "object", properties: {} },
227
267
  },
228
268
  ];
229
269
  const TOOL_HANDLERS = {
230
- guardian_file_context: fileContext,
270
+ guardian_orient: orient,
271
+ guardian_context: context,
272
+ guardian_impact: impact,
231
273
  guardian_search: search,
232
- guardian_endpoint_trace: endpointTrace,
233
- guardian_impact_check: impactCheck,
234
- guardian_overview: overview,
274
+ guardian_model: model,
275
+ guardian_metrics: async () => compact(metrics.summary()),
235
276
  };
236
277
  function respond(id, result) {
237
278
  const msg = JSON.stringify({ jsonrpc: "2.0", id, result });
@@ -268,7 +309,17 @@ async function handleRequest(req) {
268
309
  break;
269
310
  }
270
311
  try {
312
+ // Check cache first
313
+ const cacheKey = `${toolName}:${JSON.stringify(toolArgs)}`;
314
+ const cached = getCached(cacheKey);
315
+ if (cached) {
316
+ metrics.record(toolName, toolArgs, cached, true);
317
+ respond(req.id, { content: [{ type: "text", text: cached }] });
318
+ break;
319
+ }
271
320
  const result = await handler(toolArgs);
321
+ setCache(cacheKey, result);
322
+ metrics.record(toolName, toolArgs, result, false);
272
323
  respond(req.id, {
273
324
  content: [{ type: "text", text: result }],
274
325
  });
@@ -307,7 +358,18 @@ export async function runMcpServe(options) {
307
358
  respondError(null, -32700, `Parse error: ${err.message}`);
308
359
  }
309
360
  });
310
- rl.on("close", () => {
361
+ rl.on("close", async () => {
362
+ // Persist session metrics to .specs/machine/mcp-metrics.jsonl
363
+ const metricsPath = path.join(specsDir, "machine", "mcp-metrics.jsonl");
364
+ try {
365
+ const entry = JSON.stringify({
366
+ ...metrics.summary(),
367
+ session_end: new Date().toISOString(),
368
+ });
369
+ await fs.appendFile(metricsPath, entry + "\n", "utf8");
370
+ process.stderr.write(`Guardian metrics saved to ${metricsPath}\n`);
371
+ }
372
+ catch { }
311
373
  process.stderr.write("Guardian MCP server stopped.\n");
312
374
  process.exit(0);
313
375
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@toolbaux/guardian",
3
- "version": "0.1.10",
3
+ "version": "0.1.12",
4
4
  "type": "module",
5
5
  "description": "Architectural intelligence for codebases. Verify that AI-generated code matches your architectural intent.",
6
6
  "keywords": [