@radix-ai/ai-memory 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105) hide show
  1. package/.claude-plugin/marketplace.json +19 -0
  2. package/.cursor-plugin/marketplace.json +19 -0
  3. package/LICENSE +21 -0
  4. package/README.md +331 -0
  5. package/dist/cli/adapters.d.ts +32 -0
  6. package/dist/cli/adapters.d.ts.map +1 -0
  7. package/dist/cli/adapters.js +368 -0
  8. package/dist/cli/adapters.js.map +1 -0
  9. package/dist/cli/environment.d.ts +34 -0
  10. package/dist/cli/environment.d.ts.map +1 -0
  11. package/dist/cli/environment.js +119 -0
  12. package/dist/cli/environment.js.map +1 -0
  13. package/dist/cli/index.d.ts +3 -0
  14. package/dist/cli/index.d.ts.map +1 -0
  15. package/dist/cli/index.js +1108 -0
  16. package/dist/cli/index.js.map +1 -0
  17. package/dist/docs-schema.d.ts +27 -0
  18. package/dist/docs-schema.d.ts.map +1 -0
  19. package/dist/docs-schema.js +100 -0
  20. package/dist/docs-schema.js.map +1 -0
  21. package/dist/evals/index.d.ts +13 -0
  22. package/dist/evals/index.d.ts.map +1 -0
  23. package/dist/evals/index.js +205 -0
  24. package/dist/evals/index.js.map +1 -0
  25. package/dist/evals/performance-comparison.d.ts +23 -0
  26. package/dist/evals/performance-comparison.d.ts.map +1 -0
  27. package/dist/evals/performance-comparison.js +76 -0
  28. package/dist/evals/performance-comparison.js.map +1 -0
  29. package/dist/evals/platform-integration.d.ts +34 -0
  30. package/dist/evals/platform-integration.d.ts.map +1 -0
  31. package/dist/evals/platform-integration.js +186 -0
  32. package/dist/evals/platform-integration.js.map +1 -0
  33. package/dist/formatter/index.d.ts +15 -0
  34. package/dist/formatter/index.d.ts.map +1 -0
  35. package/dist/formatter/index.js +207 -0
  36. package/dist/formatter/index.js.map +1 -0
  37. package/dist/hybrid-search/index.d.ts +40 -0
  38. package/dist/hybrid-search/index.d.ts.map +1 -0
  39. package/dist/hybrid-search/index.js +277 -0
  40. package/dist/hybrid-search/index.js.map +1 -0
  41. package/dist/index.d.ts +4 -0
  42. package/dist/index.d.ts.map +1 -0
  43. package/dist/index.js +5 -0
  44. package/dist/index.js.map +1 -0
  45. package/dist/mcp-server/index.d.ts +6 -0
  46. package/dist/mcp-server/index.d.ts.map +1 -0
  47. package/dist/mcp-server/index.js +114 -0
  48. package/dist/mcp-server/index.js.map +1 -0
  49. package/dist/mcp-server/p0-parser.d.ts +43 -0
  50. package/dist/mcp-server/p0-parser.d.ts.map +1 -0
  51. package/dist/mcp-server/p0-parser.js +108 -0
  52. package/dist/mcp-server/p0-parser.js.map +1 -0
  53. package/dist/mcp-server/resources.d.ts +3 -0
  54. package/dist/mcp-server/resources.d.ts.map +1 -0
  55. package/dist/mcp-server/resources.js +156 -0
  56. package/dist/mcp-server/resources.js.map +1 -0
  57. package/dist/mcp-server/tools.d.ts +15 -0
  58. package/dist/mcp-server/tools.d.ts.map +1 -0
  59. package/dist/mcp-server/tools.js +928 -0
  60. package/dist/mcp-server/tools.js.map +1 -0
  61. package/dist/schema-constants.d.ts +7 -0
  62. package/dist/schema-constants.d.ts.map +1 -0
  63. package/dist/schema-constants.js +33 -0
  64. package/dist/schema-constants.js.map +1 -0
  65. package/package.json +84 -0
  66. package/plugins/adapters/claude-code/.claude-plugin/plugin.json +26 -0
  67. package/plugins/adapters/claude-code/CLAUDE.md +21 -0
  68. package/plugins/adapters/claude-code/README.md +37 -0
  69. package/plugins/adapters/claude-code/agents/governance-checker.md +27 -0
  70. package/plugins/adapters/claude-code/agents/memory-writer.md +31 -0
  71. package/plugins/adapters/claude-code/hooks/SessionStart.js +55 -0
  72. package/plugins/adapters/claude-code/hooks/hooks.json +52 -0
  73. package/plugins/adapters/generic/BOOTSTRAP_INSTRUCTION.md +30 -0
  74. package/plugins/adapters/generic/README.md +50 -0
  75. package/plugins/ai-memory/.claude-plugin/plugin.json +30 -0
  76. package/plugins/ai-memory/.cursor-plugin/plugin.json +18 -0
  77. package/plugins/ai-memory/.mcp.json +19 -0
  78. package/plugins/ai-memory/agents/governance-critic/AGENT.md +57 -0
  79. package/plugins/ai-memory/agents/memory-auditor/AGENT.md +54 -0
  80. package/plugins/ai-memory/rules/context7-tool-reference.md +16 -0
  81. package/plugins/ai-memory/rules/load-memory.md +22 -0
  82. package/plugins/ai-memory/rules/parallel-safe-planning.md +19 -0
  83. package/plugins/ai-memory/skills/mem-auto-review/SKILL.md +51 -0
  84. package/plugins/ai-memory/skills/mem-compound/SKILL.md +105 -0
  85. package/plugins/ai-memory/skills/mem-init/SKILL.md +48 -0
  86. package/plugins/ai-memory/skills/mem-session-close/SKILL.md +38 -0
  87. package/plugins/ai-memory/skills/mem-validate/SKILL.md +54 -0
  88. package/templates/.ai/IDENTITY.md +23 -0
  89. package/templates/.ai/agents/_base-auditor.md +28 -0
  90. package/templates/.ai/agents/_template.md +23 -0
  91. package/templates/.ai/memory/debugging.md +14 -0
  92. package/templates/.ai/memory/decisions.md +8 -0
  93. package/templates/.ai/memory/improvements.md +7 -0
  94. package/templates/.ai/memory/memory-index.md +9 -0
  95. package/templates/.ai/memory/patterns.md +8 -0
  96. package/templates/.ai/reference/PROJECT.md +5 -0
  97. package/templates/.ai/reference/capability-specs.json +31 -0
  98. package/templates/.ai/reference/environment-specs.json +41 -0
  99. package/templates/.ai/sessions/archive/thread-archive.md +15 -0
  100. package/templates/.ai/sessions/open-items.md +13 -0
  101. package/templates/.ai/toolbox/README.md +5 -0
  102. package/templates/.ai/toolbox/browser.md +30 -0
  103. package/templates/.ai/toolbox/integrations.md +44 -0
  104. package/templates/.ai/toolbox/shell.md +38 -0
  105. package/templates/AGENTS.md +4 -0
@@ -0,0 +1,928 @@
1
+ import { readFile, writeFile, readdir, mkdir, unlink, open } from "fs/promises";
2
+ import { join, dirname, resolve, relative } from "path";
3
+ import { existsSync } from "fs";
4
+ import { execFileSync } from "child_process";
5
+ import { minimatch } from "minimatch";
6
+ import matter from "gray-matter";
7
+ import { ListToolsRequestSchema, CallToolRequestSchema, ErrorCode, McpError, } from "@modelcontextprotocol/sdk/types.js";
8
+ import { readP0Entries, compileHarnessRules, generateRuleTests, } from "./p0-parser.js";
9
+ import { VALID_TYPES, VALID_STATUSES } from "../schema-constants.js";
10
+ import { loadDocsSchema, getDocPath, validateDocPlacement, listDocTypes, } from "../docs-schema.js";
11
+ import { hybridSearch, getSearchMode, } from "../hybrid-search/index.js";
12
// Paths that are ALWAYS immutable (structural, not content).
// Checked by prefix in isImmutable(), so everything under these
// directories is write-protected, not just the directory entry itself.
const ALWAYS_IMMUTABLE = ["toolbox/", "acp/", "rules/"];
// Paths whose immutability is controlled by frontmatter `writable` field
// IDENTITY.md: writable defaults to false (immutable unless opted in)
// PROJECT_STATUS.md: writable defaults to true (the AI's evolving program)
const FRONTMATTER_CONTROLLED = ["IDENTITY.md", "PROJECT_STATUS.md"];
// Fallback `writable` values used when the file is missing, unreadable,
// or its frontmatter does not declare a boolean `writable` field.
const WRITABLE_DEFAULTS = {
    "IDENTITY.md": false,
    "PROJECT_STATUS.md": true,
};
22
// Decides whether a relative .ai/ path is write-protected.
// Structural directories (ALWAYS_IMMUTABLE) are always protected; the
// frontmatter-controlled files (IDENTITY.md, PROJECT_STATUS.md) honor a
// boolean `writable` field in their own frontmatter, falling back to
// WRITABLE_DEFAULTS when the file is missing, unreadable, or undeclared.
async function isImmutable(path, aiDir) {
    // Structural paths: protected by exact match or prefix.
    for (const prefix of ALWAYS_IMMUTABLE) {
        if (path === prefix || path.startsWith(prefix)) {
            return true;
        }
    }
    // Anything that is not frontmatter-controlled is freely writable.
    if (!FRONTMATTER_CONTROLLED.includes(path)) {
        return false;
    }
    const fallback = !WRITABLE_DEFAULTS[path];
    try {
        const raw = await readFile(join(aiDir, path), "utf-8");
        const { data } = matter(raw);
        // An explicit boolean `writable` wins; otherwise use the default.
        return typeof data.writable === "boolean" ? !data.writable : fallback;
    }
    catch {
        // File missing or frontmatter unparsable — apply the default policy.
        return fallback;
    }
}
46
// ─── Session-aware writes ─────────────────────────────────────────────────────
// Returns git repo root (absolute path) or null if not in a git repo
function getRepoRoot(cwd) {
    let stdout;
    try {
        stdout = execFileSync("git", ["rev-parse", "--show-toplevel"], {
            cwd,
            encoding: "utf-8",
            timeout: 5000,
        });
    }
    catch {
        // git missing, not a repository, or the command timed out.
        return null;
    }
    const root = stdout.trim();
    return root === "" ? null : root;
}
61
// Shared path validation: ensures a relative path stays within aiDir.
// Returns the resolved absolute path on success; throws McpError when the
// resolved location escapes aiDir (e.g. via "..", absolute paths).
function assertPathWithinAiDir(aiDir, relPath) {
    const fullPath = resolve(aiDir, relPath);
    const rel = relative(aiDir, fullPath);
    const escapesRoot = rel.startsWith("..") || rel.startsWith("/") || /\.\.[\\/]/.test(rel);
    if (!escapesRoot) {
        return fullPath;
    }
    throw new McpError(ErrorCode.InvalidRequest, "Path traversal not allowed.");
}
70
// Produces a short, human-readable session identifier, e.g. "s-lx2k9q-ab3f":
// a base36 timestamp plus a few base36 random characters.
function generateSessionId() {
    const stamp = Date.now().toString(36);
    const entropy = Math.random().toString(36).slice(2, 6);
    return ["s", stamp, entropy].join("-");
}
73
// Claim system: prevents concurrent writes to the same path
const CLAIM_TTL_MS = 5 * 60 * 1000; // 5 minutes
// Acquires an advisory claim (lock file) on `path` for `sessionId`.
// Lock files live under <aiDir>/temp/locks with path separators flattened
// to "_". An existing claim younger than CLAIM_TTL_MS held by a DIFFERENT
// session is an error; expired, same-session, or corrupt locks are removed
// and re-created. Creation uses O_EXCL ("wx") so two processes cannot both
// win the race. Throws McpError on conflict; rethrows unexpected fs errors.
async function acquireClaim(aiDir, path, sessionId) {
    const locksDir = join(aiDir, "temp", "locks");
    await mkdir(locksDir, { recursive: true });
    const lockFile = join(locksDir, path.replace(/[/\\]/g, "_") + ".lock");
    // Check for existing valid claim
    if (existsSync(lockFile)) {
        try {
            const existing = JSON.parse(await readFile(lockFile, "utf-8"));
            const age = Date.now() - existing.timestamp;
            if (age < CLAIM_TTL_MS && existing.session_id !== sessionId) {
                throw new McpError(ErrorCode.InvalidRequest, `Path "${path}" is claimed by another session (${existing.session_id}, ${Math.round(age / 1000)}s ago). ` +
                    `Wait for the claim to expire (${Math.round(CLAIM_TTL_MS / 1000)}s TTL) or close the other session.`);
            }
            // Expired or same session — remove stale lock before re-creating
            await unlink(lockFile).catch(() => { });
        }
        catch (err) {
            if (err instanceof McpError)
                throw err;
            // Corrupt lock file — remove and re-create
            await unlink(lockFile).catch(() => { });
        }
    }
    // Atomic file creation with O_EXCL — prevents TOCTOU race
    const claim = { session_id: sessionId, timestamp: Date.now(), pid: process.pid };
    const data = JSON.stringify(claim);
    let fd;
    try {
        fd = await open(lockFile, "wx"); // O_WRONLY | O_CREAT | O_EXCL
        await fd.writeFile(data);
    }
    catch (err) {
        if (err.code === "EEXIST") {
            // Another process created the lock between our check and creation
            throw new McpError(ErrorCode.InvalidRequest, `Path "${path}" was just claimed by another session. Retry shortly.`);
        }
        throw err;
    }
    finally {
        // FIX: always close the descriptor — the original leaked the fd
        // when writeFile threw after a successful open.
        await fd?.close();
    }
}
114
// Best-effort removal of the claim lock for `path`; a lock that is
// already gone is not an error.
async function releaseClaim(aiDir, path) {
    const flattened = path.replace(/[/\\]/g, "_") + ".lock";
    const lockFile = join(aiDir, "temp", "locks", flattened);
    await unlink(lockFile).catch(() => { /* already gone */ });
}
121
// Parse diff into sections with file path (from b/ side) and content.
// Each section is { path, addedLines, deletedLines }, where the line
// bodies have the leading "+" / "-" stripped and are joined with "\n".
function parseDiffSections(diff) {
    // Collects the bodies of lines starting with `marker`, skipping the
    // "+++" / "---" file-header lines, with the marker character removed.
    const collectLines = (lines, marker, headerMarker) => lines
        .filter((line) => line.startsWith(marker) && !line.startsWith(headerMarker))
        .map((line) => line.slice(1))
        .join("\n");
    const sections = [];
    for (const part of diff.split(/^diff --git /m).slice(1)) {
        // The "b/" target on the header line names the post-change file.
        const headerMatch = part.match(/^[^\n]*b\/(.+)$/m);
        if (!headerMatch) {
            continue;
        }
        const lines = part.split("\n");
        sections.push({
            path: headerMatch[1].trim(),
            addedLines: collectLines(lines, "+", "+++"),
            deletedLines: collectLines(lines, "-", "---"),
        });
    }
    return sections;
}
144
// Validate a git diff against harness rules using ast-grep and regex.
// repoRoot: git root for path resolution; falls back to aiDir parent if null.
// Rules are applied ONLY to diff files matching rule.path (minimatch). Default path: "**/*".
// Returns { violations, audit }:
//   violations — { rule_id, message, severity } for every triggered or broken rule;
//   audit      — one { rule_id, path, status: "passed"|"failed"|"skipped" } per rule.
// NOTE(review): aiDir and repoRoot are not referenced in this body — confirm
// whether they are reserved for future use before removing from the signature.
async function validateDiff(diff, rules, aiDir, repoRoot) {
    const sections = parseDiffSections(diff);
    const violations = [];
    const audit = [];
    for (const rule of rules) {
        // A rule with no path glob applies to every file in the diff.
        const pathGlob = rule.path?.trim() || "**/*";
        const matchingSections = sections.filter((s) => minimatch(s.path, pathGlob));
        if (rule.type === "regex") {
            // scope selects which side(s) of the diff the pattern runs against.
            const scope = rule.scope ?? "additions";
            let content = "";
            if (scope === "additions") {
                content = matchingSections.map((s) => s.addedLines).join("\n");
            }
            else if (scope === "deletions") {
                content = matchingSections.map((s) => s.deletedLines).join("\n");
            }
            else {
                // Any other scope value checks both added and deleted lines.
                content = matchingSections.map((s) => s.addedLines + "\n" + s.deletedLines).join("\n");
            }
            // No diff file matches the glob — record the rule as skipped.
            if (matchingSections.length === 0) {
                audit.push({ rule_id: rule.id, path: pathGlob, status: "skipped" });
                continue;
            }
            let regex;
            try {
                regex = new RegExp(rule.pattern, "gm");
            }
            catch {
                // An uncompilable pattern is reported as a violation so a
                // broken rule surfaces instead of silently passing.
                violations.push({ rule_id: rule.id, message: `Invalid regex pattern: ${rule.pattern}`, severity: rule.severity });
                audit.push({ rule_id: rule.id, path: pathGlob, status: "failed" });
                continue;
            }
            // Empty content never triggers; regex is fresh per rule, so the
            // stateful /g lastIndex cannot leak between rules.
            const triggered = content.trim() && regex.test(content);
            if (triggered) {
                violations.push({ rule_id: rule.id, message: rule.message, severity: rule.severity });
                audit.push({ rule_id: rule.id, path: pathGlob, status: "failed" });
            }
            else {
                audit.push({ rule_id: rule.id, path: pathGlob, status: "passed" });
            }
        }
        else if (rule.type === "ast") {
            if (matchingSections.length === 0) {
                audit.push({ rule_id: rule.id, path: pathGlob, status: "skipped" });
                continue;
            }
            try {
                // Lazy import: @ast-grep/napi is a native module that may be
                // missing on some platforms; failure falls to the catch below.
                const { Lang, parse } = await import("@ast-grep/napi");
                const langKey = (rule.language ?? "typescript").toLowerCase();
                const langMap = {
                    typescript: Lang.TypeScript,
                    javascript: Lang.JavaScript,
                    python: Lang.Python,
                    go: Lang.Go,
                    rust: Lang.Rust,
                };
                // Unknown languages fall back to TypeScript parsing.
                const lang = langMap[langKey] ?? Lang.TypeScript;
                // With `where` constraints the matcher is a structured rule
                // object; otherwise the bare pattern string is used.
                const matcher = rule.where && Object.keys(rule.where).length > 0
                    ? { rule: { pattern: rule.pattern }, constraints: rule.where }
                    : rule.pattern;
                let found = false;
                for (const section of matchingSections) {
                    // AST rules inspect only the added side of the diff.
                    const addedCode = section.addedLines;
                    if (!addedCode.trim())
                        continue;
                    const tree = parse(lang, addedCode);
                    const sgRoot = tree.root();
                    const matches = typeof matcher === "string"
                        ? sgRoot.findAll(matcher)
                        : sgRoot.findAll(matcher);
                    if (matches.length > 0) {
                        // First matching section fails the rule; remaining
                        // sections are not scanned.
                        found = true;
                        violations.push({ rule_id: rule.id, message: rule.message, severity: rule.severity });
                        audit.push({ rule_id: rule.id, path: pathGlob, status: "failed" });
                        break;
                    }
                }
                if (!found) {
                    audit.push({ rule_id: rule.id, path: pathGlob, status: "passed" });
                }
            }
            catch {
                // Fail closed: if ast-grep cannot run, the rule is reported
                // as a violation requiring manual review rather than passing.
                violations.push({
                    rule_id: rule.id,
                    message: `${rule.message} (ast-grep unavailable — manual review required)`,
                    severity: rule.severity,
                });
                audit.push({ rule_id: rule.id, path: pathGlob, status: "failed" });
            }
        }
    }
    return { violations, audit };
}
240
// Validate memory entry frontmatter against canonical schema.
// Returns a list of human-readable error strings; an empty list means valid.
// Required fields are id/type/status; type and status, when present, must
// come from VALID_TYPES / VALID_STATUSES.
function validateEntrySchema(entry) {
    const errors = [];
    for (const field of ["id", "type", "status"]) {
        if (!entry[field]) {
            errors.push(`Missing required field: ${field}`);
        }
    }
    const { type, status } = entry;
    if (type && !VALID_TYPES.includes(type)) {
        errors.push(`Invalid type: ${type}. Must be one of: ${VALID_TYPES.join(", ")}`);
    }
    if (status && !VALID_STATUSES.includes(status)) {
        errors.push(`Invalid status: ${status}. Must be one of: ${VALID_STATUSES.join(", ")}`);
    }
    return errors;
}
256
+ export function registerTools(server, aiDir) {
257
+ server.setRequestHandler(ListToolsRequestSchema, async () => ({
258
+ tools: [
259
+ {
260
+ name: "search_memory",
261
+ description: "Searches across .ai/ memory files (memory/, sessions/, agents/, skills/) and returns ranked results with excerpts. " +
262
+ "Uses hybrid search (keyword + semantic + RRF) by default. AI_SEARCH=keyword|semantic|hybrid. " +
263
+ "On Windows, onnxruntime-node may fail; set AI_SEARCH=keyword for keyword-only, or AI_SEARCH_WASM=1 to try WASM. " +
264
+ "Semantic/hybrid requires Linux or macOS for native; Windows uses keyword-only or WASM. " +
265
+ "Each result includes: file path (relative to .ai/), excerpt, and score. " +
266
+ "For best results: use specific terms from the task (e.g. 'MCP launcher Windows', 'claim locking'); optionally filter by tags if the query mentions them.",
267
+ inputSchema: {
268
+ type: "object",
269
+ properties: {
270
+ query: { type: "string", description: "Search query — use specific terms relevant to the task" },
271
+ tags: { type: "array", items: { type: "string" }, description: "Filter by tags (optional). Only files containing all tags are returned." },
272
+ limit: { type: "number", description: "Max results to return (default 10, max 20). Use lower values for focused queries." },
273
+ include_deprecated: { type: "boolean", description: "Include [DEPRECATED] entries in results (default false). Set true for auditing or history review." },
274
+ },
275
+ required: ["query"],
276
+ },
277
+ },
278
+ {
279
+ name: "get_repo_root",
280
+ description: "Returns the git repository root (absolute path) via `git rev-parse --show-toplevel`. " +
281
+ "Use to resolve paths for validate_context, path traversal checks, or when the agent runs from a subdirectory. " +
282
+ "Returns null if not in a git repo.",
283
+ inputSchema: { type: "object", properties: {} },
284
+ },
285
+ {
286
+ name: "validate_context",
287
+ description: "Validates a git diff against active [P0] constraint rules. Returns violations as errors — hard blocks if any P0 rule is triggered. " +
288
+ "You MUST run generate_harness first (or use init --full) to create .ai/temp/harness.json. " +
289
+ "Each violation includes: rule_id, message, severity (P0/P1/P2). P0 violations cause a hard block; P1/P2 return warnings. " +
290
+ "For best results: pass the full output of `git diff` (staged or unstaged) before committing.",
291
+ inputSchema: {
292
+ type: "object",
293
+ properties: {
294
+ git_diff: { type: "string", description: "Output of git diff (e.g. git diff or git diff --cached)" },
295
+ },
296
+ required: ["git_diff"],
297
+ },
298
+ },
299
+ {
300
+ name: "validate_schema",
301
+ description: "Validates a proposed memory entry's frontmatter against the canonical schema. Returns validation errors. " +
302
+ "Required fields: id, type, status. Valid types: identity, project-status, decision, pattern, debugging, improvement, index, session, reference, agent, skill, rule, acp, toolbox, docs-schema. Valid statuses: active, deprecated, experimental. " +
303
+ "Use before commit_memory when constructing entries programmatically to catch schema errors early.",
304
+ inputSchema: {
305
+ type: "object",
306
+ properties: {
307
+ entry: { type: "object", description: "Memory entry frontmatter fields to validate (id, type, status, etc.)" },
308
+ },
309
+ required: ["entry"],
310
+ },
311
+ },
312
+ {
313
+ name: "commit_memory",
314
+ description: "Writes a memory entry to .ai/. Enforces immutability: IDENTITY.md is immutable by default; PROJECT_STATUS.md is writable by default (configurable via frontmatter `writable`). Hard-blocks writes to toolbox/, acp/, rules/. Uses claim-based locking for multi-agent safety — if another session holds a claim on the path, wait for the 5-minute TTL or close the other session. " +
315
+ "Each write appends a session header for traceability. Use append: false to overwrite (creates new file or replaces). " +
316
+ "For best results: use validate_schema first when constructing entries; pass session_id when coordinating with claim_task or publish_result. When work is done: break down into atomic tasks that fit RALPH loops and avoid conflicts when agents work in parallel.",
317
+ inputSchema: {
318
+ type: "object",
319
+ properties: {
320
+ path: { type: "string", description: "Relative path within .ai/ (e.g. memory/decisions.md, memory/patterns.md)" },
321
+ content: { type: "string", description: "Content to append or write (include frontmatter for memory entries)" },
322
+ append: { type: "boolean", description: "Append to existing file (true) or overwrite (false). Default: true." },
323
+ session_id: { type: "string", description: "Optional session identifier for multi-agent tracking. Auto-generated if not provided." },
324
+ },
325
+ required: ["path", "content"],
326
+ },
327
+ },
328
+ {
329
+ name: "generate_harness",
330
+ description: "Compiles harness.json from current [P0] entries in memory/decisions.md. Writes .ai/temp/harness.json and rule tests to .ai/temp/rule-tests/tests.json. " +
331
+ "Required before validate_context. Run after adding or changing [P0] entries to refresh the rule set.",
332
+ inputSchema: { type: "object", properties: {} },
333
+ },
334
+ {
335
+ name: "get_open_items",
336
+ description: "Returns the current open and closed items from sessions/open-items.md. " +
337
+ "Use at session start to see pending tasks, or before claim_task to avoid duplicate work.",
338
+ inputSchema: { type: "object", properties: {} },
339
+ },
340
+ {
341
+ name: "get_memory",
342
+ description: "Returns a summary of memory for a specific topic. Searches .ai/ and returns top 5 matches with file path and excerpt. " +
343
+ "Use for quick lookups when you need a focused answer (e.g. 'authentication', 'MCP config'). " +
344
+ "For broader exploration, use search_memory instead.",
345
+ inputSchema: {
346
+ type: "object",
347
+ properties: {
348
+ topic: { type: "string", description: "Topic to summarize (e.g. 'authentication', 'database patterns', 'project status')" },
349
+ },
350
+ required: ["topic"],
351
+ },
352
+ },
353
+ {
354
+ name: "prune_memory",
355
+ description: "Identifies stale or deprecated memory entries for archiving. Scans memory/*.md for [DEPRECATED] entries. " +
356
+ "Returns a list of candidates with file and entry count. Use dry_run: true (default) to report without modifying; dry_run: false to flag for manual archiving. " +
357
+ "For best results: run periodically to keep memory lean; review candidates before archiving.",
358
+ inputSchema: {
359
+ type: "object",
360
+ properties: {
361
+ dry_run: { type: "boolean", description: "If true, report candidates without modifying files. Default: true." },
362
+ },
363
+ },
364
+ },
365
+ {
366
+ name: "get_evals",
367
+ description: "Returns the latest eval report from .ai/temp/eval-report.json. " +
368
+ "Use to check memory health and governance metrics. Run `ai-memory eval` to generate the report.",
369
+ inputSchema: { type: "object", properties: {} },
370
+ },
371
+ // ─── Autoresearch collaboration tools ────────────────────────────────
372
+ {
373
+ name: "claim_task",
374
+ description: "Claims a task before starting work. Searches the task source file (open-items.md by default, or a plan file, PROJECT_STATUS.md) for a matching unclaimed item and marks it [CLAIMED:session_id]. Prevents duplicate work across concurrent agents. Claims expire after 5 minutes. " +
375
+ "If no match is found, creates a new claimed task in open-items.md. " +
376
+ "For best results: use task_description that matches the wording in the source (e.g. 'Add Context7 MCP'); specify source when the task lives in a plan file or PROJECT_STATUS.md.",
377
+ inputSchema: {
378
+ type: "object",
379
+ properties: {
380
+ task_description: { type: "string", description: "Description of the task to claim (match wording in source for best match)" },
381
+ source: { type: "string", description: "Relative path to task source within .ai/ (default: sessions/open-items.md). Can be a plan file, PROJECT_STATUS.md, etc." },
382
+ session_id: { type: "string", description: "Session identifier. Auto-generated if not provided. Use same ID for publish_result to link result to claim." },
383
+ },
384
+ required: ["task_description"],
385
+ },
386
+ },
387
+ {
388
+ name: "publish_result",
389
+ description: "Publishes an experiment or task result (success, failure, or partial) to sessions/archive/thread-archive.md. Every result is recorded for collective learning. " +
390
+ "If the task was claimed via claim_task with the same session_id, marks the task complete (success) or reopened (failure) in open-items.md. " +
391
+ "Each entry includes: date, outcome icon, summary, learnings (optional), session_id. " +
392
+ "For best results: include learnings (patterns, anti-patterns, decisions) to enrich the archive.",
393
+ inputSchema: {
394
+ type: "object",
395
+ properties: {
396
+ summary: { type: "string", description: "What was attempted and what happened" },
397
+ outcome: { type: "string", enum: ["success", "failure", "partial"], description: "Outcome: success, failure, or partial" },
398
+ learnings: { type: "string", description: "What was learned (patterns, anti-patterns, decisions). Optional but recommended." },
399
+ session_id: { type: "string", description: "Session identifier. Auto-generated if not provided. Use same ID as claim_task to link." },
400
+ },
401
+ required: ["summary", "outcome"],
402
+ },
403
+ },
404
+ {
405
+ name: "sync_memory",
406
+ description: "Persists all .ai/ changes to git. Stages .ai/, commits with a message, and optionally pushes. Essential for ephemeral environments (worktrees, cloud agents, sandbox). " +
407
+ "Requires a git repository. Returns the commit message and list of files committed. " +
408
+ "For best results: run after commit_memory or other .ai/ writes; use push: true when in a cloud agent or worktree to persist to remote.",
409
+ inputSchema: {
410
+ type: "object",
411
+ properties: {
412
+ message: { type: "string", description: "Commit message. Auto-generated if not provided." },
413
+ push: { type: "boolean", description: "Push to remote after commit. Default: false." },
414
+ },
415
+ },
416
+ },
417
+ // ─── Documentation management ───────────────────────────────────────────
418
+ {
419
+ name: "get_doc_path",
420
+ description: "Returns the canonical path for a documentation type from .ai/docs-schema.json. Use before creating or updating docs — do not infer paths. " +
421
+ "Types: design-system, adr, api-spec, api-guide, model-card, prompts, backlog, decisions-archive, changelog. " +
422
+ "Returns null if schema missing or type unknown. Pass slug for types with * in pattern (e.g. design-system slug=<Project>).",
423
+ inputSchema: {
424
+ type: "object",
425
+ properties: {
426
+ type: { type: "string", description: "Doc type (e.g. design-system, backlog, changelog)" },
427
+ slug: { type: "string", description: "Optional slug for parameterized types (e.g. project name for design-system)" },
428
+ },
429
+ required: ["type"],
430
+ },
431
+ },
432
+ {
433
+ name: "validate_doc_placement",
434
+ description: "Validates a file path against .ai/docs-schema.json. Checks naming convention (SCREAMING_SNAKE by default) and path. " +
435
+ "Returns valid: boolean and errors: string[]. Use before writing docs; run in background during compound. " +
436
+ "If schema missing, returns valid: true.",
437
+ inputSchema: {
438
+ type: "object",
439
+ properties: {
440
+ path: { type: "string", description: "Relative path to validate (e.g. docs/BACKLOG.md)" },
441
+ paths: { type: "array", items: { type: "string" }, description: "Multiple paths to validate" },
442
+ },
443
+ },
444
+ },
445
+ {
446
+ name: "list_doc_types",
447
+ description: "Lists all doc types from .ai/docs-schema.json with their path and pattern. " +
448
+ "Use to discover available types before get_doc_path. Returns empty if schema missing.",
449
+ inputSchema: { type: "object", properties: {} },
450
+ },
451
+ ],
452
+ }));
453
+ server.setRequestHandler(CallToolRequestSchema, async (request) => {
454
+ const { name, arguments: args = {} } = request.params;
455
+ switch (name) {
456
+ case "search_memory": {
457
+ const query = args.query;
458
+ if (typeof query !== "string" || !query.trim()) {
459
+ throw new McpError(ErrorCode.InvalidParams, "query is required and must be a non-empty string.");
460
+ }
461
+ const tags = args.tags;
462
+ const userLimit = Math.min(Number(args.limit) || 10, 20);
463
+ const includeDeprecated = args.include_deprecated ?? false;
464
+ const mode = getSearchMode();
465
+ let resp;
466
+ let fallbackNote = "";
467
+ try {
468
+ resp = await hybridSearch(aiDir, query, { mode, limit: userLimit, tags, includeDeprecated });
469
+ }
470
+ catch (err) {
471
+ if (mode !== "keyword") {
472
+ resp = await hybridSearch(aiDir, query, { mode: "keyword", limit: userLimit, tags, includeDeprecated });
473
+ fallbackNote =
474
+ "Note: Hybrid/semantic search failed (e.g. onnxruntime-node missing on Windows). Using keyword-only. Set AI_SEARCH=keyword to skip, or AI_SEARCH_WASM=1 to try WASM.\n\n";
475
+ }
476
+ else {
477
+ throw err;
478
+ }
479
+ }
480
+ const { results, backend } = resp;
481
+ if (results.length === 0) {
482
+ return { content: [{ type: "text", text: "No results found." }] };
483
+ }
484
+ const backendLabel = backend === "keyword" ? "Keyword-only" : backend === "native" ? "Hybrid (Native)" : "Hybrid (WASM)";
485
+ const text = fallbackNote + `Search backend: ${backendLabel}\n\n` +
486
+ results
487
+ .map((r, i) => {
488
+ const excerpt = r.excerpt.length > 200 ? r.excerpt.slice(0, 200) + "…" : r.excerpt;
489
+ return `${i + 1}. **${r.file}** (score: ${r.score})\n ${excerpt}`;
490
+ })
491
+ .join("\n\n");
492
+ return { content: [{ type: "text", text }] };
493
+ }
494
+ case "get_repo_root": {
495
+ const cwd = resolve(aiDir, "..");
496
+ const root = getRepoRoot(cwd);
497
+ return {
498
+ content: [{ type: "text", text: root ?? "null (not a git repository)" }],
499
+ };
500
+ }
501
case "validate_context": {
    // Validate a git diff against the compiled [P0] constraint harness.
    // Emits a "Stability Certificate" when clean, hard-fails (throws) on any P0
    // violation, and returns a warning report flagged isError for lower severities.
    const gitDiff = args.git_diff;
    if (typeof gitDiff !== "string" || !gitDiff.trim()) {
        throw new McpError(ErrorCode.InvalidParams, "git_diff is required and must be a non-empty string.");
    }
    const harnessPath = join(aiDir, "temp/harness.json");
    if (!existsSync(harnessPath)) {
        return {
            content: [{ type: "text", text: "No harness.json found. Run generate_harness to create one, or initialize with --full." }],
        };
    }
    let rules;
    try {
        const harnessRaw = await readFile(harnessPath, "utf-8");
        rules = JSON.parse(harnessRaw);
    }
    catch {
        // Covers both read and JSON.parse failures — the harness must be regenerated.
        throw new McpError(ErrorCode.InternalError, "Failed to parse harness.json. Run `ai-memory generate-harness` to regenerate.");
    }
    const repoRoot = getRepoRoot(resolve(aiDir, ".."));
    const { violations, audit } = await validateDiff(gitDiff, rules, aiDir, repoRoot);
    const timestamp = new Date().toISOString();
    const harnessVersion = "1.0";
    if (violations.length === 0) {
        // Clean run: render one audit line per rule with a pass/skip/fail glyph.
        const auditLines = audit.map((a) => {
            const icon = a.status === "passed" ? "✓" : a.status === "skipped" ? "○" : "✗";
            return ` ${icon} [P0] ${a.rule_id} (${a.path}) — ${a.status}`;
        });
        const cert = [
            "═══ Stability Certificate ═══",
            `Status: PASSED`,
            `Harness: ${harnessVersion} | ${timestamp}`,
            `repo_root: ${repoRoot ?? "null"}`,
            "",
            "Audit log:",
            ...auditLines,
            "",
            "Stability Surface is 100% compliant with active [P0] constraints.",
        ].join("\n");
        return { content: [{ type: "text", text: cert }] };
    }
    // Hard error on P0 violations
    const p0Violations = violations.filter((v) => v.severity === "P0");
    if (p0Violations.length > 0) {
        const report = [
            "═══ Constraint Violation Report ═══",
            `Status: FAILED`,
            `Harness: ${harnessVersion} | ${timestamp}`,
            `repo_root: ${repoRoot ?? "null"}`,
            "",
            `[HARD BLOCK] ${p0Violations.length} P0 constraint violation(s):`,
            "",
            ...p0Violations.map((v) => `• ${v.rule_id}: ${v.message}`),
        ].join("\n");
        throw new McpError(ErrorCode.InvalidRequest, report);
    }
    // Lower-severity findings: returned (not thrown) but flagged isError so the
    // client surfaces them as warnings rather than silently accepting the diff.
    const text = violations
        .map((v) => `• [${v.severity}] ${v.rule_id}: ${v.message}`)
        .join("\n");
    return { content: [{ type: "text", text: `Constraint warnings:\n\n${text}` }], isError: true };
}
562
+ case "validate_schema": {
563
+ const entry = args.entry;
564
+ if (!entry || typeof entry !== "object") {
565
+ throw new McpError(ErrorCode.InvalidParams, "entry is required and must be an object.");
566
+ }
567
+ const errors = validateEntrySchema(entry);
568
+ if (errors.length === 0) {
569
+ return { content: [{ type: "text", text: "✓ Schema valid." }] };
570
+ }
571
+ throw new McpError(ErrorCode.InvalidParams, `Schema validation failed:\n\n${errors.map((e) => `• ${e}`).join("\n")}`);
572
+ }
573
case "commit_memory": {
    // Write (append by default) content to a memory file under aiDir, enforcing
    // immutability rules and a per-file claim lock for multi-agent safety.
    const memPath = args.path;
    const memContent = args.content;
    if (typeof memPath !== "string" || !memPath.trim()) {
        throw new McpError(ErrorCode.InvalidParams, "path is required and must be a non-empty string.");
    }
    if (typeof memContent !== "string") {
        throw new McpError(ErrorCode.InvalidParams, "content is required and must be a string.");
    }
    // Default is append; explicit append: false overwrites the file.
    const append = args.append ?? true;
    const sessionId = (typeof args.session_id === "string" && args.session_id) || generateSessionId();
    // Check immutability (reads frontmatter for IDENTITY.md/PROJECT_STATUS.md)
    if (await isImmutable(memPath, aiDir)) {
        const reason = FRONTMATTER_CONTROLLED.includes(memPath)
            ? `${memPath} is immutable (set \`writable: true\` in its frontmatter to allow AI writes).`
            : `${memPath} is in a structurally immutable path (${ALWAYS_IMMUTABLE.join(", ")}).`;
        throw new McpError(ErrorCode.InvalidRequest, reason);
    }
    // Resolves memPath and rejects anything that escapes aiDir (path traversal).
    const fullPath = assertPathWithinAiDir(aiDir, memPath);
    // Claim-based locking for multi-agent safety
    await acquireClaim(aiDir, memPath, sessionId);
    try {
        await mkdir(dirname(fullPath), { recursive: true });
        // Session-attributed write header for traceability
        const header = `<!-- session:${sessionId} at:${new Date().toISOString()} -->`;
        if (append && existsSync(fullPath)) {
            const existing = await readFile(fullPath, "utf-8");
            await writeFile(fullPath, existing.trimEnd() + "\n\n" + header + "\n" + memContent);
        }
        else {
            await writeFile(fullPath, header + "\n" + memContent);
        }
    }
    finally {
        // Always release the claim, even when the write itself fails.
        await releaseClaim(aiDir, memPath);
    }
    return { content: [{ type: "text", text: `✓ Written to ${memPath} (session: ${sessionId})` }] };
}
611
+ case "generate_harness": {
612
+ const entries = await readP0Entries(aiDir);
613
+ const rules = compileHarnessRules(entries);
614
+ const tests = generateRuleTests(entries);
615
+ const tempDir = join(aiDir, "temp");
616
+ await mkdir(tempDir, { recursive: true });
617
+ await writeFile(join(tempDir, "harness.json"), JSON.stringify(rules, null, 2));
618
+ if (tests.length > 0) {
619
+ const testsDir = join(tempDir, "rule-tests");
620
+ await mkdir(testsDir, { recursive: true });
621
+ await writeFile(join(testsDir, "tests.json"), JSON.stringify(tests, null, 2));
622
+ }
623
+ return {
624
+ content: [
625
+ {
626
+ type: "text",
627
+ text: `✓ Harness generated: ${rules.length} rule(s) compiled from ${entries.length} [P0] entries.\n${tests.length > 0 ? `${tests.length} rule test(s) written to temp/rule-tests/tests.json` : "No rule tests found — add **Should trigger:** and **Should not trigger:** examples to [P0] entries."}`,
628
+ },
629
+ ],
630
+ };
631
+ }
632
+ case "get_open_items": {
633
+ const openItemsPath = join(aiDir, "sessions/open-items.md");
634
+ try {
635
+ const content = await readFile(openItemsPath, "utf-8");
636
+ return { content: [{ type: "text", text: content }] };
637
+ }
638
+ catch {
639
+ return { content: [{ type: "text", text: "No open-items.md found. Initialize with `ai-memory init`." }] };
640
+ }
641
+ }
642
+ case "get_memory": {
643
+ const topic = args.topic;
644
+ if (typeof topic !== "string" || !topic.trim()) {
645
+ throw new McpError(ErrorCode.InvalidParams, "topic is required and must be a non-empty string.");
646
+ }
647
+ const mode = getSearchMode();
648
+ let resp;
649
+ let fallbackNote = "";
650
+ try {
651
+ resp = await hybridSearch(aiDir, topic, { mode, limit: 5 });
652
+ }
653
+ catch (err) {
654
+ if (mode !== "keyword") {
655
+ resp = await hybridSearch(aiDir, topic, { mode: "keyword", limit: 5 });
656
+ fallbackNote =
657
+ "Note: Hybrid/semantic search failed. Using keyword-only. Set AI_SEARCH=keyword or AI_SEARCH_WASM=1.\n\n";
658
+ }
659
+ else {
660
+ throw err;
661
+ }
662
+ }
663
+ const { results, backend } = resp;
664
+ if (results.length === 0) {
665
+ return { content: [{ type: "text", text: `No memory found for topic: ${topic}` }] };
666
+ }
667
+ const backendLabel = backend === "keyword" ? "Keyword-only" : backend === "native" ? "Hybrid (Native)" : "Hybrid (WASM)";
668
+ const text = fallbackNote + `Search backend: ${backendLabel}\n\nMemory for "${topic}":\n\n` +
669
+ results
670
+ .map((r) => `**${r.file}**: ${r.excerpt}`)
671
+ .join("\n\n");
672
+ return { content: [{ type: "text", text }] };
673
+ }
674
+ case "prune_memory": {
675
+ const dryRun = args.dry_run ?? true;
676
+ const memDir = join(aiDir, "memory");
677
+ const candidates = [];
678
+ // Find [DEPRECATED] entries
679
+ if (existsSync(memDir)) {
680
+ const files = await readdir(memDir);
681
+ for (const file of files) {
682
+ if (!file.endsWith(".md"))
683
+ continue;
684
+ const content = await readFile(join(memDir, file), "utf-8");
685
+ const deprecatedMatches = content.match(/### \[P[0-2]\].+\[DEPRECATED\]/g);
686
+ if (deprecatedMatches) {
687
+ candidates.push(`${file}: ${deprecatedMatches.length} deprecated entry/entries`);
688
+ }
689
+ }
690
+ }
691
+ if (candidates.length === 0) {
692
+ return { content: [{ type: "text", text: "No candidates for pruning found." }] };
693
+ }
694
+ const report = candidates.map((c) => `• ${c}`).join("\n");
695
+ if (dryRun) {
696
+ return {
697
+ content: [
698
+ { type: "text", text: `Prune candidates (dry run — no changes made):\n\n${report}\n\nRun with dry_run: false to archive these entries.` },
699
+ ],
700
+ };
701
+ }
702
+ // Actual prune: move deprecated entries to sessions/archive
703
+ // (simplified: flag for manual review)
704
+ return {
705
+ content: [
706
+ { type: "text", text: `Flagged for archiving:\n\n${report}\n\nReview and move to sessions/archive/ manually, or run \`ai-memory prune\` from the CLI.` },
707
+ ],
708
+ };
709
+ }
710
+ case "get_evals": {
711
+ const evalPath = join(aiDir, "temp/eval-report.json");
712
+ try {
713
+ const content = await readFile(evalPath, "utf-8");
714
+ return { content: [{ type: "text", text: content }] };
715
+ }
716
+ catch {
717
+ return { content: [{ type: "text", text: "No eval report found. Run `ai-memory eval` to generate one." }] };
718
+ }
719
+ }
720
+ // ─── Autoresearch collaboration handlers ─────────────────────────────
721
+ case "claim_task": {
722
+ const taskDesc = args.task_description;
723
+ if (typeof taskDesc !== "string" || !taskDesc.trim()) {
724
+ throw new McpError(ErrorCode.InvalidParams, "task_description is required.");
725
+ }
726
+ const sessionId = (typeof args.session_id === "string" && args.session_id) || generateSessionId();
727
+ let sourcePath = typeof args.source === "string" && args.source
728
+ ? args.source
729
+ : "sessions/open-items.md";
730
+ // Validate source path stays within aiDir
731
+ const sourceFullPath = assertPathWithinAiDir(aiDir, sourcePath);
732
+ let sourceContent = "";
733
+ try {
734
+ sourceContent = await readFile(sourceFullPath, "utf-8");
735
+ }
736
+ catch { /* no file yet */ }
737
+ // Find matching unclaimed task line (supports: - [ ], - TODO, - task, numbered lists)
738
+ const taskTerms = taskDesc.toLowerCase().split(/\s+/).filter(Boolean);
739
+ const lines = sourceContent.split("\n");
740
+ let matchedLine = -1;
741
+ let bestScore = 0;
742
+ const taskLinePattern = /^(\s*[-*]\s*\[[ ]\]|\s*[-*]\s|\s*\d+\.\s)/;
743
+ for (let i = 0; i < lines.length; i++) {
744
+ const line = lines[i];
745
+ if (!taskLinePattern.test(line))
746
+ continue;
747
+ // Skip already claimed or completed tasks
748
+ if (line.includes("[CLAIMED:") || line.match(/\[x\]/i))
749
+ continue;
750
+ const lower = line.toLowerCase();
751
+ const score = taskTerms.filter((t) => lower.includes(t)).length;
752
+ if (score > bestScore) {
753
+ bestScore = score;
754
+ matchedLine = i;
755
+ }
756
+ }
757
+ // Claim the task: mark it in-progress with session_id
758
+ if (matchedLine >= 0 && bestScore >= Math.min(2, taskTerms.length)) {
759
+ const original = lines[matchedLine];
760
+ // Mark claimed — preserve original format, append claim marker
761
+ lines[matchedLine] = original.replace("- [ ]", "- [~]") + ` [CLAIMED:${sessionId}]`;
762
+ await writeFile(sourceFullPath, lines.join("\n"));
763
+ return { content: [{ type: "text", text: `✓ Claimed task from ${sourcePath} (session ${sessionId}):\n${lines[matchedLine]}` }] };
764
+ }
765
+ // No match in source file — add as new claimed item to open-items.md
766
+ const openItemsPath = join(aiDir, "sessions/open-items.md");
767
+ let openItems = "";
768
+ try {
769
+ openItems = await readFile(openItemsPath, "utf-8");
770
+ }
771
+ catch { }
772
+ const newItem = `- [~] ${taskDesc} [CLAIMED:${sessionId}]`;
773
+ const updated = openItems.includes("## Open")
774
+ ? openItems.replace("## Open", `## Open\n\n${newItem}`)
775
+ : openItems + `\n\n## Open\n\n${newItem}`;
776
+ await mkdir(dirname(openItemsPath), { recursive: true });
777
+ await writeFile(openItemsPath, updated);
778
+ return { content: [{ type: "text", text: `✓ No match in ${sourcePath}. Created and claimed new task in open-items.md (session ${sessionId}):\n${newItem}` }] };
779
+ }
780
case "publish_result": {
    // Record a finished task's outcome in sessions/archive/thread-archive.md
    // and, when a task was claimed by this session, resolve it in open-items.md.
    const summary = args.summary;
    const outcome = args.outcome;
    if (typeof summary !== "string" || !summary.trim()) {
        throw new McpError(ErrorCode.InvalidParams, "summary is required.");
    }
    if (typeof outcome !== "string" || !["success", "failure", "partial"].includes(outcome)) {
        throw new McpError(ErrorCode.InvalidParams, "outcome must be 'success', 'failure', or 'partial'.");
    }
    const learnings = typeof args.learnings === "string" ? args.learnings : "";
    const sessionId = (typeof args.session_id === "string" && args.session_id) || generateSessionId();
    const date = new Date().toISOString().slice(0, 10);
    const icon = outcome === "success" ? "✓" : outcome === "failure" ? "✗" : "~";
    // Write to thread-archive
    const archivePath = join(aiDir, "sessions/archive/thread-archive.md");
    const entry = `[${date}] [${icon} ${outcome}] ${summary}${learnings ? ` — Learnings: ${learnings}` : ""} (session:${sessionId})`;
    try {
        const existing = await readFile(archivePath, "utf-8");
        await writeFile(archivePath, existing.trimEnd() + "\n" + entry + "\n");
    }
    catch {
        // Archive does not exist yet — create the directory and start a new file.
        await mkdir(join(aiDir, "sessions/archive"), { recursive: true });
        await writeFile(archivePath, entry + "\n");
    }
    // If task was claimed, mark it done in open-items
    const openItemsPath = join(aiDir, "sessions/open-items.md");
    try {
        let openItems = await readFile(openItemsPath, "utf-8");
        // Find lines claimed by this session and mark complete/failed
        // ("- [x]" on success, back to "- [ ]" otherwise). The session id is
        // regex-escaped; the pattern matches "- [~] <task> [CLAIMED:<id>]" lines
        // written by claim_task and appends "[<outcome>:<date>]".
        const marker = outcome === "success" ? "- [x]" : "- [ ]";
        openItems = openItems.replace(new RegExp(`^- \\[~\\] (.+?)\\[CLAIMED:${sessionId.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}\\]`, "gm"), (_, task) => `${marker} ${task.trim()} [${outcome}:${date}]`);
        await writeFile(openItemsPath, openItems);
    }
    catch { /* no open-items yet */ }
    return { content: [{ type: "text", text: `✓ Result published to thread-archive:\n${entry}` }] };
}
816
case "sync_memory": {
    // Stage and commit the .ai/ directory to git; optionally push.
    // Uses execFileSync with an argv array so the commit message is never
    // interpreted by a shell.
    const commitMsg = typeof args.message === "string" && args.message
        ? args.message
        : `ai-memory: auto-sync ${new Date().toISOString().slice(0, 19)}`;
    const shouldPush = args.push === true;
    const { execFileSync } = await import("child_process");
    // Run git from the project root (parent of .ai/); each call capped at 30s.
    const execOpts = { cwd: resolve(aiDir, ".."), encoding: "utf-8", timeout: 30000 };
    try {
        execFileSync("git", ["rev-parse", "--git-dir"], execOpts);
    }
    catch {
        // Not inside a git work tree — nothing to sync.
        return { content: [{ type: "text", text: "Not a git repository. sync_memory requires git. Commit .ai/ manually." }] };
    }
    try {
        execFileSync("git", ["add", ".ai/"], execOpts);
        // List staged files; empty output means there is nothing to commit.
        const status = execFileSync("git", ["diff", "--cached", "--name-only"], execOpts).trim();
        if (!status) {
            return { content: [{ type: "text", text: "No .ai/ changes to sync." }] };
        }
        execFileSync("git", ["commit", "-m", commitMsg], execOpts);
        let result = `✓ Committed .ai/ changes: ${commitMsg}\nFiles: ${status.split("\n").length} file(s)`;
        if (shouldPush) {
            try {
                execFileSync("git", ["push"], execOpts);
                result += "\n✓ Pushed to remote.";
            }
            catch (pushErr) {
                // A failed push is non-fatal: the commit already exists locally.
                const msg = pushErr instanceof Error ? pushErr.message : String(pushErr);
                result += `\n⚠ Push failed: ${msg}\nChanges are committed locally. Push manually.`;
            }
        }
        return { content: [{ type: "text", text: result }] };
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        throw new McpError(ErrorCode.InternalError, `sync_memory failed: ${msg}`);
    }
}
854
+ case "get_doc_path": {
855
+ const docType = args.type;
856
+ if (typeof docType !== "string" || !docType.trim()) {
857
+ throw new McpError(ErrorCode.InvalidParams, "type is required.");
858
+ }
859
+ const projectRoot = resolve(aiDir, "..");
860
+ const schema = await loadDocsSchema(projectRoot);
861
+ if (!schema) {
862
+ return { content: [{ type: "text", text: "No .ai/docs-schema.json found. Run `ai-memory init --full` to create one." }] };
863
+ }
864
+ const slug = typeof args.slug === "string" ? args.slug : undefined;
865
+ let path;
866
+ try {
867
+ path = getDocPath(schema, docType, slug);
868
+ }
869
+ catch (err) {
870
+ if (err instanceof Error && err.message.includes("slug")) {
871
+ throw new McpError(ErrorCode.InvalidParams, err.message);
872
+ }
873
+ throw err;
874
+ }
875
+ if (!path) {
876
+ return {
877
+ content: [
878
+ {
879
+ type: "text",
880
+ text: `Unknown doc type "${docType}". Available: ${Object.keys(schema.docTypes).join(", ")}`,
881
+ },
882
+ ],
883
+ };
884
+ }
885
+ return { content: [{ type: "text", text: path }] };
886
+ }
887
+ case "validate_doc_placement": {
888
+ const projectRoot = resolve(aiDir, "..");
889
+ const schema = await loadDocsSchema(projectRoot);
890
+ if (!schema) {
891
+ return { content: [{ type: "text", text: "valid: true (no schema)" }] };
892
+ }
893
+ const pathsToCheck = [];
894
+ if (typeof args.path === "string" && args.path)
895
+ pathsToCheck.push(args.path);
896
+ if (Array.isArray(args.paths))
897
+ pathsToCheck.push(...args.paths.filter((p) => typeof p === "string"));
898
+ if (pathsToCheck.length === 0) {
899
+ return { content: [{ type: "text", text: "Provide path or paths to validate." }] };
900
+ }
901
+ const allErrors = [];
902
+ for (const p of pathsToCheck) {
903
+ const result = validateDocPlacement(schema, p, projectRoot);
904
+ if (!result.valid)
905
+ allErrors.push(...result.errors.map((e) => `${p}: ${e}`));
906
+ }
907
+ const valid = allErrors.length === 0;
908
+ const text = valid
909
+ ? `valid: true (${pathsToCheck.length} path(s) OK)`
910
+ : `valid: false\n${allErrors.join("\n")}`;
911
+ return { content: [{ type: "text", text }] };
912
+ }
913
+ case "list_doc_types": {
914
+ const projectRoot = resolve(aiDir, "..");
915
+ const schema = await loadDocsSchema(projectRoot);
916
+ if (!schema) {
917
+ return { content: [{ type: "text", text: "No .ai/docs-schema.json found." }] };
918
+ }
919
+ const types = listDocTypes(schema);
920
+ const text = types.map((t) => `- ${t.type}: ${t.path}/${t.pattern}`).join("\n");
921
+ return { content: [{ type: "text", text: text || "No doc types defined." }] };
922
+ }
923
+ default:
924
+ throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
925
+ }
926
+ });
927
+ }
928
+ //# sourceMappingURL=tools.js.map