@radix-ai/ai-memory 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105) hide show
  1. package/.claude-plugin/marketplace.json +19 -0
  2. package/.cursor-plugin/marketplace.json +19 -0
  3. package/LICENSE +21 -0
  4. package/README.md +331 -0
  5. package/dist/cli/adapters.d.ts +32 -0
  6. package/dist/cli/adapters.d.ts.map +1 -0
  7. package/dist/cli/adapters.js +368 -0
  8. package/dist/cli/adapters.js.map +1 -0
  9. package/dist/cli/environment.d.ts +34 -0
  10. package/dist/cli/environment.d.ts.map +1 -0
  11. package/dist/cli/environment.js +119 -0
  12. package/dist/cli/environment.js.map +1 -0
  13. package/dist/cli/index.d.ts +3 -0
  14. package/dist/cli/index.d.ts.map +1 -0
  15. package/dist/cli/index.js +1108 -0
  16. package/dist/cli/index.js.map +1 -0
  17. package/dist/docs-schema.d.ts +27 -0
  18. package/dist/docs-schema.d.ts.map +1 -0
  19. package/dist/docs-schema.js +100 -0
  20. package/dist/docs-schema.js.map +1 -0
  21. package/dist/evals/index.d.ts +13 -0
  22. package/dist/evals/index.d.ts.map +1 -0
  23. package/dist/evals/index.js +205 -0
  24. package/dist/evals/index.js.map +1 -0
  25. package/dist/evals/performance-comparison.d.ts +23 -0
  26. package/dist/evals/performance-comparison.d.ts.map +1 -0
  27. package/dist/evals/performance-comparison.js +76 -0
  28. package/dist/evals/performance-comparison.js.map +1 -0
  29. package/dist/evals/platform-integration.d.ts +34 -0
  30. package/dist/evals/platform-integration.d.ts.map +1 -0
  31. package/dist/evals/platform-integration.js +186 -0
  32. package/dist/evals/platform-integration.js.map +1 -0
  33. package/dist/formatter/index.d.ts +15 -0
  34. package/dist/formatter/index.d.ts.map +1 -0
  35. package/dist/formatter/index.js +207 -0
  36. package/dist/formatter/index.js.map +1 -0
  37. package/dist/hybrid-search/index.d.ts +40 -0
  38. package/dist/hybrid-search/index.d.ts.map +1 -0
  39. package/dist/hybrid-search/index.js +277 -0
  40. package/dist/hybrid-search/index.js.map +1 -0
  41. package/dist/index.d.ts +4 -0
  42. package/dist/index.d.ts.map +1 -0
  43. package/dist/index.js +5 -0
  44. package/dist/index.js.map +1 -0
  45. package/dist/mcp-server/index.d.ts +6 -0
  46. package/dist/mcp-server/index.d.ts.map +1 -0
  47. package/dist/mcp-server/index.js +114 -0
  48. package/dist/mcp-server/index.js.map +1 -0
  49. package/dist/mcp-server/p0-parser.d.ts +43 -0
  50. package/dist/mcp-server/p0-parser.d.ts.map +1 -0
  51. package/dist/mcp-server/p0-parser.js +108 -0
  52. package/dist/mcp-server/p0-parser.js.map +1 -0
  53. package/dist/mcp-server/resources.d.ts +3 -0
  54. package/dist/mcp-server/resources.d.ts.map +1 -0
  55. package/dist/mcp-server/resources.js +156 -0
  56. package/dist/mcp-server/resources.js.map +1 -0
  57. package/dist/mcp-server/tools.d.ts +15 -0
  58. package/dist/mcp-server/tools.d.ts.map +1 -0
  59. package/dist/mcp-server/tools.js +928 -0
  60. package/dist/mcp-server/tools.js.map +1 -0
  61. package/dist/schema-constants.d.ts +7 -0
  62. package/dist/schema-constants.d.ts.map +1 -0
  63. package/dist/schema-constants.js +33 -0
  64. package/dist/schema-constants.js.map +1 -0
  65. package/package.json +84 -0
  66. package/plugins/adapters/claude-code/.claude-plugin/plugin.json +26 -0
  67. package/plugins/adapters/claude-code/CLAUDE.md +21 -0
  68. package/plugins/adapters/claude-code/README.md +37 -0
  69. package/plugins/adapters/claude-code/agents/governance-checker.md +27 -0
  70. package/plugins/adapters/claude-code/agents/memory-writer.md +31 -0
  71. package/plugins/adapters/claude-code/hooks/SessionStart.js +55 -0
  72. package/plugins/adapters/claude-code/hooks/hooks.json +52 -0
  73. package/plugins/adapters/generic/BOOTSTRAP_INSTRUCTION.md +30 -0
  74. package/plugins/adapters/generic/README.md +50 -0
  75. package/plugins/ai-memory/.claude-plugin/plugin.json +30 -0
  76. package/plugins/ai-memory/.cursor-plugin/plugin.json +18 -0
  77. package/plugins/ai-memory/.mcp.json +19 -0
  78. package/plugins/ai-memory/agents/governance-critic/AGENT.md +57 -0
  79. package/plugins/ai-memory/agents/memory-auditor/AGENT.md +54 -0
  80. package/plugins/ai-memory/rules/context7-tool-reference.md +16 -0
  81. package/plugins/ai-memory/rules/load-memory.md +22 -0
  82. package/plugins/ai-memory/rules/parallel-safe-planning.md +19 -0
  83. package/plugins/ai-memory/skills/mem-auto-review/SKILL.md +51 -0
  84. package/plugins/ai-memory/skills/mem-compound/SKILL.md +105 -0
  85. package/plugins/ai-memory/skills/mem-init/SKILL.md +48 -0
  86. package/plugins/ai-memory/skills/mem-session-close/SKILL.md +38 -0
  87. package/plugins/ai-memory/skills/mem-validate/SKILL.md +54 -0
  88. package/templates/.ai/IDENTITY.md +23 -0
  89. package/templates/.ai/agents/_base-auditor.md +28 -0
  90. package/templates/.ai/agents/_template.md +23 -0
  91. package/templates/.ai/memory/debugging.md +14 -0
  92. package/templates/.ai/memory/decisions.md +8 -0
  93. package/templates/.ai/memory/improvements.md +7 -0
  94. package/templates/.ai/memory/memory-index.md +9 -0
  95. package/templates/.ai/memory/patterns.md +8 -0
  96. package/templates/.ai/reference/PROJECT.md +5 -0
  97. package/templates/.ai/reference/capability-specs.json +31 -0
  98. package/templates/.ai/reference/environment-specs.json +41 -0
  99. package/templates/.ai/sessions/archive/thread-archive.md +15 -0
  100. package/templates/.ai/sessions/open-items.md +13 -0
  101. package/templates/.ai/toolbox/README.md +5 -0
  102. package/templates/.ai/toolbox/browser.md +30 -0
  103. package/templates/.ai/toolbox/integrations.md +44 -0
  104. package/templates/.ai/toolbox/shell.md +38 -0
  105. package/templates/AGENTS.md +4 -0
@@ -0,0 +1,1108 @@
1
+ #!/usr/bin/env node
2
+ import { Command } from "commander";
3
+ import { writeFile, mkdir } from "fs/promises";
4
+ import { join, resolve, dirname } from "path";
5
+ import { existsSync, readFileSync } from "fs";
6
+ import { fileURLToPath } from "url";
7
+ import { DEFAULT_DOCS_SCHEMA_JSON } from "../docs-schema.js";
8
+ import { TOOL_ADAPTERS, getMCPJson, MCP_LAUNCHER, MCP_LAUNCHER_PATH, CANONICAL_SKILLS } from "./adapters.js";
9
+ import { detectEnvironments, injectCapabilityConfig } from "./environment.js";
10
// Read version from package.json — single source of truth for `--version`.
// Resolved relative to this module so it works from any working directory.
const __dirname_cli = dirname(fileURLToPath(import.meta.url));
const pkgPath = join(__dirname_cli, "..", "..", "package.json");
// Fall back to "0.0.0" when package.json is missing, unparseable, or lacks a
// `version` field — a broken manifest must not crash every CLI invocation.
let PKG_VERSION = "0.0.0";
if (existsSync(pkgPath)) {
    try {
        PKG_VERSION = JSON.parse(readFileSync(pkgPath, "utf-8")).version ?? "0.0.0";
    }
    catch {
        // Malformed JSON: keep the fallback version.
    }
}
16
// Kebab-case: starts and ends with [a-z0-9], hyphens allowed in between.
const KEBAB_RE = /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/;
/**
 * Exit the process with a usage error unless `name` is kebab-case.
 *
 * @param {string} name - The user-supplied name to check.
 * @param {string} label - What kind of thing is being named (agent/skill/rule),
 *   used only in the error message.
 */
function validateKebabCase(name, label) {
    if (KEBAB_RE.test(name)) {
        return; // valid — nothing to do
    }
    console.error(`Invalid ${label} name: "${name}". Must be kebab-case (e.g. my-${label}).`);
    process.exit(1);
}
23
// Root commander program; name/description/version drive `--help`/`--version`.
const program = new Command();
program.name("ai-memory");
program.description("Persistent AI memory for any project.");
program.version(PKG_VERSION);
28
// ─── init ───────────────────────────────────────────────────────────────────
// Scaffold .ai/ in the target project. Idempotent: when .ai/ already exists it
// only adds missing files (via scaffoldUpdates) and never overwrites content.
program
    .command("init")
    .description("Scaffold .ai/ in the current project")
    .option("--full", "Full tier: adds governance, evals, and ACP")
    .option("--dir <dir>", "Target directory (default: current directory)")
    .option("--download-model", "Pre-download the hybrid search model (~23MB) for faster first search")
    .action(async (opts) => {
        const targetDir = resolve(opts.dir ?? process.cwd());
        const aiDir = join(targetDir, ".ai");
        const full = opts.full ?? false;
        if (existsSync(aiDir)) {
            // Existing install: top up missing files only, then exit early.
            const updated = await scaffoldUpdates(aiDir, full);
            if (updated.length > 0) {
                console.log(`\n✓ Added ${updated.length} missing file(s) to existing .ai/`);
            }
            else if (full) {
                console.log(`✓ .ai/ already has full tier. Nothing to add.`);
            }
            else {
                console.log(`✓ .ai/ already exists. Use --full to add governance, docs schema, and ACP.`);
            }
            if (opts.downloadModel) {
                // Optional model warm-up; imported lazily to keep CLI startup fast.
                console.log(`\nDownloading hybrid search model...`);
                const { warmSearchModel } = await import("../hybrid-search/index.js");
                await warmSearchModel();
                console.log(`✓ Model ready.`);
            }
            process.exit(0);
        }
        // Fresh install path.
        console.log(`Initializing ai-memory in ${targetDir}...`);
        await scaffoldAiDir(aiDir, full);
        if (opts.downloadModel) {
            console.log(`\nDownloading hybrid search model (~23MB)...`);
            const { warmSearchModel } = await import("../hybrid-search/index.js");
            await warmSearchModel();
            console.log(`✓ Model ready.`);
        }
        console.log(`\n✓ Done. Next steps:`);
        console.log(` 1. Edit .ai/IDENTITY.md — describe this project and its constraints`);
        console.log(` 2. Edit .ai/PROJECT_STATUS.md — set the current focus`);
        console.log(` 3. Run \`ai-memory install --to cursor\` or \`--to claude-code\` to connect your tool`);
        if (opts.full) {
            console.log(` 4. Add [P0] entries with constraint_pattern to decisions.md for governance`);
            console.log(` 5. Run \`ai-memory generate-harness\` to compile the rule set`);
        }
    });
75
/**
 * Scaffold a fresh .ai/ directory tree and write all template files.
 *
 * @param {string} aiDir - Absolute path of the .ai/ directory to create.
 * @param {boolean} full - When true, also create the full-tier extras
 *   (ACP manifest/capabilities, docs schema, doc-placement rule, docs agent).
 */
async function scaffoldAiDir(aiDir, full) {
    // Core directory skeleton (always created).
    const coreDirs = [
        "",
        "memory",
        "agents",
        "skills",
        "toolbox",
        "rules",
        "sessions",
        "sessions/archive",
        "reference",
    ];
    for (const dir of coreDirs) {
        await mkdir(join(aiDir, dir), { recursive: true });
    }
    // Core template files, written in a stable, deterministic order.
    const coreFiles = [
        ["IDENTITY.md", DEFAULT_IDENTITY],
        ["PROJECT_STATUS.md", DEFAULT_PROJECT_STATUS],
        ["memory/decisions.md", DEFAULT_DECISIONS],
        ["memory/patterns.md", DEFAULT_PATTERNS],
        ["memory/debugging.md", DEFAULT_DEBUGGING],
        ["memory/improvements.md", DEFAULT_IMPROVEMENTS],
        ["memory/memory-index.md", DEFAULT_MEMORY_INDEX],
        ["agents/_base-auditor.md", DEFAULT_BASE_AUDITOR],
        ["agents/_template.md", DEFAULT_AGENT_TEMPLATE],
        ["sessions/open-items.md", DEFAULT_OPEN_ITEMS],
        ["sessions/archive/thread-archive.md", DEFAULT_THREAD_ARCHIVE],
        ["reference/PROJECT.md", DEFAULT_PROJECT],
    ];
    for (const [relPath, content] of coreFiles) {
        await writeTemplateFile(aiDir, relPath, content);
    }
    if (!full) {
        return;
    }
    // Full tier: governance (ACP), docs schema, and rule-test scratch space.
    for (const extraDir of ["acp", "temp", "temp/rule-tests"]) {
        await mkdir(join(aiDir, extraDir), { recursive: true });
    }
    const fullTierFiles = [
        ["acp/manifest.json", DEFAULT_ACP_MANIFEST],
        ["acp/capabilities.md", DEFAULT_ACP_CAPABILITIES],
        ["docs-schema.json", DEFAULT_DOCS_SCHEMA_JSON],
        ["rules/doc-placement.md", DEFAULT_DOC_PLACEMENT_RULE],
        ["agents/docs-manager.md", DEFAULT_DOCS_MANAGER_AGENT],
    ];
    for (const [relPath, content] of fullTierFiles) {
        await writeTemplateFile(aiDir, relPath, content);
    }
}
115
/**
 * Write one template file under `aiDir`, creating parent directories as
 * needed, and log the created path. Overwrites any existing file — callers
 * that must not overwrite use scaffoldUpdates/addIfMissing instead.
 *
 * @param {string} aiDir - Root of the .ai/ directory.
 * @param {string} relativePath - Path of the file relative to `aiDir`.
 * @param {string} content - File content to write.
 */
async function writeTemplateFile(aiDir, relativePath, content) {
    const destination = join(aiDir, relativePath);
    await mkdir(dirname(destination), { recursive: true });
    await writeFile(destination, content);
    console.log(` + .ai/${relativePath}`);
}
121
/**
 * Add only missing files when .ai/ already exists. Never overwrites.
 *
 * @param {string} aiDir - Root of the existing .ai/ directory.
 * @param {boolean} full - When true, add the full-tier extras if absent.
 * @returns {Promise<string[]>} Relative paths of the files that were added.
 */
async function scaffoldUpdates(aiDir, full) {
    const added = [];
    // Write `content` at `relativePath` only when no file is there yet.
    const addIfMissing = async (relativePath, content) => {
        const target = join(aiDir, relativePath);
        if (existsSync(target)) {
            return;
        }
        await mkdir(dirname(target), { recursive: true });
        await writeFile(target, content);
        console.log(` + .ai/${relativePath}`);
        added.push(relativePath);
    };
    if (full) {
        for (const dir of ["acp", "temp", "temp/rule-tests"]) {
            await mkdir(join(aiDir, dir), { recursive: true });
        }
        const fullTierFiles = [
            ["acp/manifest.json", DEFAULT_ACP_MANIFEST],
            ["acp/capabilities.md", DEFAULT_ACP_CAPABILITIES],
            ["docs-schema.json", DEFAULT_DOCS_SCHEMA_JSON],
            ["rules/doc-placement.md", DEFAULT_DOC_PLACEMENT_RULE],
            ["agents/docs-manager.md", DEFAULT_DOCS_MANAGER_AGENT],
        ];
        for (const [relPath, content] of fullTierFiles) {
            await addIfMissing(relPath, content);
        }
    }
    return added;
}
145
// ─── install ─────────────────────────────────────────────────────────────────
// Install the per-tool bootstrap file(s), canonical skills, and (when the
// adapter requests it) the MCP launcher + config, for one supported tool.
program
    .command("install")
    .description("Install the ai-memory bootstrap for a specific tool")
    .requiredOption("--to <tool>", `Target tool (${Object.keys(TOOL_ADAPTERS).join(", ")})`)
    .option("--dir <dir>", "Project root (default: current directory)")
    .option("--capability <cap>", "Inject capability config (browser, screen_capture). Repeatable.", (v, acc) => (acc ?? []).concat(v), [])
    .action(async (opts) => {
        // Tool lookup is case-insensitive; unknown tools exit with the supported list.
        const tool = opts.to.toLowerCase();
        const adapter = TOOL_ADAPTERS[tool];
        if (!adapter) {
            console.error(`Unknown tool: ${tool}`);
            console.error(`Supported: ${Object.keys(TOOL_ADAPTERS).join(", ")}`);
            process.exit(1);
        }
        const projectRoot = resolve(opts.dir ?? process.cwd());
        const aiDir = join(projectRoot, ".ai");
        // Implicit init (core tier only) so install works on a bare project.
        if (!existsSync(aiDir)) {
            console.log(`.ai/ not found — running init first...`);
            await scaffoldAiDir(aiDir, false);
            console.log(`✓ Scaffolded .ai/\n`);
        }
        // Main bootstrap file for the tool (e.g. CLAUDE.md, .cursor rules).
        const destPath = join(projectRoot, adapter.dest);
        await mkdir(dirname(destPath), { recursive: true });
        await writeFile(destPath, adapter.content);
        console.log(`✓ Wrote ${adapter.dest}`);
        // Write extra files (e.g., skill stubs, hooks)
        if (adapter.extraFiles) {
            for (const [relPath, content] of Object.entries(adapter.extraFiles)) {
                const extraPath = join(projectRoot, relPath);
                await mkdir(dirname(extraPath), { recursive: true });
                await writeFile(extraPath, content);
                console.log(`✓ Wrote ${relPath}`);
            }
        }
        // Write canonical skill definitions to .ai/skills/
        for (const [skillName, content] of Object.entries(CANONICAL_SKILLS)) {
            const skillPath = join(projectRoot, ".ai", "skills", skillName, "SKILL.md");
            await mkdir(dirname(skillPath), { recursive: true });
            await writeFile(skillPath, content);
            console.log(`✓ Wrote .ai/skills/${skillName}/SKILL.md (canonical)`);
        }
        if (tool === "claude-code") {
            console.log(`\n Hooks installed: SessionStart (context injection), PreCompact (state preservation)`);
            console.log(` Note: Restart Claude Code for hooks to take effect.`);
        }
        // MCP wiring: launcher script is always (re)written; the MCP config
        // file is only created when absent, to preserve user customizations.
        if (adapter.mcp) {
            const launcherPath = join(projectRoot, MCP_LAUNCHER_PATH);
            await mkdir(dirname(launcherPath), { recursive: true });
            await writeFile(launcherPath, MCP_LAUNCHER);
            console.log(`✓ Wrote ${MCP_LAUNCHER_PATH}`);
            const mcpRelPath = adapter.mcpPath ?? ".mcp.json";
            const mcpPath = join(projectRoot, mcpRelPath);
            const mcpJson = getMCPJson();
            if (!existsSync(mcpPath)) {
                await mkdir(dirname(mcpPath), { recursive: true });
                await writeFile(mcpPath, mcpJson);
                console.log(`✓ Wrote ${mcpRelPath}`);
            }
            else {
                console.log(` ${mcpRelPath} already exists — skipped`);
            }
        }
        // Capability injection: detect environments and inject MCP config for requested capabilities
        const rawCap = opts.capability;
        // Normalize: the repeatable option accumulates an array, but guard the
        // single-value / missing cases too.
        const capabilities = Array.isArray(rawCap) ? rawCap : rawCap ? [rawCap] : [];
        if (capabilities.length > 0) {
            const pkgRoot = join(__dirname_cli, "..", "..");
            const envs = detectEnvironments(projectRoot, pkgRoot);
            for (const cap of capabilities) {
                if (cap !== "browser" && cap !== "screen_capture") {
                    console.warn(` [warn] Unknown capability: ${cap}. Skipping.`);
                    continue;
                }
                let injected = 0;
                for (const envId of envs) {
                    // Per-environment injection is best-effort: one failure
                    // does not abort the remaining environments.
                    try {
                        if (injectCapabilityConfig(projectRoot, envId, cap, pkgRoot)) {
                            console.log(`✓ Injected ${cap} config for ${envId}`);
                            injected++;
                        }
                    }
                    catch (e) {
                        console.warn(` [warn] Failed to inject ${cap} for ${envId}: ${e.message}`);
                    }
                }
                if (cap === "screen_capture" && injected === 0 && envs.length > 0) {
                    console.warn(` [warn] screen_capture has no MCP config — it uses platform tools (e.g. Peekaboo). See capability-specs.json.`);
                }
            }
        }
        console.log(`\nDone. Start a new ${opts.to} session and verify:`);
        console.log(` 1. MCP connected: "Call search_memory with query 'test'" (should return results, not an error)`);
        console.log(` 2. Memory loaded: "What does .ai/IDENTITY.md say about this project?"`);
        console.log(`\nIf search_memory is not available, restart your editor — MCP servers load at startup.`);
    });
241
// ─── mcp ────────────────────────────────────────────────────────────────────
// Start the MCP server over stdio (default) or HTTP.
program
    .command("mcp")
    .description("Start the MCP server")
    .option("--dir <dir>", "Path to .ai/ directory (default: ./.ai)")
    .option("--http", "Use HTTP transport instead of stdio (for cloud agents)")
    // Parse the port explicitly in base 10: passing `parseInt` directly would
    // receive commander's `previous` value as the radix argument.
    .option("--port <port>", "HTTP port (default: 3100)", (v) => Number.parseInt(v, 10))
    .action(async (opts) => {
        // The MCP server locates the memory directory via the AI_DIR env var.
        if (opts.dir)
            process.env.AI_DIR = resolve(opts.dir);
        // Lazy import keeps plain `ai-memory --help` startup fast.
        const { main } = await import("../mcp-server/index.js");
        await main({ http: opts.http, port: opts.port });
    });
254
// ─── validate ───────────────────────────────────────────────────────────────
// Validate every .ai/ file against the canonical schema. Warnings are printed
// but only entries with severity !== "warn" fail the command.
program
    .command("validate")
    .description("Validate all .ai/ files against canonical schema")
    .option("--dir <dir>", "Path to .ai/ directory (default: ./.ai)")
    .action(async (opts) => {
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        if (!existsSync(aiDir)) {
            console.error(`No .ai/ directory found at ${aiDir}. Run \`ai-memory init\` first.`);
            process.exit(1);
        }
        const { validateAll } = await import("../formatter/index.js");
        const results = await validateAll(aiDir);
        // Split findings: anything not explicitly "warn" counts as an error.
        const errs = results.filter((e) => e.severity !== "warn");
        const warns = results.filter((e) => e.severity === "warn");
        if (warns.length > 0) {
            for (const w of warns)
                console.warn(` [warn] ${w.file}: ${w.message}`);
        }
        if (errs.length === 0) {
            console.log("✓ All files valid.");
        }
        else {
            console.error(`${errs.length} validation error(s):\n`);
            for (const e of errs)
                console.error(` ${e.file}: ${e.message}`);
            process.exit(1);
        }
    });
283
// ─── index ───────────────────────────────────────────────────────────────────
// Rebuild the derived memory-index.md from the four memory source files.
program
    .command("index")
    .description("Regenerate memory-index.md from decisions, patterns, debugging, improvements")
    .option("--dir <dir>", "Path to .ai/ directory (default: ./.ai)")
    .action(async (opts) => {
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        if (!existsSync(aiDir)) {
            console.error(`No .ai/ directory found at ${aiDir}. Run \`ai-memory init\` first.`);
            process.exit(1);
        }
        const { generateMemoryIndex } = await import("../formatter/index.js");
        await generateMemoryIndex(aiDir);
        console.log("✓ Regenerated memory-index.md");
    });
298
// ─── fmt ────────────────────────────────────────────────────────────────────
// Auto-format YAML frontmatter across all .ai/ files.
program
    .command("fmt")
    .description("Auto-format YAML frontmatter on .ai/ files")
    .option("--dir <dir>", "Path to .ai/ directory (default: ./.ai)")
    .action(async (opts) => {
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        if (!existsSync(aiDir)) {
            console.error(`No .ai/ directory found at ${aiDir}.`);
            process.exit(1);
        }
        const { formatAll } = await import("../formatter/index.js");
        const count = await formatAll(aiDir);
        console.log(`✓ Formatted ${count} file(s).`);
    });
313
// ─── eval ────────────────────────────────────────────────────────────────────
// Memory health report. NOTE(review): the command object is kept in `evalCmd`,
// presumably so subcommands (custom evals) can be attached elsewhere — confirm
// against the rest of the file before removing the binding.
const evalCmd = program
    .command("eval")
    .description("Run memory health report (or manage custom evals)")
    .option("--dir <dir>", "Path to .ai/ directory (default: ./.ai)")
    .option("--json", "Output as JSON")
    .action(async (opts) => {
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        if (!existsSync(aiDir)) {
            console.error(`No .ai/ directory found at ${aiDir}.`);
            process.exit(1);
        }
        const { runEvals } = await import("../evals/index.js");
        const report = await runEvals(aiDir);
        if (opts.json) {
            // Machine-readable output (e.g. for CI pipelines).
            console.log(JSON.stringify(report, null, 2));
        }
        else {
            printEvalReport(report);
        }
    });
334
/**
 * Pretty-print an eval report to stdout.
 *
 * @param {{ metrics?: Array<{ name: string, value: *, status: string, note?: string }> }} report
 *   Report object; a missing `metrics` array is treated as empty.
 */
function printEvalReport(report) {
    console.log("\n── ai-memory eval report ──────────────────────\n");
    for (const metric of report.metrics ?? []) {
        // Icon by status: good → ✓, warn → ⚠, anything else → ✗.
        let icon = "✗";
        if (metric.status === "good") {
            icon = "✓";
        }
        else if (metric.status === "warn") {
            icon = "⚠";
        }
        const noteSuffix = metric.note ? ` (${metric.note})` : "";
        console.log(` ${icon} ${metric.name}: ${metric.value}${noteSuffix}`);
    }
    console.log("");
}
343
// ─── generate-harness ────────────────────────────────────────────────────────
// Compile enforceable rules (and their tests) from [P0] memory entries into
// .ai/temp/harness.json and .ai/temp/rule-tests/tests.json.
program
    .command("generate-harness")
    .description("Compile harness.json from current [P0] entries")
    .option("--dir <dir>", "Path to .ai/ directory (default: ./.ai)")
    .action(async (opts) => {
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        // Guard consistent with the other subcommands: fail with the standard
        // message when .ai/ is absent instead of erroring deep in the parser.
        if (!existsSync(aiDir)) {
            console.error(`No .ai/ directory found at ${aiDir}. Run \`ai-memory init\` first.`);
            process.exit(1);
        }
        const { readP0Entries, compileHarnessRules, generateRuleTests } = await import("../mcp-server/p0-parser.js");
        const entries = await readP0Entries(aiDir);
        const rules = compileHarnessRules(entries);
        const tests = generateRuleTests(entries);
        const tempDir = join(aiDir, "temp");
        await mkdir(tempDir, { recursive: true });
        await writeFile(join(tempDir, "harness.json"), JSON.stringify(rules, null, 2));
        // Rule tests are optional: only written when at least one exists.
        if (tests.length > 0) {
            const testsDir = join(tempDir, "rule-tests");
            await mkdir(testsDir, { recursive: true });
            await writeFile(join(testsDir, "tests.json"), JSON.stringify(tests, null, 2));
        }
        console.log(`✓ ${rules.length} rule(s) compiled from ${entries.length} [P0] entries.`);
        if (tests.length > 0)
            console.log(` ${tests.length} rule test(s) written.`);
    });
366
// ─── prune ──────────────────────────────────────────────────────────────────
// Report memory entries tagged [DEPRECATED]. Dry-run by default.
program
    .command("prune")
    .description("Review stale or deprecated memory entries")
    .option("--dir <dir>", "Path to .ai/ directory (default: ./.ai)")
    .option("--dry-run", "Report candidates without modifying files (default)", true)
    // Declare the negated form so the `--no-dry-run` hint printed below is
    // actually accepted by commander instead of rejected as an unknown option.
    .option("--no-dry-run", "Archive deprecated entries instead of only reporting them")
    .action(async (opts) => {
        const { readdir, readFile: rf } = await import("fs/promises");
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        const memDir = join(aiDir, "memory");
        if (!existsSync(memDir)) {
            console.error(`No .ai/memory/ directory found at ${memDir}.`);
            process.exit(1);
        }
        const files = await readdir(memDir);
        const candidates = [];
        for (const file of files) {
            if (!file.endsWith(".md"))
                continue;
            const content = await rf(join(memDir, file), "utf-8");
            // A stale entry is a [P0]-[P2] heading carrying [DEPRECATED] on the same line.
            const matches = content.match(/### \[P[0-2]\].+\[DEPRECATED\]/g);
            if (matches) {
                candidates.push(`${file}: ${matches.length} deprecated entry/entries`);
            }
        }
        if (candidates.length === 0) {
            console.log("✓ No stale entries found.");
            return;
        }
        console.log(`Found ${candidates.length} file(s) with deprecated entries:\n`);
        for (const c of candidates)
            console.log(` • ${c}`);
        if (opts.dryRun !== false) {
            console.log("\nRun with --no-dry-run to archive these entries.");
        }
    });
402
// ─── validate-docs ───────────────────────────────────────────────────────────
// Check documentation file placement/naming against .ai/docs-schema.json.
// When --paths is omitted, the file set is taken from newly added staged
// git files (existing docs were validated when introduced).
program
    .command("validate-docs")
    .description("Validate documentation file placement and naming against .ai/docs-schema.json")
    .option("--dir <dir>", "Project root (default: current directory)")
    .option("--paths <paths>", "Comma-separated paths to check (default: from git diff --name-only)")
    .action(async (opts) => {
        const projectRoot = resolve(opts.dir ?? process.cwd());
        const { loadDocsSchema, validateDocPlacement } = await import("../docs-schema.js");
        const schema = await loadDocsSchema(projectRoot);
        if (!schema) {
            // No schema is not an error — doc validation is opt-in via init --full.
            console.log("No .ai/docs-schema.json found. Run `ai-memory init --full` to create one.");
            process.exit(0);
        }
        let paths = [];
        if (opts.paths) {
            paths = opts.paths.split(",").map((p) => p.trim()).filter(Boolean);
        }
        else {
            try {
                // --diff-filter=A: only newly added staged files.
                const { execFileSync } = await import("child_process");
                const out = execFileSync("git", ["diff", "--cached", "--name-only", "--diff-filter=A"], {
                    cwd: projectRoot,
                    encoding: "utf-8",
                });
                paths = out.trim().split("\n").filter(Boolean);
            }
            catch {
                console.log("Not a git repo or no staged files. Use --paths to specify files.");
                process.exit(0);
            }
        }
        // Only markdown/YAML files are treated as documentation.
        const docPaths = paths.filter((p) => p.endsWith(".md") || p.endsWith(".yaml") || p.endsWith(".yml"));
        if (docPaths.length === 0) {
            console.log("✓ No documentation files to validate.");
            process.exit(0);
        }
        let hasErrors = false;
        for (const p of docPaths) {
            // Normalize Windows separators before matching against the schema.
            const rel = p.replace(/\\/g, "/");
            const result = validateDocPlacement(schema, rel, projectRoot);
            if (!result.valid) {
                hasErrors = true;
                for (const e of result.errors)
                    console.error(` ${rel}: ${e}`);
            }
        }
        if (hasErrors) {
            console.error("\nFix the above or add to .ai/docs-schema.json. Use SCREAMING_SNAKE_CASE for doc filenames.");
            process.exit(1);
        }
        console.log(`✓ ${docPaths.length} doc file(s) validated.`);
    });
456
// ─── agent create ────────────────────────────────────────────────────────────
// `ai-memory agent create <name>` — scaffold .ai/agents/<name>/AGENT.md.
const agentCreateCmd = new Command("create")
    .argument("<name>", "Agent name (kebab-case)")
    .description("Scaffold a new agent in .ai/agents/<name>/AGENT.md")
    .option("--dir <dir>", "Path to .ai/ directory")
    .action(async (name, opts) => {
        validateKebabCase(name, "agent");
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        const agentDir = join(aiDir, "agents", name);
        await mkdir(agentDir, { recursive: true });
        const agentFile = join(agentDir, "AGENT.md");
        // Never overwrite an existing agent definition.
        if (existsSync(agentFile)) {
            console.log(`⚠ Already exists: ${agentFile}`);
            return;
        }
        await writeFile(agentFile, agentTemplate(name));
        console.log(`✓ Created .ai/agents/${name}/AGENT.md`);
    });
program
    .command("agent")
    .description("Manage agents")
    .addCommand(agentCreateCmd);
477
// ─── skill create ────────────────────────────────────────────────────────────
// `ai-memory skill create <name>` — scaffold .ai/skills/<name>/SKILL.md.
const skillCreateCmd = new Command("create")
    .argument("<name>", "Skill name (kebab-case)")
    .description("Scaffold a new skill in .ai/skills/<name>/SKILL.md")
    .option("--dir <dir>", "Path to .ai/ directory")
    .action(async (name, opts) => {
        validateKebabCase(name, "skill");
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        const skillDir = join(aiDir, "skills", name);
        await mkdir(skillDir, { recursive: true });
        const skillFile = join(skillDir, "SKILL.md");
        // Never overwrite an existing skill definition.
        if (existsSync(skillFile)) {
            console.log(`⚠ Already exists: ${skillFile}`);
            return;
        }
        await writeFile(skillFile, skillTemplate(name));
        console.log(`✓ Created .ai/skills/${name}/SKILL.md`);
    });
program
    .command("skill")
    .description("Manage skills")
    .addCommand(skillCreateCmd);
498
// ─── rule create ────────────────────────────────────────────────────────────
// `ai-memory rule create <name>` — scaffold .ai/rules/<name>.md.
const ruleCreateCmd = new Command("create")
    .argument("<name>", "Rule name (kebab-case)")
    .description("Scaffold a new rule in .ai/rules/<name>.md")
    .option("--dir <dir>", "Path to .ai/ directory")
    .action(async (name, opts) => {
        validateKebabCase(name, "rule");
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        await mkdir(join(aiDir, "rules"), { recursive: true });
        const rulePath = join(aiDir, "rules", `${name}.md`);
        // Never overwrite an existing rule.
        if (existsSync(rulePath)) {
            console.log(`⚠ Already exists: ${rulePath}`);
            return;
        }
        await writeFile(rulePath, ruleTemplate(name));
        console.log(`✓ Created .ai/rules/${name}.md`);
    });
program
    .command("rule")
    .description("Manage rules")
    .addCommand(ruleCreateCmd);
518
+ // ─── verify ─────────────────────────────────────────────────────────────────
519
+ program
520
+ .command("verify")
521
+ .description("Verify ai-memory installation: .ai/ structure, bootstrap, MCP config")
522
+ .option("--dir <dir>", "Project root (default: current directory)")
523
+ .option("--json", "Output as JSON")
524
+ .action(async (opts) => {
525
+ const projectRoot = resolve(opts.dir ?? process.cwd());
526
+ const aiDir = join(projectRoot, ".ai");
527
+ const checks = [];
528
+ // 1. .ai/ exists with core files
529
+ const aiExists = existsSync(aiDir);
530
+ checks.push({ name: ".ai/ directory", status: aiExists ? "pass" : "fail", detail: aiExists ? "Found" : "Missing — run `ai-memory init`" });
531
+ if (aiExists) {
532
+ for (const f of ["IDENTITY.md", "PROJECT_STATUS.md", "memory/memory-index.md"]) {
533
+ const exists = existsSync(join(aiDir, f));
534
+ checks.push({ name: f, status: exists ? "pass" : "fail", detail: exists ? "Found" : "Missing" });
535
+ }
536
+ }
537
+ // 2. MCP launcher + config
538
+ const launcherExists = existsSync(join(projectRoot, ".ai/mcp-launcher.cjs"));
539
+ checks.push({ name: "MCP launcher", status: launcherExists ? "pass" : "fail", detail: launcherExists ? "Found" : "Missing — run `ai-memory install --to <tool>`" });
540
+ const mcpLocations = [".mcp.json", ".cursor/mcp.json"].filter((p) => existsSync(join(projectRoot, p)));
541
+ checks.push({ name: "MCP config", status: mcpLocations.length > 0 ? "pass" : "warn", detail: mcpLocations.length > 0 ? mcpLocations.join(", ") : "None found — MCP tools won't be available" });
542
+ // 3. Bootstrap installed for at least one tool
543
+ const bootstrapMap = { "claude-code": "CLAUDE.md", cursor: ".cursor/rules/00-load-ai-memory.mdc", windsurf: ".windsurfrules", cline: ".clinerules", copilot: ".github/copilot-instructions.md" };
544
+ const installed = Object.entries(bootstrapMap).filter(([, p]) => existsSync(join(projectRoot, p))).map(([t]) => t);
545
+ checks.push({ name: "Bootstrap", status: installed.length > 0 ? "pass" : "warn", detail: installed.length > 0 ? `Installed for: ${installed.join(", ")}` : "None — run `ai-memory install --to <tool>`" });
546
+ // 4. Bootstrap has canonical redirect
547
+ if (installed.length > 0) {
548
+ const content = readFileSync(join(projectRoot, bootstrapMap[installed[0]]), "utf-8");
549
+ const hasRedirect = content.includes("canonical memory") || content.includes("not in your tool");
550
+ checks.push({ name: "Canonical memory directive", status: hasRedirect ? "pass" : "warn", detail: hasRedirect ? "Agent told to save to .ai/, not tool-native memory" : "Missing — re-run install to update" });
551
+ }
552
+ // 5. Memory index populated
553
+ const idxPath = join(aiDir, "memory/memory-index.md");
554
+ if (existsSync(idxPath)) {
555
+ const idx = readFileSync(idxPath, "utf-8");
556
+ const populated = idx.includes("[P0]") || idx.includes("[P1]");
557
+ checks.push({ name: "Memory index", status: populated ? "pass" : "warn", detail: populated ? "Has entries" : "Empty — run /mem-compound after a session" });
558
+ }
559
+ // 6. Harness validity
560
+ const harnessPath = join(aiDir, "temp/harness.json");
561
+ if (existsSync(harnessPath)) {
562
+ try {
563
+ const rules = JSON.parse(readFileSync(harnessPath, "utf-8"));
564
+ if (!Array.isArray(rules))
565
+ throw new Error("not an array");
566
+ checks.push({ name: "Harness rules", status: "pass", detail: `${rules.length} rules loaded` });
567
+ // Check rules compile
568
+ let broken = 0;
569
+ for (const rule of rules) {
570
+ if (!rule.id || !rule.type || !rule.pattern || !rule.severity) {
571
+ broken++;
572
+ continue;
573
+ }
574
+ if (rule.type === "regex") {
575
+ try {
576
+ new RegExp(rule.pattern);
577
+ }
578
+ catch {
579
+ broken++;
580
+ }
581
+ }
582
+ }
583
+ checks.push({ name: "Rules compile", status: broken === 0 ? "pass" : "fail", detail: broken === 0 ? "All rules valid" : `${broken} rules have errors` });
584
+ // P0 rule coverage
585
+ let p0Count = 0;
586
+ for (const mf of ["memory/decisions.md", "memory/debugging.md"]) {
587
+ const fp = join(aiDir, mf);
588
+ if (existsSync(fp)) {
589
+ p0Count += (readFileSync(fp, "utf-8").match(/### \[P0\]/g) ?? []).length;
590
+ }
591
+ }
592
+ const cov = p0Count > 0 ? Math.round((rules.length / p0Count) * 100) : 0;
593
+ checks.push({ name: "P0 rule coverage", status: cov >= 80 ? "pass" : cov >= 50 ? "warn" : "fail", detail: `${rules.length}/${p0Count} P0 entries have enforceable patterns (${cov}%)` });
594
+ // Rule tests
595
+ const testsPath = join(aiDir, "temp/rule-tests/tests.json");
596
+ if (existsSync(testsPath)) {
597
+ try {
598
+ const tests = JSON.parse(readFileSync(testsPath, "utf-8"));
599
+ const testedIds = new Set(tests.map((t) => t.rule_id));
600
+ const untested = rules.filter((r) => r.id && !testedIds.has(r.id));
601
+ checks.push({ name: "Rule test coverage", status: untested.length === 0 ? "pass" : "warn", detail: untested.length === 0 ? `All ${rules.length} rules have tests` : `${untested.length} rules without tests` });
602
+ }
603
+ catch {
604
+ checks.push({ name: "Rule tests", status: "warn", detail: "tests.json failed to parse" });
605
+ }
606
+ }
607
+ else {
608
+ checks.push({ name: "Rule tests", status: "warn", detail: "No tests.json — run generate-harness to create" });
609
+ }
610
+ }
611
+ catch (e) {
612
+ checks.push({ name: "Harness rules", status: "fail", detail: `harness.json parse error: ${e.message}` });
613
+ }
614
+ }
615
+ else {
616
+ checks.push({ name: "Harness rules", status: "warn", detail: "No harness.json — run `ai-memory generate-harness` or init --full" });
617
+ }
618
+ // Output
619
+ if (opts.json) {
620
+ const p = checks.filter((c) => c.status === "pass").length;
621
+ console.log(JSON.stringify({ checks, summary: { total: checks.length, passed: p, failed: checks.filter((c) => c.status === "fail").length, warnings: checks.filter((c) => c.status === "warn").length } }, null, 2));
622
+ }
623
+ else {
624
+ console.log("=== ai-memory verify ===\n");
625
+ for (const c of checks) {
626
+ const icon = c.status === "pass" ? "✓" : c.status === "fail" ? "✗" : "⚠";
627
+ console.log(` ${icon} ${c.name}: ${c.detail}`);
628
+ }
629
+ const p = checks.filter((c) => c.status === "pass").length;
630
+ const f = checks.filter((c) => c.status === "fail").length;
631
+ const w = checks.filter((c) => c.status === "warn").length;
632
+ console.log(`\n ${p} passed, ${f} failed, ${w} warnings`);
633
+ if (f > 0) {
634
+ console.log("\n Fix failures, then run `ai-memory verify` again.");
635
+ process.exit(1);
636
+ }
637
+ }
638
+ });
639
// ─── eval add (subcommand of eval) ──────────────────────────────────────────
// Scaffolds a TypeScript eval-metric stub under `.ai/temp/custom-evals/`.
const evalAddCmd = new Command("add")
    .argument("<name>", "Eval metric name (kebab-case)")
    .description("Scaffold a custom eval metric")
    .option("--dir <dir>", "Path to .ai/ directory")
    .action(async (name, opts) => {
        // Reject non-kebab-case names before touching the filesystem.
        validateKebabCase(name, "eval metric");
        // `--dir` overrides the default `<cwd>/.ai` location.
        const aiDir = resolve(opts.dir ?? join(process.cwd(), ".ai"));
        const evalsDir = join(aiDir, "temp", "custom-evals");
        await mkdir(evalsDir, { recursive: true });
        const destPath = join(evalsDir, `${name}.ts`);
        // Never clobber an existing metric file — warn and bail instead.
        if (existsSync(destPath)) {
            console.log(`⚠ Already exists: ${destPath}`);
            return;
        }
        await writeFile(destPath, evalTemplate(name));
        console.log(`✓ Created .ai/temp/custom-evals/${name}.ts`);
    });
evalCmd.addCommand(evalAddCmd);
// Hand argv to commander — this dispatches whichever command was invoked.
program.parse(process.argv);
// ─── Scaffold templates ───────────────────────────────────────────────────────
/**
 * Build the markdown scaffold for a new agent definition file.
 * @param {string} name - Agent name (kebab-case); interpolated into the
 *   frontmatter `name:` field and the top-level heading.
 * @returns {string} A markdown document with YAML frontmatter plus Role,
 *   Methodology, and Report Format placeholder sections.
 */
function agentTemplate(name) {
    const doc = `---
name: ${name}
description: Describe what this agent does and when to invoke it.
type: agent
status: active
---

# ${name}

## Role

Describe the agent's role here.

## Methodology

1. Step one
2. Step two
3. Report findings

## Report Format

- **Finding**: Description
- **Recommendation**: What to do about it
`;
    return doc;
}
/**
 * Build the markdown scaffold for a new skill definition file.
 * @param {string} name - Skill name (kebab-case); interpolated into the
 *   frontmatter `name:` field and the top-level heading.
 * @returns {string} A markdown document whose frontmatter declares an empty
 *   `requires.capabilities` list, plus usage/steps/output placeholder sections.
 */
function skillTemplate(name) {
    const doc = `---
name: ${name}
description: Describe what this skill does and when to use it.
type: skill
status: active
requires:
  capabilities: []
---

# ${name}

Declare capabilities in frontmatter \`requires.capabilities\` — do not reference specific tools. Tool-specific config lives in capability-spec.

## When to use

Describe the trigger conditions.

## Steps

1. Step one
2. Step two

## Output

Describe what the skill produces.
`;
    return doc;
}
/**
 * Build the markdown scaffold for a new rule definition file.
 * @param {string} name - Rule name (kebab-case); interpolated into the
 *   frontmatter `name:` field and the top-level heading.
 * @returns {string} A markdown document marked `writable: false`, with
 *   rule-body and rationale placeholder sections.
 */
function ruleTemplate(name) {
    const doc = `---
name: ${name}
description: Describe what behavior this rule enforces.
type: rule
status: active
writable: false
---

# ${name}

Describe the rule here. Use imperative language.

## Why

Explain why this rule exists.
`;
    return doc;
}
/**
 * Build the TypeScript source for a custom eval-metric stub.
 * @param {string} name - Metric name (kebab-case); interpolated into the
 *   header comment and the returned result's `name` field.
 * @returns {string} TypeScript source exporting an async `evaluate(aiDir)`
 *   that resolves to `{ name, value, status, note? }`; the body is a
 *   placeholder the user fills in.
 */
function evalTemplate(name) {
    const source = `// Custom eval: ${name}
// Implement the metric function below.
// Receives the .ai/ directory path, returns { value, status, note? }.

import { readFile } from "fs/promises";
import { join } from "path";
import { existsSync } from "fs";

export async function evaluate(aiDir: string): Promise<{
  name: string;
  value: string | number;
  status: "good" | "warn" | "bad";
  note?: string;
}> {
  // Example: count open items
  // const openItems = await readFile(join(aiDir, "sessions/open-items.md"), "utf-8");
  // const count = (openItems.match(/^- \\[ \\]/gm) ?? []).length;
  return {
    name: "${name}",
    value: 0,
    status: "good",
    note: "Implement this metric",
  };
}
`;
    return source;
}
// ─── Default file contents ────────────────────────────────────────────────────
// The template literals below are seed documents for the `.ai/` scaffold.
// Each holds a markdown file with YAML frontmatter. Note the interpolated
// `last_updated` stamps are evaluated once, at module load time, so every
// file created in a single run shares the same date (the run date).
// Identity document: read-only (`writable: false`) persona, hard constraints,
// and the read-before-task checklist.
const DEFAULT_IDENTITY = `---
id: identity
type: identity
status: active
writable: false
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Identity

You are a senior developer focused on long-term strategy and production readiness.

## What this project is

[Describe the project here — one paragraph]

## Constraints (NEVER without explicit approval)

- Never commit secrets, API keys, or .env files
- Never delete user data without explicit request
- Never deploy to production without explicit request
- Never write full protocols to tool directories — canonical content goes in \`.ai/\`

## Before starting any task

1. Read \`.ai/memory/memory-index.md\`
2. Search \`.ai/memory/\` for relevant bugs, patterns, and decisions
3. Search \`.ai/skills/\` for applicable domain patterns
4. Fetch \`.ai/reference/PROJECT.md\` only when the task requires architecture or data model context
`;
// Project status: writable plan document ("RALPH loop plan file" per its own
// text) maintained jointly by humans and agents.
const DEFAULT_PROJECT_STATUS = `---
id: project-status
type: project-status
status: active
writable: true
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Project Status

> This file evolves with the project. Both humans and AI update it — AI writes what it learned, humans steer the focus. This is your RALPH loop plan file.

## Current Focus

[What is actively being worked on]

## Open Questions

[Things not yet decided]

## What's Working

[Patterns and approaches worth repeating]

## What to Try Next

[Directions to explore in upcoming sessions]
`;
// Multi-entry decision log: entries are tagged [P0]/[P1]/[P2]; [P0] entries may
// carry a `constraint_pattern` block (the harness checks reference these).
const DEFAULT_DECISIONS = `---
id: memory-decisions
type: decision
layout: multi-entry
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Decisions

**Format:** Tag each entry \`[P0]\`, \`[P1]\`, or \`[P2]\`. Include context, decision, rationale, tradeoffs.
Optional: \`**Supersedes:**\`, \`**Superseded by:**\`, \`**Links to:**\` for linked entries.

For \`[P0]\` entries, add a \`constraint_pattern\` block if the rule can be expressed as a code check.

---

<!-- Add entries below. -->
`;
// Multi-entry pattern log: each entry records a pattern and its anti-pattern.
const DEFAULT_PATTERNS = `---
id: memory-patterns
type: pattern
layout: multi-entry
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Patterns

**Format:** Tag each entry \`[P0]\`, \`[P1]\`, or \`[P2]\`. Include pattern and anti-pattern.
Optional: \`**Links to:**\` for related entries.

---

<!-- Add entries below. -->
`;
// Multi-entry debugging log: symptom / root cause / fix / prevention per entry.
const DEFAULT_DEBUGGING = `---
id: memory-debugging
type: debugging
layout: multi-entry
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Debugging

**Format:** Tag each entry \`[P0]\`, \`[P1]\`, or \`[P2]\`. Include symptom, root cause, fix, and how to prevent recurrence.

---

<!-- Add entries below. -->
`;
// Multi-entry improvements log; frontmatter type is `decision` (not a
// dedicated `improvement` type).
const DEFAULT_IMPROVEMENTS = `---
id: memory-improvements
type: decision
layout: multi-entry
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Improvements

Incremental improvements discovered over time.

---

<!-- Add entries below. -->
`;
// Memory index seed: ships with only a placeholder — the body instructs users
// to regenerate it via `ai-memory index` or `/mem-compound`.
const DEFAULT_MEMORY_INDEX = `---
id: memory-index
type: index
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Memory Index

**Auto-generated.** Run \`ai-memory index\` or \`/mem-compound\` to regenerate.

---

<!-- Index will be generated here. -->
`;
// Read-only (`writable: false`) shared audit methodology that auditor agents
// inherit: principles, initial/closing steps, and severity-ordered reporting.
const DEFAULT_BASE_AUDITOR = `---
name: _base-auditor
description: Shared audit methodology. All auditor agents inherit these principles.
type: agent
status: active
writable: false
---

# Base Auditor Protocol

## Core Principles

- Verify before asserting
- Cite evidence (file path, line number, log excerpt)
- Prioritize by impact: CRITICAL > HIGH > MEDIUM > LOW

## Initial Steps

1. Read scope and methodology from the specific agent file
2. Gather context (relevant files, configs, rules)
3. Execute checks per methodology

## Report Format

- **CRITICAL:** Issues that break the build or violate [P0] constraints
- **HIGH:** Issues likely to cause bugs or inconsistencies
- **MEDIUM:** Improvements worth making
- **LOW:** Minor suggestions

## Closing Steps

- Summarize findings
- Recommend remediation order
- Flag any blockers
`;
// Copy-and-rename starting point for new agent files (status: experimental).
const DEFAULT_AGENT_TEMPLATE = `---
name: _template
description: Template for creating new agents. Copy and rename.
type: agent
status: experimental
---

# Agent Name

## Role

Describe what this agent does.

## When to invoke

Describe the trigger.

## Methodology

1. Step one
2. Step two

## Report Format

Describe what the agent produces.
`;
// Open/Closed task checklist; the HTML comments document the expected
// checkbox line format for each section.
const DEFAULT_OPEN_ITEMS = `---
id: open-items
type: decision
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Open Items

**Task format:** Items may be broad or categorical. Work done must be broken down into atomic tasks that fit RALPH loops and avoid conflicts when agents work in parallel.

## Open

<!-- Format: \`- [ ] Brief description (source: doc path or BACKLOG)\` -->

## Closed

<!-- Format: \`- [x] Brief description (resolved: how)\` -->
`;
// Curated one-line-per-session history of past session decisions.
const DEFAULT_THREAD_ARCHIVE = `---
id: thread-archive
type: decision
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Thread Archive

Curated history of past session decisions. One line per session.

---

<!-- Format: [YYYY-MM-DD] Brief description of what was done and decided. -->
`;
// On-demand project reference (architecture / tech stack / data models /
// integrations); its own text says it is fetched only when a task needs it.
const DEFAULT_PROJECT = `---
id: project
type: toolbox
status: active
last_updated: ${new Date().toISOString().slice(0, 10)}
---

# Project

**Loaded on demand** — fetched only when a task requires architecture, data models, or integration context.

## Architecture

[Describe the system architecture]

## Tech Stack

[List the key technologies]

## Data Models

[Describe key data structures]

## Integrations

[List external services and APIs]
`;
// ACP agent manifest: five advertised capabilities plus an MCP/stdio transport.
// Kept as a plain object here and serialized once (2-space pretty-printed).
const acpManifestObject = {
    name: "ai-memory-agent",
    description: "Persistent project memory with governance enforcement",
    version: "0.1.0",
    capabilities: [
        "memory.read",
        "memory.write",
        "memory.search",
        "memory.validate",
        "compound.run",
    ],
    transport: {
        type: "mcp",
        mode: "stdio",
        command: "npx @radix-ai/ai-memory mcp",
    },
};
const DEFAULT_ACP_MANIFEST = JSON.stringify(acpManifestObject, null, 2);
// Human-readable companion to the ACP manifest: one section per advertised
// capability, naming the tools that implement it (commit_memory,
// validate_context, validate_schema, ...).
const DEFAULT_ACP_CAPABILITIES = `---
id: acp-capabilities
type: toolbox
status: active
writable: false
---

# ACP Capabilities

Human-readable description of this agent's capabilities for ACP orchestrators.

## memory.read
Read memory entries, identity, project status, and session archive.

## memory.write
Write new memory entries via \`commit_memory\`. Immutable paths are enforced.

## memory.search
Keyword and semantic search across all \`.ai/\` memory files.

## memory.validate
Validate proposed code changes against [P0] constraint rules (\`validate_context\`).
Validate memory entries against canonical schema (\`validate_schema\`).

## compound.run
Execute the full compound loop: capture → conflict check → governance gate → index.
`;
// Rule steering agents to resolve doc paths via get_doc_path /
// validate_doc_placement / list_doc_types instead of inventing paths.
const DEFAULT_DOC_PLACEMENT_RULE = `---
id: doc-placement
type: rule
status: active
---

# Doc placement

When creating or moving documentation files under \`docs/\` or \`.ai/\` (excluding \`.ai/memory/\`):

1. **Before writing:** Call \`get_doc_path\` with the doc type and optional slug. Do not infer paths.
2. **After writing:** Call \`validate_doc_placement\` for the path(s). Fix any errors before committing.
3. **Doc types:** Use \`list_doc_types\` to see available types and patterns. Filenames use SCREAMING_SNAKE_CASE by default.
`;
// Agent definition for doc migration, schema maintenance, and placement
// audits, built on the same three doc tools.
const DEFAULT_DOCS_MANAGER_AGENT = `---
id: docs-manager
type: agent
status: active
---

# Docs Manager

You manage project documentation structure and schema. Use when migrating docs, creating schemas, or auditing doc placement.

## When to run

- Migrating existing docs to schema-driven paths
- Creating or updating \`.ai/docs-schema.json\`
- Auditing doc placement across the repo

## Tools

- \`list_doc_types\` — see current schema
- \`get_doc_path\` — resolve canonical path for a doc type
- \`validate_doc_placement\` — check paths against schema

## Methodology

1. Load schema via \`list_doc_types\`
2. For each doc to migrate: \`get_doc_path\` → move/update → \`validate_doc_placement\`
3. Update \`.ai/docs-schema.json\` if new doc types are needed
`;
1108
+ //# sourceMappingURL=index.js.map