compound-agent 1.3.3 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +71 -1
- package/dist/cli.js +1373 -364
- package/dist/cli.js.map +1 -1
- package/dist/index.d.ts +264 -30
- package/dist/index.js +781 -66
- package/dist/index.js.map +1 -1
- package/docs/research/AgenticAiCodebaseGuide.md +1206 -0
- package/docs/research/BuildingACCompilerAnthropic.md +116 -0
- package/docs/research/HarnessEngineeringOpenAi.md +220 -0
- package/docs/research/code-review/systematic-review-methodology.md +409 -0
- package/docs/research/index.md +64 -0
- package/docs/research/learning-systems/knowledge-compounding-for-agents.md +695 -0
- package/docs/research/property-testing/property-based-testing-and-invariants.md +742 -0
- package/docs/research/tdd/test-driven-development-methodology.md +547 -0
- package/docs/research/test-optimization-strategies.md +401 -0
- package/package.json +9 -5
package/dist/cli.js
CHANGED
|
@@ -1,17 +1,521 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
|
+
import { createRequire } from 'module';
|
|
3
|
+
import { mkdirSync, writeFileSync, statSync, unlinkSync, existsSync, readFileSync, copyFileSync, chmodSync, lstatSync, readdirSync } from 'fs';
|
|
4
|
+
import path, { join, dirname, resolve, extname, relative } from 'path';
|
|
5
|
+
import { createHash } from 'crypto';
|
|
6
|
+
import * as fs from 'fs/promises';
|
|
7
|
+
import { readFile, mkdir, appendFile, writeFile, chmod, rm, rename, readdir } from 'fs/promises';
|
|
2
8
|
import { Command } from 'commander';
|
|
3
9
|
import { getLlama, resolveModelFile } from 'node-llama-cpp';
|
|
4
|
-
import { mkdirSync, writeFileSync, statSync, unlinkSync, existsSync, readFileSync, copyFileSync, chmodSync, readdirSync } from 'fs';
|
|
5
10
|
import { homedir, tmpdir } from 'os';
|
|
6
|
-
import path, { join, dirname, resolve, relative } from 'path';
|
|
7
|
-
import * as fs from 'fs/promises';
|
|
8
|
-
import { readFile, mkdir, appendFile, writeFile, chmod, rm, rename, readdir } from 'fs/promises';
|
|
9
|
-
import { createHash } from 'crypto';
|
|
10
11
|
import { z } from 'zod';
|
|
11
|
-
import { createRequire } from 'module';
|
|
12
12
|
import { execSync, execFileSync, spawn } from 'child_process';
|
|
13
|
+
import { fileURLToPath } from 'url';
|
|
13
14
|
import chalk from 'chalk';
|
|
14
15
|
|
|
16
|
+
var __defProp = Object.defineProperty;
var __getOwnPropNames = Object.getOwnPropertyNames;
// Bundler-generated lazy-module helper: runs the module initializer once
// and caches its result; `fn` is zeroed after the first call so subsequent
// calls return the cached `res` without re-running the body.
var __esm = (fn, res) => function __init() {
  return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
};
// Bundler-generated export helper: installs enumerable getters on `target`
// for every name in `all`, preserving live-binding semantics.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
|
|
25
|
+
// Verifies once per process that the better-sqlite3 native addon loads and
// works, caching its constructor. Throws a setup-hint error (with `cause`)
// when the native build is missing or broken.
function ensureSqliteAvailable() {
  if (checked) return;
  try {
    const loaded = require2("better-sqlite3");
    const Ctor = loaded.default || loaded;
    // Opening and closing an in-memory DB proves the native binding works.
    const probe = new Ctor(":memory:");
    probe.close();
    DatabaseConstructor = Ctor;
    checked = true;
  } catch (cause) {
    throw new Error(
      'better-sqlite3 failed to load.\nRun: npx ca setup (auto-configures pnpm native builds)\nOr manually add to your package.json:\n "pnpm": { "onlyBuiltDependencies": ["better-sqlite3", "node-llama-cpp"] }\nThen run: pnpm install && pnpm rebuild better-sqlite3\nFor npm/yarn, run: npm rebuild better-sqlite3',
      { cause }
    );
  }
}
|
|
41
|
+
// Returns the better-sqlite3 Database constructor, running the one-time
// availability probe first (which throws a setup-hint error on failure).
function getDatabaseConstructor() {
  ensureSqliteAvailable();
  return DatabaseConstructor;
}
|
|
45
|
+
// Module-level state for the lazy better-sqlite3 probe.
var require2, checked, DatabaseConstructor;
var init_availability = __esm({
  "src/memory/storage/sqlite/availability.ts"() {
    // CJS require bridge so the native addon can be loaded from ESM.
    require2 = createRequire(import.meta.url);
    checked = false; // flips to true after one successful probe
    DatabaseConstructor = null; // cached constructor once probed
  }
});
|
|
53
|
+
|
|
54
|
+
// src/memory/storage/sqlite-knowledge/schema.ts
// Applies the knowledge-base schema and stamps KNOWLEDGE_SCHEMA_VERSION
// into SQLite's user_version pragma so openKnowledgeDb can detect stale
// databases.
function createKnowledgeSchema(database) {
  database.exec(SCHEMA_SQL2);
  database.pragma(`user_version = ${KNOWLEDGE_SCHEMA_VERSION}`);
}
var KNOWLEDGE_SCHEMA_VERSION, SCHEMA_SQL2;
var init_schema = __esm({
  "src/memory/storage/sqlite-knowledge/schema.ts"() {
    // Bump when the schema below changes; a mismatched on-disk DB is
    // deleted and rebuilt (the index is a derived cache).
    KNOWLEDGE_SCHEMA_VERSION = 2;
    // chunks: one row per indexed document chunk. chunks_fts is an
    // external-content FTS5 index over `text`, kept in sync by the
    // insert/delete/update triggers below (deletes use the FTS5 special
    // 'delete' command required for content= tables).
    SCHEMA_SQL2 = `
    CREATE TABLE IF NOT EXISTS chunks (
      id TEXT PRIMARY KEY,
      file_path TEXT NOT NULL,
      start_line INTEGER NOT NULL,
      end_line INTEGER NOT NULL,
      content_hash TEXT NOT NULL,
      text TEXT NOT NULL,
      embedding BLOB,
      model TEXT,
      updated_at TEXT NOT NULL
    );

    CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
      text,
      content='chunks', content_rowid='rowid'
    );

    CREATE TRIGGER IF NOT EXISTS chunks_ai AFTER INSERT ON chunks BEGIN
      INSERT INTO chunks_fts(rowid, text)
      VALUES (new.rowid, new.text);
    END;

    CREATE TRIGGER IF NOT EXISTS chunks_ad AFTER DELETE ON chunks BEGIN
      INSERT INTO chunks_fts(chunks_fts, rowid, text)
      VALUES ('delete', old.rowid, old.text);
    END;

    CREATE TRIGGER IF NOT EXISTS chunks_au AFTER UPDATE ON chunks BEGIN
      INSERT INTO chunks_fts(chunks_fts, rowid, text)
      VALUES ('delete', old.rowid, old.text);
      INSERT INTO chunks_fts(rowid, text)
      VALUES (new.rowid, new.text);
    END;

    CREATE INDEX IF NOT EXISTS idx_chunks_file_path ON chunks(file_path);

    CREATE TABLE IF NOT EXISTS metadata (
      key TEXT PRIMARY KEY,
      value TEXT NOT NULL
    );
    `;
  }
});
|
|
107
|
+
// Opens (or returns a cached) knowledge DB handle for a repo.
//
// On-disk databases whose user_version does not match
// KNOWLEDGE_SCHEMA_VERSION are deleted and rebuilt from scratch — the
// index is a derived cache, so dropping it is safe.
//
// Fix: when recreating a stale database, also remove the WAL/SHM sidecar
// files. The DB runs in WAL mode, and leaving a stale `-wal` beside a
// freshly created file risks replaying old pages into the new database.
function openKnowledgeDb(repoRoot, options = {}) {
  const { inMemory = false } = options;
  // In-memory handles are still cached per repoRoot so repeated calls in
  // one process share a single database.
  const key = inMemory ? `:memory:${repoRoot}` : join(repoRoot, KNOWLEDGE_DB_PATH);
  const cached = knowledgeDbMap.get(key);
  if (cached) {
    return cached;
  }
  const Database = getDatabaseConstructor();
  let database;
  if (inMemory) {
    database = new Database(":memory:");
  } else {
    const dir = dirname(key);
    mkdirSync(dir, { recursive: true });
    database = new Database(key);
    const version = database.pragma("user_version", { simple: true });
    if (version !== 0 && version !== KNOWLEDGE_SCHEMA_VERSION) {
      database.close();
      // Best-effort removal of the DB file and its WAL sidecars; any of
      // them may be absent.
      for (const stale of [key, `${key}-wal`, `${key}-shm`]) {
        try {
          unlinkSync(stale);
        } catch {
        }
      }
      database = new Database(key);
    }
    database.pragma("journal_mode = WAL");
  }
  createKnowledgeSchema(database);
  knowledgeDbMap.set(key, database);
  return database;
}
|
|
137
|
+
// Closes every cached knowledge DB handle and empties the handle cache.
function closeKnowledgeDb() {
  knowledgeDbMap.forEach((database) => database.close());
  knowledgeDbMap.clear();
}
|
|
143
|
+
// Shared connection state for the knowledge database.
var KNOWLEDGE_DB_PATH, knowledgeDbMap;
var init_connection = __esm({
  "src/memory/storage/sqlite-knowledge/connection.ts"() {
    init_availability();
    init_schema();
    // Location of the on-disk index, relative to the repo root.
    KNOWLEDGE_DB_PATH = ".claude/.cache/knowledge.sqlite";
    // Open handles keyed by resolved DB path (or ":memory:<repoRoot>").
    knowledgeDbMap = /* @__PURE__ */ new Map();
  }
});
|
|
152
|
+
// Derives a stable 16-hex-char chunk id from the chunk's location. The id
// is position-based (path + line span), not content-based, so re-chunking
// an unchanged file yields the same ids.
function generateChunkId(filePath, startLine, endLine) {
  const locator = `${filePath}:${startLine}:${endLine}`;
  const digest = createHash("sha256").update(locator).digest("hex");
  return digest.slice(0, 16);
}
|
|
155
|
+
// Full sha256 hex digest of a chunk's text; used for change detection.
function chunkContentHash(text) {
  const hasher = createHash("sha256");
  hasher.update(text);
  return hasher.digest("hex");
}
|
|
158
|
+
// File-type whitelists for the knowledge indexer.
var SUPPORTED_EXTENSIONS, CODE_EXTENSIONS;
var init_types = __esm({
  "src/memory/knowledge/types.ts"() {
    // Extensions the indexer will read at all (docs + code).
    SUPPORTED_EXTENSIONS = /* @__PURE__ */ new Set([
      ".md",
      ".txt",
      ".rst",
      ".ts",
      ".py",
      ".js",
      ".tsx",
      ".jsx"
    ]);
    // Subset chunked with the code splitter rather than the prose splitter.
    CODE_EXTENSIONS = /* @__PURE__ */ new Set([
      ".ts",
      ".tsx",
      ".js",
      ".jsx",
      ".py"
    ]);
  }
});
|
|
180
|
+
|
|
181
|
+
// src/memory/storage/sqlite-knowledge/sync.ts
// Inserts or replaces chunk rows in a single transaction.
// NOTE(review): the `embeddings` parameter is accepted but never used —
// the embedding column is always written as NULL. Confirm whether
// embedding persistence was intentionally deferred.
// NOTE(review): INSERT OR REPLACE resolves conflicts by deleting the old
// row, which does not fire the FTS delete trigger unless
// recursive_triggers is on — verify ids cannot collide here (callers
// delete by file_path before upserting).
function upsertChunks(repoRoot, chunks, embeddings) {
  if (chunks.length === 0) return;
  const database = openKnowledgeDb(repoRoot);
  const statement = database.prepare(`
    INSERT OR REPLACE INTO chunks (id, file_path, start_line, end_line, content_hash, text, embedding, model, updated_at)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
  `);
  const runAll = database.transaction((items) => {
    items.forEach((chunk) => {
      statement.run(
        chunk.id,
        chunk.filePath,
        chunk.startLine,
        chunk.endLine,
        chunk.contentHash,
        chunk.text,
        null, // embedding — see NOTE above
        chunk.model ?? null,
        chunk.updatedAt
      );
    });
  });
  runAll(chunks);
}
|
|
207
|
+
// Deletes all chunks belonging to the given file paths in one transaction.
function deleteChunksByFilePath(repoRoot, filePaths) {
  if (filePaths.length === 0) return;
  const database = openKnowledgeDb(repoRoot);
  const statement = database.prepare("DELETE FROM chunks WHERE file_path = ?");
  const runAll = database.transaction((paths) => {
    paths.forEach((candidate) => statement.run(candidate));
  });
  runAll(filePaths);
}
|
|
218
|
+
// Lists every distinct file path currently present in the chunk index.
function getIndexedFilePaths(repoRoot) {
  const rows = openKnowledgeDb(repoRoot)
    .prepare("SELECT DISTINCT file_path FROM chunks")
    .all();
  return rows.map(({ file_path }) => file_path);
}
|
|
223
|
+
// Total number of chunks in the index.
function getChunkCount(repoRoot) {
  const { cnt } = openKnowledgeDb(repoRoot)
    .prepare("SELECT COUNT(*) as cnt FROM chunks")
    .get();
  return cnt;
}
|
|
228
|
+
// Number of chunks indexed for a single file.
function getChunkCountByFilePath(repoRoot, filePath) {
  const { cnt } = openKnowledgeDb(repoRoot)
    .prepare("SELECT COUNT(*) as cnt FROM chunks WHERE file_path = ?")
    .get(filePath);
  return cnt;
}
|
|
233
|
+
// Records the timestamp of the most recent successful index run in the
// metadata table.
function setLastIndexTime(repoRoot, time) {
  openKnowledgeDb(repoRoot)
    .prepare("INSERT OR REPLACE INTO metadata (key, value) VALUES ('last_index_time', ?)")
    .run(time);
}
|
|
237
|
+
var init_sync = __esm({
  "src/memory/storage/sqlite-knowledge/sync.ts"() {
    // Sync helpers only need the shared connection module.
    init_connection();
  }
});
|
242
|
+
// Heuristic binary check: text decoded as UTF-8 should never contain NUL.
function isBinary(content) {
  return content.indexOf("\0") !== -1;
}
|
|
245
|
+
// Dispatches to the splitter appropriate for a file extension:
// markdown gets heading/paragraph-aware splitting, code files get
// blank-line splitting, everything else (.rst, .txt, ...) gets plain
// paragraph splitting. (.rst is never in CODE_EXTENSIONS, so folding it
// into the default branch preserves behavior.)
function splitIntoSections(fileLines, ext) {
  if (ext === ".md") return splitMarkdown(fileLines);
  if (CODE_EXTENSIONS.has(ext)) return splitCode(fileLines);
  return splitParagraphs(fileLines);
}
|
|
257
|
+
// Splits markdown lines into sections. Boundaries are level-2+ headings
// and blank lines, except inside fenced code blocks (``` toggles), which
// are never split. A closing blank line stays attached to the section it
// terminates. Line numbers are 1-based.
function splitMarkdown(fileLines) {
  const sections = [];
  let current = [];
  let insideFence = false;
  fileLines.forEach((text, index) => {
    const entry = { lineNumber: index + 1, text };
    if (text.trimStart().startsWith("```")) {
      insideFence = !insideFence;
      current.push(entry);
      return;
    }
    if (insideFence) {
      // Fenced content is opaque: never a section boundary.
      current.push(entry);
      return;
    }
    if (/^#{2,}\s/.test(text) && current.length > 0) {
      // A sub-heading starts a fresh section.
      sections.push(current);
      current = [entry];
      return;
    }
    const hasContent = current.some((l) => l.text.trim() !== "");
    if (text.trim() === "" && current.length > 0 && hasContent) {
      // Blank line closes a non-empty section (the blank stays with it).
      current.push(entry);
      sections.push(current);
      current = [];
      return;
    }
    current.push(entry);
  });
  if (current.length > 0) {
    sections.push(current);
  }
  return sections;
}
|
|
287
|
+
// Splits code lines into blank-line separated sections. A blank line only
// starts a new section when more non-blank code follows; trailing blanks
// stay attached to the final section. Line numbers are 1-based.
function splitCode(fileLines) {
  const sections = [];
  let current = [];
  const hasCodeAfter = (index) =>
    fileLines.slice(index + 1).some((l) => l.trim() !== "");
  fileLines.forEach((text, index) => {
    const entry = { lineNumber: index + 1, text };
    if (text.trim() === "" && current.length > 0 && hasCodeAfter(index)) {
      sections.push(current);
      current = [entry];
      return;
    }
    current.push(entry);
  });
  if (current.length > 0) {
    sections.push(current);
  }
  return sections;
}
|
|
314
|
+
// Splits plain text into paragraphs at blank lines. Each separating blank
// line opens the following section (so separators are preserved in the
// output). Line numbers are 1-based.
function splitParagraphs(fileLines) {
  const sections = [];
  let current = [];
  fileLines.forEach((text, index) => {
    const entry = { lineNumber: index + 1, text };
    if (text.trim() === "" && current.length > 0) {
      sections.push(current);
      current = [entry];
    } else {
      current.push(entry);
    }
  });
  if (current.length > 0) {
    sections.push(current);
  }
  return sections;
}
|
|
332
|
+
// Reassembles a section's lines into one newline-joined string.
function sectionText(section) {
  const texts = section.map(({ text }) => text);
  return texts.join("\n");
}
|
|
335
|
+
// Splits one file's content into overlapping chunks for indexing.
//
// Sections (headings / paragraphs / blank-line groups, chosen by file
// type) are accumulated until roughly `targetSize` characters, then
// emitted as a chunk; the tail of each chunk (up to `overlapSize` chars)
// is prepended to the next chunk so context spans chunk boundaries.
// Returns [] for empty or binary content.
//
// Fix: the `options` parameter was declared but ignored — the size knobs
// were always the module defaults. `targetSize` / `overlapSize` overrides
// are now honored; callers passing nothing keep the previous behavior.
function chunkFile(filePath, content, options = {}) {
  if (content.trim() === "") return [];
  if (isBinary(content)) return [];
  const targetSize = options.targetSize ?? DEFAULT_TARGET_SIZE;
  const overlapSize = options.overlapSize ?? DEFAULT_OVERLAP_SIZE;
  const fileLines = content.split("\n");
  const ext = extname(filePath).toLowerCase();
  const sections = splitIntoSections(fileLines, ext);
  const chunks = [];
  let accumulated = [];
  let accumulatedLength = 0;
  // Emits `lines` (plus the previous chunk's overlap) as one chunk and
  // returns the trailing lines to use as the next chunk's overlap.
  function emitChunk(lines, overlapLines2) {
    if (lines.length === 0) return [];
    const allLines = [...overlapLines2, ...lines];
    const text = allLines.map((l) => l.text).join("\n");
    const startLine = allLines[0].lineNumber;
    const endLine = allLines[allLines.length - 1].lineNumber;
    chunks.push({
      id: generateChunkId(filePath, startLine, endLine),
      filePath,
      startLine,
      endLine,
      text,
      contentHash: chunkContentHash(text)
    });
    if (overlapSize <= 0) return [];
    // Take trailing lines until the overlap budget is spent; always keep
    // at least one line so the overlap is never empty.
    const overlapResult = [];
    let overlapLen = 0;
    for (let i = lines.length - 1; i >= 0; i--) {
      const lineLen = lines[i].text.length + 1;
      if (overlapLen + lineLen > overlapSize && overlapResult.length > 0) break;
      overlapResult.unshift(lines[i]);
      overlapLen += lineLen;
    }
    return overlapResult;
  }
  let overlapLines = [];
  for (const section of sections) {
    const sectionLen = sectionText(section).length;
    // Flush before adding a section that would overflow the target.
    if (accumulatedLength > 0 && accumulatedLength + sectionLen > targetSize) {
      overlapLines = emitChunk(accumulated, overlapLines);
      accumulated = [];
      accumulatedLength = 0;
    }
    accumulated.push(...section);
    accumulatedLength += sectionLen;
    // A single oversized section is emitted as its own chunk immediately.
    if (accumulatedLength > targetSize) {
      overlapLines = emitChunk(accumulated, overlapLines);
      accumulated = [];
      accumulatedLength = 0;
    }
  }
  if (accumulated.length > 0) {
    emitChunk(accumulated, overlapLines);
  }
  return chunks;
}
|
|
392
|
+
// Chunking size defaults (measured in characters, not tokens).
var DEFAULT_TARGET_SIZE, DEFAULT_OVERLAP_SIZE;
var init_chunking = __esm({
  "src/memory/knowledge/chunking.ts"() {
    init_types();
    DEFAULT_TARGET_SIZE = 1600; // preferred chunk size
    DEFAULT_OVERLAP_SIZE = 320; // tail carried over between adjacent chunks
  }
});
|
|
400
|
+
|
|
401
|
+
// src/memory/knowledge/indexing.ts
// Public surface of the lazily-initialized indexing module.
var indexing_exports = {};
__export(indexing_exports, {
  indexDocs: () => indexDocs
});
|
|
406
|
+
// sha256 hex digest of a whole file's content; used to skip re-indexing
// unchanged files.
function fileHash(content) {
  const hasher = createHash("sha256");
  return hasher.update(content).digest("hex");
}
|
|
409
|
+
// Metadata-table key under which a file's content hash is stored.
function fileHashKey(relativePath) {
  return `file_hash:${relativePath}`;
}
|
|
412
|
+
// Reads the previously stored content hash for a file, or null when the
// file has never been indexed.
function getStoredFileHash(repoRoot, relativePath) {
  const row = openKnowledgeDb(repoRoot)
    .prepare("SELECT value FROM metadata WHERE key = ?")
    .get(fileHashKey(relativePath));
  return row?.value ?? null;
}
|
|
417
|
+
// Stores (or overwrites) a file's content hash in the metadata table.
function setFileHash(repoRoot, relativePath, hash) {
  openKnowledgeDb(repoRoot)
    .prepare("INSERT OR REPLACE INTO metadata (key, value) VALUES (?, ?)")
    .run(fileHashKey(relativePath), hash);
}
|
|
421
|
+
// Drops a file's stored content hash (used when the file disappears).
function removeFileHash(repoRoot, relativePath) {
  openKnowledgeDb(repoRoot)
    .prepare("DELETE FROM metadata WHERE key = ?")
    .run(fileHashKey(relativePath));
}
|
|
425
|
+
// Recursively lists indexable files under baseDir, returned as paths
// relative to repoRoot. An unreadable or missing directory yields [].
async function walkSupportedFiles(baseDir, repoRoot) {
  let entries;
  try {
    entries = await readdir(baseDir, { recursive: true, withFileTypes: true });
  } catch {
    // Deliberate best-effort: missing docs dir simply means nothing to index.
    return [];
  }
  const results = [];
  for (const entry of entries) {
    if (!entry.isFile()) continue;
    if (!SUPPORTED_EXTENSIONS.has(extname(entry.name).toLowerCase())) continue;
    // parentPath with a fallback to .path — presumably to cover the Node
    // versions where one or the other is available; verify against the
    // supported Node range.
    const fullPath = join(entry.parentPath ?? entry.path, entry.name);
    results.push(relative(repoRoot, fullPath));
  }
  return results;
}
|
|
443
|
+
// Incrementally indexes documentation under `repoRoot/docsDir` into the
// knowledge DB. Files whose stored sha256 matches are skipped unless
// `force` is set; files that have disappeared from disk have their chunks
// and stored hashes purged. Returns per-run statistics.
async function indexDocs(repoRoot, options = {}) {
  const start = Date.now();
  const docsDir = options.docsDir ?? "docs";
  const force = options.force ?? false;
  const stats = {
    filesIndexed: 0,
    filesSkipped: 0,
    filesErrored: 0,
    chunksCreated: 0,
    chunksDeleted: 0,
    durationMs: 0
  };
  const docsPath = join(repoRoot, docsDir);
  const filePaths = await walkSupportedFiles(docsPath, repoRoot);
  for (const relPath of filePaths) {
    const fullPath = join(repoRoot, relPath);
    let content;
    try {
      content = await readFile(fullPath, "utf-8");
    } catch {
      // File vanished or became unreadable between listing and reading.
      stats.filesErrored++;
      continue;
    }
    const hash = fileHash(content);
    const storedHash = getStoredFileHash(repoRoot, relPath);
    if (!force && storedHash === hash) {
      // Unchanged since the last run.
      stats.filesSkipped++;
      continue;
    }
    const chunks = chunkFile(relPath, content);
    const now = (/* @__PURE__ */ new Date()).toISOString();
    const knowledgeChunks = chunks.map((chunk) => ({
      id: chunk.id,
      filePath: chunk.filePath,
      startLine: chunk.startLine,
      endLine: chunk.endLine,
      contentHash: chunk.contentHash,
      text: chunk.text,
      updatedAt: now
    }));
    const db = openKnowledgeDb(repoRoot);
    // Replace this file's chunks and record its hash atomically so a crash
    // cannot leave the hash claiming chunks that were not written.
    db.transaction(() => {
      deleteChunksByFilePath(repoRoot, [relPath]);
      if (knowledgeChunks.length > 0) {
        upsertChunks(repoRoot, knowledgeChunks);
      }
      setFileHash(repoRoot, relPath, hash);
    })();
    stats.filesIndexed++;
    stats.chunksCreated += knowledgeChunks.length;
  }
  // Purge index entries for files no longer present on disk.
  const indexedPaths = getIndexedFilePaths(repoRoot);
  const currentPathSet = new Set(filePaths);
  const stalePaths = indexedPaths.filter((p) => !currentPathSet.has(p));
  if (stalePaths.length > 0) {
    // Count before deleting so chunksDeleted reflects what was removed.
    for (const path2 of stalePaths) {
      stats.chunksDeleted += getChunkCountByFilePath(repoRoot, path2);
    }
    deleteChunksByFilePath(repoRoot, stalePaths);
    for (const path2 of stalePaths) {
      removeFileHash(repoRoot, path2);
    }
  }
  setLastIndexTime(repoRoot, (/* @__PURE__ */ new Date()).toISOString());
  stats.durationMs = Date.now() - start;
  return stats;
}
|
|
510
|
+
var init_indexing = __esm({
  "src/memory/knowledge/indexing.ts"() {
    // Indexing depends on the connection, sync, chunking and type modules.
    init_connection();
    init_sync();
    init_chunking();
    init_types();
  }
});
|
|
518
|
+
|
|
15
519
|
// src/cli-utils.ts
|
|
16
520
|
function formatBytes(bytes) {
|
|
17
521
|
if (bytes === 0) return "0 B";
|
|
@@ -29,7 +533,7 @@ function parseLimit(value, name) {
|
|
|
29
533
|
return parsed;
|
|
30
534
|
}
|
|
31
535
|
function getRepoRoot() {
|
|
32
|
-
return process.env["COMPOUND_AGENT_ROOT"]
|
|
536
|
+
return process.env["COMPOUND_AGENT_ROOT"] || process.cwd();
|
|
33
537
|
}
|
|
34
538
|
var EPIC_ID_PATTERN = /^[a-zA-Z0-9_-]+$/;
|
|
35
539
|
function validateEpicId(epicId) {
|
|
@@ -340,29 +844,9 @@ async function readLessons(repoRoot, options = {}) {
|
|
|
340
844
|
const lessons = result.items.filter((item) => item.type === "lesson");
|
|
341
845
|
return { lessons, skippedCount: result.skippedCount };
|
|
342
846
|
}
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
function ensureSqliteAvailable() {
|
|
347
|
-
if (checked) return;
|
|
348
|
-
try {
|
|
349
|
-
const module = require2("better-sqlite3");
|
|
350
|
-
const Constructor = module.default || module;
|
|
351
|
-
const testDb = new Constructor(":memory:");
|
|
352
|
-
testDb.close();
|
|
353
|
-
DatabaseConstructor = Constructor;
|
|
354
|
-
checked = true;
|
|
355
|
-
} catch (cause) {
|
|
356
|
-
throw new Error(
|
|
357
|
-
'better-sqlite3 failed to load.\nRun: npx ca setup (auto-configures pnpm native builds)\nOr manually add to your package.json:\n "pnpm": { "onlyBuiltDependencies": ["better-sqlite3", "node-llama-cpp"] }\nThen run: pnpm install && pnpm rebuild better-sqlite3\nFor npm/yarn, run: npm rebuild better-sqlite3',
|
|
358
|
-
{ cause }
|
|
359
|
-
);
|
|
360
|
-
}
|
|
361
|
-
}
|
|
362
|
-
function getDatabaseConstructor() {
|
|
363
|
-
ensureSqliteAvailable();
|
|
364
|
-
return DatabaseConstructor;
|
|
365
|
-
}
|
|
847
|
+
|
|
848
|
+
// src/memory/storage/sqlite/connection.ts
|
|
849
|
+
init_availability();
|
|
366
850
|
|
|
367
851
|
// src/memory/storage/sqlite/schema.ts
|
|
368
852
|
var SCHEMA_VERSION = 3;
|
|
@@ -458,7 +942,10 @@ function openDb(repoRoot, options = {}) {
|
|
|
458
942
|
database = new Database(key);
|
|
459
943
|
if (!hasExpectedVersion(database)) {
|
|
460
944
|
database.close();
|
|
461
|
-
|
|
945
|
+
try {
|
|
946
|
+
unlinkSync(key);
|
|
947
|
+
} catch {
|
|
948
|
+
}
|
|
462
949
|
database = new Database(key);
|
|
463
950
|
}
|
|
464
951
|
database.pragma("journal_mode = WAL");
|
|
@@ -599,7 +1086,65 @@ async function syncIfNeeded(repoRoot, options = {}) {
|
|
|
599
1086
|
return false;
|
|
600
1087
|
}
|
|
601
1088
|
|
|
1089
|
+
// src/memory/search/hybrid.ts
// Default blend: vector similarity dominates, keyword (BM25) refines.
var DEFAULT_VECTOR_WEIGHT = 0.7;
var DEFAULT_TEXT_WEIGHT = 0.3;
// Fetch limit * 4 candidates from each backend before merging.
var CANDIDATE_MULTIPLIER = 4;
// Merged results scoring below this threshold are discarded.
var MIN_HYBRID_SCORE = 0.35;
+
// Maps an FTS5 BM25 rank onto [0, 1): larger |rank| approaches 1, zero
// stays zero, and non-finite input (NaN/Infinity) normalizes to 0.
function normalizeBm25Rank(rank) {
  if (!Number.isFinite(rank)) {
    return 0;
  }
  const magnitude = Math.abs(rank);
  return magnitude / (1 + magnitude);
}
|
|
1099
|
+
// Blends vector and keyword result lists into one ranked list.
// Weights are renormalized to sum to 1; an item appearing in both lists
// receives the weighted sum of its two scores. Optional minScore filtering
// and limit truncation are applied after sorting by descending score.
// Returns [] when both inputs are empty or the weights sum to <= 0.
function mergeHybridScores(vectorResults, keywordResults, getId, options) {
  if (vectorResults.length === 0 && keywordResults.length === 0) return [];
  const rawVecW = options?.vectorWeight ?? DEFAULT_VECTOR_WEIGHT;
  const rawTxtW = options?.textWeight ?? DEFAULT_TEXT_WEIGHT;
  const total = rawVecW + rawTxtW;
  if (total <= 0) return [];
  const vecW = rawVecW / total;
  const txtW = rawTxtW / total;
  const merged = new Map();
  for (const { item, score } of vectorResults) {
    merged.set(getId(item), { item, vecScore: score, txtScore: 0 });
  }
  for (const { item, score } of keywordResults) {
    const id = getId(item);
    const entry = merged.get(id);
    if (entry) {
      entry.txtScore = score;
    } else {
      merged.set(id, { item, vecScore: 0, txtScore: score });
    }
  }
  const scored = [...merged.values()].map((entry) => ({
    item: entry.item,
    score: vecW * entry.vecScore + txtW * entry.txtScore
  }));
  scored.sort((a, b) => b.score - a.score);
  const minScore = options?.minScore;
  const limit = options?.limit;
  const filtered = minScore === void 0 ? scored : scored.filter((r) => r.score >= minScore);
  return limit === void 0 ? filtered : filtered.slice(0, limit);
}
|
|
1133
|
+
// Lesson-shaped adapter over mergeHybridScores: converts {lesson, score}
// pairs to the generic {item, score} form, merges by lesson id, and maps
// the merged results back into {lesson, score} pairs.
function mergeHybridResults(vectorResults, keywordResults, options) {
  const toGeneric = ({ lesson, score }) => ({ item: lesson, score });
  const merged = mergeHybridScores(
    vectorResults.map(toGeneric),
    keywordResults.map(toGeneric),
    (lesson) => lesson.id,
    options
  );
  return merged.map(({ item, score }) => ({ lesson: item, score }));
}
|
|
1139
|
+
|
|
602
1140
|
// src/memory/storage/sqlite/search.ts
|
|
1141
|
+
// JSON.parse that returns `fallback` instead of throwing on bad input.
function safeJsonParse(value, fallback) {
  let parsed = fallback;
  try {
    parsed = JSON.parse(value);
  } catch {
    // Malformed JSON in the stored row — keep the caller's default.
  }
  return parsed;
}
|
|
603
1148
|
function rowToMemoryItem(row) {
|
|
604
1149
|
const item = {
|
|
605
1150
|
id: row.id,
|
|
@@ -608,9 +1153,9 @@ function rowToMemoryItem(row) {
|
|
|
608
1153
|
insight: row.insight,
|
|
609
1154
|
tags: row.tags ? row.tags.split(",").filter(Boolean) : [],
|
|
610
1155
|
source: row.source,
|
|
611
|
-
context:
|
|
612
|
-
supersedes:
|
|
613
|
-
related:
|
|
1156
|
+
context: safeJsonParse(row.context, {}),
|
|
1157
|
+
supersedes: safeJsonParse(row.supersedes, []),
|
|
1158
|
+
related: safeJsonParse(row.related, []),
|
|
614
1159
|
created: row.created,
|
|
615
1160
|
confirmed: row.confirmed === 1
|
|
616
1161
|
};
|
|
@@ -641,7 +1186,7 @@ function rowToMemoryItem(row) {
|
|
|
641
1186
|
}
|
|
642
1187
|
var FTS_OPERATORS = /* @__PURE__ */ new Set(["AND", "OR", "NOT", "NEAR"]);
|
|
643
1188
|
function sanitizeFtsQuery(query) {
|
|
644
|
-
const stripped = query.replace(/["
|
|
1189
|
+
const stripped = query.replace(/["*^+\-():{}]/g, "");
|
|
645
1190
|
const tokens = stripped.split(/\s+/).filter((t) => t.length > 0 && !FTS_OPERATORS.has(t));
|
|
646
1191
|
return tokens.join(" ");
|
|
647
1192
|
}
|
|
@@ -662,31 +1207,47 @@ function incrementRetrievalCount(repoRoot, lessonIds) {
|
|
|
662
1207
|
});
|
|
663
1208
|
updateMany(lessonIds);
|
|
664
1209
|
}
|
|
665
|
-
|
|
1210
|
+
function executeFtsQuery(repoRoot, query, limit, options) {
|
|
666
1211
|
const database = openDb(repoRoot);
|
|
667
|
-
const countResult = database.prepare("SELECT COUNT(*) as cnt FROM lessons").get();
|
|
668
|
-
if (countResult.cnt === 0) return [];
|
|
669
1212
|
const sanitized = sanitizeFtsQuery(query);
|
|
670
1213
|
if (sanitized === "") return [];
|
|
1214
|
+
const selectCols = options.includeRank ? "l.*, fts.rank" : "l.*";
|
|
1215
|
+
const orderClause = options.includeRank ? "ORDER BY fts.rank" : "";
|
|
1216
|
+
const typeClause = options.typeFilter ? "AND l.type = ?" : "";
|
|
1217
|
+
const sql = `
|
|
1218
|
+
SELECT ${selectCols}
|
|
1219
|
+
FROM lessons l
|
|
1220
|
+
JOIN lessons_fts fts ON l.rowid = fts.rowid
|
|
1221
|
+
WHERE lessons_fts MATCH ?
|
|
1222
|
+
AND l.invalidated_at IS NULL
|
|
1223
|
+
${typeClause}
|
|
1224
|
+
${orderClause}
|
|
1225
|
+
LIMIT ?
|
|
1226
|
+
`;
|
|
1227
|
+
const params = options.typeFilter ? [sanitized, options.typeFilter, limit] : [sanitized, limit];
|
|
671
1228
|
try {
|
|
672
|
-
|
|
673
|
-
const rows = database.prepare(
|
|
674
|
-
`
|
|
675
|
-
SELECT l.*
|
|
676
|
-
FROM lessons l
|
|
677
|
-
JOIN lessons_fts fts ON l.rowid = fts.rowid
|
|
678
|
-
WHERE lessons_fts MATCH ?
|
|
679
|
-
AND l.invalidated_at IS NULL
|
|
680
|
-
LIMIT ?
|
|
681
|
-
`
|
|
682
|
-
).all(sanitized, limit);
|
|
683
|
-
return rows.map(rowToMemoryItem).filter((x) => x !== null);
|
|
1229
|
+
return database.prepare(sql).all(...params);
|
|
684
1230
|
} catch (err) {
|
|
685
1231
|
const message = err instanceof Error ? err.message : "Unknown FTS5 error";
|
|
686
1232
|
console.error(`[compound-agent] search error: ${message}`);
|
|
687
1233
|
return [];
|
|
688
1234
|
}
|
|
689
1235
|
}
|
|
1236
|
+
// Keyword (FTS5) search returning plain memory items, without rank info.
async function searchKeyword(repoRoot, query, limit, typeFilter) {
  const rows = executeFtsQuery(repoRoot, query, limit, { includeRank: false, typeFilter });
  const items = rows.map(rowToMemoryItem);
  return items.filter((item) => item !== null);
}
|
|
1240
|
+
// Keyword (FTS5) search returning {lesson, score} pairs, where score is
// the row's BM25 rank normalized into [0, 1).
async function searchKeywordScored(repoRoot, query, limit, typeFilter) {
  const rows = executeFtsQuery(repoRoot, query, limit, { includeRank: true, typeFilter });
  const scored = [];
  for (const row of rows) {
    const lesson = rowToMemoryItem(row);
    if (!lesson) continue;
    scored.push({ lesson, score: normalizeBm25Rank(row.rank) });
  }
  return scored;
}
|
|
690
1251
|
function getRetrievalStats(repoRoot) {
|
|
691
1252
|
const database = openDb(repoRoot);
|
|
692
1253
|
const rows = database.prepare("SELECT id, retrieval_count, last_retrieved FROM lessons").all();
|
|
@@ -2094,8 +2655,13 @@ async function loadSessionLessons(repoRoot, limit = DEFAULT_LIMIT2) {
|
|
|
2094
2655
|
// src/memory/retrieval/plan.ts
|
|
2095
2656
|
var DEFAULT_LIMIT3 = 5;
|
|
2096
2657
|
async function retrieveForPlan(repoRoot, planText, limit = DEFAULT_LIMIT3) {
|
|
2097
|
-
const
|
|
2098
|
-
const
|
|
2658
|
+
const candidateLimit = limit * CANDIDATE_MULTIPLIER;
|
|
2659
|
+
const [vectorResults, keywordResults] = await Promise.all([
|
|
2660
|
+
searchVector(repoRoot, planText, { limit: candidateLimit }),
|
|
2661
|
+
searchKeywordScored(repoRoot, planText, candidateLimit)
|
|
2662
|
+
]);
|
|
2663
|
+
const merged = mergeHybridResults(vectorResults, keywordResults, { minScore: MIN_HYBRID_SCORE });
|
|
2664
|
+
const ranked = rankLessons(merged);
|
|
2099
2665
|
const topLessons = ranked.slice(0, limit);
|
|
2100
2666
|
if (topLessons.length > 0) {
|
|
2101
2667
|
incrementRetrievalCount(repoRoot, topLessons.map((item) => item.lesson.id));
|
|
@@ -2118,6 +2684,135 @@ No relevant lessons found for this plan.`;
|
|
|
2118
2684
|
${lessonLines.join("\n")}`;
|
|
2119
2685
|
}
|
|
2120
2686
|
|
|
2687
|
+
// src/memory/storage/sqlite-knowledge/index.ts
|
|
2688
|
+
init_connection();
|
|
2689
|
+
init_schema();
|
|
2690
|
+
|
|
2691
|
+
// src/memory/storage/sqlite-knowledge/cache.ts
|
|
2692
|
+
init_connection();
|
|
2693
|
+
init_types();
|
|
2694
|
+
|
|
2695
|
+
// src/memory/storage/sqlite-knowledge/search.ts
|
|
2696
|
+
init_connection();
|
|
2697
|
+
function rowToChunk(row) {
|
|
2698
|
+
const chunk = {
|
|
2699
|
+
id: row.id,
|
|
2700
|
+
filePath: row.file_path,
|
|
2701
|
+
startLine: row.start_line,
|
|
2702
|
+
endLine: row.end_line,
|
|
2703
|
+
contentHash: row.content_hash,
|
|
2704
|
+
text: row.text,
|
|
2705
|
+
updatedAt: row.updated_at
|
|
2706
|
+
};
|
|
2707
|
+
if (row.model !== null) {
|
|
2708
|
+
chunk.model = row.model;
|
|
2709
|
+
}
|
|
2710
|
+
return chunk;
|
|
2711
|
+
}
|
|
2712
|
+
function searchChunksKeywordScored(repoRoot, query, limit) {
|
|
2713
|
+
const database = openKnowledgeDb(repoRoot);
|
|
2714
|
+
const sanitized = sanitizeFtsQuery(query);
|
|
2715
|
+
if (sanitized === "") return [];
|
|
2716
|
+
try {
|
|
2717
|
+
const rows = database.prepare(
|
|
2718
|
+
`SELECT c.*, fts.rank
|
|
2719
|
+
FROM chunks c
|
|
2720
|
+
JOIN chunks_fts fts ON c.rowid = fts.rowid
|
|
2721
|
+
WHERE chunks_fts MATCH ?
|
|
2722
|
+
ORDER BY fts.rank
|
|
2723
|
+
LIMIT ?`
|
|
2724
|
+
).all(sanitized, limit);
|
|
2725
|
+
return rows.map((row) => ({
|
|
2726
|
+
chunk: rowToChunk(row),
|
|
2727
|
+
score: normalizeBm25Rank(row.rank)
|
|
2728
|
+
}));
|
|
2729
|
+
} catch (err) {
|
|
2730
|
+
const message = err instanceof Error ? err.message : "Unknown FTS5 error";
|
|
2731
|
+
console.error(`[compound-agent] knowledge scored search error: ${message}`);
|
|
2732
|
+
return [];
|
|
2733
|
+
}
|
|
2734
|
+
}
|
|
2735
|
+
|
|
2736
|
+
// src/memory/storage/sqlite-knowledge/index.ts
|
|
2737
|
+
init_sync();
|
|
2738
|
+
|
|
2739
|
+
// src/index.ts
|
|
2740
|
+
init_chunking();
|
|
2741
|
+
init_indexing();
|
|
2742
|
+
|
|
2743
|
+
// src/memory/knowledge/search.ts
|
|
2744
|
+
init_connection();
|
|
2745
|
+
var DEFAULT_KNOWLEDGE_LIMIT = 6;
|
|
2746
|
+
async function searchKnowledgeVector(repoRoot, query, options) {
|
|
2747
|
+
const limit = options?.limit ?? DEFAULT_KNOWLEDGE_LIMIT;
|
|
2748
|
+
const database = openKnowledgeDb(repoRoot);
|
|
2749
|
+
const embRows = database.prepare("SELECT id, embedding FROM chunks WHERE embedding IS NOT NULL").all();
|
|
2750
|
+
if (embRows.length === 0) return [];
|
|
2751
|
+
const queryVector = await embedText(query);
|
|
2752
|
+
const scored = [];
|
|
2753
|
+
for (const row of embRows) {
|
|
2754
|
+
const embFloat = new Float32Array(
|
|
2755
|
+
row.embedding.buffer,
|
|
2756
|
+
row.embedding.byteOffset,
|
|
2757
|
+
row.embedding.byteLength / 4
|
|
2758
|
+
);
|
|
2759
|
+
scored.push({ id: row.id, score: cosineSimilarity(queryVector, embFloat) });
|
|
2760
|
+
}
|
|
2761
|
+
scored.sort((a, b) => b.score - a.score);
|
|
2762
|
+
const topK = scored.slice(0, limit);
|
|
2763
|
+
if (topK.length === 0) return [];
|
|
2764
|
+
const placeholders = topK.map(() => "?").join(",");
|
|
2765
|
+
const sql = `SELECT id, file_path, start_line, end_line, content_hash, text, model, updated_at FROM chunks WHERE id IN (${placeholders})`;
|
|
2766
|
+
const dataRows = database.prepare(sql).all(...topK.map((r) => r.id));
|
|
2767
|
+
const dataMap = new Map(dataRows.map((r) => [r.id, r]));
|
|
2768
|
+
const results = [];
|
|
2769
|
+
for (const { id, score } of topK) {
|
|
2770
|
+
const row = dataMap.get(id);
|
|
2771
|
+
if (!row) continue;
|
|
2772
|
+
const chunk = {
|
|
2773
|
+
id: row.id,
|
|
2774
|
+
filePath: row.file_path,
|
|
2775
|
+
startLine: row.start_line,
|
|
2776
|
+
endLine: row.end_line,
|
|
2777
|
+
contentHash: row.content_hash,
|
|
2778
|
+
text: row.text,
|
|
2779
|
+
updatedAt: row.updated_at
|
|
2780
|
+
};
|
|
2781
|
+
if (row.model !== null) {
|
|
2782
|
+
chunk.model = row.model;
|
|
2783
|
+
}
|
|
2784
|
+
results.push({ item: chunk, score });
|
|
2785
|
+
}
|
|
2786
|
+
return results;
|
|
2787
|
+
}
|
|
2788
|
+
async function searchKnowledge(repoRoot, query, options) {
|
|
2789
|
+
const limit = options?.limit ?? DEFAULT_KNOWLEDGE_LIMIT;
|
|
2790
|
+
const candidateLimit = limit * CANDIDATE_MULTIPLIER;
|
|
2791
|
+
const usability = await isModelUsable();
|
|
2792
|
+
if (usability.usable) {
|
|
2793
|
+
const [vectorResults, keywordResults2] = await Promise.all([
|
|
2794
|
+
searchKnowledgeVector(repoRoot, query, { limit: candidateLimit }),
|
|
2795
|
+
Promise.resolve(searchChunksKeywordScored(repoRoot, query, candidateLimit))
|
|
2796
|
+
]);
|
|
2797
|
+
if (vectorResults.length === 0) {
|
|
2798
|
+
return keywordResults2.map((k) => ({ item: k.chunk, score: k.score })).slice(0, limit);
|
|
2799
|
+
}
|
|
2800
|
+
const genericKw = keywordResults2.map((k) => ({
|
|
2801
|
+
item: k.chunk,
|
|
2802
|
+
score: k.score
|
|
2803
|
+
}));
|
|
2804
|
+
const merged = mergeHybridScores(
|
|
2805
|
+
vectorResults,
|
|
2806
|
+
genericKw,
|
|
2807
|
+
(item) => item.id,
|
|
2808
|
+
{ limit, minScore: MIN_HYBRID_SCORE }
|
|
2809
|
+
);
|
|
2810
|
+
return merged;
|
|
2811
|
+
}
|
|
2812
|
+
const keywordResults = searchChunksKeywordScored(repoRoot, query, limit);
|
|
2813
|
+
return keywordResults.map((k) => ({ item: k.chunk, score: k.score }));
|
|
2814
|
+
}
|
|
2815
|
+
|
|
2121
2816
|
// src/audit/checks/lessons.ts
|
|
2122
2817
|
async function checkLessons(repoRoot) {
|
|
2123
2818
|
const { items } = await readMemoryItems(repoRoot);
|
|
@@ -2445,6 +3140,22 @@ var COMPOUND_AGENT_HOOK_BLOCK = `
|
|
|
2445
3140
|
# Compound Agent pre-commit hook (appended)
|
|
2446
3141
|
npx ca hooks run pre-commit
|
|
2447
3142
|
`;
|
|
3143
|
+
var POST_COMMIT_HOOK_TEMPLATE = `#!/bin/sh
|
|
3144
|
+
# Compound Agent post-commit hook
|
|
3145
|
+
# Auto-indexes docs/ when documentation files change
|
|
3146
|
+
|
|
3147
|
+
# Check if any docs/ files were modified in this commit
|
|
3148
|
+
if git diff-tree --no-commit-id --name-only -r HEAD | grep -q '^docs/'; then
|
|
3149
|
+
npx ca -q index-docs 2>/dev/null &
|
|
3150
|
+
fi
|
|
3151
|
+
`;
|
|
3152
|
+
var POST_COMMIT_HOOK_MARKER = "# Compound Agent post-commit hook";
|
|
3153
|
+
var COMPOUND_AGENT_POST_COMMIT_BLOCK = `
|
|
3154
|
+
# Compound Agent post-commit hook (appended)
|
|
3155
|
+
if git diff-tree --no-commit-id --name-only -r HEAD | grep -q '^docs/'; then
|
|
3156
|
+
npx ca -q index-docs 2>/dev/null &
|
|
3157
|
+
fi
|
|
3158
|
+
`;
|
|
2448
3159
|
var CLAUDE_HOOK_MARKERS = [
|
|
2449
3160
|
"ca prime",
|
|
2450
3161
|
"ca load-session",
|
|
@@ -2457,7 +3168,8 @@ var CLAUDE_HOOK_MARKERS = [
|
|
|
2457
3168
|
"ca hooks run stop-audit",
|
|
2458
3169
|
// v1.2.9 canonical names
|
|
2459
3170
|
"ca hooks run post-read",
|
|
2460
|
-
"ca hooks run phase-audit"
|
|
3171
|
+
"ca hooks run phase-audit",
|
|
3172
|
+
"ca index-docs"
|
|
2461
3173
|
];
|
|
2462
3174
|
var CLAUDE_HOOK_CONFIG = {
|
|
2463
3175
|
matcher: "",
|
|
@@ -2555,6 +3267,7 @@ This project uses compound-agent for session memory via **CLI commands**.
|
|
|
2555
3267
|
| Command | Purpose |
|
|
2556
3268
|
|---------|---------|
|
|
2557
3269
|
| \`npx ca search "query"\` | Search lessons - use BEFORE architectural decisions |
|
|
3270
|
+
| \`npx ca knowledge "query"\` | Search docs knowledge - use BEFORE architectural decisions |
|
|
2558
3271
|
| \`npx ca learn "insight"\` | Capture lessons - use AFTER corrections or discoveries |
|
|
2559
3272
|
| \`npx ca list\` | List all stored lessons |
|
|
2560
3273
|
| \`npx ca show <id>\` | Show details of a specific lesson |
|
|
@@ -2562,7 +3275,7 @@ This project uses compound-agent for session memory via **CLI commands**.
|
|
|
2562
3275
|
|
|
2563
3276
|
### Mandatory Recall
|
|
2564
3277
|
|
|
2565
|
-
You MUST call \`npx ca search\` BEFORE:
|
|
3278
|
+
You MUST call \`npx ca search\` and \`npx ca knowledge\` BEFORE:
|
|
2566
3279
|
- Architectural decisions or complex planning
|
|
2567
3280
|
- Patterns you've implemented before in this repo
|
|
2568
3281
|
- After user corrections ("actually...", "wrong", "use X instead")
|
|
@@ -2612,7 +3325,7 @@ var PLUGIN_MANIFEST = {
|
|
|
2612
3325
|
name: "Nathan Delacr\xE9taz",
|
|
2613
3326
|
url: "https://github.com/Nathandela"
|
|
2614
3327
|
},
|
|
2615
|
-
repository: "https://github.com/Nathandela/
|
|
3328
|
+
repository: "https://github.com/Nathandela/learning_agent",
|
|
2616
3329
|
license: "MIT",
|
|
2617
3330
|
hooks: {
|
|
2618
3331
|
SessionStart: [
|
|
@@ -2960,25 +3673,187 @@ async function ensureGitignore(repoRoot) {
|
|
|
2960
3673
|
function formatError(command, code, message, remediation) {
|
|
2961
3674
|
return `ERROR [${command}] ${code}: ${message} \u2014 ${remediation}`;
|
|
2962
3675
|
}
|
|
2963
|
-
|
|
2964
|
-
|
|
2965
|
-
var
|
|
2966
|
-
|
|
2967
|
-
|
|
2968
|
-
|
|
2969
|
-
|
|
2970
|
-
|
|
2971
|
-
|
|
2972
|
-
|
|
2973
|
-
|
|
2974
|
-
|
|
2975
|
-
|
|
2976
|
-
|
|
2977
|
-
|
|
2978
|
-
|
|
2979
|
-
|
|
2980
|
-
|
|
2981
|
-
|
|
3676
|
+
|
|
3677
|
+
// src/setup/hooks-user-prompt.ts
|
|
3678
|
+
var CORRECTION_PATTERNS = [
|
|
3679
|
+
/\bactually\b/i,
|
|
3680
|
+
/\bno[,.]?\s/i,
|
|
3681
|
+
/\bwrong\b/i,
|
|
3682
|
+
/\bthat'?s not right\b/i,
|
|
3683
|
+
/\bthat'?s incorrect\b/i,
|
|
3684
|
+
/\buse .+ instead\b/i,
|
|
3685
|
+
/\bi told you\b/i,
|
|
3686
|
+
/\bi already said\b/i,
|
|
3687
|
+
/\bnot like that\b/i,
|
|
3688
|
+
/\byou forgot\b/i,
|
|
3689
|
+
/\byou missed\b/i,
|
|
3690
|
+
/\bstop\s*(,\s*)?(doing|using|that)\b/i,
|
|
3691
|
+
/\bwait\s*(,\s*)?(that|no|wrong)\b/i
|
|
3692
|
+
];
|
|
3693
|
+
var HIGH_CONFIDENCE_PLANNING = [
|
|
3694
|
+
/\bdecide\b/i,
|
|
3695
|
+
/\bchoose\b/i,
|
|
3696
|
+
/\bpick\b/i,
|
|
3697
|
+
/\bwhich approach\b/i,
|
|
3698
|
+
/\bwhat do you think\b/i,
|
|
3699
|
+
/\bshould we\b/i,
|
|
3700
|
+
/\bwould you\b/i,
|
|
3701
|
+
/\bhow should\b/i,
|
|
3702
|
+
/\bwhat'?s the best\b/i,
|
|
3703
|
+
/\badd feature\b/i,
|
|
3704
|
+
/\bset up\b/i
|
|
3705
|
+
];
|
|
3706
|
+
var LOW_CONFIDENCE_PLANNING = [
|
|
3707
|
+
/\bimplement\b/i,
|
|
3708
|
+
/\bbuild\b/i,
|
|
3709
|
+
/\bcreate\b/i,
|
|
3710
|
+
/\brefactor\b/i,
|
|
3711
|
+
/\bfix\b/i,
|
|
3712
|
+
/\bwrite\b/i,
|
|
3713
|
+
/\bdevelop\b/i
|
|
3714
|
+
];
|
|
3715
|
+
var CORRECTION_REMINDER = "Remember: You have memory tools available - `npx ca learn` to save insights, `npx ca search` to find past solutions.";
|
|
3716
|
+
var PLANNING_REMINDER = "If you're uncertain or hesitant, remember your memory tools: `npx ca search` may have relevant context from past sessions.";
|
|
3717
|
+
function detectCorrection(prompt) {
|
|
3718
|
+
return CORRECTION_PATTERNS.some((pattern) => pattern.test(prompt));
|
|
3719
|
+
}
|
|
3720
|
+
function detectPlanning(prompt) {
|
|
3721
|
+
if (HIGH_CONFIDENCE_PLANNING.some((pattern) => pattern.test(prompt))) {
|
|
3722
|
+
return true;
|
|
3723
|
+
}
|
|
3724
|
+
const lowMatches = LOW_CONFIDENCE_PLANNING.filter((pattern) => pattern.test(prompt));
|
|
3725
|
+
return lowMatches.length >= 2;
|
|
3726
|
+
}
|
|
3727
|
+
function processUserPrompt(prompt) {
|
|
3728
|
+
if (detectCorrection(prompt)) {
|
|
3729
|
+
return {
|
|
3730
|
+
hookSpecificOutput: {
|
|
3731
|
+
hookEventName: "UserPromptSubmit",
|
|
3732
|
+
additionalContext: CORRECTION_REMINDER
|
|
3733
|
+
}
|
|
3734
|
+
};
|
|
3735
|
+
}
|
|
3736
|
+
if (detectPlanning(prompt)) {
|
|
3737
|
+
return {
|
|
3738
|
+
hookSpecificOutput: {
|
|
3739
|
+
hookEventName: "UserPromptSubmit",
|
|
3740
|
+
additionalContext: PLANNING_REMINDER
|
|
3741
|
+
}
|
|
3742
|
+
};
|
|
3743
|
+
}
|
|
3744
|
+
return {};
|
|
3745
|
+
}
|
|
3746
|
+
var SAME_TARGET_THRESHOLD = 2;
|
|
3747
|
+
var TOTAL_FAILURE_THRESHOLD = 3;
|
|
3748
|
+
var STATE_FILE_NAME = ".ca-failure-state.json";
|
|
3749
|
+
var STATE_MAX_AGE_MS = 60 * 60 * 1e3;
|
|
3750
|
+
var failureCount = 0;
|
|
3751
|
+
var lastFailedTarget = null;
|
|
3752
|
+
var sameTargetCount = 0;
|
|
3753
|
+
function defaultState() {
|
|
3754
|
+
return { count: 0, lastTarget: null, sameTargetCount: 0, timestamp: Date.now() };
|
|
3755
|
+
}
|
|
3756
|
+
function readFailureState(stateDir) {
|
|
3757
|
+
try {
|
|
3758
|
+
const filePath = join(stateDir, STATE_FILE_NAME);
|
|
3759
|
+
if (!existsSync(filePath)) return defaultState();
|
|
3760
|
+
const raw = readFileSync(filePath, "utf-8");
|
|
3761
|
+
const parsed = JSON.parse(raw);
|
|
3762
|
+
if (Date.now() - parsed.timestamp > STATE_MAX_AGE_MS) return defaultState();
|
|
3763
|
+
return parsed;
|
|
3764
|
+
} catch {
|
|
3765
|
+
return defaultState();
|
|
3766
|
+
}
|
|
3767
|
+
}
|
|
3768
|
+
function writeFailureState(stateDir, state) {
|
|
3769
|
+
try {
|
|
3770
|
+
const filePath = join(stateDir, STATE_FILE_NAME);
|
|
3771
|
+
writeFileSync(filePath, JSON.stringify(state), "utf-8");
|
|
3772
|
+
} catch {
|
|
3773
|
+
}
|
|
3774
|
+
}
|
|
3775
|
+
function deleteStateFile(stateDir) {
|
|
3776
|
+
try {
|
|
3777
|
+
const filePath = join(stateDir, STATE_FILE_NAME);
|
|
3778
|
+
if (existsSync(filePath)) unlinkSync(filePath);
|
|
3779
|
+
} catch {
|
|
3780
|
+
}
|
|
3781
|
+
}
|
|
3782
|
+
var FAILURE_TIP = "Tip: Multiple failures detected. `npx ca search` may have solutions for similar issues.";
|
|
3783
|
+
function resetFailureState(stateDir) {
|
|
3784
|
+
failureCount = 0;
|
|
3785
|
+
lastFailedTarget = null;
|
|
3786
|
+
sameTargetCount = 0;
|
|
3787
|
+
if (stateDir) deleteStateFile(stateDir);
|
|
3788
|
+
}
|
|
3789
|
+
function getFailureTarget(toolName, toolInput) {
|
|
3790
|
+
if (toolName === "Bash" && typeof toolInput.command === "string") {
|
|
3791
|
+
const trimmed = toolInput.command.trim();
|
|
3792
|
+
const firstSpace = trimmed.indexOf(" ");
|
|
3793
|
+
return firstSpace === -1 ? trimmed : trimmed.slice(0, firstSpace);
|
|
3794
|
+
}
|
|
3795
|
+
if ((toolName === "Edit" || toolName === "Write") && typeof toolInput.file_path === "string") {
|
|
3796
|
+
return toolInput.file_path;
|
|
3797
|
+
}
|
|
3798
|
+
return null;
|
|
3799
|
+
}
|
|
3800
|
+
function processToolFailure(toolName, toolInput, stateDir) {
|
|
3801
|
+
if (stateDir) {
|
|
3802
|
+
const persisted = readFailureState(stateDir);
|
|
3803
|
+
failureCount = persisted.count;
|
|
3804
|
+
lastFailedTarget = persisted.lastTarget;
|
|
3805
|
+
sameTargetCount = persisted.sameTargetCount;
|
|
3806
|
+
}
|
|
3807
|
+
failureCount++;
|
|
3808
|
+
const target = getFailureTarget(toolName, toolInput);
|
|
3809
|
+
if (target !== null && target === lastFailedTarget) {
|
|
3810
|
+
sameTargetCount++;
|
|
3811
|
+
} else {
|
|
3812
|
+
sameTargetCount = 1;
|
|
3813
|
+
lastFailedTarget = target;
|
|
3814
|
+
}
|
|
3815
|
+
const shouldShowTip = sameTargetCount >= SAME_TARGET_THRESHOLD || failureCount >= TOTAL_FAILURE_THRESHOLD;
|
|
3816
|
+
if (shouldShowTip) {
|
|
3817
|
+
resetFailureState(stateDir);
|
|
3818
|
+
return {
|
|
3819
|
+
hookSpecificOutput: {
|
|
3820
|
+
hookEventName: "PostToolUseFailure",
|
|
3821
|
+
additionalContext: FAILURE_TIP
|
|
3822
|
+
}
|
|
3823
|
+
};
|
|
3824
|
+
}
|
|
3825
|
+
if (stateDir) {
|
|
3826
|
+
writeFailureState(stateDir, {
|
|
3827
|
+
count: failureCount,
|
|
3828
|
+
lastTarget: lastFailedTarget,
|
|
3829
|
+
sameTargetCount,
|
|
3830
|
+
timestamp: Date.now()
|
|
3831
|
+
});
|
|
3832
|
+
}
|
|
3833
|
+
return {};
|
|
3834
|
+
}
|
|
3835
|
+
function processToolSuccess(stateDir) {
|
|
3836
|
+
resetFailureState(stateDir);
|
|
3837
|
+
}
|
|
3838
|
+
var STATE_DIR = ".claude";
|
|
3839
|
+
var STATE_FILE = ".ca-phase-state.json";
|
|
3840
|
+
var PHASE_STATE_MAX_AGE_MS = 72 * 60 * 60 * 1e3;
|
|
3841
|
+
var PHASES = ["brainstorm", "plan", "work", "review", "compound"];
|
|
3842
|
+
var GATES = ["post-plan", "gate-3", "gate-4", "final"];
|
|
3843
|
+
var PHASE_INDEX = {
|
|
3844
|
+
brainstorm: 1,
|
|
3845
|
+
plan: 2,
|
|
3846
|
+
work: 3,
|
|
3847
|
+
review: 4,
|
|
3848
|
+
compound: 5
|
|
3849
|
+
};
|
|
3850
|
+
function getStatePath(repoRoot) {
|
|
3851
|
+
return join(repoRoot, STATE_DIR, STATE_FILE);
|
|
3852
|
+
}
|
|
3853
|
+
function isPhaseName(value) {
|
|
3854
|
+
return typeof value === "string" && PHASES.includes(value);
|
|
3855
|
+
}
|
|
3856
|
+
function isGateName(value) {
|
|
2982
3857
|
return typeof value === "string" && GATES.includes(value);
|
|
2983
3858
|
}
|
|
2984
3859
|
function isIsoDate(value) {
|
|
@@ -3021,7 +3896,13 @@ function getPhaseState(repoRoot) {
|
|
|
3021
3896
|
if (!existsSync(path2)) return null;
|
|
3022
3897
|
const raw = readFileSync(path2, "utf-8");
|
|
3023
3898
|
const parsed = JSON.parse(raw);
|
|
3024
|
-
|
|
3899
|
+
if (!validatePhaseState(parsed)) return null;
|
|
3900
|
+
const age = Date.now() - new Date(parsed.started_at).getTime();
|
|
3901
|
+
if (age > PHASE_STATE_MAX_AGE_MS) {
|
|
3902
|
+
cleanPhaseState(repoRoot);
|
|
3903
|
+
return null;
|
|
3904
|
+
}
|
|
3905
|
+
return parsed;
|
|
3025
3906
|
} catch {
|
|
3026
3907
|
return null;
|
|
3027
3908
|
}
|
|
@@ -3076,7 +3957,7 @@ function printStatusHuman(state) {
|
|
|
3076
3957
|
}
|
|
3077
3958
|
function registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot) {
|
|
3078
3959
|
phaseCheck.command("init <epic-id>").description("Initialize phase state for an epic").action((epicId) => {
|
|
3079
|
-
if (!
|
|
3960
|
+
if (!EPIC_ID_PATTERN.test(epicId)) {
|
|
3080
3961
|
console.error(`Invalid epic ID: "${epicId}"`);
|
|
3081
3962
|
process.exitCode = 1;
|
|
3082
3963
|
return;
|
|
@@ -3145,266 +4026,113 @@ function registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot) {
|
|
|
3145
4026
|
console.log("Phase state cleaned.");
|
|
3146
4027
|
});
|
|
3147
4028
|
}
|
|
3148
|
-
function registerPhaseCheckCommand(program2) {
|
|
3149
|
-
const phaseCheck = program2.command("phase-check").description("Manage LFG phase state").option("--dry-run", "Show what would be done without making changes");
|
|
3150
|
-
const getDryRun = () => phaseCheck.opts().dryRun ?? false;
|
|
3151
|
-
const repoRoot = () =>
|
|
3152
|
-
registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot);
|
|
3153
|
-
program2.command("phase-clean").description("Remove phase state file (alias for `phase-check clean`)").action(() => {
|
|
3154
|
-
cleanPhaseState(repoRoot());
|
|
3155
|
-
console.log("Phase state cleaned.");
|
|
3156
|
-
});
|
|
3157
|
-
}
|
|
3158
|
-
|
|
3159
|
-
// src/setup/hooks-phase-guard.ts
|
|
3160
|
-
function processPhaseGuard(repoRoot, toolName, _toolInput) {
|
|
3161
|
-
try {
|
|
3162
|
-
if (toolName !== "Edit" && toolName !== "Write") return {};
|
|
3163
|
-
const state = getPhaseState(repoRoot);
|
|
3164
|
-
if (state === null || !state.lfg_active) return {};
|
|
3165
|
-
const expectedSkillPath = `.claude/skills/compound/${state.current_phase}/SKILL.md`;
|
|
3166
|
-
const skillRead = state.skills_read.includes(expectedSkillPath);
|
|
3167
|
-
if (!skillRead) {
|
|
3168
|
-
return {
|
|
3169
|
-
hookSpecificOutput: {
|
|
3170
|
-
hookEventName: "PreToolUse",
|
|
3171
|
-
additionalContext: `PHASE GUARD WARNING: You are in LFG phase ${state.phase_index}/5 (${state.current_phase}) but have NOT read the skill file yet. Read ${expectedSkillPath} before continuing.`
|
|
3172
|
-
}
|
|
3173
|
-
};
|
|
3174
|
-
}
|
|
3175
|
-
return {};
|
|
3176
|
-
} catch {
|
|
3177
|
-
return {};
|
|
3178
|
-
}
|
|
3179
|
-
}
|
|
3180
|
-
|
|
3181
|
-
// src/setup/hooks-read-tracker.ts
|
|
3182
|
-
var SKILL_PATH_PATTERN = /(?:^|\/)\.claude\/skills\/compound\/([^/]+)\/SKILL\.md$/;
|
|
3183
|
-
function normalizePath(path2) {
|
|
3184
|
-
return path2.replaceAll("\\", "/");
|
|
3185
|
-
}
|
|
3186
|
-
function toCanonicalSkillPath(filePath) {
|
|
3187
|
-
const normalized = normalizePath(filePath);
|
|
3188
|
-
const match = SKILL_PATH_PATTERN.exec(normalized);
|
|
3189
|
-
if (!match?.[1]) return null;
|
|
3190
|
-
return `.claude/skills/compound/${match[1]}/SKILL.md`;
|
|
3191
|
-
}
|
|
3192
|
-
function processReadTracker(repoRoot, toolName, toolInput) {
|
|
3193
|
-
try {
|
|
3194
|
-
if (toolName !== "Read") return {};
|
|
3195
|
-
const state = getPhaseState(repoRoot);
|
|
3196
|
-
if (state === null || !state.lfg_active) return {};
|
|
3197
|
-
const filePath = typeof toolInput.file_path === "string" ? toolInput.file_path : null;
|
|
3198
|
-
if (filePath === null) return {};
|
|
3199
|
-
const canonicalPath = toCanonicalSkillPath(filePath);
|
|
3200
|
-
if (canonicalPath === null) return {};
|
|
3201
|
-
if (!state.skills_read.includes(canonicalPath)) {
|
|
3202
|
-
updatePhaseState(repoRoot, {
|
|
3203
|
-
skills_read: [...state.skills_read, canonicalPath]
|
|
3204
|
-
});
|
|
3205
|
-
}
|
|
3206
|
-
return {};
|
|
3207
|
-
} catch {
|
|
3208
|
-
return {};
|
|
3209
|
-
}
|
|
3210
|
-
}
|
|
3211
|
-
|
|
3212
|
-
// src/setup/hooks-stop-audit.ts
|
|
3213
|
-
function hasTransitionEvidence(state) {
|
|
3214
|
-
if (state.phase_index === 5) return true;
|
|
3215
|
-
const nextPhase = PHASES[state.phase_index];
|
|
3216
|
-
if (nextPhase === void 0) return false;
|
|
3217
|
-
const nextSkillPath = `.claude/skills/compound/${nextPhase}/SKILL.md`;
|
|
3218
|
-
return state.skills_read.includes(nextSkillPath);
|
|
3219
|
-
}
|
|
3220
|
-
function processStopAudit(repoRoot, stopHookActive = false) {
|
|
3221
|
-
try {
|
|
3222
|
-
if (stopHookActive) return {};
|
|
3223
|
-
const state = getPhaseState(repoRoot);
|
|
3224
|
-
if (state === null || !state.lfg_active) return {};
|
|
3225
|
-
const expectedGate = expectedGateForPhase(state.phase_index);
|
|
3226
|
-
if (expectedGate === null) return {};
|
|
3227
|
-
if (state.gates_passed.includes(expectedGate)) return {};
|
|
3228
|
-
if (!hasTransitionEvidence(state)) return {};
|
|
3229
|
-
return {
|
|
3230
|
-
continue: false,
|
|
3231
|
-
stopReason: `PHASE GATE NOT VERIFIED: ${state.current_phase} requires gate '${expectedGate}'. Run: npx ca phase-check gate ${expectedGate}`
|
|
3232
|
-
};
|
|
3233
|
-
} catch {
|
|
3234
|
-
return {};
|
|
3235
|
-
}
|
|
3236
|
-
}
|
|
3237
|
-
|
|
3238
|
-
// src/setup/hooks.ts
|
|
3239
|
-
var HOOK_FILE_MODE = 493;
|
|
3240
|
-
var CORRECTION_PATTERNS = [
|
|
3241
|
-
/\bactually\b/i,
|
|
3242
|
-
/\bno[,.]?\s/i,
|
|
3243
|
-
/\bwrong\b/i,
|
|
3244
|
-
/\bthat'?s not right\b/i,
|
|
3245
|
-
/\bthat'?s incorrect\b/i,
|
|
3246
|
-
/\buse .+ instead\b/i,
|
|
3247
|
-
/\bi told you\b/i,
|
|
3248
|
-
/\bi already said\b/i,
|
|
3249
|
-
/\bnot like that\b/i,
|
|
3250
|
-
/\byou forgot\b/i,
|
|
3251
|
-
/\byou missed\b/i,
|
|
3252
|
-
/\bstop\s*(,\s*)?(doing|using|that)\b/i,
|
|
3253
|
-
/\bwait\s*(,\s*)?(that|no|wrong)\b/i
|
|
3254
|
-
];
|
|
3255
|
-
var HIGH_CONFIDENCE_PLANNING = [
|
|
3256
|
-
/\bdecide\b/i,
|
|
3257
|
-
/\bchoose\b/i,
|
|
3258
|
-
/\bpick\b/i,
|
|
3259
|
-
/\bwhich approach\b/i,
|
|
3260
|
-
/\bwhat do you think\b/i,
|
|
3261
|
-
/\bshould we\b/i,
|
|
3262
|
-
/\bwould you\b/i,
|
|
3263
|
-
/\bhow should\b/i,
|
|
3264
|
-
/\bwhat'?s the best\b/i,
|
|
3265
|
-
/\badd feature\b/i,
|
|
3266
|
-
/\bset up\b/i
|
|
3267
|
-
];
|
|
3268
|
-
var LOW_CONFIDENCE_PLANNING = [
|
|
3269
|
-
/\bimplement\b/i,
|
|
3270
|
-
/\bbuild\b/i,
|
|
3271
|
-
/\bcreate\b/i,
|
|
3272
|
-
/\brefactor\b/i,
|
|
3273
|
-
/\bfix\b/i,
|
|
3274
|
-
/\bwrite\b/i,
|
|
3275
|
-
/\bdevelop\b/i
|
|
3276
|
-
];
|
|
3277
|
-
var CORRECTION_REMINDER = "Remember: You have memory tools available - `npx ca learn` to save insights, `npx ca search` to find past solutions.";
|
|
3278
|
-
var PLANNING_REMINDER = "If you're uncertain or hesitant, remember your memory tools: `npx ca search` may have relevant context from past sessions.";
|
|
3279
|
-
function detectCorrection(prompt) {
|
|
3280
|
-
return CORRECTION_PATTERNS.some((pattern) => pattern.test(prompt));
|
|
3281
|
-
}
|
|
3282
|
-
function detectPlanning(prompt) {
|
|
3283
|
-
if (HIGH_CONFIDENCE_PLANNING.some((pattern) => pattern.test(prompt))) {
|
|
3284
|
-
return true;
|
|
3285
|
-
}
|
|
3286
|
-
const lowMatches = LOW_CONFIDENCE_PLANNING.filter((pattern) => pattern.test(prompt));
|
|
3287
|
-
return lowMatches.length >= 2;
|
|
3288
|
-
}
|
|
3289
|
-
function processUserPrompt(prompt) {
|
|
3290
|
-
if (detectCorrection(prompt)) {
|
|
3291
|
-
return {
|
|
3292
|
-
hookSpecificOutput: {
|
|
3293
|
-
hookEventName: "UserPromptSubmit",
|
|
3294
|
-
additionalContext: CORRECTION_REMINDER
|
|
3295
|
-
}
|
|
3296
|
-
};
|
|
3297
|
-
}
|
|
3298
|
-
if (detectPlanning(prompt)) {
|
|
3299
|
-
return {
|
|
3300
|
-
hookSpecificOutput: {
|
|
3301
|
-
hookEventName: "UserPromptSubmit",
|
|
3302
|
-
additionalContext: PLANNING_REMINDER
|
|
3303
|
-
}
|
|
3304
|
-
};
|
|
3305
|
-
}
|
|
3306
|
-
return {};
|
|
3307
|
-
}
|
|
3308
|
-
var SAME_TARGET_THRESHOLD = 2;
|
|
3309
|
-
var TOTAL_FAILURE_THRESHOLD = 3;
|
|
3310
|
-
var STATE_FILE_NAME = ".ca-failure-state.json";
|
|
3311
|
-
var STATE_MAX_AGE_MS = 60 * 60 * 1e3;
|
|
3312
|
-
var failureCount = 0;
|
|
3313
|
-
var lastFailedTarget = null;
|
|
3314
|
-
var sameTargetCount = 0;
|
|
3315
|
-
function defaultState() {
|
|
3316
|
-
return { count: 0, lastTarget: null, sameTargetCount: 0, timestamp: Date.now() };
|
|
3317
|
-
}
|
|
3318
|
-
function readFailureState(stateDir) {
|
|
3319
|
-
try {
|
|
3320
|
-
const filePath = join(stateDir, STATE_FILE_NAME);
|
|
3321
|
-
if (!existsSync(filePath)) return defaultState();
|
|
3322
|
-
const raw = readFileSync(filePath, "utf-8");
|
|
3323
|
-
const parsed = JSON.parse(raw);
|
|
3324
|
-
if (Date.now() - parsed.timestamp > STATE_MAX_AGE_MS) return defaultState();
|
|
3325
|
-
return parsed;
|
|
3326
|
-
} catch {
|
|
3327
|
-
return defaultState();
|
|
3328
|
-
}
|
|
4029
|
+
function registerPhaseCheckCommand(program2) {
|
|
4030
|
+
const phaseCheck = program2.command("phase-check").description("Manage LFG phase state").option("--dry-run", "Show what would be done without making changes");
|
|
4031
|
+
const getDryRun = () => phaseCheck.opts().dryRun ?? false;
|
|
4032
|
+
const repoRoot = () => getRepoRoot();
|
|
4033
|
+
registerPhaseSubcommands(phaseCheck, getDryRun, repoRoot);
|
|
4034
|
+
program2.command("phase-clean").description("Remove phase state file (alias for `phase-check clean`)").action(() => {
|
|
4035
|
+
cleanPhaseState(repoRoot());
|
|
4036
|
+
console.log("Phase state cleaned.");
|
|
4037
|
+
});
|
|
3329
4038
|
}
|
|
3330
|
-
|
|
4039
|
+
|
|
4040
|
+
// src/setup/hooks-phase-guard.ts
|
|
4041
|
+
function processPhaseGuard(repoRoot, toolName, _toolInput) {
|
|
3331
4042
|
try {
|
|
3332
|
-
|
|
3333
|
-
|
|
4043
|
+
if (toolName !== "Edit" && toolName !== "Write") return {};
|
|
4044
|
+
const state = getPhaseState(repoRoot);
|
|
4045
|
+
if (state === null || !state.lfg_active) return {};
|
|
4046
|
+
const expectedSkillPath = `.claude/skills/compound/${state.current_phase}/SKILL.md`;
|
|
4047
|
+
const skillRead = state.skills_read.includes(expectedSkillPath);
|
|
4048
|
+
if (!skillRead) {
|
|
4049
|
+
return {
|
|
4050
|
+
hookSpecificOutput: {
|
|
4051
|
+
hookEventName: "PreToolUse",
|
|
4052
|
+
additionalContext: `PHASE GUARD WARNING: You are in LFG phase ${state.phase_index}/5 (${state.current_phase}) but have NOT read the skill file yet. Read ${expectedSkillPath} before continuing.`
|
|
4053
|
+
}
|
|
4054
|
+
};
|
|
4055
|
+
}
|
|
4056
|
+
return {};
|
|
3334
4057
|
} catch {
|
|
4058
|
+
return {};
|
|
3335
4059
|
}
|
|
3336
4060
|
}
|
|
3337
|
-
|
|
4061
|
+
|
|
4062
|
+
// src/setup/hooks-read-tracker.ts
|
|
4063
|
+
var SKILL_PATH_PATTERN = /(?:^|\/)\.claude\/skills\/compound\/([^/]+)\/SKILL\.md$/;
|
|
4064
|
+
function normalizePath(path2) {
|
|
4065
|
+
return path2.replaceAll("\\", "/");
|
|
4066
|
+
}
|
|
4067
|
+
function toCanonicalSkillPath(filePath) {
|
|
4068
|
+
const normalized = normalizePath(filePath);
|
|
4069
|
+
const match = SKILL_PATH_PATTERN.exec(normalized);
|
|
4070
|
+
if (!match?.[1]) return null;
|
|
4071
|
+
return `.claude/skills/compound/${match[1]}/SKILL.md`;
|
|
4072
|
+
}
|
|
4073
|
+
function processReadTracker(repoRoot, toolName, toolInput) {
|
|
3338
4074
|
try {
|
|
3339
|
-
|
|
3340
|
-
|
|
4075
|
+
if (toolName !== "Read") return {};
|
|
4076
|
+
const state = getPhaseState(repoRoot);
|
|
4077
|
+
if (state === null || !state.lfg_active) return {};
|
|
4078
|
+
const filePath = typeof toolInput.file_path === "string" ? toolInput.file_path : null;
|
|
4079
|
+
if (filePath === null) return {};
|
|
4080
|
+
const canonicalPath = toCanonicalSkillPath(filePath);
|
|
4081
|
+
if (canonicalPath === null) return {};
|
|
4082
|
+
if (!state.skills_read.includes(canonicalPath)) {
|
|
4083
|
+
updatePhaseState(repoRoot, {
|
|
4084
|
+
skills_read: [...state.skills_read, canonicalPath]
|
|
4085
|
+
});
|
|
4086
|
+
}
|
|
4087
|
+
return {};
|
|
3341
4088
|
} catch {
|
|
4089
|
+
return {};
|
|
3342
4090
|
}
|
|
3343
4091
|
}
|
|
3344
|
-
|
|
3345
|
-
|
|
3346
|
-
|
|
3347
|
-
|
|
3348
|
-
|
|
3349
|
-
if (
|
|
3350
|
-
}
|
|
3351
|
-
|
|
3352
|
-
if (toolName === "Bash" && typeof toolInput.command === "string") {
|
|
3353
|
-
const trimmed = toolInput.command.trim();
|
|
3354
|
-
const firstSpace = trimmed.indexOf(" ");
|
|
3355
|
-
return firstSpace === -1 ? trimmed : trimmed.slice(0, firstSpace);
|
|
3356
|
-
}
|
|
3357
|
-
if ((toolName === "Edit" || toolName === "Write") && typeof toolInput.file_path === "string") {
|
|
3358
|
-
return toolInput.file_path;
|
|
3359
|
-
}
|
|
3360
|
-
return null;
|
|
4092
|
+
|
|
4093
|
+
// src/setup/hooks-stop-audit.ts
|
|
4094
|
+
function hasTransitionEvidence(state) {
|
|
4095
|
+
if (state.phase_index === 5) return true;
|
|
4096
|
+
const nextPhase = PHASES[state.phase_index];
|
|
4097
|
+
if (nextPhase === void 0) return false;
|
|
4098
|
+
const nextSkillPath = `.claude/skills/compound/${nextPhase}/SKILL.md`;
|
|
4099
|
+
return state.skills_read.includes(nextSkillPath);
|
|
3361
4100
|
}
|
|
3362
|
-
function
|
|
3363
|
-
|
|
3364
|
-
|
|
3365
|
-
|
|
3366
|
-
|
|
3367
|
-
|
|
3368
|
-
|
|
3369
|
-
|
|
3370
|
-
|
|
3371
|
-
if (target !== null && target === lastFailedTarget) {
|
|
3372
|
-
sameTargetCount++;
|
|
3373
|
-
} else {
|
|
3374
|
-
sameTargetCount = 1;
|
|
3375
|
-
lastFailedTarget = target;
|
|
3376
|
-
}
|
|
3377
|
-
const shouldShowTip = sameTargetCount >= SAME_TARGET_THRESHOLD || failureCount >= TOTAL_FAILURE_THRESHOLD;
|
|
3378
|
-
if (shouldShowTip) {
|
|
3379
|
-
resetFailureState(stateDir);
|
|
4101
|
+
function processStopAudit(repoRoot, stopHookActive = false) {
|
|
4102
|
+
try {
|
|
4103
|
+
if (stopHookActive) return {};
|
|
4104
|
+
const state = getPhaseState(repoRoot);
|
|
4105
|
+
if (state === null || !state.lfg_active) return {};
|
|
4106
|
+
const expectedGate = expectedGateForPhase(state.phase_index);
|
|
4107
|
+
if (expectedGate === null) return {};
|
|
4108
|
+
if (state.gates_passed.includes(expectedGate)) return {};
|
|
4109
|
+
if (!hasTransitionEvidence(state)) return {};
|
|
3380
4110
|
return {
|
|
3381
|
-
|
|
3382
|
-
|
|
3383
|
-
additionalContext: FAILURE_TIP
|
|
3384
|
-
}
|
|
4111
|
+
continue: false,
|
|
4112
|
+
stopReason: `PHASE GATE NOT VERIFIED: ${state.current_phase} requires gate '${expectedGate}'. Run: npx ca phase-check gate ${expectedGate}`
|
|
3385
4113
|
};
|
|
4114
|
+
} catch {
|
|
4115
|
+
return {};
|
|
3386
4116
|
}
|
|
3387
|
-
if (stateDir) {
|
|
3388
|
-
writeFailureState(stateDir, {
|
|
3389
|
-
count: failureCount,
|
|
3390
|
-
lastTarget: lastFailedTarget,
|
|
3391
|
-
sameTargetCount,
|
|
3392
|
-
timestamp: Date.now()
|
|
3393
|
-
});
|
|
3394
|
-
}
|
|
3395
|
-
return {};
|
|
3396
|
-
}
|
|
3397
|
-
function processToolSuccess(stateDir) {
|
|
3398
|
-
resetFailureState(stateDir);
|
|
3399
4117
|
}
|
|
4118
|
+
|
|
4119
|
+
// src/setup/hooks.ts
|
|
4120
|
+
var HOOK_FILE_MODE = 493;
|
|
3400
4121
|
function hasCompoundAgentHook(content) {
|
|
3401
4122
|
return content.includes(HOOK_MARKER);
|
|
3402
4123
|
}
|
|
3403
4124
|
async function getGitHooksDir(repoRoot) {
|
|
3404
|
-
const
|
|
3405
|
-
if (!existsSync(
|
|
4125
|
+
const gitPath = join(repoRoot, ".git");
|
|
4126
|
+
if (!existsSync(gitPath)) {
|
|
3406
4127
|
return null;
|
|
3407
4128
|
}
|
|
4129
|
+
let gitDir = gitPath;
|
|
4130
|
+
if (lstatSync(gitPath).isFile()) {
|
|
4131
|
+
const content = readFileSync(gitPath, "utf-8").trim();
|
|
4132
|
+
const match = /^gitdir:\s*(.+)$/.exec(content);
|
|
4133
|
+
if (!match?.[1]) return null;
|
|
4134
|
+
gitDir = resolve(repoRoot, match[1]);
|
|
4135
|
+
}
|
|
3408
4136
|
const configPath2 = join(gitDir, "config");
|
|
3409
4137
|
if (existsSync(configPath2)) {
|
|
3410
4138
|
const config = await readFile(configPath2, "utf-8");
|
|
@@ -3477,6 +4205,36 @@ async function installPreCommitHook(repoRoot) {
|
|
|
3477
4205
|
chmodSync(hookPath, HOOK_FILE_MODE);
|
|
3478
4206
|
return { status: "installed" };
|
|
3479
4207
|
}
|
|
4208
|
+
async function installPostCommitHook(repoRoot) {
|
|
4209
|
+
const gitHooksDir = await getGitHooksDir(repoRoot);
|
|
4210
|
+
if (!gitHooksDir) {
|
|
4211
|
+
return { status: "not_git_repo" };
|
|
4212
|
+
}
|
|
4213
|
+
await mkdir(gitHooksDir, { recursive: true });
|
|
4214
|
+
const hookPath = join(gitHooksDir, "post-commit");
|
|
4215
|
+
if (existsSync(hookPath)) {
|
|
4216
|
+
const content = await readFile(hookPath, "utf-8");
|
|
4217
|
+
if (content.includes(POST_COMMIT_HOOK_MARKER)) {
|
|
4218
|
+
return { status: "already_installed" };
|
|
4219
|
+
}
|
|
4220
|
+
const lines = content.split("\n");
|
|
4221
|
+
const exitLineIndex = findFirstTopLevelExitLine(lines);
|
|
4222
|
+
let newContent;
|
|
4223
|
+
if (exitLineIndex === -1) {
|
|
4224
|
+
newContent = content.trimEnd() + "\n" + COMPOUND_AGENT_POST_COMMIT_BLOCK;
|
|
4225
|
+
} else {
|
|
4226
|
+
const before = lines.slice(0, exitLineIndex);
|
|
4227
|
+
const after = lines.slice(exitLineIndex);
|
|
4228
|
+
newContent = before.join("\n") + COMPOUND_AGENT_POST_COMMIT_BLOCK + after.join("\n");
|
|
4229
|
+
}
|
|
4230
|
+
await writeFile(hookPath, newContent, "utf-8");
|
|
4231
|
+
chmodSync(hookPath, HOOK_FILE_MODE);
|
|
4232
|
+
return { status: "appended" };
|
|
4233
|
+
}
|
|
4234
|
+
await writeFile(hookPath, POST_COMMIT_HOOK_TEMPLATE, "utf-8");
|
|
4235
|
+
chmodSync(hookPath, HOOK_FILE_MODE);
|
|
4236
|
+
return { status: "installed" };
|
|
4237
|
+
}
|
|
3480
4238
|
async function readStdin() {
|
|
3481
4239
|
const chunks = [];
|
|
3482
4240
|
for await (const chunk of process.stdin) {
|
|
@@ -3506,7 +4264,7 @@ async function runPostToolFailureHook() {
|
|
|
3506
4264
|
console.log(JSON.stringify({}));
|
|
3507
4265
|
return;
|
|
3508
4266
|
}
|
|
3509
|
-
const stateDir = join(
|
|
4267
|
+
const stateDir = join(getRepoRoot(), ".claude");
|
|
3510
4268
|
const result = processToolFailure(data.tool_name, data.tool_input ?? {}, stateDir);
|
|
3511
4269
|
console.log(JSON.stringify(result));
|
|
3512
4270
|
} catch {
|
|
@@ -3516,7 +4274,7 @@ async function runPostToolFailureHook() {
|
|
|
3516
4274
|
async function runPostToolSuccessHook() {
|
|
3517
4275
|
try {
|
|
3518
4276
|
await readStdin();
|
|
3519
|
-
const stateDir = join(
|
|
4277
|
+
const stateDir = join(getRepoRoot(), ".claude");
|
|
3520
4278
|
processToolSuccess(stateDir);
|
|
3521
4279
|
console.log(JSON.stringify({}));
|
|
3522
4280
|
} catch {
|
|
@@ -3531,7 +4289,7 @@ async function runToolHook(processor) {
|
|
|
3531
4289
|
console.log(JSON.stringify({}));
|
|
3532
4290
|
return;
|
|
3533
4291
|
}
|
|
3534
|
-
console.log(JSON.stringify(processor(
|
|
4292
|
+
console.log(JSON.stringify(processor(getRepoRoot(), data.tool_name, data.tool_input ?? {})));
|
|
3535
4293
|
} catch {
|
|
3536
4294
|
console.log(JSON.stringify({}));
|
|
3537
4295
|
}
|
|
@@ -3540,7 +4298,7 @@ async function runStopAuditHook() {
|
|
|
3540
4298
|
try {
|
|
3541
4299
|
const input = await readStdin();
|
|
3542
4300
|
const data = JSON.parse(input);
|
|
3543
|
-
console.log(JSON.stringify(processStopAudit(
|
|
4301
|
+
console.log(JSON.stringify(processStopAudit(getRepoRoot(), data.stop_hook_active ?? false)));
|
|
3544
4302
|
} catch {
|
|
3545
4303
|
console.log(JSON.stringify({}));
|
|
3546
4304
|
}
|
|
@@ -3803,6 +4561,10 @@ Analyze the current session's work context: what was accomplished, what problems
|
|
|
3803
4561
|
7. Summarize the work context for lesson extraction
|
|
3804
4562
|
8. For large diffs spanning multiple modules, spawn opus subagents to analyze each module in parallel. Merge findings before sharing.
|
|
3805
4563
|
|
|
4564
|
+
## Literature
|
|
4565
|
+
- Consult \`docs/compound/research/learning-systems/\` for knowledge compounding theory and context analysis methodology
|
|
4566
|
+
- Run \`npx ca knowledge "context analysis work review"\` for indexed knowledge
|
|
4567
|
+
|
|
3806
4568
|
## Collaboration
|
|
3807
4569
|
Share findings with lesson-extractor via direct message so it can extract actionable lessons from the context. Pass results to other compound agents as needed.
|
|
3808
4570
|
|
|
@@ -3841,6 +4603,10 @@ Share findings with pattern-matcher and solution-writer via direct message so th
|
|
|
3841
4603
|
## Deployment
|
|
3842
4604
|
AgentTeam member in the **compound** phase. Spawned via TeamCreate. Communicate with teammates via SendMessage.
|
|
3843
4605
|
|
|
4606
|
+
## Literature
|
|
4607
|
+
- Consult \`docs/compound/research/learning-systems/\` for lesson extraction methodology and knowledge representation
|
|
4608
|
+
- Run \`npx ca knowledge "lesson extraction knowledge management"\` for indexed knowledge
|
|
4609
|
+
|
|
3844
4610
|
## Output Format
|
|
3845
4611
|
Per lesson:
|
|
3846
4612
|
- **Insight**: The actionable directive
|
|
@@ -3874,6 +4640,10 @@ Share classifications with solution-writer via direct message so it knows which
|
|
|
3874
4640
|
## Deployment
|
|
3875
4641
|
AgentTeam member in the **compound** phase. Spawned via TeamCreate. Communicate with teammates via SendMessage.
|
|
3876
4642
|
|
|
4643
|
+
## Literature
|
|
4644
|
+
- Consult \`docs/compound/research/learning-systems/\` for deduplication strategies and knowledge graph methodology
|
|
4645
|
+
- Run \`npx ca knowledge "pattern matching deduplication"\` for indexed knowledge
|
|
4646
|
+
|
|
3877
4647
|
## Output Format
|
|
3878
4648
|
Per lesson:
|
|
3879
4649
|
- **Classification**: New / Duplicate / Reinforcement / Contradiction
|
|
@@ -3903,6 +4673,10 @@ Transform approved lessons into properly formatted memory items that follow the
|
|
|
3903
4673
|
5. Set supersedes or related links when the lesson updates existing knowledge
|
|
3904
4674
|
6. Store via \`npx ca learn\`
|
|
3905
4675
|
|
|
4676
|
+
## Literature
|
|
4677
|
+
- Consult \`docs/compound/research/learning-systems/\` for knowledge representation and lesson schema design
|
|
4678
|
+
- Run \`npx ca knowledge "knowledge storage representation"\` for indexed knowledge
|
|
4679
|
+
|
|
3906
4680
|
## Collaboration
|
|
3907
4681
|
Share findings with other agents via direct message to communicate storage outcomes. Collaborate with pattern-matcher on borderline classifications.
|
|
3908
4682
|
|
|
@@ -3936,6 +4710,10 @@ Write comprehensive failing tests that define expected behavior before any imple
|
|
|
3936
4710
|
7. Do NOT mock the thing being tested
|
|
3937
4711
|
8. For multiple test files, spawn opus subagents to write tests in parallel (1 subagent per test file or module). Coordinate to avoid duplicate test setup.
|
|
3938
4712
|
|
|
4713
|
+
## Literature
|
|
4714
|
+
- Consult \`docs/compound/research/tdd/\` for test-first development evidence and methodology
|
|
4715
|
+
- Run \`npx ca knowledge "TDD test design"\` for indexed knowledge on testing patterns
|
|
4716
|
+
|
|
3939
4717
|
## Memory Integration
|
|
3940
4718
|
Run \`npx ca search\` with the task description before writing tests. Look for known patterns, edge cases, and past mistakes relevant to the feature area.
|
|
3941
4719
|
|
|
@@ -3970,6 +4748,10 @@ Write the minimum code necessary to make failing tests pass. Follow the TDD gree
|
|
|
3970
4748
|
7. After all tests pass, look for obvious refactoring opportunities
|
|
3971
4749
|
8. For multiple implementation files, spawn opus subagents to implement in parallel (1 subagent per module). Coordinate on shared interfaces via SendMessage.
|
|
3972
4750
|
|
|
4751
|
+
## Literature
|
|
4752
|
+
- Consult \`docs/compound/research/tdd/\` for TDD green-phase methodology and minimal implementation strategies
|
|
4753
|
+
- Run \`npx ca knowledge "TDD implementation"\` for indexed knowledge on implementation patterns
|
|
4754
|
+
|
|
3973
4755
|
## Memory Integration
|
|
3974
4756
|
Run \`npx ca search\` with the task description for known patterns, solutions, and implementation approaches relevant to the feature area.
|
|
3975
4757
|
|
|
@@ -4070,6 +4852,10 @@ Review code changes for security vulnerabilities including OWASP top 10, injecti
|
|
|
4070
4852
|
7. Check dependency versions for known CVEs
|
|
4071
4853
|
8. For large diffs, spawn opus subagents to review different file groups in parallel (e.g., 1 per module). Merge findings and deduplicate.
|
|
4072
4854
|
|
|
4855
|
+
## Literature
|
|
4856
|
+
- Consult \`docs/compound/research/code-review/\` for systematic review methodology and severity classification
|
|
4857
|
+
- Run \`npx ca knowledge "security review OWASP"\` for indexed security knowledge
|
|
4858
|
+
|
|
4073
4859
|
## Collaboration
|
|
4074
4860
|
Share cross-cutting findings via SendMessage: security issues impacting architecture go to architecture-reviewer; secrets in test fixtures go to test-coverage-reviewer.
|
|
4075
4861
|
|
|
@@ -4101,6 +4887,10 @@ Review code for architectural consistency, pattern compliance, module boundary i
|
|
|
4101
4887
|
6. Check that dependencies flow in the correct direction
|
|
4102
4888
|
7. For changes spanning multiple modules, spawn opus subagents to review each module boundary in parallel.
|
|
4103
4889
|
|
|
4890
|
+
## Literature
|
|
4891
|
+
- Consult \`docs/compound/research/code-review/\` for systematic review methodology and architectural assessment frameworks
|
|
4892
|
+
- Run \`npx ca knowledge "architecture module design"\` for indexed knowledge on design patterns
|
|
4893
|
+
|
|
4104
4894
|
## Collaboration
|
|
4105
4895
|
Share cross-cutting findings via SendMessage: architecture issues with performance implications go to performance-reviewer; structural violations creating security risks go to security-reviewer.
|
|
4106
4896
|
|
|
@@ -4131,6 +4921,10 @@ Review code for performance bottlenecks, algorithmic complexity issues, unnecess
|
|
|
4131
4921
|
6. Verify resources are properly closed/released
|
|
4132
4922
|
7. For multiple hot paths, spawn opus subagents to profile different modules in parallel.
|
|
4133
4923
|
|
|
4924
|
+
## Literature
|
|
4925
|
+
- Consult \`docs/compound/research/code-review/\` for systematic performance analysis frameworks
|
|
4926
|
+
- Run \`npx ca knowledge "performance review"\` for indexed knowledge on performance patterns
|
|
4927
|
+
|
|
4134
4928
|
## Collaboration
|
|
4135
4929
|
Share cross-cutting findings via SendMessage: performance issues needing test coverage go to test-coverage-reviewer; performance fixes requiring architectural changes go to architecture-reviewer.
|
|
4136
4930
|
|
|
@@ -4162,6 +4956,11 @@ Review tests for meaningful assertions, edge case coverage, and absence of cargo
|
|
|
4162
4956
|
7. Ensure property-based tests exist for pure functions
|
|
4163
4957
|
8. For many test files, spawn opus subagents to review test files in parallel (1 per test file).
|
|
4164
4958
|
|
|
4959
|
+
## Literature
|
|
4960
|
+
- Consult \`docs/compound/research/tdd/\` for test quality assessment and coverage methodology
|
|
4961
|
+
- Consult \`docs/compound/research/property-testing/\` for property-based testing theory
|
|
4962
|
+
- Run \`npx ca knowledge "test coverage quality"\` for indexed knowledge
|
|
4963
|
+
|
|
4165
4964
|
## Collaboration
|
|
4166
4965
|
Share cross-cutting findings via SendMessage: cargo-cult tests hiding security issues go to security-reviewer; unnecessary test complexity goes to simplicity-reviewer.
|
|
4167
4966
|
|
|
@@ -4192,6 +4991,10 @@ Review code for unnecessary complexity, over-engineering, premature abstraction,
|
|
|
4192
4991
|
5. Flag feature flags or config for single-use cases
|
|
4193
4992
|
6. Verify no "just in case" code exists
|
|
4194
4993
|
|
|
4994
|
+
## Literature
|
|
4995
|
+
- Consult \`docs/compound/research/code-review/\` for over-engineering detection and YAGNI assessment methodology
|
|
4996
|
+
- Run \`npx ca knowledge "simplicity over-engineering"\` for indexed knowledge
|
|
4997
|
+
|
|
4195
4998
|
## Collaboration
|
|
4196
4999
|
Share cross-cutting findings via SendMessage: over-engineering obscuring security concerns goes to security-reviewer; premature abstractions creating wrong module boundaries goes to architecture-reviewer.
|
|
4197
5000
|
|
|
@@ -4229,6 +5032,10 @@ Cluster similar lessons from memory and synthesize them into testable CCT (Compo
|
|
|
4229
5032
|
6. Skip singleton lessons (not enough signal to form a pattern)
|
|
4230
5033
|
7. For many clusters, spawn opus subagents to synthesize patterns from different clusters in parallel.
|
|
4231
5034
|
|
|
5035
|
+
## Literature
|
|
5036
|
+
- Consult \`docs/compound/research/learning-systems/\` for knowledge compounding theory and pattern synthesis
|
|
5037
|
+
- Run \`npx ca knowledge "lesson clustering compounding"\` for indexed knowledge on learning systems
|
|
5038
|
+
|
|
4232
5039
|
## Collaboration
|
|
4233
5040
|
Share synthesized patterns with the team lead via direct message for review.
|
|
4234
5041
|
|
|
@@ -4321,6 +5128,11 @@ invariant-designer -> **CCT Subagent** -> test-first-enforcer
|
|
|
4321
5128
|
- Priority (REQUIRED vs SUGGESTED)
|
|
4322
5129
|
5. Pass requirements to test-first-enforcer for inclusion
|
|
4323
5130
|
|
|
5131
|
+
## Literature
|
|
5132
|
+
- Consult \`docs/compound/research/tdd/\` for corrective testing theory and mistake-driven test design
|
|
5133
|
+
- Consult \`docs/compound/research/learning-systems/\` for pattern clustering and knowledge synthesis methodology
|
|
5134
|
+
- Run \`npx ca knowledge "corrective testing patterns"\` for indexed knowledge
|
|
5135
|
+
|
|
4324
5136
|
## Deployment
|
|
4325
5137
|
Subagent in the TDD pipeline. Return findings directly to the caller.
|
|
4326
5138
|
|
|
@@ -4354,6 +5166,10 @@ module-boundary-reviewer -> **Drift Detector** -> implementation-reviewer
|
|
|
4354
5166
|
5. Use \`npx ca search\` for past architectural decisions that may apply
|
|
4355
5167
|
6. Report any deviation, even if the implementation "works"
|
|
4356
5168
|
|
|
5169
|
+
## Literature
|
|
5170
|
+
- Consult \`docs/compound/research/property-testing/\` for invariant-driven development and constraint verification
|
|
5171
|
+
- Run \`npx ca knowledge "invariant drift detection"\` for indexed knowledge on drift patterns
|
|
5172
|
+
|
|
4357
5173
|
## Deployment
|
|
4358
5174
|
Subagent in the TDD pipeline. Return findings directly to the caller.
|
|
4359
5175
|
|
|
@@ -4472,6 +5288,19 @@ $ARGUMENTS
|
|
|
4472
5288
|
# Test Clean
|
|
4473
5289
|
|
|
4474
5290
|
**MANDATORY FIRST STEP -- NON-NEGOTIABLE**: Use the Read tool to open and read \`.claude/skills/compound/test-cleaner/SKILL.md\` NOW. Do NOT proceed until you have read the complete skill file. It contains the full workflow you must follow.
|
|
5291
|
+
`,
|
|
5292
|
+
"get-a-phd.md": `---
|
|
5293
|
+
name: compound:get-a-phd
|
|
5294
|
+
description: Deep PhD-level research for working subagents
|
|
5295
|
+
argument-hint: "<focus area or epic ID>"
|
|
5296
|
+
---
|
|
5297
|
+
$ARGUMENTS
|
|
5298
|
+
|
|
5299
|
+
# Get a PhD
|
|
5300
|
+
|
|
5301
|
+
**MANDATORY FIRST STEP -- NON-NEGOTIABLE**: Use the Read tool to open and read \`.claude/skills/compound/researcher/SKILL.md\` NOW. Do NOT proceed until you have read the complete skill file.
|
|
5302
|
+
|
|
5303
|
+
Then: scan docs/compound/research/ for gaps, propose topics via AskUserQuestion, spawn parallel researcher subagents.
|
|
4475
5304
|
`,
|
|
4476
5305
|
// =========================================================================
|
|
4477
5306
|
// Utility commands (kept: learn, prime)
|
|
@@ -4578,6 +5407,8 @@ npx ca doctor
|
|
|
4578
5407
|
index.jsonl # Memory items (git-tracked source of truth)
|
|
4579
5408
|
.cache/
|
|
4580
5409
|
lessons.sqlite # Rebuildable search index (.gitignore)
|
|
5410
|
+
docs/compound/
|
|
5411
|
+
research/ # PhD-level research docs for agent knowledge
|
|
4581
5412
|
\`\`\`
|
|
4582
5413
|
|
|
4583
5414
|
---
|
|
@@ -4588,6 +5419,7 @@ npx ca doctor
|
|
|
4588
5419
|
|------|---------|
|
|
4589
5420
|
| Capture a lesson | \`npx ca learn "insight" --trigger "what happened"\` |
|
|
4590
5421
|
| Search memory | \`npx ca search "keywords"\` |
|
|
5422
|
+
| Search docs knowledge | \`npx ca knowledge "query"\` |
|
|
4591
5423
|
| Check plan against memory | \`npx ca check-plan --plan "description"\` |
|
|
4592
5424
|
| View stats | \`npx ca stats\` |
|
|
4593
5425
|
| Run full workflow | \`/compound:lfg <epic-id>\` |
|
|
@@ -4942,6 +5774,14 @@ Skills are instructions that Claude reads before executing each phase. They live
|
|
|
4942
5774
|
|
|
4943
5775
|
**What it does**: Validates the epic, runs \`npx ca worktree create <epic-id>\`, verifies output, and informs the user they can proceed with \`/compound:lfg\`.
|
|
4944
5776
|
|
|
5777
|
+
### \`/compound:get-a-phd\`
|
|
5778
|
+
|
|
5779
|
+
**Purpose**: Conduct deep, PhD-level research to build knowledge for working subagents.
|
|
5780
|
+
|
|
5781
|
+
**When invoked**: When agents need domain knowledge not yet covered in \`docs/compound/research/\`.
|
|
5782
|
+
|
|
5783
|
+
**What it does**: Analyzes beads epics for knowledge gaps, checks existing docs coverage, proposes research topics for user confirmation, spawns parallel researcher subagents, and stores output at \`docs/compound/research/<topic>/<slug>.md\`.
|
|
5784
|
+
|
|
4945
5785
|
---
|
|
4946
5786
|
|
|
4947
5787
|
## Skill invocation
|
|
@@ -4956,6 +5796,7 @@ Skills are invoked as Claude Code slash commands:
|
|
|
4956
5796
|
/compound:compound # Start compound phase
|
|
4957
5797
|
/compound:lfg <epic-id> # Run all phases end-to-end
|
|
4958
5798
|
/compound:set-worktree <epic-id> # Set up worktree before LFG
|
|
5799
|
+
/compound:get-a-phd <focus> # Deep research for agent knowledge
|
|
4959
5800
|
\`\`\`
|
|
4960
5801
|
|
|
4961
5802
|
Each skill reads its SKILL.md file from \`.claude/skills/compound/<phase>/SKILL.md\` at invocation time. Skills are never executed from memory.
|
|
@@ -5032,6 +5873,7 @@ Compound-agent installs five hooks into \`.claude/settings.json\`:
|
|
|
5032
5873
|
|
|
5033
5874
|
\`\`\`bash
|
|
5034
5875
|
npx ca search "feature area keywords"
|
|
5876
|
+
npx ca knowledge "architecture topic"
|
|
5035
5877
|
npx ca check-plan --plan "description of what you are about to implement"
|
|
5036
5878
|
\`\`\`
|
|
5037
5879
|
|
|
@@ -5135,7 +5977,7 @@ Explore the problem space before committing to a solution. This phase produces a
|
|
|
5135
5977
|
|
|
5136
5978
|
## Methodology
|
|
5137
5979
|
1. Ask "why" before "how" -- understand the real problem
|
|
5138
|
-
2. Search memory with \`npx ca search\` for similar past features and known constraints
|
|
5980
|
+
2. Search memory with \`npx ca search\` and docs with \`npx ca knowledge "relevant topic"\` for similar past features and known constraints
|
|
5139
5981
|
3. Spawn **subagents** via Task tool in parallel for research (lightweight, no inter-agent coordination):
|
|
5140
5982
|
- Available agents: \`.claude/agents/compound/repo-analyst.md\`, \`memory-analyst.md\`
|
|
5141
5983
|
- Or use \`subagent_type: Explore\` for ad-hoc research
|
|
@@ -5149,7 +5991,7 @@ Explore the problem space before committing to a solution. This phase produces a
|
|
|
5149
5991
|
10. Auto-create ADR files in \`docs/decisions/\` for significant decisions (lightweight: Status, Context, Decision, Consequences)
|
|
5150
5992
|
|
|
5151
5993
|
## Memory Integration
|
|
5152
|
-
- Run \`npx ca search\` with relevant keywords before generating approaches
|
|
5994
|
+
- Run \`npx ca search\` and \`npx ca knowledge "relevant topic"\` with relevant keywords before generating approaches
|
|
5153
5995
|
- Look for past architectural decisions, pitfalls, and preferences
|
|
5154
5996
|
- If the problem domain matches past work, review those lessons first
|
|
5155
5997
|
|
|
@@ -5191,7 +6033,7 @@ Create a concrete implementation plan by decomposing work into small, testable t
|
|
|
5191
6033
|
|
|
5192
6034
|
## Methodology
|
|
5193
6035
|
1. Review brainstorm output for decisions and open questions
|
|
5194
|
-
2. Search memory with \`npx ca search\` for architectural patterns and past mistakes
|
|
6036
|
+
2. Search memory with \`npx ca search\` and docs with \`npx ca knowledge "relevant topic"\` for architectural patterns and past mistakes
|
|
5195
6037
|
3. Spawn **subagents** via Task tool in parallel for research (lightweight, no inter-agent coordination):
|
|
5196
6038
|
- Available agents: \`.claude/agents/compound/repo-analyst.md\`, \`memory-analyst.md\`
|
|
5197
6039
|
- For complex features, deploy MULTIPLE analysts per domain area
|
|
@@ -5207,7 +6049,7 @@ Create a concrete implementation plan by decomposing work into small, testable t
|
|
|
5207
6049
|
12. Run \`npx ca worktree wire-deps <epic-id>\` to connect merge dependencies (graceful no-op if no worktree is active)
|
|
5208
6050
|
|
|
5209
6051
|
## Memory Integration
|
|
5210
|
-
- Run \`npx ca search\` for patterns related to the feature area
|
|
6052
|
+
- Run \`npx ca search\` and \`npx ca knowledge "relevant topic"\` for patterns related to the feature area
|
|
5211
6053
|
- Look for past planning mistakes (missing dependencies, unclear criteria)
|
|
5212
6054
|
- Check for preferred architectural patterns in this codebase
|
|
5213
6055
|
|
|
@@ -5488,7 +6330,7 @@ description: Deep research producing structured survey documents for informed de
|
|
|
5488
6330
|
# Researcher Skill
|
|
5489
6331
|
|
|
5490
6332
|
## Overview
|
|
5491
|
-
Conduct deep research on a topic and produce a structured survey document following the project's research template. This skill spawns parallel research subagents to gather comprehensive information, then synthesizes findings into a PhD-depth document stored in \`docs/research/\`.
|
|
6333
|
+
Conduct deep research on a topic and produce a structured survey document following the project's research template. This skill spawns parallel research subagents to gather comprehensive information, then synthesizes findings into a PhD-depth document stored in \`docs/compound/research/\`.
|
|
5492
6334
|
|
|
5493
6335
|
## Methodology
|
|
5494
6336
|
1. Identify the research question, scope, and exclusions
|
|
@@ -5509,16 +6351,16 @@ Conduct deep research on a topic and produce a structured survey document follow
|
|
|
5509
6351
|
- Conclusion
|
|
5510
6352
|
- References (full citations)
|
|
5511
6353
|
- Practitioner Resources (annotated tools/repos)
|
|
5512
|
-
6. Store output at \`docs/research/<topic-slug>.md\` (kebab-case filename)
|
|
6354
|
+
6. Store output at \`docs/compound/research/<topic-slug>.md\` (kebab-case filename)
|
|
5513
6355
|
7. Report key findings back for upstream skill (brainstorm/plan) to act on
|
|
5514
6356
|
|
|
5515
6357
|
## Memory Integration
|
|
5516
6358
|
- Run \`npx ca search\` with topic keywords before starting research
|
|
5517
|
-
- Check for existing research docs in \`docs/research/\` that overlap
|
|
6359
|
+
- Check for existing research docs in \`docs/compound/research/\` that overlap
|
|
5518
6360
|
- After completion, key findings can be captured via \`npx ca learn\`
|
|
5519
6361
|
|
|
5520
6362
|
## Docs Integration
|
|
5521
|
-
- Scan \`docs/research/\` for prior survey documents on related topics
|
|
6363
|
+
- Scan \`docs/compound/research/\` for prior survey documents on related topics
|
|
5522
6364
|
- Check \`docs/decisions/\` for ADRs that inform or constrain the research scope
|
|
5523
6365
|
- Reference existing project docs as primary sources where relevant
|
|
5524
6366
|
|
|
@@ -5613,7 +6455,7 @@ For each phase:
|
|
|
5613
6455
|
1. Announce: "[Phase N/5] PHASE_NAME"
|
|
5614
6456
|
2. Start state: \`npx ca phase-check start <phase>\`
|
|
5615
6457
|
3. Read the phase skill file (see above)
|
|
5616
|
-
4. Run \`npx ca search\` with the current goal -- display results before proceeding
|
|
6458
|
+
4. Run \`npx ca search\` and \`npx ca knowledge\` with the current goal -- display results before proceeding
|
|
5617
6459
|
5. Execute the phase following the skill instructions
|
|
5618
6460
|
6. Update epic state: \`bd update <epic-id> --notes="Phase: NAME COMPLETE | Next: NEXT"\`
|
|
5619
6461
|
7. Verify phase gate before proceeding to the next phase
|
|
@@ -5850,6 +6692,47 @@ async function installDocTemplates(repoRoot) {
|
|
|
5850
6692
|
}
|
|
5851
6693
|
return created;
|
|
5852
6694
|
}
|
|
6695
|
+
async function installResearchDocs(repoRoot) {
|
|
6696
|
+
const pkgRoot = join(dirname(fileURLToPath(import.meta.url)), "..");
|
|
6697
|
+
const srcDir = join(pkgRoot, "docs", "research");
|
|
6698
|
+
if (!existsSync(srcDir)) {
|
|
6699
|
+
return false;
|
|
6700
|
+
}
|
|
6701
|
+
const destDir = join(repoRoot, "docs", "compound", "research");
|
|
6702
|
+
await mkdir(destDir, { recursive: true });
|
|
6703
|
+
let created = false;
|
|
6704
|
+
async function copyDir(src, dest) {
|
|
6705
|
+
const entries = await readdir(src, { withFileTypes: true });
|
|
6706
|
+
for (const entry of entries) {
|
|
6707
|
+
const srcPath = join(src, entry.name);
|
|
6708
|
+
const destPath = join(dest, entry.name);
|
|
6709
|
+
if (entry.isDirectory()) {
|
|
6710
|
+
await mkdir(destPath, { recursive: true });
|
|
6711
|
+
await copyDir(srcPath, destPath);
|
|
6712
|
+
} else if (!existsSync(destPath) && entry.name.endsWith(".md")) {
|
|
6713
|
+
let content = await readFile(srcPath, "utf-8");
|
|
6714
|
+
if (entry.name === "index.md") {
|
|
6715
|
+
const patched = content.replace(
|
|
6716
|
+
/^# .*/m,
|
|
6717
|
+
"$&\n\n> Shipped by compound-agent. Source: `docs/research/` in the compound-agent package."
|
|
6718
|
+
);
|
|
6719
|
+
content = patched !== content ? patched : `> Shipped by compound-agent.
|
|
6720
|
+
|
|
6721
|
+
${content}`;
|
|
6722
|
+
}
|
|
6723
|
+
await writeFile(destPath, content, "utf-8");
|
|
6724
|
+
created = true;
|
|
6725
|
+
}
|
|
6726
|
+
}
|
|
6727
|
+
}
|
|
6728
|
+
try {
|
|
6729
|
+
await copyDir(srcDir, destDir);
|
|
6730
|
+
} catch (err) {
|
|
6731
|
+
console.error(`Warning: Could not install research docs: ${err.message}`);
|
|
6732
|
+
return false;
|
|
6733
|
+
}
|
|
6734
|
+
return created;
|
|
6735
|
+
}
|
|
5853
6736
|
var REQUIRED_BUILD_DEPS = ["better-sqlite3", "node-llama-cpp"];
|
|
5854
6737
|
async function ensurePnpmBuildConfig(repoRoot) {
|
|
5855
6738
|
const lockPath = join(repoRoot, "pnpm-lock.yaml");
|
|
@@ -6115,6 +6998,10 @@ async function runSetup(options) {
|
|
|
6115
6998
|
if (!options.skipHooks) {
|
|
6116
6999
|
gitHooks = (await installPreCommitHook(repoRoot)).status;
|
|
6117
7000
|
}
|
|
7001
|
+
let postCommitHook = "skipped";
|
|
7002
|
+
if (!options.skipHooks) {
|
|
7003
|
+
postCommitHook = (await installPostCommitHook(repoRoot)).status;
|
|
7004
|
+
}
|
|
6118
7005
|
const { hooks } = await configureClaudeSettings();
|
|
6119
7006
|
const gitignore = await ensureGitignore(repoRoot);
|
|
6120
7007
|
let modelStatus = "skipped";
|
|
@@ -6136,6 +7023,7 @@ async function runSetup(options) {
|
|
|
6136
7023
|
agentsMd: agentsMdUpdated,
|
|
6137
7024
|
hooks,
|
|
6138
7025
|
gitHooks,
|
|
7026
|
+
postCommitHook,
|
|
6139
7027
|
model: modelStatus,
|
|
6140
7028
|
pnpmConfig,
|
|
6141
7029
|
beads,
|
|
@@ -6205,6 +7093,16 @@ async function runUpdate(repoRoot, dryRun) {
|
|
|
6205
7093
|
const gitignore = dryRun ? { added: [] } : await ensureGitignore(repoRoot);
|
|
6206
7094
|
return { updated, added, configUpdated, upgrade, gitignore };
|
|
6207
7095
|
}
|
|
7096
|
+
var POST_COMMIT_STATUS_MSG = {
|
|
7097
|
+
skipped: "Skipped (--skip-hooks)",
|
|
7098
|
+
not_git_repo: "Skipped (not a git repository)",
|
|
7099
|
+
installed: "Installed (auto-indexes docs/ on commit)",
|
|
7100
|
+
appended: "Appended to existing post-commit hook",
|
|
7101
|
+
already_installed: "Already configured"
|
|
7102
|
+
};
|
|
7103
|
+
function printPostCommitHookStatus(status) {
|
|
7104
|
+
console.log(` Post-commit hook: ${POST_COMMIT_STATUS_MSG[status]}`);
|
|
7105
|
+
}
|
|
6208
7106
|
var MODEL_STATUS_MSG = {
|
|
6209
7107
|
skipped: "Skipped (--skip-model)",
|
|
6210
7108
|
downloaded: "Downloaded",
|
|
@@ -6224,6 +7122,7 @@ async function printSetupResult(result, quiet, repoRoot) {
|
|
|
6224
7122
|
console.log(` AGENTS.md: ${result.agentsMd ? "Updated" : "Already configured"}`);
|
|
6225
7123
|
console.log(` Claude hooks: ${result.hooks ? "Installed" : "Already configured"}`);
|
|
6226
7124
|
printSetupGitHooksStatus(result.gitHooks);
|
|
7125
|
+
printPostCommitHookStatus(result.postCommitHook);
|
|
6227
7126
|
printPnpmConfigStatus(result.pnpmConfig);
|
|
6228
7127
|
printGitignoreStatus(result.gitignore);
|
|
6229
7128
|
console.log(` Model: ${MODEL_STATUS_MSG[result.model]}`);
|
|
@@ -6531,6 +7430,7 @@ async function initAction(cmd, options) {
|
|
|
6531
7430
|
await installPhaseSkills(repoRoot);
|
|
6532
7431
|
await installAgentRoleSkills(repoRoot);
|
|
6533
7432
|
await installDocTemplates(repoRoot);
|
|
7433
|
+
await installResearchDocs(repoRoot);
|
|
6534
7434
|
}
|
|
6535
7435
|
let hookResult = null;
|
|
6536
7436
|
if (!options.skipHooks) {
|
|
@@ -7174,6 +8074,7 @@ var TRUST_LANGUAGE_TEMPLATE = `# Compound Agent Active
|
|
|
7174
8074
|
| Command | Purpose |
|
|
7175
8075
|
|---------|---------|
|
|
7176
8076
|
| \`npx ca search "query"\` | Search lessons - call BEFORE architectural decisions |
|
|
8077
|
+
| \`npx ca knowledge "query"\` | Search docs knowledge - use for architecture context |
|
|
7177
8078
|
| \`npx ca learn "insight"\` | Capture lessons - call AFTER corrections or discoveries |
|
|
7178
8079
|
|
|
7179
8080
|
## Core Constraints
|
|
@@ -7186,7 +8087,7 @@ var TRUST_LANGUAGE_TEMPLATE = `# Compound Agent Active
|
|
|
7186
8087
|
|
|
7187
8088
|
## Retrieval Protocol
|
|
7188
8089
|
|
|
7189
|
-
You MUST call \`npx ca search\` BEFORE:
|
|
8090
|
+
You MUST call \`npx ca search\` and \`npx ca knowledge\` BEFORE:
|
|
7190
8091
|
- Architectural decisions or complex planning
|
|
7191
8092
|
- Implementing patterns you've done before in this repo
|
|
7192
8093
|
|
|
@@ -7675,47 +8576,50 @@ function registerVerifyGatesCommand(program2) {
|
|
|
7675
8576
|
}
|
|
7676
8577
|
|
|
7677
8578
|
// src/changelog-data.ts
|
|
7678
|
-
var CHANGELOG_RECENT = `## [1.
|
|
8579
|
+
var CHANGELOG_RECENT = `## [1.4.0] - 2026-02-22
|
|
7679
8580
|
|
|
7680
|
-
###
|
|
8581
|
+
### Fixed
|
|
7681
8582
|
|
|
7682
|
-
- **
|
|
8583
|
+
- **Plugin manifest**: Corrected repository URL from \`compound_agent\` to \`learning_agent\`
|
|
7683
8584
|
|
|
7684
|
-
|
|
8585
|
+
### Changed
|
|
7685
8586
|
|
|
7686
|
-
|
|
8587
|
+
- **Version consolidation**: Roll-up release of v1.3.7\u2013v1.3.9 production readiness fixes (test pipeline hardening, data integrity, two-phase vector search, FTS5 sanitization)
|
|
7687
8588
|
|
|
7688
|
-
|
|
7689
|
-
- **Test coverage**: 19 new tests for \`ca about\` command, changelog extraction/escaping, and \`--update\` doc migration path
|
|
8589
|
+
## [1.3.9] - 2026-02-22
|
|
7690
8590
|
|
|
7691
8591
|
### Fixed
|
|
7692
8592
|
|
|
7693
|
-
-
|
|
7694
|
-
- **
|
|
7695
|
-
- **
|
|
8593
|
+
- **Integration test pipeline reliability**: Moved \`pnpm build\` from vitest globalSetup to npm script pre-step, eliminating EPERM errors from tsx/IPC conflicts inside vitest's process
|
|
8594
|
+
- **Fail-fast globalSetup**: Missing \`dist/cli.js\` now throws a clear error instead of cascading 68+ test failures
|
|
8595
|
+
- **Integration pool isolation**: Changed from \`threads\` to \`forks\` for integration tests \u2014 proper process isolation for subprocess-spawning tests
|
|
8596
|
+
- **Timeout safety net**: Added \`testTimeout: 30_000\` to fallback vitest.config.ts, preventing 5s default under edge conditions
|
|
7696
8597
|
|
|
7697
|
-
|
|
8598
|
+
## [1.3.8] - 2026-02-22
|
|
7698
8599
|
|
|
7699
|
-
|
|
7700
|
-
- **Changelog extraction**: Core parsing/escaping logic extracted to \`scripts/changelog-utils.ts\` (shared between prebuild script and tests)
|
|
7701
|
-
- **Narrowed \`.gitignore\`**: Setup-generated patterns scoped to \`compound/\` subdirectories to avoid hiding tracked TDD agent definitions
|
|
8600
|
+
### Fixed
|
|
7702
8601
|
|
|
7703
|
-
|
|
8602
|
+
- **Integration test reliability**: Dynamic assertion on workflow command count instead of hardcoded magic number; 30s test timeout for integration suite; conditional build in global-setup; 30s timeout on all bare \`execSync\` calls in init tests
|
|
8603
|
+
- **Data integrity**: Indexing pipeline wraps delete/upsert/hash-set in a single transaction for atomic file re-indexing
|
|
8604
|
+
- **FTS5 sanitization**: Extended regex to strip parentheses, colons, and braces in addition to existing special chars
|
|
8605
|
+
- **Safe JSON.parse**: \`rowToMemoryItem\` now uses \`safeJsonParse\` with fallbacks instead of bare \`JSON.parse\`
|
|
8606
|
+
- **ENOENT on schema migration**: \`unlinkSync\` in lessons DB wrapped in try/catch (matches knowledge DB pattern)
|
|
8607
|
+
- **Worktree hook support**: \`getGitHooksDir\` resolves \`.git\` file (\`gitdir:\` reference) in worktrees
|
|
7704
8608
|
|
|
7705
|
-
###
|
|
8609
|
+
### Changed
|
|
7706
8610
|
|
|
7707
|
-
-
|
|
7708
|
-
- **
|
|
7709
|
-
- **
|
|
7710
|
-
- **
|
|
8611
|
+
- **Two-phase vector search**: Knowledge vector search loads only IDs + embeddings in phase 1, hydrates full text for top-k only in phase 2 (reduces memory from O(n * text) to O(n * embedding) + O(k * text))
|
|
8612
|
+
- **Deduplicated FTS5 search**: \`searchKeyword\` and \`searchKeywordScored\` share a single \`executeFtsQuery\` helper
|
|
8613
|
+
- **Removed redundant COUNT pre-checks**: FTS5 naturally returns empty on empty tables
|
|
8614
|
+
- **Extracted chunk count helpers**: \`getChunkCount\` / \`getChunkCountByFilePath\` replace raw SQL in \`knowledge.ts\` and \`indexing.ts\`
|
|
8615
|
+
- **Immutable extension sets**: \`SUPPORTED_EXTENSIONS\` typed as \`ReadonlySet\`; new \`CODE_EXTENSIONS\` constant replaces hardcoded array in chunking
|
|
8616
|
+
- **\`test:all\` builds first**: Script now runs \`pnpm build\` before model download and test run
|
|
8617
|
+
- **Test describe label**: Fixed misleading \`'when stop_hook_active is false'\` to match actual test condition
|
|
7711
8618
|
|
|
7712
|
-
###
|
|
8619
|
+
### Added
|
|
7713
8620
|
|
|
7714
|
-
-
|
|
7715
|
-
-
|
|
7716
|
-
- **Update hint on upgrade**: When \`ca init\` or \`ca setup\` detects an existing install, displays tip to run with \`--update\` to regenerate managed files
|
|
7717
|
-
- **HOW_TO_COMPOUND.md migration**: \`ca setup --update\` automatically removes old monolithic \`HOW_TO_COMPOUND.md\` if it has version frontmatter (generated by compound-agent)
|
|
7718
|
-
- **Doctor doc check**: Now checks for \`docs/compound/README.md\` instead of \`HOW_TO_COMPOUND.md\``;
|
|
8621
|
+
- \`filesErrored\` field in \`IndexResult\` to track file read failures during indexing
|
|
8622
|
+
- \`tsx\` added to devDependencies (was used but not declared)`;
|
|
7719
8623
|
|
|
7720
8624
|
// src/commands/about.ts
|
|
7721
8625
|
function registerAboutCommand(program2) {
|
|
@@ -7729,6 +8633,89 @@ function registerAboutCommand(program2) {
|
|
|
7729
8633
|
console.log(CHANGELOG_RECENT);
|
|
7730
8634
|
});
|
|
7731
8635
|
}
|
|
8636
|
+
|
|
8637
|
+
// src/memory/knowledge/index.ts
|
|
8638
|
+
init_chunking();
|
|
8639
|
+
init_types();
|
|
8640
|
+
init_indexing();
|
|
8641
|
+
|
|
8642
|
+
// src/commands/knowledge.ts
|
|
8643
|
+
var MAX_DISPLAY_TEXT = 200;
|
|
8644
|
+
function registerKnowledgeCommand(program2) {
|
|
8645
|
+
program2.command("knowledge <query>").description("Search docs knowledge base").option("-n, --limit <number>", "Maximum results", "6").action(async function(query, opts) {
|
|
8646
|
+
const globalOpts = getGlobalOpts(this);
|
|
8647
|
+
try {
|
|
8648
|
+
let limit;
|
|
8649
|
+
try {
|
|
8650
|
+
limit = parseLimit(opts.limit, "limit");
|
|
8651
|
+
} catch (err) {
|
|
8652
|
+
const message = err instanceof Error ? err.message : "Invalid limit";
|
|
8653
|
+
console.error(formatError("knowledge", "INVALID_LIMIT", message, "Use -n with a positive integer"));
|
|
8654
|
+
process.exitCode = 1;
|
|
8655
|
+
return;
|
|
8656
|
+
}
|
|
8657
|
+
const repoRoot = getRepoRoot();
|
|
8658
|
+
openKnowledgeDb(repoRoot);
|
|
8659
|
+
if (getChunkCount(repoRoot) === 0) {
|
|
8660
|
+
try {
|
|
8661
|
+
const { indexDocs: indexDocs2 } = await Promise.resolve().then(() => (init_indexing(), indexing_exports));
|
|
8662
|
+
out.info("Knowledge base empty. Indexing docs...");
|
|
8663
|
+
const result = await indexDocs2(repoRoot);
|
|
8664
|
+
if (result.filesIndexed === 0) {
|
|
8665
|
+
out.info("No docs found to index. Add docs/ directory or run: npx ca index-docs --help");
|
|
8666
|
+
return;
|
|
8667
|
+
}
|
|
8668
|
+
} catch (indexErr) {
|
|
8669
|
+
const msg = indexErr instanceof Error ? indexErr.message : "Unknown error";
|
|
8670
|
+
out.info(`Auto-index failed (${msg}). Run manually: npx ca index-docs`);
|
|
8671
|
+
}
|
|
8672
|
+
}
|
|
8673
|
+
const results = await searchKnowledge(repoRoot, query, { limit });
|
|
8674
|
+
if (results.length === 0) {
|
|
8675
|
+
out.info("No matching results found.");
|
|
8676
|
+
return;
|
|
8677
|
+
}
|
|
8678
|
+
for (const r of results) {
|
|
8679
|
+
const { filePath, startLine, endLine, text } = r.item;
|
|
8680
|
+
const truncated = text.length > MAX_DISPLAY_TEXT ? text.slice(0, MAX_DISPLAY_TEXT) + "..." : text;
|
|
8681
|
+
const displayText = truncated.replace(/\n/g, " ");
|
|
8682
|
+
if (globalOpts.verbose) {
|
|
8683
|
+
console.log(`[${filePath}:L${startLine}-L${endLine}] (score: ${r.score.toFixed(2)}) ${displayText}`);
|
|
8684
|
+
} else {
|
|
8685
|
+
console.log(`[${filePath}:L${startLine}-L${endLine}] ${displayText}`);
|
|
8686
|
+
}
|
|
8687
|
+
}
|
|
8688
|
+
} catch (err) {
|
|
8689
|
+
const message = err instanceof Error ? err.message : "Unknown error";
|
|
8690
|
+
console.error(formatError("knowledge", "SEARCH_FAILED", message, "Check that docs are indexed"));
|
|
8691
|
+
process.exitCode = 1;
|
|
8692
|
+
} finally {
|
|
8693
|
+
closeKnowledgeDb();
|
|
8694
|
+
}
|
|
8695
|
+
});
|
|
8696
|
+
}
|
|
8697
|
+
|
|
8698
|
+
// src/commands/knowledge-index.ts
|
|
8699
|
+
function registerKnowledgeIndexCommand(program2) {
|
|
8700
|
+
program2.command("index-docs").description("Index docs/ directory into knowledge base").option("--force", "Re-index all files (ignore cache)").action(async function(options) {
|
|
8701
|
+
const repoRoot = getRepoRoot();
|
|
8702
|
+
out.info("Indexing docs/ directory...");
|
|
8703
|
+
try {
|
|
8704
|
+
const result = await indexDocs(repoRoot, {
|
|
8705
|
+
force: options.force
|
|
8706
|
+
});
|
|
8707
|
+
const skippedPart = result.filesSkipped > 0 ? ` (${result.filesSkipped} skipped)` : "";
|
|
8708
|
+
const deletedPart = result.chunksDeleted > 0 ? `, ${result.chunksDeleted} deleted` : "";
|
|
8709
|
+
const duration = (result.durationMs / 1e3).toFixed(1);
|
|
8710
|
+
out.info(
|
|
8711
|
+
`Indexed ${result.filesIndexed} file${result.filesIndexed !== 1 ? "s" : ""}${skippedPart}, ${result.chunksCreated} chunk${result.chunksCreated !== 1 ? "s" : ""} created${deletedPart}`
|
|
8712
|
+
);
|
|
8713
|
+
out.info(`Duration: ${duration}s`);
|
|
8714
|
+
} finally {
|
|
8715
|
+
closeKnowledgeDb();
|
|
8716
|
+
}
|
|
8717
|
+
});
|
|
8718
|
+
}
|
|
7732
8719
|
function parseWorktreeList(raw) {
|
|
7733
8720
|
const entries = [];
|
|
7734
8721
|
let currentPath = "";
|
|
@@ -7767,13 +8754,16 @@ function runWorktreeCreate(epicId) {
|
|
|
7767
8754
|
const mergeDesc = `INSTRUCTIONS: This task merges the worktree branch back to main. Worktree path: ${worktreePath}. Run \`pnpm exec ca worktree merge ${epicId}\` when all other blocking tasks are resolved.`;
|
|
7768
8755
|
const bdOutput = execFileSync("bd", [
|
|
7769
8756
|
"create",
|
|
8757
|
+
"--silent",
|
|
7770
8758
|
`--title=${mergeTitle}`,
|
|
7771
8759
|
"--type=task",
|
|
7772
8760
|
"--priority=1",
|
|
7773
8761
|
`--description=${mergeDesc}`
|
|
7774
|
-
], {
|
|
7775
|
-
|
|
7776
|
-
|
|
8762
|
+
], {
|
|
8763
|
+
encoding: "utf-8",
|
|
8764
|
+
env: { ...process.env, BEADS_NO_DAEMON: "1" }
|
|
8765
|
+
});
|
|
8766
|
+
const mergeFullId = bdOutput.trim();
|
|
7777
8767
|
if (!mergeFullId) {
|
|
7778
8768
|
throw new Error("bd create returned no task ID");
|
|
7779
8769
|
}
|
|
@@ -7934,12 +8924,16 @@ function addCreateCommand(wt) {
|
|
|
7934
8924
|
const result = runWorktreeCreate(epicId);
|
|
7935
8925
|
if (result.alreadyExists) {
|
|
7936
8926
|
console.log(`Worktree already exists at ${result.worktreePath}`);
|
|
8927
|
+
console.log(` To use: cd ${result.worktreePath} && claude`);
|
|
7937
8928
|
return;
|
|
7938
8929
|
}
|
|
7939
8930
|
console.log(`Worktree created:`);
|
|
7940
8931
|
console.log(` Path: ${result.worktreePath}`);
|
|
7941
8932
|
console.log(` Branch: ${result.branch}`);
|
|
7942
8933
|
console.log(` Merge task: ${result.mergeTaskId}`);
|
|
8934
|
+
console.log("");
|
|
8935
|
+
console.log("Next step: open a NEW Claude session with the worktree as primary directory:");
|
|
8936
|
+
console.log(` cd ${result.worktreePath} && claude`);
|
|
7943
8937
|
} catch (err) {
|
|
7944
8938
|
handleError(err);
|
|
7945
8939
|
}
|
|
@@ -8263,7 +9257,7 @@ function registerCaptureCommands(program2) {
|
|
|
8263
9257
|
await handleCapture(this, options);
|
|
8264
9258
|
});
|
|
8265
9259
|
}
|
|
8266
|
-
var
|
|
9260
|
+
var EPIC_ID_PATTERN2 = /^[a-zA-Z0-9_.-]+$/;
|
|
8267
9261
|
function buildScriptHeader(timestamp, maxRetries, model, epicIds) {
|
|
8268
9262
|
return `#!/usr/bin/env bash
|
|
8269
9263
|
# Infinity Loop - Generated by: ca loop
|
|
@@ -8511,8 +9505,8 @@ function validateOptions(options) {
|
|
|
8511
9505
|
}
|
|
8512
9506
|
if (options.epics) {
|
|
8513
9507
|
for (const id of options.epics) {
|
|
8514
|
-
if (!
|
|
8515
|
-
throw new Error(`Invalid epic ID "${id}": must match ${
|
|
9508
|
+
if (!EPIC_ID_PATTERN2.test(id)) {
|
|
9509
|
+
throw new Error(`Invalid epic ID "${id}": must match ${EPIC_ID_PATTERN2}`);
|
|
8516
9510
|
}
|
|
8517
9511
|
}
|
|
8518
9512
|
}
|
|
@@ -8637,7 +9631,19 @@ async function searchAction(cmd, query, options) {
|
|
|
8637
9631
|
await syncIfNeeded(repoRoot);
|
|
8638
9632
|
let results;
|
|
8639
9633
|
try {
|
|
8640
|
-
|
|
9634
|
+
const usability = await isModelUsable();
|
|
9635
|
+
if (usability.usable) {
|
|
9636
|
+
const candidateLimit = limit * CANDIDATE_MULTIPLIER;
|
|
9637
|
+
const [vectorResults, keywordResults] = await Promise.all([
|
|
9638
|
+
searchVector(repoRoot, query, { limit: candidateLimit }),
|
|
9639
|
+
searchKeywordScored(repoRoot, query, candidateLimit)
|
|
9640
|
+
]);
|
|
9641
|
+
const merged = mergeHybridResults(vectorResults, keywordResults, { minScore: MIN_HYBRID_SCORE });
|
|
9642
|
+
const ranked = rankLessons(merged);
|
|
9643
|
+
results = ranked.slice(0, limit).map((r) => r.lesson);
|
|
9644
|
+
} else {
|
|
9645
|
+
results = await searchKeyword(repoRoot, query, limit);
|
|
9646
|
+
}
|
|
8641
9647
|
} catch (err) {
|
|
8642
9648
|
const message = err instanceof Error ? err.message : "Search failed";
|
|
8643
9649
|
console.error(formatError("search", "SEARCH_FAILED", message, "Check your query syntax"));
|
|
@@ -8761,6 +9767,7 @@ async function checkPlanAction(cmd, options) {
|
|
|
8761
9767
|
process.exitCode = 1;
|
|
8762
9768
|
return;
|
|
8763
9769
|
}
|
|
9770
|
+
await syncIfNeeded(repoRoot);
|
|
8764
9771
|
const usability = await isModelUsable();
|
|
8765
9772
|
if (!usability.usable) {
|
|
8766
9773
|
if (options.json) {
|
|
@@ -8802,7 +9809,7 @@ async function checkPlanAction(cmd, options) {
|
|
|
8802
9809
|
}
|
|
8803
9810
|
}
|
|
8804
9811
|
function registerRetrievalCommands(program2) {
|
|
8805
|
-
program2.command("search <query>").description("Search lessons
|
|
9812
|
+
program2.command("search <query>").description("Search lessons").option("-n, --limit <number>", "Maximum results", DEFAULT_SEARCH_LIMIT).action(async function(query, options) {
|
|
8806
9813
|
await searchAction(this, query, options);
|
|
8807
9814
|
});
|
|
8808
9815
|
program2.command("list").description("List all lessons").option("-n, --limit <number>", "Maximum results", DEFAULT_LIST_LIMIT).option("--invalidated", "Show only invalidated lessons").action(async function(options) {
|
|
@@ -8838,6 +9845,8 @@ function registerManagementCommands(program2) {
|
|
|
8838
9845
|
registerTestSummaryCommand(program2);
|
|
8839
9846
|
registerVerifyGatesCommand(program2);
|
|
8840
9847
|
registerAboutCommand(program2);
|
|
9848
|
+
registerKnowledgeCommand(program2);
|
|
9849
|
+
registerKnowledgeIndexCommand(program2);
|
|
8841
9850
|
registerWorktreeCommands(program2);
|
|
8842
9851
|
}
|
|
8843
9852
|
|