@tekmidian/pai 0.5.6 → 0.6.0
This diff compares the published contents of two publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
- package/ARCHITECTURE.md +72 -1
- package/README.md +107 -3
- package/dist/{auto-route-BG6I_4B1.mjs → auto-route-C-DrW6BL.mjs} +3 -3
- package/dist/{auto-route-BG6I_4B1.mjs.map → auto-route-C-DrW6BL.mjs.map} +1 -1
- package/dist/cli/index.mjs +1897 -1569
- package/dist/cli/index.mjs.map +1 -1
- package/dist/clusters-JIDQW65f.mjs +201 -0
- package/dist/clusters-JIDQW65f.mjs.map +1 -0
- package/dist/{config-Cf92lGX_.mjs → config-BuhHWyOK.mjs} +21 -6
- package/dist/config-BuhHWyOK.mjs.map +1 -0
- package/dist/daemon/index.mjs +12 -9
- package/dist/daemon/index.mjs.map +1 -1
- package/dist/{daemon-D9evGlgR.mjs → daemon-D3hYb5_C.mjs} +670 -219
- package/dist/daemon-D3hYb5_C.mjs.map +1 -0
- package/dist/daemon-mcp/index.mjs +4597 -4
- package/dist/daemon-mcp/index.mjs.map +1 -1
- package/dist/{db-4lSqLFb8.mjs → db-BtuN768f.mjs} +9 -2
- package/dist/db-BtuN768f.mjs.map +1 -0
- package/dist/db-DdUperSl.mjs +110 -0
- package/dist/db-DdUperSl.mjs.map +1 -0
- package/dist/{detect-BU3Nx_2L.mjs → detect-CdaA48EI.mjs} +1 -1
- package/dist/{detect-BU3Nx_2L.mjs.map → detect-CdaA48EI.mjs.map} +1 -1
- package/dist/{detector-Bp-2SM3x.mjs → detector-jGBuYQJM.mjs} +2 -2
- package/dist/{detector-Bp-2SM3x.mjs.map → detector-jGBuYQJM.mjs.map} +1 -1
- package/dist/{factory-Bzcy70G9.mjs → factory-Ygqe_bVZ.mjs} +7 -5
- package/dist/{factory-Bzcy70G9.mjs.map → factory-Ygqe_bVZ.mjs.map} +1 -1
- package/dist/helpers-BEST-4Gx.mjs +420 -0
- package/dist/helpers-BEST-4Gx.mjs.map +1 -0
- package/dist/hooks/capture-all-events.mjs +19 -4
- package/dist/hooks/capture-all-events.mjs.map +4 -4
- package/dist/hooks/capture-session-summary.mjs +38 -0
- package/dist/hooks/capture-session-summary.mjs.map +3 -3
- package/dist/hooks/cleanup-session-files.mjs +6 -12
- package/dist/hooks/cleanup-session-files.mjs.map +4 -4
- package/dist/hooks/context-compression-hook.mjs +105 -111
- package/dist/hooks/context-compression-hook.mjs.map +4 -4
- package/dist/hooks/initialize-session.mjs +26 -17
- package/dist/hooks/initialize-session.mjs.map +4 -4
- package/dist/hooks/inject-observations.mjs +220 -0
- package/dist/hooks/inject-observations.mjs.map +7 -0
- package/dist/hooks/load-core-context.mjs +18 -2
- package/dist/hooks/load-core-context.mjs.map +4 -4
- package/dist/hooks/load-project-context.mjs +102 -97
- package/dist/hooks/load-project-context.mjs.map +4 -4
- package/dist/hooks/observe.mjs +354 -0
- package/dist/hooks/observe.mjs.map +7 -0
- package/dist/hooks/stop-hook.mjs +174 -90
- package/dist/hooks/stop-hook.mjs.map +4 -4
- package/dist/hooks/sync-todo-to-md.mjs +31 -33
- package/dist/hooks/sync-todo-to-md.mjs.map +4 -4
- package/dist/index.d.mts +32 -9
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +6 -9
- package/dist/indexer-D53l5d1U.mjs +1 -0
- package/dist/{indexer-backend-CIMXedqk.mjs → indexer-backend-jcJFsmB4.mjs} +37 -127
- package/dist/indexer-backend-jcJFsmB4.mjs.map +1 -0
- package/dist/{ipc-client-Bjg_a1dc.mjs → ipc-client-CoyUHPod.mjs} +2 -7
- package/dist/{ipc-client-Bjg_a1dc.mjs.map → ipc-client-CoyUHPod.mjs.map} +1 -1
- package/dist/latent-ideas-bTJo6Omd.mjs +191 -0
- package/dist/latent-ideas-bTJo6Omd.mjs.map +1 -0
- package/dist/neighborhood-BYYbEkUJ.mjs +135 -0
- package/dist/neighborhood-BYYbEkUJ.mjs.map +1 -0
- package/dist/note-context-BK24bX8Y.mjs +126 -0
- package/dist/note-context-BK24bX8Y.mjs.map +1 -0
- package/dist/postgres-CKf-EDtS.mjs +846 -0
- package/dist/postgres-CKf-EDtS.mjs.map +1 -0
- package/dist/{reranker-D7bRAHi6.mjs → reranker-CMNZcfVx.mjs} +1 -1
- package/dist/{reranker-D7bRAHi6.mjs.map → reranker-CMNZcfVx.mjs.map} +1 -1
- package/dist/{search-_oHfguA5.mjs → search-DC1qhkKn.mjs} +2 -58
- package/dist/search-DC1qhkKn.mjs.map +1 -0
- package/dist/{sqlite-WWBq7_2C.mjs → sqlite-l-s9xPjY.mjs} +160 -3
- package/dist/sqlite-l-s9xPjY.mjs.map +1 -0
- package/dist/state-C6_vqz7w.mjs +102 -0
- package/dist/state-C6_vqz7w.mjs.map +1 -0
- package/dist/stop-words-BaMEGVeY.mjs +326 -0
- package/dist/stop-words-BaMEGVeY.mjs.map +1 -0
- package/dist/{indexer-CMPOiY1r.mjs → sync-BOsnEj2-.mjs} +14 -216
- package/dist/sync-BOsnEj2-.mjs.map +1 -0
- package/dist/themes-BvYF0W8T.mjs +148 -0
- package/dist/themes-BvYF0W8T.mjs.map +1 -0
- package/dist/{tools-DV_lsiCc.mjs → tools-DcaJlYDN.mjs} +162 -273
- package/dist/tools-DcaJlYDN.mjs.map +1 -0
- package/dist/trace-CRx9lPuc.mjs +137 -0
- package/dist/trace-CRx9lPuc.mjs.map +1 -0
- package/dist/{vault-indexer-DXWs9pDn.mjs → vault-indexer-Bi2cRmn7.mjs} +174 -138
- package/dist/vault-indexer-Bi2cRmn7.mjs.map +1 -0
- package/dist/zettelkasten-cdajbnPr.mjs +708 -0
- package/dist/zettelkasten-cdajbnPr.mjs.map +1 -0
- package/package.json +1 -2
- package/src/hooks/ts/capture-all-events.ts +6 -0
- package/src/hooks/ts/lib/project-utils/index.ts +50 -0
- package/src/hooks/ts/lib/project-utils/notify.ts +75 -0
- package/src/hooks/ts/lib/project-utils/paths.ts +218 -0
- package/src/hooks/ts/lib/project-utils/session-notes.ts +363 -0
- package/src/hooks/ts/lib/project-utils/todo.ts +178 -0
- package/src/hooks/ts/lib/project-utils/tokens.ts +39 -0
- package/src/hooks/ts/lib/project-utils.ts +40 -999
- package/src/hooks/ts/post-tool-use/observe.ts +327 -0
- package/src/hooks/ts/pre-compact/context-compression-hook.ts +6 -0
- package/src/hooks/ts/session-end/capture-session-summary.ts +41 -0
- package/src/hooks/ts/session-start/initialize-session.ts +7 -1
- package/src/hooks/ts/session-start/inject-observations.ts +254 -0
- package/src/hooks/ts/session-start/load-core-context.ts +7 -0
- package/src/hooks/ts/session-start/load-project-context.ts +8 -1
- package/src/hooks/ts/stop/stop-hook.ts +28 -0
- package/templates/claude-md.template.md +7 -74
- package/templates/skills/user/.gitkeep +0 -0
- package/dist/chunker-CbnBe0s0.mjs +0 -191
- package/dist/chunker-CbnBe0s0.mjs.map +0 -1
- package/dist/config-Cf92lGX_.mjs.map +0 -1
- package/dist/daemon-D9evGlgR.mjs.map +0 -1
- package/dist/db-4lSqLFb8.mjs.map +0 -1
- package/dist/db-Dp8VXIMR.mjs +0 -212
- package/dist/db-Dp8VXIMR.mjs.map +0 -1
- package/dist/indexer-CMPOiY1r.mjs.map +0 -1
- package/dist/indexer-backend-CIMXedqk.mjs.map +0 -1
- package/dist/mcp/index.d.mts +0 -1
- package/dist/mcp/index.mjs +0 -500
- package/dist/mcp/index.mjs.map +0 -1
- package/dist/postgres-FXrHDPcE.mjs +0 -358
- package/dist/postgres-FXrHDPcE.mjs.map +0 -1
- package/dist/schemas-BFIgGntb.mjs +0 -3405
- package/dist/schemas-BFIgGntb.mjs.map +0 -1
- package/dist/search-_oHfguA5.mjs.map +0 -1
- package/dist/sqlite-WWBq7_2C.mjs.map +0 -1
- package/dist/tools-DV_lsiCc.mjs.map +0 -1
- package/dist/vault-indexer-DXWs9pDn.mjs.map +0 -1
- package/dist/zettelkasten-e-a4rW_6.mjs +0 -901
- package/dist/zettelkasten-e-a4rW_6.mjs.map +0 -1
- package/templates/README.md +0 -181
- package/templates/skills/createskill-skill.template.md +0 -78
- package/templates/skills/history-system.template.md +0 -371
- package/templates/skills/hook-system.template.md +0 -913
- package/templates/skills/sessions-skill.template.md +0 -102
- package/templates/skills/skill-system.template.md +0 -214
- package/templates/skills/terminal-tabs.template.md +0 -120
- package/templates/templates.md +0 -20
package/dist/helpers-BEST-4Gx.mjs
@@ -0,0 +1,420 @@
+import { existsSync, readdirSync } from "node:fs";
+import { homedir } from "node:os";
+import { basename, join, normalize } from "node:path";
+import { createHash as createHash$1 } from "node:crypto";
+
+//#region src/utils/hash.ts
+/**
+ * Shared hashing utilities. Centralises all SHA-256 usage so every module
+ * obtains digests through the same function rather than inlining createHash.
+ */
+/**
+ * Compute a SHA-256 hex digest of the given string.
+ * Aliased as sha256File for compatibility with existing call-sites that use
+ * that name to hash file contents.
+ */
+function sha256(content) {
+	return createHash$1("sha256").update(content).digest("hex");
+}
+/** Alias kept for backwards compatibility with memory/indexer call-sites. */
+const sha256File = sha256;
+
+//#endregion
+//#region src/memory/chunker.ts
+/**
+ * Markdown text chunker for the PAI memory engine.
+ *
+ * Splits markdown files into overlapping text segments suitable for BM25
+ * full-text indexing. Respects heading boundaries where possible, falling
+ * back to paragraph and sentence splitting when sections are large.
+ */
+const DEFAULT_MAX_TOKENS = 400;
+const DEFAULT_OVERLAP = 80;
+/**
+ * Approximate token count using a words * 1.3 heuristic.
+ * Matches the OpenClaw estimate approach.
+ */
+function estimateTokens(text) {
+	const wordCount = text.split(/\s+/).filter(Boolean).length;
+	return Math.ceil(wordCount * 1.3);
+}
+/**
+ * Split content into sections delimited by ## or ### headings.
+ * Each section starts at its heading line (or at line 1 for a preamble).
+ */
+function splitBySections(lines) {
+	const sections = [];
+	let current = [];
+	for (const line of lines) {
+		if (/^#{1,3}\s/.test(line.text) && current.length > 0) {
+			const text = current.map((l) => l.text).join("\n");
+			sections.push({
+				lines: current,
+				tokens: estimateTokens(text)
+			});
+			current = [];
+		}
+		current.push(line);
+	}
+	if (current.length > 0) {
+		const text = current.map((l) => l.text).join("\n");
+		sections.push({
+			lines: current,
+			tokens: estimateTokens(text)
+		});
+	}
+	return sections;
+}
+/**
+ * Split a LineBlock by double-newline paragraph boundaries.
+ */
+function splitByParagraphs(block) {
+	const paragraphs = [];
+	let current = [];
+	for (const line of block.lines) if (line.text.trim() === "" && current.length > 0) {
+		const text = current.map((l) => l.text).join("\n");
+		paragraphs.push({
+			lines: [...current],
+			tokens: estimateTokens(text)
+		});
+		current = [];
+	} else current.push(line);
+	if (current.length > 0) {
+		const text = current.map((l) => l.text).join("\n");
+		paragraphs.push({
+			lines: current,
+			tokens: estimateTokens(text)
+		});
+	}
+	return paragraphs.length > 0 ? paragraphs : [block];
+}
+/**
+ * Split a LineBlock by sentence boundaries (. ! ?) when even paragraphs are
+ * too large. Works character-by-character within joined lines.
+ */
+function splitBySentences(block, maxTokens) {
+	const sentences = block.lines.map((l) => l.text).join(" ").split(/(?<=[.!?])\s+(?=[A-Z"'])/g);
+	const result = [];
+	let accText = "";
+	const startLine = block.lines[0]?.lineNo ?? 1;
+	const endLine = block.lines[block.lines.length - 1]?.lineNo ?? startLine;
+	const totalLines = endLine - startLine + 1;
+	const linesPerSentence = Math.max(1, Math.floor(totalLines / Math.max(1, sentences.length)));
+	let sentenceIdx = 0;
+	let approxLine = startLine;
+	const flush = () => {
+		if (!accText.trim()) return;
+		const endApprox = Math.min(approxLine + linesPerSentence - 1, endLine);
+		result.push({
+			lines: [{
+				text: accText.trim(),
+				lineNo: approxLine
+			}],
+			tokens: estimateTokens(accText)
+		});
+		approxLine = endApprox + 1;
+		accText = "";
+	};
+	for (const sentence of sentences) {
+		sentenceIdx++;
+		const candidateText = accText ? accText + " " + sentence : sentence;
+		if (estimateTokens(candidateText) > maxTokens && accText) {
+			flush();
+			accText = sentence;
+		} else accText = candidateText;
+	}
+	flush();
+	return result.length > 0 ? result : [block];
+}
+/**
+ * Extract the last `overlapTokens` worth of text from a list of previously
+ * emitted chunks to prepend to the next chunk.
+ */
+function buildOverlapPrefix(chunks, overlapTokens) {
+	if (overlapTokens <= 0 || chunks.length === 0) return [];
+	const lastChunk = chunks[chunks.length - 1];
+	if (!lastChunk) return [];
+	const lines = lastChunk.text.split("\n");
+	const kept = [];
+	let acc = 0;
+	for (let i = lines.length - 1; i >= 0; i--) {
+		const lineTokens = estimateTokens(lines[i] ?? "");
+		acc += lineTokens;
+		kept.unshift(lines[i] ?? "");
+		if (acc >= overlapTokens) break;
+	}
+	const startLine = lastChunk.endLine - kept.length + 1;
+	return kept.map((text, idx) => ({
+		text,
+		lineNo: Math.max(lastChunk.startLine, startLine + idx)
+	}));
+}
+/**
+ * Chunk a markdown file into overlapping segments for BM25 indexing.
+ *
+ * Strategy:
+ * 1. Split by headings (##, ###) as natural boundaries.
+ * 2. If a section exceeds maxTokens, split by paragraphs.
+ * 3. If a paragraph still exceeds maxTokens, split by sentences.
+ * 4. Apply overlap: each chunk includes the last `overlap` tokens from the
+ *    previous chunk.
+ */
+function chunkMarkdown(content, opts) {
+	const maxTokens = opts?.maxTokens ?? DEFAULT_MAX_TOKENS;
+	const overlapTokens = opts?.overlap ?? DEFAULT_OVERLAP;
+	if (!content.trim()) return [];
+	const sections = splitBySections(content.split("\n").map((text, idx) => ({
+		text,
+		lineNo: idx + 1
+	})));
+	const finalBlocks = [];
+	for (const section of sections) {
+		if (section.tokens <= maxTokens) {
+			finalBlocks.push(section);
+			continue;
+		}
+		const paras = splitByParagraphs(section);
+		for (const para of paras) {
+			if (para.tokens <= maxTokens) {
+				finalBlocks.push(para);
+				continue;
+			}
+			const sentences = splitBySentences(para, maxTokens);
+			finalBlocks.push(...sentences);
+		}
+	}
+	const chunks = [];
+	for (const block of finalBlocks) {
+		if (block.lines.length === 0) continue;
+		const text = [...buildOverlapPrefix(chunks, overlapTokens), ...block.lines].map((l) => l.text).join("\n").trim();
+		if (!text) continue;
+		const startLine = block.lines[0]?.lineNo ?? 1;
+		const endLine = block.lines[block.lines.length - 1]?.lineNo ?? startLine;
+		chunks.push({
+			text,
+			startLine,
+			endLine,
+			hash: sha256(text)
+		});
+	}
+	return chunks;
+}
+
+//#endregion
+//#region src/memory/indexer/helpers.ts
+/**
+ * Shared helpers for the PAI memory indexers.
+ *
+ * Contains utilities used by both the sync (SQLite) and async (StorageBackend)
+ * indexer paths: hashing, chunk ID generation, directory walking, and path guards.
+ */
+/**
+ * Classify a relative file path into one of the four memory tiers.
+ *
+ * Rules (in priority order):
+ * - MEMORY.md anywhere in memory/ → 'evergreen'
+ * - YYYY-MM-DD.md in memory/ → 'daily'
+ * - anything else in memory/ → 'topic'
+ * - anything in Notes/ → 'session'
+ */
+function detectTier(relativePath) {
+	const p = relativePath.replace(/\\/g, "/").replace(/^\.\//, "");
+	if (p.startsWith("Notes/") || p === "Notes") return "session";
+	const fileName = basename(p);
+	if (fileName === "MEMORY.md") return "evergreen";
+	if (/^\d{4}-\d{2}-\d{2}\.md$/.test(fileName)) return "daily";
+	return "topic";
+}
+/**
+ * Generate a deterministic chunk ID from its coordinates.
+ * Format: sha256("projectId:path:chunkIndex:startLine:endLine")
+ *
+ * The chunkIndex (0-based position within the file) is included so that
+ * chunks with approximated line numbers (e.g. from splitBySentences) never
+ * produce colliding IDs even when multiple chunks share the same startLine/endLine.
+ */
+function chunkId(projectId, path, chunkIndex, startLine, endLine) {
+	return createHash("sha256").update(`${projectId}:${path}:${chunkIndex}:${startLine}:${endLine}`).digest("hex");
+}
+/**
+ * Yield to the Node.js event loop so that IPC server can process requests
+ * during long index runs.
+ *
+ * Uses setTimeout(10ms) rather than setImmediate — the 10ms pause gives the
+ * event loop enough time to accept and process incoming IPC connections
+ * (socket data, new connections, etc.). Without this, synchronous ONNX
+ * inference blocks IPC for the full duration of each embedding (~50-100ms
+ * per chunk).
+ */
+function yieldToEventLoop() {
+	return new Promise((resolve) => setTimeout(resolve, 10));
+}
+/**
+ * Directories to ALWAYS skip, at any depth, during any directory walk.
+ * These are build artifacts, dependency trees, and VCS internals that
+ * should never be indexed regardless of where they appear in the tree.
+ */
+const ALWAYS_SKIP_DIRS = new Set([
+	".git",
+	"node_modules",
+	"vendor",
+	"Pods",
+	"dist",
+	"build",
+	"out",
+	"DerivedData",
+	".next",
+	".venv",
+	"venv",
+	"__pycache__",
+	".cache",
+	".bun",
+	"snaps",
+	".Trashes"
+]);
+/**
+ * Directories to skip when doing a root-level content scan.
+ * These are either already handled by dedicated scans or should never be indexed.
+ */
+const ROOT_SCAN_SKIP_DIRS = new Set([
+	"memory",
+	"Notes",
+	".claude",
+	".DS_Store",
+	...ALWAYS_SKIP_DIRS
+]);
+/**
+ * Additional directories to skip at the content-scan level (first level below root).
+ * These are common macOS/Linux home-directory or repo noise directories that are
+ * never meaningful as project content.
+ */
+const CONTENT_SCAN_SKIP_DIRS = new Set([
+	"Library",
+	"Applications",
+	"Music",
+	"Movies",
+	"Pictures",
+	"Desktop",
+	"Downloads",
+	"Public",
+	"coverage",
+	...ALWAYS_SKIP_DIRS
+]);
+/**
+ * Safety cap: maximum number of .md files collected per project scan.
+ * Prevents runaway scans on huge root paths (e.g. home directory).
+ * Projects with more files than this are scanned up to the cap only.
+ */
+const MAX_FILES_PER_PROJECT = 5e3;
+/**
+ * Maximum recursion depth for directory walks.
+ * Prevents deep traversal of large directory trees (e.g. development repos).
+ * Depth 0 = the given directory itself (no recursion).
+ * Value 6 allows: root → subdirs → sub-subdirs → ... up to 6 levels.
+ * Sufficient for memory/, Notes/, and typical docs structures.
+ */
+const MAX_WALK_DEPTH = 6;
+/**
+ * Recursively collect all .md files under a directory.
+ * Returns absolute paths. Stops early if the accumulated count hits the cap
+ * or if the recursion depth exceeds MAX_WALK_DEPTH.
+ *
+ * @param dir Directory to scan.
+ * @param acc Shared accumulator array (mutated in place for early exit).
+ * @param cap Maximum number of files to collect (across all recursive calls).
+ * @param depth Current recursion depth (0 = the initial call).
+ */
+function walkMdFiles(dir, acc, cap = MAX_FILES_PER_PROJECT, depth = 0) {
+	const results = acc ?? [];
+	if (!existsSync(dir)) return results;
+	if (results.length >= cap) return results;
+	if (depth > MAX_WALK_DEPTH) return results;
+	try {
+		for (const entry of readdirSync(dir, { withFileTypes: true })) {
+			if (results.length >= cap) break;
+			if (entry.isSymbolicLink()) continue;
+			if (ALWAYS_SKIP_DIRS.has(entry.name)) continue;
+			const full = join(dir, entry.name);
+			if (entry.isDirectory()) walkMdFiles(full, results, cap, depth + 1);
+			else if (entry.isFile() && entry.name.endsWith(".md")) results.push(full);
+		}
+	} catch {}
+	return results;
+}
+/**
+ * Recursively collect all .md files under rootPath, excluding directories
+ * that are already covered by dedicated scans (memory/, Notes/) and
+ * common noise directories (.git, node_modules, etc.).
+ *
+ * Returns absolute paths for files NOT already handled by the specific scanners.
+ * Stops collecting once MAX_FILES_PER_PROJECT is reached.
+ */
+function walkContentFiles(rootPath) {
+	if (!existsSync(rootPath)) return [];
+	const results = [];
+	try {
+		for (const entry of readdirSync(rootPath, { withFileTypes: true })) {
+			if (results.length >= MAX_FILES_PER_PROJECT) break;
+			if (entry.isSymbolicLink()) continue;
+			if (ROOT_SCAN_SKIP_DIRS.has(entry.name)) continue;
+			if (CONTENT_SCAN_SKIP_DIRS.has(entry.name)) continue;
+			const full = join(rootPath, entry.name);
+			if (entry.isDirectory()) walkMdFiles(full, results, MAX_FILES_PER_PROJECT);
+			else if (entry.isFile() && entry.name.endsWith(".md")) {
+				if (entry.name !== "MEMORY.md") results.push(full);
+			}
+		}
+	} catch {}
+	return results;
+}
+/** Paths that must never be indexed — system/temp dirs that can contain backup snapshots. */
+const BLOCKED_ROOTS = new Set([
+	"/tmp",
+	"/private/tmp",
+	"/var",
+	"/private/var"
+]);
+/**
+ * Returns true if rootPath should skip the recursive content scan.
+ *
+ * Skips content scanning for:
+ * - The home directory itself or any ancestor (too broad — millions of files)
+ * - Git repositories (code repos — index memory/ and Notes/ only, not all .md files)
+ *
+ * The content scan is still useful for Obsidian vaults, Notes folders, and
+ * other doc-centric project trees where ALL markdown files are meaningful.
+ *
+ * The memory/, Notes/, and claude_notes_dir scans always run regardless.
+ */
+function isPathTooBroadForContentScan(rootPath) {
+	const normalized = normalize(rootPath);
+	if (BLOCKED_ROOTS.has(normalized)) return true;
+	for (const blocked of BLOCKED_ROOTS) if (normalized.startsWith(blocked + "/")) return true;
+	const home = homedir();
+	if (home.startsWith(normalized) || normalized === "/") return true;
+	if (normalized.startsWith(home)) {
+		const rel = normalized.slice(home.length).replace(/^\//, "");
+		if ((rel ? rel.split("/").length : 0) === 0) return true;
+	}
+	if (existsSync(join(normalized, ".git"))) return true;
+	return false;
+}
+const SESSION_TITLE_RE = /^(\d{4})\s*-\s*(\d{4}-\d{2}-\d{2})\s*-\s*(.+)\.md$/;
+/**
+ * Parse a session title from a Notes filename.
+ * Format: "NNNN - YYYY-MM-DD - Descriptive Title.md"
+ * Returns a synthetic chunk text like "Session #0086 2026-02-23: Pai Daemon Background Service"
+ * or null if the filename doesn't match the expected pattern.
+ */
+function parseSessionTitleChunk(fileName) {
+	const m = SESSION_TITLE_RE.exec(fileName);
+	if (!m) return null;
+	const [, num, date, title] = m;
+	return `Session #${num} ${date}: ${title}`;
+}
+/** Number of files to process before yielding to the event loop inside indexProject. */
+const INDEX_YIELD_EVERY = 10;
+
+//#endregion
+export { parseSessionTitleChunk as a, yieldToEventLoop as c, sha256 as d, sha256File as f, isPathTooBroadForContentScan as i, chunkMarkdown as l, chunkId as n, walkContentFiles as o, detectTier as r, walkMdFiles as s, INDEX_YIELD_EVERY as t, estimateTokens as u };
+//# sourceMappingURL=helpers-BEST-4Gx.mjs.map
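The chunker above is the core of the new memory engine, so a concrete call is worth sketching. A minimal sketch, assuming you load this internal chunk directly; its exports are mangled, so `chunkMarkdown` is published as `l` (see the export list above), and nothing here is a supported public API:

// Illustrative only: helpers-BEST-4Gx.mjs is an internal bundle chunk, and
// the mangled export `l` is chunkMarkdown per the export statement above.
import { l as chunkMarkdown } from "./helpers-BEST-4Gx.mjs";

const md = [
  "# Notes",
  "A short preamble paragraph.",
  "",
  "## Design",
  "A heading-delimited section that fits the 400-token default."
].join("\n");

// Defaults: maxTokens 400, overlap 80; tokens are estimated as words * 1.3.
// Two heading sections produce two chunks; the second chunk's text carries
// up to ~80 estimated tokens of overlap copied from the end of the first.
for (const chunk of chunkMarkdown(md, { maxTokens: 400, overlap: 80 })) {
  // Each chunk records 1-indexed startLine/endLine and a SHA-256 of its text.
  console.log(chunk.startLine, chunk.endLine, chunk.hash.slice(0, 8));
}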
package/dist/helpers-BEST-4Gx.mjs.map
@@ -0,0 +1 @@
+{"version":3,"file":"helpers-BEST-4Gx.mjs","names":["createHash"],"sources":["../src/utils/hash.ts","../src/memory/chunker.ts","../src/memory/indexer/helpers.ts"],"sourcesContent":[…],"mappings":"…"}
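To make the tier rules and session-title format from the helpers chunk concrete, here is a hedged sketch using the same internal module (`detectTier` is exported as `r`, `parseSessionTitleChunk` as `a`):

// Illustrative only: the one-letter names are the chunk's mangled exports.
import { r as detectTier, a as parseSessionTitleChunk } from "./helpers-BEST-4Gx.mjs";

detectTier("memory/MEMORY.md");     // "evergreen": MEMORY.md wins anywhere
detectTier("memory/2026-02-23.md"); // "daily": YYYY-MM-DD.md filenames
detectTier("memory/decisions.md");  // "topic": everything else under memory/
detectTier("Notes/whatever.md");    // "session": anything under Notes/

// Filenames shaped "NNNN - YYYY-MM-DD - Title.md" become synthetic title chunks:
parseSessionTitleChunk("0086 - 2026-02-23 - Pai Daemon Background Service.md");
// => "Session #0086 2026-02-23: Pai Daemon Background Service"
parseSessionTitleChunk("scratch.md"); // => null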
package/dist/hooks/capture-all-events.mjs
@@ -2,7 +2,7 @@
 
 // src/hooks/ts/capture-all-events.ts
 import { readFileSync as readFileSync2, appendFileSync, mkdirSync, existsSync as existsSync2, writeFileSync } from "fs";
-import { join as
+import { join as join3 } from "path";
 
 // src/hooks/ts/lib/pai-paths.ts
 import { homedir } from "os";
@@ -123,6 +123,18 @@ function isAgentSpawningCall(toolName, toolInput) {
   return toolName === "Task" && toolInput?.subagent_type !== void 0;
 }
 
+// src/hooks/ts/lib/project-utils/paths.ts
+import { join as join2, basename } from "path";
+var PROJECTS_DIR = join2(PAI_DIR, "projects");
+var PROBE_CWD_PATTERNS = [
+  "/CodexBar/ClaudeProbe",
+  "/ClaudeProbe"
+];
+function isProbeSession(cwd) {
+  const dir = cwd || process.cwd();
+  return PROBE_CWD_PATTERNS.some((pattern) => dir.includes(pattern));
+}
+
 // src/hooks/ts/capture-all-events.ts
 function getLocalTimestamp() {
   const date = /* @__PURE__ */ new Date();
@@ -143,14 +155,14 @@ function getEventsFilePath() {
   const year = localDate.getFullYear();
   const month = String(localDate.getMonth() + 1).padStart(2, "0");
   const day = String(localDate.getDate()).padStart(2, "0");
-  const monthDir =
+  const monthDir = join3(HISTORY_DIR, "raw-outputs", `${year}-${month}`);
   if (!existsSync2(monthDir)) {
     mkdirSync(monthDir, { recursive: true });
   }
-  return
+  return join3(monthDir, `${year}-${month}-${day}_all-events.jsonl`);
 }
 function getSessionMappingFile() {
-  return
+  return join3(PAI_DIR, "agent-sessions.json");
 }
 function getAgentForSession(sessionId) {
   try {
@@ -176,6 +188,9 @@ function setAgentForSession(sessionId, agentName) {
   }
 }
 async function main() {
+  if (isProbeSession()) {
+    process.exit(0);
+  }
   try {
     const args = process.argv.slice(2);
     const eventTypeIndex = args.indexOf("--event-type");