@ulrichc1/sparn 1.0.1 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,269 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/utils/tokenizer.ts
4
// Heuristic token estimate (~90% of a GPT tokenizer): take the larger of
// "one token per 4 characters" and "0.75 tokens per word".
function estimateTokens(text) {
  if (!text || text.length === 0) {
    return 0;
  }
  const wordTotal = text.split(/\s+/).filter((token) => token.length > 0).length;
  const byChars = Math.ceil(text.length / 4);
  const byWords = Math.ceil(wordTotal * 0.75);
  return byChars > byWords ? byChars : byWords;
}
15
+
16
+ // src/hooks/post-tool-result.ts
17
// Emit the (possibly compressed) result and terminate with status 0 —
// this hook must never return a non-zero exit that could disrupt Claude Code.
function exitSuccess(output) {
  process.stdout.write(output);
  process.exit(0);
}
21
// Results under this estimated token count are passed through untouched.
var COMPRESSION_THRESHOLD = 5000;
// Regexes used to sniff which kind of tool output we are looking at.
// Order of use is decided by compressToolResult, not by key order here.
var TOOL_PATTERNS = {
  fileRead: /<file_path>(.*?)<\/file_path>[\s\S]*?<content>([\s\S]*?)<\/content>/,
  grepResult: /<pattern>(.*?)<\/pattern>[\s\S]*?<matches>([\s\S]*?)<\/matches>/,
  gitDiff: /^diff --git/m,
  buildOutput: /(error|warning|failed|failure)/i,
  npmInstall: /^(npm|pnpm|yarn) (install|add|i)/m,
  dockerLogs: /^\[?\d{4}-\d{2}-\d{2}/m,
  testResults: /(PASS|FAIL|SKIP).*?\.test\./i,
  typescriptErrors: /^.*\(\d+,\d+\): error TS\d+:/m,
  webpackBuild: /webpack \d+\.\d+\.\d+/i
};
33
+ function compressFileRead(content, maxLines = 100) {
34
+ const lines = content.split("\n");
35
+ if (lines.length <= maxLines * 2) {
36
+ return content;
37
+ }
38
+ const head = lines.slice(0, maxLines);
39
+ const tail = lines.slice(-maxLines);
40
+ const omitted = lines.length - maxLines * 2;
41
+ return [...head, "", `... [${omitted} lines omitted] ...`, "", ...tail].join("\n");
42
+ }
43
// Group "file:line:text" grep rows by file, then cap how many matches are
// shown per file; overflow is summarized as a count.
function compressGrepResults(content, maxMatchesPerFile = 5) {
  const grouped = new Map();
  for (const row of content.split("\n")) {
    const parts = row.match(/^(.*?):(\d+):(.*)/);
    if (parts?.[1] && parts[2] && parts[3]) {
      const bucket = grouped.get(parts[1]) ?? [];
      bucket.push(` Line ${parts[2]}: ${parts[3].trim()}`);
      grouped.set(parts[1], bucket);
    }
  }
  const out = [];
  for (const [file, hits] of grouped.entries()) {
    out.push(`${file} (${hits.length} matches):`);
    out.push(...hits.slice(0, maxMatchesPerFile));
    if (hits.length > maxMatchesPerFile) {
      out.push(` ... and ${hits.length - maxMatchesPerFile} more matches`);
    }
    out.push("");
  }
  return out.join("\n");
}
71
// Reduce a unified git diff to per-file added/removed line counts.
// Lines before the first "diff --git" header are ignored (no active file).
function compressGitDiff(content) {
  const perFile = new Map();
  let activeFile = "";
  for (const row of content.split("\n")) {
    if (row.startsWith("diff --git")) {
      const header = row.match(/diff --git a\/(.*?) b\/(.*)/);
      if (header) {
        activeFile = header[2] || "";
        perFile.set(activeFile, { added: 0, removed: 0 });
      }
      continue;
    }
    const tally = perFile.get(activeFile);
    if (!tally) continue;
    // "+++" / "---" are file headers, not content changes.
    if (row.startsWith("+") && !row.startsWith("+++")) tally.added++;
    else if (row.startsWith("-") && !row.startsWith("---")) tally.removed++;
  }
  const report = ["Git diff summary:"];
  for (const [file, tally] of perFile.entries()) {
    report.push(` ${file}: +${tally.added} -${tally.removed}`);
  }
  return report.join("\n");
}
96
// Keep only lines that look like build diagnostics; drop everything else.
function compressBuildOutput(content) {
  const diagnostic = /(error|warning|failed|failure|fatal)/i;
  const flagged = content.split("\n").filter((row) => diagnostic.test(row));
  if (flagged.length === 0) {
    return "Build output: No errors or warnings found";
  }
  return ["Build errors/warnings:", ...flagged].join("\n");
}
109
// Boil package-install output down to: errors if any, else warnings,
// else the "added N packages" summary, else a generic success line.
function compressNpmInstall(content) {
  const added = [];
  const warnLines = [];
  const errorLines = [];
  for (const row of content.split("\n")) {
    const trimmed = row.trim();
    if (/added \d+ packages?/i.test(row)) added.push(trimmed);
    if (/warn/i.test(row)) warnLines.push(trimmed);
    if (/error/i.test(row)) errorLines.push(trimmed);
  }
  if (errorLines.length > 0) {
    return ["Package installation errors:", ...errorLines.slice(0, 5)].join("\n");
  }
  if (warnLines.length > 0) {
    const parts = ["Package installation completed with warnings:", ...warnLines.slice(0, 3)];
    if (warnLines.length > 3) {
      parts.push(`... and ${warnLines.length - 3} more warnings`);
    }
    return parts.join("\n");
  }
  return added.length > 0 ? added.join("\n") : "Package installation completed successfully";
}
137
// Strip a leading "[YYYY-MM-DD ...]" timestamp from each line, then
// deduplicate identical messages, showing a repeat count and at most the
// first 20 unique lines.
function compressDockerLogs(content) {
  const counts = new Map();
  for (const row of content.split("\n")) {
    const bare = row.replace(/^\[?\d{4}-\d{2}-\d{2}.*?\]\s*/, "").trim();
    if (bare) {
      counts.set(bare, (counts.get(bare) ?? 0) + 1);
    }
  }
  const report = ["Docker logs (deduplicated):"];
  for (const [message, seen] of Array.from(counts.entries()).slice(0, 20)) {
    report.push(seen > 1 ? ` [${seen}x] ${message}` : ` ${message}`);
  }
  if (counts.size > 20) {
    report.push(` ... and ${counts.size - 20} more unique log lines`);
  }
  return report.join("\n");
}
159
// Tally PASS/FAIL/SKIP mentions line-by-line and list up to 10 failing
// lines verbatim (a line may increment several counters if it mentions
// more than one keyword — matches the original heuristic).
function compressTestResults(content) {
  let passed = 0;
  let failed = 0;
  let skipped = 0;
  const failLines = [];
  for (const row of content.split("\n")) {
    if (/PASS/i.test(row)) passed += 1;
    if (/FAIL/i.test(row)) {
      failed += 1;
      failLines.push(row.trim());
    }
    if (/SKIP/i.test(row)) skipped += 1;
  }
  const report = [`Test Results: ${passed} passed, ${failed} failed, ${skipped} skipped`];
  if (failLines.length > 0) {
    report.push("", "Failed tests:", ...failLines.slice(0, 10));
    if (failLines.length > 10) {
      report.push(`... and ${failLines.length - 10} more failures`);
    }
  }
  return report.join("\n");
}
183
// Group tsc diagnostics by "file:errorCode" and show, per group, the
// occurrence count plus the first full diagnostic line as a sample.
function compressTypescriptErrors(content) {
  const groups = new Map();
  for (const row of content.split("\n")) {
    const parsed = row.match(/^(.*?)\(\d+,\d+\): error (TS\d+):/);
    if (!parsed) continue;
    const key = `${parsed[1] || "unknown"}:${parsed[2] || "TS0000"}`;
    const bucket = groups.get(key) ?? [];
    bucket.push(row);
    groups.set(key, bucket);
  }
  const report = ["TypeScript Errors (grouped by file):"];
  for (const [key, rows] of groups.entries()) {
    report.push(` ${key} (${rows.length} errors)`);
    report.push(` ${rows[0]}`);
  }
  return report.join("\n");
}
205
/**
 * Compress a tool result when it exceeds COMPRESSION_THRESHOLD tokens.
 *
 * Detection order matters: structured payloads (file reads, grep results)
 * are handled first, then heuristic text formats; unrecognized oversized
 * output falls back to generic head/tail truncation.
 *
 * Fix: String.prototype.replace treats "$" sequences ($&, $', $1, ...) in a
 * string replacement argument as special replacement patterns. The compressed
 * file/grep text can legitimately contain such sequences (shell, regex or JS
 * source), which would be expanded and corrupt the output — so the
 * replacement is supplied as a function, which inserts the text literally.
 *
 * @param {string} input - Raw tool result text.
 * @returns {string} Compressed (or original) tool result.
 */
function compressToolResult(input) {
  // Small results are never worth the loss of fidelity.
  if (estimateTokens(input) < COMPRESSION_THRESHOLD) {
    return input;
  }
  if (TOOL_PATTERNS.fileRead.test(input)) {
    const match = input.match(TOOL_PATTERNS.fileRead);
    if (match?.[2]) {
      const content = match[2];
      const compressed = compressFileRead(content);
      // Function replacer: inserts `compressed` literally, no "$" expansion.
      return input.replace(content, () => compressed);
    }
  }
  if (TOOL_PATTERNS.grepResult.test(input)) {
    const match = input.match(TOOL_PATTERNS.grepResult);
    if (match?.[2]) {
      const matches = match[2];
      const compressed = compressGrepResults(matches);
      return input.replace(matches, () => compressed);
    }
  }
  if (TOOL_PATTERNS.gitDiff.test(input)) {
    return compressGitDiff(input);
  }
  if (TOOL_PATTERNS.buildOutput.test(input)) {
    return compressBuildOutput(input);
  }
  if (TOOL_PATTERNS.npmInstall.test(input)) {
    return compressNpmInstall(input);
  }
  if (TOOL_PATTERNS.dockerLogs.test(input)) {
    return compressDockerLogs(input);
  }
  if (TOOL_PATTERNS.testResults.test(input)) {
    return compressTestResults(input);
  }
  if (TOOL_PATTERNS.typescriptErrors.test(input)) {
    return compressTypescriptErrors(input);
  }
  // Unknown format: generic head/tail truncation for very long output.
  if (input.split("\n").length > 200) {
    return compressFileRead(input, 100);
  }
  return input;
}
250
/**
 * Hook entry point: read the tool result from stdin, compress it, and write
 * the result to stdout, always exiting 0 so the hook never blocks Claude Code.
 *
 * Fix: the original re-iterated process.stdin inside its catch block, but a
 * stream can only be consumed once — if compression threw after the first
 * read, the second read yielded an empty string and the tool result was
 * silently dropped. Stdin is now read exactly once and the captured input is
 * passed through unmodified on any compression error.
 */
async function main() {
  let input = "";
  try {
    const chunks = [];
    for await (const chunk of process.stdin) {
      chunks.push(chunk);
    }
    input = Buffer.concat(chunks).toString("utf-8");
  } catch (_readError) {
    // stdin failed mid-read: emit whatever was captured (possibly "") and exit 0.
    exitSuccess(input);
    return;
  }
  try {
    exitSuccess(compressToolResult(input));
  } catch (_compressError) {
    // Any compression failure: pass the original input through untouched.
    exitSuccess(input);
  }
}
main();
269
+ //# sourceMappingURL=post-tool-result.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/utils/tokenizer.ts","../../src/hooks/post-tool-result.ts"],"sourcesContent":["/**\n * Token estimation utilities.\n * Uses whitespace heuristic (~90% accuracy vs GPT tokenizer).\n */\n\n/**\n * Estimate token count for text using heuristic.\n *\n * Approximation: 1 token ≈ 4 chars or 0.75 words\n * Provides ~90% accuracy compared to GPT tokenizer, sufficient for optimization heuristics.\n *\n * @param text - Text to count\n * @returns Estimated token count\n *\n * @example\n * ```typescript\n * const tokens = estimateTokens('Hello world');\n * console.log(tokens); // ~2\n * ```\n */\nexport function estimateTokens(text: string): number {\n if (!text || text.length === 0) {\n return 0;\n }\n\n // Split on whitespace to get words\n const words = text.split(/\\s+/).filter((w) => w.length > 0);\n const wordCount = words.length;\n\n // Character-based estimate\n const charCount = text.length;\n const charEstimate = Math.ceil(charCount / 4);\n\n // Word-based estimate\n const wordEstimate = Math.ceil(wordCount * 0.75);\n\n // Return the maximum of both estimates (more conservative)\n return Math.max(wordEstimate, charEstimate);\n}\n","#!/usr/bin/env node\n/**\n * Post-Tool-Result Hook - Claude Code hook for compressing verbose tool output\n *\n * Compresses large tool results using type-specific strategies:\n * - File reads: Truncate long files, show first/last N lines\n * - Grep results: Group by file, show match count + samples\n * - Git diffs: Summarize file changes, show stats\n * - Build output: Extract errors/warnings only\n *\n * CRITICAL: Always exits 0 (never disrupts Claude Code).\n * Falls through unmodified on error or if already small.\n */\n\nimport { estimateTokens } from '../utils/tokenizer.js';\n\n// Exit 0 wrapper for all errors\nfunction exitSuccess(output: string): void {\n process.stdout.write(output);\n process.exit(0);\n}\n\n// Compression threshold (only compress if over this many tokens)\nconst 
COMPRESSION_THRESHOLD = 5000;\n\n// Tool result patterns\nconst TOOL_PATTERNS = {\n fileRead: /<file_path>(.*?)<\\/file_path>[\\s\\S]*?<content>([\\s\\S]*?)<\\/content>/,\n grepResult: /<pattern>(.*?)<\\/pattern>[\\s\\S]*?<matches>([\\s\\S]*?)<\\/matches>/,\n gitDiff: /^diff --git/m,\n buildOutput: /(error|warning|failed|failure)/i,\n npmInstall: /^(npm|pnpm|yarn) (install|add|i)/m,\n dockerLogs: /^\\[?\\d{4}-\\d{2}-\\d{2}/m,\n testResults: /(PASS|FAIL|SKIP).*?\\.test\\./i,\n typescriptErrors: /^.*\\(\\d+,\\d+\\): error TS\\d+:/m,\n webpackBuild: /webpack \\d+\\.\\d+\\.\\d+/i,\n};\n\n/**\n * Compress file read results\n */\nfunction compressFileRead(content: string, maxLines = 100): string {\n const lines = content.split('\\n');\n\n if (lines.length <= maxLines * 2) {\n return content; // Already small enough\n }\n\n const head = lines.slice(0, maxLines);\n const tail = lines.slice(-maxLines);\n const omitted = lines.length - maxLines * 2;\n\n return [...head, '', `... [${omitted} lines omitted] ...`, '', ...tail].join('\\n');\n}\n\n/**\n * Compress grep results\n */\nfunction compressGrepResults(content: string, maxMatchesPerFile = 5): string {\n const lines = content.split('\\n');\n const fileMatches = new Map<string, string[]>();\n\n // Group matches by file\n for (const line of lines) {\n const match = line.match(/^(.*?):(\\d+):(.*)/);\n if (match?.[1] && match[2] && match[3]) {\n const file = match[1];\n const lineNum = match[2];\n const text = match[3];\n if (!fileMatches.has(file)) {\n fileMatches.set(file, []);\n }\n fileMatches.get(file)?.push(` Line ${lineNum}: ${text.trim()}`);\n }\n }\n\n // Build compressed output\n const compressed: string[] = [];\n\n for (const [file, matches] of fileMatches.entries()) {\n compressed.push(`${file} (${matches.length} matches):`);\n\n if (matches.length <= maxMatchesPerFile) {\n compressed.push(...matches);\n } else {\n compressed.push(...matches.slice(0, maxMatchesPerFile));\n compressed.push(` ... 
and ${matches.length - maxMatchesPerFile} more matches`);\n }\n\n compressed.push('');\n }\n\n return compressed.join('\\n');\n}\n\n/**\n * Compress git diff results\n */\nfunction compressGitDiff(content: string): string {\n const lines = content.split('\\n');\n const files = new Map<string, { added: number; removed: number }>();\n let currentFile = '';\n\n for (const line of lines) {\n if (line.startsWith('diff --git')) {\n const match = line.match(/diff --git a\\/(.*?) b\\/(.*)/);\n if (match) {\n currentFile = match[2] || '';\n files.set(currentFile, { added: 0, removed: 0 });\n }\n } else if (line.startsWith('+') && !line.startsWith('+++')) {\n const stats = files.get(currentFile);\n if (stats) stats.added++;\n } else if (line.startsWith('-') && !line.startsWith('---')) {\n const stats = files.get(currentFile);\n if (stats) stats.removed++;\n }\n }\n\n // Build summary\n const summary: string[] = ['Git diff summary:'];\n\n for (const [file, stats] of files.entries()) {\n summary.push(` ${file}: +${stats.added} -${stats.removed}`);\n }\n\n return summary.join('\\n');\n}\n\n/**\n * Compress build output (extract errors/warnings only)\n */\nfunction compressBuildOutput(content: string): string {\n const lines = content.split('\\n');\n const important: string[] = [];\n\n for (const line of lines) {\n if (/(error|warning|failed|failure|fatal)/i.test(line)) {\n important.push(line);\n }\n }\n\n if (important.length === 0) {\n return 'Build output: No errors or warnings found';\n }\n\n return ['Build errors/warnings:', ...important].join('\\n');\n}\n\n/**\n * Compress npm/pnpm install output\n */\nfunction compressNpmInstall(content: string): string {\n const lines = content.split('\\n');\n const summary: string[] = [];\n\n // Extract package count and warnings/errors\n const warnings: string[] = [];\n const errors: string[] = [];\n\n for (const line of lines) {\n if (/added \\d+ packages?/i.test(line)) {\n summary.push(line.trim());\n }\n if (/warn/i.test(line)) {\n 
warnings.push(line.trim());\n }\n if (/error/i.test(line)) {\n errors.push(line.trim());\n }\n }\n\n if (errors.length > 0) {\n return ['Package installation errors:', ...errors.slice(0, 5)].join('\\n');\n }\n\n if (warnings.length > 0) {\n return [\n 'Package installation completed with warnings:',\n ...warnings.slice(0, 3),\n warnings.length > 3 ? `... and ${warnings.length - 3} more warnings` : '',\n ]\n .filter(Boolean)\n .join('\\n');\n }\n\n return summary.length > 0 ? summary.join('\\n') : 'Package installation completed successfully';\n}\n\n/**\n * Compress Docker logs\n */\nfunction compressDockerLogs(content: string): string {\n const lines = content.split('\\n');\n const logMap = new Map<string, number>();\n\n // Deduplicate and count repeated lines\n for (const line of lines) {\n // Strip timestamps for deduplication\n const normalized = line.replace(/^\\[?\\d{4}-\\d{2}-\\d{2}.*?\\]\\s*/, '').trim();\n if (normalized) {\n logMap.set(normalized, (logMap.get(normalized) || 0) + 1);\n }\n }\n\n const summary: string[] = ['Docker logs (deduplicated):'];\n\n for (const [log, count] of Array.from(logMap.entries()).slice(0, 20)) {\n if (count > 1) {\n summary.push(` [${count}x] ${log}`);\n } else {\n summary.push(` ${log}`);\n }\n }\n\n if (logMap.size > 20) {\n summary.push(` ... 
and ${logMap.size - 20} more unique log lines`);\n }\n\n return summary.join('\\n');\n}\n\n/**\n * Compress test results\n */\nfunction compressTestResults(content: string): string {\n const lines = content.split('\\n');\n let passed = 0;\n let failed = 0;\n let skipped = 0;\n const failures: string[] = [];\n\n for (const line of lines) {\n if (/PASS/i.test(line)) passed++;\n if (/FAIL/i.test(line)) {\n failed++;\n failures.push(line.trim());\n }\n if (/SKIP/i.test(line)) skipped++;\n }\n\n const summary = [`Test Results: ${passed} passed, ${failed} failed, ${skipped} skipped`];\n\n if (failures.length > 0) {\n summary.push('', 'Failed tests:');\n summary.push(...failures.slice(0, 10));\n if (failures.length > 10) {\n summary.push(`... and ${failures.length - 10} more failures`);\n }\n }\n\n return summary.join('\\n');\n}\n\n/**\n * Compress TypeScript errors\n */\nfunction compressTypescriptErrors(content: string): string {\n const lines = content.split('\\n');\n const errorMap = new Map<string, string[]>();\n\n for (const line of lines) {\n const match = line.match(/^(.*?)\\(\\d+,\\d+\\): error (TS\\d+):/);\n if (match) {\n const file = match[1] || 'unknown';\n const errorCode = match[2] || 'TS0000';\n const key = `${file}:${errorCode}`;\n\n if (!errorMap.has(key)) {\n errorMap.set(key, []);\n }\n errorMap.get(key)?.push(line);\n }\n }\n\n const summary = ['TypeScript Errors (grouped by file):'];\n\n for (const [key, errors] of errorMap.entries()) {\n summary.push(` ${key} (${errors.length} errors)`);\n summary.push(` ${errors[0]}`);\n }\n\n return summary.join('\\n');\n}\n\n/**\n * Main compression logic\n */\nfunction compressToolResult(input: string): string {\n const tokens = estimateTokens(input);\n\n // Only compress if over threshold\n if (tokens < COMPRESSION_THRESHOLD) {\n return input;\n }\n\n // Detect tool result type and compress accordingly\n if (TOOL_PATTERNS.fileRead.test(input)) {\n const match = input.match(TOOL_PATTERNS.fileRead);\n if 
(match?.[2]) {\n const content = match[2];\n const compressed = compressFileRead(content);\n return input.replace(content, compressed);\n }\n }\n\n if (TOOL_PATTERNS.grepResult.test(input)) {\n const match = input.match(TOOL_PATTERNS.grepResult);\n if (match?.[2]) {\n const matches = match[2];\n const compressed = compressGrepResults(matches);\n return input.replace(matches, compressed);\n }\n }\n\n if (TOOL_PATTERNS.gitDiff.test(input)) {\n return compressGitDiff(input);\n }\n\n if (TOOL_PATTERNS.buildOutput.test(input)) {\n return compressBuildOutput(input);\n }\n\n if (TOOL_PATTERNS.npmInstall.test(input)) {\n return compressNpmInstall(input);\n }\n\n if (TOOL_PATTERNS.dockerLogs.test(input)) {\n return compressDockerLogs(input);\n }\n\n if (TOOL_PATTERNS.testResults.test(input)) {\n return compressTestResults(input);\n }\n\n if (TOOL_PATTERNS.typescriptErrors.test(input)) {\n return compressTypescriptErrors(input);\n }\n\n // Unknown type or no compression pattern matched\n // Apply generic truncation as fallback\n const lines = input.split('\\n');\n if (lines.length > 200) {\n return compressFileRead(input, 100);\n }\n\n return input;\n}\n\n// Main hook logic\nasync function main(): Promise<void> {\n try {\n // Read stdin (tool result)\n const chunks: Buffer[] = [];\n for await (const chunk of process.stdin) {\n chunks.push(chunk);\n }\n const input = Buffer.concat(chunks).toString('utf-8');\n\n // Compress if needed\n const output = compressToolResult(input);\n\n exitSuccess(output);\n } catch (_error) {\n // On any error, pass through original input\n const chunks: Buffer[] = [];\n for await (const chunk of process.stdin) {\n chunks.push(chunk);\n }\n const input = Buffer.concat(chunks).toString('utf-8');\n exitSuccess(input);\n }\n}\n\n// Run 
hook\nmain();\n"],"mappings":";;;AAoBO,SAAS,eAAe,MAAsB;AACnD,MAAI,CAAC,QAAQ,KAAK,WAAW,GAAG;AAC9B,WAAO;AAAA,EACT;AAGA,QAAM,QAAQ,KAAK,MAAM,KAAK,EAAE,OAAO,CAAC,MAAM,EAAE,SAAS,CAAC;AAC1D,QAAM,YAAY,MAAM;AAGxB,QAAM,YAAY,KAAK;AACvB,QAAM,eAAe,KAAK,KAAK,YAAY,CAAC;AAG5C,QAAM,eAAe,KAAK,KAAK,YAAY,IAAI;AAG/C,SAAO,KAAK,IAAI,cAAc,YAAY;AAC5C;;;ACrBA,SAAS,YAAY,QAAsB;AACzC,UAAQ,OAAO,MAAM,MAAM;AAC3B,UAAQ,KAAK,CAAC;AAChB;AAGA,IAAM,wBAAwB;AAG9B,IAAM,gBAAgB;AAAA,EACpB,UAAU;AAAA,EACV,YAAY;AAAA,EACZ,SAAS;AAAA,EACT,aAAa;AAAA,EACb,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,kBAAkB;AAAA,EAClB,cAAc;AAChB;AAKA,SAAS,iBAAiB,SAAiB,WAAW,KAAa;AACjE,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAEhC,MAAI,MAAM,UAAU,WAAW,GAAG;AAChC,WAAO;AAAA,EACT;AAEA,QAAM,OAAO,MAAM,MAAM,GAAG,QAAQ;AACpC,QAAM,OAAO,MAAM,MAAM,CAAC,QAAQ;AAClC,QAAM,UAAU,MAAM,SAAS,WAAW;AAE1C,SAAO,CAAC,GAAG,MAAM,IAAI,QAAQ,OAAO,uBAAuB,IAAI,GAAG,IAAI,EAAE,KAAK,IAAI;AACnF;AAKA,SAAS,oBAAoB,SAAiB,oBAAoB,GAAW;AAC3E,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAChC,QAAM,cAAc,oBAAI,IAAsB;AAG9C,aAAW,QAAQ,OAAO;AACxB,UAAM,QAAQ,KAAK,MAAM,mBAAmB;AAC5C,QAAI,QAAQ,CAAC,KAAK,MAAM,CAAC,KAAK,MAAM,CAAC,GAAG;AACtC,YAAM,OAAO,MAAM,CAAC;AACpB,YAAM,UAAU,MAAM,CAAC;AACvB,YAAM,OAAO,MAAM,CAAC;AACpB,UAAI,CAAC,YAAY,IAAI,IAAI,GAAG;AAC1B,oBAAY,IAAI,MAAM,CAAC,CAAC;AAAA,MAC1B;AACA,kBAAY,IAAI,IAAI,GAAG,KAAK,UAAU,OAAO,KAAK,KAAK,KAAK,CAAC,EAAE;AAAA,IACjE;AAAA,EACF;AAGA,QAAM,aAAuB,CAAC;AAE9B,aAAW,CAAC,MAAM,OAAO,KAAK,YAAY,QAAQ,GAAG;AACnD,eAAW,KAAK,GAAG,IAAI,KAAK,QAAQ,MAAM,YAAY;AAEtD,QAAI,QAAQ,UAAU,mBAAmB;AACvC,iBAAW,KAAK,GAAG,OAAO;AAAA,IAC5B,OAAO;AACL,iBAAW,KAAK,GAAG,QAAQ,MAAM,GAAG,iBAAiB,CAAC;AACtD,iBAAW,KAAK,aAAa,QAAQ,SAAS,iBAAiB,eAAe;AAAA,IAChF;AAEA,eAAW,KAAK,EAAE;AAAA,EACpB;AAEA,SAAO,WAAW,KAAK,IAAI;AAC7B;AAKA,SAAS,gBAAgB,SAAyB;AAChD,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAChC,QAAM,QAAQ,oBAAI,IAAgD;AAClE,MAAI,cAAc;AAElB,aAAW,QAAQ,OAAO;AACxB,QAAI,KAAK,WAAW,YAAY,GAAG;AACjC,YAAM,QAAQ,KAAK,MAAM,6BAA6B;AACtD,UAAI,OAAO;AACT,sBAAc,MAAM,CAAC,KAAK;AAC1B,cAAM,IAAI,aAAa,EAAE,OAAO,GAAG,SAAS,EAAE,CAAC;AAAA,MACjD;AAAA,IACF,WAAW,KAAK,WAAW,GAAG
,KAAK,CAAC,KAAK,WAAW,KAAK,GAAG;AAC1D,YAAM,QAAQ,MAAM,IAAI,WAAW;AACnC,UAAI,MAAO,OAAM;AAAA,IACnB,WAAW,KAAK,WAAW,GAAG,KAAK,CAAC,KAAK,WAAW,KAAK,GAAG;AAC1D,YAAM,QAAQ,MAAM,IAAI,WAAW;AACnC,UAAI,MAAO,OAAM;AAAA,IACnB;AAAA,EACF;AAGA,QAAM,UAAoB,CAAC,mBAAmB;AAE9C,aAAW,CAAC,MAAM,KAAK,KAAK,MAAM,QAAQ,GAAG;AAC3C,YAAQ,KAAK,KAAK,IAAI,MAAM,MAAM,KAAK,KAAK,MAAM,OAAO,EAAE;AAAA,EAC7D;AAEA,SAAO,QAAQ,KAAK,IAAI;AAC1B;AAKA,SAAS,oBAAoB,SAAyB;AACpD,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAChC,QAAM,YAAsB,CAAC;AAE7B,aAAW,QAAQ,OAAO;AACxB,QAAI,wCAAwC,KAAK,IAAI,GAAG;AACtD,gBAAU,KAAK,IAAI;AAAA,IACrB;AAAA,EACF;AAEA,MAAI,UAAU,WAAW,GAAG;AAC1B,WAAO;AAAA,EACT;AAEA,SAAO,CAAC,0BAA0B,GAAG,SAAS,EAAE,KAAK,IAAI;AAC3D;AAKA,SAAS,mBAAmB,SAAyB;AACnD,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAChC,QAAM,UAAoB,CAAC;AAG3B,QAAM,WAAqB,CAAC;AAC5B,QAAM,SAAmB,CAAC;AAE1B,aAAW,QAAQ,OAAO;AACxB,QAAI,uBAAuB,KAAK,IAAI,GAAG;AACrC,cAAQ,KAAK,KAAK,KAAK,CAAC;AAAA,IAC1B;AACA,QAAI,QAAQ,KAAK,IAAI,GAAG;AACtB,eAAS,KAAK,KAAK,KAAK,CAAC;AAAA,IAC3B;AACA,QAAI,SAAS,KAAK,IAAI,GAAG;AACvB,aAAO,KAAK,KAAK,KAAK,CAAC;AAAA,IACzB;AAAA,EACF;AAEA,MAAI,OAAO,SAAS,GAAG;AACrB,WAAO,CAAC,gCAAgC,GAAG,OAAO,MAAM,GAAG,CAAC,CAAC,EAAE,KAAK,IAAI;AAAA,EAC1E;AAEA,MAAI,SAAS,SAAS,GAAG;AACvB,WAAO;AAAA,MACL;AAAA,MACA,GAAG,SAAS,MAAM,GAAG,CAAC;AAAA,MACtB,SAAS,SAAS,IAAI,WAAW,SAAS,SAAS,CAAC,mBAAmB;AAAA,IACzE,EACG,OAAO,OAAO,EACd,KAAK,IAAI;AAAA,EACd;AAEA,SAAO,QAAQ,SAAS,IAAI,QAAQ,KAAK,IAAI,IAAI;AACnD;AAKA,SAAS,mBAAmB,SAAyB;AACnD,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAChC,QAAM,SAAS,oBAAI,IAAoB;AAGvC,aAAW,QAAQ,OAAO;AAExB,UAAM,aAAa,KAAK,QAAQ,iCAAiC,EAAE,EAAE,KAAK;AAC1E,QAAI,YAAY;AACd,aAAO,IAAI,aAAa,OAAO,IAAI,UAAU,KAAK,KAAK,CAAC;AAAA,IAC1D;AAAA,EACF;AAEA,QAAM,UAAoB,CAAC,6BAA6B;AAExD,aAAW,CAAC,KAAK,KAAK,KAAK,MAAM,KAAK,OAAO,QAAQ,CAAC,EAAE,MAAM,GAAG,EAAE,GAAG;AACpE,QAAI,QAAQ,GAAG;AACb,cAAQ,KAAK,MAAM,KAAK,MAAM,GAAG,EAAE;AAAA,IACrC,OAAO;AACL,cAAQ,KAAK,KAAK,GAAG,EAAE;AAAA,IACzB;AAAA,EACF;AAEA,MAAI,OAAO,OAAO,IAAI;AACpB,YAAQ,KAAK,aAAa,OAAO,OAAO,EAAE,wBAAwB;AAAA,EACpE;AAEA,SAAO,QAAQ,KAAK,IAAI;AAC1B;AAKA,SAAS,oBAAoB,SAAyB;AACpD,QAAM,QAAQ,
QAAQ,MAAM,IAAI;AAChC,MAAI,SAAS;AACb,MAAI,SAAS;AACb,MAAI,UAAU;AACd,QAAM,WAAqB,CAAC;AAE5B,aAAW,QAAQ,OAAO;AACxB,QAAI,QAAQ,KAAK,IAAI,EAAG;AACxB,QAAI,QAAQ,KAAK,IAAI,GAAG;AACtB;AACA,eAAS,KAAK,KAAK,KAAK,CAAC;AAAA,IAC3B;AACA,QAAI,QAAQ,KAAK,IAAI,EAAG;AAAA,EAC1B;AAEA,QAAM,UAAU,CAAC,iBAAiB,MAAM,YAAY,MAAM,YAAY,OAAO,UAAU;AAEvF,MAAI,SAAS,SAAS,GAAG;AACvB,YAAQ,KAAK,IAAI,eAAe;AAChC,YAAQ,KAAK,GAAG,SAAS,MAAM,GAAG,EAAE,CAAC;AACrC,QAAI,SAAS,SAAS,IAAI;AACxB,cAAQ,KAAK,WAAW,SAAS,SAAS,EAAE,gBAAgB;AAAA,IAC9D;AAAA,EACF;AAEA,SAAO,QAAQ,KAAK,IAAI;AAC1B;AAKA,SAAS,yBAAyB,SAAyB;AACzD,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAChC,QAAM,WAAW,oBAAI,IAAsB;AAE3C,aAAW,QAAQ,OAAO;AACxB,UAAM,QAAQ,KAAK,MAAM,mCAAmC;AAC5D,QAAI,OAAO;AACT,YAAM,OAAO,MAAM,CAAC,KAAK;AACzB,YAAM,YAAY,MAAM,CAAC,KAAK;AAC9B,YAAM,MAAM,GAAG,IAAI,IAAI,SAAS;AAEhC,UAAI,CAAC,SAAS,IAAI,GAAG,GAAG;AACtB,iBAAS,IAAI,KAAK,CAAC,CAAC;AAAA,MACtB;AACA,eAAS,IAAI,GAAG,GAAG,KAAK,IAAI;AAAA,IAC9B;AAAA,EACF;AAEA,QAAM,UAAU,CAAC,sCAAsC;AAEvD,aAAW,CAAC,KAAK,MAAM,KAAK,SAAS,QAAQ,GAAG;AAC9C,YAAQ,KAAK,KAAK,GAAG,KAAK,OAAO,MAAM,UAAU;AACjD,YAAQ,KAAK,OAAO,OAAO,CAAC,CAAC,EAAE;AAAA,EACjC;AAEA,SAAO,QAAQ,KAAK,IAAI;AAC1B;AAKA,SAAS,mBAAmB,OAAuB;AACjD,QAAM,SAAS,eAAe,KAAK;AAGnC,MAAI,SAAS,uBAAuB;AAClC,WAAO;AAAA,EACT;AAGA,MAAI,cAAc,SAAS,KAAK,KAAK,GAAG;AACtC,UAAM,QAAQ,MAAM,MAAM,cAAc,QAAQ;AAChD,QAAI,QAAQ,CAAC,GAAG;AACd,YAAM,UAAU,MAAM,CAAC;AACvB,YAAM,aAAa,iBAAiB,OAAO;AAC3C,aAAO,MAAM,QAAQ,SAAS,UAAU;AAAA,IAC1C;AAAA,EACF;AAEA,MAAI,cAAc,WAAW,KAAK,KAAK,GAAG;AACxC,UAAM,QAAQ,MAAM,MAAM,cAAc,UAAU;AAClD,QAAI,QAAQ,CAAC,GAAG;AACd,YAAM,UAAU,MAAM,CAAC;AACvB,YAAM,aAAa,oBAAoB,OAAO;AAC9C,aAAO,MAAM,QAAQ,SAAS,UAAU;AAAA,IAC1C;AAAA,EACF;AAEA,MAAI,cAAc,QAAQ,KAAK,KAAK,GAAG;AACrC,WAAO,gBAAgB,KAAK;AAAA,EAC9B;AAEA,MAAI,cAAc,YAAY,KAAK,KAAK,GAAG;AACzC,WAAO,oBAAoB,KAAK;AAAA,EAClC;AAEA,MAAI,cAAc,WAAW,KAAK,KAAK,GAAG;AACxC,WAAO,mBAAmB,KAAK;AAAA,EACjC;AAEA,MAAI,cAAc,WAAW,KAAK,KAAK,GAAG;AACxC,WAAO,mBAAmB,KAAK;AAAA,EACjC;AAEA,MAAI,cAAc,YAAY,KAAK,KAAK,GAAG;AACzC,WAAO,oBAAoB,KAAK;AAAA,EAClC;AAEA,MAAI,cAAc,iBAAiB,KAAK,KAAK,GAAG;AAC9C,
WAAO,yBAAyB,KAAK;AAAA,EACvC;AAIA,QAAM,QAAQ,MAAM,MAAM,IAAI;AAC9B,MAAI,MAAM,SAAS,KAAK;AACtB,WAAO,iBAAiB,OAAO,GAAG;AAAA,EACpC;AAEA,SAAO;AACT;AAGA,eAAe,OAAsB;AACnC,MAAI;AAEF,UAAM,SAAmB,CAAC;AAC1B,qBAAiB,SAAS,QAAQ,OAAO;AACvC,aAAO,KAAK,KAAK;AAAA,IACnB;AACA,UAAM,QAAQ,OAAO,OAAO,MAAM,EAAE,SAAS,OAAO;AAGpD,UAAM,SAAS,mBAAmB,KAAK;AAEvC,gBAAY,MAAM;AAAA,EACpB,SAAS,QAAQ;AAEf,UAAM,SAAmB,CAAC;AAC1B,qBAAiB,SAAS,QAAQ,OAAO;AACvC,aAAO,KAAK,KAAK;AAAA,IACnB;AACA,UAAM,QAAQ,OAAO,OAAO,MAAM,EAAE,SAAS,OAAO;AACpD,gBAAY,KAAK;AAAA,EACnB;AACF;AAGA,KAAK;","names":[]}
@@ -0,0 +1,287 @@
1
+ #!/usr/bin/env node
2
+ "use strict";
3
+
4
+ // src/hooks/pre-prompt.ts
5
+ var import_node_fs = require("fs");
6
+ var import_node_os = require("os");
7
+ var import_node_path = require("path");
8
+ var import_js_yaml = require("js-yaml");
9
+
10
+ // src/utils/tokenizer.ts
11
// Heuristic token estimate: the larger of chars/4 and words*0.75
// (conservative ~90% approximation of a GPT tokenizer).
function estimateTokens(text) {
  if (!text || text.length === 0) {
    return 0;
  }
  const words = text.split(/\s+/).filter((w) => w.length > 0);
  return Math.max(Math.ceil(words.length * 0.75), Math.ceil(text.length / 4));
}
22
+
23
+ // src/core/engram-scorer.ts
24
// Factory for engram scoring: time-based exponential decay, access-count
// boosting, and a floor for BTSP-flagged entries.
function createEngramScorer(config) {
  const { defaultTTL } = config;

  // Decay fraction in [0, 1]. ttl === 0 means "decays immediately" (1);
  // a non-positive age means "no decay yet" (0).
  function calculateDecay(ageInSeconds, ttlInSeconds) {
    if (ttlInSeconds === 0) return 1;
    if (ageInSeconds <= 0) return 0;
    const decayed = 1 - Math.exp(-(ageInSeconds / ttlInSeconds));
    return Math.min(1, Math.max(0, decayed));
  }

  // Current relevance of an entry: base score attenuated by age, boosted
  // logarithmically by access count, with BTSP entries floored at 0.9.
  // `currentTime` is injectable for deterministic scoring.
  function calculateScore(entry, currentTime = Date.now()) {
    const ageSeconds = Math.max(0, (currentTime - entry.timestamp) / 1e3);
    let value = entry.score * (1 - calculateDecay(ageSeconds, entry.ttl));
    if (entry.accessCount > 0) {
      value = Math.min(1, value + Math.log(entry.accessCount + 1) * 0.1);
    }
    if (entry.isBTSP) {
      value = Math.max(value, 0.9);
    }
    return Math.min(1, Math.max(0, value));
  }

  // Reset an entry's clock: fresh timestamp and a TTL of defaultTTL hours
  // (stored in seconds).
  function refreshTTL(entry) {
    return { ...entry, ttl: defaultTTL * 3600, timestamp: Date.now() };
  }

  return { calculateScore, refreshTTL, calculateDecay };
}
61
+
62
+ // src/core/budget-pruner.ts
63
// Factory for a token-budget pruner: ranks entries by TF-IDF weighted by
// engram score and state, keeps BTSP entries unconditionally, and greedily
// fills the remaining budget with the highest-priority regular entries.
function createBudgetPruner(config) {
  const { tokenBudget, decay } = config;
  const engramScorer = createEngramScorer(decay);

  // Lowercased whitespace-split terms of a piece of content.
  function tokenize(text) {
    return text.toLowerCase().split(/\s+/).filter((word) => word.length > 0);
  }

  // Sub-linear term frequency: sqrt of the raw occurrence count.
  function calculateTF(term, tokens) {
    return Math.sqrt(tokens.filter((t) => t === term).length);
  }

  // Inverse document frequency over all entries' contents; 0 when absent.
  function calculateIDF(term, allEntries) {
    const containing = allEntries.filter((e) => tokenize(e.content).includes(term)).length;
    return containing === 0 ? 0 : Math.log(allEntries.length / containing);
  }

  // Sum of TF*IDF over the entry's unique terms, normalized by token count.
  function calculateTFIDF(entry, allEntries) {
    const tokens = tokenize(entry.content);
    if (tokens.length === 0) return 0;
    let total = 0;
    for (const term of new Set(tokens)) {
      total += calculateTF(term, tokens) * calculateIDF(term, allEntries);
    }
    return total / tokens.length;
  }

  // BTSP and "active" entries weigh double; "silent" entries are halved.
  function getStateMultiplier(entry) {
    if (entry.isBTSP) return 2;
    if (entry.state === "active") return 2;
    if (entry.state === "silent") return 0.5;
    return 1;
  }

  // Combined ranking: TF-IDF scaled by the engram's current (decayed)
  // score and the state multiplier.
  function priorityScore(entry, allEntries) {
    const tfidf = calculateTFIDF(entry, allEntries);
    const engramDecay = 1 - engramScorer.calculateScore(entry);
    return tfidf * (1 - engramDecay) * getStateMultiplier(entry);
  }

  // Prune `entries` down to `budget` tokens. BTSP entries are always kept
  // (even if they alone exceed the budget); the rest compete by priority.
  function pruneToFit(entries, budget = tokenBudget) {
    if (entries.length === 0) {
      return { kept: [], removed: [], originalTokens: 0, prunedTokens: 0, budgetUtilization: 0 };
    }
    const originalTokens = entries.reduce((sum, e) => sum + estimateTokens(e.content), 0);
    const btspEntries = entries.filter((e) => e.isBTSP);
    const regular = entries.filter((e) => !e.isBTSP);
    let usedTokens = btspEntries.reduce((sum, e) => sum + estimateTokens(e.content), 0);
    const ranked = regular
      .map((entry) => ({
        entry,
        score: priorityScore(entry, entries),
        tokens: estimateTokens(entry.content)
      }))
      .sort((a, b) => b.score - a.score);
    const kept = [...btspEntries];
    const removed = [];
    for (const candidate of ranked) {
      if (usedTokens + candidate.tokens <= budget) {
        kept.push(candidate.entry);
        usedTokens += candidate.tokens;
      } else {
        removed.push(candidate.entry);
      }
    }
    return {
      kept,
      removed,
      originalTokens,
      prunedTokens: usedTokens,
      budgetUtilization: budget > 0 ? usedTokens / budget : 0
    };
  }

  return { pruneToFit, priorityScore };
}
159
// Convenience factory: assemble a budget pruner from the three config
// sections (realtime supplies the token budget; decay and states are
// passed through unchanged).
function createBudgetPrunerFromConfig(realtimeConfig, decayConfig, statesConfig) {
  const prunerConfig = {
    tokenBudget: realtimeConfig.tokenBudget,
    decay: decayConfig,
    states: statesConfig
  };
  return createBudgetPruner(prunerConfig);
}
166
+
167
+ // src/utils/context-parser.ts
168
+ var import_node_crypto2 = require("crypto");
169
+
170
+ // src/utils/hash.ts
171
+ var import_node_crypto = require("crypto");
172
// SHA-256 digest of `content` as a 64-character lowercase hex string.
// Used for content-based deduplication of memory entries.
function hashContent(content) {
  const hasher = (0, import_node_crypto.createHash)("sha256");
  hasher.update(content, "utf8");
  return hasher.digest("hex");
}
175
+
176
+ // src/utils/context-parser.ts
177
// Split a raw Claude Code context into memory entries.
// Scans line by line, grouping into blocks at conversation-turn markers
// ("User:" / "Assistant:"), tool-call tags, and tool-result tags; consecutive
// result lines are merged into a single block. Blank-only blocks are dropped.
function parseClaudeCodeContext(context) {
  const result = [];
  const timestamp = Date.now();
  let buffer = [];
  let kind = "other";
  // Emit the accumulated block (if any) as one entry and reset the buffer.
  const flush = () => {
    if (buffer.length > 0) {
      result.push(createEntry(buffer.join("\n"), kind, timestamp));
      buffer = [];
    }
  };
  for (const rawLine of context.split("\n")) {
    const stripped = rawLine.trim();
    if (stripped.startsWith("User:") || stripped.startsWith("Assistant:")) {
      // New conversation turn.
      flush();
      kind = "conversation";
      buffer.push(rawLine);
    } else if (
      stripped.includes("<function_calls>") ||
      stripped.includes("<invoke>") ||
      stripped.includes("<tool_use>")
    ) {
      // Tool invocation boundary.
      flush();
      kind = "tool";
      buffer.push(rawLine);
    } else if (stripped.includes("<function_results>") || stripped.includes("</function_results>")) {
      // Tool result boundary; consecutive result lines stay in one block.
      if (kind !== "result") {
        flush();
      }
      kind = "result";
      buffer.push(rawLine);
    } else if (buffer.length > 0) {
      // Continuation of the current block.
      buffer.push(rawLine);
    } else if (stripped.length > 0) {
      // First non-blank line outside any block starts an "other" block.
      buffer.push(rawLine);
      kind = "other";
    }
  }
  flush();
  return result.filter((e) => e.content.trim().length > 0);
}
218
// Build a memory entry for one parsed context block.
// The initial score depends on the block type (conversation turns rank
// highest, raw tool results lowest) and in turn determines the initial
// confidence state.
function createEntry(content, type, baseTime) {
  let initialScore = 0.5;
  if (type === "conversation") {
    initialScore = 0.8;
  } else if (type === "tool") {
    initialScore = 0.7;
  } else if (type === "result") {
    initialScore = 0.4;
  }
  let state;
  if (initialScore > 0.7) {
    state = "active";
  } else if (initialScore > 0.3) {
    state = "ready";
  } else {
    state = "silent";
  }
  return {
    id: (0, import_node_crypto2.randomUUID)(),
    content,
    hash: hashContent(content),
    timestamp: baseTime,
    score: initialScore,
    state,
    // 24 hours default TTL, in seconds.
    ttl: 24 * 3600,
    accessCount: 0,
    tags: [type],
    metadata: { type },
    isBTSP: false
  };
}
239
+
240
+ // src/hooks/pre-prompt.ts
241
// Write `output` to stdout and terminate with exit code 0.
// The hook contract requires always exiting 0 so the host agent is never
// disrupted; every code path in this file funnels through here.
function exitSuccess(output) {
  process.stdout.write(output);
  process.exit(0);
}
245
// Entry point for the pre-prompt hook.
// Reads the whole context from stdin; when the estimated token count meets
// the configured threshold, prunes it to the token budget and emits the
// optimized context. Every failure path passes the original input through
// and exits 0.
//
// Fix: the original catch handler re-read process.stdin with `for await`,
// but the stream had already been consumed in the try body. Any error
// thrown after reading (parse/prune failure) therefore produced an empty
// `input` and the hook emitted an empty context. Stdin is now captured
// exactly once, into a binding visible to the catch handler.
async function main() {
  let input = "";
  try {
    // Capture stdin exactly once — a Readable stream cannot be re-read.
    const chunks = [];
    for await (const chunk of process.stdin) {
      chunks.push(chunk);
    }
    input = Buffer.concat(chunks).toString("utf-8");
    const tokens = estimateTokens(input);
    // Load user config; a missing or unreadable config means "do nothing".
    const configPath = (0, import_node_path.join)((0, import_node_os.homedir)(), ".sparn", "config.yaml");
    let config;
    try {
      const configYAML = (0, import_node_fs.readFileSync)(configPath, "utf-8");
      config = (0, import_js_yaml.load)(configYAML);
    } catch {
      exitSuccess(input);
      return;
    }
    const { autoOptimizeThreshold, tokenBudget } = config.realtime;
    // Under the threshold: pass through unmodified.
    if (tokens < autoOptimizeThreshold) {
      exitSuccess(input);
      return;
    }
    const entries = parseClaudeCodeContext(input);
    if (entries.length === 0) {
      exitSuccess(input);
      return;
    }
    const pruner = createBudgetPrunerFromConfig(config.realtime, config.decay, config.states);
    const result = pruner.pruneToFit(entries, tokenBudget);
    // Re-emit kept entries in chronological order.
    const sorted = [...result.kept].sort((a, b) => a.timestamp - b.timestamp);
    const optimizedContext = sorted.map((e) => e.content).join("\n\n");
    exitSuccess(optimizedContext);
  } catch (_error) {
    // On any unexpected error, fall through with whatever input was
    // captured instead of re-reading the already-consumed stream.
    exitSuccess(input);
  }
}
main();
287
+ //# sourceMappingURL=pre-prompt.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/hooks/pre-prompt.ts","../../src/utils/tokenizer.ts","../../src/core/engram-scorer.ts","../../src/core/budget-pruner.ts","../../src/utils/context-parser.ts","../../src/utils/hash.ts"],"sourcesContent":["#!/usr/bin/env node\n/**\n * Pre-Prompt Hook - Claude Code hook for real-time context optimization\n *\n * Reads context from stdin, checks if tokens exceed threshold,\n * optimizes if needed, writes to stdout.\n *\n * CRITICAL: Always exits 0 (never disrupts Claude Code).\n * Falls through unmodified if under threshold or on error.\n */\n\nimport { readFileSync } from 'node:fs';\nimport { homedir } from 'node:os';\nimport { join } from 'node:path';\nimport { load as parseYAML } from 'js-yaml';\nimport { createBudgetPrunerFromConfig } from '../core/budget-pruner.js';\nimport type { SparnConfig } from '../types/config.js';\nimport { parseClaudeCodeContext } from '../utils/context-parser.js';\nimport { estimateTokens } from '../utils/tokenizer.js';\n\n// Exit 0 wrapper for all errors\nfunction exitSuccess(output: string): void {\n process.stdout.write(output);\n process.exit(0);\n}\n\n// Main hook logic\nasync function main(): Promise<void> {\n try {\n // Read stdin (context)\n const chunks: Buffer[] = [];\n for await (const chunk of process.stdin) {\n chunks.push(chunk);\n }\n const input = Buffer.concat(chunks).toString('utf-8');\n\n // Estimate tokens\n const tokens = estimateTokens(input);\n\n // Load config\n const configPath = join(homedir(), '.sparn', 'config.yaml');\n let config: SparnConfig;\n\n try {\n const configYAML = readFileSync(configPath, 'utf-8');\n config = parseYAML(configYAML) as SparnConfig;\n } catch {\n // Config not found or invalid, fall through\n exitSuccess(input);\n return;\n }\n\n const { autoOptimizeThreshold, tokenBudget } = config.realtime;\n\n // Check if optimization needed\n if (tokens < autoOptimizeThreshold) {\n // Under threshold, pass through unmodified\n exitSuccess(input);\n return;\n }\n\n 
// Parse context into entries\n const entries = parseClaudeCodeContext(input);\n\n if (entries.length === 0) {\n // No entries to optimize, pass through\n exitSuccess(input);\n return;\n }\n\n // Create budget pruner\n const pruner = createBudgetPrunerFromConfig(config.realtime, config.decay, config.states);\n\n // Prune to fit budget\n const result = pruner.pruneToFit(entries, tokenBudget);\n\n // Build optimized context (chronologically ordered)\n const sorted = [...result.kept].sort((a, b) => a.timestamp - b.timestamp);\n const optimizedContext = sorted.map((e) => e.content).join('\\n\\n');\n\n // Output optimized context\n exitSuccess(optimizedContext);\n } catch (_error) {\n // On any error, pass through original input\n // Read stdin again if needed (shouldn't happen, but safety fallback)\n const chunks: Buffer[] = [];\n for await (const chunk of process.stdin) {\n chunks.push(chunk);\n }\n const input = Buffer.concat(chunks).toString('utf-8');\n exitSuccess(input);\n }\n}\n\n// Run hook\nmain();\n","/**\n * Token estimation utilities.\n * Uses whitespace heuristic (~90% accuracy vs GPT tokenizer).\n */\n\n/**\n * Estimate token count for text using heuristic.\n *\n * Approximation: 1 token ≈ 4 chars or 0.75 words\n * Provides ~90% accuracy compared to GPT tokenizer, sufficient for optimization heuristics.\n *\n * @param text - Text to count\n * @returns Estimated token count\n *\n * @example\n * ```typescript\n * const tokens = estimateTokens('Hello world');\n * console.log(tokens); // ~2\n * ```\n */\nexport function estimateTokens(text: string): number {\n if (!text || text.length === 0) {\n return 0;\n }\n\n // Split on whitespace to get words\n const words = text.split(/\\s+/).filter((w) => w.length > 0);\n const wordCount = words.length;\n\n // Character-based estimate\n const charCount = text.length;\n const charEstimate = Math.ceil(charCount / 4);\n\n // Word-based estimate\n const wordEstimate = Math.ceil(wordCount * 0.75);\n\n // Return the maximum 
of both estimates (more conservative)\n return Math.max(wordEstimate, charEstimate);\n}\n","/**\n * Engram Scorer - Implements engram theory (memory decay)\n *\n * Neuroscience: Memories fade over time without reinforcement.\n * Application: Apply exponential decay formula to memory scores based on age and access count.\n *\n * Formula: decay = 1 - e^(-age/TTL)\n * Score adjustment: score_new = score_old * (1 - decay) + (accessCount bonus)\n */\n\nimport type { MemoryEntry } from '../types/memory.js';\n\nexport interface EngramScorerConfig {\n /** Default TTL in hours for new entries */\n defaultTTL: number;\n /** Decay threshold (0.0-1.0) above which entries are marked for pruning */\n decayThreshold: number;\n}\n\nexport interface EngramScorer {\n /**\n * Calculate current score for an entry based on decay and access count\n * @param entry - Memory entry to score\n * @param currentTime - Current timestamp in milliseconds (for testing)\n * @returns Updated score (0.0-1.0)\n */\n calculateScore(entry: MemoryEntry, currentTime?: number): number;\n\n /**\n * Refresh TTL to default value\n * @param entry - Entry to refresh\n * @returns Entry with refreshed TTL and timestamp\n */\n refreshTTL(entry: MemoryEntry): MemoryEntry;\n\n /**\n * Calculate decay factor (0.0-1.0) based on age and TTL\n * @param ageInSeconds - Age of entry in seconds\n * @param ttlInSeconds - TTL in seconds\n * @returns Decay factor (0.0 = fresh, 1.0 = fully decayed)\n */\n calculateDecay(ageInSeconds: number, ttlInSeconds: number): number;\n}\n\n/**\n * Create an engram scorer instance\n * @param config - Scorer configuration\n * @returns EngramScorer instance\n */\nexport function createEngramScorer(config: EngramScorerConfig): EngramScorer {\n const { defaultTTL } = config;\n\n function calculateDecay(ageInSeconds: number, ttlInSeconds: number): number {\n if (ttlInSeconds === 0) return 1.0; // Instant decay\n if (ageInSeconds <= 0) return 0.0; // Fresh entry\n\n // Exponential decay: 1 - 
e^(-age/TTL)\n const ratio = ageInSeconds / ttlInSeconds;\n const decay = 1 - Math.exp(-ratio);\n\n // Clamp to [0.0, 1.0]\n return Math.max(0, Math.min(1, decay));\n }\n\n function calculateScore(entry: MemoryEntry, currentTime: number = Date.now()): number {\n // Calculate age in seconds\n const ageInMilliseconds = currentTime - entry.timestamp;\n const ageInSeconds = Math.max(0, ageInMilliseconds / 1000);\n\n // Calculate decay factor\n const decay = calculateDecay(ageInSeconds, entry.ttl);\n\n // Base score reduced by decay\n let score = entry.score * (1 - decay);\n\n // Access count bonus (diminishing returns via log)\n if (entry.accessCount > 0) {\n const accessBonus = Math.log(entry.accessCount + 1) * 0.1;\n score = Math.min(1.0, score + accessBonus);\n }\n\n // BTSP entries maintain high score\n if (entry.isBTSP) {\n score = Math.max(score, 0.9);\n }\n\n return Math.max(0, Math.min(1, score));\n }\n\n function refreshTTL(entry: MemoryEntry): MemoryEntry {\n return {\n ...entry,\n ttl: defaultTTL * 3600, // Convert hours to seconds\n timestamp: Date.now(),\n };\n }\n\n return {\n calculateScore,\n refreshTTL,\n calculateDecay,\n };\n}\n","/**\n * Budget-Aware Pruner - Token budget optimization\n *\n * Unlike SparsePruner which keeps top N% entries, BudgetPruner fits entries\n * within a target token budget using priority scoring that combines:\n * - TF-IDF relevance\n * - Engram decay\n * - Confidence state multipliers\n * - BTSP bypass (always included)\n *\n * Target use case: Real-time optimization for Opus model (~50K token budget)\n */\n\nimport type { RealtimeConfig } from '../types/config.js';\nimport type { MemoryEntry } from '../types/memory.js';\nimport type { PruneResult } from '../types/pruner.js';\nimport { estimateTokens } from '../utils/tokenizer.js';\nimport { createEngramScorer } from './engram-scorer.js';\n\nexport interface BudgetPrunerConfig {\n /** Target token budget */\n tokenBudget: number;\n /** Decay configuration */\n decay: {\n 
defaultTTL: number;\n decayThreshold: number;\n };\n /** State multipliers */\n states: {\n activeThreshold: number;\n readyThreshold: number;\n };\n}\n\nexport interface BudgetPruner {\n /**\n * Prune entries to fit within token budget\n * @param entries - Memory entries to prune\n * @param budget - Optional override budget (uses config default if not provided)\n * @returns Result with kept/removed entries and budget utilization\n */\n pruneToFit(entries: MemoryEntry[], budget?: number): PruneResult & { budgetUtilization: number };\n\n /**\n * Calculate priority score for an entry\n * @param entry - Entry to score\n * @param allEntries - All entries for TF-IDF calculation\n * @returns Priority score (higher = more important)\n */\n priorityScore(entry: MemoryEntry, allEntries: MemoryEntry[]): number;\n}\n\n/**\n * Create a budget-aware pruner instance\n * @param config - Pruner configuration\n * @returns BudgetPruner instance\n */\nexport function createBudgetPruner(config: BudgetPrunerConfig): BudgetPruner {\n const { tokenBudget, decay } = config;\n const engramScorer = createEngramScorer(decay);\n\n function tokenize(text: string): string[] {\n return text\n .toLowerCase()\n .split(/\\s+/)\n .filter((word) => word.length > 0);\n }\n\n function calculateTF(term: string, tokens: string[]): number {\n const count = tokens.filter((t) => t === term).length;\n // Sqrt capping to prevent common words from dominating\n return Math.sqrt(count);\n }\n\n function calculateIDF(term: string, allEntries: MemoryEntry[]): number {\n const totalDocs = allEntries.length;\n const docsWithTerm = allEntries.filter((entry) => {\n const tokens = tokenize(entry.content);\n return tokens.includes(term);\n }).length;\n\n if (docsWithTerm === 0) return 0;\n\n return Math.log(totalDocs / docsWithTerm);\n }\n\n function calculateTFIDF(entry: MemoryEntry, allEntries: MemoryEntry[]): number {\n const tokens = tokenize(entry.content);\n if (tokens.length === 0) return 0;\n\n const uniqueTerms 
= [...new Set(tokens)];\n let totalScore = 0;\n\n for (const term of uniqueTerms) {\n const tf = calculateTF(term, tokens);\n const idf = calculateIDF(term, allEntries);\n totalScore += tf * idf;\n }\n\n // Normalize by entry length\n return totalScore / tokens.length;\n }\n\n function getStateMultiplier(entry: MemoryEntry): number {\n // BTSP entries get max priority (handled separately, but keep high multiplier)\n if (entry.isBTSP) return 2.0;\n\n // State-based multipliers\n switch (entry.state) {\n case 'active':\n return 2.0;\n case 'ready':\n return 1.0;\n case 'silent':\n return 0.5;\n default:\n return 1.0;\n }\n }\n\n function priorityScore(entry: MemoryEntry, allEntries: MemoryEntry[]): number {\n const tfidf = calculateTFIDF(entry, allEntries);\n const currentScore = engramScorer.calculateScore(entry);\n const engramDecay = 1 - currentScore; // Lower decay = higher priority\n const stateMultiplier = getStateMultiplier(entry);\n\n // Priority = TF-IDF * (1 - decay) * state_multiplier\n // This balances relevance, recency, and confidence state\n return tfidf * (1 - engramDecay) * stateMultiplier;\n }\n\n function pruneToFit(\n entries: MemoryEntry[],\n budget: number = tokenBudget,\n ): PruneResult & { budgetUtilization: number } {\n if (entries.length === 0) {\n return {\n kept: [],\n removed: [],\n originalTokens: 0,\n prunedTokens: 0,\n budgetUtilization: 0,\n };\n }\n\n // Calculate original token count\n const originalTokens = entries.reduce((sum, e) => sum + estimateTokens(e.content), 0);\n\n // Step 1: Separate BTSP entries (always included, bypass budget)\n const btspEntries = entries.filter((e) => e.isBTSP);\n const regularEntries = entries.filter((e) => !e.isBTSP);\n\n const btspTokens = btspEntries.reduce((sum, e) => sum + estimateTokens(e.content), 0);\n\n // Step 2: Score regular entries\n const scored = regularEntries.map((entry) => ({\n entry,\n score: priorityScore(entry, entries),\n tokens: estimateTokens(entry.content),\n }));\n\n // Step 
3: Sort by priority score descending\n scored.sort((a, b) => b.score - a.score);\n\n // Step 4: Greedy fill until budget exceeded\n const kept: MemoryEntry[] = [...btspEntries];\n const removed: MemoryEntry[] = [];\n let currentTokens = btspTokens;\n\n for (const item of scored) {\n if (currentTokens + item.tokens <= budget) {\n kept.push(item.entry);\n currentTokens += item.tokens;\n } else {\n removed.push(item.entry);\n }\n }\n\n const budgetUtilization = budget > 0 ? currentTokens / budget : 0;\n\n return {\n kept,\n removed,\n originalTokens,\n prunedTokens: currentTokens,\n budgetUtilization,\n };\n }\n\n return {\n pruneToFit,\n priorityScore,\n };\n}\n\n/**\n * Helper to create budget pruner from RealtimeConfig\n * @param realtimeConfig - Realtime configuration\n * @param decayConfig - Decay configuration\n * @param statesConfig - States configuration\n * @returns BudgetPruner instance\n */\nexport function createBudgetPrunerFromConfig(\n realtimeConfig: RealtimeConfig,\n decayConfig: { defaultTTL: number; decayThreshold: number },\n statesConfig: { activeThreshold: number; readyThreshold: number },\n): BudgetPruner {\n return createBudgetPruner({\n tokenBudget: realtimeConfig.tokenBudget,\n decay: decayConfig,\n states: statesConfig,\n });\n}\n","/**\n * Context Parser - Shared utilities for parsing agent contexts into memory entries\n *\n * Extracted from claude-code adapter to enable reuse across:\n * - Adapters (claude-code, generic)\n * - Real-time pipeline (streaming context)\n * - Hooks (pre-prompt, post-tool-result)\n */\n\nimport { randomUUID } from 'node:crypto';\nimport type { MemoryEntry } from '../types/memory.js';\nimport { hashContent } from './hash.js';\n\n/**\n * Block type classification for Claude Code context\n */\nexport type BlockType = 'conversation' | 'tool' | 'result' | 'other';\n\n/**\n * Parse Claude Code context into memory entries\n * Handles conversation turns, tool uses, and results\n * @param context - Raw context string\n * 
@returns Array of memory entries\n */\nexport function parseClaudeCodeContext(context: string): MemoryEntry[] {\n const entries: MemoryEntry[] = [];\n const now = Date.now();\n\n // Split by conversation turns and tool boundaries\n const lines = context.split('\\n');\n let currentBlock: string[] = [];\n let blockType: BlockType = 'other';\n\n for (const line of lines) {\n const trimmed = line.trim();\n\n // Detect conversation turns\n if (trimmed.startsWith('User:') || trimmed.startsWith('Assistant:')) {\n if (currentBlock.length > 0) {\n entries.push(createEntry(currentBlock.join('\\n'), blockType, now));\n currentBlock = [];\n }\n blockType = 'conversation';\n currentBlock.push(line);\n }\n // Detect tool calls\n else if (\n trimmed.includes('<function_calls>') ||\n trimmed.includes('<invoke>') ||\n trimmed.includes('<tool_use>')\n ) {\n if (currentBlock.length > 0) {\n entries.push(createEntry(currentBlock.join('\\n'), blockType, now));\n currentBlock = [];\n }\n blockType = 'tool';\n currentBlock.push(line);\n }\n // Detect tool results\n else if (trimmed.includes('<function_results>') || trimmed.includes('</function_results>')) {\n if (currentBlock.length > 0 && blockType !== 'result') {\n entries.push(createEntry(currentBlock.join('\\n'), blockType, now));\n currentBlock = [];\n }\n blockType = 'result';\n currentBlock.push(line);\n }\n // Continue current block\n else if (currentBlock.length > 0) {\n currentBlock.push(line);\n }\n // Start new block if line has content\n else if (trimmed.length > 0) {\n currentBlock.push(line);\n blockType = 'other';\n }\n }\n\n // Add final block\n if (currentBlock.length > 0) {\n entries.push(createEntry(currentBlock.join('\\n'), blockType, now));\n }\n\n return entries.filter((e) => e.content.trim().length > 0);\n}\n\n/**\n * Create a memory entry from a content block\n * @param content - Block content\n * @param type - Block type\n * @param baseTime - Base timestamp\n * @returns Memory entry\n */\nexport function 
createEntry(content: string, type: BlockType, baseTime: number): MemoryEntry {\n const tags: string[] = [type];\n\n // Assign initial score based on type\n let initialScore = 0.5;\n if (type === 'conversation') initialScore = 0.8; // Prioritize conversation\n if (type === 'tool') initialScore = 0.7; // Tool calls are important\n if (type === 'result') initialScore = 0.4; // Results can be verbose\n\n return {\n id: randomUUID(),\n content,\n hash: hashContent(content),\n timestamp: baseTime,\n score: initialScore,\n state: initialScore > 0.7 ? 'active' : initialScore > 0.3 ? 'ready' : 'silent',\n ttl: 24 * 3600, // 24 hours default\n accessCount: 0,\n tags,\n metadata: { type },\n isBTSP: false,\n };\n}\n\n/**\n * Parse generic context (fallback for non-Claude-Code agents)\n * Splits on double newlines, treats as paragraphs\n * @param context - Raw context string\n * @returns Array of memory entries\n */\nexport function parseGenericContext(context: string): MemoryEntry[] {\n const entries: MemoryEntry[] = [];\n const now = Date.now();\n\n // Split on double newlines (paragraph boundaries)\n const blocks = context.split(/\\n\\n+/);\n\n for (const block of blocks) {\n const trimmed = block.trim();\n if (trimmed.length === 0) continue;\n\n entries.push(createEntry(trimmed, 'other', now));\n }\n\n return entries;\n}\n","/**\n * Content hashing utilities.\n * Uses SHA-256 for deduplication.\n */\n\nimport { createHash } from 'node:crypto';\n\n/**\n * Generate SHA-256 hash of content for deduplication.\n *\n * @param content - Content to hash\n * @returns 64-character hex string (SHA-256)\n *\n * @example\n * ```typescript\n * const hash = hashContent('Hello world');\n * console.log(hash.length); // 64\n * ```\n */\nexport function hashContent(content: string): string {\n return createHash('sha256').update(content, 
'utf8').digest('hex');\n}\n"],"mappings":";;;;AAWA,qBAA6B;AAC7B,qBAAwB;AACxB,uBAAqB;AACrB,qBAAkC;;;ACM3B,SAAS,eAAe,MAAsB;AACnD,MAAI,CAAC,QAAQ,KAAK,WAAW,GAAG;AAC9B,WAAO;AAAA,EACT;AAGA,QAAM,QAAQ,KAAK,MAAM,KAAK,EAAE,OAAO,CAAC,MAAM,EAAE,SAAS,CAAC;AAC1D,QAAM,YAAY,MAAM;AAGxB,QAAM,YAAY,KAAK;AACvB,QAAM,eAAe,KAAK,KAAK,YAAY,CAAC;AAG5C,QAAM,eAAe,KAAK,KAAK,YAAY,IAAI;AAG/C,SAAO,KAAK,IAAI,cAAc,YAAY;AAC5C;;;ACWO,SAAS,mBAAmB,QAA0C;AAC3E,QAAM,EAAE,WAAW,IAAI;AAEvB,WAAS,eAAe,cAAsB,cAA8B;AAC1E,QAAI,iBAAiB,EAAG,QAAO;AAC/B,QAAI,gBAAgB,EAAG,QAAO;AAG9B,UAAM,QAAQ,eAAe;AAC7B,UAAM,QAAQ,IAAI,KAAK,IAAI,CAAC,KAAK;AAGjC,WAAO,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,KAAK,CAAC;AAAA,EACvC;AAEA,WAAS,eAAe,OAAoB,cAAsB,KAAK,IAAI,GAAW;AAEpF,UAAM,oBAAoB,cAAc,MAAM;AAC9C,UAAM,eAAe,KAAK,IAAI,GAAG,oBAAoB,GAAI;AAGzD,UAAM,QAAQ,eAAe,cAAc,MAAM,GAAG;AAGpD,QAAI,QAAQ,MAAM,SAAS,IAAI;AAG/B,QAAI,MAAM,cAAc,GAAG;AACzB,YAAM,cAAc,KAAK,IAAI,MAAM,cAAc,CAAC,IAAI;AACtD,cAAQ,KAAK,IAAI,GAAK,QAAQ,WAAW;AAAA,IAC3C;AAGA,QAAI,MAAM,QAAQ;AAChB,cAAQ,KAAK,IAAI,OAAO,GAAG;AAAA,IAC7B;AAEA,WAAO,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,KAAK,CAAC;AAAA,EACvC;AAEA,WAAS,WAAW,OAAiC;AACnD,WAAO;AAAA,MACL,GAAG;AAAA,MACH,KAAK,aAAa;AAAA;AAAA,MAClB,WAAW,KAAK,IAAI;AAAA,IACtB;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;;;AC7CO,SAAS,mBAAmB,QAA0C;AAC3E,QAAM,EAAE,aAAa,MAAM,IAAI;AAC/B,QAAM,eAAe,mBAAmB,KAAK;AAE7C,WAAS,SAAS,MAAwB;AACxC,WAAO,KACJ,YAAY,EACZ,MAAM,KAAK,EACX,OAAO,CAAC,SAAS,KAAK,SAAS,CAAC;AAAA,EACrC;AAEA,WAAS,YAAY,MAAc,QAA0B;AAC3D,UAAM,QAAQ,OAAO,OAAO,CAAC,MAAM,MAAM,IAAI,EAAE;AAE/C,WAAO,KAAK,KAAK,KAAK;AAAA,EACxB;AAEA,WAAS,aAAa,MAAc,YAAmC;AACrE,UAAM,YAAY,WAAW;AAC7B,UAAM,eAAe,WAAW,OAAO,CAAC,UAAU;AAChD,YAAM,SAAS,SAAS,MAAM,OAAO;AACrC,aAAO,OAAO,SAAS,IAAI;AAAA,IAC7B,CAAC,EAAE;AAEH,QAAI,iBAAiB,EAAG,QAAO;AAE/B,WAAO,KAAK,IAAI,YAAY,YAAY;AAAA,EAC1C;AAEA,WAAS,eAAe,OAAoB,YAAmC;AAC7E,UAAM,SAAS,SAAS,MAAM,OAAO;AACrC,QAAI,OAAO,WAAW,EAAG,QAAO;AAEhC,UAAM,cAAc,CAAC,GAAG,IAAI,IAAI,MAAM,CAAC;AACvC,QAAI,aAAa;AAEjB,eAAW,QAAQ,aAAa;AAC9B,YAAM,KAAK,YAAY,MAAM,MAAM;AACnC,YAAM,MAAM
,aAAa,MAAM,UAAU;AACzC,oBAAc,KAAK;AAAA,IACrB;AAGA,WAAO,aAAa,OAAO;AAAA,EAC7B;AAEA,WAAS,mBAAmB,OAA4B;AAEtD,QAAI,MAAM,OAAQ,QAAO;AAGzB,YAAQ,MAAM,OAAO;AAAA,MACnB,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT;AACE,eAAO;AAAA,IACX;AAAA,EACF;AAEA,WAAS,cAAc,OAAoB,YAAmC;AAC5E,UAAM,QAAQ,eAAe,OAAO,UAAU;AAC9C,UAAM,eAAe,aAAa,eAAe,KAAK;AACtD,UAAM,cAAc,IAAI;AACxB,UAAM,kBAAkB,mBAAmB,KAAK;AAIhD,WAAO,SAAS,IAAI,eAAe;AAAA,EACrC;AAEA,WAAS,WACP,SACA,SAAiB,aAC4B;AAC7C,QAAI,QAAQ,WAAW,GAAG;AACxB,aAAO;AAAA,QACL,MAAM,CAAC;AAAA,QACP,SAAS,CAAC;AAAA,QACV,gBAAgB;AAAA,QAChB,cAAc;AAAA,QACd,mBAAmB;AAAA,MACrB;AAAA,IACF;AAGA,UAAM,iBAAiB,QAAQ,OAAO,CAAC,KAAK,MAAM,MAAM,eAAe,EAAE,OAAO,GAAG,CAAC;AAGpF,UAAM,cAAc,QAAQ,OAAO,CAAC,MAAM,EAAE,MAAM;AAClD,UAAM,iBAAiB,QAAQ,OAAO,CAAC,MAAM,CAAC,EAAE,MAAM;AAEtD,UAAM,aAAa,YAAY,OAAO,CAAC,KAAK,MAAM,MAAM,eAAe,EAAE,OAAO,GAAG,CAAC;AAGpF,UAAM,SAAS,eAAe,IAAI,CAAC,WAAW;AAAA,MAC5C;AAAA,MACA,OAAO,cAAc,OAAO,OAAO;AAAA,MACnC,QAAQ,eAAe,MAAM,OAAO;AAAA,IACtC,EAAE;AAGF,WAAO,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAGvC,UAAM,OAAsB,CAAC,GAAG,WAAW;AAC3C,UAAM,UAAyB,CAAC;AAChC,QAAI,gBAAgB;AAEpB,eAAW,QAAQ,QAAQ;AACzB,UAAI,gBAAgB,KAAK,UAAU,QAAQ;AACzC,aAAK,KAAK,KAAK,KAAK;AACpB,yBAAiB,KAAK;AAAA,MACxB,OAAO;AACL,gBAAQ,KAAK,KAAK,KAAK;AAAA,MACzB;AAAA,IACF;AAEA,UAAM,oBAAoB,SAAS,IAAI,gBAAgB,SAAS;AAEhE,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA,cAAc;AAAA,MACd;AAAA,IACF;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA;AAAA,EACF;AACF;AASO,SAAS,6BACd,gBACA,aACA,cACc;AACd,SAAO,mBAAmB;AAAA,IACxB,aAAa,eAAe;AAAA,IAC5B,OAAO;AAAA,IACP,QAAQ;AAAA,EACV,CAAC;AACH;;;AC3MA,IAAAA,sBAA2B;;;ACJ3B,yBAA2B;AAcpB,SAAS,YAAY,SAAyB;AACnD,aAAO,+BAAW,QAAQ,EAAE,OAAO,SAAS,MAAM,EAAE,OAAO,KAAK;AAClE;;;ADGO,SAAS,uBAAuB,SAAgC;AACrE,QAAM,UAAyB,CAAC;AAChC,QAAM,MAAM,KAAK,IAAI;AAGrB,QAAM,QAAQ,QAAQ,MAAM,IAAI;AAChC,MAAI,eAAyB,CAAC;AAC9B,MAAI,YAAuB;AAE3B,aAAW,QAAQ,OAAO;AACxB,UAAM,UAAU,KAAK,KAAK;AAG1B,QAAI,QAAQ,WAAW,OAAO,KAAK,QAAQ,WAAW,YAAY,GAAG;AACnE,UAAI,aAAa,SAAS,GAAG;AAC3B,gBAAQ,KAAK,YAAY,aAAa,KAAK,IAAI,GAAG,WAAW,GAAG,CA
AC;AACjE,uBAAe,CAAC;AAAA,MAClB;AACA,kBAAY;AACZ,mBAAa,KAAK,IAAI;AAAA,IACxB,WAGE,QAAQ,SAAS,kBAAkB,KACnC,QAAQ,SAAS,UAAU,KAC3B,QAAQ,SAAS,YAAY,GAC7B;AACA,UAAI,aAAa,SAAS,GAAG;AAC3B,gBAAQ,KAAK,YAAY,aAAa,KAAK,IAAI,GAAG,WAAW,GAAG,CAAC;AACjE,uBAAe,CAAC;AAAA,MAClB;AACA,kBAAY;AACZ,mBAAa,KAAK,IAAI;AAAA,IACxB,WAES,QAAQ,SAAS,oBAAoB,KAAK,QAAQ,SAAS,qBAAqB,GAAG;AAC1F,UAAI,aAAa,SAAS,KAAK,cAAc,UAAU;AACrD,gBAAQ,KAAK,YAAY,aAAa,KAAK,IAAI,GAAG,WAAW,GAAG,CAAC;AACjE,uBAAe,CAAC;AAAA,MAClB;AACA,kBAAY;AACZ,mBAAa,KAAK,IAAI;AAAA,IACxB,WAES,aAAa,SAAS,GAAG;AAChC,mBAAa,KAAK,IAAI;AAAA,IACxB,WAES,QAAQ,SAAS,GAAG;AAC3B,mBAAa,KAAK,IAAI;AACtB,kBAAY;AAAA,IACd;AAAA,EACF;AAGA,MAAI,aAAa,SAAS,GAAG;AAC3B,YAAQ,KAAK,YAAY,aAAa,KAAK,IAAI,GAAG,WAAW,GAAG,CAAC;AAAA,EACnE;AAEA,SAAO,QAAQ,OAAO,CAAC,MAAM,EAAE,QAAQ,KAAK,EAAE,SAAS,CAAC;AAC1D;AASO,SAAS,YAAY,SAAiB,MAAiB,UAA+B;AAC3F,QAAM,OAAiB,CAAC,IAAI;AAG5B,MAAI,eAAe;AACnB,MAAI,SAAS,eAAgB,gBAAe;AAC5C,MAAI,SAAS,OAAQ,gBAAe;AACpC,MAAI,SAAS,SAAU,gBAAe;AAEtC,SAAO;AAAA,IACL,QAAI,gCAAW;AAAA,IACf;AAAA,IACA,MAAM,YAAY,OAAO;AAAA,IACzB,WAAW;AAAA,IACX,OAAO;AAAA,IACP,OAAO,eAAe,MAAM,WAAW,eAAe,MAAM,UAAU;AAAA,IACtE,KAAK,KAAK;AAAA;AAAA,IACV,aAAa;AAAA,IACb;AAAA,IACA,UAAU,EAAE,KAAK;AAAA,IACjB,QAAQ;AAAA,EACV;AACF;;;AJ9FA,SAAS,YAAY,QAAsB;AACzC,UAAQ,OAAO,MAAM,MAAM;AAC3B,UAAQ,KAAK,CAAC;AAChB;AAGA,eAAe,OAAsB;AACnC,MAAI;AAEF,UAAM,SAAmB,CAAC;AAC1B,qBAAiB,SAAS,QAAQ,OAAO;AACvC,aAAO,KAAK,KAAK;AAAA,IACnB;AACA,UAAM,QAAQ,OAAO,OAAO,MAAM,EAAE,SAAS,OAAO;AAGpD,UAAM,SAAS,eAAe,KAAK;AAGnC,UAAM,iBAAa,2BAAK,wBAAQ,GAAG,UAAU,aAAa;AAC1D,QAAI;AAEJ,QAAI;AACF,YAAM,iBAAa,6BAAa,YAAY,OAAO;AACnD,mBAAS,eAAAC,MAAU,UAAU;AAAA,IAC/B,QAAQ;AAEN,kBAAY,KAAK;AACjB;AAAA,IACF;AAEA,UAAM,EAAE,uBAAuB,YAAY,IAAI,OAAO;AAGtD,QAAI,SAAS,uBAAuB;AAElC,kBAAY,KAAK;AACjB;AAAA,IACF;AAGA,UAAM,UAAU,uBAAuB,KAAK;AAE5C,QAAI,QAAQ,WAAW,GAAG;AAExB,kBAAY,KAAK;AACjB;AAAA,IACF;AAGA,UAAM,SAAS,6BAA6B,OAAO,UAAU,OAAO,OAAO,OAAO,MAAM;AAGxF,UAAM,SAAS,OAAO,WAAW,SAAS,WAAW;AAGrD,UAAM,SAAS,CAAC,GAAG,OAAO,IAAI,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,SAAS;AACxE,UAAM,mBAAmB,OA
AO,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,MAAM;AAGjE,gBAAY,gBAAgB;AAAA,EAC9B,SAAS,QAAQ;AAGf,UAAM,SAAmB,CAAC;AAC1B,qBAAiB,SAAS,QAAQ,OAAO;AACvC,aAAO,KAAK,KAAK;AAAA,IACnB;AACA,UAAM,QAAQ,OAAO,OAAO,MAAM,EAAE,SAAS,OAAO;AACpD,gBAAY,KAAK;AAAA,EACnB;AACF;AAGA,KAAK;","names":["import_node_crypto","parseYAML"]}
@@ -0,0 +1 @@
1
+ #!/usr/bin/env node
@@ -0,0 +1 @@
1
+ #!/usr/bin/env node