claude-cortex 1.7.2 → 1.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +9 -0
- package/dist/index.js.map +1 -1
- package/dist/setup/claude-md.d.ts +6 -2
- package/dist/setup/claude-md.d.ts.map +1 -1
- package/dist/setup/claude-md.js +34 -13
- package/dist/setup/claude-md.js.map +1 -1
- package/dist/setup/clawdbot.d.ts +15 -0
- package/dist/setup/clawdbot.d.ts.map +1 -0
- package/dist/setup/clawdbot.js +118 -0
- package/dist/setup/clawdbot.js.map +1 -0
- package/hooks/clawdbot/cortex-memory/HOOK.md +71 -0
- package/hooks/clawdbot/cortex-memory/handler.js +280 -0
- package/package.json +4 -2
- package/scripts/pre-compact-hook.mjs +671 -0
- package/scripts/pre-compact-hook.sh +29 -0
- package/scripts/session-start-hook.mjs +219 -0
- package/scripts/start-dashboard.sh +41 -0
- package/scripts/stop-dashboard.sh +21 -0
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "claude-cortex",
|
|
3
|
-
"version": "1.
|
|
3
|
+
"version": "1.8.1",
|
|
4
4
|
"description": "Brain-like memory system for Claude Code - solves context compaction and memory persistence",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"type": "module",
|
|
@@ -65,6 +65,8 @@
|
|
|
65
65
|
},
|
|
66
66
|
"files": [
|
|
67
67
|
"dist",
|
|
68
|
-
"dashboard"
|
|
68
|
+
"dashboard",
|
|
69
|
+
"hooks",
|
|
70
|
+
"scripts"
|
|
69
71
|
]
|
|
70
72
|
}
|
|
@@ -0,0 +1,671 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* Pre-compact hook for Claude Memory - Automatic Memory Extraction
|
|
4
|
+
*
|
|
5
|
+
* This script runs before context compaction and:
|
|
6
|
+
* 1. Analyzes conversation content for important information
|
|
7
|
+
* 2. Auto-extracts high-salience items (decisions, patterns, errors, etc.)
|
|
8
|
+
* 3. Saves them to the memory database automatically
|
|
9
|
+
* 4. Creates a session marker for continuity
|
|
10
|
+
*
|
|
11
|
+
* The goal: Never lose important context during compaction.
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import Database from 'better-sqlite3';
|
|
15
|
+
import { existsSync, mkdirSync } from 'fs';
|
|
16
|
+
import { join } from 'path';
|
|
17
|
+
import { homedir } from 'os';
|
|
18
|
+
|
|
19
|
+
// Database paths (with legacy fallback)
|
|
20
|
+
const NEW_DB_DIR = join(homedir(), '.claude-cortex');
|
|
21
|
+
const LEGACY_DB_DIR = join(homedir(), '.claude-memory');
|
|
22
|
+
|
|
23
|
+
// Auto-detect: use new path if it exists, or if legacy doesn't exist (new install)
|
|
24
|
+
/**
 * Resolve which database location to use.
 * Prefers the new ~/.claude-cortex path whenever it already exists or the
 * legacy ~/.claude-memory path is absent (i.e. a fresh install); otherwise
 * falls back to the legacy location.
 * @returns {{dir: string, path: string}} directory and full DB file path
 */
function getDbPath() {
  const candidates = {
    modern: join(NEW_DB_DIR, 'memories.db'),
    legacy: join(LEGACY_DB_DIR, 'memories.db'),
  };

  // Only stay on the legacy path when the new DB is missing AND a legacy DB exists.
  const useLegacy = !existsSync(candidates.modern) && existsSync(candidates.legacy);

  return useLegacy
    ? { dir: LEGACY_DB_DIR, path: candidates.legacy }
    : { dir: NEW_DB_DIR, path: candidates.modern };
}
|
|
32
|
+
|
|
33
|
+
const { dir: DB_DIR, path: DB_PATH } = getDbPath();
|
|
34
|
+
|
|
35
|
+
// Memory limits (should match src/memory/types.ts DEFAULT_CONFIG)
|
|
36
|
+
const MAX_SHORT_TERM_MEMORIES = 100;
|
|
37
|
+
const MAX_LONG_TERM_MEMORIES = 1000;
|
|
38
|
+
|
|
39
|
+
// Base salience threshold (will be adjusted dynamically)
|
|
40
|
+
// Lowered from 0.45 to capture more content
|
|
41
|
+
const BASE_THRESHOLD = 0.35;
|
|
42
|
+
|
|
43
|
+
// Category-specific extraction thresholds (lower = easier to extract)
|
|
44
|
+
// Lowered across the board to be more permissive
|
|
45
|
+
const CATEGORY_EXTRACTION_THRESHOLDS = {
|
|
46
|
+
architecture: 0.28, // Very valuable - extract readily
|
|
47
|
+
error: 0.30, // Valuable for debugging
|
|
48
|
+
context: 0.32, // Important decisions
|
|
49
|
+
learning: 0.32, // Useful learnings
|
|
50
|
+
pattern: 0.35, // Code patterns
|
|
51
|
+
preference: 0.38, // User preferences
|
|
52
|
+
note: 0.42, // General notes
|
|
53
|
+
todo: 0.40, // Moderate
|
|
54
|
+
relationship: 0.35,
|
|
55
|
+
custom: 0.35,
|
|
56
|
+
};
|
|
57
|
+
|
|
58
|
+
// ==================== PROJECT DETECTION (Mirrors src/context/project-context.ts) ====================
|
|
59
|
+
|
|
60
|
+
/** Directories to skip when extracting project name from path */
|
|
61
|
+
const SKIP_DIRECTORIES = [
|
|
62
|
+
'src', 'lib', 'dist', 'build', 'out',
|
|
63
|
+
'node_modules', '.git', '.next', '.cache',
|
|
64
|
+
'test', 'tests', '__tests__', 'spec',
|
|
65
|
+
'bin', 'scripts', 'config', 'public', 'static',
|
|
66
|
+
];
|
|
67
|
+
|
|
68
|
+
/**
|
|
69
|
+
* Extract project name from a file path.
|
|
70
|
+
* Skips common directory names that don't represent projects.
|
|
71
|
+
*/
|
|
72
|
+
/**
 * Extract a project name from a file path.
 * Walks the path segments from the deepest directory upward and returns the
 * first one that is neither a generic source-tree directory (src, dist, …)
 * nor a hidden directory (leading '.').
 * @param {string|undefined} path - filesystem path (posix or windows separators)
 * @returns {string|null} project name, or null when nothing qualifies
 */
function extractProjectFromPath(path) {
  if (!path) return null;

  const segments = path.split(/[/\\]/).filter(Boolean);

  // Deepest segment first: the directory closest to the file is the most
  // likely project name once generic/hidden names are skipped.
  for (const segment of [...segments].reverse()) {
    const isGeneric = SKIP_DIRECTORIES.includes(segment.toLowerCase());
    const isHidden = segment.startsWith('.');
    if (!isGeneric && !isHidden) return segment;
  }

  return null;
}
|
|
90
|
+
|
|
91
|
+
// Maximum memories to auto-create per compaction
|
|
92
|
+
const MAX_AUTO_MEMORIES = 5;
|
|
93
|
+
|
|
94
|
+
// ==================== DYNAMIC THRESHOLD CALCULATION ====================
|
|
95
|
+
|
|
96
|
+
/**
|
|
97
|
+
* Get current memory stats from database
|
|
98
|
+
*/
|
|
99
|
+
/**
 * Read aggregate memory counts from the database.
 *
 * NOTE: SQLite's SUM() yields NULL (not 0) when no rows match, so on an
 * empty `memories` table the row comes back as
 * { total: 0, shortTerm: null, longTerm: null }. We coalesce every field
 * so callers always receive plain numbers (null would otherwise leak into
 * the fullness arithmetic downstream).
 *
 * @param {import('better-sqlite3').Database} db - open database handle
 * @returns {{total: number, shortTerm: number, longTerm: number}}
 */
function getMemoryStats(db) {
  try {
    const row = db.prepare(`
      SELECT
        COUNT(*) as total,
        SUM(CASE WHEN type = 'short_term' THEN 1 ELSE 0 END) as shortTerm,
        SUM(CASE WHEN type = 'long_term' THEN 1 ELSE 0 END) as longTerm
      FROM memories
    `).get();
    return {
      total: row?.total ?? 0,
      shortTerm: row?.shortTerm ?? 0,
      longTerm: row?.longTerm ?? 0,
    };
  } catch {
    // Any DB error (missing table, locked file) degrades to empty stats
    // instead of blocking the pre-compact hook.
    return { total: 0, shortTerm: 0, longTerm: 0 };
  }
}
|
|
113
|
+
|
|
114
|
+
/**
|
|
115
|
+
* Calculate dynamic threshold based on memory fullness
|
|
116
|
+
* When memory is full, be more selective. When sparse, be more permissive.
|
|
117
|
+
* Lowered thresholds to capture more content.
|
|
118
|
+
*/
|
|
119
|
+
/**
 * Calculate a salience threshold from memory fullness: the fuller the
 * store, the pickier auto-extraction becomes; the sparser, the more
 * permissive.
 * @param {number} memoryCount - current number of stored memories
 * @param {number} maxMemories - configured capacity
 * @returns {number} threshold in [0.25, 0.50]
 */
function getDynamicThreshold(memoryCount, maxMemories) {
  const fullness = memoryCount / maxMemories;

  // (fullness cutoff, threshold) pairs, checked from fullest down.
  const tiers = [
    [0.8, 0.50], // very full - highly selective
    [0.6, 0.42], // getting full - moderately selective
    [0.4, 0.35], // normal - standard threshold
    [0.2, 0.30], // sparse - more permissive
  ];

  for (const [cutoff, threshold] of tiers) {
    if (fullness > cutoff) return threshold;
  }
  return 0.25; // very sparse - accept most valuable items
}
|
|
129
|
+
|
|
130
|
+
/**
|
|
131
|
+
* Get extraction threshold for a specific category
|
|
132
|
+
* Combines dynamic threshold with category-specific adjustments
|
|
133
|
+
*/
|
|
134
|
+
/**
 * Combine a category's own extraction threshold with the fullness-based
 * dynamic threshold, taking whichever is lower — i.e. the more permissive
 * of the two, so valuable categories stay easy to extract when memory is
 * sparse.
 * @param {string} category - memory category key
 * @param {number} dynamicThreshold - threshold from getDynamicThreshold()
 * @returns {number} effective salience cutoff for this category
 */
function getExtractionThreshold(category, dynamicThreshold) {
  const categoryThreshold = CATEGORY_EXTRACTION_THRESHOLDS[category] || BASE_THRESHOLD;
  return categoryThreshold < dynamicThreshold ? categoryThreshold : dynamicThreshold;
}
|
|
139
|
+
|
|
140
|
+
// ==================== SALIENCE DETECTION (Mirrors src/memory/salience.ts) ====================
|
|
141
|
+
|
|
142
|
+
const ARCHITECTURE_KEYWORDS = [
|
|
143
|
+
'architecture', 'design', 'pattern', 'structure', 'system',
|
|
144
|
+
'database', 'api', 'schema', 'model', 'framework', 'stack',
|
|
145
|
+
'microservice', 'monolith', 'serverless', 'infrastructure'
|
|
146
|
+
];
|
|
147
|
+
|
|
148
|
+
const ERROR_KEYWORDS = [
|
|
149
|
+
'error', 'bug', 'fix', 'issue', 'problem', 'crash', 'fail',
|
|
150
|
+
'exception', 'debug', 'resolve', 'solution', 'workaround'
|
|
151
|
+
];
|
|
152
|
+
|
|
153
|
+
const PREFERENCE_KEYWORDS = [
|
|
154
|
+
'prefer', 'always', 'never', 'style', 'convention', 'standard',
|
|
155
|
+
'like', 'want', 'should', 'must', 'require'
|
|
156
|
+
];
|
|
157
|
+
|
|
158
|
+
const PATTERN_KEYWORDS = [
|
|
159
|
+
'pattern', 'practice', 'approach', 'method', 'technique',
|
|
160
|
+
'implementation', 'strategy', 'algorithm', 'workflow'
|
|
161
|
+
];
|
|
162
|
+
|
|
163
|
+
const DECISION_KEYWORDS = [
|
|
164
|
+
'decided', 'decision', 'chose', 'chosen', 'selected', 'going with',
|
|
165
|
+
'will use', 'opted for', 'settled on', 'agreed'
|
|
166
|
+
];
|
|
167
|
+
|
|
168
|
+
const LEARNING_KEYWORDS = [
|
|
169
|
+
'learned', 'discovered', 'realized', 'found out', 'turns out',
|
|
170
|
+
'TIL', 'now know', 'understand now', 'figured out'
|
|
171
|
+
];
|
|
172
|
+
|
|
173
|
+
const EMOTIONAL_MARKERS = [
|
|
174
|
+
'important', 'critical', 'crucial', 'essential', 'key',
|
|
175
|
+
'finally', 'breakthrough', 'eureka', 'aha', 'got it',
|
|
176
|
+
'frustrating', 'annoying', 'tricky', 'remember'
|
|
177
|
+
];
|
|
178
|
+
|
|
179
|
+
const CODE_REFERENCE_PATTERNS = [
|
|
180
|
+
/\b[A-Z][a-zA-Z]*\.[a-zA-Z]+\b/,
|
|
181
|
+
/\b[a-z_][a-zA-Z0-9_]*\.(ts|js|py|go|rs)\b/,
|
|
182
|
+
/`[^`]+`/,
|
|
183
|
+
/\b(function|class|interface|type|const|let|var)\s+\w+/i,
|
|
184
|
+
/\bline\s*\d+\b/i,
|
|
185
|
+
/\b(src|lib|app|components?)\/\S+/,
|
|
186
|
+
];
|
|
187
|
+
|
|
188
|
+
/**
 * Case-insensitive substring scan: true when any keyword occurs in text.
 * @param {string} text - text to search
 * @param {string[]} keywords - candidate keywords
 * @returns {boolean}
 */
function detectKeywords(text, keywords) {
  const haystack = text.toLowerCase();
  for (const keyword of keywords) {
    if (haystack.includes(keyword.toLowerCase())) return true;
  }
  return false;
}
|
|
192
|
+
|
|
193
|
+
/**
 * True when the text contains anything that looks like a code artifact:
 * qualified identifiers, source file names, backticked spans, declaration
 * keywords, line-number references, or src/lib-style paths (see
 * CODE_REFERENCE_PATTERNS).
 * @param {string} content - text to inspect
 * @returns {boolean}
 */
function detectCodeReferences(content) {
  for (const pattern of CODE_REFERENCE_PATTERNS) {
    if (pattern.test(content)) return true;
  }
  return false;
}
|
|
196
|
+
|
|
197
|
+
/**
 * Detect phrases where the user explicitly asks for something to be
 * remembered ("remember this", "don't forget", "for future reference", …).
 * @param {string} text - text to inspect
 * @returns {boolean}
 */
function detectExplicitRequest(text) {
  const explicitPhrases = [
    /\bremember\s+(this|that)\b/i,
    /\bdon'?t\s+forget\b/i,
    /\bkeep\s+(in\s+)?mind\b/i,
    /\bnote\s+(this|that)\b/i,
    /\bsave\s+(this|that)\b/i,
    /\bimportant[:\s]/i,
    /\bfor\s+future\s+reference\b/i,
  ];
  for (const phrase of explicitPhrases) {
    if (phrase.test(text)) return true;
  }
  return false;
}
|
|
209
|
+
|
|
210
|
+
/**
 * Score how memory-worthy a piece of text is, in [0.25, 1.0].
 * Starts from a 0.25 base and adds a fixed weight for each signal family
 * that fires (explicit "remember this" requests weigh the most).
 * @param {string} text - text to score
 * @returns {number} salience score, capped at 1.0
 */
function calculateSalience(text) {
  // (signal, weight) pairs, summed left-to-right onto the base score.
  const signals = [
    [detectExplicitRequest(text), 0.5],
    [detectKeywords(text, ARCHITECTURE_KEYWORDS), 0.4],
    [detectKeywords(text, ERROR_KEYWORDS), 0.35],
    [detectKeywords(text, DECISION_KEYWORDS), 0.35],
    [detectKeywords(text, LEARNING_KEYWORDS), 0.3],
    [detectKeywords(text, PATTERN_KEYWORDS), 0.25],
    [detectKeywords(text, PREFERENCE_KEYWORDS), 0.25],
    [detectCodeReferences(text), 0.15],
    [detectKeywords(text, EMOTIONAL_MARKERS), 0.2],
  ];

  const score = signals.reduce((acc, [hit, weight]) => (hit ? acc + weight : acc), 0.25);
  return Math.min(1.0, score);
}
|
|
225
|
+
|
|
226
|
+
/**
 * Pick the most fitting memory category for a piece of text by checking
 * keyword families in priority order (earlier rules win); TODO/FIXME-style
 * markers map to 'todo', and anything else falls back to 'note'.
 * @param {string} text - text to classify
 * @returns {string} category key
 */
function suggestCategory(text) {
  const lower = text.toLowerCase();

  const rules = [
    ['architecture', ARCHITECTURE_KEYWORDS],
    ['error', ERROR_KEYWORDS],
    ['context', DECISION_KEYWORDS], // decisions are stored as 'context'
    ['learning', LEARNING_KEYWORDS],
    ['preference', PREFERENCE_KEYWORDS],
    ['pattern', PATTERN_KEYWORDS],
  ];
  for (const [category, keywords] of rules) {
    if (detectKeywords(lower, keywords)) return category;
  }

  if (/\b(todo|fixme|hack|xxx)\b/i.test(lower)) return 'todo';

  return 'note';
}
|
|
239
|
+
|
|
240
|
+
/**
 * Build up to 12 tags for a memory: explicit #hashtags, recognized
 * technology names mentioned in the text, an 'auto-extracted' marker, and
 * (optionally) a source:<extractor> tag for provenance tracking.
 * @param {string} text - memory text to mine for tags
 * @param {string|null} [extractorName=null] - extractor that produced the segment
 * @returns {string[]} deduplicated tags, at most 12
 */
function extractTags(text, extractorName = null) {
  const tags = new Set();

  // Explicit #hashtags, stripped of the leading '#' and lowercased.
  for (const tag of text.match(/#[a-zA-Z][a-zA-Z0-9_-]*/g) ?? []) {
    tags.add(tag.slice(1).toLowerCase());
  }

  // Well-known technology names appearing anywhere in the text.
  const techTerms = [
    'react', 'vue', 'angular', 'node', 'python', 'typescript', 'javascript',
    'api', 'database', 'sql', 'mongodb', 'postgresql', 'mysql',
    'docker', 'kubernetes', 'aws', 'git', 'testing', 'auth', 'security'
  ];
  const lowerText = text.toLowerCase();
  for (const term of techTerms) {
    if (lowerText.includes(term)) tags.add(term);
  }

  // Mark as machine-generated, and record which extractor found it.
  tags.add('auto-extracted');
  if (extractorName) {
    tags.add(`source:${extractorName}`);
  }

  return Array.from(tags).slice(0, 12);
}
|
|
271
|
+
|
|
272
|
+
/**
|
|
273
|
+
* Calculate frequency boost based on how often key terms appear
|
|
274
|
+
* across all extracted segments. Repeated topics are more important.
|
|
275
|
+
*/
|
|
276
|
+
/**
 * Boost a segment's salience when its key terms recur in other segments:
 * topics that come up repeatedly in a conversation are likely important.
 * Key terms are purely-alphabetic words longer than 5 characters that are
 * not on the common-word stoplist. Each term mentioned by more than one
 * OTHER segment adds 0.03 per mention (per-term cap: 5 mentions); the
 * total boost is capped at 0.15.
 * @param {{content: string}} segment - segment being scored
 * @param {Array<{content: string}>} allSegments - all extracted segments
 * @returns {number} boost in [0, 0.15]
 */
function calculateFrequencyBoost(segment, allSegments) {
  const commonWords = new Set([
    'about', 'after', 'before', 'being', 'between', 'could', 'during',
    'every', 'found', 'through', 'would', 'should', 'which', 'where',
    'there', 'these', 'their', 'other', 'using', 'because', 'without'
  ]);

  // Deduplicated candidate terms, in first-seen order.
  const keyTerms = new Set(
    segment.content
      .toLowerCase()
      .split(/\s+/)
      .filter((w) => w.length > 5 && !commonWords.has(w) && /^[a-z]+$/.test(w))
  );

  let boost = 0;
  for (const term of keyTerms) {
    // Count how many OTHER segments mention this term.
    let mentions = 0;
    for (const other of allSegments) {
      if (other !== segment && other.content.toLowerCase().includes(term)) {
        mentions++;
      }
    }
    if (mentions > 1) {
      boost += 0.03 * Math.min(mentions, 5);
    }
  }

  return Math.min(0.15, boost);
}
|
|
313
|
+
|
|
314
|
+
// ==================== CONTENT EXTRACTION ====================
|
|
315
|
+
|
|
316
|
+
/**
|
|
317
|
+
* Extract meaningful segments from conversation text
|
|
318
|
+
* Looks for decisions, learnings, fixes, patterns, etc.
|
|
319
|
+
*/
|
|
320
|
+
/**
 * Extract meaningful segments from conversation text.
 * Runs a fixed table of extractors (decision, error-fix, learning,
 * architecture, preference, important-note) over the text; each extractor
 * owns a list of /g regexes whose first capture group is the memorable
 * content. Matches shorter than 20 chars are dropped; content is capped at
 * 500 chars and a title is derived from the first ~50 chars.
 *
 * @param {string} conversationText - full conversation text to mine
 * @returns {Array<{title: string, content: string, extractorType: string}>}
 *   raw segments, in extractor/pattern/match order (deduplication and
 *   scoring happen later in processSegments)
 */
function extractMemorableSegments(conversationText) {
  const segments = [];

  // Pattern matchers for different types of important content
  // Expanded patterns with lower minimum lengths for better capture
  const extractors = [
    {
      name: 'decision',
      patterns: [
        /(?:we\s+)?decided\s+(?:to\s+)?(.{15,200})/gi,
        /(?:going|went)\s+with\s+(.{15,150})/gi,
        /(?:chose|chosen|selected)\s+(.{15,150})/gi,
        /the\s+(?:approach|solution|fix)\s+(?:is|was)\s+(.{15,200})/gi,
        // New patterns
        /(?:using|will\s+use)\s+(.{15,150})/gi,
        /(?:opted\s+for|settled\s+on)\s+(.{15,150})/gi,
      ],
      titlePrefix: 'Decision: ',
    },
    {
      name: 'error-fix',
      patterns: [
        /(?:fixed|solved|resolved)\s+(?:by\s+)?(.{15,200})/gi,
        /the\s+(?:fix|solution|workaround)\s+(?:is|was)\s+(.{15,200})/gi,
        /(?:root\s+cause|issue)\s+(?:is|was)\s+(.{15,200})/gi,
        /(?:error|bug)\s+(?:was\s+)?caused\s+by\s+(.{15,200})/gi,
        // New patterns
        /(?:problem|issue)\s+was\s+(.{15,150})/gi,
        /(?:the\s+)?bug\s+(?:is|was)\s+(.{15,150})/gi,
        /(?:debugging|debugged)\s+(.{15,150})/gi,
      ],
      titlePrefix: 'Fix: ',
    },
    {
      name: 'learning',
      patterns: [
        /(?:learned|discovered|realized|found\s+out)\s+(?:that\s+)?(.{15,200})/gi,
        /turns\s+out\s+(?:that\s+)?(.{15,200})/gi,
        /(?:TIL|today\s+I\s+learned)[:\s]+(.{15,200})/gi,
        // New patterns
        /(?:now\s+)?(?:understand|know)\s+(?:that\s+)?(.{15,150})/gi,
        /(?:figured\s+out|worked\s+out)\s+(.{15,150})/gi,
      ],
      titlePrefix: 'Learned: ',
    },
    {
      name: 'architecture',
      patterns: [
        /the\s+architecture\s+(?:is|uses|consists\s+of)\s+(.{15,200})/gi,
        /(?:design|pattern)\s+(?:is|uses)\s+(.{15,200})/gi,
        /(?:system|api|database)\s+(?:structure|design)\s+(?:is|uses)\s+(.{15,200})/gi,
        // New patterns
        /(?:created|added|implemented|built)\s+(?:a\s+)?(.{15,200})/gi,
        /(?:refactored|updated|changed)\s+(?:the\s+)?(.{15,150})/gi,
      ],
      titlePrefix: 'Architecture: ',
    },
    {
      name: 'preference',
      patterns: [
        /(?:always|never)\s+(.{10,150})/gi,
        /(?:prefer|want)\s+to\s+(.{10,150})/gi,
        /(?:should|must)\s+(?:always\s+)?(.{10,150})/gi,
      ],
      titlePrefix: 'Preference: ',
    },
    {
      name: 'important-note',
      patterns: [
        /important[:\s]+(.{15,200})/gi,
        /(?:note|remember)[:\s]+(.{15,200})/gi,
        /(?:key|critical)\s+(?:point|thing)[:\s]+(.{15,200})/gi,
        // New patterns
        /(?:this\s+is\s+)?(?:crucial|essential)[:\s]+(.{15,150})/gi,
        /(?:don't\s+forget|keep\s+in\s+mind)[:\s]+(.{15,150})/gi,
      ],
      titlePrefix: 'Note: ',
    },
  ];

  for (const extractor of extractors) {
    for (const pattern of extractor.patterns) {
      let match;
      // /g regexes are stateful: exec() advances lastIndex on each call, so
      // this loop visits every match in the text. Patterns are re-created on
      // every invocation of this function, so no state leaks across calls.
      while ((match = pattern.exec(conversationText)) !== null) {
        const content = match[1].trim();
        if (content.length >= 20) {
          // Generate a title from first ~50 chars
          const titleContent = content.slice(0, 50).replace(/\s+/g, ' ').trim();
          const title = extractor.titlePrefix + (titleContent.length < 50 ? titleContent : titleContent + '...');

          segments.push({
            title,
            content: content.slice(0, 500), // Cap content length
            extractorType: extractor.name,
          });
        }
      }
    }
  }

  return segments;
}
|
|
422
|
+
|
|
423
|
+
/**
|
|
424
|
+
* Deduplicate and score segments
|
|
425
|
+
* @param {Array} segments - Raw extracted segments
|
|
426
|
+
* @param {number} dynamicThreshold - Dynamic threshold based on memory fullness
|
|
427
|
+
*/
|
|
428
|
+
/**
 * Deduplicate, score, rank, and filter raw segments.
 * Returns at most MAX_AUTO_MEMORIES of the highest-salience segments that
 * clear their category's combined (category + dynamic) threshold.
 * @param {Array<{title: string, content: string, extractorType: string}>} segments - raw extracted segments
 * @param {number} [dynamicThreshold=BASE_THRESHOLD] - fullness-based threshold
 * @returns {Array<Object>} enriched segments, best-first, capped in count
 */
function processSegments(segments, dynamicThreshold = BASE_THRESHOLD) {
  // 1) Drop near-duplicates (>80% word overlap with an already-kept
  //    segment) and annotate each survivor with its base salience,
  //    category, and tags.
  const unique = [];
  for (const candidate of segments) {
    const isDuplicate = unique.some(
      (kept) => calculateOverlap(kept.content, candidate.content) > 0.8
    );
    if (isDuplicate) continue;

    const text = candidate.title + ' ' + candidate.content;
    unique.push({
      ...candidate,
      baseSalience: calculateSalience(text),
      category: suggestCategory(text),
      tags: extractTags(text, candidate.extractorType),
    });
  }

  // 2) Apply the frequency boost once the full unique set is known
  //    (topics repeated across segments score higher).
  for (const seg of unique) {
    const frequencyBoost = calculateFrequencyBoost(seg, unique);
    seg.salience = Math.min(1.0, seg.baseSalience + frequencyBoost);
    seg.frequencyBoost = frequencyBoost;
  }

  // 3) Rank best-first, keep only segments clearing their threshold,
  //    then cap the number of auto-created memories.
  unique.sort((a, b) => b.salience - a.salience);

  return unique
    .filter((seg) => seg.salience >= getExtractionThreshold(seg.category, dynamicThreshold))
    .slice(0, MAX_AUTO_MEMORIES);
}
|
|
468
|
+
|
|
469
|
+
/**
|
|
470
|
+
* Simple overlap calculation (Jaccard similarity on words)
|
|
471
|
+
*/
|
|
472
|
+
/**
 * Jaccard similarity over whitespace-delimited, lowercased words:
 * |intersection| / |union|, in [0, 1].
 * @param {string} text1
 * @param {string} text2
 * @returns {number} similarity score
 */
function calculateOverlap(text1, text2) {
  const words1 = new Set(text1.toLowerCase().split(/\s+/));
  const words2 = new Set(text2.toLowerCase().split(/\s+/));

  let shared = 0;
  const union = new Set(words1);
  for (const word of words2) {
    if (words1.has(word)) shared += 1;
    union.add(word);
  }

  return shared / union.size;
}
|
|
481
|
+
|
|
482
|
+
// ==================== DATABASE OPERATIONS ====================
|
|
483
|
+
|
|
484
|
+
/**
 * Persist an auto-extracted memory as a 'short_term' row.
 * Tags are stored as a JSON array string; created_at and last_accessed
 * both start at the insertion time.
 * @param {import('better-sqlite3').Database} db - open database handle
 * @param {{title: string, content: string, category: string, salience: number, tags: string[]}} memory
 * @param {string|null} project - project name, or null when unknown
 */
function saveMemory(db, memory, project) {
  const now = new Date().toISOString();

  db.prepare(`
    INSERT INTO memories (title, content, type, category, salience, tags, project, created_at, last_accessed)
    VALUES (?, ?, 'short_term', ?, ?, ?, ?, ?, ?)
  `).run(
    memory.title,
    memory.content,
    memory.category,
    memory.salience,
    JSON.stringify(memory.tags),
    project || null,
    now,
    now
  );
}
|
|
503
|
+
|
|
504
|
+
/**
 * Record an 'episodic' marker memory noting that a compaction happened:
 * when, what triggered it, and how many memories were auto-extracted.
 * @param {import('better-sqlite3').Database} db - open database handle
 * @param {string} trigger - compaction trigger type
 * @param {string|null} project - project name, or null when unknown
 * @param {number} autoExtractedCount - memories saved this run
 */
function createSessionMarker(db, trigger, project, autoExtractedCount) {
  const timestamp = new Date().toISOString();
  const title = `Session compaction (${trigger})`;

  let content;
  if (autoExtractedCount > 0) {
    content = `Context compaction at ${timestamp}. Auto-extracted ${autoExtractedCount} memories. Type: ${trigger}.`;
  } else {
    content = `Context compaction at ${timestamp}. Type: ${trigger}. No auto-extractable content found.`;
  }

  db.prepare(`
    INSERT INTO memories (title, content, type, category, salience, tags, project, created_at, last_accessed)
    VALUES (?, ?, 'episodic', 'context', 0.3, ?, ?, ?, ?)
  `).run(title, content, JSON.stringify(['session', 'compaction', 'auto-extracted']), project || null, timestamp, timestamp);
}
|
|
518
|
+
|
|
519
|
+
// ==================== MAIN HOOK LOGIC ====================
|
|
520
|
+
|
|
521
|
+
// Accumulate the full JSON hook payload from stdin before processing.
let input = '';
process.stdin.setEncoding('utf8');

process.stdin.on('readable', () => {
  let chunk;
  // Drain everything currently buffered on stdin.
  while ((chunk = process.stdin.read()) !== null) {
    input += chunk;
  }
});

// All work happens once stdin closes: parse the payload, auto-extract
// memories, write a session marker, print a reminder, and always exit 0
// so compaction is never blocked by this hook.
process.stdin.on('end', () => {
  try {
    const hookData = JSON.parse(input || '{}');

    // Debug logging - what does Claude Code actually send?
    // (stderr only; stdout is reserved for the reminder message)
    console.error('[hook-debug] Received fields:', Object.keys(hookData).join(', '));
    console.error('[hook-debug] Data preview:', JSON.stringify(hookData, null, 2).slice(0, 1000));

    const trigger = hookData.trigger || 'unknown';
    const project = extractProjectFromPath(hookData.cwd);

    // Extract conversation text from hook data
    // Claude Code passes conversation in various formats
    const conversationText = extractConversationText(hookData);

    // Ensure database directory exists
    if (!existsSync(DB_DIR)) {
      mkdirSync(DB_DIR, { recursive: true });
    }

    // Check if database exists; without it there is nothing to extract into,
    // so just emit the reminder and exit cleanly.
    if (!existsSync(DB_PATH)) {
      console.error('[pre-compact] Memory database not found, skipping auto-extraction');
      outputReminder(0, BASE_THRESHOLD);
      process.exit(0);
    }

    // Connect to database with timeout to handle concurrent access
    // timeout: 5000ms prevents hook from hanging if DB is locked
    const db = new Database(DB_PATH, { timeout: 5000 });

    // Get current memory stats for dynamic threshold calculation
    const stats = getMemoryStats(db);
    const totalMemories = stats.shortTerm + stats.longTerm;
    const maxMemories = MAX_SHORT_TERM_MEMORIES + MAX_LONG_TERM_MEMORIES;
    const dynamicThreshold = getDynamicThreshold(totalMemories, maxMemories);

    console.error(`[auto-extract] Memory status: ${totalMemories}/${maxMemories} (${(totalMemories/maxMemories*100).toFixed(0)}% full)`);
    console.error(`[auto-extract] Dynamic threshold: ${dynamicThreshold.toFixed(2)}`);

    let autoExtractedCount = 0;

    // Only attempt extraction if we have conversation content
    // (100-char minimum filters out empty/near-empty payloads)
    if (conversationText && conversationText.length > 100) {
      // Extract memorable segments
      const segments = extractMemorableSegments(conversationText);
      const processedSegments = processSegments(segments, dynamicThreshold);

      // Save auto-extracted memories; a failure on one memory must not
      // prevent the rest from being saved.
      for (const memory of processedSegments) {
        try {
          saveMemory(db, memory, project);
          autoExtractedCount++;
          const boostInfo = memory.frequencyBoost > 0 ? ` +${memory.frequencyBoost.toFixed(2)} boost` : '';
          console.error(`[auto-extract] Saved: ${memory.title} (salience: ${memory.salience.toFixed(2)}${boostInfo}, category: ${memory.category})`);
        } catch (err) {
          console.error(`[auto-extract] Failed to save "${memory.title}": ${err.message}`);
        }
      }
    }

    // Create session marker (written even when nothing was extracted,
    // so there is always a continuity record of the compaction)
    createSessionMarker(db, trigger, project, autoExtractedCount);

    console.error(`[claude-cortex] Pre-compact complete: ${autoExtractedCount} memories auto-extracted`);

    outputReminder(autoExtractedCount, dynamicThreshold);

    db.close();
    process.exit(0);
  } catch (error) {
    console.error(`[pre-compact] Error: ${error.message}`);
    outputReminder(0, BASE_THRESHOLD);
    process.exit(0); // Don't block compaction on errors
  }
});
|
|
607
|
+
|
|
608
|
+
/**
|
|
609
|
+
* Extract conversation text from various hook data formats
|
|
610
|
+
*/
|
|
611
|
+
/**
 * Extract conversation text from various hook data formats.
 * Claude Code doesn't guarantee where (or how) conversation content
 * arrives, so probe the known fields in priority order: a non-empty string
 * wins outright; an array of messages (strings or {content}/{text} objects)
 * is flattened to one newline-joined blob. As a last resort the whole
 * payload (minus stream handles) is serialized, used only when it seems
 * substantial enough (>200 chars) to contain real content.
 * @param {Object} hookData - parsed hook payload
 * @returns {string} conversation text, or '' when nothing usable was found
 */
function extractConversationText(hookData) {
  const candidates = [
    hookData.conversation,
    hookData.messages,
    hookData.transcript,
    hookData.content,
    hookData.context,
    hookData.text,
  ];

  for (const candidate of candidates) {
    if (typeof candidate === 'string' && candidate.length > 0) {
      return candidate;
    }
    if (Array.isArray(candidate)) {
      // Array of messages: pull text out of each entry and concatenate.
      const parts = candidate.map((msg) => {
        if (typeof msg === 'string') return msg;
        if (msg.content) return msg.content;
        if (msg.text) return msg.text;
        return '';
      });
      return parts.join('\n');
    }
  }

  // Fallback: stringify the whole payload, excluding stream handles.
  const { stdin, stdout, stderr, ...relevantData } = hookData;
  const fullText = JSON.stringify(relevantData);

  if (fullText.length > 200) {
    return fullText;
  }

  return '';
}
|
|
651
|
+
|
|
652
|
+
/**
|
|
653
|
+
* Output reminder message to stdout
|
|
654
|
+
*/
|
|
655
|
+
/**
 * Output the post-hook reminder message to stdout.
 * When items were auto-saved, announce the count; otherwise explain that
 * nothing cleared the salience bar (noting when a high dynamic threshold —
 * i.e. near-capacity memory — made extraction selective).
 * @param {number} autoExtractedCount - memories saved this run
 * @param {number} dynamicThreshold - threshold that was in effect
 */
function outputReminder(autoExtractedCount, dynamicThreshold) {
  let message;

  if (autoExtractedCount > 0) {
    message = `
🧠 AUTO-MEMORY: ${autoExtractedCount} important items were automatically saved before compaction.
After compaction, use 'get_context' to retrieve your memories.
`;
  } else {
    const thresholdNote =
      dynamicThreshold > 0.5 ? ' (Memory near capacity - being selective)' : '';
    message = `
🧠 PRE-COMPACT: No auto-extractable content found with high enough salience${thresholdNote}.
If there's something important, use 'remember' to save it explicitly.
After compaction, use 'get_context' to retrieve your memories.
`;
  }

  console.log(message);
}
|