claude-mneme 2.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +17 -0
- package/CLAUDE.md +98 -0
- package/CONFIG_REFERENCE.md +495 -0
- package/README.md +40 -0
- package/commands/entity.md +64 -0
- package/commands/forget.md +69 -0
- package/commands/remember.md +60 -0
- package/commands/status.md +90 -0
- package/commands/summarize.md +69 -0
- package/hooks/hooks.json +123 -0
- package/package.json +12 -0
- package/scripts/mem-add.mjs +59 -0
- package/scripts/mem-entity.mjs +143 -0
- package/scripts/mem-forget.mjs +245 -0
- package/scripts/mem-status.mjs +319 -0
- package/scripts/mem-summarize.mjs +338 -0
- package/scripts/post-compact.mjs +132 -0
- package/scripts/post-tool-use.mjs +353 -0
- package/scripts/pre-compact.mjs +491 -0
- package/scripts/session-start.mjs +283 -0
- package/scripts/session-stop.mjs +31 -0
- package/scripts/stop-capture.mjs +294 -0
- package/scripts/subagent-stop.mjs +203 -0
- package/scripts/summarize.mjs +428 -0
- package/scripts/sync.mjs +609 -0
- package/scripts/user-prompt-submit.mjs +77 -0
- package/scripts/utils.mjs +2142 -0
- package/scripts/utils.test.mjs +1465 -0
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
#!/usr/bin/env node
/**
 * SubagentStop Hook - Agent Completion Capture
 * Captures summaries from specialized agents when they complete tasks
 * These are high-signal entries about complex work performed
 *
 * Note: Claude Code passes transcript_path (file path), not direct output
 */

import { readFileSync, existsSync, openSync, readSync, closeSync, statSync } from 'fs';
import { ensureMemoryDirs, loadConfig, appendLogEntry, extractiveSummarize, stripLeadIns, stripMarkdown, logError } from './utils.mjs';

// Read hook input from stdin. The hook payload arrives as a single JSON
// document piped on stdin; accumulate chunks until the stream ends.
let input = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => input += chunk);
process.stdin.on('end', () => {
  try {
    const hookData = JSON.parse(input);
    processSubagentStop(hookData);
  } catch (e) {
    // Never fail the hook: record the error and exit 0 so a memory-capture
    // problem cannot interrupt the Claude Code session.
    logError(e, 'subagent-stop');
    process.exit(0);
  }
});
|
|
26
|
+
|
|
27
|
+
/**
 * Load a Claude Code transcript from a JSONL file.
 *
 * Each line of the file is an independent JSON object; only entries whose
 * type is 'user' or 'assistant' are kept, mapped to { role, content }.
 * Malformed lines are silently skipped.
 *
 * @param {string|null|undefined} transcriptPath - path to the JSONL transcript
 * @returns {Array<{role: string, content: *}>|null} parsed turns, or null
 *   when the path is missing/unreadable, the file is empty, or no usable
 *   entries were found
 */
function readTranscript(transcriptPath) {
  if (!transcriptPath || !existsSync(transcriptPath)) return null;

  let raw;
  try {
    raw = readFileSync(transcriptPath, 'utf-8').trim();
  } catch {
    return null;
  }
  if (!raw) return null;

  const messages = [];
  for (const line of raw.split('\n')) {
    if (!line.trim()) continue;
    let parsed;
    try {
      parsed = JSON.parse(line);
    } catch {
      continue; // tolerate malformed lines
    }
    if (parsed.type === 'user' || parsed.type === 'assistant') {
      // Content may live under .message.content (normal) or .content (fallback).
      messages.push({ role: parsed.type, content: parsed.message?.content || parsed.content });
    }
  }

  return messages.length > 0 ? messages : null;
}
|
|
63
|
+
|
|
64
|
+
/**
 * Flatten a transcript message's content into plain text.
 *
 * Accepts either a raw string (returned unchanged) or an array of content
 * blocks, of which only blocks with type 'text' contribute; their text is
 * joined with newlines. Any other shape yields the empty string.
 *
 * @param {string|Array<object>|*} content - message content
 * @returns {string} extracted text (possibly empty)
 */
function extractTextContent(content) {
  if (typeof content === 'string') return content;
  if (!Array.isArray(content)) return '';

  const texts = [];
  for (const block of content) {
    if (block.type === 'text') texts.push(block.text);
  }
  return texts.join('\n');
}
|
|
79
|
+
|
|
80
|
+
/**
|
|
81
|
+
* Check if a very recent response entry already covers the same content.
|
|
82
|
+
* Prevents duplicate logging when both stop-capture and subagent-stop fire.
|
|
83
|
+
* Checks both main log and pending file.
|
|
84
|
+
*/
|
|
85
|
+
/**
 * Return up to `count` trailing lines of a file, reading at most the final
 * 4KB rather than the whole file.
 *
 * @param {string} filePath - file to read
 * @param {number} count - maximum number of trailing lines to return
 * @returns {string[]} the last lines, or [] when the file is empty,
 *   missing, or any I/O error occurs
 */
function readLastLines(filePath, count) {
  try {
    const stat = statSync(filePath);
    if (stat.size === 0) return [];
    const readSize = Math.min(stat.size, 4096);
    const buf = Buffer.alloc(readSize);
    const fd = openSync(filePath, 'r');
    try {
      readSync(fd, buf, 0, readSize, stat.size - readSize);
    } finally {
      // Always release the descriptor, even if readSync throws —
      // the original leaked the fd on a failed read.
      closeSync(fd);
    }
    const lines = buf.toString('utf-8').trim().split('\n');
    return lines.slice(-count);
  } catch {
    return [];
  }
}
|
|
100
|
+
|
|
101
|
+
/**
 * Detect whether `content` duplicates a 'response' entry logged within the
 * last 30 seconds, inspecting the tails of both the main log and its
 * companion `.pending.jsonl` file. Comparison uses the first 100 characters,
 * case-insensitively; prefix matches in either direction count as duplicates.
 *
 * @param {string} content - candidate log content
 * @param {string} logPath - path to the main .jsonl log
 * @returns {boolean} true when a near-identical recent response entry exists
 */
function isDuplicateOfRecentResponse(content, logPath) {
  const candidates = [logPath, logPath.replace('.jsonl', '.pending.jsonl')]
    .filter(p => existsSync(p));
  const now = Date.now();
  const head = content.substring(0, 100).toLowerCase();

  for (const candidate of candidates) {
    // Only the tail matters — readLastLines touches at most ~4KB of the file.
    for (const line of readLastLines(candidate, 3)) {
      if (!line) continue;
      let entry;
      try {
        entry = JSON.parse(line);
      } catch {
        continue;
      }
      if (entry.type !== 'response') continue;
      if (now - new Date(entry.ts).getTime() >= 30000) continue;
      const other = (entry.content || '').substring(0, 100).toLowerCase();
      if (head === other || head.startsWith(other) || other.startsWith(head)) {
        return true;
      }
    }
  }
  return false;
}
|
|
125
|
+
|
|
126
|
+
/**
 * Handle a SubagentStop hook event.
 *
 * Reads the subagent transcript from hookData.transcript_path, takes the
 * final assistant message as the agent's output, summarizes it according to
 * config.responseSummarization, and appends an 'agent' entry to the memory
 * log — unless a near-identical 'response' entry was logged in the last 30
 * seconds (which happens when stop-capture also fired). Always terminates
 * the process with exit code 0.
 *
 * @param {object} hookData - parsed hook payload
 *   (agent_type, task_description, transcript_path, cwd)
 */
function processSubagentStop(hookData) {
  const { agent_type, task_description, transcript_path, cwd } = hookData;
  const workDir = cwd || process.cwd();

  const transcript = readTranscript(transcript_path);
  if (!transcript || transcript.length === 0) {
    process.exit(0);
    return;
  }

  // The agent's final output is the last assistant turn in the transcript.
  let finalMessage = null;
  for (let i = transcript.length - 1; i >= 0 && !finalMessage; i--) {
    if (transcript[i].role === 'assistant') finalMessage = transcript[i];
  }
  if (!finalMessage) {
    process.exit(0);
    return;
  }

  const rawOutput = extractTextContent(finalMessage.content);
  if (!rawOutput || !rawOutput.trim()) {
    process.exit(0);
    return;
  }

  const output = stripMarkdown(rawOutput);
  const config = loadConfig();
  const paths = ensureMemoryDirs(workDir);

  // Subagent output tends to be verbose; unless summarization is disabled
  // ('none'), run extractive summarization over it as a sensible minimum.
  const mode = config.responseSummarization || 'none';
  const summary = mode === 'none'
    ? stripLeadIns(output)
    : extractiveSummarize(output, config);
  if (!summary) {
    process.exit(0);
    return;
  }

  // Prefix with the task description when it is short enough to act as a label.
  let content = (task_description && task_description.length < 50)
    ? `${task_description}: ${summary}`
    : summary;

  // Final safeguard against oversized entries.
  if (content.length > config.maxResponseLength) {
    content = content.substring(0, config.maxResponseLength) + '...';
  }

  // stop-capture may have already logged this output as a response entry.
  if (isDuplicateOfRecentResponse(content, paths.log)) {
    process.exit(0);
    return;
  }

  appendLogEntry({
    ts: new Date().toISOString(),
    type: 'agent',
    agent_type: agent_type || 'agent',
    content
  }, workDir);
  process.exit(0);
}
|
|
201
|
+
|
|
202
|
+
// Timeout fallback: if stdin never emits 'end' (e.g. the hook is invoked
// without piped input), exit cleanly after 5s so the hook cannot hang.
setTimeout(() => process.exit(0), 5000);
|
|
@@ -0,0 +1,428 @@
|
|
|
1
|
+
#!/usr/bin/env node
/**
 * Incremental Summarization Script
 *
 * Usage: node summarize.mjs <project-dir>
 *        node summarize.mjs <project-dir> --migrate (migrate old summary.md to JSON)
 *
 * Uses structured JSON storage for efficient incremental updates.
 * Only new entries are sent to Haiku, not the entire summary.
 */

import { readFileSync, writeFileSync, existsSync, unlinkSync } from 'fs';
import {
  ensureDeps,
  ensureMemoryDirs,
  loadConfig,
  getProjectName,
  formatEntriesForSummary,
  emptyStructuredSummary,
  deduplicateEntries,
  withFileLock,
  logError
} from './utils.mjs';

// CLI arguments: target project directory (defaults to the current working
// directory) and an optional --migrate flag handled by the main section below.
const cwd = process.argv[2] || process.cwd();
const migrateOnly = process.argv.includes('--migrate');
const paths = ensureMemoryDirs(cwd);
const config = loadConfig();
const projectName = getProjectName(cwd);
|
|
30
|
+
|
|
31
|
+
/**
 * Load the structured summary from disk (paths.summaryJson).
 *
 * @returns {object} the parsed summary, or an empty structure when the
 *   file is absent or contains invalid JSON
 */
function readStructuredSummary() {
  if (!existsSync(paths.summaryJson)) return emptyStructuredSummary();
  try {
    return JSON.parse(readFileSync(paths.summaryJson, 'utf-8'));
  } catch {
    // Corrupt file: start over rather than crash the summarizer.
    return emptyStructuredSummary();
  }
}
|
|
44
|
+
|
|
45
|
+
/**
 * Migrate old markdown summary to structured JSON format.
 *
 * Sends the content of summary.md to the configured model via the Claude
 * Agent SDK and asks it to emit a structured JSON object (projectContext,
 * keyDecisions, currentState, recentWork).
 *
 * @returns {object|null} the migrated structure with a fresh lastUpdated
 *   timestamp; an empty structure when summary.md exists but is blank;
 *   null when summary.md is missing or the model call fails
 */
async function migrateMarkdownSummary() {
  if (!existsSync(paths.summary)) {
    console.error('[claude-mneme] No summary.md to migrate');
    return null;
  }

  const markdown = readFileSync(paths.summary, 'utf-8').trim();
  if (!markdown) {
    return emptyStructuredSummary();
  }

  console.error(`[claude-mneme] Migrating summary.md to structured JSON for "${projectName}"...`);

  const prompt = `Convert this markdown memory summary into structured JSON format.

<markdown_summary>
${markdown}
</markdown_summary>

Output a JSON object with this exact structure:
{
  "projectContext": "Brief description of what this project is (1-2 sentences)",
  "keyDecisions": [
    { "date": "YYYY-MM-DD or null", "decision": "The decision made", "reason": "Why it was made or null" }
  ],
  "currentState": [
    { "topic": "Feature/component name", "status": "Current implementation status" }
  ],
  "recentWork": [
    { "date": "YYYY-MM-DD or null", "summary": "What was done" }
  ]
}

Rules:
- Extract project context from any "Project Context" or introductory section
- Key decisions are ONLY strategic/architectural choices that affect project direction (the "why", not the "how").
  Do NOT include implementation details like config keys, defaults, thresholds, or parameter names — those go in currentState.
- Current state describes what's implemented, in progress, known issues, and relevant implementation details
- Recent work is the latest activity that hasn't been folded into current state yet
- Use null for dates if not clearly specified
- Output ONLY the JSON object, no other text`;

  try {
    // ensureDeps presumably installs/locates the SDK before the dynamic
    // import below — TODO confirm against utils.mjs.
    ensureDeps();
    const { query } = await import('@anthropic-ai/claude-agent-sdk');

    // The SDK's query() accepts an async iterable of user messages.
    async function* messageGenerator() {
      yield {
        type: 'user',
        message: { role: 'user', content: prompt },
        session_id: `memory-migrate-${Date.now()}`,
        parent_tool_use_id: null,
        isSynthetic: true
      };
    }

    const queryResult = query({
      prompt: messageGenerator(),
      options: {
        model: config.model,
        // Pure text-transformation prompt: deny every tool.
        disallowedTools: ['Bash', 'Read', 'Write', 'Edit', 'Grep', 'Glob', 'WebFetch', 'WebSearch', 'Task', 'TodoWrite'],
        pathToClaudeCodeExecutable: config.claudePath
      }
    });

    // Keep the text of the LAST assistant message seen on the stream.
    let response = '';
    try {
      for await (const message of queryResult) {
        if (message.type === 'assistant') {
          const content = message.message.content;
          response = Array.isArray(content)
            ? content.filter(c => c.type === 'text').map(c => c.text).join('\n')
            : typeof content === 'string' ? content : '';
        }
      }
    } catch (iterError) {
      // If the stream dies after we already captured a response, use it;
      // otherwise propagate to the outer handler.
      if (!response) throw iterError;
    }

    if (response) {
      // Extract the outermost {...} span in case the model wrapped the
      // JSON in prose despite the instructions.
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        const migrated = JSON.parse(jsonMatch[0]);
        migrated.lastUpdated = new Date().toISOString();
        return migrated;
      }
    }
  } catch (error) {
    console.error(`[claude-mneme] Migration error: ${error.message}`);
    logError(error, 'summarize-migrate');
  }

  return null;
}
|
|
142
|
+
|
|
143
|
+
/**
 * Perform incremental summarization of new entries.
 *
 * Deduplicates the new log entries, builds a compact textual view of the
 * existing structured summary as context, and asks the configured model for
 * an *update object* (newKeyDecisions, updateCurrentState, newRecentWork,
 * promoteToCurrentState, removeFromRecentWork) rather than a full rewrite —
 * applyUpdates() merges it afterwards.
 *
 * @param {object} existingSummary - current structured summary
 * @param {string[]} newEntries - raw JSONL lines not yet summarized
 * @returns {object|null} the parsed update object, or null on failure
 */
async function incrementalSummarize(existingSummary, newEntries) {
  // Parse entries for deduplication
  const parsedEntries = newEntries.map(line => {
    try { return JSON.parse(line); }
    catch { return null; }
  }).filter(Boolean);

  // Deduplicate before sending to LLM - reduces noise and cost
  const deduped = deduplicateEntries(parsedEntries, config);
  const dedupedLines = deduped.map(e => JSON.stringify(e));

  const entriesText = formatEntriesForSummary(dedupedLines);

  // Build a compact representation of existing summary for context
  const existingContext = [];
  if (existingSummary.projectContext) {
    existingContext.push(`Project: ${existingSummary.projectContext}`);
  }
  if (existingSummary.keyDecisions?.length > 0) {
    existingContext.push(`Key decisions: ${existingSummary.keyDecisions.map(d => d.decision).join('; ')}`);
  }
  if (existingSummary.currentState?.length > 0) {
    existingContext.push(`Current state: ${existingSummary.currentState.map(s => `${s.topic}: ${s.status}`).join('; ')}`);
  }
  const recentCount = existingSummary.recentWork?.length || 0;

  const prompt = `You are updating a structured memory for project "${projectName}".

<existing_context>
${existingContext.join('\n') || '(New project, no existing context)'}
Recent work items: ${recentCount}
</existing_context>

<new_entries>
${entriesText}
</new_entries>

Analyze the new entries and output a JSON object with updates:

{
  "projectContext": "Updated project description if new info changes it, or null to keep existing",
  "newKeyDecisions": [
    { "date": "YYYY-MM-DD", "decision": "Important architectural/design choice", "reason": "Why" }
  ],
  "updateCurrentState": [
    { "topic": "Feature name", "status": "New or updated status" }
  ],
  "newRecentWork": [
    { "date": "YYYY-MM-DD", "summary": "What was done" }
  ],
  "promoteToCurrentState": ["indices of recentWork items to promote, e.g. 0, 1"],
  "removeFromRecentWork": ["indices of recentWork items that are now stale"]
}

Rules:
- Only include fields that have updates (use empty arrays for no changes)
- Key decisions: ONLY strategic/architectural choices that affect project direction.
  Record the "why" not the "how". Good: "Prune old entities to prevent unbounded growth".
  Bad: "Default 30-day retention (entityExtraction.maxAgeDays: 30), configurable by user".
  Implementation details (config keys, defaults, thresholds, parameter names) belong in currentState, not keyDecisions.
- Current state: features implemented, work in progress, known issues, implementation details
- Recent work: specific tasks completed in this batch of entries
- Merge similar entries, avoid duplicates
- Be concise — each item should be one clear sentence
- Output ONLY the JSON object`;

  try {
    // ensureDeps presumably installs/locates the SDK before the dynamic
    // import below — TODO confirm against utils.mjs.
    ensureDeps();
    const { query } = await import('@anthropic-ai/claude-agent-sdk');

    // The SDK's query() accepts an async iterable of user messages.
    async function* messageGenerator() {
      yield {
        type: 'user',
        message: { role: 'user', content: prompt },
        session_id: `memory-summarize-${Date.now()}`,
        parent_tool_use_id: null,
        isSynthetic: true
      };
    }

    const queryResult = query({
      prompt: messageGenerator(),
      options: {
        model: config.model,
        // Pure text-transformation prompt: deny every tool.
        disallowedTools: ['Bash', 'Read', 'Write', 'Edit', 'Grep', 'Glob', 'WebFetch', 'WebSearch', 'Task', 'TodoWrite'],
        pathToClaudeCodeExecutable: config.claudePath
      }
    });

    // Keep the text of the LAST assistant message seen on the stream.
    let response = '';
    try {
      for await (const message of queryResult) {
        if (message.type === 'assistant') {
          const content = message.message.content;
          response = Array.isArray(content)
            ? content.filter(c => c.type === 'text').map(c => c.text).join('\n')
            : typeof content === 'string' ? content : '';
        }
      }
    } catch (iterError) {
      // If the stream dies after we already captured a response, use it;
      // otherwise propagate to the outer handler.
      if (!response) throw iterError;
    }

    if (response) {
      // Extract the outermost {...} span in case the model wrapped the
      // JSON in prose despite the instructions.
      const jsonMatch = response.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        return JSON.parse(jsonMatch[0]);
      }
    }
  } catch (error) {
    console.error(`[claude-mneme] Summarization error: ${error.message}`);
    logError(error, 'summarize');
  }

  return null;
}
|
|
262
|
+
|
|
263
|
+
/**
 * Apply incremental updates to an existing structured summary.
 *
 * Handles project-context replacement, appending key decisions, merging
 * current-state items by topic, removing/promoting/adding recent-work
 * items, and enforcing size caps (10 recent work, 15 current state,
 * 10 key decisions — newest win).
 *
 * Fixes vs the previous version:
 * - promote/remove indices from the LLM refer to the ORIGINAL recentWork
 *   array; applying removals first and then filtering promotions against
 *   the shifted array deleted the wrong items. Both are now applied in a
 *   single pass over original indices.
 * - neither `existing` nor `updates` is mutated any more (the shallow copy
 *   previously shared — and pushed into — existing.currentState, and
 *   stamped updatedAt directly onto the caller's update objects).
 *
 * @param {object} existing - current structured summary (not mutated)
 * @param {object} updates - update object from incrementalSummarize (not mutated)
 * @returns {object} a new summary with a fresh lastUpdated timestamp
 */
function applyUpdates(existing, updates) {
  // Shallow-copy plus fresh arrays so the caller's data is never mutated.
  const result = {
    ...existing,
    keyDecisions: [...(existing.keyDecisions || [])],
    currentState: [...(existing.currentState || [])],
  };

  // Update project context if provided
  if (updates.projectContext) {
    result.projectContext = updates.projectContext;
  }

  // Add new key decisions
  if (updates.newKeyDecisions?.length > 0) {
    result.keyDecisions.push(...updates.newKeyDecisions);
  }

  // Update current state (merge by topic), stamping each update with a time.
  if (updates.updateCurrentState?.length > 0) {
    const stateMap = new Map(result.currentState.map(s => [s.topic, s]));
    for (const update of updates.updateCurrentState) {
      stateMap.set(update.topic, { ...update, updatedAt: new Date().toISOString() });
    }
    result.currentState = Array.from(stateMap.values());
  }

  // Handle recentWork: both index lists refer to the original array.
  const originalRecentWork = existing.recentWork || [];
  const toRemove = new Set((updates.removeFromRecentWork || []).map(Number));
  const toPromote = new Set((updates.promoteToCurrentState || []).map(Number));

  // Promote selected recent-work items into current state as completed work.
  for (const idx of toPromote) {
    const item = originalRecentWork[idx];
    if (item) {
      result.currentState.push({
        topic: 'Completed',
        status: item.summary,
        updatedAt: new Date().toISOString()
      });
    }
  }

  // Drop removed and promoted items in a single pass over ORIGINAL indices,
  // so neither filter sees indices shifted by the other.
  let recentWork = originalRecentWork.filter((_, i) => !toRemove.has(i) && !toPromote.has(i));

  // Add new recent work
  if (updates.newRecentWork?.length > 0) {
    recentWork = [...recentWork, ...updates.newRecentWork];
  }

  // Size caps: keep only the newest entries.
  result.recentWork = recentWork.slice(-10);
  if (result.currentState.length > 15) {
    result.currentState = result.currentState.slice(-15);
  }
  if (result.keyDecisions.length > 10) {
    result.keyDecisions = result.keyDecisions.slice(-10);
  }

  result.lastUpdated = new Date().toISOString();

  return result;
}
|
|
338
|
+
|
|
339
|
+
// ============ Main execution ============

// Handle migration mode: convert summary.md to summary.json and exit.
if (migrateOnly) {
  const migrated = await migrateMarkdownSummary();
  if (migrated) {
    writeFileSync(paths.summaryJson, JSON.stringify(migrated, null, 2) + '\n');
    console.error(`[claude-mneme] Migration complete. Created summary.json for "${projectName}".`);
    console.error('[claude-mneme] You can delete summary.md if the migration looks correct.');
  }
  process.exit(0);
}

// Check if log exists and has entries
if (!existsSync(paths.log)) {
  process.exit(0);
}

const logContent = readFileSync(paths.log, 'utf-8').trim();
if (!logContent) {
  process.exit(0);
}

const lines = logContent.split('\n').filter(l => l);
const entryCount = lines.length;

// Check if we need to summarize
if (entryCount < config.maxLogEntriesBeforeSummarize) {
  process.exit(0);
}

// Lock file for concurrency control.
// When spawned by maybeSummarize(), the lock already exists (it created it).
// When called directly (pre-compact, manual), we create it here.
// Either way, refresh the timestamp and clean up in the finally block.
const lockFile = paths.log + '.lock';
writeFileSync(lockFile, Date.now().toString());

try {
  // Check if we need to migrate first (no JSON summary yet, but a summary.md exists)
  let existingSummary = readStructuredSummary();
  if (!existingSummary.lastUpdated && existsSync(paths.summary)) {
    console.error(`[claude-mneme] Migrating existing summary.md to JSON format...`);
    const migrated = await migrateMarkdownSummary();
    if (migrated) {
      existingSummary = migrated;
      writeFileSync(paths.summaryJson, JSON.stringify(existingSummary, null, 2) + '\n');
    }
  }

  // Calculate entries to summarize vs keep
  const summarizeCount = entryCount - config.keepRecentEntries;
  if (summarizeCount <= 0) {
    // process.exit() terminates immediately and does NOT unwind try/finally,
    // so the lock must be released explicitly before this early exit —
    // previously it was leaked here.
    try { unlinkSync(lockFile); } catch {}
    process.exit(0);
  }

  const entriesToSummarize = lines.slice(0, summarizeCount);
  const entriesToKeep = lines.slice(summarizeCount);

  console.error(`[claude-mneme] Incrementally summarizing ${entriesToSummarize.length} entries for "${projectName}"...`);

  // Run incremental summarization
  const updates = await incrementalSummarize(existingSummary, entriesToSummarize);

  if (updates) {
    const newSummary = applyUpdates(existingSummary, updates);
    writeFileSync(paths.summaryJson, JSON.stringify(newSummary, null, 2) + '\n');

    // Re-read the log under write lock to prevent flushPendingLog from
    // appending between our read and write (which would silently lose entries).
    const logWriteLock = paths.log + '.wlock';
    const truncateResult = withFileLock(logWriteLock, () => {
      const currentLogContent = readFileSync(paths.log, 'utf-8').trim();
      const currentLines = currentLogContent ? currentLogContent.split('\n').filter(l => l) : [];
      const remainingLines = currentLines.slice(summarizeCount);
      writeFileSync(paths.log, remainingLines.join('\n') + (remainingLines.length ? '\n' : ''));
      return remainingLines.length;
    }, 30); // 30 — presumably a retry/timeout parameter of withFileLock; confirm in utils.mjs

    const keptCount = truncateResult ?? 0;
    console.error(`[claude-mneme] Summary updated. Kept ${keptCount} entries (${keptCount - entriesToKeep.length} arrived during summarization).`);
  } else {
    console.error('[claude-mneme] Summarization returned no updates, keeping log intact.');
  }

} finally {
  try { unlinkSync(lockFile); } catch {}
}

process.exit(0);
|