claude-mem-lite 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,165 @@
1
+ // claude-mem-lite: Unified LLM call wrapper
2
+ // Shared by memory (hook.mjs) and dispatch modules
3
+ // Auto-detects API key for direct calls, falls back to claude CLI
4
+ // Model configurable via CLAUDE_MEM_MODEL env var (default: haiku)
5
+
6
+ import { execFileSync } from 'child_process';
7
+ import { readFileSync } from 'fs';
8
+ import { join } from 'path';
9
+ import { debugLog, debugCatch, parseJsonFromLLM } from './utils.mjs';
10
+ import { DB_DIR } from './schema.mjs';
11
+
12
// ─── Model Resolution ────────────────────────────────────────────────────────

// CLI name → API model ID mapping
const MODEL_MAP = {
  haiku: 'claude-haiku-4-5-20251001',
  sonnet: 'claude-sonnet-4-5-20250929',
};

/**
 * Resolve the LLM model to use for background calls.
 * Reads CLAUDE_MEM_MODEL env var, defaults to 'haiku'.
 * Unknown model names silently fall back to 'haiku'.
 * @returns {{ cli: string, api: string }} CLI name and API model ID
 */
export function resolveModel() {
  const requested = (process.env.CLAUDE_MEM_MODEL || 'haiku').toLowerCase().trim();
  const cli = MODEL_MAP[requested] ? requested : 'haiku';
  return { cli, api: MODEL_MAP[cli] };
}
31
+
32
// ─── Mode Detection ──────────────────────────────────────────────────────────

let _mode = null;

/**
 * Detect whether to use direct API or CLI for LLM calls.
 * API mode requires ANTHROPIC_API_KEY in the environment.
 * Cached after first call.
 * @returns {'api'|'cli'} The detected mode
 */
export function detectMode() {
  if (_mode !== null) return _mode;
  const hasApiKey = Boolean(process.env.ANTHROPIC_API_KEY);
  _mode = hasApiKey ? 'api' : 'cli';
  const { cli } = resolveModel();
  debugLog('DEBUG', 'haiku-client', `mode: ${_mode}, model: ${cli}`);
  return _mode;
}

/** Reset cached mode (for testing). */
export function _resetMode() {
  _mode = null;
}
51
+
52
// ─── CLI Path ────────────────────────────────────────────────────────────────

/**
 * Locate the claude CLI binary.
 * Precedence: settings.json in DB_DIR, then CLAUDE_CODE_PATH env var,
 * then bare 'claude' resolved via PATH.
 * @returns {string} Path or command name for the claude CLI
 */
export function getClaudePath() {
  try {
    const settingsPath = join(DB_DIR, 'settings.json');
    const settings = JSON.parse(readFileSync(settingsPath, 'utf8'));
    if (settings.CLAUDE_CODE_PATH) return settings.CLAUDE_CODE_PATH;
  } catch {
    // settings.json missing or unparsable — fall through to env/default
  }
  return process.env.CLAUDE_CODE_PATH || 'claude';
}
61
+
62
// ─── Core Call ───────────────────────────────────────────────────────────────

/**
 * Call Haiku model with a prompt. Returns parsed text or null on failure.
 * Uses direct API when ANTHROPIC_API_KEY is available, otherwise falls back to CLI.
 * Never throws — returns null on any error.
 *
 * @param {string} prompt The prompt text
 * @param {object} [opts] Options
 * @param {number} [opts.timeout=10000] Timeout in milliseconds
 * @param {number} [opts.maxTokens=500] Max tokens in response
 * @returns {Promise<{text: string}|null>} Response or null on failure
 */
export async function callHaiku(prompt, { timeout = 10000, maxTokens = 500 } = {}) {
  if (!prompt) return null;

  const mode = detectMode();

  try {
    // CLI path is synchronous; returning its value from an async fn wraps it
    return mode === 'api'
      ? await callHaikuAPI(prompt, { timeout, maxTokens })
      : callHaikuCLI(prompt, { timeout });
  } catch (err) {
    debugCatch(err, 'callHaiku');
    return null;
  }
}
90
+
91
/**
 * Call Haiku and parse JSON response. Convenience wrapper.
 * @param {string} prompt The prompt text
 * @param {object} [opts] Options passed to callHaiku
 * @returns {Promise<object|null>} Parsed JSON or null
 */
export async function callHaikuJSON(prompt, opts) {
  const response = await callHaiku(prompt, opts);
  const text = response?.text;
  return text ? parseJsonFromLLM(text) : null;
}
102
+
103
// ─── API Mode ────────────────────────────────────────────────────────────────

/**
 * Call the Anthropic Messages API directly.
 * Aborts via AbortController when the timeout elapses.
 * @param {string} prompt The prompt text
 * @param {{timeout: number, maxTokens: number}} opts Call options
 * @returns {Promise<{text: string}|null>} Response text or null
 */
async function callHaikuAPI(prompt, { timeout, maxTokens }) {
  const apiKey = process.env.ANTHROPIC_API_KEY;
  if (!apiKey) return null;

  const { api: modelId } = resolveModel();
  const controller = new AbortController();
  const abortTimer = setTimeout(() => controller.abort(), timeout);

  try {
    const requestBody = {
      model: modelId,
      max_tokens: maxTokens,
      messages: [{ role: 'user', content: prompt }],
    };
    const res = await fetch('https://api.anthropic.com/v1/messages', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': apiKey,
        'anthropic-version': '2023-06-01',
      },
      body: JSON.stringify(requestBody),
      signal: controller.signal,
    });

    if (!res.ok) {
      debugLog('WARN', 'haiku-api', `HTTP ${res.status}`);
      return null;
    }

    const data = await res.json();
    // Messages API returns an array of content blocks; take the first text block
    const text = data.content?.[0]?.text;
    return text ? { text } : null;
  } finally {
    clearTimeout(abortTimer);
  }
}
141
+
142
// ─── CLI Mode ────────────────────────────────────────────────────────────────

/**
 * Call the claude CLI in print mode with the prompt on stdin.
 * On timeout/failure, salvages partial stdout only if it is complete JSON.
 * @param {string} prompt The prompt text
 * @param {{timeout: number}} opts Call options
 * @returns {{text: string}|null} Response text or null
 */
function callHaikuCLI(prompt, { timeout }) {
  const { cli: modelName } = resolveModel();
  const execOpts = {
    input: prompt,
    timeout,
    encoding: 'utf8',
    // CLAUDE_MEM_HOOK_RUNNING=1 prevents this subprocess from re-triggering hooks
    env: { ...process.env, CLAUDE_MEM_HOOK_RUNNING: '1' },
    stdio: ['pipe', 'pipe', 'pipe'],
  };
  try {
    const raw = execFileSync(getClaudePath(), ['-p', '--model', modelName], execOpts);
    const text = raw.trim();
    return text ? { text } : null;
  } catch (e) {
    // Try to extract partial output on timeout — validate JSON before returning
    const partial = e.stdout?.toString?.()?.trim() || e.output?.[1]?.toString?.()?.trim();
    if (partial && partial.startsWith('{') && partial.endsWith('}')) {
      try {
        JSON.parse(partial);
        return { text: partial };
      } catch {
        // Looks like JSON but isn't complete — fall through to failure path
      }
    }
    debugCatch(e, 'haiku-cli');
    return null;
  }
}
@@ -0,0 +1,176 @@
1
+ // claude-mem-lite CLAUDE.md context injection and token budgeting
2
+ // Handles adaptive time windows, token-budgeted selection, and CLAUDE.md persistence
3
+
4
+ import { join } from 'path';
5
+ import { readFileSync, writeFileSync, renameSync } from 'fs';
6
+ import { estimateTokens, debugLog, debugCatch } from './utils.mjs';
7
+
8
/**
 * Infer the project directory from environment variables or cwd.
 * Precedence: CLAUDE_PROJECT_DIR, then PWD, then process.cwd().
 * @returns {string} Absolute path to the project directory
 */
function inferProjectDir() {
  const { CLAUDE_PROJECT_DIR, PWD } = process.env;
  return CLAUDE_PROJECT_DIR || PWD || process.cwd();
}
15
+
16
/**
 * Compute adaptive recall time windows based on project activity velocity.
 * High activity -> shorter windows (recent data more relevant).
 * Low activity -> longer windows (older data stays relevant).
 * @param {object} db better-sqlite3 database handle
 * @param {string} project Project name to check velocity for
 * @returns {{tier1: number, tier2: number, tier3: number, sessWindow: number}} Time window durations in ms
 */
export function computeAdaptiveWindows(db, project) {
  const HOUR = 3600000;
  const DAY = 86400000;
  const weekAgo = Date.now() - 7 * DAY;
  const row = db.prepare(`
    SELECT COUNT(*) as c FROM observations
    WHERE project = ? AND created_at_epoch > ? AND COALESCE(compressed_into, 0) = 0
  `).get(project, weekAgo);
  const perDay = (row?.c || 0) / 7; // observations per day over the last week

  // High velocity: tighter windows, focus on very recent
  if (perDay > 10) {
    return { tier1: 12 * HOUR, tier2: 3 * DAY, tier3: 14 * DAY, sessWindow: 3 * DAY };
  }
  // Medium velocity: default windows
  if (perDay >= 3) {
    return { tier1: 24 * HOUR, tier2: 7 * DAY, tier3: 30 * DAY, sessWindow: 7 * DAY };
  }
  // Low velocity: wider windows, older data still relevant
  return { tier1: 48 * HOUR, tier2: 14 * DAY, tier3: 60 * DAY, sessWindow: 14 * DAY };
}
43
+
44
/**
 * Select observations and sessions within a token budget using greedy knapsack.
 * Scores candidates by recency * importance, picks highest value-density first.
 * Returned items are trimmed projections (no narrative text) suitable for
 * building a CLAUDE.md context block.
 * @param {object} db better-sqlite3 database handle
 * @param {string} project Project name
 * @param {number} [budget=2000] Maximum token budget
 * @returns {{observations: object[], summaries: object[], totalTokens: number}} Selected items
 */
export function selectWithTokenBudget(db, project, budget = 2000) {
  const now_ms = Date.now();
  const windows = computeAdaptiveWindows(db, project);
  const tier1Ago = now_ms - windows.tier1;
  const tier2Ago = now_ms - windows.tier2;
  const tier3Ago = now_ms - windows.tier3;

  // Candidate pool: tiered time windows by importance (adaptive).
  // Low-importance rows qualify only if very recent (tier1); high-importance
  // rows stay eligible out to tier3. compressed_into != 0 rows are excluded
  // because they have been rolled up elsewhere.
  const obsPool = db.prepare(`
    SELECT id, type, title, narrative, importance, created_at_epoch, files_modified
    FROM observations
    WHERE project = ? AND COALESCE(compressed_into, 0) = 0
      AND (
        (created_at_epoch > ? AND importance >= 1)
        OR (created_at_epoch > ? AND importance >= 2)
        OR (created_at_epoch > ? AND importance >= 3)
      )
    ORDER BY created_at_epoch DESC
    LIMIT 50
  `).all(project, tier1Ago, tier2Ago, tier3Ago);

  const sessPool = db.prepare(`
    SELECT id, request, completed, next_steps, created_at_epoch
    FROM session_summaries
    WHERE project = ? AND created_at_epoch > ?
    ORDER BY created_at_epoch DESC
    LIMIT 10
  `).all(project, now_ms - windows.sessWindow);

  const now = Date.now();
  const selectedObs = [];
  const selectedSess = [];
  let totalTokens = 0;

  // Score each candidate: value = recency * importance, cost = tokens.
  // recency decays hyperbolically with age in days (1.0 now, 0.5 at 1 day).
  const scoredObs = obsPool.map(o => {
    const ageDays = (now - o.created_at_epoch) / 86400000;
    const recency = 1 / (1 + ageDays);
    const impBoost = 0.5 + 0.5 * (o.importance || 1); // importance 1→1.0, 2→1.5, 3→2.0
    const value = recency * impBoost;
    const cost = estimateTokens((o.title || '') + (o.narrative || ''));
    // sqrt(cost) in the denominator softens the bias against longer items
    return { ...o, value, cost, valueDensity: cost > 0 ? value / Math.sqrt(cost) : 0 };
  });

  const scoredSess = sessPool.map(s => {
    const ageDays = (now - s.created_at_epoch) / 86400000;
    const recency = 1 / (1 + ageDays);
    const value = recency * 1.5; // Session summaries slightly boosted
    const cost = estimateTokens((s.request || '') + (s.completed || '') + (s.next_steps || ''));
    return { ...s, value, cost, valueDensity: cost > 0 ? value / Math.sqrt(cost) : 0 };
  });

  // Combine and sort by value density (greedy knapsack)
  const allCandidates = [
    ...scoredObs.map(o => ({ ...o, _kind: 'obs' })),
    ...scoredSess.map(s => ({ ...s, _kind: 'sess' })),
  ].sort((a, b) => b.valueDensity - a.valueDensity);

  // Files already covered by selected observations, for the diversity penalty
  const selectedFiles = new Set();

  for (const c of allCandidates) {
    // Over budget: skip this candidate but keep scanning — a cheaper, lower-
    // density candidate later in the list may still fit.
    if (totalTokens + c.cost > budget) continue;

    // Diversity penalty: reduce value for file overlap with already-selected
    if (c._kind === 'obs' && c.files_modified) {
      let cFiles;
      try { cFiles = JSON.parse(c.files_modified || '[]'); } catch (e) { debugCatch(e, 'budgetSelect-parseFiles'); cFiles = []; }
      if (cFiles.length > 0 && selectedFiles.size > 0) {
        const overlap = cFiles.filter(f => selectedFiles.has(f)).length;
        const overlapRatio = overlap / cFiles.length;
        // Penalty is applied only as a skip threshold, not a re-sort: by the
        // time overlap is known, higher-density items are already committed.
        const penalizedValue = c.valueDensity * (1 - 0.3 * overlapRatio);
        if (penalizedValue < 0.001) continue; // Skip if too redundant
      }
      for (const f of cFiles) selectedFiles.add(f);
    }

    totalTokens += c.cost;
    // NOTE: cost was estimated from title+narrative, but only the title is
    // emitted for observations — the budget is a conservative upper bound.
    if (c._kind === 'obs') {
      selectedObs.push({ id: c.id, type: c.type, title: c.title, created_at: new Date(c.created_at_epoch).toISOString() });
    } else {
      selectedSess.push({ id: c.id, request: c.request, completed: c.completed, next_steps: c.next_steps, created_at: new Date(c.created_at_epoch).toISOString() });
    }
  }

  return { observations: selectedObs, summaries: selectedSess, totalTokens };
}
138
+
139
/**
 * Update the project's CLAUDE.md file with a context block.
 * Replaces the existing <claude-mem-context> section or appends a new one.
 * Uses atomic tmp+rename write to prevent partial writes.
 * Never throws — write failures are logged and swallowed (hook must not crash).
 * @param {string} contextBlock Markdown content to inject
 */
export function updateClaudeMd(contextBlock) {
  const claudeMdPath = join(inferProjectDir(), 'CLAUDE.md');
  let content = '';
  // Missing CLAUDE.md is fine — we create it below
  try { content = readFileSync(claudeMdPath, 'utf8'); } catch {}

  const startTag = '<claude-mem-context>';
  const endTag = '</claude-mem-context>';
  const hintComment = '<!-- claude-mem-lite: auto-updated context. To avoid git noise, add CLAUDE.md to .gitignore -->';
  const newSection = `${startTag}\n${contextBlock}\n${endTag}`;

  const startIdx = content.indexOf(startTag);
  // FIX: search for the closing tag only AFTER the opening tag. Previously
  // indexOf(endTag) scanned from position 0, so a stray closing tag appearing
  // before the opening tag produced endIdx < startIdx and the slice below
  // duplicated/mangled the file. An unmatched opening tag now falls through
  // to the append branch instead of corrupting content.
  const endIdx = startIdx !== -1 ? content.indexOf(endTag, startIdx + startTag.length) : -1;

  if (startIdx !== -1 && endIdx !== -1) {
    // Replace existing section in-place — preserves surrounding content (including hint if present)
    content = content.slice(0, startIdx) + newSection + content.slice(endIdx + endTag.length);
  } else if (content.length > 0) {
    // Append to end — never disturb existing CLAUDE.md structure
    const hint = content.includes(hintComment) ? '' : hintComment + '\n';
    content = content.trimEnd() + '\n\n' + hint + newSection + '\n';
  } else {
    // Fresh file: hint comment plus the context section
    content = hintComment + '\n' + newSection + '\n';
  }

  try {
    // tmp+rename is atomic on POSIX filesystems — readers never see a partial file
    const tmp = claudeMdPath + '.mem-tmp';
    writeFileSync(tmp, content);
    renameSync(tmp, claudeMdPath);
  } catch (e) {
    debugLog('ERROR', 'updateClaudeMd', `CLAUDE.md write failed: ${e.message}`);
  }
}
@@ -0,0 +1,222 @@
1
+ // claude-mem-lite episode buffer management
2
+ // Handles file-based episode storage with advisory locking and pending entry recovery
3
+
4
+ import { join } from 'path';
5
+ import { readFileSync, writeFileSync, unlinkSync, readdirSync, openSync, closeSync, writeSync, renameSync, statSync, constants as fsConstants } from 'fs';
6
+ import { inferProject } from './utils.mjs';
7
+ import { RUNTIME_DIR } from './hook-shared.mjs';
8
+
9
/**
 * Read episode file without locking (for signal handlers only).
 * @returns {object|null} Parsed episode or null on failure
 */
export function readEpisodeRaw() {
  try {
    const path = join(RUNTIME_DIR, `ep-${inferProject()}.json`);
    const raw = readFileSync(path, 'utf8');
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
18
+
19
/**
 * Get the path to the current project's episode buffer file.
 * @returns {string} Absolute path to the episode JSON file
 */
export function episodeFile() {
  const project = inferProject();
  return join(RUNTIME_DIR, `ep-${project}.json`);
}
26
+
27
/**
 * Get the path to the advisory lock file for episode operations.
 * @returns {string} Absolute path to the lock file
 */
export function lockFile() {
  return `${episodeFile()}.lock`;
}
34
+
35
/**
 * Acquire an advisory file lock for episode buffer operations.
 * Uses atomic O_CREAT|O_EXCL for lock creation with stale lock detection.
 * The lock payload records { pid, ts } so other processes can judge staleness.
 * @param {number} [maxWaitMs=500] Maximum time to wait for the lock
 * @returns {boolean} true if lock acquired, false on timeout
 */
export function acquireLock(maxWaitMs = 500) {
  const lf = lockFile();
  const deadline = Date.now() + maxWaitMs;
  while (Date.now() < deadline) {
    try {
      let fd;
      try {
        // O_CREAT|O_EXCL fails atomically if the lock file already exists —
        // this is the actual mutual-exclusion primitive
        fd = openSync(lf, fsConstants.O_CREAT | fsConstants.O_EXCL | fsConstants.O_WRONLY);
        const payload = JSON.stringify({ pid: process.pid, ts: Date.now() });
        writeSync(fd, payload);
      } finally {
        // Always close the descriptor, even if writeSync throws
        if (fd !== undefined) closeSync(fd);
      }
      return true;
    } catch {
      // Lock exists — check if stale or orphaned
      try {
        const raw = readFileSync(lf, 'utf8');
        const info = JSON.parse(raw);
        const age = Date.now() - (info.ts || 0);
        let stale = age > 30000; // >30s = stale
        if (!stale && info.pid) {
          // Signal 0 probes process existence without actually signaling it
          try { process.kill(info.pid, 0); } catch (killErr) {
            stale = killErr.code === 'ESRCH'; // Only stale if process truly gone
          }
        }
        // NOTE(review): if writeSync above ever failed after openSync succeeded,
        // the lock would hold OUR pid and never read as stale here — confirm
        // that writeSync failure is not a realistic path on target platforms.
        if (stale) { try { unlinkSync(lf); } catch {} continue; }
      } catch {
        // Can't read lock — check mtime
        try {
          const st = statSync(lf);
          if (Date.now() - st.mtimeMs > 30000) { try { unlinkSync(lf); } catch {} continue; }
        } catch {}
      }
      // WARNING: Atomics.wait blocks the main thread. This is intentional and safe here
      // because hook.mjs runs as a short-lived subprocess (not the MCP server).
      // Do NOT use this pattern in server.mjs or any long-lived event-driven process.
      // Random 1–20ms backoff reduces lock-step retries between competing hooks.
      const wait = Math.ceil(Math.random() * 20);
      Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, wait);
    }
  }
  return false;
}
84
+
85
/**
 * Release the advisory file lock for episode buffer operations.
 * Safe to call when the lock was never acquired.
 */
export function releaseLock() {
  try {
    unlinkSync(lockFile());
  } catch {
    // Lock already gone — nothing to release
  }
}
91
+
92
/**
 * Read the current episode buffer from disk (requires lock).
 * @returns {object|null} Parsed episode or null if not found
 */
export function readEpisode() {
  try {
    const raw = readFileSync(episodeFile(), 'utf8');
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
103
+
104
/**
 * Atomically write episode buffer to disk using tmp+rename.
 * The transient in-memory `_fileSet` dedupe cache is stripped before
 * serialization.
 * @param {object} episode The episode object to persist
 * @throws Re-throws rename failures after cleaning up the tmp file
 */
export function writeEpisode(episode) {
  const target = episodeFile();
  const tmpPath = `${target}.tmp`;
  const { _fileSet, ...persistable } = episode;
  writeFileSync(tmpPath, JSON.stringify(persistable));
  try {
    renameSync(tmpPath, target);
  } catch (renameErr) {
    // Don't leave a stray tmp file behind on failure
    try { unlinkSync(tmpPath); } catch {}
    throw renameErr;
  }
}
120
+
121
/**
 * Create a new empty episode buffer.
 * @param {string} sessionId The current session ID
 * @param {string} project The current project name
 * @returns {object} A fresh episode object
 */
export function createEpisode(sessionId, project) {
  const now = Date.now();
  return {
    sessionId,
    project,
    startedAt: now,
    lastAt: now,
    files: [],
    entries: [],
    filesRead: [],
    fileHistoryShown: [],
  };
}
139
+
140
/**
 * Add file paths to an episode's file tracking set (deduped).
 * Lazily builds `_fileSet` (a Set mirror of `episode.files`) for O(1) lookups;
 * the set is transient and stripped on persistence.
 * @param {object} episode The episode to update
 * @param {string[]} files Array of file paths to add
 */
export function addFileToEpisode(episode, files) {
  episode._fileSet ??= new Set(episode.files);
  for (const path of files) {
    if (episode._fileSet.has(path)) continue;
    episode._fileSet.add(path);
    episode.files.push(path);
  }
}
154
+
155
/**
 * Write a pending entry to a recovery file when the episode lock cannot be acquired.
 * Best-effort: failures are swallowed (dropping one entry beats crashing the hook).
 * @param {object} entry The episode entry to persist
 * @param {string} sessionId The current session ID
 * @param {string} project The current project name
 */
export function writePendingEntry(entry, sessionId, project) {
  const ts = Date.now();
  // Random suffix avoids collisions between hooks writing in the same ms
  const rand = Math.random().toString(36).slice(2, 6);
  const pendingFile = join(RUNTIME_DIR, `pending-${ts}-${rand}.json`);
  const tmp = `${pendingFile}.tmp`;
  try {
    writeFileSync(tmp, JSON.stringify({ entry, sessionId, project, ts }));
    renameSync(tmp, pendingFile);
  } catch {
    try { unlinkSync(tmp); } catch {}
  }
}
173
+
174
/**
 * Merge pending recovery entries into the current episode buffer.
 * Reads and removes pending-*.json files from the runtime directory.
 * Pending files are written by writePendingEntry() when a hook could not
 * acquire the episode lock; this runs (under the lock) to fold them back in.
 * @param {object} episode The episode to merge entries into
 */
export function mergePendingEntries(episode) {
  const oneHourAgo = Date.now() - 3600000;
  const MAX_PENDING_MERGE = 50; // cap per call so a large backlog can't stall the hook
  let files;
  try {
    // Names are pending-<ts>-<rand>.json, so a lexicographic sort merges oldest-first
    files = readdirSync(RUNTIME_DIR).filter(f => f.startsWith('pending-')).sort();
  } catch { return; }

  let merged = 0;
  for (const f of files) {
    if (merged >= MAX_PENDING_MERGE) break;
    const fp = join(RUNTIME_DIR, f);
    try {
      const raw = readFileSync(fp, 'utf8');
      const pending = JSON.parse(raw);
      // Expired (>1h old): delete rather than merge stale context
      if (pending.ts < oneHourAgo) { try { unlinkSync(fp); } catch {} continue; }
      // Only merge entries belonging to the same project; foreign-project files
      // are left on disk for that project's own merge pass (or expiry above)
      if (pending.project && episode.project && pending.project !== episode.project) continue;
      // Unlink BEFORE merging: a crash mid-merge drops the entry instead of
      // double-merging it on the next run (at-most-once semantics)
      unlinkSync(fp);
      if (pending.entry) {
        episode.entries.push(pending.entry);
        // Advance lastAt monotonically; fall back to file ts if entry has none
        episode.lastAt = Math.max(episode.lastAt, pending.entry.ts || pending.ts);
        addFileToEpisode(episode, pending.entry.files || []);
        merged++;
      }
    } catch {
      // Corrupt pending file — remove
      try { unlinkSync(fp); } catch {}
    }
  }
}
210
+
211
/**
 * Check if an episode has significant content worth processing with LLM.
 * Significant = contains file edits or Bash errors.
 * @param {object} episode The episode to check
 * @returns {boolean} true if the episode has significant content
 */
export function episodeHasSignificantContent(episode) {
  const editTools = new Set(['Edit', 'Write', 'NotebookEdit']);
  for (const entry of episode.entries) {
    if (editTools.has(entry.tool)) return true;
    if (entry.tool === 'Bash' && entry.isError) return true;
  }
  return false;
}