claude-mem-lite 2.34.0 → 2.34.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,7 +10,7 @@
10
10
  "plugins": [
11
11
  {
12
12
  "name": "claude-mem-lite",
13
- "version": "2.34.0",
13
+ "version": "2.34.1",
14
14
  "source": "./",
15
15
  "description": "Lightweight persistent memory system for Claude Code — FTS5 search, episode batching, error-triggered recall"
16
16
  }
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-mem-lite",
3
- "version": "2.34.0",
3
+ "version": "2.34.1",
4
4
  "description": "Lightweight persistent memory system for Claude Code — FTS5 search, episode batching, error-triggered recall",
5
5
  "author": {
6
6
  "name": "sdsrss"
package/hook.mjs CHANGED
@@ -417,27 +417,35 @@ async function handleStop() {
417
417
  try { buildAndSaveHandoff(db, sessionId, project, 'exit', episodeSnapshot, ccSessionId || sessionId); }
418
418
  catch (e) { debugCatch(e, 'handleStop-handoff'); }
419
419
 
420
- // Fast summary baseline — ensures summary exists even if background LLM fails
420
+ // Fast summary baseline — ensures summary exists even if background LLM fails.
421
+ // T4-P2-B: guard against Stop firing twice for the same session (rare but possible;
422
+ // mirrors handleSessionStart line 795 hasSummary guard). Uses mem-internal sessionId
423
+ // as the WHERE key per the top-of-file dual-id invariant (#7789).
421
424
  try {
422
- const firstPrompt = db.prepare(`
423
- SELECT prompt_text FROM user_prompts
424
- WHERE content_session_id = ?
425
- ORDER BY prompt_number ASC LIMIT 1
426
- `).get(sessionId);
427
- const recentObs = db.prepare(`
428
- SELECT title FROM observations
429
- WHERE memory_session_id = ? AND COALESCE(compressed_into, 0) = 0
430
- ORDER BY created_at_epoch DESC LIMIT 5
431
- `).all(sessionId);
432
- const fastRequest = truncate(firstPrompt?.prompt_text || '', 200);
433
- const fastCompleted = recentObs.map(o => o.title).filter(Boolean).join('; ');
434
- if (fastRequest || fastCompleted) {
435
- const now = new Date();
436
- db.prepare(`
437
- INSERT INTO session_summaries
438
- (memory_session_id, project, request, investigated, learned, completed, next_steps, remaining_items, files_read, files_edited, notes, created_at, created_at_epoch)
439
- VALUES (?, ?, ?, '', '', ?, '', '', '[]', '[]', 'fast', ?, ?)
440
- `).run(sessionId, project, fastRequest, truncate(fastCompleted, 300), now.toISOString(), now.getTime());
425
+ const existingSummary = db.prepare(
426
+ 'SELECT 1 FROM session_summaries WHERE memory_session_id = ? LIMIT 1'
427
+ ).get(sessionId);
428
+ if (!existingSummary) {
429
+ const firstPrompt = db.prepare(`
430
+ SELECT prompt_text FROM user_prompts
431
+ WHERE content_session_id = ?
432
+ ORDER BY prompt_number ASC LIMIT 1
433
+ `).get(sessionId);
434
+ const recentObs = db.prepare(`
435
+ SELECT title FROM observations
436
+ WHERE memory_session_id = ? AND COALESCE(compressed_into, 0) = 0
437
+ ORDER BY created_at_epoch DESC LIMIT 5
438
+ `).all(sessionId);
439
+ const fastRequest = truncate(firstPrompt?.prompt_text || '', 200);
440
+ const fastCompleted = recentObs.map(o => o.title).filter(Boolean).join('; ');
441
+ if (fastRequest || fastCompleted) {
442
+ const now = new Date();
443
+ db.prepare(`
444
+ INSERT INTO session_summaries
445
+ (memory_session_id, project, request, investigated, learned, completed, next_steps, remaining_items, files_read, files_edited, notes, created_at, created_at_epoch)
446
+ VALUES (?, ?, ?, '', '', ?, '', '', '[]', '[]', 'fast', ?, ?)
447
+ `).run(sessionId, project, fastRequest, truncate(fastCompleted, 300), now.toISOString(), now.getTime());
448
+ }
441
449
  }
442
450
  } catch (e) { debugCatch(e, 'handleStop-fast-summary'); }
443
451
  } finally {
@@ -603,12 +611,14 @@ async function handleSessionStart() {
603
611
  const STALE_AGE = Date.now() - 30 * 86400000;
604
612
  const OP_CAP = 500;
605
613
 
606
- // Purge FIRST: delete entries already marked pending-purge from previous cycles (7-day retention)
607
- // Must run before decay/idle-mark to avoid same-cycle delete of newly-marked entries
614
+ // Purge FIRST: delete pending-purge entries. Schema has no marked_at_epoch, so we
615
+ // anchor retention on created_at_epoch instead: 30d marking gate + 7d grace = 37d.
616
+ // Older cutoffs (e.g. 7d) were always redundant with the 30d marking filter and
617
+ // made purge effectively immediate on the next maintenance cycle — fix for T4-P1-A.
608
618
  const purged = db.prepare(`
609
619
  DELETE FROM observations WHERE compressed_into = ${COMPRESSED_PENDING_PURGE}
610
620
  AND created_at_epoch < ?
611
- `).run(Date.now() - 7 * 86400000);
621
+ `).run(Date.now() - 37 * 86400000);
612
622
  if (purged.changes > 0) debugLog('DEBUG', 'auto-maintain', `purged ${purged.changes} stale observations`);
613
623
 
614
624
  // Cleanup: remove broken observations (no title AND no narrative)
@@ -906,9 +916,13 @@ async function handleUserPrompt() {
906
916
  VALUES (?, ?, ?, ?, ?, 'active')
907
917
  `).run(sessionId, sessionId, project, now.toISOString(), now.getTime());
908
918
 
909
- // Increment prompt counter
910
- db.prepare('UPDATE sdk_sessions SET prompt_counter = COALESCE(prompt_counter, 0) + 1 WHERE content_session_id = ?').run(sessionId);
911
- const counter = db.prepare('SELECT prompt_counter FROM sdk_sessions WHERE content_session_id = ?').get(sessionId);
919
+ // T4-P2-D: atomic increment+read via UPDATE ... RETURNING (SQLite 3.35+).
920
+ // Previously UPDATE + SELECT as two statements; parallel prompts could read a stale
921
+ // counter and emit duplicate prompt_number values. better-sqlite3 ships a modern SQLite.
922
+ const bumped = db.prepare(
923
+ 'UPDATE sdk_sessions SET prompt_counter = COALESCE(prompt_counter, 0) + 1 WHERE content_session_id = ? RETURNING prompt_counter'
924
+ ).get(sessionId);
925
+ const promptNumber = bumped?.prompt_counter || 1;
912
926
 
913
927
  db.prepare(`
914
928
  INSERT INTO user_prompts (content_session_id, prompt_text, prompt_number, created_at, created_at_epoch)
@@ -916,7 +930,7 @@ async function handleUserPrompt() {
916
930
  `).run(
917
931
  sessionId,
918
932
  scrubSecrets(promptText.slice(0, 10000)),
919
- counter?.prompt_counter || 1,
933
+ promptNumber,
920
934
  now.toISOString(), now.getTime()
921
935
  );
922
936
 
@@ -928,7 +942,7 @@ async function handleUserPrompt() {
928
942
  const ccSessionId = typeof hookData.session_id === 'string' && hookData.session_id.length > 0
929
943
  ? hookData.session_id
930
944
  : null;
931
- if (counter?.prompt_counter <= 3) {
945
+ if (promptNumber <= 3) {
932
946
  try {
933
947
  if (detectContinuationIntent(db, promptText, project, ccSessionId)) {
934
948
  const injection = renderHandoffInjection(db, project, ccSessionId);
package/mem-cli.mjs CHANGED
@@ -783,7 +783,7 @@ function cmdSave(db, args) {
783
783
  const { positional, flags } = parseArgs(args);
784
784
  const text = positional.join(' ');
785
785
  if (!text) {
786
- fail('[mem] Usage: mem save "<text>" [--type T] [--title T] [--importance N] [--project P] [--files f1,f2]');
786
+ fail('[mem] Usage: mem save "<text>" [--type T] [--title T] [--importance N] [--project P] [--files f1,f2] [--lesson T]');
787
787
  return;
788
788
  }
789
789
 
@@ -805,9 +805,21 @@ function cmdSave(db, args) {
805
805
  const project = flags.project ? resolveProject(db, flags.project) : inferProject();
806
806
  const saveFiles = flags.files ? flags.files.split(',').map(f => f.trim()).filter(Boolean) : [];
807
807
 
808
+ // Optional lesson_learned — accepts --lesson or --lesson-learned (alias)
809
+ // Mirrors MCP memSaveSchema.lesson_learned (≤500 chars) and cmdUpdate's flag handling.
810
+ const rawLesson = flags.lesson !== undefined ? flags.lesson
811
+ : flags['lesson-learned'] !== undefined ? flags['lesson-learned']
812
+ : null;
813
+ if (rawLesson !== null && typeof rawLesson === 'string' && rawLesson.length > 500) {
814
+ fail(`[mem] --lesson too long (${rawLesson.length} chars, max 500).`);
815
+ return;
816
+ }
817
+
808
818
  // Secret scrubbing (aligned with MCP mem_save)
809
819
  const safeContent = scrubSecrets(text);
810
820
  const safeTitle = scrubSecrets(rawTitle);
821
+ const safeLesson = (rawLesson !== null && typeof rawLesson === 'string' && rawLesson.length > 0)
822
+ ? scrubSecrets(rawLesson) : null;
811
823
 
812
824
  // Dedup: skip if similar title/content saved in last 5 minutes (aligned with MCP mem_save)
813
825
  const fiveMinAgo = Date.now() - 5 * 60 * 1000;
@@ -827,8 +839,11 @@ function cmdSave(db, args) {
827
839
  }
828
840
 
829
841
  // MinHash + CJK bigrams (aligned with MCP mem_save)
842
+ // Include lesson in the FTS-indexed text so the +0.3 lesson-boost actually surfaces
843
+ // lesson-bearing rows (mirrors MCP mem_save which builds the same indexText).
830
844
  const minhashSig = computeMinHash(safeTitle + ' ' + safeContent);
831
- const bigramText = cjkBigrams(safeTitle + ' ' + safeContent);
845
+ const indexText = [safeTitle, safeContent, safeLesson].filter(Boolean).join(' ');
846
+ const bigramText = cjkBigrams(indexText);
832
847
  const textField = bigramText ? safeContent + ' ' + bigramText : safeContent;
833
848
 
834
849
  const now = new Date();
@@ -843,9 +858,9 @@ function cmdSave(db, args) {
843
858
  // Atomic: insert observation + observation_files + TF-IDF vector (aligned with MCP mem_save)
844
859
  const saveTx = db.transaction(() => {
845
860
  const result = db.prepare(`
846
- INSERT INTO observations (memory_session_id, project, text, type, title, narrative, concepts, facts, files_read, files_modified, importance, minhash_sig, branch, created_at, created_at_epoch)
847
- VALUES (?, ?, ?, ?, ?, ?, '', '', '[]', ?, ?, ?, ?, ?, ?)
848
- `).run(sessionId, project, textField, type, safeTitle, safeContent, JSON.stringify(saveFiles), importance, minhashSig, getCurrentBranch(), now.toISOString(), now.getTime());
861
+ INSERT INTO observations (memory_session_id, project, text, type, title, narrative, concepts, facts, files_read, files_modified, importance, minhash_sig, lesson_learned, branch, created_at, created_at_epoch)
862
+ VALUES (?, ?, ?, ?, ?, ?, '', '', '[]', ?, ?, ?, ?, ?, ?, ?)
863
+ `).run(sessionId, project, textField, type, safeTitle, safeContent, JSON.stringify(saveFiles), importance, minhashSig, safeLesson, getCurrentBranch(), now.toISOString(), now.getTime());
849
864
  const savedId = Number(result.lastInsertRowid);
850
865
 
851
866
  // Populate observation_files junction table (aligned with MCP mem_save)
@@ -870,7 +885,8 @@ function cmdSave(db, args) {
870
885
  });
871
886
  const result = saveTx();
872
887
 
873
- out(`[mem] Saved #${result.lastInsertRowid} [${type}] "${truncate(safeTitle, 80)}" (project: ${project})`);
888
+ const lessonNote = safeLesson ? ' 💡lesson captured' : '';
889
+ out(`[mem] Saved #${result.lastInsertRowid} [${type}] "${truncate(safeTitle, 80)}" (project: ${project})${lessonNote}`);
874
890
  }
875
891
 
876
892
  // N-1: Quality-focused stats for R-2 A/B baseline.
@@ -1645,6 +1661,9 @@ function cmdMaintain(db, args) {
1645
1661
  const OP_CAP = 1000;
1646
1662
  const results = [];
1647
1663
 
1664
+ // T2-P1-B: surface the OP_CAP hit so users know to re-run, matching MCP mem_maintain.
1665
+ const capHint = (changes) => (changes >= OP_CAP ? ' (cap reached, re-run for more)' : '');
1666
+
1648
1667
  db.transaction(() => {
1649
1668
  if (ops.includes('cleanup')) {
1650
1669
  const deleted = db.prepare(`
@@ -1655,7 +1674,7 @@ function cmdMaintain(db, args) {
1655
1674
  ${projectFilter} LIMIT ${OP_CAP}
1656
1675
  )
1657
1676
  `).run(...baseParams);
1658
- results.push(`Cleaned up ${deleted.changes} broken observations`);
1677
+ results.push(`Cleaned up ${deleted.changes} broken observations${capHint(deleted.changes)}`);
1659
1678
  }
1660
1679
 
1661
1680
  if (ops.includes('decay')) {
@@ -1683,7 +1702,8 @@ function cmdMaintain(db, args) {
1683
1702
  ${projectFilter} LIMIT ${OP_CAP}
1684
1703
  )
1685
1704
  `).run(staleAge, ...baseParams);
1686
- results.push(`Decayed ${decayed.changes} stale observations, marked ${idleMarked.changes} idle as pending-purge`);
1705
+ const decayCap = (decayed.changes >= OP_CAP || idleMarked.changes >= OP_CAP) ? ' (cap reached, re-run for more)' : '';
1706
+ results.push(`Decayed ${decayed.changes} stale observations, marked ${idleMarked.changes} idle as pending-purge${decayCap}`);
1687
1707
  }
1688
1708
 
1689
1709
  if (ops.includes('boost')) {
@@ -1697,7 +1717,7 @@ function cmdMaintain(db, args) {
1697
1717
  ${projectFilter} LIMIT ${OP_CAP}
1698
1718
  )
1699
1719
  `).run(...baseParams);
1700
- results.push(`Boosted ${boosted.changes} frequently-accessed observations`);
1720
+ results.push(`Boosted ${boosted.changes} frequently-accessed observations${capHint(boosted.changes)}`);
1701
1721
  }
1702
1722
 
1703
1723
  if (ops.includes('dedup') && flags['merge-ids']) {
@@ -1715,17 +1735,41 @@ function cmdMaintain(db, args) {
1715
1735
  results.push(`Merged ${totalMerged} duplicate observations`);
1716
1736
  }
1717
1737
 
1738
+ // T2-P1-B parity with MCP: warn when merge-ids is provided but dedup wasn't requested.
1739
+ if (!ops.includes('dedup') && flags['merge-ids']) {
1740
+ results.push('Warning: --merge-ids provided but "dedup" not in operations — merge-ids ignored');
1741
+ }
1742
+
1718
1743
  if (ops.includes('purge_stale')) {
1719
1744
  const retainDays = parseInt(flags['retain-days'], 10) || 30;
1720
1745
  const retainCutoff = Date.now() - retainDays * 86400000;
1721
- const purged = db.prepare(`
1722
- DELETE FROM observations WHERE id IN (
1723
- SELECT id FROM observations
1724
- WHERE compressed_into = ${COMPRESSED_PENDING_PURGE} AND created_at_epoch < ?
1725
- ${projectFilter} LIMIT ${OP_CAP}
1726
- )
1727
- `).run(retainCutoff, ...baseParams);
1728
- results.push(`Purged ${purged.changes} stale observations`);
1746
+ // T2-P0-A (CLI parity): purge_stale is the only DELETE in this code path — require
1747
+ // --confirm so a mis-typed `maintain execute --ops purge_stale` can't wipe rows silently.
1748
+ const confirmed = flags.confirm === true || flags.confirm === 'true';
1749
+ if (!confirmed) {
1750
+ const previewRow = db.prepare(`
1751
+ SELECT COUNT(*) AS candidates, MIN(created_at_epoch) AS oldest, MAX(created_at_epoch) AS newest
1752
+ FROM observations
1753
+ WHERE compressed_into = ${COMPRESSED_PENDING_PURGE} AND created_at_epoch < ? ${projectFilter}
1754
+ `).get(retainCutoff, ...baseParams);
1755
+ const pushLines = [`purge_stale preview (no --confirm):`,
1756
+ ` Candidates (pending-purge, older than ${retainDays}d): ${previewRow.candidates}`];
1757
+ if (previewRow.candidates > 0) {
1758
+ pushLines.push(` Oldest: ${new Date(previewRow.oldest).toISOString().slice(0, 10)}`);
1759
+ pushLines.push(` Newest: ${new Date(previewRow.newest).toISOString().slice(0, 10)}`);
1760
+ }
1761
+ pushLines.push(` To delete, re-run with --confirm.`);
1762
+ results.push(pushLines.join('\n'));
1763
+ } else {
1764
+ const purged = db.prepare(`
1765
+ DELETE FROM observations WHERE id IN (
1766
+ SELECT id FROM observations
1767
+ WHERE compressed_into = ${COMPRESSED_PENDING_PURGE} AND created_at_epoch < ?
1768
+ ${projectFilter} LIMIT ${OP_CAP}
1769
+ )
1770
+ `).run(retainCutoff, ...baseParams);
1771
+ results.push(`Purged ${purged.changes} stale observations (retained last ${retainDays} days)${capHint(purged.changes)}`);
1772
+ }
1729
1773
  }
1730
1774
  })();
1731
1775
 
@@ -1993,6 +2037,7 @@ Commands:
1993
2037
  --importance N 1-3 (default: 2)
1994
2038
  --project P Project name
1995
2039
  --files f1,f2 Comma-separated file paths
2040
+ --lesson T Lesson learned (≤500 chars; alias: --lesson-learned)
1996
2041
 
1997
2042
  delete <id1,id2,...> Delete observations by ID
1998
2043
  --confirm Execute deletion (preview by default)
@@ -2180,10 +2225,32 @@ async function cmdEnrich(argv) {
2180
2225
  async function cmdOptimize(db, args) {
2181
2226
  const run = args.includes('--run');
2182
2227
  const runAll = args.includes('--run-all');
2228
+ // T2-P1-D: --task accepts a single task or a comma-separated list, parity with MCP memOptimizeSchema.tasks.
2229
+ const VALID_TASKS = ['re-enrich', 'normalize', 'cluster-merge', 'smart-compress'];
2183
2230
  const taskIdx = args.indexOf('--task');
2184
- const tasks = taskIdx >= 0 && args[taskIdx + 1] ? [args[taskIdx + 1]] : undefined;
2231
+ let tasks;
2232
+ if (taskIdx >= 0 && args[taskIdx + 1]) {
2233
+ const parsed = args[taskIdx + 1].split(',').map(s => s.trim()).filter(Boolean);
2234
+ const invalid = parsed.filter(t => !VALID_TASKS.includes(t));
2235
+ if (invalid.length > 0) {
2236
+ fail(`[mem] Unknown task(s): ${invalid.join(', ')}. Valid: ${VALID_TASKS.join(', ')}`);
2237
+ return;
2238
+ }
2239
+ tasks = parsed;
2240
+ }
2241
+ // T2-P1-C: reject --max 0 / --max <non-positive> / --max <non-number> explicitly — the old
2242
+ // `|| 15` fallback silently turned these into the default (15), burning LLM tokens.
2185
2243
  const maxIdx = args.indexOf('--max');
2186
- const maxItems = maxIdx >= 0 ? parseInt(args[maxIdx + 1], 10) || 15 : 15;
2244
+ let maxItems = 15;
2245
+ if (maxIdx >= 0) {
2246
+ const raw = args[maxIdx + 1];
2247
+ const parsed = parseInt(raw, 10);
2248
+ if (!Number.isFinite(parsed) || parsed < 1 || parsed > 100) {
2249
+ fail(`[mem] Invalid --max "${raw}". Must be an integer between 1 and 100.`);
2250
+ return;
2251
+ }
2252
+ maxItems = parsed;
2253
+ }
2187
2254
  // R-7 micro: --scope wide targets bugfix/refactor/feature/decision with narrative but no
2188
2255
  // lesson_learned (the "Haiku judged 'none'" cases). Default 'narrow' preserves old behavior.
2189
2256
  const scopeIdx = args.indexOf('--scope');
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-mem-lite",
3
- "version": "2.34.0",
3
+ "version": "2.34.1",
4
4
  "description": "Lightweight persistent memory system for Claude Code",
5
5
  "type": "module",
6
6
  "engines": {
@@ -66,13 +66,24 @@ try {
66
66
 
67
67
  // Read and output
68
68
  const content = readFileSync(skillPath, 'utf8');
69
- // Token budget: ~4 chars per token, 4000 token limit = 16000 chars
69
+ // T4-P1-B: JSON hookSpecificOutput parity with pre-tool-recall.js. Some CC variants
70
+ // (notably sdscc) silently drop plain-text stdout from PreToolUse — the previous
71
+ // console.log() form would render on stock CC but no-op on those variants.
72
+ // Token budget: ~4 chars per token, 4000 token limit = 16000 chars.
73
+ let additionalContext;
70
74
  if (content.length > 16000) {
71
75
  const summary = content.slice(0, 800);
72
- console.log(`<skill-bridge name="${row.name}" source="managed" truncated="true">\n${summary}\n...\n</skill-bridge>\n\nSkill content truncated. Use mem_use(name="${row.name}") to load full content.`);
76
+ additionalContext = `<skill-bridge name="${row.name}" source="managed" truncated="true">\n${summary}\n...\n</skill-bridge>\n\nSkill content truncated. Use mem_use(name="${row.name}") to load full content.`;
73
77
  } else {
74
- console.log(`<skill-bridge name="${row.name}" source="managed">\n${content}\n</skill-bridge>\n\nThis skill was loaded from the managed registry. Follow the instructions above.`);
78
+ additionalContext = `<skill-bridge name="${row.name}" source="managed">\n${content}\n</skill-bridge>\n\nThis skill was loaded from the managed registry. Follow the instructions above.`;
75
79
  }
80
+ process.stdout.write(JSON.stringify({
81
+ suppressOutput: true,
82
+ hookSpecificOutput: {
83
+ hookEventName: 'PreToolUse',
84
+ additionalContext,
85
+ },
86
+ }));
76
87
  } catch {
77
88
  // Silent failure — never block Skill tool
78
89
  } finally {
package/server.mjs CHANGED
@@ -159,7 +159,7 @@ function buildObsFtsQuery(scoring, { multiplier, withSnippet, withOffset, includ
159
159
  const mult = multiplier ? ` * ${multiplier}` : '';
160
160
  const lowSignalClause = includeNoise ? '' : `AND ${notLowSignalTitleClause('o')}`;
161
161
  return `
162
- SELECT o.id, o.type, o.title, o.subtitle, o.project, o.created_at, o.importance,
162
+ SELECT o.id, o.type, o.title, o.subtitle, o.project, o.created_at, o.created_at_epoch, o.importance,
163
163
  o.files_modified,
164
164
  ${withSnippet ? "snippet(observations_fts, 2, '»', '«', '…', 10) as match_snippet," : ''}
165
165
  ${scoreExpr}${mult} as score
@@ -201,7 +201,8 @@ function buildObsFtsParams({ now, projectBoost, ftsQuery, args, epochFrom, epoch
201
201
  function ftsRowToResult(r, { scoreMultiplier, snippet } = {}) {
202
202
  return {
203
203
  source: 'obs', id: r.id, type: r.type, title: r.title, subtitle: r.subtitle,
204
- project: r.project, date: r.created_at, score: scoreMultiplier ? r.score * scoreMultiplier : r.score,
204
+ project: r.project, date: r.created_at, created_at_epoch: r.created_at_epoch,
205
+ score: scoreMultiplier ? r.score * scoreMultiplier : r.score,
205
206
  files_modified: r.files_modified, importance: r.importance, snippet: snippet ? (r.match_snippet || '') : '',
206
207
  };
207
208
  }
@@ -312,7 +313,7 @@ function searchObservations(ctx) {
312
313
  LIMIT ? OFFSET ?
313
314
  `).all(...params);
314
315
  for (const r of rows) {
315
- results.push({ source: 'obs', id: r.id, type: r.type, title: r.title, subtitle: r.subtitle, project: r.project, date: r.created_at, dateEpoch: r.created_at_epoch });
316
+ results.push({ source: 'obs', id: r.id, type: r.type, title: r.title, subtitle: r.subtitle, project: r.project, date: r.created_at, created_at_epoch: r.created_at_epoch, files_modified: r.files_modified, importance: r.importance });
316
317
  }
317
318
  }
318
319
 
@@ -371,7 +372,7 @@ function searchSessions(ctx) {
371
372
  const now = Date.now();
372
373
  const sessionProjectBoost = args.project ? null : currentProject;
373
374
  const rows = db.prepare(`
374
- SELECT s.id, s.request, s.completed, s.project, s.created_at,
375
+ SELECT s.id, s.request, s.completed, s.project, s.created_at, s.created_at_epoch,
375
376
  ${SESS_BM25}
376
377
  * (1.0 + EXP(-0.693 * (? - s.created_at_epoch) / ${RECENCY_HALF_LIFE_MS}.0))
377
378
  * (CASE WHEN ? IS NOT NULL AND s.project = ? THEN 2.0 ELSE 1.0 END) as score
@@ -393,7 +394,7 @@ function searchSessions(ctx) {
393
394
  perSourceLimit, perSourceOffset
394
395
  );
395
396
  for (const r of rows) {
396
- results.push({ source: 'session', id: r.id, request: r.request, completed: r.completed, project: r.project, date: r.created_at, score: r.score });
397
+ results.push({ source: 'session', id: r.id, request: r.request, completed: r.completed, project: r.project, date: r.created_at, created_at_epoch: r.created_at_epoch, score: r.score });
397
398
  }
398
399
  } else if (!searchType) {
399
400
  // Skip sessions in unfiltered no-query mode (too noisy)
@@ -412,7 +413,7 @@ function searchSessions(ctx) {
412
413
  LIMIT ? OFFSET ?
413
414
  `).all(...params);
414
415
  for (const r of rows) {
415
- results.push({ source: 'session', id: r.id, request: r.request, completed: r.completed, project: r.project, date: r.created_at, dateEpoch: r.created_at_epoch });
416
+ results.push({ source: 'session', id: r.id, request: r.request, completed: r.completed, project: r.project, date: r.created_at, created_at_epoch: r.created_at_epoch });
416
417
  }
417
418
  }
418
419
 
@@ -425,7 +426,7 @@ function searchPrompts(ctx) {
425
426
 
426
427
  if (ftsQuery) {
427
428
  const rows = db.prepare(`
428
- SELECT p.id, p.prompt_text, p.content_session_id, p.created_at,
429
+ SELECT p.id, p.prompt_text, p.content_session_id, p.created_at, p.created_at_epoch,
429
430
  bm25(user_prompts_fts, 1) as score
430
431
  FROM user_prompts_fts
431
432
  JOIN user_prompts p ON user_prompts_fts.rowid = p.id
@@ -445,7 +446,7 @@ function searchPrompts(ctx) {
445
446
  perSourceLimit, perSourceOffset
446
447
  );
447
448
  for (const r of rows) {
448
- results.push({ source: 'prompt', id: r.id, text: r.prompt_text, session: r.content_session_id, date: r.created_at, score: r.score });
449
+ results.push({ source: 'prompt', id: r.id, text: r.prompt_text, session: r.content_session_id, date: r.created_at, created_at_epoch: r.created_at_epoch, score: r.score });
449
450
  }
450
451
  // CJK LIKE fallback: FTS5 unicode61 can't tokenize CJK substrings in prompts
451
452
  if (rows.length === 0 && args.query) {
@@ -454,7 +455,7 @@ function searchPrompts(ctx) {
454
455
  const likeConds = cjkPatterns.map(() => 'p.prompt_text LIKE ?');
455
456
  const likeParams = cjkPatterns.map(p => `%${p}%`);
456
457
  const fallbackRows = db.prepare(`
457
- SELECT p.id, p.prompt_text, p.content_session_id, p.created_at
458
+ SELECT p.id, p.prompt_text, p.content_session_id, p.created_at, p.created_at_epoch
458
459
  FROM user_prompts p
459
460
  JOIN sdk_sessions s ON p.content_session_id = s.content_session_id
460
461
  WHERE (${likeConds.join(' OR ')})
@@ -472,7 +473,7 @@ function searchPrompts(ctx) {
472
473
  perSourceLimit, perSourceOffset
473
474
  );
474
475
  for (const r of fallbackRows) {
475
- results.push({ source: 'prompt', id: r.id, text: r.prompt_text, session: r.content_session_id, date: r.created_at, score: 0 });
476
+ results.push({ source: 'prompt', id: r.id, text: r.prompt_text, session: r.content_session_id, date: r.created_at, created_at_epoch: r.created_at_epoch, score: 0 });
476
477
  }
477
478
  }
478
479
  }
@@ -493,7 +494,7 @@ function searchPrompts(ctx) {
493
494
  LIMIT ? OFFSET ?
494
495
  `).all(...params);
495
496
  for (const r of rows) {
496
- results.push({ source: 'prompt', id: r.id, text: r.prompt_text, session: r.content_session_id, date: r.created_at, dateEpoch: r.created_at_epoch });
497
+ results.push({ source: 'prompt', id: r.id, text: r.prompt_text, session: r.content_session_id, date: r.created_at, created_at_epoch: r.created_at_epoch });
497
498
  }
498
499
  }
499
500
 
@@ -522,7 +523,10 @@ function formatSearchOutput(paginatedResults, args, ftsQuery, totalCount, isCros
522
523
  ? `${paginatedResults.length} of ${totalCount}`
523
524
  : `${paginatedResults.length}`;
524
525
  const hasMixed = paginatedResults.some(r => r.source === 'session' || r.source === 'prompt');
525
- lines.push(`Found ${countLabel} result(s)${args.query ? ` for "${args.query}"` : ''}:${hasMixed ? ' (# observation, S# session, P# prompt)' : ''}\n`);
526
+ // P2-6: empty/omitted query falls through to a "listing recent" path — label it explicitly
527
+ // so callers don't mistake BM25-less results for relevance-ranked ones.
528
+ const qLabel = args.query ? ` for "${args.query}"` : ' (no query — listing recent)';
529
+ lines.push(`Found ${countLabel} result(s)${qLabel}:${hasMixed ? ' (# observation, S# session, P# prompt)' : ''}\n`);
526
530
 
527
531
  for (const r of paginatedResults) {
528
532
  if (r.source === 'obs') {
@@ -627,7 +631,7 @@ server.registerTool(
627
631
  if (ftsQuery) {
628
632
  results.sort((a, b) => (a.score ?? 0) - (b.score ?? 0));
629
633
  } else {
630
- results.sort((a, b) => (b.dateEpoch ?? 0) - (a.dateEpoch ?? 0));
634
+ results.sort((a, b) => (b.created_at_epoch ?? 0) - (a.created_at_epoch ?? 0));
631
635
  }
632
636
  }
633
637
 
@@ -832,15 +836,17 @@ server.registerTool(
832
836
  const source = args.source || 'obs';
833
837
  const placeholders = args.ids.map(() => '?').join(',');
834
838
 
835
- let rows, allFields, prefix;
839
+ let rows, allFields, prefix, sourceLabel;
836
840
  if (source === 'session') {
837
841
  rows = db.prepare(`SELECT * FROM session_summaries WHERE id IN (${placeholders}) ORDER BY created_at_epoch ASC`).all(...args.ids);
838
842
  allFields = ['id', 'request', 'investigated', 'learned', 'completed', 'next_steps', 'files_read', 'files_edited', 'notes', 'project', 'created_at', 'memory_session_id', 'prompt_number'];
839
843
  prefix = 'S#';
844
+ sourceLabel = 'sessions';
840
845
  } else if (source === 'prompt') {
841
846
  rows = db.prepare(`SELECT * FROM user_prompts WHERE id IN (${placeholders}) ORDER BY created_at_epoch ASC`).all(...args.ids);
842
847
  allFields = ['id', 'prompt_text', 'content_session_id', 'prompt_number', 'created_at'];
843
848
  prefix = 'P#';
849
+ sourceLabel = 'prompts';
844
850
  } else {
845
851
  // Increment access_count for retrieved observations (batch UPDATE)
846
852
  try {
@@ -852,15 +858,43 @@ server.registerTool(
852
858
  rows = db.prepare(`SELECT * FROM observations WHERE id IN (${placeholders}) ORDER BY created_at_epoch ASC`).all(...args.ids);
853
859
  allFields = ['id', 'type', 'title', 'subtitle', 'narrative', 'text', 'facts', 'concepts', 'lesson_learned', 'search_aliases', 'files_read', 'files_modified', 'project', 'created_at', 'memory_session_id', 'prompt_number', 'importance', 'related_ids', 'access_count', 'branch', 'superseded_at', 'superseded_by', 'last_accessed_at'];
854
860
  prefix = '#';
861
+ sourceLabel = 'observations';
862
+ }
863
+
864
+ // P1-3: validate requested fields — throw on all-invalid so callers don't silently get an
865
+ // empty record (header only). Partial-invalid is tolerated but surfaced as a note.
866
+ let fieldsNote = '';
867
+ if (args.fields?.length) {
868
+ const invalid = args.fields.filter(f => !allFields.includes(f));
869
+ const valid = args.fields.filter(f => allFields.includes(f));
870
+ if (valid.length === 0) {
871
+ throw new Error(`No valid fields. Unknown field(s): ${invalid.join(', ')}. Valid: ${allFields.join(', ')}`);
872
+ }
873
+ if (invalid.length > 0) {
874
+ fieldsNote = `Note: unknown field(s) dropped: ${invalid.join(', ')}. Valid: ${allFields.join(', ')}`;
875
+ }
855
876
  }
856
877
 
857
878
  if (rows.length === 0) {
858
- return { content: [{ type: 'text', text: `No ${source === 'session' ? 'sessions' : source === 'prompt' ? 'prompts' : 'observations'} found for given IDs.` }] };
879
+ // P2-7: for source=session/prompt, check whether the IDs exist as observations so the
880
+ // caller can switch source instead of chasing a phantom miss.
881
+ let hint = '';
882
+ if (source === 'session' || source === 'prompt') {
883
+ try {
884
+ const obsHits = db.prepare(`SELECT id FROM observations WHERE id IN (${placeholders})`).all(...args.ids);
885
+ if (obsHits.length > 0) {
886
+ hint = ` These ID(s) exist as observations: ${obsHits.map(r => r.id).join(', ')}. Try source='obs'.`;
887
+ }
888
+ } catch { /* best-effort hint */ }
889
+ }
890
+ const msg = `No ${sourceLabel} found for given IDs.${hint}`;
891
+ return { content: [{ type: 'text', text: fieldsNote ? `${msg}\n\n${fieldsNote}` : msg }] };
859
892
  }
860
893
 
861
894
  const fields = args.fields?.length ? args.fields.filter(f => allFields.includes(f)) : allFields;
862
895
 
863
896
  const parts = [];
897
+ if (fieldsNote) parts.push(fieldsNote);
864
898
  for (const row of rows) {
865
899
  const lines = [`── ${prefix}${row.id} ──`];
866
900
  for (const f of fields) {
@@ -875,6 +909,13 @@ server.registerTool(
875
909
  parts.push(lines.join('\n'));
876
910
  }
877
911
 
912
+ // P1-4: surface IDs that weren't found (mirrors mem_delete's missing-ID note).
913
+ const foundIds = new Set(rows.map(r => r.id));
914
+ const missing = args.ids.filter(id => !foundIds.has(id));
915
+ if (missing.length > 0) {
916
+ parts.push(`Note: ID(s) ${missing.join(', ')} not found.`);
917
+ }
918
+
878
919
  return { content: [{ type: 'text', text: parts.join('\n\n') }] };
879
920
  })
880
921
  );
@@ -1366,11 +1407,43 @@ server.registerTool(
1366
1407
  }
1367
1408
 
1368
1409
  if (action === 'execute') {
1369
- const ops = args.operations || ['cleanup', 'decay', 'boost'];
1410
+ const ops = args.operations && args.operations.length > 0
1411
+ ? args.operations
1412
+ : ['cleanup', 'decay', 'boost'];
1413
+ // T2-P1-A: reject explicit empty array (vs. omitted → defaults above). Empty-array
1414
+ // callers are almost always mistakes; silently running only FTS5 optimize hides the error.
1415
+ if (args.operations && args.operations.length === 0) {
1416
+ return { content: [{ type: 'text', text: 'operations array is empty. Pass a non-empty list (e.g. ["cleanup","decay","boost"]) or omit operations to use the default set.' }], isError: true };
1417
+ }
1370
1418
  const results = [];
1371
1419
  const staleAge = Date.now() - STALE_AGE_MS;
1372
1420
  const OP_ROW_CAP = 1000; // safety cap per operation
1373
1421
 
1422
+ // T2-P0-A: purge_stale is the only DELETE in this handler. Require confirm=true;
1423
+ // a first call without confirm returns a dry-run preview so callers know the blast radius.
1424
+ const purgeRequested = ops.includes('purge_stale');
1425
+ if (purgeRequested && args.confirm !== true) {
1426
+ const retainDays = args.retain_days ?? 30;
1427
+ const retainCutoff = Date.now() - retainDays * 86400000;
1428
+ const previewRow = db.prepare(`
1429
+ SELECT COUNT(*) AS candidates, MIN(created_at_epoch) AS oldest, MAX(created_at_epoch) AS newest
1430
+ FROM observations
1431
+ WHERE compressed_into = ${COMPRESSED_PENDING_PURGE} AND created_at_epoch < ? ${projectFilter}
1432
+ `).get(retainCutoff, ...baseParams);
1433
+ const lines = [
1434
+ 'purge_stale preview (confirm=false):',
1435
+ ` Candidates (pending-purge, older than ${retainDays}d): ${previewRow.candidates}`,
1436
+ ];
1437
+ if (previewRow.candidates > 0) {
1438
+ lines.push(` Oldest: ${new Date(previewRow.oldest).toISOString().slice(0, 10)}`);
1439
+ lines.push(` Newest: ${new Date(previewRow.newest).toISOString().slice(0, 10)}`);
1440
+ }
1441
+ lines.push('');
1442
+ lines.push('Nothing was deleted. To execute, re-run with confirm=true:');
1443
+ lines.push(` mem_maintain(action="execute", operations=${JSON.stringify(ops)}, confirm=true${args.retain_days ? `, retain_days=${args.retain_days}` : ''}${args.project ? `, project="${args.project}"` : ''})`);
1444
+ return { content: [{ type: 'text', text: lines.join('\n') }] };
1445
+ }
1446
+
1374
1447
  db.transaction(() => {
1375
1448
  if (ops.includes('cleanup')) {
1376
1449
  const deleted = db.prepare(`
@@ -1541,6 +1614,9 @@ server.registerTool(
1541
1614
  tasks: args.tasks,
1542
1615
  maxItems: args.max_items || 15,
1543
1616
  force,
1617
+ // T2-P0-B: scope parity with CLI (--scope wide). When omitted, optimizeRun defaults
1618
+ // to narrow via its own code; passing through keeps that fallback intact.
1619
+ reenrichScope: args.scope,
1544
1620
  });
1545
1621
 
1546
1622
  const lines = ['🔧 LLM Optimization Results:'];
@@ -1626,15 +1702,18 @@ server.registerTool(
1626
1702
  const typeFilter = args.type;
1627
1703
  const where = typeFilter ? 'WHERE type = ? AND status = ?' : 'WHERE status = ?';
1628
1704
  const params = typeFilter ? [typeFilter, 'active'] : ['active'];
1705
+ // T3-P2-A: order by adoption then recommendation (CLI parity), and coalesce NULL counts
1706
+ // so the output shows "adopt:0" rather than the jarring "adopt:null".
1629
1707
  const resources = rdb.prepare(`
1630
1708
  SELECT name, type, invocation_name, recommend_count, adopt_count, capability_summary
1631
- FROM resources ${where} ORDER BY type, name
1709
+ FROM resources ${where}
1710
+ ORDER BY COALESCE(adopt_count, 0) DESC, COALESCE(recommend_count, 0) DESC, type, name
1632
1711
  `).all(...params);
1633
1712
 
1634
1713
  if (resources.length === 0) return { content: [{ type: 'text', text: 'No resources found.' }] };
1635
1714
 
1636
1715
  const lines = resources.map(r =>
1637
- `${r.type === 'skill' ? 'S' : 'A'} ${r.name}${r.invocation_name ? ` (${r.invocation_name})` : ''} — rec:${r.recommend_count} adopt:${r.adopt_count} — ${truncate(r.capability_summary || '', 80)}`
1716
+ `${r.type === 'skill' ? 'S' : 'A'} ${r.name}${r.invocation_name ? ` (${r.invocation_name})` : ''} — rec:${r.recommend_count ?? 0} adopt:${r.adopt_count ?? 0} — ${truncate(r.capability_summary || '', 80)}`
1638
1717
  );
1639
1718
  return { content: [{ type: 'text', text: `Resources (${resources.length}):\n${lines.join('\n')}` }] };
1640
1719
  }
@@ -1909,19 +1988,29 @@ server.registerTool(
1909
1988
  wheres.push('superseded_at IS NULL');
1910
1989
  if (args.project) { wheres.push('project = ?'); params.push(resolveProject(args.project)); }
1911
1990
  if (args.type) { wheres.push('type = ?'); params.push(args.type); }
1991
+ // T3-P1-A: surface invalid dates instead of silently dropping the filter — mirrors
1992
+ // mem_search, which threw. A dropped filter can quietly expand the export blast radius.
1912
1993
  if (args.date_from) {
1913
1994
  const epoch = new Date(args.date_from).getTime();
1914
- if (!isNaN(epoch)) { wheres.push('created_at_epoch >= ?'); params.push(epoch); }
1995
+ if (isNaN(epoch)) throw new Error(`Invalid date_from: "${args.date_from}" (use ISO 8601 or YYYY-MM-DD)`);
1996
+ wheres.push('created_at_epoch >= ?');
1997
+ params.push(epoch);
1915
1998
  }
1916
1999
  if (args.date_to) {
1917
2000
  const d = args.date_to.length === 10 ? args.date_to + 'T23:59:59.999Z' : args.date_to;
1918
2001
  const epoch = new Date(d).getTime();
1919
- if (!isNaN(epoch)) { wheres.push('created_at_epoch <= ?'); params.push(epoch); }
2002
+ if (isNaN(epoch)) throw new Error(`Invalid date_to: "${args.date_to}" (use ISO 8601 or YYYY-MM-DD)`);
2003
+ wheres.push('created_at_epoch <= ?');
2004
+ params.push(epoch);
1920
2005
  }
1921
2006
 
1922
2007
  const where = wheres.length > 0 ? 'WHERE ' + wheres.join(' AND ') : '';
1923
2008
  const exportLimit = Math.min(args.limit ?? 200, 1000);
1924
- const rows = db.prepare(`SELECT id, project, type, title, subtitle, narrative, concepts, facts, lesson_learned, importance, files_modified, created_at, created_at_epoch FROM observations ${where} ORDER BY created_at_epoch DESC LIMIT ?`).all(...params, exportLimit);
2009
+ // T3-P2-B: probe limit+1 so we can tell "user hit their own limit with more waiting" from
2010
+ // "user got exactly what existed". Trim to exportLimit before rendering.
2011
+ const probed = db.prepare(`SELECT id, project, type, title, subtitle, narrative, concepts, facts, lesson_learned, importance, files_modified, branch, access_count, memory_session_id, created_at, created_at_epoch FROM observations ${where} ORDER BY created_at_epoch DESC LIMIT ?`).all(...params, exportLimit + 1);
2012
+ const rows = probed.slice(0, exportLimit);
2013
+ const moreAvailable = probed.length > exportLimit;
1925
2014
 
1926
2015
  if (rows.length === 0) return { content: [{ type: 'text', text: 'No observations found matching the criteria.' }] };
1927
2016
 
@@ -1929,7 +2018,7 @@ server.registerTool(
1929
2018
  ? rows.map(r => JSON.stringify(r)).join('\n')
1930
2019
  : JSON.stringify(rows, null, 2);
1931
2020
 
1932
- const cap = rows.length >= exportLimit ? `\nNote: Results capped at ${exportLimit}. Use date_from/date_to or increase limit (max 1000) to export more.` : '';
2021
+ const cap = moreAvailable ? `\nNote: Results capped at ${exportLimit}. Use date_from/date_to or increase limit (max 1000) to export more.` : '';
1933
2022
  return { content: [{ type: 'text', text: `Exported ${rows.length} observations:${cap}\n${output}` }] };
1934
2023
  })
1935
2024
  );
@@ -1988,20 +2077,20 @@ server.registerTool(
1988
2077
  inputSchema: memFtsCheckSchema,
1989
2078
  },
1990
2079
  safeHandler(async (args) => {
2080
+ // T3-P2-C: Zod `action: z.enum(['check','rebuild'])` filters any other value before we
2081
+ // reach this handler, so there's no "Unknown action" fallback to write.
1991
2082
  if (args.action === 'check') {
1992
2083
  const result = checkFTSIntegrity(db);
1993
2084
  return { content: [{ type: 'text', text: result.healthy
1994
2085
  ? 'FTS5 indexes are healthy — all integrity checks passed.'
1995
2086
  : `FTS5 issues found:\n${result.details.join('\n')}` }] };
1996
2087
  }
1997
- if (args.action === 'rebuild') {
1998
- const result = rebuildFTS(db);
1999
- const summary = result.errors.length > 0
2000
- ? `Rebuilt: ${result.rebuilt.join(', ')}. Errors: ${result.errors.join(', ')}`
2001
- : `Successfully rebuilt: ${result.rebuilt.join(', ')}`;
2002
- return { content: [{ type: 'text', text: summary }] };
2003
- }
2004
- return { content: [{ type: 'text', text: `Unknown action: ${args.action}` }], isError: true };
2088
+ // args.action === 'rebuild'
2089
+ const result = rebuildFTS(db);
2090
+ const summary = result.errors.length > 0
2091
+ ? `Rebuilt: ${result.rebuilt.join(', ')}. Errors: ${result.errors.join(', ')}`
2092
+ : `Successfully rebuilt: ${result.rebuilt.join(', ')}`;
2093
+ return { content: [{ type: 'text', text: summary }] };
2005
2094
  })
2006
2095
  );
2007
2096
 
package/tool-schemas.mjs CHANGED
@@ -50,8 +50,8 @@ export const memRecentSchema = {
50
50
  };
51
51
 
52
52
  export const memTimelineSchema = {
53
- anchor: coerceInt.pipe(z.number().int()).optional().describe('Observation ID as center point'),
54
- query: z.string().optional().describe('FTS5 query to auto-find anchor'),
53
+ anchor: coerceInt.pipe(z.number().int()).optional().describe('Observation ID as center point. Takes precedence over query when both are provided.'),
54
+ query: z.string().optional().describe('FTS5 query to auto-find anchor. Ignored when anchor is also given; use one or the other.'),
55
55
  before: coerceInt.pipe(z.number().int().min(0).max(50)).optional().describe('Items before anchor (default 5)'),
56
56
  after: coerceInt.pipe(z.number().int().min(0).max(50)).optional().describe('Items after anchor (default 5)'),
57
57
  project: z.string().optional().describe('Filter by project'),
@@ -96,18 +96,22 @@ export const memOptimizeSchema = {
96
96
  .describe('Which optimization tasks to run (default: all)'),
97
97
  max_items: coerceInt.pipe(z.number().int().min(1).max(100)).optional().default(15)
98
98
  .describe('Maximum LLM calls across all tasks (default: 15)'),
99
+ scope: z.enum(['narrow', 'wide']).optional().default('narrow')
100
+ .describe("Re-enrich scope: narrow=narrative-only candidates (default); wide=R-7 backfill (bugfix/refactor/feature/decision with narrative but lesson_learned='none'). CLI parity: --scope wide."),
99
101
  };
100
102
 
101
103
  export const memMaintainSchema = {
102
104
  action: z.enum(['scan', 'execute']).describe('scan=analyze candidates, execute=apply changes'),
103
105
  operations: z.array(z.enum(['dedup', 'decay', 'cleanup', 'boost', 'purge_stale', 'rebuild_vectors'])).optional()
104
- .describe('Operations: dedup=find/merge duplicate observations, decay=reduce importance of old low-value obs, cleanup=remove orphaned records, boost=promote frequently-accessed obs, purge_stale=delete decayed obs (needs confirm via scan first), rebuild_vectors=rebuild TF-IDF vocabulary and all observation vectors'),
106
+ .describe('Operations: dedup=find/merge duplicate observations, decay=reduce importance of old low-value obs, cleanup=remove orphaned records, boost=promote frequently-accessed obs, purge_stale=DELETE pending-purge obs older than retain_days (requires confirm=true; first call previews), rebuild_vectors=rebuild TF-IDF vocabulary and all observation vectors'),
105
107
  merge_ids: z.preprocess(
106
108
  (v) => Array.isArray(v) ? v.map(g => Array.isArray(g) ? g.map(x => typeof x === 'string' ? parseInt(x, 10) : x) : g) : v,
107
109
  z.array(z.array(z.number().int()).min(2))
108
110
  ).optional().describe('For dedup: [[keepId, removeId1, removeId2], ...] — first ID in each group is kept'),
109
111
  retain_days: coerceInt.pipe(z.number().int().min(7).max(365)).optional()
110
112
  .describe('For purge_stale: keep observations newer than N days (default 30)'),
113
+ confirm: coerceBool.optional()
114
+ .describe('Required for destructive ops in `execute` mode (currently: purge_stale). Omit/false → dry-run preview; true → actually delete.'),
111
115
  project: z.string().optional().describe('Filter by project'),
112
116
  };
113
117