clementine-agent 1.18.20 → 1.18.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +17 -0
  2. package/dist/agent/action-enforcer.d.ts +29 -0
  3. package/dist/agent/action-enforcer.js +120 -0
  4. package/dist/agent/assistant.d.ts +12 -0
  5. package/dist/agent/assistant.js +165 -31
  6. package/dist/agent/auto-update.js +46 -2
  7. package/dist/agent/local-turn.d.ts +16 -0
  8. package/dist/agent/local-turn.js +54 -1
  9. package/dist/agent/route-classifier.d.ts +1 -0
  10. package/dist/agent/route-classifier.js +30 -3
  11. package/dist/agent/toolsets.d.ts +14 -0
  12. package/dist/agent/toolsets.js +68 -0
  13. package/dist/brain/ingestion-pipeline.d.ts +7 -0
  14. package/dist/brain/ingestion-pipeline.js +107 -21
  15. package/dist/channels/discord.js +38 -7
  16. package/dist/channels/telegram.js +5 -6
  17. package/dist/cli/dashboard.js +56 -6
  18. package/dist/cli/index.js +174 -0
  19. package/dist/cli/ingest.js +8 -2
  20. package/dist/gateway/context-hygiene.d.ts +17 -0
  21. package/dist/gateway/context-hygiene.js +31 -0
  22. package/dist/gateway/heartbeat-scheduler.d.ts +20 -0
  23. package/dist/gateway/heartbeat-scheduler.js +27 -10
  24. package/dist/gateway/router.d.ts +7 -0
  25. package/dist/gateway/router.js +303 -9
  26. package/dist/gateway/turn-ledger.d.ts +32 -0
  27. package/dist/gateway/turn-ledger.js +55 -0
  28. package/dist/memory/embeddings.d.ts +2 -0
  29. package/dist/memory/embeddings.js +8 -1
  30. package/dist/memory/store.d.ts +88 -1
  31. package/dist/memory/store.js +349 -18
  32. package/dist/memory/write-queue.d.ts +16 -0
  33. package/dist/memory/write-queue.js +5 -0
  34. package/dist/tools/shared.d.ts +89 -0
  35. package/dist/types.d.ts +11 -0
  36. package/package.json +1 -1
  37. package/scripts/postinstall.js +56 -6
@@ -46,6 +46,35 @@ export class MemoryStore {
  const conf = Math.max(0, Math.min(1, confidence ?? 1));
  return conf >= 1 ? 1 : 0.5 + 0.5 * conf;
  }
+ static formatBytes(n) {
+ if (!Number.isFinite(n) || n < 0)
+ return '0 B';
+ if (n < 1024)
+ return `${n} B`;
+ if (n < 1024 * 1024)
+ return `${(n / 1024).toFixed(1)} KB`;
+ if (n < 1024 * 1024 * 1024)
+ return `${(n / 1024 / 1024).toFixed(1)} MB`;
+ return `${(n / 1024 / 1024 / 1024).toFixed(2)} GB`;
+ }
+ static dirSizeBytes(dir) {
+ if (!existsSync(dir))
+ return 0;
+ let total = 0;
+ try {
+ for (const entry of readdirSync(dir, { withFileTypes: true })) {
+ const full = path.join(dir, entry.name);
+ if (entry.isDirectory())
+ total += MemoryStore.dirSizeBytes(full);
+ else if (entry.isFile())
+ total += statSync(full).size;
+ }
+ }
+ catch {
+ return total;
+ }
+ return total;
+ }
  // ── Lifecycle ──────────────────────────────────────────────────────
  /**
  * Create the database and schema if needed.
@@ -126,6 +155,29 @@ export class MemoryStore {
  CREATE INDEX IF NOT EXISTS idx_transcripts_session ON transcripts(session_key);
  CREATE INDEX IF NOT EXISTS idx_transcripts_created ON transcripts(created_at);

+ CREATE VIRTUAL TABLE IF NOT EXISTS transcripts_fts USING fts5(
+ session_key, role, content, model, created_at,
+ content='transcripts', content_rowid='id',
+ tokenize='porter unicode61'
+ );
+
+ CREATE TRIGGER IF NOT EXISTS transcripts_ai AFTER INSERT ON transcripts BEGIN
+ INSERT INTO transcripts_fts(rowid, session_key, role, content, model, created_at)
+ VALUES (new.id, new.session_key, new.role, new.content, new.model, new.created_at);
+ END;
+
+ CREATE TRIGGER IF NOT EXISTS transcripts_ad AFTER DELETE ON transcripts BEGIN
+ INSERT INTO transcripts_fts(transcripts_fts, rowid, session_key, role, content, model, created_at)
+ VALUES ('delete', old.id, old.session_key, old.role, old.content, old.model, old.created_at);
+ END;
+
+ CREATE TRIGGER IF NOT EXISTS transcripts_au AFTER UPDATE ON transcripts BEGIN
+ INSERT INTO transcripts_fts(transcripts_fts, rowid, session_key, role, content, model, created_at)
+ VALUES ('delete', old.id, old.session_key, old.role, old.content, old.model, old.created_at);
+ INSERT INTO transcripts_fts(rowid, session_key, role, content, model, created_at)
+ VALUES (new.id, new.session_key, new.role, new.content, new.model, new.created_at);
+ END;
+
  CREATE TABLE IF NOT EXISTS session_summaries (
  id INTEGER PRIMARY KEY,
  session_key TEXT NOT NULL,
@@ -136,7 +188,32 @@ export class MemoryStore {

  CREATE INDEX IF NOT EXISTS idx_session_summaries_key ON session_summaries(session_key);
  CREATE INDEX IF NOT EXISTS idx_session_summaries_created ON session_summaries(created_at);
+
+ CREATE TABLE IF NOT EXISTS session_lineage (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ session_key TEXT NOT NULL,
+ parent_session_id TEXT,
+ child_session_id TEXT,
+ reason TEXT NOT NULL,
+ summary TEXT NOT NULL,
+ exchange_count INTEGER DEFAULT 0,
+ created_at TEXT DEFAULT (datetime('now'))
+ );
+
+ CREATE INDEX IF NOT EXISTS idx_session_lineage_key ON session_lineage(session_key, created_at DESC);
+ CREATE INDEX IF NOT EXISTS idx_session_lineage_parent ON session_lineage(parent_session_id);
  `);
+ try {
+ this.conn.exec(`
+ INSERT INTO transcripts_fts(rowid, session_key, role, content, model, created_at)
+ SELECT id, session_key, role, content, model, created_at
+ FROM transcripts
+ WHERE id NOT IN (SELECT rowid FROM transcripts_fts)
+ `);
+ }
+ catch {
+ // FTS backfill is best-effort; triggers keep new rows indexed.
+ }
  // ── Migrations ────────────────────────────────────────────────
  // Add salience column to chunks
  try {
@@ -373,6 +450,29 @@ export class MemoryStore {
  );
  CREATE INDEX IF NOT EXISTS idx_extractions_session ON memory_extractions(session_key);
  CREATE INDEX IF NOT EXISTS idx_extractions_status ON memory_extractions(status);
+ `);
+ // Memory event ledger — compact proof that each major input stream has
+ // crossed into the memory system. This is intentionally smaller than the
+ // source payload tables; it powers health checks and lets us spot gaps
+ // like "transcripts are saved but never indexed".
+ this.conn.exec(`
+ CREATE TABLE IF NOT EXISTS memory_events (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ source_type TEXT NOT NULL,
+ source_id INTEGER,
+ session_key TEXT,
+ agent_slug TEXT,
+ content_hash TEXT NOT NULL,
+ content_preview TEXT NOT NULL,
+ indexed_at TEXT,
+ created_at TEXT NOT NULL DEFAULT (datetime('now'))
+ );
+ CREATE INDEX IF NOT EXISTS idx_memory_events_source
+ ON memory_events(source_type, source_id);
+ CREATE INDEX IF NOT EXISTS idx_memory_events_session
+ ON memory_events(session_key, created_at DESC);
+ CREATE INDEX IF NOT EXISTS idx_memory_events_created
+ ON memory_events(created_at DESC);
  `);
  this.conn.exec(`
  CREATE TABLE IF NOT EXISTS memory_promotion_candidates (
@@ -714,6 +814,14 @@ export class MemoryStore {
  CREATE INDEX IF NOT EXISTS idx_ingestion_runs_source ON ingestion_runs(source_slug, started_at DESC);
  CREATE INDEX IF NOT EXISTS idx_ingestion_runs_status ON ingestion_runs(status);
  `);
+ try {
+ this.conn.exec('ALTER TABLE ingestion_runs ADD COLUMN records_unchanged INTEGER NOT NULL DEFAULT 0');
+ }
+ catch { /* already exists */ }
+ try {
+ this.conn.exec('ALTER TABLE ingestion_runs ADD COLUMN recall_check_status TEXT DEFAULT NULL');
+ }
+ catch { /* already exists */ }
  // Ingested rows — structured overlay on chunks for SQL aggregates.
  // chunk_id FK makes this an INDEX on top of chunks, not a silo.
  // Per-source dynamic columns are added via ALTER TABLE during
@@ -772,6 +880,22 @@ export class MemoryStore {
  this.conn.exec('ALTER TABLE recall_traces ADD COLUMN match_types TEXT DEFAULT NULL');
  }
  catch { /* column already exists */ }
+ try {
+ this.conn.exec('ALTER TABLE recall_traces ADD COLUMN backend_counts TEXT DEFAULT NULL');
+ }
+ catch { /* column already exists */ }
+ try {
+ this.conn.exec('ALTER TABLE recall_traces ADD COLUMN evidence_json TEXT DEFAULT NULL');
+ }
+ catch { /* column already exists */ }
+ try {
+ this.conn.exec('ALTER TABLE recall_traces ADD COLUMN confidence REAL DEFAULT NULL');
+ }
+ catch { /* column already exists */ }
+ try {
+ this.conn.exec('ALTER TABLE recall_traces ADD COLUMN empty_reason TEXT DEFAULT NULL');
+ }
+ catch { /* column already exists */ }
  // Dense neural embeddings (transformers.js — arctic-embed-m by default).
  // Parallel to the existing chunks.embedding (TF-IDF, 512-dim) so we can
  // backfill incrementally and fall back gracefully if the dense model
@@ -1809,7 +1933,24 @@ export class MemoryStore {
  const finalResults = mmrRerank(deduplicateResults(merged), 0.7, limit + recencyLimit);
  // 5. Log recall trace if session context provided. Skipped for internal
  // calls (e.g. consolidation, dedup checks) by passing skipTrace=true.
- if (sessionKey && !skipTrace && finalResults.length > 0) {
+ if (sessionKey && !skipTrace) {
+ const backendCounts = {
+ fts: ftsResults.length,
+ vector: vectorResults.length,
+ graph: graphResults.length,
+ recency: recentResults.length,
+ };
+ const evidence = finalResults.slice(0, 8).map((r) => ({
+ chunkId: r.chunkId,
+ matchType: r.matchType,
+ score: r.score,
+ sourceFile: r.sourceFile,
+ section: r.section,
+ }));
+ const topScore = finalResults[0]?.score ?? 0;
+ const confidence = finalResults.length > 0
+ ? Math.max(0.1, Math.min(1, topScore / (Math.abs(topScore) + 1)))
+ : 0;
  this.logRecallTrace({
  sessionKey,
  messageId: messageId ?? null,
@@ -1818,6 +1959,11 @@ export class MemoryStore {
  scores: finalResults.map(r => r.score),
  agentSlug: agentSlug ?? null,
  matchTypes: finalResults.map(r => r.matchType),
+ backendCounts,
+ evidence,
+ confidence,
+ emptyReason: finalResults.length === 0 ? 'no_backend_matches' : null,
+ allowEmpty: finalResults.length === 0,
  });
  }
  return finalResults;
@@ -1851,7 +1997,7 @@ export class MemoryStore {
  * Non-fatal: errors are swallowed so retrieval never fails on logging issues.
  */
  logRecallTrace(opts) {
- if (opts.chunkIds.length === 0)
+ if (opts.chunkIds.length === 0 && !opts.allowEmpty)
  return;
  if (this.writeQueue) {
  this.writeQueue.enqueue({
@@ -1863,6 +2009,11 @@ export class MemoryStore {
  scores: [...opts.scores],
  agentSlug: opts.agentSlug ?? null,
  matchTypes: opts.matchTypes ? [...opts.matchTypes] : undefined,
+ backendCounts: opts.backendCounts ?? undefined,
+ evidence: opts.evidence ? [...opts.evidence] : undefined,
+ confidence: opts.confidence ?? undefined,
+ emptyReason: opts.emptyReason ?? undefined,
+ allowEmpty: opts.allowEmpty,
  });
  return;
  }
@@ -1870,11 +2021,13 @@ export class MemoryStore {
  }
  /** Internal sync recall_trace insert. Called by the WriteQueue. */
  _logRecallTraceSync(opts) {
- if (opts.chunkIds.length === 0)
+ if (opts.chunkIds.length === 0 && !opts.allowEmpty)
  return;
  try {
- this.conn.prepare(`INSERT INTO recall_traces (session_key, message_id, query, chunk_ids, scores, agent_slug, match_types)
- VALUES (?, ?, ?, ?, ?, ?, ?)`).run(opts.sessionKey, opts.messageId ?? null, opts.query, JSON.stringify(opts.chunkIds), JSON.stringify(opts.scores), opts.agentSlug ?? null, opts.matchTypes ? JSON.stringify(opts.matchTypes) : null);
+ this.conn.prepare(`INSERT INTO recall_traces
+ (session_key, message_id, query, chunk_ids, scores, agent_slug, match_types,
+ backend_counts, evidence_json, confidence, empty_reason)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`).run(opts.sessionKey, opts.messageId ?? null, opts.query, JSON.stringify(opts.chunkIds), JSON.stringify(opts.scores), opts.agentSlug ?? null, opts.matchTypes ? JSON.stringify(opts.matchTypes) : null, opts.backendCounts ? JSON.stringify(opts.backendCounts) : null, opts.evidence ? JSON.stringify(opts.evidence) : null, opts.confidence ?? null, opts.emptyReason ?? null);
  }
  catch {
  // Non-fatal — recall trace logging never breaks retrieval
@@ -1885,7 +2038,8 @@ export class MemoryStore {
  * Used by the dashboard chat panel to show "what memory powered this answer".
  */
  getRecentRecallTraces(sessionKey, limit = 50) {
- const rows = this.conn.prepare(`SELECT id, message_id, query, chunk_ids, scores, retrieved_at
+ const rows = this.conn.prepare(`SELECT id, message_id, query, chunk_ids, scores, backend_counts,
+ evidence_json, confidence, empty_reason, retrieved_at
  FROM recall_traces
  WHERE session_key = ?
  ORDER BY retrieved_at DESC, id DESC
@@ -1896,6 +2050,10 @@ export class MemoryStore {
  query: r.query,
  chunkIds: this._parseJsonArray(r.chunk_ids),
  scores: this._parseJsonArray(r.scores),
+ backendCounts: this._parseJsonObject(r.backend_counts),
+ evidence: this._parseJsonArray(r.evidence_json ?? '[]'),
+ confidence: r.confidence,
+ emptyReason: r.empty_reason,
  retrievedAt: r.retrieved_at,
  }));
  }
@@ -1904,7 +2062,8 @@ export class MemoryStore {
  * Used for the dashboard "view sources" expansion on a single message.
  */
  getRecallTrace(traceId) {
- const trace = this.conn.prepare(`SELECT id, session_key, message_id, query, chunk_ids, scores, retrieved_at
+ const trace = this.conn.prepare(`SELECT id, session_key, message_id, query, chunk_ids, scores,
+ backend_counts, evidence_json, confidence, empty_reason, retrieved_at
  FROM recall_traces WHERE id = ?`).get(traceId);
  if (!trace)
  return null;
@@ -1925,6 +2084,10 @@ export class MemoryStore {
  messageId: trace.message_id,
  query: trace.query,
  retrievedAt: trace.retrieved_at,
+ backendCounts: this._parseJsonObject(trace.backend_counts),
+ evidence: this._parseJsonArray(trace.evidence_json ?? '[]'),
+ confidence: trace.confidence,
+ emptyReason: trace.empty_reason,
  chunks: ordered,
  };
  }
@@ -2014,6 +2177,17 @@ export class MemoryStore {
  return [];
  }
  }
+ _parseJsonObject(json) {
+ if (!json)
+ return null;
+ try {
+ const parsed = JSON.parse(json);
+ return parsed && typeof parsed === 'object' && !Array.isArray(parsed) ? parsed : null;
+ }
+ catch {
+ return null;
+ }
+ }
  // ── User Mental Model ──────────────────────────────────────────────
  //
  // MemGPT-style core memory: a small, always-in-context surface for
@@ -2446,7 +2620,15 @@ export class MemoryStore {
  if (!this._stmtInsertTranscript) {
  this._stmtInsertTranscript = this.conn.prepare('INSERT INTO transcripts (session_key, role, content, model) VALUES (?, ?, ?, ?)');
  }
- this._stmtInsertTranscript.run(sessionKey, role, content, model);
+ const info = this._stmtInsertTranscript.run(sessionKey, role, content, model);
+ this.recordMemoryEvent({
+ sourceType: 'transcript',
+ sourceId: info.lastInsertRowid,
+ sessionKey,
+ agentSlug: null,
+ content: `${role}: ${content}`,
+ indexed: true,
+ });
  }
  /**
  * Get all turns for a given session, ordered chronologically.
@@ -2511,8 +2693,35 @@ export class MemoryStore {
  * Search transcripts by keyword. Returns matching turns with context.
  */
  searchTranscripts(query, limit = 20, sessionKey = '') {
- const queryLower = `%${query.toLowerCase()}%`;
+ const sanitized = MemoryStore.sanitizeFtsQuery(query);
  let rows;
+ if (sanitized) {
+ try {
+ const params = [sanitized];
+ let sql = `SELECT t.session_key, t.role, t.content, t.model, t.created_at
+ FROM transcripts_fts f
+ JOIN transcripts t ON t.id = f.rowid
+ WHERE transcripts_fts MATCH ?`;
+ if (sessionKey) {
+ sql += ' AND t.session_key = ?';
+ params.push(sessionKey);
+ }
+ sql += ' ORDER BY t.created_at DESC, t.id DESC LIMIT ?';
+ params.push(limit);
+ rows = this.conn.prepare(sql).all(...params);
+ return rows.map((row) => ({
+ sessionKey: row.session_key,
+ role: row.role,
+ content: row.content.slice(0, 2000),
+ model: row.model,
+ createdAt: row.created_at,
+ }));
+ }
+ catch {
+ // Fall back to LIKE for malformed FTS queries or legacy SQLite builds.
+ }
+ }
+ const queryLower = `%${query.toLowerCase()}%`;
  if (sessionKey) {
  rows = this.conn
  .prepare(`SELECT session_key, role, content, model, created_at
@@ -2561,6 +2770,49 @@ export class MemoryStore {
  createdAt: row.created_at,
  }));
  }
+ /**
+ * Get recent session summaries scoped to one conversation.
+ */
+ getRecentSummariesForSession(sessionKey, limit = 3) {
+ const rows = this.conn
+ .prepare(`SELECT session_key, summary, exchange_count, created_at
+ FROM session_summaries
+ WHERE session_key = ?
+ ORDER BY created_at DESC
+ LIMIT ?`)
+ .all(sessionKey, limit);
+ return rows.map((row) => ({
+ sessionKey: row.session_key,
+ summary: row.summary,
+ exchangeCount: row.exchange_count,
+ createdAt: row.created_at,
+ }));
+ }
+ recordSessionLineage(input) {
+ this.conn
+ .prepare(`INSERT INTO session_lineage
+ (session_key, parent_session_id, child_session_id, reason, summary, exchange_count)
+ VALUES (?, ?, ?, ?, ?, ?)`)
+ .run(input.sessionKey, input.parentSessionId ?? null, input.childSessionId ?? null, input.reason, input.summary, input.exchangeCount ?? 0);
+ }
+ getSessionLineage(sessionKey, limit = 5) {
+ const rows = this.conn
+ .prepare(`SELECT session_key, parent_session_id, child_session_id, reason, summary, exchange_count, created_at
+ FROM session_lineage
+ WHERE session_key = ?
+ ORDER BY created_at DESC
+ LIMIT ?`)
+ .all(sessionKey, limit);
+ return rows.map((row) => ({
+ sessionKey: row.session_key,
+ parentSessionId: row.parent_session_id,
+ childSessionId: row.child_session_id,
+ reason: row.reason,
+ summary: row.summary,
+ exchangeCount: row.exchange_count,
+ createdAt: row.created_at,
+ }));
+ }
  // ── Salience Tracking ─────────────────────────────────────────────
  /**
  * Record that chunks were accessed (retrieved/displayed). Routes through
@@ -2800,7 +3052,38 @@ export class MemoryStore {
  const stmt = this.conn.prepare(`INSERT INTO tool_artifacts (session_key, agent_slug, tool_name, summary, content, tags)
  VALUES (?, ?, ?, ?, ?, ?)`);
  const info = stmt.run(input.sessionKey ?? null, input.agentSlug ?? null, input.toolName, input.summary, input.content, input.tags ?? '');
- return info.lastInsertRowid;
+ const id = info.lastInsertRowid;
+ this.recordMemoryEvent({
+ sourceType: 'artifact',
+ sourceId: id,
+ sessionKey: input.sessionKey ?? null,
+ agentSlug: input.agentSlug ?? null,
+ content: `${input.toolName}\n${input.summary}\n${input.content}`,
+ indexed: true,
+ });
+ return id;
+ }
+ recordMemoryEvent(input) {
+ try {
+ const content = String(input.content ?? '');
+ const contentHash = createHash('sha256').update(content).digest('hex').slice(0, 16);
+ const preview = content.replace(/\s+/g, ' ').trim().slice(0, 500);
+ this.conn.prepare(`INSERT INTO memory_events
+ (source_type, source_id, session_key, agent_slug, content_hash, content_preview, indexed_at)
+ VALUES (?, ?, ?, ?, ?, ?, ${input.indexed === false ? 'NULL' : "datetime('now')"})`).run(input.sourceType, input.sourceId ?? null, input.sessionKey ?? null, input.agentSlug ?? null, contentHash, preview);
+ }
+ catch {
+ // Ledger writes are observability only; never fail the source write.
+ }
+ }
+ getMemoryEventStats() {
+ const total = this.conn.prepare('SELECT COUNT(*) AS c FROM memory_events').get().c;
+ const indexed = this.conn.prepare('SELECT COUNT(*) AS c FROM memory_events WHERE indexed_at IS NOT NULL').get().c;
+ const bySourceType = this.conn
+ .prepare(`SELECT source_type AS sourceType, COUNT(*) AS count
+ FROM memory_events GROUP BY source_type ORDER BY count DESC`)
+ .all();
+ return { total, indexed, bySourceType };
  }
  /**
  * Search artifacts via FTS over summary + content + tool_name + tags.
@@ -2985,6 +3268,14 @@ export class MemoryStore {
  sets.push('records_failed = ?');
  params.push(patch.recordsFailed);
  }
+ if (patch.recordsUnchanged !== undefined) {
+ sets.push('records_unchanged = ?');
+ params.push(patch.recordsUnchanged);
+ }
+ if (patch.recallCheckStatus !== undefined) {
+ sets.push('recall_check_status = ?');
+ params.push(patch.recallCheckStatus);
+ }
  if (patch.overviewNotePath !== undefined) {
  sets.push('overview_note_path = ?');
  params.push(patch.overviewNotePath);
@@ -3007,20 +3298,22 @@ export class MemoryStore {
  }
  listIngestionRuns(sourceSlug, limit = 50) {
  let sql = `SELECT id, source_slug, started_at, finished_at, records_in, records_written,
- records_skipped, records_failed, overview_note_path, errors_json, status
+ records_skipped, records_failed, records_unchanged, recall_check_status,
+ overview_note_path, errors_json, status
  FROM ingestion_runs`;
  const params = [];
  if (sourceSlug) {
  sql += ` WHERE source_slug = ?`;
  params.push(sourceSlug);
  }
- sql += ` ORDER BY started_at DESC LIMIT ?`;
+ sql += ` ORDER BY started_at DESC, id DESC LIMIT ?`;
  params.push(limit);
  const rows = this.conn.prepare(sql).all(...params);
  return rows.map((r) => ({
  id: r.id, sourceSlug: r.source_slug, startedAt: r.started_at, finishedAt: r.finished_at,
  recordsIn: r.records_in, recordsWritten: r.records_written,
  recordsSkipped: r.records_skipped, recordsFailed: r.records_failed,
+ recordsUnchanged: r.records_unchanged ?? 0, recallCheckStatus: r.recall_check_status ?? null,
  overviewNotePath: r.overview_note_path, errorsJson: r.errors_json, status: r.status,
  }));
  }
@@ -3165,6 +3458,7 @@ export class MemoryStore {
  // 90-day window is enough to debug "why did the agent answer that way last
  // week" without letting the table grow unbounded.
  const recallRetention = opts.recallTraceRetentionDays ?? 90;
+ const memoryEventRetention = opts.memoryEventRetentionDays ?? 180;
  // Prune stale episodic chunks (not vault-sourced content)
  const episodicResult = this.conn
  .prepare(`DELETE FROM chunks
@@ -3211,6 +3505,16 @@ export class MemoryStore {
  catch {
  // Table may not exist on first boot before initialize() runs the new schema
  }
+ let memoryEventsPruned = 0;
+ try {
+ const memoryEventsResult = this.conn
+ .prepare(`DELETE FROM memory_events WHERE created_at < datetime('now', ?)`)
+ .run(`-${memoryEventRetention} days`);
+ memoryEventsPruned = memoryEventsResult.changes;
+ }
+ catch {
+ // Table may not exist on first boot before initialize() runs the new schema
+ }
  return {
  episodicPruned: episodicResult.changes,
  accessLogPruned: accessResult.changes,
@@ -3220,6 +3524,7 @@ export class MemoryStore {
  reflectionsPruned: reflectionsResult.changes,
  usageLogPruned: usageResult.changes,
  recallTracesPruned,
+ memoryEventsPruned,
  };
  }
  // ── Staleness detection ─────────────────────────────────────────
@@ -4517,12 +4822,16 @@ export class MemoryStore {
  markConsolidated(chunkIds) {
  if (chunkIds.length === 0)
  return;
- const placeholders = chunkIds.map(() => '?').join(',');
- this.conn
- .prepare(`UPDATE chunks
- SET consolidated = 1, salience = MAX(salience - 0.3, 0.0)
- WHERE id IN (${placeholders})`)
- .run(...chunkIds);
+ const batchSize = 500;
+ for (let i = 0; i < chunkIds.length; i += batchSize) {
+ const batch = chunkIds.slice(i, i + batchSize);
+ const placeholders = batch.map(() => '?').join(',');
+ this.conn
+ .prepare(`UPDATE chunks
+ SET consolidated = 1, salience = MAX(salience - 0.3, 0.0)
+ WHERE id IN (${placeholders})`)
+ .run(...batch);
+ }
  }
  // ── Autonomy log ───────────────────────────────────────────────────
  _stmtLogAutonomy = null;
@@ -4634,6 +4943,7 @@ export class MemoryStore {
  'transcripts',
  'session_summaries',
  'memory_extractions',
+ 'memory_events',
  'chunk_soft_deletes',
  'chunk_history',
  'sdk_session_entries',
@@ -4660,6 +4970,18 @@ export class MemoryStore {
  .prepare(`SELECT COUNT(*) AS c FROM memory_extractions WHERE status LIKE 'skipped:%' AND extracted_at >= datetime('now', '-30 days')`)
  .get().c,
  };
+ const retrievalProof = {
+ tracesLast7d: recentActivity.recallTracesLast7d,
+ emptyTracesLast7d: this.conn
+ .prepare(`SELECT COUNT(*) AS c FROM recall_traces
+ WHERE retrieved_at >= datetime('now', '-7 days')
+ AND json_array_length(chunk_ids) = 0`)
+ .get().c,
+ tracedChunksLast7d: this.conn
+ .prepare(`SELECT COALESCE(SUM(json_array_length(chunk_ids)), 0) AS c
+ FROM recall_traces WHERE retrieved_at >= datetime('now', '-7 days')`)
+ .get().c,
+ };
  const userModelSlots = this.conn
  .prepare(`SELECT
  COUNT(*) AS total,
@@ -4718,6 +5040,8 @@ export class MemoryStore {
  chunksByCategory: byCategory,
  tableRowCounts,
  recentActivity,
+ retrievalProof,
+ memoryEvents: this.getMemoryEventStats(),
  topCitedLast30d: topCited.map((r) => ({
  chunkId: r.chunk_id,
  sourceFile: r.source_file,
@@ -4752,12 +5076,19 @@ export class MemoryStore {
  FROM chunks WHERE embedding_dense IS NOT NULL
  GROUP BY embedding_dense_model ORDER BY count DESC`)
  .all();
+ const cacheDir = embeddingsModule.denseModelCacheDir();
+ const cacheBytes = MemoryStore.dirSizeBytes(cacheDir);
  return {
  withDense,
  total: chunkAgg.total,
  models,
  currentModel: embeddingsModule.currentDenseModel(),
  ready: embeddingsModule.isDenseReady(),
+ cacheDir,
+ cacheExists: existsSync(cacheDir),
+ cacheBytes,
+ cacheSize: MemoryStore.formatBytes(cacheBytes),
+ installed: cacheBytes >= 1024 * 1024,
  };
  })(),
  };
@@ -33,6 +33,22 @@ export type QueueOp = {
  scores: number[];
  agentSlug: string | null;
  matchTypes?: string[];
+ backendCounts?: {
+ fts: number;
+ vector: number;
+ graph: number;
+ recency: number;
+ } | null;
+ evidence?: Array<{
+ chunkId: number;
+ matchType: string;
+ score: number;
+ sourceFile?: string;
+ section?: string;
+ }>;
+ confidence?: number | null;
+ emptyReason?: string | null;
+ allowEmpty?: boolean;
  } | {
  kind: 'outcome';
  outcomes: Array<{
@@ -137,6 +137,11 @@ export class WriteQueue {
  scores: op.scores,
  agentSlug: op.agentSlug,
  matchTypes: op.matchTypes,
+ backendCounts: op.backendCounts,
+ evidence: op.evidence,
+ confidence: op.confidence,
+ emptyReason: op.emptyReason,
+ allowEmpty: op.allowEmpty,
  });
  break;
  case 'outcome':