@onenomad/engram-mcp 1.0.0 → 2.0.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their public registries.
@@ -1,148 +1,148 @@
1
- /**
2
- * Session-scoped same-source ingest dedup.
3
- *
4
- * Agents in long sessions repeatedly re-read stable files, re-poll
5
- * unchanged endpoints, and re-list the same directories. Each re-ingest
6
- * goes through the full chunk → embed → save pipeline even though the
7
- * content hasn't moved. On CPU embeddings (Engram's default backend),
8
- * a 20K-token re-read can cost 5–15 seconds; multiplied across a
9
- * 50-step agent run, that's significant wall-clock burn.
10
- *
11
- * The existing 0.75-similarity dedup (in `server.ts`'s memory_ingest
12
- * tool handler) catches semantic duplicates, but does so against the
13
- * ENTIRE memory store — and at write-time it actually trips on
14
- * incidentally-similar memories (a fact about Pyre at 0.78 similarity
15
- * to a fact about Engram). It also requires the new content to be
16
- * embedded first, so it doesn't save the embedding cost.
17
- *
18
- * This module is the cheaper, more conservative path:
19
- * - Scoped to a single source identifier (file path, URL, etc.).
20
- * - Hash-based equality (SHA-256 of trimmed content) — exact match
21
- * only, no false positives.
22
- * - In-memory LRU keyed by `source` → list of recent content hashes.
23
- * - Bounded: max 64 sources tracked, max 8 hashes per source.
24
- *
25
- * When an ingest hits the dedup cache, the caller can skip embedding
26
- * AND skip the disk write — return the cached chunk id. The agent's
27
- * conversation history stays internally consistent (same id for same
28
- * content), and the wall-clock cost drops from "embed + save" to a
29
- * map lookup.
30
- *
31
- * Process-scoped intentionally: the persistence layer doesn't need
32
- * to know about this. Engram restart resets the cache — first ingest
33
- * after restart goes through the full pipeline, which is fine.
34
- */
35
- import { createHash } from 'node:crypto';
36
- const MAX_SOURCES = 64;
37
- const MAX_PER_SOURCE = 8;
38
- export class SourceDedupCache {
39
- /** sourceKey → list of recent (hash, chunkId) entries, MRU first. */
40
- bySource = new Map();
41
- /** Cache hit count since boot. Useful for telemetry. */
42
- hits = 0;
43
- /** Cache miss count since boot. */
44
- misses = 0;
45
- /**
46
- * Hash trimmed content. Stable across ingest calls for the same
47
- * payload — that's the whole point.
48
- */
49
- static hashContent(content) {
50
- return createHash('sha256').update(content.trim()).digest('hex');
51
- }
52
- /**
53
- * Look up a (source, content) pair. Returns the cached entry on hit
54
- * or null on miss. Does NOT promote the entry on read — promote on
55
- * write only, so a hit doesn't reset its LRU position.
56
- */
57
- lookup(source, content) {
58
- if (!source) {
59
- // No source key → no scoping → don't dedup. The caller's
60
- // existing semantic-similarity dedup is the right tool for
61
- // unscoped ingests.
62
- this.misses++;
63
- return null;
64
- }
65
- const list = this.bySource.get(source);
66
- if (!list || list.length === 0) {
67
- this.misses++;
68
- return null;
69
- }
70
- const hash = SourceDedupCache.hashContent(content);
71
- const found = list.find((e) => e.hash === hash);
72
- if (found) {
73
- this.hits++;
74
- return found;
75
- }
76
- this.misses++;
77
- return null;
78
- }
79
- /**
80
- * Record a new (source, content, chunkId) entry after a successful
81
- * ingest. LRU-evicts the oldest entry per source when the per-source
82
- * cap is hit, and the oldest source when the overall cap is hit.
83
- */
84
- remember(source, content, chunkId) {
85
- if (!source)
86
- return;
87
- const hash = SourceDedupCache.hashContent(content);
88
- const entry = { hash, chunkId, ts: Date.now() };
89
- let list = this.bySource.get(source);
90
- if (!list) {
91
- list = [];
92
- // Evict the oldest source if we're at the global cap.
93
- if (this.bySource.size >= MAX_SOURCES) {
94
- let oldestKey = null;
95
- let oldestTs = Infinity;
96
- for (const [k, entries] of this.bySource.entries()) {
97
- const recent = entries[0]?.ts ?? 0;
98
- if (recent < oldestTs) {
99
- oldestTs = recent;
100
- oldestKey = k;
101
- }
102
- }
103
- if (oldestKey)
104
- this.bySource.delete(oldestKey);
105
- }
106
- this.bySource.set(source, list);
107
- }
108
- // De-dupe by hash within the per-source list — if the same hash
109
- // is already there, replace it (fresh chunkId), otherwise prepend.
110
- const existing = list.findIndex((e) => e.hash === hash);
111
- if (existing >= 0) {
112
- list[existing] = entry;
113
- }
114
- else {
115
- list.unshift(entry);
116
- if (list.length > MAX_PER_SOURCE)
117
- list.length = MAX_PER_SOURCE;
118
- }
119
- }
120
- /** Drop the entire cache. Useful for tests and explicit resets. */
121
- clear() {
122
- this.bySource.clear();
123
- this.hits = 0;
124
- this.misses = 0;
125
- }
126
- /** Snapshot stats for telemetry / Settings UI. */
127
- stats() {
128
- let entries = 0;
129
- for (const list of this.bySource.values())
130
- entries += list.length;
131
- const total = this.hits + this.misses;
132
- return {
133
- sources: this.bySource.size,
134
- entries,
135
- hits: this.hits,
136
- misses: this.misses,
137
- hitRate: total === 0 ? 0 : this.hits / total,
138
- };
139
- }
140
- }
141
- /**
142
- * Module-level singleton. Engram is process-singleton anyway (one
143
- * server instance per data dir), so a single cache covers the whole
144
- * lifetime. Tests construct fresh `SourceDedupCache` instances; prod
145
- * uses this default.
146
- */
147
- export const sourceDedup = new SourceDedupCache();
1
+ /**
2
+ * Session-scoped same-source ingest dedup.
3
+ *
4
+ * Agents in long sessions repeatedly re-read stable files, re-poll
5
+ * unchanged endpoints, and re-list the same directories. Each re-ingest
6
+ * goes through the full chunk → embed → save pipeline even though the
7
+ * content hasn't moved. On CPU embeddings (Engram's default backend),
8
+ * a 20K-token re-read can cost 5–15 seconds; multiplied across a
9
+ * 50-step agent run, that's significant wall-clock burn.
10
+ *
11
+ * The existing 0.75-similarity dedup (in `server.ts`'s engram-ingest
12
+ * tool handler) catches semantic duplicates, but does so against the
13
+ * ENTIRE memory store — and at write-time it actually trips on
14
+ * incidentally-similar memories (a fact about Pyre at 0.78 similarity
15
+ * to a fact about Engram). It also requires the new content to be
16
+ * embedded first, so it doesn't save the embedding cost.
17
+ *
18
+ * This module is the cheaper, more conservative path:
19
+ * - Scoped to a single source identifier (file path, URL, etc.).
20
+ * - Hash-based equality (SHA-256 of trimmed content) — exact match
21
+ * only, no false positives.
22
+ * - In-memory LRU keyed by `source` → list of recent content hashes.
23
+ * - Bounded: max 64 sources tracked, max 8 hashes per source.
24
+ *
25
+ * When an ingest hits the dedup cache, the caller can skip embedding
26
+ * AND skip the disk write — return the cached chunk id. The agent's
27
+ * conversation history stays internally consistent (same id for same
28
+ * content), and the wall-clock cost drops from "embed + save" to a
29
+ * map lookup.
30
+ *
31
+ * Process-scoped intentionally: the persistence layer doesn't need
32
+ * to know about this. Engram restart resets the cache — first ingest
33
+ * after restart goes through the full pipeline, which is fine.
34
+ */
35
+ import { createHash } from 'node:crypto';
36
+ const MAX_SOURCES = 64;
37
+ const MAX_PER_SOURCE = 8;
38
+ export class SourceDedupCache {
39
+ /** sourceKey → list of recent (hash, chunkId) entries, MRU first. */
40
+ bySource = new Map();
41
+ /** Cache hit count since boot. Useful for telemetry. */
42
+ hits = 0;
43
+ /** Cache miss count since boot. */
44
+ misses = 0;
45
+ /**
46
+ * Hash trimmed content. Stable across ingest calls for the same
47
+ * payload — that's the whole point.
48
+ */
49
+ static hashContent(content) {
50
+ return createHash('sha256').update(content.trim()).digest('hex');
51
+ }
52
+ /**
53
+ * Look up a (source, content) pair. Returns the cached entry on hit
54
+ * or null on miss. Does NOT promote the entry on read — promote on
55
+ * write only, so a hit doesn't reset its LRU position.
56
+ */
57
+ lookup(source, content) {
58
+ if (!source) {
59
+ // No source key → no scoping → don't dedup. The caller's
60
+ // existing semantic-similarity dedup is the right tool for
61
+ // unscoped ingests.
62
+ this.misses++;
63
+ return null;
64
+ }
65
+ const list = this.bySource.get(source);
66
+ if (!list || list.length === 0) {
67
+ this.misses++;
68
+ return null;
69
+ }
70
+ const hash = SourceDedupCache.hashContent(content);
71
+ const found = list.find((e) => e.hash === hash);
72
+ if (found) {
73
+ this.hits++;
74
+ return found;
75
+ }
76
+ this.misses++;
77
+ return null;
78
+ }
79
+ /**
80
+ * Record a new (source, content, chunkId) entry after a successful
81
+ * ingest. LRU-evicts the oldest entry per source when the per-source
82
+ * cap is hit, and the oldest source when the overall cap is hit.
83
+ */
84
+ remember(source, content, chunkId) {
85
+ if (!source)
86
+ return;
87
+ const hash = SourceDedupCache.hashContent(content);
88
+ const entry = { hash, chunkId, ts: Date.now() };
89
+ let list = this.bySource.get(source);
90
+ if (!list) {
91
+ list = [];
92
+ // Evict the oldest source if we're at the global cap.
93
+ if (this.bySource.size >= MAX_SOURCES) {
94
+ let oldestKey = null;
95
+ let oldestTs = Infinity;
96
+ for (const [k, entries] of this.bySource.entries()) {
97
+ const recent = entries[0]?.ts ?? 0;
98
+ if (recent < oldestTs) {
99
+ oldestTs = recent;
100
+ oldestKey = k;
101
+ }
102
+ }
103
+ if (oldestKey)
104
+ this.bySource.delete(oldestKey);
105
+ }
106
+ this.bySource.set(source, list);
107
+ }
108
+ // De-dupe by hash within the per-source list — if the same hash
109
+ // is already there, replace it (fresh chunkId), otherwise prepend.
110
+ const existing = list.findIndex((e) => e.hash === hash);
111
+ if (existing >= 0) {
112
+ list[existing] = entry;
113
+ }
114
+ else {
115
+ list.unshift(entry);
116
+ if (list.length > MAX_PER_SOURCE)
117
+ list.length = MAX_PER_SOURCE;
118
+ }
119
+ }
120
+ /** Drop the entire cache. Useful for tests and explicit resets. */
121
+ clear() {
122
+ this.bySource.clear();
123
+ this.hits = 0;
124
+ this.misses = 0;
125
+ }
126
+ /** Snapshot stats for telemetry / Settings UI. */
127
+ stats() {
128
+ let entries = 0;
129
+ for (const list of this.bySource.values())
130
+ entries += list.length;
131
+ const total = this.hits + this.misses;
132
+ return {
133
+ sources: this.bySource.size,
134
+ entries,
135
+ hits: this.hits,
136
+ misses: this.misses,
137
+ hitRate: total === 0 ? 0 : this.hits / total,
138
+ };
139
+ }
140
+ }
141
+ /**
142
+ * Module-level singleton. Engram is process-singleton anyway (one
143
+ * server instance per data dir), so a single cache covers the whole
144
+ * lifetime. Tests construct fresh `SourceDedupCache` instances; prod
145
+ * uses this default.
146
+ */
147
+ export const sourceDedup = new SourceDedupCache();
148
148
  //# sourceMappingURL=source-dedup.js.map
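The new source-dedup.js module above is meant to be consulted from the ingest path. A minimal sketch of how a caller might wire it in, based only on the lookup/remember/stats API shown in the hunk; the ingestFromSource wrapper and the embedAndSaveChunk pipeline helper are hypothetical, not names from the package:

    import { sourceDedup } from './source-dedup.js';

    // Hypothetical stand-in for the real chunk → embed → save pipeline.
    declare function embedAndSaveChunk(content: string): Promise<string>;

    async function ingestFromSource(source: string, content: string): Promise<string> {
      const cached = sourceDedup.lookup(source, content);
      if (cached) {
        // Same source, byte-identical (trimmed) content this session:
        // skip embedding and the disk write, reuse the earlier chunk id.
        return cached.chunkId;
      }
      const chunkId = await embedAndSaveChunk(content);
      sourceDedup.remember(source, content, chunkId);
      return chunkId;
    }

On a hit the cost drops to a SHA-256 hash plus a map lookup; sourceDedup.stats() exposes the hit/miss counters if the caller wants to surface them.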
@@ -68,13 +68,13 @@ export class PostgresStorageAdapter {
68
68
  // ── Chunks ─────────────────────────────────────────────────────────
69
69
  async saveChunk(chunk) {
70
70
  const { params } = this.chunkInsertParams(chunk);
71
- await this.pool.query(`INSERT INTO chunks (id, tenant_id, embedding, domain, content, metadata, created_at)
72
- VALUES ($1, $2, $3::vector, $4, $5, $6::jsonb, $7)
73
- ON CONFLICT (id) DO UPDATE SET
74
- tenant_id = EXCLUDED.tenant_id,
75
- embedding = EXCLUDED.embedding,
76
- domain = EXCLUDED.domain,
77
- content = EXCLUDED.content,
71
+ await this.pool.query(`INSERT INTO chunks (id, tenant_id, embedding, domain, content, metadata, created_at)
72
+ VALUES ($1, $2, $3::vector, $4, $5, $6::jsonb, $7)
73
+ ON CONFLICT (id) DO UPDATE SET
74
+ tenant_id = EXCLUDED.tenant_id,
75
+ embedding = EXCLUDED.embedding,
76
+ domain = EXCLUDED.domain,
77
+ content = EXCLUDED.content,
78
78
  metadata = EXCLUDED.metadata`, params);
79
79
  }
80
80
  async saveChunks(chunks) {
@@ -92,8 +92,8 @@ export class PostgresStorageAdapter {
92
92
  values.push(`($${++p}, $${++p}, $${++p}::vector, $${++p}, $${++p}, $${++p}::jsonb, $${++p})`);
93
93
  params.push(...row);
94
94
  }
95
- await this.pool.query(`INSERT INTO chunks (id, tenant_id, embedding, domain, content, metadata, created_at)
96
- VALUES ${values.join(', ')}
95
+ await this.pool.query(`INSERT INTO chunks (id, tenant_id, embedding, domain, content, metadata, created_at)
96
+ VALUES ${values.join(', ')}
97
97
  ON CONFLICT (id) DO NOTHING`, params);
98
98
  }
99
99
  chunkInsertParams(chunk) {
@@ -139,9 +139,9 @@ export class PostgresStorageAdapter {
139
139
  };
140
140
  }
141
141
  async getChunk(id) {
142
- const { rows } = await this.pool.query(`SELECT id, domain, content, metadata, embedding::text AS embedding, created_at
143
- FROM chunks
144
- WHERE tenant_id = $1 AND id = $2
142
+ const { rows } = await this.pool.query(`SELECT id, domain, content, metadata, embedding::text AS embedding, created_at
143
+ FROM chunks
144
+ WHERE tenant_id = $1 AND id = $2
145
145
  LIMIT 1`, [this.tenantId, id]);
146
146
  return rows[0] ? pgRowToChunk(rows[0], this.embeddingDim) : null;
147
147
  }
@@ -178,8 +178,8 @@ export class PostgresStorageAdapter {
178
178
  params.push(JSON.stringify([opts.tag]));
179
179
  conds.push(`metadata->'tags' @> $${params.length}::jsonb`);
180
180
  }
181
- const { rows } = await this.pool.query(`SELECT id, domain, content, metadata, embedding::text AS embedding, created_at
182
- FROM chunks
181
+ const { rows } = await this.pool.query(`SELECT id, domain, content, metadata, embedding::text AS embedding, created_at
182
+ FROM chunks
183
183
  WHERE ${conds.join(' AND ')}`, params);
184
184
  return rows.map((r) => pgRowToChunk(r, this.embeddingDim));
185
185
  }
@@ -212,11 +212,11 @@ export class PostgresStorageAdapter {
212
212
  const translated = translateFilter(filter);
213
213
  extra = ` AND (${translated})`;
214
214
  }
215
- const { rows } = await this.pool.query(`SELECT id, domain, content, metadata, embedding::text AS embedding, created_at,
216
- embedding <=> $2::vector AS distance
217
- FROM chunks
218
- WHERE tenant_id = $1${extra}
219
- ORDER BY embedding <=> $2::vector
215
+ const { rows } = await this.pool.query(`SELECT id, domain, content, metadata, embedding::text AS embedding, created_at,
216
+ embedding <=> $2::vector AS distance
217
+ FROM chunks
218
+ WHERE tenant_id = $1${extra}
219
+ ORDER BY embedding <=> $2::vector
220
220
  LIMIT $3`, params);
221
221
  return rows.map((row) => ({
222
222
  chunk: pgRowToChunk(row, this.embeddingDim),
@@ -238,7 +238,7 @@ export class PostgresStorageAdapter {
238
238
  }
239
239
  // ── Daily Logs ────────────────────────────────────────────────────
240
240
  async appendDailyEntry(date, entry) {
241
- await this.pool.query(`INSERT INTO daily_logs (date, tenant_id, entry, created_at)
241
+ await this.pool.query(`INSERT INTO daily_logs (date, tenant_id, entry, created_at)
242
242
  VALUES ($1::date, $2, $3::jsonb, NOW())`, [
243
243
  date,
244
244
  this.tenantId,
@@ -251,9 +251,9 @@ export class PostgresStorageAdapter {
251
251
  ]);
252
252
  }
253
253
  async getDailyLogs(daysBack) {
254
- const { rows } = await this.pool.query(`SELECT date::text AS date, entry
255
- FROM daily_logs
256
- WHERE tenant_id = $1 AND date >= (CURRENT_DATE - ($2 || ' days')::interval)
254
+ const { rows } = await this.pool.query(`SELECT date::text AS date, entry
255
+ FROM daily_logs
256
+ WHERE tenant_id = $1 AND date >= (CURRENT_DATE - ($2 || ' days')::interval)
257
257
  ORDER BY date, created_at`, [this.tenantId, String(daysBack)]);
258
258
  const grouped = new Map();
259
259
  for (const r of rows) {
@@ -271,8 +271,8 @@ export class PostgresStorageAdapter {
271
271
  }
272
272
  // ── Procedural Rules ──────────────────────────────────────────────
273
273
  async saveRule(rule) {
274
- await this.pool.query(`INSERT INTO rules (id, tenant_id, rule, created_at)
275
- VALUES ($1, $2, $3::jsonb, $4)
274
+ await this.pool.query(`INSERT INTO rules (id, tenant_id, rule, created_at)
275
+ VALUES ($1, $2, $3::jsonb, $4)
276
276
  ON CONFLICT (id) DO UPDATE SET rule = EXCLUDED.rule, tenant_id = EXCLUDED.tenant_id`, [rule.id, this.tenantId, JSON.stringify(rule), rule.createdAt]);
277
277
  }
278
278
  async getRules() {
@@ -286,15 +286,15 @@ export class PostgresStorageAdapter {
286
286
  }
287
287
  // ── Knowledge Triples ────────────────────────────────────────────
288
288
  async saveTriple(triple) {
289
- await this.pool.query(`INSERT INTO knowledge_triples
290
- (id, tenant_id, subject, predicate, object, source_id, invalidated_at, created_at)
291
- VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
292
- ON CONFLICT (id) DO UPDATE SET
293
- tenant_id = EXCLUDED.tenant_id,
294
- subject = EXCLUDED.subject,
295
- predicate = EXCLUDED.predicate,
296
- object = EXCLUDED.object,
297
- source_id = EXCLUDED.source_id,
289
+ await this.pool.query(`INSERT INTO knowledge_triples
290
+ (id, tenant_id, subject, predicate, object, source_id, invalidated_at, created_at)
291
+ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
292
+ ON CONFLICT (id) DO UPDATE SET
293
+ tenant_id = EXCLUDED.tenant_id,
294
+ subject = EXCLUDED.subject,
295
+ predicate = EXCLUDED.predicate,
296
+ object = EXCLUDED.object,
297
+ source_id = EXCLUDED.source_id,
298
298
  invalidated_at = EXCLUDED.invalidated_at`, [
299
299
  triple.id,
300
300
  this.tenantId,
@@ -328,30 +328,30 @@ export class PostgresStorageAdapter {
328
328
  }
329
329
  if (opts?.activeOnly)
330
330
  conds.push(`invalidated_at IS NULL`);
331
- const { rows } = await this.pool.query(`SELECT id, subject, predicate, object, source_id, invalidated_at, created_at
332
- FROM knowledge_triples
331
+ const { rows } = await this.pool.query(`SELECT id, subject, predicate, object, source_id, invalidated_at, created_at
332
+ FROM knowledge_triples
333
333
  WHERE ${conds.join(' AND ')}`, params);
334
334
  return rows.map(pgRowToTriple);
335
335
  }
336
336
  async invalidateTriple(id) {
337
- await this.pool.query(`UPDATE knowledge_triples SET invalidated_at = NOW()
337
+ await this.pool.query(`UPDATE knowledge_triples SET invalidated_at = NOW()
338
338
  WHERE tenant_id = $1 AND id = $2`, [this.tenantId, id]);
339
339
  }
340
340
  async getTripleTimeline(entity) {
341
- const { rows } = await this.pool.query(`SELECT id, subject, predicate, object, source_id, invalidated_at, created_at
342
- FROM knowledge_triples
343
- WHERE tenant_id = $1 AND (subject = $2 OR object = $2)
341
+ const { rows } = await this.pool.query(`SELECT id, subject, predicate, object, source_id, invalidated_at, created_at
342
+ FROM knowledge_triples
343
+ WHERE tenant_id = $1 AND (subject = $2 OR object = $2)
344
344
  ORDER BY created_at ASC`, [this.tenantId, entity]);
345
345
  return rows.map(pgRowToTriple);
346
346
  }
347
347
  async getTripleStats() {
348
- const { rows } = await this.pool.query(`SELECT
349
- COUNT(*)::int AS total,
350
- COUNT(*) FILTER (WHERE invalidated_at IS NULL)::int AS active,
351
- COUNT(*) FILTER (WHERE invalidated_at IS NOT NULL)::int AS invalidated,
352
- COUNT(DISTINCT subject)::int AS subjects,
353
- COUNT(DISTINCT predicate)::int AS predicates
354
- FROM knowledge_triples
348
+ const { rows } = await this.pool.query(`SELECT
349
+ COUNT(*)::int AS total,
350
+ COUNT(*) FILTER (WHERE invalidated_at IS NULL)::int AS active,
351
+ COUNT(*) FILTER (WHERE invalidated_at IS NOT NULL)::int AS invalidated,
352
+ COUNT(DISTINCT subject)::int AS subjects,
353
+ COUNT(DISTINCT predicate)::int AS predicates
354
+ FROM knowledge_triples
355
355
  WHERE tenant_id = $1`, [this.tenantId]);
356
356
  const r = rows[0] ?? { total: 0, active: 0, invalidated: 0, subjects: 0, predicates: 0 };
357
357
  return { total: r.total, active: r.active, invalidated: r.invalidated, subjects: r.subjects, predicates: r.predicates };
@@ -362,7 +362,7 @@ export class PostgresStorageAdapter {
362
362
  const date = now.toISOString().split('T')[0];
363
363
  const time = now.toISOString().split('T')[1].split('.')[0];
364
364
  const trimmed = content.trim();
365
- await this.pool.query(`INSERT INTO diary_entries (date, tenant_id, agent, content, created_at)
365
+ await this.pool.query(`INSERT INTO diary_entries (date, tenant_id, agent, content, created_at)
366
366
  VALUES ($1::date, $2, $3, $4, $5)`, [date, this.tenantId, agent, trimmed, now.toISOString()]);
367
367
  return { date, time, content: trimmed, agent };
368
368
  }
@@ -382,9 +382,9 @@ export class PostgresStorageAdapter {
382
382
  params.push(opts.agent);
383
383
  conds.push(`agent = $${params.length}`);
384
384
  }
385
- const { rows } = await this.pool.query(`SELECT date::text AS date, agent, content, created_at
386
- FROM diary_entries
387
- WHERE ${conds.join(' AND ')}
385
+ const { rows } = await this.pool.query(`SELECT date::text AS date, agent, content, created_at
386
+ FROM diary_entries
387
+ WHERE ${conds.join(' AND ')}
388
388
  ORDER BY date DESC, created_at ASC`, params);
389
389
  const grouped = new Map();
390
390
  for (const r of rows) {
@@ -397,9 +397,9 @@ export class PostgresStorageAdapter {
397
397
  return Array.from(grouped.entries()).map(([date, entries]) => ({ date, entries }));
398
398
  }
399
399
  async listDiaryDates() {
400
- const { rows } = await this.pool.query(`SELECT DISTINCT date::text AS date
401
- FROM diary_entries
402
- WHERE tenant_id = $1
400
+ const { rows } = await this.pool.query(`SELECT DISTINCT date::text AS date
401
+ FROM diary_entries
402
+ WHERE tenant_id = $1
403
403
  ORDER BY date DESC`, [this.tenantId]);
404
404
  return rows.map((r) => r.date);
405
405
  }
@@ -408,7 +408,7 @@ export class PostgresStorageAdapter {
408
408
  const timestamp = new Date().toISOString();
409
409
  const full = { ...note, timestamp };
410
410
  const id = randomUUID();
411
- await this.pool.query(`INSERT INTO handoffs (id, tenant_id, content_json, content_md, created_at)
411
+ await this.pool.query(`INSERT INTO handoffs (id, tenant_id, content_json, content_md, created_at)
412
412
  VALUES ($1, $2, $3::jsonb, $4, $5)`, [id, this.tenantId, JSON.stringify(full), formatHandoffMarkdown(full), timestamp]);
413
413
  return full;
414
414
  }
@@ -420,16 +420,16 @@ export class PostgresStorageAdapter {
420
420
  const { rows } = await this.pool.query(`SELECT content_json FROM handoffs WHERE tenant_id = $1 AND id = $2 LIMIT 1`, [this.tenantId, stamp]);
421
421
  return rows[0] ? rows[0].content_json : null;
422
422
  }
423
- const { rows } = await this.pool.query(`SELECT content_json FROM handoffs
424
- WHERE tenant_id = $1
423
+ const { rows } = await this.pool.query(`SELECT content_json FROM handoffs
424
+ WHERE tenant_id = $1
425
425
  ORDER BY created_at DESC LIMIT 1`, [this.tenantId]);
426
426
  return rows[0] ? rows[0].content_json : null;
427
427
  }
428
428
  async listHandoffs(limit = 10) {
429
- const { rows } = await this.pool.query(`SELECT id, content_json, created_at
430
- FROM handoffs
431
- WHERE tenant_id = $1
432
- ORDER BY created_at DESC
429
+ const { rows } = await this.pool.query(`SELECT id, content_json, created_at
430
+ FROM handoffs
431
+ WHERE tenant_id = $1
432
+ ORDER BY created_at DESC
433
433
  LIMIT $2`, [this.tenantId, limit]);
434
434
  return rows.map((r) => {
435
435
  const note = r.content_json;
@@ -1,29 +1,29 @@
1
- import type { CognitiveLayer, MemoryChunk, MemoryType, Sentiment } from './types.js';
2
- export interface UpdateMetadataInput {
3
- tags?: string[];
4
- source?: string;
5
- domain?: string;
6
- topic?: string;
7
- type?: MemoryType;
8
- sentiment?: Sentiment;
9
- importance?: number;
10
- cognitiveLayer?: CognitiveLayer;
11
- }
12
- export type UpdateMetadataMode = 'merge' | 'replace';
13
- /**
14
- * Pure helper: build the storage patch for a memory_update_metadata
15
- * call. Separated from server.ts so importing it (e.g. from tests)
16
- * doesn't pull in the MCP stdio server bootstrap.
17
- *
18
- * - `merge`: only fields the caller specified land in the patch.
19
- * Untouched fields are absent → Storage.updateChunk leaves them alone.
20
- * - `replace`: every metadata-shape field is set, with caller values
21
- * where present and engram defaults otherwise. Existing untouched
22
- * fields get overwritten with the default. Footgun-y; the tool
23
- * layer logs a warning when this mode fires.
24
- *
25
- * Immutable fields (id, createdAt, embedding, embeddingVersion) are
26
- * never produced by this helper; the tool layer doesn't accept them
27
- * in its input schema either.
28
- */
29
- export declare function buildUpdateMetadataPatch(metadata: UpdateMetadataInput, mode: UpdateMetadataMode): Partial<MemoryChunk>;
1
+ import type { CognitiveLayer, MemoryChunk, MemoryType, Sentiment } from './types.js';
2
+ export interface UpdateMetadataInput {
3
+ tags?: string[];
4
+ source?: string;
5
+ domain?: string;
6
+ topic?: string;
7
+ type?: MemoryType;
8
+ sentiment?: Sentiment;
9
+ importance?: number;
10
+ cognitiveLayer?: CognitiveLayer;
11
+ }
12
+ export type UpdateMetadataMode = 'merge' | 'replace';
13
+ /**
14
+ * Pure helper: build the storage patch for an engram-update-metadata
15
+ * call. Separated from server.ts so importing it (e.g. from tests)
16
+ * doesn't pull in the MCP stdio server bootstrap.
17
+ *
18
+ * - `merge`: only fields the caller specified land in the patch.
19
+ * Untouched fields are absent → Storage.updateChunk leaves them alone.
20
+ * - `replace`: every metadata-shape field is set, with caller values
21
+ * where present and engram defaults otherwise. Existing untouched
22
+ * fields get overwritten with the default. Footgun-y; the tool
23
+ * layer logs a warning when this mode fires.
24
+ *
25
+ * Immutable fields (id, createdAt, embedding, embeddingVersion) are
26
+ * never produced by this helper; the tool layer doesn't accept them
27
+ * in its input schema either.
28
+ */
29
+ export declare function buildUpdateMetadataPatch(metadata: UpdateMetadataInput, mode: UpdateMetadataMode): Partial<MemoryChunk>;
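To make the merge/replace distinction in the comment above concrete, a small usage sketch against the declared signature; the import path and field values are illustrative, and the defaults applied in replace mode are whatever engram defines, not shown here:

    import { buildUpdateMetadataPatch } from './update-metadata.js'; // path illustrative

    // merge: only the fields the caller names land in the patch, so
    // Storage.updateChunk leaves every other metadata field untouched.
    const mergePatch = buildUpdateMetadataPatch(
      { tags: ['storage', 'postgres'], importance: 0.8 },
      'merge',
    );

    // replace: every metadata-shape field is set, caller values where given
    // and engram defaults otherwise, which is why the tool layer logs a
    // warning when this mode fires.
    const replacePatch = buildUpdateMetadataPatch({ topic: 'storage adapters' }, 'replace');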