audrey 0.3.3 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -46,7 +46,7 @@ npx audrey status
  npx audrey uninstall
  ```

- Every Claude Code session now has 5 memory tools: `memory_encode`, `memory_recall`, `memory_consolidate`, `memory_introspect`, `memory_resolve_truth`.
+ Every Claude Code session now has 7 memory tools: `memory_encode`, `memory_recall`, `memory_consolidate`, `memory_introspect`, `memory_resolve_truth`, `memory_export`, `memory_import`.

  ### SDK in Your Code

@@ -533,14 +533,18 @@ Demonstrates the full pipeline: encode 3 rate-limit observations → consolidate
  - [x] Published to npm with proper package metadata
  - [x] 194 tests across 17 test files

- ### v0.3.3 — Hardening (current)
+ ### v0.5.0 — Feature Depth (current)

- - [x] Fix status command dimension mismatch (read stored dimensions from existing database)
- - [x] Safe JSON parsing in LLM providers (descriptive errors on malformed responses)
- - [x] Fetch timeouts on all API calls (configurable, default 30s)
- - [x] Config validation in Audrey constructor (dormantThreshold, minEpisodes)
- - [x] encodeBatch error isolation tests
- - [x] 208 tests across 17 test files
+ - [x] Configurable confidence weights per Audrey instance
+ - [x] Configurable decay rates (half-lives) per Audrey instance
+ - [x] Confidence config wired through constructor to recall and decay
+ - [x] Memory export (JSON snapshot of all tables, no raw embeddings)
+ - [x] Memory import with automatic re-embedding via current provider
+ - [x] `memory_export` and `memory_import` MCP tools (7 tools total)
+ - [x] Auto-consolidation scheduling (`startAutoConsolidate` / `stopAutoConsolidate`)
+ - [x] Consolidation metrics tracking (per-run params and results)
+ - [x] Adaptive consolidation parameter suggestions based on historical yield
+ - [x] 220+ tests across 20 test files

  ### v0.4.0 — Type Safety & Developer Experience

@@ -554,16 +558,6 @@ Demonstrates the full pipeline: encode 3 rate-limit observations → consolidate
  - [ ] Embedding migration pipeline (re-embed when models change)
  - [ ] Re-consolidation queue (re-run consolidation with new embedding model)

- ### v0.5.0 — Advanced Memory Features
-
- - [ ] Adaptive consolidation threshold (learn optimal N per domain, not fixed N=3)
- - [ ] Source-aware confidence for semantic memories (track strongest source composition)
- - [ ] Configurable decay rates per Audrey instance
- - [ ] Configurable confidence weights per Audrey instance
- - [ ] PII detection and redaction (opt-in)
- - [ ] Memory export/import (JSON snapshot)
- - [ ] Auto-consolidation scheduling (setInterval with configurable interval)
-
  ### v0.6.0 — Scale

  - [ ] pgvector adapter for PostgreSQL backend
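
The v0.5.0 checklist above makes confidence weights and decay half-lives configurable per instance. A rough usage sketch, pieced together from the `Audrey` constructor and `recall` changes in `package/src/audrey.js` further down this diff; the `halfLives` keys match what `recall.js` and `decay.js` read, while the root import of `Audrey` and the concrete numbers are assumptions, not documented API:

```js
import { Audrey } from 'audrey'; // assumes the class is re-exported from the package root

const audrey = new Audrey({
  agent: 'default',
  embedding: { provider: 'mock', dimensions: 64 },
  confidence: {
    // Half-lives are consumed as days by recall.js and decay.js; the values here are guesses.
    halfLives: { episodic: 14, semantic: 90, procedural: 60 },
    // Maps source types (see the SourceType typedef in audrey.js) to reliability; guessed values.
    sourceReliability: { 'told-by-user': 0.9, inference: 0.5 },
  },
  consolidation: { minEpisodes: 3 },
  decay: { dormantThreshold: 0.1 },
});

// The instance-level config is merged into every recall/recallStream/decay call.
const hits = await audrey.recall('API rate limits');
console.log(hits.length);
```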
@@ -1,7 +1,7 @@
  import { homedir } from 'node:os';
  import { join } from 'node:path';

- export const VERSION = '0.3.3';
+ export const VERSION = '0.5.0';
  export const SERVER_NAME = 'audrey-memory';
  export const DEFAULT_DATA_DIR = join(homedir(), '.audrey', 'data');

@@ -65,12 +65,14 @@ function install() {
  console.log(`
  Audrey registered as "${SERVER_NAME}" with Claude Code.

- 5 tools available in every session:
+ 7 tools available in every session:
  memory_encode — Store observations, facts, preferences
  memory_recall — Search memories by semantic similarity
  memory_consolidate — Extract principles from accumulated episodes
  memory_introspect — Check memory system health
  memory_resolve_truth — Resolve contradictions between claims
+ memory_export — Export all memories as JSON snapshot
+ memory_import — Import a snapshot into a fresh database

  Data stored in: ${DEFAULT_DATA_DIR}
  Verify: claude mcp list
@@ -239,6 +241,44 @@ async function main() {
  },
  );

+ server.tool(
+ 'memory_export',
+ {},
+ async () => {
+ try {
+ const snapshot = audrey.export();
+ return toolResult(snapshot);
+ } catch (err) {
+ return toolError(err);
+ }
+ },
+ );
+
+ server.tool(
+ 'memory_import',
+ {
+ snapshot: z.object({
+ version: z.string(),
+ episodes: z.array(z.any()),
+ semantics: z.array(z.any()).optional(),
+ procedures: z.array(z.any()).optional(),
+ causalLinks: z.array(z.any()).optional(),
+ contradictions: z.array(z.any()).optional(),
+ consolidationRuns: z.array(z.any()).optional(),
+ config: z.record(z.string()).optional(),
+ }).passthrough().describe('A snapshot from memory_export'),
+ },
+ async ({ snapshot }) => {
+ try {
+ await audrey.import(snapshot);
+ const stats = audrey.introspect();
+ return toolResult({ imported: true, stats });
+ } catch (err) {
+ return toolError(err);
+ }
+ },
+ );
+
  const transport = new StdioServerTransport();
  await server.connect(transport);
  console.error('[audrey-mcp] connected via stdio');
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "audrey",
- "version": "0.3.3",
+ "version": "0.5.0",
  "description": "Biological memory architecture for AI agents — encode, consolidate, and recall memories with confidence decay, contradiction detection, and causal graphs",
  "type": "module",
  "main": "src/index.js",
package/src/adaptive.js ADDED
@@ -0,0 +1,53 @@
+ export function suggestConsolidationParams(db) {
+   const runs = db.prepare(`
+     SELECT min_cluster_size, similarity_threshold, clusters_found, principles_extracted, episodes_evaluated
+     FROM consolidation_metrics
+     ORDER BY created_at DESC
+     LIMIT 20
+   `).all();
+
+   if (runs.length === 0) {
+     return {
+       minClusterSize: 3,
+       similarityThreshold: 0.85,
+       confidence: 'no_data',
+     };
+   }
+
+   const paramScores = new Map();
+   for (const run of runs) {
+     if (run.episodes_evaluated === 0) continue;
+     const key = `${run.min_cluster_size}:${run.similarity_threshold}`;
+     if (!paramScores.has(key)) {
+       paramScores.set(key, {
+         minClusterSize: run.min_cluster_size,
+         similarityThreshold: run.similarity_threshold,
+         yields: [],
+       });
+     }
+     paramScores.get(key).yields.push(run.principles_extracted / run.episodes_evaluated);
+   }
+
+   let bestKey = null;
+   let bestAvgYield = -1;
+   for (const [key, data] of paramScores) {
+     const avg = data.yields.reduce((a, b) => a + b, 0) / data.yields.length;
+     if (avg > bestAvgYield) {
+       bestAvgYield = avg;
+       bestKey = key;
+     }
+   }
+
+   if (!bestKey) {
+     return { minClusterSize: 3, similarityThreshold: 0.85, confidence: 'no_data' };
+   }
+
+   const best = paramScores.get(bestKey);
+   const confidence = runs.length >= 5 ? 'high' : runs.length >= 2 ? 'medium' : 'low';
+
+   return {
+     minClusterSize: best.minClusterSize,
+     similarityThreshold: best.similarityThreshold,
+     confidence,
+   };
+ }
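
A hypothetical way to consume the suggestion: the return shape (`{ minClusterSize, similarityThreshold, confidence }`) comes straight from the function above, but whether `consolidate()` accepts these exact option names is an assumption based on the variable names inside `runConsolidation`.

```js
// Sketch only: the option names passed to consolidate() are assumed, not confirmed by this diff.
const suggestion = audrey.suggestConsolidationParams();

if (suggestion.confidence !== 'no_data') {
  await audrey.consolidate({
    minClusterSize: suggestion.minClusterSize,
    similarityThreshold: suggestion.similarityThreshold,
  });
}
```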
package/src/audrey.js CHANGED
@@ -10,6 +10,9 @@ import { applyDecay } from './decay.js';
  import { rollbackConsolidation, getConsolidationHistory } from './rollback.js';
  import { introspect as introspectFn } from './introspect.js';
  import { buildContextResolutionPrompt } from './prompts.js';
+ import { exportMemories } from './export.js';
+ import { importMemories } from './import.js';
+ import { suggestConsolidationParams as suggestParamsFn } from './adaptive.js';

  /**
  * @typedef {'direct-observation' | 'told-by-user' | 'tool-result' | 'inference' | 'model-generated'} SourceType
@@ -77,6 +80,7 @@ export class Audrey extends EventEmitter {
  agent = 'default',
  embedding = { provider: 'mock', dimensions: 64 },
  llm,
+ confidence = {},
  consolidation = {},
  decay = {},
  } = {}) {
@@ -97,8 +101,16 @@
  this.embeddingProvider = createEmbeddingProvider(embedding);
  this.db = createDatabase(dataDir, { dimensions: this.embeddingProvider.dimensions });
  this.llmProvider = llm ? createLLMProvider(llm) : null;
- this.consolidationConfig = { minEpisodes };
- this.decayConfig = { dormantThreshold };
+ this.confidenceConfig = {
+ weights: confidence.weights,
+ halfLives: confidence.halfLives,
+ sourceReliability: confidence.sourceReliability,
+ };
+ this.consolidationConfig = {
+ minEpisodes: consolidation.minEpisodes || 3,
+ };
+ this.decayConfig = { dormantThreshold: decay.dormantThreshold || 0.1 };
+ this._autoConsolidateTimer = null;
  }

  _emitValidation(id, params) {
@@ -161,7 +173,10 @@
  * @returns {Promise<RecallResult[]>}
  */
  recall(query, options = {}) {
- return recallFn(this.db, this.embeddingProvider, query, options);
+ return recallFn(this.db, this.embeddingProvider, query, {
+ ...options,
+ confidenceConfig: options.confidenceConfig ?? this.confidenceConfig,
+ });
  }

  /**
@@ -170,7 +185,10 @@
  * @returns {AsyncGenerator<RecallResult>}
  */
  async *recallStream(query, options = {}) {
- yield* recallStreamFn(this.db, this.embeddingProvider, query, options);
+ yield* recallStreamFn(this.db, this.embeddingProvider, query, {
+ ...options,
+ confidenceConfig: options.confidenceConfig ?? this.confidenceConfig,
+ });
  }

  /**
@@ -197,6 +215,7 @@
  decay(options = {}) {
  const result = applyDecay(this.db, {
  dormantThreshold: options.dormantThreshold || this.decayConfig.dormantThreshold,
+ halfLives: options.halfLives ?? this.confidenceConfig.halfLives,
  });
  this.emit('decay', result);
  return result;
@@ -278,8 +297,40 @@
  return introspectFn(this.db);
  }

+ export() {
+ return exportMemories(this.db);
+ }
+
+ async import(snapshot) {
+ return importMemories(this.db, this.embeddingProvider, snapshot);
+ }
+
+ startAutoConsolidate(intervalMs, options = {}) {
+ if (intervalMs < 1000) {
+ throw new Error('Auto-consolidation interval must be at least 1000ms');
+ }
+ if (this._autoConsolidateTimer) {
+ throw new Error('Auto-consolidation is already running');
+ }
+ this._autoConsolidateTimer = setInterval(() => {
+ this.consolidate(options).catch(err => this.emit('error', err));
+ }, intervalMs);
+ }
+
+ stopAutoConsolidate() {
+ if (this._autoConsolidateTimer) {
+ clearInterval(this._autoConsolidateTimer);
+ this._autoConsolidateTimer = null;
+ }
+ }
+
+ suggestConsolidationParams() {
+ return suggestParamsFn(this.db);
+ }
+
  /** @returns {void} */
  close() {
+ this.stopAutoConsolidate();
  closeDatabase(this.db);
  }
  }
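
The new scheduling methods above suggest a lifecycle like the following. Everything shown (the 1000 ms floor, the double-start guard, the 'error' event on failed runs, and `close()` stopping the timer) is taken from the added code; only the interval value is made up.

```js
// Audrey extends EventEmitter, so failed background runs surface as 'error' events.
audrey.on('error', (err) => console.error('auto-consolidation failed:', err));

audrey.startAutoConsolidate(30 * 60 * 1000); // every 30 minutes; anything under 1000 ms throws

// Calling startAutoConsolidate() again without stopping throws 'Auto-consolidation is already running'.
// close() now calls stopAutoConsolidate() before closing the database:
audrey.close();
```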
@@ -213,6 +213,15 @@ export async function runConsolidation(db, embeddingProvider, options = {})

  promoteAll();

+ db.prepare(`
+ INSERT INTO consolidation_metrics (id, run_id, min_cluster_size, similarity_threshold,
+ episodes_evaluated, clusters_found, principles_extracted, created_at)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+ `).run(
+ generateId(), runId, minClusterSize, similarityThreshold,
+ episodesEvaluated, clusters.length, principlesExtracted, new Date().toISOString(),
+ );
+
  return {
  runId,
  episodesEvaluated,
package/src/db.js CHANGED
@@ -104,6 +104,18 @@ const SCHEMA = `
  value TEXT NOT NULL
  );

+ CREATE TABLE IF NOT EXISTS consolidation_metrics (
+ id TEXT PRIMARY KEY,
+ run_id TEXT NOT NULL,
+ min_cluster_size INTEGER NOT NULL,
+ similarity_threshold REAL NOT NULL,
+ episodes_evaluated INTEGER NOT NULL,
+ clusters_found INTEGER NOT NULL,
+ principles_extracted INTEGER NOT NULL,
+ created_at TEXT NOT NULL,
+ FOREIGN KEY (run_id) REFERENCES consolidation_runs(id)
+ );
+
  CREATE INDEX IF NOT EXISTS idx_episodes_created ON episodes(created_at);
  CREATE INDEX IF NOT EXISTS idx_episodes_consolidated ON episodes(consolidated);
  CREATE INDEX IF NOT EXISTS idx_episodes_source ON episodes(source);
package/src/decay.js CHANGED
@@ -6,7 +6,7 @@ import { daysBetween } from './utils.js';
  * @param {{ dormantThreshold?: number }} [options]
  * @returns {{ totalEvaluated: number, transitionedToDormant: number, timestamp: string }}
  */
- export function applyDecay(db, { dormantThreshold = 0.1 } = {}) {
+ export function applyDecay(db, { dormantThreshold = 0.1, halfLives } = {}) {
  const now = new Date();
  let totalEvaluated = 0;
  let transitionedToDormant = 0;
@@ -31,7 +31,7 @@ export function applyDecay(db, { dormantThreshold = 0.1 } = {}) {
  supportingCount: sem.supporting_count || 0,
  contradictingCount: sem.contradicting_count || 0,
  ageDays,
- halfLifeDays: DEFAULT_HALF_LIVES.semantic,
+ halfLifeDays: halfLives?.semantic ?? DEFAULT_HALF_LIVES.semantic,
  retrievalCount: sem.retrieval_count || 0,
  daysSinceRetrieval,
  });
@@ -62,7 +62,7 @@ export function applyDecay(db, { dormantThreshold = 0.1 } = {}) {
  supportingCount: proc.success_count || 0,
  contradictingCount: proc.failure_count || 0,
  ageDays,
- halfLifeDays: DEFAULT_HALF_LIVES.procedural,
+ halfLifeDays: halfLives?.procedural ?? DEFAULT_HALF_LIVES.procedural,
  retrievalCount: proc.retrieval_count || 0,
  daysSinceRetrieval,
  });
package/src/export.js ADDED
@@ -0,0 +1,59 @@
+ import { readFileSync } from 'node:fs';
+ import { fileURLToPath } from 'node:url';
+ import { join, dirname } from 'node:path';
+ import { safeJsonParse } from './utils.js';
+
+ const __dirname = dirname(fileURLToPath(import.meta.url));
+ const pkg = JSON.parse(readFileSync(join(__dirname, '../package.json'), 'utf-8'));
+
+ export function exportMemories(db) {
+   const episodes = db.prepare(
+     'SELECT id, content, source, source_reliability, salience, tags, causal_trigger, causal_consequence, created_at, supersedes, superseded_by, consolidated FROM episodes'
+   ).all().map(ep => ({
+     ...ep,
+     tags: safeJsonParse(ep.tags, null),
+   }));
+
+   const semantics = db.prepare(
+     'SELECT id, content, state, conditions, evidence_episode_ids, evidence_count, supporting_count, contradicting_count, source_type_diversity, consolidation_checkpoint, created_at, last_reinforced_at, retrieval_count, challenge_count FROM semantics'
+   ).all().map(sem => ({
+     ...sem,
+     evidence_episode_ids: safeJsonParse(sem.evidence_episode_ids, []),
+   }));
+
+   const procedures = db.prepare(
+     'SELECT id, content, state, trigger_conditions, evidence_episode_ids, success_count, failure_count, created_at, last_reinforced_at, retrieval_count FROM procedures'
+   ).all().map(proc => ({
+     ...proc,
+     evidence_episode_ids: safeJsonParse(proc.evidence_episode_ids, []),
+   }));
+
+   const causalLinks = db.prepare('SELECT * FROM causal_links').all();
+
+   const contradictions = db.prepare(
+     'SELECT id, claim_a_id, claim_a_type, claim_b_id, claim_b_type, state, resolution, resolved_at, reopened_at, reopen_evidence_id, created_at FROM contradictions'
+   ).all();
+
+   const consolidationRuns = db.prepare(
+     'SELECT id, input_episode_ids, output_memory_ids, started_at, completed_at, status FROM consolidation_runs'
+   ).all().map(run => ({
+     ...run,
+     input_episode_ids: safeJsonParse(run.input_episode_ids, []),
+     output_memory_ids: safeJsonParse(run.output_memory_ids, []),
+   }));
+
+   const configRows = db.prepare('SELECT key, value FROM audrey_config').all();
+   const config = Object.fromEntries(configRows.map(r => [r.key, r.value]));
+
+   return {
+     version: pkg.version,
+     exportedAt: new Date().toISOString(),
+     episodes,
+     semantics,
+     procedures,
+     causalLinks,
+     contradictions,
+     consolidationRuns,
+     config,
+   };
+ }
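
For reference, the snapshot returned by `exportMemories()` has roughly this shape (field values below are placeholders, not a documented schema); embedding vectors are deliberately left out, which is why import re-embeds everything:

```js
// Illustrative only, derived from the return statement above.
const snapshot = {
  version: '0.5.0',                        // taken from package.json at export time
  exportedAt: '2025-01-01T00:00:00.000Z',
  episodes: [],                            // rows with tags parsed back into arrays
  semantics: [],                           // rows with evidence_episode_ids parsed into arrays
  procedures: [],
  causalLinks: [],
  contradictions: [],
  consolidationRuns: [],
  config: {},                              // key/value pairs from the audrey_config table
};
```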
package/src/import.js ADDED
@@ -0,0 +1,116 @@
+ export async function importMemories(db, embeddingProvider, snapshot) {
+   const existingEpisodes = db.prepare('SELECT COUNT(*) as c FROM episodes').get().c;
+   if (existingEpisodes > 0) {
+     throw new Error('Cannot import into a database that is not empty');
+   }
+
+   const insertEpisode = db.prepare(`
+     INSERT INTO episodes (id, content, source, source_reliability, salience, tags,
+       causal_trigger, causal_consequence, created_at, supersedes, superseded_by, consolidated)
+     VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+   `);
+
+   const insertVecEpisode = db.prepare(
+     'INSERT INTO vec_episodes(id, embedding, source, consolidated) VALUES (?, ?, ?, ?)'
+   );
+
+   const insertSemantic = db.prepare(`
+     INSERT INTO semantics (id, content, state, conditions, evidence_episode_ids,
+       evidence_count, supporting_count, contradicting_count, source_type_diversity,
+       consolidation_checkpoint, created_at, last_reinforced_at, retrieval_count, challenge_count)
+     VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+   `);
+
+   const insertVecSemantic = db.prepare(
+     'INSERT INTO vec_semantics(id, embedding, state) VALUES (?, ?, ?)'
+   );
+
+   const insertProcedure = db.prepare(`
+     INSERT INTO procedures (id, content, state, trigger_conditions, evidence_episode_ids,
+       success_count, failure_count, created_at, last_reinforced_at, retrieval_count)
+     VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+   `);
+
+   const insertVecProcedure = db.prepare(
+     'INSERT INTO vec_procedures(id, embedding, state) VALUES (?, ?, ?)'
+   );
+
+   const insertCausalLink = db.prepare(`
+     INSERT INTO causal_links (id, cause_id, effect_id, link_type, mechanism, confidence, evidence_count, created_at)
+     VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+   `);
+
+   const insertContradiction = db.prepare(`
+     INSERT INTO contradictions (id, claim_a_id, claim_a_type, claim_b_id, claim_b_type,
+       state, resolution, resolved_at, reopened_at, reopen_evidence_id, created_at)
+     VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+   `);
+
+   const insertConsolidationRun = db.prepare(`
+     INSERT INTO consolidation_runs (id, input_episode_ids, output_memory_ids, started_at, completed_at, status)
+     VALUES (?, ?, ?, ?, ?, ?)
+   `);
+
+   for (const ep of snapshot.episodes) {
+     const tags = ep.tags ? JSON.stringify(ep.tags) : null;
+     insertEpisode.run(
+       ep.id, ep.content, ep.source, ep.source_reliability, ep.salience ?? 0.5,
+       tags, ep.causal_trigger ?? null, ep.causal_consequence ?? null,
+       ep.created_at, ep.supersedes ?? null, ep.superseded_by ?? null, ep.consolidated ?? 0,
+     );
+
+     const vector = await embeddingProvider.embed(ep.content);
+     const buffer = embeddingProvider.vectorToBuffer(vector);
+     insertVecEpisode.run(ep.id, buffer, ep.source, BigInt(ep.consolidated ?? 0));
+   }
+
+   for (const sem of (snapshot.semantics || [])) {
+     insertSemantic.run(
+       sem.id, sem.content, sem.state, sem.conditions ?? null,
+       JSON.stringify(sem.evidence_episode_ids || []),
+       sem.evidence_count ?? 0, sem.supporting_count ?? 0, sem.contradicting_count ?? 0,
+       sem.source_type_diversity ?? 0, sem.consolidation_checkpoint ?? null,
+       sem.created_at, sem.last_reinforced_at ?? null, sem.retrieval_count ?? 0, sem.challenge_count ?? 0,
+     );
+
+     const vector = await embeddingProvider.embed(sem.content);
+     const buffer = embeddingProvider.vectorToBuffer(vector);
+     insertVecSemantic.run(sem.id, buffer, sem.state);
+   }
+
+   for (const proc of (snapshot.procedures || [])) {
+     insertProcedure.run(
+       proc.id, proc.content, proc.state, proc.trigger_conditions ?? null,
+       JSON.stringify(proc.evidence_episode_ids || []),
+       proc.success_count ?? 0, proc.failure_count ?? 0,
+       proc.created_at, proc.last_reinforced_at ?? null, proc.retrieval_count ?? 0,
+     );
+
+     const vector = await embeddingProvider.embed(proc.content);
+     const buffer = embeddingProvider.vectorToBuffer(vector);
+     insertVecProcedure.run(proc.id, buffer, proc.state);
+   }
+
+   for (const link of (snapshot.causalLinks || [])) {
+     insertCausalLink.run(
+       link.id, link.cause_id, link.effect_id, link.link_type ?? 'causal',
+       link.mechanism ?? null, link.confidence ?? null, link.evidence_count ?? 1, link.created_at,
+     );
+   }
+
+   for (const con of (snapshot.contradictions || [])) {
+     insertContradiction.run(
+       con.id, con.claim_a_id, con.claim_a_type, con.claim_b_id, con.claim_b_type,
+       con.state, con.resolution ?? null, con.resolved_at ?? null,
+       con.reopened_at ?? null, con.reopen_evidence_id ?? null, con.created_at,
+     );
+   }
+
+   for (const run of (snapshot.consolidationRuns || [])) {
+     insertConsolidationRun.run(
+       run.id, JSON.stringify(run.input_episode_ids || []),
+       JSON.stringify(run.output_memory_ids || []),
+       run.started_at ?? null, run.completed_at ?? null, run.status,
+     );
+   }
+ }
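
A hedged round-trip sketch (`sourceAudrey` and `targetAudrey` are hypothetical instances): `importMemories` refuses to run against a database that already has episodes, and it re-embeds every record with the target's embedding provider, so a snapshot can move between instances that use different embedding models.

```js
import { writeFileSync, readFileSync } from 'node:fs';

// On the source machine: dump everything to disk.
const snapshot = sourceAudrey.export();
writeFileSync('audrey-snapshot.json', JSON.stringify(snapshot, null, 2));

// On the target machine: the database must be empty, or import() throws.
const restored = JSON.parse(readFileSync('audrey-snapshot.json', 'utf-8'));
await targetAudrey.import(restored);
console.log(targetAudrey.introspect()); // same pattern the memory_import MCP tool uses
```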
package/src/index.js CHANGED
@@ -10,3 +10,6 @@ export {
  buildCausalArticulationPrompt,
  buildContextResolutionPrompt,
  } from './prompts.js';
+ export { exportMemories } from './export.js';
+ export { importMemories } from './import.js';
+ export { suggestConsolidationParams } from './adaptive.js';
package/src/recall.js CHANGED
@@ -1,48 +1,57 @@
  import { computeConfidence, DEFAULT_HALF_LIVES } from './confidence.js';
  import { daysBetween, safeJsonParse } from './utils.js';

- function computeEpisodicConfidence(ep, now) {
+ function computeEpisodicConfidence(ep, now, confidenceConfig = {}) {
  const ageDays = daysBetween(ep.created_at, now);
+ const halfLives = confidenceConfig.halfLives || DEFAULT_HALF_LIVES;
  return computeConfidence({
  sourceType: ep.source,
  supportingCount: 1,
  contradictingCount: 0,
  ageDays,
- halfLifeDays: DEFAULT_HALF_LIVES.episodic,
+ halfLifeDays: halfLives.episodic ?? DEFAULT_HALF_LIVES.episodic,
  retrievalCount: 0,
  daysSinceRetrieval: ageDays,
+ weights: confidenceConfig.weights,
+ customSourceReliability: confidenceConfig.sourceReliability,
  });
  }

- function computeSemanticConfidence(sem, now) {
+ function computeSemanticConfidence(sem, now, confidenceConfig = {}) {
  const ageDays = daysBetween(sem.created_at, now);
  const daysSinceRetrieval = sem.last_reinforced_at
  ? daysBetween(sem.last_reinforced_at, now)
  : ageDays;
+ const halfLives = confidenceConfig.halfLives || DEFAULT_HALF_LIVES;
  return computeConfidence({
  sourceType: 'tool-result',
  supportingCount: sem.supporting_count || 0,
  contradictingCount: sem.contradicting_count || 0,
  ageDays,
- halfLifeDays: DEFAULT_HALF_LIVES.semantic,
+ halfLifeDays: halfLives.semantic ?? DEFAULT_HALF_LIVES.semantic,
  retrievalCount: sem.retrieval_count || 0,
  daysSinceRetrieval,
+ weights: confidenceConfig.weights,
+ customSourceReliability: confidenceConfig.sourceReliability,
  });
  }

- function computeProceduralConfidence(proc, now) {
+ function computeProceduralConfidence(proc, now, confidenceConfig = {}) {
  const ageDays = daysBetween(proc.created_at, now);
  const daysSinceRetrieval = proc.last_reinforced_at
  ? daysBetween(proc.last_reinforced_at, now)
  : ageDays;
+ const halfLives = confidenceConfig.halfLives || DEFAULT_HALF_LIVES;
  return computeConfidence({
  sourceType: 'tool-result',
  supportingCount: proc.success_count || 0,
  contradictingCount: proc.failure_count || 0,
  ageDays,
- halfLifeDays: DEFAULT_HALF_LIVES.procedural,
+ halfLifeDays: halfLives.procedural ?? DEFAULT_HALF_LIVES.procedural,
  retrievalCount: proc.retrieval_count || 0,
  daysSinceRetrieval,
+ weights: confidenceConfig.weights,
+ customSourceReliability: confidenceConfig.sourceReliability,
  });
  }

@@ -112,7 +121,7 @@ function buildProceduralEntry(proc, confidence, score, includeProvenance) {
  return entry;
  }

- function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance) {
+ function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, confidenceConfig) {
  const rows = db.prepare(`
  SELECT e.*, (1.0 - v.distance) AS similarity
  FROM vec_episodes v
@@ -124,7 +133,7 @@ function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includePro

  const results = [];
  for (const row of rows) {
- const confidence = computeEpisodicConfidence(row, now);
+ const confidence = computeEpisodicConfidence(row, now, confidenceConfig);
  if (confidence < minConfidence) continue;
  const score = row.similarity * confidence;
  results.push(buildEpisodicEntry(row, confidence, score, includeProvenance));
@@ -132,7 +141,7 @@ function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includePro
  return results;
  }

- function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant) {
+ function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig) {
  let stateFilter;
  if (includeDormant) {
  stateFilter = "AND (v.state = 'active' OR v.state = 'context_dependent' OR v.state = 'dormant')";
@@ -152,7 +161,7 @@ function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includePro
  const results = [];
  const matchedIds = [];
  for (const row of rows) {
- const confidence = computeSemanticConfidence(row, now);
+ const confidence = computeSemanticConfidence(row, now, confidenceConfig);
  if (confidence < minConfidence) continue;
  const score = row.similarity * confidence;
  matchedIds.push(row.id);
@@ -161,7 +170,7 @@ function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includePro
  return { results, matchedIds };
  }

- function knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant) {
+ function knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig) {
  let stateFilter;
  if (includeDormant) {
  stateFilter = "AND (v.state = 'active' OR v.state = 'context_dependent' OR v.state = 'dormant')";
@@ -181,7 +190,7 @@ function knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeP
  const results = [];
  const matchedIds = [];
  for (const row of rows) {
- const confidence = computeProceduralConfidence(row, now);
+ const confidence = computeProceduralConfidence(row, now, confidenceConfig);
  if (confidence < minConfidence) continue;
  const score = row.similarity * confidence;
  matchedIds.push(row.id);
@@ -204,6 +213,7 @@ export async function* recallStream(db, embeddingProvider, query, options = {})
  limit = 10,
  includeProvenance = false,
  includeDormant = false,
+ confidenceConfig,
  } = options;

  const queryVector = await embeddingProvider.embed(query);
@@ -215,13 +225,13 @@ export async function* recallStream(db, embeddingProvider, query, options = {})
  const allResults = [];

  if (searchTypes.includes('episodic')) {
- const episodic = knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance);
+ const episodic = knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, confidenceConfig);
  allResults.push(...episodic);
  }

  if (searchTypes.includes('semantic')) {
  const { results: semResults, matchedIds: semIds } =
- knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant);
+ knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig);
  allResults.push(...semResults);

  if (semIds.length > 0) {
@@ -237,7 +247,7 @@ export async function* recallStream(db, embeddingProvider, query, options = {})

  if (searchTypes.includes('procedural')) {
  const { results: procResults, matchedIds: procIds } =
- knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant);
+ knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig);
  allResults.push(...procResults);

  if (procIds.length > 0) {
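
Beyond the instance-level config, recall.js now also honors a per-call override. A small sketch: the option names `limit`, `includeDormant`, and `confidenceConfig` appear in the destructuring above, while the result field names are assumptions.

```js
// The per-call override beats the instance config via the ?? fallback in audrey.js.
for await (const hit of audrey.recallStream('deploy checklist', {
  limit: 5,
  includeDormant: true,
  confidenceConfig: { halfLives: { semantic: 120, procedural: 45 } }, // illustrative values
})) {
  console.log(hit.score, hit.content); // field names assumed, not shown in this diff
}
```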