audrey 0.3.2 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +16 -13
- package/mcp-server/config.js +1 -1
- package/mcp-server/index.js +44 -2
- package/package.json +1 -1
- package/src/adaptive.js +53 -0
- package/src/audrey.js +62 -2
- package/src/consolidate.js +9 -0
- package/src/db.js +28 -5
- package/src/decay.js +3 -3
- package/src/embedding.js +38 -23
- package/src/export.js +59 -0
- package/src/import.js +116 -0
- package/src/index.js +3 -0
- package/src/llm.js +59 -35
- package/src/recall.js +25 -15
package/README.md
CHANGED
|
@@ -46,7 +46,7 @@ npx audrey status
|
|
|
46
46
|
npx audrey uninstall
|
|
47
47
|
```
|
|
48
48
|
|
|
49
|
-
Every Claude Code session now has
|
|
49
|
+
Every Claude Code session now has 7 memory tools: `memory_encode`, `memory_recall`, `memory_consolidate`, `memory_introspect`, `memory_resolve_truth`, `memory_export`, `memory_import`.
|
|
50
50
|
|
|
51
51
|
### SDK in Your Code
|
|
52
52
|
|
|
@@ -456,7 +456,7 @@ All mutations use SQLite transactions. CHECK constraints enforce valid states an
|
|
|
456
456
|
## Running Tests
|
|
457
457
|
|
|
458
458
|
```bash
|
|
459
|
-
npm test #
|
|
459
|
+
npm test # 220+ tests across 20 files
|
|
460
460
|
npm run test:watch
|
|
461
461
|
```
|
|
462
462
|
|
|
@@ -522,7 +522,7 @@ Demonstrates the full pipeline: encode 3 rate-limit observations → consolidate
|
|
|
522
522
|
- [x] Automatic migration from v0.2.0 embedding BLOBs to vec0 tables
|
|
523
523
|
- [x] 168 tests across 16 test files
|
|
524
524
|
|
|
525
|
-
### v0.3.1 — MCP Server + JSDoc Types
|
|
525
|
+
### v0.3.1 — MCP Server + JSDoc Types
|
|
526
526
|
|
|
527
527
|
- [x] MCP tool server via `@modelcontextprotocol/sdk` with stdio transport
|
|
528
528
|
- [x] 5 tools: `memory_encode`, `memory_recall`, `memory_consolidate`, `memory_introspect`, `memory_resolve_truth`
|
|
@@ -533,6 +533,19 @@ Demonstrates the full pipeline: encode 3 rate-limit observations → consolidate
|
|
|
533
533
|
- [x] Published to npm with proper package metadata
|
|
534
534
|
- [x] 194 tests across 17 test files
|
|
535
535
|
|
|
536
|
+
### v0.5.0 — Feature Depth (current)
|
|
537
|
+
|
|
538
|
+
- [x] Configurable confidence weights per Audrey instance
|
|
539
|
+
- [x] Configurable decay rates (half-lives) per Audrey instance
|
|
540
|
+
- [x] Confidence config wired through constructor to recall and decay
|
|
541
|
+
- [x] Memory export (JSON snapshot of all tables, no raw embeddings)
|
|
542
|
+
- [x] Memory import with automatic re-embedding via current provider
|
|
543
|
+
- [x] `memory_export` and `memory_import` MCP tools (7 tools total)
|
|
544
|
+
- [x] Auto-consolidation scheduling (`startAutoConsolidate` / `stopAutoConsolidate`)
|
|
545
|
+
- [x] Consolidation metrics tracking (per-run params and results)
|
|
546
|
+
- [x] Adaptive consolidation parameter suggestions based on historical yield
|
|
547
|
+
- [x] 220+ tests across 20 test files
|
|
548
|
+
|
|
536
549
|
### v0.4.0 — Type Safety & Developer Experience
|
|
537
550
|
|
|
538
551
|
- [ ] Full TypeScript conversion with strict mode
|
|
@@ -545,16 +558,6 @@ Demonstrates the full pipeline: encode 3 rate-limit observations → consolidate
|
|
|
545
558
|
- [ ] Embedding migration pipeline (re-embed when models change)
|
|
546
559
|
- [ ] Re-consolidation queue (re-run consolidation with new embedding model)
|
|
547
560
|
|
|
548
|
-
### v0.5.0 — Advanced Memory Features
|
|
549
|
-
|
|
550
|
-
- [ ] Adaptive consolidation threshold (learn optimal N per domain, not fixed N=3)
|
|
551
|
-
- [ ] Source-aware confidence for semantic memories (track strongest source composition)
|
|
552
|
-
- [ ] Configurable decay rates per Audrey instance
|
|
553
|
-
- [ ] Configurable confidence weights per Audrey instance
|
|
554
|
-
- [ ] PII detection and redaction (opt-in)
|
|
555
|
-
- [ ] Memory export/import (JSON snapshot)
|
|
556
|
-
- [ ] Auto-consolidation scheduling (setInterval with configurable interval)
|
|
557
|
-
|
|
558
561
|
### v0.6.0 — Scale
|
|
559
562
|
|
|
560
563
|
- [ ] pgvector adapter for PostgreSQL backend
|
package/mcp-server/config.js
CHANGED
package/mcp-server/index.js
CHANGED
|
@@ -7,6 +7,7 @@ import { join } from 'node:path';
|
|
|
7
7
|
import { existsSync, readFileSync } from 'node:fs';
|
|
8
8
|
import { execFileSync } from 'node:child_process';
|
|
9
9
|
import { Audrey } from '../src/index.js';
|
|
10
|
+
import { readStoredDimensions } from '../src/db.js';
|
|
10
11
|
import { VERSION, SERVER_NAME, DEFAULT_DATA_DIR, buildAudreyConfig, buildInstallArgs } from './config.js';
|
|
11
12
|
|
|
12
13
|
const VALID_SOURCES = ['direct-observation', 'told-by-user', 'tool-result', 'inference', 'model-generated'];
|
|
@@ -64,12 +65,14 @@ function install() {
|
|
|
64
65
|
console.log(`
|
|
65
66
|
Audrey registered as "${SERVER_NAME}" with Claude Code.
|
|
66
67
|
|
|
67
|
-
|
|
68
|
+
7 tools available in every session:
|
|
68
69
|
memory_encode — Store observations, facts, preferences
|
|
69
70
|
memory_recall — Search memories by semantic similarity
|
|
70
71
|
memory_consolidate — Extract principles from accumulated episodes
|
|
71
72
|
memory_introspect — Check memory system health
|
|
72
73
|
memory_resolve_truth — Resolve contradictions between claims
|
|
74
|
+
memory_export — Export all memories as JSON snapshot
|
|
75
|
+
memory_import — Import a snapshot into a fresh database
|
|
73
76
|
|
|
74
77
|
Data stored in: ${DEFAULT_DATA_DIR}
|
|
75
78
|
Verify: claude mcp list
|
|
@@ -107,10 +110,11 @@ function status() {
|
|
|
107
110
|
|
|
108
111
|
if (existsSync(DEFAULT_DATA_DIR)) {
|
|
109
112
|
try {
|
|
113
|
+
const dimensions = readStoredDimensions(DEFAULT_DATA_DIR) || 8;
|
|
110
114
|
const audrey = new Audrey({
|
|
111
115
|
dataDir: DEFAULT_DATA_DIR,
|
|
112
116
|
agent: 'status-check',
|
|
113
|
-
embedding: { provider: 'mock', dimensions
|
|
117
|
+
embedding: { provider: 'mock', dimensions },
|
|
114
118
|
});
|
|
115
119
|
const stats = audrey.introspect();
|
|
116
120
|
audrey.close();
|
|
@@ -237,6 +241,44 @@ async function main() {
|
|
|
237
241
|
},
|
|
238
242
|
);
|
|
239
243
|
|
|
244
|
+
server.tool(
|
|
245
|
+
'memory_export',
|
|
246
|
+
{},
|
|
247
|
+
async () => {
|
|
248
|
+
try {
|
|
249
|
+
const snapshot = audrey.export();
|
|
250
|
+
return toolResult(snapshot);
|
|
251
|
+
} catch (err) {
|
|
252
|
+
return toolError(err);
|
|
253
|
+
}
|
|
254
|
+
},
|
|
255
|
+
);
|
|
256
|
+
|
|
257
|
+
server.tool(
|
|
258
|
+
'memory_import',
|
|
259
|
+
{
|
|
260
|
+
snapshot: z.object({
|
|
261
|
+
version: z.string(),
|
|
262
|
+
episodes: z.array(z.any()),
|
|
263
|
+
semantics: z.array(z.any()).optional(),
|
|
264
|
+
procedures: z.array(z.any()).optional(),
|
|
265
|
+
causalLinks: z.array(z.any()).optional(),
|
|
266
|
+
contradictions: z.array(z.any()).optional(),
|
|
267
|
+
consolidationRuns: z.array(z.any()).optional(),
|
|
268
|
+
config: z.record(z.string()).optional(),
|
|
269
|
+
}).passthrough().describe('A snapshot from memory_export'),
|
|
270
|
+
},
|
|
271
|
+
async ({ snapshot }) => {
|
|
272
|
+
try {
|
|
273
|
+
await audrey.import(snapshot);
|
|
274
|
+
const stats = audrey.introspect();
|
|
275
|
+
return toolResult({ imported: true, stats });
|
|
276
|
+
} catch (err) {
|
|
277
|
+
return toolError(err);
|
|
278
|
+
}
|
|
279
|
+
},
|
|
280
|
+
);
|
|
281
|
+
|
|
240
282
|
const transport = new StdioServerTransport();
|
|
241
283
|
await server.connect(transport);
|
|
242
284
|
console.error('[audrey-mcp] connected via stdio');
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "audrey",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.5.0",
|
|
4
4
|
"description": "Biological memory architecture for AI agents — encode, consolidate, and recall memories with confidence decay, contradiction detection, and causal graphs",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "src/index.js",
|
package/src/adaptive.js
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
/**
 * Recommend consolidation parameters based on historical run metrics.
 *
 * Examines the 20 most recent consolidation runs, groups them by the
 * (min_cluster_size, similarity_threshold) pair they used, and recommends
 * the pair whose average yield (principles extracted per episode evaluated)
 * is highest. Runs that evaluated zero episodes carry no signal and are
 * skipped. Falls back to the defaults when no usable history exists.
 *
 * @param {import('better-sqlite3').Database} db - open Audrey database
 * @returns {{ minClusterSize: number, similarityThreshold: number, confidence: string }}
 *   confidence is 'no_data' | 'low' | 'medium' | 'high' depending on how
 *   many historical runs back the suggestion
 */
export function suggestConsolidationParams(db) {
  const DEFAULTS = { minClusterSize: 3, similarityThreshold: 0.85, confidence: 'no_data' };

  const history = db.prepare(`
    SELECT min_cluster_size, similarity_threshold, clusters_found, principles_extracted, episodes_evaluated
    FROM consolidation_metrics
    ORDER BY created_at DESC
    LIMIT 20
  `).all();

  if (history.length === 0) {
    return { ...DEFAULTS };
  }

  // Group yield ratios by the parameter pair that produced them.
  const byParams = new Map();
  for (const row of history) {
    if (row.episodes_evaluated === 0) continue;
    const key = `${row.min_cluster_size}:${row.similarity_threshold}`;
    let entry = byParams.get(key);
    if (!entry) {
      entry = {
        minClusterSize: row.min_cluster_size,
        similarityThreshold: row.similarity_threshold,
        yields: [],
      };
      byParams.set(key, entry);
    }
    entry.yields.push(row.principles_extracted / row.episodes_evaluated);
  }

  // Pick the pair with the highest mean yield (first wins ties, matching
  // insertion order — i.e. the most recently used pair).
  let winner = null;
  let winnerYield = -1;
  for (const entry of byParams.values()) {
    const mean = entry.yields.reduce((sum, y) => sum + y, 0) / entry.yields.length;
    if (mean > winnerYield) {
      winnerYield = mean;
      winner = entry;
    }
  }

  if (!winner) {
    // Every run evaluated zero episodes — nothing to learn from.
    return { minClusterSize: 3, similarityThreshold: 0.85, confidence: 'no_data' };
  }

  // Confidence reflects how much history backs the suggestion overall.
  const confidence = history.length >= 5 ? 'high' : history.length >= 2 ? 'medium' : 'low';

  return {
    minClusterSize: winner.minClusterSize,
    similarityThreshold: winner.similarityThreshold,
    confidence,
  };
}
|
package/src/audrey.js
CHANGED
|
@@ -10,6 +10,9 @@ import { applyDecay } from './decay.js';
|
|
|
10
10
|
import { rollbackConsolidation, getConsolidationHistory } from './rollback.js';
|
|
11
11
|
import { introspect as introspectFn } from './introspect.js';
|
|
12
12
|
import { buildContextResolutionPrompt } from './prompts.js';
|
|
13
|
+
import { exportMemories } from './export.js';
|
|
14
|
+
import { importMemories } from './import.js';
|
|
15
|
+
import { suggestConsolidationParams as suggestParamsFn } from './adaptive.js';
|
|
13
16
|
|
|
14
17
|
/**
|
|
15
18
|
* @typedef {'direct-observation' | 'told-by-user' | 'tool-result' | 'inference' | 'model-generated'} SourceType
|
|
@@ -77,19 +80,37 @@ export class Audrey extends EventEmitter {
|
|
|
77
80
|
agent = 'default',
|
|
78
81
|
embedding = { provider: 'mock', dimensions: 64 },
|
|
79
82
|
llm,
|
|
83
|
+
confidence = {},
|
|
80
84
|
consolidation = {},
|
|
81
85
|
decay = {},
|
|
82
86
|
} = {}) {
|
|
83
87
|
super();
|
|
88
|
+
|
|
89
|
+
const dormantThreshold = decay.dormantThreshold ?? 0.1;
|
|
90
|
+
if (dormantThreshold < 0 || dormantThreshold > 1) {
|
|
91
|
+
throw new Error(`dormantThreshold must be between 0 and 1, got: ${dormantThreshold}`);
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
const minEpisodes = consolidation.minEpisodes ?? 3;
|
|
95
|
+
if (!Number.isInteger(minEpisodes) || minEpisodes < 1) {
|
|
96
|
+
throw new Error(`minEpisodes must be a positive integer, got: ${minEpisodes}`);
|
|
97
|
+
}
|
|
98
|
+
|
|
84
99
|
this.agent = agent;
|
|
85
100
|
this.dataDir = dataDir;
|
|
86
101
|
this.embeddingProvider = createEmbeddingProvider(embedding);
|
|
87
102
|
this.db = createDatabase(dataDir, { dimensions: this.embeddingProvider.dimensions });
|
|
88
103
|
this.llmProvider = llm ? createLLMProvider(llm) : null;
|
|
104
|
+
this.confidenceConfig = {
|
|
105
|
+
weights: confidence.weights,
|
|
106
|
+
halfLives: confidence.halfLives,
|
|
107
|
+
sourceReliability: confidence.sourceReliability,
|
|
108
|
+
};
|
|
89
109
|
this.consolidationConfig = {
|
|
90
110
|
minEpisodes: consolidation.minEpisodes || 3,
|
|
91
111
|
};
|
|
92
112
|
this.decayConfig = { dormantThreshold: decay.dormantThreshold || 0.1 };
|
|
113
|
+
this._autoConsolidateTimer = null;
|
|
93
114
|
}
|
|
94
115
|
|
|
95
116
|
_emitValidation(id, params) {
|
|
@@ -152,7 +173,10 @@ export class Audrey extends EventEmitter {
|
|
|
152
173
|
* @returns {Promise<RecallResult[]>}
|
|
153
174
|
*/
|
|
154
175
|
recall(query, options = {}) {
|
|
155
|
-
return recallFn(this.db, this.embeddingProvider, query,
|
|
176
|
+
return recallFn(this.db, this.embeddingProvider, query, {
|
|
177
|
+
...options,
|
|
178
|
+
confidenceConfig: options.confidenceConfig ?? this.confidenceConfig,
|
|
179
|
+
});
|
|
156
180
|
}
|
|
157
181
|
|
|
158
182
|
/**
|
|
@@ -161,7 +185,10 @@ export class Audrey extends EventEmitter {
|
|
|
161
185
|
* @returns {AsyncGenerator<RecallResult>}
|
|
162
186
|
*/
|
|
163
187
|
async *recallStream(query, options = {}) {
|
|
164
|
-
yield* recallStreamFn(this.db, this.embeddingProvider, query,
|
|
188
|
+
yield* recallStreamFn(this.db, this.embeddingProvider, query, {
|
|
189
|
+
...options,
|
|
190
|
+
confidenceConfig: options.confidenceConfig ?? this.confidenceConfig,
|
|
191
|
+
});
|
|
165
192
|
}
|
|
166
193
|
|
|
167
194
|
/**
|
|
@@ -188,6 +215,7 @@ export class Audrey extends EventEmitter {
|
|
|
188
215
|
decay(options = {}) {
|
|
189
216
|
const result = applyDecay(this.db, {
|
|
190
217
|
dormantThreshold: options.dormantThreshold || this.decayConfig.dormantThreshold,
|
|
218
|
+
halfLives: options.halfLives ?? this.confidenceConfig.halfLives,
|
|
191
219
|
});
|
|
192
220
|
this.emit('decay', result);
|
|
193
221
|
return result;
|
|
@@ -269,8 +297,40 @@ export class Audrey extends EventEmitter {
|
|
|
269
297
|
return introspectFn(this.db);
|
|
270
298
|
}
|
|
271
299
|
|
|
300
|
+
export() {
|
|
301
|
+
return exportMemories(this.db);
|
|
302
|
+
}
|
|
303
|
+
|
|
304
|
+
async import(snapshot) {
|
|
305
|
+
return importMemories(this.db, this.embeddingProvider, snapshot);
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
startAutoConsolidate(intervalMs, options = {}) {
|
|
309
|
+
if (intervalMs < 1000) {
|
|
310
|
+
throw new Error('Auto-consolidation interval must be at least 1000ms');
|
|
311
|
+
}
|
|
312
|
+
if (this._autoConsolidateTimer) {
|
|
313
|
+
throw new Error('Auto-consolidation is already running');
|
|
314
|
+
}
|
|
315
|
+
this._autoConsolidateTimer = setInterval(() => {
|
|
316
|
+
this.consolidate(options).catch(err => this.emit('error', err));
|
|
317
|
+
}, intervalMs);
|
|
318
|
+
}
|
|
319
|
+
|
|
320
|
+
stopAutoConsolidate() {
|
|
321
|
+
if (this._autoConsolidateTimer) {
|
|
322
|
+
clearInterval(this._autoConsolidateTimer);
|
|
323
|
+
this._autoConsolidateTimer = null;
|
|
324
|
+
}
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
suggestConsolidationParams() {
|
|
328
|
+
return suggestParamsFn(this.db);
|
|
329
|
+
}
|
|
330
|
+
|
|
272
331
|
/** @returns {void} */
|
|
273
332
|
close() {
|
|
333
|
+
this.stopAutoConsolidate();
|
|
274
334
|
closeDatabase(this.db);
|
|
275
335
|
}
|
|
276
336
|
}
|
package/src/consolidate.js
CHANGED
|
@@ -213,6 +213,15 @@ export async function runConsolidation(db, embeddingProvider, options = {}) {
|
|
|
213
213
|
|
|
214
214
|
promoteAll();
|
|
215
215
|
|
|
216
|
+
db.prepare(`
|
|
217
|
+
INSERT INTO consolidation_metrics (id, run_id, min_cluster_size, similarity_threshold,
|
|
218
|
+
episodes_evaluated, clusters_found, principles_extracted, created_at)
|
|
219
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
220
|
+
`).run(
|
|
221
|
+
generateId(), runId, minClusterSize, similarityThreshold,
|
|
222
|
+
episodesEvaluated, clusters.length, principlesExtracted, new Date().toISOString(),
|
|
223
|
+
);
|
|
224
|
+
|
|
216
225
|
return {
|
|
217
226
|
runId,
|
|
218
227
|
episodesEvaluated,
|
package/src/db.js
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
import Database from 'better-sqlite3';
|
|
2
2
|
import * as sqliteVec from 'sqlite-vec';
|
|
3
3
|
import { join } from 'node:path';
|
|
4
|
-
import { mkdirSync } from 'node:fs';
|
|
4
|
+
import { mkdirSync, existsSync } from 'node:fs';
|
|
5
5
|
|
|
6
6
|
const SCHEMA = `
|
|
7
7
|
CREATE TABLE IF NOT EXISTS episodes (
|
|
@@ -104,6 +104,18 @@ const SCHEMA = `
|
|
|
104
104
|
value TEXT NOT NULL
|
|
105
105
|
);
|
|
106
106
|
|
|
107
|
+
CREATE TABLE IF NOT EXISTS consolidation_metrics (
|
|
108
|
+
id TEXT PRIMARY KEY,
|
|
109
|
+
run_id TEXT NOT NULL,
|
|
110
|
+
min_cluster_size INTEGER NOT NULL,
|
|
111
|
+
similarity_threshold REAL NOT NULL,
|
|
112
|
+
episodes_evaluated INTEGER NOT NULL,
|
|
113
|
+
clusters_found INTEGER NOT NULL,
|
|
114
|
+
principles_extracted INTEGER NOT NULL,
|
|
115
|
+
created_at TEXT NOT NULL,
|
|
116
|
+
FOREIGN KEY (run_id) REFERENCES consolidation_runs(id)
|
|
117
|
+
);
|
|
118
|
+
|
|
107
119
|
CREATE INDEX IF NOT EXISTS idx_episodes_created ON episodes(created_at);
|
|
108
120
|
CREATE INDEX IF NOT EXISTS idx_episodes_consolidated ON episodes(consolidated);
|
|
109
121
|
CREATE INDEX IF NOT EXISTS idx_episodes_source ON episodes(source);
|
|
@@ -232,10 +244,21 @@ export function createDatabase(dataDir, options = {}) {
|
|
|
232
244
|
return db;
|
|
233
245
|
}
|
|
234
246
|
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
247
|
+
/**
 * Read the embedding dimension count persisted in an existing database.
 *
 * Opens the database read-only so it can be called without a write lock
 * (e.g. from the MCP status command). Returns null when the database file
 * does not exist, the config table is absent (older schema), or no
 * 'dimensions' key was stored.
 *
 * @param {string} dataDir - directory containing audrey.db
 * @returns {number|null}
 */
export function readStoredDimensions(dataDir) {
  const file = join(dataDir, 'audrey.db');
  if (!existsSync(file)) {
    return null;
  }

  const handle = new Database(file, { readonly: true });
  try {
    const found = handle
      .prepare("SELECT value FROM audrey_config WHERE key = 'dimensions'")
      .get();
    return found ? parseInt(found.value, 10) : null;
  } catch (err) {
    // A pre-config-table database simply has no stored dimensions.
    if (err.message?.includes('no such table')) {
      return null;
    }
    throw err;
  } finally {
    handle.close();
  }
}
|
|
261
|
+
|
|
239
262
|
export function closeDatabase(db) {
|
|
240
263
|
if (db && db.open) {
|
|
241
264
|
db.close();
|
package/src/decay.js
CHANGED
|
@@ -6,7 +6,7 @@ import { daysBetween } from './utils.js';
|
|
|
6
6
|
* @param {{ dormantThreshold?: number }} [options]
|
|
7
7
|
* @returns {{ totalEvaluated: number, transitionedToDormant: number, timestamp: string }}
|
|
8
8
|
*/
|
|
9
|
-
export function applyDecay(db, { dormantThreshold = 0.1 } = {}) {
|
|
9
|
+
export function applyDecay(db, { dormantThreshold = 0.1, halfLives } = {}) {
|
|
10
10
|
const now = new Date();
|
|
11
11
|
let totalEvaluated = 0;
|
|
12
12
|
let transitionedToDormant = 0;
|
|
@@ -31,7 +31,7 @@ export function applyDecay(db, { dormantThreshold = 0.1 } = {}) {
|
|
|
31
31
|
supportingCount: sem.supporting_count || 0,
|
|
32
32
|
contradictingCount: sem.contradicting_count || 0,
|
|
33
33
|
ageDays,
|
|
34
|
-
halfLifeDays: DEFAULT_HALF_LIVES.semantic,
|
|
34
|
+
halfLifeDays: halfLives?.semantic ?? DEFAULT_HALF_LIVES.semantic,
|
|
35
35
|
retrievalCount: sem.retrieval_count || 0,
|
|
36
36
|
daysSinceRetrieval,
|
|
37
37
|
});
|
|
@@ -62,7 +62,7 @@ export function applyDecay(db, { dormantThreshold = 0.1 } = {}) {
|
|
|
62
62
|
supportingCount: proc.success_count || 0,
|
|
63
63
|
contradictingCount: proc.failure_count || 0,
|
|
64
64
|
ageDays,
|
|
65
|
-
halfLifeDays: DEFAULT_HALF_LIVES.procedural,
|
|
65
|
+
halfLifeDays: halfLives?.procedural ?? DEFAULT_HALF_LIVES.procedural,
|
|
66
66
|
retrievalCount: proc.retrieval_count || 0,
|
|
67
67
|
daysSinceRetrieval,
|
|
68
68
|
});
|
package/src/embedding.js
CHANGED
|
@@ -76,10 +76,11 @@ export class MockEmbeddingProvider {
|
|
|
76
76
|
/** @implements {EmbeddingProvider} */
|
|
77
77
|
export class OpenAIEmbeddingProvider {
|
|
78
78
|
/** @param {Partial<OpenAIEmbeddingConfig>} [config={}] */
|
|
79
|
-
constructor({ apiKey, model = 'text-embedding-3-small', dimensions = 1536 } = {}) {
|
|
79
|
+
constructor({ apiKey, model = 'text-embedding-3-small', dimensions = 1536, timeout = 30000 } = {}) {
|
|
80
80
|
this.apiKey = apiKey || process.env.OPENAI_API_KEY;
|
|
81
81
|
this.model = model;
|
|
82
82
|
this.dimensions = dimensions;
|
|
83
|
+
this.timeout = timeout;
|
|
83
84
|
this.modelName = model;
|
|
84
85
|
this.modelVersion = 'latest';
|
|
85
86
|
}
|
|
@@ -89,17 +90,24 @@ export class OpenAIEmbeddingProvider {
|
|
|
89
90
|
* @returns {Promise<number[]>}
|
|
90
91
|
*/
|
|
91
92
|
async embed(text) {
|
|
92
|
-
const
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
93
|
+
const controller = new AbortController();
|
|
94
|
+
const timer = setTimeout(() => controller.abort(), this.timeout);
|
|
95
|
+
try {
|
|
96
|
+
const response = await fetch('https://api.openai.com/v1/embeddings', {
|
|
97
|
+
method: 'POST',
|
|
98
|
+
headers: {
|
|
99
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
100
|
+
'Content-Type': 'application/json',
|
|
101
|
+
},
|
|
102
|
+
body: JSON.stringify({ input: text, model: this.model, dimensions: this.dimensions }),
|
|
103
|
+
signal: controller.signal,
|
|
104
|
+
});
|
|
105
|
+
if (!response.ok) throw new Error(`OpenAI embedding failed: ${response.status}`);
|
|
106
|
+
const data = await response.json();
|
|
107
|
+
return data.data[0].embedding;
|
|
108
|
+
} finally {
|
|
109
|
+
clearTimeout(timer);
|
|
110
|
+
}
|
|
103
111
|
}
|
|
104
112
|
|
|
105
113
|
/**
|
|
@@ -107,17 +115,24 @@ export class OpenAIEmbeddingProvider {
|
|
|
107
115
|
* @returns {Promise<number[][]>}
|
|
108
116
|
*/
|
|
109
117
|
async embedBatch(texts) {
|
|
110
|
-
const
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
118
|
+
const controller = new AbortController();
|
|
119
|
+
const timer = setTimeout(() => controller.abort(), this.timeout);
|
|
120
|
+
try {
|
|
121
|
+
const response = await fetch('https://api.openai.com/v1/embeddings', {
|
|
122
|
+
method: 'POST',
|
|
123
|
+
headers: {
|
|
124
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
125
|
+
'Content-Type': 'application/json',
|
|
126
|
+
},
|
|
127
|
+
body: JSON.stringify({ input: texts, model: this.model, dimensions: this.dimensions }),
|
|
128
|
+
signal: controller.signal,
|
|
129
|
+
});
|
|
130
|
+
if (!response.ok) throw new Error(`OpenAI embedding failed: ${response.status}`);
|
|
131
|
+
const data = await response.json();
|
|
132
|
+
return data.data.map(d => d.embedding);
|
|
133
|
+
} finally {
|
|
134
|
+
clearTimeout(timer);
|
|
135
|
+
}
|
|
121
136
|
}
|
|
122
137
|
|
|
123
138
|
/**
|
package/src/export.js
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { readFileSync } from 'node:fs';
import { fileURLToPath } from 'node:url';
import { join, dirname } from 'node:path';
import { safeJsonParse } from './utils.js';

// Resolved once at module load: every snapshot is stamped with the version
// of the package that produced it.
const __dirname = dirname(fileURLToPath(import.meta.url));
const pkg = JSON.parse(readFileSync(join(__dirname, '../package.json'), 'utf-8'));

/**
 * Build a plain-JSON snapshot of every memory table.
 *
 * Raw embedding vectors are deliberately omitted — importMemories() re-embeds
 * content with whatever provider is configured at import time. JSON-encoded
 * columns (tags, evidence id lists, run id lists) are decoded so the snapshot
 * is ordinary nested JSON.
 *
 * @param {import('better-sqlite3').Database} db - open Audrey database
 * @returns {object} snapshot with version, exportedAt, one array per table,
 *   and the audrey_config key/value map
 */
export function exportMemories(db) {
  const rowsOf = (sql) => db.prepare(sql).all();

  const episodes = rowsOf(
    'SELECT id, content, source, source_reliability, salience, tags, causal_trigger, causal_consequence, created_at, supersedes, superseded_by, consolidated FROM episodes'
  ).map((row) => ({ ...row, tags: safeJsonParse(row.tags, null) }));

  const semantics = rowsOf(
    'SELECT id, content, state, conditions, evidence_episode_ids, evidence_count, supporting_count, contradicting_count, source_type_diversity, consolidation_checkpoint, created_at, last_reinforced_at, retrieval_count, challenge_count FROM semantics'
  ).map((row) => ({ ...row, evidence_episode_ids: safeJsonParse(row.evidence_episode_ids, []) }));

  const procedures = rowsOf(
    'SELECT id, content, state, trigger_conditions, evidence_episode_ids, success_count, failure_count, created_at, last_reinforced_at, retrieval_count FROM procedures'
  ).map((row) => ({ ...row, evidence_episode_ids: safeJsonParse(row.evidence_episode_ids, []) }));

  const causalLinks = rowsOf('SELECT * FROM causal_links');

  const contradictions = rowsOf(
    'SELECT id, claim_a_id, claim_a_type, claim_b_id, claim_b_type, state, resolution, resolved_at, reopened_at, reopen_evidence_id, created_at FROM contradictions'
  );

  const consolidationRuns = rowsOf(
    'SELECT id, input_episode_ids, output_memory_ids, started_at, completed_at, status FROM consolidation_runs'
  ).map((row) => ({
    ...row,
    input_episode_ids: safeJsonParse(row.input_episode_ids, []),
    output_memory_ids: safeJsonParse(row.output_memory_ids, []),
  }));

  const config = Object.fromEntries(
    db.prepare('SELECT key, value FROM audrey_config').all().map((r) => [r.key, r.value])
  );

  return {
    version: pkg.version,
    exportedAt: new Date().toISOString(),
    episodes,
    semantics,
    procedures,
    causalLinks,
    contradictions,
    consolidationRuns,
    config,
  };
}
|
package/src/import.js
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
/**
 * Restore a snapshot produced by exportMemories() into an empty database.
 *
 * Snapshots carry no raw embeddings, so episode/semantic/procedure content is
 * re-embedded with the currently configured provider. Re-embedding (the only
 * async work) happens first; the row inserts then run synchronously inside a
 * single better-sqlite3 transaction, so a mid-import failure — including an
 * embedding-provider error — leaves the database empty instead of partially
 * populated (the original interleaved awaits with inserts and had no
 * transaction, so any failure left a half-imported database).
 *
 * @param {import('better-sqlite3').Database} db - open, empty Audrey database
 * @param {{ embed(text: string): Promise<number[]>, vectorToBuffer(v: number[]): Buffer }} embeddingProvider
 * @param {object} snapshot - the object returned by exportMemories()
 * @returns {Promise<void>}
 * @throws {Error} if the database already contains episodes, or the snapshot
 *   lacks an episodes array
 */
export async function importMemories(db, embeddingProvider, snapshot) {
  const existingEpisodes = db.prepare('SELECT COUNT(*) as c FROM episodes').get().c;
  if (existingEpisodes > 0) {
    throw new Error('Cannot import into a database that is not empty');
  }
  if (!Array.isArray(snapshot?.episodes)) {
    throw new Error('Snapshot is missing an episodes array');
  }

  const semantics = snapshot.semantics || [];
  const procedures = snapshot.procedures || [];
  const causalLinks = snapshot.causalLinks || [];
  const contradictions = snapshot.contradictions || [];
  const consolidationRuns = snapshot.consolidationRuns || [];

  // Phase 1 (async): re-embed all content up front, sequentially (providers
  // may rate-limit), so the insert phase below can be fully synchronous and
  // therefore wrapped in a transaction.
  const embedAll = async (items) => {
    const buffers = [];
    for (const item of items) {
      const vector = await embeddingProvider.embed(item.content);
      buffers.push(embeddingProvider.vectorToBuffer(vector));
    }
    return buffers;
  };
  const episodeBuffers = await embedAll(snapshot.episodes);
  const semanticBuffers = await embedAll(semantics);
  const procedureBuffers = await embedAll(procedures);

  const insertEpisode = db.prepare(`
    INSERT INTO episodes (id, content, source, source_reliability, salience, tags,
      causal_trigger, causal_consequence, created_at, supersedes, superseded_by, consolidated)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
  `);
  const insertVecEpisode = db.prepare(
    'INSERT INTO vec_episodes(id, embedding, source, consolidated) VALUES (?, ?, ?, ?)'
  );
  const insertSemantic = db.prepare(`
    INSERT INTO semantics (id, content, state, conditions, evidence_episode_ids,
      evidence_count, supporting_count, contradicting_count, source_type_diversity,
      consolidation_checkpoint, created_at, last_reinforced_at, retrieval_count, challenge_count)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
  `);
  const insertVecSemantic = db.prepare(
    'INSERT INTO vec_semantics(id, embedding, state) VALUES (?, ?, ?)'
  );
  const insertProcedure = db.prepare(`
    INSERT INTO procedures (id, content, state, trigger_conditions, evidence_episode_ids,
      success_count, failure_count, created_at, last_reinforced_at, retrieval_count)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
  `);
  const insertVecProcedure = db.prepare(
    'INSERT INTO vec_procedures(id, embedding, state) VALUES (?, ?, ?)'
  );
  const insertCausalLink = db.prepare(`
    INSERT INTO causal_links (id, cause_id, effect_id, link_type, mechanism, confidence, evidence_count, created_at)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
  `);
  const insertContradiction = db.prepare(`
    INSERT INTO contradictions (id, claim_a_id, claim_a_type, claim_b_id, claim_b_type,
      state, resolution, resolved_at, reopened_at, reopen_evidence_id, created_at)
    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
  `);
  const insertConsolidationRun = db.prepare(`
    INSERT INTO consolidation_runs (id, input_episode_ids, output_memory_ids, started_at, completed_at, status)
    VALUES (?, ?, ?, ?, ?, ?)
  `);

  // Phase 2 (sync): all inserts in one transaction for atomicity.
  const insertAll = db.transaction(() => {
    snapshot.episodes.forEach((ep, i) => {
      insertEpisode.run(
        ep.id, ep.content, ep.source, ep.source_reliability, ep.salience ?? 0.5,
        ep.tags ? JSON.stringify(ep.tags) : null,
        ep.causal_trigger ?? null, ep.causal_consequence ?? null,
        ep.created_at, ep.supersedes ?? null, ep.superseded_by ?? null, ep.consolidated ?? 0,
      );
      // vec0 virtual tables take integer columns as BigInt.
      insertVecEpisode.run(ep.id, episodeBuffers[i], ep.source, BigInt(ep.consolidated ?? 0));
    });

    semantics.forEach((sem, i) => {
      insertSemantic.run(
        sem.id, sem.content, sem.state, sem.conditions ?? null,
        JSON.stringify(sem.evidence_episode_ids || []),
        sem.evidence_count ?? 0, sem.supporting_count ?? 0, sem.contradicting_count ?? 0,
        sem.source_type_diversity ?? 0, sem.consolidation_checkpoint ?? null,
        sem.created_at, sem.last_reinforced_at ?? null, sem.retrieval_count ?? 0, sem.challenge_count ?? 0,
      );
      insertVecSemantic.run(sem.id, semanticBuffers[i], sem.state);
    });

    procedures.forEach((proc, i) => {
      insertProcedure.run(
        proc.id, proc.content, proc.state, proc.trigger_conditions ?? null,
        JSON.stringify(proc.evidence_episode_ids || []),
        proc.success_count ?? 0, proc.failure_count ?? 0,
        proc.created_at, proc.last_reinforced_at ?? null, proc.retrieval_count ?? 0,
      );
      insertVecProcedure.run(proc.id, procedureBuffers[i], proc.state);
    });

    for (const link of causalLinks) {
      insertCausalLink.run(
        link.id, link.cause_id, link.effect_id, link.link_type ?? 'causal',
        link.mechanism ?? null, link.confidence ?? null, link.evidence_count ?? 1, link.created_at,
      );
    }

    for (const con of contradictions) {
      insertContradiction.run(
        con.id, con.claim_a_id, con.claim_a_type, con.claim_b_id, con.claim_b_type,
        con.state, con.resolution ?? null, con.resolved_at ?? null,
        con.reopened_at ?? null, con.reopen_evidence_id ?? null, con.created_at,
      );
    }

    for (const run of consolidationRuns) {
      insertConsolidationRun.run(
        run.id, JSON.stringify(run.input_episode_ids || []),
        JSON.stringify(run.output_memory_ids || []),
        run.started_at ?? null, run.completed_at ?? null, run.status,
      );
    }
  });
  insertAll();
}
|
package/src/index.js
CHANGED
package/src/llm.js
CHANGED
|
@@ -92,10 +92,11 @@ export class MockLLMProvider {
|
|
|
92
92
|
/** @implements {LLMProvider} */
|
|
93
93
|
export class AnthropicLLMProvider {
|
|
94
94
|
/** @param {Partial<AnthropicLLMConfig>} [config={}] */
|
|
95
|
-
constructor({ apiKey, model = 'claude-sonnet-4-6', maxTokens = 1024 } = {}) {
|
|
95
|
+
constructor({ apiKey, model = 'claude-sonnet-4-6', maxTokens = 1024, timeout = 30000 } = {}) {
|
|
96
96
|
this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
|
|
97
97
|
this.model = model;
|
|
98
98
|
this.maxTokens = maxTokens;
|
|
99
|
+
this.timeout = timeout;
|
|
99
100
|
this.modelName = model;
|
|
100
101
|
this.modelVersion = 'latest';
|
|
101
102
|
}
|
|
@@ -116,23 +117,30 @@ export class AnthropicLLMProvider {
|
|
|
116
117
|
};
|
|
117
118
|
if (systemMsg) body.system = systemMsg;
|
|
118
119
|
|
|
119
|
-
const
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
120
|
+
const controller = new AbortController();
|
|
121
|
+
const timer = setTimeout(() => controller.abort(), this.timeout);
|
|
122
|
+
try {
|
|
123
|
+
const response = await fetch('https://api.anthropic.com/v1/messages', {
|
|
124
|
+
method: 'POST',
|
|
125
|
+
headers: {
|
|
126
|
+
'x-api-key': this.apiKey,
|
|
127
|
+
'anthropic-version': '2023-06-01',
|
|
128
|
+
'content-type': 'application/json',
|
|
129
|
+
},
|
|
130
|
+
body: JSON.stringify(body),
|
|
131
|
+
signal: controller.signal,
|
|
132
|
+
});
|
|
133
|
+
|
|
134
|
+
if (!response.ok) {
|
|
135
|
+
throw new Error(`Anthropic API error: ${response.status}`);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
const data = await response.json();
|
|
139
|
+
const text = data.content?.[0]?.text || '';
|
|
140
|
+
return { content: text };
|
|
141
|
+
} finally {
|
|
142
|
+
clearTimeout(timer);
|
|
131
143
|
}
|
|
132
|
-
|
|
133
|
-
const data = await response.json();
|
|
134
|
-
const text = data.content?.[0]?.text || '';
|
|
135
|
-
return { content: text };
|
|
136
144
|
}
|
|
137
145
|
|
|
138
146
|
/**
|
|
@@ -142,17 +150,22 @@ export class AnthropicLLMProvider {
|
|
|
142
150
|
*/
|
|
143
151
|
async json(messages, options = {}) {
|
|
144
152
|
const result = await this.complete(messages, options);
|
|
145
|
-
|
|
153
|
+
try {
|
|
154
|
+
return JSON.parse(result.content);
|
|
155
|
+
} catch {
|
|
156
|
+
throw new Error(`Failed to parse LLM response as JSON: ${result.content.slice(0, 200)}`);
|
|
157
|
+
}
|
|
146
158
|
}
|
|
147
159
|
}
|
|
148
160
|
|
|
149
161
|
/** @implements {LLMProvider} */
|
|
150
162
|
export class OpenAILLMProvider {
|
|
151
163
|
/** @param {Partial<OpenAILLMConfig>} [config={}] */
|
|
152
|
-
constructor({ apiKey, model = 'gpt-4o', maxTokens = 1024 } = {}) {
|
|
164
|
+
constructor({ apiKey, model = 'gpt-4o', maxTokens = 1024, timeout = 30000 } = {}) {
|
|
153
165
|
this.apiKey = apiKey || process.env.OPENAI_API_KEY;
|
|
154
166
|
this.model = model;
|
|
155
167
|
this.maxTokens = maxTokens;
|
|
168
|
+
this.timeout = timeout;
|
|
156
169
|
this.modelName = model;
|
|
157
170
|
this.modelVersion = 'latest';
|
|
158
171
|
}
|
|
@@ -169,22 +182,29 @@ export class OpenAILLMProvider {
|
|
|
169
182
|
messages,
|
|
170
183
|
};
|
|
171
184
|
|
|
172
|
-
const
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
185
|
+
const controller = new AbortController();
|
|
186
|
+
const timer = setTimeout(() => controller.abort(), this.timeout);
|
|
187
|
+
try {
|
|
188
|
+
const response = await fetch('https://api.openai.com/v1/chat/completions', {
|
|
189
|
+
method: 'POST',
|
|
190
|
+
headers: {
|
|
191
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
192
|
+
'Content-Type': 'application/json',
|
|
193
|
+
},
|
|
194
|
+
body: JSON.stringify(body),
|
|
195
|
+
signal: controller.signal,
|
|
196
|
+
});
|
|
197
|
+
|
|
198
|
+
if (!response.ok) {
|
|
199
|
+
throw new Error(`OpenAI API error: ${response.status}`);
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
const data = await response.json();
|
|
203
|
+
const text = data.choices?.[0]?.message?.content || '';
|
|
204
|
+
return { content: text };
|
|
205
|
+
} finally {
|
|
206
|
+
clearTimeout(timer);
|
|
183
207
|
}
|
|
184
|
-
|
|
185
|
-
const data = await response.json();
|
|
186
|
-
const text = data.choices?.[0]?.message?.content || '';
|
|
187
|
-
return { content: text };
|
|
188
208
|
}
|
|
189
209
|
|
|
190
210
|
/**
|
|
@@ -194,7 +214,11 @@ export class OpenAILLMProvider {
|
|
|
194
214
|
*/
|
|
195
215
|
async json(messages, options = {}) {
|
|
196
216
|
const result = await this.complete(messages, options);
|
|
197
|
-
|
|
217
|
+
try {
|
|
218
|
+
return JSON.parse(result.content);
|
|
219
|
+
} catch {
|
|
220
|
+
throw new Error(`Failed to parse LLM response as JSON: ${result.content.slice(0, 200)}`);
|
|
221
|
+
}
|
|
198
222
|
}
|
|
199
223
|
}
|
|
200
224
|
|
package/src/recall.js
CHANGED
|
@@ -1,48 +1,57 @@
|
|
|
1
1
|
import { computeConfidence, DEFAULT_HALF_LIVES } from './confidence.js';
|
|
2
2
|
import { daysBetween, safeJsonParse } from './utils.js';
|
|
3
3
|
|
|
4
|
-
function computeEpisodicConfidence(ep, now) {
|
|
4
|
+
function computeEpisodicConfidence(ep, now, confidenceConfig = {}) {
|
|
5
5
|
const ageDays = daysBetween(ep.created_at, now);
|
|
6
|
+
const halfLives = confidenceConfig.halfLives || DEFAULT_HALF_LIVES;
|
|
6
7
|
return computeConfidence({
|
|
7
8
|
sourceType: ep.source,
|
|
8
9
|
supportingCount: 1,
|
|
9
10
|
contradictingCount: 0,
|
|
10
11
|
ageDays,
|
|
11
|
-
halfLifeDays: DEFAULT_HALF_LIVES.episodic,
|
|
12
|
+
halfLifeDays: halfLives.episodic ?? DEFAULT_HALF_LIVES.episodic,
|
|
12
13
|
retrievalCount: 0,
|
|
13
14
|
daysSinceRetrieval: ageDays,
|
|
15
|
+
weights: confidenceConfig.weights,
|
|
16
|
+
customSourceReliability: confidenceConfig.sourceReliability,
|
|
14
17
|
});
|
|
15
18
|
}
|
|
16
19
|
|
|
17
|
-
function computeSemanticConfidence(sem, now) {
|
|
20
|
+
function computeSemanticConfidence(sem, now, confidenceConfig = {}) {
|
|
18
21
|
const ageDays = daysBetween(sem.created_at, now);
|
|
19
22
|
const daysSinceRetrieval = sem.last_reinforced_at
|
|
20
23
|
? daysBetween(sem.last_reinforced_at, now)
|
|
21
24
|
: ageDays;
|
|
25
|
+
const halfLives = confidenceConfig.halfLives || DEFAULT_HALF_LIVES;
|
|
22
26
|
return computeConfidence({
|
|
23
27
|
sourceType: 'tool-result',
|
|
24
28
|
supportingCount: sem.supporting_count || 0,
|
|
25
29
|
contradictingCount: sem.contradicting_count || 0,
|
|
26
30
|
ageDays,
|
|
27
|
-
halfLifeDays: DEFAULT_HALF_LIVES.semantic,
|
|
31
|
+
halfLifeDays: halfLives.semantic ?? DEFAULT_HALF_LIVES.semantic,
|
|
28
32
|
retrievalCount: sem.retrieval_count || 0,
|
|
29
33
|
daysSinceRetrieval,
|
|
34
|
+
weights: confidenceConfig.weights,
|
|
35
|
+
customSourceReliability: confidenceConfig.sourceReliability,
|
|
30
36
|
});
|
|
31
37
|
}
|
|
32
38
|
|
|
33
|
-
function computeProceduralConfidence(proc, now) {
|
|
39
|
+
function computeProceduralConfidence(proc, now, confidenceConfig = {}) {
|
|
34
40
|
const ageDays = daysBetween(proc.created_at, now);
|
|
35
41
|
const daysSinceRetrieval = proc.last_reinforced_at
|
|
36
42
|
? daysBetween(proc.last_reinforced_at, now)
|
|
37
43
|
: ageDays;
|
|
44
|
+
const halfLives = confidenceConfig.halfLives || DEFAULT_HALF_LIVES;
|
|
38
45
|
return computeConfidence({
|
|
39
46
|
sourceType: 'tool-result',
|
|
40
47
|
supportingCount: proc.success_count || 0,
|
|
41
48
|
contradictingCount: proc.failure_count || 0,
|
|
42
49
|
ageDays,
|
|
43
|
-
halfLifeDays: DEFAULT_HALF_LIVES.procedural,
|
|
50
|
+
halfLifeDays: halfLives.procedural ?? DEFAULT_HALF_LIVES.procedural,
|
|
44
51
|
retrievalCount: proc.retrieval_count || 0,
|
|
45
52
|
daysSinceRetrieval,
|
|
53
|
+
weights: confidenceConfig.weights,
|
|
54
|
+
customSourceReliability: confidenceConfig.sourceReliability,
|
|
46
55
|
});
|
|
47
56
|
}
|
|
48
57
|
|
|
@@ -112,7 +121,7 @@ function buildProceduralEntry(proc, confidence, score, includeProvenance) {
|
|
|
112
121
|
return entry;
|
|
113
122
|
}
|
|
114
123
|
|
|
115
|
-
function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance) {
|
|
124
|
+
function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, confidenceConfig) {
|
|
116
125
|
const rows = db.prepare(`
|
|
117
126
|
SELECT e.*, (1.0 - v.distance) AS similarity
|
|
118
127
|
FROM vec_episodes v
|
|
@@ -124,7 +133,7 @@ function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includePro
|
|
|
124
133
|
|
|
125
134
|
const results = [];
|
|
126
135
|
for (const row of rows) {
|
|
127
|
-
const confidence = computeEpisodicConfidence(row, now);
|
|
136
|
+
const confidence = computeEpisodicConfidence(row, now, confidenceConfig);
|
|
128
137
|
if (confidence < minConfidence) continue;
|
|
129
138
|
const score = row.similarity * confidence;
|
|
130
139
|
results.push(buildEpisodicEntry(row, confidence, score, includeProvenance));
|
|
@@ -132,7 +141,7 @@ function knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includePro
|
|
|
132
141
|
return results;
|
|
133
142
|
}
|
|
134
143
|
|
|
135
|
-
function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant) {
|
|
144
|
+
function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig) {
|
|
136
145
|
let stateFilter;
|
|
137
146
|
if (includeDormant) {
|
|
138
147
|
stateFilter = "AND (v.state = 'active' OR v.state = 'context_dependent' OR v.state = 'dormant')";
|
|
@@ -152,7 +161,7 @@ function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includePro
|
|
|
152
161
|
const results = [];
|
|
153
162
|
const matchedIds = [];
|
|
154
163
|
for (const row of rows) {
|
|
155
|
-
const confidence = computeSemanticConfidence(row, now);
|
|
164
|
+
const confidence = computeSemanticConfidence(row, now, confidenceConfig);
|
|
156
165
|
if (confidence < minConfidence) continue;
|
|
157
166
|
const score = row.similarity * confidence;
|
|
158
167
|
matchedIds.push(row.id);
|
|
@@ -161,7 +170,7 @@ function knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includePro
|
|
|
161
170
|
return { results, matchedIds };
|
|
162
171
|
}
|
|
163
172
|
|
|
164
|
-
function knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant) {
|
|
173
|
+
function knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig) {
|
|
165
174
|
let stateFilter;
|
|
166
175
|
if (includeDormant) {
|
|
167
176
|
stateFilter = "AND (v.state = 'active' OR v.state = 'context_dependent' OR v.state = 'dormant')";
|
|
@@ -181,7 +190,7 @@ function knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeP
|
|
|
181
190
|
const results = [];
|
|
182
191
|
const matchedIds = [];
|
|
183
192
|
for (const row of rows) {
|
|
184
|
-
const confidence = computeProceduralConfidence(row, now);
|
|
193
|
+
const confidence = computeProceduralConfidence(row, now, confidenceConfig);
|
|
185
194
|
if (confidence < minConfidence) continue;
|
|
186
195
|
const score = row.similarity * confidence;
|
|
187
196
|
matchedIds.push(row.id);
|
|
@@ -204,6 +213,7 @@ export async function* recallStream(db, embeddingProvider, query, options = {})
|
|
|
204
213
|
limit = 10,
|
|
205
214
|
includeProvenance = false,
|
|
206
215
|
includeDormant = false,
|
|
216
|
+
confidenceConfig,
|
|
207
217
|
} = options;
|
|
208
218
|
|
|
209
219
|
const queryVector = await embeddingProvider.embed(query);
|
|
@@ -215,13 +225,13 @@ export async function* recallStream(db, embeddingProvider, query, options = {})
|
|
|
215
225
|
const allResults = [];
|
|
216
226
|
|
|
217
227
|
if (searchTypes.includes('episodic')) {
|
|
218
|
-
const episodic = knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance);
|
|
228
|
+
const episodic = knnEpisodic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, confidenceConfig);
|
|
219
229
|
allResults.push(...episodic);
|
|
220
230
|
}
|
|
221
231
|
|
|
222
232
|
if (searchTypes.includes('semantic')) {
|
|
223
233
|
const { results: semResults, matchedIds: semIds } =
|
|
224
|
-
knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant);
|
|
234
|
+
knnSemantic(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig);
|
|
225
235
|
allResults.push(...semResults);
|
|
226
236
|
|
|
227
237
|
if (semIds.length > 0) {
|
|
@@ -237,7 +247,7 @@ export async function* recallStream(db, embeddingProvider, query, options = {})
|
|
|
237
247
|
|
|
238
248
|
if (searchTypes.includes('procedural')) {
|
|
239
249
|
const { results: procResults, matchedIds: procIds } =
|
|
240
|
-
knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant);
|
|
250
|
+
knnProcedural(db, queryBuffer, candidateK, now, minConfidence, includeProvenance, includeDormant, confidenceConfig);
|
|
241
251
|
allResults.push(...procResults);
|
|
242
252
|
|
|
243
253
|
if (procIds.length > 0) {
|