@framers/agentos 0.2.12 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178)
  1. package/dist/ingest-router/executors/EntityExtractor.d.ts +23 -0
  2. package/dist/ingest-router/executors/EntityExtractor.d.ts.map +1 -0
  3. package/dist/ingest-router/executors/EntityExtractor.js +69 -0
  4. package/dist/ingest-router/executors/EntityExtractor.js.map +1 -0
  5. package/dist/ingest-router/executors/EntityLinkingIngestExecutor.d.ts +46 -0
  6. package/dist/ingest-router/executors/EntityLinkingIngestExecutor.d.ts.map +1 -0
  7. package/dist/ingest-router/executors/EntityLinkingIngestExecutor.js +45 -0
  8. package/dist/ingest-router/executors/EntityLinkingIngestExecutor.js.map +1 -0
  9. package/dist/ingest-router/executors/entity-types.d.ts +55 -0
  10. package/dist/ingest-router/executors/entity-types.d.ts.map +1 -0
  11. package/dist/ingest-router/executors/entity-types.js +17 -0
  12. package/dist/ingest-router/executors/entity-types.js.map +1 -0
  13. package/dist/ingest-router/executors/index.d.ts +7 -0
  14. package/dist/ingest-router/executors/index.d.ts.map +1 -1
  15. package/dist/ingest-router/executors/index.js +6 -0
  16. package/dist/ingest-router/executors/index.js.map +1 -1
  17. package/dist/ingest-router/index.d.ts +2 -2
  18. package/dist/ingest-router/index.d.ts.map +1 -1
  19. package/dist/ingest-router/index.js +1 -1
  20. package/dist/ingest-router/index.js.map +1 -1
  21. package/dist/memory/AgentMemory.js +1 -1
  22. package/dist/memory/AgentMemory.js.map +1 -1
  23. package/dist/memory/CognitiveMemoryManager.js +4 -4
  24. package/dist/memory/CognitiveMemoryManager.js.map +1 -1
  25. package/dist/memory/archive/IMemoryArchive.d.ts +2 -2
  26. package/dist/memory/archive/SqlStorageMemoryArchive.d.ts +17 -13
  27. package/dist/memory/archive/SqlStorageMemoryArchive.d.ts.map +1 -1
  28. package/dist/memory/archive/SqlStorageMemoryArchive.js +36 -28
  29. package/dist/memory/archive/SqlStorageMemoryArchive.js.map +1 -1
  30. package/dist/memory/core/config.d.ts +4 -4
  31. package/dist/memory/core/config.d.ts.map +1 -1
  32. package/dist/memory/index.d.ts +3 -3
  33. package/dist/memory/index.d.ts.map +1 -1
  34. package/dist/memory/index.js +3 -3
  35. package/dist/memory/index.js.map +1 -1
  36. package/dist/memory/io/ChatGptImporter.d.ts +5 -5
  37. package/dist/memory/io/ChatGptImporter.d.ts.map +1 -1
  38. package/dist/memory/io/ChatGptImporter.js +9 -7
  39. package/dist/memory/io/ChatGptImporter.js.map +1 -1
  40. package/dist/memory/io/CsvImporter.d.ts +4 -4
  41. package/dist/memory/io/CsvImporter.d.ts.map +1 -1
  42. package/dist/memory/io/CsvImporter.js +11 -8
  43. package/dist/memory/io/CsvImporter.js.map +1 -1
  44. package/dist/memory/io/JsonExporter.d.ts +5 -5
  45. package/dist/memory/io/JsonExporter.d.ts.map +1 -1
  46. package/dist/memory/io/JsonExporter.js +13 -12
  47. package/dist/memory/io/JsonExporter.js.map +1 -1
  48. package/dist/memory/io/JsonImporter.d.ts +5 -5
  49. package/dist/memory/io/JsonImporter.d.ts.map +1 -1
  50. package/dist/memory/io/JsonImporter.js +50 -34
  51. package/dist/memory/io/JsonImporter.js.map +1 -1
  52. package/dist/memory/io/MarkdownExporter.d.ts +4 -4
  53. package/dist/memory/io/MarkdownExporter.d.ts.map +1 -1
  54. package/dist/memory/io/MarkdownExporter.js +1 -1
  55. package/dist/memory/io/MarkdownExporter.js.map +1 -1
  56. package/dist/memory/io/MarkdownImporter.d.ts +6 -6
  57. package/dist/memory/io/MarkdownImporter.d.ts.map +1 -1
  58. package/dist/memory/io/MarkdownImporter.js +8 -7
  59. package/dist/memory/io/MarkdownImporter.js.map +1 -1
  60. package/dist/memory/io/ObsidianImporter.d.ts +4 -4
  61. package/dist/memory/io/ObsidianImporter.d.ts.map +1 -1
  62. package/dist/memory/io/ObsidianImporter.js +15 -10
  63. package/dist/memory/io/ObsidianImporter.js.map +1 -1
  64. package/dist/memory/io/SqliteExporter.d.ts +5 -5
  65. package/dist/memory/io/SqliteExporter.d.ts.map +1 -1
  66. package/dist/memory/io/SqliteExporter.js +3 -3
  67. package/dist/memory/io/SqliteExporter.js.map +1 -1
  68. package/dist/memory/io/SqliteImporter.d.ts +4 -4
  69. package/dist/memory/io/SqliteImporter.d.ts.map +1 -1
  70. package/dist/memory/io/SqliteImporter.js +23 -16
  71. package/dist/memory/io/SqliteImporter.js.map +1 -1
  72. package/dist/memory/io/facade/Memory.d.ts +58 -10
  73. package/dist/memory/io/facade/Memory.d.ts.map +1 -1
  74. package/dist/memory/io/facade/Memory.js +124 -50
  75. package/dist/memory/io/facade/Memory.js.map +1 -1
  76. package/dist/memory/io/facade/types.d.ts +1 -1
  77. package/dist/memory/io/index.d.ts +2 -2
  78. package/dist/memory/io/index.js +2 -2
  79. package/dist/memory/io/tools/MemoryAddTool.d.ts +2 -2
  80. package/dist/memory/io/tools/MemoryAddTool.d.ts.map +1 -1
  81. package/dist/memory/io/tools/MemoryAddTool.js +2 -2
  82. package/dist/memory/io/tools/MemoryAddTool.js.map +1 -1
  83. package/dist/memory/io/tools/MemoryDeleteTool.d.ts +2 -2
  84. package/dist/memory/io/tools/MemoryDeleteTool.d.ts.map +1 -1
  85. package/dist/memory/io/tools/MemoryDeleteTool.js +1 -1
  86. package/dist/memory/io/tools/MemoryDeleteTool.js.map +1 -1
  87. package/dist/memory/io/tools/MemoryMergeTool.d.ts +2 -2
  88. package/dist/memory/io/tools/MemoryMergeTool.d.ts.map +1 -1
  89. package/dist/memory/io/tools/MemoryMergeTool.js +4 -3
  90. package/dist/memory/io/tools/MemoryMergeTool.js.map +1 -1
  91. package/dist/memory/io/tools/MemoryReflectTool.d.ts +2 -2
  92. package/dist/memory/io/tools/MemoryReflectTool.d.ts.map +1 -1
  93. package/dist/memory/io/tools/MemoryReflectTool.js.map +1 -1
  94. package/dist/memory/io/tools/MemorySearchTool.d.ts +2 -2
  95. package/dist/memory/io/tools/MemorySearchTool.d.ts.map +1 -1
  96. package/dist/memory/io/tools/MemorySearchTool.js.map +1 -1
  97. package/dist/memory/io/tools/MemoryUpdateTool.d.ts +2 -2
  98. package/dist/memory/io/tools/MemoryUpdateTool.d.ts.map +1 -1
  99. package/dist/memory/io/tools/MemoryUpdateTool.js +5 -4
  100. package/dist/memory/io/tools/MemoryUpdateTool.js.map +1 -1
  101. package/dist/memory/pipeline/consolidation/ConsolidationLoop.d.ts +3 -3
  102. package/dist/memory/pipeline/consolidation/ConsolidationLoop.d.ts.map +1 -1
  103. package/dist/memory/pipeline/consolidation/ConsolidationLoop.js +22 -17
  104. package/dist/memory/pipeline/consolidation/ConsolidationLoop.js.map +1 -1
  105. package/dist/memory/retrieval/feedback/RetrievalFeedbackSignal.d.ts +3 -3
  106. package/dist/memory/retrieval/feedback/RetrievalFeedbackSignal.d.ts.map +1 -1
  107. package/dist/memory/retrieval/feedback/RetrievalFeedbackSignal.js +15 -12
  108. package/dist/memory/retrieval/feedback/RetrievalFeedbackSignal.js.map +1 -1
  109. package/dist/memory/retrieval/graph/index.d.ts +0 -1
  110. package/dist/memory/retrieval/graph/index.d.ts.map +1 -1
  111. package/dist/memory/retrieval/graph/index.js +4 -1
  112. package/dist/memory/retrieval/graph/index.js.map +1 -1
  113. package/dist/memory/retrieval/store/{SqliteBrain.d.ts → Brain.d.ts} +106 -23
  114. package/dist/memory/retrieval/store/Brain.d.ts.map +1 -0
  115. package/dist/memory/retrieval/store/Brain.js +898 -0
  116. package/dist/memory/retrieval/store/Brain.js.map +1 -0
  117. package/dist/memory/retrieval/store/HnswSidecar.d.ts +1 -1
  118. package/dist/memory/retrieval/store/HnswSidecar.js +1 -1
  119. package/dist/memory/retrieval/store/MemoryStore.d.ts +6 -6
  120. package/dist/memory/retrieval/store/MemoryStore.d.ts.map +1 -1
  121. package/dist/memory/retrieval/store/MemoryStore.js +10 -9
  122. package/dist/memory/retrieval/store/MemoryStore.js.map +1 -1
  123. package/dist/memory/retrieval/store/{SqliteKnowledgeGraph.d.ts → SqlKnowledgeGraph.d.ts} +12 -12
  124. package/dist/memory/retrieval/store/SqlKnowledgeGraph.d.ts.map +1 -0
  125. package/dist/memory/retrieval/store/{SqliteKnowledgeGraph.js → SqlKnowledgeGraph.js} +83 -64
  126. package/dist/memory/retrieval/store/SqlKnowledgeGraph.js.map +1 -0
  127. package/dist/memory/retrieval/store/{SqliteMemoryGraph.d.ts → SqlMemoryGraph.d.ts} +11 -11
  128. package/dist/memory/retrieval/store/SqlMemoryGraph.d.ts.map +1 -0
  129. package/dist/memory/retrieval/store/{SqliteMemoryGraph.js → SqlMemoryGraph.js} +26 -24
  130. package/dist/memory/retrieval/store/SqlMemoryGraph.js.map +1 -0
  131. package/dist/memory/retrieval/store/migrations/MigrationRunner.d.ts +50 -0
  132. package/dist/memory/retrieval/store/migrations/MigrationRunner.d.ts.map +1 -0
  133. package/dist/memory/retrieval/store/migrations/MigrationRunner.js +100 -0
  134. package/dist/memory/retrieval/store/migrations/MigrationRunner.js.map +1 -0
  135. package/dist/memory/retrieval/store/migrations/index.d.ts +20 -0
  136. package/dist/memory/retrieval/store/migrations/index.d.ts.map +1 -0
  137. package/dist/memory/retrieval/store/migrations/index.js +19 -0
  138. package/dist/memory/retrieval/store/migrations/index.js.map +1 -0
  139. package/dist/memory/retrieval/store/migrations/types.d.ts +28 -0
  140. package/dist/memory/retrieval/store/migrations/types.d.ts.map +1 -0
  141. package/dist/memory/retrieval/store/migrations/types.js +7 -0
  142. package/dist/memory/retrieval/store/migrations/types.js.map +1 -0
  143. package/dist/memory/retrieval/store/migrations/v1-to-v2.d.ts +40 -0
  144. package/dist/memory/retrieval/store/migrations/v1-to-v2.d.ts.map +1 -0
  145. package/dist/memory/retrieval/store/migrations/v1-to-v2.js +491 -0
  146. package/dist/memory/retrieval/store/migrations/v1-to-v2.js.map +1 -0
  147. package/dist/memory/retrieval/store/portable-tables.d.ts +27 -0
  148. package/dist/memory/retrieval/store/portable-tables.d.ts.map +1 -0
  149. package/dist/memory/retrieval/store/portable-tables.js +56 -0
  150. package/dist/memory/retrieval/store/portable-tables.js.map +1 -0
  151. package/dist/memory-router/backends/EntityRetrievalRanker.d.ts +54 -0
  152. package/dist/memory-router/backends/EntityRetrievalRanker.d.ts.map +1 -0
  153. package/dist/memory-router/backends/EntityRetrievalRanker.js +39 -0
  154. package/dist/memory-router/backends/EntityRetrievalRanker.js.map +1 -0
  155. package/dist/memory-router/backends/index.d.ts +16 -0
  156. package/dist/memory-router/backends/index.d.ts.map +1 -0
  157. package/dist/memory-router/backends/index.js +16 -0
  158. package/dist/memory-router/backends/index.js.map +1 -0
  159. package/dist/memory-router/index.d.ts +2 -0
  160. package/dist/memory-router/index.d.ts.map +1 -1
  161. package/dist/memory-router/index.js +4 -0
  162. package/dist/memory-router/index.js.map +1 -1
  163. package/dist/rag/utils/vectorMath.d.ts +1 -1
  164. package/dist/rag/utils/vectorMath.js +1 -1
  165. package/dist/rag/vector-search/HnswIndexSidecar.d.ts +1 -1
  166. package/dist/rag/vector-search/HnswIndexSidecar.js +1 -1
  167. package/package.json +2 -2
  168. package/dist/memory/retrieval/graph/knowledge/SqliteKnowledgeGraph.d.ts +0 -10
  169. package/dist/memory/retrieval/graph/knowledge/SqliteKnowledgeGraph.d.ts.map +0 -1
  170. package/dist/memory/retrieval/graph/knowledge/SqliteKnowledgeGraph.js +0 -10
  171. package/dist/memory/retrieval/graph/knowledge/SqliteKnowledgeGraph.js.map +0 -1
  172. package/dist/memory/retrieval/store/SqliteBrain.d.ts.map +0 -1
  173. package/dist/memory/retrieval/store/SqliteBrain.js +0 -553
  174. package/dist/memory/retrieval/store/SqliteBrain.js.map +0 -1
  175. package/dist/memory/retrieval/store/SqliteKnowledgeGraph.d.ts.map +0 -1
  176. package/dist/memory/retrieval/store/SqliteKnowledgeGraph.js.map +0 -1
  177. package/dist/memory/retrieval/store/SqliteMemoryGraph.d.ts.map +0 -1
  178. package/dist/memory/retrieval/store/SqliteMemoryGraph.js.map +0 -1
@@ -0,0 +1,898 @@
1
+ /**
2
+ * @fileoverview Unified SQLite connection manager for a single agent's long-term brain.
3
+ *
4
+ * One `brain.sqlite` file stores everything the memory ingestion engine needs:
5
+ * memory traces, knowledge graph nodes/edges, document ingestion records,
6
+ * conversation history, consolidation logs, and retrieval feedback signals.
7
+ *
8
+ * ## Cognitive science grounding
9
+ * The schema mirrors Tulving's LTM taxonomy:
10
+ * - `memory_traces` → episodic + semantic + procedural + prospective memories
11
+ * - `knowledge_nodes/edges` → semantic network (Collins & Quillian spreading-activation model)
12
+ * - `documents/chunks` → external world model (grounded episodic encoding)
13
+ * - `conversations/messages` → episodic conversational buffer
14
+ * - `consolidation_log` → slow-wave sleep analogue (offline consolidation events)
15
+ * - `retrieval_feedback` → Hebbian reinforcement ("neurons that fire together wire together")
16
+ *
17
+ * ## Storage design choices
18
+ * - **Cross-platform**: Uses `@framers/sql-storage-adapter` StorageAdapter interface,
19
+ * enabling browser (IndexedDB/sql.js), mobile (Capacitor), and Postgres backends
20
+ * in addition to the default Node.js better-sqlite3 path.
21
+ * - **WAL mode**: allows concurrent reads during writes (when adapter supports it).
22
+ * - **FTS5 with Porter tokenizer**: enables fast full-text search over memory content with
23
+ * morphological stemming (retrieval cue → "retriev*").
24
+ * - **Embeddings as BLOBs**: raw Float32Array buffers stored directly — no external vector DB
25
+ * dependency for the SQLite-backed path; vector similarity runs in-process via HNSW.
26
+ * - **JSON columns**: tags, emotions, metadata stored as JSON TEXT for schema flexibility
27
+ * without sacrificing query-ability via SQLite's json_extract().
28
+ *
29
+ * @module memory/store/Brain
30
+ */
31
/**
 * TypeScript-emitted runtime helper (tslib `__classPrivateFieldSet`): writes
 * `value` into a compiled private class member on `receiver`.
 *
 * `kind` selects the member flavor: "m" = private method (never writable),
 * "a" = private accessor (requires a setter `f`), anything else = private
 * field. `state` is either the declaring class itself (static members,
 * checked via `receiver !== state`) or a WeakMap-like per-instance store.
 * Reuses a pre-existing global implementation when one is present.
 */
var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) {
    // Private methods are read-only by definition.
    if (kind === "m") throw new TypeError("Private method is not writable");
    // Accessor writes go through the setter; reject accessors declared without one.
    if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter");
    // Brand check: the receiver must actually carry this private member.
    if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it");
    // Accessor -> call setter; backing slot -> assign; otherwise store in the per-instance map.
    return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;
};
37
/**
 * TypeScript-emitted runtime helper (tslib `__classPrivateFieldGet`): reads a
 * compiled private class member from `receiver`.
 *
 * `kind` selects the member flavor: "m" = private method (the function itself
 * is returned), "a" = private accessor (requires a getter `f`), anything else
 * = private field read from its backing store. `state` is either the declaring
 * class (static members) or a WeakMap-like per-instance store.
 * Reuses a pre-existing global implementation when one is present.
 */
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
    // Accessor reads go through the getter; reject accessors declared without one.
    if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
    // Brand check: the receiver must actually carry this private member.
    if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
    // Method -> the function; accessor -> call getter; field -> backing slot or map lookup.
    return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
};
42
+ var _Brain_brainId;
43
+ import { promises as fs } from 'node:fs';
44
+ import path from 'node:path';
45
+ import { resolveStorageAdapter, createStorageFeatures, createPostgresAdapter } from '@framers/sql-storage-adapter';
46
+ import { DDL_ARCHIVED_TRACES, DDL_ARCHIVED_TRACES_IDX_AGENT_TIME, DDL_ARCHIVED_TRACES_IDX_REASON, DDL_ARCHIVE_ACCESS_LOG, DDL_ARCHIVE_ACCESS_LOG_IDX, } from '../../archive/SqlStorageMemoryArchive.js';
47
+ import { MigrationRunner, MIGRATIONS, LATEST_SCHEMA_VERSION } from './migrations/index.js';
48
+ import { PORTABLE_TABLES, PORTABLE_TABLE_PRIMARY_KEYS } from './portable-tables.js';
49
/**
 * Compute a stable brain identifier from a database file path.
 *
 * The special `:memory:` path maps to `'default'`. Any other path is reduced
 * to its basename with the final extension stripped, e.g.
 * `companion-alice.sqlite` → `companion-alice`, `foo.brain.sqlite` →
 * `foo.brain`. Dotfile-style names with no stem (e.g. `.hidden`) are kept
 * whole.
 *
 * Used by {@link Brain.open} when the caller does not supply an explicit
 * `brainId`.
 */
function deriveBrainIdFromPath(dbPath) {
    if (dbPath === ':memory:') {
        return 'default';
    }
    const fileName = path.basename(dbPath);
    const extensionDot = fileName.lastIndexOf('.');
    if (extensionDot <= 0) {
        return fileName;
    }
    return fileName.slice(0, extensionDot);
}
66
/**
 * Mask the password portion of a Postgres connection string so it can be
 * embedded safely in error messages.
 *
 * `postgresql://user:secret@host/db` → `postgresql://user:***@host/db`.
 * Strings that carry no `user:password@` credential segment are returned
 * untouched.
 */
function redactPostgresPassword(connStr) {
    const credentialSegment = /(:\/\/[^:]+:)[^@]+(@)/;
    return connStr.replace(credentialSegment, (_whole, userPrefix, atSign) => `${userPrefix}***${atSign}`);
}
76
+ // ---------------------------------------------------------------------------
77
+ // Constants
78
+ // ---------------------------------------------------------------------------
79
+ // SCHEMA_VERSION moved to migrations/index.ts as LATEST_SCHEMA_VERSION
80
+ // (derived from the highest registered migration, so adding v2-to-v3.ts
81
+ // auto-bumps the seed value).
82
+ // ---------------------------------------------------------------------------
83
+ // DDL — full schema
84
+ // ---------------------------------------------------------------------------
85
+ /**
86
+ * Brain metadata key-value store.
87
+ * Used for versioning, agent identity, and embedding configuration.
88
+ */
89
+ const DDL_BRAIN_META = `
90
+ CREATE TABLE IF NOT EXISTS brain_meta (
91
+ brain_id TEXT NOT NULL,
92
+ key TEXT NOT NULL,
93
+ value TEXT NOT NULL,
94
+ PRIMARY KEY (brain_id, key)
95
+ );
96
+ `;
97
+ /**
98
+ * Core memory trace table (Tulving's unified trace model).
99
+ *
100
+ * Column notes:
101
+ * - `embedding` is a raw BLOB (Float32Array serialised as little-endian bytes).
102
+ * - `strength` is the Ebbinghaus retrievability R ∈ [0, 1].
103
+ * - `tags` / `emotions` / `metadata` are JSON TEXT columns.
104
+ * - `deleted` is a soft-delete flag (0 = active, 1 = tombstoned).
105
+ */
106
+ const DDL_MEMORY_TRACES = `
107
+ CREATE TABLE IF NOT EXISTS memory_traces (
108
+ brain_id TEXT NOT NULL,
109
+ id TEXT NOT NULL,
110
+ type TEXT NOT NULL,
111
+ scope TEXT NOT NULL,
112
+ content TEXT NOT NULL,
113
+ embedding BLOB,
114
+ strength REAL NOT NULL DEFAULT 1.0,
115
+ created_at INTEGER NOT NULL,
116
+ last_accessed INTEGER,
117
+ retrieval_count INTEGER NOT NULL DEFAULT 0,
118
+ tags TEXT NOT NULL DEFAULT '[]',
119
+ emotions TEXT NOT NULL DEFAULT '{}',
120
+ metadata TEXT NOT NULL DEFAULT '{}',
121
+ deleted INTEGER NOT NULL DEFAULT 0,
122
+ PRIMARY KEY (brain_id, id)
123
+ );
124
+
125
+ CREATE INDEX IF NOT EXISTS idx_memory_traces_brain_type
126
+ ON memory_traces (brain_id, type, created_at DESC);
127
+ CREATE INDEX IF NOT EXISTS idx_memory_traces_brain_scope
128
+ ON memory_traces (brain_id, scope);
129
+ `;
130
+ // FTS index DDL is now generated dynamically by features.fts.createIndex()
131
+ // to support both SQLite FTS5 and Postgres tsvector/GIN.
132
+ /**
133
+ * Knowledge graph nodes (semantic network).
134
+ * Each node represents a real-world entity or concept the agent has learned about.
135
+ *
136
+ * `properties` is a JSON TEXT column holding arbitrary typed attributes.
137
+ * `source` is a JSON TEXT provenance reference.
138
+ * `confidence` ∈ [0, 1] — certainty of this node's existence / accuracy.
139
+ */
140
+ const DDL_KNOWLEDGE_NODES = `
141
+ CREATE TABLE IF NOT EXISTS knowledge_nodes (
142
+ brain_id TEXT NOT NULL,
143
+ id TEXT NOT NULL,
144
+ type TEXT NOT NULL,
145
+ label TEXT NOT NULL,
146
+ properties TEXT NOT NULL DEFAULT '{}',
147
+ embedding BLOB,
148
+ confidence REAL NOT NULL DEFAULT 1.0,
149
+ source TEXT NOT NULL DEFAULT '{}',
150
+ created_at INTEGER NOT NULL,
151
+ PRIMARY KEY (brain_id, id)
152
+ );
153
+
154
+ CREATE INDEX IF NOT EXISTS idx_knowledge_nodes_brain_type
155
+ ON knowledge_nodes (brain_id, type);
156
+ `;
157
+ /**
158
+ * Knowledge graph edges (typed relationships).
159
+ * Models semantic links between knowledge nodes (e.g. IS_A, HAS_PART, CAUSED_BY).
160
+ *
161
+ * `bidirectional = 1` means the edge applies in both directions (e.g. SIBLING_OF).
162
+ * `weight` ∈ [0, 1] represents relationship strength / confidence.
163
+ */
164
+ const DDL_KNOWLEDGE_EDGES = `
165
+ CREATE TABLE IF NOT EXISTS knowledge_edges (
166
+ brain_id TEXT NOT NULL,
167
+ id TEXT NOT NULL,
168
+ source_id TEXT NOT NULL,
169
+ target_id TEXT NOT NULL,
170
+ type TEXT NOT NULL,
171
+ weight REAL NOT NULL DEFAULT 1.0,
172
+ bidirectional INTEGER NOT NULL DEFAULT 0,
173
+ metadata TEXT NOT NULL DEFAULT '{}',
174
+ created_at INTEGER NOT NULL,
175
+ PRIMARY KEY (brain_id, id),
176
+ FOREIGN KEY (brain_id, source_id) REFERENCES knowledge_nodes(brain_id, id),
177
+ FOREIGN KEY (brain_id, target_id) REFERENCES knowledge_nodes(brain_id, id)
178
+ );
179
+
180
+ CREATE INDEX IF NOT EXISTS idx_knowledge_edges_brain_source
181
+ ON knowledge_edges (brain_id, source_id);
182
+ CREATE INDEX IF NOT EXISTS idx_knowledge_edges_brain_target
183
+ ON knowledge_edges (brain_id, target_id);
184
+ `;
185
+ /**
186
+ * Ingested document registry.
187
+ *
188
+ * Tracks every external document (PDF, Markdown, web page, etc.) that has
189
+ * been chunked and embedded into this agent's brain.
190
+ *
191
+ * `content_hash` enables idempotent re-ingestion (skip if unchanged).
192
+ */
193
+ const DDL_DOCUMENTS = `
194
+ CREATE TABLE IF NOT EXISTS documents (
195
+ brain_id TEXT NOT NULL,
196
+ id TEXT NOT NULL,
197
+ path TEXT NOT NULL,
198
+ format TEXT NOT NULL,
199
+ title TEXT,
200
+ content_hash TEXT NOT NULL,
201
+ chunk_count INTEGER NOT NULL DEFAULT 0,
202
+ metadata TEXT NOT NULL DEFAULT '{}',
203
+ ingested_at INTEGER NOT NULL,
204
+ PRIMARY KEY (brain_id, id)
205
+ );
206
+ `;
207
+ /**
208
+ * Document chunk table.
209
+ *
210
+ * Each chunk corresponds to a contiguous passage of text extracted from a
211
+ * parent document. `trace_id` links to the corresponding memory trace so
212
+ * retrieval pipelines can cross-reference vector search results.
213
+ */
214
+ const DDL_DOCUMENT_CHUNKS = `
215
+ CREATE TABLE IF NOT EXISTS document_chunks (
216
+ brain_id TEXT NOT NULL,
217
+ id TEXT NOT NULL,
218
+ document_id TEXT NOT NULL,
219
+ trace_id TEXT,
220
+ content TEXT NOT NULL,
221
+ chunk_index INTEGER NOT NULL,
222
+ page_number INTEGER,
223
+ embedding BLOB,
224
+ PRIMARY KEY (brain_id, id),
225
+ FOREIGN KEY (brain_id, document_id) REFERENCES documents(brain_id, id),
226
+ FOREIGN KEY (brain_id, trace_id) REFERENCES memory_traces(brain_id, id)
227
+ );
228
+
229
+ CREATE INDEX IF NOT EXISTS idx_document_chunks_brain_document
230
+ ON document_chunks (brain_id, document_id, chunk_index);
231
+ `;
232
+ /**
233
+ * Document image table.
234
+ *
235
+ * Stores visual assets extracted from documents (e.g. figures, diagrams).
236
+ * `caption` and `embedding` support multimodal retrieval.
237
+ */
238
+ const DDL_DOCUMENT_IMAGES = `
239
+ CREATE TABLE IF NOT EXISTS document_images (
240
+ brain_id TEXT NOT NULL,
241
+ id TEXT NOT NULL,
242
+ document_id TEXT NOT NULL,
243
+ chunk_id TEXT,
244
+ data BLOB NOT NULL,
245
+ mime_type TEXT NOT NULL,
246
+ caption TEXT,
247
+ page_number INTEGER,
248
+ embedding BLOB,
249
+ PRIMARY KEY (brain_id, id),
250
+ FOREIGN KEY (brain_id, document_id) REFERENCES documents(brain_id, id),
251
+ FOREIGN KEY (brain_id, chunk_id) REFERENCES document_chunks(brain_id, id)
252
+ );
253
+ `;
254
+ /**
255
+ * Consolidation log.
256
+ *
257
+ * Records each offline consolidation run — the analogue of slow-wave sleep
258
+ * memory consolidation. Tracks how many traces were pruned, merged, derived
259
+ * (by inference), or compacted (losslessly compressed).
260
+ */
261
+ const DDL_CONSOLIDATION_LOG = `
262
+ CREATE TABLE IF NOT EXISTS consolidation_log (
263
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
264
+ brain_id TEXT NOT NULL,
265
+ ran_at INTEGER NOT NULL,
266
+ pruned INTEGER NOT NULL DEFAULT 0,
267
+ merged INTEGER NOT NULL DEFAULT 0,
268
+ derived INTEGER NOT NULL DEFAULT 0,
269
+ compacted INTEGER NOT NULL DEFAULT 0,
270
+ duration_ms INTEGER NOT NULL DEFAULT 0
271
+ );
272
+
273
+ CREATE INDEX IF NOT EXISTS idx_consolidation_log_brain_time
274
+ ON consolidation_log (brain_id, ran_at DESC);
275
+ `;
276
+ /**
277
+ * Retrieval feedback signals.
278
+ *
279
+ * Captures explicit (thumbs up/down) or implicit (click, dwell time, follow-up)
280
+ * feedback on retrieved memory traces. Used by the spaced-repetition scheduler
281
+ * to modulate `strength` and `stability` updates (Hebbian reinforcement).
282
+ *
283
+ * `signal` examples: 'positive', 'negative', 'neutral', 'implicit_positive'.
284
+ */
285
+ const DDL_RETRIEVAL_FEEDBACK = `
286
+ CREATE TABLE IF NOT EXISTS retrieval_feedback (
287
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
288
+ brain_id TEXT NOT NULL,
289
+ trace_id TEXT NOT NULL,
290
+ signal TEXT NOT NULL,
291
+ query TEXT,
292
+ created_at INTEGER NOT NULL,
293
+ FOREIGN KEY (brain_id, trace_id) REFERENCES memory_traces(brain_id, id)
294
+ );
295
+
296
+ CREATE INDEX IF NOT EXISTS idx_retrieval_feedback_brain_trace
297
+ ON retrieval_feedback (brain_id, trace_id, created_at DESC);
298
+ `;
299
+ /**
300
+ * Conversation sessions.
301
+ *
302
+ * Provides a lightweight conversational buffer independent of external message
303
+ * stores. Primarily used for episodic memory encoding (conversation → trace).
304
+ */
305
+ const DDL_CONVERSATIONS = `
306
+ CREATE TABLE IF NOT EXISTS conversations (
307
+ brain_id TEXT NOT NULL,
308
+ id TEXT NOT NULL,
309
+ title TEXT,
310
+ created_at INTEGER NOT NULL,
311
+ updated_at INTEGER NOT NULL,
312
+ metadata TEXT NOT NULL DEFAULT '{}',
313
+ PRIMARY KEY (brain_id, id)
314
+ );
315
+ `;
316
+ /**
317
+ * Conversation messages.
318
+ *
319
+ * Each message belongs to a conversation. `role` follows the OpenAI convention:
320
+ * 'user' | 'assistant' | 'system' | 'tool'.
321
+ */
322
+ const DDL_MESSAGES = `
323
+ CREATE TABLE IF NOT EXISTS messages (
324
+ brain_id TEXT NOT NULL,
325
+ id TEXT NOT NULL,
326
+ conversation_id TEXT NOT NULL,
327
+ role TEXT NOT NULL,
328
+ content TEXT NOT NULL,
329
+ created_at INTEGER NOT NULL,
330
+ metadata TEXT NOT NULL DEFAULT '{}',
331
+ PRIMARY KEY (brain_id, id),
332
+ FOREIGN KEY (brain_id, conversation_id) REFERENCES conversations(brain_id, id)
333
+ );
334
+
335
+ CREATE INDEX IF NOT EXISTS idx_messages_brain_conversation
336
+ ON messages (brain_id, conversation_id, created_at);
337
+ `;
338
+ /**
339
+ * Prospective memory items table.
340
+ *
341
+ * Stores time-based, event-based, and context-based reminders/intentions
342
+ * that the ProspectiveMemoryManager checks each turn. Items are registered
343
+ * automatically from commitment and intention observation notes.
344
+ *
345
+ * `trigger_type` determines how the item fires:
346
+ * - 'time_based': fires at or after `trigger_at` timestamp
347
+ * - 'event_based': fires when `trigger_event` name occurs
348
+ * - 'context_based': fires when embedding similarity to `cue_embedding` exceeds threshold
349
+ */
350
+ const DDL_PROSPECTIVE_ITEMS = `
351
+ CREATE TABLE IF NOT EXISTS prospective_items (
352
+ brain_id TEXT NOT NULL,
353
+ id TEXT NOT NULL,
354
+ content TEXT NOT NULL,
355
+ trigger_type TEXT NOT NULL,
356
+ trigger_at INTEGER,
357
+ trigger_event TEXT,
358
+ cue_text TEXT,
359
+ cue_embedding BLOB,
360
+ similarity_threshold REAL DEFAULT 0.7,
361
+ importance REAL NOT NULL DEFAULT 0.5,
362
+ triggered INTEGER NOT NULL DEFAULT 0,
363
+ recurring INTEGER NOT NULL DEFAULT 0,
364
+ source_trace_id TEXT,
365
+ created_at INTEGER NOT NULL,
366
+ PRIMARY KEY (brain_id, id)
367
+ );
368
+ `;
369
+ // ---------------------------------------------------------------------------
370
+ // Brain
371
+ // ---------------------------------------------------------------------------
372
+ /**
373
+ * Unified cross-platform connection manager for a single agent's persistent brain.
374
+ *
375
+ * Uses the `StorageAdapter` interface from `@framers/sql-storage-adapter` to
376
+ * support multiple backends (better-sqlite3, sql.js, IndexedDB, Postgres, etc.)
377
+ * transparently. All methods are async.
378
+ *
379
+ * **Usage:**
380
+ * ```ts
381
+ * const brain = await Brain.open('/path/to/agent/brain.sqlite');
382
+ *
383
+ * // Async query API for subsystems
384
+ * const row = await brain.get<{ value: string }>('SELECT value FROM brain_meta WHERE key = ?', ['schema_version']);
385
+ *
386
+ * // Meta helpers
387
+ * await brain.setMeta('last_sync', Date.now().toString());
388
+ * const ver = await brain.getMeta('schema_version'); // '1'
389
+ *
390
+ * await brain.close();
391
+ * ```
392
+ *
393
+ * Subsystems (KnowledgeGraph, MemoryGraph, ConsolidationLoop, etc.)
394
+ * receive the `Brain` instance and call its async proxy methods
395
+ * (`run`, `get`, `all`, `exec`, `transaction`) for all database operations.
396
+ */
397
+ export class Brain {
398
+ // ---------------------------------------------------------------------------
399
+ // Constructor (private — use Brain.open())
400
+ // ---------------------------------------------------------------------------
401
/**
 * Private-by-convention constructor — use the static factories
 * (`Brain.openSqlite`, `Brain.openPostgres`, `Brain.openWithAdapter`)
 * instead of calling this directly.
 *
 * @param adapter - A fully initialised StorageAdapter instance.
 * @param features - Platform-aware feature bundle.
 * @param brainId - Brain identifier used to scope multi-tenant queries.
 */
constructor(adapter, features, brainId) {
    /**
     * Brain identifier used to scope every brain-owned table row.
     *
     * In SQLite per-file mode, defaults to the file basename (or `'default'`
     * for `:memory:`); subsystems pass it through to the `brain_id` column
     * on every INSERT/UPDATE and into every WHERE clause on SELECT.
     *
     * In Postgres mode (multi-tenant), this is required and must be unique
     * per brain across the database.
     */
    _Brain_brainId.set(this, void 0); // register the compiled private-field slot (#brainId)
    this._adapter = adapter;
    this._features = features;
    __classPrivateFieldSet(this, _Brain_brainId, brainId, "f");
}
424
/**
 * Brain identifier scoping every query through this Brain instance.
 * Subsystems (KnowledgeGraph, MemoryGraph, ConsolidationLoop) read this
 * to inject `brain_id` into their own SQL.
 *
 * @returns The identifier supplied to (or derived by) the opening factory.
 */
get brainId() {
    return __classPrivateFieldGet(this, _Brain_brainId, "f");
}
432
+ // ---------------------------------------------------------------------------
433
+ // Async factories (three named entry points)
434
+ //
435
+ // Naming convention:
436
+ // - openSqlite / openPostgres: factory by-DIALECT. The caller specifies
437
+ // "I want a SQLite-backed brain at this file" or "I want a Postgres-
438
+ // backed brain at this URL." The adapter is constructed internally.
439
+ // - openWithAdapter: factory by-PRE-BUILT-ADAPTER. The caller has already
440
+ // built the StorageAdapter (e.g., to share a connection pool with
441
+ // another subsystem) and hands it to Brain to consume.
442
+ //
443
+ // The naming asymmetry is intentional: the first two are dialect-specific
444
+ // entry points; the third is the escape hatch for advanced cases where the
445
+ // adapter is owned outside the Brain.
446
+ // ---------------------------------------------------------------------------
447
+ /**
448
+ * Open a Brain backed by SQLite. Tries adapters in order:
449
+ * better-sqlite3 (Node native) -> sql.js (WASM) -> indexeddb (browser).
450
+ *
451
+ * @param path - File path. Use `:memory:` for in-process testing.
452
+ * @param opts.brainId - Optional explicit brainId; defaults to file basename
453
+ * (or `'default'` for `:memory:`).
454
+ * @param opts.priority - Override the default adapter priority.
455
+ * @returns A fully initialised `Brain` instance with the v2 schema.
456
+ */
457
+ static async openSqlite(path, opts = {}) {
458
+ const adapter = await resolveStorageAdapter({
459
+ filePath: path,
460
+ priority: opts.priority ?? ['better-sqlite3', 'sqljs', 'indexeddb'],
461
+ quiet: true,
462
+ });
463
+ const brainId = opts.brainId ?? deriveBrainIdFromPath(path);
464
+ return Brain._initialize(adapter, brainId);
465
+ }
466
+ /**
467
+ * Open a Brain backed by PostgreSQL. Requires the `pg` npm package and
468
+ * a reachable Postgres instance.
469
+ *
470
+ * @param connectionString - Standard Postgres connection URL.
471
+ * @param opts.brainId - REQUIRED. Used to scope every query so multiple
472
+ * brains can share one Postgres database without leaking rows.
473
+ * @param opts.poolSize - pg connection pool size. Defaults to 10.
474
+ */
475
+ static async openPostgres(connectionString, opts) {
476
+ if (!opts.brainId) {
477
+ throw new Error('Brain.openPostgres: opts.brainId is required (Postgres mode is multi-tenant)');
478
+ }
479
+ // Use createPostgresAdapter directly so we can pass pool size; the
480
+ // resolveStorageAdapter facade only forwards `connectionString`.
481
+ let adapter;
482
+ try {
483
+ adapter = await createPostgresAdapter({
484
+ connectionString,
485
+ max: opts.poolSize ?? 10,
486
+ });
487
+ await adapter.open();
488
+ }
489
+ catch (err) {
490
+ const safe = redactPostgresPassword(connectionString);
491
+ const msg = err instanceof Error ? err.message : String(err);
492
+ throw new Error(`Brain.openPostgres: connection failed for ${safe}: ${msg}`);
493
+ }
494
+ return Brain._initialize(adapter, opts.brainId);
495
+ }
496
+ /**
497
+ * Open a Brain with a pre-resolved StorageAdapter. Use when sharing an
498
+ * adapter across subsystems (e.g., wilds-ai foundation pool + brain) or
499
+ * when the consumer needs full control over adapter resolution.
500
+ *
501
+ * @param adapter - Pre-built StorageAdapter instance.
502
+ * @param opts.brainId - Required for postgres-kind adapters; optional for
503
+ * sqlite-kind adapters (defaults to `'default'`).
504
+ */
505
+ static async openWithAdapter(adapter, opts = {}) {
506
+ const isPostgres = adapter.kind.includes('postgres');
507
+ if (isPostgres && !opts.brainId) {
508
+ throw new Error('Brain.openWithAdapter: opts.brainId is required for postgres-kind adapters');
509
+ }
510
+ const brainId = opts.brainId ?? 'default';
511
+ return Brain._initialize(adapter, brainId);
512
+ }
513
+ /**
514
+ * Internal common initialization path used by all three factories.
515
+ *
516
+ * Sequence:
517
+ * 1. Build platform-aware feature bundle.
518
+ * 2. Set WAL mode (dialect.pragma returns null on Postgres).
519
+ * 3. Enable foreign key enforcement (dialect.pragma returns null on Postgres).
520
+ * 4. Auto-migrate v1 schemas to v2 (idempotent; no-op for fresh DBs and v2).
521
+ * 5. Apply full DDL via _initSchema().
522
+ * 6. Seed brain_meta defaults.
523
+ */
524
+ static async _initialize(adapter, brainId) {
525
+ const features = createStorageFeatures(adapter);
526
+ const brain = new Brain(adapter, features, brainId);
527
+ const walPragma = features.dialect.pragma('journal_mode', 'WAL');
528
+ if (walPragma)
529
+ await adapter.exec(walPragma);
530
+ const fkPragma = features.dialect.pragma('foreign_keys', 'ON');
531
+ if (fkPragma)
532
+ await adapter.exec(fkPragma);
533
+ await MigrationRunner.runPending(adapter, features, brainId, MIGRATIONS);
534
+ await brain._initSchema();
535
+ await brain._seedMeta();
536
+ return brain;
537
+ }
538
+ // ---------------------------------------------------------------------------
539
+ // Async proxy methods (for consumer subsystems)
540
+ // ---------------------------------------------------------------------------
541
+ /**
542
+ * Execute a mutation statement (INSERT, UPDATE, DELETE).
543
+ *
544
+ * @param sql - SQL statement with `?` positional placeholders.
545
+ * @param params - Parameter array matching the placeholders.
546
+ * @returns Metadata about affected rows.
547
+ */
548
+ async run(sql, params) {
549
+ return this._adapter.run(sql, params);
550
+ }
551
+ /**
552
+ * Retrieve a single row (or null if none found).
553
+ *
554
+ * @param sql - SQL SELECT statement.
555
+ * @param params - Parameter array.
556
+ * @returns First matching row or null.
557
+ */
558
+ async get(sql, params) {
559
+ return this._adapter.get(sql, params);
560
+ }
561
+ /**
562
+ * Retrieve all rows matching the statement.
563
+ *
564
+ * @param sql - SQL SELECT statement.
565
+ * @param params - Parameter array.
566
+ * @returns Array of matching rows (empty array if none).
567
+ */
568
+ async all(sql, params) {
569
+ return this._adapter.all(sql, params);
570
+ }
571
+ /**
572
+ * Execute a script containing multiple SQL statements.
573
+ *
574
+ * @param sql - SQL script (semicolon-delimited statements).
575
+ */
576
+ async exec(sql) {
577
+ return this._adapter.exec(sql);
578
+ }
579
+ /**
580
+ * Execute a callback within a database transaction.
581
+ *
582
+ * The transaction is automatically committed on success or rolled back
583
+ * on error.
584
+ *
585
+ * @param fn - Async callback receiving a transactional adapter.
586
+ * @returns Result of the callback.
587
+ */
588
+ async transaction(fn) {
589
+ return this._adapter.transaction(fn);
590
+ }
591
+ /**
592
+ * Expose the raw storage adapter for advanced usage.
593
+ *
594
+ * Primarily used by SqliteExporter (VACUUM INTO) and SqliteImporter
595
+ * (which needs direct adapter access for the target brain).
596
+ */
597
+ get adapter() {
598
+ return this._adapter;
599
+ }
600
+ /**
601
+ * Platform-aware feature bundle (dialect, FTS, BLOB codec, exporter).
602
+ * Consumers use this to generate cross-platform SQL instead of hardcoding
603
+ * SQLite-specific syntax.
604
+ */
605
+ get features() {
606
+ return this._features;
607
+ }
608
+ // ---------------------------------------------------------------------------
609
+ // Private init helpers
610
+ // ---------------------------------------------------------------------------
611
+ /**
612
+ * Execute idempotent DDL statements to initialize the schema.
613
+ * `CREATE TABLE IF NOT EXISTS` is safe to re-run, so a sequential setup path
614
+ * is sufficient and avoids adapter-specific transaction quirks during DDL.
615
+ */
616
+ async _initSchema() {
617
+ const ddlStatements = [
618
+ DDL_BRAIN_META,
619
+ DDL_MEMORY_TRACES,
620
+ DDL_KNOWLEDGE_NODES,
621
+ DDL_KNOWLEDGE_EDGES,
622
+ DDL_DOCUMENTS,
623
+ DDL_DOCUMENT_CHUNKS,
624
+ DDL_DOCUMENT_IMAGES,
625
+ DDL_CONSOLIDATION_LOG,
626
+ DDL_RETRIEVAL_FEEDBACK,
627
+ DDL_CONVERSATIONS,
628
+ DDL_MESSAGES,
629
+ DDL_PROSPECTIVE_ITEMS,
630
+ // Memory archive tables (write-ahead cold storage for verbatim content)
631
+ DDL_ARCHIVED_TRACES,
632
+ DDL_ARCHIVED_TRACES_IDX_AGENT_TIME,
633
+ DDL_ARCHIVED_TRACES_IDX_REASON,
634
+ DDL_ARCHIVE_ACCESS_LOG,
635
+ DDL_ARCHIVE_ACCESS_LOG_IDX,
636
+ ];
637
+ for (const statement of ddlStatements) {
638
+ await this._adapter.exec(statement);
639
+ }
640
+ // FTS index via feature abstraction (FTS5 on SQLite, tsvector/GIN on Postgres).
641
+ // SQL.js builds may not include FTS5, so keep the core schema independent.
642
+ const ftsDdl = this._features.fts.createIndex({
643
+ table: 'memory_traces_fts',
644
+ columns: ['content', 'tags'],
645
+ contentTable: 'memory_traces',
646
+ tokenizer: 'porter ascii',
647
+ });
648
+ try {
649
+ await this._adapter.exec(ftsDdl);
650
+ }
651
+ catch (error) {
652
+ const message = error instanceof Error ? error.message : String(error);
653
+ if (!message.includes('no such module: fts5')) {
654
+ throw error;
655
+ }
656
+ }
657
+ }
658
+ /**
659
+ * Seed `brain_meta` with mandatory keys on first creation.
660
+ * Uses INSERT OR IGNORE to be idempotent on subsequent opens.
661
+ */
662
+ async _seedMeta() {
663
+ const { dialect } = this._features;
664
+ // INSERT OR IGNORE is idempotent — no transaction needed.
665
+ // Avoids sql.js "cannot rollback" errors when DDL from _initSchema()
666
+ // leaves the connection in an implicit-commit state.
667
+ await this._adapter.run(dialect.insertOrIgnore('brain_meta', ['brain_id', 'key', 'value'], ['?', '?', '?']), [__classPrivateFieldGet(this, _Brain_brainId, "f"), 'schema_version', String(LATEST_SCHEMA_VERSION)]);
668
+ await this._adapter.run(dialect.insertOrIgnore('brain_meta', ['brain_id', 'key', 'value'], ['?', '?', '?']), [__classPrivateFieldGet(this, _Brain_brainId, "f"), 'created_at', Date.now().toString()]);
669
+ }
670
+ // ---------------------------------------------------------------------------
671
+ // Public API
672
+ // ---------------------------------------------------------------------------
673
+ /**
674
+ * Read a value from the `brain_meta` key-value store.
675
+ *
676
+ * @param key - The metadata key to look up.
677
+ * @returns The stored string value, or `undefined` if the key does not exist.
678
+ */
679
+ async getMeta(key) {
680
+ const row = await this._adapter.get('SELECT value FROM brain_meta WHERE brain_id = ? AND key = ?', [__classPrivateFieldGet(this, _Brain_brainId, "f"), key]);
681
+ return row?.value;
682
+ }
683
+ /**
684
+ * Upsert a value into the `brain_meta` key-value store.
685
+ *
686
+ * Uses `INSERT OR REPLACE` semantics — creates the row if absent, or
687
+ * overwrites if present.
688
+ *
689
+ * @param key - The metadata key.
690
+ * @param value - The string value to store.
691
+ */
692
+ async setMeta(key, value) {
693
+ await this._adapter.run(this._features.dialect.insertOrReplace('brain_meta', ['brain_id', 'key', 'value'], ['?', '?', '?'], 'brain_id, key'), [__classPrivateFieldGet(this, _Brain_brainId, "f"), key, value]);
694
+ }
695
+ /**
696
+ * Check whether a given embedding dimension is compatible with this brain.
697
+ *
698
+ * On first call (no stored `embedding_dimensions`), returns `true` and stores
699
+ * the provided dimension for future compatibility checks.
700
+ *
701
+ * Subsequent calls compare `dimensions` against the stored value.
702
+ * Mismatches indicate that a different embedding model was used to encode
703
+ * memories — mixing dimensions would corrupt vector similarity searches.
704
+ *
705
+ * @param dimensions - The embedding vector length to check (e.g. 1536 for OpenAI ada-002).
706
+ * @returns `true` if compatible (or no prior value), `false` on mismatch.
707
+ */
708
+ async checkEmbeddingCompat(dimensions) {
709
+ const stored = await this.getMeta('embedding_dimensions');
710
+ if (stored === undefined) {
711
+ // First embedding model encounter — store and accept.
712
+ await this.setMeta('embedding_dimensions', String(dimensions));
713
+ return true;
714
+ }
715
+ return parseInt(stored, 10) === dimensions;
716
+ }
717
+ // ---------------------------------------------------------------------------
718
+ // Portable artifact: export to / import from a SQLite snapshot
719
+ // ---------------------------------------------------------------------------
720
+ /**
721
+ * Materialize this brain to a portable SQLite file at `targetPath`.
722
+ *
723
+ * Source can be any backend (SQLite, Postgres, Capacitor, etc.); output
724
+ * is always a fresh SQLite file. Used by `.wildsoul`-style export and
725
+ * other portability flows.
726
+ *
727
+ * Refuses to overwrite an existing file at `targetPath` so callers do
728
+ * not silently lose data.
729
+ *
730
+ * Forking semantics: rows are emitted with the source brainId. Importing
731
+ * the resulting file under a different brainId produces a fork.
732
+ *
733
+ * @param targetPath - Destination file path. File must not exist.
734
+ * @returns Bytes written to the destination file.
735
+ */
736
+ async exportToSqlite(targetPath) {
737
+ // Refuse to overwrite an existing file.
738
+ try {
739
+ await fs.access(targetPath);
740
+ throw new Error(`Brain.exportToSqlite: target already exists: ${targetPath}`);
741
+ }
742
+ catch (err) {
743
+ const code = err.code;
744
+ if (code !== 'ENOENT') {
745
+ // Re-throw the "already exists" error and any other access error
746
+ // that isn't a missing-file response.
747
+ throw err;
748
+ }
749
+ }
750
+ // Open a fresh SQLite Brain at the target path. We import under the
751
+ // source brainId so the export file is identifiable as belonging to
752
+ // this brain even if the receiving Brain has a different id.
753
+ const target = await Brain.openSqlite(targetPath, { brainId: __classPrivateFieldGet(this, _Brain_brainId, "f") });
754
+ try {
755
+ for (const table of PORTABLE_TABLES) {
756
+ const rows = await this.all(`SELECT * FROM ${table} WHERE brain_id = ?`, [__classPrivateFieldGet(this, _Brain_brainId, "f")]);
757
+ if (rows.length === 0)
758
+ continue;
759
+ // Upsert so source rows override the brain_meta defaults
760
+ // (schema_version, created_at) seeded during target initialisation.
761
+ await this._bulkCopy(target, table, rows, __classPrivateFieldGet(this, _Brain_brainId, "f"), { upsert: true });
762
+ }
763
+ }
764
+ finally {
765
+ await target.close();
766
+ }
767
+ const stat = await fs.stat(targetPath);
768
+ return { bytesWritten: stat.size };
769
+ }
770
+ /**
771
+ * Load a portable SQLite file into this Brain's adapter.
772
+ *
773
+ * Forking semantics: rows from the source file are written under the
774
+ * RECEIVING brain's `brainId`, not the brainId stored in the source
775
+ * file. This means importing an `alice` snapshot into a Brain opened
776
+ * with `brainId: 'alice-fork'` produces a fork with no shared identity.
777
+ *
778
+ * **CAVEAT:** importing from a pre-0.3.0 SQLite file MUTATES the source
779
+ * file. Opening the source via `Brain.openSqlite` runs the v1 to v2
780
+ * migration in place. To preserve the source unchanged, copy the file to
781
+ * a temp path before calling this method.
782
+ *
783
+ * @param sourcePath - Source SQLite file path (typically produced by
784
+ * `Brain.exportToSqlite`).
785
+ * @param opts.strategy - `'merge'` (default) upserts on PK collision;
786
+ * `'replace'` wipes all rows for the receiving `brainId` first.
787
+ * @returns Counts of rows imported per table.
788
+ */
789
+ async importFromSqlite(sourcePath, opts = {}) {
790
+ const strategy = opts.strategy ?? 'merge';
791
+ // Peek at the source's brain_meta BEFORE opening it as a Brain. Opening
792
+ // via Brain.openSqlite without a brainId would derive one from the file
793
+ // path and pollute brain_meta with that synthetic id (via _seedMeta),
794
+ // breaking the single-brain check below. We use a raw adapter for the
795
+ // peek so we don't trigger any seeding.
796
+ const peekAdapter = await resolveStorageAdapter({
797
+ filePath: sourcePath,
798
+ priority: ['better-sqlite3', 'sqljs'],
799
+ quiet: true,
800
+ });
801
+ let sourceBrainIds;
802
+ try {
803
+ sourceBrainIds = await peekAdapter.all(`SELECT DISTINCT brain_id FROM brain_meta WHERE brain_id IS NOT NULL`);
804
+ }
805
+ finally {
806
+ await peekAdapter.close();
807
+ }
808
+ if (sourceBrainIds.length > 1) {
809
+ const ids = sourceBrainIds.map((r) => r.brain_id).join(', ');
810
+ throw new Error(`Brain.importFromSqlite: source contains multiple brain_ids (${ids}). ` +
811
+ `Imports must be from a single-brain export (use Brain.exportToSqlite).`);
812
+ }
813
+ // Open the source as a Brain with the peeked brainId (if any) to avoid
814
+ // _seedMeta polluting brain_meta with a path-derived id.
815
+ const sourceBrainId = sourceBrainIds[0]?.brain_id;
816
+ const source = sourceBrainId
817
+ ? await Brain.openSqlite(sourcePath, { brainId: sourceBrainId })
818
+ : await Brain.openSqlite(sourcePath);
819
+ const tablesImported = {};
820
+ try {
821
+ if (strategy === 'replace') {
822
+ // Wipe existing rows for the receiving brainId in every portable table.
823
+ // Order matters: child tables before parent tables to satisfy FKs.
824
+ for (const table of [...PORTABLE_TABLES].reverse()) {
825
+ await this.run(`DELETE FROM ${table} WHERE brain_id = ?`, [__classPrivateFieldGet(this, _Brain_brainId, "f")]);
826
+ }
827
+ }
828
+ for (const table of PORTABLE_TABLES) {
829
+ // Read every row in the source file regardless of its stored brainId
830
+ // so we capture the full snapshot for re-insertion under our brainId.
831
+ const rows = await source.all(`SELECT * FROM ${table}`);
832
+ tablesImported[table] = rows.length;
833
+ if (rows.length === 0)
834
+ continue;
835
+ // Always use upsert to gracefully handle the brain_meta rows seeded
836
+ // by `_seedMeta` during the receiving Brain's initialization (which
837
+ // would otherwise collide with the source's schema_version/created_at).
838
+ await this._bulkCopy(this, table, rows, __classPrivateFieldGet(this, _Brain_brainId, "f"), { upsert: true });
839
+ }
840
+ }
841
+ finally {
842
+ await source.close();
843
+ }
844
+ return { tablesImported };
845
+ }
846
+ /**
847
+ * Internal helper: bulk-insert `rows` into `target.<table>`, rewriting
848
+ * `brain_id` on each row to `targetBrainId`. When `opts.upsert` is true,
849
+ * uses `dialect.insertOrReplace` so PK collisions overwrite (idempotent).
850
+ */
851
+ async _bulkCopy(target, table, rows, targetBrainId, opts = {}) {
852
+ if (rows.length === 0)
853
+ return;
854
+ const columns = Object.keys(rows[0]);
855
+ const placeholders = columns.map(() => '?').join(', ');
856
+ const colList = columns.join(', ');
857
+ const stmt = opts.upsert
858
+ ? target._features.dialect.insertOrReplace(table, columns, columns.map(() => '?'), PORTABLE_TABLE_PRIMARY_KEYS[table] ?? 'brain_id, id')
859
+ : `INSERT INTO ${table} (${colList}) VALUES (${placeholders})`;
860
+ // Single transaction per table for bulk-insert performance + atomicity.
861
+ await target._adapter.exec('BEGIN');
862
+ try {
863
+ for (const row of rows) {
864
+ const values = columns.map((c) => c === 'brain_id' ? targetBrainId : row[c]);
865
+ await target._adapter.run(stmt, values);
866
+ }
867
+ await target._adapter.exec('COMMIT');
868
+ }
869
+ catch (err) {
870
+ await target._adapter.exec('ROLLBACK');
871
+ throw err;
872
+ }
873
+ }
874
+ /**
875
+ * Close the database connection.
876
+ *
877
+ * Must be called when the agent shuts down to flush the WAL and release
878
+ * the file lock. Failing to close may leave the database in WAL mode with
879
+ * an unconsumed WAL file.
880
+ */
881
+ async close() {
882
+ try {
883
+ await this._adapter.close();
884
+ }
885
+ catch (err) {
886
+ // Adapter close failures (pool drain timeouts, lock-release races on
887
+ // shutdown) shouldn't propagate to callers who are themselves shutting
888
+ // down and can't usefully react. Log to stderr so CI artifacts capture
889
+ // the failure context if it ever indicates a real problem.
890
+ const msg = err instanceof Error ? err.message : String(err);
891
+ process.stderr.write(`[Brain.close] adapter close failed: ${msg}\n`);
892
+ }
893
+ }
894
+ }
895
+ _Brain_brainId = new WeakMap();
896
+ // PORTABLE_TABLES + PORTABLE_TABLE_PRIMARY_KEYS moved to ./portable-tables.ts
897
+ // (single source of truth shared with v1-to-v2 migration + postgres test cleanup).
898
+ //# sourceMappingURL=Brain.js.map