@launchapp-dev/ao-memory-mcp 1.0.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@launchapp-dev/ao-memory-mcp",
3
- "version": "1.0.0",
4
- "description": "MCP server for agent memory management — structured recall, cross-project patterns, and lifecycle management for ao-cli agents",
3
+ "version": "2.0.0",
4
+ "description": "Cognitive memory MCP server for AI agents — semantic search, document RAG, knowledge graph, episodic memory, and hybrid retrieval",
5
5
  "type": "module",
6
6
  "bin": {
7
7
  "ao-memory-mcp": "./src/server.ts"
@@ -12,8 +12,10 @@
12
12
  "build": "tsc --noEmit -p tsconfig.json"
13
13
  },
14
14
  "dependencies": {
15
+ "@huggingface/transformers": "^3.0.0",
15
16
  "@modelcontextprotocol/sdk": "^1.0.0",
16
17
  "better-sqlite3": "^11.0.0",
18
+ "sqlite-vec": "^0.1.0",
17
19
  "zod": "^3.22.0"
18
20
  },
19
21
  "devDependencies": {
package/src/db.ts CHANGED
@@ -7,6 +7,8 @@ import { fileURLToPath } from "node:url";
7
7
 
8
8
  const __dirname = dirname(fileURLToPath(import.meta.url));
9
9
 
10
+ let vecLoaded = false;
11
+
10
12
  export function resolveDbPath(cliDbPath?: string): string {
11
13
  if (cliDbPath) return cliDbPath;
12
14
  if (process.env.AO_MEMORY_DB) return process.env.AO_MEMORY_DB;
@@ -23,16 +25,41 @@ export function initDb(dbPath: string): Database.Database {
23
25
  return db;
24
26
  }
25
27
 
26
- export function contentHash(
27
- entryType: string,
28
- agentRole: string,
29
- project: string,
30
- title: string,
31
- body: string
32
- ): string {
33
- return createHash("sha256")
34
- .update(`${entryType}\0${agentRole}\0${project}\0${title}\0${body}`)
35
- .digest("hex");
28
+ export function initVec(db: Database.Database, dimensions: number) {
29
+ if (vecLoaded) return;
30
+ try {
31
+ const sqliteVec = require("sqlite-vec");
32
+ sqliteVec.load(db);
33
+ vecLoaded = true;
34
+ } catch {
35
+ try {
36
+ // Fallback: the package might export differently
37
+ const mod = require("sqlite-vec");
38
+ if (mod.default?.load) mod.default.load(db);
39
+ else if (mod.load) mod.load(db);
40
+ vecLoaded = true;
41
+ } catch (e) {
42
+ console.error("[ao-memory] sqlite-vec not available, vector search disabled");
43
+ return;
44
+ }
45
+ }
46
+
47
+ db.exec(`
48
+ CREATE VIRTUAL TABLE IF NOT EXISTS vec_memories USING vec0(
49
+ embedding float[${dimensions}] distance_metric=cosine
50
+ );
51
+ CREATE VIRTUAL TABLE IF NOT EXISTS vec_chunks USING vec0(
52
+ embedding float[${dimensions}] distance_metric=cosine
53
+ );
54
+ `);
55
+ }
56
+
57
+ export function isVecAvailable(): boolean {
58
+ return vecLoaded;
59
+ }
60
+
61
+ export function contentHash(...parts: string[]): string {
62
+ return createHash("sha256").update(parts.join("\0")).digest("hex");
36
63
  }
37
64
 
38
65
  export function now(): string {
@@ -46,3 +73,34 @@ export function jsonResult(data: unknown) {
46
73
  export function errorResult(message: string) {
47
74
  return { content: [{ type: "text" as const, text: JSON.stringify({ error: message }) }], isError: true };
48
75
  }
76
+
77
+ export function touchAccess(db: Database.Database, id: number) {
78
+ db.prepare("UPDATE memories SET last_accessed_at = ?, access_count = access_count + 1 WHERE id = ?").run(now(), id);
79
+ }
80
+
81
+ export function chunkText(text: string, maxChars: number = 1000, overlap: number = 100): { content: string; offset: number }[] {
82
+ if (text.length <= maxChars) {
83
+ return [{ content: text, offset: 0 }];
84
+ }
85
+
86
+ const chunks: { content: string; offset: number }[] = [];
87
+ let offset = 0;
88
+ while (offset < text.length) {
89
+ let end = Math.min(offset + maxChars, text.length);
90
+
91
+ if (end < text.length) {
92
+ const paraBreak = text.lastIndexOf("\n\n", end);
93
+ if (paraBreak > offset + maxChars * 0.3) end = paraBreak + 2;
94
+ else {
95
+ const sentBreak = text.lastIndexOf(". ", end);
96
+ if (sentBreak > offset + maxChars * 0.3) end = sentBreak + 2;
97
+ }
98
+ }
99
+
100
+ chunks.push({ content: text.slice(offset, end).trim(), offset });
101
+ offset = end - overlap;
102
+ if (offset >= text.length) break;
103
+ }
104
+
105
+ return chunks.filter(c => c.content.length > 0);
106
+ }
@@ -0,0 +1,97 @@
1
+ import type Database from "better-sqlite3";
2
+ import { isVecAvailable } from "./db.ts";
3
+
4
+ let extractor: any = null;
5
+
6
+ const DEFAULT_MODEL = "nomic-ai/nomic-embed-text-v1.5";
7
+ const NOMIC_DIMS = 768;
8
+ const MINILM_DIMS = 384;
9
+
10
+ export function getModelId(): string {
11
+ return process.env.AO_MEMORY_MODEL || DEFAULT_MODEL;
12
+ }
13
+
14
+ function isNomicModel(): boolean {
15
+ return getModelId().includes("nomic");
16
+ }
17
+
18
+ export function getDimensions(): number {
19
+ return isNomicModel() ? NOMIC_DIMS : MINILM_DIMS;
20
+ }
21
+
22
+ async function getExtractor() {
23
+ if (extractor) return extractor;
24
+ const { pipeline } = await import("@huggingface/transformers");
25
+ const model = getModelId();
26
+ console.error(`[ao-memory] Loading embedding model: ${model}`);
27
+ extractor = await pipeline("feature-extraction", model, { dtype: "q8" });
28
+ console.error(`[ao-memory] Model ready (${getDimensions()}d)`);
29
+ return extractor;
30
+ }
31
+
32
+ export async function embed(text: string, isQuery: boolean = false): Promise<Float32Array> {
33
+ const ext = await getExtractor();
34
+ const input = isNomicModel()
35
+ ? (isQuery ? `search_query: ${text}` : `search_document: ${text}`)
36
+ : text;
37
+ const output = await ext(input, { pooling: "mean", normalize: true });
38
+ return new Float32Array(output.data);
39
+ }
40
+
41
+ export function storeVector(db: Database.Database, table: string, rowid: number, embedding: Float32Array) {
42
+ if (!isVecAvailable()) return;
43
+ db.prepare(`INSERT OR REPLACE INTO ${table}(rowid, embedding) VALUES (?, ?)`).run(
44
+ BigInt(rowid), Buffer.from(embedding.buffer)
45
+ );
46
+ }
47
+
48
+ export function deleteVector(db: Database.Database, table: string, rowid: number) {
49
+ if (!isVecAvailable()) return;
50
+ db.prepare(`DELETE FROM ${table} WHERE rowid = ?`).run(BigInt(rowid));
51
+ }
52
+
53
+ export function searchVectors(db: Database.Database, table: string, queryEmbedding: Float32Array, limit: number = 20): { rowid: number; distance: number }[] {
54
+ if (!isVecAvailable()) return [];
55
+ return db.prepare(
56
+ `SELECT rowid, distance FROM ${table} WHERE embedding MATCH ? ORDER BY distance LIMIT ?`
57
+ ).all(Buffer.from(queryEmbedding.buffer), limit) as any[];
58
+ }
59
+
60
+ export function hybridSearch(
61
+ db: Database.Database,
62
+ ftsTable: string,
63
+ vecTable: string,
64
+ queryText: string,
65
+ queryEmbedding: Float32Array,
66
+ limit: number = 10,
67
+ alpha: number = 0.5
68
+ ): { rowid: number; score: number }[] {
69
+ const RRF_K = 60;
70
+ const scores = new Map<number, number>();
71
+
72
+ // FTS5 keyword results
73
+ try {
74
+ const ftsResults = db.prepare(
75
+ `SELECT rowid FROM ${ftsTable} WHERE ${ftsTable} MATCH ? LIMIT 30`
76
+ ).all(queryText) as any[];
77
+
78
+ ftsResults.forEach((r, i) => {
79
+ const id = Number(r.rowid);
80
+ scores.set(id, (scores.get(id) || 0) + (1 - alpha) * (1 / (RRF_K + i + 1)));
81
+ });
82
+ } catch {}
83
+
84
+ // Vector similarity results
85
+ if (isVecAvailable()) {
86
+ const vecResults = searchVectors(db, vecTable, queryEmbedding, 30);
87
+ vecResults.forEach((r, i) => {
88
+ const id = Number(r.rowid);
89
+ scores.set(id, (scores.get(id) || 0) + alpha * (1 / (RRF_K + i + 1)));
90
+ });
91
+ }
92
+
93
+ return [...scores.entries()]
94
+ .sort((a, b) => b[1] - a[1])
95
+ .slice(0, limit)
96
+ .map(([rowid, score]) => ({ rowid, score }));
97
+ }
package/src/schema.sql CHANGED
@@ -1,113 +1,194 @@
1
1
  PRAGMA journal_mode = WAL;
2
2
  PRAGMA foreign_keys = ON;
3
3
 
4
- CREATE TABLE IF NOT EXISTS memory_entries (
4
+ -- ============================================================
5
+ -- MEMORIES — unified store for semantic, episodic, procedural
6
+ -- ============================================================
7
+ CREATE TABLE IF NOT EXISTS memories (
8
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
9
+ memory_type TEXT NOT NULL, -- semantic | episodic | procedural
10
+ scope TEXT NOT NULL DEFAULT 'project', -- global | user | project | session
11
+ namespace TEXT, -- project name, user id, session id, etc.
12
+ agent_role TEXT, -- planner, reviewer, qa-tester, or any custom role
13
+ title TEXT NOT NULL,
14
+ content TEXT NOT NULL,
15
+ -- references
16
+ task_id TEXT,
17
+ pr_number INTEGER,
18
+ run_id TEXT,
19
+ -- lifecycle
20
+ status TEXT NOT NULL DEFAULT 'active', -- active | summarized | archived
21
+ confidence REAL NOT NULL DEFAULT 1.0, -- 0.0-1.0, decays over time
22
+ superseded_by INTEGER REFERENCES memories(id),
23
+ -- temporal
24
+ tags TEXT NOT NULL DEFAULT '[]',
25
+ metadata TEXT NOT NULL DEFAULT '{}',
26
+ created_at TEXT NOT NULL,
27
+ occurred_at TEXT NOT NULL,
28
+ updated_at TEXT NOT NULL,
29
+ last_accessed_at TEXT,
30
+ access_count INTEGER NOT NULL DEFAULT 0,
31
+ content_hash TEXT NOT NULL
32
+ );
33
+
34
+ CREATE INDEX IF NOT EXISTS idx_mem_type ON memories(memory_type);
35
+ CREATE INDEX IF NOT EXISTS idx_mem_scope ON memories(scope, namespace);
36
+ CREATE INDEX IF NOT EXISTS idx_mem_role ON memories(agent_role);
37
+ CREATE INDEX IF NOT EXISTS idx_mem_status ON memories(status);
38
+ CREATE INDEX IF NOT EXISTS idx_mem_task ON memories(task_id);
39
+ CREATE INDEX IF NOT EXISTS idx_mem_occurred ON memories(occurred_at);
40
+ CREATE INDEX IF NOT EXISTS idx_mem_hash ON memories(content_hash);
41
+ CREATE INDEX IF NOT EXISTS idx_mem_ns_type ON memories(namespace, memory_type);
42
+ CREATE INDEX IF NOT EXISTS idx_mem_ns_role ON memories(namespace, agent_role);
43
+ CREATE INDEX IF NOT EXISTS idx_mem_confidence ON memories(confidence);
44
+ CREATE INDEX IF NOT EXISTS idx_mem_accessed ON memories(last_accessed_at);
45
+
46
+ -- FTS5 for memories
47
+ CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts USING fts5(
48
+ title, content,
49
+ content=memories, content_rowid=id
50
+ );
51
+ CREATE TRIGGER IF NOT EXISTS mem_fts_i AFTER INSERT ON memories BEGIN
52
+ INSERT INTO memories_fts(rowid, title, content) VALUES (new.id, new.title, new.content);
53
+ END;
54
+ CREATE TRIGGER IF NOT EXISTS mem_fts_u AFTER UPDATE ON memories BEGIN
55
+ INSERT INTO memories_fts(memories_fts, rowid, title, content) VALUES ('delete', old.id, old.title, old.content);
56
+ INSERT INTO memories_fts(rowid, title, content) VALUES (new.id, new.title, new.content);
57
+ END;
58
+ CREATE TRIGGER IF NOT EXISTS mem_fts_d AFTER DELETE ON memories BEGIN
59
+ INSERT INTO memories_fts(memories_fts, rowid, title, content) VALUES ('delete', old.id, old.title, old.content);
60
+ END;
61
+
62
+ -- ============================================================
63
+ -- DOCUMENTS — source documents ingested for RAG
64
+ -- ============================================================
65
+ CREATE TABLE IF NOT EXISTS documents (
66
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
67
+ namespace TEXT,
68
+ title TEXT NOT NULL,
69
+ source TEXT, -- file path, URL, or identifier
70
+ mime_type TEXT DEFAULT 'text/plain',
71
+ content TEXT NOT NULL, -- full original content
72
+ metadata TEXT NOT NULL DEFAULT '{}',
73
+ created_at TEXT NOT NULL,
74
+ updated_at TEXT NOT NULL
75
+ );
76
+
77
+ CREATE INDEX IF NOT EXISTS idx_doc_ns ON documents(namespace);
78
+
79
+ -- CHUNKS — document chunks with embeddings
80
+ CREATE TABLE IF NOT EXISTS chunks (
5
81
  id INTEGER PRIMARY KEY AUTOINCREMENT,
6
- entry_type TEXT NOT NULL,
7
- agent_role TEXT NOT NULL,
8
- project TEXT NOT NULL,
9
- title TEXT NOT NULL,
10
- body TEXT NOT NULL,
11
- task_id TEXT,
12
- pr_number INTEGER,
13
- run_id TEXT,
14
- status TEXT NOT NULL DEFAULT 'active',
15
- tags TEXT NOT NULL DEFAULT '[]',
82
+ document_id INTEGER NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
83
+ chunk_index INTEGER NOT NULL,
84
+ content TEXT NOT NULL,
85
+ char_offset INTEGER NOT NULL DEFAULT 0,
86
+ char_length INTEGER NOT NULL DEFAULT 0,
16
87
  metadata TEXT NOT NULL DEFAULT '{}',
17
- created_at TEXT NOT NULL,
18
- occurred_at TEXT NOT NULL,
19
- updated_at TEXT NOT NULL,
20
- content_hash TEXT NOT NULL
88
+ created_at TEXT NOT NULL
89
+ );
90
+
91
+ CREATE INDEX IF NOT EXISTS idx_chunk_doc ON chunks(document_id);
92
+
93
+ -- FTS5 for chunks
94
+ CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
95
+ content,
96
+ content=chunks, content_rowid=id
21
97
  );
98
+ CREATE TRIGGER IF NOT EXISTS chunk_fts_i AFTER INSERT ON chunks BEGIN
99
+ INSERT INTO chunks_fts(rowid, content) VALUES (new.id, new.content);
100
+ END;
101
+ CREATE TRIGGER IF NOT EXISTS chunk_fts_u AFTER UPDATE ON chunks BEGIN
102
+ INSERT INTO chunks_fts(chunks_fts, rowid, content) VALUES ('delete', old.id, old.content);
103
+ INSERT INTO chunks_fts(rowid, content) VALUES (new.id, new.content);
104
+ END;
105
+ CREATE TRIGGER IF NOT EXISTS chunk_fts_d AFTER DELETE ON chunks BEGIN
106
+ INSERT INTO chunks_fts(chunks_fts, rowid, content) VALUES ('delete', old.id, old.content);
107
+ END;
22
108
 
23
- CREATE TABLE IF NOT EXISTS memory_summaries (
24
- id INTEGER PRIMARY KEY AUTOINCREMENT,
25
- agent_role TEXT NOT NULL,
26
- project TEXT NOT NULL,
27
- entry_type TEXT,
28
- title TEXT NOT NULL,
29
- body TEXT NOT NULL,
30
- entry_count INTEGER NOT NULL,
31
- date_from TEXT NOT NULL,
32
- date_to TEXT NOT NULL,
33
- entry_ids TEXT NOT NULL,
34
- created_at TEXT NOT NULL
109
+ -- ============================================================
110
+ -- KNOWLEDGE GRAPH — entities and relations
111
+ -- ============================================================
112
+ CREATE TABLE IF NOT EXISTS entities (
113
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
114
+ name TEXT NOT NULL,
115
+ entity_type TEXT NOT NULL, -- project, person, technology, concept, file, etc.
116
+ namespace TEXT,
117
+ description TEXT,
118
+ metadata TEXT NOT NULL DEFAULT '{}',
119
+ created_at TEXT NOT NULL,
120
+ updated_at TEXT NOT NULL,
121
+ UNIQUE(name, entity_type, namespace)
35
122
  );
36
123
 
37
- CREATE TABLE IF NOT EXISTS memory_patterns (
124
+ CREATE INDEX IF NOT EXISTS idx_ent_type ON entities(entity_type);
125
+ CREATE INDEX IF NOT EXISTS idx_ent_ns ON entities(namespace);
126
+
127
+ CREATE TABLE IF NOT EXISTS relations (
38
128
  id INTEGER PRIMARY KEY AUTOINCREMENT,
39
- pattern_type TEXT NOT NULL,
40
- title TEXT NOT NULL,
41
- description TEXT NOT NULL,
42
- projects TEXT NOT NULL DEFAULT '[]',
43
- agent_roles TEXT NOT NULL DEFAULT '[]',
44
- entry_ids TEXT NOT NULL DEFAULT '[]',
45
- occurrence_count INTEGER NOT NULL DEFAULT 1,
46
- status TEXT NOT NULL DEFAULT 'active',
47
- first_seen TEXT NOT NULL,
48
- last_seen TEXT NOT NULL,
49
- resolved_at TEXT,
129
+ source_entity_id INTEGER NOT NULL REFERENCES entities(id) ON DELETE CASCADE,
130
+ relation_type TEXT NOT NULL, -- uses, depends_on, created_by, part_of, related_to, etc.
131
+ target_entity_id INTEGER NOT NULL REFERENCES entities(id) ON DELETE CASCADE,
132
+ weight REAL NOT NULL DEFAULT 1.0,
133
+ memory_id INTEGER REFERENCES memories(id), -- evidence link
134
+ metadata TEXT NOT NULL DEFAULT '{}',
50
135
  created_at TEXT NOT NULL,
51
- updated_at TEXT NOT NULL
136
+ UNIQUE(source_entity_id, relation_type, target_entity_id)
52
137
  );
53
138
 
54
- -- Indexes for memory_entries
55
- CREATE INDEX IF NOT EXISTS idx_me_entry_type ON memory_entries(entry_type);
56
- CREATE INDEX IF NOT EXISTS idx_me_agent_role ON memory_entries(agent_role);
57
- CREATE INDEX IF NOT EXISTS idx_me_project ON memory_entries(project);
58
- CREATE INDEX IF NOT EXISTS idx_me_status ON memory_entries(status);
59
- CREATE INDEX IF NOT EXISTS idx_me_task_id ON memory_entries(task_id);
60
- CREATE INDEX IF NOT EXISTS idx_me_occurred_at ON memory_entries(occurred_at);
61
- CREATE INDEX IF NOT EXISTS idx_me_content_hash ON memory_entries(content_hash);
62
- CREATE INDEX IF NOT EXISTS idx_me_role_project ON memory_entries(agent_role, project);
63
- CREATE INDEX IF NOT EXISTS idx_me_proj_type ON memory_entries(project, entry_type);
64
- CREATE INDEX IF NOT EXISTS idx_me_proj_date ON memory_entries(project, occurred_at);
65
-
66
- -- Indexes for memory_summaries
67
- CREATE INDEX IF NOT EXISTS idx_ms_role_project ON memory_summaries(agent_role, project);
68
-
69
- -- Indexes for memory_patterns
70
- CREATE INDEX IF NOT EXISTS idx_mp_status ON memory_patterns(status);
71
- CREATE INDEX IF NOT EXISTS idx_mp_pattern_type ON memory_patterns(pattern_type);
72
-
73
- -- FTS5 for memory_entries
74
- CREATE VIRTUAL TABLE IF NOT EXISTS memory_fts USING fts5(
75
- title,
76
- body,
77
- content=memory_entries,
78
- content_rowid=id
139
+ CREATE INDEX IF NOT EXISTS idx_rel_source ON relations(source_entity_id);
140
+ CREATE INDEX IF NOT EXISTS idx_rel_target ON relations(target_entity_id);
141
+ CREATE INDEX IF NOT EXISTS idx_rel_type ON relations(relation_type);
142
+
143
+ -- ============================================================
144
+ -- EPISODES — conversation/run history
145
+ -- ============================================================
146
+ CREATE TABLE IF NOT EXISTS episodes (
147
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
148
+ session_id TEXT NOT NULL,
149
+ namespace TEXT,
150
+ agent_role TEXT,
151
+ role TEXT NOT NULL, -- user | assistant | system
152
+ content TEXT NOT NULL,
153
+ summary TEXT,
154
+ metadata TEXT NOT NULL DEFAULT '{}',
155
+ created_at TEXT NOT NULL
79
156
  );
80
157
 
81
- CREATE TRIGGER IF NOT EXISTS memory_fts_insert AFTER INSERT ON memory_entries BEGIN
82
- INSERT INTO memory_fts(rowid, title, body) VALUES (new.id, new.title, new.body);
83
- END;
158
+ CREATE INDEX IF NOT EXISTS idx_ep_session ON episodes(session_id);
159
+ CREATE INDEX IF NOT EXISTS idx_ep_ns ON episodes(namespace);
84
160
 
85
- CREATE TRIGGER IF NOT EXISTS memory_fts_update AFTER UPDATE ON memory_entries BEGIN
86
- INSERT INTO memory_fts(memory_fts, rowid, title, body) VALUES ('delete', old.id, old.title, old.body);
87
- INSERT INTO memory_fts(rowid, title, body) VALUES (new.id, new.title, new.body);
161
+ -- FTS5 for episodes
162
+ CREATE VIRTUAL TABLE IF NOT EXISTS episodes_fts USING fts5(
163
+ content, summary,
164
+ content=episodes, content_rowid=id
165
+ );
166
+ CREATE TRIGGER IF NOT EXISTS ep_fts_i AFTER INSERT ON episodes BEGIN
167
+ INSERT INTO episodes_fts(rowid, content, summary) VALUES (new.id, new.content, new.summary);
88
168
  END;
89
-
90
- CREATE TRIGGER IF NOT EXISTS memory_fts_delete AFTER DELETE ON memory_entries BEGIN
91
- INSERT INTO memory_fts(memory_fts, rowid, title, body) VALUES ('delete', old.id, old.title, old.body);
169
+ CREATE TRIGGER IF NOT EXISTS ep_fts_u AFTER UPDATE ON episodes BEGIN
170
+ INSERT INTO episodes_fts(episodes_fts, rowid, content, summary) VALUES ('delete', old.id, old.content, old.summary);
171
+ INSERT INTO episodes_fts(rowid, content, summary) VALUES (new.id, new.content, new.summary);
92
172
  END;
93
-
94
- -- FTS5 for memory_summaries
95
- CREATE VIRTUAL TABLE IF NOT EXISTS memory_summaries_fts USING fts5(
96
- title,
97
- body,
98
- content=memory_summaries,
99
- content_rowid=id
100
- );
101
-
102
- CREATE TRIGGER IF NOT EXISTS ms_fts_insert AFTER INSERT ON memory_summaries BEGIN
103
- INSERT INTO memory_summaries_fts(rowid, title, body) VALUES (new.id, new.title, new.body);
173
+ CREATE TRIGGER IF NOT EXISTS ep_fts_d AFTER DELETE ON episodes BEGIN
174
+ INSERT INTO episodes_fts(episodes_fts, rowid, content, summary) VALUES ('delete', old.id, old.content, old.summary);
104
175
  END;
105
176
 
106
- CREATE TRIGGER IF NOT EXISTS ms_fts_update AFTER UPDATE ON memory_summaries BEGIN
107
- INSERT INTO memory_summaries_fts(memory_summaries_fts, rowid, title, body) VALUES ('delete', old.id, old.title, old.body);
108
- INSERT INTO memory_summaries_fts(rowid, title, body) VALUES (new.id, new.title, new.body);
109
- END;
177
+ -- ============================================================
178
+ -- SUMMARIES — rolled-up digests
179
+ -- ============================================================
180
+ CREATE TABLE IF NOT EXISTS summaries (
181
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
182
+ scope TEXT NOT NULL,
183
+ namespace TEXT,
184
+ agent_role TEXT,
185
+ title TEXT NOT NULL,
186
+ content TEXT NOT NULL,
187
+ entry_count INTEGER NOT NULL,
188
+ date_from TEXT NOT NULL,
189
+ date_to TEXT NOT NULL,
190
+ entry_ids TEXT NOT NULL, -- JSON array
191
+ created_at TEXT NOT NULL
192
+ );
110
193
 
111
- CREATE TRIGGER IF NOT EXISTS ms_fts_delete AFTER DELETE ON memory_summaries BEGIN
112
- INSERT INTO memory_summaries_fts(memory_summaries_fts, rowid, title, body) VALUES ('delete', old.id, old.title, old.body);
113
- END;
194
+ CREATE INDEX IF NOT EXISTS idx_sum_ns ON summaries(namespace, agent_role);
package/src/server.ts CHANGED
@@ -5,38 +5,45 @@ import {
5
5
  ListToolsRequestSchema,
6
6
  CallToolRequestSchema,
7
7
  } from "@modelcontextprotocol/sdk/types.js";
8
- import { resolveDbPath, initDb, errorResult } from "./db.ts";
8
+ import { resolveDbPath, initDb, initVec, errorResult } from "./db.ts";
9
+ import { getDimensions } from "./embeddings.ts";
9
10
  import { storeTools, handleStore } from "./tools/store.ts";
10
11
  import { recallTools, handleRecall } from "./tools/recall.ts";
11
12
  import { statsTools, handleStats } from "./tools/stats.ts";
12
13
  import { contextTools, handleContext } from "./tools/context.ts";
13
14
  import { summarizeTools, handleSummarize } from "./tools/summarize.ts";
14
- import { patternTools, handlePatterns } from "./tools/patterns.ts";
15
+ import { documentTools, handleDocuments } from "./tools/documents.ts";
16
+ import { knowledgeTools, handleKnowledge } from "./tools/knowledge.ts";
17
+ import { episodeTools, handleEpisodes } from "./tools/episodes.ts";
15
18
 
16
19
  // Parse CLI args
17
20
  const args = process.argv.slice(2);
18
21
  let dbPath: string | undefined;
19
22
  for (let i = 0; i < args.length; i++) {
20
- if (args[i] === "--db" && args[i + 1]) {
21
- dbPath = args[++i];
22
- }
23
+ if (args[i] === "--db" && args[i + 1]) dbPath = args[++i];
23
24
  }
24
25
 
25
26
  const db = initDb(resolveDbPath(dbPath));
27
+ initVec(db, getDimensions());
26
28
 
27
29
  const allTools = [
28
30
  ...storeTools,
29
31
  ...recallTools,
30
- ...statsTools,
32
+ ...documentTools,
33
+ ...knowledgeTools,
34
+ ...episodeTools,
31
35
  ...contextTools,
32
36
  ...summarizeTools,
33
- ...patternTools,
37
+ ...statsTools,
34
38
  ];
35
39
 
36
- const handlers = [handleStore, handleRecall, handleStats, handleContext, handleSummarize, handlePatterns];
40
+ const handlers: Array<(db: any, name: string, args: any) => any> = [
41
+ handleStore, handleRecall, handleDocuments, handleKnowledge,
42
+ handleEpisodes, handleContext, handleSummarize, handleStats,
43
+ ];
37
44
 
38
45
  const server = new Server(
39
- { name: "ao-memory-mcp", version: "1.0.0" },
46
+ { name: "ao-memory-mcp", version: "2.0.0" },
40
47
  { capabilities: { tools: {} } }
41
48
  );
42
49
 
@@ -49,7 +56,11 @@ server.setRequestHandler(CallToolRequestSchema, async (req) => {
49
56
 
50
57
  for (const handler of handlers) {
51
58
  const result = handler(db, name, input || {});
52
- if (result) return result;
59
+ if (result !== null) {
60
+ // Handle async results (embed operations)
61
+ if (result instanceof Promise) return await result;
62
+ return result;
63
+ }
53
64
  }
54
65
 
55
66
  return errorResult(`Unknown tool: ${name}`);