@launchapp-dev/ao-memory-mcp 1.0.0 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/db.d.ts +25 -0
- package/dist/db.js +90 -0
- package/dist/embeddings.d.ts +14 -0
- package/dist/embeddings.js +72 -0
- package/dist/schema.sql +194 -0
- package/dist/server.d.ts +2 -0
- package/dist/server.js +56 -0
- package/dist/tools/context.d.ts +29 -0
- package/dist/tools/context.js +88 -0
- package/dist/tools/documents.d.ts +142 -0
- package/dist/tools/documents.js +201 -0
- package/dist/tools/episodes.d.ts +112 -0
- package/dist/tools/episodes.js +98 -0
- package/dist/tools/knowledge.d.ts +177 -0
- package/dist/tools/knowledge.js +235 -0
- package/dist/tools/recall.d.ts +153 -0
- package/dist/tools/recall.js +180 -0
- package/dist/tools/stats.d.ts +24 -0
- package/dist/tools/stats.js +50 -0
- package/dist/tools/store.d.ts +180 -0
- package/dist/tools/store.js +176 -0
- package/dist/tools/summarize.d.ts +74 -0
- package/dist/tools/summarize.js +92 -0
- package/package.json +12 -6
- package/src/schema.sql +173 -92
- package/migrate.ts +0 -250
- package/src/db.ts +0 -48
- package/src/server.ts +0 -59
- package/src/tools/context.ts +0 -77
- package/src/tools/patterns.ts +0 -165
- package/src/tools/recall.ts +0 -124
- package/src/tools/stats.ts +0 -74
- package/src/tools/store.ts +0 -160
- package/src/tools/summarize.ts +0 -140
- package/tsconfig.json +0 -12
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import { now, jsonResult, errorResult } from "../db.js";
|
|
2
|
+
// Tool descriptor: create a summary from memory entries and transition them
// to the 'summarized' lifecycle state. The agent supplies the summary text.
const summarizeTool = {
    name: "memory.summarize",
    description: "Create a summary of memory entries. Agent provides the summary text. Server creates the summary record and transitions entries to 'summarized'.",
    inputSchema: {
        type: "object",
        properties: {
            namespace: { type: "string", description: "Namespace to summarize" },
            agent_role: { type: "string", description: "Agent role" },
            summary_title: { type: "string", description: "Summary title" },
            summary_body: { type: "string", description: "Summary content (markdown)" },
            before: { type: "string", description: "Summarize entries before this ISO date" },
            entry_ids: { type: "array", items: { type: "number" }, description: "Specific IDs to summarize" },
        },
        required: ["namespace", "summary_title", "summary_body"],
    },
};

// Tool descriptor: surface stale entries that should be summarized, and
// (outside dry-run) archive entries that were summarized long ago.
const cleanupTool = {
    name: "memory.cleanup",
    description: "Identify stale entries needing summarization or archive old summarized entries.",
    inputSchema: {
        type: "object",
        properties: {
            older_than_days: { type: "number", description: "Entries older than N days (default 7)" },
            min_entries: { type: "number", description: "Min entries per scope to trigger (default 10)" },
            dry_run: { type: "boolean", description: "Preview only (default true)" },
        },
    },
};

// MCP tool registrations handled by this module.
export const summarizeTools = [summarizeTool, cleanupTool];
|
|
32
|
+
/**
 * Dispatch a summarize-family tool call to its handler.
 *
 * @param {object} db - better-sqlite3 database handle.
 * @param {string} name - Tool name as registered in `summarizeTools`.
 * @param {object} args - Tool arguments from the MCP request.
 * @returns {object|null} Tool result payload, or null if `name` is not handled here.
 */
export function handleSummarize(db, name, args) {
    switch (name) {
        case "memory.summarize":
            return memorySummarize(db, args);
        case "memory.cleanup":
            return memoryCleanup(db, args);
        default:
            // Not one of ours — let the caller try other tool families.
            return null;
    }
}
|
|
39
|
+
/**
 * Create a summary record covering a set of memory entries and mark those
 * entries 'summarized', all in one transaction.
 *
 * Entries are chosen either from args.entry_ids (explicit) or by querying
 * active entries in the namespace older than args.before (default: 3 days ago),
 * optionally filtered by agent_role.
 *
 * @param {object} db - better-sqlite3 database handle.
 * @param {object} args - { namespace, agent_role?, summary_title, summary_body, before?, entry_ids? }
 * @returns {object} jsonResult({ summary_id, entries_summarized }) or errorResult(...)
 */
function memorySummarize(db, args) {
    const { namespace, agent_role, summary_title, summary_body } = args;
    const result = db.transaction(() => {
        let entryIds;
        if (args.entry_ids?.length) {
            entryIds = args.entry_ids;
        }
        else {
            // Default window: active entries that occurred more than 3 days ago.
            const cutoff = args.before || new Date(Date.now() - 3 * 24 * 60 * 60 * 1000).toISOString();
            const conditions = ["namespace = ?", "status = 'active'", "occurred_at < ?"];
            const vals = [namespace, cutoff];
            if (agent_role) {
                conditions.push("agent_role = ?");
                vals.push(agent_role);
            }
            const rows = db.prepare(`SELECT id FROM memories WHERE ${conditions.join(" AND ")}`).all(...vals);
            entryIds = rows.map(r => r.id);
        }
        if (entryIds.length === 0)
            return { error: "No entries to summarize" };
        const placeholders = entryIds.map(() => "?").join(",");
        // COUNT(*) guards against caller-supplied entry_ids that match no rows.
        // Without it, MIN/MAX come back NULL and the INSERT below would violate
        // the NOT NULL constraints on summaries.date_from/date_to and throw.
        const range = db.prepare(`SELECT COUNT(*) as cnt, MIN(occurred_at) as date_from, MAX(occurred_at) as date_to FROM memories WHERE id IN (${placeholders})`).get(...entryIds);
        if (!range || range.cnt === 0 || range.date_from == null)
            return { error: "No entries to summarize" };
        const ts = now();
        const sumResult = db.prepare(`
      INSERT INTO summaries (scope, namespace, agent_role, title, content, entry_count, date_from, date_to, entry_ids, created_at)
      VALUES ('project', ?, ?, ?, ?, ?, ?, ?, ?, ?)
    `).run(namespace, agent_role || null, summary_title, summary_body, entryIds.length, range.date_from, range.date_to, JSON.stringify(entryIds), ts);
        db.prepare(`UPDATE memories SET status = 'summarized', updated_at = ? WHERE id IN (${placeholders})`).run(ts, ...entryIds);
        return { summary_id: Number(sumResult.lastInsertRowid), entries_summarized: entryIds.length };
    })();
    if (result.error)
        return errorResult(result.error);
    return jsonResult(result);
}
|
|
72
|
+
/**
 * Report scopes (namespace × agent_role) that have accumulated enough stale
 * active entries to warrant summarization, and — unless dry_run — archive
 * entries that were summarized more than 30 days ago.
 *
 * @param {object} db - better-sqlite3 database handle.
 * @param {object} args - { older_than_days?, min_entries?, dry_run? }
 * @returns {object} jsonResult({ needs_summarization, needs_archival, archived, dry_run })
 */
function memoryCleanup(db, args) {
    const staleDays = args.older_than_days ?? 7;
    const threshold = args.min_entries ?? 10;
    const previewOnly = args.dry_run ?? true;
    const staleBefore = new Date(Date.now() - staleDays * 24 * 60 * 60 * 1000).toISOString();
    // One row per scope that crossed the threshold of stale active entries.
    const candidates = db.prepare(`
    SELECT namespace, agent_role, COUNT(*) as entry_count,
           MIN(occurred_at) as date_from, MAX(occurred_at) as date_to
    FROM memories WHERE status = 'active' AND occurred_at < ?
    GROUP BY namespace, agent_role
    HAVING COUNT(*) >= ?
  `).all(staleBefore, threshold);
    // Summarized entries untouched for 30 days are eligible for archival.
    const archiveBefore = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString();
    const archivable = db.prepare("SELECT COUNT(*) as c FROM memories WHERE status = 'summarized' AND updated_at < ?").get(archiveBefore).c;
    let archived = 0;
    if (!previewOnly && archivable > 0) {
        archived = db.prepare("UPDATE memories SET status = 'archived', updated_at = ? WHERE status = 'summarized' AND updated_at < ?").run(now(), archiveBefore).changes;
    }
    return jsonResult({ needs_summarization: candidates, needs_archival: archivable, archived, dry_run: previewOnly });
}
|
package/package.json
CHANGED
|
@@ -1,19 +1,25 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@launchapp-dev/ao-memory-mcp",
|
|
3
|
-
"version": "
|
|
4
|
-
"description": "MCP server for
|
|
3
|
+
"version": "2.0.1",
|
|
4
|
+
"description": "Cognitive memory MCP server for AI agents — semantic search, document RAG, knowledge graph, episodic memory, and hybrid retrieval",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
7
|
-
"ao-memory-mcp": "./
|
|
7
|
+
"ao-memory-mcp": "./dist/server.js"
|
|
8
8
|
},
|
|
9
|
+
"files": [
|
|
10
|
+
"dist",
|
|
11
|
+
"src/schema.sql"
|
|
12
|
+
],
|
|
9
13
|
"scripts": {
|
|
10
|
-
"start": "node
|
|
11
|
-
"
|
|
12
|
-
"
|
|
14
|
+
"start": "node dist/server.js",
|
|
15
|
+
"build": "tsc && cp src/schema.sql dist/schema.sql",
|
|
16
|
+
"prepublishOnly": "npm run build"
|
|
13
17
|
},
|
|
14
18
|
"dependencies": {
|
|
19
|
+
"@huggingface/transformers": "^3.0.0",
|
|
15
20
|
"@modelcontextprotocol/sdk": "^1.0.0",
|
|
16
21
|
"better-sqlite3": "^11.0.0",
|
|
22
|
+
"sqlite-vec": "^0.1.0",
|
|
17
23
|
"zod": "^3.22.0"
|
|
18
24
|
},
|
|
19
25
|
"devDependencies": {
|
package/src/schema.sql
CHANGED
|
@@ -1,113 +1,194 @@
|
|
|
1
1
|
PRAGMA journal_mode = WAL;
|
|
2
2
|
PRAGMA foreign_keys = ON;
|
|
3
3
|
|
|
4
|
-
|
|
4
|
+
-- ============================================================
|
|
5
|
+
-- MEMORIES — unified store for semantic, episodic, procedural
|
|
6
|
+
-- ============================================================
|
|
7
|
+
CREATE TABLE IF NOT EXISTS memories (
|
|
8
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
9
|
+
memory_type TEXT NOT NULL, -- semantic | episodic | procedural
|
|
10
|
+
scope TEXT NOT NULL DEFAULT 'project', -- global | user | project | session
|
|
11
|
+
namespace TEXT, -- project name, user id, session id, etc.
|
|
12
|
+
agent_role TEXT, -- planner, reviewer, qa-tester, or any custom role
|
|
13
|
+
title TEXT NOT NULL,
|
|
14
|
+
content TEXT NOT NULL,
|
|
15
|
+
-- references
|
|
16
|
+
task_id TEXT,
|
|
17
|
+
pr_number INTEGER,
|
|
18
|
+
run_id TEXT,
|
|
19
|
+
-- lifecycle
|
|
20
|
+
status TEXT NOT NULL DEFAULT 'active', -- active | summarized | archived
|
|
21
|
+
confidence REAL NOT NULL DEFAULT 1.0, -- 0.0-1.0, decays over time
|
|
22
|
+
superseded_by INTEGER REFERENCES memories(id),
|
|
23
|
+
-- temporal
|
|
24
|
+
tags TEXT NOT NULL DEFAULT '[]',
|
|
25
|
+
metadata TEXT NOT NULL DEFAULT '{}',
|
|
26
|
+
created_at TEXT NOT NULL,
|
|
27
|
+
occurred_at TEXT NOT NULL,
|
|
28
|
+
updated_at TEXT NOT NULL,
|
|
29
|
+
last_accessed_at TEXT,
|
|
30
|
+
access_count INTEGER NOT NULL DEFAULT 0,
|
|
31
|
+
content_hash TEXT NOT NULL
|
|
32
|
+
);
|
|
33
|
+
|
|
34
|
+
CREATE INDEX IF NOT EXISTS idx_mem_type ON memories(memory_type);
|
|
35
|
+
CREATE INDEX IF NOT EXISTS idx_mem_scope ON memories(scope, namespace);
|
|
36
|
+
CREATE INDEX IF NOT EXISTS idx_mem_role ON memories(agent_role);
|
|
37
|
+
CREATE INDEX IF NOT EXISTS idx_mem_status ON memories(status);
|
|
38
|
+
CREATE INDEX IF NOT EXISTS idx_mem_task ON memories(task_id);
|
|
39
|
+
CREATE INDEX IF NOT EXISTS idx_mem_occurred ON memories(occurred_at);
|
|
40
|
+
CREATE INDEX IF NOT EXISTS idx_mem_hash ON memories(content_hash);
|
|
41
|
+
CREATE INDEX IF NOT EXISTS idx_mem_ns_type ON memories(namespace, memory_type);
|
|
42
|
+
CREATE INDEX IF NOT EXISTS idx_mem_ns_role ON memories(namespace, agent_role);
|
|
43
|
+
CREATE INDEX IF NOT EXISTS idx_mem_confidence ON memories(confidence);
|
|
44
|
+
CREATE INDEX IF NOT EXISTS idx_mem_accessed ON memories(last_accessed_at);
|
|
45
|
+
|
|
46
|
+
-- FTS5 for memories
|
|
47
|
+
CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts USING fts5(
|
|
48
|
+
title, content,
|
|
49
|
+
content=memories, content_rowid=id
|
|
50
|
+
);
|
|
51
|
+
CREATE TRIGGER IF NOT EXISTS mem_fts_i AFTER INSERT ON memories BEGIN
|
|
52
|
+
INSERT INTO memories_fts(rowid, title, content) VALUES (new.id, new.title, new.content);
|
|
53
|
+
END;
|
|
54
|
+
CREATE TRIGGER IF NOT EXISTS mem_fts_u AFTER UPDATE ON memories BEGIN
|
|
55
|
+
INSERT INTO memories_fts(memories_fts, rowid, title, content) VALUES ('delete', old.id, old.title, old.content);
|
|
56
|
+
INSERT INTO memories_fts(rowid, title, content) VALUES (new.id, new.title, new.content);
|
|
57
|
+
END;
|
|
58
|
+
CREATE TRIGGER IF NOT EXISTS mem_fts_d AFTER DELETE ON memories BEGIN
|
|
59
|
+
INSERT INTO memories_fts(memories_fts, rowid, title, content) VALUES ('delete', old.id, old.title, old.content);
|
|
60
|
+
END;
|
|
61
|
+
|
|
62
|
+
-- ============================================================
|
|
63
|
+
-- DOCUMENTS — source documents ingested for RAG
|
|
64
|
+
-- ============================================================
|
|
65
|
+
CREATE TABLE IF NOT EXISTS documents (
|
|
66
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
67
|
+
namespace TEXT,
|
|
68
|
+
title TEXT NOT NULL,
|
|
69
|
+
source TEXT, -- file path, URL, or identifier
|
|
70
|
+
mime_type TEXT DEFAULT 'text/plain',
|
|
71
|
+
content TEXT NOT NULL, -- full original content
|
|
72
|
+
metadata TEXT NOT NULL DEFAULT '{}',
|
|
73
|
+
created_at TEXT NOT NULL,
|
|
74
|
+
updated_at TEXT NOT NULL
|
|
75
|
+
);
|
|
76
|
+
|
|
77
|
+
CREATE INDEX IF NOT EXISTS idx_doc_ns ON documents(namespace);
|
|
78
|
+
|
|
79
|
+
-- CHUNKS — document chunks with embeddings
|
|
80
|
+
CREATE TABLE IF NOT EXISTS chunks (
|
|
5
81
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
task_id TEXT,
|
|
12
|
-
pr_number INTEGER,
|
|
13
|
-
run_id TEXT,
|
|
14
|
-
status TEXT NOT NULL DEFAULT 'active',
|
|
15
|
-
tags TEXT NOT NULL DEFAULT '[]',
|
|
82
|
+
document_id INTEGER NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
|
|
83
|
+
chunk_index INTEGER NOT NULL,
|
|
84
|
+
content TEXT NOT NULL,
|
|
85
|
+
char_offset INTEGER NOT NULL DEFAULT 0,
|
|
86
|
+
char_length INTEGER NOT NULL DEFAULT 0,
|
|
16
87
|
metadata TEXT NOT NULL DEFAULT '{}',
|
|
17
|
-
created_at TEXT NOT NULL
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
88
|
+
created_at TEXT NOT NULL
|
|
89
|
+
);
|
|
90
|
+
|
|
91
|
+
CREATE INDEX IF NOT EXISTS idx_chunk_doc ON chunks(document_id);
|
|
92
|
+
|
|
93
|
+
-- FTS5 for chunks
|
|
94
|
+
CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
|
|
95
|
+
content,
|
|
96
|
+
content=chunks, content_rowid=id
|
|
21
97
|
);
|
|
98
|
+
CREATE TRIGGER IF NOT EXISTS chunk_fts_i AFTER INSERT ON chunks BEGIN
|
|
99
|
+
INSERT INTO chunks_fts(rowid, content) VALUES (new.id, new.content);
|
|
100
|
+
END;
|
|
101
|
+
CREATE TRIGGER IF NOT EXISTS chunk_fts_u AFTER UPDATE ON chunks BEGIN
|
|
102
|
+
INSERT INTO chunks_fts(chunks_fts, rowid, content) VALUES ('delete', old.id, old.content);
|
|
103
|
+
INSERT INTO chunks_fts(rowid, content) VALUES (new.id, new.content);
|
|
104
|
+
END;
|
|
105
|
+
CREATE TRIGGER IF NOT EXISTS chunk_fts_d AFTER DELETE ON chunks BEGIN
|
|
106
|
+
INSERT INTO chunks_fts(chunks_fts, rowid, content) VALUES ('delete', old.id, old.content);
|
|
107
|
+
END;
|
|
22
108
|
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
109
|
+
-- ============================================================
|
|
110
|
+
-- KNOWLEDGE GRAPH — entities and relations
|
|
111
|
+
-- ============================================================
|
|
112
|
+
CREATE TABLE IF NOT EXISTS entities (
|
|
113
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
114
|
+
name TEXT NOT NULL,
|
|
115
|
+
entity_type TEXT NOT NULL, -- project, person, technology, concept, file, etc.
|
|
116
|
+
namespace TEXT,
|
|
117
|
+
description TEXT,
|
|
118
|
+
metadata TEXT NOT NULL DEFAULT '{}',
|
|
119
|
+
created_at TEXT NOT NULL,
|
|
120
|
+
updated_at TEXT NOT NULL,
|
|
121
|
+
UNIQUE(name, entity_type, namespace)
|
|
35
122
|
);
|
|
36
123
|
|
|
37
|
-
CREATE
|
|
124
|
+
CREATE INDEX IF NOT EXISTS idx_ent_type ON entities(entity_type);
|
|
125
|
+
CREATE INDEX IF NOT EXISTS idx_ent_ns ON entities(namespace);
|
|
126
|
+
|
|
127
|
+
CREATE TABLE IF NOT EXISTS relations (
|
|
38
128
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
occurrence_count INTEGER NOT NULL DEFAULT 1,
|
|
46
|
-
status TEXT NOT NULL DEFAULT 'active',
|
|
47
|
-
first_seen TEXT NOT NULL,
|
|
48
|
-
last_seen TEXT NOT NULL,
|
|
49
|
-
resolved_at TEXT,
|
|
129
|
+
source_entity_id INTEGER NOT NULL REFERENCES entities(id) ON DELETE CASCADE,
|
|
130
|
+
relation_type TEXT NOT NULL, -- uses, depends_on, created_by, part_of, related_to, etc.
|
|
131
|
+
target_entity_id INTEGER NOT NULL REFERENCES entities(id) ON DELETE CASCADE,
|
|
132
|
+
weight REAL NOT NULL DEFAULT 1.0,
|
|
133
|
+
memory_id INTEGER REFERENCES memories(id), -- evidence link
|
|
134
|
+
metadata TEXT NOT NULL DEFAULT '{}',
|
|
50
135
|
created_at TEXT NOT NULL,
|
|
51
|
-
|
|
136
|
+
UNIQUE(source_entity_id, relation_type, target_entity_id)
|
|
52
137
|
);
|
|
53
138
|
|
|
54
|
-
|
|
55
|
-
CREATE INDEX IF NOT EXISTS
|
|
56
|
-
CREATE INDEX IF NOT EXISTS
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
CREATE
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
--
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
CREATE INDEX IF NOT EXISTS idx_mp_pattern_type ON memory_patterns(pattern_type);
|
|
72
|
-
|
|
73
|
-
-- FTS5 for memory_entries
|
|
74
|
-
CREATE VIRTUAL TABLE IF NOT EXISTS memory_fts USING fts5(
|
|
75
|
-
title,
|
|
76
|
-
body,
|
|
77
|
-
content=memory_entries,
|
|
78
|
-
content_rowid=id
|
|
139
|
+
CREATE INDEX IF NOT EXISTS idx_rel_source ON relations(source_entity_id);
|
|
140
|
+
CREATE INDEX IF NOT EXISTS idx_rel_target ON relations(target_entity_id);
|
|
141
|
+
CREATE INDEX IF NOT EXISTS idx_rel_type ON relations(relation_type);
|
|
142
|
+
|
|
143
|
+
-- ============================================================
|
|
144
|
+
-- EPISODES — conversation/run history
|
|
145
|
+
-- ============================================================
|
|
146
|
+
CREATE TABLE IF NOT EXISTS episodes (
|
|
147
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
148
|
+
session_id TEXT NOT NULL,
|
|
149
|
+
namespace TEXT,
|
|
150
|
+
agent_role TEXT,
|
|
151
|
+
role TEXT NOT NULL, -- user | assistant | system
|
|
152
|
+
content TEXT NOT NULL,
|
|
153
|
+
summary TEXT,
|
|
154
|
+
metadata TEXT NOT NULL DEFAULT '{}',
|
|
155
|
+
created_at TEXT NOT NULL
|
|
79
156
|
);
|
|
80
157
|
|
|
81
|
-
CREATE
|
|
82
|
-
|
|
83
|
-
END;
|
|
158
|
+
CREATE INDEX IF NOT EXISTS idx_ep_session ON episodes(session_id);
|
|
159
|
+
CREATE INDEX IF NOT EXISTS idx_ep_ns ON episodes(namespace);
|
|
84
160
|
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
161
|
+
-- FTS5 for episodes
|
|
162
|
+
CREATE VIRTUAL TABLE IF NOT EXISTS episodes_fts USING fts5(
|
|
163
|
+
content, summary,
|
|
164
|
+
content=episodes, content_rowid=id
|
|
165
|
+
);
|
|
166
|
+
CREATE TRIGGER IF NOT EXISTS ep_fts_i AFTER INSERT ON episodes BEGIN
|
|
167
|
+
INSERT INTO episodes_fts(rowid, content, summary) VALUES (new.id, new.content, new.summary);
|
|
88
168
|
END;
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
INSERT INTO
|
|
169
|
+
CREATE TRIGGER IF NOT EXISTS ep_fts_u AFTER UPDATE ON episodes BEGIN
|
|
170
|
+
INSERT INTO episodes_fts(episodes_fts, rowid, content, summary) VALUES ('delete', old.id, old.content, old.summary);
|
|
171
|
+
INSERT INTO episodes_fts(rowid, content, summary) VALUES (new.id, new.content, new.summary);
|
|
92
172
|
END;
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
CREATE VIRTUAL TABLE IF NOT EXISTS memory_summaries_fts USING fts5(
|
|
96
|
-
title,
|
|
97
|
-
body,
|
|
98
|
-
content=memory_summaries,
|
|
99
|
-
content_rowid=id
|
|
100
|
-
);
|
|
101
|
-
|
|
102
|
-
CREATE TRIGGER IF NOT EXISTS ms_fts_insert AFTER INSERT ON memory_summaries BEGIN
|
|
103
|
-
INSERT INTO memory_summaries_fts(rowid, title, body) VALUES (new.id, new.title, new.body);
|
|
173
|
+
-- Keep episodes_fts in sync on row deletion. For an external-content FTS5
-- table, the special 'delete' command must receive the EXACT values stored
-- for the row; passing old.content for the summary column (as the previous
-- version did) silently corrupts the FTS index.
CREATE TRIGGER IF NOT EXISTS ep_fts_d AFTER DELETE ON episodes BEGIN
  INSERT INTO episodes_fts(episodes_fts, rowid, content, summary) VALUES ('delete', old.id, old.content, old.summary);
END;
|
|
105
176
|
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
177
|
+
-- ============================================================
|
|
178
|
+
-- SUMMARIES — rolled-up digests
|
|
179
|
+
-- ============================================================
|
|
180
|
+
CREATE TABLE IF NOT EXISTS summaries (
|
|
181
|
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
182
|
+
scope TEXT NOT NULL,
|
|
183
|
+
namespace TEXT,
|
|
184
|
+
agent_role TEXT,
|
|
185
|
+
title TEXT NOT NULL,
|
|
186
|
+
content TEXT NOT NULL,
|
|
187
|
+
entry_count INTEGER NOT NULL,
|
|
188
|
+
date_from TEXT NOT NULL,
|
|
189
|
+
date_to TEXT NOT NULL,
|
|
190
|
+
entry_ids TEXT NOT NULL, -- JSON array
|
|
191
|
+
created_at TEXT NOT NULL
|
|
192
|
+
);
|
|
110
193
|
|
|
111
|
-
CREATE
|
|
112
|
-
INSERT INTO memory_summaries_fts(memory_summaries_fts, rowid, title, body) VALUES ('delete', old.id, old.title, old.body);
|
|
113
|
-
END;
|
|
194
|
+
CREATE INDEX IF NOT EXISTS idx_sum_ns ON summaries(namespace, agent_role);
|
package/migrate.ts
DELETED
|
@@ -1,250 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env node --experimental-strip-types
|
|
2
|
-
/**
|
|
3
|
-
* Migration utility: imports existing .ao/memory/*.md files into the memory database.
|
|
4
|
-
*
|
|
5
|
-
* Usage:
|
|
6
|
-
* node --experimental-strip-types migrate.ts [--repos-dir <path>] [--db <path>]
|
|
7
|
-
*
|
|
8
|
-
* Defaults:
|
|
9
|
-
* --repos-dir: scans current directory and subdirectories for .ao/memory/
|
|
10
|
-
* --db: ~/.ao/memory.db
|
|
11
|
-
*/
|
|
12
|
-
import { readdirSync, readFileSync, existsSync, statSync } from "node:fs";
|
|
13
|
-
import { join, basename, dirname } from "node:path";
|
|
14
|
-
import { resolveDbPath, initDb, contentHash, now } from "./src/db.ts";
|
|
15
|
-
|
|
16
|
-
// Parse args
|
|
17
|
-
const argv = process.argv.slice(2);
|
|
18
|
-
let reposDir = ".";
|
|
19
|
-
let dbPath: string | undefined;
|
|
20
|
-
for (let i = 0; i < argv.length; i++) {
|
|
21
|
-
if (argv[i] === "--repos-dir" && argv[i + 1]) reposDir = argv[++i];
|
|
22
|
-
if (argv[i] === "--db" && argv[i + 1]) dbPath = argv[++i];
|
|
23
|
-
}
|
|
24
|
-
|
|
25
|
-
const db = initDb(resolveDbPath(dbPath));
|
|
26
|
-
|
|
27
|
-
const roleToFile: Record<string, string> = {
|
|
28
|
-
"planner.md": "planner",
|
|
29
|
-
"product-owner.md": "product-owner",
|
|
30
|
-
"reconciler.md": "reconciler",
|
|
31
|
-
"reviewer.md": "reviewer",
|
|
32
|
-
"qa-tester.md": "qa-tester",
|
|
33
|
-
};
|
|
34
|
-
|
|
35
|
-
const sectionToEntryType: Record<string, string> = {
|
|
36
|
-
"tasks enqueued": "task_dispatch",
|
|
37
|
-
"recently enqueued": "task_dispatch",
|
|
38
|
-
"rework dispatched": "task_dispatch",
|
|
39
|
-
"rebase dispatched": "task_dispatch",
|
|
40
|
-
"tasks skipped": "observation",
|
|
41
|
-
"capacity notes": "observation",
|
|
42
|
-
"queue status": "observation",
|
|
43
|
-
"pipeline health": "observation",
|
|
44
|
-
"decisions": "decision",
|
|
45
|
-
"tasks created": "decision",
|
|
46
|
-
"features assessed": "observation",
|
|
47
|
-
"gaps identified": "observation",
|
|
48
|
-
"tasks unblocked": "action",
|
|
49
|
-
"tasks marked done": "action",
|
|
50
|
-
"queue cleaned": "action",
|
|
51
|
-
"actions log": "action",
|
|
52
|
-
"prs merged": "review",
|
|
53
|
-
"prs with changes requested": "review",
|
|
54
|
-
"prs closed": "review",
|
|
55
|
-
"known patterns": "pattern",
|
|
56
|
-
"log": "test_result",
|
|
57
|
-
"test results": "test_result",
|
|
58
|
-
"bugs filed": "test_result",
|
|
59
|
-
"regressions": "test_result",
|
|
60
|
-
};
|
|
61
|
-
|
|
62
|
-
function guessEntryType(sectionHeader: string, agentRole: string): string {
|
|
63
|
-
const lower = sectionHeader.toLowerCase();
|
|
64
|
-
for (const [key, type] of Object.entries(sectionToEntryType)) {
|
|
65
|
-
if (lower.includes(key)) return type;
|
|
66
|
-
}
|
|
67
|
-
// Fallback by role
|
|
68
|
-
if (agentRole === "planner") return "task_dispatch";
|
|
69
|
-
if (agentRole === "product-owner") return "decision";
|
|
70
|
-
if (agentRole === "reconciler") return "action";
|
|
71
|
-
if (agentRole === "reviewer") return "review";
|
|
72
|
-
if (agentRole === "qa-tester") return "test_result";
|
|
73
|
-
return "observation";
|
|
74
|
-
}
|
|
75
|
-
|
|
76
|
-
interface ParsedEntry {
|
|
77
|
-
date: string;
|
|
78
|
-
title: string;
|
|
79
|
-
body: string;
|
|
80
|
-
entryType: string;
|
|
81
|
-
taskId?: string;
|
|
82
|
-
prNumber?: number;
|
|
83
|
-
runId?: string;
|
|
84
|
-
}
|
|
85
|
-
|
|
86
|
-
function parseMemoryFile(content: string, agentRole: string): ParsedEntry[] {
|
|
87
|
-
const entries: ParsedEntry[] = [];
|
|
88
|
-
const lines = content.split("\n");
|
|
89
|
-
let currentSection = "";
|
|
90
|
-
let currentDate = "";
|
|
91
|
-
let currentBlock: string[] = [];
|
|
92
|
-
|
|
93
|
-
function flushBlock() {
|
|
94
|
-
if (currentBlock.length === 0 || !currentDate) return;
|
|
95
|
-
const body = currentBlock.join("\n").trim();
|
|
96
|
-
if (!body) return;
|
|
97
|
-
|
|
98
|
-
const entryType = guessEntryType(currentSection, agentRole);
|
|
99
|
-
const firstLine = currentBlock.find(l => l.trim())?.trim() || "";
|
|
100
|
-
const title = firstLine.length > 120 ? firstLine.slice(0, 117) + "..." : firstLine;
|
|
101
|
-
|
|
102
|
-
// Extract task IDs
|
|
103
|
-
const taskMatch = body.match(/TASK-\d+/);
|
|
104
|
-
const prMatch = body.match(/(?:PR\s*#|#)(\d+)/);
|
|
105
|
-
const runMatch = body.match(/run\s+(\d+)/i);
|
|
106
|
-
|
|
107
|
-
entries.push({
|
|
108
|
-
date: currentDate,
|
|
109
|
-
title: title || `${agentRole} ${entryType} ${currentDate}`,
|
|
110
|
-
body,
|
|
111
|
-
entryType,
|
|
112
|
-
taskId: taskMatch?.[0],
|
|
113
|
-
prNumber: prMatch ? parseInt(prMatch[1]) : undefined,
|
|
114
|
-
runId: runMatch ? `run ${runMatch[1]}` : undefined,
|
|
115
|
-
});
|
|
116
|
-
}
|
|
117
|
-
|
|
118
|
-
for (const line of lines) {
|
|
119
|
-
// Section headers
|
|
120
|
-
const sectionMatch = line.match(/^##\s+(.+)/);
|
|
121
|
-
if (sectionMatch) {
|
|
122
|
-
flushBlock();
|
|
123
|
-
currentBlock = [];
|
|
124
|
-
currentSection = sectionMatch[1];
|
|
125
|
-
|
|
126
|
-
// Check if section header contains a date
|
|
127
|
-
const dateInHeader = currentSection.match(/(\d{4}-\d{2}-\d{2})/);
|
|
128
|
-
if (dateInHeader) currentDate = dateInHeader[1];
|
|
129
|
-
continue;
|
|
130
|
-
}
|
|
131
|
-
|
|
132
|
-
// Date patterns
|
|
133
|
-
const dateMatch = line.match(/\[(\d{4}-\d{2}-\d{2})\]/);
|
|
134
|
-
if (dateMatch) {
|
|
135
|
-
flushBlock();
|
|
136
|
-
currentBlock = [];
|
|
137
|
-
currentDate = dateMatch[1];
|
|
138
|
-
currentBlock.push(line);
|
|
139
|
-
continue;
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
// Separator — flush
|
|
143
|
-
if (line.match(/^---\s*$/)) {
|
|
144
|
-
flushBlock();
|
|
145
|
-
currentBlock = [];
|
|
146
|
-
continue;
|
|
147
|
-
}
|
|
148
|
-
|
|
149
|
-
currentBlock.push(line);
|
|
150
|
-
}
|
|
151
|
-
flushBlock();
|
|
152
|
-
|
|
153
|
-
return entries;
|
|
154
|
-
}
|
|
155
|
-
|
|
156
|
-
function findMemoryDirs(rootDir: string): { project: string; memoryDir: string }[] {
|
|
157
|
-
const results: { project: string; memoryDir: string }[] = [];
|
|
158
|
-
|
|
159
|
-
// Check if rootDir itself has .ao/memory
|
|
160
|
-
const directMemory = join(rootDir, ".ao", "memory");
|
|
161
|
-
if (existsSync(directMemory) && statSync(directMemory).isDirectory()) {
|
|
162
|
-
results.push({ project: basename(rootDir), memoryDir: directMemory });
|
|
163
|
-
}
|
|
164
|
-
|
|
165
|
-
// Scan subdirectories
|
|
166
|
-
try {
|
|
167
|
-
for (const entry of readdirSync(rootDir, { withFileTypes: true })) {
|
|
168
|
-
if (!entry.isDirectory() || entry.name.startsWith(".")) continue;
|
|
169
|
-
const memDir = join(rootDir, entry.name, ".ao", "memory");
|
|
170
|
-
if (existsSync(memDir) && statSync(memDir).isDirectory()) {
|
|
171
|
-
results.push({ project: entry.name, memoryDir: memDir });
|
|
172
|
-
}
|
|
173
|
-
}
|
|
174
|
-
} catch {}
|
|
175
|
-
|
|
176
|
-
return results;
|
|
177
|
-
}
|
|
178
|
-
|
|
179
|
-
// Main
|
|
180
|
-
const ts = now();
|
|
181
|
-
const memoryDirs = findMemoryDirs(reposDir);
|
|
182
|
-
const summary: Record<string, Record<string, number>> = {};
|
|
183
|
-
let totalImported = 0;
|
|
184
|
-
let totalSkipped = 0;
|
|
185
|
-
|
|
186
|
-
const insert = db.prepare(`
|
|
187
|
-
INSERT INTO memory_entries (entry_type, agent_role, project, title, body, task_id, pr_number, run_id, status, tags, metadata, created_at, occurred_at, updated_at, content_hash)
|
|
188
|
-
VALUES (?, ?, ?, ?, ?, ?, ?, ?, 'active', '[]', '{}', ?, ?, ?, ?)
|
|
189
|
-
`);
|
|
190
|
-
|
|
191
|
-
const checkHash = db.prepare("SELECT id FROM memory_entries WHERE content_hash = ?");
|
|
192
|
-
|
|
193
|
-
const importAll = db.transaction(() => {
|
|
194
|
-
for (const { project, memoryDir } of memoryDirs) {
|
|
195
|
-
summary[project] = {};
|
|
196
|
-
try {
|
|
197
|
-
for (const file of readdirSync(memoryDir)) {
|
|
198
|
-
const agentRole = roleToFile[file];
|
|
199
|
-
if (!agentRole) continue;
|
|
200
|
-
|
|
201
|
-
const content = readFileSync(join(memoryDir, file), "utf-8");
|
|
202
|
-
const entries = parseMemoryFile(content, agentRole);
|
|
203
|
-
|
|
204
|
-
let count = 0;
|
|
205
|
-
for (const entry of entries) {
|
|
206
|
-
const hash = contentHash(entry.entryType, agentRole, project, entry.title, entry.body);
|
|
207
|
-
if (checkHash.get(hash)) {
|
|
208
|
-
totalSkipped++;
|
|
209
|
-
continue;
|
|
210
|
-
}
|
|
211
|
-
|
|
212
|
-
insert.run(
|
|
213
|
-
entry.entryType, agentRole, project,
|
|
214
|
-
entry.title, entry.body,
|
|
215
|
-
entry.taskId || null, entry.prNumber || null, entry.runId || null,
|
|
216
|
-
ts, entry.date + "T00:00:00.000Z", ts, hash
|
|
217
|
-
);
|
|
218
|
-
count++;
|
|
219
|
-
totalImported++;
|
|
220
|
-
}
|
|
221
|
-
summary[project][agentRole] = count;
|
|
222
|
-
}
|
|
223
|
-
} catch (err) {
|
|
224
|
-
console.error(`Error processing ${project}: ${err}`);
|
|
225
|
-
}
|
|
226
|
-
}
|
|
227
|
-
});
|
|
228
|
-
|
|
229
|
-
importAll();
|
|
230
|
-
|
|
231
|
-
// Print results
|
|
232
|
-
console.log("\n=== Migration Complete ===\n");
|
|
233
|
-
console.log(`Scanned: ${memoryDirs.length} projects with .ao/memory/`);
|
|
234
|
-
console.log(`Imported: ${totalImported} entries`);
|
|
235
|
-
console.log(`Skipped (duplicates): ${totalSkipped}\n`);
|
|
236
|
-
|
|
237
|
-
const roles = [...new Set(Object.values(summary).flatMap(s => Object.keys(s)))].sort();
|
|
238
|
-
const header = ["Project", ...roles, "Total"].map(h => h.padEnd(16)).join(" | ");
|
|
239
|
-
console.log(header);
|
|
240
|
-
console.log("-".repeat(header.length));
|
|
241
|
-
|
|
242
|
-
for (const [project, counts] of Object.entries(summary).sort()) {
|
|
243
|
-
const total = Object.values(counts).reduce((a, b) => a + b, 0);
|
|
244
|
-
const row = [project, ...roles.map(r => String(counts[r] || 0)), String(total)]
|
|
245
|
-
.map(v => v.padEnd(16))
|
|
246
|
-
.join(" | ");
|
|
247
|
-
console.log(row);
|
|
248
|
-
}
|
|
249
|
-
|
|
250
|
-
console.log(`\nDatabase: ${resolveDbPath(dbPath)}`);
|