@coreidentitylabs/open-graph-memory-mcp 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91) hide show
  1. package/.agents/skills/mcp-builder/LICENSE.txt +202 -0
  2. package/.agents/skills/mcp-builder/SKILL.md +236 -0
  3. package/.agents/skills/mcp-builder/reference/evaluation.md +602 -0
  4. package/.agents/skills/mcp-builder/reference/mcp_best_practices.md +249 -0
  5. package/.agents/skills/mcp-builder/reference/node_mcp_server.md +970 -0
  6. package/.agents/skills/mcp-builder/reference/python_mcp_server.md +719 -0
  7. package/.agents/skills/mcp-builder/scripts/connections.py +151 -0
  8. package/.agents/skills/mcp-builder/scripts/evaluation.py +373 -0
  9. package/.agents/skills/mcp-builder/scripts/example_evaluation.xml +22 -0
  10. package/.agents/skills/mcp-builder/scripts/requirements.txt +2 -0
  11. package/.env.example +26 -0
  12. package/Implementation Plan.md +358 -0
  13. package/README.md +187 -0
  14. package/dist/constants.d.ts +34 -0
  15. package/dist/constants.d.ts.map +1 -0
  16. package/dist/constants.js +40 -0
  17. package/dist/constants.js.map +1 -0
  18. package/dist/encoding/embedder.d.ts +12 -0
  19. package/dist/encoding/embedder.d.ts.map +1 -0
  20. package/dist/encoding/embedder.js +85 -0
  21. package/dist/encoding/embedder.js.map +1 -0
  22. package/dist/encoding/pipeline.d.ts +28 -0
  23. package/dist/encoding/pipeline.d.ts.map +1 -0
  24. package/dist/encoding/pipeline.js +146 -0
  25. package/dist/encoding/pipeline.js.map +1 -0
  26. package/dist/evolution/consolidator.d.ts +12 -0
  27. package/dist/evolution/consolidator.d.ts.map +1 -0
  28. package/dist/evolution/consolidator.js +212 -0
  29. package/dist/evolution/consolidator.js.map +1 -0
  30. package/dist/index.d.ts +3 -0
  31. package/dist/index.d.ts.map +1 -0
  32. package/dist/index.js +53 -0
  33. package/dist/index.js.map +1 -0
  34. package/dist/llm/openai-provider.d.ts +23 -0
  35. package/dist/llm/openai-provider.d.ts.map +1 -0
  36. package/dist/llm/openai-provider.js +141 -0
  37. package/dist/llm/openai-provider.js.map +1 -0
  38. package/dist/llm/prompts.d.ts +10 -0
  39. package/dist/llm/prompts.d.ts.map +1 -0
  40. package/dist/llm/prompts.js +63 -0
  41. package/dist/llm/prompts.js.map +1 -0
  42. package/dist/llm/provider.d.ts +7 -0
  43. package/dist/llm/provider.d.ts.map +1 -0
  44. package/dist/llm/provider.js +25 -0
  45. package/dist/llm/provider.js.map +1 -0
  46. package/dist/resources/context-resource.d.ts +8 -0
  47. package/dist/resources/context-resource.d.ts.map +1 -0
  48. package/dist/resources/context-resource.js +51 -0
  49. package/dist/resources/context-resource.js.map +1 -0
  50. package/dist/retrieval/search.d.ts +24 -0
  51. package/dist/retrieval/search.d.ts.map +1 -0
  52. package/dist/retrieval/search.js +143 -0
  53. package/dist/retrieval/search.js.map +1 -0
  54. package/dist/storage/factory.d.ts +10 -0
  55. package/dist/storage/factory.d.ts.map +1 -0
  56. package/dist/storage/factory.js +35 -0
  57. package/dist/storage/factory.js.map +1 -0
  58. package/dist/storage/json-store.d.ts +34 -0
  59. package/dist/storage/json-store.d.ts.map +1 -0
  60. package/dist/storage/json-store.js +248 -0
  61. package/dist/storage/json-store.js.map +1 -0
  62. package/dist/storage/neo4j-store.d.ts +31 -0
  63. package/dist/storage/neo4j-store.d.ts.map +1 -0
  64. package/dist/storage/neo4j-store.js +440 -0
  65. package/dist/storage/neo4j-store.js.map +1 -0
  66. package/dist/tools/memory-tools.d.ts +4 -0
  67. package/dist/tools/memory-tools.d.ts.map +1 -0
  68. package/dist/tools/memory-tools.js +873 -0
  69. package/dist/tools/memory-tools.js.map +1 -0
  70. package/dist/types.d.ts +129 -0
  71. package/dist/types.d.ts.map +1 -0
  72. package/dist/types.js +5 -0
  73. package/dist/types.js.map +1 -0
  74. package/implementation_plan.md.resolved.md +322 -0
  75. package/package.json +43 -0
  76. package/src/constants.ts +52 -0
  77. package/src/encoding/embedder.ts +93 -0
  78. package/src/encoding/pipeline.ts +197 -0
  79. package/src/evolution/consolidator.ts +281 -0
  80. package/src/index.ts +67 -0
  81. package/src/llm/openai-provider.ts +208 -0
  82. package/src/llm/prompts.ts +66 -0
  83. package/src/llm/provider.ts +37 -0
  84. package/src/resources/context-resource.ts +74 -0
  85. package/src/retrieval/search.ts +203 -0
  86. package/src/storage/factory.ts +48 -0
  87. package/src/storage/json-store.ts +325 -0
  88. package/src/storage/neo4j-store.ts +564 -0
  89. package/src/tools/memory-tools.ts +1067 -0
  90. package/src/types.ts +207 -0
  91. package/tsconfig.json +21 -0
@@ -0,0 +1,93 @@
1
// =============================================================================
// Embedding Service — Lightweight text-to-vector without external dependencies
// =============================================================================
// Uses character n-gram hashing for offline, zero-dependency embeddings.
// If an LLM provider is configured, it can be used for higher-quality embeddings.
// =============================================================================

// Dimensionality of every vector this module produces. Callers that compare
// embeddings (e.g. via cosineSimilarity) rely on all vectors sharing this size.
const EMBEDDING_DIM = 256;
9
+
10
+ /**
11
+ * Generate a lightweight embedding from text using character n-gram hashing.
12
+ * This is a simple, deterministic approach that works offline without any API.
13
+ * Quality is lower than neural embeddings but sufficient for basic similarity.
14
+ */
15
+ export function generateLocalEmbedding(text: string): number[] {
16
+ const normalized = text.toLowerCase().trim();
17
+ const vector = new Float64Array(EMBEDDING_DIM).fill(0);
18
+
19
+ // Character trigram hashing
20
+ for (let i = 0; i < normalized.length - 2; i++) {
21
+ const trigram = normalized.substring(i, i + 3);
22
+ const hash = hashString(trigram);
23
+ const index = Math.abs(hash) % EMBEDDING_DIM;
24
+ // Random-ish sign based on hash
25
+ const sign = hash > 0 ? 1 : -1;
26
+ vector[index] += sign;
27
+ }
28
+
29
+ // Word-level hashing for broader semantic signals
30
+ const words = normalized.split(/\s+/).filter((w) => w.length > 0);
31
+ for (const word of words) {
32
+ const hash = hashString(word);
33
+ const index = Math.abs(hash) % EMBEDDING_DIM;
34
+ vector[index] += hash > 0 ? 0.5 : -0.5;
35
+ }
36
+
37
+ // Word bigram hashing for phrase-level patterns
38
+ for (let i = 0; i < words.length - 1; i++) {
39
+ const bigram = `${words[i]} ${words[i + 1]}`;
40
+ const hash = hashString(bigram);
41
+ const index = Math.abs(hash) % EMBEDDING_DIM;
42
+ vector[index] += hash > 0 ? 0.3 : -0.3;
43
+ }
44
+
45
+ // L2 normalize
46
+ return l2Normalize(Array.from(vector));
47
+ }
48
+
49
+ /**
50
+ * Compute cosine similarity between two vectors.
51
+ * Returns a value between -1 and 1 (1 = identical, 0 = orthogonal).
52
+ */
53
+ export function cosineSimilarity(a: number[], b: number[]): number {
54
+ if (a.length !== b.length || a.length === 0) return 0;
55
+
56
+ let dotProduct = 0;
57
+ let normA = 0;
58
+ let normB = 0;
59
+
60
+ for (let i = 0; i < a.length; i++) {
61
+ dotProduct += a[i] * b[i];
62
+ normA += a[i] * a[i];
63
+ normB += b[i] * b[i];
64
+ }
65
+
66
+ const denominator = Math.sqrt(normA) * Math.sqrt(normB);
67
+ if (denominator === 0) return 0;
68
+
69
+ return dotProduct / denominator;
70
+ }
71
+
72
+ /**
73
+ * L2 normalize a vector (unit length).
74
+ */
75
+ function l2Normalize(vector: number[]): number[] {
76
+ let norm = 0;
77
+ for (const v of vector) norm += v * v;
78
+ norm = Math.sqrt(norm);
79
+ if (norm === 0) return vector;
80
+ return vector.map((v) => v / norm);
81
+ }
82
+
83
+ /**
84
+ * Simple deterministic string hash (djb2 variant).
85
+ */
86
+ function hashString(str: string): number {
87
+ let hash = 5381;
88
+ for (let i = 0; i < str.length; i++) {
89
+ // hash * 33 + char
90
+ hash = ((hash << 5) + hash + str.charCodeAt(i)) | 0;
91
+ }
92
+ return hash;
93
+ }
@@ -0,0 +1,197 @@
1
+ // =============================================================================
2
+ // Encoding Pipeline — Server-Side Entity Extraction + Storage
3
+ // =============================================================================
4
+ // Orchestrates: LLM extraction → entity resolution → embedding → storage
5
+ // Only available when LLM_API_KEY is configured.
6
+ // =============================================================================
7
+
8
+ import { v4 as uuidv4 } from "uuid";
9
+ import type {
10
+ StorageBackend,
11
+ LLMProvider,
12
+ MemoryNode,
13
+ MemoryEdge,
14
+ ExtractionResult,
15
+ NodeType,
16
+ } from "../types.js";
17
+ import { generateLocalEmbedding } from "./embedder.js";
18
+
19
/** Summary of what a single encodeText() run changed in the graph. */
export interface EncodingResult {
  // Brand-new entity nodes stored.
  entitiesCreated: number;
  // Pre-existing nodes whose description/embedding/metadata were refreshed.
  entitiesUpdated: number;
  // New edges stored (duplicates and unresolvable endpoints are skipped).
  relationsCreated: number;
  // Per-item audit trail; `status` records "created"/"updated" or a
  // "skipped: ..." reason for relations.
  details: {
    entities: { id: string; name: string; status: string }[];
    relations: {
      source: string;
      target: string;
      relation: string;
      status: string;
    }[];
  };
}
33
+
34
/**
 * Full server-side encoding pipeline:
 * 1. Extract entities + relations from text using LLM
 * 2. Resolve entities against existing graph (avoid duplicates)
 * 3. Generate embeddings (LLM embeddings if available, local fallback)
 * 4. Store nodes and edges
 *
 * @param text  Raw text to encode into graph memory.
 * @param store Storage backend used for node/edge lookups and writes.
 * @param llm   LLM provider used for extraction and, when possible, embeddings.
 * @returns Counts of created/updated entities and relations plus an audit trail.
 * @throws Error when the LLM extraction call fails (original message wrapped).
 */
export async function encodeText(
  text: string,
  store: StorageBackend,
  llm: LLMProvider,
): Promise<EncodingResult> {
  const result: EncodingResult = {
    entitiesCreated: 0,
    entitiesUpdated: 0,
    relationsCreated: 0,
    details: { entities: [], relations: [] },
  };

  // Step 1: Get existing entity names for resolution.
  // NOTE(review): capped at 10k nodes — larger graphs are only partially
  // visible to the resolver; confirm this limit is acceptable.
  const { nodes: existingNodes } = await store.getAllNodes(10000, 0);
  const existingNames = existingNodes.map((n) => n.name);

  // Step 2: Extract entities and relations via LLM
  let extraction: ExtractionResult;
  try {
    extraction = await llm.extractEntitiesAndRelations(text, existingNames);
  } catch (error) {
    // Extraction is mandatory for this pipeline: log, then rethrow with the
    // original message preserved for the caller.
    console.error("[open-memory] LLM extraction failed:", error);
    throw new Error(
      `LLM extraction failed: ${error instanceof Error ? error.message : String(error)}`,
    );
  }

  // Nothing extracted — return the zeroed result as-is.
  if (extraction.entities.length === 0 && extraction.relations.length === 0) {
    return result;
  }

  // Step 3: Store entities (with resolution)
  // Maps lowercased entity name -> node id; used later to resolve relation
  // endpoints case-insensitively.
  const entityNameToId = new Map<string, string>();

  // Pre-populate with existing entities
  for (const node of existingNodes) {
    entityNameToId.set(node.name.toLowerCase(), node.id);
  }

  for (const entity of extraction.entities) {
    const nameLower = entity.name.toLowerCase();
    // NOTE(review): looked up with the original-cased name while the map above
    // is keyed lowercase — confirm getNodeByName is case-insensitive, otherwise
    // a re-cased mention can create a duplicate node.
    const existingNode = await store.getNodeByName(entity.name);

    // Generate embedding — try LLM first, fall back to local
    let embedding: number[];
    try {
      embedding = await llm.generateEmbedding(
        `${entity.name}: ${entity.description}`,
      );
    } catch {
      // Best-effort fallback: deterministic local hashing embedding.
      embedding = generateLocalEmbedding(
        `${entity.name} ${entity.description}`,
      );
    }

    if (existingNode) {
      // Update existing entity, keeping whichever description is longer.
      const mergedDescription =
        existingNode.description.length >= entity.description.length
          ? existingNode.description
          : entity.description;

      await store.updateNode(existingNode.id, {
        description: mergedDescription,
        embedding,
        // Shallow metadata merge; newly extracted keys win over stored ones.
        metadata: { ...existingNode.metadata, ...entity.metadata },
      });

      entityNameToId.set(nameLower, existingNode.id);
      result.entitiesUpdated++;
      result.details.entities.push({
        id: existingNode.id,
        name: entity.name,
        status: "updated",
      });
    } else {
      // Create new entity
      const now = new Date().toISOString();
      const node: MemoryNode = {
        id: uuidv4(),
        name: entity.name,
        // NOTE(review): unvalidated cast — the LLM's type string is trusted to
        // be a legal NodeType; consider validating against the enum.
        type: entity.type as NodeType,
        description: entity.description,
        embedding,
        metadata: entity.metadata ?? {},
        createdAt: now,
        updatedAt: now,
        source: "llm_extraction",
        accessCount: 0,
      };

      await store.addNode(node);
      entityNameToId.set(nameLower, node.id);
      result.entitiesCreated++;
      result.details.entities.push({
        id: node.id,
        name: entity.name,
        status: "created",
      });
    }
  }

  // Step 4: Store relations
  for (const rel of extraction.relations) {
    // Resolve endpoints case-insensitively against known entities.
    const sourceId = entityNameToId.get(rel.source.toLowerCase()) ?? null;
    const targetId = entityNameToId.get(rel.target.toLowerCase()) ?? null;

    if (!sourceId || !targetId) {
      // Record which endpoint failed to resolve, then skip this relation.
      result.details.relations.push({
        source: rel.source,
        target: rel.target,
        relation: rel.relation,
        status: `skipped: ${!sourceId ? `source '${rel.source}' not found` : `target '${rel.target}' not found`}`,
      });
      continue;
    }

    // Avoid duplicate edges (same endpoints + same relation label).
    const existingEdges = await store.getEdgesBetween(sourceId, targetId);
    const duplicate = existingEdges.find((e) => e.relation === rel.relation);

    if (duplicate) {
      result.details.relations.push({
        source: rel.source,
        target: rel.target,
        relation: rel.relation,
        status: "skipped: duplicate",
      });
      continue;
    }

    const now = new Date().toISOString();
    const edge: MemoryEdge = {
      id: uuidv4(),
      source: sourceId,
      target: targetId,
      relation: rel.relation,
      // Fall back to a generated "<source> <relation> <target>" description.
      description:
        rel.description ?? `${rel.source} ${rel.relation} ${rel.target}`,
      // Default confidence for LLM-extracted relations.
      weight: rel.weight ?? 0.8,
      metadata: {},
      createdAt: now,
      updatedAt: now,
    };

    await store.addEdge(edge);
    result.relationsCreated++;
    result.details.relations.push({
      source: rel.source,
      target: rel.target,
      relation: rel.relation,
      status: "created",
    });
  }

  return result;
}
@@ -0,0 +1,281 @@
1
+ // =============================================================================
2
+ // Memory Evolution Engine — Consolidation, Dedup, Inference, Pruning
3
+ // =============================================================================
4
+
5
+ import type {
6
+ StorageBackend,
7
+ MemoryNode,
8
+ MemoryEdge,
9
+ ConsolidationResult,
10
+ ConsolidationStrategy,
11
+ } from "../types.js";
12
+ import { cosineSimilarity } from "../encoding/embedder.js";
13
+ import {
14
+ DUPLICATE_SIMILARITY_THRESHOLD,
15
+ MIN_EDGE_WEIGHT,
16
+ STALE_NODE_AGE_DAYS,
17
+ } from "../constants.js";
18
+
19
+ /**
20
+ * Run memory consolidation to keep the graph healthy.
21
+ *
22
+ * Strategies:
23
+ * - full: runs all operations
24
+ * - merge_only: only merge duplicates
25
+ * - prune_only: only prune stale nodes
26
+ * - infer_only: only infer transitive edges
27
+ */
28
+ export async function consolidateMemory(
29
+ store: StorageBackend,
30
+ strategy: ConsolidationStrategy = "full",
31
+ ): Promise<ConsolidationResult> {
32
+ const startTime = Date.now();
33
+ const result: ConsolidationResult = {
34
+ mergedNodes: 0,
35
+ resolvedConflicts: 0,
36
+ inferredEdges: 0,
37
+ prunedNodes: 0,
38
+ duration: 0,
39
+ };
40
+
41
+ if (strategy === "full" || strategy === "merge_only") {
42
+ const mergeResult = await mergeDuplicates(store);
43
+ result.mergedNodes = mergeResult.merged;
44
+ result.resolvedConflicts = mergeResult.conflicts;
45
+ }
46
+
47
+ if (strategy === "full" || strategy === "infer_only") {
48
+ result.inferredEdges = await inferTransitiveEdges(store);
49
+ }
50
+
51
+ if (strategy === "full" || strategy === "prune_only") {
52
+ result.prunedNodes = await pruneStaleNodes(store);
53
+ }
54
+
55
+ result.duration = Date.now() - startTime;
56
+ return result;
57
+ }
58
+
59
+ // -----------------------------------------------------------------------------
60
+ // Duplicate Merging
61
+ // -----------------------------------------------------------------------------
62
+
63
+ async function mergeDuplicates(
64
+ store: StorageBackend,
65
+ ): Promise<{ merged: number; conflicts: number }> {
66
+ const { nodes } = await store.getAllNodes(10000, 0);
67
+ let merged = 0;
68
+ let conflicts = 0;
69
+ const processedIds = new Set<string>();
70
+
71
+ for (let i = 0; i < nodes.length; i++) {
72
+ if (processedIds.has(nodes[i].id)) continue;
73
+
74
+ for (let j = i + 1; j < nodes.length; j++) {
75
+ if (processedIds.has(nodes[j].id)) continue;
76
+
77
+ const nodeA = nodes[i];
78
+ const nodeB = nodes[j];
79
+
80
+ // Check name similarity
81
+ const nameMatch =
82
+ nodeA.name.toLowerCase() === nodeB.name.toLowerCase() ||
83
+ levenshteinSimilarity(
84
+ nodeA.name.toLowerCase(),
85
+ nodeB.name.toLowerCase(),
86
+ ) > 0.85;
87
+
88
+ // Check embedding similarity if both have embeddings
89
+ let embeddingSimilar = false;
90
+ if (
91
+ nodeA.embedding &&
92
+ nodeB.embedding &&
93
+ nodeA.embedding.length > 0 &&
94
+ nodeB.embedding.length > 0
95
+ ) {
96
+ const sim = cosineSimilarity(nodeA.embedding, nodeB.embedding);
97
+ embeddingSimilar = sim > DUPLICATE_SIMILARITY_THRESHOLD;
98
+ }
99
+
100
+ if (nameMatch || embeddingSimilar) {
101
+ // Merge B into A (keep the older/more accessed node)
102
+ const keepNode = nodeA.accessCount >= nodeB.accessCount ? nodeA : nodeB;
103
+ const removeNode = keepNode === nodeA ? nodeB : nodeA;
104
+
105
+ // Merge descriptions
106
+ const mergedDescription =
107
+ keepNode.description.length >= removeNode.description.length
108
+ ? keepNode.description
109
+ : `${keepNode.description} | ${removeNode.description}`;
110
+
111
+ // Re-point edges from removeNode to keepNode
112
+ const edges = await store.getEdgesForNode(removeNode.id);
113
+ for (const edge of edges) {
114
+ const newEdge: MemoryEdge = {
115
+ ...edge,
116
+ source: edge.source === removeNode.id ? keepNode.id : edge.source,
117
+ target: edge.target === removeNode.id ? keepNode.id : edge.target,
118
+ updatedAt: new Date().toISOString(),
119
+ };
120
+ // Skip self-loops
121
+ if (newEdge.source === newEdge.target) continue;
122
+
123
+ // Check if we already have this edge
124
+ const existing = await store.getEdgesBetween(
125
+ newEdge.source,
126
+ newEdge.target,
127
+ );
128
+ const duplicate = existing.find(
129
+ (e) => e.relation === newEdge.relation,
130
+ );
131
+ if (!duplicate) {
132
+ await store.addEdge(newEdge);
133
+ }
134
+ }
135
+
136
+ // Update kept node and delete merged node
137
+ await store.updateNode(keepNode.id, {
138
+ description: mergedDescription,
139
+ accessCount: keepNode.accessCount + removeNode.accessCount,
140
+ });
141
+ await store.deleteNode(removeNode.id);
142
+
143
+ processedIds.add(removeNode.id);
144
+ merged++;
145
+
146
+ // Check for temporal conflicts
147
+ if (
148
+ keepNode.validUntil &&
149
+ removeNode.validFrom &&
150
+ keepNode.validUntil < removeNode.validFrom
151
+ ) {
152
+ conflicts++;
153
+ }
154
+ }
155
+ }
156
+ }
157
+
158
+ return { merged, conflicts };
159
+ }
160
+
161
+ // -----------------------------------------------------------------------------
162
+ // Transitive Edge Inference
163
+ // -----------------------------------------------------------------------------
164
+
165
+ async function inferTransitiveEdges(store: StorageBackend): Promise<number> {
166
+ const { nodes } = await store.getAllNodes(10000, 0);
167
+ let inferred = 0;
168
+
169
+ for (const node of nodes) {
170
+ const outEdges = await store.getEdgesForNode(node.id, "out");
171
+
172
+ for (const edgeAB of outEdges) {
173
+ // A -> B, now look for B -> C
174
+ const nextEdges = await store.getEdgesForNode(edgeAB.target, "out");
175
+
176
+ for (const edgeBC of nextEdges) {
177
+ // Skip if A == C (no self-loops)
178
+ if (edgeBC.target === node.id) continue;
179
+
180
+ // Check if A -> C edge already exists
181
+ const existing = await store.getEdgesBetween(node.id, edgeBC.target);
182
+ if (existing.length > 0) continue;
183
+
184
+ // Only infer for certain relationship types
185
+ if (
186
+ edgeAB.relation === edgeBC.relation &&
187
+ isTransitiveRelation(edgeAB.relation)
188
+ ) {
189
+ const { v4: uuidv4 } = await import("uuid");
190
+ const now = new Date().toISOString();
191
+ await store.addEdge({
192
+ id: uuidv4(),
193
+ source: node.id,
194
+ target: edgeBC.target,
195
+ relation: edgeAB.relation,
196
+ description: `Inferred: transitive ${edgeAB.relation}`,
197
+ weight: Math.min(edgeAB.weight, edgeBC.weight) * 0.8,
198
+ metadata: { inferred: true },
199
+ createdAt: now,
200
+ updatedAt: now,
201
+ });
202
+ inferred++;
203
+ }
204
+ }
205
+ }
206
+ }
207
+
208
+ return inferred;
209
+ }
210
+
211
+ function isTransitiveRelation(relation: string): boolean {
212
+ const transitiveRelations = [
213
+ "depends_on",
214
+ "part_of",
215
+ "belongs_to",
216
+ "contains",
217
+ "extends",
218
+ "imports",
219
+ ];
220
+ return transitiveRelations.includes(relation);
221
+ }
222
+
223
+ // -----------------------------------------------------------------------------
224
+ // Stale Node Pruning
225
+ // -----------------------------------------------------------------------------
226
+
227
+ async function pruneStaleNodes(store: StorageBackend): Promise<number> {
228
+ const { nodes } = await store.getAllNodes(10000, 0);
229
+ const cutoff = new Date();
230
+ cutoff.setDate(cutoff.getDate() - STALE_NODE_AGE_DAYS);
231
+ const cutoffStr = cutoff.toISOString();
232
+ let pruned = 0;
233
+
234
+ for (const node of nodes) {
235
+ // Don't prune conversations or decisions (high-value types)
236
+ if (node.type === "conversation" || node.type === "decision") continue;
237
+
238
+ // Prune if never accessed and old
239
+ const lastActivity = node.lastAccessedAt ?? node.updatedAt;
240
+ if (lastActivity < cutoffStr && node.accessCount === 0) {
241
+ await store.deleteNode(node.id);
242
+ pruned++;
243
+ }
244
+ }
245
+
246
+ return pruned;
247
+ }
248
+
249
+ // -----------------------------------------------------------------------------
250
+ // Utilities
251
+ // -----------------------------------------------------------------------------
252
+
253
+ function levenshteinSimilarity(a: string, b: string): number {
254
+ const maxLen = Math.max(a.length, b.length);
255
+ if (maxLen === 0) return 1;
256
+ return 1 - levenshteinDistance(a, b) / maxLen;
257
+ }
258
+
259
+ function levenshteinDistance(a: string, b: string): number {
260
+ const m = a.length;
261
+ const n = b.length;
262
+ const dp: number[][] = Array.from({ length: m + 1 }, () =>
263
+ Array(n + 1).fill(0),
264
+ );
265
+
266
+ for (let i = 0; i <= m; i++) dp[i][0] = i;
267
+ for (let j = 0; j <= n; j++) dp[0][j] = j;
268
+
269
+ for (let i = 1; i <= m; i++) {
270
+ for (let j = 1; j <= n; j++) {
271
+ const cost = a[i - 1] === b[j - 1] ? 0 : 1;
272
+ dp[i][j] = Math.min(
273
+ dp[i - 1][j] + 1,
274
+ dp[i][j - 1] + 1,
275
+ dp[i - 1][j - 1] + cost,
276
+ );
277
+ }
278
+ }
279
+
280
+ return dp[m][n];
281
+ }
package/src/index.ts ADDED
@@ -0,0 +1,67 @@
1
+ #!/usr/bin/env node
2
+ // =============================================================================
3
+ // Open-Memory MCP Server — Entry Point
4
+ // =============================================================================
5
+ // Graph-based agent memory for AI coding assistants.
6
+ // Extends context window by storing entities, relationships, and decisions
7
+ // in a persistent knowledge graph accessible via MCP tools.
8
+ // =============================================================================
9
+
10
+ import dotenv from "dotenv";
11
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
12
+ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
13
+ import { createStorageBackend } from "./storage/factory.js";
14
+ import { registerMemoryTools } from "./tools/memory-tools.js";
15
+ import { registerMemoryResources } from "./resources/context-resource.js";
16
+ import { createLLMProvider } from "./llm/provider.js";
17
+ import { SERVER_NAME, SERVER_VERSION } from "./constants.js";
18
+
19
+ // Load environment variables
20
+ dotenv.config();
21
+
22
/**
 * Boot the MCP server: storage → optional LLM → tool/resource registration →
 * stdio transport. All logs go to stderr so stdout stays reserved for the
 * MCP stdio protocol.
 */
async function main(): Promise<void> {
  console.error(`[open-memory] Starting ${SERVER_NAME} v${SERVER_VERSION}...`);

  // 1. Initialize storage backend (backend selection lives in storage/factory).
  const store = createStorageBackend();
  await store.initialize();

  // 2. Initialize optional LLM provider — the truthiness check below suggests
  //    this can be null/undefined when unconfigured; confirm in llm/provider.
  const llm = createLLMProvider();

  // 3. Create MCP server
  const server = new McpServer({
    name: SERVER_NAME,
    version: SERVER_VERSION,
  });

  // 4. Register tools and resources before connecting the transport.
  registerMemoryTools(server, store, llm);
  registerMemoryResources(server, store);

  // NOTE(review): counts are hard-coded and must be kept in sync with the
  // registrations in memory-tools / context-resource.
  const toolCount = llm ? 12 : 11;
  console.error(`[open-memory] Registered ${toolCount} tools and 2 resources`);

  // 5. Connect via stdio transport
  const transport = new StdioServerTransport();
  await server.connect(transport);

  console.error(
    `[open-memory] Server running via stdio. Ready for connections.`,
  );

  // Graceful shutdown: flush/close the store before exiting.
  const shutdown = async () => {
    console.error(`[open-memory] Shutting down...`);
    await store.close();
    process.exit(0);
  };

  process.on("SIGINT", shutdown);
  process.on("SIGTERM", shutdown);
}

// Any top-level failure is fatal: log it and exit non-zero.
main().catch((error) => {
  console.error(`[open-memory] Fatal error:`, error);
  process.exit(1);
});