@coreidentitylabs/open-graph-memory-mcp 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91) hide show
  1. package/.agents/skills/mcp-builder/LICENSE.txt +202 -0
  2. package/.agents/skills/mcp-builder/SKILL.md +236 -0
  3. package/.agents/skills/mcp-builder/reference/evaluation.md +602 -0
  4. package/.agents/skills/mcp-builder/reference/mcp_best_practices.md +249 -0
  5. package/.agents/skills/mcp-builder/reference/node_mcp_server.md +970 -0
  6. package/.agents/skills/mcp-builder/reference/python_mcp_server.md +719 -0
  7. package/.agents/skills/mcp-builder/scripts/connections.py +151 -0
  8. package/.agents/skills/mcp-builder/scripts/evaluation.py +373 -0
  9. package/.agents/skills/mcp-builder/scripts/example_evaluation.xml +22 -0
  10. package/.agents/skills/mcp-builder/scripts/requirements.txt +2 -0
  11. package/.env.example +26 -0
  12. package/Implementation Plan.md +358 -0
  13. package/README.md +187 -0
  14. package/dist/constants.d.ts +34 -0
  15. package/dist/constants.d.ts.map +1 -0
  16. package/dist/constants.js +40 -0
  17. package/dist/constants.js.map +1 -0
  18. package/dist/encoding/embedder.d.ts +12 -0
  19. package/dist/encoding/embedder.d.ts.map +1 -0
  20. package/dist/encoding/embedder.js +85 -0
  21. package/dist/encoding/embedder.js.map +1 -0
  22. package/dist/encoding/pipeline.d.ts +28 -0
  23. package/dist/encoding/pipeline.d.ts.map +1 -0
  24. package/dist/encoding/pipeline.js +146 -0
  25. package/dist/encoding/pipeline.js.map +1 -0
  26. package/dist/evolution/consolidator.d.ts +12 -0
  27. package/dist/evolution/consolidator.d.ts.map +1 -0
  28. package/dist/evolution/consolidator.js +212 -0
  29. package/dist/evolution/consolidator.js.map +1 -0
  30. package/dist/index.d.ts +3 -0
  31. package/dist/index.d.ts.map +1 -0
  32. package/dist/index.js +53 -0
  33. package/dist/index.js.map +1 -0
  34. package/dist/llm/openai-provider.d.ts +23 -0
  35. package/dist/llm/openai-provider.d.ts.map +1 -0
  36. package/dist/llm/openai-provider.js +141 -0
  37. package/dist/llm/openai-provider.js.map +1 -0
  38. package/dist/llm/prompts.d.ts +10 -0
  39. package/dist/llm/prompts.d.ts.map +1 -0
  40. package/dist/llm/prompts.js +63 -0
  41. package/dist/llm/prompts.js.map +1 -0
  42. package/dist/llm/provider.d.ts +7 -0
  43. package/dist/llm/provider.d.ts.map +1 -0
  44. package/dist/llm/provider.js +25 -0
  45. package/dist/llm/provider.js.map +1 -0
  46. package/dist/resources/context-resource.d.ts +8 -0
  47. package/dist/resources/context-resource.d.ts.map +1 -0
  48. package/dist/resources/context-resource.js +51 -0
  49. package/dist/resources/context-resource.js.map +1 -0
  50. package/dist/retrieval/search.d.ts +24 -0
  51. package/dist/retrieval/search.d.ts.map +1 -0
  52. package/dist/retrieval/search.js +143 -0
  53. package/dist/retrieval/search.js.map +1 -0
  54. package/dist/storage/factory.d.ts +10 -0
  55. package/dist/storage/factory.d.ts.map +1 -0
  56. package/dist/storage/factory.js +35 -0
  57. package/dist/storage/factory.js.map +1 -0
  58. package/dist/storage/json-store.d.ts +34 -0
  59. package/dist/storage/json-store.d.ts.map +1 -0
  60. package/dist/storage/json-store.js +248 -0
  61. package/dist/storage/json-store.js.map +1 -0
  62. package/dist/storage/neo4j-store.d.ts +31 -0
  63. package/dist/storage/neo4j-store.d.ts.map +1 -0
  64. package/dist/storage/neo4j-store.js +440 -0
  65. package/dist/storage/neo4j-store.js.map +1 -0
  66. package/dist/tools/memory-tools.d.ts +4 -0
  67. package/dist/tools/memory-tools.d.ts.map +1 -0
  68. package/dist/tools/memory-tools.js +873 -0
  69. package/dist/tools/memory-tools.js.map +1 -0
  70. package/dist/types.d.ts +129 -0
  71. package/dist/types.d.ts.map +1 -0
  72. package/dist/types.js +5 -0
  73. package/dist/types.js.map +1 -0
  74. package/implementation_plan.md.resolved.md +322 -0
  75. package/package.json +43 -0
  76. package/src/constants.ts +52 -0
  77. package/src/encoding/embedder.ts +93 -0
  78. package/src/encoding/pipeline.ts +197 -0
  79. package/src/evolution/consolidator.ts +281 -0
  80. package/src/index.ts +67 -0
  81. package/src/llm/openai-provider.ts +208 -0
  82. package/src/llm/prompts.ts +66 -0
  83. package/src/llm/provider.ts +37 -0
  84. package/src/resources/context-resource.ts +74 -0
  85. package/src/retrieval/search.ts +203 -0
  86. package/src/storage/factory.ts +48 -0
  87. package/src/storage/json-store.ts +325 -0
  88. package/src/storage/neo4j-store.ts +564 -0
  89. package/src/tools/memory-tools.ts +1067 -0
  90. package/src/types.ts +207 -0
  91. package/tsconfig.json +21 -0
@@ -0,0 +1,358 @@
1
+ # Open-Memory MCP Server — Implementation Plan
2
+
3
+ A graph-based agent memory MCP server that **extends AI coding assistant memory** for developers using Google Antigravity or VS Code GitHub Copilot.
4
+
5
+ ## Problem Statement
6
+
7
+ 1. **Context window loss** — all LLM-based coding assistants lose context as conversations grow, forcing developers to re-explain code architecture and past changes repeatedly
8
+ 2. **Rapid code evolution** — frequent features/bug fixes cause project documentation to become outdated quickly
9
+ 3. **No persistent developer memory** — conversations are ephemeral; valuable decisions, patterns, and context are lost between sessions
10
+
11
+ ## Solution
12
+
13
+ An MCP server that captures conversations, extracts entities/relationships into a knowledge graph, and provides relevant historical context back to the agent _before_ API calls — effectively giving the AI a persistent project memory.
14
+
15
+ ---
16
+
17
+ ## Architecture
18
+
19
+ ```mermaid
20
+ graph TB
21
+ subgraph IDE["IDE (Antigravity / Copilot)"]
22
+ Agent["AI Agent"]
23
+ end
24
+
25
+ subgraph MCP["open-memory-mcp-server (stdio)"]
26
+ Tools["MCP Tools Layer"]
27
+ Resources["MCP Resources"]
28
+ Encoder["Encoding Pipeline"]
29
+ Retriever["Retrieval Engine"]
30
+ Evolve["Evolution Engine"]
31
+ end
32
+
33
+ subgraph Storage["Storage Backend (configurable)"]
34
+ JSON["Local JSON File"]
35
+ Neo4j["Neo4j Graph DB"]
36
+ end
37
+
38
+ subgraph LLM["LLM Provider (pluggable)"]
39
+ OpenAI["OpenAI"]
40
+ Gemini["Google Gemini"]
41
+ Claude["Anthropic Claude"]
42
+ Ollama["Ollama (local)"]
43
+ end
44
+
45
+ Agent <-->|stdio| Tools
46
+ Agent -->|read| Resources
47
+ Tools --> Encoder
48
+ Tools --> Retriever
49
+ Tools --> Evolve
50
+ Encoder --> LLM
51
+ Retriever --> LLM
52
+ Encoder --> Storage
53
+ Retriever --> Storage
54
+ Evolve --> Storage
55
+ ```
56
+
57
+ ### Key Design Decisions
58
+
59
+ | Decision | Choice | Rationale |
60
+ | ---------------- | ----------------------- | -------------------------------------------------------------- |
61
+ | **Storage** | Both JSON + Neo4j | JSON for zero-config dev; Neo4j for production graph queries |
62
+ | **LLM Provider** | Pluggable via config | Adapts to user's model selection in IDE |
63
+ | **Transport** | stdio | Standard for local IDE integrations |
64
+ | **Language** | TypeScript | Best SDK support, good AI code generation |
65
+ | **Encoding** | LLM-based extraction | Outperforms NER for novel entity types and implicit relations |
66
+ | **Retrieval** | Hybrid semantic + graph | Vector similarity finds anchors; graph traversal finds context |
67
+
68
+ ---
69
+
70
+ ## Proposed Changes
71
+
72
+ ### 1. Project Scaffolding
73
+
74
+ #### [NEW] [package.json](file:///d:/Projects/open-memory/package.json)
75
+
76
+ Dependencies:
77
+
78
+ - `@modelcontextprotocol/sdk` — MCP protocol
79
+ - `zod` — input validation
80
+ - `openai` — default LLM provider (OpenAI-compatible API)
81
+ - `neo4j-driver` — Neo4j graph database driver
82
+ - `uuid` — unique node IDs
83
+ - `dotenv` — environment config
84
+
85
+ #### [NEW] [tsconfig.json](file:///d:/Projects/open-memory/tsconfig.json)
86
+
87
+ #### [NEW] [.env.example](file:///d:/Projects/open-memory/.env.example)
88
+
89
+ #### [NEW] [README.md](file:///d:/Projects/open-memory/README.md)
90
+
91
+ ---
92
+
93
+ ### 2. Core Data Model
94
+
95
+ #### [NEW] [src/types.ts](file:///d:/Projects/open-memory/src/types.ts)
96
+
97
+ ```typescript
98
+ // --- Memory Nodes ---
99
+ interface MemoryNode {
100
+ id: string; // UUID
101
+ name: string; // canonical entity name
102
+ type: "entity" | "concept" | "event" | "code_pattern" | "decision";
103
+ description: string;
104
+ embedding: number[]; // vector for semantic retrieval
105
+ metadata: Record<string, unknown>; // flexible attrs (file paths, languages, etc.)
106
+ createdAt: string; // ISO timestamp (transaction time)
107
+ updatedAt: string;
108
+ validFrom?: string; // bi-temporal: when fact became true
109
+ validUntil?: string; // bi-temporal: when fact expired
110
+ source?: string; // "conversation" | "code_change" | "manual"
111
+ accessCount: number; // for retrieval ranking
112
+ lastAccessedAt?: string;
113
+ }
114
+
115
+ // --- Memory Edges ---
116
+ interface MemoryEdge {
117
+ id: string;
118
+ source: string; // node ID
119
+ target: string; // node ID
120
+ relation: string; // e.g. "works_on", "depends_on", "decided_to"
121
+ description: string;
122
+ weight: number; // 0-1 confidence/strength
123
+ metadata: Record<string, unknown>;
124
+ createdAt: string;
125
+ updatedAt: string;
126
+ }
127
+
128
+ // --- Storage Backend Interface ---
129
+ interface StorageBackend {
130
+ initialize(): Promise<void>;
131
+ addNode(node: MemoryNode): Promise<void>;
132
+ updateNode(id: string, updates: Partial<MemoryNode>): Promise<void>;
133
+ deleteNode(id: string): Promise<void>;
134
+ getNode(id: string): Promise<MemoryNode | null>;
135
+ findNodesByName(name: string): Promise<MemoryNode[]>;
136
+ findNodesByEmbedding(
137
+ embedding: number[],
138
+ topK: number,
139
+ ): Promise<ScoredNode[]>;
140
+ getNeighborhood(nodeId: string, depth: number): Promise<Subgraph>;
141
+ addEdge(edge: MemoryEdge): Promise<void>;
142
+ deleteEdge(id: string): Promise<void>;
143
+ getEdgesForNode(nodeId: string): Promise<MemoryEdge[]>;
144
+ getAllNodes(filter?: NodeFilter): Promise<MemoryNode[]>;
145
+ getStats(): Promise<GraphStats>;
146
+ close(): Promise<void>;
147
+ }
148
+
149
+ // --- LLM Provider Interface ---
150
+ interface LLMProvider {
151
+ extractEntitiesAndRelations(
152
+ text: string,
153
+ existingEntities: string[],
154
+ ): Promise<ExtractionResult>;
155
+ generateEmbedding(text: string): Promise<number[]>;
156
+ generateEmbeddingBatch(texts: string[]): Promise<number[][]>;
157
+ }
158
+ ```
159
+
160
+ ---
161
+
162
+ ### 3. Storage Backends
163
+
164
+ #### [NEW] [src/storage/json-store.ts](file:///d:/Projects/open-memory/src/storage/json-store.ts)
165
+
166
+ Local JSON file storage implementing `StorageBackend`:
167
+
168
+ - Atomic file writes with temp-file rename
169
+ - In-memory index for fast lookups
170
+ - Brute-force cosine similarity for embedding search (sufficient for <10K nodes)
171
+ - Configurable file path via `MEMORY_STORE_PATH`
172
+
173
+ #### [NEW] [src/storage/neo4j-store.ts](file:///d:/Projects/open-memory/src/storage/neo4j-store.ts)
174
+
175
+ Neo4j storage implementing `StorageBackend`:
176
+
177
+ - Cypher queries for CRUD + multi-hop traversal
178
+ - Neo4j vector index for embedding search
179
+ - Connection config via `NEO4J_URI`, `NEO4J_USER`, `NEO4J_PASSWORD`
180
+ - Auto-creates schema constraints/indexes on first run
181
+
182
+ #### [NEW] [src/storage/factory.ts](file:///d:/Projects/open-memory/src/storage/factory.ts)
183
+
184
+ Factory function: reads `STORAGE_BACKEND` env var (`"json"` | `"neo4j"`) and returns the appropriate implementation.
185
+
186
+ ---
187
+
188
+ ### 4. LLM Provider Abstraction
189
+
190
+ #### [NEW] [src/llm/provider.ts](file:///d:/Projects/open-memory/src/llm/provider.ts)
191
+
192
+ Interface + factory for LLM providers. Reads `LLM_PROVIDER` env var.
193
+
194
+ #### [NEW] [src/llm/openai-provider.ts](file:///d:/Projects/open-memory/src/llm/openai-provider.ts)
195
+
196
+ OpenAI-compatible provider (works with OpenAI, Azure OpenAI, local Ollama with OpenAI-compat API):
197
+
198
+ - Entity/relation extraction via chat completion (structured JSON output)
199
+ - Embeddings via embeddings API
200
+ - Configurable model names via `LLM_CHAT_MODEL` and `LLM_EMBEDDING_MODEL`
201
+
202
+ #### [NEW] [src/llm/prompts.ts](file:///d:/Projects/open-memory/src/llm/prompts.ts)
203
+
204
+ System prompts for entity extraction and relationship extraction, optimized for developer conversations (code patterns, architecture decisions, file references, dependencies).
205
+
206
+ ---
207
+
208
+ ### 5. Encoding Pipeline
209
+
210
+ #### [NEW] [src/encoding/pipeline.ts](file:///d:/Projects/open-memory/src/encoding/pipeline.ts)
211
+
212
+ Orchestrates the full encoding flow:
213
+
214
+ 1. **Extract** entities + relationships from message text (via LLM)
215
+ 2. **Resolve** entities against existing graph (fuzzy name match + embedding similarity)
216
+ 3. **Embed** new entity descriptions
217
+ 4. **Store** nodes and edges with temporal metadata
218
+ 5. **Return** summary of what was stored
219
+
220
+ ---
221
+
222
+ ### 6. Retrieval Engine
223
+
224
+ #### [NEW] [src/retrieval/search.ts](file:///d:/Projects/open-memory/src/retrieval/search.ts)
225
+
226
+ Hybrid retrieval:
227
+
228
+ 1. Embed query → find top-K nodes by cosine similarity (semantic anchors)
229
+ 2. From anchors, traverse 2-hop neighborhood (graph context)
230
+ 3. Apply temporal filtering (prefer recent, valid facts)
231
+ 4. Rank: `score = α·semantic + β·proximity + γ·recency + δ·access_frequency`
232
+ 5. Format as structured context ready for injection into LLM prompt
233
+
234
+ ---
235
+
236
+ ### 7. Memory Evolution
237
+
238
+ #### [NEW] [src/evolution/consolidator.ts](file:///d:/Projects/open-memory/src/evolution/consolidator.ts)
239
+
240
+ - **Duplicate merging**: Detect similar nodes (embedding similarity > threshold) and merge
241
+ - **Conflict resolution**: Temporal invalidation of contradicted facts
242
+ - **Edge inference**: Transitive relationship detection
243
+ - **Pruning**: Remove stale, low-access nodes past a configurable age
244
+
245
+ ---
246
+
247
+ ### 8. MCP Tools & Resources
248
+
249
+ #### [NEW] [src/tools/memory-tools.ts](file:///d:/Projects/open-memory/src/tools/memory-tools.ts)
250
+
251
+ | Tool | Action | Annotations |
252
+ | ---------------------- | ------------------------------------------------------------- | ----------- |
253
+ | `memory_save_message` | Ingest conversation text → extract & store entities/relations | write |
254
+ | `memory_search` | Hybrid semantic + graph search for relevant context | read-only |
255
+ | `memory_get_entity` | Get specific entity with its relationships | read-only |
256
+ | `memory_list_entities` | List entities with pagination & type filter | read-only |
257
+ | `memory_get_relations` | Get all relationships for an entity | read-only |
258
+ | `memory_delete` | Remove a memory node and its edges | destructive |
259
+ | `memory_consolidate` | Trigger manual memory consolidation | write |
260
+ | `memory_status` | Node/edge counts, storage type, last consolidation | read-only |
261
+
262
+ #### [NEW] [src/resources/context-resource.ts](file:///d:/Projects/open-memory/src/resources/context-resource.ts)
263
+
264
+ MCP Resource exposing a `memory://context/recent` URI that returns the most relevant recent memories — enabling automatic context injection without explicit tool calls.
265
+
266
+ ---
267
+
268
+ ### 9. Server Entry Point
269
+
270
+ #### [NEW] [src/index.ts](file:///d:/Projects/open-memory/src/index.ts)
271
+
272
+ - Load env config (dotenv)
273
+ - Initialize storage backend (JSON or Neo4j)
274
+ - Initialize LLM provider
275
+ - Register all tools + resources
276
+ - Connect via `StdioServerTransport`
277
+
278
+ ---
279
+
280
+ ## File Tree Summary
281
+
282
+ ```
283
+ open-memory/
284
+ ├── package.json
285
+ ├── tsconfig.json
286
+ ├── .env.example
287
+ ├── README.md
288
+ └── src/
289
+ ├── index.ts # entry point
290
+ ├── types.ts # data models & interfaces
291
+ ├── constants.ts # config constants
292
+ ├── storage/
293
+ │ ├── factory.ts # backend constructor
294
+ │ ├── json-store.ts # local JSON backend
295
+ │ └── neo4j-store.ts # Neo4j backend
296
+ ├── llm/
297
+ │ ├── provider.ts # LLM abstraction + factory
298
+ │ ├── openai-provider.ts # OpenAI-compatible provider
299
+ │ └── prompts.ts # extraction prompts
300
+ ├── encoding/
301
+ │ └── pipeline.ts # extract → resolve → embed → store
302
+ ├── retrieval/
303
+ │ └── search.ts # hybrid semantic + graph search
304
+ ├── evolution/
305
+ │ └── consolidator.ts # merge, prune, infer
306
+ ├── tools/
307
+ │ └── memory-tools.ts # 8 MCP tools
308
+ └── resources/
309
+ └── context-resource.ts # auto-inject context resource
310
+ ```
311
+
312
+ ---
313
+
314
+ ## Environment Configuration
315
+
316
+ ```env
317
+ # Storage (required)
318
+ STORAGE_BACKEND=json # "json" or "neo4j"
319
+ MEMORY_STORE_PATH=./memory.json # for json backend
320
+
321
+ # Neo4j (required if STORAGE_BACKEND=neo4j)
322
+ NEO4J_URI=bolt://localhost:7687
323
+ NEO4J_USER=neo4j
324
+ NEO4J_PASSWORD=password
325
+
326
+ # LLM (required)
327
+ LLM_PROVIDER=openai # "openai" (OpenAI-compatible)
328
+ LLM_API_KEY=sk-...
329
+ LLM_BASE_URL=https://api.openai.com/v1 # change for Ollama, Azure, etc.
330
+ LLM_CHAT_MODEL=gpt-4o-mini
331
+ LLM_EMBEDDING_MODEL=text-embedding-3-small
332
+ ```
333
+
334
+ ---
335
+
336
+ ## Verification Plan
337
+
338
+ ### Build Verification
339
+
340
+ ```bash
341
+ cd d:/Projects/open-memory && npm run build
342
+ ```
343
+
344
+ ### MCP Inspector
345
+
346
+ ```bash
347
+ npx @modelcontextprotocol/inspector node dist/index.js
348
+ ```
349
+
350
+ ### Smoke Test Scenario
351
+
352
+ 1. `memory_status` → `{ nodes: 0, edges: 0, storage: "json" }`
353
+ 2. `memory_save_message` with `"We decided to use React Query for data fetching in the dashboard module"`
354
+ 3. `memory_list_entities` → should show `React Query`, `dashboard module` entities
355
+ 4. `memory_search` with `"What did we decide about data fetching?"` → returns the decision context
356
+ 5. `memory_save_message` with `"Actually, we switched from React Query to SWR for the dashboard"`
357
+ 6. `memory_search` again → should show SWR as current, React Query as outdated (temporal invalidation)
358
+ 7. `memory_consolidate` → completes without errors
package/README.md ADDED
@@ -0,0 +1,187 @@
1
+ # Open-Memory MCP Server
2
+
3
+ **Graph-based agent memory for AI coding assistants** — extends context windows by storing entities, relationships, and decisions in a persistent knowledge graph.
4
+
5
+ ## Problem
6
+
7
+ AI coding assistants (Google Antigravity, VS Code GitHub Copilot) lose context as conversations grow. Developers repeatedly re-explain code architecture and past decisions. Open-Memory solves this by giving your AI a persistent, structured memory.
8
+
9
+ ## How It Works
10
+
11
+ ### Agent-Driven Flow (no API key needed)
12
+
13
+ ```
14
+ You chat with your AI assistant
15
+ ↓ Agent extracts entities/decisions from conversation
16
+ ↓ Calls memory_add_entities / memory_add_relations
17
+ ↓ Entities stored in knowledge graph (JSON or Neo4j)
18
+ ↓ Before next task, agent calls memory_search or memory_get_context
19
+ ↓ Relevant historical context injected into prompt
20
+ = AI remembers your project across sessions
21
+ ```
22
+
23
+ ### Server-Side Encoding Flow (optional, requires LLM API key)
24
+
25
+ ```
26
+ You pass raw text to memory_encode_text
27
+ ↓ Server-side LLM extracts entities + relationships automatically
28
+ ↓ Entity resolution against existing graph (dedup)
29
+ ↓ LLM-quality embeddings generated
30
+ ↓ Nodes + edges stored
31
+ = Fully automated — no manual entity extraction needed
32
+ ```
33
+
34
+ ## Installation & Usage
35
+
36
+ You can run **Open-Memory** directly without manual installation using `npx`. This is the recommended way for both VS Code and Claude Desktop.
37
+
38
+ ### 1. VS Code (via MCP Extension)
39
+
40
+ 1. Install the [MCP](https://marketplace.visualstudio.com/items?itemName=mcp.mcp) extension for VS Code.
41
+ 2. Open the extension settings or click the MCP icon in the status bar.
42
+ 3. Click "Add MCP Server" and enter:
43
+ - **Command**: `npx`
44
+ - **Arguments**: `-y @coreidentitylabs/open-graph-memory-mcp` (or `-y github:YOUR_USERNAME/open-memory` if not on NPM yet)
45
+
46
+ ### 2. Claude Desktop / Antigravity Desktop
47
+
48
+ Add the following to your MCP configuration file (e.g., `%APPDATA%\Claude\claude_desktop_config.json` or `config.json` for Antigravity):
49
+
50
+ ```json
51
+ {
52
+ "mcpServers": {
53
+ "open-memory": {
54
+ "command": "npx",
55
+ "args": ["-y", "@coreidentitylabs/open-graph-memory-mcp"],
56
+ "env": {
57
+ "STORAGE_BACKEND": "json",
58
+ "MEMORY_STORE_PATH": "C:/path/to/your/memory.json"
59
+ }
60
+ }
61
+ }
62
+ }
63
+ ```
64
+
65
+ ### 3. Manual Development Setup
66
+
67
+ If you want to contribute or run from source:
68
+
69
+ ```bash
70
+ # Clone and install dependencies
71
+ git clone https://github.com/YOUR_USERNAME/open-memory.git
72
+ cd open-memory
73
+ npm install
74
+
75
+ # Build
76
+ npm run build
77
+
78
+ # Run (stdio mode)
79
+ node dist/index.js
80
+ ```
81
+
82
+ ### Environment Variables
83
+
84
+ ```bash
85
+ # Storage backend: "json" (default) or "neo4j"
86
+ STORAGE_BACKEND=json
87
+ MEMORY_STORE_PATH=./memory.json
88
+
89
+ # Neo4j (only if STORAGE_BACKEND=neo4j)
90
+ NEO4J_URI=bolt://localhost:7687
91
+ NEO4J_USER=neo4j
92
+ NEO4J_PASSWORD=password
93
+
94
+ # Optional: Server-side encoding (memory_encode_text tool)
95
+ # Works with OpenAI, Azure OpenAI, Ollama (OpenAI-compat), etc.
96
+ # LLM_API_KEY=sk-...
97
+ # LLM_BASE_URL=https://api.openai.com/v1
98
+ # LLM_CHAT_MODEL=gpt-4o-mini
99
+ # LLM_EMBEDDING_MODEL=text-embedding-3-small
100
+ ```
101
+
102
+ ### MCP Client Configuration
103
+
104
+ Add to your MCP config (e.g. `mcp_config.json`):
105
+
106
+ ```json
107
+ {
108
+ "mcpServers": {
109
+ "open-memory": {
110
+ "command": "node",
111
+ "args": ["d:/Projects/open-memory/dist/index.js"],
112
+ "env": {
113
+ "STORAGE_BACKEND": "json",
114
+ "MEMORY_STORE_PATH": "./memory.json"
115
+ }
116
+ }
117
+ }
118
+ }
119
+ ```
120
+
121
+ ## Tools
122
+
123
+ ### Write
124
+
125
+ | Tool | Description | LLM Required |
126
+ | -------------------------- | ------------------------------------------------------------------ | :----------: |
127
+ | `memory_add_entities` | Store entities (people, tools, concepts, code patterns, decisions) | ❌ |
128
+ | `memory_add_relations` | Store relationships between entities | ❌ |
129
+ | `memory_save_conversation` | Save conversation snapshots for history | ❌ |
130
+ | `memory_encode_text` | **Auto-extract entities & relations from raw text via LLM** | ✅ |
131
+
132
+ ### Read
133
+
134
+ | Tool | Description |
135
+ | ---------------------- | ------------------------------------------ |
136
+ | `memory_search` | Hybrid semantic + graph search |
137
+ | `memory_get_entity` | Get entity details with relationships |
138
+ | `memory_list_entities` | List entities with filtering/pagination |
139
+ | `memory_get_relations` | Get relationships for an entity |
140
+ | `memory_get_context` | Get formatted context for prompt injection |
141
+
142
+ ### Management
143
+
144
+ | Tool | Description |
145
+ | ---------------------- | ------------------------------------------------ |
146
+ | `memory_delete_entity` | Remove entity and its edges |
147
+ | `memory_consolidate` | Merge duplicates, prune stale nodes, infer edges |
148
+ | `memory_status` | Graph health stats |
149
+
150
+ ## Architecture
151
+
152
+ ```
153
+ src/
154
+ ├── index.ts # Entry point (stdio transport)
155
+ ├── types.ts # Core type definitions
156
+ ├── constants.ts # Configuration constants
157
+ ├── storage/
158
+ │ ├── json-store.ts # Local JSON file backend
159
+ │ ├── neo4j-store.ts # Neo4j graph database backend
160
+ │ └── factory.ts # Storage backend factory
161
+ ├── encoding/
162
+ │ ├── embedder.ts # Offline n-gram embeddings
163
+ │ └── pipeline.ts # Server-side encoding pipeline
164
+ ├── llm/
165
+ │ ├── provider.ts # LLM provider factory
166
+ │ ├── openai-provider.ts # OpenAI-compatible provider
167
+ │ └── prompts.ts # Extraction prompts
168
+ ├── retrieval/
169
+ │ └── search.ts # Hybrid search engine
170
+ ├── evolution/
171
+ │ └── consolidator.ts # Memory consolidation
172
+ ├── tools/
173
+ │ └── memory-tools.ts # MCP tool definitions
174
+ └── resources/
175
+ └── context-resource.ts # MCP resources
176
+ ```
177
+
178
+ - **Storage**: Pluggable backend — JSON file (zero-config) or Neo4j (production)
179
+ - **Embeddings**: Offline n-gram hashing by default, LLM embeddings when configured
180
+ - **Retrieval**: Hybrid text + semantic + graph traversal with weighted scoring
181
+ - **Evolution**: Duplicate merging, transitive edge inference, stale node pruning
182
+ - **Encoding**: Optional server-side LLM pipeline (OpenAI, Ollama, Azure, etc.)
183
+ - **Transport**: stdio (standard for IDE integrations)
184
+
185
+ ## License
186
+
187
+ MIT
@@ -0,0 +1,34 @@
1
+ /** Maximum response size in characters to prevent overwhelming the agent */
2
+ export declare const CHARACTER_LIMIT = 25000;
3
+ /** Default number of results in paginated responses */
4
+ export declare const DEFAULT_PAGE_SIZE = 20;
5
+ /** Maximum page size for list operations */
6
+ export declare const MAX_PAGE_SIZE = 100;
7
+ /** Default number of hops for graph neighborhood traversal */
8
+ export declare const DEFAULT_TRAVERSAL_DEPTH = 2;
9
+ /** Default number of results for semantic search */
10
+ export declare const DEFAULT_TOP_K = 10;
11
+ /** Similarity threshold for duplicate detection during consolidation */
12
+ export declare const DUPLICATE_SIMILARITY_THRESHOLD = 0.85;
13
+ /** Minimum edge weight to keep during pruning */
14
+ export declare const MIN_EDGE_WEIGHT = 0.1;
15
+ /** Maximum age (days) for stale node pruning — nodes not accessed in this period */
16
+ export declare const STALE_NODE_AGE_DAYS = 90;
17
+ /** Default storage file path */
18
+ export declare const DEFAULT_MEMORY_STORE_PATH = "./memory.json";
19
+ /** Server name for MCP registration */
20
+ export declare const SERVER_NAME = "open-memory-mcp-server";
21
+ /** Server version */
22
+ export declare const SERVER_VERSION = "1.0.0";
23
+ export declare const ENV_KEYS: {
24
+ readonly STORAGE_BACKEND: "STORAGE_BACKEND";
25
+ readonly MEMORY_STORE_PATH: "MEMORY_STORE_PATH";
26
+ readonly NEO4J_URI: "NEO4J_URI";
27
+ readonly NEO4J_USER: "NEO4J_USER";
28
+ readonly NEO4J_PASSWORD: "NEO4J_PASSWORD";
29
+ readonly LLM_API_KEY: "LLM_API_KEY";
30
+ readonly LLM_BASE_URL: "LLM_BASE_URL";
31
+ readonly LLM_CHAT_MODEL: "LLM_CHAT_MODEL";
32
+ readonly LLM_EMBEDDING_MODEL: "LLM_EMBEDDING_MODEL";
33
+ };
34
+ //# sourceMappingURL=constants.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"constants.d.ts","sourceRoot":"","sources":["../src/constants.ts"],"names":[],"mappings":"AAIA,4EAA4E;AAC5E,eAAO,MAAM,eAAe,QAAS,CAAC;AAEtC,uDAAuD;AACvD,eAAO,MAAM,iBAAiB,KAAK,CAAC;AAEpC,4CAA4C;AAC5C,eAAO,MAAM,aAAa,MAAM,CAAC;AAEjC,8DAA8D;AAC9D,eAAO,MAAM,uBAAuB,IAAI,CAAC;AAEzC,oDAAoD;AACpD,eAAO,MAAM,aAAa,KAAK,CAAC;AAEhC,wEAAwE;AACxE,eAAO,MAAM,8BAA8B,OAAO,CAAC;AAEnD,iDAAiD;AACjD,eAAO,MAAM,eAAe,MAAM,CAAC;AAEnC,oFAAoF;AACpF,eAAO,MAAM,mBAAmB,KAAK,CAAC;AAEtC,gCAAgC;AAChC,eAAO,MAAM,yBAAyB,kBAAkB,CAAC;AAEzD,uCAAuC;AACvC,eAAO,MAAM,WAAW,2BAA2B,CAAC;AAEpD,qBAAqB;AACrB,eAAO,MAAM,cAAc,UAAU,CAAC;AAMtC,eAAO,MAAM,QAAQ;;;;;;;;;;CAUX,CAAC"}
@@ -0,0 +1,40 @@
1
+ // =============================================================================
2
+ // Open-Memory MCP Server — Constants & Configuration
3
+ // =============================================================================
4
+ /** Maximum response size in characters to prevent overwhelming the agent */
5
+ export const CHARACTER_LIMIT = 25_000;
6
+ /** Default number of results in paginated responses */
7
+ export const DEFAULT_PAGE_SIZE = 20;
8
+ /** Maximum page size for list operations */
9
+ export const MAX_PAGE_SIZE = 100;
10
+ /** Default number of hops for graph neighborhood traversal */
11
+ export const DEFAULT_TRAVERSAL_DEPTH = 2;
12
+ /** Default number of results for semantic search */
13
+ export const DEFAULT_TOP_K = 10;
14
+ /** Similarity threshold for duplicate detection during consolidation */
15
+ export const DUPLICATE_SIMILARITY_THRESHOLD = 0.85;
16
+ /** Minimum edge weight to keep during pruning */
17
+ export const MIN_EDGE_WEIGHT = 0.1;
18
+ /** Maximum age (days) for stale node pruning — nodes not accessed in this period */
19
+ export const STALE_NODE_AGE_DAYS = 90;
20
+ /** Default storage file path */
21
+ export const DEFAULT_MEMORY_STORE_PATH = "./memory.json";
22
+ /** Server name for MCP registration */
23
+ export const SERVER_NAME = "open-memory-mcp-server";
24
+ /** Server version */
25
+ export const SERVER_VERSION = "1.0.0";
26
+ // -----------------------------------------------------------------------------
27
+ // Environment variable keys
28
+ // -----------------------------------------------------------------------------
29
+ export const ENV_KEYS = {
30
+ STORAGE_BACKEND: "STORAGE_BACKEND",
31
+ MEMORY_STORE_PATH: "MEMORY_STORE_PATH",
32
+ NEO4J_URI: "NEO4J_URI",
33
+ NEO4J_USER: "NEO4J_USER",
34
+ NEO4J_PASSWORD: "NEO4J_PASSWORD",
35
+ LLM_API_KEY: "LLM_API_KEY",
36
+ LLM_BASE_URL: "LLM_BASE_URL",
37
+ LLM_CHAT_MODEL: "LLM_CHAT_MODEL",
38
+ LLM_EMBEDDING_MODEL: "LLM_EMBEDDING_MODEL",
39
+ };
40
+ //# sourceMappingURL=constants.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"constants.js","sourceRoot":"","sources":["../src/constants.ts"],"names":[],"mappings":"AAAA,gFAAgF;AAChF,qDAAqD;AACrD,gFAAgF;AAEhF,4EAA4E;AAC5E,MAAM,CAAC,MAAM,eAAe,GAAG,MAAM,CAAC;AAEtC,uDAAuD;AACvD,MAAM,CAAC,MAAM,iBAAiB,GAAG,EAAE,CAAC;AAEpC,4CAA4C;AAC5C,MAAM,CAAC,MAAM,aAAa,GAAG,GAAG,CAAC;AAEjC,8DAA8D;AAC9D,MAAM,CAAC,MAAM,uBAAuB,GAAG,CAAC,CAAC;AAEzC,oDAAoD;AACpD,MAAM,CAAC,MAAM,aAAa,GAAG,EAAE,CAAC;AAEhC,wEAAwE;AACxE,MAAM,CAAC,MAAM,8BAA8B,GAAG,IAAI,CAAC;AAEnD,iDAAiD;AACjD,MAAM,CAAC,MAAM,eAAe,GAAG,GAAG,CAAC;AAEnC,oFAAoF;AACpF,MAAM,CAAC,MAAM,mBAAmB,GAAG,EAAE,CAAC;AAEtC,gCAAgC;AAChC,MAAM,CAAC,MAAM,yBAAyB,GAAG,eAAe,CAAC;AAEzD,uCAAuC;AACvC,MAAM,CAAC,MAAM,WAAW,GAAG,wBAAwB,CAAC;AAEpD,qBAAqB;AACrB,MAAM,CAAC,MAAM,cAAc,GAAG,OAAO,CAAC;AAEtC,gFAAgF;AAChF,4BAA4B;AAC5B,gFAAgF;AAEhF,MAAM,CAAC,MAAM,QAAQ,GAAG;IACtB,eAAe,EAAE,iBAAiB;IAClC,iBAAiB,EAAE,mBAAmB;IACtC,SAAS,EAAE,WAAW;IACtB,UAAU,EAAE,YAAY;IACxB,cAAc,EAAE,gBAAgB;IAChC,WAAW,EAAE,aAAa;IAC1B,YAAY,EAAE,cAAc;IAC5B,cAAc,EAAE,gBAAgB;IAChC,mBAAmB,EAAE,qBAAqB;CAClC,CAAC"}
@@ -0,0 +1,12 @@
1
+ /**
2
+ * Generate a lightweight embedding from text using character n-gram hashing.
3
+ * This is a simple, deterministic approach that works offline without any API.
4
+ * Quality is lower than neural embeddings but sufficient for basic similarity.
5
+ */
6
+ export declare function generateLocalEmbedding(text: string): number[];
7
+ /**
8
+ * Compute cosine similarity between two vectors.
9
+ * Returns a value between -1 and 1 (1 = identical, 0 = orthogonal).
10
+ */
11
+ export declare function cosineSimilarity(a: number[], b: number[]): number;
12
+ //# sourceMappingURL=embedder.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"embedder.d.ts","sourceRoot":"","sources":["../../src/encoding/embedder.ts"],"names":[],"mappings":"AASA;;;;GAIG;AACH,wBAAgB,sBAAsB,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM,EAAE,CAgC7D;AAED;;;GAGG;AACH,wBAAgB,gBAAgB,CAAC,CAAC,EAAE,MAAM,EAAE,EAAE,CAAC,EAAE,MAAM,EAAE,GAAG,MAAM,CAiBjE"}