@aeriondyseti/vector-memory-mcp 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +413 -0
- package/package.json +43 -0
- package/src/config/index.ts +29 -0
- package/src/db/connection.ts +11 -0
- package/src/db/memory.repository.ts +94 -0
- package/src/db/schema.ts +33 -0
- package/src/index.ts +23 -0
- package/src/mcp/handlers.ts +111 -0
- package/src/mcp/server.ts +34 -0
- package/src/mcp/tools.ts +80 -0
- package/src/services/embeddings.service.ts +46 -0
- package/src/services/memory.service.ts +102 -0
- package/src/types/memory.ts +35 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 AerionDyseti
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,413 @@
|
|
|
1
|
+
# Vector Memory MCP Server
|
|
2
|
+
|
|
3
|
+
> Replace static markdown context files with intelligent, semantically-searchable memories that understand what you're working on.
|
|
4
|
+
|
|
5
|
+
A production-ready MCP (Model Context Protocol) server that provides semantic memory storage for AI assistants. Uses local embeddings and vector search to automatically retrieve relevant context without cloud dependencies.
|
|
6
|
+
|
|
7
|
+
**Perfect for:** Software teams maintaining architectural knowledge, developers juggling multiple projects, and anyone building with AI assistants like Claude Code.
|
|
8
|
+
|
|
9
|
+
[](https://opensource.org/licenses/MIT)
|
|
10
|
+
[](https://www.typescriptlang.org/)
|
|
11
|
+
[](https://bun.sh/)
|
|
12
|
+
[](https://modelcontextprotocol.io)
|
|
13
|
+
|
|
14
|
+
---
|
|
15
|
+
|
|
16
|
+
## ✨ Features
|
|
17
|
+
|
|
18
|
+
### 🔒 **Local-First & Private**
|
|
19
|
+
- All embeddings generated locally (no cloud APIs)
|
|
20
|
+
- Data stored in local LanceDB databases
|
|
21
|
+
- Complete privacy and control over your memories
|
|
22
|
+
|
|
23
|
+
### 🎯 **Intelligent Semantic Search**
|
|
24
|
+
- Vector similarity with multi-factor scoring
|
|
25
|
+
- Considers relevance, recency, priority, and usage frequency
|
|
26
|
+
- Context-aware retrieval based on conversation flow
|
|
27
|
+
|
|
28
|
+
### 📊 **Smart Memory Storage**
|
|
29
|
+
- Stores memories in `~/.local/share/vector-memory-mcp/memories.db`
|
|
30
|
+
- Fast LanceDB-based storage with vector search capabilities
|
|
31
|
+
- Memories persist across sessions and projects
|
|
32
|
+
|
|
33
|
+
### ⚡ **High Performance**
|
|
34
|
+
- Sub-100ms search latency for 1000+ memories
|
|
35
|
+
- Efficient storage (<10MB per 1000 memories)
|
|
36
|
+
- CPU-optimized local embeddings (no GPU required)
|
|
37
|
+
|
|
38
|
+
### 🔌 **MCP Native Integration**
|
|
39
|
+
- Works seamlessly with Claude Code
|
|
40
|
+
- Session hooks for automatic context injection
|
|
41
|
+
- Standard MCP protocol (compatible with future clients)
|
|
42
|
+
|
|
43
|
+
### 🛠️ **Developer-Friendly**
|
|
44
|
+
- Zero-configuration setup
|
|
45
|
+
- Built with Bun for maximum performance
|
|
46
|
+
- Simple MCP tools for storing and searching
|
|
47
|
+
- TypeScript for type safety
|
|
48
|
+
|
|
49
|
+
---
|
|
50
|
+
|
|
51
|
+
## 🚀 Quick Start
|
|
52
|
+
|
|
53
|
+
### Prerequisites
|
|
54
|
+
|
|
55
|
+
- [Bun](https://bun.sh/) 1.0+
|
|
56
|
+
- Claude Code or another MCP-compatible client
|
|
57
|
+
|
|
58
|
+
> **Note:** This server requires Bun to run.
|
|
59
|
+
|
|
60
|
+
### Installation
|
|
61
|
+
|
|
62
|
+
```bash
|
|
63
|
+
# Clone the repository
|
|
64
|
+
git clone https://github.com/AerionDyseti/vector-memory-mcp.git
|
|
65
|
+
cd vector-memory-mcp
|
|
66
|
+
|
|
67
|
+
# Install dependencies
|
|
68
|
+
bun install
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
### Configure Claude Code
|
|
72
|
+
|
|
73
|
+
Add to your `~/.claude/config.json`:
|
|
74
|
+
|
|
75
|
+
```json
|
|
76
|
+
{
|
|
77
|
+
"mcpServers": {
|
|
78
|
+
"memory": {
|
|
79
|
+
"command": "bun",
|
|
80
|
+
"args": ["run", "/absolute/path/to/vector-memory-mcp/src/index.ts"]
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
Replace `/absolute/path/to/` with your actual installation path.
|
|
87
|
+
|
|
88
|
+
### Start Using It
|
|
89
|
+
|
|
90
|
+
That's it! Restart Claude Code and you'll have access to memory tools:
|
|
91
|
+
- `store_memory` - Save information for later recall
|
|
92
|
+
- `search_memories` - Find relevant memories semantically
|
|
93
|
+
- `get_memory` - Retrieve a specific memory by ID
|
|
94
|
+
- `delete_memory` - Remove a memory
|
|
95
|
+
|
|
96
|
+
---
|
|
97
|
+
|
|
98
|
+
## 📖 Usage
|
|
99
|
+
|
|
100
|
+
### Storing Memories
|
|
101
|
+
|
|
102
|
+
Ask Claude Code to remember things for you:
|
|
103
|
+
|
|
104
|
+
```
|
|
105
|
+
You: "Remember that we use Drizzle ORM for database access"
|
|
106
|
+
Claude: [calls store_memory tool]
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
Or Claude Code can store memories directly:
|
|
110
|
+
```json
|
|
111
|
+
{
|
|
112
|
+
"content": "Use Drizzle ORM for type-safe database access",
|
|
113
|
+
"metadata": {
|
|
114
|
+
"tags": ["architecture", "database"],
|
|
115
|
+
"category": "tooling"
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
### Searching Memories
|
|
121
|
+
|
|
122
|
+
Claude Code automatically searches memories when relevant, or you can ask:
|
|
123
|
+
|
|
124
|
+
```
|
|
125
|
+
You: "What did we decide about the database?"
|
|
126
|
+
Claude: [calls search_memories with query about database decisions]
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
Search parameters:
|
|
130
|
+
```json
|
|
131
|
+
{
|
|
132
|
+
"query": "authentication strategy",
|
|
133
|
+
"limit": 10
|
|
134
|
+
}
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
### Managing Memories
|
|
138
|
+
|
|
139
|
+
Retrieve a specific memory:
|
|
140
|
+
```json
|
|
141
|
+
{
|
|
142
|
+
"id": "memory-id-here"
|
|
143
|
+
}
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
Delete a memory:
|
|
147
|
+
```json
|
|
148
|
+
{
|
|
149
|
+
"id": "memory-id-here"
|
|
150
|
+
}
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
---
|
|
154
|
+
|
|
155
|
+
## 🏗️ Architecture
|
|
156
|
+
|
|
157
|
+
```
|
|
158
|
+
vector-memory-mcp/
|
|
159
|
+
├── src/
|
|
160
|
+
│ ├── index.ts # Entry point
|
|
161
|
+
│ ├── config/ # Configuration management
|
|
162
|
+
│ ├── db/ # Database layer (LanceDB)
|
|
163
|
+
│ ├── services/
|
|
164
|
+
│ │ ├── embeddings.service.ts # Embeddings via @xenova/transformers
|
|
165
|
+
│ │ └── memory.service.ts # Core memory operations
|
|
166
|
+
│ └── mcp/
|
|
167
|
+
│ ├── server.ts # MCP server setup
|
|
168
|
+
│ ├── tools.ts # MCP tool definitions
|
|
169
|
+
│ └── handlers.ts # Tool request handlers
|
|
170
|
+
├── tests/
|
|
171
|
+
│ ├── memory.test.ts
|
|
172
|
+
│ └── embeddings.test.ts
|
|
173
|
+
├── bin/
|
|
174
|
+
│ └── vector-memory-mcp.js # Executable entry point
|
|
175
|
+
└── package.json
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
### Technology Stack
|
|
179
|
+
|
|
180
|
+
- **MCP Framework**: @modelcontextprotocol/sdk (official SDK)
|
|
181
|
+
- **Vector Database**: LanceDB (fast, local, vector search)
|
|
182
|
+
- **Embeddings**: @xenova/transformers (Xenova/all-MiniLM-L6-v2, 384 dimensions)
|
|
183
|
+
- **Language**: TypeScript 5.0+
|
|
184
|
+
- **Runtime**: Bun 1.0+
|
|
185
|
+
- **Testing**: Bun test
|
|
186
|
+
|
|
187
|
+
---
|
|
188
|
+
|
|
189
|
+
## 🎨 How It Works
|
|
190
|
+
|
|
191
|
+
### 1. Memory Storage
|
|
192
|
+
|
|
193
|
+
```
|
|
194
|
+
Claude Code calls store_memory tool
|
|
195
|
+
↓
|
|
196
|
+
Content → @xenova/transformers → 384d vector
|
|
197
|
+
↓
|
|
198
|
+
Store in LanceDB with metadata
|
|
199
|
+
↓
|
|
200
|
+
~/.local/share/vector-memory-mcp/memories.db
|
|
201
|
+
```
|
|
202
|
+
|
|
203
|
+
### 2. Memory Retrieval
|
|
204
|
+
|
|
205
|
+
```
|
|
206
|
+
Claude Code calls search_memories
|
|
207
|
+
↓
|
|
208
|
+
Query → @xenova/transformers → 384d vector
|
|
209
|
+
↓
|
|
210
|
+
Vector search in LanceDB
|
|
211
|
+
↓
|
|
212
|
+
Vector similarity scoring
|
|
213
|
+
↓
|
|
214
|
+
Return top N relevant memories
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
---
|
|
218
|
+
|
|
219
|
+
## 🔧 Configuration
|
|
220
|
+
|
|
221
|
+
The server uses environment variables for configuration:
|
|
222
|
+
|
|
223
|
+
- `VECTOR_MEMORY_DB_PATH` - Custom database path (default: `~/.local/share/vector-memory-mcp/memories.db`)
|
|
224
|
+
- `VECTOR_MEMORY_MODEL` - Embedding model to use (default: `Xenova/all-MiniLM-L6-v2`)
|
|
225
|
+
|
|
226
|
+
Example:
|
|
227
|
+
```bash
|
|
228
|
+
export VECTOR_MEMORY_DB_PATH="/path/to/custom/memories.db"
|
|
229
|
+
export VECTOR_MEMORY_MODEL="Xenova/all-MiniLM-L6-v2"
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
Or in your Claude Code config:
|
|
233
|
+
```json
|
|
234
|
+
{
|
|
235
|
+
"mcpServers": {
|
|
236
|
+
"memory": {
|
|
237
|
+
"command": "vector-memory-mcp",
|
|
238
|
+
"env": {
|
|
239
|
+
"VECTOR_MEMORY_DB_PATH": "/custom/path/memories.db"
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
}
|
|
243
|
+
}
|
|
244
|
+
```
|
|
245
|
+
|
|
246
|
+
---
|
|
247
|
+
|
|
248
|
+
## 🧪 Development
|
|
249
|
+
|
|
250
|
+
### Running Tests
|
|
251
|
+
|
|
252
|
+
```bash
|
|
253
|
+
# Run all tests
|
|
254
|
+
bun test
|
|
255
|
+
|
|
256
|
+
# Run with coverage
|
|
257
|
+
bun test --coverage
|
|
258
|
+
|
|
259
|
+
# Type checking
|
|
260
|
+
bun run typecheck
|
|
261
|
+
```
|
|
262
|
+
|
|
263
|
+
### Development Mode
|
|
264
|
+
|
|
265
|
+
```bash
|
|
266
|
+
# Watch mode - auto-restart on file changes
|
|
267
|
+
bun run dev
|
|
268
|
+
|
|
269
|
+
# Run directly without building
|
|
270
|
+
bun run src/index.ts
|
|
271
|
+
```
|
|
272
|
+
|
|
273
|
+
### Building
|
|
274
|
+
|
|
275
|
+
```bash
|
|
276
|
+
# Build for production
|
|
277
|
+
bun run build
|
|
278
|
+
|
|
279
|
+
# Output will be in dist/
|
|
280
|
+
```
|
|
281
|
+
|
|
282
|
+
---
|
|
283
|
+
|
|
284
|
+
## 🗺️ Roadmap
|
|
285
|
+
|
|
286
|
+
### ✅ Phase 1: Foundation (Current)
|
|
287
|
+
- ✅ Core database with LanceDB
|
|
288
|
+
- ✅ Embedding generation with @xenova/transformers
|
|
289
|
+
- ✅ Basic MCP tools (store, search, get, delete)
|
|
290
|
+
- ✅ TypeScript implementation with Drizzle ORM
|
|
291
|
+
|
|
292
|
+
### 🚧 Phase 2: Enhanced Search & Scoring
|
|
293
|
+
- Multi-factor scoring algorithm (similarity, recency, priority, usage frequency)
|
|
294
|
+
- Configurable scoring weights
|
|
295
|
+
- Priority levels for memories
|
|
296
|
+
- Usage tracking and frequency-based ranking
|
|
297
|
+
- Metadata filtering and advanced tagging
|
|
298
|
+
|
|
299
|
+
### 📋 Phase 3: Dual-Level Memory System
|
|
300
|
+
- Project-specific memories (`.memory/db` in repo)
|
|
301
|
+
- Global memories (`~/.local/share/vector-memory-mcp/`)
|
|
302
|
+
- Automatic precedence handling (project overrides global)
|
|
303
|
+
- Project detection and context switching
|
|
304
|
+
|
|
305
|
+
### 🎯 Phase 4: Smart Automation
|
|
306
|
+
- Auto-detect architectural decisions
|
|
307
|
+
- Capture bug fixes and solutions automatically
|
|
308
|
+
- Generate session-end summaries
|
|
309
|
+
- Natural language trigger detection (85%+ accuracy)
|
|
310
|
+
- Continuous conversation monitoring
|
|
311
|
+
|
|
312
|
+
### 🔮 Phase 5: Advanced Features
|
|
313
|
+
- Memory deduplication with similarity threshold
|
|
314
|
+
- Batch operations (import/export)
|
|
315
|
+
- Markdown import/export
|
|
316
|
+
- Memory clustering and visualization
|
|
317
|
+
- Cross-project insights
|
|
318
|
+
- Multi-modal memories (images, diagrams)
|
|
319
|
+
- Session hooks for automatic context injection
|
|
320
|
+
- Multi-CLI support (Cursor, Windsurf, etc.)
|
|
321
|
+
- Smart priority suggestions
|
|
322
|
+
|
|
323
|
+
---
|
|
324
|
+
|
|
325
|
+
## 🤝 Contributing
|
|
326
|
+
|
|
327
|
+
Contributions are welcome! This project is in active development.
|
|
328
|
+
|
|
329
|
+
### Areas We'd Love Help With:
|
|
330
|
+
- Testing and bug reports
|
|
331
|
+
- Documentation improvements
|
|
332
|
+
- Performance optimizations
|
|
333
|
+
- New feature ideas
|
|
334
|
+
|
|
335
|
+
See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines *(coming soon)*.
|
|
336
|
+
|
|
337
|
+
---
|
|
338
|
+
|
|
339
|
+
## 📄 License
|
|
340
|
+
|
|
341
|
+
MIT License - see [LICENSE](LICENSE) for details.
|
|
342
|
+
|
|
343
|
+
---
|
|
344
|
+
|
|
345
|
+
## 🙏 Acknowledgments
|
|
346
|
+
|
|
347
|
+
- Built with [@modelcontextprotocol/sdk](https://github.com/modelcontextprotocol/typescript-sdk) - Official MCP TypeScript SDK
|
|
348
|
+
- Uses [LanceDB](https://lancedb.com/) for fast, local vector search
|
|
349
|
+
- Powered by [@xenova/transformers](https://github.com/xenova/transformers.js) for local embeddings
|
|
350
|
+
- Table schemas defined with [Apache Arrow](https://arrow.apache.org/)
|
|
351
|
+
- Inspired by [doobidoo's mcp-memory-service](https://github.com/doobidoo/mcp-memory-service)
|
|
352
|
+
|
|
353
|
+
---
|
|
354
|
+
|
|
355
|
+
## 🔗 Related Projects
|
|
356
|
+
|
|
357
|
+
- [Model Context Protocol](https://modelcontextprotocol.io) - Official MCP specification
|
|
358
|
+
- [Claude Code](https://claude.ai/code) - AI coding assistant from Anthropic
|
|
359
|
+
- [LanceDB](https://lancedb.com/) - Fast, local vector search
|
|
360
|
+
- [Transformers.js](https://huggingface.co/docs/transformers.js) - Run transformers in JavaScript
|
|
361
|
+
|
|
362
|
+
---
|
|
363
|
+
|
|
364
|
+
## 💬 Support
|
|
365
|
+
|
|
366
|
+
- **Issues**: [GitHub Issues](https://github.com/AerionDyseti/vector-memory-mcp/issues)
|
|
367
|
+
- **Discussions**: [GitHub Discussions](https://github.com/AerionDyseti/vector-memory-mcp/discussions)
|
|
368
|
+
- **Documentation**: Check the `docs/` directory
|
|
369
|
+
|
|
370
|
+
---
|
|
371
|
+
|
|
372
|
+
## ⚡ Quick Examples
|
|
373
|
+
|
|
374
|
+
### Example 1: Storing a Decision
|
|
375
|
+
|
|
376
|
+
```
|
|
377
|
+
You: "Remember that we decided to use Drizzle ORM for type-safe database access"
|
|
378
|
+
Claude: I'll store that for you.
|
|
379
|
+
[Calls store_memory tool with content and metadata]
|
|
380
|
+
✓ Memory stored successfully
|
|
381
|
+
```
|
|
382
|
+
|
|
383
|
+
### Example 2: Searching Memories
|
|
384
|
+
|
|
385
|
+
```
|
|
386
|
+
You: "What did we decide about database tooling?"
|
|
387
|
+
Claude: Let me search for that...
|
|
388
|
+
[Calls search_memories with query about database]
|
|
389
|
+
Found: "Use Drizzle ORM for type-safe database access"
|
|
390
|
+
|
|
391
|
+
Based on our previous decision, we're using Drizzle ORM...
|
|
392
|
+
```
|
|
393
|
+
|
|
394
|
+
### Example 3: Managing Memories
|
|
395
|
+
|
|
396
|
+
```
|
|
397
|
+
You: "Show me what you remember about authentication"
|
|
398
|
+
Claude: [Searches for authentication-related memories]
|
|
399
|
+
Found 3 memories:
|
|
400
|
+
1. "Use JWT tokens for API authentication"
|
|
401
|
+
2. "Store refresh tokens in httpOnly cookies"
|
|
402
|
+
3. "Implement rate limiting on auth endpoints"
|
|
403
|
+
```
|
|
404
|
+
|
|
405
|
+
---
|
|
406
|
+
|
|
407
|
+
<div align="center">
|
|
408
|
+
|
|
409
|
+
**[⬆ Back to Top](#vector-memory-mcp-server)**
|
|
410
|
+
|
|
411
|
+
Made with ❤️ for developers who value context continuity
|
|
412
|
+
|
|
413
|
+
</div>
|
package/package.json
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@aeriondyseti/vector-memory-mcp",
|
|
3
|
+
"version": "0.2.0",
|
|
4
|
+
"description": "A zero-configuration RAG memory server for MCP clients",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "src/index.ts",
|
|
7
|
+
"bin": {
|
|
8
|
+
"vector-memory-mcp": "src/index.ts"
|
|
9
|
+
},
|
|
10
|
+
"files": [
|
|
11
|
+
"src",
|
|
12
|
+
"README.md",
|
|
13
|
+
"LICENSE"
|
|
14
|
+
],
|
|
15
|
+
"repository": {
|
|
16
|
+
"type": "git",
|
|
17
|
+
"url": "git+https://github.com/AerionDyseti/vector-memory-mcp.git"
|
|
18
|
+
},
|
|
19
|
+
"author": "AerionDyseti",
|
|
20
|
+
"bugs": {
|
|
21
|
+
"url": "https://github.com/AerionDyseti/vector-memory-mcp/issues"
|
|
22
|
+
},
|
|
23
|
+
"homepage": "https://github.com/AerionDyseti/vector-memory-mcp#readme",
|
|
24
|
+
"scripts": {
|
|
25
|
+
"start": "bun run src/index.ts",
|
|
26
|
+
"dev": "bun --watch run src/index.ts",
|
|
27
|
+
"typecheck": "tsc --noEmit",
|
|
28
|
+
"test": "bun test",
|
|
29
|
+
"test:coverage": "bun test --coverage"
|
|
30
|
+
},
|
|
31
|
+
"keywords": ["mcp", "memory", "rag", "embeddings", "lancedb"],
|
|
32
|
+
"license": "MIT",
|
|
33
|
+
"dependencies": {
|
|
34
|
+
"@lancedb/lancedb": "^0.22.3",
|
|
35
|
+
"@modelcontextprotocol/sdk": "^1.0.0",
|
|
36
|
+
"@xenova/transformers": "^2.17.0",
|
|
37
|
+
"apache-arrow": "^21.1.0"
|
|
38
|
+
},
|
|
39
|
+
"devDependencies": {
|
|
40
|
+
"@types/bun": "latest",
|
|
41
|
+
"typescript": "^5.0.0"
|
|
42
|
+
}
|
|
43
|
+
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import { join } from "path";
|
|
2
|
+
import { homedir } from "os";
|
|
3
|
+
|
|
4
|
+
export interface Config {
|
|
5
|
+
dbPath: string;
|
|
6
|
+
embeddingModel: string;
|
|
7
|
+
embeddingDimension: number;
|
|
8
|
+
}
|
|
9
|
+
|
|
10
|
+
const DEFAULT_DB_PATH = join(
|
|
11
|
+
homedir(),
|
|
12
|
+
".local",
|
|
13
|
+
"share",
|
|
14
|
+
"vector-memory-mcp",
|
|
15
|
+
"memories.db"
|
|
16
|
+
);
|
|
17
|
+
|
|
18
|
+
const DEFAULT_EMBEDDING_MODEL = "Xenova/all-MiniLM-L6-v2";
|
|
19
|
+
const DEFAULT_EMBEDDING_DIMENSION = 384;
|
|
20
|
+
|
|
21
|
+
export function loadConfig(): Config {
|
|
22
|
+
return {
|
|
23
|
+
dbPath: process.env.VECTOR_MEMORY_DB_PATH ?? DEFAULT_DB_PATH,
|
|
24
|
+
embeddingModel: process.env.VECTOR_MEMORY_MODEL ?? DEFAULT_EMBEDDING_MODEL,
|
|
25
|
+
embeddingDimension: DEFAULT_EMBEDDING_DIMENSION,
|
|
26
|
+
};
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
export const config = loadConfig();
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import * as lancedb from "@lancedb/lancedb";
|
|
2
|
+
import { mkdirSync } from "fs";
|
|
3
|
+
import { dirname } from "path";
|
|
4
|
+
|
|
5
|
+
export async function connectToDatabase(dbPath: string): Promise<lancedb.Connection> {
|
|
6
|
+
// Ensure directory exists
|
|
7
|
+
mkdirSync(dirname(dbPath), { recursive: true });
|
|
8
|
+
|
|
9
|
+
const db = await lancedb.connect(dbPath);
|
|
10
|
+
return db;
|
|
11
|
+
}
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import * as lancedb from "@lancedb/lancedb";
|
|
2
|
+
import { TABLE_NAME, memorySchema } from "./schema.js";
|
|
3
|
+
import {
|
|
4
|
+
type Memory,
|
|
5
|
+
type VectorRow,
|
|
6
|
+
DELETED_TOMBSTONE,
|
|
7
|
+
} from "../types/memory.js";
|
|
8
|
+
|
|
9
|
+
export class MemoryRepository {
|
|
10
|
+
constructor(private db: lancedb.Connection) {}
|
|
11
|
+
|
|
12
|
+
private async getTable() {
|
|
13
|
+
const names = await this.db.tableNames();
|
|
14
|
+
if (names.includes(TABLE_NAME)) {
|
|
15
|
+
return await this.db.openTable(TABLE_NAME);
|
|
16
|
+
}
|
|
17
|
+
// Create with empty data to initialize schema
|
|
18
|
+
return await this.db.createTable(TABLE_NAME, [], { schema: memorySchema });
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
async insert(memory: Memory): Promise<void> {
|
|
22
|
+
const table = await this.getTable();
|
|
23
|
+
await table.add([
|
|
24
|
+
{
|
|
25
|
+
id: memory.id,
|
|
26
|
+
vector: memory.embedding,
|
|
27
|
+
content: memory.content,
|
|
28
|
+
metadata: JSON.stringify(memory.metadata),
|
|
29
|
+
created_at: memory.createdAt.getTime(),
|
|
30
|
+
updated_at: memory.updatedAt.getTime(),
|
|
31
|
+
superseded_by: memory.supersededBy,
|
|
32
|
+
},
|
|
33
|
+
]);
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
async findById(id: string): Promise<Memory | null> {
|
|
37
|
+
const table = await this.getTable();
|
|
38
|
+
const results = await table.query().where(`id = '${id}'`).limit(1).toArray();
|
|
39
|
+
|
|
40
|
+
if (results.length === 0) {
|
|
41
|
+
return null;
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const row = results[0];
|
|
45
|
+
|
|
46
|
+
// Handle Arrow Vector type conversion
|
|
47
|
+
// LanceDB returns an Arrow Vector object which is iterable but not an array
|
|
48
|
+
const vectorData = row.vector as any;
|
|
49
|
+
const embedding = Array.isArray(vectorData)
|
|
50
|
+
? vectorData
|
|
51
|
+
: Array.from(vectorData) as number[];
|
|
52
|
+
|
|
53
|
+
return {
|
|
54
|
+
id: row.id as string,
|
|
55
|
+
content: row.content as string,
|
|
56
|
+
embedding,
|
|
57
|
+
metadata: JSON.parse(row.metadata as string),
|
|
58
|
+
createdAt: new Date(row.created_at as number),
|
|
59
|
+
updatedAt: new Date(row.updated_at as number),
|
|
60
|
+
supersededBy: row.superseded_by as string | null,
|
|
61
|
+
};
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
async markDeleted(id: string): Promise<boolean> {
|
|
65
|
+
const table = await this.getTable();
|
|
66
|
+
|
|
67
|
+
// Verify existence first to match previous behavior (return false if not found)
|
|
68
|
+
const existing = await table.query().where(`id = '${id}'`).limit(1).toArray();
|
|
69
|
+
if (existing.length === 0) {
|
|
70
|
+
return false;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
const now = Date.now();
|
|
74
|
+
await table.update({
|
|
75
|
+
where: `id = '${id}'`,
|
|
76
|
+
values: {
|
|
77
|
+
superseded_by: DELETED_TOMBSTONE,
|
|
78
|
+
updated_at: now,
|
|
79
|
+
},
|
|
80
|
+
});
|
|
81
|
+
|
|
82
|
+
return true;
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
async findSimilar(embedding: number[], limit: number): Promise<VectorRow[]> {
|
|
86
|
+
const table = await this.getTable();
|
|
87
|
+
const results = await table.vectorSearch(embedding).limit(limit).toArray();
|
|
88
|
+
|
|
89
|
+
return results.map((r) => ({
|
|
90
|
+
id: r.id as string,
|
|
91
|
+
distance: r._distance as number,
|
|
92
|
+
}));
|
|
93
|
+
}
|
|
94
|
+
}
|
package/src/db/schema.ts
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import {
|
|
2
|
+
Schema,
|
|
3
|
+
Field,
|
|
4
|
+
FixedSizeList,
|
|
5
|
+
Float32,
|
|
6
|
+
Utf8,
|
|
7
|
+
Timestamp,
|
|
8
|
+
TimeUnit,
|
|
9
|
+
} from "apache-arrow";
|
|
10
|
+
|
|
11
|
+
export const TABLE_NAME = "memories";
|
|
12
|
+
|
|
13
|
+
export const memorySchema = new Schema([
|
|
14
|
+
new Field("id", new Utf8(), false),
|
|
15
|
+
new Field(
|
|
16
|
+
"vector",
|
|
17
|
+
new FixedSizeList(384, new Field("item", new Float32())),
|
|
18
|
+
false
|
|
19
|
+
),
|
|
20
|
+
new Field("content", new Utf8(), false),
|
|
21
|
+
new Field("metadata", new Utf8(), false), // JSON string
|
|
22
|
+
new Field(
|
|
23
|
+
"created_at",
|
|
24
|
+
new Timestamp(TimeUnit.MILLISECOND),
|
|
25
|
+
false
|
|
26
|
+
),
|
|
27
|
+
new Field(
|
|
28
|
+
"updated_at",
|
|
29
|
+
new Timestamp(TimeUnit.MILLISECOND),
|
|
30
|
+
false
|
|
31
|
+
),
|
|
32
|
+
new Field("superseded_by", new Utf8(), true), // Nullable
|
|
33
|
+
]);
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import { config } from "./config/index.js";
|
|
4
|
+
import { connectToDatabase } from "./db/connection.js";
|
|
5
|
+
import { MemoryRepository } from "./db/memory.repository.js";
|
|
6
|
+
import { EmbeddingsService } from "./services/embeddings.service.js";
|
|
7
|
+
import { MemoryService } from "./services/memory.service.js";
|
|
8
|
+
import { startServer } from "./mcp/server.js";
|
|
9
|
+
|
|
10
|
+
async function main(): Promise<void> {
|
|
11
|
+
// Initialize database
|
|
12
|
+
const db = await connectToDatabase(config.dbPath);
|
|
13
|
+
|
|
14
|
+
// Initialize layers
|
|
15
|
+
const repository = new MemoryRepository(db);
|
|
16
|
+
const embeddings = new EmbeddingsService(config.embeddingModel, config.embeddingDimension);
|
|
17
|
+
const memoryService = new MemoryService(repository, embeddings);
|
|
18
|
+
|
|
19
|
+
// Start MCP server
|
|
20
|
+
await startServer(memoryService);
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
main().catch(console.error);
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import type { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
|
|
2
|
+
import type { MemoryService } from "../services/memory.service.js";
|
|
3
|
+
|
|
4
|
+
export async function handleStoreMemory(
|
|
5
|
+
args: Record<string, unknown> | undefined,
|
|
6
|
+
service: MemoryService
|
|
7
|
+
): Promise<CallToolResult> {
|
|
8
|
+
const content = args?.content as string;
|
|
9
|
+
const metadata = (args?.metadata as Record<string, unknown>) ?? {};
|
|
10
|
+
const memory = await service.store(content, metadata);
|
|
11
|
+
|
|
12
|
+
return {
|
|
13
|
+
content: [{ type: "text", text: `Memory stored with ID: ${memory.id}` }],
|
|
14
|
+
};
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
export async function handleDeleteMemory(
|
|
18
|
+
args: Record<string, unknown> | undefined,
|
|
19
|
+
service: MemoryService
|
|
20
|
+
): Promise<CallToolResult> {
|
|
21
|
+
const id = args?.id as string;
|
|
22
|
+
const success = await service.delete(id);
|
|
23
|
+
|
|
24
|
+
return {
|
|
25
|
+
content: [
|
|
26
|
+
{
|
|
27
|
+
type: "text",
|
|
28
|
+
text: success
|
|
29
|
+
? `Memory ${id} deleted successfully`
|
|
30
|
+
: `Memory ${id} not found`,
|
|
31
|
+
},
|
|
32
|
+
],
|
|
33
|
+
};
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
export async function handleSearchMemories(
|
|
37
|
+
args: Record<string, unknown> | undefined,
|
|
38
|
+
service: MemoryService
|
|
39
|
+
): Promise<CallToolResult> {
|
|
40
|
+
const query = args?.query as string;
|
|
41
|
+
const limit = (args?.limit as number) ?? 10;
|
|
42
|
+
const memories = await service.search(query, limit);
|
|
43
|
+
|
|
44
|
+
if (memories.length === 0) {
|
|
45
|
+
return {
|
|
46
|
+
content: [{ type: "text", text: "No memories found matching your query." }],
|
|
47
|
+
};
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
const results = memories.map((mem) => {
|
|
51
|
+
let result = `ID: ${mem.id}\nContent: ${mem.content}`;
|
|
52
|
+
if (Object.keys(mem.metadata).length > 0) {
|
|
53
|
+
result += `\nMetadata: ${JSON.stringify(mem.metadata)}`;
|
|
54
|
+
}
|
|
55
|
+
return result;
|
|
56
|
+
});
|
|
57
|
+
|
|
58
|
+
return {
|
|
59
|
+
content: [{ type: "text", text: results.join("\n\n---\n\n") }],
|
|
60
|
+
};
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
export async function handleGetMemory(
|
|
64
|
+
args: Record<string, unknown> | undefined,
|
|
65
|
+
service: MemoryService
|
|
66
|
+
): Promise<CallToolResult> {
|
|
67
|
+
const id = args?.id as string;
|
|
68
|
+
const memory = await service.get(id);
|
|
69
|
+
|
|
70
|
+
if (!memory) {
|
|
71
|
+
return {
|
|
72
|
+
content: [{ type: "text", text: `Memory ${id} not found` }],
|
|
73
|
+
};
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
let result = `ID: ${memory.id}\nContent: ${memory.content}`;
|
|
77
|
+
if (Object.keys(memory.metadata).length > 0) {
|
|
78
|
+
result += `\nMetadata: ${JSON.stringify(memory.metadata)}`;
|
|
79
|
+
}
|
|
80
|
+
result += `\nCreated: ${memory.createdAt.toISOString()}`;
|
|
81
|
+
result += `\nUpdated: ${memory.updatedAt.toISOString()}`;
|
|
82
|
+
if (memory.supersededBy) {
|
|
83
|
+
result += `\nSuperseded by: ${memory.supersededBy}`;
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
return {
|
|
87
|
+
content: [{ type: "text", text: result }],
|
|
88
|
+
};
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
export async function handleToolCall(
|
|
92
|
+
name: string,
|
|
93
|
+
args: Record<string, unknown> | undefined,
|
|
94
|
+
service: MemoryService
|
|
95
|
+
): Promise<CallToolResult> {
|
|
96
|
+
switch (name) {
|
|
97
|
+
case "store_memory":
|
|
98
|
+
return handleStoreMemory(args, service);
|
|
99
|
+
case "delete_memory":
|
|
100
|
+
return handleDeleteMemory(args, service);
|
|
101
|
+
case "search_memories":
|
|
102
|
+
return handleSearchMemories(args, service);
|
|
103
|
+
case "get_memory":
|
|
104
|
+
return handleGetMemory(args, service);
|
|
105
|
+
default:
|
|
106
|
+
return {
|
|
107
|
+
content: [{ type: "text", text: `Unknown tool: ${name}` }],
|
|
108
|
+
isError: true,
|
|
109
|
+
};
|
|
110
|
+
}
|
|
111
|
+
}
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
|
|
2
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
3
|
+
import {
|
|
4
|
+
CallToolRequestSchema,
|
|
5
|
+
ListToolsRequestSchema,
|
|
6
|
+
} from "@modelcontextprotocol/sdk/types.js";
|
|
7
|
+
|
|
8
|
+
import { tools } from "./tools.js";
|
|
9
|
+
import { handleToolCall } from "./handlers.js";
|
|
10
|
+
import type { MemoryService } from "../services/memory.service.js";
|
|
11
|
+
|
|
12
|
+
export function createServer(memoryService: MemoryService): Server {
|
|
13
|
+
const server = new Server(
|
|
14
|
+
{ name: "vector-memory-mcp", version: "0.2.0" },
|
|
15
|
+
{ capabilities: { tools: {} } }
|
|
16
|
+
);
|
|
17
|
+
|
|
18
|
+
server.setRequestHandler(ListToolsRequestSchema, async () => {
|
|
19
|
+
return { tools };
|
|
20
|
+
});
|
|
21
|
+
|
|
22
|
+
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
23
|
+
const { name, arguments: args } = request.params;
|
|
24
|
+
return handleToolCall(name, args, memoryService);
|
|
25
|
+
});
|
|
26
|
+
|
|
27
|
+
return server;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
export async function startServer(memoryService: MemoryService): Promise<void> {
|
|
31
|
+
const server = createServer(memoryService);
|
|
32
|
+
const transport = new StdioServerTransport();
|
|
33
|
+
await server.connect(transport);
|
|
34
|
+
}
|
package/src/mcp/tools.ts
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
import type { Tool } from "@modelcontextprotocol/sdk/types.js";
|
|
2
|
+
|
|
3
|
+
export const storeMemoryTool: Tool = {
|
|
4
|
+
name: "store_memory",
|
|
5
|
+
description: "Store a new memory. Use this to save information for later recall.",
|
|
6
|
+
inputSchema: {
|
|
7
|
+
type: "object",
|
|
8
|
+
properties: {
|
|
9
|
+
content: {
|
|
10
|
+
type: "string",
|
|
11
|
+
description: "The text content to store as a memory",
|
|
12
|
+
},
|
|
13
|
+
metadata: {
|
|
14
|
+
type: "object",
|
|
15
|
+
description: "Optional key-value metadata to attach to the memory",
|
|
16
|
+
additionalProperties: true,
|
|
17
|
+
},
|
|
18
|
+
},
|
|
19
|
+
required: ["content"],
|
|
20
|
+
},
|
|
21
|
+
};
|
|
22
|
+
|
|
23
|
+
export const deleteMemoryTool: Tool = {
|
|
24
|
+
name: "delete_memory",
|
|
25
|
+
description:
|
|
26
|
+
"Delete a memory by its ID. The memory will be soft-deleted and won't appear in search results.",
|
|
27
|
+
inputSchema: {
|
|
28
|
+
type: "object",
|
|
29
|
+
properties: {
|
|
30
|
+
id: {
|
|
31
|
+
type: "string",
|
|
32
|
+
description: "The ID of the memory to delete",
|
|
33
|
+
},
|
|
34
|
+
},
|
|
35
|
+
required: ["id"],
|
|
36
|
+
},
|
|
37
|
+
};
|
|
38
|
+
|
|
39
|
+
/**
 * MCP tool schema: semantic-similarity search over stored memories.
 * `limit` caps the result count and defaults to 10.
 */
export const searchMemoriesTool: Tool = {
  name: "search_memories",
  description:
    "Search for memories using semantic similarity. Returns the most relevant memories for the given query.",
  inputSchema: {
    type: "object",
    properties: {
      query: {
        type: "string",
        description: "The search query to find relevant memories",
      },
      limit: {
        type: "integer",
        description: "Maximum number of results to return (default: 10)",
        default: 10,
      },
    },
    required: ["query"],
  },
};
|
|
59
|
+
|
|
60
|
+
/** MCP tool schema: fetch a single memory by its id. */
export const getMemoryTool: Tool = {
  name: "get_memory",
  description: "Retrieve a specific memory by its ID.",
  inputSchema: {
    type: "object",
    properties: {
      id: {
        type: "string",
        description: "The ID of the memory to retrieve",
      },
    },
    required: ["id"],
  },
};
|
|
74
|
+
|
|
75
|
+
/** All tools advertised by the server, in the order they are listed to clients. */
export const tools: Tool[] = [
  storeMemoryTool,
  deleteMemoryTool,
  searchMemoriesTool,
  getMemoryTool,
];
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import { pipeline, type FeatureExtractionPipeline } from "@xenova/transformers";
|
|
2
|
+
|
|
3
|
+
export class EmbeddingsService {
|
|
4
|
+
private modelName: string;
|
|
5
|
+
private extractor: FeatureExtractionPipeline | null = null;
|
|
6
|
+
private initPromise: Promise<FeatureExtractionPipeline> | null = null;
|
|
7
|
+
private _dimension: number;
|
|
8
|
+
|
|
9
|
+
constructor(modelName: string, dimension: number) {
|
|
10
|
+
this.modelName = modelName;
|
|
11
|
+
this._dimension = dimension;
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
get dimension(): number {
|
|
15
|
+
return this._dimension;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
private async getExtractor(): Promise<FeatureExtractionPipeline> {
|
|
19
|
+
if (this.extractor) {
|
|
20
|
+
return this.extractor;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
if (!this.initPromise) {
|
|
24
|
+
this.initPromise = pipeline("feature-extraction", this.modelName, {
|
|
25
|
+
quantized: true,
|
|
26
|
+
}) as Promise<FeatureExtractionPipeline>;
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
this.extractor = await this.initPromise;
|
|
30
|
+
return this.extractor;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
async embed(text: string): Promise<number[]> {
|
|
34
|
+
const extractor = await this.getExtractor();
|
|
35
|
+
const output = await extractor(text, { pooling: "mean", normalize: true });
|
|
36
|
+
return Array.from(output.data as Float32Array);
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
async embedBatch(texts: string[]): Promise<number[][]> {
|
|
40
|
+
const results: number[][] = [];
|
|
41
|
+
for (const text of texts) {
|
|
42
|
+
results.push(await this.embed(text));
|
|
43
|
+
}
|
|
44
|
+
return results;
|
|
45
|
+
}
|
|
46
|
+
}
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import { randomUUID } from "crypto";
|
|
2
|
+
import type { Memory } from "../types/memory.js";
|
|
3
|
+
import { DELETED_TOMBSTONE, isSuperseded } from "../types/memory.js";
|
|
4
|
+
import type { MemoryRepository } from "../db/memory.repository.js";
|
|
5
|
+
import type { EmbeddingsService } from "./embeddings.service.js";
|
|
6
|
+
|
|
7
|
+
export class MemoryService {
|
|
8
|
+
constructor(
|
|
9
|
+
private repository: MemoryRepository,
|
|
10
|
+
private embeddings: EmbeddingsService
|
|
11
|
+
) {}
|
|
12
|
+
|
|
13
|
+
async store(content: string, metadata: Record<string, unknown> = {}): Promise<Memory> {
|
|
14
|
+
const id = randomUUID();
|
|
15
|
+
const now = new Date();
|
|
16
|
+
const embedding = await this.embeddings.embed(content);
|
|
17
|
+
|
|
18
|
+
const memory: Memory = {
|
|
19
|
+
id,
|
|
20
|
+
content,
|
|
21
|
+
embedding,
|
|
22
|
+
metadata,
|
|
23
|
+
createdAt: now,
|
|
24
|
+
updatedAt: now,
|
|
25
|
+
supersededBy: null,
|
|
26
|
+
};
|
|
27
|
+
|
|
28
|
+
await this.repository.insert(memory);
|
|
29
|
+
return memory;
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
async get(id: string): Promise<Memory | null> {
|
|
33
|
+
return await this.repository.findById(id);
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
async delete(id: string): Promise<boolean> {
|
|
37
|
+
return await this.repository.markDeleted(id);
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
async search(query: string, limit: number = 10): Promise<Memory[]> {
|
|
41
|
+
const queryEmbedding = await this.embeddings.embed(query);
|
|
42
|
+
const fetchLimit = limit * 3;
|
|
43
|
+
|
|
44
|
+
const rows = await this.repository.findSimilar(queryEmbedding, fetchLimit);
|
|
45
|
+
|
|
46
|
+
const results: Memory[] = [];
|
|
47
|
+
const seenIds = new Set<string>();
|
|
48
|
+
|
|
49
|
+
for (const row of rows) {
|
|
50
|
+
let memory = await this.repository.findById(row.id);
|
|
51
|
+
|
|
52
|
+
if (!memory) {
|
|
53
|
+
continue;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
if (isSuperseded(memory)) {
|
|
57
|
+
memory = await this.followSupersessionChain(row.id);
|
|
58
|
+
if (!memory) {
|
|
59
|
+
continue;
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
if (seenIds.has(memory.id)) {
|
|
64
|
+
continue;
|
|
65
|
+
}
|
|
66
|
+
seenIds.add(memory.id);
|
|
67
|
+
|
|
68
|
+
results.push(memory);
|
|
69
|
+
if (results.length >= limit) {
|
|
70
|
+
break;
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
return results;
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
private async followSupersessionChain(memoryId: string): Promise<Memory | null> {
|
|
78
|
+
const visited = new Set<string>();
|
|
79
|
+
let currentId: string | null = memoryId;
|
|
80
|
+
|
|
81
|
+
while (currentId && !visited.has(currentId)) {
|
|
82
|
+
visited.add(currentId);
|
|
83
|
+
const memory = await this.repository.findById(currentId);
|
|
84
|
+
|
|
85
|
+
if (!memory) {
|
|
86
|
+
return null;
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
if (memory.supersededBy === null) {
|
|
90
|
+
return memory;
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
if (memory.supersededBy === DELETED_TOMBSTONE) {
|
|
94
|
+
return null;
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
currentId = memory.supersededBy;
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
return null;
|
|
101
|
+
}
|
|
102
|
+
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
export const DELETED_TOMBSTONE = "DELETED";
|
|
2
|
+
|
|
3
|
+
export interface Memory {
|
|
4
|
+
id: string;
|
|
5
|
+
content: string;
|
|
6
|
+
embedding: number[];
|
|
7
|
+
metadata: Record<string, unknown>;
|
|
8
|
+
createdAt: Date;
|
|
9
|
+
updatedAt: Date;
|
|
10
|
+
supersededBy: string | null;
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
export interface VectorRow {
|
|
14
|
+
id: string;
|
|
15
|
+
distance: number;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
export function isDeleted(memory: Memory): boolean {
|
|
19
|
+
return memory.supersededBy === DELETED_TOMBSTONE;
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
export function isSuperseded(memory: Memory): boolean {
|
|
23
|
+
return memory.supersededBy !== null;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
export function memoryToDict(memory: Memory): Record<string, unknown> {
|
|
27
|
+
return {
|
|
28
|
+
id: memory.id,
|
|
29
|
+
content: memory.content,
|
|
30
|
+
metadata: memory.metadata,
|
|
31
|
+
createdAt: memory.createdAt.toISOString(),
|
|
32
|
+
updatedAt: memory.updatedAt.toISOString(),
|
|
33
|
+
supersededBy: memory.supersededBy,
|
|
34
|
+
};
|
|
35
|
+
}
|