@aeriondyseti/vector-memory-mcp 2.3.0 → 2.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aeriondyseti/vector-memory-mcp",
3
- "version": "2.3.0",
3
+ "version": "2.4.0",
4
4
  "description": "A zero-configuration RAG memory server for MCP clients",
5
5
  "type": "module",
6
6
  "main": "server/index.ts",
@@ -9,7 +9,6 @@
9
9
  },
10
10
  "files": [
11
11
  "server",
12
- "scripts",
13
12
  "README.md",
14
13
  "LICENSE"
15
14
  ],
@@ -31,6 +30,7 @@
31
30
  "test:quick": "bun test",
32
31
  "test:coverage": "bun test --preload ./tests/preload.ts --coverage",
33
32
  "benchmark": "bun test tests/benchmark.test.ts --preload ./tests/preload.ts",
33
+ "benchmark:update": "bun run scripts/update-benchmarks.ts",
34
34
  "test:preload": "bun run tests/preload.ts",
35
35
  "smoke": "bun run scripts/smoke-test.ts",
36
36
  "warmup": "bun run scripts/warmup.ts",
@@ -47,18 +47,18 @@
47
47
  ],
48
48
  "license": "MIT",
49
49
  "dependencies": {
50
- "@huggingface/transformers": "^3.8.0",
50
+ "@huggingface/tokenizers": "^0.1.3",
51
51
  "@lancedb/lancedb": "^0.26.2",
52
52
  "@modelcontextprotocol/sdk": "^1.0.0",
53
53
  "arg": "^5.0.2",
54
- "hono": "^4.11.3"
54
+ "hono": "^4.11.3",
55
+ "onnxruntime-node": "^1.21.0"
55
56
  },
56
57
  "devDependencies": {
57
58
  "@types/bun": "latest",
58
59
  "typescript": "^5.0.0"
59
60
  },
60
61
  "trustedDependencies": [
61
- "protobufjs",
62
- "sharp"
62
+ "protobufjs"
63
63
  ]
64
64
  }
@@ -1,9 +1,17 @@
1
- import { pipeline, type FeatureExtractionPipeline } from "@huggingface/transformers";
1
+ import * as ort from "onnxruntime-node";
2
+ import { Tokenizer } from "@huggingface/tokenizers";
3
+ import { join, dirname } from "path";
4
+ import { mkdir } from "fs/promises";
5
+ import { existsSync } from "fs";
6
+
7
+ const HF_CDN = "https://huggingface.co";
8
+ const MAX_SEQ_LENGTH = 512;
2
9
 
3
10
  export class EmbeddingsService {
4
11
  private modelName: string;
5
- private extractor: FeatureExtractionPipeline | null = null;
6
- private initPromise: Promise<FeatureExtractionPipeline> | null = null;
12
+ private session: ort.InferenceSession | null = null;
13
+ private tokenizer: Tokenizer | null = null;
14
+ private initPromise: Promise<void> | null = null;
7
15
  private _dimension: number;
8
16
 
9
17
  constructor(modelName: string, dimension: number) {
@@ -15,27 +23,79 @@ export class EmbeddingsService {
15
23
  return this._dimension;
16
24
  }
17
25
 
18
- private async getExtractor(): Promise<FeatureExtractionPipeline> {
19
- if (this.extractor) {
20
- return this.extractor;
21
- }
26
+ get isReady(): boolean {
27
+ return this.session !== null;
28
+ }
22
29
 
30
+ async warmup(): Promise<void> {
31
+ await this.initialize();
32
+ }
33
+
34
+ private async initialize(): Promise<void> {
35
+ if (this.session) return;
23
36
  if (!this.initPromise) {
24
- this.initPromise = pipeline(
25
- "feature-extraction",
26
- this.modelName,
27
- { dtype: "fp32" } as any
28
- ) as Promise<FeatureExtractionPipeline>;
37
+ this.initPromise = this._init();
29
38
  }
39
+ await this.initPromise;
40
+ }
41
+
42
+ private get cacheDir(): string {
43
+ const packageRoot = join(dirname(Bun.main), "..");
44
+ return join(packageRoot, ".cache", "models", this.modelName);
45
+ }
46
+
47
+ private async downloadIfMissing(fileName: string): Promise<string> {
48
+ const filePath = join(this.cacheDir, fileName);
49
+ if (existsSync(filePath)) return filePath;
50
+
51
+ const url = `${HF_CDN}/${this.modelName}/resolve/main/${fileName}`;
52
+ await mkdir(dirname(filePath), { recursive: true });
53
+ const response = await fetch(url);
54
+ if (!response.ok) throw new Error(`Failed to download ${url}: ${response.status}`);
55
+ const buffer = await response.arrayBuffer();
56
+ await Bun.write(filePath, buffer);
57
+ return filePath;
58
+ }
59
+
60
+ private async _init(): Promise<void> {
61
+ const modelPath = await this.downloadIfMissing("onnx/model.onnx");
62
+ const tokenizerJsonPath = await this.downloadIfMissing("tokenizer.json");
63
+ const tokenizerConfigPath = await this.downloadIfMissing("tokenizer_config.json");
64
+
65
+ this.session = await ort.InferenceSession.create(modelPath, {
66
+ executionProviders: ["cpu"],
67
+ });
30
68
 
31
- this.extractor = await this.initPromise;
32
- return this.extractor;
69
+ const tokenizerJson = await Bun.file(tokenizerJsonPath).json();
70
+ const tokenizerConfig = await Bun.file(tokenizerConfigPath).json();
71
+ this.tokenizer = new Tokenizer(tokenizerJson, tokenizerConfig);
33
72
  }
34
73
 
35
74
  async embed(text: string): Promise<number[]> {
36
- const extractor = await this.getExtractor();
37
- const output = await extractor(text, { pooling: "mean", normalize: true });
38
- return Array.from(output.data as Float32Array);
75
+ await this.initialize();
76
+
77
+ const encoded = this.tokenizer!.encode(text);
78
+
79
+ // Truncate to model's max sequence length
80
+ const seqLen = Math.min(encoded.ids.length, MAX_SEQ_LENGTH);
81
+ const ids = encoded.ids.slice(0, seqLen);
82
+ const mask = encoded.attention_mask.slice(0, seqLen);
83
+
84
+ const inputIds = BigInt64Array.from(ids.map(BigInt));
85
+ const attentionMask = BigInt64Array.from(mask.map(BigInt));
86
+ const tokenTypeIds = new BigInt64Array(seqLen); // zeros for single-sequence input
87
+
88
+ const feeds: Record<string, ort.Tensor> = {
89
+ input_ids: new ort.Tensor("int64", inputIds, [1, seqLen]),
90
+ attention_mask: new ort.Tensor("int64", attentionMask, [1, seqLen]),
91
+ token_type_ids: new ort.Tensor("int64", tokenTypeIds, [1, seqLen]),
92
+ };
93
+
94
+ const output = await this.session!.run(feeds);
95
+ const lastHidden = output["last_hidden_state"];
96
+
97
+ const pooled = this.meanPool(lastHidden.data as Float32Array, mask, seqLen);
98
+ return this.normalize(pooled);
39
99
  }
40
100
 
41
101
  async embedBatch(texts: string[]): Promise<number[][]> {
@@ -45,4 +105,29 @@ export class EmbeddingsService {
45
105
  }
46
106
  return results;
47
107
  }
108
+
109
+ private meanPool(data: Float32Array, mask: number[], seqLen: number): number[] {
110
+ const dim = this._dimension;
111
+ const pooled = new Array(dim).fill(0);
112
+ let maskSum = 0;
113
+ for (let t = 0; t < seqLen; t++) {
114
+ if (mask[t]) {
115
+ maskSum += 1;
116
+ for (let d = 0; d < dim; d++) {
117
+ pooled[d] += data[t * dim + d];
118
+ }
119
+ }
120
+ }
121
+ for (let d = 0; d < dim; d++) {
122
+ pooled[d] /= maskSum;
123
+ }
124
+ return pooled;
125
+ }
126
+
127
+ private normalize(vec: number[]): number[] {
128
+ let norm = 0;
129
+ for (const v of vec) norm += v * v;
130
+ norm = Math.sqrt(norm);
131
+ return vec.map(v => v / norm);
132
+ }
48
133
  }
@@ -127,8 +127,43 @@ export async function backfillVectors(
127
127
  db: Database,
128
128
  embeddings: EmbeddingsService,
129
129
  ): Promise<void> {
130
+ // Fast sentinel check: skip the LEFT JOIN queries entirely when backfill is done
131
+ const sentinel = db
132
+ .prepare("SELECT 1 FROM memories_vec LIMIT 1")
133
+ .get();
134
+ const memoriesExist = db.prepare("SELECT 1 FROM memories LIMIT 1").get();
135
+ const convosExist = db.prepare("SELECT 1 FROM conversation_history LIMIT 1").get();
136
+
137
+ // If vec tables have data and source tables have data, backfill is likely complete.
138
+ // Only run the expensive LEFT JOIN when there's reason to suspect gaps.
139
+ const convoSentinel = db
140
+ .prepare("SELECT 1 FROM conversation_history_vec LIMIT 1")
141
+ .get();
142
+ const mayNeedMemoryBackfill = memoriesExist && !sentinel;
143
+ const mayNeedConvoBackfill = convosExist && !convoSentinel;
144
+
145
+ // If both vec tables are populated, do a quick count check to confirm
146
+ if (!mayNeedMemoryBackfill && !mayNeedConvoBackfill) {
147
+ if (memoriesExist) {
148
+ const gap = db.prepare(
149
+ `SELECT 1 FROM memories m LEFT JOIN memories_vec v ON m.id = v.id
150
+ WHERE v.id IS NULL OR length(v.vector) = 0 LIMIT 1`,
151
+ ).get();
152
+ if (!gap && convosExist) {
153
+ const convoGap = db.prepare(
154
+ `SELECT 1 FROM conversation_history c LEFT JOIN conversation_history_vec v ON c.id = v.id
155
+ WHERE v.id IS NULL OR length(v.vector) = 0 LIMIT 1`,
156
+ ).get();
157
+ if (!convoGap) return;
158
+ } else if (!gap && !convosExist) {
159
+ return;
160
+ }
161
+ } else {
162
+ return; // No data at all
163
+ }
164
+ }
165
+
130
166
  // ── Memories ──────────────────────────────────────────────────────
131
- // Catch both missing rows (v.id IS NULL) and corrupt 0-byte BLOBs
132
167
  const missingMemories = db
133
168
  .prepare(
134
169
  `SELECT m.id, m.content, json_extract(m.metadata, '$.type') AS type
@@ -151,14 +186,27 @@ export async function backfillVectors(
151
186
  new Array(embeddings.dimension).fill(0),
152
187
  );
153
188
 
154
- for (const row of missingMemories) {
155
- // Waypoints use a zero vector (not semantically searched)
156
- const blob =
157
- row.type === "waypoint"
158
- ? zeroVector
159
- : serializeVector(await embeddings.embed(row.content));
189
+ // Separate waypoints from content that needs embedding
190
+ const toEmbed = missingMemories.filter((r) => r.type !== "waypoint");
191
+ const waypoints = missingMemories.filter((r) => r.type === "waypoint");
192
+
193
+ // Batch embed all non-waypoint content
194
+ const vectors = toEmbed.length > 0
195
+ ? await embeddings.embedBatch(toEmbed.map((r) => r.content))
196
+ : [];
160
197
 
161
- insertVec.run(row.id, blob);
198
+ db.exec("BEGIN");
199
+ try {
200
+ for (const row of waypoints) {
201
+ insertVec.run(row.id, zeroVector);
202
+ }
203
+ for (let i = 0; i < toEmbed.length; i++) {
204
+ insertVec.run(toEmbed[i].id, serializeVector(vectors[i]));
205
+ }
206
+ db.exec("COMMIT");
207
+ } catch (e) {
208
+ db.exec("ROLLBACK");
209
+ throw e;
162
210
  }
163
211
 
164
212
  console.error(
@@ -185,17 +233,27 @@ export async function backfillVectors(
185
233
  "INSERT OR REPLACE INTO conversation_history_vec (id, vector) VALUES (?, ?)",
186
234
  );
187
235
 
188
- for (let i = 0; i < missingConvos.length; i++) {
189
- const row = missingConvos[i];
190
- const vec = serializeVector(await embeddings.embed(row.content));
191
- insertConvoVec.run(row.id, vec);
236
+ // Batch embed in chunks of 32
237
+ const BATCH_SIZE = 32;
238
+ db.exec("BEGIN");
239
+ try {
240
+ for (let i = 0; i < missingConvos.length; i += BATCH_SIZE) {
241
+ const batch = missingConvos.slice(i, i + BATCH_SIZE);
242
+ const vecs = await embeddings.embedBatch(batch.map((r) => r.content));
243
+ for (let j = 0; j < batch.length; j++) {
244
+ insertConvoVec.run(batch[j].id, serializeVector(vecs[j]));
245
+ }
192
246
 
193
- // Log progress every 100 chunks
194
- if ((i + 1) % 100 === 0) {
195
- console.error(
196
- `[vector-memory-mcp] ...${i + 1}/${missingConvos.length} conversation chunks`,
197
- );
247
+ if ((i + BATCH_SIZE) % 100 < BATCH_SIZE) {
248
+ console.error(
249
+ `[vector-memory-mcp] ...${Math.min(i + BATCH_SIZE, missingConvos.length)}/${missingConvos.length} conversation chunks`,
250
+ );
251
+ }
198
252
  }
253
+ db.exec("COMMIT");
254
+ } catch (e) {
255
+ db.exec("ROLLBACK");
256
+ throw e;
199
257
  }
200
258
 
201
259
  console.error(
package/server/index.ts CHANGED
@@ -25,17 +25,15 @@ async function main(): Promise<void> {
25
25
  const overrides = parseCliArgs(args);
26
26
  const config = loadConfig(overrides);
27
27
 
28
- // Initialize database
28
+ // Initialize database and backfill any missing vectors before services start
29
29
  const db = connectToDatabase(config.dbPath);
30
+ const embeddings = new EmbeddingsService(config.embeddingModel, config.embeddingDimension);
31
+ await backfillVectors(db, embeddings);
30
32
 
31
33
  // Initialize layers
32
34
  const repository = new MemoryRepository(db);
33
- const embeddings = new EmbeddingsService(config.embeddingModel, config.embeddingDimension);
34
35
  const memoryService = new MemoryService(repository, embeddings);
35
36
 
36
- // Backfill any missing vectors (e.g. after vec0-to-BLOB migration)
37
- await backfillVectors(db, embeddings);
38
-
39
37
  if (config.pluginMode) {
40
38
  console.error("[vector-memory-mcp] Running in plugin mode");
41
39
  }
@@ -111,10 +111,22 @@ export function createHttpApp(memoryService: MemoryService, config: Config): Hon
111
111
  embeddingDimension: config.embeddingDimension,
112
112
  historyEnabled: config.conversationHistory.enabled,
113
113
  pluginMode: config.pluginMode,
114
+ embeddingReady: memoryService.getEmbeddings().isReady,
114
115
  },
115
116
  });
116
117
  });
117
118
 
119
+ // Warmup endpoint — triggers ONNX model load if not already cached
120
+ app.post("/warmup", async (c) => {
121
+ const embeddings = memoryService.getEmbeddings();
122
+ if (embeddings.isReady) {
123
+ return c.json({ status: "already_warm" });
124
+ }
125
+ const start = Date.now();
126
+ await embeddings.warmup();
127
+ return c.json({ status: "warmed", elapsed: Date.now() - start });
128
+ });
129
+
118
130
  // Search endpoint
119
131
  app.post("/search", async (c) => {
120
132
  try {
package/scripts/lancedb-extract.ts DELETED
@@ -1,181 +0,0 @@
1
- #!/usr/bin/env bun
2
- /**
3
- * Standalone LanceDB data extractor — runs in a child process so that
4
- * @lancedb/lancedb native bindings never coexist with bun:sqlite's
5
- * extension loading in the same process.
6
- *
7
- * Usage: bun scripts/lancedb-extract.ts <lance-db-path>
8
- * Output: JSON on stdout — { memories: Row[], conversations: Row[] }
9
- */
10
-
11
- const source = process.argv[2];
12
- if (!source) {
13
- console.error("Usage: bun scripts/lancedb-extract.ts <lance-db-path>");
14
- process.exit(1);
15
- }
16
-
17
- // Arrow TimeUnit enum → divisor to convert to milliseconds.
18
- // 0=SECOND, 1=MILLISECOND, 2=MICROSECOND, 3=NANOSECOND
19
- // Negative divisor = multiply (seconds → ms needs ×1000).
20
- const TIME_UNIT_TO_MS_DIVISOR: Record<number, bigint> = {
21
- 0: -1000n, // seconds → ms (multiply by 1000)
22
- 1: 1n, // ms → no conversion
23
- 2: 1000n, // μs → ms
24
- 3: 1000000n, // ns → ms
25
- };
26
-
27
- function buildTimestampDivisors(schema: any): Map<string, bigint> {
28
- const map = new Map<string, bigint>();
29
- for (const field of schema.fields) {
30
- if (field.type.typeId === 10) {
31
- map.set(field.name, TIME_UNIT_TO_MS_DIVISOR[field.type.unit] ?? 1n);
32
- }
33
- }
34
- return map;
35
- }
36
-
37
- function columnValue(batch: any, colName: string, rowIdx: number): unknown {
38
- const col = batch.getChild(colName);
39
- if (!col) return undefined;
40
- try {
41
- return col.get(rowIdx);
42
- } catch {
43
- // Arrow's getter can throw on BigInt timestamps exceeding MAX_SAFE_INTEGER;
44
- // fall back to the raw typed array.
45
- let offset = rowIdx;
46
- for (const data of col.data) {
47
- if (offset < data.length) {
48
- return (data.values instanceof BigInt64Array || data.values instanceof BigUint64Array)
49
- ? data.values[offset]
50
- : null;
51
- }
52
- offset -= data.length;
53
- }
54
- return null;
55
- }
56
- }
57
-
58
- function toEpochMs(value: unknown, divisor: bigint = 1n): number {
59
- if (value == null) return Date.now();
60
- if (value instanceof Date) return value.getTime();
61
- if (typeof value === "bigint") {
62
- if (divisor < 0n) return Number(value * -divisor); // seconds → ms
63
- if (divisor === 1n) return Number(value);
64
- return Number(value / divisor);
65
- }
66
- if (typeof value === "number") {
67
- if (divisor < 0n) return value * Number(-divisor);
68
- if (divisor === 1n) return value;
69
- return Math.floor(value / Number(divisor));
70
- }
71
- return Date.now();
72
- }
73
-
74
- function toFloatArray(vec: unknown): number[] {
75
- if (Array.isArray(vec)) return vec;
76
- if (vec instanceof Float32Array) return Array.from(vec);
77
- if (vec && typeof (vec as any).toArray === "function") {
78
- return Array.from((vec as any).toArray());
79
- }
80
- if (ArrayBuffer.isView(vec)) {
81
- const view = vec as DataView;
82
- return Array.from(new Float32Array(view.buffer, view.byteOffset, view.byteLength / 4));
83
- }
84
- return [];
85
- }
86
-
87
- const BATCH_SIZE = 100;
88
- const lancedb = await import("@lancedb/lancedb");
89
- const db = await lancedb.connect(source);
90
- const tableNames = await db.tableNames();
91
- console.error(`Found tables: ${tableNames.join(", ")}`);
92
-
93
- const result: { memories: any[]; conversations: any[] } = {
94
- memories: [],
95
- conversations: [],
96
- };
97
-
98
- if (tableNames.includes("memories")) {
99
- const table = await db.openTable("memories");
100
- const total = await table.countRows();
101
- console.error(`Reading ${total} memories...`);
102
-
103
- // Paginated scan — query().toArrow() without offset/limit returns
104
- // non-deterministic results that can duplicate some rows and skip others.
105
- const schemaSample = await table.query().limit(1).toArrow();
106
- const tsDivisors = buildTimestampDivisors(schemaSample.schema);
107
- const seen = new Map<string, any>();
108
-
109
- for (let offset = 0; offset < total; offset += BATCH_SIZE) {
110
- const arrowTable = await table.query().offset(offset).limit(BATCH_SIZE).toArrow();
111
- for (const batch of arrowTable.batches) {
112
- for (let i = 0; i < batch.numRows; i++) {
113
- const id = columnValue(batch, "id", i) as string;
114
- const content = columnValue(batch, "content", i) as string;
115
- const lastAccessed = columnValue(batch, "last_accessed", i);
116
- const accessedMs = lastAccessed != null ? toEpochMs(lastAccessed, tsDivisors.get("last_accessed")) : null;
117
- // Deduplicate by ID: prefer most recently accessed, then longest content.
118
- const existing = seen.get(id);
119
- if (existing) {
120
- const existingAccess = existing.last_accessed ?? 0;
121
- const newAccess = accessedMs ?? 0;
122
- if (newAccess < existingAccess) continue;
123
- if (newAccess === existingAccess && content.length <= existing.content.length) continue;
124
- }
125
- seen.set(id, {
126
- id,
127
- content,
128
- metadata: columnValue(batch, "metadata", i) ?? "{}",
129
- vector: toFloatArray(columnValue(batch, "vector", i)),
130
- created_at: toEpochMs(columnValue(batch, "created_at", i), tsDivisors.get("created_at")),
131
- updated_at: toEpochMs(columnValue(batch, "updated_at", i), tsDivisors.get("updated_at")),
132
- last_accessed: accessedMs,
133
- superseded_by: columnValue(batch, "superseded_by", i) ?? null,
134
- usefulness: columnValue(batch, "usefulness", i) ?? 0,
135
- access_count: columnValue(batch, "access_count", i) ?? 0,
136
- });
137
- }
138
- }
139
- }
140
- result.memories = [...seen.values()];
141
- console.error(` ${result.memories.length} unique memories read (${total} rows scanned)`);
142
- }
143
-
144
- if (tableNames.includes("conversation_history")) {
145
- const table = await db.openTable("conversation_history");
146
- const total = await table.countRows();
147
- console.error(`Reading ${total} conversation chunks...`);
148
-
149
- const schemaSample = await table.query().limit(1).toArrow();
150
- const tsDivisors = buildTimestampDivisors(schemaSample.schema);
151
- const seen = new Map<string, any>();
152
-
153
- for (let offset = 0; offset < total; offset += BATCH_SIZE) {
154
- const arrowTable = await table.query().offset(offset).limit(BATCH_SIZE).toArrow();
155
- for (const batch of arrowTable.batches) {
156
- for (let i = 0; i < batch.numRows; i++) {
157
- const id = columnValue(batch, "id", i) as string;
158
- const content = columnValue(batch, "content", i) as string;
159
- const existing = seen.get(id);
160
- if (existing && existing.content.length >= content.length) continue;
161
- seen.set(id, {
162
- id,
163
- content,
164
- metadata: columnValue(batch, "metadata", i) ?? "{}",
165
- vector: toFloatArray(columnValue(batch, "vector", i)),
166
- created_at: toEpochMs(columnValue(batch, "created_at", i), tsDivisors.get("created_at")),
167
- session_id: columnValue(batch, "session_id", i),
168
- role: columnValue(batch, "role", i),
169
- message_index_start: columnValue(batch, "message_index_start", i) ?? 0,
170
- message_index_end: columnValue(batch, "message_index_end", i) ?? 0,
171
- project: columnValue(batch, "project", i) ?? "",
172
- });
173
- }
174
- }
175
- }
176
- result.conversations = [...seen.values()];
177
- console.error(` ${result.conversations.length} unique conversation chunks read (${total} rows scanned)`);
178
- }
179
-
180
- await db.close?.();
181
- process.stdout.write(JSON.stringify(result));