@aeriondyseti/vector-memory-mcp 2.2.1 → 2.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aeriondyseti/vector-memory-mcp",
3
- "version": "2.2.1",
3
+ "version": "2.2.2",
4
4
  "description": "A zero-configuration RAG memory server for MCP clients",
5
5
  "type": "module",
6
6
  "main": "src/index.ts",
@@ -14,10 +14,25 @@ if (!source) {
14
14
  process.exit(1);
15
15
  }
16
16
 
17
+ /**
18
+ * Read a value from an Arrow column at a given row index.
19
+ * Arrow timestamp columns return raw BigInt values, which this helper
20
+ * passes through unchanged; callers convert them to epoch-ms via toEpochMs, bypassing Arrow's bigIntToNumber safety check.
21
+ */
22
+ function columnValue(batch: any, colName: string, rowIdx: number): unknown {
23
+ const col = batch.getChild(colName);
24
+ if (!col) return undefined;
25
+ return col.get(rowIdx);
26
+ }
27
+
17
28
  function toEpochMs(value: unknown): number {
18
29
  if (typeof value === "number") return value;
19
30
  if (value instanceof Date) return value.getTime();
20
- if (typeof value === "bigint") return Number(value);
31
+ if (typeof value === "bigint") {
32
+ // Arrow timestamps are microseconds; convert to milliseconds.
33
+ const ms = value / 1000n;
34
+ return Number(ms);
35
+ }
21
36
  return Date.now();
22
37
  }
23
38
 
@@ -44,32 +59,30 @@ const result: { memories: any[]; conversations: any[] } = {
44
59
  conversations: [],
45
60
  };
46
61
 
47
- const BATCH_SIZE = 500;
48
-
49
62
  if (tableNames.includes("memories")) {
50
63
  const table = await db.openTable("memories");
51
64
  const total = await table.countRows();
52
65
  console.error(`Reading ${total} memories...`);
53
66
 
54
- let offset = 0;
55
- while (true) {
56
- const rows = await table.query().limit(BATCH_SIZE).offset(offset).toArray();
57
- if (rows.length === 0) break;
58
- for (const row of rows) {
67
+ // Use toArrow() to get raw Arrow RecordBatches, bypassing StructRow
68
+ // property accessors that throw on BigInt timestamps.
69
+ const arrowTable = await table.query().toArrow();
70
+ for (const batch of arrowTable.batches) {
71
+ for (let i = 0; i < batch.numRows; i++) {
72
+ const lastAccessed = columnValue(batch, "last_accessed", i);
59
73
  result.memories.push({
60
- id: row.id,
61
- content: row.content,
62
- metadata: row.metadata ?? "{}",
63
- vector: toFloatArray(row.vector),
64
- created_at: toEpochMs(row.created_at),
65
- updated_at: toEpochMs(row.updated_at),
66
- last_accessed: row.last_accessed != null ? toEpochMs(row.last_accessed) : null,
67
- superseded_by: row.superseded_by ?? null,
68
- usefulness: row.usefulness ?? 0,
69
- access_count: row.access_count ?? 0,
74
+ id: columnValue(batch, "id", i),
75
+ content: columnValue(batch, "content", i),
76
+ metadata: columnValue(batch, "metadata", i) ?? "{}",
77
+ vector: toFloatArray(columnValue(batch, "vector", i)),
78
+ created_at: toEpochMs(columnValue(batch, "created_at", i)),
79
+ updated_at: toEpochMs(columnValue(batch, "updated_at", i)),
80
+ last_accessed: lastAccessed != null ? toEpochMs(lastAccessed) : null,
81
+ superseded_by: columnValue(batch, "superseded_by", i) ?? null,
82
+ usefulness: columnValue(batch, "usefulness", i) ?? 0,
83
+ access_count: columnValue(batch, "access_count", i) ?? 0,
70
84
  });
71
85
  }
72
- offset += BATCH_SIZE;
73
86
  }
74
87
  console.error(` ${result.memories.length} memories read`);
75
88
  }
@@ -79,25 +92,22 @@ if (tableNames.includes("conversation_history")) {
79
92
  const total = await table.countRows();
80
93
  console.error(`Reading ${total} conversation chunks...`);
81
94
 
82
- let offset = 0;
83
- while (true) {
84
- const rows = await table.query().limit(BATCH_SIZE).offset(offset).toArray();
85
- if (rows.length === 0) break;
86
- for (const row of rows) {
95
+ const arrowTable = await table.query().toArrow();
96
+ for (const batch of arrowTable.batches) {
97
+ for (let i = 0; i < batch.numRows; i++) {
87
98
  result.conversations.push({
88
- id: row.id,
89
- content: row.content,
90
- metadata: row.metadata ?? "{}",
91
- vector: toFloatArray(row.vector),
92
- created_at: toEpochMs(row.created_at),
93
- session_id: row.session_id,
94
- role: row.role,
95
- message_index_start: row.message_index_start ?? 0,
96
- message_index_end: row.message_index_end ?? 0,
97
- project: row.project ?? "",
99
+ id: columnValue(batch, "id", i),
100
+ content: columnValue(batch, "content", i),
101
+ metadata: columnValue(batch, "metadata", i) ?? "{}",
102
+ vector: toFloatArray(columnValue(batch, "vector", i)),
103
+ created_at: toEpochMs(columnValue(batch, "created_at", i)),
104
+ session_id: columnValue(batch, "session_id", i),
105
+ role: columnValue(batch, "role", i),
106
+ message_index_start: columnValue(batch, "message_index_start", i) ?? 0,
107
+ message_index_end: columnValue(batch, "message_index_end", i) ?? 0,
108
+ project: columnValue(batch, "project", i) ?? "",
98
109
  });
99
110
  }
100
- offset += BATCH_SIZE;
101
111
  }
102
112
  console.error(` ${result.conversations.length} conversation chunks read`);
103
113
  }