vektor-slipstream 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +252 -0
- package/detect-hardware.js +181 -0
- package/examples/example-claude-mcp.js +284 -0
- package/examples/example-langchain-researcher.js +116 -0
- package/examples/example-openai-assistant.js +195 -0
- package/examples/examples-README.md +161 -0
- package/models/model_quantized.onnx +0 -0
- package/package.json +48 -0
- package/slipstream-core.js +334 -0
- package/slipstream-db.js +140 -0
- package/slipstream-embedder.js +273 -0
package/README.md
ADDED
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
\# vektor-slipstream
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
\*\*Hardware-accelerated persistent memory for AI agents.\*\*
|
|
6
|
+
|
|
7
|
+
Local-first. Zero cloud dependency. $0 embedding cost.
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
|
|
13
|
+
npm install vektor-slipstream
|
|
14
|
+
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
\---
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
\## What it is
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
A drop-in memory layer for any AI agent. Your agent remembers everything across sessions — preferences, decisions, research, conversations — stored in a single portable SQLite file on your machine.
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
No OpenAI embedding bill. No cloud roundtrip. No API key for memory.
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
```js
|
|
36
|
+
|
|
37
|
+
const { createMemory } = require('vektor-slipstream');
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
const memory = await createMemory({ agentId: 'my-agent' });
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
// Store a memory
|
|
46
|
+
|
|
47
|
+
await memory.remember('User prefers TypeScript over JavaScript');
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
// Recall by semantic similarity — avg 8ms, fully local
|
|
52
|
+
|
|
53
|
+
const results = await memory.recall('coding preferences');
|
|
54
|
+
|
|
55
|
+
// → \[{ content: 'User prefers TypeScript...', score: 0.97, id: 1 }]
|
|
56
|
+
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
\---
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
\## How it works
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
\- \*\*Local ONNX embeddings\*\* — `all-MiniLM-L6-v2 INT8` runs on your hardware via `onnxruntime-node`. No API calls.
|
|
70
|
+
|
|
71
|
+
\- \*\*Hyper-PRAGMA SQLite\*\* — WAL mode, 1GB mmap, 64MB cache. Recall at RAM speeds.
|
|
72
|
+
|
|
73
|
+
\- \*\*MAGMA graph\*\* — 4-layer associative graph (semantic · causal · temporal · entity). Memories connect to each other.
|
|
74
|
+
|
|
75
|
+
\- \*\*Hardware auto-detection\*\* — uses CUDA on NVIDIA, CoreML on Apple Silicon, CPU everywhere else.
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
\---
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
\## API
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
```js
|
|
88
|
+
|
|
89
|
+
const { createMemory } = require('vektor-slipstream');
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
const memory = await createMemory({
|
|
94
|
+
|
|
95
|
+
  agentId: 'my-agent', // isolates memories per agent
|
|
96
|
+
|
|
97
|
+
  dbPath: './memory.db', // default: ./slipstream-memory.db
|
|
98
|
+
|
|
99
|
+
  silent: false, // suppress boot banner
|
|
100
|
+
|
|
101
|
+
});
|
|
102
|
+
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
\### `memory.remember(text, opts?)`
|
|
108
|
+
|
|
109
|
+
Store a memory with its vector embedding.
|
|
110
|
+
|
|
111
|
+
```js
|
|
112
|
+
|
|
113
|
+
const { id } = await memory.remember('User is based in Brisbane, AU');
|
|
114
|
+
|
|
115
|
+
const { id } = await memory.remember('Closed Series A at $4M', { importance: 5 });
|
|
116
|
+
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
\### `memory.recall(query, topK?)`
|
|
122
|
+
|
|
123
|
+
Semantic recall — returns top-k most relevant memories.
|
|
124
|
+
|
|
125
|
+
```js
|
|
126
|
+
|
|
127
|
+
const results = await memory.recall('user location', 5);
|
|
128
|
+
|
|
129
|
+
// → \[{ id, content, score, importance }]
|
|
130
|
+
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
\### `memory.graph(concept, opts?)`
|
|
136
|
+
|
|
137
|
+
Breadth-first traversal from a concept — finds connected memories.
|
|
138
|
+
|
|
139
|
+
```js
|
|
140
|
+
|
|
141
|
+
const { nodes, edges } = await memory.graph('fundraising', { hops: 2 });
|
|
142
|
+
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
\### `memory.delta(topic, days?)`
|
|
148
|
+
|
|
149
|
+
What changed on a topic in the last N days.
|
|
150
|
+
|
|
151
|
+
```js
|
|
152
|
+
|
|
153
|
+
const changes = await memory.delta('project status', 7);
|
|
154
|
+
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
\### `memory.briefing()`
|
|
160
|
+
|
|
161
|
+
Summary of everything learned in the last 24 hours. Inject into system prompt.
|
|
162
|
+
|
|
163
|
+
```js
|
|
164
|
+
|
|
165
|
+
const brief = await memory.briefing();
|
|
166
|
+
|
|
167
|
+
// → "\[SLIPSTREAM BRIEFING — last 24h — 12 memories]\\n1. ..."
|
|
168
|
+
|
|
169
|
+
```
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
\---
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
\## Examples
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
Three production-ready agent examples are included in `examples/`:
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
| File | Description |
|
|
186
|
+
|
|
187
|
+
|------|-------------|
|
|
188
|
+
|
|
189
|
+
| `example-langchain-researcher.js` | LangChain agent that builds a persistent knowledge base |
|
|
190
|
+
|
|
191
|
+
| `example-openai-assistant.js` | OpenAI assistant with automatic cross-session memory |
|
|
192
|
+
|
|
193
|
+
| `example-claude-mcp.js` | Claude MCP server + direct chat mode |
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
See \[`examples-README.md`](examples/examples-README.md) for setup and usage.
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
\---
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
\## Performance
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
| Metric | Value |
|
|
210
|
+
|
|
211
|
+
|--------|-------|
|
|
212
|
+
|
|
213
|
+
| Recall latency | \~8ms avg (local SQLite) |
|
|
214
|
+
|
|
215
|
+
| Embedding cost | $0 — fully local ONNX |
|
|
216
|
+
|
|
217
|
+
| Embedding latency | \~10ms GPU / \~25ms CPU (post-warmup) |
|
|
218
|
+
|
|
219
|
+
| DB engine | SQLite WAL + 1GB mmap |
|
|
220
|
+
|
|
221
|
+
| Vector dimensions | 384 (all-MiniLM-L6-v2 INT8) |
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
\---
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
\## Requirements
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
\- Node.js 18+
|
|
234
|
+
|
|
235
|
+
\- \~25MB disk for the ONNX model (bundled)
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
\---
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
\## License
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
Commercial licence — see LICENSE file.
|
|
248
|
+
|
|
249
|
+
One-time purchase includes all future updates.
|
|
250
|
+
|
|
251
|
+
Purchase at \[vektormemory.com](https://vektormemory.com)
|
|
252
|
+
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* VEKTOR SLIPSTREAM
|
|
5
|
+
* detect-hardware.js — Execution Provider Probe
|
|
6
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
7
|
+
* Probes the host system for hardware acceleration capabilities WITHOUT
|
|
8
|
+
* attempting to load any native binaries. Uses only OS-level CLI tools that
|
|
9
|
+
* ship with the relevant drivers/OS — so this file itself has zero native deps
|
|
10
|
+
* and zero risk of crashing the terminal.
|
|
11
|
+
*
|
|
12
|
+
* Returns: 'cuda' | 'coreml' | 'cpu'
|
|
13
|
+
* Execution time: < 10ms (CLI probe only, no driver load)
|
|
14
|
+
*
|
|
15
|
+
* Consumed by:
|
|
16
|
+
* - postinstall.js → to decide which onnxruntime variant to install
|
|
17
|
+
* - slipstream.js → to decide which EP to pass to InferenceSession.create()
|
|
18
|
+
* ─────────────────────────────────────────────────────────────────────────────
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
const os = require('os');
|
|
22
|
+
const { execSync } = require('child_process');
|
|
23
|
+
|
|
24
|
+
// ─── Constants ───────────────────────────────────────────────────────────────
|
|
25
|
+
|
|
26
|
+
// Execution-provider identifiers. Frozen so consumers can't mutate the table.
const EP = Object.freeze({
  CUDA: 'cuda',
  COREML: 'coreml',
  CPU: 'cpu',
});
|
|
31
|
+
|
|
32
|
+
// ─── Probe Helpers ───────────────────────────────────────────────────────────
|
|
33
|
+
|
|
34
|
+
/**
 * macOS probe: decide between CoreML and plain CPU.
 *
 * Reads the CPU brand string through `sysctl`, which ships with every macOS
 * install — zero extra tooling required. Apple Silicon machines report a
 * brand containing "Apple", which unlocks the CoreML execution provider;
 * Intel Macs (or any sysctl failure) fall back to CPU.
 *
 * @returns {'coreml'|'cpu'}
 */
function probeDarwin() {
  let brand = '';
  try {
    const raw = execSync('sysctl -n machdep.cpu.brand_string 2>/dev/null', {
      stdio: 'pipe',
      timeout: 2000,
    });
    brand = raw.toString().trim();
  } catch (_) {
    // sysctl unavailable or exited non-zero — leave brand empty (→ CPU).
  }
  return brand.includes('Apple') ? EP.COREML : EP.CPU;
}
|
|
54
|
+
|
|
55
|
+
/**
 * Linux / Windows probe: detect a usable NVIDIA GPU.
 *
 * `nvidia-smi` is installed alongside the NVIDIA driver package. A zero exit
 * code proves both that the hardware exists AND that the driver is loaded —
 * both are required for CUDA. Output is discarded (`stdio: 'ignore'`); only
 * the exit status matters. The 3000ms timeout guards against hangs on
 * misconfigured driver environments.
 *
 * @returns {'cuda'|'cpu'}
 */
function probeNvidia() {
  try {
    execSync('nvidia-smi', { stdio: 'ignore', timeout: 3000 });
  } catch (_) {
    // Binary absent, or GPU present but driver not loaded — no CUDA.
    return EP.CPU;
  }
  return EP.CUDA;
}
|
|
71
|
+
|
|
72
|
+
// ─── Main Export ─────────────────────────────────────────────────────────────
|
|
73
|
+
|
|
74
|
+
/**
 * detectHardware()
 *
 * Synchronous hardware probe. Picks the highest-performance execution
 * provider the current machine supports:
 *
 *   macOS + Apple Silicon  → 'coreml' (Neural Engine via CoreML)
 *   macOS + Intel          → 'cpu'    (no CoreML acceleration)
 *   Linux/Win + NVIDIA     → 'cuda'   (GPU via CUDA)
 *   Linux/Win + no GPU     → 'cpu'    (optimised WASM SIMD path)
 *   any other platform     → 'cpu'    (safe universal fallback)
 *
 * @returns {'cuda'|'coreml'|'cpu'}
 */
function detectHardware() {
  const platform = os.platform();

  if (platform === 'darwin') {
    return probeDarwin();
  }
  if (platform === 'linux' || platform === 'win32') {
    return probeNvidia();
  }

  // FreeBSD, Android, anything unknown — CPU is always safe.
  return EP.CPU;
}
|
|
104
|
+
|
|
105
|
+
/**
 * detectHardwareAsync()
 *
 * Non-blocking wrapper around detectHardware(). Defers the probe to a
 * setImmediate tick so module initialisation doesn't stall the event loop.
 * Preferred for use inside the createMemory() boot sequence.
 *
 * @returns {Promise<'cuda'|'coreml'|'cpu'>}
 */
async function detectHardwareAsync() {
  // Yield one macrotask, then run the synchronous probe.
  await new Promise((tick) => setImmediate(tick));
  return detectHardware();
}
|
|
119
|
+
|
|
120
|
+
/**
 * getEPLabel()
 * Maps an execution-provider id to the human-readable name shown in the
 * audit-log banner. Any unrecognised id falls back to the CPU label.
 *
 * @param {'cuda'|'coreml'|'cpu'} ep
 * @returns {string}
 */
function getEPLabel(ep) {
  switch (ep) {
    case EP.CUDA:
      return 'CUDA (NVIDIA GPU)';
    case EP.COREML:
      return 'CoreML (Apple Neural Engine)';
    default:
      // EP.CPU and anything unknown share the safe CPU label.
      return 'CPU (WASM SIMD)';
  }
}
|
|
135
|
+
|
|
136
|
+
// Public surface: sync + async probes, the banner label helper, and the
// frozen EP id table.
module.exports = {
  detectHardware,
  detectHardwareAsync,
  getEPLabel,
  EP, // export constants so consumers don't hardcode strings
};
|
|
142
|
+
|
|
143
|
+
// ─── CLI Self-Test ───────────────────────────────────────────────────────────
// Run directly to validate the probe on any target machine:
//   node detect-hardware.js
//
// Expected outputs:
//   Apple M-series → [SLIPSTREAM PROBE] EP: CoreML (Apple Neural Engine) ✓
//   NVIDIA machine → [SLIPSTREAM PROBE] EP: CUDA (NVIDIA GPU) ✓
//   Any other      → [SLIPSTREAM PROBE] EP: CPU (WASM SIMD) ✓

// Only runs when this file is the entry point, not when require()d.
if (require.main === module) {
  // Time the probe so the banner can report how long detection took.
  const start = Date.now();
  const ep = detectHardware();
  const ms = Date.now() - start;

  // Rocket for an accelerated provider, gear for plain CPU.
  const icon = ep === EP.CPU ? '⚙️ ' : '🚀';

  console.log('');
  console.log(' ╔══════════════════════════════════════════╗');
  console.log(' ║ VEKTOR SLIPSTREAM — PROBE ║');
  console.log(' ╚══════════════════════════════════════════╝');
  console.log('');
  console.log(` ${icon} EP: ${getEPLabel(ep)}`);
  console.log(` ⏱ Probe: ${ms}ms`);
  console.log(` 🖥 Platform: ${os.platform()} / ${os.arch()}`);
  // totalmem() is bytes; 1024 ** 3 converts to GiB.
  console.log(` 💾 RAM: ${(os.totalmem() / 1024 ** 3).toFixed(1)} GB`);
  console.log('');

  // Hint block: tell the operator what would unlock acceleration.
  if (ep === EP.CPU) {
    console.log(' ℹ️ No GPU acceleration detected.');
    console.log(' Slipstream will run on optimised CPU (WASM SIMD).');
    console.log(' For CUDA: ensure nvidia-smi is accessible in PATH.');
    console.log(' For CoreML: Apple Silicon Mac required.');
  } else {
    console.log(` ✓ Hardware acceleration confirmed. Slipstream will`);
    console.log(` engage ${getEPLabel(ep)} for sub-12ms embeddings.`);
  }

  console.log('');
}
|
|
@@ -0,0 +1,284 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* example-claude-mcp.js
|
|
3
|
+
* VEKTOR SLIPSTREAM — Claude MCP Agent Example
|
|
4
|
+
* ─────────────────────────────────────────────
|
|
5
|
+
* Connects Claude to Slipstream persistent memory via the
|
|
6
|
+
* Model Context Protocol (MCP). Claude can recall, store,
|
|
7
|
+
* and traverse the memory graph across every conversation.
|
|
8
|
+
*
|
|
9
|
+
* Two modes:
|
|
10
|
+
* 1. MCP SERVER MODE — run as a stdio MCP server for Claude Desktop
|
|
11
|
+
* 2. DIRECT CHAT MODE — interactive chat with memory, no MCP needed
|
|
12
|
+
*
|
|
13
|
+
* Install:
|
|
14
|
+
* npm install vektor-slipstream @anthropic-ai/sdk
|
|
15
|
+
*
|
|
16
|
+
* MCP server mode (add to claude_desktop_config.json):
|
|
17
|
+
* {
|
|
18
|
+
* "mcpServers": {
|
|
19
|
+
* "slipstream": {
|
|
20
|
+
* "command": "node",
|
|
21
|
+
* "args": ["/path/to/example-claude-mcp.js", "--mcp"],
|
|
22
|
+
* "env": { "SLIPSTREAM_AGENT_ID": "claude-desktop" }
|
|
23
|
+
* }
|
|
24
|
+
* }
|
|
25
|
+
* }
|
|
26
|
+
*
|
|
27
|
+
* Direct chat mode:
|
|
28
|
+
* ANTHROPIC_API_KEY=sk-ant-... node example-claude-mcp.js
|
|
29
|
+
*/
|
|
30
|
+
|
|
31
|
+
'use strict';
|
|
32
|
+
|
|
33
|
+
const { createMemory } = require('vektor-slipstream');
|
|
34
|
+
const Anthropic = require('@anthropic-ai/sdk');
|
|
35
|
+
const readline = require('readline');
|
|
36
|
+
|
|
37
|
+
// --mcp flag selects stdio MCP-server mode; otherwise interactive chat.
const IS_MCP = process.argv.includes('--mcp');
// Memory namespace — lets different Claude surfaces isolate (or share) memories.
const AGENT_ID = process.env.SLIPSTREAM_AGENT_ID || 'claude-mcp';
const MODEL = 'claude-haiku-4-5-20251001'; // fast + cheap for tool use
|
|
40
|
+
|
|
41
|
+
// ── MCP Tool Definitions ──────────────────────────────────────────────────────
|
|
42
|
+
|
|
43
|
+
// Tool schemas shared by both MCP server mode and direct-chat tool use.
// Each entry follows the Anthropic/MCP tool shape: name, description,
// and a JSON-Schema `input_schema`.
const MCP_TOOLS = [
  {
    // Semantic search over stored memories.
    name: 'vektor_recall',
    description: 'Search Slipstream persistent memory for relevant context. Call before answering any question that might have prior context.',
    input_schema: {
      type: 'object',
      properties: {
        query: { type: 'string', description: 'What to search for.' },
        top_k: { type: 'integer', description: 'Number of results (default 5, max 20).', default: 5 },
      },
      required: ['query'],
    },
  },
  {
    // Persist a new memory with an optional importance weight.
    name: 'vektor_store',
    description: 'Store a fact, preference, decision, or piece of context in persistent memory. Use whenever the user shares something worth remembering.',
    input_schema: {
      type: 'object',
      properties: {
        content: { type: 'string', description: 'The memory to store as a clear complete sentence.' },
        importance: { type: 'number', description: 'Importance 1-5. 5=critical (name/key decision), 3=useful, 1=minor.' },
      },
      required: ['content'],
    },
  },
  {
    // Graph traversal: memories connected to a concept.
    name: 'vektor_graph',
    description: 'Traverse the memory graph from a concept — finds connected memories and relationships. Use to understand full context around a topic.',
    input_schema: {
      type: 'object',
      properties: {
        concept: { type: 'string', description: 'The concept to start from.' },
        hops: { type: 'integer', description: 'Traversal depth 1-3 (default 2).', default: 2 },
      },
      required: ['concept'],
    },
  },
  {
    // Recency view: what changed about a topic over the last N days.
    name: 'vektor_delta',
    description: 'See what changed in memory on a topic over recent days.',
    input_schema: {
      type: 'object',
      properties: {
        topic: { type: 'string', description: 'The topic to check.' },
        days: { type: 'integer', description: 'How many days back to look (default 7).', default: 7 },
      },
      required: ['topic'],
    },
  },
];
|
|
93
|
+
|
|
94
|
+
// ── Tool execution ────────────────────────────────────────────────────────────
|
|
95
|
+
|
|
96
|
+
/**
 * Execute one Slipstream tool call on behalf of the model.
 *
 * Fix over the original: defaults use `??` instead of `||`, so explicit
 * falsy arguments (e.g. `top_k: 0`, `importance: 0`, `days: 0`) are
 * respected rather than silently replaced by the default.
 *
 * @param {string} name - Tool name; one of the MCP_TOOLS names.
 * @param {object} input - Parsed tool arguments supplied by the model.
 * @param {object} memory - Slipstream memory instance exposing
 *   recall / remember / graph / delta (all async).
 * @returns {Promise<object>} JSON-serialisable result payload for the model;
 *   `{ error }` for an unrecognised tool name.
 */
async function runTool(name, input, memory) {
  switch (name) {
    case 'vektor_recall': {
      const results = await memory.recall(input.query, input.top_k ?? 5);
      if (!results.length) return { found: 0, memories: [] };
      return {
        found: results.length,
        memories: results.map(r => ({
          content: r.content,
          relevance: r.score,
          id: r.id,
        })),
      };
    }
    case 'vektor_store': {
      const { id } = await memory.remember(input.content, { importance: input.importance ?? 2 });
      return { stored: true, memory_id: id, content: input.content };
    }
    case 'vektor_graph': {
      const { nodes, edges } = await memory.graph(input.concept, { hops: input.hops ?? 2 });
      return {
        // Cap the node payload so a dense graph doesn't blow out the context.
        nodes: nodes.slice(0, 10).map(n => ({ id: n.id, content: n.content, importance: n.importance })),
        edge_count: edges.length,
        node_count: nodes.length,
      };
    }
    case 'vektor_delta': {
      // Compute the default once so the query and the echo always agree.
      const days = input.days ?? 7;
      const changes = await memory.delta(input.topic, days);
      return {
        topic: input.topic,
        days,
        changes: changes.slice(0, 10).map(c => ({ content: c.content, updated_at: c.updated_at })),
      };
    }
    default:
      return { error: `Unknown tool: ${name}` };
  }
}
|
|
134
|
+
|
|
135
|
+
// ── MCP Server Mode (stdio) ───────────────────────────────────────────────────
|
|
136
|
+
|
|
137
|
+
/**
 * MCP server mode: newline-delimited JSON-RPC 2.0 over stdio.
 *
 * Supports `initialize`, `tools/list`, and `tools/call`. Fixes over the
 * original implementation:
 *   - unknown methods now go out in the JSON-RPC `error` member of the
 *     response envelope instead of being smuggled inside `result`;
 *   - notifications (requests without an `id`, e.g. the client's
 *     `notifications/initialized`) get NO response, per the JSON-RPC spec;
 *   - lines are processed strictly in arrival order through a promise
 *     chain, so responses cannot interleave when several stdin chunks
 *     arrive while an earlier (async) tool call is still running.
 *
 * @param {object} memory - Slipstream memory instance used by tool calls.
 */
async function runMCPServer(memory) {
  process.stdin.setEncoding('utf8');
  let buffer = '';

  // Dispatch one parsed request. Returns { result } on success, or
  // { error } for protocol-level failures — the caller builds the envelope.
  async function dispatch(req) {
    if (req.method === 'initialize') {
      return {
        result: {
          protocolVersion: '2024-11-05',
          serverInfo: { name: 'vektor-slipstream', version: '1.0.0' },
          capabilities: { tools: {} },
        },
      };
    }

    if (req.method === 'tools/list') {
      return { result: { tools: MCP_TOOLS } };
    }

    if (req.method === 'tools/call') {
      const { name, arguments: args } = req.params;
      try {
        const result = await runTool(name, args, memory);
        return {
          result: { content: [{ type: 'text', text: JSON.stringify(result, null, 2) }] },
        };
      } catch (e) {
        // Tool failures are reported in-band (isError) so the model can react.
        return {
          result: { content: [{ type: 'text', text: `Error: ${e.message}` }], isError: true },
        };
      }
    }

    return { error: { code: -32601, message: 'Method not found' } };
  }

  // Parse, dispatch, and answer a single line.
  async function processLine(line) {
    let req;
    try {
      req = JSON.parse(line);
    } catch (_) {
      const err = JSON.stringify({ jsonrpc: '2.0', id: null, error: { code: -32700, message: 'Parse error' } });
      process.stdout.write(err + '\n');
      return;
    }

    const outcome = await dispatch(req);

    // JSON-RPC notifications carry no id and MUST NOT be answered.
    if (req.id === undefined) return;

    process.stdout.write(JSON.stringify({ jsonrpc: '2.0', id: req.id, ...outcome }) + '\n');
  }

  // Serialise line handling so responses preserve arrival order.
  let queue = Promise.resolve();

  process.stdin.on('data', chunk => {
    buffer += chunk;
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep any trailing partial line for the next chunk
    for (const line of lines) {
      if (!line.trim()) continue;
      queue = queue.then(() => processLine(line));
    }
  });
}
|
|
192
|
+
|
|
193
|
+
// ── Direct Chat Mode ──────────────────────────────────────────────────────────
|
|
194
|
+
|
|
195
|
+
/**
 * Direct chat mode: interactive REPL where Claude calls the Slipstream
 * tools itself via the Anthropic Messages API (no MCP involved).
 *
 * NOTE(review): `new Anthropic()` presumably picks up ANTHROPIC_API_KEY
 * from the environment — confirm against the SDK's default auth behaviour.
 *
 * @param {object} memory - Slipstream memory instance (briefing + tools).
 */
async function runDirectChat(memory) {
  const client = new Anthropic();
  // Full conversation transcript, re-sent on every API call.
  const messages = [];

  // Seed the system prompt with the last-24h memory briefing.
  const briefing = await memory.briefing();
  const system = `You are a persistent assistant with long-term memory via VEKTOR Slipstream.

You have four memory tools: vektor_recall, vektor_store, vektor_graph, vektor_delta.
- Always recall before answering questions that might have prior context
- Store important facts the user shares
- Be proactive about remembering preferences and decisions

${briefing}`;

  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  // Promise-wrapped readline question for use with await.
  const prompt = () => new Promise(resolve => rl.question('\nYou: ', resolve));

  console.log('\n[CLAUDE+SLIPSTREAM] Ready. Memories persist across sessions.');
  console.log('[CLAUDE+SLIPSTREAM] Type "exit" to quit.\n');

  // Outer loop: one iteration per user turn.
  while (true) {
    const userInput = (await prompt()).trim();
    if (!userInput) continue;
    if (userInput.toLowerCase() === 'exit') { rl.close(); break; }

    messages.push({ role: 'user', content: userInput });

    // Agentic loop
    // Inner loop: keep calling the API until Claude stops requesting tools.
    while (true) {
      const response = await client.messages.create({
        model: MODEL,
        max_tokens: 1024,
        system,
        tools: MCP_TOOLS,
        messages,
      });

      // The assistant turn (including any tool_use blocks) must be appended
      // before the matching tool_result turn.
      const assistantMsg = { role: 'assistant', content: response.content };
      messages.push(assistantMsg);

      if (response.stop_reason !== 'tool_use') {
        // Final text response
        const text = response.content.find(b => b.type === 'text')?.text || '';
        console.log(`\nClaude: ${text}`);
        break;
      }

      // Handle tool calls
      const toolResults = [];
      for (const block of response.content) {
        if (block.type !== 'tool_use') continue;
        const result = await runTool(block.name, block.input, memory);
        // Trace each tool call (truncated) so the user can see memory activity.
        console.log(`  [${block.name}] ${JSON.stringify(result).slice(0, 100)}...`);
        toolResults.push({
          type: 'tool_result',
          tool_use_id: block.id,
          content: JSON.stringify(result),
        });
      }
      // Tool results go back as a user-role turn, per the Messages API.
      messages.push({ role: 'user', content: toolResults });
    }
  }

  console.log('\n[CLAUDE+SLIPSTREAM] Session ended. Memories saved.');
}
|
|
260
|
+
|
|
261
|
+
// ── Entry Point ───────────────────────────────────────────────────────────────
|
|
262
|
+
|
|
263
|
+
/**
 * Entry point: boot Slipstream memory, then hand off to whichever mode the
 * CLI selected (--mcp → stdio MCP server, otherwise interactive chat).
 */
async function main() {
  // In MCP mode stdout is reserved for JSON-RPC, so stay quiet there.
  if (!IS_MCP) {
    console.log('\n[CLAUDE+SLIPSTREAM] Booting Slipstream memory...');
  }

  const memory = await createMemory({
    agentId: AGENT_ID,
    dbPath: './claude-memory.db',
    silent: IS_MCP, // suppress banner in MCP mode (stdout is JSON-RPC)
  });

  // Pick the mode handler once, then run it.
  const runMode = IS_MCP ? runMCPServer : runDirectChat;
  await runMode(memory);
}

main().catch(e => {
  // Never print to the JSON-RPC stream in MCP mode; the exit code signals failure.
  if (!IS_MCP) console.error('[CLAUDE+SLIPSTREAM] Error:', e.message);
  process.exit(1);
});
|