vektor-slipstream 1.0.1 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,181 @@
1
+ 'use strict';
2
+
3
+ /**
4
+ * VEKTOR SLIPSTREAM
5
+ * detect-hardware.js — Execution Provider Probe
6
+ * ─────────────────────────────────────────────────────────────────────────────
7
+ * Probes the host system for hardware acceleration capabilities WITHOUT
8
+ * attempting to load any native binaries. Uses only OS-level CLI tools that
9
+ * ship with the relevant drivers/OS — so this file itself has zero native deps
10
+ * and zero risk of crashing the terminal.
11
+ *
12
+ * Returns: 'cuda' | 'coreml' | 'cpu'
13
+ * Execution time: < 10ms (CLI probe only, no driver load)
14
+ *
15
+ * Consumed by:
16
+ * - postinstall.js → to decide which onnxruntime variant to install
17
+ * - slipstream.js → to decide which EP to pass to InferenceSession.create()
18
+ * ─────────────────────────────────────────────────────────────────────────────
19
+ */
20
+
21
+ const os = require('os');
22
+ const { execSync } = require('child_process');
23
+
24
+ // ─── Constants ───────────────────────────────────────────────────────────────
25
+
26
// Supported ONNX Runtime execution providers. Frozen so consumers
// cannot accidentally mutate the shared constant table.
const EP = Object.freeze({ CUDA: 'cuda', COREML: 'coreml', CPU: 'cpu' });
31
+
32
+ // ─── Probe Helpers ───────────────────────────────────────────────────────────
33
+
34
+ /**
35
+ * macOS: Check for Apple Silicon via sysctl.
36
+ * sysctl is a native macOS syscall utility — always present, zero install.
37
+ * Returns 'coreml' on M-series chips, 'cpu' on Intel Macs.
38
+ */
39
/**
 * macOS probe: identify Apple Silicon without loading any native code.
 * Shells out to `sysctl` (ships with macOS, zero install) and inspects
 * the CPU brand string.
 * @returns {'coreml'|'cpu'} EP.COREML on M-series chips, EP.CPU otherwise.
 */
function probeDarwin() {
  let brand = '';
  try {
    const out = execSync('sysctl -n machdep.cpu.brand_string 2>/dev/null', {
      stdio: 'pipe',
      timeout: 2000,
    });
    brand = out.toString().trim();
  } catch (_) {
    // sysctl unavailable or exited non-zero — fall through to CPU.
  }
  return brand.includes('Apple') ? EP.COREML : EP.CPU;
}
54
+
55
+ /**
56
+ * Linux / Windows: Check for NVIDIA GPU via nvidia-smi.
57
+ * nvidia-smi ships with the NVIDIA driver package.
58
+ * If it exits 0, the GPU is present AND the driver is loaded — both required.
59
+ * stdio: 'ignore' suppresses the full nvidia-smi output table.
60
+ * timeout: 3000ms prevents hangs on misconfigured driver environments.
61
+ */
62
/**
 * Linux / Windows probe: detect a working NVIDIA GPU via `nvidia-smi`.
 * A zero exit code means the GPU is present AND the driver is loaded —
 * both are required for CUDA. Output is discarded (stdio: 'ignore');
 * the 3s timeout guards against hung or misconfigured driver stacks.
 * @returns {'cuda'|'cpu'}
 */
function probeNvidia() {
  try {
    execSync('nvidia-smi', { stdio: 'ignore', timeout: 3000 });
  } catch (_) {
    // nvidia-smi not on PATH, or GPU present but driver not loaded.
    return EP.CPU;
  }
  return EP.CUDA;
}
71
+
72
+ // ─── Main Export ─────────────────────────────────────────────────────────────
73
+
74
+ /**
75
+ * detectHardware()
76
+ *
77
+ * Synchronous hardware probe. Returns the highest-performance execution
78
+ * provider available on the current machine. Decision tree:
79
+ *
80
+ * macOS + Apple Silicon → 'coreml' (Neural Engine via CoreML)
81
+ * macOS + Intel → 'cpu' (no CoreML acceleration)
82
+ * Linux/Win + NVIDIA → 'cuda' (GPU via CUDA)
83
+ * Linux/Win + no GPU → 'cpu' (optimised WASM SIMD path)
84
+ * Any other platform → 'cpu' (safe universal fallback)
85
+ *
86
+ * @returns {'cuda'|'coreml'|'cpu'}
87
+ */
88
/**
 * detectHardware()
 *
 * Synchronous probe returning the best available execution provider:
 *   darwin        → probeDarwin()  ('coreml' on Apple Silicon, else 'cpu')
 *   linux / win32 → probeNvidia()  ('cuda' when nvidia-smi succeeds)
 *   anything else → 'cpu'          (safe universal fallback)
 *
 * @returns {'cuda'|'coreml'|'cpu'}
 */
function detectHardware() {
  const platform = os.platform();
  if (platform === 'darwin') {
    return probeDarwin();
  }
  if (platform === 'linux' || platform === 'win32') {
    return probeNvidia();
  }
  // FreeBSD, Android, unknown — CPU is always safe.
  return EP.CPU;
}
104
+
105
+ /**
106
+ * detectHardwareAsync()
107
+ *
108
+ * Non-blocking wrapper. Runs the probe in a setImmediate tick so it doesn't
109
+ * stall the event loop during module initialisation. Preferred for use inside
110
+ * slipstream.js createMemory() boot sequence.
111
+ *
112
+ * @returns {Promise<'cuda'|'coreml'|'cpu'>}
113
+ */
114
/**
 * detectHardwareAsync()
 *
 * Non-blocking wrapper: defers the synchronous probe by one
 * setImmediate tick so module initialisation never stalls the
 * event loop.
 *
 * @returns {Promise<'cuda'|'coreml'|'cpu'>}
 */
async function detectHardwareAsync() {
  await new Promise((resolve) => setImmediate(resolve));
  return detectHardware();
}
119
+
120
+ /**
121
+ * getEPLabel()
122
+ * Human-readable label for the audit log banner.
123
+ *
124
+ * @param {'cuda'|'coreml'|'cpu'} ep
125
+ * @returns {string}
126
+ */
127
/**
 * getEPLabel()
 * Human-readable label for the audit log banner.
 *
 * FIX: the previous plain-object lookup leaked prototype members —
 * e.g. getEPLabel('__proto__') returned Object.prototype (an object,
 * not a string). A Map has no such inherited keys.
 *
 * @param {'cuda'|'coreml'|'cpu'} ep
 * @returns {string} label; unknown ids fall back to the CPU label.
 */
function getEPLabel(ep) {
  const LABELS = new Map([
    ['cuda', 'CUDA (NVIDIA GPU)'],
    ['coreml', 'CoreML (Apple Neural Engine)'],
    ['cpu', 'CPU (WASM SIMD)'],
  ]);
  return LABELS.get(ep) ?? 'CPU (WASM SIMD)';
}
135
+
136
// Public API. EP is exported so consumers reference the shared
// constants instead of hardcoding provider strings.
module.exports = { detectHardware, detectHardwareAsync, getEPLabel, EP };
142
+
143
+ // ─── CLI Self-Test ───────────────────────────────────────────────────────────
144
+ // Run directly to validate the probe on any target machine:
145
+ // node detect-hardware.js
146
+ //
147
+ // Expected outputs:
148
+ // Apple M-series → [SLIPSTREAM PROBE] EP: CoreML (Apple Neural Engine) ✓
149
+ // NVIDIA machine → [SLIPSTREAM PROBE] EP: CUDA (NVIDIA GPU) ✓
150
+ // Any other → [SLIPSTREAM PROBE] EP: CPU (WASM SIMD) ✓
151
+
152
if (require.main === module) {
  const t0 = Date.now();
  const ep = detectHardware();
  const elapsed = Date.now() - t0;

  // Rocket for accelerated paths, gear for plain CPU.
  const icon = ep === EP.CPU ? '⚙️ ' : '🚀';

  const banner = [
    '',
    ' ╔══════════════════════════════════════════╗',
    ' ║ VEKTOR SLIPSTREAM — PROBE ║',
    ' ╚══════════════════════════════════════════╝',
    '',
    ` ${icon} EP: ${getEPLabel(ep)}`,
    ` ⏱ Probe: ${elapsed}ms`,
    ` 🖥 Platform: ${os.platform()} / ${os.arch()}`,
    ` 💾 RAM: ${(os.totalmem() / 1024 ** 3).toFixed(1)} GB`,
    '',
  ];
  for (const line of banner) {
    console.log(line);
  }

  if (ep === EP.CPU) {
    console.log(' ℹ️ No GPU acceleration detected.');
    console.log(' Slipstream will run on optimised CPU (WASM SIMD).');
    console.log(' For CUDA: ensure nvidia-smi is accessible in PATH.');
    console.log(' For CoreML: Apple Silicon Mac required.');
  } else {
    console.log(' ✓ Hardware acceleration confirmed. Slipstream will');
    console.log(` engage ${getEPLabel(ep)} for sub-12ms embeddings.`);
  }

  console.log('');
}
@@ -0,0 +1,284 @@
1
+ /**
2
+ * example-claude-mcp.js
3
+ * VEKTOR SLIPSTREAM — Claude MCP Agent Example
4
+ * ─────────────────────────────────────────────
5
+ * Connects Claude to Slipstream persistent memory via the
6
+ * Model Context Protocol (MCP). Claude can recall, store,
7
+ * and traverse the memory graph across every conversation.
8
+ *
9
+ * Two modes:
10
+ * 1. MCP SERVER MODE — run as a stdio MCP server for Claude Desktop
11
+ * 2. DIRECT CHAT MODE — interactive chat with memory, no MCP needed
12
+ *
13
+ * Install:
14
+ * npm install vektor-slipstream @anthropic-ai/sdk
15
+ *
16
+ * MCP server mode (add to claude_desktop_config.json):
17
+ * {
18
+ * "mcpServers": {
19
+ * "slipstream": {
20
+ * "command": "node",
21
+ * "args": ["/path/to/example-claude-mcp.js", "--mcp"],
22
+ * "env": { "SLIPSTREAM_AGENT_ID": "claude-desktop" }
23
+ * }
24
+ * }
25
+ * }
26
+ *
27
+ * Direct chat mode:
28
+ * ANTHROPIC_API_KEY=sk-ant-... node example-claude-mcp.js
29
+ */
30
+
31
+ 'use strict';
32
+
33
+ const { createMemory } = require('vektor-slipstream');
34
+ const Anthropic = require('@anthropic-ai/sdk');
35
+ const readline = require('readline');
36
+
37
// Runtime configuration.
// --mcp flag switches from interactive chat to stdio MCP server mode.
const IS_MCP = process.argv.includes('--mcp');
// Memory namespace for this agent (overridable via environment).
const AGENT_ID = process.env.SLIPSTREAM_AGENT_ID || 'claude-mcp';
// Fast + cheap model for tool-use loops.
const MODEL = 'claude-haiku-4-5-20251001';
40
+
41
+ // ── MCP Tool Definitions ──────────────────────────────────────────────────────
42
+
43
/** Builds one MCP tool descriptor with the standard object input schema. */
const defineTool = (name, description, properties, required) => ({
  name,
  description,
  input_schema: { type: 'object', properties, required },
});

const MCP_TOOLS = [
  defineTool(
    'vektor_recall',
    'Search Slipstream persistent memory for relevant context. Call before answering any question that might have prior context.',
    {
      query: { type: 'string', description: 'What to search for.' },
      top_k: { type: 'integer', description: 'Number of results (default 5, max 20).', default: 5 },
    },
    ['query']
  ),
  defineTool(
    'vektor_store',
    'Store a fact, preference, decision, or piece of context in persistent memory. Use whenever the user shares something worth remembering.',
    {
      content: { type: 'string', description: 'The memory to store as a clear complete sentence.' },
      importance: { type: 'number', description: 'Importance 1-5. 5=critical (name/key decision), 3=useful, 1=minor.' },
    },
    ['content']
  ),
  defineTool(
    'vektor_graph',
    'Traverse the memory graph from a concept — finds connected memories and relationships. Use to understand full context around a topic.',
    {
      concept: { type: 'string', description: 'The concept to start from.' },
      hops: { type: 'integer', description: 'Traversal depth 1-3 (default 2).', default: 2 },
    },
    ['concept']
  ),
  defineTool(
    'vektor_delta',
    'See what changed in memory on a topic over recent days.',
    {
      topic: { type: 'string', description: 'The topic to check.' },
      days: { type: 'integer', description: 'How many days back to look (default 7).', default: 7 },
    },
    ['topic']
  ),
];
93
+
94
+ // ── Tool execution ────────────────────────────────────────────────────────────
95
+
96
/**
 * Executes one Slipstream memory tool call.
 *
 * FIX: numeric defaults previously used `||`, which collapses a
 * legitimate 0 into the default; switched to `??`. The documented
 * bounds (top_k max 20, hops 1-3) are now actually enforced.
 *
 * @param {string} name   Tool name (one of the MCP_TOOLS names).
 * @param {object} input  Parsed tool arguments.
 * @param {object} memory Slipstream handle exposing recall/remember/graph/delta.
 * @returns {Promise<object>} JSON-serialisable tool result.
 */
async function runTool(name, input, memory) {
  switch (name) {
    case 'vektor_recall': {
      // Documented: default 5, max 20.
      const topK = Math.min(Math.max(input.top_k ?? 5, 1), 20);
      const results = await memory.recall(input.query, topK);
      if (!results.length) return { found: 0, memories: [] };
      return {
        found: results.length,
        memories: results.map((r) => ({
          content: r.content,
          relevance: r.score,
          id: r.id,
        })),
      };
    }
    case 'vektor_store': {
      const { id } = await memory.remember(input.content, { importance: input.importance ?? 2 });
      return { stored: true, memory_id: id, content: input.content };
    }
    case 'vektor_graph': {
      // Documented traversal depth: 1-3 (default 2).
      const hops = Math.min(Math.max(input.hops ?? 2, 1), 3);
      const { nodes, edges } = await memory.graph(input.concept, { hops });
      return {
        nodes: nodes.slice(0, 10).map((n) => ({ id: n.id, content: n.content, importance: n.importance })),
        edge_count: edges.length,
        node_count: nodes.length,
      };
    }
    case 'vektor_delta': {
      const days = input.days ?? 7;
      const changes = await memory.delta(input.topic, days);
      return {
        topic: input.topic,
        days,
        changes: changes.slice(0, 10).map((c) => ({ content: c.content, updated_at: c.updated_at })),
      };
    }
    default:
      return { error: `Unknown tool: ${name}` };
  }
}
134
+
135
+ // ── MCP Server Mode (stdio) ───────────────────────────────────────────────────
136
+
137
/**
 * Runs a minimal MCP server speaking newline-delimited JSON-RPC 2.0
 * over stdio. Supports initialize, tools/list and tools/call.
 *
 * FIXES vs previous version:
 *  - "Method not found" was returned inside the `result` member;
 *    JSON-RPC 2.0 requires a top-level `error` member instead.
 *  - Notifications (requests without an `id`, e.g. MCP's
 *    notifications/initialized) were answered; JSON-RPC forbids
 *    responding to notifications — they are now skipped.
 *
 * @param {object} memory Slipstream memory handle passed to runTool().
 */
async function runMCPServer(memory) {
  process.stdin.setEncoding('utf8');
  let buffer = '';

  /** Dispatches one request; throws (with a .code) for protocol errors. */
  async function handleRequest(req) {
    if (req.method === 'initialize') {
      return {
        protocolVersion: '2024-11-05',
        serverInfo: { name: 'vektor-slipstream', version: '1.0.0' },
        capabilities: { tools: {} },
      };
    }

    if (req.method === 'tools/list') {
      return { tools: MCP_TOOLS };
    }

    if (req.method === 'tools/call') {
      const { name, arguments: args } = req.params;
      try {
        const result = await runTool(name, args, memory);
        return {
          content: [{ type: 'text', text: JSON.stringify(result, null, 2) }],
        };
      } catch (e) {
        // Tool failures are reported in-band per MCP, not as protocol errors.
        return {
          content: [{ type: 'text', text: `Error: ${e.message}` }],
          isError: true,
        };
      }
    }

    const err = new Error('Method not found');
    err.code = -32601; // JSON-RPC: method not found
    throw err;
  }

  const send = (msg) => process.stdout.write(JSON.stringify(msg) + '\n');

  process.stdin.on('data', async (chunk) => {
    buffer += chunk;
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep any trailing partial line for the next chunk

    for (const line of lines) {
      if (!line.trim()) continue;

      let req;
      try {
        req = JSON.parse(line);
      } catch (_) {
        send({ jsonrpc: '2.0', id: null, error: { code: -32700, message: 'Parse error' } });
        continue;
      }

      // JSON-RPC 2.0: no id → notification → never answered.
      if (req.id === undefined) continue;

      try {
        const result = await handleRequest(req);
        send({ jsonrpc: '2.0', id: req.id, result });
      } catch (e) {
        send({ jsonrpc: '2.0', id: req.id, error: { code: e.code ?? -32603, message: e.message } });
      }
    }
  });
}
192
+
193
+ // ── Direct Chat Mode ──────────────────────────────────────────────────────────
194
+
195
/**
 * Interactive terminal chat with Claude, backed by Slipstream memory.
 * Runs a standard agentic loop: the model may call the memory tools any
 * number of times before producing its final text reply.
 * @param {object} memory Slipstream memory handle.
 */
async function runDirectChat(memory) {
  const anthropic = new Anthropic();
  const history = [];

  // Seed the system prompt with a briefing of recent memory activity.
  const briefing = await memory.briefing();
  const system = `You are a persistent assistant with long-term memory via VEKTOR Slipstream.

You have four memory tools: vektor_recall, vektor_store, vektor_graph, vektor_delta.
- Always recall before answering questions that might have prior context
- Store important facts the user shares
- Be proactive about remembering preferences and decisions

${briefing}`;

  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  const ask = () => new Promise((resolve) => rl.question('\nYou: ', resolve));

  console.log('\n[CLAUDE+SLIPSTREAM] Ready. Memories persist across sessions.');
  console.log('[CLAUDE+SLIPSTREAM] Type "exit" to quit.\n');

  for (;;) {
    const userInput = (await ask()).trim();
    if (!userInput) continue;
    if (userInput.toLowerCase() === 'exit') {
      rl.close();
      break;
    }

    history.push({ role: 'user', content: userInput });

    // Agentic loop: keep calling the model until it stops requesting tools.
    for (;;) {
      const response = await anthropic.messages.create({
        model: MODEL,
        max_tokens: 1024,
        system,
        tools: MCP_TOOLS,
        messages: history,
      });

      history.push({ role: 'assistant', content: response.content });

      if (response.stop_reason !== 'tool_use') {
        // Final text response — show it and return to the prompt.
        const text = response.content.find((b) => b.type === 'text')?.text || '';
        console.log(`\nClaude: ${text}`);
        break;
      }

      // Execute every requested tool and feed results back to the model.
      const toolResults = [];
      for (const block of response.content) {
        if (block.type !== 'tool_use') continue;
        const result = await runTool(block.name, block.input, memory);
        console.log(` [${block.name}] ${JSON.stringify(result).slice(0, 100)}...`);
        toolResults.push({
          type: 'tool_result',
          tool_use_id: block.id,
          content: JSON.stringify(result),
        });
      }
      history.push({ role: 'user', content: toolResults });
    }
  }

  console.log('\n[CLAUDE+SLIPSTREAM] Session ended. Memories saved.');
}
260
+
261
+ // ── Entry Point ───────────────────────────────────────────────────────────────
262
+
263
/**
 * Entry point: boots Slipstream memory, then starts either the stdio
 * MCP server (--mcp) or the interactive direct-chat loop.
 */
async function main() {
  if (!IS_MCP) {
    console.log('\n[CLAUDE+SLIPSTREAM] Booting Slipstream memory...');
  }

  const memory = await createMemory({
    agentId: AGENT_ID,
    dbPath: './claude-memory.db',
    silent: IS_MCP, // stdout carries JSON-RPC in MCP mode — keep it clean
  });

  return IS_MCP ? runMCPServer(memory) : runDirectChat(memory);
}
280
+
281
// Top-level failure handler. In MCP mode stdout/stderr must stay quiet,
// so the message is only printed for direct-chat runs; exit non-zero either way.
main().catch((e) => {
  if (!IS_MCP) {
    console.error('[CLAUDE+SLIPSTREAM] Error:', e.message);
  }
  process.exit(1);
});
@@ -0,0 +1,116 @@
1
+ /**
2
+ * example-langchain-researcher.js
3
+ * VEKTOR SLIPSTREAM — LangChain Research Agent Example
4
+ * ─────────────────────────────────────────────────────
5
+ * A research agent that uses LangChain to search the web,
6
+ * summarise findings, and store everything in persistent
7
+ * Slipstream memory so it learns across sessions.
8
+ *
9
+ * Install:
10
+ * npm install vektor-slipstream langchain @langchain/openai
11
+ *
12
+ * Usage:
13
+ * OPENAI_API_KEY=sk-... node example-langchain-researcher.js
14
+ */
15
+
16
+ 'use strict';
17
+
18
+ const { createMemory } = require('vektor-slipstream');
19
+ const { ChatOpenAI } = require('@langchain/openai');
20
+ const { AgentExecutor } = require('langchain/agents');
21
+ const { createOpenAIFunctionsAgent } = require('langchain/agents');
22
+ const { TavilySearchResults } = require('@langchain/community/tools/tavily_search');
23
+ const { ChatPromptTemplate, MessagesPlaceholder } = require('@langchain/core/prompts');
24
+
25
+ // ── Config ────────────────────────────────────────────────────────────────────
26
+
27
// Memory namespace for this agent.
const AGENT_ID = 'langchain-researcher';
// Research topic: first CLI argument, with a sensible default.
const TOPIC = process.argv[2] || 'latest developments in agentic AI memory systems';
29
+
30
+ // ── Boot ──────────────────────────────────────────────────────────────────────
31
+
32
/**
 * Pulls the bullet list out of the agent's "KEY FINDINGS:" section.
 * Returns [] when the section is missing.
 * @param {string} output Agent's final research text.
 * @returns {string[]} Cleaned findings, one per bullet.
 */
function extractKeyFindings(output) {
  const match = output.match(/KEY FINDINGS:([\s\S]+?)(?:\n\n|$)/i);
  if (!match) return [];
  return match[1]
    .split('\n')
    .map((l) => l.replace(/^[-•*]\s*/, '').trim())
    .filter(Boolean);
}

/**
 * Research pipeline: recall prior knowledge → run the LangChain agent →
 * print the summary → persist the full summary and each key finding.
 */
async function main() {
  console.log('\n[RESEARCHER] Booting Slipstream memory...');
  const memory = await createMemory({ agentId: AGENT_ID, dbPath: './research-memory.db' });

  // 1. Recall what we already know about the topic.
  console.log(`[RESEARCHER] Recalling prior knowledge on: "${TOPIC}"`);
  const priorKnowledge = await memory.recall(TOPIC, 5);

  let priorContext = 'No prior research on this topic.';
  if (priorKnowledge.length > 0) {
    priorContext = priorKnowledge
      .map((m, i) => `${i + 1}. [score: ${m.score}] ${m.content}`)
      .join('\n');
    console.log(`[RESEARCHER] Found ${priorKnowledge.length} prior memories.`);
  } else {
    console.log('[RESEARCHER] No prior memories — starting fresh.');
  }

  // 2. Build the LangChain agent, seeding the system prompt with memory.
  const llm = new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0.1 });
  const tools = [new TavilySearchResults({ maxResults: 5 })];

  const prompt = ChatPromptTemplate.fromMessages([
    ['system', `You are a meticulous research agent with persistent memory.

PRIOR KNOWLEDGE FROM MEMORY:
${priorContext}

Instructions:
- Search for new information on the given topic
- Compare findings with prior knowledge — note what's changed or new
- Produce a concise research summary (3-5 paragraphs)
- End with a "KEY FINDINGS:" section listing 3-5 bullet points
- Be specific: cite sources, dates, and data points`],
    ['human', '{input}'],
    new MessagesPlaceholder('agent_scratchpad'),
  ]);

  const agent = await createOpenAIFunctionsAgent({ llm, tools, prompt });
  const executor = new AgentExecutor({ agent, tools, verbose: false });

  // 3. Run the research task.
  console.log('[RESEARCHER] Running LangChain agent...\n');
  const result = await executor.invoke({ input: `Research this topic thoroughly: ${TOPIC}` });
  // FIX: guard against executors that omit `output` — previously
  // `output.match(...)` would throw a TypeError on undefined.
  const output = result.output ?? '';

  console.log('─'.repeat(60));
  console.log(output);
  console.log('─'.repeat(60));

  // 4. Store findings in Slipstream.
  console.log('\n[RESEARCHER] Storing findings in Slipstream memory...');

  // Full summary at normal importance.
  await memory.remember(
    `[RESEARCH] ${TOPIC}: ${output}`,
    { importance: 2 }
  );

  // Each key finding stored separately for granular recall.
  const findings = extractKeyFindings(output);
  for (const finding of findings) {
    await memory.remember(`[FINDING] ${finding}`, { importance: 3 });
  }
  if (findings.length > 0) {
    console.log(`[RESEARCHER] Stored ${findings.length} key findings separately.`);
  }

  // 5. Show the memory briefing.
  const briefing = await memory.briefing();
  console.log('\n[RESEARCHER] Memory briefing (last 24h):');
  console.log(briefing);

  console.log('\n[RESEARCHER] Done. Run again tomorrow — agent will build on today\'s findings.');
}
112
+
113
// Surface any pipeline failure and exit non-zero so schedulers notice.
main().catch((e) => {
  console.error('[RESEARCHER] Error:', e.message);
  process.exit(1);
});