vektor-slipstream 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,116 @@
1
+ /**
2
+ * example-langchain-researcher.js
3
+ * VEKTOR SLIPSTREAM — LangChain Research Agent Example
4
+ * ─────────────────────────────────────────────────────
5
+ * A research agent that uses LangChain to search the web,
6
+ * summarise findings, and store everything in persistent
7
+ * Slipstream memory so it learns across sessions.
8
+ *
9
+ * Install:
10
+ * npm install vektor-slipstream langchain @langchain/openai @langchain/community
11
+ *
12
+ * Usage:
13
+ * OPENAI_API_KEY=sk-... node example-langchain-researcher.js
14
+ */
15
+
16
+ 'use strict';
17
+
18
+ const { createMemory } = require('vektor-slipstream');
19
+ const { ChatOpenAI } = require('@langchain/openai');
20
+ const { AgentExecutor } = require('langchain/agents');
21
+ const { createOpenAIFunctionsAgent } = require('langchain/agents');
22
+ const { TavilySearchResults } = require('@langchain/community/tools/tavily_search');
23
+ const { ChatPromptTemplate, MessagesPlaceholder } = require('@langchain/core/prompts');
24
+
25
// ── Config ────────────────────────────────────────────────────────────────────

// Memory namespace for this agent; keeps its memories separate from other agents.
const AGENT_ID = 'langchain-researcher';
// Research topic — taken from the first CLI argument, with a demo default.
// NOTE: `||` means an empty-string argument also falls back to the default.
const TOPIC = process.argv[2] || 'latest developments in agentic AI memory systems';
29
+
30
+ // ── Boot ──────────────────────────────────────────────────────────────────────
31
+
32
/**
 * Research workflow:
 *   1. boot Slipstream memory and recall prior knowledge on TOPIC
 *   2. build a LangChain OpenAI-functions agent with Tavily web search
 *   3. run the research and print the summary
 *   4. persist the summary plus each KEY FINDING back into memory
 *   5. print a memory briefing
 *
 * Fix vs. original: recalled memory content was interpolated raw into a
 * ChatPromptTemplate string. LangChain parses single `{`/`}` in the template
 * as input variables, so any brace inside a stored memory would corrupt or
 * crash the prompt. Braces are now escaped (`{` -> `{{`, `}` -> `}}`) before
 * interpolation.
 */
async function main() {
  console.log('\n[RESEARCHER] Booting Slipstream memory...');
  const memory = await createMemory({ agentId: AGENT_ID, dbPath: './research-memory.db' });

  // ── 1. Recall what we already know ──────────────────────────────────────────
  console.log(`[RESEARCHER] Recalling prior knowledge on: "${TOPIC}"`);
  const priorKnowledge = await memory.recall(TOPIC, 5);

  let priorContext = 'No prior research on this topic.';
  if (priorKnowledge.length > 0) {
    priorContext = priorKnowledge
      .map((m, i) => `${i + 1}. [score: ${m.score}] ${m.content}`)
      .join('\n');
    console.log(`[RESEARCHER] Found ${priorKnowledge.length} prior memories.`);
  } else {
    console.log('[RESEARCHER] No prior memories — starting fresh.');
  }

  // Escape braces so stored memory text cannot be misread as template variables.
  const promptSafeContext = priorContext.replace(/[{}]/g, (brace) => brace + brace);

  // ── 2. Build the LangChain agent ────────────────────────────────────────────
  const llm = new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0.1 });
  const tools = [new TavilySearchResults({ maxResults: 5 })];

  const prompt = ChatPromptTemplate.fromMessages([
    ['system', `You are a meticulous research agent with persistent memory.

PRIOR KNOWLEDGE FROM MEMORY:
${promptSafeContext}

Instructions:
- Search for new information on the given topic
- Compare findings with prior knowledge — note what's changed or new
- Produce a concise research summary (3-5 paragraphs)
- End with a "KEY FINDINGS:" section listing 3-5 bullet points
- Be specific: cite sources, dates, and data points`],
    ['human', '{input}'],
    new MessagesPlaceholder('agent_scratchpad'),
  ]);

  const agent = await createOpenAIFunctionsAgent({ llm, tools, prompt });
  const executor = new AgentExecutor({ agent, tools, verbose: false });

  // ── 3. Run research ─────────────────────────────────────────────────────────
  console.log('[RESEARCHER] Running LangChain agent...\n');
  const result = await executor.invoke({ input: `Research this topic thoroughly: ${TOPIC}` });
  const output = result.output;

  console.log('─'.repeat(60));
  console.log(output);
  console.log('─'.repeat(60));

  // ── 4. Store findings in Slipstream ─────────────────────────────────────────
  console.log('\n[RESEARCHER] Storing findings in Slipstream memory...');

  // Store the full summary as one memory.
  await memory.remember(
    `[RESEARCH] ${TOPIC}: ${output}`,
    { importance: 2 }
  );

  // Extract and store each KEY FINDING separately for granular recall.
  // Matches from "KEY FINDINGS:" up to the next blank line (or end of text).
  const keyFindingsMatch = output.match(/KEY FINDINGS:([\s\S]+?)(?:\n\n|$)/i);
  if (keyFindingsMatch) {
    const findings = keyFindingsMatch[1]
      .split('\n')
      .map(l => l.replace(/^[-•*]\s*/, '').trim())
      .filter(Boolean);

    for (const finding of findings) {
      await memory.remember(`[FINDING] ${finding}`, { importance: 3 });
    }
    console.log(`[RESEARCHER] Stored ${findings.length} key findings separately.`);
  }

  // ── 5. Show memory briefing ─────────────────────────────────────────────────
  const briefing = await memory.briefing();
  console.log('\n[RESEARCHER] Memory briefing (last 24h):');
  console.log(briefing);

  console.log('\n[RESEARCHER] Done. Run again tomorrow — agent will build on today\'s findings.');
}
112
+
113
// Top-level error handler: exit non-zero so cron/CI notices failures.
main().catch(e => {
  // Log the full stack (not just the message) so failures are debuggable.
  console.error('[RESEARCHER] Error:', e && e.stack ? e.stack : e);
  process.exit(1);
});
@@ -0,0 +1,195 @@
1
+ /**
2
+ * example-openai-assistant.js
3
+ * VEKTOR SLIPSTREAM — OpenAI Assistant Loop Example
4
+ * ─────────────────────────────────────────────────────────────
5
+ * A persistent assistant that remembers every conversation.
6
+ * Uses the OpenAI Chat Completions API for tool use and Slipstream for
7
+ * cross-session memory — the assistant genuinely knows who you
8
+ * are and what you've discussed before.
9
+ *
10
+ * Install:
11
+ * npm install vektor-slipstream openai
12
+ *
13
+ * Usage:
14
+ * OPENAI_API_KEY=sk-... node example-openai-assistant.js
15
+ */
16
+
17
+ 'use strict';
18
+
19
+ const { createMemory } = require('vektor-slipstream');
20
+ const OpenAI = require('openai');
21
+ const readline = require('readline');
22
+
23
// ── Config ────────────────────────────────────────────────────────────────────

// Memory namespace for this assistant's Slipstream store.
const AGENT_ID = 'openai-assistant';
// OpenAI chat model used for every completion request.
const MODEL = 'gpt-4o-mini';
27
+
28
+ // ── Tools available to the assistant ─────────────────────────────────────────
29
+
30
/**
 * Build the OpenAI function-tool schemas the assistant may call.
 *
 * NOTE(review): `memory` is accepted for API symmetry with executeTool()
 * but the schemas themselves do not read it.
 *
 * @param {object} memory - Slipstream memory handle (unused here).
 * @returns {Array<object>} Chat Completions `tools` array (three entries).
 */
function buildTools(memory) {
  // Local helper: wrap a JSON-schema fragment as a chat-completions function tool.
  const functionTool = (name, description, properties, required) => ({
    type: 'function',
    function: {
      name,
      description,
      parameters: { type: 'object', properties, required },
    },
  });

  return [
    functionTool(
      'remember',
      'Store an important fact, preference, or decision in long-term memory. Call this whenever the user shares something worth remembering across sessions.',
      {
        content: {
          type: 'string',
          description: 'The fact or information to remember, written as a clear complete sentence.',
        },
        importance: {
          type: 'number',
          description: 'Importance score 1-5. Use 5 for critical facts (name, key decisions), 3 for useful context, 1 for minor details.',
        },
      },
      ['content'],
    ),
    functionTool(
      'recall',
      'Search long-term memory for relevant information. Call this when you need context about the user or a topic discussed previously.',
      {
        query: {
          type: 'string',
          description: 'What to search for in memory.',
        },
      },
      ['query'],
    ),
    functionTool(
      'memory_graph',
      'Explore connected memories around a concept. Use this to understand the full context around a topic.',
      {
        concept: { type: 'string', description: 'The concept to explore.' },
        hops: { type: 'number', description: 'How many hops to traverse (1-3). Default 2.' },
      },
      ['concept'],
    ),
  ];
}
87
+
88
+ // ── Tool execution ────────────────────────────────────────────────────────────
89
+
90
/**
 * Dispatch one assistant tool call against Slipstream memory.
 *
 * @param {string} name - Tool name: 'remember' | 'recall' | 'memory_graph'.
 * @param {object} args - Parsed JSON arguments produced by the model.
 * @param {object} memory - Slipstream memory handle.
 * @returns {Promise<string>} Human-readable result fed back to the model.
 */
async function executeTool(name, args, memory) {
  if (name === 'remember') {
    const { id } = await memory.remember(args.content, { importance: args.importance || 2 });
    const preview = args.content.slice(0, 80);
    const ellipsis = args.content.length > 80 ? '...' : '';
    return `Stored memory #${id}: "${preview}${ellipsis}"`;
  }

  if (name === 'recall') {
    const hits = await memory.recall(args.query, 5);
    if (!hits.length) return 'No relevant memories found.';
    const lines = hits.map((hit, idx) => `${idx + 1}. [relevance: ${hit.score}] ${hit.content}`);
    return lines.join('\n');
  }

  if (name === 'memory_graph') {
    const { nodes, edges } = await memory.graph(args.concept, { hops: args.hops || 2 });
    const header = `Found ${nodes.length} connected memories and ${edges.length} relationships.\n`;
    const preview = nodes.slice(0, 5).map((node) => `• ${node.content.slice(0, 100)}`).join('\n');
    return header + preview;
  }

  return 'Unknown tool.';
}
112
+
113
+ // ── Main assistant loop ───────────────────────────────────────────────────────
114
+
115
/**
 * Interactive assistant REPL with persistent memory.
 *
 * Per user turn: append the user message, then let the model answer —
 * executing any requested memory tools and feeding results back — until it
 * produces a plain-text reply.
 *
 * Hardening vs. original:
 *   - malformed tool-call arguments (invalid JSON from the model) or a
 *     failing tool no longer crash the session; the error text is returned
 *     to the model as the tool result instead
 *   - the tool loop is capped: after MAX_TOOL_ROUNDS rounds the model is
 *     forced (tool_choice: 'none') to answer in text, so a confused model
 *     cannot loop forever while keeping the message protocol valid
 */
async function main() {
  // Each "round" is one completion request; keep the tool loop finite.
  const MAX_TOOL_ROUNDS = 8;

  console.log('\n[ASSISTANT] Booting Slipstream memory...');
  const memory = await createMemory({ agentId: AGENT_ID, dbPath: './assistant-memory.db' });

  const client = new OpenAI();
  const tools = buildTools(memory);
  const messages = [];

  // Inject the memory briefing as system context so the assistant starts
  // each session already aware of recent memories.
  const briefing = await memory.briefing();
  const systemPrompt = `You are a persistent personal assistant with long-term memory powered by VEKTOR Slipstream.

You have access to tools to remember and recall information across sessions.
Always recall relevant memories before answering questions about past conversations or the user's context.
When the user shares important information, use the remember tool to store it.

${briefing}`;

  messages.push({ role: 'system', content: systemPrompt });

  // Terminal interface.
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  const prompt = () => new Promise(resolve => rl.question('\nYou: ', resolve));

  console.log('\n[ASSISTANT] Ready. Your memories persist across sessions.');
  console.log('[ASSISTANT] Type "exit" to quit, "briefing" to see memory summary.\n');

  while (true) {
    const userInput = (await prompt()).trim();
    if (!userInput) continue;
    if (userInput.toLowerCase() === 'exit') { rl.close(); break; }

    if (userInput.toLowerCase() === 'briefing') {
      console.log('\n' + await memory.briefing());
      continue;
    }

    messages.push({ role: 'user', content: userInput });

    // Agent loop — handles tool calls until the model replies in text.
    let response;
    for (let round = 0; ; round++) {
      response = await client.chat.completions.create({
        model: MODEL,
        messages,
        tools,
        // Past the cap, forbid further tool calls so the loop must terminate.
        tool_choice: round < MAX_TOOL_ROUNDS ? 'auto' : 'none',
      });

      const msg = response.choices[0].message;
      messages.push(msg);

      if (!msg.tool_calls?.length) break; // Plain-text reply — done.

      // Execute all tool calls; failures become tool results, not crashes.
      const toolResults = await Promise.all(
        msg.tool_calls.map(async tc => {
          let result;
          try {
            const args = JSON.parse(tc.function.arguments);
            result = await executeTool(tc.function.name, args, memory);
          } catch (err) {
            // Report the failure back to the model so it can recover.
            result = `Tool error: ${err.message}`;
          }
          console.log(`  [tool:${tc.function.name}] ${result.slice(0, 120)}`);
          return {
            role: 'tool',
            tool_call_id: tc.id,
            content: result,
          };
        })
      );
      messages.push(...toolResults);
    }

    const reply = response.choices[0].message.content;
    console.log(`\nAssistant: ${reply}`);
  }

  console.log('\n[ASSISTANT] Session ended. Memories saved for next time.');
}
191
+
192
// Top-level error handler: exit non-zero so callers/supervisors notice failures.
main().catch(e => {
  // Log the full stack (not just the message) so failures are debuggable.
  console.error('[ASSISTANT] Error:', e && e.stack ? e.stack : e);
  process.exit(1);
});
@@ -0,0 +1,161 @@
1
+ # VEKTOR Slipstream — Production Agent Examples
2
+
3
+ Three complete, copyable agents built on the Slipstream memory API.
4
+ Each works out of the box — adapt the agent logic, keep the memory layer.
5
+
6
+ ---
7
+
8
+ ## Quick Start
9
+
10
+ ```bash
11
+ npm install vektor-slipstream
12
+ ```
13
+
14
+ All three examples use the same two-line memory setup:
15
+
16
+ ```js
17
+ const { createMemory } = require('vektor-slipstream');
18
+ const memory = await createMemory({ agentId: 'my-agent' });
19
+ ```
20
+
21
+ ---
22
+
23
+ ## Example 1 — LangChain Researcher
24
+
25
+ `example-langchain-researcher.js`
26
+
27
+ A research agent that searches the web, summarises findings, and builds
28
+ a persistent knowledge base that grows smarter with every run.
29
+
30
+ **What it does:**
31
+ - Recalls prior research before starting a new search
32
+ - Runs a LangChain agent with Tavily web search
33
+ - Stores the full summary and each key finding separately
34
+ - Prints a memory briefing showing everything learned today
35
+
36
+ **Install:**
37
+ ```bash
38
+ npm install vektor-slipstream langchain @langchain/openai @langchain/community
39
+ ```
40
+
41
+ **Run:**
42
+ ```bash
43
+ OPENAI_API_KEY=sk-... TAVILY_API_KEY=tvly-... \
44
+ node example-langchain-researcher.js "quantum computing breakthroughs 2025"
45
+ ```
46
+
47
+ **Run again tomorrow** — the agent picks up where it left off.
48
+
49
+ ---
50
+
51
+ ## Example 2 — OpenAI Assistant Loop
52
+
53
+ `example-openai-assistant.js`
54
+
55
+ An interactive personal assistant that genuinely remembers you across sessions.
56
+ Uses the OpenAI Chat Completions API with three memory tools wired in.
57
+
58
+ **What it does:**
59
+ - Injects a memory briefing into every session
60
+ - Gives the assistant `remember`, `recall`, and `memory_graph` tools
61
+ - Stores important facts automatically as you chat
62
+ - Traverses the memory graph for full context on any topic
63
+
64
+ **Install:**
65
+ ```bash
66
+ npm install vektor-slipstream openai
67
+ ```
68
+
69
+ **Run:**
70
+ ```bash
71
+ OPENAI_API_KEY=sk-... node example-openai-assistant.js
72
+ ```
73
+
74
+ **Commands:**
75
+ - Type naturally — the assistant manages memory automatically
76
+ - Type `briefing` to see everything stored in the last 24h
77
+ - Type `exit` to end the session
78
+
79
+ ---
80
+
81
+ ## Example 3 — Claude MCP Agent
82
+
83
+ `example-claude-mcp.js`
84
+
85
+ Connects Claude to Slipstream via the Model Context Protocol.
86
+ Runs in two modes: as an MCP server for Claude Desktop, or as a
87
+ standalone interactive chat.
88
+
89
+ **What it does:**
90
+ - Implements `vektor_recall`, `vektor_store`, `vektor_graph`, `vektor_delta` as MCP tools
91
+ - Full stdio MCP server (JSON-RPC 2.0) for Claude Desktop integration
92
+ - Direct chat mode for testing without Claude Desktop
93
+
94
+ **Install:**
95
+ ```bash
96
+ npm install vektor-slipstream @anthropic-ai/sdk
97
+ ```
98
+
99
+ **Direct chat mode:**
100
+ ```bash
101
+ ANTHROPIC_API_KEY=sk-ant-... node example-claude-mcp.js
102
+ ```
103
+
104
+ **Claude Desktop MCP mode** — add to `claude_desktop_config.json`:
105
+ ```json
106
+ {
107
+ "mcpServers": {
108
+ "slipstream": {
109
+ "command": "node",
110
+ "args": ["/absolute/path/to/example-claude-mcp.js", "--mcp"],
111
+ "env": {
112
+ "SLIPSTREAM_AGENT_ID": "claude-desktop"
113
+ }
114
+ }
115
+ }
116
+ }
117
+ ```
118
+
119
+ Restart Claude Desktop — Slipstream memory tools appear automatically.
120
+
121
+ ---
122
+
123
+ ## The Memory API
124
+
125
+ All three examples use the same five methods:
126
+
127
+ ```js
128
+ // Store a memory
129
+ await memory.remember('User prefers TypeScript over JavaScript', { importance: 3 });
130
+
131
+ // Recall by semantic similarity
132
+ const results = await memory.recall('coding preferences', 5);
133
+ // → [{ content, score, id, importance }]
134
+
135
+ // Traverse the graph
136
+ const { nodes, edges } = await memory.graph('TypeScript', { hops: 2 });
137
+
138
+ // See what changed recently
139
+ const changes = await memory.delta('project status', 7); // last 7 days
140
+
141
+ // Morning briefing
142
+ const brief = await memory.briefing(); // inject into system prompt
143
+ ```
144
+
145
+ ---
146
+
147
+ ## Each agent gets its own DB
148
+
149
+ ```js
150
+ // Separate memory per agent — no cross-contamination
151
+ const researchMemory = await createMemory({ agentId: 'researcher', dbPath: './research.db' });
152
+ const assistantMemory = await createMemory({ agentId: 'assistant', dbPath: './assistant.db' });
153
+ ```
154
+
155
+ Or share memory across agents:
156
+
157
+ ```js
158
+ // Same DB, different agent IDs — memories are scoped per agent
159
+ const scout = await createMemory({ agentId: 'scout', dbPath: './shared.db' });
160
+ const forge = await createMemory({ agentId: 'forge', dbPath: './shared.db' });
161
+ ```
Binary file
package/package.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "name": "vektor-slipstream",
3
+ "version": "1.0.0",
4
+ "description": "Hardware-accelerated persistent memory for AI agents. Local-first, zero cloud dependency, $0 embedding cost.",
5
+ "main": "slipstream-core.js",
6
+ "exports": {
7
+ ".": "./slipstream-core.js",
8
+ "./db": "./slipstream-db.js",
9
+ "./embedder": "./slipstream-embedder.js"
10
+ },
11
+ "keywords": [
12
+ "ai", "memory", "agent", "vector", "sqlite", "embeddings",
13
+ "langchain", "openai", "anthropic", "claude", "mcp",
14
+ "rag", "persistent-memory", "local-ai", "onnx"
15
+ ],
16
+ "author": "VEKTOR Memory <hello@vektormemory.com>",
17
+ "license": "SEE LICENSE IN LICENSE",
18
+ "homepage": "https://vektormemory.com",
19
+ "repository": {
20
+ "type": "git",
21
+ "url": "https://github.com/Vektor-Memory/Vektor-memory"
22
+ },
23
+ "engines": {
24
+ "node": ">=18.0.0"
25
+ },
26
+ "dependencies": {
27
+ "better-sqlite3": "^9.4.3",
28
+ "onnxruntime-node": "^1.17.3"
29
+ },
30
+ "optionalDependencies": {
31
+ "sqlite-vec-windows-x64": "^0.1.6",
32
+ "sqlite-vec-darwin-arm64": "^0.1.6",
33
+ "sqlite-vec-linux-x64": "^0.1.6"
34
+ },
35
+ "files": [
36
+ "slipstream-core.js",
37
+ "slipstream-db.js",
38
+ "slipstream-embedder.js",
39
+ "detect-hardware.js",
40
+ "models/model_quantized.onnx",
41
+ "examples/",
42
+ "README.md",
43
+ "LICENSE"
44
+ ],
45
+ "publishConfig": {
46
+ "access": "public"
47
+ }
48
+ }