@jeremiaheth/neolata-mem 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,73 +1,297 @@
1
- # @jeremiaheth/neolata-mem
1
+ # neolata-mem
2
2
 
3
- A-MEM style memory graph engine for AI agents. Semantic auto-linking, decay, conflict resolution, and cross-agent traversal.
3
+ **Graph-native memory engine for AI agents.** Zettelkasten-inspired linking, biological decay, conflict resolution.
4
4
 
5
- ## Install
5
+ [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
6
+ [![Node.js](https://img.shields.io/badge/node-%3E%3D18-brightgreen.svg)](https://nodejs.org)
7
+
8
+ ---
9
+
10
+ No Python. No Docker. No Neo4j. Just `npm install`.
6
11
 
7
12
  ```bash
8
13
  npm install @jeremiaheth/neolata-mem
9
14
  ```
10
15
 
11
- ## Quick Start
16
+ ## Quick Start (3 lines)
12
17
 
13
- ```js
14
- import { MemoryGraph, NvidiaEmbeddingProvider, JsonFileStorage, LLMExtractionProvider } from '@jeremiaheth/neolata-mem';
18
+ ```javascript
19
+ import { createMemory } from '@jeremiaheth/neolata-mem';
15
20
 
16
- const graph = new MemoryGraph({
17
- embedding: new NvidiaEmbeddingProvider({ apiKey: process.env.NVIDIA_API_KEY }),
18
- storage: new JsonFileStorage('./memory-data'),
19
- extraction: new LLMExtractionProvider({
20
- baseUrl: 'http://localhost:18789/v1/chat/completions',
21
- apiKey: process.env.OPENCLAW_TOKEN,
22
- model: 'anthropic/claude-haiku-4-5'
23
- })
21
+ const mem = createMemory();
22
+ await mem.store('agent-1', 'User prefers dark mode');
23
+ const results = await mem.search('agent-1', 'UI preferences');
24
+ // [{ memory: 'User prefers dark mode', score: 1.0, ... }]
25
+ ```
26
+
27
+ That's it. Zero config. Local JSON storage, keyword search, no API keys needed.
28
+
29
+ ## With Embeddings (Semantic Search)
30
+
31
+ ```javascript
32
+ const mem = createMemory({
33
+ embeddings: {
34
+ type: 'openai',
35
+ apiKey: process.env.OPENAI_API_KEY,
36
+ model: 'text-embedding-3-small',
37
+ },
38
+ });
39
+
40
+ await mem.store('kuro', 'Found XSS vulnerability in login form', { category: 'finding', importance: 0.9 });
41
+ await mem.store('kuro', 'OWASP Top 10 audit completed', { category: 'event' });
42
+
43
+ const results = await mem.search('kuro', 'security vulnerabilities');
44
+ // Ranked by semantic similarity
45
+ ```
46
+
47
+ Works with any OpenAI-compatible API: **OpenAI, NVIDIA NIM, Ollama, Azure, Groq, Together, etc.**
48
+
49
+ ```javascript
50
+ // NVIDIA NIM (free tier)
51
+ embeddings: {
52
+ type: 'openai',
53
+ apiKey: process.env.NVIDIA_API_KEY,
54
+ model: 'baai/bge-m3',
55
+ baseUrl: 'https://integrate.api.nvidia.com/v1',
56
+ }
57
+
58
+ // Local Ollama
59
+ embeddings: {
60
+ type: 'openai',
61
+ apiKey: 'ollama',
62
+ model: 'nomic-embed-text',
63
+ baseUrl: 'http://localhost:11434/v1',
64
+ }
65
+ ```
66
+
67
+ ## Core Concepts
68
+
69
+ ### 🔗 A-MEM Zettelkasten Linking
70
+
71
+ Every memory automatically links to related memories — bidirectionally. When you store "Redis runs on port 6379", it finds existing memories about Redis, ports, or databases and creates links in both directions.
72
+
73
+ ```javascript
74
+ await mem.store('a', 'Redis runs on port 6379');
75
+ await mem.store('a', 'We use Redis for session caching'); // Auto-links to first memory
76
+
77
+ const links = await mem.links(memoryId);
78
+ // { memory: 'Redis runs on port 6379', links: [{ memory: 'We use Redis for session caching', similarity: 0.87 }] }
79
+ ```
80
+
81
+ ### 🧬 Biological Decay
82
+
83
+ Memories have a **strength** that decays over time, just like biological memory:
84
+
85
+ - **Half-life**: 30 days (configurable)
86
+ - **Link reinforcement**: More connections = slower decay (+5% per link, max +30%)
87
+ - **Category stickiness**: Decisions (1.3×) and preferences (1.4×) resist decay
88
+ - **Access boost**: Each reinforcement adds +2% importance
89
+
90
+ ```javascript
91
+ const report = await mem.decay({ dryRun: true }); // Preview what would be pruned
92
+ // { total: 100, healthy: 85, weakening: 10, archived: [...], deleted: [...] }
93
+
94
+ await mem.decay(); // Archive weak memories, delete dead ones
95
+ ```
96
+
97
+ ### ⚔️ Conflict Resolution
98
+
99
+ Detects contradictions and evolves memories over time (requires LLM):
100
+
101
+ ```javascript
102
+ const mem = createMemory({
103
+ embeddings: { type: 'openai', apiKey: KEY },
104
+ llm: { type: 'openai', apiKey: KEY, model: 'gpt-4.1-nano' },
24
105
  });
25
106
 
26
- // Store with auto-linking
27
- const result = await graph.addWithLinking('agent-1', 'Port 443 is open on target', 'finding', 0.8, ['recon']);
107
+ await mem.store('a', 'Server runs on port 3000');
108
+ await mem.evolve('a', 'Server now runs on port 8080');
109
+ // EVOLVED: "Server runs on port 3000" → "Server now runs on port 8080"
110
+ // Old version archived with evolution history
111
+ ```
112
+
113
+ ### 🌐 Multi-Agent
28
114
 
29
- // Semantic search
30
- const results = await graph.search('open ports', 'agent-1');
115
+ Native support for multiple agents with cross-agent search:
31
116
 
32
- // Evolve with conflict resolution
33
- await graph.evolve('agent-1', 'Port 443 is closed on target', 'finding', 0.9, ['recon']);
117
+ ```javascript
118
+ await mem.store('kuro', 'Found SQL injection in /api/users');
119
+ await mem.store('maki', 'Deployed fix for /api/users endpoint');
34
120
 
35
- // Graph health
36
- const health = await graph.healthReport();
121
+ const results = await mem.searchAll('api users security');
122
+ // Returns memories from both agents, ranked by relevance
37
123
  ```
38
124
 
39
- ## Providers
125
+ ## Graph Queries
126
+
127
+ ```javascript
128
+ // Multi-hop traversal
129
+ const graph = await mem.traverse(memoryId, 3); // Walk 3 hops from a memory
130
+
131
+ // Find memory clusters
132
+ const clusters = await mem.clusters(3); // Connected components with 3+ members
40
133
 
41
- ### Embedding
42
- - `NvidiaEmbeddingProvider` NVIDIA NIM (baai/bge-m3)
43
- - `OpenAIEmbeddingProvider` — OpenAI or compatible APIs
134
+ // Shortest path between memories
135
+ const path = await mem.path(idA, idB);
44
136
 
45
- ### Storage
46
- - `JsonFileStorage` local JSON files
47
- - `SupabaseStorage` — Supabase REST API
137
+ // Find disconnected memories
138
+ const orphans = await mem.orphans('kuro');
139
+
140
+ // Health report
141
+ const health = await mem.health();
142
+ // { total, byAgent, byCategory, avgStrength, distribution, orphans, ... }
143
+
144
+ // Timeline view
145
+ const timeline = await mem.timeline('kuro', 7); // Last 7 days
146
+
147
+ // Context generation (for RAG / prompt injection)
148
+ const ctx = await mem.context('kuro', 'database security');
149
+ // Returns formatted briefing with 1-hop expansion from top results
150
+ ```
48
151
 
49
- ### Extraction
50
- - `LLMExtractionProvider` — any OpenAI-compatible chat API
51
- - `PassthroughExtractionProvider` — no LLM, wraps text as-is
152
+ ## Configuration
52
153
 
53
- ## API
154
+ ```javascript
155
+ const mem = createMemory({
156
+ // Storage backend
157
+ storage: {
158
+ type: 'json', // 'json' (default) | 'memory' (ephemeral)
159
+ dir: './my-data', // Custom directory for JSON storage
160
+ },
161
+
162
+ // Embeddings (optional — keyword search works without)
163
+ embeddings: {
164
+ type: 'openai', // 'openai' (any compatible API) | 'noop' (keyword only)
165
+ apiKey: '...',
166
+ model: 'text-embedding-3-small',
167
+ baseUrl: 'https://api.openai.com/v1',
168
+ extraBody: {}, // Extra params (e.g. { input_type: 'passage' } for NIM)
169
+ },
170
+
171
+ // Fact extraction (optional — enables ingest())
172
+ extraction: {
173
+ type: 'llm', // 'llm' | 'passthrough'
174
+ apiKey: '...',
175
+ model: 'gpt-4.1-nano',
176
+ baseUrl: 'https://api.openai.com/v1',
177
+ },
178
+
179
+ // LLM for conflict resolution (optional — enables evolve())
180
+ llm: {
181
+ type: 'openai',
182
+ apiKey: '...',
183
+ model: 'gpt-4.1-nano',
184
+ baseUrl: 'https://api.openai.com/v1',
185
+ },
186
+
187
+ // Graph behavior
188
+ graph: {
189
+ linkThreshold: 0.5, // Min similarity for auto-linking (0-1)
190
+ maxLinksPerMemory: 5, // Max auto-links per new memory
191
+ decayHalfLifeDays: 30, // Decay half-life
192
+ archiveThreshold: 0.15, // Archive below this strength
193
+ deleteThreshold: 0.05, // Delete below this strength
194
+ },
195
+ });
196
+ ```
197
+
198
+ ## CLI
199
+
200
+ ```bash
201
+ npx @jeremiaheth/neolata-mem store agent-1 "User prefers dark mode"
202
+ npx @jeremiaheth/neolata-mem search agent-1 "UI preferences"
203
+ npx @jeremiaheth/neolata-mem health
204
+ npx @jeremiaheth/neolata-mem decay --dry-run
205
+ ```
206
+
207
+ Set `OPENAI_API_KEY` or `NVIDIA_API_KEY` for embedding support. Run `npx @jeremiaheth/neolata-mem` with no arguments to list all commands.
208
+
209
+ ## API Reference
210
+
211
+ ### `createMemory(opts?) → MemoryGraph`
212
+
213
+ Factory function. All options are optional — zero-config returns a working instance with JSON storage and keyword search.
214
+
215
+ ### Core Methods
216
+
217
+ | Method | Description |
218
+ |--------|-------------|
219
+ | `store(agent, text, opts?)` | Store with A-MEM auto-linking |
220
+ | `search(agent, query, opts?)` | Semantic/keyword search (single agent) |
221
+ | `searchAll(query, opts?)` | Cross-agent search |
222
+ | `evolve(agent, text, opts?)` | Store with conflict resolution |
223
+ | `ingest(agent, text, opts?)` | Bulk extract facts and store |
224
+ | `context(agent, query, opts?)` | Generate context briefing |
225
+
226
+ ### Graph Methods
54
227
 
55
228
  | Method | Description |
56
229
  |--------|-------------|
57
- | `addWithLinking(agent, memory, category, importance, tags)` | Store with A-MEM auto-linking |
58
- | `search(query, agent?, limit?)` | Semantic search |
59
- | `evolve(agent, memory, category, importance, tags)` | Store with conflict resolution |
60
- | `detectConflicts(agent, text)` | Check for contradictions |
61
- | `decay(dryRun?)` | Run decay cycle |
62
- | `reinforce(memoryId, boost?)` | Boost a memory |
63
- | `traverse(startId, maxHops?)` | Multi-hop graph walk |
64
- | `findClusters(minSize?)` | Detect connected components |
65
- | `findPath(idA, idB)` | Shortest path |
66
- | `findOrphans(agent?, maxLinks?)` | Unlinked memories |
67
- | `timeline(agent?, days?)` | Timeline view |
68
- | `healthReport()` | Full health report |
69
- | `generateContext(agent, query, maxMemories?)` | LLM-ready context block |
230
+ | `links(memoryId)` | Get memory and its connections |
231
+ | `traverse(startId, hops?)` | Multi-hop BFS walk |
232
+ | `clusters(minSize?)` | Find connected components |
233
+ | `path(idA, idB)` | Shortest path between memories |
234
+ | `orphans(agent?, maxLinks?)` | Find disconnected memories |
235
+
236
+ ### Lifecycle Methods
237
+
238
+ | Method | Description |
239
+ |--------|-------------|
240
+ | `decay(opts?)` | Run decay cycle (archive/delete weak memories) |
241
+ | `reinforce(memoryId, boost?)` | Boost memory importance |
242
+ | `health()` | Full health report |
243
+ | `timeline(agent?, days?)` | Date-grouped memory view |
244
+
245
+ ### Advanced: Bring Your Own Providers
246
+
247
+ ```javascript
248
+ import { MemoryGraph } from '@jeremiaheth/neolata-mem';
249
+
250
+ const graph = new MemoryGraph({
251
+ storage: myCustomStorage, // { load, save, loadArchive, saveArchive, genId }
252
+ embeddings: myCustomEmbedder, // { embed(texts) → number[][] }
253
+ extraction: myExtractor, // { extract(text) → Fact[] }
254
+ llm: myLLM, // { chat(prompt) → string }
255
+ config: { ... },
256
+ });
257
+ ```
258
+
259
+ ## How It Works
260
+
261
+ ```
262
+ Text → [Embed] → [Find Related] → [Link Bidirectionally] → [Store]
263
+
264
+ Existing memories
265
+ with embeddings
266
+
267
+ Conflict Detection:
268
+ New fact → [Embed] → [Find high-similarity] → [LLM: conflict/update/novel?]
269
+ → CONFLICT: archive old, store new
270
+ → UPDATE: modify existing in-place (with evolution history)
271
+ → NOVEL: normal A-MEM store
272
+
273
+ Decay Cycle:
274
+ For each memory:
275
+ strength = (importance × ageFactor × touchFactor × categoryWeight) + linkBonus + accessBonus
276
+ if strength < 0.05: delete
277
+ if strength < 0.15: archive
278
+ Clean up broken links
279
+ ```
280
+
281
+ ## Comparison
282
+
283
+ | Feature | neolata-mem | Mem0 | Letta | Zep |
284
+ |---------|------------|------|-------|-----|
285
+ | Zettelkasten linking | ✅ | ❌ | ❌ | ❌ |
286
+ | Biological decay | ✅ | ❌ | ❌ | ❌ |
287
+ | Graph traversal | ✅ | ❌ | ❌ | ✅ |
288
+ | Multi-agent native | ✅ | ❌ | ❌ | ❌ |
289
+ | Conflict resolution | ✅ | ✅ | ❌ | ❌ |
290
+ | Works offline | ✅ | ✅ | ✅ | ❌ |
291
+ | No Python needed | ✅ | ❌ | ❌ | ❌ |
292
+ | Zero-config start | ✅ | ❌ | ❌ | ❌ |
293
+ | LLM optional | ✅ | ❌ | ❌ | ❌ |
70
294
 
71
295
  ## License
72
296
 
73
- MIT
297
+ MIT — do whatever you want.
package/cli/index.mjs ADDED
@@ -0,0 +1,203 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * neolata-mem CLI
4
+ * Usage: npx @jeremiaheth/neolata-mem <command> [args]
5
+ */
6
+
7
+ import { createMemory } from '../src/index.mjs';
8
+
9
/**
 * Build a `createMemory()` options object from environment variables.
 *
 * Provider selection: `OPENAI_API_KEY` enables embeddings, LLM, and
 * extraction via OpenAI; otherwise `NVIDIA_API_KEY` enables NIM
 * embeddings only. `NEOLATA_STORAGE_DIR` overrides the storage path.
 *
 * @returns {object} Options for createMemory() ({} when no env vars are set).
 */
function parseEnvConfig() {
  const env = process.env;
  const config = {};

  if (env.OPENAI_API_KEY) {
    // One key drives all three LLM-backed providers.
    const apiKey = env.OPENAI_API_KEY;
    const llmModel = env.NEOLATA_LLM_MODEL || 'gpt-4.1-nano';
    config.embeddings = { type: 'openai', apiKey, model: env.NEOLATA_EMBED_MODEL || 'text-embedding-3-small' };
    config.llm = { type: 'openai', apiKey, model: llmModel };
    config.extraction = { type: 'llm', apiKey, model: llmModel };
  } else if (env.NVIDIA_API_KEY) {
    // NVIDIA NIM: embeddings only (no chat-based features from env).
    config.embeddings = {
      type: 'openai',
      apiKey: env.NVIDIA_API_KEY,
      model: env.NEOLATA_EMBED_MODEL || 'baai/bge-m3',
      baseUrl: 'https://integrate.api.nvidia.com/v1',
    };
  }

  if (env.NEOLATA_STORAGE_DIR) {
    config.storage = { type: 'json', dir: env.NEOLATA_STORAGE_DIR };
  }

  return config;
}
31
+
32
/**
 * CLI entry point: dispatch on the first argv token and run one command
 * against a memory graph configured from environment variables.
 *
 * Usage errors exit with code 1; an unknown/missing command prints help.
 */
async function main() {
  const [cmd, ...args] = process.argv.slice(2);
  const mem = createMemory(parseEnvConfig());

  switch (cmd) {
    case 'store': {
      const agent = args[0];
      const text = args[1];
      const tags = args.slice(2);
      if (!agent || !text) { console.error('Usage: neolata-mem store <agent> <text> [tags...]'); process.exit(1); }
      const result = await mem.store(agent, text, { tags });
      console.log(`✅ Stored: ${result.id} (${result.links} links, top: ${result.topLink})`);
      break;
    }

    case 'search': {
      const agent = args[0];
      const query = args.slice(1).join(' ');
      if (!agent || !query) { console.error('Usage: neolata-mem search <agent> <query>'); process.exit(1); }
      const results = await mem.search(agent, query);
      for (const r of results) {
        console.log(`[${r.score.toFixed(3)}] [${r.agent}/${r.category}] ${r.memory}`);
      }
      if (!results.length) console.log('No memories found.');
      break;
    }

    case 'search-all': {
      const query = args.join(' ');
      if (!query) { console.error('Usage: neolata-mem search-all <query>'); process.exit(1); }
      const results = await mem.searchAll(query);
      for (const r of results) {
        console.log(`[${r.score.toFixed(3)}] [${r.agent}/${r.category}] ${r.memory}`);
      }
      if (!results.length) console.log('No memories found.');
      break;
    }

    case 'evolve': {
      const agent = args[0];
      const text = args.slice(1).join(' ');
      if (!agent || !text) { console.error('Usage: neolata-mem evolve <agent> <text>'); process.exit(1); }
      const result = await mem.evolve(agent, text);
      for (const action of result.actions) {
        if (action.type === 'archived') console.log(` CONFLICT: Archived "${action.old?.slice(0, 60)}..." — ${action.reason}`);
        else if (action.type === 'updated') console.log(` EVOLVED: "${action.old?.slice(0, 50)}..." → "${action.new?.slice(0, 50)}..."`);
        else if (action.type === 'stored') console.log(` STORED: ${action.id} (${action.links} links)`);
      }
      break;
    }

    case 'links': {
      const memId = args[0];
      if (!memId) { console.error('Usage: neolata-mem links <memory-id>'); process.exit(1); }
      const data = await mem.links(memId);
      if (!data) { console.log('Memory not found.'); break; }
      console.log(`Memory: ${data.memory} (${data.agent})`);
      console.log(`Links (${data.links.length}):`);
      for (const l of data.links) {
        console.log(` [${(l.similarity * 100).toFixed(1)}%] [${l.agent}/${l.category}] ${l.memory}`);
      }
      break;
    }

    case 'traverse': {
      const memId = args[0];
      // Fix: always pass a radix to parseInt; NaN falls back to 2 hops.
      const hops = Number.parseInt(args[1], 10) || 2;
      if (!memId) { console.error('Usage: neolata-mem traverse <memory-id> [hops]'); process.exit(1); }
      const result = await mem.traverse(memId, hops);
      if (!result) { console.log('Memory not found.'); break; }
      console.log(`Traversal from: ${result.start.memory} (${result.start.agent})`);
      console.log(`Max hops: ${result.hops} | Reached: ${result.reached}\n`);
      let lastHop = -1;
      for (const node of result.nodes) {
        // Print a hop header each time the BFS depth changes.
        if (node.hop !== lastHop) { console.log(`--- Hop ${node.hop} ---`); lastHop = node.hop; }
        const sim = node.hop === 0 ? 'origin' : `${(node.similarity * 100).toFixed(1)}%`;
        console.log(` [${sim}] [${node.agent}/${node.category}] ${node.memory}`);
      }
      break;
    }

    case 'clusters': {
      // Fix: always pass a radix to parseInt; NaN falls back to 3.
      const minSize = Number.parseInt(args[0], 10) || 3;
      const clusters = await mem.clusters(minSize);
      console.log(`Found ${clusters.length} clusters (min size: ${minSize}):\n`);
      for (let i = 0; i < clusters.length; i++) {
        const c = clusters[i];
        console.log(`Cluster ${i + 1}: ${c.size} memories | Agents: ${JSON.stringify(c.agents)} | Tags: ${c.topTags.join(', ') || '(none)'}`);
        for (const m of c.memories.slice(0, 3)) {
          console.log(` [${m.agent}/${m.category}] ${m.memory.slice(0, 90)}`);
        }
        if (c.size > 3) console.log(` ... and ${c.size - 3} more`);
        console.log();
      }
      break;
    }

    case 'path': {
      const [idA, idB] = args;
      if (!idA || !idB) { console.error('Usage: neolata-mem path <id-a> <id-b>'); process.exit(1); }
      const result = await mem.path(idA, idB);
      if (!result || !result.found) { console.log('No path found.'); break; }
      console.log(`Path: ${result.hops} hops\n`);
      for (let i = 0; i < result.path.length; i++) {
        const p = result.path[i];
        const pre = i === 0 ? 'START' : i === result.path.length - 1 ? 'END ' : ` ${i} `;
        console.log(` ${pre} [${p.agent}/${p.category}] ${p.memory}`);
        if (i < result.path.length - 1) console.log(` |`);
      }
      break;
    }

    case 'decay': {
      const dryRun = args.includes('--dry-run');
      const report = await mem.decay({ dryRun });
      console.log(`Decay Report${dryRun ? ' (DRY RUN)' : ''}:`);
      console.log(` Total: ${report.total} | Healthy: ${report.healthy} | Weakening: ${report.weakening}`);
      console.log(` Archived: ${report.archived.length} | Deleted: ${report.deleted.length} | Links cleaned: ${report.linksClean}`);
      for (const a of report.archived) console.log(` [${a.strength}] [${a.agent}] ${a.memory}`);
      for (const d of report.deleted) console.log(` [${d.strength}] [${d.agent}] ${d.memory}`);
      break;
    }

    case 'health': {
      const r = await mem.health();
      console.log(`=== Memory Graph Health ===\n`);
      console.log(`Memories: ${r.total} active, ${r.archivedCount} archived`);
      console.log(`By agent: ${JSON.stringify(r.byAgent)}`);
      console.log(`By category: ${JSON.stringify(r.byCategory)}`);
      console.log(`Links: ${r.totalLinks} total, ${r.crossAgentLinks} cross-agent, avg ${r.avgLinksPerMemory}/memory`);
      console.log(`Orphans: ${r.orphans}`);
      console.log(`Age: avg ${r.avgAgeDays}d, max ${r.maxAgeDays}d`);
      console.log(`Avg strength: ${r.avgStrength}`);
      console.log(`Distribution: strong=${r.distribution.strong} healthy=${r.distribution.healthy} weakening=${r.distribution.weakening} critical=${r.distribution.critical} dead=${r.distribution.dead}`);
      break;
    }

    case 'context': {
      const agent = args[0];
      const query = args.slice(1).join(' ');
      if (!agent || !query) { console.error('Usage: neolata-mem context <agent> <query>'); process.exit(1); }
      const result = await mem.context(agent, query);
      console.log(result.context);
      break;
    }

    default:
      console.log(`neolata-mem — Graph-native memory for AI agents

Commands:
  store <agent> <text> [tags...]   Store with A-MEM auto-linking
  search <agent> <query>           Semantic search (single agent)
  search-all <query>               Cross-agent search
  evolve <agent> <text>            Store with conflict resolution
  links <memory-id>                Show memory connections
  traverse <memory-id> [hops]      Multi-hop graph walk
  clusters [min-size]              Find memory clusters
  path <id-a> <id-b>               Shortest path between memories
  decay [--dry-run]                Run memory decay cycle
  health                           Full health report
  context <agent> <query>          Generate context briefing

Environment:
  OPENAI_API_KEY       Enables embeddings + LLM features
  NVIDIA_API_KEY       Use NVIDIA NIM for embeddings
  NEOLATA_EMBED_MODEL  Override embedding model
  NEOLATA_LLM_MODEL    Override LLM model
  NEOLATA_STORAGE_DIR  Override storage directory`);
  }
}

// Fix: a non-Error rejection previously printed "undefined" (e.message).
main().catch((err) => {
  console.error(err instanceof Error ? err.message : err);
  process.exit(1);
});
package/package.json CHANGED
@@ -1,26 +1,40 @@
1
1
  {
2
2
  "name": "@jeremiaheth/neolata-mem",
3
- "version": "0.1.0",
4
- "description": "A-MEM style memory graph engine with semantic linking, decay, conflict resolution, and cross-agent traversal",
3
+ "version": "0.2.0",
4
+ "description": "Graph-native memory engine for AI agents with Zettelkasten linking, biological decay, and conflict resolution",
5
5
  "type": "module",
6
6
  "main": "src/index.mjs",
7
7
  "exports": {
8
8
  ".": "./src/index.mjs",
9
- "./providers/*": "./src/providers/*.mjs"
9
+ "./graph": "./src/graph.mjs",
10
+ "./embeddings": "./src/embeddings.mjs",
11
+ "./storage": "./src/storage.mjs",
12
+ "./extraction": "./src/extraction.mjs",
13
+ "./llm": "./src/llm.mjs"
14
+ },
15
+ "bin": {
16
+ "neolata-mem": "./cli/index.mjs"
10
17
  },
11
18
  "files": [
12
19
  "src/",
20
+ "cli/",
13
21
  "LICENSE",
14
22
  "README.md"
15
23
  ],
24
+ "scripts": {
25
+ "test": "node --test test/*.test.mjs"
26
+ },
16
27
  "keywords": [
17
- "memory-graph",
18
- "a-mem",
28
+ "ai",
29
+ "memory",
30
+ "agent",
19
31
  "zettelkasten",
20
- "semantic-search",
32
+ "graph",
21
33
  "embeddings",
22
- "knowledge-graph",
23
- "agent-memory",
34
+ "rag",
35
+ "llm",
36
+ "vector-search",
37
+ "multi-agent",
24
38
  "decay",
25
39
  "conflict-resolution"
26
40
  ],
@@ -0,0 +1,68 @@
1
+ /**
2
+ * Embedding provider interface and implementations.
3
+ * All providers must implement: embed(texts) → number[][]
4
+ */
5
+
6
/**
 * Cosine similarity between two vectors.
 *
 * Robustness fixes over the naive form:
 * - null/undefined inputs (e.g. the noop provider emits null embeddings)
 *   return 0 instead of throwing;
 * - mismatched lengths return 0 instead of NaN;
 * - a zero-norm vector returns 0 instead of NaN (division by zero).
 *
 * @param {number[]} a
 * @param {number[]} b
 * @returns {number} Similarity in [-1, 1], or 0 when undefined for the inputs.
 */
export function cosineSimilarity(a, b) {
  if (!a || !b || a.length !== b.length || a.length === 0) return 0;
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  const denom = Math.sqrt(normA) * Math.sqrt(normB);
  // Zero-magnitude vectors have no direction; treat as "no similarity".
  return denom === 0 ? 0 : dot / denom;
}
21
+
22
+ // ─── OpenAI-Compatible Provider ─────────────────────────────
23
+ /**
24
+ * Works with OpenAI, NVIDIA NIM, Ollama, Azure, any OpenAI-compatible API.
25
+ * @param {object} opts
26
+ * @param {string} opts.apiKey
27
+ * @param {string} opts.model - e.g. 'text-embedding-3-small', 'baai/bge-m3'
28
+ * @param {string} [opts.baseUrl='https://api.openai.com/v1'] - API base URL
29
+ * @param {object} [opts.extraBody] - Extra body params (e.g. { input_type: 'passage' })
30
+ * @param {number} [opts.retryMs=2000] - Retry delay on 429
31
+ */
32
+ export function openaiEmbeddings({ apiKey, model, baseUrl = 'https://api.openai.com/v1', extraBody = {}, retryMs = 2000 }) {
33
+ return {
34
+ name: `openai-compatible(${model})`,
35
+ model,
36
+ async embed(texts) {
37
+ const input = Array.isArray(texts) ? texts : [texts];
38
+ const res = await fetch(`${baseUrl}/embeddings`, {
39
+ method: 'POST',
40
+ headers: { 'Authorization': `Bearer ${apiKey}`, 'Content-Type': 'application/json' },
41
+ body: JSON.stringify({ model, input, ...extraBody }),
42
+ });
43
+ if (res.status === 429) {
44
+ await new Promise(r => setTimeout(r, retryMs));
45
+ return this.embed(texts);
46
+ }
47
+ if (!res.ok) throw new Error(`Embedding ${res.status}: ${await res.text()}`);
48
+ const data = await res.json();
49
+ return data.data.map(d => d.embedding);
50
+ },
51
+ };
52
+ }
53
+
54
+ // ─── Noop Provider (keyword-only mode) ──────────────────────
55
+ /**
56
+ * No-op embedding provider. Returns null embeddings.
57
+ * Use this when you don't want/need vector search — keyword matching still works.
58
+ */
59
+ export function noopEmbeddings() {
60
+ return {
61
+ name: 'noop',
62
+ model: null,
63
+ async embed(texts) {
64
+ const input = Array.isArray(texts) ? texts : [texts];
65
+ return input.map(() => null);
66
+ },
67
+ };
68
+ }