agentic-memory 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +446 -0
- package/dist/adapters/openai.d.mts +33 -0
- package/dist/adapters/openai.d.ts +33 -0
- package/dist/adapters/openai.js +85 -0
- package/dist/adapters/openai.mjs +60 -0
- package/dist/adapters/voyageai.d.mts +30 -0
- package/dist/adapters/voyageai.d.ts +30 -0
- package/dist/adapters/voyageai.js +64 -0
- package/dist/adapters/voyageai.mjs +39 -0
- package/dist/index.d.mts +246 -0
- package/dist/index.d.ts +246 -0
- package/dist/index.js +724 -0
- package/dist/index.mjs +689 -0
- package/dist/types-CQ8Hcoqw.d.mts +131 -0
- package/dist/types-CQ8Hcoqw.d.ts +131 -0
- package/package.json +76 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 genieincodebottle
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,446 @@
|
|
|
1
|
+
# agentic-memory
|
|
2
|
+
|
|
3
|
+
**The missing memory layer for AI agents.** Works with any LLM. Zero dependencies.
|
|
4
|
+
|
|
5
|
+
Available for both **TypeScript/JavaScript** and **Python**.
|
|
6
|
+
|
|
7
|
+
[](https://opensource.org/licenses/MIT)
|
|
8
|
+
[](./tests/)
|
|
9
|
+
[](./python/tests/)
|
|
10
|
+
|
|
11
|
+
---
|
|
12
|
+
|
|
13
|
+
## Why this exists
|
|
14
|
+
|
|
15
|
+
Every AI agent framework gives you tools, chains, and orchestration. None of them solve **memory** properly.
|
|
16
|
+
|
|
17
|
+
```
|
|
18
|
+
Without agentic-memory:
|
|
19
|
+
|
|
20
|
+
Session 1: "I'm vegetarian" --> stored somewhere
|
|
21
|
+
Session 5: "Find me a recipe" --> agent suggests steak
|
|
22
|
+
40-min workflow --> context fills up, loses all progress
|
|
23
|
+
Multi-agent system --> agents duplicate each other's work
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
```
|
|
27
|
+
With agentic-memory:
|
|
28
|
+
|
|
29
|
+
Session 1: "I'm vegetarian" --> stored as hard constraint (never decays)
|
|
30
|
+
Session 5: "Find me steak" --> conflict detected, agent asks to confirm
|
|
31
|
+
40-min workflow --> checkpoint saves state, resumes after overflow
|
|
32
|
+
Multi-agent system --> scoped memory, no cross-contamination
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## Where it fits
|
|
38
|
+
|
|
39
|
+
<p align="center">
|
|
40
|
+
<img src="assets/architecture.svg" alt="Architecture diagram" width="720"/>
|
|
41
|
+
</p>
|
|
42
|
+
|
|
43
|
+
---
|
|
44
|
+
|
|
45
|
+
## Install
|
|
46
|
+
|
|
47
|
+
```bash
|
|
48
|
+
# JavaScript / TypeScript
|
|
49
|
+
npm install agentic-memory
|
|
50
|
+
|
|
51
|
+
# Python
|
|
52
|
+
pip install agentic-memory
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
---
|
|
56
|
+
|
|
57
|
+
## End-to-end example: Build a memory-aware agent
|
|
58
|
+
|
|
59
|
+
This shows exactly how `agentic-memory` fits into a real agent loop - from storing user context, to retrieving it before LLM calls, to catching dangerous contradictions.
|
|
60
|
+
|
|
61
|
+
### TypeScript
|
|
62
|
+
|
|
63
|
+
```typescript
|
|
64
|
+
import { AgentMemory } from 'agentic-memory';
|
|
65
|
+
|
|
66
|
+
const memory = new AgentMemory();
|
|
67
|
+
|
|
68
|
+
// ── Session 1: User onboarding ──
|
|
69
|
+
// Store facts the agent learns about the user
|
|
70
|
+
await memory.store({
|
|
71
|
+
content: 'User is vegetarian',
|
|
72
|
+
type: 'preference',
|
|
73
|
+
scope: 'user:42',
|
|
74
|
+
importance: 'hard', // hard = never forgets (like allergies)
|
|
75
|
+
});
|
|
76
|
+
|
|
77
|
+
await memory.store({
|
|
78
|
+
content: 'User prefers quick 30-minute recipes',
|
|
79
|
+
type: 'preference',
|
|
80
|
+
scope: 'user:42',
|
|
81
|
+
importance: 'soft', // soft = fades over time if not reinforced
|
|
82
|
+
});
|
|
83
|
+
|
|
84
|
+
// ── Session 2: User asks for help ──
|
|
85
|
+
// STEP 1: Retrieve relevant memories BEFORE calling the LLM
|
|
86
|
+
const context = await memory.retrieve({
|
|
87
|
+
query: 'recipe suggestions',
|
|
88
|
+
scope: 'user:42',
|
|
89
|
+
signals: ['similarity', 'recency', 'importance'],
|
|
90
|
+
limit: 5,
|
|
91
|
+
});
|
|
92
|
+
// context[0].entry.content = "User is vegetarian"
|
|
93
|
+
// context[1].entry.content = "User prefers quick 30-minute recipes"
|
|
94
|
+
|
|
95
|
+
// STEP 2: Inject memories into your LLM prompt
|
|
96
|
+
const prompt = `
|
|
97
|
+
User context: ${context.map(r => r.entry.content).join('. ')}
|
|
98
|
+
User message: Suggest a recipe for dinner tonight.
|
|
99
|
+
`;
|
|
100
|
+
// LLM now knows: vegetarian + quick recipes
|
|
101
|
+
// Response: "Here's a 25-minute mushroom risotto..."
|
|
102
|
+
|
|
103
|
+
// ── Session 5: Catch contradictions ──
|
|
104
|
+
// User (or another agent) tries something that conflicts
|
|
105
|
+
const conflicts = await memory.checkConflicts(
|
|
106
|
+
'Order a steak dinner for the user',
|
|
107
|
+
'user:42',
|
|
108
|
+
);
|
|
109
|
+
|
|
110
|
+
if (conflicts.length > 0) {
|
|
111
|
+
console.log(conflicts[0].action); // 'clarify'
|
|
112
|
+
console.log(conflicts[0].reason);
|
|
113
|
+
// "Conflicts with a hard constraint: 'User is vegetarian'.
|
|
114
|
+
// This memory was marked as non-negotiable - please confirm the change."
|
|
115
|
+
|
|
116
|
+
// Agent should ASK the user, not silently override
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
// ── Long workflow: Survive context overflow ──
|
|
120
|
+
const cp = await memory.checkpoint({
|
|
121
|
+
taskGraph: [
|
|
122
|
+
{ id: 't1', description: 'Scraped 500 recipes', status: 'done', dependencies: [], result: recipes },
|
|
123
|
+
{ id: 't2', description: 'Filtering by diet', status: 'in_progress', dependencies: ['t1'] },
|
|
124
|
+
{ id: 't3', description: 'Rank by prep time', status: 'pending', dependencies: ['t2'] },
|
|
125
|
+
],
|
|
126
|
+
summary: 'Scraped 500 recipes, filtering for vegetarian, 200 remaining',
|
|
127
|
+
toolOutputs: { scraped: recipes },
|
|
128
|
+
activeMemoryIds: context.map(r => r.entry.id),
|
|
129
|
+
});
|
|
130
|
+
|
|
131
|
+
// ... context window fills up and resets ...
|
|
132
|
+
|
|
133
|
+
// Resume exactly where you left off
|
|
134
|
+
const restored = await memory.rehydrate(cp.id);
|
|
135
|
+
// restored.checkpoint.taskGraph[1].status = 'in_progress'
|
|
136
|
+
// restored.memories = [vegetarian preference, quick recipe preference]
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
### Python
|
|
140
|
+
|
|
141
|
+
```python
|
|
142
|
+
import asyncio
|
|
143
|
+
from agentic_memory import AgentMemory, MemoryType, ImportanceLevel, RetrievalQuery
|
|
144
|
+
|
|
145
|
+
async def main():
|
|
146
|
+
memory = AgentMemory()
|
|
147
|
+
|
|
148
|
+
# Session 1: Store user preferences
|
|
149
|
+
await memory.store(
|
|
150
|
+
content="User is vegetarian",
|
|
151
|
+
type=MemoryType.PREFERENCE,
|
|
152
|
+
scope="user:42",
|
|
153
|
+
importance=ImportanceLevel.HARD,
|
|
154
|
+
)
|
|
155
|
+
|
|
156
|
+
# Session 2: Retrieve context before LLM call
|
|
157
|
+
context = await memory.retrieve(RetrievalQuery(
|
|
158
|
+
query="recipe suggestions",
|
|
159
|
+
scope="user:42",
|
|
160
|
+
signals=["similarity", "recency", "importance"],
|
|
161
|
+
limit=5,
|
|
162
|
+
))
|
|
163
|
+
|
|
164
|
+
# Inject into prompt
|
|
165
|
+
memory_context = ". ".join(r.entry.content for r in context)
|
|
166
|
+
prompt = f"User context: {memory_context}\nSuggest a recipe."
|
|
167
|
+
|
|
168
|
+
# Catch contradictions
|
|
169
|
+
conflicts = await memory.check_conflicts(
|
|
170
|
+
"Order a steak dinner for the user", "user:42"
|
|
171
|
+
)
|
|
172
|
+
if conflicts:
|
|
173
|
+
print(f"Action: {conflicts[0].action.value}") # 'clarify'
|
|
174
|
+
print(f"Reason: {conflicts[0].reason}")
|
|
175
|
+
|
|
176
|
+
asyncio.run(main())
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
**No config, no database, no API keys.** Just `import` and go.
|
|
180
|
+
|
|
181
|
+
---
|
|
182
|
+
|
|
183
|
+
## What it solves
|
|
184
|
+
|
|
185
|
+
### 1. Smarter retrieval (not just similarity search)
|
|
186
|
+
|
|
187
|
+
RAG gives you "topically related" results. Agents need more than that.
|
|
188
|
+
|
|
189
|
+
<p align="center">
|
|
190
|
+
<img src="assets/retrieval-signals.svg" alt="Multi-signal retrieval" width="640"/>
|
|
191
|
+
</p>
|
|
192
|
+
|
|
193
|
+
```typescript
|
|
194
|
+
const results = await memory.retrieve({
|
|
195
|
+
query: 'programming preferences',
|
|
196
|
+
taskContext: 'Building a REST API', // boosts API-related memories
|
|
197
|
+
signals: ['similarity', 'recency', 'importance', 'taskRelevance'],
|
|
198
|
+
limit: 5,
|
|
199
|
+
});
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
### 2. Conflict detection (catch contradictions before they ship)
|
|
203
|
+
|
|
204
|
+
<p align="center">
|
|
205
|
+
<img src="assets/conflict-detection.svg" alt="Conflict detection flow" width="640"/>
|
|
206
|
+
</p>
|
|
207
|
+
|
|
208
|
+
```typescript
|
|
209
|
+
const conflicts = await memory.checkConflicts('Order a steak dinner', 'user:123');
|
|
210
|
+
// Returns:
|
|
211
|
+
// {
|
|
212
|
+
// confidence: 0.85,
|
|
213
|
+
// action: 'clarify',
|
|
214
|
+
// reason: 'Conflicts with hard constraint: "User is vegetarian"'
|
|
215
|
+
// }
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
Uses negation detection, antonym matching, and change-language detection - not just keyword overlap.
|
|
219
|
+
|
|
220
|
+
### 3. Typed decay (a peanut allergy != a favorite color)
|
|
221
|
+
|
|
222
|
+
<p align="center">
|
|
223
|
+
<img src="assets/typed-decay.svg" alt="Typed decay curves" width="640"/>
|
|
224
|
+
</p>
|
|
225
|
+
|
|
226
|
+
```typescript
|
|
227
|
+
// This will still be there in 5 years
|
|
228
|
+
await memory.store({
|
|
229
|
+
content: 'User has peanut allergy',
|
|
230
|
+
type: 'constraint',
|
|
231
|
+
importance: 'hard',
|
|
232
|
+
});
|
|
233
|
+
|
|
234
|
+
// This expires after 7 days
|
|
235
|
+
await memory.store({
|
|
236
|
+
content: 'Currently debugging auth module',
|
|
237
|
+
type: 'task',
|
|
238
|
+
importance: 'ephemeral',
|
|
239
|
+
});
|
|
240
|
+
|
|
241
|
+
// Periodic cleanup
|
|
242
|
+
const deletedCount = await memory.cleanup('user:123');
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
### 4. Checkpointing (survive context overflow)
|
|
246
|
+
|
|
247
|
+
A 40-minute agent workflow shouldn't lose everything when context fills up.
|
|
248
|
+
|
|
249
|
+
<p align="center">
|
|
250
|
+
<img src="assets/checkpoint-flow.svg" alt="Checkpoint and rehydrate flow" width="640"/>
|
|
251
|
+
</p>
|
|
252
|
+
|
|
253
|
+
```typescript
|
|
254
|
+
// Save state before overflow
|
|
255
|
+
const cp = await memory.checkpoint({
|
|
256
|
+
taskGraph: [
|
|
257
|
+
{ id: 's1', description: 'Fetch data', status: 'done', result: data, dependencies: [] },
|
|
258
|
+
{ id: 's2', description: 'Process', status: 'in_progress', dependencies: ['s1'] },
|
|
259
|
+
{ id: 's3', description: 'Report', status: 'pending', dependencies: ['s2'] },
|
|
260
|
+
],
|
|
261
|
+
summary: 'Fetched 1000 records, processing row 450/1000',
|
|
262
|
+
toolOutputs: { api_response: apiData },
|
|
263
|
+
activeMemoryIds: ['mem_abc', 'mem_def'],
|
|
264
|
+
});
|
|
265
|
+
|
|
266
|
+
// After context reset - pick up where you left off
|
|
267
|
+
const { checkpoint, memories } = await memory.rehydrate(cp.id);
|
|
268
|
+
```
|
|
269
|
+
|
|
270
|
+
### 5. Scope isolation (multi-agent without the chaos)
|
|
271
|
+
|
|
272
|
+
```typescript
|
|
273
|
+
// Each agent gets its own namespace
|
|
274
|
+
await memory.store({ content: 'Plan: 3 steps', scope: 'agent:planner' });
|
|
275
|
+
await memory.store({ content: 'API returned 200', scope: 'agent:executor' });
|
|
276
|
+
|
|
277
|
+
// No cross-contamination
|
|
278
|
+
const plannerOnly = await memory.getAll('agent:planner');
|
|
279
|
+
|
|
280
|
+
// User memories are separate too
|
|
281
|
+
await memory.store({ content: 'Prefers Python', scope: 'user:alice' });
|
|
282
|
+
await memory.store({ content: 'Prefers Rust', scope: 'user:bob' });
|
|
283
|
+
```
|
|
284
|
+
|
|
285
|
+
---
|
|
286
|
+
|
|
287
|
+
## Embedder adapters (production-ready)
|
|
288
|
+
|
|
289
|
+
The built-in embedder (TF-IDF) works for dev/testing. For production, use the included adapters:
|
|
290
|
+
|
|
291
|
+
### TypeScript
|
|
292
|
+
|
|
293
|
+
```typescript
|
|
294
|
+
import { AgentMemory } from 'agentic-memory';
|
|
295
|
+
import { OpenAIEmbedder } from 'agentic-memory/adapters/openai';
|
|
296
|
+
import { VoyageEmbedder } from 'agentic-memory/adapters/voyageai';
|
|
297
|
+
|
|
298
|
+
// OpenAI (most popular)
|
|
299
|
+
const memory = new AgentMemory({
|
|
300
|
+
embedder: new OpenAIEmbedder({
|
|
301
|
+
apiKey: process.env.OPENAI_API_KEY!,
|
|
302
|
+
model: 'text-embedding-3-small', // default
|
|
303
|
+
dimensions: 512, // smaller = faster + cheaper
|
|
304
|
+
}),
|
|
305
|
+
});
|
|
306
|
+
|
|
307
|
+
// Voyage AI (higher quality, cheaper)
|
|
308
|
+
const memory2 = new AgentMemory({
|
|
309
|
+
embedder: new VoyageEmbedder({
|
|
310
|
+
apiKey: process.env.VOYAGE_API_KEY!,
|
|
311
|
+
model: 'voyage-3-lite',
|
|
312
|
+
}),
|
|
313
|
+
});
|
|
314
|
+
```
|
|
315
|
+
|
|
316
|
+
### Python
|
|
317
|
+
|
|
318
|
+
```python
|
|
319
|
+
from agentic_memory import AgentMemory
|
|
320
|
+
from agentic_memory.adapters import OpenAIEmbedder, VoyageEmbedder
|
|
321
|
+
|
|
322
|
+
# OpenAI
|
|
323
|
+
memory = AgentMemory(
|
|
324
|
+
embedder=OpenAIEmbedder(api_key="sk-...", dim=512)
|
|
325
|
+
)
|
|
326
|
+
|
|
327
|
+
# Voyage AI
|
|
328
|
+
memory = AgentMemory(
|
|
329
|
+
embedder=VoyageEmbedder(api_key="voyage-...")
|
|
330
|
+
)
|
|
331
|
+
```
|
|
332
|
+
|
|
333
|
+
Or bring your own - just implement `embed()`, `embedBatch()` (`embed_batch()` in Python), and `dimensions()`.
|
|
334
|
+
|
|
335
|
+
---
|
|
336
|
+
|
|
337
|
+
## Storage backends
|
|
338
|
+
|
|
339
|
+
```typescript
|
|
340
|
+
import { AgentMemory, FileStore } from 'agentic-memory';
|
|
341
|
+
|
|
342
|
+
// Default: in-memory (dev/testing)
|
|
343
|
+
const memory = new AgentMemory();
|
|
344
|
+
|
|
345
|
+
// File-based (persists across restarts)
|
|
346
|
+
const fileMemory = new AgentMemory({
|
|
347
|
+
store: new FileStore('./agent-memory.json'),
|
|
348
|
+
});
|
|
349
|
+
|
|
350
|
+
// Custom backend (Redis, Postgres, etc.)
|
|
351
|
+
const customMemory = new AgentMemory({ store: myCustomBackend });
|
|
352
|
+
```
|
|
353
|
+
|
|
354
|
+
Implement the `StorageBackend` interface for any database:
|
|
355
|
+
|
|
356
|
+
| Method | Signature |
|
|
357
|
+
|--------|-----------|
|
|
358
|
+
| `get` | `(id: string) => Promise<MemoryEntry \| null>` |
|
|
359
|
+
| `getAll` | `(scope?: string) => Promise<MemoryEntry[]>` |
|
|
360
|
+
| `set` | `(entry: MemoryEntry) => Promise<void>` |
|
|
361
|
+
| `delete` | `(id: string) => Promise<boolean>` |
|
|
362
|
+
| `clear` | `(scope?: string) => Promise<void>` |
|
|
363
|
+
| `search` | `(query: RetrievalQuery) => Promise<MemoryEntry[]>` |
|
|
364
|
+
|
|
365
|
+
---
|
|
366
|
+
|
|
367
|
+
## API at a glance
|
|
368
|
+
|
|
369
|
+
```typescript
|
|
370
|
+
const memory = new AgentMemory(config?)
|
|
371
|
+
|
|
372
|
+
// CRUD
|
|
373
|
+
await memory.store({ content, type?, scope?, importance?, confidence?, metadata? })
|
|
374
|
+
await memory.get(id)
|
|
375
|
+
await memory.update(id, { content?, type?, importance?, confidence?, metadata? })
|
|
376
|
+
await memory.delete(id)
|
|
377
|
+
await memory.getAll(scope?)
|
|
378
|
+
await memory.clear(scope?)
|
|
379
|
+
|
|
380
|
+
// Intelligence
|
|
381
|
+
await memory.retrieve({ query, taskContext?, scope?, types?, signals?, limit?, threshold? })
|
|
382
|
+
await memory.checkConflicts(content, scope?)
|
|
383
|
+
memory.getDecayedConfidence(entry)
|
|
384
|
+
await memory.cleanup(scope?, threshold?)
|
|
385
|
+
|
|
386
|
+
// Checkpointing
|
|
387
|
+
await memory.checkpoint({ taskGraph, summary, toolOutputs?, activeMemoryIds? })
|
|
388
|
+
await memory.rehydrate(checkpointId)
|
|
389
|
+
memory.getLatestCheckpoint()
|
|
390
|
+
memory.listCheckpoints()
|
|
391
|
+
```
|
|
392
|
+
|
|
393
|
+
---
|
|
394
|
+
|
|
395
|
+
## Runnable examples
|
|
396
|
+
|
|
397
|
+
```bash
|
|
398
|
+
# No API key needed - shows checkpoint/rehydrate flow
|
|
399
|
+
npx tsx examples/checkpoint-recovery.ts
|
|
400
|
+
|
|
401
|
+
# Full demo with OpenAI embeddings + LLM
|
|
402
|
+
export OPENAI_API_KEY=sk-...
|
|
403
|
+
npx tsx examples/with-openai.ts
|
|
404
|
+
|
|
405
|
+
# Full demo with Claude + OpenAI embeddings
|
|
406
|
+
export OPENAI_API_KEY=sk-...
|
|
407
|
+
export ANTHROPIC_API_KEY=sk-ant-...
|
|
408
|
+
npx tsx examples/with-anthropic.ts
|
|
409
|
+
|
|
410
|
+
# Python
|
|
411
|
+
export OPENAI_API_KEY=sk-...
|
|
412
|
+
cd python && python examples/with_openai.py
|
|
413
|
+
```
|
|
414
|
+
|
|
415
|
+
---
|
|
416
|
+
|
|
417
|
+
## Roadmap
|
|
418
|
+
|
|
419
|
+
- [x] **v0.1** - Core memory, multi-signal retrieval, conflict detection, typed decay, checkpointing
|
|
420
|
+
- [x] **v0.1** - Python port with full test parity
|
|
421
|
+
- [ ] **v0.2** - Redis and Postgres storage backends
|
|
422
|
+
- [ ] **v0.3** - Multi-agent coordination (SharedMemory, TaskRegistry, Plan Store)
|
|
423
|
+
- [ ] **v0.4** - Audit/attribution layer, GDPR cascade delete
|
|
424
|
+
- [ ] **v1.0** - Framework adapters (LangChain, Vercel AI SDK, Claude Agent SDK)
|
|
425
|
+
|
|
426
|
+
---
|
|
427
|
+
|
|
428
|
+
## Contributing
|
|
429
|
+
|
|
430
|
+
PRs welcome. Run tests before submitting:
|
|
431
|
+
|
|
432
|
+
```bash
|
|
433
|
+
# TypeScript
|
|
434
|
+
npm test # 72 tests
|
|
435
|
+
|
|
436
|
+
# Python
|
|
437
|
+
cd python
|
|
438
|
+
pip install -e ".[dev]"
|
|
439
|
+
pytest tests/ -v # 62 tests
|
|
440
|
+
```
|
|
441
|
+
|
|
442
|
+
---
|
|
443
|
+
|
|
444
|
+
## License
|
|
445
|
+
|
|
446
|
+
MIT
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import { E as Embedder } from '../types-CQ8Hcoqw.mjs';

/**
 * OpenAI embedder adapter.
 * Implemented with the built-in fetch API - no `openai` package dependency
 * is needed at runtime.
 *
 * Usage:
 * import { AgentMemory } from 'agentic-memory';
 * import { OpenAIEmbedder } from 'agentic-memory/adapters/openai';
 *
 * const memory = new AgentMemory({
 *   embedder: new OpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY }),
 * });
 */

/** Constructor options for {@link OpenAIEmbedder}. */
interface OpenAIEmbedderConfig {
  /** OpenAI API key (required). */
  apiKey: string;
  /** Embedding model id; the implementation defaults to "text-embedding-3-small". */
  model?: string;
  /** Requested vector size; the implementation defaults to 1536. */
  dimensions?: number;
  /** API root; the implementation defaults to "https://api.openai.com/v1". */
  baseURL?: string;
}
/** Embedder backed by OpenAI's POST /v1/embeddings endpoint. */
declare class OpenAIEmbedder implements Embedder {
  private apiKey;
  private model;
  private dim;
  private baseURL;
  constructor(config: OpenAIEmbedderConfig);
  /** Dimensionality of the vectors this embedder produces. */
  dimensions(): number;
  /** Embed one string into a single vector. */
  embed(text: string): Promise<number[]>;
  /** Embed many strings in one request; result order matches input order. */
  embedBatch(texts: string[]): Promise<number[][]>;
}

export { OpenAIEmbedder, type OpenAIEmbedderConfig };
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import { E as Embedder } from '../types-CQ8Hcoqw.js';

/**
 * OpenAI embedder adapter.
 * Implemented with the built-in fetch API - no `openai` package dependency
 * is needed at runtime.
 *
 * Usage:
 * import { AgentMemory } from 'agentic-memory';
 * import { OpenAIEmbedder } from 'agentic-memory/adapters/openai';
 *
 * const memory = new AgentMemory({
 *   embedder: new OpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY }),
 * });
 */

/** Constructor options for {@link OpenAIEmbedder}. */
interface OpenAIEmbedderConfig {
  /** OpenAI API key (required). */
  apiKey: string;
  /** Embedding model id; the implementation defaults to "text-embedding-3-small". */
  model?: string;
  /** Requested vector size; the implementation defaults to 1536. */
  dimensions?: number;
  /** API root; the implementation defaults to "https://api.openai.com/v1". */
  baseURL?: string;
}
/** Embedder backed by OpenAI's POST /v1/embeddings endpoint. */
declare class OpenAIEmbedder implements Embedder {
  private apiKey;
  private model;
  private dim;
  private baseURL;
  constructor(config: OpenAIEmbedderConfig);
  /** Dimensionality of the vectors this embedder produces. */
  dimensions(): number;
  /** Embed one string into a single vector. */
  embed(text: string): Promise<number[]>;
  /** Embed many strings in one request; result order matches input order. */
  embedBatch(texts: string[]): Promise<number[][]>;
}

export { OpenAIEmbedder, type OpenAIEmbedderConfig };
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/adapters/openai.ts
|
|
21
|
+
var openai_exports = {};
|
|
22
|
+
__export(openai_exports, {
|
|
23
|
+
OpenAIEmbedder: () => OpenAIEmbedder
|
|
24
|
+
});
|
|
25
|
+
module.exports = __toCommonJS(openai_exports);
|
|
26
|
+
var OpenAIEmbedder = class {
  apiKey;
  model;
  dim;
  baseURL;
  /**
   * Embedder backed by OpenAI's POST /v1/embeddings endpoint (built-in fetch,
   * no `openai` package needed).
   * @param {object} config
   * @param {string} config.apiKey - OpenAI API key (required).
   * @param {string} [config.model] - Embedding model; defaults to "text-embedding-3-small".
   * @param {number} [config.dimensions] - Requested vector size; defaults to 1536.
   * @param {string} [config.baseURL] - API root; defaults to the public OpenAI endpoint.
   */
  constructor(config) {
    this.apiKey = config.apiKey;
    this.model = config.model ?? "text-embedding-3-small";
    this.dim = config.dimensions ?? 1536;
    this.baseURL = config.baseURL ?? "https://api.openai.com/v1";
  }
  /** Dimensionality of the vectors this embedder produces. */
  dimensions() {
    return this.dim;
  }
  /**
   * POST `input` (a string or array of strings) to the embeddings endpoint
   * and return the parsed response body. Shared by embed()/embedBatch() so
   * the request/auth/error handling exists in exactly one place.
   * @throws {Error} when the HTTP response is not 2xx.
   */
  async #postEmbeddings(input) {
    const res = await fetch(`${this.baseURL}/embeddings`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${this.apiKey}`
      },
      body: JSON.stringify({
        model: this.model,
        input,
        dimensions: this.dim
      })
    });
    if (!res.ok) {
      const err = await res.text();
      throw new Error(`OpenAI embedding failed (${res.status}): ${err}`);
    }
    return res.json();
  }
  /** Embed one string; returns a single vector of length dimensions(). */
  async embed(text) {
    const data = await this.#postEmbeddings(text);
    return data.data[0].embedding;
  }
  /**
   * Embed many strings in one request. The API may return items out of
   * order, so results are sorted by index to match the input order.
   * Returns [] without a network call for an empty input.
   */
  async embedBatch(texts) {
    if (texts.length === 0) return [];
    const data = await this.#postEmbeddings(texts);
    return data.data.sort((a, b) => a.index - b.index).map((d) => d.embedding);
  }
};
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  OpenAIEmbedder
});
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
// src/adapters/openai.ts
var OpenAIEmbedder = class {
  apiKey;
  model;
  dim;
  baseURL;
  /**
   * Embedder that talks to OpenAI's /v1/embeddings endpoint via fetch.
   * Only `apiKey` is required; model, dimensions and baseURL fall back to
   * "text-embedding-3-small", 1536, and the public OpenAI API root.
   */
  constructor(config) {
    const { apiKey, model, dimensions, baseURL } = config;
    this.apiKey = apiKey;
    this.model = model ?? "text-embedding-3-small";
    this.dim = dimensions ?? 1536;
    this.baseURL = baseURL ?? "https://api.openai.com/v1";
  }
  /** Size of the vectors this embedder returns. */
  dimensions() {
    return this.dim;
  }
  /** Embed a single string into one vector. Throws on a non-2xx response. */
  async embed(text) {
    const response = await fetch(`${this.baseURL}/embeddings`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${this.apiKey}`
      },
      body: JSON.stringify({ model: this.model, input: text, dimensions: this.dim })
    });
    if (!response.ok) {
      const detail = await response.text();
      throw new Error(`OpenAI embedding failed (${response.status}): ${detail}`);
    }
    const payload = await response.json();
    return payload.data[0].embedding;
  }
  /**
   * Embed many strings in a single request. Results are re-sorted by index
   * so the output order always matches the input order. An empty input
   * short-circuits to [] without touching the network.
   */
  async embedBatch(texts) {
    if (texts.length === 0) return [];
    const response = await fetch(`${this.baseURL}/embeddings`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${this.apiKey}`
      },
      body: JSON.stringify({ model: this.model, input: texts, dimensions: this.dim })
    });
    if (!response.ok) {
      const detail = await response.text();
      throw new Error(`OpenAI embedding failed (${response.status}): ${detail}`);
    }
    const payload = await response.json();
    const ordered = payload.data.sort((left, right) => left.index - right.index);
    return ordered.map((item) => item.embedding);
  }
};
export {
  OpenAIEmbedder
};
|