@memorylayerai/sdk 0.1.1 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +635 -17
- package/dist/index.cjs +198 -0
- package/dist/index.d.cts +280 -1
- package/dist/index.d.ts +280 -1
- package/dist/index.js +197 -0
- package/package.json +1 -1
- package/src/client.ts +12 -0
- package/src/index.ts +15 -0
- package/src/resources/graph.ts +212 -0
- package/src/types.ts +179 -0
- package/tests/graph.test.ts +260 -0
- package/tests/graph.unit.test.ts +513 -0
package/README.md
CHANGED
|
@@ -1,6 +1,18 @@
|
|
|
1
|
-
#
|
|
1
|
+
# MemoryLayer Node.js SDK
|
|
2
2
|
|
|
3
|
-
Official Node.js/TypeScript SDK for MemoryLayer -
|
|
3
|
+
Official Node.js/TypeScript SDK for [MemoryLayer](https://memorylayer.com) - The intelligent memory layer for AI applications.
|
|
4
|
+
|
|
5
|
+
[![npm version](https://img.shields.io/npm/v/@memorylayerai/sdk.svg)](https://www.npmjs.com/package/@memorylayerai/sdk)
|
|
6
|
+
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
|
|
7
|
+
|
|
8
|
+
## Features
|
|
9
|
+
|
|
10
|
+
- 🧠 **Memory Management**: Store, retrieve, and manage AI memories
|
|
11
|
+
- 🔍 **Hybrid Search**: Vector + keyword + graph-based retrieval
|
|
12
|
+
- 🕸️ **Memory Graph**: Visualize and traverse memory relationships
|
|
13
|
+
- 🎯 **Smart Retrieval**: LLM reranking and query rewriting
|
|
14
|
+
- 📊 **Observability**: Track performance and quality metrics
|
|
15
|
+
- 🔒 **Type-Safe**: Full TypeScript support with auto-completion
|
|
4
16
|
|
|
5
17
|
## Installation
|
|
6
18
|
|
|
@@ -8,35 +20,641 @@ Official Node.js/TypeScript SDK for MemoryLayer - Add memory capabilities to you
|
|
|
8
20
|
npm install @memorylayerai/sdk
|
|
9
21
|
```
|
|
10
22
|
|
|
23
|
+
or with yarn:
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
yarn add @memorylayerai/sdk
|
|
27
|
+
```
|
|
28
|
+
|
|
11
29
|
## Quick Start
|
|
12
30
|
|
|
31
|
+
### Option 1: Transparent Router (Zero Code Changes) ⚡
|
|
32
|
+
|
|
33
|
+
The easiest way to add memory - just change your OpenAI baseURL:
|
|
34
|
+
|
|
35
|
+
```typescript
|
|
36
|
+
import OpenAI from 'openai';
|
|
37
|
+
|
|
38
|
+
const openai = new OpenAI({
|
|
39
|
+
baseURL: 'https://api.memorylayer.ai/v1', // ← Just change this
|
|
40
|
+
apiKey: 'ml_your_memorylayer_key' // ← Use your MemoryLayer key
|
|
41
|
+
});
|
|
42
|
+
|
|
43
|
+
// That's it! Memory is automatically injected
|
|
44
|
+
const response = await openai.chat.completions.create({
|
|
45
|
+
model: 'gpt-4',
|
|
46
|
+
messages: [{ role: 'user', content: 'What are my preferences?' }]
|
|
47
|
+
});
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
**Benefits:**
|
|
51
|
+
- ✅ Zero code changes to your application
|
|
52
|
+
- ✅ Automatic memory injection
|
|
53
|
+
- ✅ Works with existing OpenAI SDK code
|
|
54
|
+
- ✅ Configurable via headers
|
|
55
|
+
|
|
56
|
+
See [Transparent Router Guide](#transparent-router) for details.
|
|
57
|
+
|
|
58
|
+
### Option 2: Manual Integration (Full Control)
|
|
59
|
+
|
|
60
|
+
For more control over memory retrieval and injection:
|
|
61
|
+
|
|
62
|
+
#### 1. Get Your API Key
|
|
63
|
+
|
|
64
|
+
Sign up at [memorylayer.com](https://memorylayer.com) and create an API key from your project settings.
|
|
65
|
+
|
|
66
|
+
#### 2. Initialize the Client
|
|
67
|
+
|
|
68
|
+
```typescript
|
|
69
|
+
import { MemoryLayer } from '@memorylayerai/sdk';
|
|
70
|
+
|
|
71
|
+
const client = new MemoryLayer({
|
|
72
|
+
apiKey: 'ml_key_...',
|
|
73
|
+
// Optional: specify custom base URL
|
|
74
|
+
// baseUrl: 'https://api.memorylayer.com'
|
|
75
|
+
});
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
#### 3. Create Memories
|
|
79
|
+
|
|
80
|
+
```typescript
|
|
81
|
+
// Create a single memory
|
|
82
|
+
const memory = await client.memories.create({
|
|
83
|
+
projectId: 'your-project-id',
|
|
84
|
+
content: 'The user prefers dark mode in their applications',
|
|
85
|
+
type: 'preference',
|
|
86
|
+
tags: {
|
|
87
|
+
category: 'ui',
|
|
88
|
+
importance: 'high'
|
|
89
|
+
}
|
|
90
|
+
});
|
|
91
|
+
|
|
92
|
+
console.log('Memory created:', memory.id);
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
#### 4. Search Memories
|
|
96
|
+
|
|
97
|
+
```typescript
|
|
98
|
+
// Hybrid search (vector + keyword + graph)
|
|
99
|
+
const results = await client.search.hybrid({
|
|
100
|
+
projectId: 'your-project-id',
|
|
101
|
+
query: 'What are the user UI preferences?',
|
|
102
|
+
limit: 10,
|
|
103
|
+
// Optional: enable advanced features
|
|
104
|
+
useReranking: true, // LLM-based reranking
|
|
105
|
+
useQueryRewriting: true, // Query expansion
|
|
106
|
+
useGraphTraversal: true // Follow memory relationships
|
|
107
|
+
});
|
|
108
|
+
|
|
109
|
+
results.forEach(result => {
|
|
110
|
+
console.log(`Score: ${result.score}`);
|
|
111
|
+
console.log(`Content: ${result.content}`);
|
|
112
|
+
console.log(`Type: ${result.type}`);
|
|
113
|
+
});
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
## Transparent Router
|
|
117
|
+
|
|
118
|
+
The transparent router is an OpenAI-compatible proxy that automatically injects memory context into your requests. It's the easiest way to add memory to your application.
|
|
119
|
+
|
|
120
|
+
### Basic Usage
|
|
121
|
+
|
|
122
|
+
```typescript
|
|
123
|
+
import OpenAI from 'openai';
|
|
124
|
+
|
|
125
|
+
const openai = new OpenAI({
|
|
126
|
+
baseURL: 'https://api.memorylayer.ai/v1',
|
|
127
|
+
apiKey: process.env.MEMORYLAYER_API_KEY
|
|
128
|
+
});
|
|
129
|
+
|
|
130
|
+
const response = await openai.chat.completions.create({
|
|
131
|
+
model: 'gpt-4',
|
|
132
|
+
messages: [{ role: 'user', content: 'What are my preferences?' }]
|
|
133
|
+
});
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
### Configuration Headers
|
|
137
|
+
|
|
138
|
+
Control memory injection with optional headers:
|
|
139
|
+
|
|
140
|
+
```typescript
|
|
141
|
+
const response = await openai.chat.completions.create({
|
|
142
|
+
model: 'gpt-4',
|
|
143
|
+
messages: [{ role: 'user', content: 'Hello!' }],
|
|
144
|
+
headers: {
|
|
145
|
+
'x-memory-user-id': 'user_123', // User scope
|
|
146
|
+
'x-memory-session-id': 'sess_abc', // Session scope
|
|
147
|
+
'x-memory-limit': '10', // Max memories
|
|
148
|
+
'x-memory-injection-mode': 'balanced', // safe|balanced|full
|
|
149
|
+
'x-memory-injection-strategy': 'system_append', // Injection strategy
|
|
150
|
+
'x-memory-disabled': 'false' // Enable/disable
|
|
151
|
+
}
|
|
152
|
+
});
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
### Injection Modes
|
|
156
|
+
|
|
157
|
+
- **safe**: Only fact + preference (minimal risk)
|
|
158
|
+
- **balanced** (default): fact + preference + trusted summaries
|
|
159
|
+
- **full**: All memory types including snippets
|
|
160
|
+
|
|
161
|
+
### Diagnostic Headers
|
|
162
|
+
|
|
163
|
+
Every response includes diagnostic headers:
|
|
164
|
+
|
|
165
|
+
```typescript
|
|
166
|
+
const response = await openai.chat.completions.create({ ... });
|
|
167
|
+
|
|
168
|
+
console.log('Memories retrieved:', response.headers?.['x-memory-hit-count']);
|
|
169
|
+
console.log('Tokens injected:', response.headers?.['x-memory-injected-tokens']);
|
|
170
|
+
console.log('Max score:', response.headers?.['x-memory-max-score']);
|
|
171
|
+
console.log('Query rewriting:', response.headers?.['x-memory-rewrite']);
|
|
172
|
+
console.log('Memory status:', response.headers?.['x-memory-status']);
|
|
173
|
+
console.log('Session ID:', response.headers?.['x-memory-session-id']);
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
### Streaming Support
|
|
177
|
+
|
|
178
|
+
Streaming works seamlessly:
|
|
179
|
+
|
|
180
|
+
```typescript
|
|
181
|
+
const stream = await openai.chat.completions.create({
|
|
182
|
+
model: 'gpt-4',
|
|
183
|
+
messages: [{ role: 'user', content: 'Tell me about myself' }],
|
|
184
|
+
stream: true,
|
|
185
|
+
headers: {
|
|
186
|
+
'x-memory-user-id': 'user_123'
|
|
187
|
+
}
|
|
188
|
+
});
|
|
189
|
+
|
|
190
|
+
for await (const chunk of stream) {
|
|
191
|
+
const content = chunk.choices[0]?.delta?.content || '';
|
|
192
|
+
process.stdout.write(content);
|
|
193
|
+
}
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
### Session Management
|
|
197
|
+
|
|
198
|
+
If you don't provide `x-memory-user-id` or `x-memory-session-id`, the router generates a session ID. Persist it for conversation continuity:
|
|
199
|
+
|
|
200
|
+
```typescript
|
|
201
|
+
const response = await openai.chat.completions.create({ ... });
|
|
202
|
+
|
|
203
|
+
// Get generated session ID
|
|
204
|
+
const sessionId = response.headers?.['x-memory-session-id'];
|
|
205
|
+
|
|
206
|
+
// Store it and send on next request
|
|
207
|
+
const nextResponse = await openai.chat.completions.create({
|
|
208
|
+
messages: [...],
|
|
209
|
+
headers: {
|
|
210
|
+
'x-memory-session-id': sessionId // ← Persist this!
|
|
211
|
+
}
|
|
212
|
+
});
|
|
213
|
+
```
|
|
214
|
+
|
|
215
|
+
### Error Handling
|
|
216
|
+
|
|
217
|
+
The router gracefully degrades on errors:
|
|
218
|
+
|
|
219
|
+
```typescript
|
|
220
|
+
const response = await openai.chat.completions.create({ ... });
|
|
221
|
+
|
|
222
|
+
// Check memory status
|
|
223
|
+
const memoryStatus = response.headers?.['x-memory-status'];
|
|
224
|
+
if (memoryStatus === 'error') {
|
|
225
|
+
console.warn('Memory retrieval failed:', response.headers?.['x-memory-error-code']);
|
|
226
|
+
console.log('But the request still succeeded (graceful degradation)');
|
|
227
|
+
}
|
|
228
|
+
```
|
|
229
|
+
|
|
230
|
+
### Migration from Manual Integration
|
|
231
|
+
|
|
232
|
+
See [examples/MIGRATION_GUIDE.md](../../examples/MIGRATION_GUIDE.md) for a complete migration guide.
|
|
233
|
+
|
|
234
|
+
## Core Features
|
|
235
|
+
|
|
236
|
+
### Memory Management
|
|
237
|
+
|
|
238
|
+
#### Create Memory
|
|
239
|
+
|
|
13
240
|
```typescript
|
|
14
|
-
|
|
241
|
+
const memory = await client.memories.create({
|
|
242
|
+
projectId: 'project-id',
|
|
243
|
+
content: 'User completed onboarding on 2024-01-15',
|
|
244
|
+
type: 'fact',
|
|
245
|
+
tags: {
|
|
246
|
+
event: 'onboarding',
|
|
247
|
+
date: '2024-01-15'
|
|
248
|
+
},
|
|
249
|
+
metadata: {
|
|
250
|
+
source: 'mobile-app',
|
|
251
|
+
version: '2.1.0'
|
|
252
|
+
}
|
|
253
|
+
});
|
|
254
|
+
```
|
|
15
255
|
|
|
16
|
-
|
|
17
|
-
|
|
256
|
+
#### List Memories
|
|
257
|
+
|
|
258
|
+
```typescript
|
|
259
|
+
const memories = await client.memories.list({
|
|
260
|
+
projectId: 'project-id',
|
|
261
|
+
types: ['fact', 'preference'],
|
|
262
|
+
status: ['active'],
|
|
263
|
+
page: 1,
|
|
264
|
+
pageSize: 50
|
|
18
265
|
});
|
|
19
266
|
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
content
|
|
23
|
-
|
|
24
|
-
|
|
267
|
+
console.log(`Total: ${memories.total}`);
|
|
268
|
+
memories.items.forEach(memory => {
|
|
269
|
+
console.log(memory.content);
|
|
270
|
+
});
|
|
271
|
+
```
|
|
272
|
+
|
|
273
|
+
#### Get Memory
|
|
274
|
+
|
|
275
|
+
```typescript
|
|
276
|
+
const memory = await client.memories.get('memory-id');
|
|
277
|
+
console.log(memory.content);
|
|
278
|
+
```
|
|
279
|
+
|
|
280
|
+
#### Update Memory
|
|
281
|
+
|
|
282
|
+
```typescript
|
|
283
|
+
const updated = await client.memories.update('memory-id', {
|
|
284
|
+
content: 'Updated content',
|
|
285
|
+
tags: {
|
|
286
|
+
updated: 'true'
|
|
287
|
+
}
|
|
25
288
|
});
|
|
289
|
+
```
|
|
290
|
+
|
|
291
|
+
#### Delete Memory
|
|
292
|
+
|
|
293
|
+
```typescript
|
|
294
|
+
await client.memories.delete('memory-id');
|
|
295
|
+
```
|
|
296
|
+
|
|
297
|
+
### Search & Retrieval
|
|
298
|
+
|
|
299
|
+
#### Hybrid Search
|
|
300
|
+
|
|
301
|
+
Combines vector search, keyword search, and graph traversal:
|
|
26
302
|
|
|
27
|
-
|
|
28
|
-
const results = await client.search.
|
|
303
|
+
```typescript
|
|
304
|
+
const results = await client.search.hybrid({
|
|
305
|
+
projectId: 'project-id',
|
|
306
|
+
query: 'What does the user like?',
|
|
307
|
+
limit: 10,
|
|
308
|
+
|
|
309
|
+
// Scoring weights (optional)
|
|
310
|
+
vectorWeight: 0.5,
|
|
311
|
+
keywordWeight: 0.3,
|
|
312
|
+
recencyWeight: 0.2,
|
|
313
|
+
|
|
314
|
+
// Advanced features
|
|
315
|
+
useReranking: true, // Use LLM to rerank results
|
|
316
|
+
useQueryRewriting: true, // Expand and clarify query
|
|
317
|
+
useGraphTraversal: true, // Follow memory relationships
|
|
318
|
+
graphDepth: 2 // How many hops to traverse
|
|
319
|
+
});
|
|
320
|
+
```
|
|
321
|
+
|
|
322
|
+
#### Vector Search Only
|
|
323
|
+
|
|
324
|
+
```typescript
|
|
325
|
+
const results = await client.search.vector({
|
|
326
|
+
projectId: 'project-id',
|
|
29
327
|
query: 'user preferences',
|
|
30
|
-
|
|
328
|
+
limit: 5,
|
|
329
|
+
threshold: 0.7 // Minimum similarity score
|
|
330
|
+
});
|
|
331
|
+
```
|
|
332
|
+
|
|
333
|
+
#### Keyword Search Only
|
|
334
|
+
|
|
335
|
+
```typescript
|
|
336
|
+
const results = await client.search.keyword({
|
|
337
|
+
projectId: 'project-id',
|
|
338
|
+
query: 'dark mode',
|
|
339
|
+
limit: 5
|
|
340
|
+
});
|
|
341
|
+
```
|
|
342
|
+
|
|
343
|
+
### Memory Graph
|
|
344
|
+
|
|
345
|
+
#### Get Graph Data
|
|
346
|
+
|
|
347
|
+
```typescript
|
|
348
|
+
const graph = await client.graph.get({
|
|
349
|
+
projectId: 'project-id',
|
|
350
|
+
// Optional filters
|
|
351
|
+
memoryTypes: ['fact', 'preference'],
|
|
352
|
+
searchQuery: 'user preferences',
|
|
353
|
+
dateRange: {
|
|
354
|
+
start: '2024-01-01',
|
|
355
|
+
end: '2024-12-31'
|
|
356
|
+
}
|
|
357
|
+
});
|
|
358
|
+
|
|
359
|
+
console.log(`Nodes: ${graph.nodes.length}`);
|
|
360
|
+
console.log(`Edges: ${graph.edges.length}`);
|
|
361
|
+
|
|
362
|
+
// Nodes
|
|
363
|
+
graph.nodes.forEach(node => {
|
|
364
|
+
console.log(`${node.id}: ${node.content}`);
|
|
365
|
+
});
|
|
366
|
+
|
|
367
|
+
// Edges (relationships)
|
|
368
|
+
graph.edges.forEach(edge => {
|
|
369
|
+
console.log(`${edge.source} -> ${edge.target} (${edge.type})`);
|
|
370
|
+
});
|
|
371
|
+
```
|
|
372
|
+
|
|
373
|
+
#### Create Edge
|
|
374
|
+
|
|
375
|
+
```typescript
|
|
376
|
+
const edge = await client.graph.createEdge({
|
|
377
|
+
projectId: 'project-id',
|
|
378
|
+
sourceMemoryId: 'memory-1',
|
|
379
|
+
targetMemoryId: 'memory-2',
|
|
380
|
+
relationshipType: 'derives', // or 'similarity', 'temporal', etc.
|
|
381
|
+
metadata: {
|
|
382
|
+
confidence: 0.95,
|
|
383
|
+
reason: 'User explicitly linked these'
|
|
384
|
+
}
|
|
385
|
+
});
|
|
386
|
+
```
|
|
387
|
+
|
|
388
|
+
#### Traverse Graph
|
|
389
|
+
|
|
390
|
+
```typescript
|
|
391
|
+
const related = await client.graph.traverse({
|
|
392
|
+
projectId: 'project-id',
|
|
393
|
+
startMemoryIds: ['memory-1'],
|
|
394
|
+
depth: 2, // How many hops
|
|
395
|
+
relationshipTypes: ['similarity', 'derives']
|
|
396
|
+
});
|
|
397
|
+
|
|
398
|
+
console.log(`Found ${related.length} related memories`);
|
|
399
|
+
```
|
|
400
|
+
|
|
401
|
+
### Ingestion
|
|
402
|
+
|
|
403
|
+
#### Ingest Document
|
|
404
|
+
|
|
405
|
+
```typescript
|
|
406
|
+
const job = await client.ingestion.ingest({
|
|
407
|
+
projectId: 'project-id',
|
|
408
|
+
content: 'Long document content...',
|
|
409
|
+
metadata: {
|
|
410
|
+
title: 'Product Documentation',
|
|
411
|
+
source: 'docs.example.com'
|
|
412
|
+
},
|
|
413
|
+
// Chunking strategy
|
|
414
|
+
chunkingStrategy: 'semantic', // or 'fixed-size', 'sentence', 'paragraph'
|
|
415
|
+
chunkSize: 512,
|
|
416
|
+
chunkOverlap: 50
|
|
417
|
+
});
|
|
418
|
+
|
|
419
|
+
console.log(`Job ID: ${job.id}`);
|
|
420
|
+
console.log(`Status: ${job.status}`);
|
|
421
|
+
```
|
|
422
|
+
|
|
423
|
+
#### Check Job Status
|
|
424
|
+
|
|
425
|
+
```typescript
|
|
426
|
+
const job = await client.ingestion.getJob('job-id');
|
|
427
|
+
console.log(`Status: ${job.status}`);
|
|
428
|
+
console.log(`Progress: ${job.progress}%`);
|
|
429
|
+
console.log(`Memories created: ${job.memoriesCreated}`);
|
|
430
|
+
```
|
|
431
|
+
|
|
432
|
+
## Advanced Features
|
|
433
|
+
|
|
434
|
+
### LLM Reranking
|
|
435
|
+
|
|
436
|
+
Improve search relevance using LLM-based reranking:
|
|
437
|
+
|
|
438
|
+
```typescript
|
|
439
|
+
const results = await client.search.hybrid({
|
|
440
|
+
projectId: 'project-id',
|
|
441
|
+
query: 'complex user question',
|
|
442
|
+
limit: 20,
|
|
443
|
+
useReranking: true,
|
|
444
|
+
rerankingModel: 'gpt-4', // or 'claude-3'
|
|
445
|
+
rerankingTopK: 10 // Return top 10 after reranking
|
|
31
446
|
});
|
|
447
|
+
```
|
|
448
|
+
|
|
449
|
+
### Query Rewriting
|
|
32
450
|
|
|
33
|
-
|
|
451
|
+
Expand and clarify queries for better results:
|
|
452
|
+
|
|
453
|
+
```typescript
|
|
454
|
+
const results = await client.search.hybrid({
|
|
455
|
+
projectId: 'project-id',
|
|
456
|
+
query: 'ML preferences', // Will expand to "machine learning preferences"
|
|
457
|
+
useQueryRewriting: true,
|
|
458
|
+
queryRewritingStrategy: 'expansion' // or 'clarification', 'multi-query'
|
|
459
|
+
});
|
|
34
460
|
```
|
|
35
461
|
|
|
36
|
-
|
|
462
|
+
### Graph Traversal
|
|
37
463
|
|
|
38
|
-
|
|
464
|
+
Follow memory relationships for contextual retrieval:
|
|
465
|
+
|
|
466
|
+
```typescript
|
|
467
|
+
const results = await client.search.hybrid({
|
|
468
|
+
projectId: 'project-id',
|
|
469
|
+
query: 'user settings',
|
|
470
|
+
useGraphTraversal: true,
|
|
471
|
+
graphDepth: 2, // Follow relationships 2 hops deep
|
|
472
|
+
graphRelationshipTypes: ['similarity', 'derives']
|
|
473
|
+
});
|
|
474
|
+
```
|
|
475
|
+
|
|
476
|
+
## TypeScript Support
|
|
477
|
+
|
|
478
|
+
The SDK is written in TypeScript and provides full type definitions:
|
|
479
|
+
|
|
480
|
+
```typescript
|
|
481
|
+
import {
|
|
482
|
+
MemoryLayer,
|
|
483
|
+
Memory,
|
|
484
|
+
SearchResult,
|
|
485
|
+
GraphData,
|
|
486
|
+
IngestionJob
|
|
487
|
+
} from '@memorylayerai/sdk';
|
|
488
|
+
|
|
489
|
+
// All methods are fully typed
|
|
490
|
+
const client = new MemoryLayer({ apiKey: 'ml_key_...' });
|
|
491
|
+
|
|
492
|
+
// TypeScript will auto-complete and type-check
|
|
493
|
+
const memory: Memory = await client.memories.create({
|
|
494
|
+
projectId: 'project-id',
|
|
495
|
+
content: 'typed content',
|
|
496
|
+
type: 'fact' // TypeScript knows valid types
|
|
497
|
+
});
|
|
498
|
+
```
|
|
499
|
+
|
|
500
|
+
## Error Handling
|
|
501
|
+
|
|
502
|
+
```typescript
|
|
503
|
+
import { MemoryLayerError } from '@memorylayerai/sdk';
|
|
504
|
+
|
|
505
|
+
try {
|
|
506
|
+
const memory = await client.memories.create({
|
|
507
|
+
projectId: 'project-id',
|
|
508
|
+
content: 'test'
|
|
509
|
+
});
|
|
510
|
+
} catch (error) {
|
|
511
|
+
if (error instanceof MemoryLayerError) {
|
|
512
|
+
console.error('API Error:', error.message);
|
|
513
|
+
console.error('Status:', error.statusCode);
|
|
514
|
+
console.error('Request ID:', error.requestId);
|
|
515
|
+
} else {
|
|
516
|
+
console.error('Unexpected error:', error);
|
|
517
|
+
}
|
|
518
|
+
}
|
|
519
|
+
```
|
|
520
|
+
|
|
521
|
+
## Configuration
|
|
522
|
+
|
|
523
|
+
### Custom Base URL
|
|
524
|
+
|
|
525
|
+
```typescript
|
|
526
|
+
const client = new MemoryLayer({
|
|
527
|
+
apiKey: 'ml_key_...',
|
|
528
|
+
baseUrl: 'https://your-custom-domain.com'
|
|
529
|
+
});
|
|
530
|
+
```
|
|
531
|
+
|
|
532
|
+
### Timeout
|
|
533
|
+
|
|
534
|
+
```typescript
|
|
535
|
+
const client = new MemoryLayer({
|
|
536
|
+
apiKey: 'ml_key_...',
|
|
537
|
+
timeout: 30000 // 30 seconds
|
|
538
|
+
});
|
|
539
|
+
```
|
|
540
|
+
|
|
541
|
+
### Retry Configuration
|
|
542
|
+
|
|
543
|
+
```typescript
|
|
544
|
+
const client = new MemoryLayer({
|
|
545
|
+
apiKey: 'ml_key_...',
|
|
546
|
+
maxRetries: 3,
|
|
547
|
+
retryDelay: 1000 // 1 second
|
|
548
|
+
});
|
|
549
|
+
```
|
|
550
|
+
|
|
551
|
+
## Examples
|
|
552
|
+
|
|
553
|
+
### Chatbot with Memory
|
|
554
|
+
|
|
555
|
+
```typescript
|
|
556
|
+
import { MemoryLayer } from '@memorylayerai/sdk';
|
|
557
|
+
|
|
558
|
+
const client = new MemoryLayer({ apiKey: process.env.MEMORYLAYER_API_KEY });
|
|
559
|
+
const projectId = 'your-project-id';
|
|
560
|
+
|
|
561
|
+
async function chatWithMemory(userMessage: string, userId: string) {
|
|
562
|
+
// 1. Search for relevant memories
|
|
563
|
+
const memories = await client.search.hybrid({
|
|
564
|
+
projectId,
|
|
565
|
+
query: userMessage,
|
|
566
|
+
limit: 5,
|
|
567
|
+
useReranking: true,
|
|
568
|
+
useGraphTraversal: true
|
|
569
|
+
});
|
|
570
|
+
|
|
571
|
+
// 2. Build context from memories
|
|
572
|
+
const context = memories
|
|
573
|
+
.map(m => m.content)
|
|
574
|
+
.join('\n\n');
|
|
575
|
+
|
|
576
|
+
// 3. Send to LLM with context
|
|
577
|
+
const response = await callYourLLM({
|
|
578
|
+
system: `You are a helpful assistant. Use this context about the user:\n\n${context}`,
|
|
579
|
+
user: userMessage
|
|
580
|
+
});
|
|
581
|
+
|
|
582
|
+
// 4. Store new memory from conversation
|
|
583
|
+
await client.memories.create({
|
|
584
|
+
projectId,
|
|
585
|
+
content: `User said: "${userMessage}". Assistant responded: "${response}"`,
|
|
586
|
+
type: 'fact',
|
|
587
|
+
tags: { userId, timestamp: new Date().toISOString() }
|
|
588
|
+
});
|
|
589
|
+
|
|
590
|
+
return response;
|
|
591
|
+
}
|
|
592
|
+
```
|
|
593
|
+
|
|
594
|
+
### Document Q&A
|
|
595
|
+
|
|
596
|
+
```typescript
|
|
597
|
+
async function ingestAndQuery(documentContent: string, question: string) {
|
|
598
|
+
// 1. Ingest document
|
|
599
|
+
const job = await client.ingestion.ingest({
|
|
600
|
+
projectId: 'your-project-id',
|
|
601
|
+
content: documentContent,
|
|
602
|
+
chunkingStrategy: 'semantic',
|
|
603
|
+
chunkSize: 512
|
|
604
|
+
});
|
|
605
|
+
|
|
606
|
+
// 2. Wait for ingestion to complete
|
|
607
|
+
let status = await client.ingestion.getJob(job.id);
|
|
608
|
+
while (status.status === 'processing') {
|
|
609
|
+
await new Promise(resolve => setTimeout(resolve, 1000));
|
|
610
|
+
status = await client.ingestion.getJob(job.id);
|
|
611
|
+
}
|
|
612
|
+
|
|
613
|
+
// 3. Query the document
|
|
614
|
+
const results = await client.search.hybrid({
|
|
615
|
+
projectId: 'your-project-id',
|
|
616
|
+
query: question,
|
|
617
|
+
limit: 3,
|
|
618
|
+
useReranking: true
|
|
619
|
+
});
|
|
620
|
+
|
|
621
|
+
return results.map(r => r.content).join('\n\n');
|
|
622
|
+
}
|
|
623
|
+
```
|
|
624
|
+
|
|
625
|
+
## API Reference
|
|
626
|
+
|
|
627
|
+
Full API documentation available at [docs.memorylayer.com](https://docs.memorylayer.com)
|
|
628
|
+
|
|
629
|
+
## Support
|
|
630
|
+
|
|
631
|
+
- 📧 Email: support@memorylayer.com
|
|
632
|
+
- 💬 Discord: [discord.gg/memorylayer](https://discord.gg/memorylayer)
|
|
633
|
+
- 📚 Docs: [docs.memorylayer.com](https://docs.memorylayer.com)
|
|
634
|
+
- 🐛 Issues: [github.com/memorylayer/sdk/issues](https://github.com/memorylayer/sdk/issues)
|
|
39
635
|
|
|
40
636
|
## License
|
|
41
637
|
|
|
42
|
-
MIT
|
|
638
|
+
MIT License - see [LICENSE](LICENSE) file for details.
|
|
639
|
+
|
|
640
|
+
## Changelog
|
|
641
|
+
|
|
642
|
+
### v0.2.0 (2024-01-20)
|
|
643
|
+
|
|
644
|
+
- ✨ Added Memory Graph API support
|
|
645
|
+
- ✨ Added Hybrid Search with LLM reranking
|
|
646
|
+
- ✨ Added Query Rewriting capabilities
|
|
647
|
+
- ✨ Added Graph Traversal for contextual retrieval
|
|
648
|
+
- 🐛 Fixed type definitions for better TypeScript support
|
|
649
|
+
- 📚 Comprehensive documentation and examples
|
|
650
|
+
|
|
651
|
+
### v0.1.1 (2024-01-10)
|
|
652
|
+
|
|
653
|
+
- 🐛 Bug fixes and stability improvements
|
|
654
|
+
|
|
655
|
+
### v0.1.0 (2024-01-01)
|
|
656
|
+
|
|
657
|
+
- 🚀 Initial release
|
|
658
|
+
- ✨ Basic memory CRUD operations
|
|
659
|
+
- ✨ Vector search
|
|
660
|
+
- ✨ Ingestion API
|