@mzhub/mem-ts 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +335 -0
- package/dist/BaseAdapter-BoRh1T7O.d.mts +75 -0
- package/dist/BaseAdapter-CQVX-gcA.d.ts +75 -0
- package/dist/BaseProvider-CEoiLGj5.d.ts +34 -0
- package/dist/BaseProvider-edMh_R9t.d.mts +34 -0
- package/dist/adapters/index.d.mts +259 -0
- package/dist/adapters/index.d.ts +259 -0
- package/dist/adapters/index.js +1570 -0
- package/dist/adapters/index.js.map +1 -0
- package/dist/adapters/index.mjs +1542 -0
- package/dist/adapters/index.mjs.map +1 -0
- package/dist/index-Ci5Q9G9H.d.mts +289 -0
- package/dist/index-Dl-Q2au9.d.ts +289 -0
- package/dist/index.d.mts +1206 -0
- package/dist/index.d.ts +1206 -0
- package/dist/index.js +5126 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +5058 -0
- package/dist/index.mjs.map +1 -0
- package/dist/middleware/index.d.mts +4 -0
- package/dist/middleware/index.d.ts +4 -0
- package/dist/middleware/index.js +63 -0
- package/dist/middleware/index.js.map +1 -0
- package/dist/middleware/index.mjs +59 -0
- package/dist/middleware/index.mjs.map +1 -0
- package/dist/providers/index.d.mts +96 -0
- package/dist/providers/index.d.ts +96 -0
- package/dist/providers/index.js +379 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/index.mjs +370 -0
- package/dist/providers/index.mjs.map +1 -0
- package/dist/types-G9qmfSeZ.d.mts +260 -0
- package/dist/types-G9qmfSeZ.d.ts +260 -0
- package/logo.png +0 -0
- package/package.json +114 -0
@@ -0,0 +1,289 @@
+import { P as ProviderConfig, c as MemoryOSOptions, H as HydrateOptions, d as HydratedContext, E as ExtractionResult, F as FactFilter, M as MemoryFact, S as Session, C as ConversationExchange } from './types-G9qmfSeZ.js';
+import { B as BaseAdapter } from './BaseAdapter-CQVX-gcA.js';
+import { B as BaseProvider } from './BaseProvider-CEoiLGj5.js';
+
+interface MemoryOSConfig {
+    /** LLM provider configuration or instance */
+    llm: ProviderConfig | {
+        instance: BaseProvider;
+    };
+    /** Storage adapter instance */
+    adapter?: BaseAdapter;
+    /** Behavioral options */
+    options?: MemoryOSOptions;
+}
+/**
+ * MemoryOS - The main orchestrator for AI agent memory.
+ *
+ * Implements the "Two-Brain" architecture:
+ * - Fast Brain: Synchronous context retrieval before LLM calls
+ * - Slow Brain: Asynchronous fact extraction after responses
+ *
+ * @example
+ * ```typescript
+ * import { MemoryOS } from 'mem-ts';
+ * import { JSONFileAdapter } from 'mem-ts/adapters';
+ *
+ * const memory = new MemoryOS({
+ *   llm: { provider: 'openai', apiKey: 'sk-...', model: 'gpt-4o-mini' },
+ *   adapter: new JSONFileAdapter({ path: './.mem-ts' })
+ * });
+ *
+ * // Before LLM call - get context
+ * const context = await memory.hydrate(userId, userMessage);
+ *
+ * // After LLM response - extract facts (non-blocking)
+ * memory.digest(userId, userMessage, assistantResponse);
+ * ```
+ */
+declare class MemoryOS {
+    private adapter;
+    private provider;
+    private extractor;
+    private hydrator;
+    private options;
+    private initialized;
+    private activeSessions;
+    constructor(config: MemoryOSConfig);
+    /**
+     * Initialize the memory system (connects to storage, etc.)
+     */
+    initialize(): Promise<void>;
+    /**
+     * Ensure the system is initialized
+     */
+    private ensureInitialized;
+    /**
+     * Hydrate context for injection into an LLM prompt.
+     *
+     * This is the "Fast Brain" - runs synchronously before each LLM call
+     * to provide relevant context from the user's memory.
+     *
+     * @param userId - Unique identifier for the user
+     * @param message - The user's current message (used for relevance ranking)
+     * @param options - Optional filtering and limiting options
+     * @returns Compiled context ready for injection
+     *
+     * @example
+     * ```typescript
+     * const context = await memory.hydrate(userId, userMessage);
+     *
+     * const response = await openai.chat.completions.create({
+     *   messages: [
+     *     { role: 'system', content: `Context: ${context.compiledPrompt}` },
+     *     { role: 'user', content: userMessage }
+     *   ]
+     * });
+     * ```
+     */
+    hydrate(userId: string, message: string, options?: HydrateOptions): Promise<HydratedContext>;
+    /**
+     * Digest a conversation exchange to extract facts.
+     *
+     * This is the "Slow Brain" - runs asynchronously in the background
+     * after a response is sent to the user. Does not block.
+     *
+     * @param userId - Unique identifier for the user
+     * @param userMessage - What the user said
+     * @param assistantResponse - What the assistant replied
+     *
+     * @example
+     * ```typescript
+     * // Fire and forget - doesn't block
+     * memory.digest(userId, userMessage, response.content);
+     *
+     * // Return response to user immediately
+     * res.json({ message: response.content });
+     * ```
+     */
+    digest(userId: string, userMessage: string, assistantResponse: string): void;
+    /**
+     * Extract facts immediately (synchronous version of digest).
+     * Useful for testing or when you need the extraction result.
+     */
+    digestSync(userId: string, userMessage: string, assistantResponse: string): Promise<ExtractionResult>;
+    /**
+     * Get all facts for a user
+     */
+    getFacts(userId: string, filter?: FactFilter): Promise<MemoryFact[]>;
+    /**
+     * Add a fact directly (bypasses extraction)
+     */
+    addFact(userId: string, subject: string, predicate: string, object: string, confidence?: number, importance?: number): Promise<MemoryFact>;
+    /**
+     * Delete a fact
+     */
+    deleteFact(userId: string, factId: string, reason?: string): Promise<void>;
+    /**
+     * Clear all facts for a user (use with caution!)
+     */
+    clearFacts(userId: string): Promise<void>;
+    /**
+     * Start a new session for a user
+     */
+    startSession(userId: string): Promise<Session>;
+    /**
+     * End the current session for a user
+     */
+    endSession(userId: string, summary?: string): Promise<Session | null>;
+    /**
+     * Get or create a session ID for a user
+     */
+    private getOrCreateSession;
+    /**
+     * Ensure a session exists (async version)
+     */
+    private ensureSession;
+    /**
+     * Get conversation history for a user
+     */
+    getHistory(userId: string, limit?: number, sessionId?: string): Promise<ConversationExchange[]>;
+    /**
+     * Export all data for a user (for portability)
+     */
+    exportUser(userId: string): Promise<{
+        facts: MemoryFact[];
+        conversations: ConversationExchange[];
+        sessions: Session[];
+    }>;
+    /**
+     * Wait for all pending extractions to complete
+     */
+    drain(): Promise<void>;
+    /**
+     * Get the number of pending extraction tasks
+     */
+    getPendingExtractions(): number;
+    /**
+     * Close the memory system (disconnects from storage)
+     */
+    close(): Promise<void>;
+}
+
+/**
+ * Express/Connect-style request object
+ */
+interface MiddlewareRequest {
+    body?: {
+        userId?: string;
+        message?: string;
+        [key: string]: unknown;
+    };
+    params?: Record<string, string>;
+    query?: Record<string, string>;
+    headers?: Record<string, string | string[] | undefined>;
+    user?: {
+        id?: string;
+        userId?: string;
+        [key: string]: unknown;
+    };
+    memoryContext?: HydratedContext;
+}
+/**
+ * Express/Connect-style response object
+ */
+interface MiddlewareResponse {
+    locals?: Record<string, unknown>;
+    json?: (body: unknown) => void;
+    on?: (event: string, callback: () => void) => void;
+}
+/**
+ * Next function to call the next middleware
+ */
+type NextFunction = (error?: unknown) => void;
+/**
+ * Options for the memory middleware
+ */
+interface MemoryMiddlewareOptions {
+    /** Function to extract userId from request */
+    getUserId?: (req: MiddlewareRequest) => string | undefined;
+    /** Function to extract user message from request */
+    getMessage?: (req: MiddlewareRequest) => string | undefined;
+    /** Attach context to request object */
+    attachToRequest?: boolean;
+    /** Auto-digest on response finish (requires response body capture) */
+    autoDigest?: boolean;
+}
+/**
+ * Result attached to request/response
+ */
+declare global {
+    namespace Express {
+        interface Request {
+            memoryContext?: HydratedContext;
+        }
+        interface Locals {
+            memoryContext?: HydratedContext;
+        }
+    }
+}
+/**
+ * Create Express middleware for automatic context hydration.
+ *
+ * @example
+ * ```typescript
+ * import express from 'express';
+ * import { MemoryOS } from 'mem-ts';
+ * import { createMemoryMiddleware } from 'mem-ts/middleware';
+ *
+ * const app = express();
+ * const memory = new MemoryOS({ ... });
+ *
+ * app.use('/chat', createMemoryMiddleware(memory, {
+ *   getUserId: (req) => req.user?.id,
+ *   getMessage: (req) => req.body?.message,
+ * }));
+ *
+ * app.post('/chat', (req, res) => {
+ *   const context = req.memoryContext;
+ *   // Use context.compiledPrompt in your LLM call
+ * });
+ * ```
+ */
+declare function createMemoryMiddleware(memory: MemoryOS, options?: MemoryMiddlewareOptions): (req: MiddlewareRequest, res: MiddlewareResponse, next: NextFunction) => Promise<void>;
+/**
+ * Helper function to digest after response in Express.
+ * Call this after sending the response.
+ *
+ * @example
+ * ```typescript
+ * app.post('/chat', async (req, res) => {
+ *   const response = await callLLM(req.memoryContext, req.body.message);
+ *   res.json({ message: response });
+ *
+ *   // Digest in background
+ *   digestAfterResponse(memory, req.user.id, req.body.message, response);
+ * });
+ * ```
+ */
+declare function digestAfterResponse(memory: MemoryOS, userId: string, userMessage: string, assistantResponse: string): void;
+/**
+ * Create a Next.js API route handler wrapper.
+ *
+ * @example
+ * ```typescript
+ * // pages/api/chat.ts or app/api/chat/route.ts
+ * import { withMemory } from 'mem-ts/middleware';
+ *
+ * export const POST = withMemory(memory, async (req, context) => {
+ *   const { message } = await req.json();
+ *   const response = await callLLM(context.compiledPrompt, message);
+ *   return Response.json({ message: response });
+ * }, {
+ *   getUserId: (req) => req.headers.get('x-user-id'),
+ * });
+ * ```
+ */
+declare function withMemory<T extends {
+    json: () => Promise<{
+        message?: string;
+        userId?: string;
+    }>;
+}>(memory: MemoryOS, handler: (req: T, context: HydratedContext) => Promise<Response>, options?: {
+    getUserId?: (req: T) => string | null | undefined;
+    getMessage?: (body: {
+        message?: string;
+    }) => string | undefined;
+}): (req: T) => Promise<Response>;
+
+export { MemoryOS as M, type NextFunction as N, type MemoryOSConfig as a, type MemoryMiddlewareOptions as b, createMemoryMiddleware as c, digestAfterResponse as d, type MiddlewareRequest as e, type MiddlewareResponse as f, withMemory as w };
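Taken together, the declarations above describe the intended call pattern: `hydrate` before the model call, `digest` after it, `drain`/`close` on shutdown. The following is a minimal sketch assembled only from these declarations and their JSDoc examples; the `@mzhub/mem-ts` import specifiers (the JSDoc samples use bare `mem-ts`/`mem-ts/adapters`, and the actual specifier depends on the package.json listed above but not shown here) and the `callLLM` helper are assumptions standing in for your own setup and model call.

```typescript
// Lifecycle sketch based on the declared MemoryOS API. Import specifiers and
// callLLM are assumptions; everything else follows the declarations above.
import { MemoryOS } from '@mzhub/mem-ts';
import { JSONFileAdapter } from '@mzhub/mem-ts/adapters';

// Hypothetical stand-in for your own provider call; not part of the package.
async function callLLM(systemPrompt: string, userMessage: string): Promise<string> {
  return `[${systemPrompt.length} chars of context] echo: ${userMessage}`;
}

const memory = new MemoryOS({
  llm: { provider: 'openai', apiKey: process.env.OPENAI_API_KEY ?? '', model: 'gpt-4o-mini' },
  adapter: new JSONFileAdapter({ path: './.mem-ts' }),
});

export async function chat(userId: string, userMessage: string): Promise<string> {
  // Fast Brain: compile relevant memory into a prompt before the model call.
  const context = await memory.hydrate(userId, userMessage);
  const reply = await callLLM(`Context: ${context.compiledPrompt}`, userMessage);

  // Slow Brain: fire-and-forget fact extraction; returns void and does not block.
  memory.digest(userId, userMessage, reply);
  return reply;
}

export async function shutdown(): Promise<void> {
  await memory.drain(); // wait for pending background extractions
  await memory.close(); // disconnect from storage
}
```

Facts can also be written and read directly with `addFact`/`getFacts`/`deleteFact` (bypassing extraction), and `exportUser` returns a user's facts, conversations, and sessions for portability.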