mdb_engine-0.1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mdb_engine/README.md +144 -0
- mdb_engine/__init__.py +37 -0
- mdb_engine/auth/README.md +631 -0
- mdb_engine/auth/__init__.py +128 -0
- mdb_engine/auth/casbin_factory.py +199 -0
- mdb_engine/auth/casbin_models.py +46 -0
- mdb_engine/auth/config_defaults.py +71 -0
- mdb_engine/auth/config_helpers.py +213 -0
- mdb_engine/auth/cookie_utils.py +158 -0
- mdb_engine/auth/decorators.py +350 -0
- mdb_engine/auth/dependencies.py +747 -0
- mdb_engine/auth/helpers.py +64 -0
- mdb_engine/auth/integration.py +578 -0
- mdb_engine/auth/jwt.py +225 -0
- mdb_engine/auth/middleware.py +241 -0
- mdb_engine/auth/oso_factory.py +323 -0
- mdb_engine/auth/provider.py +570 -0
- mdb_engine/auth/restrictions.py +271 -0
- mdb_engine/auth/session_manager.py +477 -0
- mdb_engine/auth/token_lifecycle.py +213 -0
- mdb_engine/auth/token_store.py +289 -0
- mdb_engine/auth/users.py +1516 -0
- mdb_engine/auth/utils.py +614 -0
- mdb_engine/cli/__init__.py +13 -0
- mdb_engine/cli/commands/__init__.py +7 -0
- mdb_engine/cli/commands/generate.py +105 -0
- mdb_engine/cli/commands/migrate.py +83 -0
- mdb_engine/cli/commands/show.py +70 -0
- mdb_engine/cli/commands/validate.py +63 -0
- mdb_engine/cli/main.py +41 -0
- mdb_engine/cli/utils.py +92 -0
- mdb_engine/config.py +217 -0
- mdb_engine/constants.py +160 -0
- mdb_engine/core/README.md +542 -0
- mdb_engine/core/__init__.py +42 -0
- mdb_engine/core/app_registration.py +392 -0
- mdb_engine/core/connection.py +243 -0
- mdb_engine/core/engine.py +749 -0
- mdb_engine/core/index_management.py +162 -0
- mdb_engine/core/manifest.py +2793 -0
- mdb_engine/core/seeding.py +179 -0
- mdb_engine/core/service_initialization.py +355 -0
- mdb_engine/core/types.py +413 -0
- mdb_engine/database/README.md +522 -0
- mdb_engine/database/__init__.py +31 -0
- mdb_engine/database/abstraction.py +635 -0
- mdb_engine/database/connection.py +387 -0
- mdb_engine/database/scoped_wrapper.py +1721 -0
- mdb_engine/embeddings/README.md +184 -0
- mdb_engine/embeddings/__init__.py +62 -0
- mdb_engine/embeddings/dependencies.py +193 -0
- mdb_engine/embeddings/service.py +759 -0
- mdb_engine/exceptions.py +167 -0
- mdb_engine/indexes/README.md +651 -0
- mdb_engine/indexes/__init__.py +21 -0
- mdb_engine/indexes/helpers.py +145 -0
- mdb_engine/indexes/manager.py +895 -0
- mdb_engine/memory/README.md +451 -0
- mdb_engine/memory/__init__.py +30 -0
- mdb_engine/memory/service.py +1285 -0
- mdb_engine/observability/README.md +515 -0
- mdb_engine/observability/__init__.py +42 -0
- mdb_engine/observability/health.py +296 -0
- mdb_engine/observability/logging.py +161 -0
- mdb_engine/observability/metrics.py +297 -0
- mdb_engine/routing/README.md +462 -0
- mdb_engine/routing/__init__.py +73 -0
- mdb_engine/routing/websockets.py +813 -0
- mdb_engine/utils/__init__.py +7 -0
- mdb_engine-0.1.6.dist-info/METADATA +213 -0
- mdb_engine-0.1.6.dist-info/RECORD +75 -0
- mdb_engine-0.1.6.dist-info/WHEEL +5 -0
- mdb_engine-0.1.6.dist-info/entry_points.txt +2 -0
- mdb_engine-0.1.6.dist-info/licenses/LICENSE +661 -0
- mdb_engine-0.1.6.dist-info/top_level.txt +1 -0

mdb_engine/memory/README.md
@@ -0,0 +1,451 @@

# Memory Service Module

Mem0.ai integration for intelligent memory management in MDB_ENGINE applications. Provides semantic memory storage, retrieval, and inference capabilities with MongoDB integration.

## Features

- **Mem0 Integration**: Wrapper around Mem0.ai for intelligent memory management
- **MongoDB Storage**: Built-in MongoDB vector store integration
- **Auto-Detection**: Automatically detects OpenAI or Azure OpenAI from environment variables
- **Semantic Search**: Vector-based semantic memory search
- **Memory Inference**: Optional LLM-based memory inference and summarization
- **Graph Memory**: Optional graph-based memory relationships (requires graph store config)

## Installation

The memory module requires mem0ai:

```bash
pip install mem0ai
```

## Configuration

### Environment Variables

The service auto-detects the provider from environment variables:

#### OpenAI

```bash
export OPENAI_API_KEY="sk-..."
```

#### Azure OpenAI

```bash
export AZURE_OPENAI_API_KEY="..."
export AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"
export AZURE_OPENAI_API_VERSION="2024-02-15-preview"  # Optional
export AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4"  # Optional, for LLM
```
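
In practice, the auto-detection rule amounts to checking which credentials are present. A minimal sketch of one such rule (here Azure OpenAI wins when both its key and endpoint are configured) — `detect_provider` is a hypothetical helper for illustration, and the package's actual logic inside `Mem0MemoryService` may differ:

```python
import os

def detect_provider() -> str:
    """Illustrative only: mirrors the documented auto-detection behavior."""
    if os.environ.get("AZURE_OPENAI_API_KEY") and os.environ.get("AZURE_OPENAI_ENDPOINT"):
        return "azure_openai"
    if os.environ.get("OPENAI_API_KEY"):
        return "openai"
    raise RuntimeError(
        "Set OPENAI_API_KEY, or AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT"
    )
```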

### Manifest Configuration

Enable the memory service in your `manifest.json`:

```json
{
  "slug": "my_app",
  "memory_config": {
    "enabled": true,
    "collection_name": "memories",
    "embedding_model": "text-embedding-3-small",
    "embedding_dimensions": 1536,
    "chat_model": "gpt-4",
    "temperature": 0.7,
    "infer": true,
    "enable_graph": false
  }
}
```

## Usage

### Basic Usage

```python
from mdb_engine.memory import Mem0MemoryService
from mdb_engine.core import MongoDBEngine

# Initialize engine
engine = MongoDBEngine(mongo_uri="...", db_name="...")
await engine.initialize()

# Get memory service (automatically configured from manifest)
memory_service = engine.get_memory_service("my_app")

# Add memory
memory = await memory_service.add(
    messages=[{"role": "user", "content": "I love Python programming"}],
    user_id="user123"
)

# Search memories
results = await memory_service.search(
    query="What does the user like?",
    user_id="user123",
    limit=5
)

# Get all memories for user
all_memories = await memory_service.get_all(user_id="user123")
```

### Initialize Memory Service

```python
from mdb_engine.memory import Mem0MemoryService

# Initialize with MongoDB connection
memory_service = Mem0MemoryService(
    mongo_uri="mongodb://localhost:27017",
    db_name="my_database",
    collection_name="memories",
    app_slug="my_app",
    embedding_model="text-embedding-3-small",
    embedding_dimensions=1536,
    chat_model="gpt-4",
    temperature=0.7,
    infer=True  # Enable LLM inference
)
```

### Add Memory

Store memories with automatic embedding generation:

```python
# Add single memory
memory = await memory_service.add(
    messages=[{"role": "user", "content": "My favorite color is blue"}],
    user_id="user123",
    metadata={"source": "conversation", "timestamp": "2024-01-01"}
)

# Add multiple memories
memories = await memory_service.add_all(
    memories=[
        {
            "messages": [{"role": "user", "content": "I work at Acme Corp"}],
            "user_id": "user123"
        },
        {
            "messages": [{"role": "user", "content": "I live in San Francisco"}],
            "user_id": "user123"
        }
    ]
)
```

### Search Memories

Semantic search across stored memories:

```python
# Basic search
results = await memory_service.search(
    query="Where does the user work?",
    user_id="user123",
    limit=5
)

# Search with filters
results = await memory_service.search(
    query="What are the user's preferences?",
    user_id="user123",
    limit=10,
    filters={"source": "conversation"}
)
```

### Get Memories

Retrieve memories for a user:

```python
# Get all memories
all_memories = await memory_service.get_all(user_id="user123")

# Get specific memory
memory = await memory_service.get(memory_id="memory_123", user_id="user123")

# Get memories with filters
memories = await memory_service.get_all(
    user_id="user123",
    filters={"source": "conversation"}
)
```

### Update Memory

Update existing memories:

```python
# Update memory
updated = await memory_service.update(
    memory_id="memory_123",
    user_id="user123",
    messages=[{"role": "user", "content": "Updated content"}],
    metadata={"updated": True}
)
```

### Delete Memory

Remove memories:

```python
# Delete single memory
await memory_service.delete(memory_id="memory_123", user_id="user123")

# Delete all memories for user
await memory_service.delete_all(user_id="user123")
```

### Memory Inference

With `infer=True`, the service can generate insights and summaries:

```python
# Get memory insights (requires infer=True)
insights = await memory_service.get_all(user_id="user123")

# Memories include inferred insights and summaries
for memory in insights:
    print(f"Memory: {memory.get('memory')}")
    print(f"Insights: {memory.get('insights')}")
```

## API Reference

### Mem0MemoryService

#### Initialization

```python
Mem0MemoryService(
    mongo_uri: str,
    db_name: str,
    collection_name: str = "memories",
    app_slug: str = None,
    embedding_model: str = "text-embedding-3-small",
    embedding_dimensions: int = None,
    chat_model: str = "gpt-4",
    temperature: float = 0.7,
    infer: bool = True,
    enable_graph: bool = False,
    config: dict = None
)
```

#### Methods

- `add(messages, user_id, metadata=None)` - Add single memory
- `add_all(memories)` - Add multiple memories
- `search(query, user_id, limit=10, filters=None)` - Search memories
- `get(memory_id, user_id)` - Get specific memory
- `get_all(user_id, filters=None)` - Get all memories for user
- `update(memory_id, user_id, messages=None, metadata=None)` - Update memory
- `delete(memory_id, user_id)` - Delete memory
- `delete_all(user_id)` - Delete all memories for user

## Configuration Options

### Embedding Model

Choose an embedding model based on your needs:

```python
# Small, fast, cost-effective
embedding_model="text-embedding-3-small"  # 1536 dimensions

# Large, more accurate
embedding_model="text-embedding-3-large"  # 3072 dimensions

# Legacy (still supported)
embedding_model="text-embedding-ada-002"  # 1536 dimensions
```
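
Whichever model you pick, the `embedding_dimensions` setting should match that model's output size. For example, pairing the large model with its 3072-dimensional vectors in the constructor shown earlier:

```python
from mdb_engine.memory import Mem0MemoryService

# Values mirror the initialization example above; only the embedding
# settings change. 3072 matches text-embedding-3-large's output size.
memory_service = Mem0MemoryService(
    mongo_uri="mongodb://localhost:27017",
    db_name="my_database",
    embedding_model="text-embedding-3-large",
    embedding_dimensions=3072,
)
```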

### Chat Model

For inference (`infer=True`), choose a chat model:

```python
# GPT-4 (more capable, more expensive)
chat_model="gpt-4"

# GPT-3.5 Turbo (faster, cheaper)
chat_model="gpt-3.5-turbo"

# GPT-4 Turbo (balanced)
chat_model="gpt-4-turbo-preview"
```

### Temperature

Control randomness in LLM inference:

```python
# Low temperature (more deterministic)
temperature=0.3

# Medium temperature (balanced)
temperature=0.7

# High temperature (more creative)
temperature=1.0
```

## Integration with MongoDBEngine

The memory service integrates seamlessly with MongoDBEngine:

```python
from mdb_engine import MongoDBEngine

engine = MongoDBEngine(mongo_uri="...", db_name="...")
await engine.initialize()

# Load manifest with memory_config
manifest = await engine.load_manifest("manifest.json")
await engine.register_app(manifest)

# Get memory service (automatically configured from manifest)
memory_service = engine.get_memory_service("my_app")
```

## Use Cases

### Conversational Memory

Store and retrieve conversation context:

```python
# Store conversation
await memory_service.add(
    messages=[
        {"role": "user", "content": "I'm planning a trip to Japan"},
        {"role": "assistant", "content": "That sounds exciting! When are you going?"}
    ],
    user_id="user123"
)

# Later, retrieve context
context = await memory_service.search(
    query="What trips is the user planning?",
    user_id="user123"
)
```

### User Preferences

Store user preferences and retrieve them:

```python
# Store preference
await memory_service.add(
    messages=[{"role": "user", "content": "I prefer dark mode interfaces"}],
    user_id="user123",
    metadata={"type": "preference", "category": "ui"}
)

# Retrieve preferences
preferences = await memory_service.search(
    query="What are the user's UI preferences?",
    user_id="user123",
    filters={"type": "preference"}
)
```

### Knowledge Base

Build a knowledge base from user interactions:

```python
# Add knowledge
await memory_service.add(
    messages=[{"role": "user", "content": "The project deadline is next Friday"}],
    user_id="user123",
    metadata={"type": "knowledge", "topic": "project"}
)

# Query knowledge
knowledge = await memory_service.search(
    query="When is the project deadline?",
    user_id="user123"
)
```

## Best Practices

1. **Use appropriate embedding models** - Choose based on accuracy vs. cost trade-offs
2. **Enable inference selectively** - Only enable `infer=True` when you need LLM insights
3. **Add metadata** - Include metadata for better filtering and organization
4. **Limit search results** - Use the `limit` parameter to control result size
5. **Filter by user** - Always specify `user_id` for user-specific memories
6. **Monitor costs** - Track API usage for embedding and LLM calls
7. **Clean up old memories** - Periodically delete outdated memories
8. **Use semantic queries** - Leverage semantic search for natural language queries
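
A short sketch tying several of these together — metadata on write (3), bounded and user-scoped search (4, 5), and periodic cleanup (7). The `"id"` key on the returned memory dicts is an assumption; check the actual shape returned by `get_all`:

```python
# Practice 3: attach metadata so memories can be filtered later
await memory_service.add(
    messages=[{"role": "user", "content": "I moved to the platform team"}],
    user_id="user123",
    metadata={"type": "knowledge", "recorded_at": "2024-06-01"},
)

# Practices 4 and 5: always scope to a user and bound the result size
results = await memory_service.search(
    query="What team does the user work on?",
    user_id="user123",
    limit=5,
    filters={"type": "knowledge"},
)

# Practice 7: periodically remove memories you have marked as outdated
outdated = await memory_service.get_all(user_id="user123", filters={"status": "outdated"})
for memory in outdated:
    await memory_service.delete(memory_id=memory["id"], user_id="user123")  # "id" key assumed
```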

## Error Handling

```python
from mdb_engine.memory import Mem0MemoryServiceError

try:
    memory = await memory_service.add(
        messages=[{"role": "user", "content": "Test"}],
        user_id="user123"
    )
except Mem0MemoryServiceError as e:
    print(f"Memory service error: {e}")
except Exception as e:
    print(f"Unexpected error: {e}")
```

## Environment Variables Reference

### OpenAI

```bash
export OPENAI_API_KEY="sk-..."
```

### Azure OpenAI

```bash
export AZURE_OPENAI_API_KEY="..."
export AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"
export AZURE_OPENAI_API_VERSION="2024-02-15-preview"
export AZURE_OPENAI_DEPLOYMENT_NAME="gpt-4"  # For LLM
```

## Graph Memory (Advanced)

Enable graph-based memory relationships:

```json
{
  "memory_config": {
    "enabled": true,
    "enable_graph": true,
    "graph_store": {
      "provider": "neo4j",
      "config": {
        "uri": "bolt://localhost:7687",
        "user": "neo4j",
        "password": "password"
      }
    }
  }
}
```

**Note**: Graph memory requires additional graph store configuration (Neo4j, Memgraph, etc.).
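
When constructing the service directly rather than through a manifest, the same graph settings can plausibly be supplied through the constructor's `config` parameter. A hedged sketch — whether `config` accepts a mem0-style `graph_store` block in exactly this shape is an assumption, so verify against `service.py`:

```python
from mdb_engine.memory import Mem0MemoryService

# Sketch only: the graph_store pass-through shape is assumed, not confirmed.
memory_service = Mem0MemoryService(
    mongo_uri="mongodb://localhost:27017",
    db_name="my_database",
    enable_graph=True,
    config={
        "graph_store": {
            "provider": "neo4j",
            "config": {
                "uri": "bolt://localhost:7687",
                "user": "neo4j",
                "password": "password",
            },
        }
    },
)
```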

## Related Modules

- **`embeddings/`** - Embedding generation service
- **`database/`** - MongoDB integration
- **`core/`** - MongoDBEngine integration

mdb_engine/memory/__init__.py
@@ -0,0 +1,30 @@

"""
Memory Service Module (Mem0 Integration)
-----------------------------------------
This module provides intelligent memory management using Mem0.ai.

Mem0 enables applications to:
- Store and retrieve user memories automatically
- Build knowledge graphs from conversations
- Provide context-aware responses based on user history

Key Features:
- **MongoDB Integration**: Uses MongoDB as the vector store (native integration with mdb-engine)
- **Standalone Operation**: Works with just MongoDB - no LLM required
- **Optional LLM Inference**: Can leverage LLM service for automatic memory
  extraction (set infer: false to disable)
- **Graph Support**: Optional knowledge graph construction for entity relationships

Dependencies:
    pip install mem0ai
"""

# Import service components (mem0 import is lazy within service.py)
from .service import (Mem0MemoryService, Mem0MemoryServiceError,
                      get_memory_service)

__all__ = [
    "Mem0MemoryService",
    "Mem0MemoryServiceError",
    "get_memory_service",
]
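
The comment above notes that the mem0 import is deferred to `service.py`. A common shape for that kind of lazy import guard — a sketch under that assumption, not the package's actual helper:

```python
def _require_mem0():
    """Import mem0 only when the service is actually constructed, so that
    `import mdb_engine.memory` succeeds even if mem0ai is not installed."""
    try:
        from mem0 import Memory  # mem0ai's main entry point
    except ImportError as exc:
        raise ImportError("The memory module requires mem0ai: pip install mem0ai") from exc
    return Memory
```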