mem-llm 1.0.3.tar.gz → 1.0.5.tar.gz
This diff shows the content of two publicly available package versions as released to their public registry. It is provided for informational purposes only and reflects the changes between those versions exactly as published.
Potentially problematic release. This version of mem-llm might be problematic; see the registry listing for details.
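To reproduce this comparison locally, you can download both sdists and diff the extracted trees. A minimal sketch, assuming `pip` and GNU diffutils are available (the sdist filenames follow the `mem_llm-<version>` pattern used in the file list below):

```bash
# Fetch both source distributions (no dependencies, force sdist over wheel)
pip download mem-llm==1.0.3 --no-deps --no-binary :all: -d v103
pip download mem-llm==1.0.5 --no-deps --no-binary :all: -d v105

# Unpack and compare the two trees
tar -xzf v103/mem_llm-1.0.3.tar.gz
tar -xzf v105/mem_llm-1.0.5.tar.gz
diff -ru mem_llm-1.0.3 mem_llm-1.0.5
```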
- mem_llm-1.0.5/PKG-INFO +304 -0
- mem_llm-1.0.5/README.md +263 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/__init__.py +1 -1
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/mem_agent.py +38 -26
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/memory_db.py +33 -7
- mem_llm-1.0.5/mem_llm.egg-info/PKG-INFO +304 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/setup.py +1 -1
- mem_llm-1.0.3/PKG-INFO +0 -382
- mem_llm-1.0.3/README.md +0 -341
- mem_llm-1.0.3/mem_llm.egg-info/PKG-INFO +0 -382
- {mem_llm-1.0.3 → mem_llm-1.0.5}/CHANGELOG.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/INTEGRATION_GUIDE.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/MANIFEST.in +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/QUICKSTART.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/QUICKSTART_TR.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/STRUCTURE.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/docs/CONFIG_GUIDE.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/docs/INDEX.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/docs/README.md +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/config.yaml.example +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/config_from_docs.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/config_manager.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/knowledge_loader.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/llm_client.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/memory_manager.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/memory_tools.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/prompt_templates.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm.egg-info/SOURCES.txt +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm.egg-info/dependency_links.txt +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm.egg-info/requires.txt +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm.egg-info/top_level.txt +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/requirements.txt +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/setup.cfg +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/tests/test_integration.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/tests/test_llm_client.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/tests/test_mem_agent.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/tests/test_memory_manager.py +0 -0
- {mem_llm-1.0.3 → mem_llm-1.0.5}/tests/test_memory_tools.py +0 -0
mem_llm-1.0.5/PKG-INFO
ADDED
@@ -0,0 +1,304 @@

Metadata-Version: 2.4
Name: mem-llm
Version: 1.0.5
Summary: Memory-enabled AI assistant with local LLM support
Home-page: https://github.com/emredeveloper/Mem-LLM
Author: C. Emre Karataş
Author-email: karatasqemre@gmail.com
Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
Keywords: llm ai memory agent chatbot ollama local
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: requests>=2.31.0
Requires-Dist: pyyaml>=6.0.1
Provides-Extra: dev
Requires-Dist: pytest>=7.4.0; extra == "dev"
Requires-Dist: black>=23.7.0; extra == "dev"
Requires-Dist: flake8>=6.1.0; extra == "dev"
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: project-url
Dynamic: provides-extra
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

# 🧠 mem-llm

**Memory-enabled AI assistant that remembers conversations using local LLMs**

[Python 3.8+](https://www.python.org/downloads/)
[PyPI](https://pypi.org/project/mem-llm/)
[MIT License](LICENSE)

---

## 🎯 What is it?

A lightweight Python library that adds **persistent memory** to local LLM chatbots. Each user gets their own conversation history that the AI remembers across sessions.

**Perfect for:**
- 💬 Customer service chatbots
- 🤖 Personal AI assistants
- 📝 Context-aware applications
- 🏢 Business automation

---

## ⚡ Quick Start

### 1. Install

```bash
pip install mem-llm
```

### 2. Set up Ollama (one-time)

```bash
# Install: https://ollama.ai/download
ollama serve

# Download the model (only 2.5GB)
ollama pull granite4:tiny-h
```

### 3. Use

```python
from mem_llm import MemAgent

# Create the agent (one line!)
agent = MemAgent()

# Set the active user
agent.set_user("john")

# Chat - it remembers!
agent.chat("My name is John")
agent.chat("What's my name?")  # → "Your name is John"
```

---

## 💡 Features

| Feature | Description |
|---------|-------------|
| 🧠 **Memory** | Remembers each user's conversation history |
| 👥 **Multi-user** | Separate memory for each user |
| 🔒 **Privacy** | 100% local, no cloud/API needed |
| ⚡ **Fast** | Lightweight SQLite/JSON storage |
| 🎯 **Simple** | 3 lines of code to get started |

---

## 📖 Usage Examples

### Basic Chat

```python
from mem_llm import MemAgent

agent = MemAgent()
agent.set_user("alice")

# First conversation
agent.chat("I love pizza")

# Later...
agent.chat("What's my favorite food?")
# → "Your favorite food is pizza"
```

### Customer Service Bot

```python
agent = MemAgent()

# Customer 1
agent.set_user("customer_001")
agent.chat("My order #12345 is delayed")

# Customer 2 (different memory!)
agent.set_user("customer_002")
agent.chat("I want to return item #67890")
```

### Check User Profile

```python
# Get automatically extracted user info
profile = agent.get_user_profile()
# {'name': 'Alice', 'favorite_food': 'pizza', 'location': 'NYC'}
```

---

## 🔧 Configuration

### JSON Memory (default - simple)

```python
agent = MemAgent(
    model="granite4:tiny-h",
    use_sql=False,   # Use JSON files
    memory_dir="memories"
)
```

### SQL Memory (advanced - faster)

```python
agent = MemAgent(
    model="granite4:tiny-h",
    use_sql=True,    # Use SQLite
    memory_dir="memories.db"
)
```

### Custom Settings

```python
agent = MemAgent(
    model="llama2",  # Any Ollama model
    ollama_url="http://localhost:11434"
)
```

---

## 📚 API Reference

### MemAgent

```python
# Initialize
agent = MemAgent(model="granite4:tiny-h", use_sql=False)

# Set active user
agent.set_user(user_id: str, name: Optional[str] = None)

# Chat
response = agent.chat(message: str, metadata: Optional[Dict] = None) -> str

# Get profile
profile = agent.get_user_profile(user_id: Optional[str] = None) -> Dict

# System check
status = agent.check_setup() -> Dict
```

---

## 🎨 Advanced: PDF/DOCX Config

Generate a config from business documents:

```python
from mem_llm import create_config_from_document

# Create config.yaml from a PDF
create_config_from_document(
    doc_path="company_info.pdf",
    output_path="config.yaml",
    company_name="Acme Corp"
)

# Use the config
agent = MemAgent(config_file="config.yaml")
```

---

## 🔥 Models

Works with any [Ollama](https://ollama.ai/) model:

| Model | Size | Speed | Quality |
|-------|------|-------|---------|
| `granite4:tiny-h` | 2.5GB | ⚡⚡⚡ | ⭐⭐ |
| `llama2` | 4GB | ⚡⚡ | ⭐⭐⭐ |
| `mistral` | 4GB | ⚡⚡ | ⭐⭐⭐⭐ |
| `llama3` | 5GB | ⚡ | ⭐⭐⭐⭐⭐ |

```bash
ollama pull <model-name>
```

---

## 📦 Requirements

- Python 3.8+
- Ollama (for the LLM)
- 4GB RAM minimum
- 5GB disk space

**Dependencies** (auto-installed):
- `requests >= 2.31.0`
- `pyyaml >= 6.0.1`

---

## 🐛 Troubleshooting

### Ollama not running?

```bash
ollama serve
```

### Model not found?

```bash
ollama pull granite4:tiny-h
```

### Import error?

```bash
pip install mem-llm --upgrade
```

---

## 📄 License

MIT License - feel free to use it in personal and commercial projects!

---

## 🔗 Links

- **PyPI:** https://pypi.org/project/mem-llm/
- **GitHub:** https://github.com/emredeveloper/Mem-LLM
- **Ollama:** https://ollama.ai/

---

## 🌟 Star us on GitHub!

If you find this useful, give us a ⭐ on [GitHub](https://github.com/emredeveloper/Mem-LLM)!

---

<div align="center">
Made with ❤️ by <a href="https://github.com/emredeveloper">C. Emre Karataş</a>
</div>
mem_llm-1.0.5/README.md
ADDED
@@ -0,0 +1,263 @@

# 🧠 mem-llm

**Memory-enabled AI assistant that remembers conversations using local LLMs**

[Python 3.8+](https://www.python.org/downloads/)
[PyPI](https://pypi.org/project/mem-llm/)
[MIT License](LICENSE)

---

## 🎯 What is it?

A lightweight Python library that adds **persistent memory** to local LLM chatbots. Each user gets their own conversation history that the AI remembers across sessions.

**Perfect for:**
- 💬 Customer service chatbots
- 🤖 Personal AI assistants
- 📝 Context-aware applications
- 🏢 Business automation

---

## ⚡ Quick Start

### 1. Install

```bash
pip install mem-llm
```

### 2. Set up Ollama (one-time)

```bash
# Install: https://ollama.ai/download
ollama serve

# Download the model (only 2.5GB)
ollama pull granite4:tiny-h
```

### 3. Use

```python
from mem_llm import MemAgent

# Create the agent (one line!)
agent = MemAgent()

# Set the active user
agent.set_user("john")

# Chat - it remembers!
agent.chat("My name is John")
agent.chat("What's my name?")  # → "Your name is John"
```

---

## 💡 Features

| Feature | Description |
|---------|-------------|
| 🧠 **Memory** | Remembers each user's conversation history |
| 👥 **Multi-user** | Separate memory for each user |
| 🔒 **Privacy** | 100% local, no cloud/API needed |
| ⚡ **Fast** | Lightweight SQLite/JSON storage |
| 🎯 **Simple** | 3 lines of code to get started |

---

## 📖 Usage Examples

### Basic Chat

```python
from mem_llm import MemAgent

agent = MemAgent()
agent.set_user("alice")

# First conversation
agent.chat("I love pizza")

# Later...
agent.chat("What's my favorite food?")
# → "Your favorite food is pizza"
```

### Customer Service Bot

```python
agent = MemAgent()

# Customer 1
agent.set_user("customer_001")
agent.chat("My order #12345 is delayed")

# Customer 2 (different memory!)
agent.set_user("customer_002")
agent.chat("I want to return item #67890")
```

### Check User Profile

```python
# Get automatically extracted user info
profile = agent.get_user_profile()
# {'name': 'Alice', 'favorite_food': 'pizza', 'location': 'NYC'}
```

---

## 🔧 Configuration

### JSON Memory (default - simple)

```python
agent = MemAgent(
    model="granite4:tiny-h",
    use_sql=False,   # Use JSON files
    memory_dir="memories"
)
```

### SQL Memory (advanced - faster)

```python
agent = MemAgent(
    model="granite4:tiny-h",
    use_sql=True,    # Use SQLite
    memory_dir="memories.db"
)
```

### Custom Settings

```python
agent = MemAgent(
    model="llama2",  # Any Ollama model
    ollama_url="http://localhost:11434"
)
```

---

## 📚 API Reference

### MemAgent

```python
# Initialize
agent = MemAgent(model="granite4:tiny-h", use_sql=False)

# Set active user
agent.set_user(user_id: str, name: Optional[str] = None)

# Chat
response = agent.chat(message: str, metadata: Optional[Dict] = None) -> str

# Get profile
profile = agent.get_user_profile(user_id: Optional[str] = None) -> Dict

# System check
status = agent.check_setup() -> Dict
```

---

## 🎨 Advanced: PDF/DOCX Config

Generate a config from business documents:

```python
from mem_llm import create_config_from_document

# Create config.yaml from a PDF
create_config_from_document(
    doc_path="company_info.pdf",
    output_path="config.yaml",
    company_name="Acme Corp"
)

# Use the config
agent = MemAgent(config_file="config.yaml")
```

---

## 🔥 Models

Works with any [Ollama](https://ollama.ai/) model:

| Model | Size | Speed | Quality |
|-------|------|-------|---------|
| `granite4:tiny-h` | 2.5GB | ⚡⚡⚡ | ⭐⭐ |
| `llama2` | 4GB | ⚡⚡ | ⭐⭐⭐ |
| `mistral` | 4GB | ⚡⚡ | ⭐⭐⭐⭐ |
| `llama3` | 5GB | ⚡ | ⭐⭐⭐⭐⭐ |

```bash
ollama pull <model-name>
```

---

## 📦 Requirements

- Python 3.8+
- Ollama (for the LLM)
- 4GB RAM minimum
- 5GB disk space

**Dependencies** (auto-installed):
- `requests >= 2.31.0`
- `pyyaml >= 6.0.1`

---

## 🐛 Troubleshooting

### Ollama not running?

```bash
ollama serve
```

### Model not found?

```bash
ollama pull granite4:tiny-h
```

### Import error?

```bash
pip install mem-llm --upgrade
```

---

## 📄 License

MIT License - feel free to use it in personal and commercial projects!

---

## 🔗 Links

- **PyPI:** https://pypi.org/project/mem-llm/
- **GitHub:** https://github.com/emredeveloper/Mem-LLM
- **Ollama:** https://ollama.ai/

---

## 🌟 Star us on GitHub!

If you find this useful, give us a ⭐ on [GitHub](https://github.com/emredeveloper/Mem-LLM)!

---

<div align="center">
Made with ❤️ by <a href="https://github.com/emredeveloper">C. Emre Karataş</a>
</div>
{mem_llm-1.0.3 → mem_llm-1.0.5}/mem_llm/mem_agent.py
CHANGED

```diff
@@ -220,22 +220,30 @@ class MemAgent:
         except Exception as e:
             self.logger.error(f"Prompt template loading error: {e}")
             # Simple, short and effective default prompt
-            self.current_system_prompt = """You are a
+            self.current_system_prompt = """You are a helpful AI assistant with access to a knowledge base.
 
-RULES (
-1.
-2.
-3.
-4.
-5.
+CRITICAL RULES (FOLLOW EXACTLY):
+1. If KNOWLEDGE BASE information is provided below, USE IT FIRST - it's the correct answer!
+2. Knowledge base answers are marked with "📚 RELEVANT KNOWLEDGE BASE"
+3. Keep responses SHORT (1-3 sentences maximum)
+4. When user shares personal info: Just acknowledge briefly ("Got it!" or "Noted!")
+5. Answer from knowledge base EXACTLY as written, don't make up information
+6. If knowledge base has no info, use conversation history or say "I don't have that information"
+
+RESPONSE PRIORITY:
+1st Priority: Knowledge Base (if available) ← USE THIS!
+2nd Priority: Conversation History
+3rd Priority: General knowledge (be brief)
 
 EXAMPLES:
+User: "What's the shipping cost?"
+Knowledge Base: "Shipping is free over $150"
+You: "Shipping is free for orders over $150!"
+
 User: "My name is Alice" → You: "Nice to meet you, Alice!"
-User: "My favorite food is pizza" → You: "Got it!"
 User: "What's my name?" → You: "Your name is Alice."
-User: "Tell me about Python" → You: "Python is a versatile programming language for web, data science, and AI."
 
-
+REMEMBER: Knowledge base = truth. Always use it when provided!"""
 
     def check_setup(self) -> Dict[str, Any]:
         """Check system setup"""
```
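For context, this is what a knowledge-base-first system prompt like the one above looks like in an actual request. A minimal sketch against Ollama's public `/api/chat` endpoint rather than mem-llm's internal client; the prompt text is abbreviated and the model name is the default suggested in the README:

```python
import requests

# Abbreviated stand-in for the 1.0.5 fallback prompt above (illustration only)
SYSTEM_PROMPT = (
    "You are a helpful AI assistant with access to a knowledge base.\n"
    "If KNOWLEDGE BASE information is provided below, USE IT FIRST.\n"
    "Keep responses SHORT (1-3 sentences maximum)."
)

def chat_once(user_message: str, model: str = "granite4:tiny-h") -> str:
    """Send one system+user exchange to a local Ollama server."""
    resp = requests.post(
        "http://localhost:11434/api/chat",
        json={
            "model": model,
            "messages": [
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_message},
            ],
            "stream": False,  # return a single JSON object instead of chunks
        },
        timeout=120,
    )
    resp.raise_for_status()
    return resp.json()["message"]["content"]

if __name__ == "__main__":
    print(chat_once("What's the shipping cost?"))
```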
```diff
@@ -322,18 +330,24 @@ BE BRIEF OR USER WILL LEAVE!"""
 
         # Knowledge base search (if using SQL)
         kb_context = ""
-        if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager)
-            if
+        if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager):
+            # Check config only if it exists, otherwise always use KB
+            use_kb = True
+            kb_limit = 5
+
+            if hasattr(self, 'config') and self.config:
+                use_kb = self.config.get("response.use_knowledge_base", True)
+                kb_limit = self.config.get("knowledge_base.search_limit", 5)
+
+            if use_kb:
                 try:
-                    kb_results = self.memory.search_knowledge(
-                        query=message,
-                        limit=self.config.get("knowledge_base.search_limit", 5)
-                    )
+                    kb_results = self.memory.search_knowledge(query=message, limit=kb_limit)
 
                     if kb_results:
-                        kb_context = "\n\
+                        kb_context = "\n\n📚 RELEVANT KNOWLEDGE BASE:\n"
                         for i, result in enumerate(kb_results, 1):
-                            kb_context += f"{i}.
+                            kb_context += f"{i}. Q: {result['question']}\n A: {result['answer']}\n"
+                        kb_context += "\n⚠️ USE THIS INFORMATION TO ANSWER! Be brief but accurate.\n"
                 except Exception as e:
                     self.logger.error(f"Knowledge base search error: {e}")
 
```
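The pattern in this hunk (hard-coded defaults first, config overrides applied only when a config object exists) is easy to exercise in isolation. A sketch with a hypothetical `DottedConfig` stand-in; mem-llm's real config object does not appear in this diff, only its `get("dotted.key", default)` call shape:

```python
from typing import Any, Dict, Optional, Tuple

class DottedConfig:
    """Hypothetical stand-in for mem-llm's config: get() takes a dotted key."""

    def __init__(self, data: Dict[str, Any]) -> None:
        self._data = data

    def get(self, dotted_key: str, default: Any = None) -> Any:
        # Walk nested dicts one key segment at a time; fall back to default
        node: Any = self._data
        for part in dotted_key.split("."):
            if not isinstance(node, dict) or part not in node:
                return default
            node = node[part]
        return node

def kb_settings(config: Optional[DottedConfig]) -> Tuple[bool, int]:
    # Mirrors the hunk: safe defaults, overridden only if a config exists
    use_kb, kb_limit = True, 5
    if config:
        use_kb = config.get("response.use_knowledge_base", True)
        kb_limit = config.get("knowledge_base.search_limit", 5)
    return use_kb, kb_limit

print(kb_settings(None))  # (True, 5) - no config, KB always on
print(kb_settings(DottedConfig({"knowledge_base": {"search_limit": 3}})))  # (True, 3)
```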
```diff
@@ -355,15 +369,13 @@ BE BRIEF OR USER WILL LEAVE!"""
         except Exception as e:
             self.logger.error(f"Memory history loading error: {e}")
 
-        # Add knowledge base context
+        # Add current message WITH knowledge base context (if available)
+        final_message = message
         if kb_context:
-
-
-
-
-
-        # Add current message
-        messages.append({"role": "user", "content": message})
+            # Inject KB directly into user message for maximum visibility
+            final_message = f"{kb_context}\n\nUser Question: {message}"
+
+        messages.append({"role": "user", "content": final_message})
 
         # Get response from LLM
         try:
```
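The net effect of the last two hunks is that knowledge-base hits are now prepended to the user turn itself rather than added as a separate message. A sketch of that assembly step, reusing the exact formatting strings from the hunks above (the surrounding MemAgent plumbing is omitted):

```python
from typing import Dict, List

def build_user_message(message: str, kb_results: List[Dict[str, str]]) -> str:
    """Prepend knowledge-base hits to the user turn, as mem-llm 1.0.5 does."""
    if not kb_results:
        return message
    kb_context = "\n\n📚 RELEVANT KNOWLEDGE BASE:\n"
    for i, result in enumerate(kb_results, 1):
        kb_context += f"{i}. Q: {result['question']}\n A: {result['answer']}\n"
    kb_context += "\n⚠️ USE THIS INFORMATION TO ANSWER! Be brief but accurate.\n"
    # Inject KB directly into the user message for maximum visibility
    return f"{kb_context}\n\nUser Question: {message}"

hits = [{"question": "What's the shipping cost?",
         "answer": "Shipping is free over $150"}]
print(build_user_message("What's the shipping cost?", hits))
```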