mem-llm 1.0.2__py3-none-any.whl → 1.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mem-llm might be problematic.
- mem_llm/__init__.py +1 -1
- mem_llm/llm_client.py +5 -1
- mem_llm/mem_agent.py +113 -23
- mem_llm-1.0.4.dist-info/METADATA +304 -0
- {mem_llm-1.0.2.dist-info → mem_llm-1.0.4.dist-info}/RECORD +7 -7
- mem_llm-1.0.2.dist-info/METADATA +0 -382
- {mem_llm-1.0.2.dist-info → mem_llm-1.0.4.dist-info}/WHEEL +0 -0
- {mem_llm-1.0.2.dist-info → mem_llm-1.0.4.dist-info}/top_level.txt +0 -0
mem_llm/__init__.py
CHANGED
mem_llm/llm_client.py
CHANGED
```diff
@@ -107,7 +107,11 @@ class OllamaClient:
             "stream": False,
             "options": {
                 "temperature": temperature,
-                "num_predict": max_tokens
+                "num_predict": max_tokens,
+                "num_ctx": 2048,  # Context window
+                "top_k": 40,  # Limit vocab
+                "top_p": 0.9,  # Nucleus sampling
+                "stop": ["\n\n\n", "---"]  # Stop sequences
             }
         }
```
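The added options are standard Ollama generation parameters, passed through in the request's `options` field. A minimal sketch of the request this produces against Ollama's `/api/chat` endpoint (model name, URL, and values are illustrative, not taken from the package):

```python
import requests

# Illustrative request mirroring the options added in the diff above.
payload = {
    "model": "granite4:tiny-h",  # any pulled Ollama model
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": False,
    "options": {
        "temperature": 0.2,
        "num_predict": 150,         # cap on generated tokens
        "num_ctx": 2048,            # context window
        "top_k": 40,                # sample only from the 40 most likely tokens
        "top_p": 0.9,               # nucleus sampling
        "stop": ["\n\n\n", "---"],  # cut generation off at these sequences
    },
}
resp = requests.post("http://localhost:11434/api/chat", json=payload, timeout=120)
print(resp.json()["message"]["content"])
```

The smaller context window and the stop sequences trade capability for speed and brevity, which matches the short-answer style the new system prompt (below) asks for.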
mem_llm/mem_agent.py
CHANGED
```diff
@@ -220,11 +220,30 @@ class MemAgent:
         except Exception as e:
             self.logger.error(f"Prompt template loading error: {e}")
             # Simple, short and effective default prompt
-            self.current_system_prompt = """You are a helpful AI assistant
-
-
-
-
+            self.current_system_prompt = """You are a helpful AI assistant with access to a knowledge base.
+
+CRITICAL RULES (FOLLOW EXACTLY):
+1. If KNOWLEDGE BASE information is provided below, USE IT FIRST - it's the correct answer!
+2. Knowledge base answers are marked with "📚 RELEVANT KNOWLEDGE BASE"
+3. Keep responses SHORT (1-3 sentences maximum)
+4. When user shares personal info: Just acknowledge briefly ("Got it!" or "Noted!")
+5. Answer from knowledge base EXACTLY as written, don't make up information
+6. If knowledge base has no info, use conversation history or say "I don't have that information"
+
+RESPONSE PRIORITY:
+1st Priority: Knowledge Base (if available) ← USE THIS!
+2nd Priority: Conversation History
+3rd Priority: General knowledge (be brief)
+
+EXAMPLES:
+User: "What's the shipping cost?"
+Knowledge Base: "Shipping is free over $150"
+You: "Shipping is free for orders over $150!"
+
+User: "My name is Alice" → You: "Nice to meet you, Alice!"
+User: "What's my name?" → You: "Your name is Alice."
+
+REMEMBER: Knowledge base = truth. Always use it when provided!"""
 
     def check_setup(self) -> Dict[str, Any]:
         """Check system setup"""
```
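This default prompt only matters once it is installed at the head of the chat transcript. A sketch of the message list the agent presumably builds, assuming the standard system/user/assistant schema (sample turns invented for illustration):

```python
# Illustrative transcript: system prompt first, then prior turns replayed
# from this user's memory, then the current question.
system_prompt = "You are a helpful AI assistant with access to a knowledge base. ..."  # default above

messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "My name is Alice"},            # from memory
    {"role": "assistant", "content": "Nice to meet you, Alice!"},
    {"role": "user", "content": "What's my name?"},             # current turn
]
```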
```diff
@@ -311,18 +330,24 @@ class MemAgent:
 
         # Knowledge base search (if using SQL)
         kb_context = ""
-        if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager)
-            if
+        if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager):
+            # Check config only if it exists, otherwise always use KB
+            use_kb = True
+            kb_limit = 5
+
+            if hasattr(self, 'config') and self.config:
+                use_kb = self.config.get("response.use_knowledge_base", True)
+                kb_limit = self.config.get("knowledge_base.search_limit", 5)
+
+            if use_kb:
                 try:
-                    kb_results = self.memory.search_knowledge(
-                        query=message,
-                        limit=self.config.get("knowledge_base.search_limit", 5)
-                    )
+                    kb_results = self.memory.search_knowledge(query=message, limit=kb_limit)
 
                     if kb_results:
-                        kb_context = "\n\
+                        kb_context = "\n\n📚 RELEVANT KNOWLEDGE BASE:\n"
                         for i, result in enumerate(kb_results, 1):
-                            kb_context += f"{i}.
+                            kb_context += f"{i}. Q: {result['question']}\n   A: {result['answer']}\n"
+                        kb_context += "\n⚠️ USE THIS INFORMATION TO ANSWER! Be brief but accurate.\n"
                 except Exception as e:
                     self.logger.error(f"Knowledge base search error: {e}")
```
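For a concrete picture of what the search produces, here is the same string assembly run standalone on a hypothetical hit (the Q/A pair is invented for illustration):

```python
# Hypothetical result from self.memory.search_knowledge(...)
kb_results = [
    {"question": "What's the shipping cost?", "answer": "Shipping is free over $150"},
]

kb_context = "\n\n📚 RELEVANT KNOWLEDGE BASE:\n"
for i, result in enumerate(kb_results, 1):
    kb_context += f"{i}. Q: {result['question']}\n   A: {result['answer']}\n"
kb_context += "\n⚠️ USE THIS INFORMATION TO ANSWER! Be brief but accurate.\n"
print(kb_context)
```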
```diff
@@ -344,22 +369,20 @@ class MemAgent:
         except Exception as e:
             self.logger.error(f"Memory history loading error: {e}")
 
-        # Add knowledge base context
+        # Add current message WITH knowledge base context (if available)
+        final_message = message
         if kb_context:
-
-
-
-
-
-        # Add current message
-        messages.append({"role": "user", "content": message})
+            # Inject KB directly into user message for maximum visibility
+            final_message = f"{kb_context}\n\nUser Question: {message}"
+
+        messages.append({"role": "user", "content": final_message})
 
         # Get response from LLM
         try:
             response = self.llm.chat(
                 messages=messages,
-                temperature=self.config.get("llm.temperature", 0.
-                max_tokens=self.config.get("llm.max_tokens",
+                temperature=self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2,  # Very focused
+                max_tokens=self.config.get("llm.max_tokens", 150) if hasattr(self, 'config') and self.config else 150  # Max 2-3 sentences
             )
         except Exception as e:
             self.logger.error(f"LLM response error: {e}")
```
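Rather than carrying the knowledge base as a separate system message, the new code splices it into the user turn itself, so even a small model sees it right next to the question. Continuing the sketch above:

```python
message = "What's the shipping cost?"

# kb_context as assembled in the previous sketch
final_message = f"{kb_context}\n\nUser Question: {message}"
messages.append({"role": "user", "content": final_message})
```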
```diff
@@ -374,11 +397,78 @@ class MemAgent:
                 bot_response=response,
                 metadata=metadata
             )
+
+            # Extract and save user info to profile
+            self._update_user_profile(user_id, message, response)
         except Exception as e:
             self.logger.error(f"Interaction saving error: {e}")
 
         return response
+
+    def _update_user_profile(self, user_id: str, message: str, response: str):
+        """Extract user info from conversation and update profile"""
+        if not hasattr(self.memory, 'update_profile'):
+            return
+
+        msg_lower = message.lower()
+        updates = {}
+
+        # Extract name
+        if "my name is" in msg_lower or "i am" in msg_lower or "i'm" in msg_lower:
+            # Simple name extraction
+            for phrase in ["my name is ", "i am ", "i'm "]:
+                if phrase in msg_lower:
+                    name_part = message[msg_lower.index(phrase) + len(phrase):].strip()
+                    name = name_part.split()[0] if name_part else None
+                    if name and len(name) > 1:
+                        updates['name'] = name.strip('.,!?')
+                    break
+
+        # Extract favorite food
+        if "favorite food" in msg_lower or "favourite food" in msg_lower:
+            if "is" in msg_lower:
+                food = msg_lower.split("is")[-1].strip().strip('.,!?')
+                if food and len(food) < 50:
+                    updates['favorite_food'] = food
+
+        # Extract location
+        if "i live in" in msg_lower or "i'm from" in msg_lower or "from" in msg_lower:
+            for phrase in ["i live in ", "i'm from ", "from "]:
+                if phrase in msg_lower:
+                    loc = message[msg_lower.index(phrase) + len(phrase):].strip()
+                    location = loc.split()[0] if loc else None
+                    if location and len(location) > 2:
+                        updates['location'] = location.strip('.,!?')
+                    break
+
+        # Save updates
+        if updates:
+            try:
+                self.memory.update_profile(user_id, updates)
+                self.logger.debug(f"Profile updated for {user_id}: {updates}")
+            except:
+                pass
 
+    def get_user_profile(self, user_id: Optional[str] = None) -> Dict:
+        """
+        Get user's profile info
+
+        Args:
+            user_id: User ID (uses current_user if not specified)
+
+        Returns:
+            User profile dictionary
+        """
+        uid = user_id or self.current_user
+        if not uid:
+            return {}
+
+        try:
+            memory_data = self.memory.load_memory(uid)
+            return memory_data.get('profile', {})
+        except:
+            return {}
+
     def add_knowledge(self, category: str, question: str, answer: str,
                       keywords: Optional[List[str]] = None, priority: int = 0) -> int:
         """Add new record to knowledge base"""
```
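The profile extraction is plain substring matching, so it only fires on the literal trigger phrases above. A hypothetical session showing the intended effect (behavior inferred from the heuristics in this diff and the README example further down):

```python
from mem_llm import MemAgent

agent = MemAgent()
agent.set_user("alice")
agent.chat("My name is Alice")  # "my name is" trigger → updates['name'] = 'Alice'
agent.chat("I live in Paris")   # "i live in" trigger → updates['location'] = 'Paris'

print(agent.get_user_profile())
# expected: {'name': 'Alice', 'location': 'Paris', ...}
```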
mem_llm-1.0.4.dist-info/METADATA
ADDED
@@ -0,0 +1,304 @@

```
Metadata-Version: 2.4
Name: mem-llm
Version: 1.0.4
Summary: Memory-enabled AI assistant with local LLM support
Home-page: https://github.com/emredeveloper/Mem-LLM
Author: C. Emre Karataş
Author-email: karatasqemre@gmail.com
Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
Keywords: llm ai memory agent chatbot ollama local
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: requests>=2.31.0
Requires-Dist: pyyaml>=6.0.1
Provides-Extra: dev
Requires-Dist: pytest>=7.4.0; extra == "dev"
Requires-Dist: black>=23.7.0; extra == "dev"
Requires-Dist: flake8>=6.1.0; extra == "dev"
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: project-url
Dynamic: provides-extra
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary
```

# 🧠 mem-llm

**Memory-enabled AI assistant that remembers conversations using local LLMs**

[](https://www.python.org/downloads/)
[](https://pypi.org/project/mem-llm/)
[](LICENSE)

---

## 🎯 What is it?

A lightweight Python library that adds **persistent memory** to local LLM chatbots. Each user gets their own conversation history that the AI remembers across sessions.

**Perfect for:**
- 💬 Customer service chatbots
- 🤖 Personal AI assistants
- 📝 Context-aware applications
- 🏢 Business automation

---

## ⚡ Quick Start

### 1. Install

```bash
pip install mem-llm
```

### 2. Setup Ollama (one-time)

```bash
# Install: https://ollama.ai/download
ollama serve

# Download model (only 2.5GB)
ollama pull granite4:tiny-h
```

### 3. Use

```python
from mem_llm import MemAgent

# Create agent (one line!)
agent = MemAgent()

# Set user
agent.set_user("john")

# Chat - it remembers!
agent.chat("My name is John")
agent.chat("What's my name?")  # → "Your name is John"
```

---

## 💡 Features

| Feature | Description |
|---------|-------------|
| 🧠 **Memory** | Remembers each user's conversation history |
| 👥 **Multi-user** | Separate memory for each user |
| 🔒 **Privacy** | 100% local, no cloud/API needed |
| ⚡ **Fast** | Lightweight SQLite/JSON storage |
| 🎯 **Simple** | 3 lines of code to get started |

---

## 📖 Usage Examples

### Basic Chat

```python
from mem_llm import MemAgent

agent = MemAgent()
agent.set_user("alice")

# First conversation
agent.chat("I love pizza")

# Later...
agent.chat("What's my favorite food?")
# → "Your favorite food is pizza"
```

### Customer Service Bot

```python
agent = MemAgent()

# Customer 1
agent.set_user("customer_001")
agent.chat("My order #12345 is delayed")

# Customer 2 (different memory!)
agent.set_user("customer_002")
agent.chat("I want to return item #67890")
```

### Check User Profile

```python
# Get automatically extracted user info
profile = agent.get_user_profile()
# {'name': 'Alice', 'favorite_food': 'pizza', 'location': 'NYC'}
```

---

## 🔧 Configuration

### JSON Memory (default - simple)

```python
agent = MemAgent(
    model="granite4:tiny-h",
    use_sql=False,  # Use JSON files
    memory_dir="memories"
)
```

### SQL Memory (advanced - faster)

```python
agent = MemAgent(
    model="granite4:tiny-h",
    use_sql=True,  # Use SQLite
    memory_dir="memories.db"
)
```

### Custom Settings

```python
agent = MemAgent(
    model="llama2",  # Any Ollama model
    ollama_url="http://localhost:11434"
)
```

---

## 📚 API Reference

### MemAgent

```python
# Initialize
agent = MemAgent(model="granite4:tiny-h", use_sql=False)

# Set active user
agent.set_user(user_id: str, name: Optional[str] = None)

# Chat
response = agent.chat(message: str, metadata: Optional[Dict] = None) -> str

# Get profile
profile = agent.get_user_profile(user_id: Optional[str] = None) -> Dict

# System check
status = agent.check_setup() -> Dict
```

---

## 🎨 Advanced: PDF/DOCX Config

Generate config from business documents:

```python
from mem_llm import create_config_from_document

# Create config.yaml from PDF
create_config_from_document(
    doc_path="company_info.pdf",
    output_path="config.yaml",
    company_name="Acme Corp"
)

# Use config
agent = MemAgent(config_file="config.yaml")
```

---

## 🔥 Models

Works with any [Ollama](https://ollama.ai/) model:

| Model | Size | Speed | Quality |
|-------|------|-------|---------|
| `granite4:tiny-h` | 2.5GB | ⚡⚡⚡ | ⭐⭐ |
| `llama2` | 4GB | ⚡⚡ | ⭐⭐⭐ |
| `mistral` | 4GB | ⚡⚡ | ⭐⭐⭐⭐ |
| `llama3` | 5GB | ⚡ | ⭐⭐⭐⭐⭐ |

```bash
ollama pull <model-name>
```

---

## 📦 Requirements

- Python 3.8+
- Ollama (for LLM)
- 4GB RAM minimum
- 5GB disk space

**Dependencies** (auto-installed):
- `requests >= 2.31.0`
- `pyyaml >= 6.0.1`

---

## 🐛 Troubleshooting

### Ollama not running?

```bash
ollama serve
```

### Model not found?

```bash
ollama pull granite4:tiny-h
```

### Import error?

```bash
pip install mem-llm --upgrade
```

---

## 📄 License

MIT License - feel free to use in personal and commercial projects!

---

## 🔗 Links

- **PyPI:** https://pypi.org/project/mem-llm/
- **GitHub:** https://github.com/emredeveloper/Mem-LLM
- **Ollama:** https://ollama.ai/

---

## 🌟 Star us on GitHub!

If you find this useful, give us a ⭐ on [GitHub](https://github.com/emredeveloper/Mem-LLM)!

---

<div align="center">
Made with ❤️ by <a href="https://github.com/emredeveloper">C. Emre Karataş</a>
</div>
{mem_llm-1.0.2.dist-info → mem_llm-1.0.4.dist-info}/RECORD
CHANGED

```diff
@@ -1,15 +1,15 @@
-mem_llm/__init__.py,sha256=
+mem_llm/__init__.py,sha256=8aY47mzeaxhyimSQ3vU4FeiHTvyRnderMhX0qkQ5zKw,920
 mem_llm/config.yaml.example,sha256=lgmfaU5pxnIm4zYxwgCcgLSohNx1Jw6oh3Qk0Xoe2DE,917
 mem_llm/config_from_docs.py,sha256=YFhq1SWyK63C-TNMS73ncNHg8sJ-XGOf2idWVCjxFco,4974
 mem_llm/config_manager.py,sha256=8PIHs21jZWlI-eG9DgekjOvNxU3-U4xH7SbT8Gr-Z6M,7075
 mem_llm/knowledge_loader.py,sha256=oSNhfYYcx7DlZLVogxnbSwaIydq_Q3__RDJFeZR2XVw,2699
-mem_llm/llm_client.py,sha256=
-mem_llm/mem_agent.py,sha256=
+mem_llm/llm_client.py,sha256=XW-LALYV4C-Nj2R8XYT2iT2YnTeH6_tUIldMKooY2uY,5461
+mem_llm/mem_agent.py,sha256=FYiZyOIadURC0RdsY-aRdaZ53oxtOj0U5Crt2yWEbz8,24058
 mem_llm/memory_db.py,sha256=KyNIcChYihSavd2ot5KMBlVB9lq8rexoBQ0lA5bCJNI,12611
 mem_llm/memory_manager.py,sha256=iXnf5YEJXmQ75jgJ2LEx9zCHxIpZTcLtHlp2eWgFjRg,8335
 mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
 mem_llm/prompt_templates.py,sha256=tCiQJw3QQKIaH8NsxEKOIaIVxw4XT43PwdmyfCINzzM,6536
-mem_llm-1.0.
-mem_llm-1.0.
-mem_llm-1.0.
-mem_llm-1.0.
+mem_llm-1.0.4.dist-info/METADATA,sha256=H4njHFVhNXq0msFIpiQdoE7sCcGB3YrR_LP6vzZH_tc,6505
+mem_llm-1.0.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mem_llm-1.0.4.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
+mem_llm-1.0.4.dist-info/RECORD,,
```
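Each RECORD line follows the wheel format `path,sha256=<urlsafe-base64 digest without padding>,<size in bytes>`, so the refreshed hashes can be recomputed from the shipped files. A minimal sketch (the file path is illustrative):

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Recompute a wheel RECORD line for one file."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

print(record_entry("mem_llm/llm_client.py"))  # compare against the RECORD line above
```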
mem_llm-1.0.2.dist-info/METADATA
DELETED
@@ -1,382 +0,0 @@

```
Metadata-Version: 2.4
Name: mem-llm
Version: 1.0.2
Summary: Memory-enabled AI assistant with local LLM support
Home-page: https://github.com/emredeveloper/Mem-LLM
Author: C. Emre Karataş
Author-email: karatasqemre@gmail.com
Project-URL: Bug Reports, https://github.com/emredeveloper/Mem-LLM/issues
Project-URL: Source, https://github.com/emredeveloper/Mem-LLM
Keywords: llm ai memory agent chatbot ollama local
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.8
Description-Content-Type: text/markdown
Requires-Dist: requests>=2.31.0
Requires-Dist: pyyaml>=6.0.1
Provides-Extra: dev
Requires-Dist: pytest>=7.4.0; extra == "dev"
Requires-Dist: black>=23.7.0; extra == "dev"
Requires-Dist: flake8>=6.1.0; extra == "dev"
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: project-url
Dynamic: provides-extra
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary
```

# 🧠 Mem-Agent: Memory-Enabled Mini Assistant

<div align="center">

[](https://www.python.org/downloads/)
[](LICENSE)
[](https://ollama.ai/)

**A local AI assistant that remembers user interactions and responds with context awareness using a lightweight 4-billion parameter LLM.**

[Quick Start](#-quick-start) • [Features](#-features) • [Documentation](#-documentation) • [Examples](#-usage-examples)

</div>

---

## 🎯 Why Mem-Agent?

Most Large Language Models (LLMs) treat every conversation as "new" and don't remember past interactions. **Mem-Agent** uses a small locally-running model to:

- ✅ **Remember user history** - Separate memory for each customer/user
- ✅ **Context awareness** - Responds based on previous conversations
- ✅ **Fully local** - No internet connection required
- ✅ **Lightweight & fast** - Only 2.5 GB model size
- ✅ **Easy integration** - Get started with 3 lines of code

## 🚀 Quick Start

### 1. Install Ollama

```bash
# Windows/Mac/Linux: https://ollama.ai/download
curl https://ollama.ai/install.sh | sh

# Start the service
ollama serve
```

### 2. Download Model

```bash
ollama pull granite4:tiny-h
```

### 3. Use Mem-Agent

```python
from mem_llm import MemAgent

# Create agent
agent = MemAgent(model="granite4:tiny-h")

# System check
status = agent.check_setup()
if status['status'] == 'ready':
    print("✅ System ready!")
else:
    print("❌ Error:", status)

# Set user
agent.set_user("user123")

# First conversation
response = agent.chat("Hello, my name is Ali")
print(response)

# Second conversation - It remembers me!
response = agent.chat("Do you remember my name?")
print(response)
```

## 📚 Example Scripts

### 1. Simple Test

```bash
python examples/example_simple.py
```

### 2. Customer Service Simulation

```bash
python examples/example_customer_service.py
```

## 🏗️ Project Structure

```
Memory LLM/
├── memory_llm/              # Main package
│   ├── __init__.py          # Package initialization
│   ├── mem_agent.py         # Main assistant class
│   ├── memory_manager.py    # Memory management
│   ├── memory_db.py         # SQL database support
│   ├── llm_client.py        # Ollama integration
│   ├── memory_tools.py      # User tools
│   ├── knowledge_loader.py  # Knowledge base loader
│   ├── prompt_templates.py  # Prompt templates
│   └── config_manager.py    # Configuration manager
├── examples/                # Example scripts
├── tests/                   # Test files
├── setup.py                 # Installation script
├── requirements.txt         # Dependencies
└── README.md                # This file
```

## 🔧 API Usage

### MemAgent Class

```python
from mem_llm import MemAgent

agent = MemAgent(
    model="granite4:tiny-h",             # Ollama model name
    memory_dir="memories",               # Memory directory
    ollama_url="http://localhost:11434"  # Ollama API URL
)
```

#### Basic Methods

```python
# Set user
agent.set_user("user_id")

# Chat
response = agent.chat(
    message="Hello",
    user_id="optional_user_id",  # If set_user not used
    metadata={"key": "value"}    # Additional information
)

# Get memory summary
summary = agent.memory_manager.get_summary("user_id")

# Search in history
results = agent.search_user_history("keyword", "user_id")

# Update profile
agent.update_user_info({
    "name": "Ali",
    "preferences": {"language": "en"}
})

# Get statistics
stats = agent.get_statistics()

# Export memory
json_data = agent.export_memory("user_id")

# Clear memory (WARNING!)
agent.clear_user_memory("user_id", confirm=True)
```

### MemoryManager Class

```python
from mem_llm import MemoryManager

memory = MemoryManager(memory_dir="memories")

# Load memory
data = memory.load_memory("user_id")

# Add interaction
memory.add_interaction(
    user_id="user_id",
    user_message="Hello",
    bot_response="Hello! How can I help you?",
    metadata={"timestamp": "2025-01-13"}
)

# Get recent conversations
recent = memory.get_recent_conversations("user_id", limit=5)

# Search
results = memory.search_memory("user_id", "order")
```

### OllamaClient Class

```python
from mem_llm import OllamaClient

client = OllamaClient(model="granite4:tiny-h")

# Simple generation
response = client.generate("Hello world!")

# Chat format
response = client.chat([
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "Hello"}
])

# Connection check
is_ready = client.check_connection()

# Model list
models = client.list_models()
```

## 💡 Usage Scenarios

### 1. Customer Service Bot
- Remembers customer history
- Knows previous issues
- Makes personalized recommendations

### 2. Personal Assistant
- Tracks daily activities
- Learns preferences
- Makes reminders

### 3. Education Assistant
- Tracks student progress
- Adjusts difficulty level
- Remembers past mistakes

### 4. Support Ticket System
- Stores ticket history
- Finds related old tickets
- Provides solution suggestions

## 📊 Memory Format

Memories are stored in JSON format:

```json
{
  "conversations": [
    {
      "timestamp": "2025-01-13T10:30:00",
      "user_message": "Hello",
      "bot_response": "Hello! How can I help you?",
      "metadata": {
        "topic": "greeting"
      }
    }
  ],
  "profile": {
    "user_id": "user123",
    "first_seen": "2025-01-13T10:30:00",
    "preferences": {},
    "summary": {}
  },
  "last_updated": "2025-01-13T10:35:00"
}
```

## 🔒 Privacy and Security

- ✅ Works completely locally (no internet connection required)
- ✅ Data stored on your computer
- ✅ No data sent to third-party services
- ✅ Memories in JSON format, easily deletable

## 🛠️ Development

### Test Mode

```python
# Simple chat without memory (for testing)
response = agent.simple_chat("Test message")
```

### Using Your Own Model

```python
# Different Ollama model
agent = MemAgent(model="llama2:7b")

# Or another LLM API
# Customize llm_client.py file
```

## 🐛 Troubleshooting

### Ollama Connection Error

```bash
# Start Ollama service
ollama serve

# Port check
netstat -an | findstr "11434"
```

### Model Not Found

```bash
# Check model list
ollama list

# Download model
ollama pull granite4:tiny-h
```

### Memory Issues

```python
# Check memory directory
import os
os.path.exists("memories")

# List memory files
os.listdir("memories")
```

## 📈 Performance

- **Model Size**: ~2.5 GB
- **Response Time**: ~1-3 seconds (depends on CPU)
- **Memory Usage**: ~4-6 GB RAM
- **Disk Usage**: ~10-50 KB per user

## 🤝 Contributing

1. Fork the repository
2. Create feature branch (`git checkout -b feature/amazing-feature`)
3. Commit changes (`git commit -m 'feat: Add amazing feature'`)
4. Push to branch (`git push origin feature/amazing-feature`)
5. Open Pull Request

## 📝 License

MIT License - See LICENSE file for details.

## 🙏 Acknowledgments

- [Ollama](https://ollama.ai/) - Local LLM server
- [Granite](https://www.ibm.com/granite) - IBM Granite models

## 📞 Contact

You can open an issue for your questions.

---

**Note**: This project is for educational and research purposes. Please perform comprehensive testing before using in production environment.
{mem_llm-1.0.2.dist-info → mem_llm-1.0.4.dist-info}/WHEEL
File without changes

{mem_llm-1.0.2.dist-info → mem_llm-1.0.4.dist-info}/top_level.txt
File without changes