mem-llm 1.0.0__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mem-llm might be problematic.
- mem_llm-1.0.0.dist-info/METADATA +382 -0
- mem_llm-1.0.0.dist-info/RECORD +14 -0
- mem_llm-1.0.0.dist-info/WHEEL +5 -0
- mem_llm-1.0.0.dist-info/top_level.txt +1 -0
- memory_llm/__init__.py +34 -0
- memory_llm/config.yaml.example +52 -0
- memory_llm/config_manager.py +229 -0
- memory_llm/knowledge_loader.py +88 -0
- memory_llm/llm_client.py +162 -0
- memory_llm/mem_agent.py +512 -0
- memory_llm/memory_db.py +376 -0
- memory_llm/memory_manager.py +257 -0
- memory_llm/memory_tools.py +253 -0
- memory_llm/prompt_templates.py +244 -0
memory_llm/config_manager.py
ADDED

@@ -0,0 +1,229 @@
+"""
+Configuration Manager
+Reads and manages configuration from a YAML file
+"""
+
+import yaml
+from pathlib import Path
+from typing import Any, Dict, Optional
+import os
+
+
+class ConfigManager:
+    """Manages the configuration file"""
+
+    def __init__(self, config_file: str = "config.yaml"):
+        """
+        Args:
+            config_file: Configuration file path
+        """
+        self.config_file = Path(config_file)
+        self.config: Dict[str, Any] = {}
+        self._load_config()
+
+    def _load_config(self) -> None:
+        """Load the configuration file, creating it with defaults if missing"""
+        if self.config_file.exists():
+            with open(self.config_file, 'r', encoding='utf-8') as f:
+                self.config = yaml.safe_load(f) or {}
+        else:
+            # Default configuration
+            self.config = self._get_default_config()
+            self.save_config()
+
+    def _get_default_config(self) -> Dict[str, Any]:
+        """Returns the default configuration"""
+        return {
+            "llm": {
+                "model": "granite4:tiny-h",
+                "base_url": "http://localhost:11434",
+                "temperature": 0.7,
+                "max_tokens": 500
+            },
+            "memory": {
+                "backend": "sql",
+                "json_dir": "memories",
+                "db_path": "memories.db",
+                "max_conversations_per_user": 1000,
+                "auto_cleanup": True,
+                "cleanup_after_days": 90
+            },
+            "prompt": {
+                "template": "customer_service",
+                "variables": {
+                    "company_name": "Our Company",
+                    "tone": "friendly and professional"
+                },
+                "custom_prompt": None
+            },
+            "knowledge_base": {
+                "enabled": True,
+                "auto_load": True,
+                "default_kb": "ecommerce",
+                "custom_kb_file": None,
+                "search_limit": 5,
+                "min_relevance_score": 0.3
+            },
+            "response": {
+                "use_knowledge_base": True,
+                "use_memory": True,
+                "recent_conversations_limit": 5,
+                "format": {
+                    "include_greeting": True,
+                    "include_follow_up": True,
+                    "max_length": 500
+                }
+            },
+            "security": {
+                "filter_sensitive_data": True,
+                "sensitive_keywords": [
+                    "credit card", "password", "passcode", "CVV", "TR ID"
+                ],
+                "rate_limit": {
+                    "enabled": True,
+                    "max_requests_per_minute": 60,
+                    "max_requests_per_user_per_minute": 10
+                }
+            },
+            "logging": {
+                "enabled": True,
+                "level": "INFO",
+                "file": "mem_agent.log",
+                "max_size_mb": 10,
+                "backup_count": 5,
+                "log_user_messages": True,
+                "log_bot_responses": True,
+                "mask_sensitive": True
+            },
+            "performance": {
+                "enable_cache": True,
+                "cache_ttl_seconds": 3600,
+                "enable_parallel": False,
+                "max_workers": 4
+            },
+            "analytics": {
+                "enabled": True,
+                "track_response_time": True,
+                "track_user_satisfaction": False,
+                "track_conversation_length": True,
+                "export_interval_hours": 24,
+                "export_path": "analytics"
+            }
+        }
+
+    def get(self, key_path: str, default: Any = None) -> Any:
+        """
+        Get a configuration value using dot notation
+
+        Args:
+            key_path: Key path (e.g. "llm.model")
+            default: Value to return if the key is not found
+
+        Returns:
+            Configuration value
+        """
+        keys = key_path.split('.')
+        value = self.config
+
+        for key in keys:
+            if isinstance(value, dict) and key in value:
+                value = value[key]
+            else:
+                return default
+
+        return value
+
+    def set(self, key_path: str, value: Any) -> None:
+        """
+        Set a configuration value using dot notation
+
+        Args:
+            key_path: Key path (e.g. "llm.model")
+            value: Value to set
+        """
+        keys = key_path.split('.')
+        config = self.config
+
+        for key in keys[:-1]:
+            if key not in config or not isinstance(config[key], dict):
+                config[key] = {}
+            config = config[key]
+
+        config[keys[-1]] = value
+
+    def save_config(self) -> None:
+        """Save the configuration to file"""
+        with open(self.config_file, 'w', encoding='utf-8') as f:
+            yaml.dump(self.config, f, default_flow_style=False,
+                      allow_unicode=True, sort_keys=False)
+
+    def reload(self) -> None:
+        """Reload the configuration from disk"""
+        self._load_config()
+
+    def get_llm_config(self) -> Dict[str, Any]:
+        """Returns the LLM configuration"""
+        return self.get("llm", {})
+
+    def get_memory_config(self) -> Dict[str, Any]:
+        """Returns the memory configuration"""
+        return self.get("memory", {})
+
+    def get_prompt_config(self) -> Dict[str, Any]:
+        """Returns the prompt configuration"""
+        return self.get("prompt", {})
+
+    def get_kb_config(self) -> Dict[str, Any]:
+        """Returns the knowledge base configuration"""
+        return self.get("knowledge_base", {})
+
+    def is_kb_enabled(self) -> bool:
+        """Returns True if the knowledge base is enabled"""
+        return self.get("knowledge_base.enabled", True)
+
+    def is_memory_enabled(self) -> bool:
+        """Returns True if memory is enabled"""
+        return self.get("response.use_memory", True)
+
+    def get_memory_backend(self) -> str:
+        """Returns the memory backend type ("json" or "sql")"""
+        return self.get("memory.backend", "sql")
+
+    def get_db_path(self) -> str:
+        """Returns the database file path"""
+        return self.get("memory.db_path", "memories.db")
+
+    def get_json_dir(self) -> str:
+        """Returns the JSON memory directory"""
+        return self.get("memory.json_dir", "memories")
+
+    def __repr__(self) -> str:
+        return f"ConfigManager(file='{self.config_file}')"
+
+
+# Global instance
+_config_manager: Optional[ConfigManager] = None
+
+
+def get_config(config_file: str = "config.yaml") -> ConfigManager:
+    """
+    Returns the global configuration manager (created on first call)
+
+    Args:
+        config_file: Configuration file path
+
+    Returns:
+        ConfigManager instance
+    """
+    global _config_manager
+    if _config_manager is None:
+        _config_manager = ConfigManager(config_file)
+    return _config_manager
+
+
+def reload_config() -> None:
+    """Reloads the global configuration"""
+    global _config_manager
+    if _config_manager:
+        _config_manager.reload()
+
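ConfigManager resolves nested keys with dot notation: reads fall back to a supplied default on any miss, and writes create intermediate dicts as needed. A minimal usage sketch, assuming the wheel is installed so that memory_llm.config_manager is importable (constructing the manager writes config.yaml to the working directory if it does not exist):

    # Usage sketch; module path assumed from this diff's file list.
    from memory_llm.config_manager import ConfigManager

    cfg = ConfigManager("config.yaml")        # created with defaults if missing

    model = cfg.get("llm.model", "granite4:tiny-h")    # dot-notation read
    missing = cfg.get("llm.no_such_key", "fallback")   # -> "fallback"

    cfg.set("security.rate_limit.max_requests_per_minute", 30)  # nested write
    cfg.save_config()                         # persist back to config.yaml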
memory_llm/knowledge_loader.py
ADDED

@@ -0,0 +1,88 @@
+"""
+Knowledge Base Loader
+Loads a pre-prepared problem/solution database into the system
+"""
+
+import json
+import yaml
+from pathlib import Path
+from typing import List, Dict, Optional
+from .memory_db import SQLMemoryManager
+
+
+class KnowledgeLoader:
+    """Knowledge base management and loading"""
+
+    def __init__(self, db_manager: SQLMemoryManager):
+        """
+        Args:
+            db_manager: SQL memory manager
+        """
+        self.db = db_manager
+
+    def load_from_json(self, file_path: str) -> int:
+        """Load a knowledge base from a JSON file; returns the number of entries loaded"""
+        with open(file_path, 'r', encoding='utf-8') as f:
+            data = json.load(f)
+
+        count = 0
+        for entry in data.get('knowledge_base', []):
+            self.db.add_knowledge(
+                category=entry['category'],
+                question=entry['question'],
+                answer=entry['answer'],
+                keywords=entry.get('keywords', []),
+                priority=entry.get('priority', 0)
+            )
+            count += 1
+
+        return count
+
+    def load_default_ecommerce_kb(self) -> int:
+        """Load the default e-commerce knowledge base"""
+        knowledge = [
+            {
+                "category": "shipping",
+                "question": "When will my order arrive?",
+                "answer": "Orders are shipped within 2-3 business days and delivered within 3-5 business days.",
+                "keywords": ["shipping", "delivery", "time"],
+                "priority": 10
+            },
+            {
+                "category": "return",
+                "question": "How do I return a product?",
+                "answer": "You can return products within 14 days. Create a return request from the My Orders page.",
+                "keywords": ["return", "refund"],
+                "priority": 10
+            },
+        ]
+
+        count = 0
+        for entry in knowledge:
+            self.db.add_knowledge(**entry)
+            count += 1
+
+        return count
+
+    def load_default_tech_support_kb(self) -> int:
+        """Load the default tech support knowledge base"""
+        knowledge = [
+            {
+                "category": "connection",
+                "question": "Cannot connect to the internet",
+                "answer": "1) Restart your modem/router 2) Check the Wi-Fi password 3) Try other devices",
+                "keywords": ["internet", "connection", "wifi"],
+                "priority": 10
+            },
+        ]
+
+        count = 0
+        for entry in knowledge:
+            self.db.add_knowledge(**entry)
+            count += 1
+
+        return count
+
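load_from_json() expects a top-level "knowledge_base" array whose entries carry category, question and answer, with keywords and priority optional. The sketch below writes such a file from Python and loads it; SQLMemoryManager's constructor is not shown in this diff, so SQLMemoryManager("memories.db") is an assumption based on the default db_path in the configuration:

    # Sketch only: SQLMemoryManager("memories.db") is an assumed constructor call.
    import json
    from memory_llm.memory_db import SQLMemoryManager
    from memory_llm.knowledge_loader import KnowledgeLoader

    kb = {"knowledge_base": [{
        "category": "billing",
        "question": "How do I get an invoice?",
        "answer": "Invoices are emailed within 24 hours of purchase.",
        "keywords": ["invoice", "billing"],  # optional, defaults to []
        "priority": 5,                       # optional, defaults to 0
    }]}
    with open("kb.json", "w", encoding="utf-8") as f:
        json.dump(kb, f, indent=2)

    loader = KnowledgeLoader(SQLMemoryManager("memories.db"))
    print(loader.load_from_json("kb.json"))  # number of entries loaded: 1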
memory_llm/llm_client.py
ADDED

@@ -0,0 +1,162 @@
+"""
+LLM Client - Local model integration via Ollama
+Works with the granite4:tiny-h model
+"""
+
+import requests
+import json
+from typing import List, Dict, Optional
+
+
+class OllamaClient:
+    """Talks to a local LLM through the Ollama API"""
+
+    def __init__(self, model: str = "granite4:tiny-h",
+                 base_url: str = "http://localhost:11434"):
+        """
+        Args:
+            model: Model name to use
+            base_url: Ollama API URL
+        """
+        self.model = model
+        self.base_url = base_url
+        self.api_url = f"{base_url}/api/generate"
+        self.chat_url = f"{base_url}/api/chat"
+
+    def check_connection(self) -> bool:
+        """
+        Checks whether the Ollama service is running
+
+        Returns:
+            True if the service responds
+        """
+        try:
+            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
+            return response.status_code == 200
+        except requests.RequestException:
+            return False
+
+    def list_models(self) -> List[str]:
+        """
+        List available models
+
+        Returns:
+            List of model names
+        """
+        try:
+            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
+            if response.status_code == 200:
+                data = response.json()
+                return [model['name'] for model in data.get('models', [])]
+            return []
+        except requests.RequestException:
+            return []
+
+    def generate(self, prompt: str, system_prompt: Optional[str] = None,
+                 temperature: float = 0.7, max_tokens: int = 500) -> str:
+        """
+        Generate a single completion
+
+        Args:
+            prompt: User prompt
+            system_prompt: System prompt for the model
+            temperature: Creativity level (0-1)
+            max_tokens: Maximum number of tokens to generate
+
+        Returns:
+            Model output
+        """
+        payload = {
+            "model": self.model,
+            "prompt": prompt,
+            "stream": False,
+            "options": {
+                "temperature": temperature,
+                "num_predict": max_tokens
+            }
+        }
+
+        if system_prompt:
+            payload["system"] = system_prompt
+
+        try:
+            response = requests.post(self.api_url, json=payload, timeout=60)
+            if response.status_code == 200:
+                return response.json().get('response', '').strip()
+            else:
+                return f"Error: {response.status_code} - {response.text}"
+        except Exception as e:
+            return f"Connection error: {str(e)}"
+
+    def chat(self, messages: List[Dict[str, str]],
+             temperature: float = 0.7, max_tokens: int = 500) -> str:
+        """
+        Chat-format interaction
+
+        Args:
+            messages: Message history [{"role": "user/assistant/system", "content": "..."}]
+            temperature: Creativity level
+            max_tokens: Maximum number of tokens to generate
+
+        Returns:
+            Model response
+        """
+        payload = {
+            "model": self.model,
+            "messages": messages,
+            "stream": False,
+            "options": {
+                "temperature": temperature,
+                "num_predict": max_tokens
+            }
+        }
+
+        try:
+            response = requests.post(self.chat_url, json=payload, timeout=60)
+            if response.status_code == 200:
+                return response.json().get('message', {}).get('content', '').strip()
+            else:
+                return f"Error: {response.status_code} - {response.text}"
+        except Exception as e:
+            return f"Connection error: {str(e)}"
+
+    def generate_with_memory_context(self, user_message: str,
+                                     memory_summary: str,
+                                     recent_conversations: List[Dict]) -> str:
+        """
+        Generate a response with memory context
+
+        Args:
+            user_message: The user's message
+            memory_summary: Summary of the user's memory
+            recent_conversations: Recent conversations
+
+        Returns:
+            Context-aware response
+        """
+        # Build the system prompt
+        system_prompt = """You are a helpful customer service assistant.
+You can remember past conversations with users.
+Give short, clear and professional answers.
+Use past interactions intelligently."""
+
+        # Build the message history
+        messages = [{"role": "system", "content": system_prompt}]
+
+        # Add the memory summary
+        if memory_summary and memory_summary != "No interactions with this user yet.":
+            messages.append({
+                "role": "system",
+                "content": f"User history:\n{memory_summary}"
+            })
+
+        # Add the three most recent conversations
+        for conv in recent_conversations[-3:]:
+            messages.append({"role": "user", "content": conv.get('user_message', '')})
+            messages.append({"role": "assistant", "content": conv.get('bot_response', '')})
+
+        # Add the current message
+        messages.append({"role": "user", "content": user_message})
+
+        return self.chat(messages, temperature=0.7)
+
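Tying the client together: on failure these methods return error strings rather than raising, so callers should check check_connection() first. A minimal end-to-end sketch, assuming a local Ollama instance on the default port with granite4:tiny-h already pulled (ollama pull granite4:tiny-h):

    # Assumes a running Ollama instance at http://localhost:11434 with the model pulled.
    from memory_llm.llm_client import OllamaClient

    client = OllamaClient(model="granite4:tiny-h")
    if not client.check_connection():
        raise SystemExit("Ollama is not reachable at http://localhost:11434")

    print(client.list_models())
    print(client.generate("Say hello in one sentence.",
                          system_prompt="You are terse.",
                          temperature=0.2, max_tokens=50))

    # Memory-aware call: prior turns are replayed as chat history
    # (the summary and conversation values here are illustrative).
    print(client.generate_with_memory_context(
        "Has my refund gone through?",
        memory_summary="Returning customer; opened a return last week.",
        recent_conversations=[{
            "user_message": "I want to return my order.",
            "bot_response": "You can create a return request from the My Orders page.",
        }],
    ))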