mem-llm 1.0.7__py3-none-any.whl → 1.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mem-llm might be problematic.
- mem_llm/__init__.py +12 -6
- mem_llm/cli.py +254 -0
- mem_llm/dynamic_prompt.py +298 -0
- mem_llm/llm_client.py +46 -6
- mem_llm/mem_agent.py +124 -65
- mem_llm-1.0.10.dist-info/METADATA +1028 -0
- mem_llm-1.0.10.dist-info/RECORD +17 -0
- {mem_llm-1.0.7.dist-info → mem_llm-1.0.10.dist-info}/WHEEL +1 -1
- mem_llm-1.0.10.dist-info/entry_points.txt +2 -0
- mem_llm/prompt_templates.py +0 -244
- mem_llm-1.0.7.dist-info/METADATA +0 -304
- mem_llm-1.0.7.dist-info/RECORD +0 -15
- {mem_llm-1.0.7.dist-info → mem_llm-1.0.10.dist-info}/top_level.txt +0 -0
mem_llm/mem_agent.py
CHANGED
@@ -42,10 +42,10 @@ from .llm_client import OllamaClient
 # Advanced features (optional)
 try:
     from .memory_db import SQLMemoryManager
-    from .prompt_templates import prompt_manager
     from .knowledge_loader import KnowledgeLoader
     from .config_manager import get_config
     from .memory_tools import ToolExecutor, MemoryTools
+    from .dynamic_prompt import dynamic_prompt_builder
     ADVANCED_AVAILABLE = True
 except ImportError:
     ADVANCED_AVAILABLE = False
@@ -65,7 +65,8 @@ class MemAgent:
                  use_sql: bool = True,
                  memory_dir: Optional[str] = None,
                  load_knowledge_base: bool = True,
-                 ollama_url: str = "http://localhost:11434"
+                 ollama_url: str = "http://localhost:11434",
+                 check_connection: bool = False):
         """
         Args:
             model: LLM model to use
@@ -74,6 +75,7 @@ class MemAgent:
             memory_dir: Memory directory
             load_knowledge_base: Automatically load knowledge base
             ollama_url: Ollama API URL
+            check_connection: Verify Ollama connection on startup (default: False)
         """

         # Load configuration
@@ -98,6 +100,10 @@ class MemAgent:
         # Setup logging
         self._setup_logging()

+        # Initialize flags first
+        self.has_knowledge_base: bool = False  # Track KB status
+        self.has_tools: bool = False  # Track tools status
+
         # Memory system selection
         if use_sql and ADVANCED_AVAILABLE:
             # SQL memory (advanced)
@@ -110,21 +116,63 @@
             self.memory = MemoryManager(json_dir)
             self.logger.info(f"JSON memory system active: {json_dir}")

+        # Active user and system prompt
+        self.current_user: Optional[str] = None
+        self.current_system_prompt: Optional[str] = None
+
         # LLM client
         self.model = model  # Store model name
         self.use_sql = use_sql  # Store SQL usage flag
         self.llm = OllamaClient(model, ollama_url)
+
+        # Optional connection check on startup
+        if check_connection:
+            self.logger.info("Checking Ollama connection...")
+            if not self.llm.check_connection():
+                error_msg = (
+                    "❌ ERROR: Cannot connect to Ollama service!\n"
+                    " \n"
+                    "   Solutions:\n"
+                    "   1. Start Ollama: ollama serve\n"
+                    "   2. Check if Ollama is running: http://localhost:11434\n"
+                    "   3. Verify ollama_url parameter is correct\n"
+                    " \n"
+                    "   To skip this check, use: MemAgent(check_connection=False)"
+                )
+                self.logger.error(error_msg)
+                raise ConnectionError("Ollama service not available")
+
+            # Check if model exists
+            available_models = self.llm.list_models()
+            if model not in available_models:
+                error_msg = (
+                    f"❌ ERROR: Model '{model}' not found!\n"
+                    f" \n"
+                    f"   Solutions:\n"
+                    f"   1. Download model: ollama pull {model}\n"
+                    f"   2. Use an available model: {', '.join(available_models[:3])}\n"
+                    f" \n"
+                    f"   Available models: {len(available_models)} found\n"
+                    f"   To skip this check, use: MemAgent(check_connection=False)"
+                )
+                self.logger.error(error_msg)
+                raise ValueError(f"Model '{model}' not available")
+
+            self.logger.info(f"✅ Ollama connection verified, model '{model}' ready")
+
         self.logger.info(f"LLM client ready: {model}")

+        # Initialize state variables FIRST
+        self.current_user: Optional[str] = None
+        self.current_system_prompt: Optional[str] = None
+
         # Advanced features (if available)
         if ADVANCED_AVAILABLE:
             self._setup_advanced_features(load_knowledge_base)
         else:
             print("⚠️ Load additional packages for advanced features")
-
-
-        self.current_user: Optional[str] = None
-        self.current_system_prompt: Optional[str] = None
+            # Build basic prompt even without advanced features
+            self._build_dynamic_system_prompt()

         # Tool system (always available)
         self.tool_executor = ToolExecutor(self.memory)
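The new connection check is opt-in. Below is a minimal usage sketch, not taken from the package's own documentation: the top-level import path and the model name are assumptions, but the `check_connection` flag and the raised exceptions follow the hunk above.

    # Hypothetical usage sketch -- not part of the diff itself.
    # Assumes MemAgent is re-exported from the package root and that "llama3" is pulled locally.
    from mem_llm import MemAgent

    try:
        agent = MemAgent(model="llama3", check_connection=True)
    except ConnectionError:
        # Raised when the Ollama service is unreachable
        print("Start Ollama with `ollama serve`, or pass check_connection=False")
    except ValueError as err:
        # Raised when the requested model is not among the locally available models
        print(f"Model not available: {err}")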
@@ -140,12 +188,12 @@ class MemAgent:
         log_config = self.config.get("logging", {})

         if log_config.get("enabled", True):
+            # Only console logging (no file) - keep workspace clean
            logging.basicConfig(
                level=getattr(logging, log_config.get("level", "INFO")),
                format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                handlers=[
-                    logging.
-                    logging.StreamHandler()
+                    logging.StreamHandler()  # Console only
                ]
            )

@@ -171,79 +219,73 @@
             if default_kb == "ecommerce":
                 count = kb_loader.load_default_ecommerce_kb()
                 self.logger.info(f"E-commerce knowledge base loaded: {count} records")
+                self.has_knowledge_base = True  # KB loaded!
             elif default_kb == "tech_support":
                 count = kb_loader.load_default_tech_support_kb()
                 self.logger.info(f"Technical support knowledge base loaded: {count} records")
+                self.has_knowledge_base = True  # KB loaded!
             elif default_kb == "business_tech_support":
                 count = kb_loader.load_default_tech_support_kb()
                 self.logger.info(f"Corporate technical support knowledge base loaded: {count} records")
+                self.has_knowledge_base = True  # KB loaded!
             elif default_kb == "personal_learning":
                 # Simple KB for personal learning
                 count = kb_loader.load_default_ecommerce_kb()  # Temporarily use the same KB
                 self.logger.info(f"Personal learning knowledge base loaded: {count} records")
+                self.has_knowledge_base = True  # KB loaded!
         except Exception as e:
             self.logger.error(f"Knowledge base loading error: {e}")
+            self.has_knowledge_base = False

-        #
-        prompt_config = self.config.get("prompt", {})
-
-        # Select default template according to usage mode
-        if self.usage_mode == "business":
-            default_template = "business_customer_service"
-        else:  # personal
-            default_template = "personal_assistant"
+        # Build dynamic system prompt based on active features
+        self._build_dynamic_system_prompt()

-        #
+    def _build_dynamic_system_prompt(self) -> None:
+        """Build dynamic system prompt based on active features"""
+        if not ADVANCED_AVAILABLE:
+            # Fallback simple prompt
+            self.current_system_prompt = "You are a helpful AI assistant."
+            return
+
+        # Get config data
+        business_config = None
+        personal_config = None
+
+        if hasattr(self, 'config') and self.config:
             if self.usage_mode == "business":
                 business_config = self.config.get("business", {})
-                "company_name": business_config.get("company_name", "Our Company"),
-                "founded_year": business_config.get("founded_year", "2010"),
-                "employee_count": business_config.get("employee_count", "100+"),
-                "industry": business_config.get("industry", "Teknoloji")
-                })
-            else:  # personal
+            else:
                 personal_config = self.config.get("personal", {})
-        self.
-        #
-            You: "Shipping is free for orders over $150!"
-            User: "My name is Alice" → You: "Nice to meet you, Alice!"
-            User: "What's my name?" → You: "Your name is Alice."
-        REMEMBER: Knowledge base = truth. Always use it when provided!"""
+
+        # Check if tools are enabled (future feature)
+        # For now, tools are always available but not advertised in prompt
+        # self.has_tools = False  # Will be enabled when tool system is ready
+
+        # Build prompt using dynamic builder
+        try:
+            self.current_system_prompt = dynamic_prompt_builder.build_prompt(
+                usage_mode=self.usage_mode,
+                has_knowledge_base=self.has_knowledge_base,
+                has_tools=False,  # Not advertised yet
+                is_multi_user=False,  # Always False for now, per-session state
+                business_config=business_config,
+                personal_config=personal_config,
+                memory_type="sql" if self.use_sql else "json"
+            )
+
+            # Log feature summary
+            feature_summary = dynamic_prompt_builder.get_feature_summary(
+                has_knowledge_base=self.has_knowledge_base,
+                has_tools=False,
+                is_multi_user=False,
+                memory_type="sql" if self.use_sql else "json"
+            )
+            self.logger.info(f"Dynamic prompt built: {feature_summary}")
+
+        except Exception as e:
+            self.logger.error(f"Dynamic prompt building error: {e}")
+            # Fallback
+            self.current_system_prompt = "You are a helpful AI assistant."

     def check_setup(self) -> Dict[str, Any]:
         """Check system setup"""
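This hunk replaces the old hard-coded prompt template with the new `dynamic_prompt_builder` from `mem_llm/dynamic_prompt.py`. Going only by the keyword arguments visible above, a standalone call might look like the sketch below; the shape of the config dictionaries is an assumption, and this has not been run against 1.0.10.

    # Illustrative sketch based on the build_prompt() call shown in the hunk above.
    from mem_llm.dynamic_prompt import dynamic_prompt_builder

    prompt = dynamic_prompt_builder.build_prompt(
        usage_mode="business",
        has_knowledge_base=True,
        has_tools=False,
        is_multi_user=False,
        business_config={"company_name": "Acme"},  # hypothetical config shape
        personal_config=None,
        memory_type="sql",
    )
    print(prompt)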
@@ -382,8 +424,25 @@ REMEMBER: Knowledge base = truth. Always use it when provided!"""
             response = self.llm.chat(
                 messages=messages,
                 temperature=self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2,  # Very focused
-                max_tokens=self.config.get("llm.max_tokens",
+                max_tokens=self.config.get("llm.max_tokens", 2000) if hasattr(self, 'config') and self.config else 2000  # Enough tokens for thinking models
             )
+
+            # Fallback: If response is empty (can happen with thinking models)
+            if not response or response.strip() == "":
+                self.logger.warning(f"Empty response from model {self.llm.model}, retrying with simpler prompt...")
+
+                # Retry with just the current message, no history
+                simple_messages = [
+                    {"role": "system", "content": "You are a helpful assistant. Respond directly and concisely."},
+                    {"role": "user", "content": message}
+                ]
+                response = self.llm.chat(simple_messages, temperature=0.7, max_tokens=2000)
+
+                # If still empty, provide fallback
+                if not response or response.strip() == "":
+                    response = "I'm having trouble responding right now. Could you rephrase your question?"
+                    self.logger.error(f"Model {self.llm.model} returned empty response even after retry")
+
         except Exception as e:
             self.logger.error(f"LLM response error: {e}")
             response = "Sorry, I cannot respond right now. Please try again later."
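The empty-response fallback targets thinking models that occasionally spend the whole token budget on reasoning. The same pattern, extracted into a standalone helper for illustration (the names below are not part of mem-llm; `chat_fn` stands in for `OllamaClient.chat`):

    # Generic sketch of the retry pattern added in the hunk above; assumptions, not package API.
    def chat_with_fallback(chat_fn, messages, user_message, max_tokens=2000):
        # First attempt: full conversation history, low temperature
        response = chat_fn(messages=messages, temperature=0.2, max_tokens=max_tokens)
        if not response or not response.strip():
            # Retry once with a bare, single-turn prompt
            response = chat_fn(
                messages=[
                    {"role": "system", "content": "You are a helpful assistant. Respond directly and concisely."},
                    {"role": "user", "content": user_message},
                ],
                temperature=0.7,
                max_tokens=max_tokens,
            )
        if not response or not response.strip():
            # Last resort: canned reply instead of an empty string
            response = "I'm having trouble responding right now. Could you rephrase your question?"
        return response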