mem-llm 1.0.6__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mem-llm might be problematic; see the registry's advisory page for more details.

mem_llm/mem_agent.py CHANGED
@@ -42,10 +42,10 @@ from .llm_client import OllamaClient
42
42
  # Advanced features (optional)
43
43
  try:
44
44
  from .memory_db import SQLMemoryManager
45
- from .prompt_templates import prompt_manager
46
45
  from .knowledge_loader import KnowledgeLoader
47
46
  from .config_manager import get_config
48
47
  from .memory_tools import ToolExecutor, MemoryTools
48
+ from .dynamic_prompt import dynamic_prompt_builder
49
49
  ADVANCED_AVAILABLE = True
50
50
  except ImportError:
51
51
  ADVANCED_AVAILABLE = False
@@ -65,7 +65,8 @@ class MemAgent:
65
65
  use_sql: bool = True,
66
66
  memory_dir: Optional[str] = None,
67
67
  load_knowledge_base: bool = True,
68
- ollama_url: str = "http://localhost:11434"):
68
+ ollama_url: str = "http://localhost:11434",
69
+ check_connection: bool = False):
69
70
  """
70
71
  Args:
71
72
  model: LLM model to use
@@ -74,6 +75,7 @@ class MemAgent:
74
75
  memory_dir: Memory directory
75
76
  load_knowledge_base: Automatically load knowledge base
76
77
  ollama_url: Ollama API URL
78
+ check_connection: Verify Ollama connection on startup (default: False)
77
79
  """
78
80
 
79
81
  # Load configuration
@@ -98,6 +100,10 @@ class MemAgent:
98
100
  # Setup logging
99
101
  self._setup_logging()
100
102
 
103
+ # Initialize flags first
104
+ self.has_knowledge_base: bool = False # Track KB status
105
+ self.has_tools: bool = False # Track tools status
106
+
101
107
  # Memory system selection
102
108
  if use_sql and ADVANCED_AVAILABLE:
103
109
  # SQL memory (advanced)
@@ -110,21 +116,63 @@ class MemAgent:
110
116
  self.memory = MemoryManager(json_dir)
111
117
  self.logger.info(f"JSON memory system active: {json_dir}")
112
118
 
119
+ # Active user and system prompt
120
+ self.current_user: Optional[str] = None
121
+ self.current_system_prompt: Optional[str] = None
122
+
113
123
  # LLM client
114
124
  self.model = model # Store model name
115
125
  self.use_sql = use_sql # Store SQL usage flag
116
126
  self.llm = OllamaClient(model, ollama_url)
127
+
128
+ # Optional connection check on startup
129
+ if check_connection:
130
+ self.logger.info("Checking Ollama connection...")
131
+ if not self.llm.check_connection():
132
+ error_msg = (
133
+ "❌ ERROR: Cannot connect to Ollama service!\n"
134
+ " \n"
135
+ " Solutions:\n"
136
+ " 1. Start Ollama: ollama serve\n"
137
+ " 2. Check if Ollama is running: http://localhost:11434\n"
138
+ " 3. Verify ollama_url parameter is correct\n"
139
+ " \n"
140
+ " To skip this check, use: MemAgent(check_connection=False)"
141
+ )
142
+ self.logger.error(error_msg)
143
+ raise ConnectionError("Ollama service not available")
144
+
145
+ # Check if model exists
146
+ available_models = self.llm.list_models()
147
+ if model not in available_models:
148
+ error_msg = (
149
+ f"❌ ERROR: Model '{model}' not found!\n"
150
+ f" \n"
151
+ f" Solutions:\n"
152
+ f" 1. Download model: ollama pull {model}\n"
153
+ f" 2. Use an available model: {', '.join(available_models[:3])}\n"
154
+ f" \n"
155
+ f" Available models: {len(available_models)} found\n"
156
+ f" To skip this check, use: MemAgent(check_connection=False)"
157
+ )
158
+ self.logger.error(error_msg)
159
+ raise ValueError(f"Model '{model}' not available")
160
+
161
+ self.logger.info(f"✅ Ollama connection verified, model '{model}' ready")
162
+
117
163
  self.logger.info(f"LLM client ready: {model}")
118
164
 
165
+ # Initialize state variables FIRST
166
+ self.current_user: Optional[str] = None
167
+ self.current_system_prompt: Optional[str] = None
168
+
119
169
  # Advanced features (if available)
120
170
  if ADVANCED_AVAILABLE:
121
171
  self._setup_advanced_features(load_knowledge_base)
122
172
  else:
123
173
  print("⚠️ Load additional packages for advanced features")
124
-
125
- # Active user and system prompt
126
- self.current_user: Optional[str] = None
127
- self.current_system_prompt: Optional[str] = None
174
+ # Build basic prompt even without advanced features
175
+ self._build_dynamic_system_prompt()
128
176
 
129
177
  # Tool system (always available)
130
178
  self.tool_executor = ToolExecutor(self.memory)
@@ -140,12 +188,12 @@ class MemAgent:
140
188
  log_config = self.config.get("logging", {})
141
189
 
142
190
  if log_config.get("enabled", True):
191
+ # Only console logging (no file) - keep workspace clean
143
192
  logging.basicConfig(
144
193
  level=getattr(logging, log_config.get("level", "INFO")),
145
194
  format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
146
195
  handlers=[
147
- logging.FileHandler(log_config.get("file", "mem_agent.log")),
148
- logging.StreamHandler()
196
+ logging.StreamHandler() # Console only
149
197
  ]
150
198
  )
151
199
 
@@ -171,79 +219,73 @@ class MemAgent:
171
219
  if default_kb == "ecommerce":
172
220
  count = kb_loader.load_default_ecommerce_kb()
173
221
  self.logger.info(f"E-commerce knowledge base loaded: {count} records")
222
+ self.has_knowledge_base = True # KB loaded!
174
223
  elif default_kb == "tech_support":
175
224
  count = kb_loader.load_default_tech_support_kb()
176
225
  self.logger.info(f"Technical support knowledge base loaded: {count} records")
226
+ self.has_knowledge_base = True # KB loaded!
177
227
  elif default_kb == "business_tech_support":
178
228
  count = kb_loader.load_default_tech_support_kb()
179
229
  self.logger.info(f"Corporate technical support knowledge base loaded: {count} records")
230
+ self.has_knowledge_base = True # KB loaded!
180
231
  elif default_kb == "personal_learning":
181
232
  # Simple KB for personal learning
182
233
  count = kb_loader.load_default_ecommerce_kb() # Temporarily use the same KB
183
234
  self.logger.info(f"Personal learning knowledge base loaded: {count} records")
235
+ self.has_knowledge_base = True # KB loaded!
184
236
  except Exception as e:
185
237
  self.logger.error(f"Knowledge base loading error: {e}")
238
+ self.has_knowledge_base = False
186
239
 
187
- # Load system prompt (according to usage mode)
188
- if hasattr(self, 'config') and self.config:
189
- prompt_config = self.config.get("prompt", {})
190
-
191
- # Select default template according to usage mode
192
- if self.usage_mode == "business":
193
- default_template = "business_customer_service"
194
- else: # personal
195
- default_template = "personal_assistant"
240
+ # Build dynamic system prompt based on active features
241
+ self._build_dynamic_system_prompt()
196
242
 
197
- template_name = prompt_config.get("template", default_template)
198
- variables = prompt_config.get("variables", {})
199
-
200
- # Additional variables for business mode
243
+ def _build_dynamic_system_prompt(self) -> None:
244
+ """Build dynamic system prompt based on active features"""
245
+ if not ADVANCED_AVAILABLE:
246
+ # Fallback simple prompt
247
+ self.current_system_prompt = "You are a helpful AI assistant."
248
+ return
249
+
250
+ # Get config data
251
+ business_config = None
252
+ personal_config = None
253
+
254
+ if hasattr(self, 'config') and self.config:
201
255
  if self.usage_mode == "business":
202
256
  business_config = self.config.get("business", {})
203
- variables.update({
204
- "company_name": business_config.get("company_name", "Our Company"),
205
- "founded_year": business_config.get("founded_year", "2010"),
206
- "employee_count": business_config.get("employee_count", "100+"),
207
- "industry": business_config.get("industry", "Teknoloji")
208
- })
209
- else: # personal
257
+ else:
210
258
  personal_config = self.config.get("personal", {})
211
- variables.update({
212
- "user_name": personal_config.get("user_name", "User"),
213
- "timezone": personal_config.get("timezone", "Europe/London")
214
- })
215
-
216
- try:
217
- variables['current_date'] = datetime.now().strftime("%Y-%m-%d")
218
- self.current_system_prompt = prompt_manager.render_prompt(template_name, **variables)
219
- self.logger.info(f"Prompt template loaded: {template_name} (Mode: {self.usage_mode})")
220
- except Exception as e:
221
- self.logger.error(f"Prompt template loading error: {e}")
222
- # Simple, short and effective default prompt
223
- self.current_system_prompt = """You are a helpful AI assistant with access to a knowledge base.
224
-
225
- CRITICAL RULES (FOLLOW EXACTLY):
226
- 1. If KNOWLEDGE BASE information is provided below, USE IT FIRST - it's the correct answer!
227
- 2. Knowledge base answers are marked with "📚 RELEVANT KNOWLEDGE BASE"
228
- 3. Keep responses SHORT (1-3 sentences maximum)
229
- 4. When user shares personal info: Just acknowledge briefly ("Got it!" or "Noted!")
230
- 5. Answer from knowledge base EXACTLY as written, don't make up information
231
- 6. If knowledge base has no info, use conversation history or say "I don't have that information"
232
-
233
- RESPONSE PRIORITY:
234
- 1st Priority: Knowledge Base (if available) ← USE THIS!
235
- 2nd Priority: Conversation History
236
- 3rd Priority: General knowledge (be brief)
237
-
238
- EXAMPLES:
239
- User: "What's the shipping cost?"
240
- Knowledge Base: "Shipping is free over $150"
241
- You: "Shipping is free for orders over $150!"
242
-
243
- User: "My name is Alice" → You: "Nice to meet you, Alice!"
244
- User: "What's my name?" → You: "Your name is Alice."
245
-
246
- REMEMBER: Knowledge base = truth. Always use it when provided!"""
259
+
260
+ # Check if tools are enabled (future feature)
261
+ # For now, tools are always available but not advertised in prompt
262
+ # self.has_tools = False # Will be enabled when tool system is ready
263
+
264
+ # Build prompt using dynamic builder
265
+ try:
266
+ self.current_system_prompt = dynamic_prompt_builder.build_prompt(
267
+ usage_mode=self.usage_mode,
268
+ has_knowledge_base=self.has_knowledge_base,
269
+ has_tools=False, # Not advertised yet
270
+ is_multi_user=False, # Always False for now, per-session state
271
+ business_config=business_config,
272
+ personal_config=personal_config,
273
+ memory_type="sql" if self.use_sql else "json"
274
+ )
275
+
276
+ # Log feature summary
277
+ feature_summary = dynamic_prompt_builder.get_feature_summary(
278
+ has_knowledge_base=self.has_knowledge_base,
279
+ has_tools=False,
280
+ is_multi_user=False,
281
+ memory_type="sql" if self.use_sql else "json"
282
+ )
283
+ self.logger.info(f"Dynamic prompt built: {feature_summary}")
284
+
285
+ except Exception as e:
286
+ self.logger.error(f"Dynamic prompt building error: {e}")
287
+ # Fallback
288
+ self.current_system_prompt = "You are a helpful AI assistant."
247
289
 
248
290
  def check_setup(self) -> Dict[str, Any]:
249
291
  """Check system setup"""
@@ -382,8 +424,25 @@ REMEMBER: Knowledge base = truth. Always use it when provided!"""
382
424
  response = self.llm.chat(
383
425
  messages=messages,
384
426
  temperature=self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2, # Very focused
385
- max_tokens=self.config.get("llm.max_tokens", 150) if hasattr(self, 'config') and self.config else 150 # Max 2-3 sentences
427
+ max_tokens=self.config.get("llm.max_tokens", 2000) if hasattr(self, 'config') and self.config else 2000 # Enough tokens for thinking models
386
428
  )
429
+
430
+ # Fallback: If response is empty (can happen with thinking models)
431
+ if not response or response.strip() == "":
432
+ self.logger.warning(f"Empty response from model {self.llm.model}, retrying with simpler prompt...")
433
+
434
+ # Retry with just the current message, no history
435
+ simple_messages = [
436
+ {"role": "system", "content": "You are a helpful assistant. Respond directly and concisely."},
437
+ {"role": "user", "content": message}
438
+ ]
439
+ response = self.llm.chat(simple_messages, temperature=0.7, max_tokens=2000)
440
+
441
+ # If still empty, provide fallback
442
+ if not response or response.strip() == "":
443
+ response = "I'm having trouble responding right now. Could you rephrase your question?"
444
+ self.logger.error(f"Model {self.llm.model} returned empty response even after retry")
445
+
387
446
  except Exception as e:
388
447
  self.logger.error(f"LLM response error: {e}")
389
448
  response = "Sorry, I cannot respond right now. Please try again later."
mem_llm/memory_manager.py CHANGED
@@ -259,4 +259,54 @@ class MemoryManager:
259
259
  self.load_memory(user_id)
260
260
 
261
261
  return self.user_profiles.get(user_id)
262
+
263
+ def update_user_profile(self, user_id: str, updates: Dict) -> None:
264
+ """
265
+ Update user profile (SQL-compatible alias)
266
+
267
+ Args:
268
+ user_id: User ID
269
+ updates: Fields to update
270
+ """
271
+ return self.update_profile(user_id, updates)
272
+
273
+ def add_user(self, user_id: str, name: Optional[str] = None, metadata: Optional[Dict] = None) -> None:
274
+ """
275
+ Add or update user (SQL-compatible method)
276
+
277
+ Args:
278
+ user_id: User ID
279
+ name: User name (optional)
280
+ metadata: Additional metadata (optional)
281
+ """
282
+ self.load_memory(user_id)
283
+ if name and 'name' not in self.user_profiles[user_id]:
284
+ self.user_profiles[user_id]['name'] = name
285
+ if metadata:
286
+ self.user_profiles[user_id].update(metadata)
287
+ self.save_memory(user_id)
288
+
289
+ def get_statistics(self) -> Dict:
290
+ """
291
+ Get general statistics (SQL-compatible method)
292
+
293
+ Returns:
294
+ Statistics dictionary
295
+ """
296
+ all_users = list(self.memory_dir.glob("*.json"))
297
+ total_interactions = 0
298
+
299
+ for user_file in all_users:
300
+ try:
301
+ with open(user_file, 'r', encoding='utf-8') as f:
302
+ data = json.load(f)
303
+ total_interactions += len(data.get('conversations', []))
304
+ except:
305
+ pass
306
+
307
+ return {
308
+ 'total_users': len(all_users),
309
+ 'total_interactions': total_interactions,
310
+ 'knowledge_base_entries': 0 # JSON doesn't have KB
311
+ }
262
312