mem-llm 1.0.2__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff shows the content changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mem-llm might be problematic; review the changes listed below for details.

mem_llm/__init__.py CHANGED
@@ -24,7 +24,7 @@ try:
24
24
  except ImportError:
25
25
  __all_pro__ = []
26
26
 
27
- __version__ = "1.0.2"
27
+ __version__ = "1.0.3"
28
28
  __author__ = "C. Emre Karataş"
29
29
 
30
30
  __all__ = [
mem_llm/llm_client.py CHANGED
@@ -107,7 +107,11 @@ class OllamaClient:
107
107
  "stream": False,
108
108
  "options": {
109
109
  "temperature": temperature,
110
- "num_predict": max_tokens
110
+ "num_predict": max_tokens,
111
+ "num_ctx": 2048, # Context window
112
+ "top_k": 40, # Limit vocab
113
+ "top_p": 0.9, # Nucleus sampling
114
+ "stop": ["\n\n\n", "---"] # Stop sequences
111
115
  }
112
116
  }
113
117
 
mem_llm/mem_agent.py CHANGED
@@ -220,11 +220,22 @@ class MemAgent:
220
220
  except Exception as e:
221
221
  self.logger.error(f"Prompt template loading error: {e}")
222
222
  # Simple, short and effective default prompt
223
- self.current_system_prompt = """You are a helpful AI assistant. Be concise and direct.
224
- - Give short, clear answers (2-3 sentences max)
225
- - Only use information from conversation history
226
- - If you don't know something, say so
227
- - Don't make assumptions or hallucinate"""
223
+ self.current_system_prompt = """You are a concise AI assistant. Be EXTREMELY brief.
224
+
225
+ RULES (MANDATORY):
226
+ 1. MAX 1-2 SHORT sentences per response
227
+ 2. When user shares info: Just say "Got it!" or "Noted!"
228
+ 3. Answer questions: ONE sentence, direct
229
+ 4. NO lists, NO explanations, NO examples
230
+ 5. Use conversation history when relevant
231
+
232
+ EXAMPLES:
233
+ User: "My name is Alice" → You: "Nice to meet you, Alice!"
234
+ User: "My favorite food is pizza" → You: "Got it!"
235
+ User: "What's my name?" → You: "Your name is Alice."
236
+ User: "Tell me about Python" → You: "Python is a versatile programming language for web, data science, and AI."
237
+
238
+ BE BRIEF OR USER WILL LEAVE!"""
228
239
 
229
240
  def check_setup(self) -> Dict[str, Any]:
230
241
  """Check system setup"""
@@ -358,8 +369,8 @@ class MemAgent:
358
369
  try:
359
370
  response = self.llm.chat(
360
371
  messages=messages,
361
- temperature=self.config.get("llm.temperature", 0.3) if hasattr(self, 'config') and self.config else 0.3, # Lower = more focused
362
- max_tokens=self.config.get("llm.max_tokens", 300) if hasattr(self, 'config') and self.config else 300 # Shorter responses
372
+ temperature=self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2, # Very focused
373
+ max_tokens=self.config.get("llm.max_tokens", 150) if hasattr(self, 'config') and self.config else 150 # Max 2-3 sentences
363
374
  )
364
375
  except Exception as e:
365
376
  self.logger.error(f"LLM response error: {e}")
@@ -374,11 +385,78 @@ class MemAgent:
374
385
  bot_response=response,
375
386
  metadata=metadata
376
387
  )
388
+
389
+ # Extract and save user info to profile
390
+ self._update_user_profile(user_id, message, response)
377
391
  except Exception as e:
378
392
  self.logger.error(f"Interaction saving error: {e}")
379
393
 
380
394
  return response
395
+
396
+ def _update_user_profile(self, user_id: str, message: str, response: str):
397
+ """Extract user info from conversation and update profile"""
398
+ if not hasattr(self.memory, 'update_profile'):
399
+ return
400
+
401
+ msg_lower = message.lower()
402
+ updates = {}
403
+
404
+ # Extract name
405
+ if "my name is" in msg_lower or "i am" in msg_lower or "i'm" in msg_lower:
406
+ # Simple name extraction
407
+ for phrase in ["my name is ", "i am ", "i'm "]:
408
+ if phrase in msg_lower:
409
+ name_part = message[msg_lower.index(phrase) + len(phrase):].strip()
410
+ name = name_part.split()[0] if name_part else None
411
+ if name and len(name) > 1:
412
+ updates['name'] = name.strip('.,!?')
413
+ break
414
+
415
+ # Extract favorite food
416
+ if "favorite food" in msg_lower or "favourite food" in msg_lower:
417
+ if "is" in msg_lower:
418
+ food = msg_lower.split("is")[-1].strip().strip('.,!?')
419
+ if food and len(food) < 50:
420
+ updates['favorite_food'] = food
421
+
422
+ # Extract location
423
+ if "i live in" in msg_lower or "i'm from" in msg_lower or "from" in msg_lower:
424
+ for phrase in ["i live in ", "i'm from ", "from "]:
425
+ if phrase in msg_lower:
426
+ loc = message[msg_lower.index(phrase) + len(phrase):].strip()
427
+ location = loc.split()[0] if loc else None
428
+ if location and len(location) > 2:
429
+ updates['location'] = location.strip('.,!?')
430
+ break
431
+
432
+ # Save updates
433
+ if updates:
434
+ try:
435
+ self.memory.update_profile(user_id, updates)
436
+ self.logger.debug(f"Profile updated for {user_id}: {updates}")
437
+ except:
438
+ pass
381
439
 
440
+ def get_user_profile(self, user_id: Optional[str] = None) -> Dict:
441
+ """
442
+ Get user's profile info
443
+
444
+ Args:
445
+ user_id: User ID (uses current_user if not specified)
446
+
447
+ Returns:
448
+ User profile dictionary
449
+ """
450
+ uid = user_id or self.current_user
451
+ if not uid:
452
+ return {}
453
+
454
+ try:
455
+ memory_data = self.memory.load_memory(uid)
456
+ return memory_data.get('profile', {})
457
+ except:
458
+ return {}
459
+
382
460
  def add_knowledge(self, category: str, question: str, answer: str,
383
461
  keywords: Optional[List[str]] = None, priority: int = 0) -> int:
384
462
  """Add new record to knowledge base"""
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mem-llm
3
- Version: 1.0.2
3
+ Version: 1.0.3
4
4
  Summary: Memory-enabled AI assistant with local LLM support
5
5
  Home-page: https://github.com/emredeveloper/Mem-LLM
6
6
  Author: C. Emre Karataş
@@ -1,15 +1,15 @@
1
- mem_llm/__init__.py,sha256=l9ynmAWNyC_CPZcb5q-pkJ_oVdJZpFN4hwVHRNqCkg8,920
1
+ mem_llm/__init__.py,sha256=C_7xanudK0nG04K2RkCf-6M5AwphjnkIqVfTmoCKkfc,920
2
2
  mem_llm/config.yaml.example,sha256=lgmfaU5pxnIm4zYxwgCcgLSohNx1Jw6oh3Qk0Xoe2DE,917
3
3
  mem_llm/config_from_docs.py,sha256=YFhq1SWyK63C-TNMS73ncNHg8sJ-XGOf2idWVCjxFco,4974
4
4
  mem_llm/config_manager.py,sha256=8PIHs21jZWlI-eG9DgekjOvNxU3-U4xH7SbT8Gr-Z6M,7075
5
5
  mem_llm/knowledge_loader.py,sha256=oSNhfYYcx7DlZLVogxnbSwaIydq_Q3__RDJFeZR2XVw,2699
6
- mem_llm/llm_client.py,sha256=tLNulVEV_tWdktvcQUokdhd0gTkIISUHipglRt17IWk,5255
7
- mem_llm/mem_agent.py,sha256=BIEMHpbss4QPstS-aEoZwmKBBc_fg87tf8Jj7MTIV8g,20357
6
+ mem_llm/llm_client.py,sha256=XW-LALYV4C-Nj2R8XYT2iT2YnTeH6_tUIldMKooY2uY,5461
7
+ mem_llm/mem_agent.py,sha256=R37BDniAdoCf7kc9i9zbPdzarSlftFlRoqGuYpyGp9Y,23352
8
8
  mem_llm/memory_db.py,sha256=KyNIcChYihSavd2ot5KMBlVB9lq8rexoBQ0lA5bCJNI,12611
9
9
  mem_llm/memory_manager.py,sha256=iXnf5YEJXmQ75jgJ2LEx9zCHxIpZTcLtHlp2eWgFjRg,8335
10
10
  mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
11
11
  mem_llm/prompt_templates.py,sha256=tCiQJw3QQKIaH8NsxEKOIaIVxw4XT43PwdmyfCINzzM,6536
12
- mem_llm-1.0.2.dist-info/METADATA,sha256=yJxAbApli62T27XkKE0SIHxgD50PPbhXldmBXfdyfhE,9347
13
- mem_llm-1.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
14
- mem_llm-1.0.2.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
15
- mem_llm-1.0.2.dist-info/RECORD,,
12
+ mem_llm-1.0.3.dist-info/METADATA,sha256=NPZlthtPNoMaD4hi1uTCMlSEJ_cSKlbCRW5D3b_BHn4,9347
13
+ mem_llm-1.0.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
14
+ mem_llm-1.0.3.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
15
+ mem_llm-1.0.3.dist-info/RECORD,,