mem-llm 1.0.2__tar.gz → 1.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mem-llm might be problematic.
- {mem_llm-1.0.2/mem_llm.egg-info → mem_llm-1.0.3}/PKG-INFO +1 -1
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/__init__.py +1 -1
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/llm_client.py +5 -1
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/mem_agent.py +85 -7
- {mem_llm-1.0.2 → mem_llm-1.0.3/mem_llm.egg-info}/PKG-INFO +1 -1
- {mem_llm-1.0.2 → mem_llm-1.0.3}/setup.py +1 -1
- {mem_llm-1.0.2 → mem_llm-1.0.3}/CHANGELOG.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/INTEGRATION_GUIDE.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/MANIFEST.in +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/QUICKSTART.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/QUICKSTART_TR.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/README.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/STRUCTURE.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/docs/CONFIG_GUIDE.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/docs/INDEX.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/docs/README.md +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/config.yaml.example +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/config_from_docs.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/config_manager.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/knowledge_loader.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/memory_db.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/memory_manager.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/memory_tools.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm/prompt_templates.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm.egg-info/SOURCES.txt +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm.egg-info/dependency_links.txt +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm.egg-info/requires.txt +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/mem_llm.egg-info/top_level.txt +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/requirements.txt +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/setup.cfg +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/tests/test_integration.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/tests/test_llm_client.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/tests/test_mem_agent.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/tests/test_memory_manager.py +0 -0
- {mem_llm-1.0.2 → mem_llm-1.0.3}/tests/test_memory_tools.py +0 -0
mem_llm/llm_client.py

@@ -107,7 +107,11 @@ class OllamaClient:
             "stream": False,
             "options": {
                 "temperature": temperature,
-                "num_predict": max_tokens
+                "num_predict": max_tokens,
+                "num_ctx": 2048,  # Context window
+                "top_k": 40,  # Limit vocab
+                "top_p": 0.9,  # Nucleus sampling
+                "stop": ["\n\n\n", "---"]  # Stop sequences
             }
         }
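For context, the fields added above are standard Ollama generation options. The snippet below is a hedged illustration (not package code) of a raw request carrying the same options, assuming a local Ollama server on the default port; "llama3" is a placeholder model name.

```python
import requests

# Illustrative request to a local Ollama server using the same generation
# options the client now sets. Model name and prompt are placeholders.
payload = {
    "model": "llama3",
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": False,
    "options": {
        "temperature": 0.2,
        "num_predict": 150,         # cap on generated tokens
        "num_ctx": 2048,            # context window
        "top_k": 40,                # limit vocabulary
        "top_p": 0.9,               # nucleus sampling
        "stop": ["\n\n\n", "---"],  # stop sequences
    },
}
resp = requests.post("http://localhost:11434/api/chat", json=payload, timeout=60)
print(resp.json()["message"]["content"])
```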
mem_llm/mem_agent.py

@@ -220,11 +220,22 @@ class MemAgent:
         except Exception as e:
             self.logger.error(f"Prompt template loading error: {e}")
             # Simple, short and effective default prompt
-            self.current_system_prompt = """You are a
-
-
-
-
+            self.current_system_prompt = """You are a concise AI assistant. Be EXTREMELY brief.
+
+RULES (MANDATORY):
+1. MAX 1-2 SHORT sentences per response
+2. When user shares info: Just say "Got it!" or "Noted!"
+3. Answer questions: ONE sentence, direct
+4. NO lists, NO explanations, NO examples
+5. Use conversation history when relevant
+
+EXAMPLES:
+User: "My name is Alice" → You: "Nice to meet you, Alice!"
+User: "My favorite food is pizza" → You: "Got it!"
+User: "What's my name?" → You: "Your name is Alice."
+User: "Tell me about Python" → You: "Python is a versatile programming language for web, data science, and AI."
+
+BE BRIEF OR USER WILL LEAVE!"""
 
     def check_setup(self) -> Dict[str, Any]:
         """Check system setup"""
@@ -358,8 +369,8 @@ class MemAgent:
         try:
             response = self.llm.chat(
                 messages=messages,
-                temperature=self.config.get("llm.temperature", 0.
-                max_tokens=self.config.get("llm.max_tokens",
+                temperature=self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2,  # Very focused
+                max_tokens=self.config.get("llm.max_tokens", 150) if hasattr(self, 'config') and self.config else 150  # Max 2-3 sentences
             )
         except Exception as e:
             self.logger.error(f"LLM response error: {e}")
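A note on the fallback expressions above: when the agent has no config object attached, the call resolves to temperature=0.2 and max_tokens=150. The snippet below is a minimal standalone sketch of that defaulting pattern; the `config` argument is a stand-in, not the package's config object.

```python
# Minimal sketch of the defaulting pattern used above; not package code.
def resolve_llm_params(config=None):
    temperature = config.get("llm.temperature", 0.2) if config else 0.2
    max_tokens = config.get("llm.max_tokens", 150) if config else 150
    return temperature, max_tokens

print(resolve_llm_params())  # -> (0.2, 150) when no config is attached
```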
@@ -374,11 +385,78 @@ class MemAgent:
                 bot_response=response,
                 metadata=metadata
             )
+
+            # Extract and save user info to profile
+            self._update_user_profile(user_id, message, response)
         except Exception as e:
             self.logger.error(f"Interaction saving error: {e}")
 
         return response
+
+    def _update_user_profile(self, user_id: str, message: str, response: str):
+        """Extract user info from conversation and update profile"""
+        if not hasattr(self.memory, 'update_profile'):
+            return
+
+        msg_lower = message.lower()
+        updates = {}
+
+        # Extract name
+        if "my name is" in msg_lower or "i am" in msg_lower or "i'm" in msg_lower:
+            # Simple name extraction
+            for phrase in ["my name is ", "i am ", "i'm "]:
+                if phrase in msg_lower:
+                    name_part = message[msg_lower.index(phrase) + len(phrase):].strip()
+                    name = name_part.split()[0] if name_part else None
+                    if name and len(name) > 1:
+                        updates['name'] = name.strip('.,!?')
+                    break
+
+        # Extract favorite food
+        if "favorite food" in msg_lower or "favourite food" in msg_lower:
+            if "is" in msg_lower:
+                food = msg_lower.split("is")[-1].strip().strip('.,!?')
+                if food and len(food) < 50:
+                    updates['favorite_food'] = food
+
+        # Extract location
+        if "i live in" in msg_lower or "i'm from" in msg_lower or "from" in msg_lower:
+            for phrase in ["i live in ", "i'm from ", "from "]:
+                if phrase in msg_lower:
+                    loc = message[msg_lower.index(phrase) + len(phrase):].strip()
+                    location = loc.split()[0] if loc else None
+                    if location and len(location) > 2:
+                        updates['location'] = location.strip('.,!?')
+                    break
+
+        # Save updates
+        if updates:
+            try:
+                self.memory.update_profile(user_id, updates)
+                self.logger.debug(f"Profile updated for {user_id}: {updates}")
+            except:
+                pass
 
+    def get_user_profile(self, user_id: Optional[str] = None) -> Dict:
+        """
+        Get user's profile info
+
+        Args:
+            user_id: User ID (uses current_user if not specified)
+
+        Returns:
+            User profile dictionary
+        """
+        uid = user_id or self.current_user
+        if not uid:
+            return {}
+
+        try:
+            memory_data = self.memory.load_memory(uid)
+            return memory_data.get('profile', {})
+        except:
+            return {}
+
     def add_knowledge(self, category: str, question: str, answer: str,
                       keywords: Optional[List[str]] = None, priority: int = 0) -> int:
         """Add new record to knowledge base"""
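The two methods added above form the new profile API in this release. The sketch below is a hedged usage example: get_user_profile() is taken directly from the diff, while the import path and the chat() entry point are assumptions inferred from the package layout and the partial hunk above, not confirmed signatures.

```python
from mem_llm import MemAgent  # import path assumed from the package layout

agent = MemAgent()  # constructor arguments omitted; see the package docs

# Assumed chat entry point: the hunk above shows it receiving `message` and
# `user_id` and returning `response`, but the exact signature is not shown.
agent.chat("My name is Alice and my favorite food is pizza", user_id="alice")

# get_user_profile() is added in 1.0.3; with no argument it falls back to
# the agent's current_user.
profile = agent.get_user_profile("alice")
print(profile)  # e.g. {'name': 'Alice', 'favorite_food': 'pizza'}
```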
setup.py

@@ -11,7 +11,7 @@ long_description = (this_directory / "README.md").read_text(encoding='utf-8')
 
 setup(
     name="mem-llm",
-    version="1.0.
+    version="1.0.3",
     author="C. Emre Karataş",
     author_email="karatasqemre@gmail.com",  # PyPI için gerekli - kendi emailinizi yazın
     description="Memory-enabled AI assistant with local LLM support",