mem-llm 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release.


This version of mem-llm might be problematic; consult the registry's security advisory for details.

mem_llm/__init__.py CHANGED
@@ -19,11 +19,12 @@ try:
19
19
  from .memory_db import SQLMemoryManager
20
20
  from .prompt_templates import prompt_manager
21
21
  from .config_manager import get_config
22
- __all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config"]
22
+ from .config_from_docs import create_config_from_document
23
+ __all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config", "create_config_from_document"]
23
24
  except ImportError:
24
25
  __all_pro__ = []
25
26
 
26
- __version__ = "1.0.1"
27
+ __version__ = "1.0.3"
27
28
  __author__ = "C. Emre Karataş"
28
29
 
29
30
  __all__ = [
@@ -0,0 +1,180 @@
1
+ """
2
+ Config Generator from Documents (PDF, DOCX, TXT)
3
+ Automatically creates config.yaml from business documents
4
+ """
5
+
6
+ import os
7
+ from typing import Optional, Dict, Any
8
+ import yaml
9
+
10
+
11
def extract_text_from_file(file_path: str) -> str:
    """
    Extract plain text from a PDF, DOCX, or TXT file.

    Optional dependencies (PyPDF2, python-docx) are imported lazily so the
    module remains importable without them. Missing-library and
    unsupported-format conditions are reported as strings starting with
    "⚠️" — callers rely on that prefix to detect failure.

    Args:
        file_path: Path to the document.

    Returns:
        The extracted text, or a "⚠️ ..."-prefixed message on failure.
    """
    file_ext = os.path.splitext(file_path)[1].lower()

    if file_ext == '.txt':
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    elif file_ext == '.pdf':
        try:
            import PyPDF2
        except ImportError:
            return "⚠️ PyPDF2 not installed. Run: pip install PyPDF2"
        text = []
        with open(file_path, 'rb') as f:
            reader = PyPDF2.PdfReader(f)
            for page in reader.pages:
                # extract_text() may return None for image-only pages;
                # coerce to "" so the final join never sees a non-string.
                text.append(page.extract_text() or "")
        return '\n'.join(text)

    elif file_ext in ['.docx', '.doc']:
        try:
            import docx
        except ImportError:
            return "⚠️ python-docx not installed. Run: pip install python-docx"
        # NOTE(review): python-docx reads only .docx; a legacy binary .doc
        # will raise from Document() — confirm whether .doc must be supported.
        doc = docx.Document(file_path)
        return '\n'.join(paragraph.text for paragraph in doc.paragraphs)

    else:
        return f"⚠️ Unsupported file format: {file_ext}"
52
+
53
+
54
def generate_config_from_text(text: str, company_name: Optional[str] = None) -> Dict[str, Any]:
    """
    Build a config.yaml-shaped dictionary from document text.

    Args:
        text: Text extracted from the source document.
        company_name: Explicit company name; when omitted, the first 10
            lines of *text* are scanned for a line that looks like a
            company name.

    Returns:
        Config dictionary ready to be dumped as YAML.
    """
    # Default template; only business.company_name is derived from the text.
    config = {
        "usage_mode": "business",  # or "personal"

        "llm": {
            "model": "granite4:tiny-h",
            "temperature": 0.3,
            "max_tokens": 300,
            "ollama_url": "http://localhost:11434"
        },

        "memory": {
            "use_sql": True,
            "db_path": "memories.db",
            "json_dir": "memories"
        },

        "response": {
            "use_knowledge_base": True,
            "recent_conversations_limit": 5
        },

        "business": {
            "company_name": company_name or "Your Company",
            "industry": "Technology",
            "founded_year": "2024"
        },

        "knowledge_base": {
            "auto_load": True,
            "search_limit": 5
        },

        "logging": {
            "level": "INFO",
            "file": "mem_agent.log"
        }
    }

    # Try to extract a company name from the document header if not provided.
    if not company_name:
        keywords = {"company", "corp", "inc", "ltd"}
        for line in text.split('\n')[:10]:  # first 10 lines only
            # Match whole words (punctuation stripped) so substrings like
            # "including" do not trigger the "inc" keyword.
            tokens = {tok.strip('.,:;()"\'') for tok in line.lower().split()}
            if tokens & keywords:
                config["business"]["company_name"] = line.strip()[:50]
                break

    return config
113
+
114
+
115
def create_config_from_document(
    doc_path: str,
    output_path: str = "config.yaml",
    company_name: Optional[str] = None
) -> str:
    """
    Generate a config.yaml file from a business document.

    Reads the document at *doc_path* (PDF/DOCX/TXT), derives a config
    dictionary from its text, and writes it as YAML to *output_path*.

    Args:
        doc_path: Path to the source document.
        output_path: Where to write the generated YAML.
        company_name: Optional explicit company name; otherwise guessed
            from the document text.

    Returns:
        A status message ("✅..." on success, "❌"/"⚠️" on failure).
    """
    if not os.path.exists(doc_path):
        return f"❌ File not found: {doc_path}"

    print(f"📄 Reading document: {doc_path}")
    extracted = extract_text_from_file(doc_path)

    # The extraction helper signals failure via a "⚠️"-prefixed message.
    if extracted.startswith("⚠️"):
        return extracted

    print(f"✅ Extracted {len(extracted)} characters")

    cfg = generate_config_from_text(extracted, company_name)

    with open(output_path, 'w', encoding='utf-8') as handle:
        yaml.dump(cfg, handle, default_flow_style=False, allow_unicode=True)

    print(f"✅ Config created: {output_path}")
    print(f"📌 Company: {cfg['business']['company_name']}")

    return f"✅ Config successfully created at {output_path}"
154
+
155
+
156
# Simple CLI: python -m mem_llm.config_from_docs <document> [output] [company]
if __name__ == "__main__":
    import sys

    if len(sys.argv) < 2:
        print("""
🔧 Config Generator from Documents

Usage:
    python -m mem_llm.config_from_docs <document_path> [output_path] [company_name]

Examples:
    python -m mem_llm.config_from_docs company_info.pdf
    python -m mem_llm.config_from_docs business.docx my_config.yaml "Acme Corp"
    python -m mem_llm.config_from_docs info.txt
    """)
        sys.exit(1)

    cli_args = sys.argv[1:]
    source = cli_args[0]
    target = cli_args[1] if len(cli_args) > 1 else "config.yaml"
    company = cli_args[2] if len(cli_args) > 2 else None

    print(create_config_from_document(source, target, company))
180
+
mem_llm/llm_client.py CHANGED
@@ -107,7 +107,11 @@ class OllamaClient:
107
107
  "stream": False,
108
108
  "options": {
109
109
  "temperature": temperature,
110
- "num_predict": max_tokens
110
+ "num_predict": max_tokens,
111
+ "num_ctx": 2048, # Context window
112
+ "top_k": 40, # Limit vocab
113
+ "top_p": 0.9, # Nucleus sampling
114
+ "stop": ["\n\n\n", "---"] # Stop sequences
111
115
  }
112
116
  }
113
117
 
mem_llm/mem_agent.py CHANGED
@@ -111,6 +111,8 @@ class MemAgent:
111
111
  self.logger.info(f"JSON memory system active: {json_dir}")
112
112
 
113
113
  # LLM client
114
+ self.model = model # Store model name
115
+ self.use_sql = use_sql # Store SQL usage flag
114
116
  self.llm = OllamaClient(model, ollama_url)
115
117
  self.logger.info(f"LLM client ready: {model}")
116
118
 
@@ -217,7 +219,23 @@ class MemAgent:
217
219
  self.logger.info(f"Prompt template loaded: {template_name} (Mode: {self.usage_mode})")
218
220
  except Exception as e:
219
221
  self.logger.error(f"Prompt template loading error: {e}")
220
- self.current_system_prompt = f"You are a helpful assistant in {self.usage_mode} mode."
222
+ # Simple, short and effective default prompt
223
+ self.current_system_prompt = """You are a concise AI assistant. Be EXTREMELY brief.
224
+
225
+ RULES (MANDATORY):
226
+ 1. MAX 1-2 SHORT sentences per response
227
+ 2. When user shares info: Just say "Got it!" or "Noted!"
228
+ 3. Answer questions: ONE sentence, direct
229
+ 4. NO lists, NO explanations, NO examples
230
+ 5. Use conversation history when relevant
231
+
232
+ EXAMPLES:
233
+ User: "My name is Alice" → You: "Nice to meet you, Alice!"
234
+ User: "My favorite food is pizza" → You: "Got it!"
235
+ User: "What's my name?" → You: "Your name is Alice."
236
+ User: "Tell me about Python" → You: "Python is a versatile programming language for web, data science, and AI."
237
+
238
+ BE BRIEF OR USER WILL LEAVE!"""
221
239
 
222
240
  def check_setup(self) -> Dict[str, Any]:
223
241
  """Check system setup"""
@@ -330,7 +348,8 @@ class MemAgent:
330
348
  recent_limit = self.config.get("response.recent_conversations_limit", 5) if hasattr(self, 'config') and self.config else 5
331
349
  recent_convs = self.memory.get_recent_conversations(user_id, recent_limit)
332
350
 
333
- for conv in reversed(recent_convs):
351
+ # Add conversations in chronological order (oldest first)
352
+ for conv in recent_convs:
334
353
  messages.append({"role": "user", "content": conv.get('user_message', '')})
335
354
  messages.append({"role": "assistant", "content": conv.get('bot_response', '')})
336
355
  except Exception as e:
@@ -350,8 +369,8 @@ class MemAgent:
350
369
  try:
351
370
  response = self.llm.chat(
352
371
  messages=messages,
353
- temperature=self.config.get("llm.temperature", 0.7) if hasattr(self, 'config') and self.config else 0.7,
354
- max_tokens=self.config.get("llm.max_tokens", 500) if hasattr(self, 'config') and self.config else 500
372
+ temperature=self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2, # Very focused
373
+ max_tokens=self.config.get("llm.max_tokens", 150) if hasattr(self, 'config') and self.config else 150 # Max 2-3 sentences
355
374
  )
356
375
  except Exception as e:
357
376
  self.logger.error(f"LLM response error: {e}")
@@ -366,11 +385,78 @@ class MemAgent:
366
385
  bot_response=response,
367
386
  metadata=metadata
368
387
  )
388
+
389
+ # Extract and save user info to profile
390
+ self._update_user_profile(user_id, message, response)
369
391
  except Exception as e:
370
392
  self.logger.error(f"Interaction saving error: {e}")
371
393
 
372
394
  return response
395
+
396
def _update_user_profile(self, user_id: str, message: str, response: str):
    """
    Heuristically extract personal facts (name, favorite food, location)
    from the user's message and persist them to the user's profile.

    Best-effort by design: extraction is simple keyword matching, and a
    failing save is logged rather than raised so it never breaks the chat
    flow.

    Args:
        user_id: Owner of the profile to update.
        message: The user's message to mine for facts.
        response: The assistant's reply (currently unused).
    """
    # Only the JSON memory backend exposes update_profile; skip otherwise.
    if not hasattr(self.memory, 'update_profile'):
        return

    msg_lower = message.lower()
    updates = {}

    # --- Name: "my name is X" / "i am X" / "i'm X" ---
    for phrase in ("my name is ", "i am ", "i'm "):
        if phrase in msg_lower:
            tail = message[msg_lower.index(phrase) + len(phrase):].strip()
            name = tail.split()[0] if tail else None
            if name and len(name) > 1:
                updates['name'] = name.strip('.,!?')
            break

    # --- Favorite food: "... favorite food ... is X" ---
    if "favorite food" in msg_lower or "favourite food" in msg_lower:
        # Split on the word " is " (space-delimited) so letters inside
        # other words ("this", "delicious") don't truncate the value.
        if " is " in msg_lower:
            food = msg_lower.rsplit(" is ", 1)[-1].strip().strip('.,!?')
            if food and len(food) < 50:
                updates['favorite_food'] = food

    # --- Location: only explicit phrases. A bare "from" substring would
    # match far too many unrelated sentences ("apart from...", etc.). ---
    for phrase in ("i live in ", "i'm from ", "i am from "):
        if phrase in msg_lower:
            tail = message[msg_lower.index(phrase) + len(phrase):].strip()
            place = tail.split()[0] if tail else None
            if place and len(place) > 2:
                updates['location'] = place.strip('.,!?')
            break

    # Persist whatever was found; never let a storage error escape.
    if updates:
        try:
            self.memory.update_profile(user_id, updates)
            self.logger.debug(f"Profile updated for {user_id}: {updates}")
        except Exception as e:
            self.logger.debug(f"Profile update failed for {user_id}: {e}")
439
 
440
def get_user_profile(self, user_id: Optional[str] = None) -> Dict:
    """
    Get a user's stored profile information.

    Args:
        user_id: User ID; falls back to self.current_user when omitted.

    Returns:
        The user's profile dictionary, or {} when no user is known or the
        memory backend cannot supply one (e.g. no load_memory support).
    """
    uid = user_id or self.current_user
    if not uid:
        return {}

    try:
        memory_data = self.memory.load_memory(uid)
        return memory_data.get('profile', {})
    except Exception:
        # Narrowed from a bare except; any backend failure means "no profile".
        return {}
459
+
374
460
  def add_knowledge(self, category: str, question: str, answer: str,
375
461
  keywords: Optional[List[str]] = None, priority: int = 0) -> int:
376
462
  """Add new record to knowledge base"""
mem_llm/memory_db.py CHANGED
@@ -164,6 +164,11 @@ class SQLMemoryManager:
164
164
  self.conn.commit()
165
165
  return interaction_id
166
166
 
167
# Alias for compatibility
def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> int:
    """Backward-compatible alias for add_interaction; returns the new interaction id."""
    return self.add_interaction(user_id, user_message, bot_response, metadata)
171
+
167
172
  def get_recent_conversations(self, user_id: str, limit: int = 10) -> List[Dict]:
168
173
  """
169
174
  Kullanıcının son konuşmalarını getirir
mem_llm/memory_manager.py CHANGED
@@ -101,6 +101,11 @@ class MemoryManager:
101
101
  self.conversations[user_id].append(interaction)
102
102
  self.save_memory(user_id)
103
103
 
104
# Alias for compatibility
def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> None:
    """Backward-compatible alias for add_interaction (JSON backend; returns None)."""
    return self.add_interaction(user_id, user_message, bot_response, metadata)
108
+
104
109
  def update_profile(self, user_id: str, updates: Dict) -> None:
105
110
  """
106
111
  Update user profile
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mem-llm
3
- Version: 1.0.1
3
+ Version: 1.0.3
4
4
  Summary: Memory-enabled AI assistant with local LLM support
5
5
  Home-page: https://github.com/emredeveloper/Mem-LLM
6
6
  Author: C. Emre Karataş
@@ -0,0 +1,15 @@
1
+ mem_llm/__init__.py,sha256=C_7xanudK0nG04K2RkCf-6M5AwphjnkIqVfTmoCKkfc,920
2
+ mem_llm/config.yaml.example,sha256=lgmfaU5pxnIm4zYxwgCcgLSohNx1Jw6oh3Qk0Xoe2DE,917
3
+ mem_llm/config_from_docs.py,sha256=YFhq1SWyK63C-TNMS73ncNHg8sJ-XGOf2idWVCjxFco,4974
4
+ mem_llm/config_manager.py,sha256=8PIHs21jZWlI-eG9DgekjOvNxU3-U4xH7SbT8Gr-Z6M,7075
5
+ mem_llm/knowledge_loader.py,sha256=oSNhfYYcx7DlZLVogxnbSwaIydq_Q3__RDJFeZR2XVw,2699
6
+ mem_llm/llm_client.py,sha256=XW-LALYV4C-Nj2R8XYT2iT2YnTeH6_tUIldMKooY2uY,5461
7
+ mem_llm/mem_agent.py,sha256=R37BDniAdoCf7kc9i9zbPdzarSlftFlRoqGuYpyGp9Y,23352
8
+ mem_llm/memory_db.py,sha256=KyNIcChYihSavd2ot5KMBlVB9lq8rexoBQ0lA5bCJNI,12611
9
+ mem_llm/memory_manager.py,sha256=iXnf5YEJXmQ75jgJ2LEx9zCHxIpZTcLtHlp2eWgFjRg,8335
10
+ mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
11
+ mem_llm/prompt_templates.py,sha256=tCiQJw3QQKIaH8NsxEKOIaIVxw4XT43PwdmyfCINzzM,6536
12
+ mem_llm-1.0.3.dist-info/METADATA,sha256=NPZlthtPNoMaD4hi1uTCMlSEJ_cSKlbCRW5D3b_BHn4,9347
13
+ mem_llm-1.0.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
14
+ mem_llm-1.0.3.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
15
+ mem_llm-1.0.3.dist-info/RECORD,,
@@ -1,14 +0,0 @@
1
- mem_llm/__init__.py,sha256=Q_Ryd4aayMcfh9dNelr7TNupMc_AWWIuFmZUwO_Rn4I,827
2
- mem_llm/config.yaml.example,sha256=lgmfaU5pxnIm4zYxwgCcgLSohNx1Jw6oh3Qk0Xoe2DE,917
3
- mem_llm/config_manager.py,sha256=8PIHs21jZWlI-eG9DgekjOvNxU3-U4xH7SbT8Gr-Z6M,7075
4
- mem_llm/knowledge_loader.py,sha256=oSNhfYYcx7DlZLVogxnbSwaIydq_Q3__RDJFeZR2XVw,2699
5
- mem_llm/llm_client.py,sha256=tLNulVEV_tWdktvcQUokdhd0gTkIISUHipglRt17IWk,5255
6
- mem_llm/mem_agent.py,sha256=AMw8X5cFdHoyphyHf9B4eBXDFGTLEv9nkDBXnO_fGL4,19907
7
- mem_llm/memory_db.py,sha256=OGWTIHBHh1qETGvmrlZWfmv9szSaFuSCzJGMZg6HBww,12329
8
- mem_llm/memory_manager.py,sha256=-JM0Qb5dYm1Rj4jd3FQfDpZSaya-ly9rcgEjyvnyDzk,8052
9
- mem_llm/memory_tools.py,sha256=ARANFqu_bmL56SlV1RzTjfQsJj-Qe2QvqY0pF92hDxU,8678
10
- mem_llm/prompt_templates.py,sha256=tCiQJw3QQKIaH8NsxEKOIaIVxw4XT43PwdmyfCINzzM,6536
11
- mem_llm-1.0.1.dist-info/METADATA,sha256=QbeIJOQT5bhn7suA7Eo8AUe7RfDhK0jV8UZqxeF6o6g,9347
12
- mem_llm-1.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
13
- mem_llm-1.0.1.dist-info/top_level.txt,sha256=_fU1ML-0JwkaxWdhqpwtmTNaJEOvDMQeJdA8d5WqDn8,8
14
- mem_llm-1.0.1.dist-info/RECORD,,