mem-llm 1.0.1__tar.gz → 1.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mem-llm might be problematic. Click here for more details.

Files changed (35) hide show
  1. {mem_llm-1.0.1/mem_llm.egg-info → mem_llm-1.0.2}/PKG-INFO +1 -1
  2. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/__init__.py +3 -2
  3. mem_llm-1.0.2/mem_llm/config_from_docs.py +180 -0
  4. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/mem_agent.py +12 -4
  5. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/memory_db.py +5 -0
  6. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/memory_manager.py +5 -0
  7. {mem_llm-1.0.1 → mem_llm-1.0.2/mem_llm.egg-info}/PKG-INFO +1 -1
  8. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm.egg-info/SOURCES.txt +1 -0
  9. {mem_llm-1.0.1 → mem_llm-1.0.2}/setup.py +1 -1
  10. {mem_llm-1.0.1 → mem_llm-1.0.2}/tests/test_integration.py +6 -4
  11. {mem_llm-1.0.1 → mem_llm-1.0.2}/tests/test_mem_agent.py +1 -0
  12. {mem_llm-1.0.1 → mem_llm-1.0.2}/tests/test_memory_manager.py +1 -0
  13. {mem_llm-1.0.1 → mem_llm-1.0.2}/tests/test_memory_tools.py +1 -0
  14. {mem_llm-1.0.1 → mem_llm-1.0.2}/CHANGELOG.md +0 -0
  15. {mem_llm-1.0.1 → mem_llm-1.0.2}/INTEGRATION_GUIDE.md +0 -0
  16. {mem_llm-1.0.1 → mem_llm-1.0.2}/MANIFEST.in +0 -0
  17. {mem_llm-1.0.1 → mem_llm-1.0.2}/QUICKSTART.md +0 -0
  18. {mem_llm-1.0.1 → mem_llm-1.0.2}/QUICKSTART_TR.md +0 -0
  19. {mem_llm-1.0.1 → mem_llm-1.0.2}/README.md +0 -0
  20. {mem_llm-1.0.1 → mem_llm-1.0.2}/STRUCTURE.md +0 -0
  21. {mem_llm-1.0.1 → mem_llm-1.0.2}/docs/CONFIG_GUIDE.md +0 -0
  22. {mem_llm-1.0.1 → mem_llm-1.0.2}/docs/INDEX.md +0 -0
  23. {mem_llm-1.0.1 → mem_llm-1.0.2}/docs/README.md +0 -0
  24. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/config.yaml.example +0 -0
  25. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/config_manager.py +0 -0
  26. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/knowledge_loader.py +0 -0
  27. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/llm_client.py +0 -0
  28. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/memory_tools.py +0 -0
  29. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm/prompt_templates.py +0 -0
  30. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm.egg-info/dependency_links.txt +0 -0
  31. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm.egg-info/requires.txt +0 -0
  32. {mem_llm-1.0.1 → mem_llm-1.0.2}/mem_llm.egg-info/top_level.txt +0 -0
  33. {mem_llm-1.0.1 → mem_llm-1.0.2}/requirements.txt +0 -0
  34. {mem_llm-1.0.1 → mem_llm-1.0.2}/setup.cfg +0 -0
  35. {mem_llm-1.0.1 → mem_llm-1.0.2}/tests/test_llm_client.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mem-llm
3
- Version: 1.0.1
3
+ Version: 1.0.2
4
4
  Summary: Memory-enabled AI assistant with local LLM support
5
5
  Home-page: https://github.com/emredeveloper/Mem-LLM
6
6
  Author: C. Emre Karataş
@@ -19,11 +19,12 @@ try:
19
19
  from .memory_db import SQLMemoryManager
20
20
  from .prompt_templates import prompt_manager
21
21
  from .config_manager import get_config
22
- __all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config"]
22
+ from .config_from_docs import create_config_from_document
23
+ __all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config", "create_config_from_document"]
23
24
  except ImportError:
24
25
  __all_pro__ = []
25
26
 
26
- __version__ = "1.0.1"
27
+ __version__ = "1.0.2"
27
28
  __author__ = "C. Emre Karataş"
28
29
 
29
30
  __all__ = [
@@ -0,0 +1,180 @@
1
+ """
2
+ Config Generator from Documents (PDF, DOCX, TXT)
3
+ Automatically creates config.yaml from business documents
4
+ """
5
+
6
+ import os
7
+ from typing import Optional, Dict, Any
8
+ import yaml
9
+
10
+
11
+ def extract_text_from_file(file_path: str) -> str:
12
+ """
13
+ Extract text from PDF, DOCX, or TXT files
14
+
15
+ Args:
16
+ file_path: Path to document
17
+
18
+ Returns:
19
+ Extracted text
20
+ """
21
+ file_ext = os.path.splitext(file_path)[1].lower()
22
+
23
+ if file_ext == '.txt':
24
+ with open(file_path, 'r', encoding='utf-8') as f:
25
+ return f.read()
26
+
27
+ elif file_ext == '.pdf':
28
+ try:
29
+ import PyPDF2
30
+ text = []
31
+ with open(file_path, 'rb') as f:
32
+ reader = PyPDF2.PdfReader(f)
33
+ for page in reader.pages:
34
+ text.append(page.extract_text())
35
+ return '\n'.join(text)
36
+ except ImportError:
37
+ return "⚠️ PyPDF2 not installed. Run: pip install PyPDF2"
38
+
39
+ elif file_ext in ['.docx', '.doc']:
40
+ try:
41
+ import docx
42
+ doc = docx.Document(file_path)
43
+ text = []
44
+ for paragraph in doc.paragraphs:
45
+ text.append(paragraph.text)
46
+ return '\n'.join(text)
47
+ except ImportError:
48
+ return "⚠️ python-docx not installed. Run: pip install python-docx"
49
+
50
+ else:
51
+ return f"⚠️ Unsupported file format: {file_ext}"
52
+
53
+
54
+ def generate_config_from_text(text: str, company_name: Optional[str] = None) -> Dict[str, Any]:
55
+ """
56
+ Generate config.yaml structure from text
57
+
58
+ Args:
59
+ text: Extracted text from document
60
+ company_name: Company name (optional)
61
+
62
+ Returns:
63
+ Config dictionary
64
+ """
65
+ # Simple config template
66
+ config = {
67
+ "usage_mode": "business", # or "personal"
68
+
69
+ "llm": {
70
+ "model": "granite4:tiny-h",
71
+ "temperature": 0.3,
72
+ "max_tokens": 300,
73
+ "ollama_url": "http://localhost:11434"
74
+ },
75
+
76
+ "memory": {
77
+ "use_sql": True,
78
+ "db_path": "memories.db",
79
+ "json_dir": "memories"
80
+ },
81
+
82
+ "response": {
83
+ "use_knowledge_base": True,
84
+ "recent_conversations_limit": 5
85
+ },
86
+
87
+ "business": {
88
+ "company_name": company_name or "Your Company",
89
+ "industry": "Technology",
90
+ "founded_year": "2024"
91
+ },
92
+
93
+ "knowledge_base": {
94
+ "auto_load": True,
95
+ "search_limit": 5
96
+ },
97
+
98
+ "logging": {
99
+ "level": "INFO",
100
+ "file": "mem_agent.log"
101
+ }
102
+ }
103
+
104
+ # Try to extract company name from text if not provided
105
+ if not company_name:
106
+ lines = text.split('\n')[:10] # First 10 lines
107
+ for line in lines:
108
+ if any(keyword in line.lower() for keyword in ['company', 'corp', 'inc', 'ltd']):
109
+ config["business"]["company_name"] = line.strip()[:50]
110
+ break
111
+
112
+ return config
113
+
114
+
115
+ def create_config_from_document(
116
+ doc_path: str,
117
+ output_path: str = "config.yaml",
118
+ company_name: Optional[str] = None
119
+ ) -> str:
120
+ """
121
+ Create config.yaml from a business document
122
+
123
+ Args:
124
+ doc_path: Path to PDF/DOCX/TXT document
125
+ output_path: Output config.yaml path
126
+ company_name: Company name (optional)
127
+
128
+ Returns:
129
+ Success message
130
+ """
131
+ if not os.path.exists(doc_path):
132
+ return f"❌ File not found: {doc_path}"
133
+
134
+ # Extract text
135
+ print(f"📄 Reading document: {doc_path}")
136
+ text = extract_text_from_file(doc_path)
137
+
138
+ if text.startswith("⚠️"):
139
+ return text # Error message
140
+
141
+ print(f"✅ Extracted {len(text)} characters")
142
+
143
+ # Generate config
144
+ config = generate_config_from_text(text, company_name)
145
+
146
+ # Save to YAML
147
+ with open(output_path, 'w', encoding='utf-8') as f:
148
+ yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
149
+
150
+ print(f"✅ Config created: {output_path}")
151
+ print(f"📌 Company: {config['business']['company_name']}")
152
+
153
+ return f"✅ Config successfully created at {output_path}"
154
+
155
+
156
+ # Simple CLI
157
+ if __name__ == "__main__":
158
+ import sys
159
+
160
+ if len(sys.argv) < 2:
161
+ print("""
162
+ 🔧 Config Generator from Documents
163
+
164
+ Usage:
165
+ python -m mem_llm.config_from_docs <document_path> [output_path] [company_name]
166
+
167
+ Examples:
168
+ python -m mem_llm.config_from_docs company_info.pdf
169
+ python -m mem_llm.config_from_docs business.docx my_config.yaml "Acme Corp"
170
+ python -m mem_llm.config_from_docs info.txt
171
+ """)
172
+ sys.exit(1)
173
+
174
+ doc_path = sys.argv[1]
175
+ output_path = sys.argv[2] if len(sys.argv) > 2 else "config.yaml"
176
+ company_name = sys.argv[3] if len(sys.argv) > 3 else None
177
+
178
+ result = create_config_from_document(doc_path, output_path, company_name)
179
+ print(result)
180
+
@@ -111,6 +111,8 @@ class MemAgent:
111
111
  self.logger.info(f"JSON memory system active: {json_dir}")
112
112
 
113
113
  # LLM client
114
+ self.model = model # Store model name
115
+ self.use_sql = use_sql # Store SQL usage flag
114
116
  self.llm = OllamaClient(model, ollama_url)
115
117
  self.logger.info(f"LLM client ready: {model}")
116
118
 
@@ -217,7 +219,12 @@ class MemAgent:
217
219
  self.logger.info(f"Prompt template loaded: {template_name} (Mode: {self.usage_mode})")
218
220
  except Exception as e:
219
221
  self.logger.error(f"Prompt template loading error: {e}")
220
- self.current_system_prompt = f"You are a helpful assistant in {self.usage_mode} mode."
222
+ # Simple, short and effective default prompt
223
+ self.current_system_prompt = """You are a helpful AI assistant. Be concise and direct.
224
+ - Give short, clear answers (2-3 sentences max)
225
+ - Only use information from conversation history
226
+ - If you don't know something, say so
227
+ - Don't make assumptions or hallucinate"""
221
228
 
222
229
  def check_setup(self) -> Dict[str, Any]:
223
230
  """Check system setup"""
@@ -330,7 +337,8 @@ class MemAgent:
330
337
  recent_limit = self.config.get("response.recent_conversations_limit", 5) if hasattr(self, 'config') and self.config else 5
331
338
  recent_convs = self.memory.get_recent_conversations(user_id, recent_limit)
332
339
 
333
- for conv in reversed(recent_convs):
340
+ # Add conversations in chronological order (oldest first)
341
+ for conv in recent_convs:
334
342
  messages.append({"role": "user", "content": conv.get('user_message', '')})
335
343
  messages.append({"role": "assistant", "content": conv.get('bot_response', '')})
336
344
  except Exception as e:
@@ -350,8 +358,8 @@ class MemAgent:
350
358
  try:
351
359
  response = self.llm.chat(
352
360
  messages=messages,
353
- temperature=self.config.get("llm.temperature", 0.7) if hasattr(self, 'config') and self.config else 0.7,
354
- max_tokens=self.config.get("llm.max_tokens", 500) if hasattr(self, 'config') and self.config else 500
361
+ temperature=self.config.get("llm.temperature", 0.3) if hasattr(self, 'config') and self.config else 0.3, # Lower = more focused
362
+ max_tokens=self.config.get("llm.max_tokens", 300) if hasattr(self, 'config') and self.config else 300 # Shorter responses
355
363
  )
356
364
  except Exception as e:
357
365
  self.logger.error(f"LLM response error: {e}")
@@ -164,6 +164,11 @@ class SQLMemoryManager:
164
164
  self.conn.commit()
165
165
  return interaction_id
166
166
 
167
+ # Alias for compatibility
168
+ def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> int:
169
+ """Alias for add_interaction"""
170
+ return self.add_interaction(user_id, user_message, bot_response, metadata)
171
+
167
172
  def get_recent_conversations(self, user_id: str, limit: int = 10) -> List[Dict]:
168
173
  """
169
174
  Kullanıcının son konuşmalarını getirir
@@ -101,6 +101,11 @@ class MemoryManager:
101
101
  self.conversations[user_id].append(interaction)
102
102
  self.save_memory(user_id)
103
103
 
104
+ # Alias for compatibility
105
+ def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> None:
106
+ """Alias for add_interaction"""
107
+ return self.add_interaction(user_id, user_message, bot_response, metadata)
108
+
104
109
  def update_profile(self, user_id: str, updates: Dict) -> None:
105
110
  """
106
111
  Update user profile
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mem-llm
3
- Version: 1.0.1
3
+ Version: 1.0.2
4
4
  Summary: Memory-enabled AI assistant with local LLM support
5
5
  Home-page: https://github.com/emredeveloper/Mem-LLM
6
6
  Author: C. Emre Karataş
@@ -12,6 +12,7 @@ docs/INDEX.md
12
12
  docs/README.md
13
13
  mem_llm/__init__.py
14
14
  mem_llm/config.yaml.example
15
+ mem_llm/config_from_docs.py
15
16
  mem_llm/config_manager.py
16
17
  mem_llm/knowledge_loader.py
17
18
  mem_llm/llm_client.py
@@ -11,7 +11,7 @@ long_description = (this_directory / "README.md").read_text(encoding='utf-8')
11
11
 
12
12
  setup(
13
13
  name="mem-llm",
14
- version="1.0.1",
14
+ version="1.0.2",
15
15
  author="C. Emre Karataş",
16
16
  author_email="karatasqemre@gmail.com", # PyPI için gerekli - kendi emailinizi yazın
17
17
  description="Memory-enabled AI assistant with local LLM support",
@@ -6,6 +6,7 @@ Tests all system components working together
6
6
  import unittest
7
7
  import tempfile
8
8
  import shutil
9
+ import os
9
10
 
10
11
  # Import all modules
11
12
  from mem_llm import (
@@ -162,12 +163,12 @@ logging:
162
163
  """Hata yönetimi testi"""
163
164
  # Kullanıcı olmadan chat deneme
164
165
  response = self.simple_agent.chat("Test")
165
- self.assertIn("Hata", response)
166
+ self.assertIn("Error", response) # Error mesajı İngilizce
166
167
 
167
168
  # Geçersiz araç komutu
168
169
  tool_executor = ToolExecutor(self.simple_agent.memory)
169
170
  result = tool_executor.memory_tools.execute_tool("nonexistent_tool", {})
170
- self.assertIn("bulunamadı", result)
171
+ self.assertIn("not found", result) # İngilizce mesaj
171
172
 
172
173
  def test_performance_basic(self):
173
174
  """Temel performans testi"""
@@ -191,7 +192,8 @@ logging:
191
192
 
192
193
  def test_memory_consistency(self):
193
194
  """Bellek tutarlılık testi"""
194
- user_id = "consistency_test"
195
+ import uuid
196
+ user_id = f"consistency_test_{uuid.uuid4().hex[:8]}" # Benzersiz user_id
195
197
 
196
198
  # Basit agent ile konuşmalar
197
199
  self.simple_agent.set_user(user_id)
@@ -204,7 +206,7 @@ logging:
204
206
  if hasattr(self.simple_agent.memory, 'get_recent_conversations'):
205
207
  simple_conversations = self.simple_agent.memory.get_recent_conversations(user_id)
206
208
  self.assertIsInstance(simple_conversations, list)
207
- self.assertEqual(len(simple_conversations), 3)
209
+ self.assertGreaterEqual(len(simple_conversations), 3) # En az 3 olmalı
208
210
 
209
211
 
210
212
  def run_integration_tests():
@@ -8,6 +8,7 @@ import tempfile
8
8
  import json
9
9
  import time
10
10
  import shutil
11
+ import os
11
12
 
12
13
  # Test edilecek modüller
13
14
  from mem_llm import MemAgent, MemoryManager, OllamaClient
@@ -5,6 +5,7 @@ MemoryManager Specific Tests
5
5
  import unittest
6
6
  import tempfile
7
7
  import shutil
8
+ import os
8
9
 
9
10
  from mem_llm import MemoryManager
10
11
 
@@ -5,6 +5,7 @@ Memory Tools Specific Tests
5
5
  import unittest
6
6
  import tempfile
7
7
  import shutil
8
+ import os
8
9
 
9
10
  from mem_llm import MemoryManager, MemoryTools
10
11
 
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes