mem-llm 1.0.0__tar.gz → 1.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mem-llm might be problematic. Click here for more details.

Files changed (36) hide show
  1. {mem_llm-1.0.0 → mem_llm-1.0.2}/MANIFEST.in +2 -2
  2. {mem_llm-1.0.0/mem_llm.egg-info → mem_llm-1.0.2}/PKG-INFO +5 -5
  3. {mem_llm-1.0.0 → mem_llm-1.0.2}/README.md +4 -4
  4. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/__init__.py +3 -2
  5. mem_llm-1.0.2/mem_llm/config_from_docs.py +180 -0
  6. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/mem_agent.py +12 -4
  7. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/memory_db.py +5 -0
  8. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/memory_manager.py +5 -0
  9. {mem_llm-1.0.0 → mem_llm-1.0.2/mem_llm.egg-info}/PKG-INFO +5 -5
  10. {mem_llm-1.0.0 → mem_llm-1.0.2}/mem_llm.egg-info/SOURCES.txt +11 -10
  11. mem_llm-1.0.2/mem_llm.egg-info/top_level.txt +1 -0
  12. {mem_llm-1.0.0 → mem_llm-1.0.2}/setup.py +2 -2
  13. {mem_llm-1.0.0 → mem_llm-1.0.2}/tests/test_integration.py +7 -5
  14. {mem_llm-1.0.0 → mem_llm-1.0.2}/tests/test_llm_client.py +1 -1
  15. {mem_llm-1.0.0 → mem_llm-1.0.2}/tests/test_mem_agent.py +2 -1
  16. {mem_llm-1.0.0 → mem_llm-1.0.2}/tests/test_memory_manager.py +2 -1
  17. {mem_llm-1.0.0 → mem_llm-1.0.2}/tests/test_memory_tools.py +2 -1
  18. mem_llm-1.0.0/mem_llm.egg-info/top_level.txt +0 -1
  19. {mem_llm-1.0.0 → mem_llm-1.0.2}/CHANGELOG.md +0 -0
  20. {mem_llm-1.0.0 → mem_llm-1.0.2}/INTEGRATION_GUIDE.md +0 -0
  21. {mem_llm-1.0.0 → mem_llm-1.0.2}/QUICKSTART.md +0 -0
  22. {mem_llm-1.0.0 → mem_llm-1.0.2}/QUICKSTART_TR.md +0 -0
  23. {mem_llm-1.0.0 → mem_llm-1.0.2}/STRUCTURE.md +0 -0
  24. {mem_llm-1.0.0 → mem_llm-1.0.2}/docs/CONFIG_GUIDE.md +0 -0
  25. {mem_llm-1.0.0 → mem_llm-1.0.2}/docs/INDEX.md +0 -0
  26. {mem_llm-1.0.0 → mem_llm-1.0.2}/docs/README.md +0 -0
  27. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/config.yaml.example +0 -0
  28. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/config_manager.py +0 -0
  29. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/knowledge_loader.py +0 -0
  30. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/llm_client.py +0 -0
  31. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/memory_tools.py +0 -0
  32. {mem_llm-1.0.0/memory_llm → mem_llm-1.0.2/mem_llm}/prompt_templates.py +0 -0
  33. {mem_llm-1.0.0 → mem_llm-1.0.2}/mem_llm.egg-info/dependency_links.txt +0 -0
  34. {mem_llm-1.0.0 → mem_llm-1.0.2}/mem_llm.egg-info/requires.txt +0 -0
  35. {mem_llm-1.0.0 → mem_llm-1.0.2}/requirements.txt +0 -0
  36. {mem_llm-1.0.0 → mem_llm-1.0.2}/setup.cfg +0 -0
@@ -5,7 +5,7 @@ include QUICKSTART.md
5
5
  include QUICKSTART_TR.md
6
6
  include INTEGRATION_GUIDE.md
7
7
  include STRUCTURE.md
8
- recursive-include memory_llm *.yaml
9
- recursive-include memory_llm *.example
8
+ recursive-include mem_llm *.yaml
9
+ recursive-include mem_llm *.example
10
10
  recursive-include docs *.md
11
11
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mem-llm
3
- Version: 1.0.0
3
+ Version: 1.0.2
4
4
  Summary: Memory-enabled AI assistant with local LLM support
5
5
  Home-page: https://github.com/emredeveloper/Mem-LLM
6
6
  Author: C. Emre Karataş
@@ -86,7 +86,7 @@ ollama pull granite4:tiny-h
86
86
  ### 3. Use Mem-Agent
87
87
 
88
88
  ```python
89
- from memory_llm import MemAgent
89
+ from mem_llm import MemAgent
90
90
 
91
91
  # Create agent
92
92
  agent = MemAgent(model="granite4:tiny-h")
@@ -150,7 +150,7 @@ Memory LLM/
150
150
  ### MemAgent Class
151
151
 
152
152
  ```python
153
- from memory_llm import MemAgent
153
+ from mem_llm import MemAgent
154
154
 
155
155
  agent = MemAgent(
156
156
  model="granite4:tiny-h", # Ollama model name
@@ -197,7 +197,7 @@ agent.clear_user_memory("user_id", confirm=True)
197
197
  ### MemoryManager Class
198
198
 
199
199
  ```python
200
- from memory_llm import MemoryManager
200
+ from mem_llm import MemoryManager
201
201
 
202
202
  memory = MemoryManager(memory_dir="memories")
203
203
 
@@ -222,7 +222,7 @@ results = memory.search_memory("user_id", "order")
222
222
  ### OllamaClient Class
223
223
 
224
224
  ```python
225
- from memory_llm import OllamaClient
225
+ from mem_llm import OllamaClient
226
226
 
227
227
  client = OllamaClient(model="granite4:tiny-h")
228
228
 
@@ -45,7 +45,7 @@ ollama pull granite4:tiny-h
45
45
  ### 3. Use Mem-Agent
46
46
 
47
47
  ```python
48
- from memory_llm import MemAgent
48
+ from mem_llm import MemAgent
49
49
 
50
50
  # Create agent
51
51
  agent = MemAgent(model="granite4:tiny-h")
@@ -109,7 +109,7 @@ Memory LLM/
109
109
  ### MemAgent Class
110
110
 
111
111
  ```python
112
- from memory_llm import MemAgent
112
+ from mem_llm import MemAgent
113
113
 
114
114
  agent = MemAgent(
115
115
  model="granite4:tiny-h", # Ollama model name
@@ -156,7 +156,7 @@ agent.clear_user_memory("user_id", confirm=True)
156
156
  ### MemoryManager Class
157
157
 
158
158
  ```python
159
- from memory_llm import MemoryManager
159
+ from mem_llm import MemoryManager
160
160
 
161
161
  memory = MemoryManager(memory_dir="memories")
162
162
 
@@ -181,7 +181,7 @@ results = memory.search_memory("user_id", "order")
181
181
  ### OllamaClient Class
182
182
 
183
183
  ```python
184
- from memory_llm import OllamaClient
184
+ from mem_llm import OllamaClient
185
185
 
186
186
  client = OllamaClient(model="granite4:tiny-h")
187
187
 
@@ -19,11 +19,12 @@ try:
19
19
  from .memory_db import SQLMemoryManager
20
20
  from .prompt_templates import prompt_manager
21
21
  from .config_manager import get_config
22
- __all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config"]
22
+ from .config_from_docs import create_config_from_document
23
+ __all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config", "create_config_from_document"]
23
24
  except ImportError:
24
25
  __all_pro__ = []
25
26
 
26
- __version__ = "1.0.0"
27
+ __version__ = "1.0.2"
27
28
  __author__ = "C. Emre Karataş"
28
29
 
29
30
  __all__ = [
@@ -0,0 +1,180 @@
1
+ """
2
+ Config Generator from Documents (PDF, DOCX, TXT)
3
+ Automatically creates config.yaml from business documents
4
+ """
5
+
6
+ import os
7
+ from typing import Optional, Dict, Any
8
+ import yaml
9
+
10
+
11
+ def extract_text_from_file(file_path: str) -> str:
12
+ """
13
+ Extract text from PDF, DOCX, or TXT files
14
+
15
+ Args:
16
+ file_path: Path to document
17
+
18
+ Returns:
19
+ Extracted text
20
+ """
21
+ file_ext = os.path.splitext(file_path)[1].lower()
22
+
23
+ if file_ext == '.txt':
24
+ with open(file_path, 'r', encoding='utf-8') as f:
25
+ return f.read()
26
+
27
+ elif file_ext == '.pdf':
28
+ try:
29
+ import PyPDF2
30
+ text = []
31
+ with open(file_path, 'rb') as f:
32
+ reader = PyPDF2.PdfReader(f)
33
+ for page in reader.pages:
34
+ text.append(page.extract_text())
35
+ return '\n'.join(text)
36
+ except ImportError:
37
+ return "⚠️ PyPDF2 not installed. Run: pip install PyPDF2"
38
+
39
+ elif file_ext in ['.docx', '.doc']:
40
+ try:
41
+ import docx
42
+ doc = docx.Document(file_path)
43
+ text = []
44
+ for paragraph in doc.paragraphs:
45
+ text.append(paragraph.text)
46
+ return '\n'.join(text)
47
+ except ImportError:
48
+ return "⚠️ python-docx not installed. Run: pip install python-docx"
49
+
50
+ else:
51
+ return f"⚠️ Unsupported file format: {file_ext}"
52
+
53
+
54
+ def generate_config_from_text(text: str, company_name: Optional[str] = None) -> Dict[str, Any]:
55
+ """
56
+ Generate config.yaml structure from text
57
+
58
+ Args:
59
+ text: Extracted text from document
60
+ company_name: Company name (optional)
61
+
62
+ Returns:
63
+ Config dictionary
64
+ """
65
+ # Simple config template
66
+ config = {
67
+ "usage_mode": "business", # or "personal"
68
+
69
+ "llm": {
70
+ "model": "granite4:tiny-h",
71
+ "temperature": 0.3,
72
+ "max_tokens": 300,
73
+ "ollama_url": "http://localhost:11434"
74
+ },
75
+
76
+ "memory": {
77
+ "use_sql": True,
78
+ "db_path": "memories.db",
79
+ "json_dir": "memories"
80
+ },
81
+
82
+ "response": {
83
+ "use_knowledge_base": True,
84
+ "recent_conversations_limit": 5
85
+ },
86
+
87
+ "business": {
88
+ "company_name": company_name or "Your Company",
89
+ "industry": "Technology",
90
+ "founded_year": "2024"
91
+ },
92
+
93
+ "knowledge_base": {
94
+ "auto_load": True,
95
+ "search_limit": 5
96
+ },
97
+
98
+ "logging": {
99
+ "level": "INFO",
100
+ "file": "mem_agent.log"
101
+ }
102
+ }
103
+
104
+ # Try to extract company name from text if not provided
105
+ if not company_name:
106
+ lines = text.split('\n')[:10] # First 10 lines
107
+ for line in lines:
108
+ if any(keyword in line.lower() for keyword in ['company', 'corp', 'inc', 'ltd']):
109
+ config["business"]["company_name"] = line.strip()[:50]
110
+ break
111
+
112
+ return config
113
+
114
+
115
+ def create_config_from_document(
116
+ doc_path: str,
117
+ output_path: str = "config.yaml",
118
+ company_name: Optional[str] = None
119
+ ) -> str:
120
+ """
121
+ Create config.yaml from a business document
122
+
123
+ Args:
124
+ doc_path: Path to PDF/DOCX/TXT document
125
+ output_path: Output config.yaml path
126
+ company_name: Company name (optional)
127
+
128
+ Returns:
129
+ Success message
130
+ """
131
+ if not os.path.exists(doc_path):
132
+ return f"❌ File not found: {doc_path}"
133
+
134
+ # Extract text
135
+ print(f"📄 Reading document: {doc_path}")
136
+ text = extract_text_from_file(doc_path)
137
+
138
+ if text.startswith("⚠️"):
139
+ return text # Error message
140
+
141
+ print(f"✅ Extracted {len(text)} characters")
142
+
143
+ # Generate config
144
+ config = generate_config_from_text(text, company_name)
145
+
146
+ # Save to YAML
147
+ with open(output_path, 'w', encoding='utf-8') as f:
148
+ yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
149
+
150
+ print(f"✅ Config created: {output_path}")
151
+ print(f"📌 Company: {config['business']['company_name']}")
152
+
153
+ return f"✅ Config successfully created at {output_path}"
154
+
155
+
156
+ # Simple CLI
157
+ if __name__ == "__main__":
158
+ import sys
159
+
160
+ if len(sys.argv) < 2:
161
+ print("""
162
+ 🔧 Config Generator from Documents
163
+
164
+ Usage:
165
+ python -m mem_llm.config_from_docs <document_path> [output_path] [company_name]
166
+
167
+ Examples:
168
+ python -m mem_llm.config_from_docs company_info.pdf
169
+ python -m mem_llm.config_from_docs business.docx my_config.yaml "Acme Corp"
170
+ python -m mem_llm.config_from_docs info.txt
171
+ """)
172
+ sys.exit(1)
173
+
174
+ doc_path = sys.argv[1]
175
+ output_path = sys.argv[2] if len(sys.argv) > 2 else "config.yaml"
176
+ company_name = sys.argv[3] if len(sys.argv) > 3 else None
177
+
178
+ result = create_config_from_document(doc_path, output_path, company_name)
179
+ print(result)
180
+
@@ -111,6 +111,8 @@ class MemAgent:
111
111
  self.logger.info(f"JSON memory system active: {json_dir}")
112
112
 
113
113
  # LLM client
114
+ self.model = model # Store model name
115
+ self.use_sql = use_sql # Store SQL usage flag
114
116
  self.llm = OllamaClient(model, ollama_url)
115
117
  self.logger.info(f"LLM client ready: {model}")
116
118
 
@@ -217,7 +219,12 @@ class MemAgent:
217
219
  self.logger.info(f"Prompt template loaded: {template_name} (Mode: {self.usage_mode})")
218
220
  except Exception as e:
219
221
  self.logger.error(f"Prompt template loading error: {e}")
220
- self.current_system_prompt = f"You are a helpful assistant in {self.usage_mode} mode."
222
+ # Simple, short and effective default prompt
223
+ self.current_system_prompt = """You are a helpful AI assistant. Be concise and direct.
224
+ - Give short, clear answers (2-3 sentences max)
225
+ - Only use information from conversation history
226
+ - If you don't know something, say so
227
+ - Don't make assumptions or hallucinate"""
221
228
 
222
229
  def check_setup(self) -> Dict[str, Any]:
223
230
  """Check system setup"""
@@ -330,7 +337,8 @@ class MemAgent:
330
337
  recent_limit = self.config.get("response.recent_conversations_limit", 5) if hasattr(self, 'config') and self.config else 5
331
338
  recent_convs = self.memory.get_recent_conversations(user_id, recent_limit)
332
339
 
333
- for conv in reversed(recent_convs):
340
+ # Add conversations in chronological order (oldest first)
341
+ for conv in recent_convs:
334
342
  messages.append({"role": "user", "content": conv.get('user_message', '')})
335
343
  messages.append({"role": "assistant", "content": conv.get('bot_response', '')})
336
344
  except Exception as e:
@@ -350,8 +358,8 @@ class MemAgent:
350
358
  try:
351
359
  response = self.llm.chat(
352
360
  messages=messages,
353
- temperature=self.config.get("llm.temperature", 0.7) if hasattr(self, 'config') and self.config else 0.7,
354
- max_tokens=self.config.get("llm.max_tokens", 500) if hasattr(self, 'config') and self.config else 500
361
+ temperature=self.config.get("llm.temperature", 0.3) if hasattr(self, 'config') and self.config else 0.3, # Lower = more focused
362
+ max_tokens=self.config.get("llm.max_tokens", 300) if hasattr(self, 'config') and self.config else 300 # Shorter responses
355
363
  )
356
364
  except Exception as e:
357
365
  self.logger.error(f"LLM response error: {e}")
@@ -164,6 +164,11 @@ class SQLMemoryManager:
164
164
  self.conn.commit()
165
165
  return interaction_id
166
166
 
167
+ # Alias for compatibility
168
+ def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> int:
169
+ """Alias for add_interaction"""
170
+ return self.add_interaction(user_id, user_message, bot_response, metadata)
171
+
167
172
  def get_recent_conversations(self, user_id: str, limit: int = 10) -> List[Dict]:
168
173
  """
169
174
  Kullanıcının son konuşmalarını getirir
@@ -101,6 +101,11 @@ class MemoryManager:
101
101
  self.conversations[user_id].append(interaction)
102
102
  self.save_memory(user_id)
103
103
 
104
+ # Alias for compatibility
105
+ def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> None:
106
+ """Alias for add_interaction"""
107
+ return self.add_interaction(user_id, user_message, bot_response, metadata)
108
+
104
109
  def update_profile(self, user_id: str, updates: Dict) -> None:
105
110
  """
106
111
  Update user profile
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mem-llm
3
- Version: 1.0.0
3
+ Version: 1.0.2
4
4
  Summary: Memory-enabled AI assistant with local LLM support
5
5
  Home-page: https://github.com/emredeveloper/Mem-LLM
6
6
  Author: C. Emre Karataş
@@ -86,7 +86,7 @@ ollama pull granite4:tiny-h
86
86
  ### 3. Use Mem-Agent
87
87
 
88
88
  ```python
89
- from memory_llm import MemAgent
89
+ from mem_llm import MemAgent
90
90
 
91
91
  # Create agent
92
92
  agent = MemAgent(model="granite4:tiny-h")
@@ -150,7 +150,7 @@ Memory LLM/
150
150
  ### MemAgent Class
151
151
 
152
152
  ```python
153
- from memory_llm import MemAgent
153
+ from mem_llm import MemAgent
154
154
 
155
155
  agent = MemAgent(
156
156
  model="granite4:tiny-h", # Ollama model name
@@ -197,7 +197,7 @@ agent.clear_user_memory("user_id", confirm=True)
197
197
  ### MemoryManager Class
198
198
 
199
199
  ```python
200
- from memory_llm import MemoryManager
200
+ from mem_llm import MemoryManager
201
201
 
202
202
  memory = MemoryManager(memory_dir="memories")
203
203
 
@@ -222,7 +222,7 @@ results = memory.search_memory("user_id", "order")
222
222
  ### OllamaClient Class
223
223
 
224
224
  ```python
225
- from memory_llm import OllamaClient
225
+ from mem_llm import OllamaClient
226
226
 
227
227
  client = OllamaClient(model="granite4:tiny-h")
228
228
 
@@ -10,21 +10,22 @@ setup.py
10
10
  docs/CONFIG_GUIDE.md
11
11
  docs/INDEX.md
12
12
  docs/README.md
13
+ mem_llm/__init__.py
14
+ mem_llm/config.yaml.example
15
+ mem_llm/config_from_docs.py
16
+ mem_llm/config_manager.py
17
+ mem_llm/knowledge_loader.py
18
+ mem_llm/llm_client.py
19
+ mem_llm/mem_agent.py
20
+ mem_llm/memory_db.py
21
+ mem_llm/memory_manager.py
22
+ mem_llm/memory_tools.py
23
+ mem_llm/prompt_templates.py
13
24
  mem_llm.egg-info/PKG-INFO
14
25
  mem_llm.egg-info/SOURCES.txt
15
26
  mem_llm.egg-info/dependency_links.txt
16
27
  mem_llm.egg-info/requires.txt
17
28
  mem_llm.egg-info/top_level.txt
18
- memory_llm/__init__.py
19
- memory_llm/config.yaml.example
20
- memory_llm/config_manager.py
21
- memory_llm/knowledge_loader.py
22
- memory_llm/llm_client.py
23
- memory_llm/mem_agent.py
24
- memory_llm/memory_db.py
25
- memory_llm/memory_manager.py
26
- memory_llm/memory_tools.py
27
- memory_llm/prompt_templates.py
28
29
  tests/test_integration.py
29
30
  tests/test_llm_client.py
30
31
  tests/test_mem_agent.py
@@ -0,0 +1 @@
1
+ mem_llm
@@ -11,7 +11,7 @@ long_description = (this_directory / "README.md").read_text(encoding='utf-8')
11
11
 
12
12
  setup(
13
13
  name="mem-llm",
14
- version="1.0.0",
14
+ version="1.0.2",
15
15
  author="C. Emre Karataş",
16
16
  author_email="karatasqemre@gmail.com", # PyPI için gerekli - kendi emailinizi yazın
17
17
  description="Memory-enabled AI assistant with local LLM support",
@@ -45,7 +45,7 @@ setup(
45
45
  },
46
46
  include_package_data=True,
47
47
  package_data={
48
- 'memory_llm': ['config.yaml.example'],
48
+ 'mem_llm': ['config.yaml.example'],
49
49
  },
50
50
  keywords="llm ai memory agent chatbot ollama local",
51
51
  project_urls={
@@ -6,9 +6,10 @@ Tests all system components working together
6
6
  import unittest
7
7
  import tempfile
8
8
  import shutil
9
+ import os
9
10
 
10
11
  # Import all modules
11
- from memory_llm import (
12
+ from mem_llm import (
12
13
  MemAgent,
13
14
  MemoryManager,
14
15
  SQLMemoryManager,
@@ -162,12 +163,12 @@ logging:
162
163
  """Hata yönetimi testi"""
163
164
  # Kullanıcı olmadan chat deneme
164
165
  response = self.simple_agent.chat("Test")
165
- self.assertIn("Hata", response)
166
+ self.assertIn("Error", response) # Error mesajı İngilizce
166
167
 
167
168
  # Geçersiz araç komutu
168
169
  tool_executor = ToolExecutor(self.simple_agent.memory)
169
170
  result = tool_executor.memory_tools.execute_tool("nonexistent_tool", {})
170
- self.assertIn("bulunamadı", result)
171
+ self.assertIn("not found", result) # İngilizce mesaj
171
172
 
172
173
  def test_performance_basic(self):
173
174
  """Temel performans testi"""
@@ -191,7 +192,8 @@ logging:
191
192
 
192
193
  def test_memory_consistency(self):
193
194
  """Bellek tutarlılık testi"""
194
- user_id = "consistency_test"
195
+ import uuid
196
+ user_id = f"consistency_test_{uuid.uuid4().hex[:8]}" # Benzersiz user_id
195
197
 
196
198
  # Basit agent ile konuşmalar
197
199
  self.simple_agent.set_user(user_id)
@@ -204,7 +206,7 @@ logging:
204
206
  if hasattr(self.simple_agent.memory, 'get_recent_conversations'):
205
207
  simple_conversations = self.simple_agent.memory.get_recent_conversations(user_id)
206
208
  self.assertIsInstance(simple_conversations, list)
207
- self.assertEqual(len(simple_conversations), 3)
209
+ self.assertGreaterEqual(len(simple_conversations), 3) # En az 3 olmalı
208
210
 
209
211
 
210
212
  def run_integration_tests():
@@ -4,7 +4,7 @@ LLM Client Specific Tests
4
4
 
5
5
  import unittest
6
6
 
7
- from memory_llm import OllamaClient
7
+ from mem_llm import OllamaClient
8
8
 
9
9
 
10
10
  class TestLLMClient(unittest.TestCase):
@@ -8,9 +8,10 @@ import tempfile
8
8
  import json
9
9
  import time
10
10
  import shutil
11
+ import os
11
12
 
12
13
  # Test edilecek modüller
13
- from memory_llm import MemAgent, MemoryManager, OllamaClient
14
+ from mem_llm import MemAgent, MemoryManager, OllamaClient
14
15
 
15
16
 
16
17
  class TestMemAgent(unittest.TestCase):
@@ -5,8 +5,9 @@ MemoryManager Specific Tests
5
5
  import unittest
6
6
  import tempfile
7
7
  import shutil
8
+ import os
8
9
 
9
- from memory_llm import MemoryManager
10
+ from mem_llm import MemoryManager
10
11
 
11
12
 
12
13
  class TestMemoryManager(unittest.TestCase):
@@ -5,8 +5,9 @@ Memory Tools Specific Tests
5
5
  import unittest
6
6
  import tempfile
7
7
  import shutil
8
+ import os
8
9
 
9
- from memory_llm import MemoryManager, MemoryTools
10
+ from mem_llm import MemoryManager, MemoryTools
10
11
 
11
12
 
12
13
  class TestMemoryTools(unittest.TestCase):
@@ -1 +0,0 @@
1
- memory_llm
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes