mem-llm 1.0.1__tar.gz → 1.0.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mem-llm might be problematic. Click here for more details.
- {mem_llm-1.0.1/mem_llm.egg-info → mem_llm-1.0.3}/PKG-INFO +1 -1
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/__init__.py +3 -2
- mem_llm-1.0.3/mem_llm/config_from_docs.py +180 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/llm_client.py +5 -1
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/mem_agent.py +90 -4
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/memory_db.py +5 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/memory_manager.py +5 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3/mem_llm.egg-info}/PKG-INFO +1 -1
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm.egg-info/SOURCES.txt +1 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/setup.py +1 -1
- {mem_llm-1.0.1 → mem_llm-1.0.3}/tests/test_integration.py +6 -4
- {mem_llm-1.0.1 → mem_llm-1.0.3}/tests/test_mem_agent.py +1 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/tests/test_memory_manager.py +1 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/tests/test_memory_tools.py +1 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/CHANGELOG.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/INTEGRATION_GUIDE.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/MANIFEST.in +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/QUICKSTART.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/QUICKSTART_TR.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/README.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/STRUCTURE.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/docs/CONFIG_GUIDE.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/docs/INDEX.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/docs/README.md +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/config.yaml.example +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/config_manager.py +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/knowledge_loader.py +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/memory_tools.py +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm/prompt_templates.py +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm.egg-info/dependency_links.txt +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm.egg-info/requires.txt +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/mem_llm.egg-info/top_level.txt +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/requirements.txt +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/setup.cfg +0 -0
- {mem_llm-1.0.1 → mem_llm-1.0.3}/tests/test_llm_client.py +0 -0
|
@@ -19,11 +19,12 @@ try:
|
|
|
19
19
|
from .memory_db import SQLMemoryManager
|
|
20
20
|
from .prompt_templates import prompt_manager
|
|
21
21
|
from .config_manager import get_config
|
|
22
|
-
|
|
22
|
+
from .config_from_docs import create_config_from_document
|
|
23
|
+
__all_pro__ = ["SQLMemoryManager", "prompt_manager", "get_config", "create_config_from_document"]
|
|
23
24
|
except ImportError:
|
|
24
25
|
__all_pro__ = []
|
|
25
26
|
|
|
26
|
-
__version__ = "1.0.
|
|
27
|
+
__version__ = "1.0.3"
|
|
27
28
|
__author__ = "C. Emre Karataş"
|
|
28
29
|
|
|
29
30
|
__all__ = [
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Config Generator from Documents (PDF, DOCX, TXT)
|
|
3
|
+
Automatically creates config.yaml from business documents
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import os
|
|
7
|
+
from typing import Optional, Dict, Any
|
|
8
|
+
import yaml
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def extract_text_from_file(file_path: str) -> str:
    """
    Extract plain text from a PDF, DOCX, or TXT document.

    Errors are reported by returning a message string prefixed with "⚠️"
    (callers such as ``create_config_from_document`` test for this prefix),
    so this function never raises for missing optional dependencies or
    unsupported formats.

    Args:
        file_path: Path to document

    Returns:
        Extracted text, or a "⚠️ ..."-prefixed error message
    """
    file_ext = os.path.splitext(file_path)[1].lower()

    if file_ext == '.txt':
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    elif file_ext == '.pdf':
        try:
            import PyPDF2
            text = []
            with open(file_path, 'rb') as f:
                reader = PyPDF2.PdfReader(f)
                for page in reader.pages:
                    # extract_text() can yield None/empty for image-only
                    # pages; normalize to "" so join() cannot fail.
                    text.append(page.extract_text() or "")
            return '\n'.join(text)
        except ImportError:
            return "⚠️ PyPDF2 not installed. Run: pip install PyPDF2"

    elif file_ext in ['.docx', '.doc']:
        # NOTE(review): python-docx only reads the modern .docx format;
        # a legacy binary .doc file will raise inside docx.Document.
        try:
            import docx
            doc = docx.Document(file_path)
            text = []
            for paragraph in doc.paragraphs:
                text.append(paragraph.text)
            return '\n'.join(text)
        except ImportError:
            return "⚠️ python-docx not installed. Run: pip install python-docx"

    else:
        return f"⚠️ Unsupported file format: {file_ext}"
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def generate_config_from_text(text: str, company_name: Optional[str] = None) -> Dict[str, Any]:
    """
    Generate a config.yaml structure from document text.

    Args:
        text: Extracted text from document
        company_name: Company name (optional; when omitted, a best-effort
            guess is made from the first lines of the document)

    Returns:
        Config dictionary ready to be dumped as YAML
    """
    # Default template; only business.company_name is derived from the text.
    config = {
        "usage_mode": "business",  # or "personal"

        "llm": {
            "model": "granite4:tiny-h",
            "temperature": 0.3,
            "max_tokens": 300,
            "ollama_url": "http://localhost:11434"
        },

        "memory": {
            "use_sql": True,
            "db_path": "memories.db",
            "json_dir": "memories"
        },

        "response": {
            "use_knowledge_base": True,
            "recent_conversations_limit": 5
        },

        "business": {
            "company_name": company_name or "Your Company",
            "industry": "Technology",
            "founded_year": "2024"
        },

        "knowledge_base": {
            "auto_load": True,
            "search_limit": 5
        },

        "logging": {
            "level": "INFO",
            "file": "mem_agent.log"
        }
    }

    # Try to extract company name from text if not provided.
    if not company_name:
        for line in text.split('\n')[:10]:  # First 10 lines only
            # Match whole words: a plain substring test would false-positive
            # on words like "include" ('inc') or "since" ('inc'/'in').
            words = line.lower().replace(',', ' ').replace('.', ' ').split()
            if any(keyword in words for keyword in ('company', 'corp', 'inc', 'ltd')):
                config["business"]["company_name"] = line.strip()[:50]
                break

    return config
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def create_config_from_document(
|
|
116
|
+
doc_path: str,
|
|
117
|
+
output_path: str = "config.yaml",
|
|
118
|
+
company_name: Optional[str] = None
|
|
119
|
+
) -> str:
|
|
120
|
+
"""
|
|
121
|
+
Create config.yaml from a business document
|
|
122
|
+
|
|
123
|
+
Args:
|
|
124
|
+
doc_path: Path to PDF/DOCX/TXT document
|
|
125
|
+
output_path: Output config.yaml path
|
|
126
|
+
company_name: Company name (optional)
|
|
127
|
+
|
|
128
|
+
Returns:
|
|
129
|
+
Success message
|
|
130
|
+
"""
|
|
131
|
+
if not os.path.exists(doc_path):
|
|
132
|
+
return f"❌ File not found: {doc_path}"
|
|
133
|
+
|
|
134
|
+
# Extract text
|
|
135
|
+
print(f"📄 Reading document: {doc_path}")
|
|
136
|
+
text = extract_text_from_file(doc_path)
|
|
137
|
+
|
|
138
|
+
if text.startswith("⚠️"):
|
|
139
|
+
return text # Error message
|
|
140
|
+
|
|
141
|
+
print(f"✅ Extracted {len(text)} characters")
|
|
142
|
+
|
|
143
|
+
# Generate config
|
|
144
|
+
config = generate_config_from_text(text, company_name)
|
|
145
|
+
|
|
146
|
+
# Save to YAML
|
|
147
|
+
with open(output_path, 'w', encoding='utf-8') as f:
|
|
148
|
+
yaml.dump(config, f, default_flow_style=False, allow_unicode=True)
|
|
149
|
+
|
|
150
|
+
print(f"✅ Config created: {output_path}")
|
|
151
|
+
print(f"📌 Company: {config['business']['company_name']}")
|
|
152
|
+
|
|
153
|
+
return f"✅ Config successfully created at {output_path}"
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
# Simple CLI
# Invoked via: python -m mem_llm.config_from_docs <document_path> [output_path] [company_name]
if __name__ == "__main__":
    import sys

    # No document argument given: print usage help and exit non-zero.
    if len(sys.argv) < 2:
        print("""
🔧 Config Generator from Documents

Usage:
  python -m mem_llm.config_from_docs <document_path> [output_path] [company_name]

Examples:
  python -m mem_llm.config_from_docs company_info.pdf
  python -m mem_llm.config_from_docs business.docx my_config.yaml "Acme Corp"
  python -m mem_llm.config_from_docs info.txt
""")
        sys.exit(1)

    # Positional arguments; output path and company name are optional.
    doc_path = sys.argv[1]
    output_path = sys.argv[2] if len(sys.argv) > 2 else "config.yaml"
    company_name = sys.argv[3] if len(sys.argv) > 3 else None

    # create_config_from_document returns a status string, not an exit code.
    result = create_config_from_document(doc_path, output_path, company_name)
    print(result)
|
|
180
|
+
|
|
@@ -107,7 +107,11 @@ class OllamaClient:
|
|
|
107
107
|
"stream": False,
|
|
108
108
|
"options": {
|
|
109
109
|
"temperature": temperature,
|
|
110
|
-
"num_predict": max_tokens
|
|
110
|
+
"num_predict": max_tokens,
|
|
111
|
+
"num_ctx": 2048, # Context window
|
|
112
|
+
"top_k": 40, # Limit vocab
|
|
113
|
+
"top_p": 0.9, # Nucleus sampling
|
|
114
|
+
"stop": ["\n\n\n", "---"] # Stop sequences
|
|
111
115
|
}
|
|
112
116
|
}
|
|
113
117
|
|
|
@@ -111,6 +111,8 @@ class MemAgent:
|
|
|
111
111
|
self.logger.info(f"JSON memory system active: {json_dir}")
|
|
112
112
|
|
|
113
113
|
# LLM client
|
|
114
|
+
self.model = model # Store model name
|
|
115
|
+
self.use_sql = use_sql # Store SQL usage flag
|
|
114
116
|
self.llm = OllamaClient(model, ollama_url)
|
|
115
117
|
self.logger.info(f"LLM client ready: {model}")
|
|
116
118
|
|
|
@@ -217,7 +219,23 @@ class MemAgent:
|
|
|
217
219
|
self.logger.info(f"Prompt template loaded: {template_name} (Mode: {self.usage_mode})")
|
|
218
220
|
except Exception as e:
|
|
219
221
|
self.logger.error(f"Prompt template loading error: {e}")
|
|
220
|
-
|
|
222
|
+
# Simple, short and effective default prompt
|
|
223
|
+
self.current_system_prompt = """You are a concise AI assistant. Be EXTREMELY brief.
|
|
224
|
+
|
|
225
|
+
RULES (MANDATORY):
|
|
226
|
+
1. MAX 1-2 SHORT sentences per response
|
|
227
|
+
2. When user shares info: Just say "Got it!" or "Noted!"
|
|
228
|
+
3. Answer questions: ONE sentence, direct
|
|
229
|
+
4. NO lists, NO explanations, NO examples
|
|
230
|
+
5. Use conversation history when relevant
|
|
231
|
+
|
|
232
|
+
EXAMPLES:
|
|
233
|
+
User: "My name is Alice" → You: "Nice to meet you, Alice!"
|
|
234
|
+
User: "My favorite food is pizza" → You: "Got it!"
|
|
235
|
+
User: "What's my name?" → You: "Your name is Alice."
|
|
236
|
+
User: "Tell me about Python" → You: "Python is a versatile programming language for web, data science, and AI."
|
|
237
|
+
|
|
238
|
+
BE BRIEF OR USER WILL LEAVE!"""
|
|
221
239
|
|
|
222
240
|
def check_setup(self) -> Dict[str, Any]:
|
|
223
241
|
"""Check system setup"""
|
|
@@ -330,7 +348,8 @@ class MemAgent:
|
|
|
330
348
|
recent_limit = self.config.get("response.recent_conversations_limit", 5) if hasattr(self, 'config') and self.config else 5
|
|
331
349
|
recent_convs = self.memory.get_recent_conversations(user_id, recent_limit)
|
|
332
350
|
|
|
333
|
-
|
|
351
|
+
# Add conversations in chronological order (oldest first)
|
|
352
|
+
for conv in recent_convs:
|
|
334
353
|
messages.append({"role": "user", "content": conv.get('user_message', '')})
|
|
335
354
|
messages.append({"role": "assistant", "content": conv.get('bot_response', '')})
|
|
336
355
|
except Exception as e:
|
|
@@ -350,8 +369,8 @@ class MemAgent:
|
|
|
350
369
|
try:
|
|
351
370
|
response = self.llm.chat(
|
|
352
371
|
messages=messages,
|
|
353
|
-
temperature=self.config.get("llm.temperature", 0.
|
|
354
|
-
max_tokens=self.config.get("llm.max_tokens",
|
|
372
|
+
temperature=self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2, # Very focused
|
|
373
|
+
max_tokens=self.config.get("llm.max_tokens", 150) if hasattr(self, 'config') and self.config else 150 # Max 2-3 sentences
|
|
355
374
|
)
|
|
356
375
|
except Exception as e:
|
|
357
376
|
self.logger.error(f"LLM response error: {e}")
|
|
@@ -366,11 +385,78 @@ class MemAgent:
|
|
|
366
385
|
bot_response=response,
|
|
367
386
|
metadata=metadata
|
|
368
387
|
)
|
|
388
|
+
|
|
389
|
+
# Extract and save user info to profile
|
|
390
|
+
self._update_user_profile(user_id, message, response)
|
|
369
391
|
except Exception as e:
|
|
370
392
|
self.logger.error(f"Interaction saving error: {e}")
|
|
371
393
|
|
|
372
394
|
return response
|
|
395
|
+
|
|
396
|
+
def _update_user_profile(self, user_id: str, message: str, response: str):
|
|
397
|
+
"""Extract user info from conversation and update profile"""
|
|
398
|
+
if not hasattr(self.memory, 'update_profile'):
|
|
399
|
+
return
|
|
400
|
+
|
|
401
|
+
msg_lower = message.lower()
|
|
402
|
+
updates = {}
|
|
403
|
+
|
|
404
|
+
# Extract name
|
|
405
|
+
if "my name is" in msg_lower or "i am" in msg_lower or "i'm" in msg_lower:
|
|
406
|
+
# Simple name extraction
|
|
407
|
+
for phrase in ["my name is ", "i am ", "i'm "]:
|
|
408
|
+
if phrase in msg_lower:
|
|
409
|
+
name_part = message[msg_lower.index(phrase) + len(phrase):].strip()
|
|
410
|
+
name = name_part.split()[0] if name_part else None
|
|
411
|
+
if name and len(name) > 1:
|
|
412
|
+
updates['name'] = name.strip('.,!?')
|
|
413
|
+
break
|
|
414
|
+
|
|
415
|
+
# Extract favorite food
|
|
416
|
+
if "favorite food" in msg_lower or "favourite food" in msg_lower:
|
|
417
|
+
if "is" in msg_lower:
|
|
418
|
+
food = msg_lower.split("is")[-1].strip().strip('.,!?')
|
|
419
|
+
if food and len(food) < 50:
|
|
420
|
+
updates['favorite_food'] = food
|
|
421
|
+
|
|
422
|
+
# Extract location
|
|
423
|
+
if "i live in" in msg_lower or "i'm from" in msg_lower or "from" in msg_lower:
|
|
424
|
+
for phrase in ["i live in ", "i'm from ", "from "]:
|
|
425
|
+
if phrase in msg_lower:
|
|
426
|
+
loc = message[msg_lower.index(phrase) + len(phrase):].strip()
|
|
427
|
+
location = loc.split()[0] if loc else None
|
|
428
|
+
if location and len(location) > 2:
|
|
429
|
+
updates['location'] = location.strip('.,!?')
|
|
430
|
+
break
|
|
431
|
+
|
|
432
|
+
# Save updates
|
|
433
|
+
if updates:
|
|
434
|
+
try:
|
|
435
|
+
self.memory.update_profile(user_id, updates)
|
|
436
|
+
self.logger.debug(f"Profile updated for {user_id}: {updates}")
|
|
437
|
+
except:
|
|
438
|
+
pass
|
|
373
439
|
|
|
440
|
+
def get_user_profile(self, user_id: Optional[str] = None) -> Dict:
    """
    Get user's profile info.

    Args:
        user_id: User ID (uses current_user if not specified)

    Returns:
        User profile dictionary; empty dict when no user is set or the
        profile cannot be loaded.
    """
    uid = user_id or self.current_user
    if not uid:
        return {}

    try:
        memory_data = self.memory.load_memory(uid)
        return memory_data.get('profile', {})
    except Exception as e:
        # A missing/corrupt memory file is not fatal for callers; fall back
        # to {} but log why (a bare `except:` hid all failures before).
        self.logger.debug(f"Profile load failed for {uid}: {e}")
        return {}
|
|
459
|
+
|
|
374
460
|
def add_knowledge(self, category: str, question: str, answer: str,
|
|
375
461
|
keywords: Optional[List[str]] = None, priority: int = 0) -> int:
|
|
376
462
|
"""Add new record to knowledge base"""
|
|
@@ -164,6 +164,11 @@ class SQLMemoryManager:
|
|
|
164
164
|
self.conn.commit()
|
|
165
165
|
return interaction_id
|
|
166
166
|
|
|
167
|
+
# Alias for compatibility
def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> int:
    """Alias for add_interaction.

    Kept so callers written against the other memory backend's
    conversation-logging API keep working.

    Args:
        user_id: Owner of the conversation entry
        user_message: The user's message text
        bot_response: The assistant's reply text
        metadata: Optional extra data stored with the interaction

    Returns:
        The new interaction's ID, as returned by add_interaction.
    """
    return self.add_interaction(user_id, user_message, bot_response, metadata)
|
|
171
|
+
|
|
167
172
|
def get_recent_conversations(self, user_id: str, limit: int = 10) -> List[Dict]:
|
|
168
173
|
"""
|
|
169
174
|
Kullanıcının son konuşmalarını getirir
|
|
@@ -101,6 +101,11 @@ class MemoryManager:
|
|
|
101
101
|
self.conversations[user_id].append(interaction)
|
|
102
102
|
self.save_memory(user_id)
|
|
103
103
|
|
|
104
|
+
# Alias for compatibility
def add_conversation(self, user_id: str, user_message: str, bot_response: str, metadata: Optional[Dict] = None) -> None:
    """Alias for add_interaction.

    Kept so the JSON and SQL memory backends expose the same
    conversation-logging API to callers.

    Args:
        user_id: Owner of the conversation entry
        user_message: The user's message text
        bot_response: The assistant's reply text
        metadata: Optional extra data stored with the interaction
    """
    return self.add_interaction(user_id, user_message, bot_response, metadata)
|
|
108
|
+
|
|
104
109
|
def update_profile(self, user_id: str, updates: Dict) -> None:
|
|
105
110
|
"""
|
|
106
111
|
Update user profile
|
|
@@ -11,7 +11,7 @@ long_description = (this_directory / "README.md").read_text(encoding='utf-8')
|
|
|
11
11
|
|
|
12
12
|
setup(
|
|
13
13
|
name="mem-llm",
|
|
14
|
-
version="1.0.
|
|
14
|
+
version="1.0.3",
|
|
15
15
|
author="C. Emre Karataş",
|
|
16
16
|
author_email="karatasqemre@gmail.com", # PyPI için gerekli - kendi emailinizi yazın
|
|
17
17
|
description="Memory-enabled AI assistant with local LLM support",
|
|
@@ -6,6 +6,7 @@ Tests all system components working together
|
|
|
6
6
|
import unittest
|
|
7
7
|
import tempfile
|
|
8
8
|
import shutil
|
|
9
|
+
import os
|
|
9
10
|
|
|
10
11
|
# Import all modules
|
|
11
12
|
from mem_llm import (
|
|
@@ -162,12 +163,12 @@ logging:
|
|
|
162
163
|
"""Hata yönetimi testi"""
|
|
163
164
|
# Kullanıcı olmadan chat deneme
|
|
164
165
|
response = self.simple_agent.chat("Test")
|
|
165
|
-
self.assertIn("
|
|
166
|
+
self.assertIn("Error", response) # Error mesajı İngilizce
|
|
166
167
|
|
|
167
168
|
# Geçersiz araç komutu
|
|
168
169
|
tool_executor = ToolExecutor(self.simple_agent.memory)
|
|
169
170
|
result = tool_executor.memory_tools.execute_tool("nonexistent_tool", {})
|
|
170
|
-
self.assertIn("
|
|
171
|
+
self.assertIn("not found", result) # İngilizce mesaj
|
|
171
172
|
|
|
172
173
|
def test_performance_basic(self):
|
|
173
174
|
"""Temel performans testi"""
|
|
@@ -191,7 +192,8 @@ logging:
|
|
|
191
192
|
|
|
192
193
|
def test_memory_consistency(self):
|
|
193
194
|
"""Bellek tutarlılık testi"""
|
|
194
|
-
|
|
195
|
+
import uuid
|
|
196
|
+
user_id = f"consistency_test_{uuid.uuid4().hex[:8]}" # Benzersiz user_id
|
|
195
197
|
|
|
196
198
|
# Basit agent ile konuşmalar
|
|
197
199
|
self.simple_agent.set_user(user_id)
|
|
@@ -204,7 +206,7 @@ logging:
|
|
|
204
206
|
if hasattr(self.simple_agent.memory, 'get_recent_conversations'):
|
|
205
207
|
simple_conversations = self.simple_agent.memory.get_recent_conversations(user_id)
|
|
206
208
|
self.assertIsInstance(simple_conversations, list)
|
|
207
|
-
self.
|
|
209
|
+
self.assertGreaterEqual(len(simple_conversations), 3) # En az 3 olmalı
|
|
208
210
|
|
|
209
211
|
|
|
210
212
|
def run_integration_tests():
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|