mem_llm-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mem_llm/__init__.py +98 -0
- mem_llm/api_server.py +595 -0
- mem_llm/base_llm_client.py +201 -0
- mem_llm/builtin_tools.py +311 -0
- mem_llm/cli.py +254 -0
- mem_llm/clients/__init__.py +22 -0
- mem_llm/clients/lmstudio_client.py +393 -0
- mem_llm/clients/ollama_client.py +354 -0
- mem_llm/config.yaml.example +52 -0
- mem_llm/config_from_docs.py +180 -0
- mem_llm/config_manager.py +231 -0
- mem_llm/conversation_summarizer.py +372 -0
- mem_llm/data_export_import.py +640 -0
- mem_llm/dynamic_prompt.py +298 -0
- mem_llm/knowledge_loader.py +88 -0
- mem_llm/llm_client.py +225 -0
- mem_llm/llm_client_factory.py +260 -0
- mem_llm/logger.py +129 -0
- mem_llm/mem_agent.py +1611 -0
- mem_llm/memory_db.py +612 -0
- mem_llm/memory_manager.py +321 -0
- mem_llm/memory_tools.py +253 -0
- mem_llm/prompt_security.py +304 -0
- mem_llm/response_metrics.py +221 -0
- mem_llm/retry_handler.py +193 -0
- mem_llm/thread_safe_db.py +301 -0
- mem_llm/tool_system.py +429 -0
- mem_llm/vector_store.py +278 -0
- mem_llm/web_launcher.py +129 -0
- mem_llm/web_ui/README.md +44 -0
- mem_llm/web_ui/__init__.py +7 -0
- mem_llm/web_ui/index.html +641 -0
- mem_llm/web_ui/memory.html +569 -0
- mem_llm/web_ui/metrics.html +75 -0
- mem_llm-2.0.0.dist-info/METADATA +667 -0
- mem_llm-2.0.0.dist-info/RECORD +39 -0
- mem_llm-2.0.0.dist-info/WHEEL +5 -0
- mem_llm-2.0.0.dist-info/entry_points.txt +3 -0
- mem_llm-2.0.0.dist-info/top_level.txt +1 -0
mem_llm/mem_agent.py
ADDED
@@ -0,0 +1,1611 @@
"""
Mem-Agent: Unified Powerful System
==================================

A powerful Mem-Agent that combines all features in a single system.

Features:
- ✅ SQL and JSON memory support
- ✅ Prompt templates system
- ✅ Knowledge base integration
- ✅ User tools system
- ✅ Configuration management
- ✅ Advanced logging
- ✅ Production-ready structure

Usage:
```python
from mem_llm import MemAgent

# Simple usage
agent = MemAgent()

# Advanced usage
agent = MemAgent(
    config_file="config.yaml",
    use_sql=True,
    load_knowledge_base=True
)
```
"""

from typing import Optional, Dict, List, Any, Union, Iterator
from datetime import datetime
import logging
import json
import os
import time

# Core dependencies
from .memory_manager import MemoryManager
from .llm_client import OllamaClient  # Backward compatibility
from .llm_client_factory import LLMClientFactory
from .base_llm_client import BaseLLMClient
from .response_metrics import ChatResponse, ResponseMetricsAnalyzer, calculate_confidence
from .tool_system import ToolRegistry, ToolCallParser, format_tools_for_prompt

# Advanced features (optional)
try:
    from .memory_db import SQLMemoryManager
    from .knowledge_loader import KnowledgeLoader
    from .config_manager import get_config
    from .memory_tools import ToolExecutor, MemoryTools
    from .dynamic_prompt import dynamic_prompt_builder
    ADVANCED_AVAILABLE = True
except ImportError:
    ADVANCED_AVAILABLE = False
    print("⚠️ Advanced features not available (install additional packages)")


class MemAgent:
    """
    Powerful and unified Mem-Agent system

    Production-ready assistant that combines all features in one place.
    """

    def __init__(self,
                 model: str = "granite4:3b",
                 backend: str = "ollama",
                 config_file: Optional[str] = None,
                 use_sql: bool = True,
                 memory_dir: Optional[str] = None,
                 db_path: Optional[str] = None,
                 load_knowledge_base: bool = True,
                 ollama_url: str = "http://localhost:11434",
                 base_url: Optional[str] = None,
                 api_key: Optional[str] = None,
                 auto_detect_backend: bool = False,
                 check_connection: bool = False,
                 enable_security: bool = False,
                 enable_vector_search: bool = False,
                 embedding_model: str = "all-MiniLM-L6-v2",
                 enable_tools: bool = False,
                 tools: Optional[List] = None,
                 **llm_kwargs):
        """
        Args:
            model: LLM model to use
            backend: LLM backend ('ollama', 'lmstudio') - NEW in v1.3.0
            config_file: Configuration file (optional)
            use_sql: Use SQL database (True) or JSON (False)
            memory_dir: Memory directory (for JSON mode or if db_path not specified)
            db_path: SQLite database path (for SQL mode, e.g., ":memory:" or "path/to/db.db")
            load_knowledge_base: Automatically load knowledge base
            ollama_url: Ollama API URL (backward compatibility, use base_url instead)
            base_url: Backend API URL (for local backends) - NEW in v1.3.0
            api_key: API key for cloud backends (unused by the local backends)
            auto_detect_backend: Auto-detect available LLM backend - NEW in v1.3.0
            check_connection: Verify LLM connection on startup (default: False)
            enable_security: Enable prompt injection protection (v1.1.0+, default: False for backward compatibility)
            enable_vector_search: Enable semantic/vector search for KB (v1.3.2+, requires chromadb) - NEW
            embedding_model: Embedding model for vector search (default: "all-MiniLM-L6-v2") - NEW
            enable_tools: Enable the LLM tool-calling system - NEW in v2.0.0
            tools: Custom tool functions to register (used with enable_tools=True)
            **llm_kwargs: Additional backend-specific parameters

        Examples:
            # Default Ollama
            agent = MemAgent()

            # LM Studio
            agent = MemAgent(backend='lmstudio', model='llama-3-8b')

            # Auto-detect
            agent = MemAgent(auto_detect_backend=True)
        """
        # Setup logging first
        self._setup_logging()

        # Security features (v1.1.0+)
        self.enable_security = enable_security
        self.security_detector = None
        self.security_sanitizer = None

        if enable_security:
            try:
                from .prompt_security import PromptInjectionDetector, InputSanitizer
                self.security_detector = PromptInjectionDetector()
                self.security_sanitizer = InputSanitizer()
                self.logger.info("🔒 Security features enabled (prompt injection protection)")
            except ImportError:
                self.logger.warning("⚠️ Security features requested but not available")

        # Load configuration
        self.config = None
        if ADVANCED_AVAILABLE and config_file:
            try:
                self.config = get_config(config_file)
            except Exception:
                print("⚠️ Config file could not be loaded, using default settings")

        # Determine usage mode
        self.usage_mode = "business"  # default
        if self.config:
            self.usage_mode = self.config.get("usage_mode", "business")
        elif config_file:
            # Config file exists but couldn't be loaded
            self.usage_mode = "business"
        else:
            # No config file
            self.usage_mode = "personal"

        # Initialize flags first
        self.has_knowledge_base: bool = False  # Track KB status
        self.has_tools: bool = False  # Track tools status (v1.3.x)

        # Tool system (v2.0.0+)
        self.enable_tools = enable_tools
        self.tool_registry = None
        if enable_tools:
            self.tool_registry = ToolRegistry()
            self.has_tools = True

            # Register custom tools if provided
            if tools:
                for tool in tools:
                    self.tool_registry.register_function(tool)
                self.logger.info(f"🔧 Registered {len(tools)} custom tools")

            builtin_count = len(self.tool_registry.tools)
            self.logger.info(f"🛠️ Tool system enabled ({builtin_count} tools available)")

        # Memory system
        if use_sql and ADVANCED_AVAILABLE:
            # SQL memory (advanced)
            # Determine database path
            if db_path:
                # Use provided db_path (can be ":memory:" for in-memory DB)
                final_db_path = db_path
            elif memory_dir:
                final_db_path = memory_dir
            elif self.config:
                final_db_path = self.config.get("memory.db_path", "memories/memories.db")
            else:
                final_db_path = "memories/memories.db"

            # Get vector search settings from config or parameters
            vector_search_enabled = enable_vector_search
            vector_model = embedding_model

            if self.config:
                vector_search_enabled = self.config.get("knowledge_base.enable_vector_search", vector_search_enabled)
                vector_model = self.config.get("knowledge_base.embedding_model", vector_model)

            # Ensure memories directory exists (skip for :memory:)
            if final_db_path != ":memory:":
                db_dir = os.path.dirname(final_db_path)
                if db_dir and not os.path.exists(db_dir):
                    os.makedirs(db_dir, exist_ok=True)

            self.memory = SQLMemoryManager(
                final_db_path,
                enable_vector_search=vector_search_enabled,
                embedding_model=vector_model
            )
            self.logger.info(f"SQL memory system active: {final_db_path}")
            if vector_search_enabled:
                self.logger.info(f"🔍 Vector search enabled (model: {vector_model})")
        else:
            # JSON memory (simple)
            json_dir = memory_dir or (self.config.get("memory.json_dir", "memories") if self.config else "memories")
            self.memory = MemoryManager(json_dir)
            self.logger.info(f"JSON memory system active: {json_dir}")

        # Active user and system prompt
        self.current_user: Optional[str] = None
        self.current_system_prompt: Optional[str] = None

        # LLM client
        self.model = model  # Store model name
        self.backend = backend  # Store backend name
        self.use_sql = use_sql  # Store SQL usage flag

        # Initialize LLM client (v1.3.0: Multi-backend support)
        # Prepare backend configuration
        llm_config = llm_kwargs.copy()

        # Handle backward compatibility: ollama_url -> base_url
        if base_url is None and backend == "ollama":
            base_url = ollama_url

        # Add base_url for local backends
        if base_url and backend in ['ollama', 'lmstudio']:
            llm_config['base_url'] = base_url

        # Add api_key for cloud backends
        # Auto-detect backend if requested
        if auto_detect_backend:
            self.logger.info("🔍 Auto-detecting available LLM backend...")
            self.llm = LLMClientFactory.auto_detect()
            if self.llm:
                detected_backend = self.llm.__class__.__name__
                self.logger.info(f"✅ Detected and using: {detected_backend}")
            else:
                self.logger.error("❌ No LLM backend available.")
                raise RuntimeError(
                    "No LLM backend detected. Please start a local LLM service (Ollama or LM Studio)."
                )
        else:
            # Create client using factory
            try:
                self.llm = LLMClientFactory.create(
                    backend=backend,
                    model=model,
                    **llm_config
                )
                self.logger.info(f"✅ Initialized {backend} backend with model: {model}")
            except Exception as e:
                self.logger.error(f"❌ Failed to initialize {backend} backend: {e}")
                raise

        # Optional connection check on startup
        if check_connection:
            backend_name = backend if not auto_detect_backend else "LLM service"
            self.logger.info(f"Checking {backend_name} connection...")
            if not self.llm.check_connection():
                error_msg = f"❌ ERROR: Cannot connect to {backend_name}!\n"

                if backend == "ollama":
                    error_msg += (
                        "   \n"
                        "   Solutions:\n"
                        "   1. Start Ollama: ollama serve\n"
                        "   2. Check if Ollama is running: http://localhost:11434\n"
                        "   3. Verify base_url parameter is correct\n"
                    )
                elif backend == "lmstudio":
                    error_msg += (
                        "   \n"
                        "   Solutions:\n"
                        "   1. Start LM Studio\n"
                        "   2. Load a model in LM Studio\n"
                        "   3. Start local server (default: http://localhost:1234)\n"
                        "   4. Verify base_url parameter is correct\n"
                    )

                error_msg += "   \n   To skip this check, use: MemAgent(check_connection=False)"
                self.logger.error(error_msg)
                raise ConnectionError(f"{backend_name} not available")

            # Check if model exists (for backends that support listing)
            try:
                available_models = self.llm.list_models()
                if available_models and model not in available_models:
                    error_msg = (
                        f"❌ ERROR: Model '{model}' not found in {backend}!\n"
                        f"   \n"
                        f"   Available models: {', '.join(available_models[:5])}\n"
                        f"   Total: {len(available_models)} models available\n"
                        f"   \n"
                        f"   To skip this check, use: MemAgent(check_connection=False)"
                    )
                    self.logger.error(error_msg)
                    raise ValueError(f"Model '{model}' not available")
            except ValueError:
                raise
            except Exception:
                # Some backends may not support list_models, skip check
                pass

            self.logger.info(f"✅ {backend_name} connection verified, model '{model}' ready")

        self.logger.info(f"LLM client ready: {model} on {backend}")

        # Advanced features (if available)
        if ADVANCED_AVAILABLE:
            self._setup_advanced_features(load_knowledge_base)
        else:
            print("⚠️ Load additional packages for advanced features")
            # Build basic prompt even without advanced features
            self._build_dynamic_system_prompt()

        # Tool system (always available)
        self.tool_executor = ToolExecutor(self.memory)

        # Metrics tracking system (v1.3.1+)
        self.metrics_analyzer = ResponseMetricsAnalyzer()
        self.track_metrics = True  # Can be disabled if needed

        self.logger.info("MemAgent successfully initialized")

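    # Construction sketch (hedged; model names, URL, and path are illustrative,
    # not prescribed by this module):
    #
    #   agent = MemAgent()                                    # Ollama defaults
    #   agent = MemAgent(backend="lmstudio", model="llama-3-8b",
    #                    base_url="http://localhost:1234")    # LM Studio
    #   agent = MemAgent(use_sql=True, db_path=":memory:")    # throwaway SQL DB
    #   agent = MemAgent(auto_detect_backend=True, check_connection=True)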
    # === UNIFIED SYSTEM METHODS ===

    def _setup_logging(self) -> None:
        """Setup logging system"""
        log_config = {}
        if ADVANCED_AVAILABLE and hasattr(self, 'config') and self.config:
            log_config = self.config.get("logging", {})

        # Default to WARNING level to keep console clean (users can override in config)
        default_level = "WARNING"

        if log_config.get("enabled", True):
            # Only console logging (no file) - keep workspace clean
            logging.basicConfig(
                level=getattr(logging, log_config.get("level", default_level)),
                format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                handlers=[
                    logging.StreamHandler()  # Console only
                ]
            )

        self.logger = logging.getLogger("MemAgent")

        # Set default level for mem_llm loggers
        logging.getLogger("mem_llm").setLevel(getattr(logging, log_config.get("level", default_level)))

    def _setup_advanced_features(self, load_knowledge_base: bool) -> None:
        """Setup advanced features"""
        # Load knowledge base (according to usage mode)
        if load_knowledge_base:
            kb_loader = KnowledgeLoader(self.memory)

            # Get KB settings from config
            kb_config = {}
            if hasattr(self, 'config') and self.config:
                kb_config = self.config.get("knowledge_base", {})

            # Select default KB according to usage mode
            if self.usage_mode == "business":
                default_kb = kb_config.get("default_kb", "business_tech_support")
            else:  # personal
                default_kb = kb_config.get("default_kb", "personal_learning")

            try:
                if default_kb == "ecommerce":
                    count = kb_loader.load_default_ecommerce_kb()
                    self.logger.info(f"E-commerce knowledge base loaded: {count} records")
                    self.has_knowledge_base = True  # KB loaded!
                elif default_kb == "tech_support":
                    count = kb_loader.load_default_tech_support_kb()
                    self.logger.info(f"Technical support knowledge base loaded: {count} records")
                    self.has_knowledge_base = True  # KB loaded!
                elif default_kb == "business_tech_support":
                    count = kb_loader.load_default_tech_support_kb()
                    self.logger.info(f"Corporate technical support knowledge base loaded: {count} records")
                    self.has_knowledge_base = True  # KB loaded!
                elif default_kb == "personal_learning":
                    # Simple KB for personal learning
                    count = kb_loader.load_default_ecommerce_kb()  # Temporarily use the same KB
                    self.logger.info(f"Personal learning knowledge base loaded: {count} records")
                    self.has_knowledge_base = True  # KB loaded!
            except Exception as e:
                self.logger.error(f"Knowledge base loading error: {e}")
                self.has_knowledge_base = False

        # Build dynamic system prompt based on active features
        self._build_dynamic_system_prompt()

    def _build_dynamic_system_prompt(self) -> None:
        """Build dynamic system prompt based on active features"""
        if not ADVANCED_AVAILABLE:
            # Fallback simple prompt
            self.current_system_prompt = "You are a helpful AI assistant."
            return

        # Get config data
        business_config = None
        personal_config = None

        if hasattr(self, 'config') and self.config:
            if self.usage_mode == "business":
                business_config = self.config.get("business", {})
            else:
                personal_config = self.config.get("personal", {})

        # Build prompt using dynamic builder
        try:
            self.current_system_prompt = dynamic_prompt_builder.build_prompt(
                usage_mode=self.usage_mode,
                has_knowledge_base=self.has_knowledge_base,
                has_tools=self.enable_tools,  # Advertised in the prompt when enabled (v2.0+)
                is_multi_user=False,  # Always False for now, per-session state
                business_config=business_config,
                personal_config=personal_config,
                memory_type="sql" if self.use_sql else "json"
            )

            # Add tool information to prompt if tools are enabled (v2.0+)
            if self.enable_tools and self.tool_registry:
                tools_list = self.tool_registry.list_tools()
                tools_prompt = format_tools_for_prompt(tools_list)
                self.current_system_prompt += f"\n\n{tools_prompt}"

            # Log feature summary
            feature_summary = dynamic_prompt_builder.get_feature_summary(
                has_knowledge_base=self.has_knowledge_base,
                has_tools=self.enable_tools,
                is_multi_user=False,
                memory_type="sql" if self.use_sql else "json"
            )
            self.logger.info(f"Dynamic prompt built: {feature_summary}")

        except Exception as e:
            self.logger.error(f"Dynamic prompt building error: {e}")
            # Fallback
            self.current_system_prompt = "You are a helpful AI assistant."

    def check_setup(self) -> Dict[str, Any]:
        """Check system setup"""
        ollama_running = self.llm.check_connection()
        models = self.llm.list_models()
        model_exists = self.llm.model in models

        # Memory statistics
        try:
            if hasattr(self.memory, 'get_statistics'):
                stats = self.memory.get_statistics()
            else:
                # Simple statistics for JSON memory
                stats = {
                    "total_users": 0,
                    "total_interactions": 0,
                    "knowledge_base_entries": 0
                }
        except Exception:
            stats = {
                "total_users": 0,
                "total_interactions": 0,
                "knowledge_base_entries": 0
            }

        return {
            "ollama_running": ollama_running,
            "available_models": models,
            "target_model": self.llm.model,
            "model_ready": model_exists,
            "memory_backend": "SQL" if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager) else "JSON",
            "total_users": stats.get('total_users', 0),
            "total_interactions": stats.get('total_interactions', 0),
            "kb_entries": stats.get('knowledge_base_entries', 0),
            "status": "ready" if (ollama_running and model_exists) else "not_ready"
        }

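    # Usage sketch (hedged): verifying the stack before chatting. The keys
    # follow the dict returned by check_setup() above.
    #
    #   status = agent.check_setup()
    #   if status["status"] != "ready":
    #       print("Backend or model missing. Available:", status["available_models"])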
    def set_user(self, user_id: str, name: Optional[str] = None) -> None:
        """
        Set active user

        Args:
            user_id: User ID
            name: User name (optional)
        """
        self.current_user = user_id

        # Add user for SQL memory
        if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager):
            self.memory.add_user(user_id, name)

        # Update user name (if provided)
        if name:
            if hasattr(self.memory, 'update_user_profile'):
                self.memory.update_user_profile(user_id, {"name": name})

        self.logger.debug(f"Active user set: {user_id}")

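    # Usage sketch (hedged): pin the session to a user once, then chat without
    # repeating the ID.
    #
    #   agent.set_user("alice", name="Alice")
    #   agent.chat("Please remember that I prefer tea.")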
    def _execute_tool_calls(self, response_text: str, max_iterations: int = 3) -> str:
        """
        Execute tool calls found in LLM response and get results.

        Args:
            response_text: LLM response that may contain tool calls
            max_iterations: Maximum number of tool execution iterations

        Returns:
            Final response after all tool executions
        """
        iteration = 0
        current_text = response_text

        while iteration < max_iterations:
            # Check if response contains tool calls
            if not ToolCallParser.has_tool_call(current_text):
                break

            # Parse tool calls
            tool_calls = ToolCallParser.parse(current_text)
            if not tool_calls:
                break

            self.logger.info(f"🔧 Detected {len(tool_calls)} tool call(s)")

            # Execute each tool
            tool_results = []
            for call in tool_calls:
                tool_name = call["tool"]
                arguments = call["arguments"]

                self.logger.info(f"   Executing: {tool_name}({arguments})")

                # Execute tool
                result = self.tool_registry.execute(tool_name, **arguments)

                # Handle memory-specific tools (self.memory is the backend set in __init__)
                if result.status.value == "success" and isinstance(result.result, str):
                    if result.result.startswith("MEMORY_SEARCH:"):
                        keyword = result.result.split(":", 1)[1]
                        try:
                            search_results = self.memory.search_conversations(keyword)
                            if search_results:
                                formatted = f"Found {len(search_results)} results for '{keyword}':\n"
                                for idx, conv in enumerate(search_results[:5], 1):
                                    formatted += f"{idx}. {conv.get('user', 'N/A')}: {conv.get('message', 'N/A')[:100]}...\n"
                                result.result = formatted
                            else:
                                result.result = f"No conversations found containing '{keyword}'"
                        except Exception as e:
                            result.result = f"Memory search error: {e}"

                    elif result.result == "MEMORY_USER_INFO":
                        try:
                            user_info = f"Current user: {self.current_user or 'Not set'}"
                            if self.current_user:
                                conv_count = len(self.memory.get_conversation_history(self.current_user))
                                user_info += f"\nTotal conversations: {conv_count}"
                            result.result = user_info
                        except Exception as e:
                            result.result = f"User info error: {e}"

                    elif result.result.startswith("MEMORY_LIST_CONVERSATIONS:"):
                        try:
                            limit = int(result.result.split(":", 1)[1])
                            history = self.memory.get_conversation_history(self.current_user or "default", limit=limit)
                            if history:
                                formatted = f"Last {len(history)} conversations:\n"
                                for idx, conv in enumerate(history, 1):
                                    role = conv.get('role', 'unknown')
                                    msg = conv.get('content', '')[:80]
                                    formatted += f"{idx}. [{role}] {msg}...\n"
                                result.result = formatted
                            else:
                                result.result = "No conversation history found"
                        except Exception as e:
                            result.result = f"Conversation list error: {e}"

                if result.status.value == "success":  # Compare with enum value
                    self.logger.info(f"   ✅ Success: {result.result}")
                    tool_results.append(f"Tool '{tool_name}' returned: {result.result}")
                else:
                    self.logger.warning(f"   ❌ Error: {result.error}")
                    tool_results.append(f"Tool '{tool_name}' failed with error: {result.error}")

            # Remove tool call syntax from response
            clean_text = ToolCallParser.remove_tool_calls(current_text)

            # If we have tool results, ask LLM to continue with the results
            if tool_results:
                results_text = "\n".join(tool_results)

                # Build follow-up message for LLM
                follow_up = f"{clean_text}\n\nTool Results:\n{results_text}\n\nPlease provide the final answer to the user based on these results."

                # Get LLM response with tool results
                try:
                    messages = [
                        {"role": "system", "content": "You are a helpful assistant. Use the tool results to answer the user's question."},
                        {"role": "user", "content": follow_up}
                    ]

                    llm_response = self.llm.chat(
                        messages=messages,
                        temperature=0.7,
                        max_tokens=500
                    )

                    current_text = llm_response
                    iteration += 1
                except Exception as e:
                    self.logger.error(f"Error getting follow-up response: {e}")
                    # Return what we have
                    return f"{clean_text}\n\n{results_text}"
            else:
                # No tool results, return clean text
                return clean_text

        return current_text

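    # Tool registration sketch (hedged; assumes register_function() derives the
    # tool schema from the function signature, as the constructor's
    # registration loop suggests):
    #
    #   def get_time() -> str:
    #       """Return the current time as an ISO-8601 string."""
    #       from datetime import datetime
    #       return datetime.now().isoformat()
    #
    #   agent = MemAgent(enable_tools=True, tools=[get_time])
    #   print(agent.chat("What time is it?", user_id="demo"))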
    def chat(self, message: str, user_id: Optional[str] = None,
             metadata: Optional[Dict] = None, return_metrics: bool = False) -> Union[str, ChatResponse]:
        """
        Chat with user

        Args:
            message: User's message
            user_id: User ID (optional)
            metadata: Additional information
            return_metrics: If True, returns ChatResponse with metrics; if False, returns only text (default)

        Returns:
            Bot's response (str) or ChatResponse object with metrics
        """
        # Start timing
        start_time = time.time()

        # Determine user
        if user_id:
            self.set_user(user_id)
        elif not self.current_user:
            error_response = "Error: User ID not specified."
            if return_metrics:
                return ChatResponse(
                    text=error_response,
                    confidence=1.0,
                    source="tool",
                    latency=(time.time() - start_time) * 1000,
                    timestamp=datetime.now(),
                    kb_results_count=0,
                    metadata={"error": True}
                )
            return error_response

        user_id = self.current_user

        # Initialize tracking variables
        kb_results_count = 0
        used_kb = False
        used_memory = False
        response_source = "model"  # Default source

        # Security check (v1.1.0+) - opt-in
        security_info = {}
        if self.enable_security and self.security_detector and self.security_sanitizer:
            # Detect injection attempts
            risk_level = self.security_detector.get_risk_level(message)
            is_suspicious, patterns = self.security_detector.detect(message)

            if risk_level in ["high", "critical"]:
                self.logger.warning(f"🚨 Blocked {risk_level} risk input from {user_id}: {len(patterns)} patterns detected")
                return "⚠️ Your message was blocked due to security concerns. Please rephrase your request."

            if is_suspicious:
                self.logger.info(f"⚠️ Suspicious input from {user_id} (risk: {risk_level}): {len(patterns)} patterns")

            # Sanitize input
            original_message = message
            message = self.security_sanitizer.sanitize(message, aggressive=(risk_level == "medium"))

            if message != original_message:
                self.logger.debug(f"Input sanitized for {user_id}")

            security_info = {
                "risk_level": risk_level,
                "sanitized": message != original_message,
                "patterns_detected": len(patterns)
            }

        # Check tool commands first
        tool_result = self.tool_executor.execute_user_command(message, user_id)
        if tool_result:
            latency = (time.time() - start_time) * 1000
            if return_metrics:
                return ChatResponse(
                    text=tool_result,
                    confidence=0.95,  # Tools are deterministic
                    source="tool",
                    latency=latency,
                    timestamp=datetime.now(),
                    kb_results_count=0,
                    metadata={"tool_command": True}
                )
            return tool_result

        # Knowledge base search (if using SQL)
        kb_context = ""
        if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager):
            # Check config only if it exists, otherwise always use KB
            use_kb = True
            kb_limit = 5

            if hasattr(self, 'config') and self.config:
                use_kb = self.config.get("response.use_knowledge_base", True)
                kb_limit = self.config.get("knowledge_base.search_limit", 5)

            if use_kb:
                try:
                    kb_results = self.memory.search_knowledge(query=message, limit=kb_limit)

                    if kb_results:
                        kb_results_count = len(kb_results)
                        used_kb = True
                        kb_context = "\n\n📚 RELEVANT KNOWLEDGE BASE:\n"
                        for i, result in enumerate(kb_results, 1):
                            kb_context += f"{i}. Q: {result['question']}\n   A: {result['answer']}\n"
                        kb_context += "\n⚠️ USE THIS INFORMATION TO ANSWER! Be brief but accurate.\n"
                except Exception as e:
                    self.logger.error(f"Knowledge base search error: {e}")

        # Get conversation history
        messages = []
        if self.current_system_prompt:
            messages.append({"role": "system", "content": self.current_system_prompt})

        # Add memory history
        try:
            if hasattr(self.memory, 'get_recent_conversations'):
                recent_limit = self.config.get("response.recent_conversations_limit", 5) if hasattr(self, 'config') and self.config else 5
                recent_convs = self.memory.get_recent_conversations(user_id, recent_limit)

                if recent_convs:
                    used_memory = True

                    # Add conversations in chronological order (oldest first)
                    for conv in recent_convs:
                        messages.append({"role": "user", "content": conv.get('user_message', '')})
                        messages.append({"role": "assistant", "content": conv.get('bot_response', '')})
        except Exception as e:
            self.logger.error(f"Memory history loading error: {e}")

        # Add current message WITH knowledge base context (if available)
        final_message = message
        if kb_context:
            # Inject KB directly into user message for maximum visibility
            final_message = f"{kb_context}\n\nUser Question: {message}"

        messages.append({"role": "user", "content": final_message})

        # Get response from LLM
        temperature = self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2
        try:
            response = self.llm.chat(
                messages=messages,
                temperature=temperature,
                max_tokens=self.config.get("llm.max_tokens", 2000) if hasattr(self, 'config') and self.config else 2000  # Enough tokens for thinking models
            )

            # Fallback: If response is empty (can happen with thinking models)
            if not response or response.strip() == "":
                self.logger.warning(f"Empty response from model {self.llm.model}, retrying with simpler prompt...")

                # Retry with just the current message, no history
                simple_messages = [
                    {"role": "system", "content": "You are a helpful assistant. Respond directly and concisely."},
                    {"role": "user", "content": message}
                ]
                response = self.llm.chat(simple_messages, temperature=0.7, max_tokens=2000)

                # If still empty, provide fallback
                if not response or response.strip() == "":
                    response = "I'm having trouble responding right now. Could you rephrase your question?"
                    self.logger.error(f"Model {self.llm.model} returned empty response even after retry")

        except Exception as e:
            self.logger.error(f"LLM response error: {e}")
            response = "Sorry, I cannot respond right now. Please try again later."

        # Execute tool calls if tools are enabled (v2.0+)
        if self.enable_tools and self.tool_registry and response:
            try:
                response = self._execute_tool_calls(response)
            except Exception as e:
                self.logger.error(f"Tool execution error: {e}")
                # Continue with original response

        # Calculate latency
        latency = (time.time() - start_time) * 1000

        # Determine response source
        if used_kb and used_memory:
            response_source = "hybrid"
        elif used_kb:
            response_source = "knowledge_base"
        else:
            response_source = "model"

        # Calculate confidence score
        confidence = calculate_confidence(
            kb_results_count=kb_results_count,
            temperature=temperature,
            used_memory=used_memory,
            response_length=len(response)
        )

        # Build enriched metadata with response metrics
        enriched_metadata = {}
        if metadata:
            enriched_metadata.update(metadata)
        enriched_metadata.update({
            "confidence": round(confidence, 3),
            "source": response_source,
            "latency_ms": round(latency, 1),
            "kb_results_count": kb_results_count,
            "used_memory": used_memory,
            "used_kb": used_kb,
            "response_length": len(response),
            "model": self.model,
            "temperature": temperature
        })

        # Save interaction
        try:
            if hasattr(self.memory, 'add_interaction'):
                self.memory.add_interaction(
                    user_id=user_id,
                    user_message=message,
                    bot_response=response,
                    metadata=enriched_metadata
                )

                # Extract and save user info to profile
                self._update_user_profile(user_id, message, response)

                # Always update summary after each conversation (JSON mode)
                if not self.use_sql and hasattr(self.memory, 'conversations'):
                    self._update_conversation_summary(user_id)
                    # Save summary update
                    if user_id in self.memory.user_profiles:
                        self.memory.save_memory(user_id)
        except Exception as e:
            self.logger.error(f"Interaction saving error: {e}")

        # Create response metrics object
        chat_response = ChatResponse(
            text=response,
            confidence=confidence,
            source=response_source,
            latency=latency,
            timestamp=datetime.now(),
            kb_results_count=kb_results_count,
            metadata={
                "model": self.model,
                "temperature": temperature,
                "used_memory": used_memory,
                "used_kb": used_kb,
                "user_id": user_id
            }
        )

        # Track metrics if enabled
        if self.track_metrics:
            self.metrics_analyzer.add_metric(chat_response)

        # Return based on user preference
        if return_metrics:
            return chat_response
        else:
            return response

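    # Usage sketch (hedged): the two return shapes of chat(). Field names match
    # the ChatResponse constructed above.
    #
    #   text = agent.chat("Hi, my name is Alice", user_id="alice")
    #   detailed = agent.chat("What's my name?", user_id="alice",
    #                         return_metrics=True)
    #   print(detailed.text, detailed.confidence, detailed.source,
    #         f"{detailed.latency:.0f} ms")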
    def chat_stream(self, message: str, user_id: Optional[str] = None, metadata: Optional[Dict] = None) -> Iterator[str]:
        """
        Chat with user using streaming response (real-time)

        This method streams the response as it's generated, providing a better UX
        for longer responses (like ChatGPT's typing effect).

        Args:
            message: User's message
            user_id: User ID (optional)
            metadata: Additional information

        Yields:
            Response text chunks as they arrive from the LLM

        Example:
            >>> agent = MemAgent()
            >>> agent.set_user("alice")
            >>> for chunk in agent.chat_stream("What is Python?"):
            ...     print(chunk, end='', flush=True)
            Python is a programming language...
        """
        # Start timing
        start_time = time.time()

        # Determine user
        if user_id:
            self.set_user(user_id)
        elif not self.current_user:
            yield "Error: User ID not specified."
            return

        user_id = self.current_user

        # Initialize tracking variables
        kb_results_count = 0
        used_kb = False
        used_memory = False

        # Security check (v1.1.0+) - opt-in
        if self.enable_security and self.security_detector and self.security_sanitizer:
            risk_level = self.security_detector.get_risk_level(message)
            is_suspicious, patterns = self.security_detector.detect(message)

            if risk_level in ["high", "critical"]:
                self.logger.warning(f"🚨 Blocked {risk_level} risk input from {user_id}")
                yield "⚠️ Your message was blocked due to security concerns. Please rephrase your request."
                return

            # Sanitize input
            message = self.security_sanitizer.sanitize(message, aggressive=(risk_level == "medium"))

        # Check tool commands first
        tool_result = self.tool_executor.execute_user_command(message, user_id)
        if tool_result:
            yield tool_result
            return

        # Knowledge base search (if using SQL)
        kb_context = ""
        if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager):
            use_kb = True
            kb_limit = 5

            if hasattr(self, 'config') and self.config:
                use_kb = self.config.get("response.use_knowledge_base", True)
                kb_limit = self.config.get("knowledge_base.search_limit", 5)

            if use_kb:
                try:
                    kb_results = self.memory.search_knowledge(query=message, limit=kb_limit)

                    if kb_results:
                        kb_results_count = len(kb_results)
                        used_kb = True
                        kb_context = "\n\n📚 RELEVANT KNOWLEDGE BASE:\n"
                        for i, result in enumerate(kb_results, 1):
                            kb_context += f"{i}. Q: {result['question']}\n   A: {result['answer']}\n"
                        kb_context += "\n⚠️ USE THIS INFORMATION TO ANSWER! Be brief but accurate.\n"
                except Exception as e:
                    self.logger.error(f"Knowledge base search error: {e}")

        # Get conversation history
        messages = []
        if self.current_system_prompt:
            messages.append({"role": "system", "content": self.current_system_prompt})

        # Add memory history
        try:
            if hasattr(self.memory, 'get_recent_conversations'):
                recent_limit = self.config.get("response.recent_conversations_limit", 5) if hasattr(self, 'config') and self.config else 5
                recent_convs = self.memory.get_recent_conversations(user_id, recent_limit)

                if recent_convs:
                    used_memory = True

                    # Add conversations in chronological order
                    for conv in recent_convs:
                        messages.append({"role": "user", "content": conv.get('user_message', '')})
                        messages.append({"role": "assistant", "content": conv.get('bot_response', '')})
        except Exception as e:
            self.logger.error(f"Memory history loading error: {e}")

        # Add current message WITH knowledge base context (if available)
        final_message = message
        if kb_context:
            final_message = f"{kb_context}\n\nUser Question: {message}"

        messages.append({"role": "user", "content": final_message})

        # Get streaming response from LLM
        temperature = self.config.get("llm.temperature", 0.2) if hasattr(self, 'config') and self.config else 0.2
        max_tokens = self.config.get("llm.max_tokens", 2000) if hasattr(self, 'config') and self.config else 2000

        # Collect full response for saving
        full_response = ""

        try:
            # Stream chunks from LLM
            for chunk in self.llm.chat_stream(
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens
            ):
                full_response += chunk
                yield chunk

        except Exception as e:
            error_msg = f"Streaming error: {str(e)}"
            self.logger.error(error_msg)
            yield f"\n\n⚠️ {error_msg}"
            return

        # Calculate latency
        latency = (time.time() - start_time) * 1000

        # Determine response source
        response_source = "model"
        if used_memory and used_kb:
            response_source = "hybrid"
        elif used_kb:
            response_source = "knowledge_base"

        # Calculate confidence
        confidence = calculate_confidence(
            kb_results_count=kb_results_count,
            temperature=temperature,
            used_memory=used_memory,
            response_length=len(full_response)
        )

        # Build enriched metadata
        enriched_metadata = {}
        if metadata:
            enriched_metadata.update(metadata)
        enriched_metadata.update({
            "confidence": round(confidence, 3),
            "source": response_source,
            "latency_ms": round(latency, 1),
            "kb_results_count": kb_results_count,
            "used_memory": used_memory,
            "used_kb": used_kb,
            "response_length": len(full_response),
            "model": self.model,
            "temperature": temperature,
            "streaming": True
        })

        # Save interaction
        try:
            if hasattr(self.memory, 'add_interaction'):
                self.memory.add_interaction(
                    user_id=user_id,
                    user_message=message,
                    bot_response=full_response,
                    metadata=enriched_metadata
                )

                # Extract and save user info to profile
                self._update_user_profile(user_id, message, full_response)

                # Update summary (JSON mode)
                if not self.use_sql and hasattr(self.memory, 'conversations'):
                    self._update_conversation_summary(user_id)
                    if user_id in self.memory.user_profiles:
                        self.memory.save_memory(user_id)
        except Exception as e:
            self.logger.error(f"Interaction saving error: {e}")

        # Track metrics if enabled
        if self.track_metrics:
            chat_response = ChatResponse(
                text=full_response,
                confidence=confidence,
                source=response_source,
                latency=latency,
                timestamp=datetime.now(),
                kb_results_count=kb_results_count,
                metadata={
                    "model": self.model,
                    "temperature": temperature,
                    "used_memory": used_memory,
                    "used_kb": used_kb,
                    "user_id": user_id,
                    "streaming": True
                }
            )
            self.metrics_analyzer.add_metric(chat_response)

    def _update_user_profile(self, user_id: str, message: str, response: str):
        """Extract user info from conversation and update profile"""
        msg_lower = message.lower()

        # Extract information
        extracted = {}

        # Extract name (English and Turkish triggers; "adım"/"ismim" = "my name is")
        if "my name is" in msg_lower or "i am" in msg_lower or "i'm" in msg_lower or "adım" in msg_lower or "ismim" in msg_lower:
            for phrase in ["my name is ", "i am ", "i'm ", "adım ", "ismim ", "benim adım "]:
                if phrase in msg_lower:
                    name_part = message[msg_lower.index(phrase) + len(phrase):].strip()
                    name = name_part.split()[0] if name_part else None
                    if name and len(name) > 1:
                        extracted['name'] = name.strip('.,!?')
                        break

        # Extract favorite food ("sevdiğim yemek" = "food I like")
        if "favorite food" in msg_lower or "favourite food" in msg_lower or "sevdiğim yemek" in msg_lower or "en sevdiğim" in msg_lower:
            if "is" in msg_lower or ":" in msg_lower:
                food = msg_lower.split("is")[-1].strip() if "is" in msg_lower else msg_lower.split(":")[-1].strip()
                food = food.strip('.,!?')
                if food and len(food) < 50:
                    extracted['favorite_food'] = food

        # Extract location ("yaşıyorum" = "I live")
        if "i live in" in msg_lower or "i'm from" in msg_lower or "yaşıyorum" in msg_lower or "yaşadığım" in msg_lower:
            for phrase in ["i live in ", "i'm from ", "from ", "yaşıyorum", "yaşadığım yer", "yaşadığım şehir"]:
                if phrase in msg_lower:
                    loc = message[msg_lower.index(phrase) + len(phrase):].strip()
                    location = loc.split()[0] if loc else None
                    if location and len(location) > 2:
                        extracted['location'] = location.strip('.,!?')
                        break

        # Save updates
        if extracted:
            try:
                # SQL memory - store in preferences JSON
                if hasattr(self.memory, 'update_user_profile'):
                    # Get current profile
                    profile = self.memory.get_user_profile(user_id) or {}

                    # Update name directly if extracted
                    updates = {}
                    if 'name' in extracted:
                        updates['name'] = extracted.pop('name')

                    # Store other info in preferences
                    if extracted:
                        current_prefs = profile.get('preferences')
                        if current_prefs:
                            try:
                                prefs = json.loads(current_prefs) if isinstance(current_prefs, str) else current_prefs
                            except Exception:
                                prefs = {}
                        else:
                            prefs = {}

                        prefs.update(extracted)
                        updates['preferences'] = json.dumps(prefs)

                    if updates:
                        self.memory.update_user_profile(user_id, updates)
                        self.logger.debug(f"Profile updated for {user_id}: {extracted}")

                # JSON memory - direct update
                elif hasattr(self.memory, 'update_profile'):
                    # Load memory if not already loaded
                    if user_id not in self.memory.user_profiles:
                        self.memory.load_memory(user_id)

                    # For JSON memory, merge into preferences
                    current_profile = self.memory.user_profiles.get(user_id, {})
                    current_prefs = current_profile.get('preferences', {})

                    # Handle case where preferences might be a JSON string
                    if isinstance(current_prefs, str):
                        try:
                            current_prefs = json.loads(current_prefs)
                        except Exception:
                            current_prefs = {}

                    # Update preferences
                    if extracted:
                        current_prefs.update(extracted)
                        self.memory.user_profiles[user_id]['preferences'] = current_prefs

                    # Update name if extracted
                    if 'name' in extracted:
                        self.memory.user_profiles[user_id]['name'] = extracted['name']

                    # Auto-generate summary from conversation history
                    self._update_conversation_summary(user_id)

                    # Save to disk
                    self.memory.save_memory(user_id)
                    self.logger.debug(f"Profile updated for {user_id}: {extracted}")
            except Exception as e:
                self.logger.error(f"Error updating profile: {e}")

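    # Extraction sketch (from the heuristics above): a message such as
    # "My name is Alice and I live in Paris" yields
    # extracted == {'name': 'Alice', 'location': 'Paris'}; the name lands on
    # the profile directly, the rest is merged into its preferences JSON.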
    def _update_conversation_summary(self, user_id: str) -> None:
        """
        Auto-generate conversation summary for user profile

        Args:
            user_id: User ID
        """
        try:
            if not hasattr(self.memory, 'conversations'):
                return

            # Ensure memory is loaded
            if user_id not in self.memory.conversations:
                self.memory.load_memory(user_id)

            conversations = self.memory.conversations.get(user_id, [])
            if not conversations:
                return

            # Get recent conversations for summary
            recent_convs = conversations[-10:]  # Last 10 conversations

            # Extract topics/interests
            all_messages = " ".join([c.get('user_message', '') for c in recent_convs])
            topics = self._extract_topics(all_messages)

            # Calculate engagement stats
            total_interactions = len(conversations)
            avg_response_length = sum(len(c.get('bot_response', '')) for c in recent_convs) / len(recent_convs) if recent_convs else 0

            # Build summary
            summary = {
                "total_interactions": total_interactions,
                "topics_of_interest": topics[:5] if topics else [],  # Top 5 topics
                "avg_response_length": round(avg_response_length, 0),
                "last_active": recent_convs[-1].get('timestamp') if recent_convs else None,
                "engagement_level": "high" if total_interactions > 20 else ("medium" if total_interactions > 5 else "low")
            }

            # Update profile summary (JSON mode)
            if user_id in self.memory.user_profiles:
                self.memory.user_profiles[user_id]['summary'] = summary

        except Exception as e:
            self.logger.debug(f"Summary generation skipped: {e}")

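    # Resulting summary shape (keys from the dict built above; values
    # illustrative):
    #
    #   {"total_interactions": 12, "topics_of_interest": ["Python Programming"],
    #    "avg_response_length": 240.0, "last_active": "2025-01-01T12:00:00",
    #    "engagement_level": "medium"}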
    def _extract_topics(self, text: str) -> List[str]:
        """
        Extract key topics/interests from conversation text

        Args:
            text: Combined conversation text

        Returns:
            List of extracted topics
        """
        # Simple keyword extraction (can be enhanced with NLP)
        keywords_map = {
            "python": "Python Programming",
            "javascript": "JavaScript",
            "coding": "Programming",
            "weather": "Weather",
            "food": "Food & Dining",
            "music": "Music",
            "sport": "Sports",
            "travel": "Travel",
            "work": "Work",
            "help": "Support",
            "problem": "Problem Solving",
            "question": "Questions",
            "chat": "Chatting"
        }

        text_lower = text.lower()
        found_topics = []

        for keyword, topic in keywords_map.items():
            if keyword in text_lower:
                found_topics.append(topic)

        # Remove duplicates while preserving order
        seen = set()
        unique_topics = []
        for topic in found_topics:
            if topic not in seen:
                seen.add(topic)
                unique_topics.append(topic)

        return unique_topics

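    # Example (from the keyword map above):
    #
    #   _extract_topics("I love python and travel, mostly python")
    #   # -> ['Python Programming', 'Travel']  (deduplicated, order preserved)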
    def get_user_profile(self, user_id: Optional[str] = None) -> Dict:
        """
        Get user's profile info

        Args:
            user_id: User ID (uses current_user if not specified)

        Returns:
            User profile dictionary with all info (name, favorite_food, location, etc.)
        """
        uid = user_id or self.current_user
        if not uid:
            return {}

        try:
            # Check if SQL or JSON memory - SQL has SQLMemoryManager type
            if ADVANCED_AVAILABLE and isinstance(self.memory, SQLMemoryManager):
                # SQL memory - merge preferences into main dict
                profile = self.memory.get_user_profile(uid)
                if not profile:
                    return {}

                # Parse preferences JSON if exists
                result = {
                    'user_id': profile.get('user_id'),
                    'name': profile.get('name'),
                    'first_seen': profile.get('first_seen'),
                    'last_interaction': profile.get('last_interaction'),
                }

                # Merge preferences
                prefs_str = profile.get('preferences')
                if prefs_str:
                    try:
                        prefs = json.loads(prefs_str) if isinstance(prefs_str, str) else prefs_str
                        result.update(prefs)  # Add favorite_food, location, etc.
                    except Exception:
                        pass

                return result
            else:
                # JSON memory - reload from disk to get latest data
                memory_data = self.memory.load_memory(uid)
                profile = memory_data.get('profile', {}).copy()  # Make a copy to avoid modifying cached data

                # Parse preferences if it's a JSON string
                if isinstance(profile.get('preferences'), str):
                    try:
                        profile['preferences'] = json.loads(profile['preferences'])
                    except Exception:
                        profile['preferences'] = {}

                # Return profile as-is (summary should already be there if it was generated)
                # Only regenerate if truly missing
                summary_value = profile.get('summary')
                summary_is_empty = (not summary_value or
                                    (isinstance(summary_value, dict) and len(summary_value) == 0))

                if summary_is_empty:
                    # Try to regenerate summary if missing (for old users)
                    # Ensure conversations are loaded
                    if uid not in self.memory.conversations:
                        self.memory.load_memory(uid)

                    if uid in self.memory.conversations and len(self.memory.conversations[uid]) > 0:
                        self._update_conversation_summary(uid)
                        # Save the updated summary
                        if uid in self.memory.user_profiles:
                            self.memory.save_memory(uid)
                        # Reload to get updated summary
                        memory_data = self.memory.load_memory(uid)
                        profile = memory_data.get('profile', {}).copy()
                        # Parse preferences again after reload
                        if isinstance(profile.get('preferences'), str):
                            try:
                                profile['preferences'] = json.loads(profile['preferences'])
                            except Exception:
                                profile['preferences'] = {}

                return profile
        except Exception as e:
            self.logger.error(f"Error getting user profile: {e}")
            return {}

1371
|
+
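    # Usage sketch (illustrative; assumes an initialized agent with stored data):
    #   profile = agent.get_user_profile("alice")
    #   profile.get("name"), profile.get("summary", {})
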
    def add_knowledge(self, category: str, question: str, answer: str,
                      keywords: Optional[List[str]] = None, priority: int = 0) -> int:
        """Add a new record to the knowledge base"""
        if not ADVANCED_AVAILABLE or not isinstance(self.memory, SQLMemoryManager):
            return 0

        try:
            kb_id = self.memory.add_knowledge(category, question, answer, keywords, priority)
            self.logger.info(f"New knowledge added: {category} - {kb_id}")
            return kb_id
        except Exception as e:
            self.logger.error(f"Failed to add knowledge: {e}")
            return 0

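    # Usage sketch (illustrative; requires the SQL memory backend):
    #   kb_id = agent.add_knowledge("billing", "How do I get a refund?",
    #                               "Refunds are issued within 5 business days.",
    #                               keywords=["refund", "billing"], priority=1)
    #   # kb_id == 0 means the record could not be added.
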
    def get_statistics(self) -> Dict[str, Any]:
        """Returns general statistics"""
        try:
            if hasattr(self.memory, 'get_statistics'):
                return self.memory.get_statistics()
            else:
                # Simple statistics for JSON memory
                return {
                    "total_users": 0,
                    "total_interactions": 0,
                    "memory_backend": "JSON"
                }
        except Exception as e:
            self.logger.error(f"Statistics retrieval error: {e}")
            return {}

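    # Usage sketch (illustrative; available keys depend on the memory backend):
    #   stats = agent.get_statistics()
    #   stats.get("total_users"), stats.get("memory_backend")
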
    def search_history(self, keyword: str, user_id: Optional[str] = None) -> List[Dict]:
        """Search the user's conversation history for a keyword"""
        uid = user_id or self.current_user
        if not uid:
            return []

        try:
            if hasattr(self.memory, 'search_conversations'):
                return self.memory.search_conversations(uid, keyword)
            else:
                return []
        except Exception as e:
            self.logger.error(f"History search error: {e}")
            return []

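    # Usage sketch (illustrative):
    #   for conv in agent.search_history("python", user_id="alice"):
    #       print(conv.get("timestamp"), conv.get("user_message"))
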
    def show_user_info(self, user_id: Optional[str] = None) -> str:
        """Shows user information"""
        uid = user_id or self.current_user
        if not uid:
            return "User ID not specified."

        try:
            if hasattr(self.memory, 'get_user_profile'):
                profile = self.memory.get_user_profile(uid)
                if profile:
                    return f"User: {uid}\nName: {profile.get('name', 'Unknown')}\nFirst conversation: {profile.get('first_seen', 'Unknown')}"
                else:
                    return f"User {uid} not found."
            else:
                return "This feature is not available."
        except Exception as e:
            return f"Error: {str(e)}"

    def export_memory(self, user_id: Optional[str] = None, format: str = "json") -> str:
        """Export user data"""
        uid = user_id or self.current_user
        if not uid:
            return "User ID not specified."

        try:
            if hasattr(self.memory, 'get_recent_conversations') and hasattr(self.memory, 'get_user_profile'):
                conversations = self.memory.get_recent_conversations(uid, 1000)
                profile = self.memory.get_user_profile(uid)

                if format == "json":
                    export_data = {
                        "user_id": uid,
                        "export_date": datetime.now().isoformat(),
                        "profile": profile,
                        "conversations": conversations
                    }
                    return json.dumps(export_data, ensure_ascii=False, indent=2)
                elif format == "txt":
                    result = f"Conversation history for user {uid}\n"
                    result += f"Export date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
                    result += "=" * 60 + "\n\n"

                    for i, conv in enumerate(conversations, 1):
                        result += f"Conversation {i}:\n"
                        result += f"Date: {conv.get('timestamp', 'Unknown')}\n"
                        result += f"User: {conv.get('user_message', '')}\n"
                        result += f"Bot: {conv.get('bot_response', '')}\n"
                        result += "-" * 40 + "\n"

                    return result
                else:
                    return "Unsupported format. Use json or txt."
            else:
                return "This feature is not available."
        except Exception as e:
            return f"Export error: {str(e)}"

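    # Usage sketch (illustrative): persist an export to disk.
    #   data = agent.export_memory("alice", format="json")
    #   with open("alice_export.json", "w", encoding="utf-8") as f:
    #       f.write(data)
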
    def clear_user_data(self, user_id: Optional[str] = None, confirm: bool = False) -> str:
        """Delete user data"""
        uid = user_id or self.current_user
        if not uid:
            return "User ID not specified."

        if not confirm:
            return "Use confirm=True parameter to delete data."

        try:
            if hasattr(self.memory, 'clear_memory'):
                self.memory.clear_memory(uid)
                return f"All data for user {uid} has been deleted."
            else:
                return "This feature is not available."
        except Exception as e:
            return f"Deletion error: {str(e)}"

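    # Usage sketch (illustrative): deletion is gated behind explicit confirmation.
    #   agent.clear_user_data("alice")                # warning string, nothing deleted
    #   agent.clear_user_data("alice", confirm=True)  # actually deletes
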
    def list_available_tools(self) -> str:
        """List available tools"""
        if ADVANCED_AVAILABLE:
            return self.tool_executor.memory_tools.list_available_tools()
        else:
            return "Tool system not available."

    # === METRICS & ANALYTICS METHODS (v1.3.1+) ===

    def get_response_metrics(self, last_n: Optional[int] = None) -> Dict[str, Any]:
        """
        Get response quality metrics summary

        Args:
            last_n: Analyze only last N responses (None = all)

        Returns:
            Metrics summary dictionary

        Example:
            >>> agent.get_response_metrics(last_n=10)
            {
                'total_responses': 10,
                'avg_latency_ms': 245.3,
                'avg_confidence': 0.82,
                'kb_usage_rate': 0.6,
                'source_distribution': {'knowledge_base': 6, 'model': 4},
                'fast_response_rate': 0.9
            }
        """
        return self.metrics_analyzer.get_summary(last_n)

    def get_latest_response_metric(self) -> Optional[ChatResponse]:
        """
        Get the most recent response metric

        Returns:
            Latest ChatResponse object or None if no metrics
        """
        if not self.metrics_analyzer.metrics_history:
            return None
        return self.metrics_analyzer.metrics_history[-1]

    def get_average_confidence(self, last_n: Optional[int] = None) -> float:
        """
        Get average confidence score

        Args:
            last_n: Analyze only last N responses (None = all)

        Returns:
            Average confidence (0.0-1.0)
        """
        return self.metrics_analyzer.get_average_confidence(last_n)

    def get_kb_usage_rate(self, last_n: Optional[int] = None) -> float:
        """
        Get knowledge base usage rate

        Args:
            last_n: Analyze only last N responses (None = all)

        Returns:
            KB usage rate (0.0-1.0)
        """
        return self.metrics_analyzer.get_kb_usage_rate(last_n)

    def clear_metrics(self) -> None:
        """Clear all metrics history"""
        self.metrics_analyzer.clear_history()
        self.logger.info("Metrics history cleared")

    def export_metrics(self, format: str = "json") -> str:
        """
        Export metrics data

        Args:
            format: Export format ('json' or 'summary')

        Returns:
            Formatted metrics data
        """
        summary = self.get_response_metrics()

        if format == "json":
            return json.dumps(summary, ensure_ascii=False, indent=2)
        elif format == "summary":
            lines = [
                "📊 RESPONSE METRICS SUMMARY",
                "=" * 60,
                f"Total Responses: {summary['total_responses']}",
                f"Avg Latency: {summary['avg_latency_ms']:.1f} ms",
                f"Avg Confidence: {summary['avg_confidence']:.2%}",
                f"KB Usage Rate: {summary['kb_usage_rate']:.2%}",
                f"Fast Response Rate: {summary['fast_response_rate']:.2%}",
                "",
                "Source Distribution:",
            ]
            for source, count in summary['source_distribution'].items():
                lines.append(f"  - {source:20s}: {count}")

            lines.extend(["", "Quality Distribution:"])
            for quality, count in summary.get('quality_distribution', {}).items():
                lines.append(f"  - {quality:20s}: {count}")

            return "\n".join(lines)
        else:
            return "Unsupported format. Use 'json' or 'summary'."

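    # Usage sketch (illustrative):
    #   print(agent.export_metrics(format="summary"))  # human-readable report
    #   blob = agent.export_metrics()                   # JSON string (default)
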
    def close(self) -> None:
        """Clean up resources"""
        if hasattr(self.memory, 'close'):
            self.memory.close()
        self.logger.info("MemAgent closed")

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
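
# Usage sketch (illustrative): MemAgent supports the context-manager protocol,
# so backend resources are released automatically:
#   with MemAgent() as agent:
#       print(agent.get_statistics())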