mem-llm 1.2.0__py3-none-any.whl → 1.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mem-llm might be problematic.
- mem_llm/__init__.py +12 -3
- mem_llm/base_llm_client.py +175 -0
- mem_llm/clients/__init__.py +25 -0
- mem_llm/clients/gemini_client.py +381 -0
- mem_llm/clients/lmstudio_client.py +280 -0
- mem_llm/clients/ollama_client.py +268 -0
- mem_llm/llm_client_factory.py +277 -0
- mem_llm/mem_agent.py +123 -37
- {mem_llm-1.2.0.dist-info → mem_llm-1.3.1.dist-info}/METADATA +103 -36
- {mem_llm-1.2.0.dist-info → mem_llm-1.3.1.dist-info}/RECORD +13 -7
- {mem_llm-1.2.0.dist-info → mem_llm-1.3.1.dist-info}/WHEEL +0 -0
- {mem_llm-1.2.0.dist-info → mem_llm-1.3.1.dist-info}/entry_points.txt +0 -0
- {mem_llm-1.2.0.dist-info → mem_llm-1.3.1.dist-info}/top_level.txt +0 -0
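The new modules introduce a common client abstraction: base_llm_client.py defines BaseLLMClient, the three modules under mem_llm/clients/ implement it for Ollama, LM Studio, and Gemini, and llm_client_factory.py selects between them. The body of base_llm_client.py is not shown in this section, so the following is only a hypothetical sketch of what a conforming client looks like, inferred from the call sites in llm_client_factory.py and mem_agent.py below; the method names check_connection, list_models, and get_info appear in the diff, everything else (class name, constructor signature, return values) is an illustrative assumption.

# Hypothetical sketch only: base_llm_client.py's contents are not part of this section.
from typing import Any, Dict, List, Optional

class ExampleEchoClient:
    """Toy stand-in for a BaseLLMClient subclass such as OllamaClient."""

    def __init__(self, model: Optional[str] = None, base_url: Optional[str] = None, **kwargs: Any):
        self.model = model
        self.base_url = base_url

    def check_connection(self) -> bool:
        # A real client would ping its service here (e.g. the local Ollama or LM Studio endpoint).
        return True

    def list_models(self) -> List[str]:
        # A real client would query the backend for its installed models.
        return [self.model] if self.model else []

    def get_info(self) -> Dict[str, Any]:
        # auto_detect() and MemAgent read the 'backend' key from this dict.
        return {"backend": "example", "model": self.model, "base_url": self.base_url}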
mem_llm/llm_client_factory.py
ADDED

@@ -0,0 +1,277 @@
+"""
+LLM Client Factory
+==================
+
+Factory pattern for creating LLM clients.
+Supports multiple backends with automatic detection.
+
+Supported Backends:
+- Ollama: Local Ollama service
+- LM Studio: Local LM Studio server
+- Gemini: Google Gemini API (cloud)
+
+Usage:
+    # Create specific backend
+    client = LLMClientFactory.create('ollama', model='llama3')
+
+    # Auto-detect available backend
+    client = LLMClientFactory.auto_detect()
+
+    # Get all available backends
+    backends = LLMClientFactory.get_available_backends()
+
+Author: C. Emre Karataş
+Version: 1.3.0
+"""
+
+from typing import Optional, Dict, List, Any
+import logging
+
+from .clients.ollama_client import OllamaClient
+from .clients.lmstudio_client import LMStudioClient
+from .clients.gemini_client import GeminiClient
+from .base_llm_client import BaseLLMClient
+
+
+class LLMClientFactory:
+    """
+    Factory for creating LLM clients
+
+    Provides unified interface for creating different LLM backends.
+    Supports auto-detection of available local services.
+    """
+
+    # Registry of supported backends
+    BACKENDS = {
+        'ollama': {
+            'class': OllamaClient,
+            'description': 'Local Ollama service',
+            'type': 'local',
+            'default_url': 'http://localhost:11434',
+            'default_model': 'granite4:tiny-h'
+        },
+        'lmstudio': {
+            'class': LMStudioClient,
+            'description': 'LM Studio local server (OpenAI-compatible)',
+            'type': 'local',
+            'default_url': 'http://localhost:1234',
+            'default_model': 'local-model'
+        },
+        'gemini': {
+            'class': GeminiClient,
+            'description': 'Google Gemini API (cloud)',
+            'type': 'cloud',
+            'default_model': 'gemini-2.5-flash',
+            'requires_api_key': True
+        }
+    }
+
+    @staticmethod
+    def create(backend: str, model: Optional[str] = None, **kwargs) -> BaseLLMClient:
+        """
+        Create LLM client for specified backend
+
+        Args:
+            backend: Backend name ('ollama', 'lmstudio', 'gemini')
+            model: Model name (uses default if None)
+            **kwargs: Backend-specific configuration
+                - base_url: API endpoint (for local backends)
+                - api_key: API key (for cloud backends)
+                - temperature: Default temperature
+                - max_tokens: Default max tokens
+
+        Returns:
+            Configured LLM client
+
+        Raises:
+            ValueError: If backend is not supported
+
+        Examples:
+            # Ollama
+            client = LLMClientFactory.create('ollama', model='llama3')
+
+            # LM Studio
+            client = LLMClientFactory.create(
+                'lmstudio',
+                model='llama-3-8b',
+                base_url='http://localhost:1234'
+            )
+
+            # Gemini
+            client = LLMClientFactory.create(
+                'gemini',
+                model='gemini-2.5-flash',
+                api_key='your-api-key'
+            )
+        """
+        backend = backend.lower()
+
+        if backend not in LLMClientFactory.BACKENDS:
+            available = ', '.join(LLMClientFactory.BACKENDS.keys())
+            raise ValueError(
+                f"Unsupported backend: '{backend}'. "
+                f"Available backends: {available}"
+            )
+
+        backend_info = LLMClientFactory.BACKENDS[backend]
+        client_class = backend_info['class']
+
+        # Use default model if not specified
+        if not model:
+            model = backend_info.get('default_model')
+
+        # Add default base_url for local backends if not provided
+        if backend_info['type'] == 'local' and 'base_url' not in kwargs:
+            kwargs['base_url'] = backend_info.get('default_url')
+
+        # Create and return client
+        try:
+            return client_class(model=model, **kwargs)
+        except Exception as e:
+            raise ValueError(f"Failed to create {backend} client: {str(e)}") from e
+
+    @staticmethod
+    def auto_detect(preferred_backends: Optional[List[str]] = None) -> Optional[BaseLLMClient]:
+        """
+        Auto-detect available LLM service
+
+        Checks common local services and returns the first available one.
+        Useful for applications that should work with any available backend.
+
+        Args:
+            preferred_backends: List of backends to check in order
+                (if None, checks all in default order)
+
+        Returns:
+            First available LLM client, or None if none available
+
+        Example:
+            # Try to find any available backend
+            client = LLMClientFactory.auto_detect()
+            if client:
+                print(f"Using {client.get_info()['backend']}")
+            else:
+                print("No LLM service found")
+
+            # Try specific backends in order
+            client = LLMClientFactory.auto_detect(['lmstudio', 'ollama'])
+        """
+        logger = logging.getLogger('LLMClientFactory')
+
+        # Default check order: local services first
+        if preferred_backends is None:
+            preferred_backends = ['ollama', 'lmstudio']
+
+        for backend_name in preferred_backends:
+            if backend_name not in LLMClientFactory.BACKENDS:
+                logger.warning(f"Unknown backend in auto-detect: {backend_name}")
+                continue
+
+            backend_info = LLMClientFactory.BACKENDS[backend_name]
+
+            # Skip cloud services in auto-detect (they require API keys)
+            if backend_info['type'] == 'cloud':
+                logger.debug(f"Skipping cloud backend in auto-detect: {backend_name}")
+                continue
+
+            try:
+                # Try to create client with defaults
+                client = LLMClientFactory.create(backend_name)
+
+                # Check if service is actually running
+                if client.check_connection():
+                    logger.info(f"✅ Detected {backend_name} at {backend_info.get('default_url')}")
+                    return client
+                else:
+                    logger.debug(f"Service not running: {backend_name}")
+
+            except Exception as e:
+                logger.debug(f"Failed to detect {backend_name}: {e}")
+                continue
+
+        logger.warning("⚠️ No local LLM service detected")
+        return None
+
+    @staticmethod
+    def get_available_backends() -> List[Dict[str, Any]]:
+        """
+        Get list of all supported backends with their info
+
+        Returns:
+            List of backend information dictionaries
+
+        Example:
+            backends = LLMClientFactory.get_available_backends()
+            for backend in backends:
+                print(f"{backend['name']}: {backend['description']}")
+        """
+        result = []
+
+        for name, info in LLMClientFactory.BACKENDS.items():
+            backend_dict = {
+                'name': name,
+                'description': info['description'],
+                'type': info['type'],
+                'default_model': info.get('default_model'),
+                'requires_api_key': info.get('requires_api_key', False)
+            }
+
+            if info['type'] == 'local':
+                backend_dict['default_url'] = info.get('default_url')
+
+            result.append(backend_dict)
+
+        return result
+
+    @staticmethod
+    def check_backend_availability(backend: str, **kwargs) -> bool:
+        """
+        Check if a specific backend is available
+
+        Args:
+            backend: Backend name
+            **kwargs: Configuration for creating the client
+
+        Returns:
+            True if backend is available and responding
+
+        Example:
+            # Check if Ollama is running
+            if LLMClientFactory.check_backend_availability('ollama'):
+                print("Ollama is available")
+
+            # Check custom LM Studio URL
+            if LLMClientFactory.check_backend_availability(
+                'lmstudio',
+                base_url='http://localhost:5000'
+            ):
+                print("LM Studio is available")
+        """
+        try:
+            client = LLMClientFactory.create(backend, **kwargs)
+            return client.check_connection()
+        except Exception:
+            return False
+
+    @staticmethod
+    def get_backend_info(backend: str) -> Dict[str, Any]:
+        """
+        Get information about a specific backend
+
+        Args:
+            backend: Backend name
+
+        Returns:
+            Backend information dictionary
+
+        Raises:
+            ValueError: If backend not found
+        """
+        if backend not in LLMClientFactory.BACKENDS:
+            raise ValueError(f"Unknown backend: {backend}")
+
+        info = LLMClientFactory.BACKENDS[backend].copy()
+        # Remove class reference for JSON serialization
+        info.pop('class', None)
+        return info
+
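Taken together, the factory's docstrings above describe the intended call pattern. A small self-contained usage sketch based only on those docstrings (it requires the mem-llm package plus a running local Ollama or LM Studio instance to actually return a client; model names and URLs are the registered defaults):

# Usage sketch based on the docstrings in llm_client_factory.py above.
from mem_llm.llm_client_factory import LLMClientFactory

# Explicit backend selection (falls back to the registered default model if omitted)
client = LLMClientFactory.create('ollama', model='granite4:tiny-h')

# Probe local services (Ollama first, then LM Studio) and take the first one running
detected = LLMClientFactory.auto_detect()
if detected is not None:
    print(f"Using {detected.get_info()['backend']}")

# Enumerate everything the factory knows about, e.g. to build a settings screen
for backend in LLMClientFactory.get_available_backends():
    print(f"{backend['name']}: {backend['description']}")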
mem_llm/mem_agent.py
CHANGED
@@ -37,7 +37,9 @@ import os
 
 # Core dependencies
 from .memory_manager import MemoryManager
-from .llm_client import OllamaClient
+from .llm_client import OllamaClient  # Backward compatibility
+from .llm_client_factory import LLMClientFactory
+from .base_llm_client import BaseLLMClient
 
 # Advanced features (optional)
 try:
@@ -61,25 +63,48 @@ class MemAgent:
 
     def __init__(self,
                  model: str = "granite4:tiny-h",
+                 backend: str = "ollama",
                  config_file: Optional[str] = None,
                  use_sql: bool = True,
                  memory_dir: Optional[str] = None,
                  db_path: Optional[str] = None,
                  load_knowledge_base: bool = True,
                  ollama_url: str = "http://localhost:11434",
+                 base_url: Optional[str] = None,
+                 api_key: Optional[str] = None,
+                 auto_detect_backend: bool = False,
                  check_connection: bool = False,
-                 enable_security: bool = False):
+                 enable_security: bool = False,
+                 **llm_kwargs):
         """
         Args:
             model: LLM model to use
+            backend: LLM backend ('ollama', 'lmstudio', 'gemini') - NEW in v1.3.0
             config_file: Configuration file (optional)
             use_sql: Use SQL database (True) or JSON (False)
             memory_dir: Memory directory (for JSON mode or if db_path not specified)
             db_path: SQLite database path (for SQL mode, e.g., ":memory:" or "path/to/db.db")
             load_knowledge_base: Automatically load knowledge base
-            ollama_url: Ollama API URL
-
+            ollama_url: Ollama API URL (backward compatibility, use base_url instead)
+            base_url: Backend API URL (for local backends) - NEW in v1.3.0
+            api_key: API key (for cloud backends like Gemini) - NEW in v1.3.0
+            auto_detect_backend: Auto-detect available LLM backend - NEW in v1.3.0
+            check_connection: Verify LLM connection on startup (default: False)
             enable_security: Enable prompt injection protection (v1.1.0+, default: False for backward compatibility)
+            **llm_kwargs: Additional backend-specific parameters
+
+        Examples:
+            # Default Ollama
+            agent = MemAgent()
+
+            # LM Studio
+            agent = MemAgent(backend='lmstudio', model='llama-3-8b')
+
+            # Gemini
+            agent = MemAgent(backend='gemini', model='gemini-1.5-flash', api_key='your-key')
+
+            # Auto-detect
+            agent = MemAgent(auto_detect_backend=True)
         """
 
         # Setup logging first
@@ -158,48 +183,109 @@ class MemAgent:
         # LLM client
         self.model = model  # Store model name
         self.use_sql = use_sql  # Store SQL usage flag
-
+
+        # Initialize LLM client (v1.3.0: Multi-backend support)
+        # Prepare backend configuration
+        llm_config = llm_kwargs.copy()
+
+        # Handle backward compatibility: ollama_url -> base_url
+        if base_url is None and backend == "ollama":
+            base_url = ollama_url
+
+        # Add base_url for local backends
+        if base_url and backend in ['ollama', 'lmstudio']:
+            llm_config['base_url'] = base_url
+
+        # Add api_key for cloud backends
+        if api_key and backend in ['gemini']:
+            llm_config['api_key'] = api_key
+
+        # Auto-detect backend if requested
+        if auto_detect_backend:
+            self.logger.info("🔍 Auto-detecting available LLM backend...")
+            self.llm = LLMClientFactory.auto_detect()
+            if self.llm:
+                detected_backend = self.llm.__class__.__name__
+                self.logger.info(f"✅ Detected and using: {detected_backend}")
+            else:
+                self.logger.error("❌ No LLM backend available.")
+                raise RuntimeError(
+                    "No LLM backend detected. Please start a local LLM service (Ollama/LM Studio) "
+                    "or provide Gemini API key."
+                )
+        else:
+            # Create client using factory
+            try:
+                self.llm = LLMClientFactory.create(
+                    backend=backend,
+                    model=model,
+                    **llm_config
+                )
+                self.logger.info(f"✅ Initialized {backend} backend with model: {model}")
+            except Exception as e:
+                self.logger.error(f"❌ Failed to initialize {backend} backend: {e}")
+                raise
 
         # Optional connection check on startup
         if check_connection:
-
+            backend_name = backend if not auto_detect_backend else "LLM service"
+            self.logger.info(f"Checking {backend_name} connection...")
             if not self.llm.check_connection():
-                error_msg =
-
-
-
-
-
-
-
-
-
+                error_msg = f"❌ ERROR: Cannot connect to {backend_name}!\n"
+
+                if backend == "ollama":
+                    error_msg += (
+                        " \n"
+                        " Solutions:\n"
+                        " 1. Start Ollama: ollama serve\n"
+                        " 2. Check if Ollama is running: http://localhost:11434\n"
+                        " 3. Verify base_url parameter is correct\n"
+                    )
+                elif backend == "lmstudio":
+                    error_msg += (
+                        " \n"
+                        " Solutions:\n"
+                        " 1. Start LM Studio\n"
+                        " 2. Load a model in LM Studio\n"
+                        " 3. Start local server (default: http://localhost:1234)\n"
+                        " 4. Verify base_url parameter is correct\n"
+                    )
+                elif backend == "gemini":
+                    error_msg += (
+                        " \n"
+                        " Solutions:\n"
+                        " 1. Check your internet connection\n"
+                        " 2. Verify Gemini API key is correct\n"
+                        " 3. Check API quota/billing status\n"
+                        " Get key: https://makersuite.google.com/app/apikey\n"
+                    )
+
+                error_msg += " \n To skip this check, use: MemAgent(check_connection=False)"
                 self.logger.error(error_msg)
-                raise ConnectionError("
+                raise ConnectionError(f"{backend_name} not available")
 
-            # Check if model exists
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            # Check if model exists (for backends that support listing)
+            try:
+                available_models = self.llm.list_models()
+                if available_models and model not in available_models:
+                    error_msg = (
+                        f"❌ ERROR: Model '{model}' not found in {backend}!\n"
+                        f" \n"
+                        f" Available models: {', '.join(available_models[:5])}\n"
+                        f" Total: {len(available_models)} models available\n"
+                        f" \n"
+                        f" To skip this check, use: MemAgent(check_connection=False)"
+                    )
+                    self.logger.error(error_msg)
+                    raise ValueError(f"Model '{model}' not available")
+            except:
+                # Some backends may not support list_models, skip check
+                pass
 
-            self.logger.info(f"✅
+            self.logger.info(f"✅ {backend_name} connection verified, model '{model}' ready")
 
-        self.logger.info(f"LLM client ready: {model}")
+        self.logger.info(f"LLM client ready: {model} on {backend}")
 
-        # Initialize state variables FIRST
-        self.current_user: Optional[str] = None
-        self.current_system_prompt: Optional[str] = None
 
         # Advanced features (if available)
         if ADVANCED_AVAILABLE:
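The MemAgent changes above expose the same backends through the constructor while keeping 1.2.x-style calls working (ollama_url is mapped to base_url internally). The examples below mirror the new docstring; they are illustrative only, the Gemini key is a placeholder, and each call assumes the corresponding service or API key is actually available:

# Constructor examples mirroring the updated MemAgent docstring in this diff.
from mem_llm.mem_agent import MemAgent

# 1.2.x-style call keeps working: ollama_url is translated to base_url
agent = MemAgent(model="granite4:tiny-h", ollama_url="http://localhost:11434")

# New in 1.3.x: pick a backend explicitly
agent = MemAgent(backend='lmstudio', model='llama-3-8b')
agent = MemAgent(backend='gemini', model='gemini-1.5-flash', api_key='your-key')

# Or let the agent probe local services and verify the connection at startup
agent = MemAgent(auto_detect_backend=True, check_connection=True)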