mem-llm 1.1.0__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mem-llm might be problematic.

mem_llm/llm_client_factory.py ADDED
@@ -0,0 +1,277 @@
+ """
+ LLM Client Factory
+ ==================
+
+ Factory pattern for creating LLM clients.
+ Supports multiple backends with automatic detection.
+
+ Supported Backends:
+ - Ollama: Local Ollama service
+ - LM Studio: Local LM Studio server
+ - Gemini: Google Gemini API (cloud)
+
+ Usage:
+     # Create specific backend
+     client = LLMClientFactory.create('ollama', model='llama3')
+
+     # Auto-detect available backend
+     client = LLMClientFactory.auto_detect()
+
+     # Get all available backends
+     backends = LLMClientFactory.get_available_backends()
+
+ Author: C. Emre Karataş
+ Version: 1.3.0
+ """
+
+ from typing import Optional, Dict, List, Any
+ import logging
+
+ from .clients.ollama_client import OllamaClient
+ from .clients.lmstudio_client import LMStudioClient
+ from .clients.gemini_client import GeminiClient
+ from .base_llm_client import BaseLLMClient
+
+
+ class LLMClientFactory:
+     """
+     Factory for creating LLM clients
+
+     Provides unified interface for creating different LLM backends.
+     Supports auto-detection of available local services.
+     """
+
+     # Registry of supported backends
+     BACKENDS = {
+         'ollama': {
+             'class': OllamaClient,
+             'description': 'Local Ollama service',
+             'type': 'local',
+             'default_url': 'http://localhost:11434',
+             'default_model': 'granite4:tiny-h'
+         },
+         'lmstudio': {
+             'class': LMStudioClient,
+             'description': 'LM Studio local server (OpenAI-compatible)',
+             'type': 'local',
+             'default_url': 'http://localhost:1234',
+             'default_model': 'local-model'
+         },
+         'gemini': {
+             'class': GeminiClient,
+             'description': 'Google Gemini API (cloud)',
+             'type': 'cloud',
+             'default_model': 'gemini-2.5-flash',
+             'requires_api_key': True
+         }
+     }
+
+     @staticmethod
+     def create(backend: str, model: Optional[str] = None, **kwargs) -> BaseLLMClient:
+         """
+         Create LLM client for specified backend
+
+         Args:
+             backend: Backend name ('ollama', 'lmstudio', 'gemini')
+             model: Model name (uses default if None)
+             **kwargs: Backend-specific configuration
+                 - base_url: API endpoint (for local backends)
+                 - api_key: API key (for cloud backends)
+                 - temperature: Default temperature
+                 - max_tokens: Default max tokens
+
+         Returns:
+             Configured LLM client
+
+         Raises:
+             ValueError: If backend is not supported
+
+         Examples:
+             # Ollama
+             client = LLMClientFactory.create('ollama', model='llama3')
+
+             # LM Studio
+             client = LLMClientFactory.create(
+                 'lmstudio',
+                 model='llama-3-8b',
+                 base_url='http://localhost:1234'
+             )
+
+             # Gemini
+             client = LLMClientFactory.create(
+                 'gemini',
+                 model='gemini-2.5-flash',
+                 api_key='your-api-key'
+             )
+         """
+         backend = backend.lower()
+
+         if backend not in LLMClientFactory.BACKENDS:
+             available = ', '.join(LLMClientFactory.BACKENDS.keys())
+             raise ValueError(
+                 f"Unsupported backend: '{backend}'. "
+                 f"Available backends: {available}"
+             )
+
+         backend_info = LLMClientFactory.BACKENDS[backend]
+         client_class = backend_info['class']
+
+         # Use default model if not specified
+         if not model:
+             model = backend_info.get('default_model')
+
+         # Add default base_url for local backends if not provided
+         if backend_info['type'] == 'local' and 'base_url' not in kwargs:
+             kwargs['base_url'] = backend_info.get('default_url')
+
+         # Create and return client
+         try:
+             return client_class(model=model, **kwargs)
+         except Exception as e:
+             raise ValueError(f"Failed to create {backend} client: {str(e)}") from e
+
+     @staticmethod
+     def auto_detect(preferred_backends: Optional[List[str]] = None) -> Optional[BaseLLMClient]:
+         """
+         Auto-detect available LLM service
+
+         Checks common local services and returns the first available one.
+         Useful for applications that should work with any available backend.
+
+         Args:
+             preferred_backends: List of backends to check in order
+                 (if None, checks all in default order)
+
+         Returns:
+             First available LLM client, or None if none available
+
+         Example:
+             # Try to find any available backend
+             client = LLMClientFactory.auto_detect()
+             if client:
+                 print(f"Using {client.get_info()['backend']}")
+             else:
+                 print("No LLM service found")
+
+             # Try specific backends in order
+             client = LLMClientFactory.auto_detect(['lmstudio', 'ollama'])
+         """
+         logger = logging.getLogger('LLMClientFactory')
+
+         # Default check order: local services first
+         if preferred_backends is None:
+             preferred_backends = ['ollama', 'lmstudio']
+
+         for backend_name in preferred_backends:
+             if backend_name not in LLMClientFactory.BACKENDS:
+                 logger.warning(f"Unknown backend in auto-detect: {backend_name}")
+                 continue
+
+             backend_info = LLMClientFactory.BACKENDS[backend_name]
+
+             # Skip cloud services in auto-detect (they require API keys)
+             if backend_info['type'] == 'cloud':
+                 logger.debug(f"Skipping cloud backend in auto-detect: {backend_name}")
+                 continue
+
+             try:
+                 # Try to create client with defaults
+                 client = LLMClientFactory.create(backend_name)
+
+                 # Check if service is actually running
+                 if client.check_connection():
+                     logger.info(f"✅ Detected {backend_name} at {backend_info.get('default_url')}")
+                     return client
+                 else:
+                     logger.debug(f"Service not running: {backend_name}")
+
+             except Exception as e:
+                 logger.debug(f"Failed to detect {backend_name}: {e}")
+                 continue
+
+         logger.warning("⚠️ No local LLM service detected")
+         return None
+
+     @staticmethod
+     def get_available_backends() -> List[Dict[str, Any]]:
+         """
+         Get list of all supported backends with their info
+
+         Returns:
+             List of backend information dictionaries
+
+         Example:
+             backends = LLMClientFactory.get_available_backends()
+             for backend in backends:
+                 print(f"{backend['name']}: {backend['description']}")
+         """
+         result = []
+
+         for name, info in LLMClientFactory.BACKENDS.items():
+             backend_dict = {
+                 'name': name,
+                 'description': info['description'],
+                 'type': info['type'],
+                 'default_model': info.get('default_model'),
+                 'requires_api_key': info.get('requires_api_key', False)
+             }
+
+             if info['type'] == 'local':
+                 backend_dict['default_url'] = info.get('default_url')
+
+             result.append(backend_dict)
+
+         return result
+
+     @staticmethod
+     def check_backend_availability(backend: str, **kwargs) -> bool:
+         """
+         Check if a specific backend is available
+
+         Args:
+             backend: Backend name
+             **kwargs: Configuration for creating the client
+
+         Returns:
+             True if backend is available and responding
+
+         Example:
+             # Check if Ollama is running
+             if LLMClientFactory.check_backend_availability('ollama'):
+                 print("Ollama is available")
+
+             # Check custom LM Studio URL
+             if LLMClientFactory.check_backend_availability(
+                 'lmstudio',
+                 base_url='http://localhost:5000'
+             ):
+                 print("LM Studio is available")
+         """
+         try:
+             client = LLMClientFactory.create(backend, **kwargs)
+             return client.check_connection()
+         except Exception:
+             return False
+
+     @staticmethod
+     def get_backend_info(backend: str) -> Dict[str, Any]:
+         """
+         Get information about a specific backend
+
+         Args:
+             backend: Backend name
+
+         Returns:
+             Backend information dictionary
+
+         Raises:
+             ValueError: If backend not found
+         """
+         if backend not in LLMClientFactory.BACKENDS:
+             raise ValueError(f"Unknown backend: {backend}")
+
+         info = LLMClientFactory.BACKENDS[backend].copy()
+         # Remove class reference for JSON serialization
+         info.pop('class', None)
+         return info
+
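Taken together, the new factory can probe what is installed and pick a working backend before any client code runs. The following is a minimal illustrative sketch, not part of the package, assuming the module is importable as mem_llm.llm_client_factory and that client.get_info() returns a 'backend' key as the auto_detect() docstring example suggests:

# Illustrative sketch only (not in the package).
# Assumptions: import path mem_llm.llm_client_factory; get_info() exposes a 'backend' key.
from mem_llm.llm_client_factory import LLMClientFactory

def pick_client():
    # Prefer a local backend that is actually reachable, then fall back to auto-detection.
    for info in LLMClientFactory.get_available_backends():
        if info['type'] == 'local' and LLMClientFactory.check_backend_availability(info['name']):
            return LLMClientFactory.create(info['name'])  # uses the registry's default model
    return LLMClientFactory.auto_detect()  # None if no local service is running

client = pick_client()
if client is not None:
    print(f"Using backend: {client.get_info()['backend']}")
else:
    print("No LLM service found")
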
mem_llm/mem_agent.py CHANGED
@@ -37,7 +37,9 @@ import os
 
  # Core dependencies
  from .memory_manager import MemoryManager
- from .llm_client import OllamaClient
+ from .llm_client import OllamaClient  # Backward compatibility
+ from .llm_client_factory import LLMClientFactory
+ from .base_llm_client import BaseLLMClient
 
  # Advanced features (optional)
  try:
@@ -61,23 +63,48 @@ class MemAgent:
 
      def __init__(self,
                   model: str = "granite4:tiny-h",
+                  backend: str = "ollama",
                   config_file: Optional[str] = None,
                   use_sql: bool = True,
                   memory_dir: Optional[str] = None,
+                  db_path: Optional[str] = None,
                   load_knowledge_base: bool = True,
                   ollama_url: str = "http://localhost:11434",
+                  base_url: Optional[str] = None,
+                  api_key: Optional[str] = None,
+                  auto_detect_backend: bool = False,
                   check_connection: bool = False,
-                  enable_security: bool = False):
+                  enable_security: bool = False,
+                  **llm_kwargs):
          """
          Args:
              model: LLM model to use
+             backend: LLM backend ('ollama', 'lmstudio', 'gemini') - NEW in v1.3.0
              config_file: Configuration file (optional)
              use_sql: Use SQL database (True) or JSON (False)
-             memory_dir: Memory directory
+             memory_dir: Memory directory (for JSON mode or if db_path not specified)
+             db_path: SQLite database path (for SQL mode, e.g., ":memory:" or "path/to/db.db")
              load_knowledge_base: Automatically load knowledge base
-             ollama_url: Ollama API URL
-             check_connection: Verify Ollama connection on startup (default: False)
+             ollama_url: Ollama API URL (backward compatibility, use base_url instead)
+             base_url: Backend API URL (for local backends) - NEW in v1.3.0
+             api_key: API key (for cloud backends like Gemini) - NEW in v1.3.0
+             auto_detect_backend: Auto-detect available LLM backend - NEW in v1.3.0
+             check_connection: Verify LLM connection on startup (default: False)
              enable_security: Enable prompt injection protection (v1.1.0+, default: False for backward compatibility)
+             **llm_kwargs: Additional backend-specific parameters
+
+         Examples:
+             # Default Ollama
+             agent = MemAgent()
+
+             # LM Studio
+             agent = MemAgent(backend='lmstudio', model='llama-3-8b')
+
+             # Gemini
+             agent = MemAgent(backend='gemini', model='gemini-1.5-flash', api_key='your-key')
+
+             # Auto-detect
+             agent = MemAgent(auto_detect_backend=True)
          """
 
          # Setup logging first
@@ -120,12 +147,29 @@ class MemAgent:
          self.has_knowledge_base: bool = False  # Track KB status
          self.has_tools: bool = False  # Track tools status
 
-         # Memory system selection
+         # Memory system
          if use_sql and ADVANCED_AVAILABLE:
              # SQL memory (advanced)
-             db_path = memory_dir or self.config.get("memory.db_path", "memories.db") if self.config else "memories.db"
-             self.memory = SQLMemoryManager(db_path)
-             self.logger.info(f"SQL memory system active: {db_path}")
+             # Determine database path
+             if db_path:
+                 # Use provided db_path (can be ":memory:" for in-memory DB)
+                 final_db_path = db_path
+             elif memory_dir:
+                 final_db_path = memory_dir
+             elif self.config:
+                 final_db_path = self.config.get("memory.db_path", "memories/memories.db")
+             else:
+                 final_db_path = "memories/memories.db"
+
+             # Ensure memories directory exists (skip for :memory:)
+             import os
+             if final_db_path != ":memory:":
+                 db_dir = os.path.dirname(final_db_path)
+                 if db_dir and not os.path.exists(db_dir):
+                     os.makedirs(db_dir, exist_ok=True)
+
+             self.memory = SQLMemoryManager(final_db_path)
+             self.logger.info(f"SQL memory system active: {final_db_path}")
          else:
              # JSON memory (simple)
              json_dir = memory_dir or self.config.get("memory.json_dir", "memories") if self.config else "memories"
@@ -139,48 +183,109 @@ class MemAgent:
          # LLM client
          self.model = model  # Store model name
          self.use_sql = use_sql  # Store SQL usage flag
-         self.llm = OllamaClient(model, ollama_url)
+
+         # Initialize LLM client (v1.3.0: Multi-backend support)
+         # Prepare backend configuration
+         llm_config = llm_kwargs.copy()
+
+         # Handle backward compatibility: ollama_url -> base_url
+         if base_url is None and backend == "ollama":
+             base_url = ollama_url
+
+         # Add base_url for local backends
+         if base_url and backend in ['ollama', 'lmstudio']:
+             llm_config['base_url'] = base_url
+
+         # Add api_key for cloud backends
+         if api_key and backend in ['gemini']:
+             llm_config['api_key'] = api_key
+
+         # Auto-detect backend if requested
+         if auto_detect_backend:
+             self.logger.info("🔍 Auto-detecting available LLM backend...")
+             self.llm = LLMClientFactory.auto_detect()
+             if self.llm:
+                 detected_backend = self.llm.__class__.__name__
+                 self.logger.info(f"✅ Detected and using: {detected_backend}")
+             else:
+                 self.logger.error("❌ No LLM backend available.")
+                 raise RuntimeError(
+                     "No LLM backend detected. Please start a local LLM service (Ollama/LM Studio) "
+                     "or provide Gemini API key."
+                 )
+         else:
+             # Create client using factory
+             try:
+                 self.llm = LLMClientFactory.create(
+                     backend=backend,
+                     model=model,
+                     **llm_config
+                 )
+                 self.logger.info(f"✅ Initialized {backend} backend with model: {model}")
+             except Exception as e:
+                 self.logger.error(f"❌ Failed to initialize {backend} backend: {e}")
+                 raise
 
          # Optional connection check on startup
          if check_connection:
-             self.logger.info("Checking Ollama connection...")
+             backend_name = backend if not auto_detect_backend else "LLM service"
+             self.logger.info(f"Checking {backend_name} connection...")
              if not self.llm.check_connection():
-                 error_msg = (
-                     "❌ ERROR: Cannot connect to Ollama service!\n"
-                     " \n"
-                     " Solutions:\n"
-                     " 1. Start Ollama: ollama serve\n"
-                     " 2. Check if Ollama is running: http://localhost:11434\n"
-                     " 3. Verify ollama_url parameter is correct\n"
-                     " \n"
-                     " To skip this check, use: MemAgent(check_connection=False)"
-                 )
+                 error_msg = f"❌ ERROR: Cannot connect to {backend_name}!\n"
+
+                 if backend == "ollama":
+                     error_msg += (
+                         " \n"
+                         " Solutions:\n"
+                         " 1. Start Ollama: ollama serve\n"
+                         " 2. Check if Ollama is running: http://localhost:11434\n"
+                         " 3. Verify base_url parameter is correct\n"
+                     )
+                 elif backend == "lmstudio":
+                     error_msg += (
+                         " \n"
+                         " Solutions:\n"
+                         " 1. Start LM Studio\n"
+                         " 2. Load a model in LM Studio\n"
+                         " 3. Start local server (default: http://localhost:1234)\n"
+                         " 4. Verify base_url parameter is correct\n"
+                     )
+                 elif backend == "gemini":
+                     error_msg += (
+                         " \n"
+                         " Solutions:\n"
+                         " 1. Check your internet connection\n"
+                         " 2. Verify Gemini API key is correct\n"
+                         " 3. Check API quota/billing status\n"
+                         " Get key: https://makersuite.google.com/app/apikey\n"
+                     )
+
+                 error_msg += " \n To skip this check, use: MemAgent(check_connection=False)"
                  self.logger.error(error_msg)
-                 raise ConnectionError("Ollama service not available")
+                 raise ConnectionError(f"{backend_name} not available")
 
-             # Check if model exists
-             available_models = self.llm.list_models()
-             if model not in available_models:
-                 error_msg = (
-                     f"❌ ERROR: Model '{model}' not found!\n"
-                     f" \n"
-                     f" Solutions:\n"
-                     f" 1. Download model: ollama pull {model}\n"
-                     f" 2. Use an available model: {', '.join(available_models[:3])}\n"
-                     f" \n"
-                     f" Available models: {len(available_models)} found\n"
-                     f" To skip this check, use: MemAgent(check_connection=False)"
-                 )
-                 self.logger.error(error_msg)
-                 raise ValueError(f"Model '{model}' not available")
+             # Check if model exists (for backends that support listing)
+             try:
+                 available_models = self.llm.list_models()
+                 if available_models and model not in available_models:
+                     error_msg = (
+                         f"❌ ERROR: Model '{model}' not found in {backend}!\n"
+                         f" \n"
+                         f" Available models: {', '.join(available_models[:5])}\n"
+                         f" Total: {len(available_models)} models available\n"
+                         f" \n"
+                         f" To skip this check, use: MemAgent(check_connection=False)"
+                     )
+                     self.logger.error(error_msg)
+                     raise ValueError(f"Model '{model}' not available")
+             except:
+                 # Some backends may not support list_models, skip check
+                 pass
 
-             self.logger.info(f"✅ Ollama connection verified, model '{model}' ready")
+             self.logger.info(f"✅ {backend_name} connection verified, model '{model}' ready")
 
-         self.logger.info(f"LLM client ready: {model}")
+         self.logger.info(f"LLM client ready: {model} on {backend}")
 
-         # Initialize state variables FIRST
-         self.current_user: Optional[str] = None
-         self.current_system_prompt: Optional[str] = None
 
          # Advanced features (if available)
          if ADVANCED_AVAILABLE:
@@ -203,10 +308,13 @@ class MemAgent:
          if ADVANCED_AVAILABLE and hasattr(self, 'config') and self.config:
              log_config = self.config.get("logging", {})
 
+             # Default to WARNING level to keep console clean (users can override in config)
+             default_level = "WARNING"
+
              if log_config.get("enabled", True):
                  # Only console logging (no file) - keep workspace clean
                  logging.basicConfig(
-                     level=getattr(logging, log_config.get("level", "INFO")),
+                     level=getattr(logging, log_config.get("level", default_level)),
                      format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                      handlers=[
                          logging.StreamHandler()  # Console only
@@ -214,6 +322,9 @@ class MemAgent:
                  )
 
          self.logger = logging.getLogger("MemAgent")
+
+         # Set default level for mem_llm loggers
+         logging.getLogger("mem_llm").setLevel(getattr(logging, log_config.get("level", default_level)))
 
      def _setup_advanced_features(self, load_knowledge_base: bool) -> None:
          """Setup advanced features"""
mem_llm/memory_db.py CHANGED
@@ -14,12 +14,18 @@ from pathlib import Path
  class SQLMemoryManager:
      """SQLite-based memory management system with thread-safety"""
 
-     def __init__(self, db_path: str = "memories.db"):
+     def __init__(self, db_path: str = "memories/memories.db"):
          """
          Args:
              db_path: SQLite database file path
          """
          self.db_path = Path(db_path)
+
+         # Ensure directory exists
+         db_dir = self.db_path.parent
+         if not db_dir.exists():
+             db_dir.mkdir(parents=True, exist_ok=True)
+
          self.conn = None
          self._lock = threading.RLock()  # Reentrant lock for thread safety
          self._init_database()
mem_llm/thread_safe_db.py CHANGED
@@ -124,7 +124,7 @@ class ConnectionPool:
  class ThreadSafeSQLMemory:
      """Thread-safe wrapper for SQL memory operations"""
 
-     def __init__(self, db_path: str = "memories.db", pool_size: int = 5):
+     def __init__(self, db_path: str = "memories/memories.db", pool_size: int = 5):
          """
          Initialize thread-safe SQL memory
 
@@ -133,6 +133,12 @@ class ThreadSafeSQLMemory:
              pool_size: Connection pool size
          """
          self.db_path = Path(db_path)
+
+         # Ensure directory exists
+         db_dir = self.db_path.parent
+         if not db_dir.exists():
+             db_dir.mkdir(parents=True, exist_ok=True)
+
          self.pool = ConnectionPool(str(db_path), pool_size)
          self.logger = logging.getLogger(__name__)
          self._init_database()
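Both storage classes now default to memories/memories.db and create the parent directory of whatever path they are given. A minimal sketch of the effect, assuming SQLMemoryManager is imported from mem_llm.memory_db:

# Illustrative sketch only (not in the package).
from mem_llm.memory_db import SQLMemoryManager

# The parent directory "data/agents" is created automatically if it is missing.
memory = SQLMemoryManager("data/agents/customer.db")

# With no argument the file now lands in ./memories/memories.db (previously ./memories.db).
default_memory = SQLMemoryManager()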