mem_llm-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,260 @@
+ """
+ LLM Client Factory
+ ==================
+
+ Factory pattern for creating LLM clients.
+ Supports multiple backends with automatic detection.
+
+ Supported Backends:
+ - Ollama: Local Ollama service
+ - LM Studio: Local LM Studio server
+
+ Usage:
+     # Create specific backend
+     client = LLMClientFactory.create('ollama', model='llama3')
+
+     # Auto-detect available backend
+     client = LLMClientFactory.auto_detect()
+
+     # Get all available backends
+     backends = LLMClientFactory.get_available_backends()
+
+ Author: C. Emre Karataş
+ Version: 1.3.0
+ """
+
+ from typing import Optional, Dict, List, Any
+ import logging
+
+ from .clients.ollama_client import OllamaClient
+ from .clients.lmstudio_client import LMStudioClient
+ from .base_llm_client import BaseLLMClient
+
+
+ class LLMClientFactory:
+     """
+     Factory for creating LLM clients
+
+     Provides unified interface for creating different LLM backends.
+     Supports auto-detection of available local services.
+     """
+
+     # Registry of supported backends
+     BACKENDS = {
+         'ollama': {
+             'class': OllamaClient,
+             'description': 'Local Ollama service',
+             'type': 'local',
+             'default_url': 'http://localhost:11434',
+             'default_model': 'granite4:3b'
+         },
+         'lmstudio': {
+             'class': LMStudioClient,
+             'description': 'LM Studio local server (OpenAI-compatible)',
+             'type': 'local',
+             'default_url': 'http://localhost:1234',
+             'default_model': 'local-model'
+         }
+     }
+
+     @staticmethod
+     def create(backend: str, model: Optional[str] = None, **kwargs) -> BaseLLMClient:
+         """
+         Create LLM client for specified backend
+
+         Args:
+             backend: Backend name ('ollama', 'lmstudio')
+             model: Model name (uses default if None)
+             **kwargs: Backend-specific configuration
+                 - base_url: API endpoint (for local backends)
+                 - temperature: Default temperature
+                 - max_tokens: Default max tokens
+
+         Returns:
+             Configured LLM client
+
+         Raises:
+             ValueError: If backend is not supported
+
+         Examples:
+             # Ollama
+             client = LLMClientFactory.create('ollama', model='llama3')
+
+             # LM Studio
+             client = LLMClientFactory.create(
+                 'lmstudio',
+                 model='llama-3-8b',
+                 base_url='http://localhost:1234'
+             )
+         """
+         backend = backend.lower()
+
+         if backend not in LLMClientFactory.BACKENDS:
+             available = ', '.join(LLMClientFactory.BACKENDS.keys())
+             raise ValueError(
+                 f"Unsupported backend: '{backend}'. "
+                 f"Available backends: {available}"
+             )
+
+         backend_info = LLMClientFactory.BACKENDS[backend]
+         client_class = backend_info['class']
+
+         # Use default model if not specified
+         if not model:
+             model = backend_info.get('default_model')
+
+         # Add default base_url for local backends if not provided
+         if backend_info['type'] == 'local' and 'base_url' not in kwargs:
+             kwargs['base_url'] = backend_info.get('default_url')
+
+         # Create and return client
+         try:
+             return client_class(model=model, **kwargs)
+         except Exception as e:
+             raise ValueError(f"Failed to create {backend} client: {str(e)}") from e
+
+     @staticmethod
+     def auto_detect(preferred_backends: Optional[List[str]] = None) -> Optional[BaseLLMClient]:
+         """
+         Auto-detect available LLM service
+
+         Checks common local services and returns the first available one.
+         Useful for applications that should work with any available backend.
+
+         Args:
+             preferred_backends: List of backends to check in order
+                 (if None, checks all in default order)
+
+         Returns:
+             First available LLM client, or None if none available
+
+         Example:
+             # Try to find any available backend
+             client = LLMClientFactory.auto_detect()
+             if client:
+                 print(f"Using {client.get_info()['backend']}")
+             else:
+                 print("No LLM service found")
+
+             # Try specific backends in order
+             client = LLMClientFactory.auto_detect(['lmstudio', 'ollama'])
+         """
+         logger = logging.getLogger('LLMClientFactory')
+
+         # Default check order: local services first
+         if preferred_backends is None:
+             preferred_backends = ['ollama', 'lmstudio']
+
+         for backend_name in preferred_backends:
+             if backend_name not in LLMClientFactory.BACKENDS:
+                 logger.warning(f"Unknown backend in auto-detect: {backend_name}")
+                 continue
+
+             backend_info = LLMClientFactory.BACKENDS[backend_name]
+
+             # Skip cloud services in auto-detect (they require API keys)
+             if backend_info['type'] == 'cloud':
+                 logger.debug(f"Skipping cloud backend in auto-detect: {backend_name}")
+                 continue
+
+             try:
+                 # Try to create client with defaults
+                 client = LLMClientFactory.create(backend_name)
+
+                 # Check if service is actually running
+                 if client.check_connection():
+                     logger.info(f"✅ Detected {backend_name} at {backend_info.get('default_url')}")
+                     return client
+                 else:
+                     logger.debug(f"Service not running: {backend_name}")
+
+             except Exception as e:
+                 logger.debug(f"Failed to detect {backend_name}: {e}")
+                 continue
+
+         logger.warning("⚠️ No local LLM service detected")
+         return None
+
+     @staticmethod
+     def get_available_backends() -> List[Dict[str, Any]]:
+         """
+         Get list of all supported backends with their info
+
+         Returns:
+             List of backend information dictionaries
+
+         Example:
+             backends = LLMClientFactory.get_available_backends()
+             for backend in backends:
+                 print(f"{backend['name']}: {backend['description']}")
+         """
+         result = []
+
+         for name, info in LLMClientFactory.BACKENDS.items():
+             backend_dict = {
+                 'name': name,
+                 'description': info['description'],
+                 'type': info['type'],
+                 'default_model': info.get('default_model'),
+                 'requires_api_key': info.get('requires_api_key', False)
+             }
+
+             if info['type'] == 'local':
+                 backend_dict['default_url'] = info.get('default_url')
+
+             result.append(backend_dict)
+
+         return result
+
+     @staticmethod
+     def check_backend_availability(backend: str, **kwargs) -> bool:
+         """
+         Check if a specific backend is available
+
+         Args:
+             backend: Backend name
+             **kwargs: Configuration for creating the client
+
+         Returns:
+             True if backend is available and responding
+
+         Example:
+             # Check if Ollama is running
+             if LLMClientFactory.check_backend_availability('ollama'):
+                 print("Ollama is available")
+
+             # Check custom LM Studio URL
+             if LLMClientFactory.check_backend_availability(
+                 'lmstudio',
+                 base_url='http://localhost:5000'
+             ):
+                 print("LM Studio is available")
+         """
+         try:
+             client = LLMClientFactory.create(backend, **kwargs)
+             return client.check_connection()
+         except Exception:
+             return False
+
+     @staticmethod
+     def get_backend_info(backend: str) -> Dict[str, Any]:
+         """
+         Get information about a specific backend
+
+         Args:
+             backend: Backend name
+
+         Returns:
+             Backend information dictionary
+
+         Raises:
+             ValueError: If backend not found
+         """
+         if backend not in LLMClientFactory.BACKENDS:
+             raise ValueError(f"Unknown backend: {backend}")
+
+         info = LLMClientFactory.BACKENDS[backend].copy()
+         # Remove class reference for JSON serialization
+         info.pop('class', None)
+         return info
+
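Taken together, the factory above is meant to be driven roughly as follows. This is a minimal sketch, not code shipped in the wheel: it assumes a local Ollama service on its default port, and the import path mem_llm.llm_client_factory is an assumption, since the filename of this first module is not shown in the diff.

    from mem_llm.llm_client_factory import LLMClientFactory

    # Prefer an explicit backend; fall back to auto-detection if it is not reachable.
    client = LLMClientFactory.create('ollama', model='granite4:3b')
    if not client.check_connection():
        client = LLMClientFactory.auto_detect(['ollama', 'lmstudio'])

    if client is None:
        raise SystemExit("No local LLM service detected")

    # Inspect what the factory knows about each registered backend.
    for backend in LLMClientFactory.get_available_backends():
        print(f"{backend['name']} ({backend['type']}): {backend['description']}")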
mem_llm/logger.py ADDED
@@ -0,0 +1,129 @@
+ """
+ Enhanced Logging System for Mem-LLM
+ ====================================
+ Provides structured logging with different levels and output formats.
+ """
+
+ import logging
+ import sys
+ from datetime import datetime
+ from pathlib import Path
+ from typing import Optional
+
+
+ class MemLLMLogger:
+     """Structured logger for Mem-LLM with file and console output"""
+
+     def __init__(self,
+                  name: str = "mem_llm",
+                  log_file: Optional[str] = None,
+                  log_level: str = "INFO",
+                  console_output: bool = True):
+         """
+         Initialize logger
+
+         Args:
+             name: Logger name
+             log_file: Path to log file (optional)
+             log_level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+             console_output: Enable console output
+         """
+         self.logger = logging.getLogger(name)
+         self.logger.setLevel(getattr(logging, log_level.upper()))
+
+         # Clear existing handlers
+         self.logger.handlers = []
+
+         # Formatter
+         formatter = logging.Formatter(
+             '%(asctime)s | %(name)s | %(levelname)s | %(message)s',
+             datefmt='%Y-%m-%d %H:%M:%S'
+         )
+
+         # Console handler
+         if console_output:
+             console_handler = logging.StreamHandler(sys.stdout)
+             console_handler.setFormatter(formatter)
+             self.logger.addHandler(console_handler)
+
+         # File handler
+         if log_file:
+             log_path = Path(log_file)
+             log_path.parent.mkdir(parents=True, exist_ok=True)
+             file_handler = logging.FileHandler(log_file, encoding='utf-8')
+             file_handler.setFormatter(formatter)
+             self.logger.addHandler(file_handler)
+
+     def debug(self, message: str, **kwargs):
+         """Debug level log"""
+         extra_info = " | ".join(f"{k}={v}" for k, v in kwargs.items())
+         full_message = f"{message} | {extra_info}" if extra_info else message
+         self.logger.debug(full_message)
+
+     def info(self, message: str, **kwargs):
+         """Info level log"""
+         extra_info = " | ".join(f"{k}={v}" for k, v in kwargs.items())
+         full_message = f"{message} | {extra_info}" if extra_info else message
+         self.logger.info(full_message)
+
+     def warning(self, message: str, **kwargs):
+         """Warning level log"""
+         extra_info = " | ".join(f"{k}={v}" for k, v in kwargs.items())
+         full_message = f"{message} | {extra_info}" if extra_info else message
+         self.logger.warning(full_message)
+
+     def error(self, message: str, **kwargs):
+         """Error level log"""
+         extra_info = " | ".join(f"{k}={v}" for k, v in kwargs.items())
+         full_message = f"{message} | {extra_info}" if extra_info else message
+         self.logger.error(full_message)
+
+     def critical(self, message: str, **kwargs):
+         """Critical level log"""
+         extra_info = " | ".join(f"{k}={v}" for k, v in kwargs.items())
+         full_message = f"{message} | {extra_info}" if extra_info else message
+         self.logger.critical(full_message)
+
+     def log_llm_call(self, model: str, prompt_length: int, response_length: int, duration: float):
+         """Log LLM API call with metrics"""
+         self.info(
+             "LLM API Call",
+             model=model,
+             prompt_tokens=prompt_length,
+             response_tokens=response_length,
+             duration_ms=f"{duration*1000:.2f}"
+         )
+
+     def log_memory_operation(self, operation: str, user_id: str, success: bool, details: str = ""):
+         """Log memory operations"""
+         level = self.info if success else self.error
+         level(
+             f"Memory {operation}",
+             user_id=user_id,
+             success=success,
+             details=details
+         )
+
+     def log_error_with_context(self, error: Exception, context: dict):
+         """Log error with full context"""
+         self.error(
+             f"Exception: {type(error).__name__}: {str(error)}",
+             **context
+         )
+
+
+ def get_logger(name: str = "mem_llm",
+                log_file: Optional[str] = "logs/mem_llm.log",
+                log_level: str = "INFO") -> MemLLMLogger:
+     """
+     Get or create logger instance
+
+     Args:
+         name: Logger name
+         log_file: Log file path
+         log_level: Logging level
+
+     Returns:
+         MemLLMLogger instance
+     """
+     return MemLLMLogger(name=name, log_file=log_file, log_level=log_level)
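For completeness, a minimal sketch of how the logger module could be exercised. The import path mem_llm.logger matches the file header above; the user IDs, token counts, and durations passed below are illustrative values only.

    from mem_llm.logger import get_logger

    # Console plus file output under logs/, INFO level by default.
    log = get_logger(name="mem_llm", log_file="logs/mem_llm.log", log_level="INFO")

    log.info("Starting session", user_id="demo-user")
    log.log_llm_call(model="granite4:3b", prompt_length=512, response_length=128, duration=1.42)
    log.log_memory_operation("save", user_id="demo-user", success=True, details="profile updated")

    try:
        raise RuntimeError("simulated failure")
    except RuntimeError as exc:
        log.log_error_with_context(exc, {"user_id": "demo-user", "operation": "save"})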