mseep-lightfast-mcp 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- common/__init__.py +21 -0
- common/types.py +182 -0
- lightfast_mcp/__init__.py +50 -0
- lightfast_mcp/core/__init__.py +14 -0
- lightfast_mcp/core/base_server.py +205 -0
- lightfast_mcp/exceptions.py +55 -0
- lightfast_mcp/servers/__init__.py +1 -0
- lightfast_mcp/servers/blender/__init__.py +5 -0
- lightfast_mcp/servers/blender/server.py +358 -0
- lightfast_mcp/servers/blender_mcp_server.py +82 -0
- lightfast_mcp/servers/mock/__init__.py +5 -0
- lightfast_mcp/servers/mock/server.py +101 -0
- lightfast_mcp/servers/mock/tools.py +161 -0
- lightfast_mcp/servers/mock_server.py +78 -0
- lightfast_mcp/utils/__init__.py +1 -0
- lightfast_mcp/utils/logging_utils.py +69 -0
- mseep_lightfast_mcp-0.0.1.dist-info/METADATA +36 -0
- mseep_lightfast_mcp-0.0.1.dist-info/RECORD +43 -0
- mseep_lightfast_mcp-0.0.1.dist-info/WHEEL +5 -0
- mseep_lightfast_mcp-0.0.1.dist-info/entry_points.txt +7 -0
- mseep_lightfast_mcp-0.0.1.dist-info/licenses/LICENSE +21 -0
- mseep_lightfast_mcp-0.0.1.dist-info/top_level.txt +3 -0
- tools/__init__.py +46 -0
- tools/ai/__init__.py +8 -0
- tools/ai/conversation_cli.py +345 -0
- tools/ai/conversation_client.py +399 -0
- tools/ai/conversation_session.py +342 -0
- tools/ai/providers/__init__.py +11 -0
- tools/ai/providers/base_provider.py +64 -0
- tools/ai/providers/claude_provider.py +200 -0
- tools/ai/providers/openai_provider.py +204 -0
- tools/ai/tool_executor.py +257 -0
- tools/common/__init__.py +99 -0
- tools/common/async_utils.py +419 -0
- tools/common/errors.py +222 -0
- tools/common/logging.py +252 -0
- tools/common/types.py +130 -0
- tools/orchestration/__init__.py +15 -0
- tools/orchestration/cli.py +320 -0
- tools/orchestration/config_loader.py +348 -0
- tools/orchestration/server_orchestrator.py +466 -0
- tools/orchestration/server_registry.py +187 -0
- tools/orchestration/server_selector.py +242 -0
|
@@ -0,0 +1,419 @@
|
|
|
1
|
+
"""Async utilities and patterns for better performance and reliability."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import time
|
|
5
|
+
from contextlib import asynccontextmanager
|
|
6
|
+
from typing import Any, Callable, Dict, List, Optional, TypeVar
|
|
7
|
+
|
|
8
|
+
from fastmcp import Client
|
|
9
|
+
|
|
10
|
+
from .errors import (
|
|
11
|
+
ConnectionPoolError,
|
|
12
|
+
ConnectionPoolExhaustedError,
|
|
13
|
+
ServerConnectionError,
|
|
14
|
+
)
|
|
15
|
+
from .logging import get_logger, with_correlation_id
|
|
16
|
+
from .types import OperationStatus, Result
|
|
17
|
+
|
|
18
|
+
logger = get_logger("AsyncUtils")
|
|
19
|
+
T = TypeVar("T")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class ConnectionPool:
    """Manages persistent connections to MCP servers.

    Each registered server gets its own bounded ``asyncio.Queue`` of reusable
    FastMCP ``Client`` objects.  A background task (started by ``initialize``)
    reaps connections that have been idle longer than ``idle_timeout`` seconds.
    """

    def __init__(
        self,
        max_connections_per_server: int = 5,
        connection_timeout: float = 30.0,
        idle_timeout: float = 300.0,  # 5 minutes
    ):
        # Upper bound on connections tracked per server (also the queue size).
        self.max_connections = max_connections_per_server
        # How long _acquire_connection waits for a pooled connection to free up.
        self.connection_timeout = connection_timeout
        # Connections unused for this many seconds are closed by the reaper.
        self.idle_timeout = idle_timeout

        # Pool storage: server_name -> Queue of available connections
        self._pools: Dict[str, asyncio.Queue] = {}
        # server_name -> count of connections created and not yet closed.
        self._active_connections: Dict[str, int] = {}
        # server_name -> the raw config dict passed to register_server().
        self._connection_configs: Dict[str, Dict[str, Any]] = {}
        # server_name -> {client: unix timestamp of last checkout}.
        self._last_used: Dict[str, Dict[Client, float]] = {}

        # Cleanup task
        self._cleanup_task: Optional[asyncio.Task] = None
        self._shutdown = False

    async def initialize(self):
        """Initialize the connection pool.

        Starts the idle-connection reaper exactly once; requires a running
        event loop (asyncio.create_task).  Safe to call multiple times.
        """
        if not self._cleanup_task:
            self._cleanup_task = asyncio.create_task(self._cleanup_idle_connections())

    async def register_server(
        self, server_name: str, connection_config: Dict[str, Any]
    ):
        """Register a server configuration for connection pooling.

        Re-registering an existing server replaces its config but keeps the
        existing pool and counters.  (Performs no awaits; async only for
        interface symmetry with the rest of the pool API.)
        """
        self._connection_configs[server_name] = connection_config
        if server_name not in self._pools:
            self._pools[server_name] = asyncio.Queue(maxsize=self.max_connections)
            self._active_connections[server_name] = 0
            self._last_used[server_name] = {}

    @asynccontextmanager
    async def get_connection(self, server_name: str):
        """Get a connection from the pool or create a new one.

        Yields a *connected* client (the FastMCP client's own async context
        manager performs the connect/disconnect handshake).

        Raises:
            ConnectionPoolError: if ``server_name`` was never registered.
            ConnectionPoolExhaustedError: if no connection frees up in time.
        """
        if server_name not in self._connection_configs:
            raise ConnectionPoolError(f"Server {server_name} not registered")

        connection = None
        try:
            connection = await self._acquire_connection(server_name)
            # Use the FastMCP client's context manager to ensure proper connection
            async with connection as connected_client:
                yield connected_client
        finally:
            # NOTE(review): the client is returned to the pool even when the
            # body above raised — presumably FastMCP clients can reconnect on
            # re-entry; confirm a failed client is safe to recycle.
            if connection:
                await self._release_connection(server_name, connection)

    async def _acquire_connection(self, server_name: str) -> Client:
        """Acquire a connection from the pool.

        Order of preference: an idle pooled client, then (if under the cap) a
        freshly created client, otherwise block up to ``connection_timeout``
        for a release.
        """
        pool = self._pools[server_name]

        # Try to get an existing connection from the pool
        try:
            connection = pool.get_nowait()
            self._last_used[server_name][connection] = time.time()
            logger.debug(f"Reused connection for {server_name}")
            return connection
        except asyncio.QueueEmpty:
            pass

        # Check if we can create a new connection
        if self._active_connections[server_name] >= self.max_connections:
            # Wait for a connection to become available
            try:
                connection = await asyncio.wait_for(
                    pool.get(), timeout=self.connection_timeout
                )
                self._last_used[server_name][connection] = time.time()
                logger.debug(f"Got pooled connection for {server_name}")
                return connection
            except asyncio.TimeoutError:
                raise ConnectionPoolExhaustedError(
                    f"No connections available for {server_name} within timeout"
                )

        # Create a new connection
        connection = await self._create_connection(server_name)
        self._active_connections[server_name] += 1
        self._last_used[server_name][connection] = time.time()
        logger.debug(f"Created new connection for {server_name}")
        return connection

    async def _create_connection(self, server_name: str) -> Client:
        """Create a new connection to the server.

        Builds a FastMCP ``Client`` from the registered config: a percent-
        encoded ``stdio://`` URL for type "stdio", otherwise the raw ``url``
        value (HTTP/SSE).  The client is returned unconnected.

        Raises:
            ServerConnectionError: wrapping any error from client creation.
        """
        config = self._connection_configs[server_name]

        try:
            if config.get("type") == "stdio":
                # For stdio connections
                import shlex
                import urllib.parse

                command = config.get("command", "")
                args = config.get("args", [])
                if args:
                    # Quote the whole command line so it survives URL embedding.
                    full_command = shlex.join([command] + args)
                    encoded_command = urllib.parse.quote(full_command, safe="")
                    client = Client(f"stdio://{encoded_command}")
                else:
                    encoded_command = urllib.parse.quote(command, safe="")
                    client = Client(f"stdio://{encoded_command}")
            else:
                # For HTTP/SSE connections
                url = config.get("url", "")
                client = Client(url)

            # Don't test the connection here - let the context manager handle it
            # The client will be connected when used in the context manager
            return client

        except Exception as e:
            raise ServerConnectionError(
                f"Failed to create connection to {server_name}",
                server_name=server_name,
                cause=e,
            )

    async def _release_connection(self, server_name: str, connection: Client):
        """Release a connection back to the pool.

        If the queue is already full the connection is closed instead.
        """
        pool = self._pools[server_name]

        try:
            # Put the connection back in the pool if there's space
            pool.put_nowait(connection)
            logger.debug(f"Released connection for {server_name}")
        except asyncio.QueueFull:
            # Pool is full, close the connection
            await self._close_connection(server_name, connection)

    async def _close_connection(self, server_name: str, connection: Client):
        """Close a connection and update counters.

        Decrements the active count and drops the last-used entry regardless
        of whether any real teardown happened.
        """
        try:
            # FastMCP clients don't have a direct close method
            # They are managed through context managers
            # Just clean up our tracking
            pass
        except Exception as e:
            logger.warning(f"Error closing connection for {server_name}: {e}")
        finally:
            self._active_connections[server_name] -= 1
            if connection in self._last_used[server_name]:
                del self._last_used[server_name][connection]

    async def _cleanup_idle_connections(self):
        """Periodically clean up idle connections.

        Runs until ``close_all`` sets ``_shutdown``; wakes every 60 seconds
        and closes connections idle longer than ``idle_timeout``.
        """
        while not self._shutdown:
            try:
                await asyncio.sleep(60)  # Check every minute
                current_time = time.time()

                for server_name, last_used_times in self._last_used.items():
                    pool = self._pools[server_name]
                    connections_to_close = []

                    # Check for idle connections
                    for connection, last_used in last_used_times.items():
                        if current_time - last_used > self.idle_timeout:
                            connections_to_close.append(connection)

                    # Close idle connections
                    for connection in connections_to_close:
                        try:
                            # Remove from pool if present
                            # (drain the queue and keep everything else).
                            temp_connections = []
                            while not pool.empty():
                                try:
                                    conn = pool.get_nowait()
                                    if conn != connection:
                                        temp_connections.append(conn)
                                except asyncio.QueueEmpty:
                                    break

                            # Put back non-idle connections
                            for conn in temp_connections:
                                try:
                                    pool.put_nowait(conn)
                                except asyncio.QueueFull:
                                    await self._close_connection(server_name, conn)

                            # Close the idle connection
                            # NOTE(review): if the idle connection is currently
                            # checked out (not in the queue) this still
                            # decrements the active counter — confirm intended.
                            await self._close_connection(server_name, connection)
                            logger.debug(f"Closed idle connection for {server_name}")

                        except Exception as e:
                            logger.warning(
                                f"Error during cleanup for {server_name}: {e}"
                            )

            except Exception as e:
                logger.error(f"Error in connection cleanup: {e}")

    async def close_all(self):
        """Close all connections and shutdown the pool.

        Cancels the reaper task, then drains every per-server queue and closes
        each pooled connection.  Connections currently checked out are not
        touched here.
        """
        self._shutdown = True

        if self._cleanup_task:
            self._cleanup_task.cancel()
            try:
                await self._cleanup_task
            except asyncio.CancelledError:
                pass

        # Close all connections
        for server_name, pool in self._pools.items():
            connections = []
            while not pool.empty():
                try:
                    connections.append(pool.get_nowait())
                except asyncio.QueueEmpty:
                    break

            for connection in connections:
                try:
                    await self._close_connection(server_name, connection)
                except Exception as e:
                    logger.warning(f"Error closing connection for {server_name}: {e}")

        logger.info("Connection pool closed")
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
class RetryManager:
    """Retries an async operation with exponential backoff and optional jitter."""

    def __init__(
        self,
        max_attempts: int = 3,
        base_delay: float = 1.0,
        max_delay: float = 60.0,
        exponential_base: float = 2.0,
        jitter: bool = True,
    ):
        # Backoff policy: delay for attempt k is
        # min(base_delay * exponential_base**(k-1), max_delay).
        self.max_attempts = max_attempts
        self.base_delay = base_delay
        self.max_delay = max_delay
        self.exponential_base = exponential_base
        self.jitter = jitter

    async def execute_with_retry(
        self,
        operation: Callable[[], Any],
        retryable_exceptions: tuple = (Exception,),
        operation_name: str = "operation",
    ) -> Result[Any]:
        """Execute an operation with retry logic.

        Awaits ``operation()`` up to ``max_attempts`` times.  Exceptions in
        ``retryable_exceptions`` trigger a backoff-and-retry; any other
        exception fails immediately.  Always returns a ``Result`` rather than
        raising.
        """
        last_error = None

        attempt = 0
        while attempt < self.max_attempts:
            attempt += 1
            try:
                logger.debug(
                    f"Executing {operation_name}, attempt {attempt}/{self.max_attempts}"
                )
                outcome = await operation()

                if attempt > 1:
                    logger.info(f"{operation_name} succeeded on attempt {attempt}")

                return Result(status=OperationStatus.SUCCESS, data=outcome)

            except retryable_exceptions as exc:
                last_error = exc

                if attempt == self.max_attempts:
                    logger.error(
                        f"{operation_name} failed after {attempt} attempts",
                        error=last_error,
                    )
                    break

                await self._pause_before_retry(attempt, operation_name, exc)

            except Exception as exc:
                # Non-retryable exception: fail fast with its type as the code.
                logger.error(
                    f"{operation_name} failed with non-retryable error", error=exc
                )
                return Result(
                    status=OperationStatus.FAILED,
                    error=str(exc),
                    error_code=type(exc).__name__,
                )

        # All retries exhausted.
        return Result(
            status=OperationStatus.FAILED,
            error=f"Operation failed after {self.max_attempts} attempts: {last_error}",
            error_code="RETRY_EXHAUSTED",
        )

    async def _pause_before_retry(
        self, attempt: int, operation_name: str, exc: Exception
    ) -> None:
        """Sleep for the backoff delay appropriate to ``attempt``."""
        # Exponential backoff, capped at max_delay.
        delay = min(
            self.base_delay * (self.exponential_base ** (attempt - 1)),
            self.max_delay,
        )

        # Jitter (50%-100% of the nominal delay) avoids thundering herds.
        if self.jitter:
            import random

            delay *= 0.5 + random.random() * 0.5

        logger.warning(
            f"{operation_name} failed on attempt {attempt}, retrying in {delay:.2f}s",
            error=exc,
        )

        await asyncio.sleep(delay)
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
@with_correlation_id
async def run_concurrent_operations(
    operations: List[Callable[[], Any]],
    max_concurrent: int = 5,
    operation_names: Optional[List[str]] = None,
) -> List[Result[Any]]:
    """Run multiple operations concurrently with controlled concurrency.

    Each operation is awaited under a semaphore of size ``max_concurrent``;
    failures are captured as FAILED ``Result`` objects, never raised.  Returns
    one ``Result`` per operation, in input order.
    """
    names = (
        operation_names
        if operation_names is not None
        else [f"operation_{i}" for i in range(len(operations))]
    )

    # Handle edge case where max_concurrent is 0 or negative - use unlimited concurrency
    limit = max_concurrent if max_concurrent > 0 else (len(operations) or 1)
    gate = asyncio.Semaphore(limit)

    async def _guarded(op: Callable, name: str) -> Result[Any]:
        """Run one operation under the semaphore, timing it and trapping errors."""
        async with gate:
            try:
                started = time.time()
                value = await op()
                elapsed_ms = (time.time() - started) * 1000

                return Result(
                    status=OperationStatus.SUCCESS, data=value, duration_ms=elapsed_ms
                )
            except Exception as exc:
                logger.error(f"Operation {name} failed", error=exc)
                return Result(
                    status=OperationStatus.FAILED,
                    error=str(exc),
                    error_code=type(exc).__name__,
                )

    raw_results = await asyncio.gather(
        *(_guarded(op, name) for op, name in zip(operations, names)),
        return_exceptions=True,
    )

    # Normalize anything unexpected into Result objects.
    final_results: List[Result[Any]] = []
    for item in raw_results:
        if isinstance(item, Exception):
            final_results.append(
                Result(
                    status=OperationStatus.FAILED,
                    error=str(item),
                    error_code=type(item).__name__,
                )
            )
        elif isinstance(item, Result):
            final_results.append(item)
        else:
            # This shouldn't happen, but handle it gracefully
            final_results.append(
                Result(
                    status=OperationStatus.FAILED,
                    error=f"Unexpected result type: {type(item)}",
                    error_code="UNEXPECTED_RESULT_TYPE",
                )
            )

    return final_results
|
|
399
|
+
|
|
400
|
+
|
|
401
|
+
# Global connection pool instance
_connection_pool: Optional[ConnectionPool] = None


async def get_connection_pool() -> ConnectionPool:
    """Return the process-wide ``ConnectionPool``, creating it on first use."""
    global _connection_pool
    # Fast path: already initialized.
    if _connection_pool is not None:
        return _connection_pool
    # Lazy creation: build the pool, then start its cleanup task.
    _connection_pool = ConnectionPool()
    await _connection_pool.initialize()
    return _connection_pool
|
|
412
|
+
|
|
413
|
+
|
|
414
|
+
async def shutdown_connection_pool():
    """Tear down the process-wide pool (if any) and clear the global reference."""
    global _connection_pool
    pool = _connection_pool
    if not pool:
        return
    # Close every pooled connection before dropping the singleton.
    await pool.close_all()
    _connection_pool = None
|
tools/common/errors.py
ADDED
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"""Custom exception hierarchy for better error handling."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, Optional
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class LightfastMCPError(Exception):
    """Base exception for all Lightfast MCP errors.

    Carries a machine-readable ``error_code``, a free-form ``details`` mapping,
    and an optional underlying ``cause`` exception.
    """

    def __init__(
        self,
        message: str,
        error_code: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
        cause: Optional[Exception] = None,
    ):
        super().__init__(message)
        # Fall back to the concrete class name when no code is supplied.
        self.error_code = error_code if error_code else type(self).__name__
        self.details = details if details else {}
        self.cause = cause

    def to_dict(self) -> Dict[str, Any]:
        """Convert error to dictionary for serialization."""
        serialized: Dict[str, Any] = {
            "error_type": type(self).__name__,
            "error_code": self.error_code,
            "message": str(self),
            "details": self.details,
        }
        # The cause is flattened to its string form (or None) for transport.
        serialized["cause"] = str(self.cause) if self.cause else None
        return serialized
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class ConfigurationError(LightfastMCPError):
    """Raised when loading or validating configuration fails."""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ValidationError(LightfastMCPError):
    """Raised when supplied input fails validation."""
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class ServerError(LightfastMCPError):
    """Base class for server operation errors."""

    def __init__(
        self,
        message: str,
        server_name: Optional[str] = None,
        error_code: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
        cause: Optional[Exception] = None,
    ):
        """Record which server failed alongside the base error metadata."""
        super().__init__(message, error_code, details, cause)
        self.server_name = server_name
        # Only a truthy name is mirrored into the serializable details mapping.
        if server_name:
            self.details.update(server_name=server_name)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class ServerStartupError(ServerError):
    """Raised when a server fails to start."""
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class ServerShutdownError(ServerError):
    """Raised when a server does not shut down gracefully."""
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class ServerConnectionError(ServerError):
    """Server connection issues.

    Extends ServerError with the optional host/port of the failed endpoint,
    mirrored into ``details`` for serialization.
    """

    def __init__(
        self,
        message: str,
        server_name: Optional[str] = None,
        host: Optional[str] = None,
        port: Optional[int] = None,
        error_code: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
        cause: Optional[Exception] = None,
    ):
        super().__init__(message, server_name, error_code, details, cause)
        if host:
            self.details["host"] = host
        # Explicit None check: port 0 is falsy but still a real value, and an
        # `is not None` test matches how ConversationError records step_number.
        if port is not None:
            self.details["port"] = port
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
class ServerHealthCheckError(ServerError):
    """Raised when a server health check fails."""
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
class ToolExecutionError(LightfastMCPError):
    """Tool execution errors."""

    def __init__(
        self,
        message: str,
        tool_name: Optional[str] = None,
        server_name: Optional[str] = None,
        error_code: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
        cause: Optional[Exception] = None,
    ):
        """Capture which tool on which server failed."""
        super().__init__(message, error_code, details, cause)
        self.tool_name = tool_name
        self.server_name = server_name
        # Mirror only truthy identifiers into the serializable details mapping.
        for label, value in (("tool_name", tool_name), ("server_name", server_name)):
            if value:
                self.details[label] = value
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class ToolNotFoundError(ToolExecutionError):
    """Raised when the requested tool does not exist."""
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class ToolTimeoutError(ToolExecutionError):
    """Raised when a tool call exceeds its time budget."""
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
class AIProviderError(LightfastMCPError):
    """AI provider communication errors."""

    def __init__(
        self,
        message: str,
        provider: Optional[str] = None,
        error_code: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
        cause: Optional[Exception] = None,
    ):
        """Record which AI provider raised the error."""
        super().__init__(message, error_code, details, cause)
        self.provider = provider
        # Only a truthy provider name is mirrored into details.
        if provider:
            self.details.update(provider=provider)
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class AIProviderAuthError(AIProviderError):
    """Raised when authentication with the AI provider fails."""
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class AIProviderRateLimitError(AIProviderError):
    """Raised when the AI provider's rate limit is exceeded."""
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
class AIProviderQuotaError(AIProviderError):
    """Raised when the AI provider's usage quota is exceeded."""
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
class ConversationError(LightfastMCPError):
    """Conversation-related errors."""

    def __init__(
        self,
        message: str,
        session_id: Optional[str] = None,
        step_number: Optional[int] = None,
        error_code: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
        cause: Optional[Exception] = None,
    ):
        """Attach the conversation session and step where the error occurred."""
        super().__init__(message, error_code, details, cause)
        self.session_id = session_id
        self.step_number = step_number
        if session_id:
            self.details.update(session_id=session_id)
        # step_number may legitimately be 0, so compare against None explicitly.
        if step_number is not None:
            self.details.update(step_number=step_number)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
class ConversationTimeoutError(ConversationError):
    """Raised when a conversation step times out."""
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
class ConnectionPoolError(LightfastMCPError):
    """Raised for connection-pool related failures."""
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
class ConnectionPoolExhaustedError(ConnectionPoolError):
    """Raised when every connection in the pool is already in use."""
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
class RetryExhaustedError(LightfastMCPError):
    """All retry attempts have been exhausted."""

    def __init__(
        self,
        message: str,
        attempts: int,
        last_error: Optional[Exception] = None,
        error_code: Optional[str] = None,
        details: Optional[Dict[str, Any]] = None,
    ):
        """Record how many attempts were made and the final failure."""
        # The last underlying exception doubles as the base class's cause.
        super().__init__(message, error_code, details, last_error)
        self.attempts = attempts
        self.last_error = last_error
        self.details["attempts"] = attempts
        if last_error:
            self.details.update(last_error=str(last_error))
|