kailash 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. kailash/__init__.py +3 -3
  2. kailash/api/custom_nodes_secure.py +3 -3
  3. kailash/api/gateway.py +1 -1
  4. kailash/api/studio.py +2 -3
  5. kailash/api/workflow_api.py +3 -4
  6. kailash/core/resilience/bulkhead.py +460 -0
  7. kailash/core/resilience/circuit_breaker.py +92 -10
  8. kailash/edge/discovery.py +86 -0
  9. kailash/mcp_server/__init__.py +334 -0
  10. kailash/mcp_server/advanced_features.py +1022 -0
  11. kailash/{mcp → mcp_server}/ai_registry_server.py +29 -4
  12. kailash/mcp_server/auth.py +789 -0
  13. kailash/mcp_server/client.py +712 -0
  14. kailash/mcp_server/discovery.py +1593 -0
  15. kailash/mcp_server/errors.py +673 -0
  16. kailash/mcp_server/oauth.py +1727 -0
  17. kailash/mcp_server/protocol.py +1126 -0
  18. kailash/mcp_server/registry_integration.py +587 -0
  19. kailash/mcp_server/server.py +1747 -0
  20. kailash/{mcp → mcp_server}/servers/ai_registry.py +2 -2
  21. kailash/mcp_server/transports.py +1169 -0
  22. kailash/mcp_server/utils/cache.py +510 -0
  23. kailash/middleware/auth/auth_manager.py +3 -3
  24. kailash/middleware/communication/api_gateway.py +2 -9
  25. kailash/middleware/communication/realtime.py +1 -1
  26. kailash/middleware/mcp/client_integration.py +1 -1
  27. kailash/middleware/mcp/enhanced_server.py +2 -2
  28. kailash/nodes/__init__.py +2 -0
  29. kailash/nodes/admin/audit_log.py +6 -6
  30. kailash/nodes/admin/permission_check.py +8 -8
  31. kailash/nodes/admin/role_management.py +32 -28
  32. kailash/nodes/admin/schema.sql +6 -1
  33. kailash/nodes/admin/schema_manager.py +13 -13
  34. kailash/nodes/admin/security_event.py +16 -20
  35. kailash/nodes/admin/tenant_isolation.py +3 -3
  36. kailash/nodes/admin/transaction_utils.py +3 -3
  37. kailash/nodes/admin/user_management.py +21 -22
  38. kailash/nodes/ai/a2a.py +11 -11
  39. kailash/nodes/ai/ai_providers.py +9 -12
  40. kailash/nodes/ai/embedding_generator.py +13 -14
  41. kailash/nodes/ai/intelligent_agent_orchestrator.py +19 -19
  42. kailash/nodes/ai/iterative_llm_agent.py +3 -3
  43. kailash/nodes/ai/llm_agent.py +213 -36
  44. kailash/nodes/ai/self_organizing.py +2 -2
  45. kailash/nodes/alerts/discord.py +4 -4
  46. kailash/nodes/api/graphql.py +6 -6
  47. kailash/nodes/api/http.py +12 -17
  48. kailash/nodes/api/rate_limiting.py +4 -4
  49. kailash/nodes/api/rest.py +15 -15
  50. kailash/nodes/auth/mfa.py +3 -4
  51. kailash/nodes/auth/risk_assessment.py +2 -2
  52. kailash/nodes/auth/session_management.py +5 -5
  53. kailash/nodes/auth/sso.py +143 -0
  54. kailash/nodes/base.py +6 -2
  55. kailash/nodes/base_async.py +16 -2
  56. kailash/nodes/base_with_acl.py +2 -2
  57. kailash/nodes/cache/__init__.py +9 -0
  58. kailash/nodes/cache/cache.py +1172 -0
  59. kailash/nodes/cache/cache_invalidation.py +870 -0
  60. kailash/nodes/cache/redis_pool_manager.py +595 -0
  61. kailash/nodes/code/async_python.py +2 -1
  62. kailash/nodes/code/python.py +196 -35
  63. kailash/nodes/compliance/data_retention.py +6 -6
  64. kailash/nodes/compliance/gdpr.py +5 -5
  65. kailash/nodes/data/__init__.py +10 -0
  66. kailash/nodes/data/optimistic_locking.py +906 -0
  67. kailash/nodes/data/readers.py +8 -8
  68. kailash/nodes/data/redis.py +349 -0
  69. kailash/nodes/data/sql.py +314 -3
  70. kailash/nodes/data/streaming.py +21 -0
  71. kailash/nodes/enterprise/__init__.py +8 -0
  72. kailash/nodes/enterprise/audit_logger.py +285 -0
  73. kailash/nodes/enterprise/batch_processor.py +22 -3
  74. kailash/nodes/enterprise/data_lineage.py +1 -1
  75. kailash/nodes/enterprise/mcp_executor.py +205 -0
  76. kailash/nodes/enterprise/service_discovery.py +150 -0
  77. kailash/nodes/enterprise/tenant_assignment.py +108 -0
  78. kailash/nodes/logic/async_operations.py +2 -2
  79. kailash/nodes/logic/convergence.py +1 -1
  80. kailash/nodes/logic/operations.py +1 -1
  81. kailash/nodes/monitoring/__init__.py +11 -1
  82. kailash/nodes/monitoring/health_check.py +456 -0
  83. kailash/nodes/monitoring/log_processor.py +817 -0
  84. kailash/nodes/monitoring/metrics_collector.py +627 -0
  85. kailash/nodes/monitoring/performance_benchmark.py +137 -11
  86. kailash/nodes/rag/advanced.py +7 -7
  87. kailash/nodes/rag/agentic.py +49 -2
  88. kailash/nodes/rag/conversational.py +3 -3
  89. kailash/nodes/rag/evaluation.py +3 -3
  90. kailash/nodes/rag/federated.py +3 -3
  91. kailash/nodes/rag/graph.py +3 -3
  92. kailash/nodes/rag/multimodal.py +3 -3
  93. kailash/nodes/rag/optimized.py +5 -5
  94. kailash/nodes/rag/privacy.py +3 -3
  95. kailash/nodes/rag/query_processing.py +6 -6
  96. kailash/nodes/rag/realtime.py +1 -1
  97. kailash/nodes/rag/registry.py +2 -6
  98. kailash/nodes/rag/router.py +1 -1
  99. kailash/nodes/rag/similarity.py +7 -7
  100. kailash/nodes/rag/strategies.py +4 -4
  101. kailash/nodes/security/abac_evaluator.py +6 -6
  102. kailash/nodes/security/behavior_analysis.py +5 -6
  103. kailash/nodes/security/credential_manager.py +1 -1
  104. kailash/nodes/security/rotating_credentials.py +11 -11
  105. kailash/nodes/security/threat_detection.py +8 -8
  106. kailash/nodes/testing/credential_testing.py +2 -2
  107. kailash/nodes/transform/processors.py +5 -5
  108. kailash/runtime/local.py +162 -14
  109. kailash/runtime/parameter_injection.py +425 -0
  110. kailash/runtime/parameter_injector.py +657 -0
  111. kailash/runtime/testing.py +2 -2
  112. kailash/testing/fixtures.py +2 -2
  113. kailash/workflow/builder.py +99 -18
  114. kailash/workflow/builder_improvements.py +207 -0
  115. kailash/workflow/input_handling.py +170 -0
  116. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/METADATA +21 -8
  117. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/RECORD +126 -101
  118. kailash/mcp/__init__.py +0 -53
  119. kailash/mcp/client.py +0 -445
  120. kailash/mcp/server.py +0 -292
  121. kailash/mcp/server_enhanced.py +0 -449
  122. kailash/mcp/utils/cache.py +0 -267
  123. /kailash/{mcp → mcp_server}/client_new.py +0 -0
  124. /kailash/{mcp → mcp_server}/utils/__init__.py +0 -0
  125. /kailash/{mcp → mcp_server}/utils/config.py +0 -0
  126. /kailash/{mcp → mcp_server}/utils/formatters.py +0 -0
  127. /kailash/{mcp → mcp_server}/utils/metrics.py +0 -0
  128. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/WHEEL +0 -0
  129. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/entry_points.txt +0 -0
  130. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/licenses/LICENSE +0 -0
  131. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/top_level.txt +0 -0
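Most of the churn in this release is the move of the kailash.mcp package to kailash.mcp_server (entries 11, 20, and 118-127 above): the old modules are deleted or relocated under the new path. For downstream code the import change presumably looks like the sketch below; this is an assumption based on the file moves, since the public exports of kailash.mcp_server in 0.6.4 are not visible in this diff.

# Hypothetical migration sketch: assumes the server classes keep their
# names under the renamed package (not verified against the 0.6.4 API).

# kailash 0.6.2 and earlier:
# from kailash.mcp import MCPServer

# kailash 0.6.4:
from kailash.mcp_server import MCPServer  # assumed new import path

server = MCPServer("ai-registry")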
kailash/mcp/server_enhanced.py
@@ -1,449 +0,0 @@
-"""
-Enhanced MCP Server with production-ready capabilities.
-
-This module provides an enhanced MCP server that includes caching, configuration,
-metrics, and other production features by default, while maintaining compatibility
-with the official Anthropic FastMCP framework.
-"""
-
-import asyncio
-import functools
-import logging
-from pathlib import Path
-from typing import Any, Callable, Dict, Optional, TypeVar, Union
-
-from .utils import CacheManager, ConfigManager, MetricsCollector, format_response
-
-logger = logging.getLogger(__name__)
-
-F = TypeVar("F", bound=Callable[..., Any])
-
-
-class EnhancedMCPServer:
-    """
-    Production-ready MCP server with enhanced capabilities.
-
-    Features included by default:
-    - Caching with TTL support
-    - Hierarchical configuration management
-    - Metrics collection and monitoring
-    - Response formatting utilities
-    - Error handling and logging
-
-    All features can be disabled if not needed.
-    """
-
-    def __init__(
-        self,
-        name: str,
-        config_file: Optional[Union[str, Path]] = None,
-        enable_cache: bool = True,
-        cache_ttl: int = 300,
-        enable_metrics: bool = True,
-        enable_formatting: bool = True,
-    ):
-        """
-        Initialize enhanced MCP server.
-
-        Args:
-            name: Server name
-            config_file: Optional configuration file path
-            enable_cache: Whether to enable caching (default: True)
-            cache_ttl: Default cache TTL in seconds (default: 300)
-            enable_metrics: Whether to enable metrics collection (default: True)
-            enable_formatting: Whether to enable response formatting (default: True)
-        """
-        self.name = name
-
-        # Initialize configuration
-        self.config = ConfigManager(config_file)
-
-        # Set default configuration values
-        self.config.update(
-            {
-                "server": {"name": name, "version": "1.0.0", "transport": "stdio"},
-                "cache": {
-                    "enabled": enable_cache,
-                    "default_ttl": cache_ttl,
-                    "max_size": 128,
-                },
-                "metrics": {
-                    "enabled": enable_metrics,
-                    "collect_performance": True,
-                    "collect_usage": True,
-                },
-                "formatting": {
-                    "enabled": enable_formatting,
-                    "default_format": "markdown",
-                },
-            }
-        )
-
-        # Initialize components
-        self.cache = CacheManager(
-            enabled=self.config.get("cache.enabled", enable_cache),
-            default_ttl=self.config.get("cache.default_ttl", cache_ttl),
-        )
-
-        self.metrics = MetricsCollector(
-            enabled=self.config.get("metrics.enabled", enable_metrics),
-            collect_performance=self.config.get("metrics.collect_performance", True),
-            collect_usage=self.config.get("metrics.collect_usage", True),
-        )
-
-        # FastMCP server instance (initialized lazily)
-        self._mcp = None
-        self._running = False
-
-        # Tool registry for management
-        self._tool_registry: Dict[str, Dict[str, Any]] = {}
-
-    def _init_mcp(self):
-        """Initialize FastMCP server."""
-        if self._mcp is not None:
-            return
-
-        try:
-            from mcp.server.fastmcp import FastMCP
-
-            self._mcp = FastMCP(self.name)
-            logger.info(f"Initialized FastMCP server: {self.name}")
-        except ImportError:
-            logger.error(
-                "FastMCP not available. Install with: pip install 'mcp[server]'"
-            )
-            raise
-
-    def tool(
-        self,
-        cache_key: Optional[str] = None,
-        cache_ttl: Optional[int] = None,
-        format_response: Optional[str] = None,
-    ):
-        """
-        Enhanced tool decorator with optional caching and metrics.
-
-        Args:
-            cache_key: Optional cache key for caching results
-            cache_ttl: Optional TTL override for this tool
-            format_response: Optional response format ("json", "markdown", "table", etc.)
-
-        Returns:
-            Decorated function with enhanced capabilities
-
-        Example:
-            @server.tool(cache_key="weather", cache_ttl=600, format_response="markdown")
-            async def get_weather(city: str) -> dict:
-                # Expensive API call - will be cached for 10 minutes
-                return await fetch_weather_data(city)
-        """
-
-        def decorator(func: F) -> F:
-            if self._mcp is None:
-                self._init_mcp()
-
-            # Get function name for registration
-            tool_name = func.__name__
-
-            # Create enhanced wrapper
-            enhanced_func = self._create_enhanced_tool(
-                func, tool_name, cache_key, cache_ttl, format_response
-            )
-
-            # Register with FastMCP
-            mcp_tool = self._mcp.tool()(enhanced_func)
-
-            # Track in registry
-            self._tool_registry[tool_name] = {
-                "function": mcp_tool,
-                "original_function": func,
-                "cached": cache_key is not None,
-                "cache_key": cache_key,
-                "cache_ttl": cache_ttl,
-                "format_response": format_response,
-            }
-
-            logger.debug(
-                f"Registered tool: {tool_name} (cached: {cache_key is not None})"
-            )
-            return mcp_tool
-
-        return decorator
-
-    def _create_enhanced_tool(
-        self,
-        func: F,
-        tool_name: str,
-        cache_key: Optional[str],
-        cache_ttl: Optional[int],
-        response_format: Optional[str],
-    ) -> F:
-        """Create enhanced tool function with caching, metrics, and formatting."""
-
-        @functools.wraps(func)
-        def sync_wrapper(*args, **kwargs):
-            # Apply metrics tracking
-            start_time = None
-            if self.metrics.enabled:
-                import time
-
-                start_time = time.time()
-
-            try:
-                # Try cache first if enabled
-                if cache_key and self.cache.enabled:
-                    cache = self.cache.get_cache(cache_key, ttl=cache_ttl)
-                    cache_lookup_key = self.cache._create_cache_key(
-                        tool_name, args, kwargs
-                    )
-
-                    result = cache.get(cache_lookup_key)
-                    if result is not None:
-                        logger.debug(f"Cache hit for {tool_name}")
-                        if self.metrics.enabled:
-                            latency = time.time() - start_time
-                            self.metrics.track_tool_call(tool_name, latency, True)
-                        return self._format_response(result, response_format)
-
-                # Execute function
-                result = func(*args, **kwargs)
-
-                # Cache result if enabled
-                if cache_key and self.cache.enabled:
-                    cache.set(cache_lookup_key, result)
-                    logger.debug(f"Cached result for {tool_name}")
-
-                # Track success metrics
-                if self.metrics.enabled:
-                    latency = time.time() - start_time
-                    self.metrics.track_tool_call(tool_name, latency, True)
-
-                return self._format_response(result, response_format)
-
-            except Exception as e:
-                # Track error metrics
-                if self.metrics.enabled and start_time:
-                    latency = time.time() - start_time
-                    self.metrics.track_tool_call(
-                        tool_name, latency, False, type(e).__name__
-                    )
-
-                logger.error(f"Error in tool {tool_name}: {e}")
-                raise
-
-        @functools.wraps(func)
-        async def async_wrapper(*args, **kwargs):
-            # Apply metrics tracking
-            start_time = None
-            if self.metrics.enabled:
-                import time
-
-                start_time = time.time()
-
-            try:
-                # Try cache first if enabled
-                if cache_key and self.cache.enabled:
-                    cache = self.cache.get_cache(cache_key, ttl=cache_ttl)
-                    cache_lookup_key = self.cache._create_cache_key(
-                        tool_name, args, kwargs
-                    )
-
-                    result = cache.get(cache_lookup_key)
-                    if result is not None:
-                        logger.debug(f"Cache hit for {tool_name}")
-                        if self.metrics.enabled:
-                            latency = time.time() - start_time
-                            self.metrics.track_tool_call(tool_name, latency, True)
-                        return self._format_response(result, response_format)
-
-                # Execute function
-                result = await func(*args, **kwargs)
-
-                # Cache result if enabled
-                if cache_key and self.cache.enabled:
-                    cache.set(cache_lookup_key, result)
-                    logger.debug(f"Cached result for {tool_name}")
-
-                # Track success metrics
-                if self.metrics.enabled:
-                    latency = time.time() - start_time
-                    self.metrics.track_tool_call(tool_name, latency, True)
-
-                return self._format_response(result, response_format)
-
-            except Exception as e:
-                # Track error metrics
-                if self.metrics.enabled and start_time:
-                    latency = time.time() - start_time
-                    self.metrics.track_tool_call(
-                        tool_name, latency, False, type(e).__name__
-                    )
-
-                logger.error(f"Error in tool {tool_name}: {e}")
-                raise
-
-        # Return appropriate wrapper based on function type
-        if asyncio.iscoroutinefunction(func):
-            return async_wrapper
-        else:
-            return sync_wrapper
-
-    def _format_response(self, result: Any, response_format: Optional[str]) -> Any:
-        """Format response if formatting is enabled."""
-        if not self.config.get("formatting.enabled", True) or not response_format:
-            return result
-
-        try:
-            return format_response(result, response_format)
-        except Exception as e:
-            logger.warning(f"Failed to format response: {e}")
-            return result
-
-    def resource(self, uri: str):
-        """
-        Add resource with metrics tracking.
-
-        Args:
-            uri: Resource URI pattern
-
-        Returns:
-            Decorated function
-        """
-
-        def decorator(func: F) -> F:
-            if self._mcp is None:
-                self._init_mcp()
-
-            # Wrap with metrics if enabled
-            if self.metrics.enabled:
-                func = self.metrics.track_tool(f"resource:{uri}")(func)
-
-            return self._mcp.resource(uri)(func)
-
-        return decorator
-
-    def prompt(self, name: str):
-        """
-        Add prompt with metrics tracking.
-
-        Args:
-            name: Prompt name
-
-        Returns:
-            Decorated function
-        """
-
-        def decorator(func: F) -> F:
-            if self._mcp is None:
-                self._init_mcp()
-
-            # Wrap with metrics if enabled
-            if self.metrics.enabled:
-                func = self.metrics.track_tool(f"prompt:{name}")(func)
-
-            return self._mcp.prompt(name)(func)
-
-        return decorator
-
-    def get_tool_stats(self) -> Dict[str, Any]:
-        """Get statistics for all registered tools."""
-        stats = {
-            "registered_tools": len(self._tool_registry),
-            "cached_tools": sum(1 for t in self._tool_registry.values() if t["cached"]),
-            "tools": {},
-        }
-
-        for tool_name, tool_info in self._tool_registry.items():
-            stats["tools"][tool_name] = {
-                "cached": tool_info["cached"],
-                "cache_key": tool_info.get("cache_key"),
-                "format_response": tool_info.get("format_response"),
-            }
-
-        return stats
-
-    def get_server_stats(self) -> Dict[str, Any]:
-        """Get comprehensive server statistics."""
-        stats = {
-            "server": {
-                "name": self.name,
-                "running": self._running,
-                "config": self.config.to_dict(),
-            },
-            "tools": self.get_tool_stats(),
-        }
-
-        if self.metrics.enabled:
-            stats["metrics"] = self.metrics.export_metrics()
-
-        if self.cache.enabled:
-            stats["cache"] = self.cache.stats()
-
-        return stats
-
-    def clear_cache(self, cache_name: Optional[str] = None) -> None:
-        """Clear cache(s)."""
-        if cache_name:
-            cache = self.cache.get_cache(cache_name)
-            cache.clear()
-            logger.info(f"Cleared cache: {cache_name}")
-        else:
-            self.cache.clear_all()
-            logger.info("Cleared all caches")
-
-    def run(self):
-        """Run the MCP server."""
-        if self._mcp is None:
-            self._init_mcp()
-
-        logger.info(f"Starting enhanced MCP server: {self.name}")
-        logger.info(f"Cache enabled: {self.cache.enabled}")
-        logger.info(f"Metrics enabled: {self.metrics.enabled}")
-
-        self._running = True
-
-        try:
-            self._mcp.run()
-        except KeyboardInterrupt:
-            logger.info("Server stopped by user")
-        except Exception as e:
-            logger.error(f"Server error: {e}")
-            raise
-        finally:
-            self._running = False
-
-
-# For backward compatibility, make EnhancedMCPServer the default MCPServer
-MCPServer = EnhancedMCPServer
-
-
-class SimpleMCPServer(EnhancedMCPServer):
-    """
-    Simplified MCP server with minimal configuration.
-
-    This inherits all enhanced capabilities but disables some features
-    by default for simpler use cases.
-    """
-
-    def __init__(self, name: str, description: str = ""):
-        """
-        Initialize simple MCP server.
-
-        Args:
-            name: Server name
-            description: Server description
-        """
-        # Initialize with some features disabled for simplicity
-        super().__init__(
-            name=name,
-            enable_cache=False,  # Disable cache by default
-            enable_metrics=False,  # Disable metrics by default
-            enable_formatting=True,  # Keep formatting for better output
-        )
-
-        self.description = description
-
-        # Update config for simple use
-        self.config.update(
-            {"server": {"name": name, "description": description, "version": "1.0.0"}}
-        )
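For context, the docstrings in the deleted module above already describe how it was used; the sketch below is reconstructed from that 0.6.2 code, not from the replacement kailash.mcp_server API, and fetch_weather_data is a stub standing in for a real upstream call.

# Usage sketch for the removed 0.6.2 module, based on its own docstring example.
from kailash.mcp.server_enhanced import EnhancedMCPServer

server = EnhancedMCPServer("weather-server", enable_cache=True, cache_ttl=300)


async def fetch_weather_data(city: str) -> dict:
    # Placeholder for a real weather API call
    return {"city": city, "temp_c": 21}


@server.tool(cache_key="weather", cache_ttl=600, format_response="markdown")
async def get_weather(city: str) -> dict:
    # Expensive call: results are cached for 10 minutes per argument set
    return await fetch_weather_data(city)


if __name__ == "__main__":
    server.run()  # starts the underlying FastMCP server (stdio transport)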
kailash/mcp/utils/cache.py
@@ -1,267 +0,0 @@
-"""
-Caching utilities for MCP servers.
-
-Provides LRU cache, TTL support, and decorators for method-level caching.
-Based on patterns from production MCP server implementations.
-"""
-
-import asyncio
-import functools
-import logging
-import threading
-import time
-from typing import Any, Callable, Dict, Optional, Tuple, TypeVar
-
-logger = logging.getLogger(__name__)
-
-F = TypeVar("F", bound=Callable[..., Any])
-
-
-class LRUCache:
-    """
-    Thread-safe LRU cache with TTL (time-to-live) support.
-
-    Features:
-    - Configurable maximum size
-    - TTL expiration for entries
-    - Thread-safe operations
-    - Performance statistics
-    """
-
-    def __init__(self, max_size: int = 128, ttl: int = 300):
-        """
-        Initialize LRU cache.
-
-        Args:
-            max_size: Maximum number of entries to store
-            ttl: Time-to-live in seconds (0 = no expiration)
-        """
-        self.max_size = max_size
-        self.ttl = ttl
-        self._cache: Dict[str, Tuple[Any, float]] = {}
-        self._access_order: Dict[str, float] = {}
-        self._lock = threading.RLock()
-
-        # Statistics
-        self._hits = 0
-        self._misses = 0
-        self._evictions = 0
-
-    def get(self, key: str) -> Optional[Any]:
-        """Get value from cache if it exists and hasn't expired."""
-        with self._lock:
-            if key not in self._cache:
-                self._misses += 1
-                return None
-
-            value, timestamp = self._cache[key]
-
-            # Check TTL expiration
-            if self.ttl > 0 and time.time() - timestamp > self.ttl:
-                del self._cache[key]
-                del self._access_order[key]
-                self._misses += 1
-                return None
-
-            # Update access time for LRU
-            self._access_order[key] = time.time()
-            self._hits += 1
-            return value
-
-    def set(self, key: str, value: Any) -> None:
-        """Set value in cache, evicting LRU items if necessary."""
-        with self._lock:
-            current_time = time.time()
-
-            # If key exists, update it
-            if key in self._cache:
-                self._cache[key] = (value, current_time)
-                self._access_order[key] = current_time
-                return
-
-            # Check if we need to evict
-            if len(self._cache) >= self.max_size:
-                self._evict_lru()
-
-            # Add new entry
-            self._cache[key] = (value, current_time)
-            self._access_order[key] = current_time
-
-    def _evict_lru(self) -> None:
-        """Evict least recently used item."""
-        if not self._access_order:
-            return
-
-        lru_key = min(self._access_order.keys(), key=self._access_order.get)
-        del self._cache[lru_key]
-        del self._access_order[lru_key]
-        self._evictions += 1
-
-    def clear(self) -> None:
-        """Clear all entries from cache."""
-        with self._lock:
-            self._cache.clear()
-            self._access_order.clear()

-    def stats(self) -> Dict[str, Any]:
-        """Get cache performance statistics."""
-        with self._lock:
-            total_requests = self._hits + self._misses
-            hit_rate = self._hits / total_requests if total_requests > 0 else 0
-
-            return {
-                "hits": self._hits,
-                "misses": self._misses,
-                "evictions": self._evictions,
-                "hit_rate": hit_rate,
-                "size": len(self._cache),
-                "max_size": self.max_size,
-                "ttl": self.ttl,
-            }
-
-
-class CacheManager:
-    """
-    High-level cache management with multiple caching strategies.
-
-    Provides easy-to-use caching for MCP servers with different cache types
-    for different use cases.
-    """
-
-    def __init__(self, enabled: bool = True, default_ttl: int = 300):
-        """
-        Initialize cache manager.
-
-        Args:
-            enabled: Whether caching is enabled
-            default_ttl: Default TTL for cache entries
-        """
-        self.enabled = enabled
-        self.default_ttl = default_ttl
-        self._caches: Dict[str, LRUCache] = {}
-
-    def get_cache(
-        self, name: str, max_size: int = 128, ttl: Optional[int] = None
-    ) -> LRUCache:
-        """Get or create a named cache."""
-        if name not in self._caches:
-            cache_ttl = ttl if ttl is not None else self.default_ttl
-            self._caches[name] = LRUCache(max_size=max_size, ttl=cache_ttl)
-        return self._caches[name]
-
-    def cached(self, cache_name: str = "default", ttl: Optional[int] = None):
-        """
-        Decorator to cache function results.
-
-        Args:
-            cache_name: Name of cache to use
-            ttl: TTL for this specific cache
-
-        Returns:
-            Decorated function with caching
-        """
-
-        def decorator(func: F) -> F:
-            if not self.enabled:
-                return func
-
-            cache = self.get_cache(cache_name, ttl=ttl)
-
-            @functools.wraps(func)
-            def sync_wrapper(*args, **kwargs):
-                # Create cache key from function name and arguments
-                cache_key = self._create_cache_key(func.__name__, args, kwargs)
-
-                # Try to get from cache
-                result = cache.get(cache_key)
-                if result is not None:
-                    logger.debug(f"Cache hit for {func.__name__}: {cache_key}")
-                    return result
-
-                # Execute function and cache result
-                logger.debug(f"Cache miss for {func.__name__}: {cache_key}")
-                result = func(*args, **kwargs)
-                cache.set(cache_key, result)
-                return result
-
-            @functools.wraps(func)
-            async def async_wrapper(*args, **kwargs):
-                # Create cache key from function name and arguments
-                cache_key = self._create_cache_key(func.__name__, args, kwargs)
-
-                # Try to get from cache
-                result = cache.get(cache_key)
-                if result is not None:
-                    logger.debug(f"Cache hit for {func.__name__}: {cache_key}")
-                    return result
-
-                # Execute function and cache result
-                logger.debug(f"Cache miss for {func.__name__}: {cache_key}")
-                result = await func(*args, **kwargs)
-                cache.set(cache_key, result)
-                return result
-
-            # Return appropriate wrapper based on function type
-            if asyncio.iscoroutinefunction(func):
-                return async_wrapper
-            else:
-                return sync_wrapper
-
-        return decorator
-
-    def _create_cache_key(self, func_name: str, args: tuple, kwargs: dict) -> str:
-        """Create a cache key from function name and arguments."""
-        # Convert args and kwargs to string representation
-        args_str = str(args) if args else ""
-        kwargs_str = str(sorted(kwargs.items())) if kwargs else ""
-        return f"{func_name}:{args_str}:{kwargs_str}"
-
-    def clear_all(self) -> None:
-        """Clear all caches."""
-        for cache in self._caches.values():
-            cache.clear()
-
-    def stats(self) -> Dict[str, Dict[str, Any]]:
-        """Get statistics for all caches."""
-        return {name: cache.stats() for name, cache in self._caches.items()}
-
-
-# Global cache manager instance
-_global_cache_manager = CacheManager()
-
-
-def cached_query(cache_name: str = "query", ttl: int = 300, enabled: bool = True):
-    """
-    Simple decorator for caching query results.
-
-    This is a convenience decorator that uses the global cache manager.
-
-    Args:
-        cache_name: Name of cache to use
-        ttl: Time-to-live for cache entries
-        enabled: Whether caching is enabled
-
-    Example:
-        @cached_query("search", ttl=600)
-        async def search_data(query: str) -> list:
-            # Expensive search operation
-            return results
-    """
-
-    def decorator(func: F) -> F:
-        if not enabled:
-            return func
-
-        return _global_cache_manager.cached(cache_name, ttl=ttl)(func)
-
-    return decorator
-
-
-def get_cache_stats() -> Dict[str, Dict[str, Any]]:
-    """Get statistics for the global cache manager."""
-    return _global_cache_manager.stats()
-
-
-def clear_all_caches() -> None:
-    """Clear all caches in the global cache manager."""
-    _global_cache_manager.clear_all()
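The removed caching helpers are self-contained, so their intended use can be read straight from the docstrings above; a minimal sketch, assuming the 0.6.2 import path kailash.mcp.utils.cache, with search_data's body as a stand-in for an expensive query:

# Usage sketch for the removed 0.6.2 caching utilities.
import asyncio

from kailash.mcp.utils.cache import LRUCache, cached_query, get_cache_stats

# Direct use of the thread-safe LRU cache with a 60-second TTL
cache = LRUCache(max_size=64, ttl=60)
cache.set("greeting", "hello")
assert cache.get("greeting") == "hello"


@cached_query("search", ttl=600)
async def search_data(query: str) -> list:
    # Stand-in for an expensive search; repeat calls with the same
    # arguments are served from the "search" cache for 10 minutes.
    return [query.upper()]


async def main() -> None:
    await search_data("kailash")
    await search_data("kailash")  # second call is a cache hit
    print(get_cache_stats())  # hits/misses/evictions per managed cache


asyncio.run(main())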