kailash 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +1 -1
- kailash/access_control.py +40 -39
- kailash/api/auth.py +26 -32
- kailash/api/custom_nodes.py +29 -29
- kailash/api/custom_nodes_secure.py +35 -35
- kailash/api/database.py +17 -17
- kailash/api/gateway.py +19 -19
- kailash/api/mcp_integration.py +24 -23
- kailash/api/studio.py +45 -45
- kailash/api/workflow_api.py +8 -8
- kailash/cli/commands.py +5 -8
- kailash/manifest.py +42 -42
- kailash/mcp/__init__.py +1 -1
- kailash/mcp/ai_registry_server.py +20 -20
- kailash/mcp/client.py +9 -11
- kailash/mcp/client_new.py +10 -10
- kailash/mcp/server.py +1 -2
- kailash/mcp/server_enhanced.py +449 -0
- kailash/mcp/servers/ai_registry.py +6 -6
- kailash/mcp/utils/__init__.py +31 -0
- kailash/mcp/utils/cache.py +267 -0
- kailash/mcp/utils/config.py +263 -0
- kailash/mcp/utils/formatters.py +293 -0
- kailash/mcp/utils/metrics.py +418 -0
- kailash/nodes/ai/agents.py +9 -9
- kailash/nodes/ai/ai_providers.py +33 -34
- kailash/nodes/ai/embedding_generator.py +31 -32
- kailash/nodes/ai/intelligent_agent_orchestrator.py +62 -66
- kailash/nodes/ai/iterative_llm_agent.py +48 -48
- kailash/nodes/ai/llm_agent.py +32 -33
- kailash/nodes/ai/models.py +13 -13
- kailash/nodes/ai/self_organizing.py +44 -44
- kailash/nodes/api/auth.py +11 -11
- kailash/nodes/api/graphql.py +13 -13
- kailash/nodes/api/http.py +19 -19
- kailash/nodes/api/monitoring.py +20 -20
- kailash/nodes/api/rate_limiting.py +9 -13
- kailash/nodes/api/rest.py +29 -29
- kailash/nodes/api/security.py +44 -47
- kailash/nodes/base.py +21 -23
- kailash/nodes/base_async.py +7 -7
- kailash/nodes/base_cycle_aware.py +12 -12
- kailash/nodes/base_with_acl.py +5 -5
- kailash/nodes/code/python.py +56 -55
- kailash/nodes/data/directory.py +6 -6
- kailash/nodes/data/event_generation.py +10 -10
- kailash/nodes/data/file_discovery.py +28 -31
- kailash/nodes/data/readers.py +8 -8
- kailash/nodes/data/retrieval.py +10 -10
- kailash/nodes/data/sharepoint_graph.py +17 -17
- kailash/nodes/data/sources.py +5 -5
- kailash/nodes/data/sql.py +13 -13
- kailash/nodes/data/streaming.py +25 -25
- kailash/nodes/data/vector_db.py +22 -22
- kailash/nodes/data/writers.py +7 -7
- kailash/nodes/logic/async_operations.py +17 -17
- kailash/nodes/logic/convergence.py +11 -11
- kailash/nodes/logic/loop.py +4 -4
- kailash/nodes/logic/operations.py +11 -11
- kailash/nodes/logic/workflow.py +8 -9
- kailash/nodes/mixins/mcp.py +17 -17
- kailash/nodes/mixins.py +8 -10
- kailash/nodes/transform/chunkers.py +3 -3
- kailash/nodes/transform/formatters.py +7 -7
- kailash/nodes/transform/processors.py +10 -10
- kailash/runtime/access_controlled.py +18 -18
- kailash/runtime/async_local.py +17 -19
- kailash/runtime/docker.py +20 -22
- kailash/runtime/local.py +16 -16
- kailash/runtime/parallel.py +23 -23
- kailash/runtime/parallel_cyclic.py +27 -27
- kailash/runtime/runner.py +6 -6
- kailash/runtime/testing.py +20 -20
- kailash/sdk_exceptions.py +0 -58
- kailash/security.py +14 -26
- kailash/tracking/manager.py +38 -38
- kailash/tracking/metrics_collector.py +15 -14
- kailash/tracking/models.py +53 -53
- kailash/tracking/storage/base.py +7 -17
- kailash/tracking/storage/database.py +22 -23
- kailash/tracking/storage/filesystem.py +38 -40
- kailash/utils/export.py +21 -21
- kailash/utils/templates.py +2 -3
- kailash/visualization/api.py +30 -34
- kailash/visualization/dashboard.py +17 -17
- kailash/visualization/performance.py +16 -16
- kailash/visualization/reports.py +25 -27
- kailash/workflow/builder.py +8 -8
- kailash/workflow/convergence.py +13 -12
- kailash/workflow/cycle_analyzer.py +30 -32
- kailash/workflow/cycle_builder.py +12 -12
- kailash/workflow/cycle_config.py +16 -15
- kailash/workflow/cycle_debugger.py +40 -40
- kailash/workflow/cycle_exceptions.py +29 -29
- kailash/workflow/cycle_profiler.py +21 -21
- kailash/workflow/cycle_state.py +20 -22
- kailash/workflow/cyclic_runner.py +44 -44
- kailash/workflow/graph.py +40 -40
- kailash/workflow/mermaid_visualizer.py +9 -11
- kailash/workflow/migration.py +22 -22
- kailash/workflow/mock_registry.py +6 -6
- kailash/workflow/runner.py +9 -9
- kailash/workflow/safety.py +12 -13
- kailash/workflow/state.py +8 -11
- kailash/workflow/templates.py +19 -19
- kailash/workflow/validation.py +14 -14
- kailash/workflow/visualization.py +22 -22
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/METADATA +53 -5
- kailash-0.3.1.dist-info/RECORD +136 -0
- kailash-0.3.0.dist-info/RECORD +0 -130
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/WHEEL +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/entry_points.txt +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.3.0.dist-info → kailash-0.3.1.dist-info}/top_level.txt +0 -0
kailash/mcp/server_enhanced.py

@@ -0,0 +1,449 @@
+"""
+Enhanced MCP Server with production-ready capabilities.
+
+This module provides an enhanced MCP server that includes caching, configuration,
+metrics, and other production features by default, while maintaining compatibility
+with the official Anthropic FastMCP framework.
+"""
+
+import asyncio
+import functools
+import logging
+from pathlib import Path
+from typing import Any, Callable, Dict, Optional, TypeVar, Union
+
+from .utils import CacheManager, ConfigManager, MetricsCollector, format_response
+
+logger = logging.getLogger(__name__)
+
+F = TypeVar("F", bound=Callable[..., Any])
+
+
+class EnhancedMCPServer:
+    """
+    Production-ready MCP server with enhanced capabilities.
+
+    Features included by default:
+    - Caching with TTL support
+    - Hierarchical configuration management
+    - Metrics collection and monitoring
+    - Response formatting utilities
+    - Error handling and logging
+
+    All features can be disabled if not needed.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        config_file: Optional[Union[str, Path]] = None,
+        enable_cache: bool = True,
+        cache_ttl: int = 300,
+        enable_metrics: bool = True,
+        enable_formatting: bool = True,
+    ):
+        """
+        Initialize enhanced MCP server.
+
+        Args:
+            name: Server name
+            config_file: Optional configuration file path
+            enable_cache: Whether to enable caching (default: True)
+            cache_ttl: Default cache TTL in seconds (default: 300)
+            enable_metrics: Whether to enable metrics collection (default: True)
+            enable_formatting: Whether to enable response formatting (default: True)
+        """
+        self.name = name
+
+        # Initialize configuration
+        self.config = ConfigManager(config_file)
+
+        # Set default configuration values
+        self.config.update(
+            {
+                "server": {"name": name, "version": "1.0.0", "transport": "stdio"},
+                "cache": {
+                    "enabled": enable_cache,
+                    "default_ttl": cache_ttl,
+                    "max_size": 128,
+                },
+                "metrics": {
+                    "enabled": enable_metrics,
+                    "collect_performance": True,
+                    "collect_usage": True,
+                },
+                "formatting": {
+                    "enabled": enable_formatting,
+                    "default_format": "markdown",
+                },
+            }
+        )
+
+        # Initialize components
+        self.cache = CacheManager(
+            enabled=self.config.get("cache.enabled", enable_cache),
+            default_ttl=self.config.get("cache.default_ttl", cache_ttl),
+        )
+
+        self.metrics = MetricsCollector(
+            enabled=self.config.get("metrics.enabled", enable_metrics),
+            collect_performance=self.config.get("metrics.collect_performance", True),
+            collect_usage=self.config.get("metrics.collect_usage", True),
+        )
+
+        # FastMCP server instance (initialized lazily)
+        self._mcp = None
+        self._running = False
+
+        # Tool registry for management
+        self._tool_registry: Dict[str, Dict[str, Any]] = {}
+
+    def _init_mcp(self):
+        """Initialize FastMCP server."""
+        if self._mcp is not None:
+            return
+
+        try:
+            from mcp.server.fastmcp import FastMCP
+
+            self._mcp = FastMCP(self.name)
+            logger.info(f"Initialized FastMCP server: {self.name}")
+        except ImportError:
+            logger.error(
+                "FastMCP not available. Install with: pip install 'mcp[server]'"
+            )
+            raise
+
+    def tool(
+        self,
+        cache_key: Optional[str] = None,
+        cache_ttl: Optional[int] = None,
+        format_response: Optional[str] = None,
+    ):
+        """
+        Enhanced tool decorator with optional caching and metrics.
+
+        Args:
+            cache_key: Optional cache key for caching results
+            cache_ttl: Optional TTL override for this tool
+            format_response: Optional response format ("json", "markdown", "table", etc.)
+
+        Returns:
+            Decorated function with enhanced capabilities
+
+        Example:
+            @server.tool(cache_key="weather", cache_ttl=600, format_response="markdown")
+            async def get_weather(city: str) -> dict:
+                # Expensive API call - will be cached for 10 minutes
+                return await fetch_weather_data(city)
+        """
+
+        def decorator(func: F) -> F:
+            if self._mcp is None:
+                self._init_mcp()
+
+            # Get function name for registration
+            tool_name = func.__name__
+
+            # Create enhanced wrapper
+            enhanced_func = self._create_enhanced_tool(
+                func, tool_name, cache_key, cache_ttl, format_response
+            )
+
+            # Register with FastMCP
+            mcp_tool = self._mcp.tool()(enhanced_func)
+
+            # Track in registry
+            self._tool_registry[tool_name] = {
+                "function": mcp_tool,
+                "original_function": func,
+                "cached": cache_key is not None,
+                "cache_key": cache_key,
+                "cache_ttl": cache_ttl,
+                "format_response": format_response,
+            }
+
+            logger.debug(
+                f"Registered tool: {tool_name} (cached: {cache_key is not None})"
+            )
+            return mcp_tool
+
+        return decorator
+
+    def _create_enhanced_tool(
+        self,
+        func: F,
+        tool_name: str,
+        cache_key: Optional[str],
+        cache_ttl: Optional[int],
+        response_format: Optional[str],
+    ) -> F:
+        """Create enhanced tool function with caching, metrics, and formatting."""
+
+        @functools.wraps(func)
+        def sync_wrapper(*args, **kwargs):
+            # Apply metrics tracking
+            start_time = None
+            if self.metrics.enabled:
+                import time
+
+                start_time = time.time()
+
+            try:
+                # Try cache first if enabled
+                if cache_key and self.cache.enabled:
+                    cache = self.cache.get_cache(cache_key, ttl=cache_ttl)
+                    cache_lookup_key = self.cache._create_cache_key(
+                        tool_name, args, kwargs
+                    )
+
+                    result = cache.get(cache_lookup_key)
+                    if result is not None:
+                        logger.debug(f"Cache hit for {tool_name}")
+                        if self.metrics.enabled:
+                            latency = time.time() - start_time
+                            self.metrics.track_tool_call(tool_name, latency, True)
+                        return self._format_response(result, response_format)
+
+                # Execute function
+                result = func(*args, **kwargs)
+
+                # Cache result if enabled
+                if cache_key and self.cache.enabled:
+                    cache.set(cache_lookup_key, result)
+                    logger.debug(f"Cached result for {tool_name}")
+
+                # Track success metrics
+                if self.metrics.enabled:
+                    latency = time.time() - start_time
+                    self.metrics.track_tool_call(tool_name, latency, True)
+
+                return self._format_response(result, response_format)
+
+            except Exception as e:
+                # Track error metrics
+                if self.metrics.enabled and start_time:
+                    latency = time.time() - start_time
+                    self.metrics.track_tool_call(
+                        tool_name, latency, False, type(e).__name__
+                    )
+
+                logger.error(f"Error in tool {tool_name}: {e}")
+                raise
+
+        @functools.wraps(func)
+        async def async_wrapper(*args, **kwargs):
+            # Apply metrics tracking
+            start_time = None
+            if self.metrics.enabled:
+                import time
+
+                start_time = time.time()
+
+            try:
+                # Try cache first if enabled
+                if cache_key and self.cache.enabled:
+                    cache = self.cache.get_cache(cache_key, ttl=cache_ttl)
+                    cache_lookup_key = self.cache._create_cache_key(
+                        tool_name, args, kwargs
+                    )
+
+                    result = cache.get(cache_lookup_key)
+                    if result is not None:
+                        logger.debug(f"Cache hit for {tool_name}")
+                        if self.metrics.enabled:
+                            latency = time.time() - start_time
+                            self.metrics.track_tool_call(tool_name, latency, True)
+                        return self._format_response(result, response_format)
+
+                # Execute function
+                result = await func(*args, **kwargs)
+
+                # Cache result if enabled
+                if cache_key and self.cache.enabled:
+                    cache.set(cache_lookup_key, result)
+                    logger.debug(f"Cached result for {tool_name}")
+
+                # Track success metrics
+                if self.metrics.enabled:
+                    latency = time.time() - start_time
+                    self.metrics.track_tool_call(tool_name, latency, True)
+
+                return self._format_response(result, response_format)
+
+            except Exception as e:
+                # Track error metrics
+                if self.metrics.enabled and start_time:
+                    latency = time.time() - start_time
+                    self.metrics.track_tool_call(
+                        tool_name, latency, False, type(e).__name__
+                    )
+
+                logger.error(f"Error in tool {tool_name}: {e}")
+                raise
+
+        # Return appropriate wrapper based on function type
+        if asyncio.iscoroutinefunction(func):
+            return async_wrapper
+        else:
+            return sync_wrapper
+
+    def _format_response(self, result: Any, response_format: Optional[str]) -> Any:
+        """Format response if formatting is enabled."""
+        if not self.config.get("formatting.enabled", True) or not response_format:
+            return result
+
+        try:
+            return format_response(result, response_format)
+        except Exception as e:
+            logger.warning(f"Failed to format response: {e}")
+            return result
+
+    def resource(self, uri: str):
+        """
+        Add resource with metrics tracking.
+
+        Args:
+            uri: Resource URI pattern
+
+        Returns:
+            Decorated function
+        """
+
+        def decorator(func: F) -> F:
+            if self._mcp is None:
+                self._init_mcp()
+
+            # Wrap with metrics if enabled
+            if self.metrics.enabled:
+                func = self.metrics.track_tool(f"resource:{uri}")(func)
+
+            return self._mcp.resource(uri)(func)
+
+        return decorator
+
+    def prompt(self, name: str):
+        """
+        Add prompt with metrics tracking.
+
+        Args:
+            name: Prompt name
+
+        Returns:
+            Decorated function
+        """
+
+        def decorator(func: F) -> F:
+            if self._mcp is None:
+                self._init_mcp()
+
+            # Wrap with metrics if enabled
+            if self.metrics.enabled:
+                func = self.metrics.track_tool(f"prompt:{name}")(func)
+
+            return self._mcp.prompt(name)(func)
+
+        return decorator
+
+    def get_tool_stats(self) -> Dict[str, Any]:
+        """Get statistics for all registered tools."""
+        stats = {
+            "registered_tools": len(self._tool_registry),
+            "cached_tools": sum(1 for t in self._tool_registry.values() if t["cached"]),
+            "tools": {},
+        }
+
+        for tool_name, tool_info in self._tool_registry.items():
+            stats["tools"][tool_name] = {
+                "cached": tool_info["cached"],
+                "cache_key": tool_info.get("cache_key"),
+                "format_response": tool_info.get("format_response"),
+            }
+
+        return stats
+
+    def get_server_stats(self) -> Dict[str, Any]:
+        """Get comprehensive server statistics."""
+        stats = {
+            "server": {
+                "name": self.name,
+                "running": self._running,
+                "config": self.config.to_dict(),
+            },
+            "tools": self.get_tool_stats(),
+        }
+
+        if self.metrics.enabled:
+            stats["metrics"] = self.metrics.export_metrics()
+
+        if self.cache.enabled:
+            stats["cache"] = self.cache.stats()
+
+        return stats
+
+    def clear_cache(self, cache_name: Optional[str] = None) -> None:
+        """Clear cache(s)."""
+        if cache_name:
+            cache = self.cache.get_cache(cache_name)
+            cache.clear()
+            logger.info(f"Cleared cache: {cache_name}")
+        else:
+            self.cache.clear_all()
+            logger.info("Cleared all caches")
+
+    def run(self):
+        """Run the MCP server."""
+        if self._mcp is None:
+            self._init_mcp()
+
+        logger.info(f"Starting enhanced MCP server: {self.name}")
+        logger.info(f"Cache enabled: {self.cache.enabled}")
+        logger.info(f"Metrics enabled: {self.metrics.enabled}")
+
+        self._running = True
+
+        try:
+            self._mcp.run()
+        except KeyboardInterrupt:
+            logger.info("Server stopped by user")
+        except Exception as e:
+            logger.error(f"Server error: {e}")
+            raise
+        finally:
+            self._running = False
+
+
+# For backward compatibility, make EnhancedMCPServer the default MCPServer
+MCPServer = EnhancedMCPServer
+
+
+class SimpleMCPServer(EnhancedMCPServer):
+    """
+    Simplified MCP server with minimal configuration.
+
+    This inherits all enhanced capabilities but disables some features
+    by default for simpler use cases.
+    """
+
+    def __init__(self, name: str, description: str = ""):
+        """
+        Initialize simple MCP server.
+
+        Args:
+            name: Server name
+            description: Server description
+        """
+        # Initialize with some features disabled for simplicity
+        super().__init__(
+            name=name,
+            enable_cache=False,  # Disable cache by default
+            enable_metrics=False,  # Disable metrics by default
+            enable_formatting=True,  # Keep formatting for better output
+        )
+
+        self.description = description
+
+        # Update config for simple use
+        self.config.update(
+            {"server": {"name": name, "description": description, "version": "1.0.0"}}
+        )
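For orientation, here is a minimal usage sketch of the server added above, modeled on the tool decorator's own docstring example. fetch_weather_data is a hypothetical helper, and running the server still requires the optional mcp[server] dependency noted in _init_mcp; the default transport is the stdio setting from the config defaults:

    from kailash.mcp.server_enhanced import EnhancedMCPServer

    server = EnhancedMCPServer("weather-server", cache_ttl=600)

    @server.tool(cache_key="weather", cache_ttl=600, format_response="markdown")
    async def get_weather(city: str) -> dict:
        # Hypothetical upstream call; results are cached for 10 minutes per args/kwargs.
        return await fetch_weather_data(city)

    if __name__ == "__main__":
        server.run()  # serves over stdio; server.get_server_stats() exposes cache/metrics data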
kailash/mcp/servers/ai_registry.py

@@ -7,7 +7,7 @@ exposing tools for searching, analyzing, and exploring AI implementations.
 import json
 import os
 from pathlib import Path
-from typing import Any
+from typing import Any
 
 from kailash.mcp.server import MCPServer
 
@@ -51,7 +51,7 @@ class AIRegistryServer(MCPServer):
         try:
             registry_path = Path(self.registry_file)
             if registry_path.exists():
-                with open(registry_path,
+                with open(registry_path, encoding="utf-8") as f:
                     self._registry_data = json.load(f)
             else:
                 # Provide sample data if file not found
@@ -83,7 +83,7 @@ class AIRegistryServer(MCPServer):
         """Setup server tools and resources."""
 
         @self.add_tool()
-        def search_use_cases(query: str, limit: int = 10) ->
+        def search_use_cases(query: str, limit: int = 10) -> dict[str, Any]:
             """Search for AI use cases matching the query.
 
             Args:
@@ -119,7 +119,7 @@ class AIRegistryServer(MCPServer):
             return {"results": results, "count": len(results), "query": query}
 
         @self.add_tool()
-        def filter_by_domain(domain: str) ->
+        def filter_by_domain(domain: str) -> dict[str, Any]:
             """Filter use cases by application domain.
 
             Args:
@@ -141,7 +141,7 @@ class AIRegistryServer(MCPServer):
             return {"domain": domain, "use_cases": filtered, "count": len(filtered)}
 
         @self.add_tool()
-        def get_use_case_details(use_case_id: int) ->
+        def get_use_case_details(use_case_id: int) -> dict[str, Any]:
             """Get detailed information about a specific use case.
 
             Args:
@@ -161,7 +161,7 @@ class AIRegistryServer(MCPServer):
             return {"error": f"Use case {use_case_id} not found"}
 
         @self.add_tool()
-        def list_domains() ->
+        def list_domains() -> dict[str, Any]:
             """List all available application domains.
 
             Returns:
kailash/mcp/utils/__init__.py

@@ -0,0 +1,31 @@
+"""
+MCP utilities for enhanced server capabilities.
+
+This module provides production-ready utilities for MCP servers including:
+- Caching (LRU, TTL, query caching)
+- Configuration management (hierarchical, environment overrides)
+- Metrics collection (performance, usage tracking)
+- Response formatting utilities
+"""
+
+from .cache import CacheManager, LRUCache, cached_query
+from .config import ConfigManager
+from .formatters import (
+    format_response,
+    json_formatter,
+    markdown_formatter,
+    search_formatter,
+)
+from .metrics import MetricsCollector
+
+__all__ = [
+    "CacheManager",
+    "LRUCache",
+    "cached_query",
+    "ConfigManager",
+    "MetricsCollector",
+    "format_response",
+    "json_formatter",
+    "markdown_formatter",
+    "search_formatter",
+]
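As a rough sketch of how these utilities compose outside the server class, using only the CacheManager and format_response calls already exercised by server_enhanced.py above (the cache key and payload here are illustrative placeholders):

    from kailash.mcp.utils import CacheManager, format_response

    cache_manager = CacheManager(enabled=True, default_ttl=300)
    search_cache = cache_manager.get_cache("search", ttl=60)

    result = search_cache.get("query:medical-imaging")
    if result is None:
        result = {"results": [], "count": 0}  # placeholder payload for illustration
        search_cache.set("query:medical-imaging", result)

    print(format_response(result, "markdown"))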