kailash 0.2.2__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117) hide show
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +40 -39
  3. kailash/api/auth.py +26 -32
  4. kailash/api/custom_nodes.py +29 -29
  5. kailash/api/custom_nodes_secure.py +35 -35
  6. kailash/api/database.py +17 -17
  7. kailash/api/gateway.py +19 -19
  8. kailash/api/mcp_integration.py +24 -23
  9. kailash/api/studio.py +45 -45
  10. kailash/api/workflow_api.py +8 -8
  11. kailash/cli/commands.py +5 -8
  12. kailash/manifest.py +42 -42
  13. kailash/mcp/__init__.py +1 -1
  14. kailash/mcp/ai_registry_server.py +20 -20
  15. kailash/mcp/client.py +9 -11
  16. kailash/mcp/client_new.py +10 -10
  17. kailash/mcp/server.py +1 -2
  18. kailash/mcp/server_enhanced.py +449 -0
  19. kailash/mcp/servers/ai_registry.py +6 -6
  20. kailash/mcp/utils/__init__.py +31 -0
  21. kailash/mcp/utils/cache.py +267 -0
  22. kailash/mcp/utils/config.py +263 -0
  23. kailash/mcp/utils/formatters.py +293 -0
  24. kailash/mcp/utils/metrics.py +418 -0
  25. kailash/nodes/ai/agents.py +9 -9
  26. kailash/nodes/ai/ai_providers.py +33 -34
  27. kailash/nodes/ai/embedding_generator.py +31 -32
  28. kailash/nodes/ai/intelligent_agent_orchestrator.py +62 -66
  29. kailash/nodes/ai/iterative_llm_agent.py +48 -48
  30. kailash/nodes/ai/llm_agent.py +32 -33
  31. kailash/nodes/ai/models.py +13 -13
  32. kailash/nodes/ai/self_organizing.py +44 -44
  33. kailash/nodes/api/__init__.py +5 -0
  34. kailash/nodes/api/auth.py +11 -11
  35. kailash/nodes/api/graphql.py +13 -13
  36. kailash/nodes/api/http.py +19 -19
  37. kailash/nodes/api/monitoring.py +463 -0
  38. kailash/nodes/api/rate_limiting.py +9 -13
  39. kailash/nodes/api/rest.py +29 -29
  40. kailash/nodes/api/security.py +819 -0
  41. kailash/nodes/base.py +24 -26
  42. kailash/nodes/base_async.py +7 -7
  43. kailash/nodes/base_cycle_aware.py +12 -12
  44. kailash/nodes/base_with_acl.py +5 -5
  45. kailash/nodes/code/python.py +56 -55
  46. kailash/nodes/data/__init__.py +6 -0
  47. kailash/nodes/data/directory.py +6 -6
  48. kailash/nodes/data/event_generation.py +297 -0
  49. kailash/nodes/data/file_discovery.py +598 -0
  50. kailash/nodes/data/readers.py +8 -8
  51. kailash/nodes/data/retrieval.py +10 -10
  52. kailash/nodes/data/sharepoint_graph.py +17 -17
  53. kailash/nodes/data/sources.py +5 -5
  54. kailash/nodes/data/sql.py +13 -13
  55. kailash/nodes/data/streaming.py +25 -25
  56. kailash/nodes/data/vector_db.py +22 -22
  57. kailash/nodes/data/writers.py +7 -7
  58. kailash/nodes/logic/async_operations.py +17 -17
  59. kailash/nodes/logic/convergence.py +11 -11
  60. kailash/nodes/logic/loop.py +4 -4
  61. kailash/nodes/logic/operations.py +11 -11
  62. kailash/nodes/logic/workflow.py +8 -9
  63. kailash/nodes/mixins/mcp.py +17 -17
  64. kailash/nodes/mixins.py +8 -10
  65. kailash/nodes/transform/chunkers.py +3 -3
  66. kailash/nodes/transform/formatters.py +7 -7
  67. kailash/nodes/transform/processors.py +11 -11
  68. kailash/runtime/access_controlled.py +18 -18
  69. kailash/runtime/async_local.py +18 -20
  70. kailash/runtime/docker.py +24 -26
  71. kailash/runtime/local.py +55 -31
  72. kailash/runtime/parallel.py +25 -25
  73. kailash/runtime/parallel_cyclic.py +29 -29
  74. kailash/runtime/runner.py +6 -6
  75. kailash/runtime/testing.py +22 -22
  76. kailash/sdk_exceptions.py +0 -58
  77. kailash/security.py +14 -26
  78. kailash/tracking/manager.py +38 -38
  79. kailash/tracking/metrics_collector.py +15 -14
  80. kailash/tracking/models.py +53 -53
  81. kailash/tracking/storage/base.py +7 -17
  82. kailash/tracking/storage/database.py +22 -23
  83. kailash/tracking/storage/filesystem.py +38 -40
  84. kailash/utils/export.py +21 -21
  85. kailash/utils/templates.py +8 -9
  86. kailash/visualization/api.py +30 -34
  87. kailash/visualization/dashboard.py +17 -17
  88. kailash/visualization/performance.py +32 -19
  89. kailash/visualization/reports.py +30 -28
  90. kailash/workflow/builder.py +8 -8
  91. kailash/workflow/convergence.py +13 -12
  92. kailash/workflow/cycle_analyzer.py +38 -33
  93. kailash/workflow/cycle_builder.py +12 -12
  94. kailash/workflow/cycle_config.py +16 -15
  95. kailash/workflow/cycle_debugger.py +40 -40
  96. kailash/workflow/cycle_exceptions.py +29 -29
  97. kailash/workflow/cycle_profiler.py +21 -21
  98. kailash/workflow/cycle_state.py +20 -22
  99. kailash/workflow/cyclic_runner.py +45 -45
  100. kailash/workflow/graph.py +57 -45
  101. kailash/workflow/mermaid_visualizer.py +9 -11
  102. kailash/workflow/migration.py +22 -22
  103. kailash/workflow/mock_registry.py +6 -6
  104. kailash/workflow/runner.py +9 -9
  105. kailash/workflow/safety.py +12 -13
  106. kailash/workflow/state.py +8 -11
  107. kailash/workflow/templates.py +19 -19
  108. kailash/workflow/validation.py +14 -14
  109. kailash/workflow/visualization.py +32 -24
  110. kailash-0.3.1.dist-info/METADATA +476 -0
  111. kailash-0.3.1.dist-info/RECORD +136 -0
  112. kailash-0.2.2.dist-info/METADATA +0 -121
  113. kailash-0.2.2.dist-info/RECORD +0 -126
  114. {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/WHEEL +0 -0
  115. {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/entry_points.txt +0 -0
  116. {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/licenses/LICENSE +0 -0
  117. {kailash-0.2.2.dist-info → kailash-0.3.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,293 @@
1
+ """
2
+ Response formatting utilities for MCP servers.
3
+
4
+ Provides consistent formatting for tool responses, making them more readable
5
+ and structured for LLM consumption.
6
+ """
7
+
8
+ import json
9
+ from datetime import datetime
10
+ from typing import Any, Dict, List, Optional, Union
11
+
12
+
13
class ResponseFormatter:
    """Abstract base for all response formatters.

    Subclasses override :meth:`format` to turn arbitrary data into a
    string suitable for returning from an MCP tool.
    """

    def format(self, data: Any, **kwargs) -> str:
        """Render *data* as a string; must be implemented by subclasses."""
        raise NotImplementedError
19
+
20
+
21
class JSONFormatter(ResponseFormatter):
    """Render responses as pretty-printed JSON text."""

    def __init__(self, indent: int = 2, ensure_ascii: bool = False):
        # Stored once; every format() call reuses the same settings.
        self.indent = indent
        self.ensure_ascii = ensure_ascii

    def format(self, data: Any, **kwargs) -> str:
        """Serialize *data* to JSON; on failure return an error message string."""
        try:
            return json.dumps(
                data,
                indent=self.indent,
                ensure_ascii=self.ensure_ascii,
                default=self._json_serializer,
            )
        except Exception as exc:
            # Formatting must never raise into the caller; degrade to text.
            return f"Error formatting JSON: {exc}"

    def _json_serializer(self, obj):
        """Fallback serializer: datetimes become ISO-8601, everything else str()."""
        return obj.isoformat() if isinstance(obj, datetime) else str(obj)
45
+
46
+
47
class MarkdownFormatter(ResponseFormatter):
    """Render responses as Markdown text for readability."""

    def format(self, data: Any, title: Optional[str] = None, **kwargs) -> str:
        """Dispatch on the type of *data* and render it as Markdown."""
        if isinstance(data, dict):
            return self._format_dict(data, title)
        if isinstance(data, list):
            return self._format_list(data, title)
        return self._format_simple(data, title)

    def _format_dict(self, data: Dict[str, Any], title: Optional[str] = None) -> str:
        """Render a mapping as bold key/value lines."""
        out = [f"# {title}\n"] if title else []
        out.extend(f"**{key}**: {self._format_value(val)}" for key, val in data.items())
        return "\n".join(out)

    def _format_list(self, data: List[Any], title: Optional[str] = None) -> str:
        """Render a sequence as a numbered list; dict items expand into sections."""
        out = [f"# {title}\n"] if title else []
        for idx, entry in enumerate(data, 1):
            if isinstance(entry, dict):
                out.append(f"## {idx}. Item")
                out.extend(
                    f"- **{key}**: {self._format_value(val)}"
                    for key, val in entry.items()
                )
                out.append("")
            else:
                out.append(f"{idx}. {self._format_value(entry)}")
        return "\n".join(out)

    def _format_simple(self, data: Any, title: Optional[str] = None) -> str:
        """Render a scalar value, optionally under a heading."""
        out = [f"# {title}\n"] if title else []
        out.append(str(data))
        return "\n".join(out)

    def _format_value(self, value: Any) -> str:
        """Inline-render a value; short lists/dicts become comma-joined text."""
        if isinstance(value, (list, tuple)) and len(value) <= 5:
            return ", ".join(str(item) for item in value)
        if isinstance(value, dict) and len(value) <= 3:
            return ", ".join(f"{k}: {v}" for k, v in value.items())
        return str(value)
107
+
108
+
109
class TableFormatter(ResponseFormatter):
    """Render a list of row-dicts as a plain ASCII table."""

    def format(
        self, data: List[Dict[str, Any]], headers: Optional[List[str]] = None, **kwargs
    ) -> str:
        """Build an aligned text table from *data*, inferring headers if omitted."""
        if not data:
            return "No data available"

        if not (isinstance(data, list) and all(isinstance(row, dict) for row in data)):
            return "Data must be a list of dictionaries for table formatting"

        # Headers default to the key order of the first row.
        cols = headers if headers is not None else (list(data[0].keys()) if data else [])

        # Column width = widest of the header text and every cell value.
        widths = {col: len(col) for col in cols}
        for row in data:
            for col in cols:
                widths[col] = max(widths[col], len(str(row.get(col, ""))))

        out = [" | ".join(col.ljust(widths[col]) for col in cols)]
        out.append("-+-".join("-" * widths[col] for col in cols))
        for row in data:
            out.append(
                " | ".join(str(row.get(col, "")).ljust(widths[col]) for col in cols)
            )

        return "\n".join(out)
157
+
158
+
159
class SearchResultFormatter(ResponseFormatter):
    """Render search hits as Markdown with query context and relevance scores."""

    def format(
        self,
        results: List[Dict[str, Any]],
        query: Optional[str] = None,
        total_count: Optional[int] = None,
        **kwargs,
    ) -> str:
        """Render *results*, noting the query and how many hits are shown."""
        out = [f"# Search Results for: '{query}'\n" if query else "# Search Results\n"]

        shown = len(results)
        if total_count and total_count > shown:
            out.append(f"Showing {shown} of {total_count} results\n")
        else:
            out.append(f"Found {shown} results\n")

        for idx, hit in enumerate(results, 1):
            out.append(f"## {idx}. {hit.get('name', hit.get('title', 'Result'))}")

            if "_relevance_score" in hit:
                out.append(f"**Relevance**: {hit['_relevance_score']:.2f}")

            if "description" in hit:
                out.append(f"{hit['description']}")

            # Remaining non-empty fields become bold-labelled lines.
            for key, value in hit.items():
                if key in ("name", "title", "description", "_relevance_score"):
                    continue
                if isinstance(value, (list, tuple)):
                    if value:  # skip empty lists
                        out.append(
                            f"**{key.title()}**: {', '.join(str(v) for v in value)}"
                        )
                elif value:  # skip falsy scalars
                    out.append(f"**{key.title()}**: {value}")

            out.append("")  # blank line separating results

        return "\n".join(out)
212
+
213
+
214
class MetricsFormatter(ResponseFormatter):
    """Render metrics dictionaries as human-readable Markdown."""

    def format(self, metrics: Dict[str, Any], **kwargs) -> str:
        """Render server-wide and per-tool statistics sections."""
        out = ["# Server Metrics\n"]

        if "server" in metrics:
            srv = metrics["server"]
            out.append("## Server Statistics")
            out.append(
                f"- **Uptime**: {self._format_duration(srv.get('uptime_seconds', 0))}"
            )
            out.append(f"- **Total Calls**: {srv.get('total_calls', 0):,}")
            out.append(f"- **Total Errors**: {srv.get('total_errors', 0):,}")
            out.append(f"- **Error Rate**: {srv.get('overall_error_rate', 0):.2%}")
            out.append(f"- **Calls/Second**: {srv.get('calls_per_second', 0):.2f}")
            out.append("")

        if metrics.get("tools"):
            out.append("## Tool Statistics")
            for name, stats in metrics["tools"].items():
                out.append(f"### {name}")
                out.append(f"- **Calls**: {stats.get('calls', 0):,}")
                out.append(f"- **Errors**: {stats.get('errors', 0):,}")
                out.append(f"- **Error Rate**: {stats.get('error_rate', 0):.2%}")
                if "avg_latency" in stats:
                    # p95 is assumed present whenever avg is (both are set together
                    # by the metrics collector).
                    out.append(f"- **Avg Latency**: {stats['avg_latency']:.3f}s")
                    out.append(f"- **P95 Latency**: {stats['p95_latency']:.3f}s")
                out.append("")

        return "\n".join(out)

    def _format_duration(self, seconds: float) -> str:
        """Express *seconds* in the largest convenient unit."""
        if seconds < 60:
            return f"{seconds:.1f} seconds"
        if seconds < 3600:
            return f"{seconds/60:.1f} minutes"
        if seconds < 86400:
            return f"{seconds/3600:.1f} hours"
        return f"{seconds/86400:.1f} days"
262
+
263
+
264
# Default formatter instances
# Shared module-level singletons used by format_response(); safe to reuse
# because each formatter holds only configuration, no per-call state.
json_formatter = JSONFormatter()
markdown_formatter = MarkdownFormatter()
table_formatter = TableFormatter()
search_formatter = SearchResultFormatter()
metrics_formatter = MetricsFormatter()
270
+
271
+
272
def format_response(data: Any, format_type: str = "json", **kwargs) -> str:
    """
    Format response using specified formatter.

    Args:
        data: Data to format
        format_type: Type of formatting ("json", "markdown", "table", "search", "metrics")
        **kwargs: Additional formatting options

    Returns:
        Formatted string
    """
    dispatch = {
        "json": json_formatter,
        "markdown": markdown_formatter,
        "table": table_formatter,
        "search": search_formatter,
        "metrics": metrics_formatter,
    }
    # Unknown format types silently fall back to JSON.
    chosen = dispatch.get(format_type, json_formatter)
    return chosen.format(data, **kwargs)
@@ -0,0 +1,418 @@
1
+ """
2
+ Metrics collection for MCP servers.
3
+
4
+ Provides comprehensive monitoring of MCP server performance including:
5
+ - Tool usage statistics
6
+ - Performance metrics (latency, throughput)
7
+ - Cache performance
8
+ - Error rates
9
+ """
10
+
11
+ import asyncio
12
+ import functools
13
+ import logging
14
+ import threading
15
+ import time
16
+ from collections import defaultdict, deque
17
+ from typing import Any, Callable, Dict, List, Optional, TypeVar
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+ F = TypeVar("F", bound=Callable[..., Any])
22
+
23
+
24
class MetricsCollector:
    """
    Comprehensive metrics collection for MCP servers.

    Tracks:
    - Tool call frequency and latency
    - Error rates and types
    - Recent-activity windows (5-minute call stats, 1-hour error summary)

    All public methods are thread-safe; internal state is guarded by a
    re-entrant lock so aggregate methods may call other locking methods.
    """

    def __init__(
        self,
        enabled: bool = True,
        collect_performance: bool = True,
        collect_usage: bool = True,
        history_size: int = 1000,
    ):
        """
        Initialize metrics collector.

        Args:
            enabled: Whether metrics collection is enabled at all
            collect_performance: Whether to record latencies and recent events
            collect_usage: Whether to record per-tool call/error counters
            history_size: Number of recent events to keep in memory
        """
        self.enabled = enabled
        self.collect_performance = collect_performance
        self.collect_usage = collect_usage
        self.history_size = history_size

        # RLock (not Lock): export_metrics() holds the lock while calling
        # get_server_stats()/get_tool_stats(), which acquire it again.
        self._lock = threading.RLock()

        # Usage metrics (per-tool counters)
        self._tool_calls = defaultdict(int)
        self._tool_errors = defaultdict(int)
        self._tool_latencies = defaultdict(list)

        # Bounded performance history; deque(maxlen=...) drops oldest entries.
        self._recent_calls = deque(maxlen=history_size)
        self._recent_errors = deque(maxlen=history_size)

        # System-wide counters
        self._start_time = time.time()
        self._total_calls = 0
        self._total_errors = 0

    def track_tool_call(
        self,
        tool_name: str,
        latency: float,
        success: bool = True,
        error_type: Optional[str] = None,
    ) -> None:
        """Record a single tool invocation.

        Args:
            tool_name: Name of the tool that was called
            latency: Wall-clock duration of the call in seconds
            success: False when the call raised or otherwise failed
            error_type: Exception class name (or similar tag) on failure
        """
        if not self.enabled:
            return

        with self._lock:
            current_time = time.time()

            # Update counters
            self._total_calls += 1
            if self.collect_usage:
                self._tool_calls[tool_name] += 1

            # Track latency
            if self.collect_performance:
                latencies = self._tool_latencies[tool_name]
                latencies.append(latency)
                # Keep only the 100 most recent samples to bound memory.
                if len(latencies) > 100:
                    del latencies[:-100]

            # Track errors
            if not success:
                self._total_errors += 1
                if self.collect_usage:
                    self._tool_errors[tool_name] += 1

                if self.collect_performance:
                    self._recent_errors.append(
                        {
                            "tool": tool_name,
                            "timestamp": current_time,
                            "error_type": error_type,
                        }
                    )

            # Track recent calls
            if self.collect_performance:
                self._recent_calls.append(
                    {
                        "tool": tool_name,
                        "timestamp": current_time,
                        "latency": latency,
                        "success": success,
                    }
                )

    def track_tool(self, tool_name: Optional[str] = None):
        """
        Decorator to automatically track tool call metrics.

        Works on both sync and async callables; latency, success, and the
        exception class name (on failure) are recorded via track_tool_call().

        Args:
            tool_name: Optional tool name override (defaults to func.__name__)

        Returns:
            Decorated function with metrics tracking
        """

        def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
            if not self.enabled:
                # Collection disabled: return the target unwrapped, zero overhead.
                return func

            actual_tool_name = tool_name or func.__name__

            @functools.wraps(func)
            def sync_wrapper(*args, **kwargs):
                start_time = time.time()
                success = True
                error_type = None
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    success = False
                    error_type = type(e).__name__
                    raise
                finally:
                    # finally runs on both paths, so latency is always recorded.
                    self.track_tool_call(
                        actual_tool_name, time.time() - start_time, success, error_type
                    )

            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                start_time = time.time()
                success = True
                error_type = None
                try:
                    return await func(*args, **kwargs)
                except Exception as e:
                    success = False
                    error_type = type(e).__name__
                    raise
                finally:
                    self.track_tool_call(
                        actual_tool_name, time.time() - start_time, success, error_type
                    )

            # Return the wrapper matching the target's sync/async nature.
            return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper

        return decorator

    def get_tool_stats(self) -> Dict[str, Dict[str, Any]]:
        """Return per-tool statistics (calls, errors, error rate, latency percentiles).

        Counters are read with ``dict.get`` instead of indexing: they are
        defaultdicts, and indexing a missing key would insert it — mutating
        state (and growing memory) on what should be a pure read.
        """
        with self._lock:
            stats: Dict[str, Dict[str, Any]] = {}

            for tool_name in set(self._tool_calls) | set(self._tool_errors):
                calls = self._tool_calls.get(tool_name, 0)
                errors = self._tool_errors.get(tool_name, 0)
                latencies = self._tool_latencies.get(tool_name, [])

                tool_stats: Dict[str, Any] = {
                    "calls": calls,
                    "errors": errors,
                    "error_rate": errors / calls if calls > 0 else 0,
                }

                if latencies:
                    tool_stats.update(
                        {
                            "avg_latency": sum(latencies) / len(latencies),
                            "min_latency": min(latencies),
                            "max_latency": max(latencies),
                            "p95_latency": self._percentile(latencies, 95),
                            "p99_latency": self._percentile(latencies, 99),
                        }
                    )

                stats[tool_name] = tool_stats

            return stats

    def get_server_stats(self) -> Dict[str, Any]:
        """Return overall server statistics, including a 5-minute activity window."""
        with self._lock:
            uptime = time.time() - self._start_time

            stats = {
                "uptime_seconds": uptime,
                "total_calls": self._total_calls,
                "total_errors": self._total_errors,
                "overall_error_rate": (
                    self._total_errors / self._total_calls
                    if self._total_calls > 0
                    else 0
                ),
                "calls_per_second": self._total_calls / uptime if uptime > 0 else 0,
            }

            # Recent activity (only when performance history is kept)
            if self.collect_performance:
                recent_window = 300  # seconds (5 minutes)
                current_time = time.time()

                recent_calls = [
                    call
                    for call in self._recent_calls
                    if current_time - call["timestamp"] <= recent_window
                ]

                if recent_calls:
                    recent_latencies = [call["latency"] for call in recent_calls]
                    recent_errors = sum(
                        1 for call in recent_calls if not call["success"]
                    )

                    stats.update(
                        {
                            "recent_calls_5min": len(recent_calls),
                            "recent_errors_5min": recent_errors,
                            "recent_error_rate_5min": recent_errors / len(recent_calls),
                            "recent_avg_latency_5min": sum(recent_latencies)
                            / len(recent_latencies),
                            "recent_p95_latency_5min": self._percentile(
                                recent_latencies, 95
                            ),
                        }
                    )

            return stats

    def get_error_summary(self) -> Dict[str, Any]:
        """Return a summary of errors from the last hour, grouped by type and tool."""
        with self._lock:
            if not self.collect_performance:
                return {"error": "Performance collection disabled"}

            recent_window = 3600  # seconds (1 hour)
            current_time = time.time()

            recent_errors = [
                error
                for error in self._recent_errors
                if current_time - error["timestamp"] <= recent_window
            ]

            # Group by error type and by tool in a single pass.
            # Note: the "error_type" key is always present (possibly None) in
            # recorded entries, so a plain .get(..., "Unknown") default would
            # never fire and None would leak through as a grouping key; use
            # `or "Unknown"` so untyped failures group under "Unknown".
            error_types: Dict[str, int] = defaultdict(int)
            error_tools: Dict[str, int] = defaultdict(int)
            for error in recent_errors:
                error_types[error.get("error_type") or "Unknown"] += 1
                error_tools[error["tool"]] += 1

            return {
                "total_recent_errors": len(recent_errors),
                "error_types": dict(error_types),
                "error_by_tool": dict(error_tools),
                "window_hours": 1,
            }

    def _percentile(self, values: List[float], percentile: int) -> float:
        """Linear-interpolated percentile of *values* (empty input -> 0.0)."""
        if not values:
            return 0.0

        ordered = sorted(values)
        k = (len(ordered) - 1) * percentile / 100
        lower = int(k)
        frac = k - lower

        if lower == len(ordered) - 1:
            return ordered[lower]
        return ordered[lower] * (1 - frac) + ordered[lower + 1] * frac

    def export_metrics(self, format: str = "dict") -> Any:
        """
        Export all metrics in specified format.

        Args:
            format: Export format ("dict", "json", "prometheus")

        Returns:
            Metrics in requested format

        Raises:
            ValueError: If *format* is not one of the supported values
        """
        with self._lock:
            metrics = {
                "server": self.get_server_stats(),
                "tools": self.get_tool_stats(),
                "errors": self.get_error_summary(),
                "collection_config": {
                    "enabled": self.enabled,
                    "collect_performance": self.collect_performance,
                    "collect_usage": self.collect_usage,
                    "history_size": self.history_size,
                },
            }

            if format == "dict":
                return metrics
            elif format == "json":
                import json

                return json.dumps(metrics, indent=2)
            elif format == "prometheus":
                return self._export_prometheus(metrics)
            else:
                raise ValueError(f"Unsupported export format: {format}")

    def _export_prometheus(self, metrics: Dict[str, Any]) -> str:
        """Render metrics in the Prometheus text exposition format."""
        lines = []

        # Server-level gauges/counters
        server = metrics["server"]
        lines.append(f"mcp_server_uptime_seconds {server['uptime_seconds']}")
        lines.append(f"mcp_server_total_calls {server['total_calls']}")
        lines.append(f"mcp_server_total_errors {server['total_errors']}")
        lines.append(f"mcp_server_error_rate {server['overall_error_rate']}")
        lines.append(f"mcp_server_calls_per_second {server['calls_per_second']}")

        # Per-tool series, labelled by tool name
        for tool_name, tool_stats in metrics["tools"].items():
            labels = f'{{tool="{tool_name}"}}'
            lines.append(f"mcp_tool_calls{labels} {tool_stats['calls']}")
            lines.append(f"mcp_tool_errors{labels} {tool_stats['errors']}")
            lines.append(f"mcp_tool_error_rate{labels} {tool_stats['error_rate']}")

            if "avg_latency" in tool_stats:
                lines.append(
                    f"mcp_tool_latency_avg{labels} {tool_stats['avg_latency']}"
                )
                lines.append(
                    f"mcp_tool_latency_p95{labels} {tool_stats['p95_latency']}"
                )
                lines.append(
                    f"mcp_tool_latency_p99{labels} {tool_stats['p99_latency']}"
                )

        return "\n".join(lines)

    def reset(self) -> None:
        """Clear all counters and history and restart the uptime clock."""
        with self._lock:
            self._tool_calls.clear()
            self._tool_errors.clear()
            self._tool_latencies.clear()
            self._recent_calls.clear()
            self._recent_errors.clear()
            self._start_time = time.time()
            self._total_calls = 0
            self._total_errors = 0
392
+
393
+
394
# Global metrics collector instance
# Module-level singleton backing the convenience functions below; created
# with default settings (enabled, performance + usage collection, 1000-event history).
_global_metrics = MetricsCollector()


def track_tool(tool_name: Optional[str] = None):
    """
    Convenience decorator using global metrics collector.

    Args:
        tool_name: Optional tool name override

    Returns:
        Decorated function with metrics tracking
    """
    return _global_metrics.track_tool(tool_name)


def get_metrics() -> Dict[str, Any]:
    """Get metrics from global collector."""
    # Uses export_metrics() defaults, i.e. the "dict" export format.
    return _global_metrics.export_metrics()


def reset_metrics() -> None:
    """Reset global metrics."""
    _global_metrics.reset()