awslabs.openapi-mcp-server 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- awslabs/__init__.py +16 -0
- awslabs/openapi_mcp_server/__init__.py +69 -0
- awslabs/openapi_mcp_server/api/__init__.py +18 -0
- awslabs/openapi_mcp_server/api/config.py +200 -0
- awslabs/openapi_mcp_server/auth/__init__.py +27 -0
- awslabs/openapi_mcp_server/auth/api_key_auth.py +185 -0
- awslabs/openapi_mcp_server/auth/auth_cache.py +190 -0
- awslabs/openapi_mcp_server/auth/auth_errors.py +206 -0
- awslabs/openapi_mcp_server/auth/auth_factory.py +146 -0
- awslabs/openapi_mcp_server/auth/auth_protocol.py +63 -0
- awslabs/openapi_mcp_server/auth/auth_provider.py +160 -0
- awslabs/openapi_mcp_server/auth/base_auth.py +218 -0
- awslabs/openapi_mcp_server/auth/basic_auth.py +171 -0
- awslabs/openapi_mcp_server/auth/bearer_auth.py +108 -0
- awslabs/openapi_mcp_server/auth/cognito_auth.py +538 -0
- awslabs/openapi_mcp_server/auth/register.py +100 -0
- awslabs/openapi_mcp_server/patch/__init__.py +17 -0
- awslabs/openapi_mcp_server/prompts/__init__.py +18 -0
- awslabs/openapi_mcp_server/prompts/generators/__init__.py +22 -0
- awslabs/openapi_mcp_server/prompts/generators/operation_prompts.py +642 -0
- awslabs/openapi_mcp_server/prompts/generators/workflow_prompts.py +257 -0
- awslabs/openapi_mcp_server/prompts/models.py +70 -0
- awslabs/openapi_mcp_server/prompts/prompt_manager.py +150 -0
- awslabs/openapi_mcp_server/server.py +511 -0
- awslabs/openapi_mcp_server/utils/__init__.py +18 -0
- awslabs/openapi_mcp_server/utils/cache_provider.py +249 -0
- awslabs/openapi_mcp_server/utils/config.py +35 -0
- awslabs/openapi_mcp_server/utils/error_handler.py +349 -0
- awslabs/openapi_mcp_server/utils/http_client.py +263 -0
- awslabs/openapi_mcp_server/utils/metrics_provider.py +503 -0
- awslabs/openapi_mcp_server/utils/openapi.py +217 -0
- awslabs/openapi_mcp_server/utils/openapi_validator.py +253 -0
- awslabs_openapi_mcp_server-0.1.1.dist-info/METADATA +418 -0
- awslabs_openapi_mcp_server-0.1.1.dist-info/RECORD +38 -0
- awslabs_openapi_mcp_server-0.1.1.dist-info/WHEEL +4 -0
- awslabs_openapi_mcp_server-0.1.1.dist-info/entry_points.txt +2 -0
- awslabs_openapi_mcp_server-0.1.1.dist-info/licenses/LICENSE +175 -0
- awslabs_openapi_mcp_server-0.1.1.dist-info/licenses/NOTICE +2 -0
|
@@ -0,0 +1,503 @@
|
|
|
1
|
+
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
"""Metrics provider for the OpenAPI MCP Server.
|
|
15
|
+
|
|
16
|
+
This module provides a pluggable metrics system with different backends.
|
|
17
|
+
The default is a simple in-memory implementation, but it can be switched
|
|
18
|
+
to use Prometheus or other backends via environment variables.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
import time
|
|
22
|
+
from abc import ABC, abstractmethod
|
|
23
|
+
from awslabs.openapi_mcp_server import logger
|
|
24
|
+
from awslabs.openapi_mcp_server.utils.config import (
|
|
25
|
+
METRICS_MAX_HISTORY,
|
|
26
|
+
PROMETHEUS_PORT,
|
|
27
|
+
USE_PROMETHEUS,
|
|
28
|
+
)
|
|
29
|
+
from collections import defaultdict
|
|
30
|
+
from dataclasses import dataclass
|
|
31
|
+
from typing import Any, Dict, List, Optional
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class ApiCallMetrics:
    """Metrics for a single recorded API call."""

    path: str  # request path the call was made against
    method: str  # HTTP method as passed by the caller (case preserved here)
    status_code: int  # HTTP response status code (500 used for exceptions)
    duration_ms: float  # call duration in milliseconds
    timestamp: float  # epoch seconds (time.time()) when the call was recorded
    error: Optional[str] = None  # error description, if the call failed
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@dataclass
class ToolMetrics:
    """Metrics for a single recorded tool invocation."""

    tool_name: str  # name of the invoked tool
    duration_ms: float  # invocation duration in milliseconds
    timestamp: float  # epoch seconds (time.time()) when the usage was recorded
    success: bool  # True if the tool completed without error
    error: Optional[str] = None  # error description, if the invocation failed
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class MetricsProvider(ABC):
    """Abstract base class for metrics providers.

    Implementations record individual API calls and tool invocations and
    expose aggregate views of them. This module ships an in-memory
    implementation (the default) and a Prometheus-backed one.
    """

    @abstractmethod
    def record_api_call(
        self,
        path: str,
        method: str,
        status_code: int,
        duration_ms: float,
        error: Optional[str] = None,
    ) -> None:
        """Record metrics for an API call.

        Args:
            path: Request path.
            method: HTTP method.
            status_code: HTTP response status code.
            duration_ms: Call duration in milliseconds.
            error: Optional error description; truthy marks the call failed.
        """
        pass

    @abstractmethod
    def record_tool_usage(
        self, tool_name: str, duration_ms: float, success: bool, error: Optional[str] = None
    ) -> None:
        """Record metrics for tool usage.

        Args:
            tool_name: Name of the invoked tool.
            duration_ms: Invocation duration in milliseconds.
            success: Whether the tool completed without error.
            error: Optional error description for failed invocations.
        """
        pass

    @abstractmethod
    def get_api_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get statistics for API calls, keyed by 'METHOD path'."""
        pass

    @abstractmethod
    def get_tool_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get statistics for tool usage, keyed by tool name."""
        pass

    @abstractmethod
    def get_recent_errors(self, limit: int = 10) -> List[Dict[str, Any]]:
        """Get up to ``limit`` recent API call errors, most recent first."""
        pass

    @abstractmethod
    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of all metrics."""
        pass
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class InMemoryMetricsProvider(MetricsProvider):
    """Simple in-memory metrics provider.

    Keeps a bounded history of individual API calls and tool invocations,
    plus running aggregate counters per path and per tool. The aggregates
    are never evicted; only the per-call history is bounded.
    NOTE(review): no locking — assumes single-threaded/event-loop use.
    """

    def __init__(self, max_history: Optional[int] = None):
        """Initialize the metrics provider.

        Args:
            max_history: Maximum number of API calls to keep in history (defaults to config value)

        """
        from collections import deque

        self._max_history = max_history if max_history is not None else METRICS_MAX_HISTORY
        # deque(maxlen=...) evicts the oldest entry in O(1); the previous
        # list + pop(0) implementation paid O(n) per eviction.
        self._api_calls = deque(maxlen=self._max_history)
        self._tool_usage = deque(maxlen=self._max_history)
        self._path_stats: Dict[str, Dict[str, Any]] = defaultdict(
            lambda: {'count': 0, 'errors': 0, 'total_duration_ms': 0}
        )
        self._tool_stats: Dict[str, Dict[str, Any]] = defaultdict(
            lambda: {'count': 0, 'errors': 0, 'total_duration_ms': 0}
        )
        logger.debug(
            f'Created in-memory metrics provider with max history of {self._max_history} entries'
        )

    def record_api_call(
        self,
        path: str,
        method: str,
        status_code: int,
        duration_ms: float,
        error: Optional[str] = None,
    ) -> None:
        """Record metrics for an API call.

        Args:
            path: Request path.
            method: HTTP method (any case; upper-cased for the stats key).
            status_code: HTTP response status code.
            duration_ms: Call duration in milliseconds.
            error: Optional error description; truthy marks the call failed.
        """
        # Add to bounded history (deque evicts the oldest automatically)
        self._api_calls.append(
            ApiCallMetrics(
                path=path,
                method=method,
                status_code=status_code,
                duration_ms=duration_ms,
                timestamp=time.time(),
                error=error,
            )
        )

        # A call counts as failed if the caller reported an error or the
        # HTTP status is 4xx/5xx.
        failed = bool(error) or status_code >= 400

        # Update per-path aggregates
        path_key = f'{method.upper()} {path}'
        path_stats = self._path_stats[path_key]
        path_stats['count'] += 1
        path_stats['total_duration_ms'] += duration_ms
        if failed:
            path_stats['errors'] += 1

        # Log the API call
        if failed:
            logger.warning(
                f'API call {method.upper()} {path} failed with status {status_code}: {error or "No error details"} ({duration_ms:.2f}ms)'
            )
        else:
            logger.debug(
                f'API call {method.upper()} {path} succeeded with status {status_code} ({duration_ms:.2f}ms)'
            )

    def record_tool_usage(
        self, tool_name: str, duration_ms: float, success: bool, error: Optional[str] = None
    ) -> None:
        """Record metrics for tool usage.

        Args:
            tool_name: Name of the invoked tool.
            duration_ms: Invocation duration in milliseconds.
            success: Whether the tool completed without error.
            error: Optional error description for failed invocations.
        """
        # Add to bounded history (deque evicts the oldest automatically)
        self._tool_usage.append(
            ToolMetrics(
                tool_name=tool_name,
                duration_ms=duration_ms,
                timestamp=time.time(),
                success=success,
                error=error,
            )
        )

        # Update per-tool aggregates
        tool_stats = self._tool_stats[tool_name]
        tool_stats['count'] += 1
        tool_stats['total_duration_ms'] += duration_ms
        if not success:
            tool_stats['errors'] += 1

        # Log the tool usage
        if not success:
            logger.warning(
                f'Tool {tool_name} failed: {error or "No error details"} ({duration_ms:.2f}ms)'
            )
        else:
            logger.debug(f'Tool {tool_name} succeeded ({duration_ms:.2f}ms)')

    @staticmethod
    def _aggregate(stats_map: Dict[str, Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
        """Turn raw count/errors/total_duration counters into derived stats."""
        result = {}
        for key, stats in stats_map.items():
            count = stats['count']
            result[key] = {
                'count': count,
                'errors': stats['errors'],
                'error_rate': (stats['errors'] / count) if count > 0 else 0,
                'avg_duration_ms': (stats['total_duration_ms'] / count) if count > 0 else 0,
            }
        return result

    def get_api_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get statistics for API calls, keyed by 'METHOD path'."""
        return self._aggregate(self._path_stats)

    def get_tool_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get statistics for tool usage, keyed by tool name."""
        return self._aggregate(self._tool_stats)

    def get_recent_errors(self, limit: int = 10) -> List[Dict[str, Any]]:
        """Get up to ``limit`` recent API call errors, most recent first."""
        errors = []
        for call in reversed(self._api_calls):
            if call.error or call.status_code >= 400:
                errors.append(
                    {
                        'path': call.path,
                        'method': call.method,
                        'status_code': call.status_code,
                        'error': call.error,
                        'duration_ms': call.duration_ms,
                        'timestamp': call.timestamp,
                    }
                )
                if len(errors) >= limit:
                    break
        return errors

    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of all metrics.

        Note: totals reflect only the bounded in-memory history, not all
        calls ever recorded.
        """
        api_calls = len(self._api_calls)
        tool_calls = len(self._tool_usage)

        api_errors = sum(1 for call in self._api_calls if call.error or call.status_code >= 400)
        tool_errors = sum(1 for usage in self._tool_usage if not usage.success)

        return {
            'api_calls': {
                'total': api_calls,
                'errors': api_errors,
                'error_rate': (api_errors / api_calls) if api_calls > 0 else 0,
                'paths': len(self._path_stats),
            },
            'tool_usage': {
                'total': tool_calls,
                'errors': tool_errors,
                'error_rate': (tool_errors / tool_calls) if tool_calls > 0 else 0,
                'tools': len(self._tool_stats),
            },
        }
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
# Try to import prometheus_client if enabled.
# prometheus_client is an optional dependency: it is only imported when
# USE_PROMETHEUS is set, and a missing install degrades to a warning so the
# in-memory provider can be used instead.
# Note: Tests for PrometheusMetricsProvider will be skipped if prometheus_client
# is not installed. This is expected behavior and not a test failure.
PROMETHEUS_AVAILABLE = False
prometheus_client = None
if USE_PROMETHEUS:
    try:
        import prometheus_client

        PROMETHEUS_AVAILABLE = True
        logger.info('Prometheus metrics enabled')
    except ImportError:
        logger.warning(
            'Prometheus metrics requested but prometheus_client not installed. '
            'Install with: pip install prometheus_client'
        )
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
class PrometheusMetricsProvider(MetricsProvider):
    """Prometheus metrics provider.

    Exports counters and histograms via prometheus_client (optionally
    serving them over HTTP on PROMETHEUS_PORT). Only a small in-memory
    buffer of recent errors is queryable directly; aggregate stats live
    in Prometheus itself, so the get_*_stats methods return placeholders.
    """

    def __init__(self):
        """Initialize the Prometheus metrics provider.

        Raises:
            ImportError: If prometheus_client is not installed or Prometheus
                support was not enabled at module import time.
        """
        if not PROMETHEUS_AVAILABLE or prometheus_client is None:
            raise ImportError('prometheus_client not available')

        # Create Prometheus metrics
        self._api_requests = prometheus_client.Counter(
            'mcp_api_requests_total', 'Total API requests', ['method', 'path', 'status']
        )
        self._api_errors = prometheus_client.Counter(
            'mcp_api_errors_total', 'Total API errors', ['method', 'path']
        )
        self._api_duration = prometheus_client.Histogram(
            'mcp_api_request_duration_seconds',
            'API request duration in seconds',
            ['method', 'path'],
        )
        self._tool_calls = prometheus_client.Counter(
            'mcp_tool_calls_total', 'Total tool calls', ['tool', 'status']
        )
        self._tool_errors = prometheus_client.Counter(
            'mcp_tool_errors_total', 'Total tool errors', ['tool']
        )
        self._tool_duration = prometheus_client.Histogram(
            'mcp_tool_duration_seconds', 'Tool execution duration in seconds', ['tool']
        )

        # Start metrics server if port is specified
        if PROMETHEUS_PORT > 0:
            prometheus_client.start_http_server(PROMETHEUS_PORT)
            logger.info(f'Started Prometheus metrics server on port {PROMETHEUS_PORT}')

        # Keep a small in-memory buffer for recent errors
        self._recent_errors = []
        self._max_errors = 100

        logger.info('Created Prometheus metrics provider')

    def record_api_call(
        self,
        path: str,
        method: str,
        status_code: int,
        duration_ms: float,
        error: Optional[str] = None,
    ) -> None:
        """Record metrics for an API call."""
        # A call counts as failed if the caller reported an error or the
        # HTTP status is 4xx/5xx.
        failed = bool(error) or status_code >= 400

        # Update Prometheus metrics (histogram is in seconds, input in ms)
        status = 'error' if failed else 'success'
        self._api_requests.labels(method=method, path=path, status=status).inc()
        self._api_duration.labels(method=method, path=path).observe(duration_ms / 1000.0)

        if failed:
            self._api_errors.labels(method=method, path=path).inc()

            # Add to bounded recent-errors buffer
            self._recent_errors.append(
                {
                    'path': path,
                    'method': method,
                    'status_code': status_code,
                    'error': error,
                    'duration_ms': duration_ms,
                    'timestamp': time.time(),
                }
            )
            if len(self._recent_errors) > self._max_errors:
                self._recent_errors.pop(0)

        # Log the API call
        if failed:
            logger.warning(
                f'API call {method.upper()} {path} failed with status {status_code}: {error or "No error details"} ({duration_ms:.2f}ms)'
            )
        else:
            logger.debug(
                f'API call {method.upper()} {path} succeeded with status {status_code} ({duration_ms:.2f}ms)'
            )

    def record_tool_usage(
        self, tool_name: str, duration_ms: float, success: bool, error: Optional[str] = None
    ) -> None:
        """Record metrics for tool usage."""
        # Update Prometheus metrics (histogram is in seconds, input in ms)
        status = 'success' if success else 'error'
        self._tool_calls.labels(tool=tool_name, status=status).inc()
        self._tool_duration.labels(tool=tool_name).observe(duration_ms / 1000.0)

        if not success:
            self._tool_errors.labels(tool=tool_name).inc()

        # Log the tool usage
        if not success:
            logger.warning(
                f'Tool {tool_name} failed: {error or "No error details"} ({duration_ms:.2f}ms)'
            )
        else:
            logger.debug(f'Tool {tool_name} succeeded ({duration_ms:.2f}ms)')

    def get_api_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get statistics for API calls.

        Note: This is a limited implementation since Prometheus doesn't provide
        a way to query metrics directly. We return an empty dict.
        """
        logger.warning('API stats not available with Prometheus metrics provider')
        return {}

    def get_tool_stats(self) -> Dict[str, Dict[str, Any]]:
        """Get statistics for tool usage.

        Note: This is a limited implementation since Prometheus doesn't provide
        a way to query metrics directly. We return a default dict with empty values
        to prevent errors in consumers.
        """

        # defaultdict is imported at module level; the previous function-local
        # re-import was redundant. Any key a consumer touches yields zeroes.
        def nested_dict():
            return {'count': 0, 'errors': 0, 'error_rate': 0.0, 'avg_duration_ms': 0.0}

        result = defaultdict(nested_dict)

        # Log at debug level instead of warning to avoid filling logs
        logger.debug('Detailed tool stats not available with Prometheus metrics provider')
        return result

    def get_recent_errors(self, limit: int = 10) -> List[Dict[str, Any]]:
        """Get up to ``limit`` recent API call errors from the local buffer."""
        return self._recent_errors[-limit:] if self._recent_errors else []

    def get_summary(self) -> Dict[str, Any]:
        """Get a summary of all metrics.

        Note: This is a limited implementation since Prometheus doesn't provide
        a way to query metrics directly. We return a minimal summary.
        """
        return {
            'api_calls': {
                'total': 'Available in Prometheus',
                'errors': 'Available in Prometheus',
                'paths': 'Available in Prometheus',
            },
            'tool_usage': {
                'total': 'Available in Prometheus',
                'errors': 'Available in Prometheus',
                'tools': 'Available in Prometheus',
            },
            'prometheus_enabled': True,
            'prometheus_port': PROMETHEUS_PORT,
        }
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
# Create the appropriate metrics provider based on configuration
def create_metrics_provider() -> MetricsProvider:
    """Create a metrics provider based on configuration.

    Prefers Prometheus when enabled and installed; any failure constructing
    it (or Prometheus being unavailable) falls back to the in-memory provider.
    """
    if USE_PROMETHEUS and PROMETHEUS_AVAILABLE:
        try:
            return PrometheusMetricsProvider()
        except Exception as e:
            logger.error(f'Failed to create Prometheus metrics provider: {e}')
            logger.info('Falling back to in-memory metrics provider')

    # Default to in-memory provider
    return InMemoryMetricsProvider()
|
|
453
|
+
|
|
454
|
+
|
|
455
|
+
# Global metrics provider instance, created once at module import time and
# shared by the timer decorators below.
metrics = create_metrics_provider()
|
|
457
|
+
|
|
458
|
+
|
|
459
|
+
def api_call_timer(func):
    """Decorator that times an async API call and records its metrics.

    Reads ``path`` and ``method`` from keyword arguments (falling back to
    'unknown' when passed positionally). On success the response's
    ``status_code`` is recorded; on exception the call is recorded with
    status 500 and the exception string, then re-raised.
    """
    import functools

    # functools.wraps preserves the wrapped coroutine's __name__/__doc__,
    # which the previous implementation clobbered.
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()
        path = kwargs.get('path', 'unknown')
        method = kwargs.get('method', 'unknown')

        try:
            response = await func(*args, **kwargs)
            duration_ms = (time.time() - start_time) * 1000
            metrics.record_api_call(
                path=path, method=method, status_code=response.status_code, duration_ms=duration_ms
            )
            return response
        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            metrics.record_api_call(
                path=path, method=method, status_code=500, duration_ms=duration_ms, error=str(e)
            )
            raise

    return wrapper
|
|
482
|
+
|
|
483
|
+
|
|
484
|
+
def tool_usage_timer(func):
    """Decorator that times an async tool invocation and records its metrics.

    Successful calls are recorded with ``success=True``; failures are
    recorded with the stringified exception and re-raised.
    """
    import functools

    # The tool name is fixed at decoration time; resolve it once rather
    # than on every call.
    tool_name = getattr(func, '__name__', 'unknown')

    # functools.wraps preserves the wrapped coroutine's __name__/__doc__,
    # which the previous implementation clobbered.
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()

        try:
            result = await func(*args, **kwargs)
            duration_ms = (time.time() - start_time) * 1000
            metrics.record_tool_usage(tool_name=tool_name, duration_ms=duration_ms, success=True)
            return result
        except Exception as e:
            duration_ms = (time.time() - start_time) * 1000
            metrics.record_tool_usage(
                tool_name=tool_name, duration_ms=duration_ms, success=False, error=str(e)
            )
            raise

    return wrapper
|