foundry_mcp-0.8.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of foundry-mcp might be problematic.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
foundry_mcp/tools/unified/metrics.py
@@ -0,0 +1,777 @@
"""Unified metrics tool with action routing and validation."""

from __future__ import annotations

import logging
from dataclasses import asdict
from typing import Any, Dict, Mapping, Optional, Tuple, TypedDict

from mcp.server.fastmcp import FastMCP

from foundry_mcp.config import ServerConfig
from foundry_mcp.core.naming import canonical_tool
from foundry_mcp.core.pagination import (
    CursorError,
    decode_cursor,
    encode_cursor,
    normalize_page_size,
    paginated_response,
)
from foundry_mcp.core.responses import (
    ErrorCode,
    ErrorType,
    error_response,
    success_response,
)
from foundry_mcp.tools.unified.router import (
    ActionDefinition,
    ActionRouter,
    ActionRouterError,
)

logger = logging.getLogger(__name__)


class MetricsQueryPayload(TypedDict, total=False):
    """Typed definition for query inputs."""

    metric_name: str
    labels: Mapping[str, str]
    label_selectors: Mapping[str, str]
    since: str
    until: str
    limit: int
    cursor: str


class MetricsListPayload(TypedDict, total=False):
    """Typed definition for list inputs."""

    limit: int
    cursor: str


class MetricsSummaryPayload(TypedDict, total=False):
    """Typed definition for summary inputs."""

    metric_name: str
    labels: Mapping[str, str]
    since: str
    until: str


class MetricsCleanupPayload(TypedDict, total=False):
    """Typed definition for cleanup inputs."""

    retention_days: int
    max_records: int
    dry_run: bool


_ACTION_SUMMARY = {
    "query": "Query persisted metrics with optional filters",
    "list": "List persisted metric series with pagination",
    "summary": "Return aggregate statistics for a metric",
    "stats": "Surface global metrics persistence statistics",
    "cleanup": "Apply retention policy or preview cleanup",
}


def _metrics_disabled_response() -> dict:
    return asdict(
        error_response(
            "Metrics persistence is disabled",
            error_code=ErrorCode.UNAVAILABLE,
            error_type=ErrorType.UNAVAILABLE,
            remediation="Enable metrics_persistence.enabled in server configuration",
            details={"config_key": "metrics_persistence.enabled"},
        )
    )


def _invalid_cursor_response(exc: CursorError) -> dict:
    return asdict(
        error_response(
            f"Invalid cursor: {exc}",
            error_code=ErrorCode.INVALID_FORMAT,
            error_type=ErrorType.VALIDATION,
            remediation="Use the cursor value returned by the previous response",
        )
    )


def _validation_error(
    field: str,
    action: str,
    message: str,
    *,
    code: ErrorCode = ErrorCode.INVALID_FORMAT,
    remediation: Optional[str] = None,
) -> dict:
    return asdict(
        error_response(
            f"Invalid field '{field}' for metrics.{action}: {message}",
            error_code=code,
            error_type=ErrorType.VALIDATION,
            remediation=remediation,
            details={"field": field, "action": f"metrics.{action}"},
        )
    )


def _resolve_metrics_store(config: ServerConfig) -> Tuple[Any | None, Optional[dict]]:
    persistence = getattr(config, "metrics_persistence", None)
    if not persistence or not persistence.enabled:
        return None, _metrics_disabled_response()

    try:
        from foundry_mcp.core.metrics_store import get_metrics_store

        store = get_metrics_store(persistence.get_storage_path())
    except Exception as exc:  # pragma: no cover - defensive import/runtime guard
        logger.exception("Failed to initialize metrics store")
        return None, asdict(
            error_response(
                f"Failed to initialize metrics store: {exc}",
                error_code=ErrorCode.UNAVAILABLE,
                error_type=ErrorType.UNAVAILABLE,
                remediation="Verify metrics persistence configuration",
                details={"storage_path": str(persistence.get_storage_path())},
            )
        )

    return store, None


def _normalize_labels(
    *,
    action: str,
    labels: Any = None,
    label_selectors: Any = None,
) -> Tuple[Optional[Dict[str, str]], Optional[dict]]:
    source = labels if labels is not None else label_selectors
    if source is None:
        return None, None
    if not isinstance(source, Mapping):
        return None, _validation_error(
            "labels",
            action,
            "Expected an object with label key/value pairs",
        )

    normalized: Dict[str, str] = {}
    for key, value in source.items():
        if not isinstance(key, str) or not key.strip():
            return None, _validation_error(
                "labels",
                action,
                "Label names must be non-empty strings",
            )
        if not isinstance(value, str):
            return None, _validation_error(
                "labels",
                action,
                "Label values must be strings",
            )
        normalized[key] = value

    return normalized or None, None


def _validate_optional_str(
    value: Any,
    *,
    field: str,
    action: str,
    allow_empty: bool = False,
) -> Tuple[Optional[str], Optional[dict]]:
    if value is None:
        return None, None
    if isinstance(value, str) and (allow_empty or value.strip()):
        return value, None
    return None, _validation_error(field, action, "Expected a non-empty string")


def _validate_required_str(
    value: Any, *, field: str, action: str
) -> Tuple[str, Optional[dict]]:
    normalized, error = _validate_optional_str(value, field=field, action=action)
    if error:
        return "", error
    if normalized is None:
        return "", _validation_error(
            field,
            action,
            "Value is required",
            code=ErrorCode.MISSING_REQUIRED,
            remediation=f"Provide '{field}' for metrics.{action}",
        )
    return normalized, None


def _validate_optional_int(
    value: Any,
    *,
    field: str,
    action: str,
    minimum: int = 1,
) -> Tuple[Optional[int], Optional[dict]]:
    if value is None:
        return None, None
    if isinstance(value, bool):
        return None, _validation_error(field, action, "Boolean values are not allowed")
    if not isinstance(value, int):
        return None, _validation_error(field, action, "Expected an integer")
    if value < minimum:
        return None, _validation_error(
            field,
            action,
            f"Value must be >= {minimum}",
            remediation=f"Provide a {field} that is at least {minimum}",
        )
    return value, None


def _validate_optional_bool(
    value: Any, *, field: str, action: str
) -> Tuple[Optional[bool], Optional[dict]]:
    if value is None:
        return None, None
    if isinstance(value, bool):
        return value, None
    return None, _validation_error(field, action, "Expected a boolean value")


def _validate_optional_cursor(
    value: Any, *, action: str
) -> Tuple[Optional[str], Optional[dict]]:
    if value is None:
        return None, None
    if isinstance(value, str) and value.strip():
        return value, None
    return None, _validation_error(
        "cursor", action, "Cursor must be a non-empty string"
    )


def _validate_query_payload(
    payload: Mapping[str, Any],
) -> Tuple[Dict[str, Any], Optional[dict]]:
    action = "query"
    metric_name, error = _validate_optional_str(
        payload.get("metric_name"), field="metric_name", action=action
    )
    if error:
        return {}, error

    labels, error = _normalize_labels(
        action=action,
        labels=payload.get("labels"),
        label_selectors=payload.get("label_selectors"),
    )
    if error:
        return {}, error

    since, error = _validate_optional_str(
        payload.get("since"), field="since", action=action
    )
    if error:
        return {}, error
    until, error = _validate_optional_str(
        payload.get("until"), field="until", action=action
    )
    if error:
        return {}, error

    limit, error = _validate_optional_int(
        payload.get("limit"), field="limit", action=action
    )
    if error:
        return {}, error

    cursor, error = _validate_optional_cursor(payload.get("cursor"), action=action)
    if error:
        return {}, error

    return {
        "metric_name": metric_name,
        "labels": labels,
        "since": since,
        "until": until,
        "limit": limit,
        "cursor": cursor,
    }, None


def _validate_list_payload(
    payload: Mapping[str, Any],
) -> Tuple[Dict[str, Any], Optional[dict]]:
    action = "list"
    limit, error = _validate_optional_int(
        payload.get("limit"), field="limit", action=action
    )
    if error:
        return {}, error
    cursor, error = _validate_optional_cursor(payload.get("cursor"), action=action)
    if error:
        return {}, error
    return {"limit": limit, "cursor": cursor}, None


def _validate_summary_payload(
    payload: Mapping[str, Any],
) -> Tuple[Dict[str, Any], Optional[dict]]:
    action = "summary"
    metric_name, error = _validate_required_str(
        payload.get("metric_name"), field="metric_name", action=action
    )
    if error:
        return {}, error

    labels, error = _normalize_labels(
        action=action,
        labels=payload.get("labels"),
        label_selectors=payload.get("label_selectors"),
    )
    if error:
        return {}, error

    since, error = _validate_optional_str(
        payload.get("since"), field="since", action=action
    )
    if error:
        return {}, error
    until, error = _validate_optional_str(
        payload.get("until"), field="until", action=action
    )
    if error:
        return {}, error

    return {
        "metric_name": metric_name,
        "labels": labels,
        "since": since,
        "until": until,
    }, None


def _validate_cleanup_payload(
    payload: Mapping[str, Any],
) -> Tuple[Dict[str, Any], Optional[dict]]:
    action = "cleanup"
    retention_days, error = _validate_optional_int(
        payload.get("retention_days"), field="retention_days", action=action
    )
    if error:
        return {}, error
    max_records, error = _validate_optional_int(
        payload.get("max_records"), field="max_records", action=action
    )
    if error:
        return {}, error
    dry_run, error = _validate_optional_bool(
        payload.get("dry_run"), field="dry_run", action=action
    )
    if error:
        return {}, error
    return {
        "retention_days": retention_days,
        "max_records": max_records,
        "dry_run": dry_run if dry_run is not None else False,
    }, None


def perform_metrics_query(
    *,
    config: ServerConfig,
    metric_name: Optional[str],
    labels: Optional[Mapping[str, str]],
    since: Optional[str],
    until: Optional[str],
    limit: Optional[int],
    cursor: Optional[str],
) -> dict:
    store, error = _resolve_metrics_store(config)
    if error:
        return error
    assert store is not None

    page_size = normalize_page_size(limit)
    offset = 0
    if cursor:
        try:
            cursor_data = decode_cursor(cursor)
            offset_value = cursor_data.get("offset", 0)
            offset = (
                int(offset_value)
                if isinstance(offset_value, int)
                else int(offset_value or 0)
            )
        except CursorError as exc:
            return _invalid_cursor_response(exc)
        except (TypeError, ValueError):
            return _invalid_cursor_response(
                CursorError("Cursor offset must be an integer", cursor=cursor)
            )

    try:
        records = store.query(
            metric_name=metric_name,
            labels=labels,
            since=since,
            until=until,
            limit=page_size + 1,
            offset=offset,
        )
    except Exception as exc:  # pragma: no cover - backend failure guard
        logger.exception("Error querying metrics")
        return asdict(
            error_response(
                f"Failed to query metrics: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
            )
        )

    has_more = len(records) > page_size
    visible_records = records[:page_size] if has_more else records
    next_cursor = encode_cursor({"offset": offset + page_size}) if has_more else None
    metrics_dicts = [record.to_dict() for record in visible_records]

    data = {
        "metrics": metrics_dicts,
        "count": len(metrics_dicts),
    }

    total_count = None
    try:
        total_count = store.count()
    except Exception:  # pragma: no cover - defensive guard
        logger.debug("Metrics store count failed; omitting total_count", exc_info=True)

    return paginated_response(
        data=data,
        cursor=next_cursor,
        has_more=has_more,
        page_size=page_size,
        total_count=total_count,
    )


def perform_metrics_list(
    *, config: ServerConfig, limit: Optional[int], cursor: Optional[str]
) -> dict:
    store, error = _resolve_metrics_store(config)
    if error:
        return error
    assert store is not None

    page_size = normalize_page_size(limit)
    offset = 0
    if cursor:
        try:
            cursor_data = decode_cursor(cursor)
            offset = int(cursor_data.get("offset", 0))
        except CursorError as exc:
            return _invalid_cursor_response(exc)
        except (TypeError, ValueError):
            return _invalid_cursor_response(
                CursorError("Cursor offset must be an integer", cursor=cursor)
            )

    try:
        all_metrics = store.list_metrics()
    except Exception as exc:  # pragma: no cover - backend failure guard
        logger.exception("Error listing metrics")
        return asdict(
            error_response(
                f"Failed to list metrics: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
            )
        )

    total_count = len(all_metrics)
    end_idx = offset + page_size
    metrics_page = all_metrics[offset:end_idx]
    has_more = end_idx < total_count
    next_cursor = encode_cursor({"offset": end_idx}) if has_more else None

    data = {
        "metrics": metrics_page,
        "count": len(metrics_page),
    }

    return paginated_response(
        data=data,
        cursor=next_cursor,
        has_more=has_more,
        page_size=page_size,
        total_count=total_count,
    )


def perform_metrics_summary(
    *,
    config: ServerConfig,
    metric_name: str,
    labels: Optional[Mapping[str, str]],
    since: Optional[str],
    until: Optional[str],
) -> dict:
    store, error = _resolve_metrics_store(config)
    if error:
        return error
    assert store is not None

    try:
        summary = store.get_summary(
            metric_name=metric_name,
            labels=labels,
            since=since,
            until=until,
        )
    except Exception as exc:  # pragma: no cover - backend failure guard
        logger.exception("Error getting metrics summary")
        return asdict(
            error_response(
                f"Failed to get metrics summary: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
            )
        )

    return asdict(success_response(data={"summary": summary}))


def perform_metrics_stats(*, config: ServerConfig) -> dict:
    store, error = _resolve_metrics_store(config)
    if error:
        return error
    assert store is not None

    try:
        metrics_list = store.list_metrics()
        total_records = store.count()
    except Exception as exc:  # pragma: no cover - backend failure guard
        logger.exception("Error getting metrics stats")
        return asdict(
            error_response(
                f"Failed to get metrics stats: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
            )
        )

    unique_metrics = len(metrics_list)
    total_samples = sum(metric.get("count", 0) for metric in metrics_list)

    return asdict(
        success_response(
            data={
                "total_records": total_records,
                "unique_metrics": unique_metrics,
                "total_samples": total_samples,
                "metrics_by_name": {
                    metric.get("metric_name"): metric.get("count", 0)
                    for metric in metrics_list
                    if metric.get("metric_name")
                },
                "storage_path": str(config.metrics_persistence.get_storage_path()),
                "retention_days": config.metrics_persistence.retention_days,
                "max_records": config.metrics_persistence.max_records,
            }
        )
    )


def perform_metrics_cleanup(
    *,
    config: ServerConfig,
    retention_days: Optional[int],
    max_records: Optional[int],
    dry_run: bool,
) -> dict:
    store, error = _resolve_metrics_store(config)
    if error:
        return error
    assert store is not None

    effective_retention = (
        retention_days
        if retention_days is not None
        else config.metrics_persistence.retention_days
    )
    effective_max = (
        max_records
        if max_records is not None
        else config.metrics_persistence.max_records
    )

    try:
        if dry_run:
            current_count = store.count()
            return asdict(
                success_response(
                    data={
                        "current_count": current_count,
                        "retention_days": effective_retention,
                        "max_records": effective_max,
                        "dry_run": True,
                        "message": "Dry run - no records deleted",
                    }
                )
            )

        deleted_count = store.cleanup(
            retention_days=effective_retention,
            max_records=effective_max,
        )
    except Exception as exc:  # pragma: no cover - backend failure guard
        logger.exception("Error cleaning up metrics")
        return asdict(
            error_response(
                f"Failed to cleanup metrics: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
            )
        )

    return asdict(
        success_response(
            data={
                "deleted_count": deleted_count,
                "retention_days": effective_retention,
                "max_records": effective_max,
                "dry_run": False,
            }
        )
    )


def _handle_metrics_query(*, config: ServerConfig, **payload: Any) -> dict:
    validated, error = _validate_query_payload(payload)
    if error:
        return error
    return perform_metrics_query(config=config, **validated)


def _handle_metrics_list(*, config: ServerConfig, **payload: Any) -> dict:
    validated, error = _validate_list_payload(payload)
    if error:
        return error
    return perform_metrics_list(config=config, **validated)


def _handle_metrics_summary(*, config: ServerConfig, **payload: Any) -> dict:
    validated, error = _validate_summary_payload(payload)
    if error:
        return error
    return perform_metrics_summary(config=config, **validated)


def _handle_metrics_stats(*, config: ServerConfig, **_: Any) -> dict:
    return perform_metrics_stats(config=config)


def _handle_metrics_cleanup(*, config: ServerConfig, **payload: Any) -> dict:
    validated, error = _validate_cleanup_payload(payload)
    if error:
        return error
    return perform_metrics_cleanup(config=config, **validated)


_METRICS_ROUTER = ActionRouter(
    tool_name="metrics",
    actions=[
        ActionDefinition(
            name="query",
            handler=_handle_metrics_query,
            summary=_ACTION_SUMMARY["query"],
        ),
        ActionDefinition(
            name="list",
            handler=_handle_metrics_list,
            summary=_ACTION_SUMMARY["list"],
        ),
        ActionDefinition(
            name="summary",
            handler=_handle_metrics_summary,
            summary=_ACTION_SUMMARY["summary"],
        ),
        ActionDefinition(
            name="stats",
            handler=_handle_metrics_stats,
            summary=_ACTION_SUMMARY["stats"],
        ),
        ActionDefinition(
            name="cleanup",
            handler=_handle_metrics_cleanup,
            summary=_ACTION_SUMMARY["cleanup"],
        ),
    ],
)


def _dispatch_metrics_action(
    *, action: str, payload: Dict[str, Any], config: ServerConfig
) -> dict:
    try:
        return _METRICS_ROUTER.dispatch(action=action, config=config, **payload)
    except ActionRouterError as exc:
        allowed = ", ".join(exc.allowed_actions)
        return asdict(
            error_response(
                f"Unsupported metrics action '{action}'. Allowed actions: {allowed}",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation=f"Use one of: {allowed}",
            )
        )


def register_unified_metrics_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated metrics tool."""

    @canonical_tool(
        mcp,
        canonical_name="metrics",
    )
    def metrics(
        action: str,
        metric_name: Optional[str] = None,
        labels: Optional[Mapping[str, str]] = None,
        label_selectors: Optional[Mapping[str, str]] = None,
        since: Optional[str] = None,
        until: Optional[str] = None,
        limit: Optional[int] = None,
        cursor: Optional[str] = None,
        retention_days: Optional[int] = None,
        max_records: Optional[int] = None,
        dry_run: bool = False,
    ) -> dict:
        payload = {
            "metric_name": metric_name,
            "labels": labels,
            "label_selectors": label_selectors,
            "since": since,
            "until": until,
            "limit": limit,
            "cursor": cursor,
            "retention_days": retention_days,
            "max_records": max_records,
            "dry_run": dry_run,
        }
        return _dispatch_metrics_action(action=action, payload=payload, config=config)

    logger.debug("Registered unified metrics tool")


__all__ = [
    "register_unified_metrics_tool",
    "perform_metrics_query",
    "perform_metrics_list",
    "perform_metrics_summary",
    "perform_metrics_stats",
    "perform_metrics_cleanup",
]
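The module above exposes a single `metrics` tool that routes the actions query, list, summary, stats, and cleanup through `_METRICS_ROUTER`, plus standalone `perform_metrics_*` helpers. For orientation, here is a minimal sketch of driving those helpers directly in Python. How a `ServerConfig` is obtained, and the metric name and labels used below, are illustrative assumptions rather than details confirmed by this diff; metrics persistence must be enabled or every call returns the UNAVAILABLE error envelope shown above.

from foundry_mcp.config import ServerConfig
from foundry_mcp.tools.unified.metrics import (
    perform_metrics_cleanup,
    perform_metrics_query,
)

# Assumption: a default ServerConfig is usable here and has
# metrics_persistence.enabled set to True.
config = ServerConfig()

# Page through a persisted metric; the metric name and labels are hypothetical.
page = perform_metrics_query(
    config=config,
    metric_name="tool_invocations",
    labels={"tool": "metrics"},
    since=None,
    until=None,
    limit=50,
    cursor=None,
)

# Preview a retention sweep without deleting any records.
preview = perform_metrics_cleanup(
    config=config,
    retention_days=30,
    max_records=None,
    dry_run=True,
)

Through the registered MCP tool, the equivalent requests would be metrics(action="query", metric_name=..., limit=50) and metrics(action="cleanup", retention_days=30, dry_run=True).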