foundry-mcp 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -0
- foundry_mcp/cli/__init__.py +80 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +633 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +652 -0
- foundry_mcp/cli/commands/session.py +479 -0
- foundry_mcp/cli/commands/specs.py +856 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +259 -0
- foundry_mcp/cli/flags.py +266 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +850 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1636 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/feature_flags.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/journal.py +694 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1350 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +123 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +317 -0
- foundry_mcp/core/prometheus.py +577 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +546 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
- foundry_mcp/core/prompts/plan_review.py +623 -0
- foundry_mcp/core/providers/__init__.py +225 -0
- foundry_mcp/core/providers/base.py +476 -0
- foundry_mcp/core/providers/claude.py +460 -0
- foundry_mcp/core/providers/codex.py +619 -0
- foundry_mcp/core/providers/cursor_agent.py +642 -0
- foundry_mcp/core/providers/detectors.py +488 -0
- foundry_mcp/core/providers/gemini.py +405 -0
- foundry_mcp/core/providers/opencode.py +616 -0
- foundry_mcp/core/providers/opencode_wrapper.js +302 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +729 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +934 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +1650 -0
- foundry_mcp/core/task.py +1289 -0
- foundry_mcp/core/testing.py +450 -0
- foundry_mcp/core/validation.py +2081 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +234 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +289 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +174 -0
- foundry_mcp/dashboard/views/overview.py +160 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/sdd-spec-schema.json +386 -0
- foundry_mcp/server.py +164 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +71 -0
- foundry_mcp/tools/unified/authoring.py +1487 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +198 -0
- foundry_mcp/tools/unified/environment.py +939 -0
- foundry_mcp/tools/unified/error.py +462 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +632 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +745 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +629 -0
- foundry_mcp/tools/unified/review.py +685 -0
- foundry_mcp/tools/unified/review_helpers.py +299 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +580 -0
- foundry_mcp/tools/unified/spec.py +808 -0
- foundry_mcp/tools/unified/task.py +2202 -0
- foundry_mcp/tools/unified/test.py +370 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.3.3.dist-info/METADATA +337 -0
- foundry_mcp-0.3.3.dist-info/RECORD +135 -0
- foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
- foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,808 @@
|
|
|
1
|
+
"""Unified spec tooling with action routing.
|
|
2
|
+
|
|
3
|
+
This router consolidates the high-volume spec-* tool family behind a single
|
|
4
|
+
`spec(action=...)` surface.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import logging
|
|
10
|
+
import time
|
|
11
|
+
from dataclasses import asdict
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any, Dict, List, Optional
|
|
14
|
+
|
|
15
|
+
from mcp.server.fastmcp import FastMCP
|
|
16
|
+
|
|
17
|
+
from foundry_mcp.config import ServerConfig
|
|
18
|
+
from foundry_mcp.core.naming import canonical_tool
|
|
19
|
+
from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
|
|
20
|
+
from foundry_mcp.core.pagination import (
|
|
21
|
+
CursorError,
|
|
22
|
+
decode_cursor,
|
|
23
|
+
encode_cursor,
|
|
24
|
+
normalize_page_size,
|
|
25
|
+
)
|
|
26
|
+
from foundry_mcp.core.responses import (
|
|
27
|
+
ErrorCode,
|
|
28
|
+
ErrorType,
|
|
29
|
+
error_response,
|
|
30
|
+
success_response,
|
|
31
|
+
)
|
|
32
|
+
from foundry_mcp.core.spec import (
|
|
33
|
+
find_spec_file,
|
|
34
|
+
find_specs_directory,
|
|
35
|
+
list_specs,
|
|
36
|
+
load_spec,
|
|
37
|
+
)
|
|
38
|
+
from foundry_mcp.core.validation import (
|
|
39
|
+
apply_fixes,
|
|
40
|
+
calculate_stats,
|
|
41
|
+
get_fix_actions,
|
|
42
|
+
validate_spec,
|
|
43
|
+
)
|
|
44
|
+
from foundry_mcp.tools.unified.router import (
|
|
45
|
+
ActionDefinition,
|
|
46
|
+
ActionRouter,
|
|
47
|
+
ActionRouterError,
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
logger = logging.getLogger(__name__)
# Shared metrics sink for the analyze handlers' counters/timers.
_metrics = get_metrics()

# Pagination bounds for spec(action="list"); enforced via normalize_page_size.
_DEFAULT_PAGE_SIZE = 100
_MAX_PAGE_SIZE = 1000
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _resolve_specs_dir(
    config: ServerConfig, workspace: Optional[str]
) -> Optional[Path]:
    """Locate the specs directory for a request.

    An explicit *workspace* takes precedence; otherwise the configured
    directory is used, falling back to auto-discovery from the current
    working directory. Returns ``None`` when nothing can be found.
    """
    if not workspace:
        return config.specs_dir or find_specs_directory()
    return find_specs_directory(workspace)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _handle_find(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Resolve a spec ID to its on-disk location.

    Returns a success payload with ``found``/``path``/``status_folder`` when
    the spec exists, a ``found=False`` success payload when it does not, and
    an error payload for bad input or a missing specs directory.
    """
    requested_id = payload.get("spec_id")
    workspace = payload.get("workspace")

    # Guard: spec_id must be a non-blank string.
    if not (isinstance(requested_id, str) and requested_id.strip()):
        failure = error_response(
            "spec_id is required",
            error_code=ErrorCode.MISSING_REQUIRED,
            error_type=ErrorType.VALIDATION,
            remediation="Provide a spec_id parameter",
        )
        return asdict(failure)

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        failure = error_response(
            "No specs directory found",
            error_code=ErrorCode.NOT_FOUND,
            error_type=ErrorType.NOT_FOUND,
            remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
            details={"workspace": workspace},
        )
        return asdict(failure)

    located = find_spec_file(requested_id, specs_dir)
    if located is None:
        return asdict(success_response(found=False, spec_id=requested_id))

    # The parent folder name doubles as the lifecycle status (active/pending/...).
    return asdict(
        success_response(
            found=True,
            spec_id=requested_id,
            path=str(located),
            status_folder=located.parent.name,
        )
    )
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _handle_list(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """List specs with optional status filtering and cursor-based pagination.

    The cursor is an opaque token encoding the last spec_id of the previous
    page; listing resumes immediately after it in spec_id sort order.
    """
    status = payload.get("status", "all")
    include_progress = payload.get("include_progress", True)
    cursor = payload.get("cursor")
    limit = payload.get("limit")
    workspace = payload.get("workspace")

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    page_size = normalize_page_size(
        limit, default=_DEFAULT_PAGE_SIZE, maximum=_MAX_PAGE_SIZE
    )

    # Decode the resume point from the opaque cursor, if one was supplied.
    resume_after = None
    if cursor:
        try:
            resume_after = decode_cursor(cursor).get("last_id")
        except CursorError as exc:
            return asdict(
                error_response(
                    f"Invalid pagination cursor: {exc}",
                    error_code=ErrorCode.INVALID_FORMAT,
                    error_type=ErrorType.VALIDATION,
                    remediation="Use the cursor value returned by the previous spec(action=list) call.",
                )
            )

    # "all" means no status filter at the listing layer.
    entries = list_specs(
        specs_dir=specs_dir, status=None if status == "all" else status
    )
    entries.sort(key=lambda item: item.get("spec_id", ""))

    if not include_progress:
        # Caller opted out of progress data; strip it from every entry.
        for item in entries:
            for field in ("total_tasks", "completed_tasks", "progress_percentage"):
                item.pop(field, None)

    if resume_after:
        # Skip everything up to and including the cursor's last spec.
        offset = 0
        for position, item in enumerate(entries):
            if item.get("spec_id") == resume_after:
                offset = position + 1
                break
        entries = entries[offset:]

    # Fetch one extra row beyond the page to detect whether more pages exist.
    window = entries[: page_size + 1]
    has_more = len(window) > page_size
    if has_more:
        window = window[:page_size]

    next_cursor = (
        encode_cursor({"last_id": window[-1].get("spec_id")})
        if has_more and window
        else None
    )

    return asdict(
        success_response(
            specs=window,
            count=len(window),
            pagination={
                "cursor": next_cursor,
                "has_more": has_more,
                "page_size": page_size,
            },
        )
    )
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
def _handle_validate(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Validate a spec and report diagnostics without modifying the file."""
    spec_id = payload.get("spec_id")
    workspace = payload.get("workspace")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    outcome = validate_spec(spec_data)
    # Flatten diagnostic objects into plain dicts for the JSON response.
    reported = [
        {
            "code": diag.code,
            "message": diag.message,
            "severity": diag.severity,
            "category": diag.category,
            "location": diag.location,
            "suggested_fix": diag.suggested_fix,
            "auto_fixable": diag.auto_fixable,
        }
        for diag in outcome.diagnostics
    ]

    return asdict(
        success_response(
            spec_id=outcome.spec_id,
            is_valid=outcome.is_valid,
            error_count=outcome.error_count,
            warning_count=outcome.warning_count,
            info_count=outcome.info_count,
            diagnostics=reported,
        )
    )
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def _handle_fix(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Apply auto-fixes for a spec's validation issues.

    Supports ``dry_run`` (report without writing) and ``create_backup``
    (snapshot the file before rewriting it).
    """
    spec_id = payload.get("spec_id")

    raw_dry_run = payload.get("dry_run", False)
    if raw_dry_run is not None and not isinstance(raw_dry_run, bool):
        return asdict(
            error_response(
                "dry_run must be a boolean",
                error_code=ErrorCode.INVALID_FORMAT,
                error_type=ErrorType.VALIDATION,
                remediation="Provide dry_run=true|false",
                details={"field": "dry_run"},
            )
        )
    # An explicit None falls back to the default of False.
    dry_run = raw_dry_run if isinstance(raw_dry_run, bool) else False

    raw_backup = payload.get("create_backup", True)
    if raw_backup is not None and not isinstance(raw_backup, bool):
        return asdict(
            error_response(
                "create_backup must be a boolean",
                error_code=ErrorCode.INVALID_FORMAT,
                error_type=ErrorType.VALIDATION,
                remediation="Provide create_backup=true|false",
                details={"field": "create_backup"},
            )
        )
    # An explicit None falls back to the default of True.
    create_backup = raw_backup if isinstance(raw_backup, bool) else True

    workspace = payload.get("workspace")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_path = find_spec_file(spec_id, specs_dir)
    if not spec_path:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec: {spec_id}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check spec JSON validity and retry.",
                details={"spec_id": spec_id},
            )
        )

    fix_candidates = get_fix_actions(validate_spec(spec_data), spec_data)
    if not fix_candidates:
        return asdict(
            success_response(
                spec_id=spec_id,
                applied_count=0,
                skipped_count=0,
                message="No auto-fixable issues found",
            )
        )

    report = apply_fixes(
        fix_candidates, str(spec_path), dry_run=dry_run, create_backup=create_backup
    )

    def _summarize(actions):
        # Project fix-action objects down to response-friendly dicts.
        return [
            {"id": act.id, "description": act.description, "category": act.category}
            for act in actions
        ]

    return asdict(
        success_response(
            spec_id=spec_id,
            dry_run=dry_run,
            applied_count=len(report.applied_actions),
            skipped_count=len(report.skipped_actions),
            applied_actions=_summarize(report.applied_actions),
            skipped_actions=_summarize(report.skipped_actions),
            backup_path=report.backup_path,
        )
    )
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
def _handle_stats(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Compute and return summary statistics for a single spec."""
    spec_id = payload.get("spec_id")
    workspace = payload.get("workspace")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_path = find_spec_file(spec_id, specs_dir)
    if not spec_path:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec: {spec_id}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check spec JSON validity and retry.",
                details={"spec_id": spec_id},
            )
        )

    # calculate_stats needs the file path as well (for file_size_kb).
    summary = calculate_stats(spec_data, str(spec_path))
    return asdict(
        success_response(
            spec_id=summary.spec_id,
            title=summary.title,
            version=summary.version,
            status=summary.status,
            totals=summary.totals,
            status_counts=summary.status_counts,
            max_depth=summary.max_depth,
            avg_tasks_per_phase=summary.avg_tasks_per_phase,
            verification_coverage=summary.verification_coverage,
            progress=summary.progress,
            file_size_kb=summary.file_size_kb,
        )
    )
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
def _handle_validate_fix(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Validate a spec and, when ``auto_fix`` is set, apply fixes in one pass.

    When fixes are applied the file is re-validated afterwards so the
    response can report post-fix health alongside the original diagnostics.
    """
    spec_id = payload.get("spec_id")

    raw_auto_fix = payload.get("auto_fix", True)
    if raw_auto_fix is not None and not isinstance(raw_auto_fix, bool):
        return asdict(
            error_response(
                "auto_fix must be a boolean",
                error_code=ErrorCode.INVALID_FORMAT,
                error_type=ErrorType.VALIDATION,
                remediation="Provide auto_fix=true|false",
                details={"field": "auto_fix"},
            )
        )
    # An explicit None falls back to the default of True.
    auto_fix = raw_auto_fix if isinstance(raw_auto_fix, bool) else True

    workspace = payload.get("workspace")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_path = find_spec_file(spec_id, specs_dir)
    if not spec_path:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec: {spec_id}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check spec JSON validity and retry.",
                details={"spec_id": spec_id},
            )
        )

    outcome = validate_spec(spec_data)
    summary: Dict[str, Any] = {
        "spec_id": outcome.spec_id,
        "is_valid": outcome.is_valid,
        "error_count": outcome.error_count,
        "warning_count": outcome.warning_count,
    }

    if auto_fix and not outcome.is_valid:
        fix_candidates = get_fix_actions(outcome, spec_data)
        if fix_candidates:
            report = apply_fixes(
                fix_candidates, str(spec_path), dry_run=False, create_backup=True
            )
            summary["fixes_applied"] = len(report.applied_actions)
            summary["backup_path"] = report.backup_path

            # Re-validate the rewritten file to report post-fix health.
            refreshed = load_spec(spec_id, specs_dir)
            if refreshed:
                post = validate_spec(refreshed)
                summary["post_fix_is_valid"] = post.is_valid
                summary["post_fix_error_count"] = post.error_count
        else:
            summary["fixes_applied"] = 0
            summary["message"] = "No auto-fixable issues found"
    else:
        summary["fixes_applied"] = 0

    # Diagnostics reflect the pre-fix validation pass.
    summary["diagnostics"] = [
        {
            "code": diag.code,
            "message": diag.message,
            "severity": diag.severity,
            "category": diag.category,
            "location": diag.location,
            "auto_fixable": diag.auto_fixable,
        }
        for diag in outcome.diagnostics
    ]

    return asdict(success_response(**summary))
|
|
542
|
+
|
|
543
|
+
|
|
544
|
+
def _handle_analyze(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Survey a workspace directory for specs and related documentation."""
    tool_name = "spec_analyze"
    start_time = time.perf_counter()

    # "directory" wins over the legacy "path" alias; default to CWD.
    directory = payload.get("directory")
    path = payload.get("path")
    ws_path = Path(directory or path or ".").resolve()

    audit_log(
        "tool_invocation",
        tool="spec-analyze",
        action="analyze_specs",
        directory=str(ws_path),
    )

    specs_dir = find_specs_directory(str(ws_path))
    has_specs = specs_dir is not None

    analysis_data: Dict[str, Any] = {
        "directory": str(ws_path),
        "has_specs": has_specs,
        "specs_dir": str(specs_dir) if specs_dir else None,
    }

    if has_specs and specs_dir:
        # Count spec JSON files per lifecycle folder (0 when absent).
        folder_counts: Dict[str, int] = {
            name: (
                len(list((specs_dir / name).glob("*.json")))
                if (specs_dir / name).exists()
                else 0
            )
            for name in ("active", "pending", "completed", "archived")
        }
        analysis_data["spec_counts"] = folder_counts
        analysis_data["total_specs"] = sum(folder_counts.values())

        docs_dir = specs_dir / ".human-readable"
        analysis_data["documentation_available"] = docs_dir.exists() and any(
            docs_dir.glob("*.md")
        )

        analysis_data["codebase_docs_available"] = (
            ws_path / "docs" / "codebase.json"
        ).exists()

    duration_ms = (time.perf_counter() - start_time) * 1000
    _metrics.counter(f"analysis.{tool_name}", labels={"status": "success"})
    _metrics.timer(f"analysis.{tool_name}.duration_ms", duration_ms)

    return asdict(
        success_response(
            **analysis_data,
            telemetry={"duration_ms": round(duration_ms, 2)},
        )
    )
|
|
598
|
+
|
|
599
|
+
|
|
600
|
+
def _handle_analyze_deps(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Analyze a spec's dependency graph: counts, bottlenecks, and cycles.

    A "bottleneck" is any task blocking at least ``bottleneck_threshold``
    other tasks (default 3). Cycle detection walks the hierarchy's
    ``children`` links starting from ``spec-root``.
    """
    tool_name = "spec_analyze_deps"
    start_time = time.perf_counter()

    spec_id = payload.get("spec_id")
    threshold = payload.get("bottleneck_threshold")
    path = payload.get("path")

    if not isinstance(spec_id, str) or not spec_id:
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide a spec_id parameter (e.g., my-feature-spec)",
            )
        )

    bottleneck_threshold = int(threshold) if isinstance(threshold, int) else 3

    ws_path = Path(path) if isinstance(path, str) and path else Path.cwd()

    audit_log(
        "tool_invocation",
        tool="spec-analyze-deps",
        action="analyze_dependencies",
        spec_id=spec_id,
    )

    specs_dir = find_specs_directory(str(ws_path))
    if not specs_dir:
        # Fixed: use details= (the keyword every other error_response call in
        # this module uses) instead of data=, and set explicit error metadata.
        return asdict(
            error_response(
                f"Specs directory not found in {ws_path}",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"spec_id": spec_id, "workspace": str(ws_path)},
            )
        )

    spec_file = find_spec_file(spec_id, specs_dir)
    if not spec_file:
        return asdict(
            error_response(
                f"Spec '{spec_id}' not found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                details={"spec_id": spec_id, "specs_dir": str(specs_dir)},
                remediation="Ensure the spec exists in specs/active or specs/pending",
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec '{spec_id}'",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check spec JSON validity and retry.",
                details={"spec_id": spec_id, "spec_file": str(spec_file)},
            )
        )

    hierarchy = spec_data.get("hierarchy", {})

    # Tally how many tasks each blocker holds up.
    dependency_count = 0
    blocks_count: Dict[str, int] = {}
    bottlenecks: List[Dict[str, Any]] = []

    for node in hierarchy.values():
        blocked_by = node.get("dependencies", {}).get("blocked_by", [])
        dependency_count += len(blocked_by)
        for blocker_id in blocked_by:
            blocks_count[blocker_id] = blocks_count.get(blocker_id, 0) + 1

    for task_id, count in blocks_count.items():
        if count >= bottleneck_threshold:
            task = hierarchy.get(task_id, {})
            bottlenecks.append(
                {
                    "task_id": task_id,
                    "title": task.get("title", ""),
                    "status": task.get("status", ""),
                    "blocks_count": count,
                }
            )

    # Worst bottlenecks first.
    bottlenecks.sort(key=lambda item: item["blocks_count"], reverse=True)

    visited: set[str] = set()
    rec_stack: set[str] = set()
    circular_deps: List[str] = []

    def detect_cycle(node_id: str, path: List[str]) -> bool:
        # DFS with a recursion stack; records the first cycle found.
        # NOTE(review): this walks "children" links, not "blocked_by" links —
        # confirm that is the intended graph for circular-dependency detection.
        visited.add(node_id)
        rec_stack.add(node_id)

        node = hierarchy.get(node_id, {})
        for child_id in node.get("children", []):
            if child_id not in visited:
                if detect_cycle(child_id, path + [child_id]):
                    return True
            elif child_id in rec_stack:
                circular_deps.append(" -> ".join(path + [child_id]))
                return True

        rec_stack.remove(node_id)
        return False

    if "spec-root" in hierarchy:
        detect_cycle("spec-root", ["spec-root"])

    duration_ms = (time.perf_counter() - start_time) * 1000
    _metrics.counter(f"analysis.{tool_name}", labels={"status": "success"})
    _metrics.timer(f"analysis.{tool_name}.duration_ms", duration_ms)

    return asdict(
        success_response(
            spec_id=spec_id,
            dependency_count=dependency_count,
            bottlenecks=bottlenecks,
            bottleneck_threshold=bottleneck_threshold,
            circular_deps=circular_deps,
            has_cycles=len(circular_deps) > 0,
            telemetry={"duration_ms": round(duration_ms, 2)},
        )
    )
|
|
724
|
+
|
|
725
|
+
|
|
726
|
+
# Declarative routing table mapping spec(action=...) names to their handlers.
# Each handler receives keyword-only (config, payload) and returns a plain dict.
_ACTIONS = [
    ActionDefinition(name="find", handler=_handle_find, summary="Find a spec by ID"),
    ActionDefinition(name="list", handler=_handle_list, summary="List specs"),
    ActionDefinition(
        name="validate", handler=_handle_validate, summary="Validate a spec"
    ),
    ActionDefinition(name="fix", handler=_handle_fix, summary="Auto-fix a spec"),
    ActionDefinition(name="stats", handler=_handle_stats, summary="Get spec stats"),
    ActionDefinition(
        name="validate-fix",
        handler=_handle_validate_fix,
        summary="Validate and optionally auto-fix",
    ),
    ActionDefinition(
        name="analyze", handler=_handle_analyze, summary="Analyze spec directory"
    ),
    ActionDefinition(
        name="analyze-deps",
        handler=_handle_analyze_deps,
        summary="Analyze spec dependency graph",
    ),
]

# Module-level router shared by every invocation of the "spec" tool.
_SPEC_ROUTER = ActionRouter(tool_name="spec", actions=_ACTIONS)
|
|
750
|
+
|
|
751
|
+
|
|
752
|
+
def _dispatch_spec_action(
    *, action: str, payload: Dict[str, Any], config: ServerConfig
) -> dict:
    """Route an action name to its handler, mapping unknown actions to a
    structured validation error instead of letting the router error escape."""
    try:
        return _SPEC_ROUTER.dispatch(action=action, payload=payload, config=config)
    except ActionRouterError as exc:
        valid_names = ", ".join(exc.allowed_actions)
        failure = error_response(
            f"Unsupported spec action '{action}'. Allowed actions: {valid_names}",
            error_code=ErrorCode.VALIDATION_ERROR,
            error_type=ErrorType.VALIDATION,
            remediation=f"Use one of: {valid_names}",
        )
        return asdict(failure)
|
|
767
|
+
|
|
768
|
+
|
|
769
|
+
def register_unified_spec_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated spec tool."""

    # The single MCP-visible entry point; all per-action parameters are
    # optional and forwarded to the matching handler via the router.
    @canonical_tool(mcp, canonical_name="spec")
    @mcp_tool(tool_name="spec", emit_metrics=True, audit=True)
    def spec(
        action: str,
        spec_id: Optional[str] = None,
        workspace: Optional[str] = None,
        status: str = "all",
        include_progress: bool = True,
        cursor: Optional[str] = None,
        limit: Optional[int] = None,
        dry_run: bool = False,
        create_backup: bool = True,
        auto_fix: bool = True,
        directory: Optional[str] = None,
        path: Optional[str] = None,
        bottleneck_threshold: Optional[int] = None,
    ) -> dict:
        # Bundle every parameter into one payload; handlers pick what they need.
        request = dict(
            spec_id=spec_id,
            workspace=workspace,
            status=status,
            include_progress=include_progress,
            cursor=cursor,
            limit=limit,
            dry_run=dry_run,
            create_backup=create_backup,
            auto_fix=auto_fix,
            directory=directory,
            path=path,
            bottleneck_threshold=bottleneck_threshold,
        )
        return _dispatch_spec_action(action=action, payload=request, config=config)
|
|
804
|
+
|
|
805
|
+
|
|
806
|
+
# Public API: only the registration entry point is exported.
__all__ = [
    "register_unified_spec_tool",
]
|