foundry-mcp 0.8.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of foundry-mcp might be problematic. Click here for more details.

Files changed (153) hide show
  1. foundry_mcp/__init__.py +13 -0
  2. foundry_mcp/cli/__init__.py +67 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +640 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +667 -0
  15. foundry_mcp/cli/commands/session.py +472 -0
  16. foundry_mcp/cli/commands/specs.py +686 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +298 -0
  22. foundry_mcp/cli/logging.py +212 -0
  23. foundry_mcp/cli/main.py +44 -0
  24. foundry_mcp/cli/output.py +122 -0
  25. foundry_mcp/cli/registry.py +110 -0
  26. foundry_mcp/cli/resilience.py +178 -0
  27. foundry_mcp/cli/transcript.py +217 -0
  28. foundry_mcp/config.py +1454 -0
  29. foundry_mcp/core/__init__.py +144 -0
  30. foundry_mcp/core/ai_consultation.py +1773 -0
  31. foundry_mcp/core/batch_operations.py +1202 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/health.py +749 -0
  40. foundry_mcp/core/intake.py +933 -0
  41. foundry_mcp/core/journal.py +700 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1376 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +146 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +387 -0
  57. foundry_mcp/core/prometheus.py +564 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +691 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
  61. foundry_mcp/core/prompts/plan_review.py +627 -0
  62. foundry_mcp/core/providers/__init__.py +237 -0
  63. foundry_mcp/core/providers/base.py +515 -0
  64. foundry_mcp/core/providers/claude.py +472 -0
  65. foundry_mcp/core/providers/codex.py +637 -0
  66. foundry_mcp/core/providers/cursor_agent.py +630 -0
  67. foundry_mcp/core/providers/detectors.py +515 -0
  68. foundry_mcp/core/providers/gemini.py +426 -0
  69. foundry_mcp/core/providers/opencode.py +718 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +308 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +857 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/research/__init__.py +68 -0
  78. foundry_mcp/core/research/memory.py +528 -0
  79. foundry_mcp/core/research/models.py +1234 -0
  80. foundry_mcp/core/research/providers/__init__.py +40 -0
  81. foundry_mcp/core/research/providers/base.py +242 -0
  82. foundry_mcp/core/research/providers/google.py +507 -0
  83. foundry_mcp/core/research/providers/perplexity.py +442 -0
  84. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  85. foundry_mcp/core/research/providers/tavily.py +383 -0
  86. foundry_mcp/core/research/workflows/__init__.py +25 -0
  87. foundry_mcp/core/research/workflows/base.py +298 -0
  88. foundry_mcp/core/research/workflows/chat.py +271 -0
  89. foundry_mcp/core/research/workflows/consensus.py +539 -0
  90. foundry_mcp/core/research/workflows/deep_research.py +4142 -0
  91. foundry_mcp/core/research/workflows/ideate.py +682 -0
  92. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  93. foundry_mcp/core/resilience.py +600 -0
  94. foundry_mcp/core/responses.py +1624 -0
  95. foundry_mcp/core/review.py +366 -0
  96. foundry_mcp/core/security.py +438 -0
  97. foundry_mcp/core/spec.py +4119 -0
  98. foundry_mcp/core/task.py +2463 -0
  99. foundry_mcp/core/testing.py +839 -0
  100. foundry_mcp/core/validation.py +2357 -0
  101. foundry_mcp/dashboard/__init__.py +32 -0
  102. foundry_mcp/dashboard/app.py +119 -0
  103. foundry_mcp/dashboard/components/__init__.py +17 -0
  104. foundry_mcp/dashboard/components/cards.py +88 -0
  105. foundry_mcp/dashboard/components/charts.py +177 -0
  106. foundry_mcp/dashboard/components/filters.py +136 -0
  107. foundry_mcp/dashboard/components/tables.py +195 -0
  108. foundry_mcp/dashboard/data/__init__.py +11 -0
  109. foundry_mcp/dashboard/data/stores.py +433 -0
  110. foundry_mcp/dashboard/launcher.py +300 -0
  111. foundry_mcp/dashboard/views/__init__.py +12 -0
  112. foundry_mcp/dashboard/views/errors.py +217 -0
  113. foundry_mcp/dashboard/views/metrics.py +164 -0
  114. foundry_mcp/dashboard/views/overview.py +96 -0
  115. foundry_mcp/dashboard/views/providers.py +83 -0
  116. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  117. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  118. foundry_mcp/prompts/__init__.py +9 -0
  119. foundry_mcp/prompts/workflows.py +525 -0
  120. foundry_mcp/resources/__init__.py +9 -0
  121. foundry_mcp/resources/specs.py +591 -0
  122. foundry_mcp/schemas/__init__.py +38 -0
  123. foundry_mcp/schemas/intake-schema.json +89 -0
  124. foundry_mcp/schemas/sdd-spec-schema.json +414 -0
  125. foundry_mcp/server.py +150 -0
  126. foundry_mcp/tools/__init__.py +10 -0
  127. foundry_mcp/tools/unified/__init__.py +92 -0
  128. foundry_mcp/tools/unified/authoring.py +3620 -0
  129. foundry_mcp/tools/unified/context_helpers.py +98 -0
  130. foundry_mcp/tools/unified/documentation_helpers.py +268 -0
  131. foundry_mcp/tools/unified/environment.py +1341 -0
  132. foundry_mcp/tools/unified/error.py +479 -0
  133. foundry_mcp/tools/unified/health.py +225 -0
  134. foundry_mcp/tools/unified/journal.py +841 -0
  135. foundry_mcp/tools/unified/lifecycle.py +640 -0
  136. foundry_mcp/tools/unified/metrics.py +777 -0
  137. foundry_mcp/tools/unified/plan.py +876 -0
  138. foundry_mcp/tools/unified/pr.py +294 -0
  139. foundry_mcp/tools/unified/provider.py +589 -0
  140. foundry_mcp/tools/unified/research.py +1283 -0
  141. foundry_mcp/tools/unified/review.py +1042 -0
  142. foundry_mcp/tools/unified/review_helpers.py +314 -0
  143. foundry_mcp/tools/unified/router.py +102 -0
  144. foundry_mcp/tools/unified/server.py +565 -0
  145. foundry_mcp/tools/unified/spec.py +1283 -0
  146. foundry_mcp/tools/unified/task.py +3846 -0
  147. foundry_mcp/tools/unified/test.py +431 -0
  148. foundry_mcp/tools/unified/verification.py +520 -0
  149. foundry_mcp-0.8.22.dist-info/METADATA +344 -0
  150. foundry_mcp-0.8.22.dist-info/RECORD +153 -0
  151. foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
  152. foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
  153. foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,1283 @@
1
+ """Unified spec tooling with action routing.
2
+
3
+ This router consolidates the high-volume spec-* tool family behind a single
4
+ `spec(action=...)` surface.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import logging
10
+ import time
11
+ from dataclasses import asdict
12
+ from pathlib import Path
13
+ from typing import Any, Dict, List, Optional
14
+
15
+ from mcp.server.fastmcp import FastMCP
16
+
17
+ from foundry_mcp.config import ServerConfig
18
+ from foundry_mcp.core.naming import canonical_tool
19
+ from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
20
+ from foundry_mcp.core.pagination import (
21
+ CursorError,
22
+ decode_cursor,
23
+ encode_cursor,
24
+ normalize_page_size,
25
+ )
26
+ from foundry_mcp.core.responses import (
27
+ ErrorCode,
28
+ ErrorType,
29
+ error_response,
30
+ success_response,
31
+ )
32
+ from foundry_mcp.core.spec import (
33
+ TEMPLATES,
34
+ TEMPLATE_DESCRIPTIONS,
35
+ check_spec_completeness,
36
+ detect_duplicate_tasks,
37
+ diff_specs,
38
+ find_spec_file,
39
+ find_specs_directory,
40
+ list_spec_backups,
41
+ list_specs,
42
+ load_spec,
43
+ recalculate_actual_hours,
44
+ recalculate_estimated_hours,
45
+ )
46
+ from foundry_mcp.core.validation import (
47
+ VALID_NODE_TYPES,
48
+ VALID_STATUSES,
49
+ VALID_TASK_CATEGORIES,
50
+ VALID_VERIFICATION_TYPES,
51
+ apply_fixes,
52
+ calculate_stats,
53
+ get_fix_actions,
54
+ validate_spec,
55
+ )
56
+ from foundry_mcp.core.journal import (
57
+ VALID_BLOCKER_TYPES,
58
+ VALID_ENTRY_TYPES,
59
+ )
60
+ from foundry_mcp.core.lifecycle import VALID_FOLDERS
61
+ from foundry_mcp.tools.unified.router import (
62
+ ActionDefinition,
63
+ ActionRouter,
64
+ ActionRouterError,
65
+ )
66
+
67
+ logger = logging.getLogger(__name__)
68
+ _metrics = get_metrics()
69
+
70
+ _DEFAULT_PAGE_SIZE = 100
71
+ _MAX_PAGE_SIZE = 1000
72
+
73
+
74
def _resolve_specs_dir(
    config: ServerConfig, workspace: Optional[str]
) -> Optional[Path]:
    """Locate the specs directory for a request.

    An explicit *workspace* takes precedence; otherwise fall back to the
    configured directory, then to auto-discovery from the current directory.
    Returns ``None`` when no specs directory can be found.
    """
    if not workspace:
        return config.specs_dir or find_specs_directory()
    return find_specs_directory(workspace)
80
+
81
+
82
def _handle_find(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Resolve a spec_id to its on-disk spec file, if one exists."""
    workspace = payload.get("workspace")
    spec_id = payload.get("spec_id")

    # spec_id must be a non-empty, non-whitespace string.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide a spec_id parameter",
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_file = find_spec_file(spec_id, specs_dir)
    if spec_file is None:
        # Not finding the spec is a valid outcome, not an error.
        return asdict(success_response(found=False, spec_id=spec_id))

    return asdict(
        success_response(
            found=True,
            spec_id=spec_id,
            path=str(spec_file),
            status_folder=spec_file.parent.name,
        )
    )
120
+
121
+
122
def _handle_get(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Return raw spec JSON content in minified form.

    Payload keys:
        spec_id: Required spec identifier.
        workspace: Optional workspace root used to locate the specs dir.

    Returns:
        Success payload with a ``spec`` key holding the minified JSON
        string, or a structured error response.
    """
    import json

    spec_id = payload.get("spec_id")
    workspace = payload.get("workspace")

    if not isinstance(spec_id, str) or not spec_id.strip():
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide a spec_id parameter",
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                # Fixed: this was an f-string with no placeholders (F541).
                remediation="Verify the spec_id exists. Use spec(action='list') to see available specs.",
                details={"spec_id": spec_id},
            )
        )

    # Return minified JSON string to minimize token usage for LLM consumers.
    minified_spec = json.dumps(spec_data, separators=(",", ":"))
    return asdict(success_response(spec=minified_spec))
166
+
167
+
168
def _handle_list(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """List specs with optional status filtering and cursor pagination."""
    workspace = payload.get("workspace")
    status = payload.get("status", "all")
    include_progress = payload.get("include_progress", True)
    cursor = payload.get("cursor")
    limit = payload.get("limit")

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    page_size = normalize_page_size(
        limit, default=_DEFAULT_PAGE_SIZE, maximum=_MAX_PAGE_SIZE
    )

    # Decode the opaque cursor (if supplied) into the last-seen spec_id.
    start_after_id = None
    if cursor:
        try:
            start_after_id = decode_cursor(cursor).get("last_id")
        except CursorError as exc:
            return asdict(
                error_response(
                    f"Invalid pagination cursor: {exc}",
                    error_code=ErrorCode.INVALID_FORMAT,
                    error_type=ErrorType.VALIDATION,
                    remediation="Use the cursor value returned by the previous spec(action=list) call.",
                )
            )

    entries = list_specs(
        specs_dir=specs_dir, status=None if status == "all" else status
    )
    # Stable ordering makes cursor resumption deterministic.
    entries.sort(key=lambda item: item.get("spec_id", ""))

    if not include_progress:
        # Strip per-spec progress fields to slim the response.
        for item in entries:
            for key in ("total_tasks", "completed_tasks", "progress_percentage"):
                item.pop(key, None)

    if start_after_id:
        resume_at = 0
        for position, item in enumerate(entries):
            if item.get("spec_id") == start_after_id:
                resume_at = position + 1
                break
        entries = entries[resume_at:]

    # Fetch one extra entry to learn whether another page exists.
    window = entries[: page_size + 1]
    has_more = len(window) > page_size
    page_specs = window[:page_size] if has_more else window

    next_cursor = (
        encode_cursor({"last_id": page_specs[-1].get("spec_id")})
        if has_more and page_specs
        else None
    )

    return asdict(
        success_response(
            specs=page_specs,
            count=len(page_specs),
            pagination={
                "cursor": next_cursor,
                "has_more": has_more,
                "page_size": page_size,
            },
        )
    )
244
+
245
+
246
def _handle_validate(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Run full validation on a spec and report all diagnostics."""
    workspace = payload.get("workspace")
    spec_id = payload.get("spec_id")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    result = validate_spec(spec_data)

    def _diag_dict(diag) -> Dict[str, Any]:
        # Flatten a diagnostic object into a JSON-friendly mapping.
        return {
            "code": diag.code,
            "message": diag.message,
            "severity": diag.severity,
            "category": diag.category,
            "location": diag.location,
            "suggested_fix": diag.suggested_fix,
            "auto_fixable": diag.auto_fixable,
        }

    return asdict(
        success_response(
            spec_id=result.spec_id,
            is_valid=result.is_valid,
            error_count=result.error_count,
            warning_count=result.warning_count,
            info_count=result.info_count,
            diagnostics=[_diag_dict(d) for d in result.diagnostics],
        )
    )
307
+
308
+
309
def _handle_fix(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Apply auto-fixable corrections to a spec, optionally as a dry run."""
    spec_id = payload.get("spec_id")

    # Validate the dry_run flag; None falls through to the default (False).
    dry_run_raw = payload.get("dry_run", False)
    if not (dry_run_raw is None or isinstance(dry_run_raw, bool)):
        return asdict(
            error_response(
                "dry_run must be a boolean",
                error_code=ErrorCode.INVALID_FORMAT,
                error_type=ErrorType.VALIDATION,
                remediation="Provide dry_run=true|false",
                details={"field": "dry_run"},
            )
        )
    dry_run = dry_run_raw if isinstance(dry_run_raw, bool) else False

    # Validate the create_backup flag; None falls through to the default (True).
    create_backup_raw = payload.get("create_backup", True)
    if not (create_backup_raw is None or isinstance(create_backup_raw, bool)):
        return asdict(
            error_response(
                "create_backup must be a boolean",
                error_code=ErrorCode.INVALID_FORMAT,
                error_type=ErrorType.VALIDATION,
                remediation="Provide create_backup=true|false",
                details={"field": "create_backup"},
            )
        )
    create_backup = (
        create_backup_raw if isinstance(create_backup_raw, bool) else True
    )

    workspace = payload.get("workspace")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec: {spec_id}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check spec JSON validity and retry.",
                details={"spec_id": spec_id},
            )
        )

    actions = get_fix_actions(validate_spec(spec_data), spec_data)
    if not actions:
        return asdict(
            success_response(
                spec_id=spec_id,
                applied_count=0,
                skipped_count=0,
                message="No auto-fixable issues found",
            )
        )

    report = apply_fixes(
        actions, str(spec_path), dry_run=dry_run, create_backup=create_backup
    )

    def _describe(action) -> Dict[str, Any]:
        # Compact JSON-friendly view of a single fix action.
        return {
            "id": action.id,
            "description": action.description,
            "category": action.category,
        }

    return asdict(
        success_response(
            spec_id=spec_id,
            dry_run=dry_run,
            applied_count=len(report.applied_actions),
            skipped_count=len(report.skipped_actions),
            applied_actions=[_describe(a) for a in report.applied_actions],
            skipped_actions=[_describe(a) for a in report.skipped_actions],
            backup_path=report.backup_path,
        )
    )
432
+
433
+
434
def _handle_stats(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Compute and return summary statistics for a single spec."""
    workspace = payload.get("workspace")
    spec_id = payload.get("spec_id")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec: {spec_id}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check spec JSON validity and retry.",
                details={"spec_id": spec_id},
            )
        )

    # Mirror the stats object field-by-field into the response payload.
    stats = calculate_stats(spec_data, str(spec_path))
    return asdict(
        success_response(
            spec_id=stats.spec_id,
            title=stats.title,
            version=stats.version,
            status=stats.status,
            totals=stats.totals,
            status_counts=stats.status_counts,
            max_depth=stats.max_depth,
            avg_tasks_per_phase=stats.avg_tasks_per_phase,
            verification_coverage=stats.verification_coverage,
            progress=stats.progress,
            file_size_kb=stats.file_size_kb,
        )
    )
499
+
500
+
501
def _handle_validate_fix(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Validate a spec and, when requested, immediately apply auto-fixes."""
    spec_id = payload.get("spec_id")

    # Validate the auto_fix flag; None falls through to the default (True).
    auto_fix_raw = payload.get("auto_fix", True)
    if not (auto_fix_raw is None or isinstance(auto_fix_raw, bool)):
        return asdict(
            error_response(
                "auto_fix must be a boolean",
                error_code=ErrorCode.INVALID_FORMAT,
                error_type=ErrorType.VALIDATION,
                remediation="Provide auto_fix=true|false",
                details={"field": "auto_fix"},
            )
        )
    auto_fix = auto_fix_raw if isinstance(auto_fix_raw, bool) else True

    workspace = payload.get("workspace")

    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory or pass workspace.",
                details={"workspace": workspace},
            )
        )

    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        return asdict(
            error_response(
                f"Spec not found: {spec_id}",
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec: {spec_id}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check spec JSON validity and retry.",
                details={"spec_id": spec_id},
            )
        )

    result = validate_spec(spec_data)
    response_data: Dict[str, Any] = {
        "spec_id": result.spec_id,
        "is_valid": result.is_valid,
        "error_count": result.error_count,
        "warning_count": result.warning_count,
        "fixes_applied": 0,
    }

    if auto_fix and not result.is_valid:
        actions = get_fix_actions(result, spec_data)
        if not actions:
            response_data["message"] = "No auto-fixable issues found"
        else:
            report = apply_fixes(
                actions, str(spec_path), dry_run=False, create_backup=True
            )
            response_data["fixes_applied"] = len(report.applied_actions)
            response_data["backup_path"] = report.backup_path

            # Re-validate the file on disk to report the post-fix state.
            post_spec = load_spec(spec_id, specs_dir)
            if post_spec:
                post_result = validate_spec(post_spec)
                response_data["post_fix_is_valid"] = post_result.is_valid
                response_data["post_fix_error_count"] = post_result.error_count

    # Diagnostics always reflect the ORIGINAL (pre-fix) validation pass.
    response_data["diagnostics"] = [
        {
            "code": d.code,
            "message": d.message,
            "severity": d.severity,
            "category": d.category,
            "location": d.location,
            "auto_fixable": d.auto_fixable,
        }
        for d in result.diagnostics
    ]

    return asdict(success_response(**response_data))
605
+
606
+
607
def _handle_analyze(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Summarize the spec layout of a workspace directory."""
    tool_name = "spec_analyze"
    started = time.perf_counter()

    # Accept either "directory" or "path"; default to the current directory.
    target = payload.get("directory") or payload.get("path") or "."
    ws_path = Path(target).resolve()

    audit_log(
        "tool_invocation",
        tool="spec-analyze",
        action="analyze_specs",
        directory=str(ws_path),
    )

    specs_dir = find_specs_directory(str(ws_path))

    analysis_data: Dict[str, Any] = {
        "directory": str(ws_path),
        "has_specs": specs_dir is not None,
        "specs_dir": str(specs_dir) if specs_dir else None,
    }

    if specs_dir is not None:
        # Count spec files per lifecycle folder; missing folders count as 0.
        counts: Dict[str, int] = {}
        for folder in ["active", "pending", "completed", "archived"]:
            candidate = specs_dir / folder
            counts[folder] = (
                len(list(candidate.glob("*.json"))) if candidate.exists() else 0
            )

        analysis_data["spec_counts"] = counts
        analysis_data["total_specs"] = sum(counts.values())

        # Human-readable docs live alongside the machine-readable specs.
        docs_dir = specs_dir / ".human-readable"
        analysis_data["documentation_available"] = docs_dir.exists() and any(
            docs_dir.glob("*.md")
        )

    analysis_data["codebase_docs_available"] = (
        ws_path / "docs" / "codebase.json"
    ).exists()

    duration_ms = (time.perf_counter() - started) * 1000
    _metrics.counter(f"analysis.{tool_name}", labels={"status": "success"})
    _metrics.timer(f"analysis.{tool_name}.duration_ms", duration_ms)

    return asdict(
        success_response(
            **analysis_data,
            telemetry={"duration_ms": round(duration_ms, 2)},
        )
    )
661
+
662
+
663
def _handle_analyze_deps(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Analyze a spec's task dependencies: counts, bottlenecks, and cycles.

    Payload keys:
        spec_id: Required spec identifier.
        bottleneck_threshold: Minimum number of blocked tasks for a blocker
            to be reported as a bottleneck (defaults to 3).
        path: Optional workspace root; defaults to the current directory.

    Returns:
        Success payload with dependency_count, bottlenecks, circular_deps,
        has_cycles, and timing telemetry — or a structured error response.
    """
    tool_name = "spec_analyze_deps"
    start_time = time.perf_counter()

    spec_id = payload.get("spec_id")
    threshold = payload.get("bottleneck_threshold")
    path = payload.get("path")

    if not isinstance(spec_id, str) or not spec_id:
        return asdict(
            error_response(
                "spec_id is required",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide a spec_id parameter (e.g., my-feature-spec)",
            )
        )

    # NOTE(review): isinstance(threshold, int) also accepts bools, so
    # bottleneck_threshold=True would silently become 1 — confirm intended.
    bottleneck_threshold = int(threshold) if isinstance(threshold, int) else 3

    ws_path = Path(path) if isinstance(path, str) and path else Path.cwd()

    audit_log(
        "tool_invocation",
        tool="spec-analyze-deps",
        action="analyze_dependencies",
        spec_id=spec_id,
    )

    specs_dir = find_specs_directory(str(ws_path))
    if not specs_dir:
        # NOTE(review): these error_response calls pass data= while sibling
        # handlers pass details= — verify both kwargs are supported.
        return asdict(
            error_response(
                f"Specs directory not found in {ws_path}",
                data={"spec_id": spec_id, "workspace": str(ws_path)},
            )
        )

    spec_file = find_spec_file(spec_id, specs_dir)
    if not spec_file:
        return asdict(
            error_response(
                f"Spec '{spec_id}' not found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                data={"spec_id": spec_id, "specs_dir": str(specs_dir)},
                remediation="Ensure the spec exists in specs/active or specs/pending",
            )
        )

    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return asdict(
            error_response(
                f"Failed to load spec '{spec_id}'",
                data={"spec_id": spec_id, "spec_file": str(spec_file)},
            )
        )

    hierarchy = spec_data.get("hierarchy", {})

    # Tally how many tasks each blocker blocks across the whole hierarchy.
    dependency_count = 0
    blocks_count: Dict[str, int] = {}
    bottlenecks: List[Dict[str, Any]] = []

    for node in hierarchy.values():
        deps = node.get("dependencies", {})
        blocked_by = deps.get("blocked_by", [])
        dependency_count += len(blocked_by)
        for blocker_id in blocked_by:
            blocks_count[blocker_id] = blocks_count.get(blocker_id, 0) + 1

    # A task blocking >= threshold others is flagged as a bottleneck.
    for task_id, count in blocks_count.items():
        if count >= bottleneck_threshold:
            task = hierarchy.get(task_id, {})
            bottlenecks.append(
                {
                    "task_id": task_id,
                    "title": task.get("title", ""),
                    "status": task.get("status", ""),
                    "blocks_count": count,
                }
            )

    # Worst bottlenecks (most blocked tasks) first.
    bottlenecks.sort(key=lambda item: item["blocks_count"], reverse=True)

    # DFS cycle detection over the hierarchy tree, rooted at "spec-root".
    visited: set[str] = set()
    rec_stack: set[str] = set()
    circular_deps: List[str] = []

    def detect_cycle(node_id: str, path: List[str]) -> bool:
        # Classic DFS with a recursion stack; records the first cycle found
        # (as "a -> b -> c") and stops. Nodes not reachable from the root
        # are never examined.
        # NOTE(review): this walks the "children" edges (parent/child tree),
        # not the "blocked_by" dependency edges tallied above — so the
        # reported "circular_deps" are hierarchy cycles, not dependency
        # cycles. Confirm this is the intended semantics.
        visited.add(node_id)
        rec_stack.add(node_id)

        node = hierarchy.get(node_id, {})
        for child_id in node.get("children", []):
            if child_id not in visited:
                if detect_cycle(child_id, path + [child_id]):
                    return True
            elif child_id in rec_stack:
                circular_deps.append(" -> ".join(path + [child_id]))
                return True

        rec_stack.remove(node_id)
        return False

    if "spec-root" in hierarchy:
        detect_cycle("spec-root", ["spec-root"])

    duration_ms = (time.perf_counter() - start_time) * 1000
    _metrics.counter(f"analysis.{tool_name}", labels={"status": "success"})
    _metrics.timer(f"analysis.{tool_name}.duration_ms", duration_ms)

    return asdict(
        success_response(
            spec_id=spec_id,
            dependency_count=dependency_count,
            bottlenecks=bottlenecks,
            bottleneck_threshold=bottleneck_threshold,
            circular_deps=circular_deps,
            has_cycles=len(circular_deps) > 0,
            telemetry={"duration_ms": round(duration_ms, 2)},
        )
    )
787
+
788
+
789
def _handle_schema(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Return schema information for all valid values in the spec system."""
    # Pair each template name with its human-readable description; templates
    # without a description entry fall back to an empty string.
    template_entries = []
    for template_name in TEMPLATES:
        template_entries.append(
            {
                "name": template_name,
                "description": TEMPLATE_DESCRIPTIONS.get(template_name, ""),
            }
        )

    # Every enumerated value set is returned sorted so output is stable.
    enumerations = {
        "node_types": sorted(VALID_NODE_TYPES),
        "statuses": sorted(VALID_STATUSES),
        "task_categories": sorted(VALID_TASK_CATEGORIES),
        "verification_types": sorted(VALID_VERIFICATION_TYPES),
        "journal_entry_types": sorted(VALID_ENTRY_TYPES),
        "blocker_types": sorted(VALID_BLOCKER_TYPES),
        "status_folders": sorted(VALID_FOLDERS),
    }
    return asdict(success_response(templates=template_entries, **enumerations))
808
+
809
+
810
def _handle_diff(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Compare two specs and return categorized changes.

    The diff source is either the latest backup (default), a specific
    backup timestamp, or another spec's ID, depending on ``target``.
    """
    spec_id = payload.get("spec_id")
    if not spec_id:
        return asdict(
            error_response(
                "spec_id is required for diff action",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide the spec_id of the current spec to compare",
            )
        )

    # Target can be a backup timestamp or another spec_id.
    target = payload.get("target")

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory",
            )
        )

    if target:
        # A matching file under .backups/<spec_id>/ means the target names a
        # backup timestamp; otherwise it is treated as another spec's ID.
        candidate = specs_dir / ".backups" / spec_id / f"{target}.json"
        diff_source = str(candidate) if candidate.is_file() else target
    else:
        # No explicit target: diff against the latest backup (older state).
        backups = list_spec_backups(spec_id, specs_dir=specs_dir)
        if backups["count"] == 0:
            return asdict(
                error_response(
                    f"No backups found for spec '{spec_id}'",
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Create a backup first using spec save operations",
                )
            )
        diff_source = backups["backups"][0]["file_path"]

    result = diff_specs(
        source=diff_source,
        target=spec_id,
        specs_dir=specs_dir,
        max_results=payload.get("limit"),
    )

    if "error" in result and not result.get("success", True):
        return asdict(
            error_response(
                result["error"],
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Verify both specs exist and are accessible",
            )
        )

    return asdict(
        success_response(
            spec_id=spec_id,
            compared_to=target if target else diff_source,
            summary=result["summary"],
            changes=result["changes"],
            partial=result["partial"],
        )
    )
888
+
889
+
890
def _handle_history(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """List spec history including backups and revision history.

    Merges backup snapshots (paginated) and metadata revision entries into a
    single list sorted newest-first by timestamp.
    """
    spec_id = payload.get("spec_id")
    if not spec_id:
        return asdict(
            error_response(
                "spec_id is required for history action",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide the spec_id to view history",
            )
        )

    workspace = payload.get("workspace")
    cursor = payload.get("cursor")
    limit = payload.get("limit")

    specs_dir = _resolve_specs_dir(config, workspace)
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory",
            )
        )

    # Get backups with pagination.
    backups_result = list_spec_backups(
        spec_id, specs_dir=specs_dir, cursor=cursor, limit=limit
    )

    # Get revision history from spec metadata (empty if the spec is missing).
    spec_data = load_spec(spec_id, specs_dir)
    revision_history = []
    if spec_data:
        metadata = spec_data.get("metadata", {})
        revision_history = metadata.get("revision_history", [])

    # Merge backups and revisions into one list of history entries.
    history_entries = []

    # Add backups as history entries.
    for backup in backups_result["backups"]:
        history_entries.append({
            "type": "backup",
            "timestamp": backup["timestamp"],
            "file_path": backup["file_path"],
            "file_size_bytes": backup["file_size_bytes"],
        })

    # Add revision history entries.
    for rev in revision_history:
        history_entries.append({
            "type": "revision",
            "timestamp": rev.get("date"),
            "version": rev.get("version"),
            "changes": rev.get("changes"),
            "author": rev.get("author"),
        })

    # Fix: the merged list was previously returned unsorted despite the
    # stated merge-and-sort intent. Sort newest-first; timestamps are
    # presumed to be ISO-8601 strings (lexicographic == chronological),
    # and entries with a missing timestamp sort last.
    history_entries.sort(
        key=lambda entry: entry.get("timestamp") or "", reverse=True
    )

    return asdict(
        success_response(
            spec_id=spec_id,
            entries=history_entries,
            backup_count=backups_result["count"],
            revision_count=len(revision_history),
            pagination=backups_result["pagination"],
        )
    )
961
+
962
+
963
def _handle_completeness_check(
    *, config: ServerConfig, payload: Dict[str, Any]
) -> dict:
    """Check spec completeness and return a score (0-100)."""
    spec_id = payload.get("spec_id")
    # Reject missing, non-string, or whitespace-only spec IDs up front.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required for completeness-check action",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide the spec_id to check completeness",
            )
        )

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory",
            )
        )

    # The checker returns (result, error); a non-empty error means the spec
    # could not be loaded.
    result, error = check_spec_completeness(spec_id, specs_dir=specs_dir)
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    return asdict(success_response(**result))
1003
+
1004
+
1005
def _handle_duplicate_detection(
    *, config: ServerConfig, payload: Dict[str, Any]
) -> dict:
    """Detect duplicate or near-duplicate tasks in a spec."""
    spec_id = payload.get("spec_id")
    # Reject missing, non-string, or whitespace-only spec IDs up front.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required for duplicate-detection action",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide the spec_id to check for duplicates",
            )
        )

    scope = payload.get("scope", "titles")
    threshold = payload.get("threshold", 0.8)
    max_pairs = payload.get("max_pairs", 100)

    # Threshold must be numeric and inside the inclusive [0.0, 1.0] range.
    threshold_ok = isinstance(threshold, (int, float)) and 0.0 <= threshold <= 1.0
    if not threshold_ok:
        return asdict(
            error_response(
                "threshold must be a number between 0.0 and 1.0",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
            )
        )

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory",
            )
        )

    result, error = detect_duplicate_tasks(
        spec_id,
        scope=scope,
        threshold=threshold,
        max_pairs=max_pairs,
        specs_dir=specs_dir,
    )
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    return asdict(success_response(**result))
1065
+
1066
+
1067
def _handle_recalculate_hours(
    *, config: ServerConfig, payload: Dict[str, Any]
) -> dict:
    """Recalculate estimated_hours by aggregating from tasks up through hierarchy."""
    spec_id = payload.get("spec_id")
    # Reject missing, non-string, or whitespace-only spec IDs up front.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required for recalculate-hours action",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide the spec_id to recalculate hours for",
            )
        )

    # In dry-run mode the recalculation is reported but not persisted.
    preview_only = payload.get("dry_run", False)

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory",
            )
        )

    result, error = recalculate_estimated_hours(
        spec_id,
        dry_run=preview_only,
        specs_dir=specs_dir,
    )
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    return asdict(success_response(**result))
1113
+
1114
+
1115
def _handle_recalculate_actual_hours(
    *, config: ServerConfig, payload: Dict[str, Any]
) -> dict:
    """Recalculate actual_hours by aggregating from tasks up through hierarchy."""
    spec_id = payload.get("spec_id")
    # Reject missing, non-string, or whitespace-only spec IDs up front.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return asdict(
            error_response(
                "spec_id is required for recalculate-actual-hours action",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide the spec_id to recalculate actual hours for",
            )
        )

    # In dry-run mode the recalculation is reported but not persisted.
    preview_only = payload.get("dry_run", False)

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if not specs_dir:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure you're in a project with a specs/ directory",
            )
        )

    result, error = recalculate_actual_hours(
        spec_id,
        dry_run=preview_only,
        specs_dir=specs_dir,
    )
    if error:
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.SPEC_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation='Verify the spec ID exists using spec(action="list").',
                details={"spec_id": spec_id},
            )
        )

    return asdict(success_response(**result))
1161
+
1162
+
1163
# Registry of every action exposed by the consolidated "spec" tool.
# Each entry binds an action name (as passed by callers) to its handler
# function and a one-line summary used by the router's help output.
_ACTIONS = [
    ActionDefinition(name="find", handler=_handle_find, summary="Find a spec by ID"),
    ActionDefinition(name="get", handler=_handle_get, summary="Get raw spec JSON (minified)"),
    ActionDefinition(name="list", handler=_handle_list, summary="List specs"),
    ActionDefinition(
        name="validate", handler=_handle_validate, summary="Validate a spec"
    ),
    ActionDefinition(name="fix", handler=_handle_fix, summary="Auto-fix a spec"),
    ActionDefinition(name="stats", handler=_handle_stats, summary="Get spec stats"),
    ActionDefinition(
        name="validate-fix",
        handler=_handle_validate_fix,
        summary="Validate and optionally auto-fix",
    ),
    ActionDefinition(
        name="analyze", handler=_handle_analyze, summary="Analyze spec directory"
    ),
    ActionDefinition(
        name="analyze-deps",
        handler=_handle_analyze_deps,
        summary="Analyze spec dependency graph",
    ),
    ActionDefinition(
        name="schema",
        handler=_handle_schema,
        summary="Get valid values for spec fields",
    ),
    ActionDefinition(
        name="diff",
        handler=_handle_diff,
        summary="Compare spec against backup or another spec",
    ),
    ActionDefinition(
        name="history",
        handler=_handle_history,
        summary="List spec backups and revision history",
    ),
    ActionDefinition(
        name="completeness-check",
        handler=_handle_completeness_check,
        summary="Check spec completeness and return a score (0-100)",
    ),
    ActionDefinition(
        name="duplicate-detection",
        handler=_handle_duplicate_detection,
        summary="Detect duplicate or near-duplicate tasks",
    ),
    ActionDefinition(
        name="recalculate-hours",
        handler=_handle_recalculate_hours,
        summary="Recalculate estimated_hours from task/phase hierarchy",
    ),
    ActionDefinition(
        name="recalculate-actual-hours",
        handler=_handle_recalculate_actual_hours,
        summary="Recalculate actual_hours from task/phase hierarchy",
    ),
]

# Module-level router instance; raises ActionRouterError for unknown actions.
_SPEC_ROUTER = ActionRouter(tool_name="spec", actions=_ACTIONS)
1223
+
1224
+
1225
def _dispatch_spec_action(
    *, action: str, payload: Dict[str, Any], config: ServerConfig
) -> dict:
    """Route *action* through the spec router, mapping unknown actions to a
    structured validation error instead of letting the router error escape."""
    try:
        return _SPEC_ROUTER.dispatch(action=action, payload=payload, config=config)
    except ActionRouterError as router_exc:
        # Surface the full list of supported actions in both the message
        # and the remediation hint.
        allowed = ", ".join(router_exc.allowed_actions)
        response = error_response(
            f"Unsupported spec action '{action}'. Allowed actions: {allowed}",
            error_code=ErrorCode.VALIDATION_ERROR,
            error_type=ErrorType.VALIDATION,
            remediation=f"Use one of: {allowed}",
        )
        return asdict(response)
1240
+
1241
+
1242
def register_unified_spec_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated spec tool."""

    @canonical_tool(mcp, canonical_name="spec")
    @mcp_tool(tool_name="spec", emit_metrics=True, audit=True)
    def spec(
        action: str,
        spec_id: Optional[str] = None,
        workspace: Optional[str] = None,
        status: str = "all",
        include_progress: bool = True,
        cursor: Optional[str] = None,
        limit: Optional[int] = None,
        dry_run: bool = False,
        create_backup: bool = True,
        auto_fix: bool = True,
        directory: Optional[str] = None,
        path: Optional[str] = None,
        bottleneck_threshold: Optional[int] = None,
        target: Optional[str] = None,
        # Fix: the duplicate-detection handler reads scope/threshold/max_pairs
        # from the payload, but the tool never forwarded them, so callers
        # could not configure duplicate detection. Defaults match the
        # handler's own fallbacks, so existing callers are unaffected.
        scope: str = "titles",
        threshold: float = 0.8,
        max_pairs: int = 100,
    ) -> dict:
        """Dispatch a spec action (find/get/list/validate/diff/...) with the
        given options bundled into a single payload dict."""
        payload = {
            "spec_id": spec_id,
            "workspace": workspace,
            "status": status,
            "include_progress": include_progress,
            "cursor": cursor,
            "limit": limit,
            "dry_run": dry_run,
            "create_backup": create_backup,
            "auto_fix": auto_fix,
            "directory": directory,
            "path": path,
            "bottleneck_threshold": bottleneck_threshold,
            "target": target,
            "scope": scope,
            "threshold": threshold,
            "max_pairs": max_pairs,
        }
        return _dispatch_spec_action(action=action, payload=payload, config=config)
1279
+
1280
+
1281
# Public API of this module: only the registration entry point is exported;
# handlers and the router are internal.
__all__ = [
    "register_unified_spec_tool",
]