foundry_mcp-0.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. foundry_mcp/__init__.py +7 -0
  2. foundry_mcp/cli/__init__.py +80 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +633 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +652 -0
  15. foundry_mcp/cli/commands/session.py +479 -0
  16. foundry_mcp/cli/commands/specs.py +856 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +259 -0
  22. foundry_mcp/cli/flags.py +266 -0
  23. foundry_mcp/cli/logging.py +212 -0
  24. foundry_mcp/cli/main.py +44 -0
  25. foundry_mcp/cli/output.py +122 -0
  26. foundry_mcp/cli/registry.py +110 -0
  27. foundry_mcp/cli/resilience.py +178 -0
  28. foundry_mcp/cli/transcript.py +217 -0
  29. foundry_mcp/config.py +850 -0
  30. foundry_mcp/core/__init__.py +144 -0
  31. foundry_mcp/core/ai_consultation.py +1636 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/feature_flags.py +592 -0
  40. foundry_mcp/core/health.py +749 -0
  41. foundry_mcp/core/journal.py +694 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1350 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +123 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +317 -0
  57. foundry_mcp/core/prometheus.py +577 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +546 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
  61. foundry_mcp/core/prompts/plan_review.py +623 -0
  62. foundry_mcp/core/providers/__init__.py +225 -0
  63. foundry_mcp/core/providers/base.py +476 -0
  64. foundry_mcp/core/providers/claude.py +460 -0
  65. foundry_mcp/core/providers/codex.py +619 -0
  66. foundry_mcp/core/providers/cursor_agent.py +642 -0
  67. foundry_mcp/core/providers/detectors.py +488 -0
  68. foundry_mcp/core/providers/gemini.py +405 -0
  69. foundry_mcp/core/providers/opencode.py +616 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +302 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +729 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/resilience.py +600 -0
  78. foundry_mcp/core/responses.py +934 -0
  79. foundry_mcp/core/review.py +366 -0
  80. foundry_mcp/core/security.py +438 -0
  81. foundry_mcp/core/spec.py +1650 -0
  82. foundry_mcp/core/task.py +1289 -0
  83. foundry_mcp/core/testing.py +450 -0
  84. foundry_mcp/core/validation.py +2081 -0
  85. foundry_mcp/dashboard/__init__.py +32 -0
  86. foundry_mcp/dashboard/app.py +119 -0
  87. foundry_mcp/dashboard/components/__init__.py +17 -0
  88. foundry_mcp/dashboard/components/cards.py +88 -0
  89. foundry_mcp/dashboard/components/charts.py +234 -0
  90. foundry_mcp/dashboard/components/filters.py +136 -0
  91. foundry_mcp/dashboard/components/tables.py +195 -0
  92. foundry_mcp/dashboard/data/__init__.py +11 -0
  93. foundry_mcp/dashboard/data/stores.py +433 -0
  94. foundry_mcp/dashboard/launcher.py +289 -0
  95. foundry_mcp/dashboard/views/__init__.py +12 -0
  96. foundry_mcp/dashboard/views/errors.py +217 -0
  97. foundry_mcp/dashboard/views/metrics.py +174 -0
  98. foundry_mcp/dashboard/views/overview.py +160 -0
  99. foundry_mcp/dashboard/views/providers.py +83 -0
  100. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  101. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  102. foundry_mcp/prompts/__init__.py +9 -0
  103. foundry_mcp/prompts/workflows.py +525 -0
  104. foundry_mcp/resources/__init__.py +9 -0
  105. foundry_mcp/resources/specs.py +591 -0
  106. foundry_mcp/schemas/__init__.py +38 -0
  107. foundry_mcp/schemas/sdd-spec-schema.json +386 -0
  108. foundry_mcp/server.py +164 -0
  109. foundry_mcp/tools/__init__.py +10 -0
  110. foundry_mcp/tools/unified/__init__.py +71 -0
  111. foundry_mcp/tools/unified/authoring.py +1487 -0
  112. foundry_mcp/tools/unified/context_helpers.py +98 -0
  113. foundry_mcp/tools/unified/documentation_helpers.py +198 -0
  114. foundry_mcp/tools/unified/environment.py +939 -0
  115. foundry_mcp/tools/unified/error.py +462 -0
  116. foundry_mcp/tools/unified/health.py +225 -0
  117. foundry_mcp/tools/unified/journal.py +841 -0
  118. foundry_mcp/tools/unified/lifecycle.py +632 -0
  119. foundry_mcp/tools/unified/metrics.py +777 -0
  120. foundry_mcp/tools/unified/plan.py +745 -0
  121. foundry_mcp/tools/unified/pr.py +294 -0
  122. foundry_mcp/tools/unified/provider.py +629 -0
  123. foundry_mcp/tools/unified/review.py +685 -0
  124. foundry_mcp/tools/unified/review_helpers.py +299 -0
  125. foundry_mcp/tools/unified/router.py +102 -0
  126. foundry_mcp/tools/unified/server.py +580 -0
  127. foundry_mcp/tools/unified/spec.py +808 -0
  128. foundry_mcp/tools/unified/task.py +2202 -0
  129. foundry_mcp/tools/unified/test.py +370 -0
  130. foundry_mcp/tools/unified/verification.py +520 -0
  131. foundry_mcp-0.3.3.dist-info/METADATA +337 -0
  132. foundry_mcp-0.3.3.dist-info/RECORD +135 -0
  133. foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
  134. foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
  135. foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
foundry_mcp/tools/unified/task.py
@@ -0,0 +1,2202 @@
1
+ """Unified task router with validation, pagination, and shared delegates."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+ import time
7
+ from dataclasses import asdict
8
+ from pathlib import Path
9
+ from typing import Any, Dict, List, Optional, Tuple
10
+
11
+ from mcp.server.fastmcp import FastMCP
12
+
13
+ from foundry_mcp.config import ServerConfig
14
+ from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
15
+ from foundry_mcp.core.naming import canonical_tool
16
+ from foundry_mcp.core.observability import get_metrics, mcp_tool
17
+ from foundry_mcp.core.pagination import (
18
+ CursorError,
19
+ decode_cursor,
20
+ encode_cursor,
21
+ normalize_page_size,
22
+ paginated_response,
23
+ )
24
+ from foundry_mcp.core.progress import (
25
+ get_progress_summary,
26
+ list_phases,
27
+ update_parent_status,
28
+ )
29
+ from foundry_mcp.core.responses import (
30
+ ErrorCode,
31
+ ErrorType,
32
+ error_response,
33
+ success_response,
34
+ )
35
+ from foundry_mcp.core.spec import find_specs_directory, load_spec, save_spec
36
+ from foundry_mcp.core.journal import (
37
+ add_journal_entry,
38
+ get_blocker_info,
39
+ list_blocked_tasks,
40
+ mark_blocked,
41
+ unblock as unblock_task,
42
+ update_task_status,
43
+ )
44
+ from foundry_mcp.core.task import (
45
+ add_task,
46
+ check_dependencies,
47
+ get_next_task,
48
+ prepare_task as core_prepare_task,
49
+ remove_task,
50
+ update_estimate,
51
+ update_task_metadata,
52
+ )
53
+ from foundry_mcp.tools.unified.router import (
54
+ ActionDefinition,
55
+ ActionRouter,
56
+ ActionRouterError,
57
+ )
58
+
59
+ logger = logging.getLogger(__name__)
60
+ _metrics = get_metrics()
61
+
62
+ _TASK_DEFAULT_PAGE_SIZE = 25
63
+ _TASK_MAX_PAGE_SIZE = 100
64
+ _TASK_WARNING_THRESHOLD = 75
65
+ _ALLOWED_STATUS = {"pending", "in_progress", "completed", "blocked"}
66
+
67
+
68
+ def _request_id() -> str:
69
+ return get_correlation_id() or generate_correlation_id(prefix="task")
70
+
71
+
72
+ def _metric(action: str) -> str:
73
+ return f"unified_tools.task.{action.replace('-', '_')}"
74
+
75
+
76
+ def _specs_dir_missing_error(request_id: str) -> dict:
77
+ return asdict(
78
+ error_response(
79
+ "No specs directory found. Use --specs-dir or set SDD_SPECS_DIR.",
80
+ error_code=ErrorCode.NOT_FOUND,
81
+ error_type=ErrorType.NOT_FOUND,
82
+ remediation="Set SDD_SPECS_DIR or invoke with --specs-dir",
83
+ request_id=request_id,
84
+ )
85
+ )
86
+
87
+
88
+ def _validation_error(
89
+ *,
90
+ field: str,
91
+ action: str,
92
+ message: str,
93
+ request_id: str,
94
+ code: ErrorCode = ErrorCode.MISSING_REQUIRED,
95
+ remediation: Optional[str] = None,
96
+ ) -> dict:
97
+ effective_remediation = remediation or f"Provide a valid '{field}' value"
98
+ return asdict(
99
+ error_response(
100
+ f"Invalid field '{field}' for task.{action}: {message}",
101
+ error_code=code,
102
+ error_type=ErrorType.VALIDATION,
103
+ remediation=effective_remediation,
104
+ details={"field": field, "action": f"task.{action}"},
105
+ request_id=request_id,
106
+ )
107
+ )
108
+
109
+
110
+ def _resolve_specs_dir(
111
+ config: ServerConfig, workspace: Optional[str]
112
+ ) -> Optional[Path]:
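+ # Resolution order: an explicit workspace path wins, then config.specs_dir, then find_specs_directory() auto-discovery; any failure resolves to None.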
113
+ try:
114
+ if workspace:
115
+ return find_specs_directory(workspace)
116
+
117
+ candidate = getattr(config, "specs_dir", None)
118
+ if isinstance(candidate, Path):
119
+ return candidate
120
+ if isinstance(candidate, str) and candidate.strip():
121
+ return Path(candidate)
122
+
123
+ return find_specs_directory()
124
+ except Exception: # pragma: no cover - defensive guard
125
+ logger.exception(
126
+ "Failed to resolve specs directory", extra={"workspace": workspace}
127
+ )
128
+ return None
129
+
130
+
131
+ def _load_spec_data(
132
+ spec_id: str, specs_dir: Optional[Path], request_id: str
133
+ ) -> Tuple[Optional[Dict[str, Any]], Optional[dict]]:
134
+ if specs_dir is None:
135
+ return None, _specs_dir_missing_error(request_id)
136
+
137
+ spec_data = load_spec(spec_id, specs_dir)
138
+ if spec_data is None:
139
+ return None, asdict(
140
+ error_response(
141
+ f"Spec not found: {spec_id}",
142
+ error_code=ErrorCode.SPEC_NOT_FOUND,
143
+ error_type=ErrorType.NOT_FOUND,
144
+ remediation='Verify the spec ID via spec(action="list")',
145
+ request_id=request_id,
146
+ )
147
+ )
148
+ return spec_data, None
149
+
150
+
151
+ def _attach_meta(
152
+ response: dict,
153
+ *,
154
+ request_id: str,
155
+ duration_ms: Optional[float] = None,
156
+ warnings: Optional[List[str]] = None,
157
+ ) -> dict:
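+ # Merge the request ID, any accumulated warnings, and timing telemetry into the response-v2 "meta" block without discarding entries already present.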
158
+ meta = response.setdefault("meta", {"version": "response-v2"})
159
+ meta["request_id"] = request_id
160
+ if warnings:
161
+ existing = list(meta.get("warnings") or [])
162
+ existing.extend(warnings)
163
+ meta["warnings"] = existing
164
+ if duration_ms is not None:
165
+ telemetry = dict(meta.get("telemetry") or {})
166
+ telemetry["duration_ms"] = round(duration_ms, 2)
167
+ meta["telemetry"] = telemetry
168
+ return response
169
+
170
+
171
+ def _filter_hierarchy(
172
+ hierarchy: Dict[str, Any],
173
+ max_depth: int,
174
+ include_metadata: bool,
175
+ ) -> Dict[str, Any]:
176
+ result: Dict[str, Any] = {}
177
+
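+ # Depth is approximated by counting "-" separators in the node ID ("spec-root" is depth 0); a max_depth of 0 disables depth filtering.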
178
+ for node_id, node_data in hierarchy.items():
179
+ node_depth = node_id.count("-") if node_id != "spec-root" else 0
180
+ if max_depth > 0 and node_depth > max_depth:
181
+ continue
182
+
183
+ filtered_node: Dict[str, Any] = {
184
+ "type": node_data.get("type"),
185
+ "title": node_data.get("title"),
186
+ "status": node_data.get("status"),
187
+ }
188
+ if "children" in node_data:
189
+ filtered_node["children"] = node_data["children"]
190
+ if "parent" in node_data:
191
+ filtered_node["parent"] = node_data["parent"]
192
+
193
+ if include_metadata:
194
+ if "metadata" in node_data:
195
+ filtered_node["metadata"] = node_data["metadata"]
196
+ if "dependencies" in node_data:
197
+ filtered_node["dependencies"] = node_data["dependencies"]
198
+
199
+ result[node_id] = filtered_node
200
+
201
+ return result
202
+
203
+
204
+ def _pagination_warnings(total_count: int, has_more: bool) -> List[str]:
205
+ warnings: List[str] = []
206
+ if total_count > _TASK_WARNING_THRESHOLD:
207
+ warnings.append(
208
+ f"{total_count} results returned; consider using pagination to limit payload size."
209
+ )
210
+ if has_more:
211
+ warnings.append("Additional results available. Follow the cursor to continue.")
212
+ return warnings
213
+
214
+
215
+ def _handle_prepare(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
216
+ request_id = _request_id()
217
+ action = "prepare"
218
+ spec_id = payload.get("spec_id")
219
+ if not isinstance(spec_id, str) or not spec_id.strip():
220
+ return _validation_error(
221
+ field="spec_id",
222
+ action=action,
223
+ message="Provide a non-empty spec identifier",
224
+ request_id=request_id,
225
+ )
226
+ task_id = payload.get("task_id")
227
+ if task_id is not None and (not isinstance(task_id, str) or not task_id.strip()):
228
+ return _validation_error(
229
+ field="task_id",
230
+ action=action,
231
+ message="task_id must be a non-empty string",
232
+ request_id=request_id,
233
+ )
234
+
235
+ workspace = payload.get("workspace")
236
+ specs_dir = _resolve_specs_dir(config, workspace)
237
+ if specs_dir is None:
238
+ return _specs_dir_missing_error(request_id)
239
+
240
+ start = time.perf_counter()
241
+ result = core_prepare_task(
242
+ spec_id=spec_id.strip(), specs_dir=specs_dir, task_id=task_id
243
+ )
244
+ elapsed_ms = (time.perf_counter() - start) * 1000
245
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
246
+ _metrics.counter(_metric(action), labels={"status": "success"})
247
+ return _attach_meta(result, request_id=request_id, duration_ms=elapsed_ms)
248
+
249
+
250
+ def _handle_next(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
251
+ request_id = _request_id()
252
+ action = "next"
253
+ spec_id = payload.get("spec_id")
254
+ if not isinstance(spec_id, str) or not spec_id.strip():
255
+ return _validation_error(
256
+ field="spec_id",
257
+ action=action,
258
+ message="Provide a non-empty spec identifier",
259
+ request_id=request_id,
260
+ )
261
+ workspace = payload.get("workspace")
262
+ specs_dir = _resolve_specs_dir(config, workspace)
263
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
264
+ if error:
265
+ return error
266
+ assert spec_data is not None # narrow Optional
267
+
268
+ start = time.perf_counter()
269
+ next_task = get_next_task(spec_data)
270
+ elapsed_ms = (time.perf_counter() - start) * 1000
271
+ telemetry = {"duration_ms": round(elapsed_ms, 2)}
272
+
273
+ if next_task:
274
+ task_id, task_data = next_task
275
+ response = success_response(
276
+ spec_id=spec_id.strip(),
277
+ found=True,
278
+ task_id=task_id,
279
+ title=task_data.get("title", ""),
280
+ type=task_data.get("type", "task"),
281
+ status=task_data.get("status", "pending"),
282
+ metadata=task_data.get("metadata", {}),
283
+ request_id=request_id,
284
+ telemetry=telemetry,
285
+ )
286
+ else:
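+ # No actionable task: report whether the spec is fully complete or merely has no unblocked pending work.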
287
+ hierarchy = spec_data.get("hierarchy", {})
288
+ all_tasks = [
289
+ node
290
+ for node in hierarchy.values()
291
+ if node.get("type") in {"task", "subtask", "verify"}
292
+ ]
293
+ completed = sum(1 for node in all_tasks if node.get("status") == "completed")
294
+ pending = sum(1 for node in all_tasks if node.get("status") == "pending")
295
+ response = success_response(
296
+ spec_id=spec_id.strip(),
297
+ found=False,
298
+ spec_complete=pending == 0 and completed > 0,
299
+ message="All tasks completed"
300
+ if pending == 0 and completed > 0
301
+ else "No actionable tasks (tasks may be blocked)",
302
+ request_id=request_id,
303
+ telemetry=telemetry,
304
+ )
305
+
306
+ _metrics.counter(_metric(action), labels={"status": "success"})
307
+ return asdict(response)
308
+
309
+
310
+ def _handle_info(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
311
+ request_id = _request_id()
312
+ action = "info"
313
+ spec_id = payload.get("spec_id")
314
+ task_id = payload.get("task_id")
315
+ if not isinstance(spec_id, str) or not spec_id.strip():
316
+ return _validation_error(
317
+ field="spec_id",
318
+ action=action,
319
+ message="Provide a non-empty spec identifier",
320
+ request_id=request_id,
321
+ )
322
+ if not isinstance(task_id, str) or not task_id.strip():
323
+ return _validation_error(
324
+ field="task_id",
325
+ action=action,
326
+ message="Provide a non-empty task identifier",
327
+ request_id=request_id,
328
+ )
329
+
330
+ workspace = payload.get("workspace")
331
+ specs_dir = _resolve_specs_dir(config, workspace)
332
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
333
+ if error:
334
+ return error
335
+ assert spec_data is not None
336
+
337
+ task = spec_data.get("hierarchy", {}).get(task_id.strip())
338
+ if task is None:
339
+ return asdict(
340
+ error_response(
341
+ f"Task not found: {task_id.strip()}",
342
+ error_code=ErrorCode.TASK_NOT_FOUND,
343
+ error_type=ErrorType.NOT_FOUND,
344
+ remediation="Verify the task ID exists in the hierarchy",
345
+ request_id=request_id,
346
+ )
347
+ )
348
+
349
+ response = success_response(
350
+ spec_id=spec_id.strip(),
351
+ task_id=task_id.strip(),
352
+ task=task,
353
+ request_id=request_id,
354
+ )
355
+ _metrics.counter(_metric(action), labels={"status": "success"})
356
+ return asdict(response)
357
+
358
+
359
+ def _handle_check_deps(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
360
+ request_id = _request_id()
361
+ action = "check-deps"
362
+ spec_id = payload.get("spec_id")
363
+ task_id = payload.get("task_id")
364
+ if not isinstance(spec_id, str) or not spec_id.strip():
365
+ return _validation_error(
366
+ field="spec_id",
367
+ action=action,
368
+ message="Provide a non-empty spec identifier",
369
+ request_id=request_id,
370
+ )
371
+ if not isinstance(task_id, str) or not task_id.strip():
372
+ return _validation_error(
373
+ field="task_id",
374
+ action=action,
375
+ message="Provide a non-empty task identifier",
376
+ request_id=request_id,
377
+ )
378
+
379
+ workspace = payload.get("workspace")
380
+ specs_dir = _resolve_specs_dir(config, workspace)
381
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
382
+ if error:
383
+ return error
384
+ assert spec_data is not None
385
+
386
+ start = time.perf_counter()
387
+ deps = check_dependencies(spec_data, task_id.strip())
388
+ elapsed_ms = (time.perf_counter() - start) * 1000
389
+ response = success_response(
390
+ **deps,
391
+ spec_id=spec_id.strip(),
392
+ request_id=request_id,
393
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
394
+ )
395
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
396
+ _metrics.counter(_metric(action), labels={"status": "success"})
397
+ return asdict(response)
398
+
399
+
400
+ def _handle_progress(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
401
+ request_id = _request_id()
402
+ action = "progress"
403
+ spec_id = payload.get("spec_id")
404
+ node_id = payload.get("node_id", "spec-root")
405
+ include_phases = payload.get("include_phases", True)
406
+
407
+ if not isinstance(spec_id, str) or not spec_id.strip():
408
+ return _validation_error(
409
+ field="spec_id",
410
+ action=action,
411
+ message="Provide a non-empty spec identifier",
412
+ request_id=request_id,
413
+ )
414
+ if not isinstance(node_id, str) or not node_id.strip():
415
+ return _validation_error(
416
+ field="node_id",
417
+ action=action,
418
+ message="Provide a non-empty node identifier",
419
+ request_id=request_id,
420
+ )
421
+ if not isinstance(include_phases, bool):
422
+ return _validation_error(
423
+ field="include_phases",
424
+ action=action,
425
+ message="Expected a boolean value",
426
+ request_id=request_id,
427
+ code=ErrorCode.INVALID_FORMAT,
428
+ )
429
+
430
+ workspace = payload.get("workspace")
431
+ specs_dir = _resolve_specs_dir(config, workspace)
432
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
433
+ if error:
434
+ return error
435
+ assert spec_data is not None
436
+
437
+ progress = get_progress_summary(spec_data, node_id.strip())
438
+ if include_phases:
439
+ progress["phases"] = list_phases(spec_data)
440
+
441
+ response = success_response(
442
+ **progress,
443
+ request_id=request_id,
444
+ )
445
+ _metrics.counter(_metric(action), labels={"status": "success"})
446
+ return asdict(response)
447
+
448
+
449
+ def _handle_list(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
450
+ request_id = _request_id()
451
+ action = "list"
452
+ spec_id = payload.get("spec_id")
453
+ status_filter = payload.get("status_filter")
454
+ include_completed = payload.get("include_completed", True)
455
+ limit = payload.get("limit")
456
+ cursor = payload.get("cursor")
457
+
458
+ if not isinstance(spec_id, str) or not spec_id.strip():
459
+ return _validation_error(
460
+ field="spec_id",
461
+ action=action,
462
+ message="Provide a non-empty spec identifier",
463
+ request_id=request_id,
464
+ )
465
+ if status_filter is not None:
466
+ if not isinstance(status_filter, str) or status_filter not in _ALLOWED_STATUS:
467
+ return _validation_error(
468
+ field="status_filter",
469
+ action=action,
470
+ message=f"Status must be one of: {sorted(_ALLOWED_STATUS)}",
471
+ request_id=request_id,
472
+ code=ErrorCode.INVALID_FORMAT,
473
+ )
474
+ if not isinstance(include_completed, bool):
475
+ return _validation_error(
476
+ field="include_completed",
477
+ action=action,
478
+ message="Expected a boolean value",
479
+ request_id=request_id,
480
+ code=ErrorCode.INVALID_FORMAT,
481
+ )
482
+
483
+ page_size = normalize_page_size(
484
+ limit,
485
+ default=_TASK_DEFAULT_PAGE_SIZE,
486
+ maximum=_TASK_MAX_PAGE_SIZE,
487
+ )
488
+
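+ # Cursors are opaque tokens; resume after the recorded last_id and treat malformed cursors as validation errors.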
489
+ start_after_id = None
490
+ if cursor:
491
+ try:
492
+ cursor_data = decode_cursor(cursor)
493
+ start_after_id = cursor_data.get("last_id")
494
+ except CursorError as exc:
495
+ return asdict(
496
+ error_response(
497
+ f"Invalid cursor: {exc.reason or exc}",
498
+ error_code=ErrorCode.INVALID_FORMAT,
499
+ error_type=ErrorType.VALIDATION,
500
+ request_id=request_id,
501
+ )
502
+ )
503
+
504
+ workspace = payload.get("workspace")
505
+ specs_dir = _resolve_specs_dir(config, workspace)
506
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
507
+ if error:
508
+ return error
509
+ assert spec_data is not None
510
+
511
+ start = time.perf_counter()
512
+ hierarchy = spec_data.get("hierarchy", {})
513
+ tasks: List[Dict[str, Any]] = []
514
+ for node_id, node in hierarchy.items():
515
+ if node.get("type") not in {"task", "subtask", "verify"}:
516
+ continue
517
+ status = node.get("status", "pending")
518
+ if status_filter and status != status_filter:
519
+ continue
520
+ if not include_completed and status == "completed":
521
+ continue
522
+ tasks.append(
523
+ {
524
+ "id": node_id,
525
+ "title": node.get("title", "Untitled"),
526
+ "type": node.get("type", "task"),
527
+ "status": status,
528
+ "icon": node.get("icon"),
529
+ "file_path": node.get("metadata", {}).get("file_path"),
530
+ "parent": node.get("parent"),
531
+ }
532
+ )
533
+
534
+ tasks.sort(key=lambda item: item.get("id", ""))
535
+ total_count = len(tasks)
536
+
537
+ if start_after_id:
538
+ try:
539
+ start_index = next(
540
+ i for i, task in enumerate(tasks) if task.get("id") == start_after_id
541
+ )
542
+ tasks = tasks[start_index + 1 :]
543
+ except StopIteration:
544
+ pass
545
+
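+ # Fetch one row beyond page_size so has_more can be detected without a second pass.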
546
+ page_tasks = tasks[: page_size + 1]
547
+ has_more = len(page_tasks) > page_size
548
+ if has_more:
549
+ page_tasks = page_tasks[:page_size]
550
+
551
+ next_cursor = None
552
+ if has_more and page_tasks:
553
+ next_cursor = encode_cursor({"last_id": page_tasks[-1].get("id")})
554
+
555
+ elapsed_ms = (time.perf_counter() - start) * 1000
556
+ warnings = _pagination_warnings(total_count, has_more)
557
+ response = paginated_response(
558
+ data={
559
+ "spec_id": spec_id.strip(),
560
+ "tasks": page_tasks,
561
+ "count": len(page_tasks),
562
+ },
563
+ cursor=next_cursor,
564
+ has_more=has_more,
565
+ page_size=page_size,
566
+ total_count=total_count,
567
+ warnings=warnings or None,
568
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
569
+ )
570
+ _metrics.counter(_metric(action), labels={"status": "success"})
571
+ return response
572
+
573
+
574
+ def _handle_query(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
575
+ request_id = _request_id()
576
+ action = "query"
577
+ spec_id = payload.get("spec_id")
578
+ status = payload.get("status")
579
+ parent = payload.get("parent")
580
+ limit = payload.get("limit")
581
+ cursor = payload.get("cursor")
582
+
583
+ if not isinstance(spec_id, str) or not spec_id.strip():
584
+ return _validation_error(
585
+ field="spec_id",
586
+ action=action,
587
+ message="Provide a non-empty spec identifier",
588
+ request_id=request_id,
589
+ )
590
+ if status is not None:
591
+ if not isinstance(status, str) or status not in _ALLOWED_STATUS:
592
+ return _validation_error(
593
+ field="status",
594
+ action=action,
595
+ message=f"Status must be one of: {sorted(_ALLOWED_STATUS)}",
596
+ request_id=request_id,
597
+ code=ErrorCode.INVALID_FORMAT,
598
+ )
599
+ if parent is not None and (not isinstance(parent, str) or not parent.strip()):
600
+ return _validation_error(
601
+ field="parent",
602
+ action=action,
603
+ message="Parent must be a non-empty string",
604
+ request_id=request_id,
605
+ code=ErrorCode.INVALID_FORMAT,
606
+ )
607
+
608
+ page_size = normalize_page_size(
609
+ limit,
610
+ default=_TASK_DEFAULT_PAGE_SIZE,
611
+ maximum=_TASK_MAX_PAGE_SIZE,
612
+ )
613
+
614
+ start_after_id = None
615
+ if cursor:
616
+ try:
617
+ cursor_data = decode_cursor(cursor)
618
+ start_after_id = cursor_data.get("last_id")
619
+ except CursorError as exc:
620
+ return asdict(
621
+ error_response(
622
+ f"Invalid cursor: {exc.reason or exc}",
623
+ error_code=ErrorCode.INVALID_FORMAT,
624
+ error_type=ErrorType.VALIDATION,
625
+ request_id=request_id,
626
+ )
627
+ )
628
+
629
+ workspace = payload.get("workspace")
630
+ specs_dir = _resolve_specs_dir(config, workspace)
631
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
632
+ if error:
633
+ return error
634
+ assert spec_data is not None
635
+
636
+ start = time.perf_counter()
637
+ hierarchy = spec_data.get("hierarchy", {})
638
+ tasks: List[Dict[str, Any]] = []
639
+ for task_id, task_data in hierarchy.items():
640
+ if status and task_data.get("status") != status:
641
+ continue
642
+ if parent and task_data.get("parent") != parent:
643
+ continue
644
+ tasks.append(
645
+ {
646
+ "task_id": task_id,
647
+ "title": task_data.get("title", task_id),
648
+ "status": task_data.get("status", "pending"),
649
+ "type": task_data.get("type", "task"),
650
+ "parent": task_data.get("parent"),
651
+ }
652
+ )
653
+
654
+ tasks.sort(key=lambda item: item.get("task_id", ""))
655
+ total_count = len(tasks)
656
+
657
+ if start_after_id:
658
+ try:
659
+ start_index = next(
660
+ i
661
+ for i, task in enumerate(tasks)
662
+ if task.get("task_id") == start_after_id
663
+ )
664
+ tasks = tasks[start_index + 1 :]
665
+ except StopIteration:
666
+ pass
667
+
668
+ page_tasks = tasks[: page_size + 1]
669
+ has_more = len(page_tasks) > page_size
670
+ if has_more:
671
+ page_tasks = page_tasks[:page_size]
672
+
673
+ next_cursor = None
674
+ if has_more and page_tasks:
675
+ next_cursor = encode_cursor({"last_id": page_tasks[-1].get("task_id")})
676
+
677
+ elapsed_ms = (time.perf_counter() - start) * 1000
678
+ warnings = _pagination_warnings(total_count, has_more)
679
+ response = paginated_response(
680
+ data={
681
+ "spec_id": spec_id.strip(),
682
+ "tasks": page_tasks,
683
+ "count": len(page_tasks),
684
+ },
685
+ cursor=next_cursor,
686
+ has_more=has_more,
687
+ page_size=page_size,
688
+ total_count=total_count,
689
+ warnings=warnings or None,
690
+ request_id=request_id,
691
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
692
+ )
693
+ _metrics.counter(_metric(action), labels={"status": "success"})
694
+ return response
695
+
696
+
697
+ def _handle_hierarchy(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
698
+ request_id = _request_id()
699
+ action = "hierarchy"
700
+ spec_id = payload.get("spec_id")
701
+ max_depth = payload.get("max_depth", 2)
702
+ include_metadata = payload.get("include_metadata", False)
703
+ limit = payload.get("limit")
704
+ cursor = payload.get("cursor")
705
+
706
+ if not isinstance(spec_id, str) or not spec_id.strip():
707
+ return _validation_error(
708
+ field="spec_id",
709
+ action=action,
710
+ message="Provide a non-empty spec identifier",
711
+ request_id=request_id,
712
+ )
713
+ if not isinstance(max_depth, int) or max_depth < 0 or max_depth > 10:
714
+ return _validation_error(
715
+ field="max_depth",
716
+ action=action,
717
+ message="max_depth must be between 0 and 10",
718
+ request_id=request_id,
719
+ code=ErrorCode.INVALID_FORMAT,
720
+ )
721
+ if not isinstance(include_metadata, bool):
722
+ return _validation_error(
723
+ field="include_metadata",
724
+ action=action,
725
+ message="Expected a boolean value",
726
+ request_id=request_id,
727
+ code=ErrorCode.INVALID_FORMAT,
728
+ )
729
+
730
+ page_size = normalize_page_size(
731
+ limit,
732
+ default=_TASK_DEFAULT_PAGE_SIZE,
733
+ maximum=_TASK_MAX_PAGE_SIZE,
734
+ )
735
+
736
+ start_after_id = None
737
+ if cursor:
738
+ try:
739
+ cursor_data = decode_cursor(cursor)
740
+ start_after_id = cursor_data.get("last_id")
741
+ except CursorError as exc:
742
+ return asdict(
743
+ error_response(
744
+ f"Invalid cursor: {exc.reason or exc}",
745
+ error_code=ErrorCode.INVALID_FORMAT,
746
+ error_type=ErrorType.VALIDATION,
747
+ request_id=request_id,
748
+ )
749
+ )
750
+
751
+ workspace = payload.get("workspace")
752
+ specs_dir = _resolve_specs_dir(config, workspace)
753
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
754
+ if error:
755
+ return error
756
+ assert spec_data is not None
757
+
758
+ start = time.perf_counter()
759
+ full_hierarchy = spec_data.get("hierarchy", {})
760
+ filtered = _filter_hierarchy(full_hierarchy, max_depth, include_metadata)
761
+ sorted_ids = sorted(filtered.keys())
762
+
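+ # Page through node IDs in sorted order; an unknown cursor ID restarts from the beginning of the listing.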
763
+ if start_after_id:
764
+ try:
765
+ start_index = sorted_ids.index(start_after_id) + 1
766
+ except ValueError:
767
+ start_index = 0
768
+ else:
769
+ start_index = 0
770
+
771
+ page_ids = sorted_ids[start_index : start_index + page_size + 1]
772
+ has_more = len(page_ids) > page_size
773
+ if has_more:
774
+ page_ids = page_ids[:page_size]
775
+
776
+ hierarchy_page = {node_id: filtered[node_id] for node_id in page_ids}
777
+ next_cursor = None
778
+ if has_more and page_ids:
779
+ next_cursor = encode_cursor({"last_id": page_ids[-1]})
780
+
781
+ elapsed_ms = (time.perf_counter() - start) * 1000
782
+ warnings = _pagination_warnings(len(filtered), has_more)
783
+ response = paginated_response(
784
+ data={
785
+ "spec_id": spec_id.strip(),
786
+ "hierarchy": hierarchy_page,
787
+ "node_count": len(hierarchy_page),
788
+ "total_nodes": len(filtered),
789
+ "filters_applied": {
790
+ "max_depth": max_depth,
791
+ "include_metadata": include_metadata,
792
+ },
793
+ },
794
+ cursor=next_cursor,
795
+ has_more=has_more,
796
+ page_size=page_size,
797
+ total_count=len(filtered),
798
+ warnings=warnings or None,
799
+ request_id=request_id,
800
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
801
+ )
802
+ _metrics.counter(_metric(action), labels={"status": "success"})
803
+ return response
804
+
805
+
806
+ def _handle_update_status(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
807
+ request_id = _request_id()
808
+ action = "update-status"
809
+ spec_id = payload.get("spec_id")
810
+ task_id = payload.get("task_id")
811
+ status = payload.get("status")
812
+ note = payload.get("note")
813
+
814
+ if not isinstance(spec_id, str) or not spec_id.strip():
815
+ return _validation_error(
816
+ field="spec_id",
817
+ action=action,
818
+ message="Provide a non-empty spec identifier",
819
+ request_id=request_id,
820
+ )
821
+ if not isinstance(task_id, str) or not task_id.strip():
822
+ return _validation_error(
823
+ field="task_id",
824
+ action=action,
825
+ message="Provide a non-empty task identifier",
826
+ request_id=request_id,
827
+ )
828
+ if not isinstance(status, str) or status not in _ALLOWED_STATUS:
829
+ return _validation_error(
830
+ field="status",
831
+ action=action,
832
+ message=f"Status must be one of: {sorted(_ALLOWED_STATUS)}",
833
+ request_id=request_id,
834
+ code=ErrorCode.INVALID_FORMAT,
835
+ )
836
+ if note is not None and (not isinstance(note, str) or not note.strip()):
837
+ return _validation_error(
838
+ field="note",
839
+ action=action,
840
+ message="note must be a non-empty string",
841
+ request_id=request_id,
842
+ code=ErrorCode.INVALID_FORMAT,
843
+ )
844
+
845
+ workspace = payload.get("workspace")
846
+ specs_dir = _resolve_specs_dir(config, workspace)
847
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
848
+ if error:
849
+ return error
850
+ assert spec_data is not None
851
+
852
+ hierarchy = spec_data.get("hierarchy", {})
853
+ task_key = task_id.strip()
854
+ if task_key not in hierarchy:
855
+ return asdict(
856
+ error_response(
857
+ f"Task not found: {task_key}",
858
+ error_code=ErrorCode.TASK_NOT_FOUND,
859
+ error_type=ErrorType.NOT_FOUND,
860
+ remediation="Verify the task ID exists in the hierarchy",
861
+ request_id=request_id,
862
+ )
863
+ )
864
+
865
+ start = time.perf_counter()
866
+ updated = update_task_status(spec_data, task_key, status, note=None)
867
+ if not updated:
868
+ return asdict(
869
+ error_response(
870
+ f"Failed to update task status for {task_key}",
871
+ error_code=ErrorCode.CONFLICT,
872
+ error_type=ErrorType.CONFLICT,
873
+ remediation="Confirm the task exists and the status is valid",
874
+ request_id=request_id,
875
+ )
876
+ )
877
+
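+ # Roll the status change up to parent nodes before journaling and saving.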
878
+ update_parent_status(spec_data, task_key)
879
+
880
+ if note:
881
+ add_journal_entry(
882
+ spec_data,
883
+ title=f"Status changed to {status}",
884
+ content=note,
885
+ entry_type="status_change",
886
+ task_id=task_key,
887
+ author="foundry-mcp",
888
+ )
889
+
890
+ if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
891
+ return asdict(
892
+ error_response(
893
+ "Failed to save spec",
894
+ error_code=ErrorCode.INTERNAL_ERROR,
895
+ error_type=ErrorType.INTERNAL,
896
+ remediation="Check filesystem permissions and retry",
897
+ request_id=request_id,
898
+ )
899
+ )
900
+
901
+ elapsed_ms = (time.perf_counter() - start) * 1000
902
+ response = success_response(
903
+ spec_id=spec_id.strip(),
904
+ task_id=task_key,
905
+ new_status=status,
906
+ request_id=request_id,
907
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
908
+ )
909
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
910
+ _metrics.counter(_metric(action), labels={"status": "success"})
911
+ return asdict(response)
912
+
913
+
914
+ def _handle_start(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
915
+ request_id = _request_id()
916
+ action = "start"
917
+ spec_id = payload.get("spec_id")
918
+ task_id = payload.get("task_id")
919
+ note = payload.get("note")
920
+
921
+ if not isinstance(spec_id, str) or not spec_id.strip():
922
+ return _validation_error(
923
+ field="spec_id",
924
+ action=action,
925
+ message="Provide a non-empty spec identifier",
926
+ request_id=request_id,
927
+ )
928
+ if not isinstance(task_id, str) or not task_id.strip():
929
+ return _validation_error(
930
+ field="task_id",
931
+ action=action,
932
+ message="Provide a non-empty task identifier",
933
+ request_id=request_id,
934
+ )
935
+ if note is not None and (not isinstance(note, str) or not note.strip()):
936
+ return _validation_error(
937
+ field="note",
938
+ action=action,
939
+ message="note must be a non-empty string",
940
+ request_id=request_id,
941
+ code=ErrorCode.INVALID_FORMAT,
942
+ )
943
+
944
+ workspace = payload.get("workspace")
945
+ specs_dir = _resolve_specs_dir(config, workspace)
946
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
947
+ if error:
948
+ return error
949
+ assert spec_data is not None
950
+
951
+ start = time.perf_counter()
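+ # A task can only be started when check_dependencies reports can_start; otherwise the blocking tasks are surfaced in the error details.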
952
+ deps = check_dependencies(spec_data, task_id.strip())
953
+ if not deps.get("can_start", False):
954
+ blockers = [
955
+ b.get("title", b.get("id", ""))
956
+ for b in (deps.get("blocked_by") or [])
957
+ if isinstance(b, dict)
958
+ ]
959
+ return asdict(
960
+ error_response(
961
+ "Task is blocked by: " + ", ".join([b for b in blockers if b]),
962
+ error_code=ErrorCode.CONFLICT,
963
+ error_type=ErrorType.CONFLICT,
964
+ remediation="Resolve blocking tasks then retry",
965
+ details={"blocked_by": deps.get("blocked_by")},
966
+ request_id=request_id,
967
+ )
968
+ )
969
+
970
+ updated = update_task_status(spec_data, task_id.strip(), "in_progress", note=None)
971
+ if not updated:
972
+ return asdict(
973
+ error_response(
974
+ f"Failed to start task: {task_id.strip()}",
975
+ error_code=ErrorCode.CONFLICT,
976
+ error_type=ErrorType.CONFLICT,
977
+ remediation="Confirm the task exists and is not blocked",
978
+ request_id=request_id,
979
+ )
980
+ )
981
+
982
+ update_parent_status(spec_data, task_id.strip())
983
+
984
+ if note:
985
+ add_journal_entry(
986
+ spec_data,
987
+ title=f"Task Started: {task_id.strip()}",
988
+ content=note,
989
+ entry_type="status_change",
990
+ task_id=task_id.strip(),
991
+ author="foundry-mcp",
992
+ )
993
+
994
+ if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
995
+ return asdict(
996
+ error_response(
997
+ "Failed to save spec",
998
+ error_code=ErrorCode.INTERNAL_ERROR,
999
+ error_type=ErrorType.INTERNAL,
1000
+ remediation="Check filesystem permissions and retry",
1001
+ request_id=request_id,
1002
+ )
1003
+ )
1004
+
1005
+ task_data = spec_data.get("hierarchy", {}).get(task_id.strip(), {})
1006
+ started_at = task_data.get("metadata", {}).get("started_at")
1007
+ elapsed_ms = (time.perf_counter() - start) * 1000
1008
+ response = success_response(
1009
+ spec_id=spec_id.strip(),
1010
+ task_id=task_id.strip(),
1011
+ started_at=started_at,
1012
+ title=task_data.get("title", ""),
1013
+ type=task_data.get("type", "task"),
1014
+ request_id=request_id,
1015
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1016
+ )
1017
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
1018
+ _metrics.counter(_metric(action), labels={"status": "success"})
1019
+ return asdict(response)
1020
+
1021
+
1022
+ def _handle_complete(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1023
+ request_id = _request_id()
1024
+ action = "complete"
1025
+ spec_id = payload.get("spec_id")
1026
+ task_id = payload.get("task_id")
1027
+ completion_note = payload.get("completion_note")
1028
+
1029
+ if not isinstance(spec_id, str) or not spec_id.strip():
1030
+ return _validation_error(
1031
+ field="spec_id",
1032
+ action=action,
1033
+ message="Provide a non-empty spec identifier",
1034
+ request_id=request_id,
1035
+ )
1036
+ if not isinstance(task_id, str) or not task_id.strip():
1037
+ return _validation_error(
1038
+ field="task_id",
1039
+ action=action,
1040
+ message="Provide a non-empty task identifier",
1041
+ request_id=request_id,
1042
+ )
1043
+ if not isinstance(completion_note, str) or not completion_note.strip():
1044
+ return _validation_error(
1045
+ field="completion_note",
1046
+ action=action,
1047
+ message="Provide a non-empty completion note",
1048
+ request_id=request_id,
1049
+ )
1050
+
1051
+ workspace = payload.get("workspace")
1052
+ specs_dir = _resolve_specs_dir(config, workspace)
1053
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
1054
+ if error:
1055
+ return error
1056
+ assert spec_data is not None
1057
+
1058
+ start = time.perf_counter()
1059
+ updated = update_task_status(spec_data, task_id.strip(), "completed", note=None)
1060
+ if not updated:
1061
+ return asdict(
1062
+ error_response(
1063
+ f"Failed to complete task: {task_id.strip()}",
1064
+ error_code=ErrorCode.CONFLICT,
1065
+ error_type=ErrorType.CONFLICT,
1066
+ remediation="Confirm the task exists and is not already completed",
1067
+ request_id=request_id,
1068
+ )
1069
+ )
1070
+
1071
+ update_parent_status(spec_data, task_id.strip())
1072
+
1073
+ task_data = spec_data.get("hierarchy", {}).get(task_id.strip(), {})
1074
+ add_journal_entry(
1075
+ spec_data,
1076
+ title=f"Task Completed: {task_data.get('title', task_id.strip())}",
1077
+ content=completion_note,
1078
+ entry_type="status_change",
1079
+ task_id=task_id.strip(),
1080
+ author="foundry-mcp",
1081
+ )
1082
+
1083
+ if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
1084
+ return asdict(
1085
+ error_response(
1086
+ "Failed to save spec",
1087
+ error_code=ErrorCode.INTERNAL_ERROR,
1088
+ error_type=ErrorType.INTERNAL,
1089
+ remediation="Check filesystem permissions and retry",
1090
+ request_id=request_id,
1091
+ )
1092
+ )
1093
+
1094
+ completed_at = task_data.get("metadata", {}).get("completed_at")
1095
+ progress = get_progress_summary(spec_data)
1096
+ elapsed_ms = (time.perf_counter() - start) * 1000
1097
+ response = success_response(
1098
+ spec_id=spec_id.strip(),
1099
+ task_id=task_id.strip(),
1100
+ completed_at=completed_at,
1101
+ progress={
1102
+ "completed_tasks": progress.get("completed_tasks", 0),
1103
+ "total_tasks": progress.get("total_tasks", 0),
1104
+ "percentage": progress.get("percentage", 0),
1105
+ },
1106
+ request_id=request_id,
1107
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1108
+ )
1109
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
1110
+ _metrics.counter(_metric(action), labels={"status": "success"})
1111
+ return asdict(response)
1112
+
1113
+
1114
+ def _handle_block(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1115
+ request_id = _request_id()
1116
+ action = "block"
1117
+ spec_id = payload.get("spec_id")
1118
+ task_id = payload.get("task_id")
1119
+ reason = payload.get("reason")
1120
+ blocker_type = payload.get("blocker_type", "dependency")
1121
+ ticket = payload.get("ticket")
1122
+
1123
+ valid_types = {"dependency", "technical", "resource", "decision"}
1124
+
1125
+ if not isinstance(spec_id, str) or not spec_id.strip():
1126
+ return _validation_error(
1127
+ field="spec_id",
1128
+ action=action,
1129
+ message="Provide a non-empty spec identifier",
1130
+ request_id=request_id,
1131
+ )
1132
+ if not isinstance(task_id, str) or not task_id.strip():
1133
+ return _validation_error(
1134
+ field="task_id",
1135
+ action=action,
1136
+ message="Provide a non-empty task identifier",
1137
+ request_id=request_id,
1138
+ )
1139
+ if not isinstance(reason, str) or not reason.strip():
1140
+ return _validation_error(
1141
+ field="reason",
1142
+ action=action,
1143
+ message="Provide a non-empty blocker reason",
1144
+ request_id=request_id,
1145
+ )
1146
+ if not isinstance(blocker_type, str) or blocker_type not in valid_types:
1147
+ return _validation_error(
1148
+ field="blocker_type",
1149
+ action=action,
1150
+ message=f"blocker_type must be one of: {sorted(valid_types)}",
1151
+ request_id=request_id,
1152
+ code=ErrorCode.INVALID_FORMAT,
1153
+ )
1154
+ if ticket is not None and not isinstance(ticket, str):
1155
+ return _validation_error(
1156
+ field="ticket",
1157
+ action=action,
1158
+ message="ticket must be a string",
1159
+ request_id=request_id,
1160
+ code=ErrorCode.INVALID_FORMAT,
1161
+ )
1162
+
1163
+ workspace = payload.get("workspace")
1164
+ specs_dir = _resolve_specs_dir(config, workspace)
1165
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
1166
+ if error:
1167
+ return error
1168
+ assert spec_data is not None
1169
+
1170
+ start = time.perf_counter()
1171
+ blocked = mark_blocked(
1172
+ spec_data,
1173
+ task_id.strip(),
1174
+ reason.strip(),
1175
+ blocker_type=blocker_type,
1176
+ ticket=ticket,
1177
+ )
1178
+ if not blocked:
1179
+ return asdict(
1180
+ error_response(
1181
+ f"Task not found: {task_id.strip()}",
1182
+ error_code=ErrorCode.TASK_NOT_FOUND,
1183
+ error_type=ErrorType.NOT_FOUND,
1184
+ remediation="Verify the task ID exists in the hierarchy",
1185
+ request_id=request_id,
1186
+ )
1187
+ )
1188
+
1189
+ add_journal_entry(
1190
+ spec_data,
1191
+ title=f"Task Blocked: {task_id.strip()}",
1192
+ content=f"Blocker ({blocker_type}): {reason.strip()}"
1193
+ + (f" [Ticket: {ticket}]" if ticket else ""),
1194
+ entry_type="blocker",
1195
+ task_id=task_id.strip(),
1196
+ author="foundry-mcp",
1197
+ )
1198
+
1199
+ if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
1200
+ return asdict(
1201
+ error_response(
1202
+ "Failed to save spec",
1203
+ error_code=ErrorCode.INTERNAL_ERROR,
1204
+ error_type=ErrorType.INTERNAL,
1205
+ remediation="Check filesystem permissions and retry",
1206
+ request_id=request_id,
1207
+ )
1208
+ )
1209
+
1210
+ elapsed_ms = (time.perf_counter() - start) * 1000
1211
+ response = success_response(
1212
+ spec_id=spec_id.strip(),
1213
+ task_id=task_id.strip(),
1214
+ blocker_type=blocker_type,
1215
+ reason=reason.strip(),
1216
+ ticket=ticket,
1217
+ request_id=request_id,
1218
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1219
+ )
1220
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
1221
+ _metrics.counter(_metric(action), labels={"status": "success"})
1222
+ return asdict(response)
1223
+
1224
+
1225
+ def _handle_unblock(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1226
+ request_id = _request_id()
1227
+ action = "unblock"
1228
+ spec_id = payload.get("spec_id")
1229
+ task_id = payload.get("task_id")
1230
+ resolution = payload.get("resolution")
1231
+
1232
+ if not isinstance(spec_id, str) or not spec_id.strip():
1233
+ return _validation_error(
1234
+ field="spec_id",
1235
+ action=action,
1236
+ message="Provide a non-empty spec identifier",
1237
+ request_id=request_id,
1238
+ )
1239
+ if not isinstance(task_id, str) or not task_id.strip():
1240
+ return _validation_error(
1241
+ field="task_id",
1242
+ action=action,
1243
+ message="Provide a non-empty task identifier",
1244
+ request_id=request_id,
1245
+ )
1246
+ if resolution is not None and (
1247
+ not isinstance(resolution, str) or not resolution.strip()
1248
+ ):
1249
+ return _validation_error(
1250
+ field="resolution",
1251
+ action=action,
1252
+ message="resolution must be a non-empty string",
1253
+ request_id=request_id,
1254
+ code=ErrorCode.INVALID_FORMAT,
1255
+ )
1256
+
1257
+ workspace = payload.get("workspace")
1258
+ specs_dir = _resolve_specs_dir(config, workspace)
1259
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
1260
+ if error:
1261
+ return error
1262
+ assert spec_data is not None
1263
+
1264
+ start = time.perf_counter()
1265
+ blocker = get_blocker_info(spec_data, task_id.strip())
1266
+ if blocker is None:
1267
+ return asdict(
1268
+ error_response(
1269
+ f"Task {task_id.strip()} is not blocked",
1270
+ error_code=ErrorCode.CONFLICT,
1271
+ error_type=ErrorType.CONFLICT,
1272
+ remediation="Confirm the task is currently blocked before unblocking",
1273
+ request_id=request_id,
1274
+ )
1275
+ )
1276
+
1277
+ unblocked = unblock_task(spec_data, task_id.strip(), resolution)
1278
+ if not unblocked:
1279
+ return asdict(
1280
+ error_response(
1281
+ f"Failed to unblock task: {task_id.strip()}",
1282
+ error_code=ErrorCode.CONFLICT,
1283
+ error_type=ErrorType.CONFLICT,
1284
+ remediation="Confirm the task exists and is currently blocked",
1285
+ request_id=request_id,
1286
+ )
1287
+ )
1288
+
1289
+ add_journal_entry(
1290
+ spec_data,
1291
+ title=f"Task Unblocked: {task_id.strip()}",
1292
+ content=f"Resolved: {resolution.strip() if isinstance(resolution, str) else 'Blocker resolved'}",
1293
+ entry_type="note",
1294
+ task_id=task_id.strip(),
1295
+ author="foundry-mcp",
1296
+ )
1297
+
1298
+ if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
1299
+ return asdict(
1300
+ error_response(
1301
+ "Failed to save spec",
1302
+ error_code=ErrorCode.INTERNAL_ERROR,
1303
+ error_type=ErrorType.INTERNAL,
1304
+ remediation="Check filesystem permissions and retry",
1305
+ request_id=request_id,
1306
+ )
1307
+ )
1308
+
1309
+ elapsed_ms = (time.perf_counter() - start) * 1000
1310
+ response = success_response(
1311
+ spec_id=spec_id.strip(),
1312
+ task_id=task_id.strip(),
1313
+ previous_blocker={
1314
+ "type": blocker.blocker_type,
1315
+ "description": blocker.description,
1316
+ },
1317
+ resolution=(resolution.strip() if isinstance(resolution, str) else None)
1318
+ or "Blocker resolved",
1319
+ new_status="pending",
1320
+ request_id=request_id,
1321
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1322
+ )
1323
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
1324
+ _metrics.counter(_metric(action), labels={"status": "success"})
1325
+ return asdict(response)
1326
+
1327
+
1328
+ def _handle_list_blocked(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1329
+ request_id = _request_id()
1330
+ action = "list-blocked"
1331
+ spec_id = payload.get("spec_id")
1332
+ cursor = payload.get("cursor")
1333
+ limit = payload.get("limit")
1334
+
1335
+ if not isinstance(spec_id, str) or not spec_id.strip():
1336
+ return _validation_error(
1337
+ field="spec_id",
1338
+ action=action,
1339
+ message="Provide a non-empty spec identifier",
1340
+ request_id=request_id,
1341
+ )
1342
+
1343
+ page_size = normalize_page_size(
1344
+ limit,
1345
+ default=_TASK_DEFAULT_PAGE_SIZE,
1346
+ maximum=_TASK_MAX_PAGE_SIZE,
1347
+ )
1348
+
1349
+ start_after_id = None
1350
+ if cursor:
1351
+ try:
1352
+ cursor_data = decode_cursor(cursor)
1353
+ start_after_id = cursor_data.get("last_id")
1354
+ except CursorError as exc:
1355
+ return asdict(
1356
+ error_response(
1357
+ f"Invalid cursor: {exc.reason or exc}",
1358
+ error_code=ErrorCode.INVALID_FORMAT,
1359
+ error_type=ErrorType.VALIDATION,
1360
+ request_id=request_id,
1361
+ )
1362
+ )
1363
+
1364
+ workspace = payload.get("workspace")
1365
+ specs_dir = _resolve_specs_dir(config, workspace)
1366
+ spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
1367
+ if error:
1368
+ return error
1369
+ assert spec_data is not None
1370
+
1371
+ start = time.perf_counter()
1372
+ blocked_tasks = list_blocked_tasks(spec_data)
1373
+ blocked_tasks.sort(key=lambda entry: entry.get("task_id", ""))
1374
+ total_count = len(blocked_tasks)
1375
+
1376
+ if start_after_id:
1377
+ try:
1378
+ start_index = next(
1379
+ i
1380
+ for i, entry in enumerate(blocked_tasks)
1381
+ if entry.get("task_id") == start_after_id
1382
+ )
1383
+ blocked_tasks = blocked_tasks[start_index + 1 :]
1384
+ except StopIteration:
1385
+ pass
1386
+
1387
+ page_tasks = blocked_tasks[: page_size + 1]
1388
+ has_more = len(page_tasks) > page_size
1389
+ if has_more:
1390
+ page_tasks = page_tasks[:page_size]
1391
+
1392
+ next_cursor = None
1393
+ if has_more and page_tasks:
1394
+ next_cursor = encode_cursor({"last_id": page_tasks[-1].get("task_id")})
1395
+
1396
+ elapsed_ms = (time.perf_counter() - start) * 1000
1397
+ warnings = _pagination_warnings(total_count, has_more)
1398
+ response = paginated_response(
1399
+ data={
1400
+ "spec_id": spec_id.strip(),
1401
+ "count": len(page_tasks),
1402
+ "blocked_tasks": page_tasks,
1403
+ },
1404
+ cursor=next_cursor,
1405
+ has_more=has_more,
1406
+ page_size=page_size,
1407
+ total_count=total_count,
1408
+ warnings=warnings or None,
1409
+ request_id=request_id,
1410
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1411
+ )
1412
+ _metrics.counter(_metric(action), labels={"status": "success"})
1413
+ return response
1414
+
1415
+
1416
+ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1417
+ request_id = _request_id()
1418
+ action = "add"
1419
+ spec_id = payload.get("spec_id")
1420
+ parent = payload.get("parent")
1421
+ title = payload.get("title")
1422
+ description = payload.get("description")
1423
+ task_type = payload.get("task_type", "task")
1424
+ estimated_hours = payload.get("estimated_hours")
1425
+ position = payload.get("position")
1426
+
1427
+ if not isinstance(spec_id, str) or not spec_id.strip():
1428
+ return _validation_error(
1429
+ field="spec_id",
1430
+ action=action,
1431
+ message="Provide a non-empty spec identifier",
1432
+ request_id=request_id,
1433
+ )
1434
+ if not isinstance(parent, str) or not parent.strip():
1435
+ return _validation_error(
1436
+ field="parent",
1437
+ action=action,
1438
+ message="Provide a non-empty parent node identifier",
1439
+ request_id=request_id,
1440
+ )
1441
+ if not isinstance(title, str) or not title.strip():
1442
+ return _validation_error(
1443
+ field="title",
1444
+ action=action,
1445
+ message="Provide a non-empty task title",
1446
+ request_id=request_id,
1447
+ )
1448
+ if description is not None and not isinstance(description, str):
1449
+ return _validation_error(
1450
+ field="description",
1451
+ action=action,
1452
+ message="description must be a string",
1453
+ request_id=request_id,
1454
+ code=ErrorCode.INVALID_FORMAT,
1455
+ )
1456
+ if not isinstance(task_type, str):
1457
+ return _validation_error(
1458
+ field="task_type",
1459
+ action=action,
1460
+ message="task_type must be a string",
1461
+ request_id=request_id,
1462
+ code=ErrorCode.INVALID_FORMAT,
1463
+ )
1464
+ if estimated_hours is not None and not isinstance(estimated_hours, (int, float)):
1465
+ return _validation_error(
1466
+ field="estimated_hours",
1467
+ action=action,
1468
+ message="estimated_hours must be a number",
1469
+ request_id=request_id,
1470
+ code=ErrorCode.INVALID_FORMAT,
1471
+ )
1472
+ if position is not None and (not isinstance(position, int) or position < 0):
1473
+ return _validation_error(
1474
+ field="position",
1475
+ action=action,
1476
+ message="position must be a non-negative integer",
1477
+ request_id=request_id,
1478
+ code=ErrorCode.INVALID_FORMAT,
1479
+ )
1480
+
1481
+ dry_run = payload.get("dry_run", False)
1482
+ if dry_run is not None and not isinstance(dry_run, bool):
1483
+ return _validation_error(
1484
+ field="dry_run",
1485
+ action=action,
1486
+ message="dry_run must be a boolean",
1487
+ request_id=request_id,
1488
+ code=ErrorCode.INVALID_FORMAT,
1489
+ )
1490
+ dry_run_bool = bool(dry_run)
1491
+
1492
+ workspace = payload.get("workspace")
1493
+ specs_dir = _resolve_specs_dir(config, workspace)
1494
+ if specs_dir is None:
1495
+ return _specs_dir_missing_error(request_id)
1496
+
1497
+ start = time.perf_counter()
1498
+ if dry_run_bool:
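+ # Dry run: validate that the spec and parent node exist and echo the would-be task without writing anything.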
1499
+ spec_data, spec_error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
1500
+ if spec_error:
1501
+ return spec_error
1502
+
1503
+ hierarchy = (spec_data or {}).get("hierarchy", {})
1504
+ parent_node = (
1505
+ hierarchy.get(parent.strip()) if isinstance(hierarchy, dict) else None
1506
+ )
1507
+ if not isinstance(parent_node, dict):
1508
+ elapsed_ms = (time.perf_counter() - start) * 1000
1509
+ return asdict(
1510
+ error_response(
1511
+ f"Parent node '{parent.strip()}' not found",
1512
+ error_code=ErrorCode.NOT_FOUND,
1513
+ error_type=ErrorType.NOT_FOUND,
1514
+ remediation="Verify the parent node ID exists in the specification",
1515
+ request_id=request_id,
1516
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1517
+ )
1518
+ )
1519
+
1520
+ elapsed_ms = (time.perf_counter() - start) * 1000
1521
+ response = success_response(
1522
+ data={
1523
+ "spec_id": spec_id.strip(),
1524
+ "parent": parent.strip(),
1525
+ "title": title.strip(),
1526
+ "task_type": task_type,
1527
+ "position": position,
1528
+ "dry_run": True,
1529
+ },
1530
+ request_id=request_id,
1531
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1532
+ )
1533
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
1534
+ _metrics.counter(
1535
+ _metric(action), labels={"status": "success", "dry_run": "true"}
1536
+ )
1537
+ return asdict(response)
1538
+
1539
+ result, error = add_task(
1540
+ spec_id=spec_id.strip(),
1541
+ parent_id=parent.strip(),
1542
+ title=title.strip(),
1543
+ description=description,
1544
+ task_type=task_type,
1545
+ estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
1546
+ position=position,
1547
+ specs_dir=specs_dir,
1548
+ )
1549
+ elapsed_ms = (time.perf_counter() - start) * 1000
1550
+
1551
+ if error or result is None:
1552
+ code = (
1553
+ ErrorCode.NOT_FOUND
1554
+ if "not found" in (error or "").lower()
1555
+ else ErrorCode.VALIDATION_ERROR
1556
+ )
1557
+ err_type = (
1558
+ ErrorType.NOT_FOUND if code == ErrorCode.NOT_FOUND else ErrorType.VALIDATION
1559
+ )
1560
+ return asdict(
1561
+ error_response(
1562
+ error or "Failed to add task",
1563
+ error_code=code,
1564
+ error_type=err_type,
1565
+ remediation="Verify parent/task inputs and retry",
1566
+ request_id=request_id,
1567
+ )
1568
+ )
1569
+
1570
+ response = success_response(
1571
+ **result,
1572
+ spec_id=spec_id.strip(),
1573
+ request_id=request_id,
1574
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1575
+ )
1576
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
1577
+ _metrics.counter(_metric(action), labels={"status": "success"})
1578
+ return asdict(response)
1579
+
1580
+
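The add handler above validates every field before any spec file is touched, and a dry run goes only as far as confirming that the parent node exists. As an illustration (not part of the package), a client payload for a dry-run add could look like this; the identifiers are placeholders:

    # Hypothetical payload for the "add" action; identifiers are illustrative.
    add_payload = {
        "spec_id": "example-spec",
        "parent": "phase-1",
        "title": "Write integration tests",
        "task_type": "task",
        "estimated_hours": 2,
        "dry_run": True,  # validate inputs and parent existence without writing
    }

On success the handler echoes the validated fields back in the response data together with "dry_run": True, which makes the preview easy to assert on in tests.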
+def _handle_remove(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    request_id = _request_id()
+    action = "remove"
+    spec_id = payload.get("spec_id")
+    task_id = payload.get("task_id")
+    cascade = payload.get("cascade", False)
+
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    if not isinstance(task_id, str) or not task_id.strip():
+        return _validation_error(
+            field="task_id",
+            action=action,
+            message="Provide a non-empty task identifier",
+            request_id=request_id,
+        )
+    if not isinstance(cascade, bool):
+        return _validation_error(
+            field="cascade",
+            action=action,
+            message="cascade must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+    if dry_run_bool:
+        spec_data, spec_error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
+        if spec_error:
+            return spec_error
+
+        hierarchy = (spec_data or {}).get("hierarchy", {})
+        node = hierarchy.get(task_id.strip()) if isinstance(hierarchy, dict) else None
+        if not isinstance(node, dict):
+            elapsed_ms = (time.perf_counter() - start) * 1000
+            return asdict(
+                error_response(
+                    f"Task '{task_id.strip()}' not found",
+                    error_code=ErrorCode.TASK_NOT_FOUND,
+                    error_type=ErrorType.NOT_FOUND,
+                    remediation="Verify the task ID exists in the specification",
+                    request_id=request_id,
+                    telemetry={"duration_ms": round(elapsed_ms, 2)},
+                )
+            )
+
+        elapsed_ms = (time.perf_counter() - start) * 1000
+        response = success_response(
+            data={
+                "spec_id": spec_id.strip(),
+                "task_id": task_id.strip(),
+                "cascade": cascade,
+                "dry_run": True,
+            },
+            request_id=request_id,
+            telemetry={"duration_ms": round(elapsed_ms, 2)},
+        )
+        _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+        _metrics.counter(
+            _metric(action), labels={"status": "success", "dry_run": "true"}
+        )
+        return asdict(response)
+
+    result, error = remove_task(
+        spec_id=spec_id.strip(),
+        task_id=task_id.strip(),
+        cascade=cascade,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error or result is None:
+        code = (
+            ErrorCode.NOT_FOUND
+            if "not found" in (error or "").lower()
+            else ErrorCode.VALIDATION_ERROR
+        )
+        err_type = (
+            ErrorType.NOT_FOUND if code == ErrorCode.NOT_FOUND else ErrorType.VALIDATION
+        )
+        return asdict(
+            error_response(
+                error or "Failed to remove task",
+                error_code=code,
+                error_type=err_type,
+                remediation="Verify the task ID and cascade flag",
+                request_id=request_id,
+            )
+        )
+
+    response = success_response(
+        **result,
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+    return asdict(response)
+
+
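Because removal is destructive, cascade must be an explicit boolean and a dry run only confirms that the task exists. A cautious client might preview first and then repeat the call without dry_run, as in this sketch (server_config stands in for a real ServerConfig, and the dispatcher used here is the one defined later in this module):

    # Illustrative only: preview the removal, then perform it for real.
    preview = _dispatch_task_action(
        action="remove",
        payload={"spec_id": "example-spec", "task_id": "task-42", "dry_run": True},
        config=server_config,
    )
    # Repeat without dry_run (and with cascade set as needed) to actually delete.
    result = _dispatch_task_action(
        action="remove",
        payload={"spec_id": "example-spec", "task_id": "task-42", "cascade": True},
        config=server_config,
    )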
+def _handle_update_estimate(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    request_id = _request_id()
+    action = "update-estimate"
+    spec_id = payload.get("spec_id")
+    task_id = payload.get("task_id")
+    estimated_hours = payload.get("estimated_hours")
+    complexity = payload.get("complexity")
+
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    if not isinstance(task_id, str) or not task_id.strip():
+        return _validation_error(
+            field="task_id",
+            action=action,
+            message="Provide a non-empty task identifier",
+            request_id=request_id,
+        )
+    if estimated_hours is not None and not isinstance(estimated_hours, (int, float)):
+        return _validation_error(
+            field="estimated_hours",
+            action=action,
+            message="estimated_hours must be a number",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    if complexity is not None and not isinstance(complexity, str):
+        return _validation_error(
+            field="complexity",
+            action=action,
+            message="complexity must be a string",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    normalized_complexity: Optional[str] = None
+    if isinstance(complexity, str):
+        normalized_complexity = complexity.strip().lower() or None
+
+    if estimated_hours is None and normalized_complexity is None:
+        return _validation_error(
+            field="estimated_hours",
+            action=action,
+            message="Provide estimated_hours and/or complexity",
+            request_id=request_id,
+            code=ErrorCode.MISSING_REQUIRED,
+            remediation="Provide hours and/or complexity to update",
+        )
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+    if dry_run_bool:
+        spec_data, spec_error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
+        if spec_error:
+            return spec_error
+
+        hierarchy = (spec_data or {}).get("hierarchy", {})
+        task = hierarchy.get(task_id.strip()) if isinstance(hierarchy, dict) else None
+        if not isinstance(task, dict):
+            return asdict(
+                error_response(
+                    f"Task '{task_id.strip()}' not found",
+                    error_code=ErrorCode.TASK_NOT_FOUND,
+                    error_type=ErrorType.NOT_FOUND,
+                    remediation="Verify the task ID exists in the specification",
+                    request_id=request_id,
+                )
+            )
+
+        metadata_candidate = task.get("metadata")
+        if isinstance(metadata_candidate, dict):
+            metadata: Dict[str, Any] = metadata_candidate
+        else:
+            metadata = {}
+        data: Dict[str, Any] = {
+            "spec_id": spec_id.strip(),
+            "task_id": task_id.strip(),
+            "dry_run": True,
+            "previous_hours": metadata.get("estimated_hours"),
+            "previous_complexity": metadata.get("complexity"),
+        }
+        if estimated_hours is not None:
+            data["hours"] = float(estimated_hours)
+        if normalized_complexity is not None:
+            data["complexity"] = normalized_complexity
+
+        elapsed_ms = (time.perf_counter() - start) * 1000
+        response = success_response(
+            data=data,
+            request_id=request_id,
+            telemetry={"duration_ms": round(elapsed_ms, 2)},
+        )
+        _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+        _metrics.counter(
+            _metric(action), labels={"status": "success", "dry_run": "true"}
+        )
+        return asdict(response)
+
+    result, error = update_estimate(
+        spec_id=spec_id.strip(),
+        task_id=task_id.strip(),
+        estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
+        complexity=normalized_complexity,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error or result is None:
+        code = (
+            ErrorCode.NOT_FOUND
+            if "not found" in (error or "").lower()
+            else ErrorCode.VALIDATION_ERROR
+        )
+        err_type = (
+            ErrorType.NOT_FOUND if code == ErrorCode.NOT_FOUND else ErrorType.VALIDATION
+        )
+        return asdict(
+            error_response(
+                error or "Failed to update estimate",
+                error_code=code,
+                error_type=err_type,
+                remediation="Provide estimated_hours and/or a valid complexity",
+                request_id=request_id,
+            )
+        )
+
+    response = success_response(
+        **result,
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+    return asdict(response)
+
+
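update-estimate insists on at least one of estimated_hours or complexity and lowercases the complexity string before handing it to update_estimate. A minimal illustrative payload (values are placeholders):

    # Hypothetical payload for "update-estimate"; omitting both estimate fields
    # would produce a MISSING_REQUIRED validation error.
    estimate_payload = {
        "spec_id": "example-spec",
        "task_id": "task-42",
        "estimated_hours": 3.5,
        "complexity": "Medium",  # normalized to "medium" before the update runs
    }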
+def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    request_id = _request_id()
+    action = "update-metadata"
+    spec_id = payload.get("spec_id")
+    task_id = payload.get("task_id")
+
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    if not isinstance(task_id, str) or not task_id.strip():
+        return _validation_error(
+            field="task_id",
+            action=action,
+            message="Provide a non-empty task identifier",
+            request_id=request_id,
+        )
+
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    custom_metadata = payload.get("custom_metadata")
+    if custom_metadata is not None and not isinstance(custom_metadata, dict):
+        return _validation_error(
+            field="custom_metadata",
+            action=action,
+            message="custom_metadata must be an object",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+            remediation="Provide custom_metadata as a JSON object",
+        )
+
+    update_fields = [
+        payload.get("file_path"),
+        payload.get("description"),
+        payload.get("task_category"),
+        payload.get("actual_hours"),
+        payload.get("status_note"),
+        payload.get("verification_type"),
+        payload.get("command"),
+    ]
+    has_update = any(field is not None for field in update_fields) or bool(
+        custom_metadata
+    )
+    if not has_update:
+        return _validation_error(
+            field="file_path",
+            action=action,
+            message="Provide at least one metadata field",
+            request_id=request_id,
+            code=ErrorCode.MISSING_REQUIRED,
+            remediation="Provide file_path, description, task_category, actual_hours, status_note, verification_type, command, and/or custom_metadata",
+        )
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+    if dry_run_bool:
+        spec_data, spec_error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
+        if spec_error:
+            return spec_error
+
+        hierarchy = (spec_data or {}).get("hierarchy", {})
+        task = hierarchy.get(task_id.strip()) if isinstance(hierarchy, dict) else None
+        if not isinstance(task, dict):
+            return asdict(
+                error_response(
+                    f"Task '{task_id.strip()}' not found",
+                    error_code=ErrorCode.TASK_NOT_FOUND,
+                    error_type=ErrorType.NOT_FOUND,
+                    remediation="Verify the task ID exists in the specification",
+                    request_id=request_id,
+                )
+            )
+
+        fields_updated: List[str] = []
+        if payload.get("file_path") is not None:
+            fields_updated.append("file_path")
+        if payload.get("description") is not None:
+            fields_updated.append("description")
+        if payload.get("task_category") is not None:
+            fields_updated.append("task_category")
+        if payload.get("actual_hours") is not None:
+            fields_updated.append("actual_hours")
+        if payload.get("status_note") is not None:
+            fields_updated.append("status_note")
+        if payload.get("verification_type") is not None:
+            fields_updated.append("verification_type")
+        if payload.get("command") is not None:
+            fields_updated.append("command")
+        if custom_metadata:
+            fields_updated.extend(sorted(custom_metadata.keys()))
+
+        elapsed_ms = (time.perf_counter() - start) * 1000
+        response = success_response(
+            data={
+                "spec_id": spec_id.strip(),
+                "task_id": task_id.strip(),
+                "fields_updated": fields_updated,
+                "dry_run": True,
+            },
+            request_id=request_id,
+            telemetry={"duration_ms": round(elapsed_ms, 2)},
+        )
+        _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+        _metrics.counter(
+            _metric(action), labels={"status": "success", "dry_run": "true"}
+        )
+        return asdict(response)
+
+    result, error = update_task_metadata(
+        spec_id=spec_id.strip(),
+        task_id=task_id.strip(),
+        file_path=payload.get("file_path"),
+        description=payload.get("description"),
+        task_category=payload.get("task_category"),
+        actual_hours=payload.get("actual_hours"),
+        status_note=payload.get("status_note"),
+        verification_type=payload.get("verification_type"),
+        command=payload.get("command"),
+        custom_metadata=custom_metadata,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error or result is None:
+        code = (
+            ErrorCode.NOT_FOUND
+            if "not found" in (error or "").lower()
+            else ErrorCode.VALIDATION_ERROR
+        )
+        err_type = (
+            ErrorType.NOT_FOUND if code == ErrorCode.NOT_FOUND else ErrorType.VALIDATION
+        )
+        return asdict(
+            error_response(
+                error or "Failed to update metadata",
+                error_code=code,
+                error_type=err_type,
+                remediation="Provide at least one metadata field to update",
+                request_id=request_id,
+            )
+        )
+
+    response = success_response(
+        **result,
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+    return asdict(response)
+
+
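update-metadata accepts a fixed set of well-known fields plus an open-ended custom_metadata object and rejects calls that would change nothing. In dry-run mode the response lists the fields that would change; for the illustrative payload below (placeholder values), fields_updated would be ["status_note", "reviewer"]:

    # Hypothetical payload for "update-metadata"; custom_metadata keys are
    # reported after the built-in fields, in sorted order.
    metadata_payload = {
        "spec_id": "example-spec",
        "task_id": "task-42",
        "status_note": "Blocked on upstream fix",
        "custom_metadata": {"reviewer": "alice"},
        "dry_run": True,
    }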
+_ACTION_DEFINITIONS = [
+    ActionDefinition(
+        name="prepare",
+        handler=_handle_prepare,
+        summary="Prepare next actionable task context",
+    ),
+    ActionDefinition(
+        name="next", handler=_handle_next, summary="Return the next actionable task"
+    ),
+    ActionDefinition(
+        name="info", handler=_handle_info, summary="Fetch task metadata by ID"
+    ),
+    ActionDefinition(
+        name="check-deps",
+        handler=_handle_check_deps,
+        summary="Analyze task dependencies and blockers",
+    ),
+    ActionDefinition(name="start", handler=_handle_start, summary="Start a task"),
+    ActionDefinition(
+        name="complete", handler=_handle_complete, summary="Complete a task"
+    ),
+    ActionDefinition(
+        name="update-status",
+        handler=_handle_update_status,
+        summary="Update task status",
+    ),
+    ActionDefinition(name="block", handler=_handle_block, summary="Block a task"),
+    ActionDefinition(name="unblock", handler=_handle_unblock, summary="Unblock a task"),
+    ActionDefinition(
+        name="list-blocked",
+        handler=_handle_list_blocked,
+        summary="List blocked tasks",
+    ),
+    ActionDefinition(name="add", handler=_handle_add, summary="Add a task"),
+    ActionDefinition(name="remove", handler=_handle_remove, summary="Remove a task"),
+    ActionDefinition(
+        name="update-estimate",
+        handler=_handle_update_estimate,
+        summary="Update estimated effort",
+    ),
+    ActionDefinition(
+        name="update-metadata",
+        handler=_handle_update_metadata,
+        summary="Update task metadata fields",
+    ),
+    ActionDefinition(
+        name="progress",
+        handler=_handle_progress,
+        summary="Summarize completion metrics for a node",
+    ),
+    ActionDefinition(
+        name="list",
+        handler=_handle_list,
+        summary="List tasks with pagination and optional filters",
+    ),
+    ActionDefinition(
+        name="query",
+        handler=_handle_query,
+        summary="Query tasks by status or parent",
+    ),
+    ActionDefinition(
+        name="hierarchy",
+        handler=_handle_hierarchy,
+        summary="Return paginated hierarchy slices",
+    ),
+]
+
+_TASK_ROUTER = ActionRouter(tool_name="task", actions=_ACTION_DEFINITIONS)
+
+
+def _dispatch_task_action(
+    *, action: str, payload: Dict[str, Any], config: ServerConfig
+) -> dict:
+    try:
+        return _TASK_ROUTER.dispatch(action=action, config=config, payload=payload)
+    except ActionRouterError as exc:
+        request_id = _request_id()
+        allowed = ", ".join(exc.allowed_actions)
+        return asdict(
+            error_response(
+                f"Unsupported task action '{action}'. Allowed actions: {allowed}",
+                error_code=ErrorCode.VALIDATION_ERROR,
+                error_type=ErrorType.VALIDATION,
+                remediation=f"Use one of: {allowed}",
+                request_id=request_id,
+            )
+        )
+
+
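The router is the single entry point, so an unrecognized action never reaches a handler: ActionRouterError is translated into a validation error that lists the allowed action names. A sketch (server_config is an assumed ServerConfig instance):

    # Illustrative only: an unknown action is rejected with a VALIDATION_ERROR
    # whose message and remediation enumerate the supported actions.
    response = _dispatch_task_action(
        action="rename",  # not present in _ACTION_DEFINITIONS
        payload={},
        config=server_config,
    )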
+def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
+    """Register the consolidated task tool."""
+
+    @canonical_tool(
+        mcp,
+        canonical_name="task",
+    )
+    @mcp_tool(tool_name="task", emit_metrics=True, audit=True)
+    def task(
+        action: str,
+        spec_id: Optional[str] = None,
+        task_id: Optional[str] = None,
+        workspace: Optional[str] = None,
+        status_filter: Optional[str] = None,
+        include_completed: bool = True,
+        node_id: str = "spec-root",
+        include_phases: bool = True,
+        cursor: Optional[str] = None,
+        limit: Optional[int] = None,
+        parent: Optional[str] = None,
+        status: Optional[str] = None,
+        note: Optional[str] = None,
+        completion_note: Optional[str] = None,
+        reason: Optional[str] = None,
+        blocker_type: str = "dependency",
+        ticket: Optional[str] = None,
+        resolution: Optional[str] = None,
+        title: Optional[str] = None,
+        description: Optional[str] = None,
+        task_type: str = "task",
+        estimated_hours: Optional[float] = None,
+        position: Optional[int] = None,
+        cascade: bool = False,
+        complexity: Optional[str] = None,
+        file_path: Optional[str] = None,
+        task_category: Optional[str] = None,
+        actual_hours: Optional[float] = None,
+        status_note: Optional[str] = None,
+        verification_type: Optional[str] = None,
+        command: Optional[str] = None,
+        custom_metadata: Optional[Dict[str, Any]] = None,
+        dry_run: bool = False,
+        max_depth: int = 2,
+        include_metadata: bool = False,
+    ) -> dict:
+        payload = {
+            "spec_id": spec_id,
+            "task_id": task_id,
+            "workspace": workspace,
+            "status_filter": status_filter,
+            "include_completed": include_completed,
+            "node_id": node_id,
+            "include_phases": include_phases,
+            "cursor": cursor,
+            "limit": limit,
+            "parent": parent,
+            "status": status,
+            "note": note,
+            "completion_note": completion_note,
+            "reason": reason,
+            "blocker_type": blocker_type,
+            "ticket": ticket,
+            "resolution": resolution,
+            "title": title,
+            "description": description,
+            "task_type": task_type,
+            "estimated_hours": estimated_hours,
+            "position": position,
+            "cascade": cascade,
+            "complexity": complexity,
+            "file_path": file_path,
+            "task_category": task_category,
+            "actual_hours": actual_hours,
+            "status_note": status_note,
+            "verification_type": verification_type,
+            "command": command,
+            "custom_metadata": custom_metadata,
+            "dry_run": dry_run,
+            "max_depth": max_depth,
+            "include_metadata": include_metadata,
+        }
+        return _dispatch_task_action(action=action, payload=payload, config=config)
+
+    logger.debug("Registered unified task tool")
+
+
+__all__ = [
+    "register_unified_task_tool",
+]
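Registering the consolidated tool is a single call; action routing, validation, and metrics all live inside this module. A minimal sketch, assuming the host application already has a FastMCP instance and a ServerConfig:

    # Illustrative only: mcp (FastMCP) and config (ServerConfig) come from the host app.
    register_unified_task_tool(mcp, config)
    # Clients then call the one "task" tool with an action string such as
    # "next", "add", "remove", "update-estimate", or "hierarchy".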