foundry-mcp 0.3.3__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/commands/plan.py +10 -3
  3. foundry_mcp/cli/commands/review.py +19 -4
  4. foundry_mcp/cli/commands/specs.py +38 -208
  5. foundry_mcp/cli/output.py +3 -3
  6. foundry_mcp/config.py +235 -5
  7. foundry_mcp/core/ai_consultation.py +146 -9
  8. foundry_mcp/core/discovery.py +6 -6
  9. foundry_mcp/core/error_store.py +2 -2
  10. foundry_mcp/core/intake.py +933 -0
  11. foundry_mcp/core/llm_config.py +20 -2
  12. foundry_mcp/core/metrics_store.py +2 -2
  13. foundry_mcp/core/progress.py +70 -0
  14. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  15. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  16. foundry_mcp/core/prompts/plan_review.py +5 -1
  17. foundry_mcp/core/providers/claude.py +6 -47
  18. foundry_mcp/core/providers/codex.py +6 -57
  19. foundry_mcp/core/providers/cursor_agent.py +3 -44
  20. foundry_mcp/core/providers/gemini.py +6 -57
  21. foundry_mcp/core/providers/opencode.py +35 -5
  22. foundry_mcp/core/research/__init__.py +68 -0
  23. foundry_mcp/core/research/memory.py +425 -0
  24. foundry_mcp/core/research/models.py +437 -0
  25. foundry_mcp/core/research/workflows/__init__.py +22 -0
  26. foundry_mcp/core/research/workflows/base.py +204 -0
  27. foundry_mcp/core/research/workflows/chat.py +271 -0
  28. foundry_mcp/core/research/workflows/consensus.py +396 -0
  29. foundry_mcp/core/research/workflows/ideate.py +682 -0
  30. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  31. foundry_mcp/core/responses.py +450 -0
  32. foundry_mcp/core/spec.py +2438 -236
  33. foundry_mcp/core/task.py +1064 -19
  34. foundry_mcp/core/testing.py +512 -123
  35. foundry_mcp/core/validation.py +313 -42
  36. foundry_mcp/dashboard/components/charts.py +0 -57
  37. foundry_mcp/dashboard/launcher.py +11 -0
  38. foundry_mcp/dashboard/views/metrics.py +25 -35
  39. foundry_mcp/dashboard/views/overview.py +1 -65
  40. foundry_mcp/resources/specs.py +25 -25
  41. foundry_mcp/schemas/intake-schema.json +89 -0
  42. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  43. foundry_mcp/server.py +38 -0
  44. foundry_mcp/tools/unified/__init__.py +4 -2
  45. foundry_mcp/tools/unified/authoring.py +2423 -267
  46. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  47. foundry_mcp/tools/unified/environment.py +235 -6
  48. foundry_mcp/tools/unified/error.py +18 -1
  49. foundry_mcp/tools/unified/lifecycle.py +8 -0
  50. foundry_mcp/tools/unified/plan.py +113 -1
  51. foundry_mcp/tools/unified/research.py +658 -0
  52. foundry_mcp/tools/unified/review.py +370 -16
  53. foundry_mcp/tools/unified/spec.py +367 -0
  54. foundry_mcp/tools/unified/task.py +1163 -48
  55. foundry_mcp/tools/unified/test.py +69 -8
  56. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
  57. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
  58. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
  59. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
  60. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import logging
+import re
 import time
 from dataclasses import asdict
 from pathlib import Path
@@ -24,6 +25,7 @@ from foundry_mcp.core.pagination import (
 from foundry_mcp.core.progress import (
     get_progress_summary,
     list_phases,
+    sync_computed_fields,
     update_parent_status,
 )
 from foundry_mcp.core.responses import (
@@ -43,12 +45,21 @@ from foundry_mcp.core.journal import (
 )
 from foundry_mcp.core.task import (
     add_task,
+    batch_update_tasks,
     check_dependencies,
     get_next_task,
+    manage_task_dependency,
+    move_task,
     prepare_task as core_prepare_task,
     remove_task,
+    REQUIREMENT_TYPES,
     update_estimate,
     update_task_metadata,
+    update_task_requirements,
+)
+from foundry_mcp.core.validation import (
+    VALID_VERIFICATION_TYPES,
+    VERIFICATION_TYPE_MAPPING,
 )
 from foundry_mcp.tools.unified.router import (
     ActionDefinition,
@@ -552,7 +563,7 @@ def _handle_list(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     if has_more and page_tasks:
         next_cursor = encode_cursor({"last_id": page_tasks[-1].get("id")})
 
-    (time.perf_counter() - start) * 1000
+    _ = (time.perf_counter() - start) * 1000  # timing placeholder
     warnings = _pagination_warnings(total_count, has_more)
     response = paginated_response(
         data={
@@ -980,6 +991,7 @@ def _handle_start(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     )
 
     update_parent_status(spec_data, task_id.strip())
+    sync_computed_fields(spec_data)
 
     if note:
         add_journal_entry(
@@ -1069,6 +1081,7 @@ def _handle_complete(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     )
 
     update_parent_status(spec_data, task_id.strip())
+    sync_computed_fields(spec_data)
 
     task_data = spec_data.get("hierarchy", {}).get(task_id.strip(), {})
     add_journal_entry(
@@ -1195,6 +1208,7 @@ def _handle_block(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
         task_id=task_id.strip(),
         author="foundry-mcp",
     )
+    sync_computed_fields(spec_data)
 
     if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
         return asdict(
@@ -1294,6 +1308,7 @@ def _handle_unblock(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
         task_id=task_id.strip(),
         author="foundry-mcp",
     )
+    sync_computed_fields(spec_data)
 
     if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
         return asdict(
@@ -1423,6 +1438,7 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
     task_type = payload.get("task_type", "task")
     estimated_hours = payload.get("estimated_hours")
     position = payload.get("position")
+    file_path = payload.get("file_path")
 
     if not isinstance(spec_id, str) or not spec_id.strip():
         return _validation_error(
@@ -1477,6 +1493,14 @@
             request_id=request_id,
             code=ErrorCode.INVALID_FORMAT,
         )
+    if file_path is not None and not isinstance(file_path, str):
+        return _validation_error(
+            field="file_path",
+            action=action,
+            message="file_path must be a string",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
 
     dry_run = payload.get("dry_run", False)
     if dry_run is not None and not isinstance(dry_run, bool):
@@ -1525,6 +1549,7 @@
                 "title": title.strip(),
                 "task_type": task_type,
                 "position": position,
+                "file_path": file_path.strip() if file_path else None,
                 "dry_run": True,
             },
             request_id=request_id,
@@ -1544,6 +1569,7 @@
         task_type=task_type,
         estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
         position=position,
+        file_path=file_path,
         specs_dir=specs_dir,
     )
     elapsed_ms = (time.perf_counter() - start) * 1000
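The hunks above thread an optional file_path through the "add" action: validation, the dry-run preview, and the add_task call. A minimal sketch of what a payload for this action might look like after the change; the field names come from the handler, the identifier and path values are illustrative placeholders only:

payload = {
    "spec_id": "spec-example",     # placeholder spec identifier
    "title": "Implement parser",
    "task_type": "task",
    "position": 2,
    "file_path": "src/parser.py",  # optional field introduced in this version
    "dry_run": True,               # preview the add without persisting
}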
@@ -1897,9 +1923,21 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
             remediation="Provide custom_metadata as a JSON object",
         )
 
+    acceptance_criteria = payload.get("acceptance_criteria")
+    if acceptance_criteria is not None and not isinstance(acceptance_criteria, list):
+        return _validation_error(
+            field="acceptance_criteria",
+            action=action,
+            message="acceptance_criteria must be a list of strings",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
     update_fields = [
+        payload.get("title"),
         payload.get("file_path"),
         payload.get("description"),
+        acceptance_criteria,
         payload.get("task_category"),
         payload.get("actual_hours"),
         payload.get("status_note"),
@@ -1911,12 +1949,12 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
     )
     if not has_update:
         return _validation_error(
-            field="file_path",
+            field="title",
             action=action,
-            message="Provide at least one metadata field",
+            message="Provide at least one field to update",
             request_id=request_id,
             code=ErrorCode.MISSING_REQUIRED,
-            remediation="Provide file_path, description, task_category, actual_hours, status_note, verification_type, command, and/or custom_metadata",
+            remediation="Provide title, file_path, description, acceptance_criteria, task_category, actual_hours, status_note, verification_type, command, and/or custom_metadata",
         )
 
     workspace = payload.get("workspace")
@@ -1944,10 +1982,14 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
     )
 
     fields_updated: List[str] = []
+    if payload.get("title") is not None:
+        fields_updated.append("title")
     if payload.get("file_path") is not None:
         fields_updated.append("file_path")
     if payload.get("description") is not None:
         fields_updated.append("description")
+    if acceptance_criteria is not None:
+        fields_updated.append("acceptance_criteria")
     if payload.get("task_category") is not None:
         fields_updated.append("task_category")
     if payload.get("actual_hours") is not None:
@@ -1981,14 +2023,17 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
     result, error = update_task_metadata(
         spec_id=spec_id.strip(),
         task_id=task_id.strip(),
+        title=payload.get("title"),
         file_path=payload.get("file_path"),
         description=payload.get("description"),
+        acceptance_criteria=acceptance_criteria,
         task_category=payload.get("task_category"),
         actual_hours=payload.get("actual_hours"),
         status_note=payload.get("status_note"),
         verification_type=payload.get("verification_type"),
         command=payload.get("command"),
         custom_metadata=custom_metadata,
+        dry_run=dry_run_bool,
         specs_dir=specs_dir,
     )
     elapsed_ms = (time.perf_counter() - start) * 1000
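With these changes, update-metadata accepts title and acceptance_criteria alongside the existing metadata fields and forwards dry_run to the core helper. A hedged example payload, with placeholder identifiers:

payload = {
    "spec_id": "spec-example",
    "task_id": "task-2-1",          # placeholder task identifier
    "title": "Harden CLI error handling",
    "acceptance_criteria": [
        "Errors include a remediation hint",
        "Exit codes are documented",
    ],
    "dry_run": True,
}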
@@ -2022,50 +2067,1100 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
     return asdict(response)
 
 
-_ACTION_DEFINITIONS = [
-    ActionDefinition(
-        name="prepare",
-        handler=_handle_prepare,
-        summary="Prepare next actionable task context",
-    ),
-    ActionDefinition(
-        name="next", handler=_handle_next, summary="Return the next actionable task"
-    ),
-    ActionDefinition(
-        name="info", handler=_handle_info, summary="Fetch task metadata by ID"
-    ),
-    ActionDefinition(
-        name="check-deps",
-        handler=_handle_check_deps,
-        summary="Analyze task dependencies and blockers",
-    ),
-    ActionDefinition(name="start", handler=_handle_start, summary="Start a task"),
-    ActionDefinition(
-        name="complete", handler=_handle_complete, summary="Complete a task"
-    ),
-    ActionDefinition(
-        name="update-status",
-        handler=_handle_update_status,
-        summary="Update task status",
-    ),
-    ActionDefinition(name="block", handler=_handle_block, summary="Block a task"),
-    ActionDefinition(name="unblock", handler=_handle_unblock, summary="Unblock a task"),
-    ActionDefinition(
-        name="list-blocked",
-        handler=_handle_list_blocked,
-        summary="List blocked tasks",
-    ),
-    ActionDefinition(name="add", handler=_handle_add, summary="Add a task"),
-    ActionDefinition(name="remove", handler=_handle_remove, summary="Remove a task"),
-    ActionDefinition(
-        name="update-estimate",
-        handler=_handle_update_estimate,
-        summary="Update estimated effort",
-    ),
-    ActionDefinition(
-        name="update-metadata",
-        handler=_handle_update_metadata,
-        summary="Update task metadata fields",
+def _handle_move(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """Move a task to a new position or parent.
+
+    Supports two modes:
+    1. Reorder within parent: only specify position (new_parent=None)
+    2. Reparent to different phase/task: specify new_parent, optionally position
+
+    Updates task counts on affected parents. Prevents circular references.
+    Emits warnings for cross-phase moves that might affect dependencies.
+    """
+    request_id = _request_id()
+    action = "move"
+    spec_id = payload.get("spec_id")
+    task_id = payload.get("task_id")
+    new_parent = payload.get("parent")  # Target parent (phase or task ID)
+    position = payload.get("position")  # 1-based position in children list
+
+    # Validate required fields
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    if not isinstance(task_id, str) or not task_id.strip():
+        return _validation_error(
+            field="task_id",
+            action=action,
+            message="Provide a non-empty task identifier",
+            request_id=request_id,
+        )
+
+    # Validate optional new_parent
+    if new_parent is not None and (
+        not isinstance(new_parent, str) or not new_parent.strip()
+    ):
+        return _validation_error(
+            field="parent",
+            action=action,
+            message="parent must be a non-empty string if provided",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    # Validate optional position (must be positive integer)
+    if position is not None:
+        if not isinstance(position, int) or position < 1:
+            return _validation_error(
+                field="position",
+                action=action,
+                message="position must be a positive integer (1-based)",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+
+    # Validate dry_run
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+
+    # Call the core move_task function
+    result, error, warnings = move_task(
+        spec_id=spec_id.strip(),
+        task_id=task_id.strip(),
+        new_parent=new_parent.strip() if new_parent else None,
+        position=position,
+        dry_run=dry_run_bool,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error or result is None:
+        # Determine appropriate error code based on error message
+        error_lower = (error or "").lower()
+        if "not found" in error_lower:
+            code = ErrorCode.TASK_NOT_FOUND
+            err_type = ErrorType.NOT_FOUND
+            remediation = "Verify the task ID and parent ID exist in the specification"
+        elif "circular" in error_lower:
+            code = ErrorCode.CIRCULAR_DEPENDENCY
+            err_type = ErrorType.CONFLICT
+            remediation = "Task cannot be moved under its own descendants"
+        elif "invalid position" in error_lower:
+            code = ErrorCode.INVALID_POSITION
+            err_type = ErrorType.VALIDATION
+            remediation = "Specify a valid position within the children list"
+        elif "cannot move" in error_lower or "invalid" in error_lower:
+            code = ErrorCode.INVALID_PARENT
+            err_type = ErrorType.VALIDATION
+            remediation = "Specify a valid phase, group, or task as the target parent"
+        else:
+            code = ErrorCode.VALIDATION_ERROR
+            err_type = ErrorType.VALIDATION
+            remediation = "Check task ID, parent, and position parameters"
+
+        return asdict(
+            error_response(
+                error or "Failed to move task",
+                error_code=code,
+                error_type=err_type,
+                remediation=remediation,
+                request_id=request_id,
+                telemetry={"duration_ms": round(elapsed_ms, 2)},
+            )
+        )
+
+    # Build success response with warnings if any
+    response = success_response(
+        **result,
+        request_id=request_id,
+        warnings=warnings if warnings else None,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(
+        _metric(action),
+        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
+    )
+    return asdict(response)
+
+
+def _handle_add_dependency(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """Add a dependency relationship between two tasks.
+
+    Manages blocks, blocked_by, and depends relationships.
+    Updates both source and target tasks atomically.
+
+    Dependency types:
+    - blocks: Source task blocks target (target cannot start until source completes)
+    - blocked_by: Source task is blocked by target (source cannot start until target completes)
+    - depends: Soft dependency (informational, doesn't block)
+    """
+    request_id = _request_id()
+    action = "add-dependency"
+    spec_id = payload.get("spec_id")
+    task_id = payload.get("task_id")  # Source task
+    target_id = payload.get("target_id")  # Target task
+    dependency_type = payload.get("dependency_type", "blocks")
+
+    # Validate required fields
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    if not isinstance(task_id, str) or not task_id.strip():
+        return _validation_error(
+            field="task_id",
+            action=action,
+            message="Provide a non-empty source task identifier",
+            request_id=request_id,
+        )
+    if not isinstance(target_id, str) or not target_id.strip():
+        return _validation_error(
+            field="target_id",
+            action=action,
+            message="Provide a non-empty target task identifier",
+            request_id=request_id,
+        )
+
+    # Validate dependency_type
+    valid_types = ("blocks", "blocked_by", "depends")
+    if dependency_type not in valid_types:
+        return _validation_error(
+            field="dependency_type",
+            action=action,
+            message=f"Must be one of: {', '.join(valid_types)}",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    # Validate dry_run
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+
+    # Call the core function
+    result, error = manage_task_dependency(
+        spec_id=spec_id.strip(),
+        source_task_id=task_id.strip(),
+        target_task_id=target_id.strip(),
+        dependency_type=dependency_type,
+        action="add",
+        dry_run=dry_run_bool,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error or result is None:
+        # Determine appropriate error code based on error message
+        error_lower = (error or "").lower()
+        if "not found" in error_lower:
+            code = ErrorCode.TASK_NOT_FOUND
+            err_type = ErrorType.NOT_FOUND
+            remediation = "Verify both task IDs exist in the specification"
+        elif "circular" in error_lower:
+            code = ErrorCode.CIRCULAR_DEPENDENCY
+            err_type = ErrorType.CONFLICT
+            remediation = "This dependency would create a cycle"
+        elif "itself" in error_lower:
+            code = ErrorCode.SELF_REFERENCE
+            err_type = ErrorType.VALIDATION
+            remediation = "A task cannot depend on itself"
+        elif "already exists" in error_lower:
+            code = ErrorCode.DUPLICATE_ENTRY
+            err_type = ErrorType.CONFLICT
+            remediation = "This dependency already exists"
+        else:
+            code = ErrorCode.VALIDATION_ERROR
+            err_type = ErrorType.VALIDATION
+            remediation = "Check task IDs and dependency type"
+
+        return asdict(
+            error_response(
+                error or "Failed to add dependency",
+                error_code=code,
+                error_type=err_type,
+                remediation=remediation,
+                request_id=request_id,
+                telemetry={"duration_ms": round(elapsed_ms, 2)},
+            )
+        )
+
+    # Build success response
+    response = success_response(
+        **result,
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(
+        _metric(action),
+        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
+    )
+    return asdict(response)
+
+
+def _handle_remove_dependency(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """Remove a dependency relationship between two tasks.
+
+    Removes blocks, blocked_by, or depends relationships.
+    Updates both source and target tasks atomically for reciprocal relationships.
+    """
+    request_id = _request_id()
+    action = "remove-dependency"
+    spec_id = payload.get("spec_id")
+    task_id = payload.get("task_id")  # Source task
+    target_id = payload.get("target_id")  # Target task
+    dependency_type = payload.get("dependency_type", "blocks")
+
+    # Validate required fields
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    if not isinstance(task_id, str) or not task_id.strip():
+        return _validation_error(
+            field="task_id",
+            action=action,
+            message="Provide a non-empty source task identifier",
+            request_id=request_id,
+        )
+    if not isinstance(target_id, str) or not target_id.strip():
+        return _validation_error(
+            field="target_id",
+            action=action,
+            message="Provide a non-empty target task identifier",
+            request_id=request_id,
+        )
+
+    # Validate dependency_type
+    valid_types = ("blocks", "blocked_by", "depends")
+    if dependency_type not in valid_types:
+        return _validation_error(
+            field="dependency_type",
+            action=action,
+            message=f"Must be one of: {', '.join(valid_types)}",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    # Validate dry_run
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+
+    # Call the core function
+    result, error = manage_task_dependency(
+        spec_id=spec_id.strip(),
+        source_task_id=task_id.strip(),
+        target_task_id=target_id.strip(),
+        dependency_type=dependency_type,
+        action="remove",
+        dry_run=dry_run_bool,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error or result is None:
+        # Determine appropriate error code based on error message
+        error_lower = (error or "").lower()
+        if "does not exist" in error_lower:
+            # Dependency relationship doesn't exist
+            code = ErrorCode.DEPENDENCY_NOT_FOUND
+            err_type = ErrorType.NOT_FOUND
+            remediation = "This dependency does not exist"
+        elif "not found" in error_lower:
+            # Task or spec not found
+            code = ErrorCode.TASK_NOT_FOUND
+            err_type = ErrorType.NOT_FOUND
+            remediation = "Verify both task IDs exist in the specification"
+        else:
+            code = ErrorCode.VALIDATION_ERROR
+            err_type = ErrorType.VALIDATION
+            remediation = "Check task IDs and dependency type"
+
+        return asdict(
+            error_response(
+                error or "Failed to remove dependency",
+                error_code=code,
+                error_type=err_type,
+                remediation=remediation,
+                request_id=request_id,
+                telemetry={"duration_ms": round(elapsed_ms, 2)},
+            )
+        )
+
+    # Build success response
+    response = success_response(
+        **result,
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(
+        _metric(action),
+        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
+    )
+    return asdict(response)
+
+
+def _handle_add_requirement(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """Add a structured requirement to a task's metadata.
+
+    Requirements are stored in metadata.requirements as a list of objects:
+    [{"id": "req-1", "type": "acceptance", "text": "..."}, ...]
+
+    Each requirement has:
+    - id: Auto-generated unique ID (e.g., "req-1", "req-2")
+    - type: Requirement type (acceptance, technical, constraint)
+    - text: Requirement description text
+    """
+    request_id = _request_id()
+    action = "add-requirement"
+    spec_id = payload.get("spec_id")
+    task_id = payload.get("task_id")
+    requirement_type = payload.get("requirement_type")
+    text = payload.get("text")
+
+    # Validate required fields
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    if not isinstance(task_id, str) or not task_id.strip():
+        return _validation_error(
+            field="task_id",
+            action=action,
+            message="Provide a non-empty task identifier",
+            request_id=request_id,
+        )
+    if not isinstance(requirement_type, str) or not requirement_type.strip():
+        return _validation_error(
+            field="requirement_type",
+            action=action,
+            message="Provide a requirement type",
+            request_id=request_id,
+        )
+
+    # Validate requirement_type
+    requirement_type_lower = requirement_type.lower().strip()
+    if requirement_type_lower not in REQUIREMENT_TYPES:
+        return _validation_error(
+            field="requirement_type",
+            action=action,
+            message=f"Must be one of: {', '.join(REQUIREMENT_TYPES)}",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    # Validate text
+    if not isinstance(text, str) or not text.strip():
+        return _validation_error(
+            field="text",
+            action=action,
+            message="Provide non-empty requirement text",
+            request_id=request_id,
+        )
+
+    # Validate dry_run
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    start = time.perf_counter()
+
+    # Call the core function
+    result, error = update_task_requirements(
+        spec_id=spec_id.strip(),
+        task_id=task_id.strip(),
+        action="add",
+        requirement_type=requirement_type_lower,
+        text=text.strip(),
+        dry_run=dry_run_bool,
+        specs_dir=specs_dir,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error or result is None:
+        # Determine appropriate error code based on error message
+        error_lower = (error or "").lower()
+        if "not found" in error_lower:
+            if "specification" in error_lower:
+                code = ErrorCode.SPEC_NOT_FOUND
+                err_type = ErrorType.NOT_FOUND
+                remediation = "Verify the spec ID exists"
+            else:
+                code = ErrorCode.TASK_NOT_FOUND
+                err_type = ErrorType.NOT_FOUND
+                remediation = "Verify the task ID exists in the specification"
+        elif "maximum" in error_lower or "limit" in error_lower:
+            code = ErrorCode.LIMIT_EXCEEDED
+            err_type = ErrorType.VALIDATION
+            remediation = "Remove some requirements before adding new ones"
+        elif "requirement_type" in error_lower:
+            code = ErrorCode.INVALID_FORMAT
+            err_type = ErrorType.VALIDATION
+            remediation = f"Use one of: {', '.join(REQUIREMENT_TYPES)}"
+        else:
+            code = ErrorCode.VALIDATION_ERROR
+            err_type = ErrorType.VALIDATION
+            remediation = "Check task ID and requirement fields"
+
+        return asdict(
+            error_response(
+                error or "Failed to add requirement",
+                error_code=code,
+                error_type=err_type,
+                remediation=remediation,
+                request_id=request_id,
+                telemetry={"duration_ms": round(elapsed_ms, 2)},
+            )
+        )
+
+    # Build success response
+    response = success_response(
+        **result,
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(
+        _metric(action),
+        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
+    )
+    return asdict(response)
+
+
+_VALID_NODE_TYPES = {"task", "verify", "phase", "subtask"}
+# Note: VALID_VERIFICATION_TYPES imported from foundry_mcp.core.validation
+
+
+def _match_nodes_for_batch(
+    hierarchy: Dict[str, Any],
+    *,
+    phase_id: Optional[str] = None,
+    pattern: Optional[str] = None,
+    node_type: Optional[str] = None,
+) -> List[str]:
+    """Filter nodes by phase_id, regex pattern on title/id, and/or node_type.
+
+    All provided filters are combined with AND logic.
+    Returns list of matching node IDs.
+    """
+    matched: List[str] = []
+    compiled_pattern = None
+    if pattern:
+        try:
+            compiled_pattern = re.compile(pattern, re.IGNORECASE)
+        except re.error:
+            return []  # Invalid regex returns empty
+
+    for node_id, node_data in hierarchy.items():
+        if node_id == "spec-root":
+            continue
+
+        # Filter by node_type if specified
+        if node_type and node_data.get("type") != node_type:
+            continue
+
+        # Filter by phase_id if specified (must be under that phase)
+        if phase_id:
+            node_parent = node_data.get("parent")
+            # Direct children of the phase
+            if node_parent != phase_id:
+                # Check if it's a nested child (e.g., subtask under task under phase)
+                parent_node = hierarchy.get(node_parent, {})
+                if parent_node.get("parent") != phase_id:
+                    continue
+
+        # Filter by regex pattern on title or node_id
+        if compiled_pattern:
+            title = node_data.get("title", "")
+            if not (compiled_pattern.search(title) or compiled_pattern.search(node_id)):
+                continue
+
+        matched.append(node_id)
+
+    return sorted(matched)
+
+
+def _handle_metadata_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    """Batch update metadata across multiple tasks matching specified criteria.
+
+    Filters (combined with AND logic):
+    - status_filter: Filter by task status (pending, in_progress, completed, blocked)
+    - parent_filter: Filter by parent node ID (e.g., phase-1, task-2-1)
+    - pattern: Regex pattern to match task titles/IDs
+
+    Legacy filters (deprecated, use parent_filter instead):
+    - phase_id: Alias for parent_filter
+
+    Metadata fields supported:
+    - description, file_path, estimated_hours, category, labels, owners
+    - update_metadata: Dict for custom metadata fields (verification_type, command, etc.)
+    """
+    request_id = _request_id()
+    action = "metadata-batch"
+    start = time.perf_counter()
+
+    # Required: spec_id
+    spec_id = payload.get("spec_id")
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+    spec_id = spec_id.strip()
+
+    # Extract filter parameters
+    status_filter = payload.get("status_filter")
+    parent_filter = payload.get("parent_filter")
+    phase_id = payload.get("phase_id")  # Legacy alias for parent_filter
+    pattern = payload.get("pattern")
+
+    # Use phase_id as parent_filter if parent_filter not provided (backwards compat)
+    if parent_filter is None and phase_id is not None:
+        parent_filter = phase_id
+
+    # Validate status_filter
+    if status_filter is not None:
+        if not isinstance(status_filter, str) or status_filter not in _ALLOWED_STATUS:
+            return _validation_error(
+                field="status_filter",
+                action=action,
+                message=f"status_filter must be one of: {sorted(_ALLOWED_STATUS)}",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+
+    # Validate parent_filter
+    if parent_filter is not None:
+        if not isinstance(parent_filter, str) or not parent_filter.strip():
+            return _validation_error(
+                field="parent_filter",
+                action=action,
+                message="parent_filter must be a non-empty string",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+        parent_filter = parent_filter.strip()
+
+    # Validate pattern
+    if pattern is not None:
+        if not isinstance(pattern, str) or not pattern.strip():
+            return _validation_error(
+                field="pattern",
+                action=action,
+                message="pattern must be a non-empty string",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+        try:
+            re.compile(pattern)
+        except re.error as exc:
+            return _validation_error(
+                field="pattern",
+                action=action,
+                message=f"Invalid regex pattern: {exc}",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+        pattern = pattern.strip()
+
+    # At least one filter must be provided
+    if not any([status_filter, parent_filter, pattern]):
+        return _validation_error(
+            field="status_filter",
+            action=action,
+            message="Provide at least one filter: status_filter, parent_filter, or pattern",
+            request_id=request_id,
+            code=ErrorCode.MISSING_REQUIRED,
+            remediation="Specify status_filter, parent_filter (or phase_id), and/or pattern to target tasks",
+        )
+
+    # Extract metadata fields
+    description = payload.get("description")
+    file_path = payload.get("file_path")
+    estimated_hours = payload.get("estimated_hours")
+    category = payload.get("category")
+    labels = payload.get("labels")
+    owners = payload.get("owners")
+    update_metadata = payload.get("update_metadata")  # Dict for custom fields
+    dry_run = payload.get("dry_run", False)
+
+    # Validate metadata fields
+    if description is not None and not isinstance(description, str):
+        return _validation_error(
+            field="description",
+            action=action,
+            message="description must be a string",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    if file_path is not None and not isinstance(file_path, str):
+        return _validation_error(
+            field="file_path",
+            action=action,
+            message="file_path must be a string",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    if estimated_hours is not None:
+        if not isinstance(estimated_hours, (int, float)) or estimated_hours < 0:
+            return _validation_error(
+                field="estimated_hours",
+                action=action,
+                message="estimated_hours must be a non-negative number",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+
+    if category is not None and not isinstance(category, str):
+        return _validation_error(
+            field="category",
+            action=action,
+            message="category must be a string",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    if labels is not None:
+        if not isinstance(labels, dict) or not all(
+            isinstance(k, str) and isinstance(v, str) for k, v in labels.items()
+        ):
+            return _validation_error(
+                field="labels",
+                action=action,
+                message="labels must be a dict with string keys and values",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+
+    if owners is not None:
+        if not isinstance(owners, list) or not all(isinstance(o, str) for o in owners):
+            return _validation_error(
+                field="owners",
+                action=action,
+                message="owners must be a list of strings",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+
+    if update_metadata is not None and not isinstance(update_metadata, dict):
+        return _validation_error(
+            field="update_metadata",
+            action=action,
+            message="update_metadata must be a dict",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+
+    # At least one metadata field must be provided
+    has_metadata = any([
+        description is not None,
+        file_path is not None,
+        estimated_hours is not None,
+        category is not None,
+        labels is not None,
+        owners is not None,
+        update_metadata,
+    ])
+    if not has_metadata:
+        return _validation_error(
+            field="description",
+            action=action,
+            message="Provide at least one metadata field to update",
+            request_id=request_id,
+            code=ErrorCode.MISSING_REQUIRED,
+            remediation="Specify description, file_path, estimated_hours, category, labels, owners, or update_metadata",
+        )
+
+    # Resolve specs directory
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    if specs_dir is None:
+        return _specs_dir_missing_error(request_id)
+
+    # Delegate to core helper
+    result, error = batch_update_tasks(
+        spec_id,
+        status_filter=status_filter,
+        parent_filter=parent_filter,
+        pattern=pattern,
+        description=description,
+        file_path=file_path,
+        estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
+        category=category,
+        labels=labels,
+        owners=owners,
+        custom_metadata=update_metadata,
+        dry_run=bool(dry_run),
+        specs_dir=specs_dir,
+    )
+
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    if error:
+        _metrics.counter(_metric(action), labels={"status": "error"})
+        # Map helper errors to response-v2 format
+        if "not found" in error.lower():
+            return asdict(
+                error_response(
+                    error,
+                    error_code=ErrorCode.NOT_FOUND,
+                    error_type=ErrorType.NOT_FOUND,
+                    remediation="Check spec_id and parent_filter values",
+                    request_id=request_id,
+                )
+            )
+        if "at least one" in error.lower() or "must be" in error.lower():
+            return asdict(
+                error_response(
+                    error,
+                    error_code=ErrorCode.VALIDATION_ERROR,
+                    error_type=ErrorType.VALIDATION,
+                    remediation="Check filter and metadata parameters",
+                    request_id=request_id,
+                )
+            )
+        return asdict(
+            error_response(
+                error,
+                error_code=ErrorCode.INTERNAL_ERROR,
+                error_type=ErrorType.INTERNAL,
+                remediation="Check filesystem permissions and retry",
+                request_id=request_id,
+            )
+        )
+
+    assert result is not None
+
+    # Build response with response-v2 envelope
+    warnings: List[str] = result.get("warnings", [])
+    if result["matched_count"] > _TASK_WARNING_THRESHOLD and not warnings:
+        warnings.append(
+            f"Updated {result['matched_count']} tasks; consider using more specific filters."
+        )
+
+    response = success_response(
+        spec_id=result["spec_id"],
+        matched_count=result["matched_count"],
+        updated_count=result["updated_count"],
+        skipped_count=result.get("skipped_count", 0),
+        nodes=result["nodes"],
+        filters=result["filters"],
+        metadata_applied=result["metadata_applied"],
+        dry_run=result["dry_run"],
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+
+    response_dict = asdict(response)
+    if warnings:
+        meta = response_dict.setdefault("meta", {})
+        meta["warnings"] = warnings
+    if result.get("skipped_tasks"):
+        response_dict["data"]["skipped_tasks"] = result["skipped_tasks"]
+
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+    return response_dict
+
+
+def _handle_fix_verification_types(
+    *, config: ServerConfig, payload: Dict[str, Any]
+) -> dict:
+    """Fix verification types across all verify nodes in a spec.
+
+    This action:
+    1. Finds all verify nodes with invalid or missing verification_type
+    2. Maps legacy values (e.g., 'test' -> 'run-tests') using VERIFICATION_TYPE_MAPPING
+    3. Sets missing types to 'run-tests' (default)
+    4. Sets unknown types to 'manual' (fallback)
+
+    Supports dry-run mode to preview changes without persisting.
+    """
+    request_id = _request_id()
+    action = "fix-verification-types"
+
+    # Required: spec_id
+    spec_id = payload.get("spec_id")
+    if not isinstance(spec_id, str) or not spec_id.strip():
+        return _validation_error(
+            field="spec_id",
+            action=action,
+            message="Provide a non-empty spec identifier",
+            request_id=request_id,
+        )
+
+    dry_run = payload.get("dry_run", False)
+    if dry_run is not None and not isinstance(dry_run, bool):
+        return _validation_error(
+            field="dry_run",
+            action=action,
+            message="dry_run must be a boolean",
+            request_id=request_id,
+            code=ErrorCode.INVALID_FORMAT,
+        )
+    dry_run_bool = bool(dry_run)
+
+    # Load spec
+    workspace = payload.get("workspace")
+    specs_dir = _resolve_specs_dir(config, workspace)
+    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
+    if error:
+        return error
+    assert spec_data is not None
+
+    start = time.perf_counter()
+    hierarchy = spec_data.get("hierarchy", {})
+
+    # Find verify nodes and collect fixes
+    fixes: List[Dict[str, Any]] = []
+    for node_id, node_data in hierarchy.items():
+        if node_data.get("type") != "verify":
+            continue
+
+        metadata = node_data.get("metadata", {})
+        current_type = metadata.get("verification_type")
+
+        # Determine the fix needed
+        fix_info: Optional[Dict[str, Any]] = None
+
+        if current_type is None:
+            # Missing verification_type -> default to 'run-tests'
+            fix_info = {
+                "node_id": node_id,
+                "title": node_data.get("title", ""),
+                "issue": "missing",
+                "old_value": None,
+                "new_value": "run-tests",
+            }
+        elif current_type not in VALID_VERIFICATION_TYPES:
+            # Invalid type -> check mapping or fallback to 'manual'
+            mapped = VERIFICATION_TYPE_MAPPING.get(current_type)
+            if mapped:
+                fix_info = {
+                    "node_id": node_id,
+                    "title": node_data.get("title", ""),
+                    "issue": "legacy",
+                    "old_value": current_type,
+                    "new_value": mapped,
+                }
+            else:
+                fix_info = {
+                    "node_id": node_id,
+                    "title": node_data.get("title", ""),
+                    "issue": "invalid",
+                    "old_value": current_type,
+                    "new_value": "manual",
+                }
+
+        if fix_info:
+            fixes.append(fix_info)
+
+            if not dry_run_bool:
+                # Apply the fix
+                if "metadata" not in node_data:
+                    node_data["metadata"] = {}
+                node_data["metadata"]["verification_type"] = fix_info["new_value"]
+
+    # Save if not dry_run and there were fixes
+    if not dry_run_bool and fixes:
+        if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
+            return asdict(
+                error_response(
+                    "Failed to save spec after fixing verification types",
+                    error_code=ErrorCode.INTERNAL_ERROR,
+                    error_type=ErrorType.INTERNAL,
+                    remediation="Check filesystem permissions and retry",
+                    request_id=request_id,
+                )
+            )
+
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    # Count by issue type
+    missing_count = sum(1 for f in fixes if f["issue"] == "missing")
+    legacy_count = sum(1 for f in fixes if f["issue"] == "legacy")
+    invalid_count = sum(1 for f in fixes if f["issue"] == "invalid")
+
+    response = success_response(
+        spec_id=spec_id.strip(),
+        total_fixes=len(fixes),
+        applied_count=len(fixes) if not dry_run_bool else 0,
+        fixes=fixes,
+        summary={
+            "missing_set_to_run_tests": missing_count,
+            "legacy_mapped": legacy_count,
+            "invalid_set_to_manual": invalid_count,
+        },
+        valid_types=sorted(VALID_VERIFICATION_TYPES),
+        legacy_mappings=VERIFICATION_TYPE_MAPPING,
+        dry_run=dry_run_bool,
+        request_id=request_id,
+        telemetry={"duration_ms": round(elapsed_ms, 2)},
+    )
+
+    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
+    _metrics.counter(_metric(action), labels={"status": "success"})
+    return asdict(response)
+
+
+_ACTION_DEFINITIONS = [
+    ActionDefinition(
+        name="prepare",
+        handler=_handle_prepare,
+        summary="Prepare next actionable task context",
+    ),
+    ActionDefinition(
+        name="next", handler=_handle_next, summary="Return the next actionable task"
+    ),
+    ActionDefinition(
+        name="info", handler=_handle_info, summary="Fetch task metadata by ID"
+    ),
+    ActionDefinition(
+        name="check-deps",
+        handler=_handle_check_deps,
+        summary="Analyze task dependencies and blockers",
+    ),
+    ActionDefinition(name="start", handler=_handle_start, summary="Start a task"),
+    ActionDefinition(
+        name="complete", handler=_handle_complete, summary="Complete a task"
+    ),
+    ActionDefinition(
+        name="update-status",
+        handler=_handle_update_status,
+        summary="Update task status",
+    ),
+    ActionDefinition(name="block", handler=_handle_block, summary="Block a task"),
+    ActionDefinition(name="unblock", handler=_handle_unblock, summary="Unblock a task"),
+    ActionDefinition(
+        name="list-blocked",
+        handler=_handle_list_blocked,
+        summary="List blocked tasks",
+    ),
+    ActionDefinition(name="add", handler=_handle_add, summary="Add a task"),
+    ActionDefinition(name="remove", handler=_handle_remove, summary="Remove a task"),
+    ActionDefinition(
+        name="move",
+        handler=_handle_move,
+        summary="Move task to new position or parent",
+    ),
+    ActionDefinition(
+        name="add-dependency",
+        handler=_handle_add_dependency,
+        summary="Add a dependency between two tasks",
+    ),
+    ActionDefinition(
+        name="remove-dependency",
+        handler=_handle_remove_dependency,
+        summary="Remove a dependency between two tasks",
+    ),
+    ActionDefinition(
+        name="add-requirement",
+        handler=_handle_add_requirement,
+        summary="Add a structured requirement to a task",
+    ),
+    ActionDefinition(
+        name="update-estimate",
+        handler=_handle_update_estimate,
+        summary="Update estimated effort",
+    ),
+    ActionDefinition(
+        name="update-metadata",
+        handler=_handle_update_metadata,
+        summary="Update task metadata fields",
+    ),
+    ActionDefinition(
+        name="metadata-batch",
+        handler=_handle_metadata_batch,
+        summary="Batch update metadata across multiple nodes matching filters",
+    ),
+    ActionDefinition(
+        name="fix-verification-types",
+        handler=_handle_fix_verification_types,
+        summary="Fix invalid/missing verification types across verify nodes",
     ),
     ActionDefinition(
         name="progress",
@@ -2140,6 +3235,7 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
         resolution: Optional[str] = None,
         title: Optional[str] = None,
         description: Optional[str] = None,
+        acceptance_criteria: Optional[List[str]] = None,
         task_type: str = "task",
         estimated_hours: Optional[float] = None,
         position: Optional[int] = None,
@@ -2155,6 +3251,15 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
         dry_run: bool = False,
         max_depth: int = 2,
         include_metadata: bool = False,
+        # metadata-batch specific parameters
+        phase_id: Optional[str] = None,
+        pattern: Optional[str] = None,
+        node_type: Optional[str] = None,
+        owners: Optional[List[str]] = None,
+        labels: Optional[Dict[str, str]] = None,
+        category: Optional[str] = None,
+        parent_filter: Optional[str] = None,
+        update_metadata: Optional[Dict[str, Any]] = None,
     ) -> dict:
         payload = {
             "spec_id": spec_id,
@@ -2176,6 +3281,7 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
             "resolution": resolution,
             "title": title,
             "description": description,
+            "acceptance_criteria": acceptance_criteria,
             "task_type": task_type,
             "estimated_hours": estimated_hours,
             "position": position,
@@ -2191,6 +3297,15 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
             "dry_run": dry_run,
             "max_depth": max_depth,
             "include_metadata": include_metadata,
+            # metadata-batch specific
+            "phase_id": phase_id,
+            "pattern": pattern,
+            "node_type": node_type,
+            "owners": owners,
+            "labels": labels,
+            "category": category,
+            "parent_filter": parent_filter,
+            "update_metadata": update_metadata,
         }
         return _dispatch_task_action(action=action, payload=payload, config=config)
 
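Taken together, the wrapper's new keyword arguments let metadata-batch flow through the same dispatch path as the other actions. A hypothetical call mirroring the signature above; the values are illustrative, and config is assumed to be the ServerConfig already in scope:

result = _dispatch_task_action(
    action="metadata-batch",
    payload={
        "spec_id": "spec-example",
        "status_filter": "pending",   # filters combine with AND logic
        "parent_filter": "phase-1",   # preferred over the legacy phase_id alias
        "estimated_hours": 2.5,
        "labels": {"area": "cli"},
        "dry_run": True,              # preview matched tasks without writing
    },
    config=config,
)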