foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/__init__.py +0 -13
  3. foundry_mcp/cli/commands/plan.py +10 -3
  4. foundry_mcp/cli/commands/review.py +19 -4
  5. foundry_mcp/cli/commands/session.py +1 -8
  6. foundry_mcp/cli/commands/specs.py +38 -208
  7. foundry_mcp/cli/context.py +39 -0
  8. foundry_mcp/cli/output.py +3 -3
  9. foundry_mcp/config.py +615 -11
  10. foundry_mcp/core/ai_consultation.py +146 -9
  11. foundry_mcp/core/batch_operations.py +1196 -0
  12. foundry_mcp/core/discovery.py +7 -7
  13. foundry_mcp/core/error_store.py +2 -2
  14. foundry_mcp/core/intake.py +933 -0
  15. foundry_mcp/core/llm_config.py +28 -2
  16. foundry_mcp/core/metrics_store.py +2 -2
  17. foundry_mcp/core/naming.py +25 -2
  18. foundry_mcp/core/progress.py +70 -0
  19. foundry_mcp/core/prometheus.py +0 -13
  20. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  21. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  22. foundry_mcp/core/prompts/plan_review.py +5 -1
  23. foundry_mcp/core/providers/__init__.py +12 -0
  24. foundry_mcp/core/providers/base.py +39 -0
  25. foundry_mcp/core/providers/claude.py +51 -48
  26. foundry_mcp/core/providers/codex.py +70 -60
  27. foundry_mcp/core/providers/cursor_agent.py +25 -47
  28. foundry_mcp/core/providers/detectors.py +34 -7
  29. foundry_mcp/core/providers/gemini.py +69 -58
  30. foundry_mcp/core/providers/opencode.py +101 -47
  31. foundry_mcp/core/providers/package-lock.json +4 -4
  32. foundry_mcp/core/providers/package.json +1 -1
  33. foundry_mcp/core/providers/validation.py +128 -0
  34. foundry_mcp/core/research/__init__.py +68 -0
  35. foundry_mcp/core/research/memory.py +528 -0
  36. foundry_mcp/core/research/models.py +1220 -0
  37. foundry_mcp/core/research/providers/__init__.py +40 -0
  38. foundry_mcp/core/research/providers/base.py +242 -0
  39. foundry_mcp/core/research/providers/google.py +507 -0
  40. foundry_mcp/core/research/providers/perplexity.py +442 -0
  41. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  42. foundry_mcp/core/research/providers/tavily.py +383 -0
  43. foundry_mcp/core/research/workflows/__init__.py +25 -0
  44. foundry_mcp/core/research/workflows/base.py +298 -0
  45. foundry_mcp/core/research/workflows/chat.py +271 -0
  46. foundry_mcp/core/research/workflows/consensus.py +539 -0
  47. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  48. foundry_mcp/core/research/workflows/ideate.py +682 -0
  49. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  50. foundry_mcp/core/responses.py +690 -0
  51. foundry_mcp/core/spec.py +2439 -236
  52. foundry_mcp/core/task.py +1205 -31
  53. foundry_mcp/core/testing.py +512 -123
  54. foundry_mcp/core/validation.py +319 -43
  55. foundry_mcp/dashboard/components/charts.py +0 -57
  56. foundry_mcp/dashboard/launcher.py +11 -0
  57. foundry_mcp/dashboard/views/metrics.py +25 -35
  58. foundry_mcp/dashboard/views/overview.py +1 -65
  59. foundry_mcp/resources/specs.py +25 -25
  60. foundry_mcp/schemas/intake-schema.json +89 -0
  61. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  62. foundry_mcp/server.py +0 -14
  63. foundry_mcp/tools/unified/__init__.py +39 -18
  64. foundry_mcp/tools/unified/authoring.py +2371 -248
  65. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  66. foundry_mcp/tools/unified/environment.py +434 -32
  67. foundry_mcp/tools/unified/error.py +18 -1
  68. foundry_mcp/tools/unified/lifecycle.py +8 -0
  69. foundry_mcp/tools/unified/plan.py +133 -2
  70. foundry_mcp/tools/unified/provider.py +0 -40
  71. foundry_mcp/tools/unified/research.py +1283 -0
  72. foundry_mcp/tools/unified/review.py +374 -17
  73. foundry_mcp/tools/unified/review_helpers.py +16 -1
  74. foundry_mcp/tools/unified/server.py +9 -24
  75. foundry_mcp/tools/unified/spec.py +367 -0
  76. foundry_mcp/tools/unified/task.py +1664 -30
  77. foundry_mcp/tools/unified/test.py +69 -8
  78. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
  79. foundry_mcp-0.8.10.dist-info/RECORD +153 -0
  80. foundry_mcp/cli/flags.py +0 -266
  81. foundry_mcp/core/feature_flags.py +0 -592
  82. foundry_mcp-0.3.3.dist-info/RECORD +0 -135
  83. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  84. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  85. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
@@ -3,6 +3,7 @@
3
3
  from __future__ import annotations
4
4
 
5
5
  import logging
6
+ import re
6
7
  import time
7
8
  from dataclasses import asdict
8
9
  from pathlib import Path
@@ -24,6 +25,7 @@ from foundry_mcp.core.pagination import (
24
25
  from foundry_mcp.core.progress import (
25
26
  get_progress_summary,
26
27
  list_phases,
28
+ sync_computed_fields,
27
29
  update_parent_status,
28
30
  )
29
31
  from foundry_mcp.core.responses import (
@@ -43,12 +45,34 @@ from foundry_mcp.core.journal import (
43
45
  )
44
46
  from foundry_mcp.core.task import (
45
47
  add_task,
48
+ batch_update_tasks,
46
49
  check_dependencies,
47
50
  get_next_task,
51
+ manage_task_dependency,
52
+ move_task,
48
53
  prepare_task as core_prepare_task,
49
54
  remove_task,
55
+ REQUIREMENT_TYPES,
50
56
  update_estimate,
51
57
  update_task_metadata,
58
+ update_task_requirements,
59
+ )
60
+ from foundry_mcp.core.batch_operations import (
61
+ prepare_batch_context,
62
+ start_batch,
63
+ complete_batch,
64
+ reset_batch,
65
+ DEFAULT_MAX_TASKS,
66
+ DEFAULT_TOKEN_BUDGET,
67
+ STALE_TASK_THRESHOLD_HOURS,
68
+ )
69
+ from foundry_mcp.cli.context import (
70
+ AutonomousSession,
71
+ get_context_tracker,
72
+ )
73
+ from foundry_mcp.core.validation import (
74
+ VALID_VERIFICATION_TYPES,
75
+ VERIFICATION_TYPE_MAPPING,
52
76
  )
53
77
  from foundry_mcp.tools.unified.router import (
54
78
  ActionDefinition,
@@ -247,6 +271,303 @@ def _handle_prepare(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
247
271
  return _attach_meta(result, request_id=request_id, duration_ms=elapsed_ms)
248
272
 
249
273
 
274
def _handle_prepare_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Handle the ``prepare-batch`` action for parallel task execution.

    Validates the payload, resolves the specs directory, then delegates to
    ``prepare_batch_context`` to collect multiple independent tasks (with
    context) that can be implemented in parallel.

    Args:
        config: Server configuration used to resolve the specs directory.
        payload: Action payload. Recognised keys: ``spec_id`` (required),
            ``max_tasks`` (default ``DEFAULT_MAX_TASKS``), ``token_budget``
            (default ``DEFAULT_TOKEN_BUDGET``), and ``workspace``.

    Returns:
        A plain ``dict`` response — validation error, operation error, or a
        success payload with batch context and request metadata attached.
    """
    request_id = _request_id()
    action = "prepare-batch"

    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    # Optional parameters with defaults. ``bool`` is a subclass of ``int``,
    # so reject it explicitly — ``max_tasks=True`` must not silently mean 1.
    max_tasks = payload.get("max_tasks", DEFAULT_MAX_TASKS)
    if isinstance(max_tasks, bool) or not isinstance(max_tasks, int) or max_tasks < 1:
        return _validation_error(
            field="max_tasks",
            action=action,
            message="max_tasks must be a positive integer",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
        )

    token_budget = payload.get("token_budget", DEFAULT_TOKEN_BUDGET)
    if (
        isinstance(token_budget, bool)
        or not isinstance(token_budget, int)
        or token_budget < 1000
    ):
        return _validation_error(
            field="token_budget",
            action=action,
            message="token_budget must be an integer >= 1000",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    result, error = prepare_batch_context(
        spec_id=spec_id.strip(),
        max_tasks=max_tasks,
        token_budget=token_budget,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.OPERATION_FAILED,
                error_type=ErrorType.VALIDATION,
                request_id=request_id,
            )
        )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    # Build response with batch context.
    response = success_response(
        spec_id=spec_id.strip(),
        tasks=result.get("tasks", []),
        task_count=result.get("task_count", 0),
        spec_complete=result.get("spec_complete", False),
        all_blocked=result.get("all_blocked", False),
        stale_tasks=result.get("stale_tasks", []),
        dependency_graph=result.get("dependency_graph", {}),
        token_estimate=result.get("token_estimate", 0),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )

    warnings = result.get("warnings", [])
    return _attach_meta(
        asdict(response),
        request_id=request_id,
        duration_ms=elapsed_ms,
        warnings=warnings if warnings else None,
    )
362
+
363
def _handle_start_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Atomically start several tasks for one spec.

    Every task ID is validated up front; no state changes are made unless the
    whole batch can be started. On failure, any partial results returned by
    ``start_batch`` are surfaced in the error response's ``details``.
    """
    request_id = _request_id()
    action = "start-batch"

    spec_id = payload.get("spec_id")
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    task_ids = payload.get("task_ids")
    if not (isinstance(task_ids, list) and task_ids):
        return _validation_error(
            field="task_ids",
            action=action,
            message="Provide a non-empty list of task IDs",
            request_id=request_id,
        )

    # Every entry must be a non-empty string before anything is touched.
    for index, candidate in enumerate(task_ids):
        if not (isinstance(candidate, str) and candidate.strip()):
            return _validation_error(
                field=f"task_ids[{index}]",
                action=action,
                message="Each task ID must be a non-empty string",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
            )

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    t0 = time.perf_counter()
    result, error = start_batch(
        spec_id=spec_id.strip(),
        task_ids=[candidate.strip() for candidate in task_ids],
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - t0) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        # Pass along whatever partial results the core layer produced.
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.OPERATION_FAILED,
                error_type=ErrorType.VALIDATION,
                request_id=request_id,
                details=result if result else None,
            )
        )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    ok = success_response(
        spec_id=spec_id.strip(),
        started=result.get("started", []),
        started_count=result.get("started_count", 0),
        started_at=result.get("started_at"),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    return _attach_meta(asdict(ok), request_id=request_id, duration_ms=elapsed_ms)
438
+
439
+
440
def _handle_complete_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Complete several tasks at once, tolerating per-task failures.

    Delegates to ``complete_batch``; the success payload reports per-task
    results plus completed/failed/total counters.
    """
    request_id = _request_id()
    action = "complete-batch"

    spec_id = payload.get("spec_id")
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    completions = payload.get("completions")
    if not (isinstance(completions, list) and completions):
        return _validation_error(
            field="completions",
            action=action,
            message="Provide a non-empty list of completions",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    t0 = time.perf_counter()
    result, error = complete_batch(
        spec_id=spec_id.strip(),
        completions=completions,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - t0) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.OPERATION_FAILED,
                error_type=ErrorType.VALIDATION,
                request_id=request_id,
                details=result if result else None,
            )
        )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    ok = success_response(
        spec_id=spec_id.strip(),
        results=result.get("results", {}),
        completed_count=result.get("completed_count", 0),
        failed_count=result.get("failed_count", 0),
        total_processed=result.get("total_processed", 0),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    return _attach_meta(asdict(ok), request_id=request_id, duration_ms=elapsed_ms)
478
+
479
+
480
def _handle_reset_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Reset stale or explicitly listed in-progress tasks back to pending.

    Resets each affected task's status to pending and clears its
    ``started_at`` timestamp. When ``task_ids`` is omitted, stale tasks are
    discovered automatically using ``threshold_hours``.
    """
    request_id = _request_id()
    action = "reset-batch"

    spec_id = payload.get("spec_id")
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    # Optional: explicit task IDs to reset (otherwise stale detection runs).
    task_ids = payload.get("task_ids")
    if task_ids is not None:
        if not isinstance(task_ids, list):
            return _validation_error(
                field="task_ids",
                action=action,
                message="task_ids must be a list of strings",
                request_id=request_id,
            )
        for index, candidate in enumerate(task_ids):
            if not (isinstance(candidate, str) and candidate.strip()):
                return _validation_error(
                    field=f"task_ids[{index}]",
                    action=action,
                    message="Each task ID must be a non-empty string",
                    request_id=request_id,
                    code=ErrorCode.VALIDATION_ERROR,
                )
        task_ids = [candidate.strip() for candidate in task_ids]

    # Optional: staleness threshold in hours for automatic detection.
    threshold_hours = payload.get("threshold_hours", STALE_TASK_THRESHOLD_HOURS)
    if not isinstance(threshold_hours, (int, float)) or threshold_hours <= 0:
        return _validation_error(
            field="threshold_hours",
            action=action,
            message="threshold_hours must be a positive number",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    t0 = time.perf_counter()
    result, error = reset_batch(
        spec_id=spec_id.strip(),
        task_ids=task_ids,
        threshold_hours=float(threshold_hours),
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - t0) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.OPERATION_FAILED,
                error_type=ErrorType.VALIDATION,
                request_id=request_id,
                details=result if result else None,
            )
        )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    ok = success_response(
        spec_id=spec_id.strip(),
        reset=result.get("reset", []),
        reset_count=result.get("reset_count", 0),
        errors=result.get("errors"),
        message=result.get("message"),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    return _attach_meta(asdict(ok), request_id=request_id, duration_ms=elapsed_ms)
569
+
570
+
250
571
  def _handle_next(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
251
572
  request_id = _request_id()
252
573
  action = "next"
@@ -552,7 +873,7 @@ def _handle_list(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
552
873
  if has_more and page_tasks:
553
874
  next_cursor = encode_cursor({"last_id": page_tasks[-1].get("id")})
554
875
 
555
- (time.perf_counter() - start) * 1000
876
+ _ = (time.perf_counter() - start) * 1000 # timing placeholder
556
877
  warnings = _pagination_warnings(total_count, has_more)
557
878
  response = paginated_response(
558
879
  data={
@@ -980,6 +1301,7 @@ def _handle_start(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
980
1301
  )
981
1302
 
982
1303
  update_parent_status(spec_data, task_id.strip())
1304
+ sync_computed_fields(spec_data)
983
1305
 
984
1306
  if note:
985
1307
  add_journal_entry(
@@ -1069,8 +1391,38 @@ def _handle_complete(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1069
1391
  )
1070
1392
 
1071
1393
  update_parent_status(spec_data, task_id.strip())
1394
+ sync_computed_fields(spec_data)
1072
1395
 
1073
1396
  task_data = spec_data.get("hierarchy", {}).get(task_id.strip(), {})
1397
+
1398
+ # Determine if commit is suggested based on git cadence config
1399
+ suggest_commit = False
1400
+ commit_scope: Optional[str] = None
1401
+ commit_message_hint: Optional[str] = None
1402
+
1403
+ if config.git.enabled:
1404
+ cadence = config.git.commit_cadence
1405
+ hierarchy = spec_data.get("hierarchy", {})
1406
+
1407
+ if cadence == "task":
1408
+ suggest_commit = True
1409
+ commit_scope = "task"
1410
+ commit_message_hint = f"task: {task_data.get('title', task_id.strip())}"
1411
+ elif cadence == "phase":
1412
+ # Check if parent phase just completed
1413
+ parent_id = task_data.get("parent")
1414
+ if parent_id:
1415
+ parent_data = hierarchy.get(parent_id, {})
1416
+ # Only suggest commit if parent is a phase and is now completed
1417
+ if (
1418
+ parent_data.get("type") == "phase"
1419
+ and parent_data.get("status") == "completed"
1420
+ ):
1421
+ suggest_commit = True
1422
+ commit_scope = "phase"
1423
+ commit_message_hint = (
1424
+ f"phase: {parent_data.get('title', parent_id)}"
1425
+ )
1074
1426
  add_journal_entry(
1075
1427
  spec_data,
1076
1428
  title=f"Task Completed: {task_data.get('title', task_id.strip())}",
@@ -1103,6 +1455,9 @@ def _handle_complete(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1103
1455
  "total_tasks": progress.get("total_tasks", 0),
1104
1456
  "percentage": progress.get("percentage", 0),
1105
1457
  },
1458
+ suggest_commit=suggest_commit,
1459
+ commit_scope=commit_scope,
1460
+ commit_message_hint=commit_message_hint,
1106
1461
  request_id=request_id,
1107
1462
  telemetry={"duration_ms": round(elapsed_ms, 2)},
1108
1463
  )
@@ -1195,6 +1550,7 @@ def _handle_block(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1195
1550
  task_id=task_id.strip(),
1196
1551
  author="foundry-mcp",
1197
1552
  )
1553
+ sync_computed_fields(spec_data)
1198
1554
 
1199
1555
  if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
1200
1556
  return asdict(
@@ -1294,6 +1650,7 @@ def _handle_unblock(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1294
1650
  task_id=task_id.strip(),
1295
1651
  author="foundry-mcp",
1296
1652
  )
1653
+ sync_computed_fields(spec_data)
1297
1654
 
1298
1655
  if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
1299
1656
  return asdict(
@@ -1418,11 +1775,23 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1418
1775
  action = "add"
1419
1776
  spec_id = payload.get("spec_id")
1420
1777
  parent = payload.get("parent")
1778
+ phase_id = payload.get("phase_id") # Alias for parent
1779
+
1780
+ # Use phase_id as parent if parent not provided
1781
+ if parent is None and phase_id is not None:
1782
+ parent = phase_id
1783
+
1421
1784
  title = payload.get("title")
1422
1785
  description = payload.get("description")
1423
1786
  task_type = payload.get("task_type", "task")
1424
1787
  estimated_hours = payload.get("estimated_hours")
1425
1788
  position = payload.get("position")
1789
+ file_path = payload.get("file_path")
1790
+
1791
+ # Research-specific parameters
1792
+ research_type = payload.get("research_type")
1793
+ blocking_mode = payload.get("blocking_mode")
1794
+ query = payload.get("query")
1426
1795
 
1427
1796
  if not isinstance(spec_id, str) or not spec_id.strip():
1428
1797
  return _validation_error(
@@ -1477,6 +1846,57 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1477
1846
  request_id=request_id,
1478
1847
  code=ErrorCode.INVALID_FORMAT,
1479
1848
  )
1849
+ if file_path is not None and not isinstance(file_path, str):
1850
+ return _validation_error(
1851
+ field="file_path",
1852
+ action=action,
1853
+ message="file_path must be a string",
1854
+ request_id=request_id,
1855
+ code=ErrorCode.INVALID_FORMAT,
1856
+ )
1857
+
1858
+ # Validate research-specific parameters when task_type is "research"
1859
+ if task_type == "research":
1860
+ from foundry_mcp.core.validation import VALID_RESEARCH_TYPES, RESEARCH_BLOCKING_MODES
1861
+
1862
+ if research_type is not None and not isinstance(research_type, str):
1863
+ return _validation_error(
1864
+ field="research_type",
1865
+ action=action,
1866
+ message="research_type must be a string",
1867
+ request_id=request_id,
1868
+ code=ErrorCode.INVALID_FORMAT,
1869
+ )
1870
+ if research_type and research_type not in VALID_RESEARCH_TYPES:
1871
+ return _validation_error(
1872
+ field="research_type",
1873
+ action=action,
1874
+ message=f"Must be one of: {', '.join(sorted(VALID_RESEARCH_TYPES))}",
1875
+ request_id=request_id,
1876
+ )
1877
+ if blocking_mode is not None and not isinstance(blocking_mode, str):
1878
+ return _validation_error(
1879
+ field="blocking_mode",
1880
+ action=action,
1881
+ message="blocking_mode must be a string",
1882
+ request_id=request_id,
1883
+ code=ErrorCode.INVALID_FORMAT,
1884
+ )
1885
+ if blocking_mode and blocking_mode not in RESEARCH_BLOCKING_MODES:
1886
+ return _validation_error(
1887
+ field="blocking_mode",
1888
+ action=action,
1889
+ message=f"Must be one of: {', '.join(sorted(RESEARCH_BLOCKING_MODES))}",
1890
+ request_id=request_id,
1891
+ )
1892
+ if query is not None and not isinstance(query, str):
1893
+ return _validation_error(
1894
+ field="query",
1895
+ action=action,
1896
+ message="query must be a string",
1897
+ request_id=request_id,
1898
+ code=ErrorCode.INVALID_FORMAT,
1899
+ )
1480
1900
 
1481
1901
  dry_run = payload.get("dry_run", False)
1482
1902
  if dry_run is not None and not isinstance(dry_run, bool):
@@ -1518,15 +1938,22 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1518
1938
  )
1519
1939
 
1520
1940
  elapsed_ms = (time.perf_counter() - start) * 1000
1941
+ dry_run_data: Dict[str, Any] = {
1942
+ "spec_id": spec_id.strip(),
1943
+ "parent": parent.strip(),
1944
+ "title": title.strip(),
1945
+ "task_type": task_type,
1946
+ "position": position,
1947
+ "file_path": file_path.strip() if file_path else None,
1948
+ "dry_run": True,
1949
+ }
1950
+ # Include research parameters in dry_run response
1951
+ if task_type == "research":
1952
+ dry_run_data["research_type"] = research_type
1953
+ dry_run_data["blocking_mode"] = blocking_mode
1954
+ dry_run_data["query"] = query
1521
1955
  response = success_response(
1522
- data={
1523
- "spec_id": spec_id.strip(),
1524
- "parent": parent.strip(),
1525
- "title": title.strip(),
1526
- "task_type": task_type,
1527
- "position": position,
1528
- "dry_run": True,
1529
- },
1956
+ data=dry_run_data,
1530
1957
  request_id=request_id,
1531
1958
  telemetry={"duration_ms": round(elapsed_ms, 2)},
1532
1959
  )
@@ -1544,7 +1971,12 @@ def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
1544
1971
  task_type=task_type,
1545
1972
  estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
1546
1973
  position=position,
1974
+ file_path=file_path,
1547
1975
  specs_dir=specs_dir,
1976
+ # Research-specific parameters
1977
+ research_type=research_type,
1978
+ blocking_mode=blocking_mode,
1979
+ query=query,
1548
1980
  )
1549
1981
  elapsed_ms = (time.perf_counter() - start) * 1000
1550
1982
 
@@ -1897,9 +2329,21 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
1897
2329
  remediation="Provide custom_metadata as a JSON object",
1898
2330
  )
1899
2331
 
2332
+ acceptance_criteria = payload.get("acceptance_criteria")
2333
+ if acceptance_criteria is not None and not isinstance(acceptance_criteria, list):
2334
+ return _validation_error(
2335
+ field="acceptance_criteria",
2336
+ action=action,
2337
+ message="acceptance_criteria must be a list of strings",
2338
+ request_id=request_id,
2339
+ code=ErrorCode.INVALID_FORMAT,
2340
+ )
2341
+
1900
2342
  update_fields = [
2343
+ payload.get("title"),
1901
2344
  payload.get("file_path"),
1902
2345
  payload.get("description"),
2346
+ acceptance_criteria,
1903
2347
  payload.get("task_category"),
1904
2348
  payload.get("actual_hours"),
1905
2349
  payload.get("status_note"),
@@ -1911,12 +2355,12 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
1911
2355
  )
1912
2356
  if not has_update:
1913
2357
  return _validation_error(
1914
- field="file_path",
2358
+ field="title",
1915
2359
  action=action,
1916
- message="Provide at least one metadata field",
2360
+ message="Provide at least one field to update",
1917
2361
  request_id=request_id,
1918
2362
  code=ErrorCode.MISSING_REQUIRED,
1919
- remediation="Provide file_path, description, task_category, actual_hours, status_note, verification_type, command, and/or custom_metadata",
2363
+ remediation="Provide title, file_path, description, acceptance_criteria, task_category, actual_hours, status_note, verification_type, command, and/or custom_metadata",
1920
2364
  )
1921
2365
 
1922
2366
  workspace = payload.get("workspace")
@@ -1944,10 +2388,14 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
1944
2388
  )
1945
2389
 
1946
2390
  fields_updated: List[str] = []
2391
+ if payload.get("title") is not None:
2392
+ fields_updated.append("title")
1947
2393
  if payload.get("file_path") is not None:
1948
2394
  fields_updated.append("file_path")
1949
2395
  if payload.get("description") is not None:
1950
2396
  fields_updated.append("description")
2397
+ if acceptance_criteria is not None:
2398
+ fields_updated.append("acceptance_criteria")
1951
2399
  if payload.get("task_category") is not None:
1952
2400
  fields_updated.append("task_category")
1953
2401
  if payload.get("actual_hours") is not None:
@@ -1981,14 +2429,17 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
1981
2429
  result, error = update_task_metadata(
1982
2430
  spec_id=spec_id.strip(),
1983
2431
  task_id=task_id.strip(),
2432
+ title=payload.get("title"),
1984
2433
  file_path=payload.get("file_path"),
1985
2434
  description=payload.get("description"),
2435
+ acceptance_criteria=acceptance_criteria,
1986
2436
  task_category=payload.get("task_category"),
1987
2437
  actual_hours=payload.get("actual_hours"),
1988
2438
  status_note=payload.get("status_note"),
1989
2439
  verification_type=payload.get("verification_type"),
1990
2440
  command=payload.get("command"),
1991
2441
  custom_metadata=custom_metadata,
2442
+ dry_run=dry_run_bool,
1992
2443
  specs_dir=specs_dir,
1993
2444
  )
1994
2445
  elapsed_ms = (time.perf_counter() - start) * 1000
@@ -2022,24 +2473,1146 @@ def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) ->
2022
2473
  return asdict(response)
2023
2474
 
2024
2475
 
2025
- _ACTION_DEFINITIONS = [
2026
- ActionDefinition(
2027
- name="prepare",
2028
- handler=_handle_prepare,
2029
- summary="Prepare next actionable task context",
2030
- ),
2031
- ActionDefinition(
2032
- name="next", handler=_handle_next, summary="Return the next actionable task"
2033
- ),
2034
- ActionDefinition(
2035
- name="info", handler=_handle_info, summary="Fetch task metadata by ID"
2036
- ),
2037
- ActionDefinition(
2038
- name="check-deps",
2039
- handler=_handle_check_deps,
2040
- summary="Analyze task dependencies and blockers",
2041
- ),
2042
- ActionDefinition(name="start", handler=_handle_start, summary="Start a task"),
2476
+ def _handle_move(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
2477
+ """Move a task to a new position or parent.
2478
+
2479
+ Supports two modes:
2480
+ 1. Reorder within parent: only specify position (new_parent=None)
2481
+ 2. Reparent to different phase/task: specify new_parent, optionally position
2482
+
2483
+ Updates task counts on affected parents. Prevents circular references.
2484
+ Emits warnings for cross-phase moves that might affect dependencies.
2485
+ """
2486
+ request_id = _request_id()
2487
+ action = "move"
2488
+ spec_id = payload.get("spec_id")
2489
+ task_id = payload.get("task_id")
2490
+ new_parent = payload.get("parent") # Target parent (phase or task ID)
2491
+ position = payload.get("position") # 1-based position in children list
2492
+
2493
+ # Validate required fields
2494
+ if not isinstance(spec_id, str) or not spec_id.strip():
2495
+ return _validation_error(
2496
+ field="spec_id",
2497
+ action=action,
2498
+ message="Provide a non-empty spec identifier",
2499
+ request_id=request_id,
2500
+ )
2501
+ if not isinstance(task_id, str) or not task_id.strip():
2502
+ return _validation_error(
2503
+ field="task_id",
2504
+ action=action,
2505
+ message="Provide a non-empty task identifier",
2506
+ request_id=request_id,
2507
+ )
2508
+
2509
+ # Validate optional new_parent
2510
+ if new_parent is not None and (
2511
+ not isinstance(new_parent, str) or not new_parent.strip()
2512
+ ):
2513
+ return _validation_error(
2514
+ field="parent",
2515
+ action=action,
2516
+ message="parent must be a non-empty string if provided",
2517
+ request_id=request_id,
2518
+ code=ErrorCode.INVALID_FORMAT,
2519
+ )
2520
+
2521
+ # Validate optional position (must be positive integer)
2522
+ if position is not None:
2523
+ if not isinstance(position, int) or position < 1:
2524
+ return _validation_error(
2525
+ field="position",
2526
+ action=action,
2527
+ message="position must be a positive integer (1-based)",
2528
+ request_id=request_id,
2529
+ code=ErrorCode.INVALID_FORMAT,
2530
+ )
2531
+
2532
+ # Validate dry_run
2533
+ dry_run = payload.get("dry_run", False)
2534
+ if dry_run is not None and not isinstance(dry_run, bool):
2535
+ return _validation_error(
2536
+ field="dry_run",
2537
+ action=action,
2538
+ message="dry_run must be a boolean",
2539
+ request_id=request_id,
2540
+ code=ErrorCode.INVALID_FORMAT,
2541
+ )
2542
+ dry_run_bool = bool(dry_run)
2543
+
2544
+ workspace = payload.get("workspace")
2545
+ specs_dir = _resolve_specs_dir(config, workspace)
2546
+ if specs_dir is None:
2547
+ return _specs_dir_missing_error(request_id)
2548
+
2549
+ start = time.perf_counter()
2550
+
2551
+ # Call the core move_task function
2552
+ result, error, warnings = move_task(
2553
+ spec_id=spec_id.strip(),
2554
+ task_id=task_id.strip(),
2555
+ new_parent=new_parent.strip() if new_parent else None,
2556
+ position=position,
2557
+ dry_run=dry_run_bool,
2558
+ specs_dir=specs_dir,
2559
+ )
2560
+ elapsed_ms = (time.perf_counter() - start) * 1000
2561
+
2562
+ if error or result is None:
2563
+ # Determine appropriate error code based on error message
2564
+ error_lower = (error or "").lower()
2565
+ if "not found" in error_lower:
2566
+ code = ErrorCode.TASK_NOT_FOUND
2567
+ err_type = ErrorType.NOT_FOUND
2568
+ remediation = "Verify the task ID and parent ID exist in the specification"
2569
+ elif "circular" in error_lower:
2570
+ code = ErrorCode.CIRCULAR_DEPENDENCY
2571
+ err_type = ErrorType.CONFLICT
2572
+ remediation = "Task cannot be moved under its own descendants"
2573
+ elif "invalid position" in error_lower:
2574
+ code = ErrorCode.INVALID_POSITION
2575
+ err_type = ErrorType.VALIDATION
2576
+ remediation = "Specify a valid position within the children list"
2577
+ elif "cannot move" in error_lower or "invalid" in error_lower:
2578
+ code = ErrorCode.INVALID_PARENT
2579
+ err_type = ErrorType.VALIDATION
2580
+ remediation = "Specify a valid phase, group, or task as the target parent"
2581
+ else:
2582
+ code = ErrorCode.VALIDATION_ERROR
2583
+ err_type = ErrorType.VALIDATION
2584
+ remediation = "Check task ID, parent, and position parameters"
2585
+
2586
+ return asdict(
2587
+ error_response(
2588
+ error or "Failed to move task",
2589
+ error_code=code,
2590
+ error_type=err_type,
2591
+ remediation=remediation,
2592
+ request_id=request_id,
2593
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2594
+ )
2595
+ )
2596
+
2597
+ # Build success response with warnings if any
2598
+ response = success_response(
2599
+ **result,
2600
+ request_id=request_id,
2601
+ warnings=warnings if warnings else None,
2602
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2603
+ )
2604
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
2605
+ _metrics.counter(
2606
+ _metric(action),
2607
+ labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
2608
+ )
2609
+ return asdict(response)
2610
+
2611
+
2612
+ def _handle_add_dependency(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
2613
+ """Add a dependency relationship between two tasks.
2614
+
2615
+ Manages blocks, blocked_by, and depends relationships.
2616
+ Updates both source and target tasks atomically.
2617
+
2618
+ Dependency types:
2619
+ - blocks: Source task blocks target (target cannot start until source completes)
2620
+ - blocked_by: Source task is blocked by target (source cannot start until target completes)
2621
+ - depends: Soft dependency (informational, doesn't block)
2622
+ """
2623
+ request_id = _request_id()
2624
+ action = "add-dependency"
2625
+ spec_id = payload.get("spec_id")
2626
+ task_id = payload.get("task_id") # Source task
2627
+ target_id = payload.get("target_id") # Target task
2628
+ dependency_type = payload.get("dependency_type", "blocks")
2629
+
2630
+ # Validate required fields
2631
+ if not isinstance(spec_id, str) or not spec_id.strip():
2632
+ return _validation_error(
2633
+ field="spec_id",
2634
+ action=action,
2635
+ message="Provide a non-empty spec identifier",
2636
+ request_id=request_id,
2637
+ )
2638
+ if not isinstance(task_id, str) or not task_id.strip():
2639
+ return _validation_error(
2640
+ field="task_id",
2641
+ action=action,
2642
+ message="Provide a non-empty source task identifier",
2643
+ request_id=request_id,
2644
+ )
2645
+ if not isinstance(target_id, str) or not target_id.strip():
2646
+ return _validation_error(
2647
+ field="target_id",
2648
+ action=action,
2649
+ message="Provide a non-empty target task identifier",
2650
+ request_id=request_id,
2651
+ )
2652
+
2653
+ # Validate dependency_type
2654
+ valid_types = ("blocks", "blocked_by", "depends")
2655
+ if dependency_type not in valid_types:
2656
+ return _validation_error(
2657
+ field="dependency_type",
2658
+ action=action,
2659
+ message=f"Must be one of: {', '.join(valid_types)}",
2660
+ request_id=request_id,
2661
+ code=ErrorCode.INVALID_FORMAT,
2662
+ )
2663
+
2664
+ # Validate dry_run
2665
+ dry_run = payload.get("dry_run", False)
2666
+ if dry_run is not None and not isinstance(dry_run, bool):
2667
+ return _validation_error(
2668
+ field="dry_run",
2669
+ action=action,
2670
+ message="dry_run must be a boolean",
2671
+ request_id=request_id,
2672
+ code=ErrorCode.INVALID_FORMAT,
2673
+ )
2674
+ dry_run_bool = bool(dry_run)
2675
+
2676
+ workspace = payload.get("workspace")
2677
+ specs_dir = _resolve_specs_dir(config, workspace)
2678
+ if specs_dir is None:
2679
+ return _specs_dir_missing_error(request_id)
2680
+
2681
+ start = time.perf_counter()
2682
+
2683
+ # Call the core function
2684
+ result, error = manage_task_dependency(
2685
+ spec_id=spec_id.strip(),
2686
+ source_task_id=task_id.strip(),
2687
+ target_task_id=target_id.strip(),
2688
+ dependency_type=dependency_type,
2689
+ action="add",
2690
+ dry_run=dry_run_bool,
2691
+ specs_dir=specs_dir,
2692
+ )
2693
+ elapsed_ms = (time.perf_counter() - start) * 1000
2694
+
2695
+ if error or result is None:
2696
+ # Determine appropriate error code based on error message
2697
+ error_lower = (error or "").lower()
2698
+ if "not found" in error_lower:
2699
+ code = ErrorCode.TASK_NOT_FOUND
2700
+ err_type = ErrorType.NOT_FOUND
2701
+ remediation = "Verify both task IDs exist in the specification"
2702
+ elif "circular" in error_lower:
2703
+ code = ErrorCode.CIRCULAR_DEPENDENCY
2704
+ err_type = ErrorType.CONFLICT
2705
+ remediation = "This dependency would create a cycle"
2706
+ elif "itself" in error_lower:
2707
+ code = ErrorCode.SELF_REFERENCE
2708
+ err_type = ErrorType.VALIDATION
2709
+ remediation = "A task cannot depend on itself"
2710
+ elif "already exists" in error_lower:
2711
+ code = ErrorCode.DUPLICATE_ENTRY
2712
+ err_type = ErrorType.CONFLICT
2713
+ remediation = "This dependency already exists"
2714
+ else:
2715
+ code = ErrorCode.VALIDATION_ERROR
2716
+ err_type = ErrorType.VALIDATION
2717
+ remediation = "Check task IDs and dependency type"
2718
+
2719
+ return asdict(
2720
+ error_response(
2721
+ error or "Failed to add dependency",
2722
+ error_code=code,
2723
+ error_type=err_type,
2724
+ remediation=remediation,
2725
+ request_id=request_id,
2726
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2727
+ )
2728
+ )
2729
+
2730
+ # Build success response
2731
+ response = success_response(
2732
+ **result,
2733
+ request_id=request_id,
2734
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2735
+ )
2736
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
2737
+ _metrics.counter(
2738
+ _metric(action),
2739
+ labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
2740
+ )
2741
+ return asdict(response)
2742
+
2743
+
2744
+ def _handle_remove_dependency(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
2745
+ """Remove a dependency relationship between two tasks.
2746
+
2747
+ Removes blocks, blocked_by, or depends relationships.
2748
+ Updates both source and target tasks atomically for reciprocal relationships.
2749
+ """
2750
+ request_id = _request_id()
2751
+ action = "remove-dependency"
2752
+ spec_id = payload.get("spec_id")
2753
+ task_id = payload.get("task_id") # Source task
2754
+ target_id = payload.get("target_id") # Target task
2755
+ dependency_type = payload.get("dependency_type", "blocks")
2756
+
2757
+ # Validate required fields
2758
+ if not isinstance(spec_id, str) or not spec_id.strip():
2759
+ return _validation_error(
2760
+ field="spec_id",
2761
+ action=action,
2762
+ message="Provide a non-empty spec identifier",
2763
+ request_id=request_id,
2764
+ )
2765
+ if not isinstance(task_id, str) or not task_id.strip():
2766
+ return _validation_error(
2767
+ field="task_id",
2768
+ action=action,
2769
+ message="Provide a non-empty source task identifier",
2770
+ request_id=request_id,
2771
+ )
2772
+ if not isinstance(target_id, str) or not target_id.strip():
2773
+ return _validation_error(
2774
+ field="target_id",
2775
+ action=action,
2776
+ message="Provide a non-empty target task identifier",
2777
+ request_id=request_id,
2778
+ )
2779
+
2780
+ # Validate dependency_type
2781
+ valid_types = ("blocks", "blocked_by", "depends")
2782
+ if dependency_type not in valid_types:
2783
+ return _validation_error(
2784
+ field="dependency_type",
2785
+ action=action,
2786
+ message=f"Must be one of: {', '.join(valid_types)}",
2787
+ request_id=request_id,
2788
+ code=ErrorCode.INVALID_FORMAT,
2789
+ )
2790
+
2791
+ # Validate dry_run
2792
+ dry_run = payload.get("dry_run", False)
2793
+ if dry_run is not None and not isinstance(dry_run, bool):
2794
+ return _validation_error(
2795
+ field="dry_run",
2796
+ action=action,
2797
+ message="dry_run must be a boolean",
2798
+ request_id=request_id,
2799
+ code=ErrorCode.INVALID_FORMAT,
2800
+ )
2801
+ dry_run_bool = bool(dry_run)
2802
+
2803
+ workspace = payload.get("workspace")
2804
+ specs_dir = _resolve_specs_dir(config, workspace)
2805
+ if specs_dir is None:
2806
+ return _specs_dir_missing_error(request_id)
2807
+
2808
+ start = time.perf_counter()
2809
+
2810
+ # Call the core function
2811
+ result, error = manage_task_dependency(
2812
+ spec_id=spec_id.strip(),
2813
+ source_task_id=task_id.strip(),
2814
+ target_task_id=target_id.strip(),
2815
+ dependency_type=dependency_type,
2816
+ action="remove",
2817
+ dry_run=dry_run_bool,
2818
+ specs_dir=specs_dir,
2819
+ )
2820
+ elapsed_ms = (time.perf_counter() - start) * 1000
2821
+
2822
+ if error or result is None:
2823
+ # Determine appropriate error code based on error message
2824
+ error_lower = (error or "").lower()
2825
+ if "does not exist" in error_lower:
2826
+ # Dependency relationship doesn't exist
2827
+ code = ErrorCode.DEPENDENCY_NOT_FOUND
2828
+ err_type = ErrorType.NOT_FOUND
2829
+ remediation = "This dependency does not exist"
2830
+ elif "not found" in error_lower:
2831
+ # Task or spec not found
2832
+ code = ErrorCode.TASK_NOT_FOUND
2833
+ err_type = ErrorType.NOT_FOUND
2834
+ remediation = "Verify both task IDs exist in the specification"
2835
+ else:
2836
+ code = ErrorCode.VALIDATION_ERROR
2837
+ err_type = ErrorType.VALIDATION
2838
+ remediation = "Check task IDs and dependency type"
2839
+
2840
+ return asdict(
2841
+ error_response(
2842
+ error or "Failed to remove dependency",
2843
+ error_code=code,
2844
+ error_type=err_type,
2845
+ remediation=remediation,
2846
+ request_id=request_id,
2847
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2848
+ )
2849
+ )
2850
+
2851
+ # Build success response
2852
+ response = success_response(
2853
+ **result,
2854
+ request_id=request_id,
2855
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2856
+ )
2857
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
2858
+ _metrics.counter(
2859
+ _metric(action),
2860
+ labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
2861
+ )
2862
+ return asdict(response)
2863
+
2864
+
2865
+ def _handle_add_requirement(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
2866
+ """Add a structured requirement to a task's metadata.
2867
+
2868
+ Requirements are stored in metadata.requirements as a list of objects:
2869
+ [{"id": "req-1", "type": "acceptance", "text": "..."}, ...]
2870
+
2871
+ Each requirement has:
2872
+ - id: Auto-generated unique ID (e.g., "req-1", "req-2")
2873
+ - type: Requirement type (acceptance, technical, constraint)
2874
+ - text: Requirement description text
2875
+ """
2876
+ request_id = _request_id()
2877
+ action = "add-requirement"
2878
+ spec_id = payload.get("spec_id")
2879
+ task_id = payload.get("task_id")
2880
+ requirement_type = payload.get("requirement_type")
2881
+ text = payload.get("text")
2882
+
2883
+ # Validate required fields
2884
+ if not isinstance(spec_id, str) or not spec_id.strip():
2885
+ return _validation_error(
2886
+ field="spec_id",
2887
+ action=action,
2888
+ message="Provide a non-empty spec identifier",
2889
+ request_id=request_id,
2890
+ )
2891
+ if not isinstance(task_id, str) or not task_id.strip():
2892
+ return _validation_error(
2893
+ field="task_id",
2894
+ action=action,
2895
+ message="Provide a non-empty task identifier",
2896
+ request_id=request_id,
2897
+ )
2898
+ if not isinstance(requirement_type, str) or not requirement_type.strip():
2899
+ return _validation_error(
2900
+ field="requirement_type",
2901
+ action=action,
2902
+ message="Provide a requirement type",
2903
+ request_id=request_id,
2904
+ )
2905
+
2906
+ # Validate requirement_type
2907
+ requirement_type_lower = requirement_type.lower().strip()
2908
+ if requirement_type_lower not in REQUIREMENT_TYPES:
2909
+ return _validation_error(
2910
+ field="requirement_type",
2911
+ action=action,
2912
+ message=f"Must be one of: {', '.join(REQUIREMENT_TYPES)}",
2913
+ request_id=request_id,
2914
+ code=ErrorCode.INVALID_FORMAT,
2915
+ )
2916
+
2917
+ # Validate text
2918
+ if not isinstance(text, str) or not text.strip():
2919
+ return _validation_error(
2920
+ field="text",
2921
+ action=action,
2922
+ message="Provide non-empty requirement text",
2923
+ request_id=request_id,
2924
+ )
2925
+
2926
+ # Validate dry_run
2927
+ dry_run = payload.get("dry_run", False)
2928
+ if dry_run is not None and not isinstance(dry_run, bool):
2929
+ return _validation_error(
2930
+ field="dry_run",
2931
+ action=action,
2932
+ message="dry_run must be a boolean",
2933
+ request_id=request_id,
2934
+ code=ErrorCode.INVALID_FORMAT,
2935
+ )
2936
+ dry_run_bool = bool(dry_run)
2937
+
2938
+ workspace = payload.get("workspace")
2939
+ specs_dir = _resolve_specs_dir(config, workspace)
2940
+ if specs_dir is None:
2941
+ return _specs_dir_missing_error(request_id)
2942
+
2943
+ start = time.perf_counter()
2944
+
2945
+ # Call the core function
2946
+ result, error = update_task_requirements(
2947
+ spec_id=spec_id.strip(),
2948
+ task_id=task_id.strip(),
2949
+ action="add",
2950
+ requirement_type=requirement_type_lower,
2951
+ text=text.strip(),
2952
+ dry_run=dry_run_bool,
2953
+ specs_dir=specs_dir,
2954
+ )
2955
+ elapsed_ms = (time.perf_counter() - start) * 1000
2956
+
2957
+ if error or result is None:
2958
+ # Determine appropriate error code based on error message
2959
+ error_lower = (error or "").lower()
2960
+ if "not found" in error_lower:
2961
+ if "specification" in error_lower:
2962
+ code = ErrorCode.SPEC_NOT_FOUND
2963
+ err_type = ErrorType.NOT_FOUND
2964
+ remediation = "Verify the spec ID exists"
2965
+ else:
2966
+ code = ErrorCode.TASK_NOT_FOUND
2967
+ err_type = ErrorType.NOT_FOUND
2968
+ remediation = "Verify the task ID exists in the specification"
2969
+ elif "maximum" in error_lower or "limit" in error_lower:
2970
+ code = ErrorCode.LIMIT_EXCEEDED
2971
+ err_type = ErrorType.VALIDATION
2972
+ remediation = "Remove some requirements before adding new ones"
2973
+ elif "requirement_type" in error_lower:
2974
+ code = ErrorCode.INVALID_FORMAT
2975
+ err_type = ErrorType.VALIDATION
2976
+ remediation = f"Use one of: {', '.join(REQUIREMENT_TYPES)}"
2977
+ else:
2978
+ code = ErrorCode.VALIDATION_ERROR
2979
+ err_type = ErrorType.VALIDATION
2980
+ remediation = "Check task ID and requirement fields"
2981
+
2982
+ return asdict(
2983
+ error_response(
2984
+ error or "Failed to add requirement",
2985
+ error_code=code,
2986
+ error_type=err_type,
2987
+ remediation=remediation,
2988
+ request_id=request_id,
2989
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2990
+ )
2991
+ )
2992
+
2993
+ # Build success response
2994
+ response = success_response(
2995
+ **result,
2996
+ request_id=request_id,
2997
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2998
+ )
2999
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
3000
+ _metrics.counter(
3001
+ _metric(action),
3002
+ labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
3003
+ )
3004
+ return asdict(response)
3005
+
3006
+
3007
+ _VALID_NODE_TYPES = {"task", "verify", "phase", "subtask"}
3008
+ # Note: VALID_VERIFICATION_TYPES imported from foundry_mcp.core.validation
3009
+
3010
+
3011
+ def _match_nodes_for_batch(
3012
+ hierarchy: Dict[str, Any],
3013
+ *,
3014
+ phase_id: Optional[str] = None,
3015
+ pattern: Optional[str] = None,
3016
+ node_type: Optional[str] = None,
3017
+ ) -> List[str]:
3018
+ """Filter nodes by phase_id, regex pattern on title/id, and/or node_type.
3019
+
3020
+ All provided filters are combined with AND logic.
3021
+ Returns list of matching node IDs.
3022
+ """
3023
+ matched: List[str] = []
3024
+ compiled_pattern = None
3025
+ if pattern:
3026
+ try:
3027
+ compiled_pattern = re.compile(pattern, re.IGNORECASE)
3028
+ except re.error:
3029
+ return [] # Invalid regex returns empty
3030
+
3031
+ for node_id, node_data in hierarchy.items():
3032
+ if node_id == "spec-root":
3033
+ continue
3034
+
3035
+ # Filter by node_type if specified
3036
+ if node_type and node_data.get("type") != node_type:
3037
+ continue
3038
+
3039
+ # Filter by phase_id if specified (must be under that phase)
3040
+ if phase_id:
3041
+ node_parent = node_data.get("parent")
3042
+ # Direct children of the phase
3043
+ if node_parent != phase_id:
3044
+ # Check if it's a nested child (e.g., subtask under task under phase)
3045
+ parent_node = hierarchy.get(node_parent, {})
3046
+ if parent_node.get("parent") != phase_id:
3047
+ continue
3048
+
3049
+ # Filter by regex pattern on title or node_id
3050
+ if compiled_pattern:
3051
+ title = node_data.get("title", "")
3052
+ if not (compiled_pattern.search(title) or compiled_pattern.search(node_id)):
3053
+ continue
3054
+
3055
+ matched.append(node_id)
3056
+
3057
+ return sorted(matched)
3058
+
3059
+
3060
+ def _handle_metadata_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
3061
+ """Batch update metadata across multiple tasks matching specified criteria.
3062
+
3063
+ Filters (combined with AND logic):
3064
+ - status_filter: Filter by task status (pending, in_progress, completed, blocked)
3065
+ - parent_filter: Filter by parent node ID (e.g., phase-1, task-2-1)
3066
+ - pattern: Regex pattern to match task titles/IDs
3067
+
3068
+ Legacy filters (deprecated, use parent_filter instead):
3069
+ - phase_id: Alias for parent_filter
3070
+
3071
+ Metadata fields supported:
3072
+ - description, file_path, estimated_hours, category, labels, owners
3073
+ - update_metadata: Dict for custom metadata fields (verification_type, command, etc.)
3074
+ """
3075
+ request_id = _request_id()
3076
+ action = "metadata-batch"
3077
+ start = time.perf_counter()
3078
+
3079
+ # Required: spec_id
3080
+ spec_id = payload.get("spec_id")
3081
+ if not isinstance(spec_id, str) or not spec_id.strip():
3082
+ return _validation_error(
3083
+ field="spec_id",
3084
+ action=action,
3085
+ message="Provide a non-empty spec identifier",
3086
+ request_id=request_id,
3087
+ )
3088
+ spec_id = spec_id.strip()
3089
+
3090
+ # Extract filter parameters
3091
+ status_filter = payload.get("status_filter")
3092
+ parent_filter = payload.get("parent_filter")
3093
+ phase_id = payload.get("phase_id") # Legacy alias for parent_filter
3094
+ pattern = payload.get("pattern")
3095
+
3096
+ # Use phase_id as parent_filter if parent_filter not provided (backwards compat)
3097
+ if parent_filter is None and phase_id is not None:
3098
+ parent_filter = phase_id
3099
+
3100
+ # Validate status_filter
3101
+ if status_filter is not None:
3102
+ if not isinstance(status_filter, str) or status_filter not in _ALLOWED_STATUS:
3103
+ return _validation_error(
3104
+ field="status_filter",
3105
+ action=action,
3106
+ message=f"status_filter must be one of: {sorted(_ALLOWED_STATUS)}",
3107
+ request_id=request_id,
3108
+ code=ErrorCode.INVALID_FORMAT,
3109
+ )
3110
+
3111
+ # Validate parent_filter
3112
+ if parent_filter is not None:
3113
+ if not isinstance(parent_filter, str) or not parent_filter.strip():
3114
+ return _validation_error(
3115
+ field="parent_filter",
3116
+ action=action,
3117
+ message="parent_filter must be a non-empty string",
3118
+ request_id=request_id,
3119
+ code=ErrorCode.INVALID_FORMAT,
3120
+ )
3121
+ parent_filter = parent_filter.strip()
3122
+
3123
+ # Validate pattern
3124
+ if pattern is not None:
3125
+ if not isinstance(pattern, str) or not pattern.strip():
3126
+ return _validation_error(
3127
+ field="pattern",
3128
+ action=action,
3129
+ message="pattern must be a non-empty string",
3130
+ request_id=request_id,
3131
+ code=ErrorCode.INVALID_FORMAT,
3132
+ )
3133
+ try:
3134
+ re.compile(pattern)
3135
+ except re.error as exc:
3136
+ return _validation_error(
3137
+ field="pattern",
3138
+ action=action,
3139
+ message=f"Invalid regex pattern: {exc}",
3140
+ request_id=request_id,
3141
+ code=ErrorCode.INVALID_FORMAT,
3142
+ )
3143
+ pattern = pattern.strip()
3144
+
3145
+ # At least one filter must be provided
3146
+ if not any([status_filter, parent_filter, pattern]):
3147
+ return _validation_error(
3148
+ field="status_filter",
3149
+ action=action,
3150
+ message="Provide at least one filter: status_filter, parent_filter, or pattern",
3151
+ request_id=request_id,
3152
+ code=ErrorCode.MISSING_REQUIRED,
3153
+ remediation="Specify status_filter, parent_filter (or phase_id), and/or pattern to target tasks",
3154
+ )
3155
+
3156
+ # Extract metadata fields
3157
+ description = payload.get("description")
3158
+ file_path = payload.get("file_path")
3159
+ estimated_hours = payload.get("estimated_hours")
3160
+ category = payload.get("category")
3161
+ labels = payload.get("labels")
3162
+ owners = payload.get("owners")
3163
+ update_metadata = payload.get("update_metadata") # Dict for custom fields
3164
+ dry_run = payload.get("dry_run", False)
3165
+
3166
+ # Validate metadata fields
3167
+ if description is not None and not isinstance(description, str):
3168
+ return _validation_error(
3169
+ field="description",
3170
+ action=action,
3171
+ message="description must be a string",
3172
+ request_id=request_id,
3173
+ code=ErrorCode.INVALID_FORMAT,
3174
+ )
3175
+
3176
+ if file_path is not None and not isinstance(file_path, str):
3177
+ return _validation_error(
3178
+ field="file_path",
3179
+ action=action,
3180
+ message="file_path must be a string",
3181
+ request_id=request_id,
3182
+ code=ErrorCode.INVALID_FORMAT,
3183
+ )
3184
+
3185
+ if estimated_hours is not None:
3186
+ if not isinstance(estimated_hours, (int, float)) or estimated_hours < 0:
3187
+ return _validation_error(
3188
+ field="estimated_hours",
3189
+ action=action,
3190
+ message="estimated_hours must be a non-negative number",
3191
+ request_id=request_id,
3192
+ code=ErrorCode.INVALID_FORMAT,
3193
+ )
3194
+
3195
+ if category is not None and not isinstance(category, str):
3196
+ return _validation_error(
3197
+ field="category",
3198
+ action=action,
3199
+ message="category must be a string",
3200
+ request_id=request_id,
3201
+ code=ErrorCode.INVALID_FORMAT,
3202
+ )
3203
+
3204
+ if labels is not None:
3205
+ if not isinstance(labels, dict) or not all(
3206
+ isinstance(k, str) and isinstance(v, str) for k, v in labels.items()
3207
+ ):
3208
+ return _validation_error(
3209
+ field="labels",
3210
+ action=action,
3211
+ message="labels must be a dict with string keys and values",
3212
+ request_id=request_id,
3213
+ code=ErrorCode.INVALID_FORMAT,
3214
+ )
3215
+
3216
+ if owners is not None:
3217
+ if not isinstance(owners, list) or not all(isinstance(o, str) for o in owners):
3218
+ return _validation_error(
3219
+ field="owners",
3220
+ action=action,
3221
+ message="owners must be a list of strings",
3222
+ request_id=request_id,
3223
+ code=ErrorCode.INVALID_FORMAT,
3224
+ )
3225
+
3226
+ if update_metadata is not None and not isinstance(update_metadata, dict):
3227
+ return _validation_error(
3228
+ field="update_metadata",
3229
+ action=action,
3230
+ message="update_metadata must be a dict",
3231
+ request_id=request_id,
3232
+ code=ErrorCode.INVALID_FORMAT,
3233
+ )
3234
+
3235
+ if dry_run is not None and not isinstance(dry_run, bool):
3236
+ return _validation_error(
3237
+ field="dry_run",
3238
+ action=action,
3239
+ message="dry_run must be a boolean",
3240
+ request_id=request_id,
3241
+ code=ErrorCode.INVALID_FORMAT,
3242
+ )
3243
+
3244
+ # At least one metadata field must be provided
3245
+ has_metadata = any([
3246
+ description is not None,
3247
+ file_path is not None,
3248
+ estimated_hours is not None,
3249
+ category is not None,
3250
+ labels is not None,
3251
+ owners is not None,
3252
+ update_metadata,
3253
+ ])
3254
+ if not has_metadata:
3255
+ return _validation_error(
3256
+ field="description",
3257
+ action=action,
3258
+ message="Provide at least one metadata field to update",
3259
+ request_id=request_id,
3260
+ code=ErrorCode.MISSING_REQUIRED,
3261
+ remediation="Specify description, file_path, estimated_hours, category, labels, owners, or update_metadata",
3262
+ )
3263
+
3264
+ # Resolve specs directory
3265
+ workspace = payload.get("workspace")
3266
+ specs_dir = _resolve_specs_dir(config, workspace)
3267
+ if specs_dir is None:
3268
+ return _specs_dir_missing_error(request_id)
3269
+
3270
+ # Delegate to core helper
3271
+ result, error = batch_update_tasks(
3272
+ spec_id,
3273
+ status_filter=status_filter,
3274
+ parent_filter=parent_filter,
3275
+ pattern=pattern,
3276
+ description=description,
3277
+ file_path=file_path,
3278
+ estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
3279
+ category=category,
3280
+ labels=labels,
3281
+ owners=owners,
3282
+ custom_metadata=update_metadata,
3283
+ dry_run=bool(dry_run),
3284
+ specs_dir=specs_dir,
3285
+ )
3286
+
3287
+ elapsed_ms = (time.perf_counter() - start) * 1000
3288
+
3289
+ if error:
3290
+ _metrics.counter(_metric(action), labels={"status": "error"})
3291
+ # Map helper errors to response-v2 format
3292
+ if "not found" in error.lower():
3293
+ return asdict(
3294
+ error_response(
3295
+ error,
3296
+ error_code=ErrorCode.NOT_FOUND,
3297
+ error_type=ErrorType.NOT_FOUND,
3298
+ remediation="Check spec_id and parent_filter values",
3299
+ request_id=request_id,
3300
+ )
3301
+ )
3302
+ if "at least one" in error.lower() or "must be" in error.lower():
3303
+ return asdict(
3304
+ error_response(
3305
+ error,
3306
+ error_code=ErrorCode.VALIDATION_ERROR,
3307
+ error_type=ErrorType.VALIDATION,
3308
+ remediation="Check filter and metadata parameters",
3309
+ request_id=request_id,
3310
+ )
3311
+ )
3312
+ return asdict(
3313
+ error_response(
3314
+ error,
3315
+ error_code=ErrorCode.INTERNAL_ERROR,
3316
+ error_type=ErrorType.INTERNAL,
3317
+ remediation="Check filesystem permissions and retry",
3318
+ request_id=request_id,
3319
+ )
3320
+ )
3321
+
3322
+ assert result is not None
3323
+
3324
+ # Build response with response-v2 envelope
3325
+ warnings: List[str] = result.get("warnings", [])
3326
+ if result["matched_count"] > _TASK_WARNING_THRESHOLD and not warnings:
3327
+ warnings.append(
3328
+ f"Updated {result['matched_count']} tasks; consider using more specific filters."
3329
+ )
3330
+
3331
+ response = success_response(
3332
+ spec_id=result["spec_id"],
3333
+ matched_count=result["matched_count"],
3334
+ updated_count=result["updated_count"],
3335
+ skipped_count=result.get("skipped_count", 0),
3336
+ nodes=result["nodes"],
3337
+ filters=result["filters"],
3338
+ metadata_applied=result["metadata_applied"],
3339
+ dry_run=result["dry_run"],
3340
+ request_id=request_id,
3341
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
3342
+ )
3343
+
3344
+ response_dict = asdict(response)
3345
+ if warnings:
3346
+ meta = response_dict.setdefault("meta", {})
3347
+ meta["warnings"] = warnings
3348
+ if result.get("skipped_tasks"):
3349
+ response_dict["data"]["skipped_tasks"] = result["skipped_tasks"]
3350
+
3351
+ _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
3352
+ _metrics.counter(_metric(action), labels={"status": "success"})
3353
+ return response_dict
3354
+
3355
+
3356
def _handle_fix_verification_types(
    *, config: ServerConfig, payload: Dict[str, Any]
) -> dict:
    """Normalize ``verification_type`` metadata on every verify node in a spec.

    Resolution order for each verify node:
      * missing type       -> default to ``"run-tests"``
      * known legacy value -> remap via ``VERIFICATION_TYPE_MAPPING``
      * unrecognized value -> fall back to ``"manual"``

    When ``dry_run`` is true the proposed fixes are reported but the spec
    file is left untouched.
    """
    request_id = _request_id()
    action = "fix-verification-types"

    # spec_id is mandatory and must be a non-blank string.
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    raw_dry_run = payload.get("dry_run", False)
    if raw_dry_run is not None and not isinstance(raw_dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    preview_only = bool(raw_dry_run)

    # Resolve the workspace and load the spec document.
    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None

    start = time.perf_counter()
    hierarchy = spec_data.get("hierarchy", {})

    def _plan_fix(
        node_id: str, node_data: Dict[str, Any]
    ) -> Optional[Dict[str, Any]]:
        """Return a fix record for one verify node, or None if it is valid."""
        current = node_data.get("metadata", {}).get("verification_type")
        if current is None:
            # No type recorded at all -> apply the default.
            issue, replacement = "missing", "run-tests"
        elif current in VALID_VERIFICATION_TYPES:
            return None
        else:
            # Unknown value: try the legacy mapping, otherwise force 'manual'.
            mapped = VERIFICATION_TYPE_MAPPING.get(current)
            if mapped:
                issue, replacement = "legacy", mapped
            else:
                issue, replacement = "invalid", "manual"
        return {
            "node_id": node_id,
            "title": node_data.get("title", ""),
            "issue": issue,
            "old_value": current,
            "new_value": replacement,
        }

    fixes: List[Dict[str, Any]] = []
    for node_id, node_data in hierarchy.items():
        if node_data.get("type") != "verify":
            continue
        plan = _plan_fix(node_id, node_data)
        if plan is None:
            continue
        fixes.append(plan)
        if not preview_only:
            # Mutate the node in place; persisted below in one save.
            node_data.setdefault("metadata", {})["verification_type"] = plan[
                "new_value"
            ]

    # Persist only when something changed and we are not previewing.
    if fixes and not preview_only:
        if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
            return asdict(
                error_response(
                    "Failed to save spec after fixing verification types",
                    error_code=ErrorCode.INTERNAL_ERROR,
                    error_type=ErrorType.INTERNAL,
                    remediation="Check filesystem permissions and retry",
                    request_id=request_id,
                )
            )

    elapsed_ms = (time.perf_counter() - start) * 1000

    # Tally fixes per issue category in a single pass.
    tally = {"missing": 0, "legacy": 0, "invalid": 0}
    for entry in fixes:
        tally[entry["issue"]] += 1

    response = success_response(
        spec_id=spec_id.strip(),
        total_fixes=len(fixes),
        applied_count=0 if preview_only else len(fixes),
        fixes=fixes,
        summary={
            "missing_set_to_run_tests": tally["missing"],
            "legacy_mapped": tally["legacy"],
            "invalid_set_to_manual": tally["invalid"],
        },
        valid_types=sorted(VALID_VERIFICATION_TYPES),
        legacy_mappings=VERIFICATION_TYPE_MAPPING,
        dry_run=preview_only,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
3494
+
3495
+
3496
def _handle_session_config(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """
    Handle session-config action: get/set autonomous mode preferences.

    This action manages the ephemeral autonomous session state, allowing
    agents to enable/disable autonomous mode and track task completion
    during autonomous execution.

    Parameters:
        get: If true, just return current session config without changes
        auto_mode: Set autonomous mode enabled (true) or disabled (false)

    Returns:
        Current session configuration including autonomous state
    """
    from datetime import datetime, timezone

    request_id = _request_id()
    action = "session-config"
    start = time.perf_counter()

    # Get parameters
    get_only = payload.get("get", False)
    auto_mode = payload.get("auto_mode")

    # Get the context tracker and session
    tracker = get_context_tracker()
    session = tracker.get_or_create_session()

    # Initialize autonomous state lazily so both paths can rely on it.
    if session.autonomous is None:
        session.autonomous = AutonomousSession()

    # Read-only request: report current state without mutating anything.
    if get_only:
        elapsed_ms = (time.perf_counter() - start) * 1000
        response = success_response(
            session_id=session.session_id,
            autonomous=session.autonomous.to_dict(),
            message="Current session configuration",
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
        _metrics.counter(_metric(action), labels={"status": "success", "operation": "get"})
        return asdict(response)

    # Apply auto_mode when supplied; validation happens before any mutation.
    if auto_mode is not None:
        if not isinstance(auto_mode, bool):
            return _validation_error(
                field="auto_mode",
                action=action,
                message="auto_mode must be a boolean (true/false)",
                request_id=request_id,
            )

        previous_enabled = session.autonomous.enabled
        session.autonomous.enabled = auto_mode

        if auto_mode and not previous_enabled:
            # Starting autonomous mode: stamp a UTC start time and reset
            # per-run progress counters.
            session.autonomous.started_at = (
                datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
            )
            session.autonomous.tasks_completed = 0
            session.autonomous.pause_reason = None
        elif not auto_mode and previous_enabled:
            # Stopping autonomous mode at the user's request.
            session.autonomous.pause_reason = "user"

    # Always return a response dict. Without this unconditional tail a call
    # supplying neither `get` nor `auto_mode` would fall through and
    # implicitly return None, violating the declared -> dict contract.
    elapsed_ms = (time.perf_counter() - start) * 1000
    response = success_response(
        session_id=session.session_id,
        autonomous=session.autonomous.to_dict(),
        message="Autonomous mode enabled" if session.autonomous.enabled else "Autonomous mode disabled",
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.counter(_metric(action), labels={"status": "success", "operation": "set"})
    return asdict(response)
3577
+
3578
+ _ACTION_DEFINITIONS = [
3579
+ ActionDefinition(
3580
+ name="prepare",
3581
+ handler=_handle_prepare,
3582
+ summary="Prepare next actionable task context",
3583
+ ),
3584
+ ActionDefinition(
3585
+ name="prepare-batch",
3586
+ handler=_handle_prepare_batch,
3587
+ summary="Prepare multiple independent tasks for parallel execution",
3588
+ ),
3589
+ ActionDefinition(
3590
+ name="start-batch",
3591
+ handler=_handle_start_batch,
3592
+ summary="Atomically start multiple tasks as in_progress",
3593
+ ),
3594
+ ActionDefinition(
3595
+ name="complete-batch",
3596
+ handler=_handle_complete_batch,
3597
+ summary="Complete multiple tasks with partial failure support",
3598
+ ),
3599
+ ActionDefinition(
3600
+ name="reset-batch",
3601
+ handler=_handle_reset_batch,
3602
+ summary="Reset stale or specified in_progress tasks to pending",
3603
+ ),
3604
+ ActionDefinition(
3605
+ name="next", handler=_handle_next, summary="Return the next actionable task"
3606
+ ),
3607
+ ActionDefinition(
3608
+ name="info", handler=_handle_info, summary="Fetch task metadata by ID"
3609
+ ),
3610
+ ActionDefinition(
3611
+ name="check-deps",
3612
+ handler=_handle_check_deps,
3613
+ summary="Analyze task dependencies and blockers",
3614
+ ),
3615
+ ActionDefinition(name="start", handler=_handle_start, summary="Start a task"),
2043
3616
  ActionDefinition(
2044
3617
  name="complete", handler=_handle_complete, summary="Complete a task"
2045
3618
  ),
@@ -2057,6 +3630,26 @@ _ACTION_DEFINITIONS = [
2057
3630
  ),
2058
3631
  ActionDefinition(name="add", handler=_handle_add, summary="Add a task"),
2059
3632
  ActionDefinition(name="remove", handler=_handle_remove, summary="Remove a task"),
3633
+ ActionDefinition(
3634
+ name="move",
3635
+ handler=_handle_move,
3636
+ summary="Move task to new position or parent",
3637
+ ),
3638
+ ActionDefinition(
3639
+ name="add-dependency",
3640
+ handler=_handle_add_dependency,
3641
+ summary="Add a dependency between two tasks",
3642
+ ),
3643
+ ActionDefinition(
3644
+ name="remove-dependency",
3645
+ handler=_handle_remove_dependency,
3646
+ summary="Remove a dependency between two tasks",
3647
+ ),
3648
+ ActionDefinition(
3649
+ name="add-requirement",
3650
+ handler=_handle_add_requirement,
3651
+ summary="Add a structured requirement to a task",
3652
+ ),
2060
3653
  ActionDefinition(
2061
3654
  name="update-estimate",
2062
3655
  handler=_handle_update_estimate,
@@ -2067,6 +3660,16 @@ _ACTION_DEFINITIONS = [
2067
3660
  handler=_handle_update_metadata,
2068
3661
  summary="Update task metadata fields",
2069
3662
  ),
3663
+ ActionDefinition(
3664
+ name="metadata-batch",
3665
+ handler=_handle_metadata_batch,
3666
+ summary="Batch update metadata across multiple nodes matching filters",
3667
+ ),
3668
+ ActionDefinition(
3669
+ name="fix-verification-types",
3670
+ handler=_handle_fix_verification_types,
3671
+ summary="Fix invalid/missing verification types across verify nodes",
3672
+ ),
2070
3673
  ActionDefinition(
2071
3674
  name="progress",
2072
3675
  handler=_handle_progress,
@@ -2087,6 +3690,11 @@ _ACTION_DEFINITIONS = [
2087
3690
  handler=_handle_hierarchy,
2088
3691
  summary="Return paginated hierarchy slices",
2089
3692
  ),
3693
+ ActionDefinition(
3694
+ name="session-config",
3695
+ handler=_handle_session_config,
3696
+ summary="Get/set autonomous session configuration",
3697
+ ),
2090
3698
  ]
2091
3699
 
2092
3700
  _TASK_ROUTER = ActionRouter(tool_name="task", actions=_ACTION_DEFINITIONS)
@@ -2140,6 +3748,7 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
2140
3748
  resolution: Optional[str] = None,
2141
3749
  title: Optional[str] = None,
2142
3750
  description: Optional[str] = None,
3751
+ acceptance_criteria: Optional[List[str]] = None,
2143
3752
  task_type: str = "task",
2144
3753
  estimated_hours: Optional[float] = None,
2145
3754
  position: Optional[int] = None,
@@ -2155,6 +3764,18 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
2155
3764
  dry_run: bool = False,
2156
3765
  max_depth: int = 2,
2157
3766
  include_metadata: bool = False,
3767
+ # metadata-batch specific parameters
3768
+ phase_id: Optional[str] = None,
3769
+ pattern: Optional[str] = None,
3770
+ node_type: Optional[str] = None,
3771
+ owners: Optional[List[str]] = None,
3772
+ labels: Optional[Dict[str, str]] = None,
3773
+ category: Optional[str] = None,
3774
+ parent_filter: Optional[str] = None,
3775
+ update_metadata: Optional[Dict[str, Any]] = None,
3776
+ # session-config specific parameters
3777
+ get: bool = False,
3778
+ auto_mode: Optional[bool] = None,
2158
3779
  ) -> dict:
2159
3780
  payload = {
2160
3781
  "spec_id": spec_id,
@@ -2176,6 +3797,7 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
2176
3797
  "resolution": resolution,
2177
3798
  "title": title,
2178
3799
  "description": description,
3800
+ "acceptance_criteria": acceptance_criteria,
2179
3801
  "task_type": task_type,
2180
3802
  "estimated_hours": estimated_hours,
2181
3803
  "position": position,
@@ -2191,6 +3813,18 @@ def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
2191
3813
  "dry_run": dry_run,
2192
3814
  "max_depth": max_depth,
2193
3815
  "include_metadata": include_metadata,
3816
+ # metadata-batch specific
3817
+ "phase_id": phase_id,
3818
+ "pattern": pattern,
3819
+ "node_type": node_type,
3820
+ "owners": owners,
3821
+ "labels": labels,
3822
+ "category": category,
3823
+ "parent_filter": parent_filter,
3824
+ "update_metadata": update_metadata,
3825
+ # session-config specific
3826
+ "get": get,
3827
+ "auto_mode": auto_mode,
2194
3828
  }
2195
3829
  return _dispatch_task_action(action=action, payload=payload, config=config)
2196
3830