foundry-mcp 0.3.3__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (60)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/commands/plan.py +10 -3
  3. foundry_mcp/cli/commands/review.py +19 -4
  4. foundry_mcp/cli/commands/specs.py +38 -208
  5. foundry_mcp/cli/output.py +3 -3
  6. foundry_mcp/config.py +235 -5
  7. foundry_mcp/core/ai_consultation.py +146 -9
  8. foundry_mcp/core/discovery.py +6 -6
  9. foundry_mcp/core/error_store.py +2 -2
  10. foundry_mcp/core/intake.py +933 -0
  11. foundry_mcp/core/llm_config.py +20 -2
  12. foundry_mcp/core/metrics_store.py +2 -2
  13. foundry_mcp/core/progress.py +70 -0
  14. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  15. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  16. foundry_mcp/core/prompts/plan_review.py +5 -1
  17. foundry_mcp/core/providers/claude.py +6 -47
  18. foundry_mcp/core/providers/codex.py +6 -57
  19. foundry_mcp/core/providers/cursor_agent.py +3 -44
  20. foundry_mcp/core/providers/gemini.py +6 -57
  21. foundry_mcp/core/providers/opencode.py +35 -5
  22. foundry_mcp/core/research/__init__.py +68 -0
  23. foundry_mcp/core/research/memory.py +425 -0
  24. foundry_mcp/core/research/models.py +437 -0
  25. foundry_mcp/core/research/workflows/__init__.py +22 -0
  26. foundry_mcp/core/research/workflows/base.py +204 -0
  27. foundry_mcp/core/research/workflows/chat.py +271 -0
  28. foundry_mcp/core/research/workflows/consensus.py +396 -0
  29. foundry_mcp/core/research/workflows/ideate.py +682 -0
  30. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  31. foundry_mcp/core/responses.py +450 -0
  32. foundry_mcp/core/spec.py +2438 -236
  33. foundry_mcp/core/task.py +1064 -19
  34. foundry_mcp/core/testing.py +512 -123
  35. foundry_mcp/core/validation.py +313 -42
  36. foundry_mcp/dashboard/components/charts.py +0 -57
  37. foundry_mcp/dashboard/launcher.py +11 -0
  38. foundry_mcp/dashboard/views/metrics.py +25 -35
  39. foundry_mcp/dashboard/views/overview.py +1 -65
  40. foundry_mcp/resources/specs.py +25 -25
  41. foundry_mcp/schemas/intake-schema.json +89 -0
  42. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  43. foundry_mcp/server.py +38 -0
  44. foundry_mcp/tools/unified/__init__.py +4 -2
  45. foundry_mcp/tools/unified/authoring.py +2423 -267
  46. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  47. foundry_mcp/tools/unified/environment.py +235 -6
  48. foundry_mcp/tools/unified/error.py +18 -1
  49. foundry_mcp/tools/unified/lifecycle.py +8 -0
  50. foundry_mcp/tools/unified/plan.py +113 -1
  51. foundry_mcp/tools/unified/research.py +658 -0
  52. foundry_mcp/tools/unified/review.py +370 -16
  53. foundry_mcp/tools/unified/spec.py +367 -0
  54. foundry_mcp/tools/unified/task.py +1163 -48
  55. foundry_mcp/tools/unified/test.py +69 -8
  56. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
  57. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
  58. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
  59. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
  60. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
@@ -3,6 +3,7 @@
  from __future__ import annotations

  import logging
+ import re
  import time
  from dataclasses import asdict
  from pathlib import Path
@@ -12,6 +13,8 @@ from mcp.server.fastmcp import FastMCP

  from foundry_mcp.config import ServerConfig
  from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
+ from foundry_mcp.core.feature_flags import FeatureFlag, FlagState, get_flag_service
+ from foundry_mcp.core.intake import IntakeStore, LockAcquisitionError, INTAKE_ID_PATTERN
  from foundry_mcp.core.naming import canonical_tool
  from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
  from foundry_mcp.core.responses import (
@@ -24,17 +27,27 @@ from foundry_mcp.core.responses import (
  from foundry_mcp.core.spec import (
  ASSUMPTION_TYPES,
  CATEGORIES,
+ PHASE_TEMPLATES,
  TEMPLATES,
  add_assumption,
  add_phase,
+ add_phase_bulk,
  add_revision,
+ apply_phase_template,
  create_spec,
+ find_replace_in_spec,
  find_specs_directory,
+ generate_spec_data,
+ get_phase_template_structure,
  list_assumptions,
  load_spec,
+ move_phase,
  remove_phase,
+ rollback_spec,
  update_frontmatter,
+ update_phase_metadata,
  )
+ from foundry_mcp.core.validation import validate_spec
  from foundry_mcp.tools.unified.router import (
  ActionDefinition,
  ActionRouter,
@@ -44,15 +57,56 @@ from foundry_mcp.tools.unified.router import (
  logger = logging.getLogger(__name__)
  _metrics = get_metrics()

+ # Register intake_tools feature flag
+ _flag_service = get_flag_service()
+ try:
+ _flag_service.register(
+ FeatureFlag(
+ name="intake_tools",
+ description="Bikelane intake queue tools (add, list, dismiss)",
+ state=FlagState.EXPERIMENTAL,
+ default_enabled=False,
+ )
+ )
+ except ValueError:
+ pass # Flag already registered
+
+
+ def _intake_feature_flag_blocked(request_id: str) -> Optional[dict]:
+ """Check if intake tools are blocked by feature flag."""
+ if _flag_service.is_enabled("intake_tools"):
+ return None
+
+ return asdict(
+ error_response(
+ "Intake tools are disabled by feature flag",
+ error_code=ErrorCode.FEATURE_DISABLED,
+ error_type=ErrorType.FEATURE_FLAG,
+ data={"feature": "intake_tools"},
+ remediation="Enable the 'intake_tools' feature flag to use intake actions.",
+ request_id=request_id,
+ )
+ )
+
+
  _ACTION_SUMMARY = {
  "spec-create": "Scaffold a new SDD specification",
  "spec-template": "List/show/apply spec templates",
  "spec-update-frontmatter": "Update a top-level metadata field",
+ "spec-find-replace": "Find and replace text across spec titles and descriptions",
+ "spec-rollback": "Restore a spec from a backup timestamp",
  "phase-add": "Add a new phase under spec-root with verification scaffolding",
+ "phase-add-bulk": "Add a phase with pre-defined tasks in a single atomic operation",
+ "phase-template": "List/show/apply phase templates to add pre-configured phases",
+ "phase-move": "Reorder a phase within spec-root children",
+ "phase-update-metadata": "Update metadata fields of an existing phase",
  "phase-remove": "Remove an existing phase (and optionally dependents)",
  "assumption-add": "Append an assumption entry to spec metadata",
  "assumption-list": "List recorded assumptions for a spec",
  "revision-add": "Record a revision entry in the spec history",
+ "intake-add": "Capture a new work idea in the bikelane intake queue",
+ "intake-list": "List new intake items awaiting triage in FIFO order",
+ "intake-dismiss": "Dismiss an intake item from the triage queue",
  }


@@ -159,7 +213,7 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  code=ErrorCode.MISSING_REQUIRED,
  )

- template = payload.get("template") or "medium"
+ template = payload.get("template") or "empty"
  if not isinstance(template, str):
  return _validation_error(
  field="template",
@@ -168,14 +222,14 @@
  request_id=request_id,
  code=ErrorCode.INVALID_FORMAT,
  )
- template = template.strip() or "medium"
+ template = template.strip() or "empty"
  if template not in TEMPLATES:
  return _validation_error(
  field="template",
  action=action,
- message=f"Template must be one of: {', '.join(TEMPLATES)}",
+ message=f"Only 'empty' template is supported. Use phase templates to add structure.",
  request_id=request_id,
- remediation=f"Use one of: {', '.join(TEMPLATES)}",
+ remediation="Use template='empty' and add phases via phase-add-bulk or phase-template apply",
  )

  category = payload.get("category") or "implementation"
@@ -197,6 +251,16 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  remediation=f"Use one of: {', '.join(CATEGORIES)}",
  )

+ mission = payload.get("mission")
+ if mission is not None and not isinstance(mission, str):
+ return _validation_error(
+ field="mission",
+ action=action,
+ message="mission must be a string",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ )
+
  dry_run = payload.get("dry_run", False)
  if dry_run is not None and not isinstance(dry_run, bool):
  return _validation_error(
@@ -222,14 +286,49 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  return _specs_directory_missing_error(request_id)

  if dry_run:
+ # Generate spec data for preflight validation
+ spec_data, gen_error = generate_spec_data(
+ name=name.strip(),
+ template=template,
+ category=category,
+ mission=mission,
+ )
+ if gen_error:
+ return _validation_error(
+ field="spec",
+ action=action,
+ message=gen_error,
+ request_id=request_id,
+ code=ErrorCode.VALIDATION_ERROR,
+ )
+
+ # Run full validation on generated spec
+ validation_result = validate_spec(spec_data)
+ diagnostics = [
+ {
+ "code": d.code,
+ "message": d.message,
+ "severity": d.severity,
+ "location": d.location,
+ "suggested_fix": d.suggested_fix,
+ }
+ for d in validation_result.diagnostics
+ ]
+
  return asdict(
  success_response(
  data={
  "name": name.strip(),
+ "spec_id": spec_data["spec_id"],
  "template": template,
  "category": category,
+ "mission": mission.strip() if isinstance(mission, str) else None,
  "dry_run": True,
- "note": "Dry run - no changes made",
+ "is_valid": validation_result.is_valid,
+ "error_count": validation_result.error_count,
+ "warning_count": validation_result.warning_count,
+ "diagnostics": diagnostics,
+ "note": "Preflight validation complete - no changes made",
  },
  request_id=request_id,
  )
@@ -249,6 +348,7 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  name=name.strip(),
  template=template,
  category=category,
+ mission=mission,
  specs_dir=specs_dir,
  )
  elapsed_ms = (time.perf_counter() - start_time) * 1000
@@ -349,38 +449,34 @@ def _handle_spec_template(*, config: ServerConfig, **payload: Any) -> dict:
  if template_action == "list":
  data["templates"] = [
  {
- "name": "simple",
- "description": "Minimal spec with 1 phase and basic tasks",
- },
- {
- "name": "medium",
- "description": "Standard spec with 2-3 phases (default)",
- },
- {
- "name": "complex",
- "description": "Multi-phase spec with groups and subtasks",
- },
- {
- "name": "security",
- "description": "Security-focused spec with audit tasks",
+ "name": "empty",
+ "description": "Blank spec with no phases - use phase templates to add structure",
  },
  ]
- data["total_count"] = len(data["templates"])
+ data["phase_templates"] = [
+ {"name": t, "description": f"Add {t} phase structure"}
+ for t in PHASE_TEMPLATES
+ ]
+ data["total_count"] = 1
+ data["message"] = "Use 'empty' template, then add phases via phase-add-bulk or phase-template apply"
  elif template_action == "show":
  data["template_name"] = template_name
  data["content"] = {
  "name": template_name,
- "description": f"Template structure for '{template_name}' specs",
- "usage": f"Use authoring(action='spec-create', template='{template_name}') to create a spec",
+ "description": "Blank spec with no phases",
+ "usage": "Use authoring(action='spec-create', name='your-spec') to create, then add phases",
+ "phase_templates": list(PHASE_TEMPLATES),
  }
  else:
  data["template_name"] = template_name
  data["generated"] = {
  "template": template_name,
- "message": f"Use authoring(action='spec-create', template='{template_name}') to create a new spec",
+ "message": "Use spec-create to create an empty spec, then add phases",
  }
  data["instructions"] = (
- f"Call authoring(action='spec-create', name='your-spec-name', template='{template_name}')"
+ "1. Create spec: authoring(action='spec-create', name='your-spec-name')\n"
+ "2. Add phases: authoring(action='phase-template', template_action='apply', "
+ "template_name='planning', spec_id='...')"
  )

  return asdict(success_response(data=data, request_id=request_id))
@@ -515,6 +611,324 @@ def _handle_spec_update_frontmatter(*, config: ServerConfig, **payload: Any) ->
  )


+ # Valid scopes for find-replace
+ _FIND_REPLACE_SCOPES = {"all", "titles", "descriptions"}
+
+
+ def _handle_spec_find_replace(*, config: ServerConfig, **payload: Any) -> dict:
+ """Find and replace text across spec hierarchy nodes.
+
+ Supports literal or regex find/replace across titles and/or descriptions.
+ Returns a preview in dry_run mode, or applies changes and returns a summary.
+ """
+ request_id = _request_id()
+ action = "spec-find-replace"
+
+ # Required: spec_id
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide a non-empty spec_id parameter",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Pass the spec identifier to authoring",
+ )
+ spec_id = spec_id.strip()
+
+ # Required: find
+ find = payload.get("find")
+ if not isinstance(find, str) or not find:
+ return _validation_error(
+ field="find",
+ action=action,
+ message="Provide a non-empty find pattern",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Specify the text or regex pattern to find",
+ )
+
+ # Required: replace (can be empty string to delete matches)
+ replace = payload.get("replace")
+ if replace is None:
+ return _validation_error(
+ field="replace",
+ action=action,
+ message="Provide a replace value (use empty string to delete matches)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Provide a replacement string (use empty string to delete)",
+ )
+ if not isinstance(replace, str):
+ return _validation_error(
+ field="replace",
+ action=action,
+ message="replace must be a string",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Provide a string value for replace parameter",
+ )
+
+ # Optional: scope (default: "all")
+ scope = payload.get("scope", "all")
+ if not isinstance(scope, str) or scope not in _FIND_REPLACE_SCOPES:
+ return _validation_error(
+ field="scope",
+ action=action,
+ message=f"scope must be one of: {sorted(_FIND_REPLACE_SCOPES)}",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation=f"Use one of: {sorted(_FIND_REPLACE_SCOPES)}",
+ )
+
+ # Optional: use_regex (default: False)
+ use_regex = payload.get("use_regex", False)
+ if not isinstance(use_regex, bool):
+ return _validation_error(
+ field="use_regex",
+ action=action,
+ message="use_regex must be a boolean",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Set use_regex to true or false",
+ )
+
+ # Optional: case_sensitive (default: True)
+ case_sensitive = payload.get("case_sensitive", True)
+ if not isinstance(case_sensitive, bool):
+ return _validation_error(
+ field="case_sensitive",
+ action=action,
+ message="case_sensitive must be a boolean",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Set case_sensitive to true or false",
+ )
+
+ # Optional: dry_run (default: False)
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="dry_run must be a boolean",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Set dry_run to true or false",
+ )
+
+ # Optional: path (workspace)
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ find=find[:50] + "..." if len(find) > 50 else find,
+ use_regex=use_regex,
+ dry_run=dry_run,
+ )
+
+ metric_key = _metric_name(action)
+ start_time = time.perf_counter()
+
+ try:
+ result, error = find_replace_in_spec(
+ spec_id,
+ find,
+ replace,
+ scope=scope,
+ use_regex=use_regex,
+ case_sensitive=case_sensitive,
+ dry_run=dry_run,
+ specs_dir=specs_dir,
+ )
+ except Exception as exc: # pragma: no cover - defensive guard
+ logger.exception("Unexpected error in spec find-replace")
+ _metrics.counter(metric_key, labels={"status": "error"})
+ return asdict(
+ error_response(
+ sanitize_error_message(exc, context="authoring"),
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check logs for details",
+ request_id=request_id,
+ )
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
+
+ if error:
+ _metrics.counter(metric_key, labels={"status": "error"})
+ # Map error types
+ if "not found" in error.lower():
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation="Check spec_id value",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ if "invalid regex" in error.lower():
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.INVALID_FORMAT,
+ error_type=ErrorType.VALIDATION,
+ remediation="Check regex syntax",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.VALIDATION_ERROR,
+ error_type=ErrorType.VALIDATION,
+ remediation="Check find and replace parameters",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
+ return asdict(
+ success_response(
+ data=result,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ request_id=request_id,
+ )
+ )
+
+
+ def _handle_spec_rollback(*, config: ServerConfig, **payload: Any) -> dict:
+ """Restore a spec from a backup timestamp."""
+ request_id = _request_id()
+ action = "spec-rollback"
+
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide a non-empty spec_id parameter",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ spec_id = spec_id.strip()
+
+ timestamp = payload.get("version") # Use 'version' parameter for timestamp
+ if not isinstance(timestamp, str) or not timestamp.strip():
+ return _validation_error(
+ field="version",
+ action=action,
+ message="Provide the backup timestamp to restore (use spec history to list)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ timestamp = timestamp.strip()
+
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ )
+
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ timestamp=timestamp,
+ dry_run=dry_run,
+ )
+
+ metric_key = _metric_name(action)
+ start_time = time.perf_counter()
+
+ result = rollback_spec(
+ spec_id=spec_id,
+ timestamp=timestamp,
+ specs_dir=specs_dir,
+ dry_run=dry_run,
+ create_backup=True,
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+
+ if not result.get("success"):
+ _metrics.counter(metric_key, labels={"status": "error"})
+ error_msg = result.get("error", "Unknown error during rollback")
+
+ # Determine error code based on error message
+ if "not found" in error_msg.lower():
+ error_code = ErrorCode.NOT_FOUND
+ error_type = ErrorType.NOT_FOUND
+ remediation = "Use spec(action='history') to list available backups"
+ else:
+ error_code = ErrorCode.INTERNAL_ERROR
+ error_type = ErrorType.INTERNAL
+ remediation = "Check spec and backup file permissions"
+
+ return asdict(
+ error_response(
+ error_msg,
+ error_code=error_code,
+ error_type=error_type,
+ remediation=remediation,
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
+ return asdict(
+ success_response(
+ spec_id=spec_id,
+ timestamp=timestamp,
+ dry_run=dry_run,
+ restored_from=result.get("restored_from"),
+ backup_created=result.get("backup_created"),
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+
+
  def _handle_phase_add(*, config: ServerConfig, **payload: Any) -> dict:
  request_id = _request_id()
  action = "phase-add"
@@ -725,9 +1139,10 @@ def _handle_phase_add(*, config: ServerConfig, **payload: Any) -> dict:
  )


- def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
+ def _handle_phase_update_metadata(*, config: ServerConfig, **payload: Any) -> dict:
+ """Update metadata fields of an existing phase."""
  request_id = _request_id()
- action = "phase-remove"
+ action = "phase-update-metadata"

  spec_id = payload.get("spec_id")
  if not isinstance(spec_id, str) or not spec_id.strip():
@@ -735,6 +1150,7 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  field="spec_id",
  action=action,
  message="Provide a non-empty spec_id parameter",
+ remediation="Pass the spec identifier to authoring",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
@@ -745,18 +1161,69 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  return _validation_error(
  field="phase_id",
  action=action,
- message="Provide the phase identifier (e.g., phase-1)",
+ message="Provide a non-empty phase_id parameter",
+ remediation="Pass the phase identifier (e.g., 'phase-1')",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
  phase_id = phase_id.strip()

- force = payload.get("force", False)
- if not isinstance(force, bool):
+ # Extract optional metadata fields
+ estimated_hours = payload.get("estimated_hours")
+ description = payload.get("description")
+ purpose = payload.get("purpose")
+
+ # Validate at least one field is provided
+ has_update = any(v is not None for v in [estimated_hours, description, purpose])
+ if not has_update:
  return _validation_error(
- field="force",
+ field="metadata",
  action=action,
- message="Expected a boolean value",
+ message="At least one metadata field must be provided",
+ remediation="Include estimated_hours, description, or purpose",
+ request_id=request_id,
+ code=ErrorCode.VALIDATION_FAILED,
+ )
+
+ # Validate estimated_hours if provided
+ if estimated_hours is not None:
+ if isinstance(estimated_hours, bool) or not isinstance(
+ estimated_hours, (int, float)
+ ):
+ return _validation_error(
+ field="estimated_hours",
+ action=action,
+ message="Provide a numeric value",
+ remediation="Set estimated_hours to a number >= 0",
+ request_id=request_id,
+ )
+ if estimated_hours < 0:
+ return _validation_error(
+ field="estimated_hours",
+ action=action,
+ message="Value must be non-negative",
+ remediation="Set hours to zero or greater",
+ request_id=request_id,
+ )
+ estimated_hours = float(estimated_hours)
+
+ # Validate description if provided
+ if description is not None and not isinstance(description, str):
+ return _validation_error(
+ field="description",
+ action=action,
+ message="Description must be a string",
+ remediation="Provide a text description",
+ request_id=request_id,
+ )
+
+ # Validate purpose if provided
+ if purpose is not None and not isinstance(purpose, str):
+ return _validation_error(
+ field="purpose",
+ action=action,
+ message="Purpose must be a string",
+ remediation="Provide a text purpose",
  request_id=request_id,
  )

@@ -766,6 +1233,7 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  field="dry_run",
  action=action,
  message="Expected a boolean value",
+ remediation="Set dry_run to true or false",
  request_id=request_id,
  )

@@ -775,6 +1243,7 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  field="path",
  action=action,
  message="Workspace path must be a string",
+ remediation="Provide a valid workspace path",
  request_id=request_id,
  )

@@ -788,38 +1257,24 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  action=action,
  spec_id=spec_id,
  phase_id=phase_id,
- force=force,
  dry_run=dry_run,
  )

  metric_key = _metric_name(action)
- if dry_run:
- _metrics.counter(
- metric_key, labels={"status": "success", "force": str(force).lower()}
- )
- return asdict(
- success_response(
- data={
- "spec_id": spec_id,
- "phase_id": phase_id,
- "force": force,
- "dry_run": True,
- "note": "Dry run - no changes made",
- },
- request_id=request_id,
- )
- )
-
  start_time = time.perf_counter()
+
  try:
- result, error = remove_phase(
+ result, error = update_phase_metadata(
  spec_id=spec_id,
  phase_id=phase_id,
- force=force,
+ estimated_hours=estimated_hours,
+ description=description,
+ purpose=purpose,
+ dry_run=dry_run,
  specs_dir=specs_dir,
  )
  except Exception as exc: # pragma: no cover - defensive guard
- logger.exception("Unexpected error removing phase")
+ logger.exception("Unexpected error updating phase metadata")
  _metrics.counter(metric_key, labels={"status": "error"})
  return asdict(
  error_response(
@@ -837,7 +1292,7 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  if error:
  _metrics.counter(metric_key, labels={"status": "error"})
  lowered = error.lower()
- if "spec" in lowered and "not found" in lowered:
+ if "specification" in lowered and "not found" in lowered:
  return asdict(
  error_response(
  f"Specification '{spec_id}' not found",
@@ -850,10 +1305,10 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  if "phase" in lowered and "not found" in lowered:
  return asdict(
  error_response(
- f"Phase '{phase_id}' not found in spec",
- error_code=ErrorCode.PHASE_NOT_FOUND,
+ f"Phase '{phase_id}' not found in spec '{spec_id}'",
+ error_code=ErrorCode.TASK_NOT_FOUND,
  error_type=ErrorType.NOT_FOUND,
- remediation="Confirm the phase exists in the hierarchy",
+ remediation='Verify the phase ID via task(action="query")',
  request_id=request_id,
  )
  )
@@ -861,25 +1316,15 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  return asdict(
  error_response(
  f"Node '{phase_id}' is not a phase",
- error_code=ErrorCode.VALIDATION_ERROR,
+ error_code=ErrorCode.VALIDATION_FAILED,
  error_type=ErrorType.VALIDATION,
- remediation="Use task-remove for non-phase nodes",
- request_id=request_id,
- )
- )
- if "non-completed" in lowered or "has" in lowered and "task" in lowered:
- return asdict(
- error_response(
- f"Phase '{phase_id}' has non-completed tasks. Use force=True to remove anyway",
- error_code=ErrorCode.CONFLICT,
- error_type=ErrorType.CONFLICT,
- remediation="Set force=True to remove active phases",
+ remediation="Provide a valid phase ID (e.g., 'phase-1')",
  request_id=request_id,
  )
  )
  return asdict(
  error_response(
- f"Failed to remove phase: {error}",
+ f"Failed to update phase metadata: {error}",
  error_code=ErrorCode.INTERNAL_ERROR,
  error_type=ErrorType.INTERNAL,
  remediation="Check input values and retry",
@@ -887,59 +1332,206 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  )
  )

- _metrics.counter(
- metric_key, labels={"status": "success", "force": str(force).lower()}
- )
+ _metrics.counter(metric_key, labels={"status": "success"})
  return asdict(
  success_response(
- data={"spec_id": spec_id, "dry_run": False, **(result or {})},
+ data={"spec_id": spec_id, "phase_id": phase_id, **(result or {})},
  telemetry={"duration_ms": round(elapsed_ms, 2)},
  request_id=request_id,
  )
  )


- def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
+ def _handle_phase_add_bulk(*, config: ServerConfig, **payload: Any) -> dict:
  request_id = _request_id()
- action = "assumption-add"
+ action = "phase-add-bulk"

+ # Validate spec_id
  spec_id = payload.get("spec_id")
  if not isinstance(spec_id, str) or not spec_id.strip():
  return _validation_error(
  field="spec_id",
  action=action,
  message="Provide a non-empty spec_id parameter",
+ remediation="Pass the spec identifier to authoring",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
  spec_id = spec_id.strip()

- text = payload.get("text")
- if not isinstance(text, str) or not text.strip():
+ # Require macro format: {phase: {...}, tasks: [...]}
+ phase_obj = payload.get("phase")
+ if not isinstance(phase_obj, dict):
  return _validation_error(
- field="text",
+ field="phase",
  action=action,
- message="Provide the assumption text",
+ message="Provide a phase object with metadata",
+ remediation="Use macro format: {phase: {title: '...', description: '...'}, tasks: [...]}",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
- text = text.strip()

- assumption_type = payload.get("assumption_type") or "constraint"
- if assumption_type not in ASSUMPTION_TYPES:
+ # Extract phase metadata from nested object
+ title = phase_obj.get("title")
+ if not isinstance(title, str) or not title.strip():
  return _validation_error(
- field="assumption_type",
+ field="phase.title",
  action=action,
- message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
+ message="Provide a non-empty phase title",
+ remediation="Include phase.title in the phase object",
  request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
  )
+ title = title.strip()

- author = payload.get("author")
- if author is not None and not isinstance(author, str):
+ # Validate tasks array
+ tasks = payload.get("tasks")
+ if not tasks or not isinstance(tasks, list) or len(tasks) == 0:
  return _validation_error(
- field="author",
+ field="tasks",
  action=action,
- message="Author must be a string",
+ message="Provide at least one task definition",
+ remediation="Include a tasks array with type and title for each task",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+
+ # Validate each task in the array
+ valid_task_types = {"task", "verify"}
+ for idx, task_def in enumerate(tasks):
+ if not isinstance(task_def, dict):
+ return _validation_error(
+ field=f"tasks[{idx}]",
+ action=action,
+ message="Each task must be a dictionary",
+ request_id=request_id,
+ )
+
+ task_type = task_def.get("type")
+ if not task_type or task_type not in valid_task_types:
+ return _validation_error(
+ field=f"tasks[{idx}].type",
+ action=action,
+ message="Task type must be 'task' or 'verify'",
+ remediation="Set type to 'task' or 'verify'",
+ request_id=request_id,
+ )
+
+ task_title = task_def.get("title")
+ if not task_title or not isinstance(task_title, str) or not task_title.strip():
+ return _validation_error(
+ field=f"tasks[{idx}].title",
+ action=action,
+ message="Each task must have a non-empty title",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+
+ est_hours = task_def.get("estimated_hours")
+ if est_hours is not None:
+ if isinstance(est_hours, bool) or not isinstance(est_hours, (int, float)):
+ return _validation_error(
+ field=f"tasks[{idx}].estimated_hours",
+ action=action,
+ message="estimated_hours must be a number",
+ request_id=request_id,
+ )
+ if est_hours < 0:
+ return _validation_error(
+ field=f"tasks[{idx}].estimated_hours",
+ action=action,
+ message="estimated_hours must be non-negative",
+ request_id=request_id,
+ )
+
+ # Validate optional phase metadata (from phase object)
+ description = phase_obj.get("description")
+ if description is not None and not isinstance(description, str):
+ return _validation_error(
+ field="phase.description",
+ action=action,
+ message="Description must be a string",
+ request_id=request_id,
+ )
+
+ purpose = phase_obj.get("purpose")
+ if purpose is not None and not isinstance(purpose, str):
+ return _validation_error(
+ field="phase.purpose",
+ action=action,
+ message="Purpose must be a string",
+ request_id=request_id,
+ )
+
+ estimated_hours = phase_obj.get("estimated_hours")
+ if estimated_hours is not None:
+ if isinstance(estimated_hours, bool) or not isinstance(
+ estimated_hours, (int, float)
+ ):
+ return _validation_error(
+ field="phase.estimated_hours",
+ action=action,
+ message="Provide a numeric value",
+ request_id=request_id,
+ )
+ if estimated_hours < 0:
+ return _validation_error(
+ field="phase.estimated_hours",
+ action=action,
+ message="Value must be non-negative",
+ remediation="Set hours to zero or greater",
+ request_id=request_id,
+ )
+ estimated_hours = float(estimated_hours)
+
+ # Handle metadata_defaults from both top-level and phase object
+ # Top-level serves as base, phase-level overrides
+ top_level_defaults = payload.get("metadata_defaults")
+ if top_level_defaults is not None and not isinstance(top_level_defaults, dict):
+ return _validation_error(
+ field="metadata_defaults",
+ action=action,
+ message="metadata_defaults must be a dictionary",
+ request_id=request_id,
+ )
+
+ phase_level_defaults = phase_obj.get("metadata_defaults")
+ if phase_level_defaults is not None and not isinstance(phase_level_defaults, dict):
+ return _validation_error(
+ field="phase.metadata_defaults",
+ action=action,
+ message="metadata_defaults must be a dictionary",
+ request_id=request_id,
+ )
+
+ # Merge: top-level as base, phase-level overrides
+ metadata_defaults = None
+ if top_level_defaults or phase_level_defaults:
+ metadata_defaults = {**(top_level_defaults or {}), **(phase_level_defaults or {})}
+
+ position = payload.get("position")
+ if position is not None:
+ if isinstance(position, bool) or not isinstance(position, int):
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be an integer",
+ request_id=request_id,
+ )
+ if position < 0:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be >= 0",
+ request_id=request_id,
+ )
+
+ link_previous = payload.get("link_previous", True)
+ if not isinstance(link_previous, bool):
+ return _validation_error(
+ field="link_previous",
+ action=action,
+ message="Expected a boolean value",
  request_id=request_id,
  )

@@ -965,10 +1557,11 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
  if specs_dir is None:
  return _specs_directory_missing_error(request_id)

+ # Check for duplicate phase title (warning only)
  warnings: List[str] = []
- if _assumption_exists(spec_id, specs_dir, text):
+ if _phase_exists(spec_id, specs_dir, title):
  warnings.append(
- "An assumption with identical text already exists; another entry will be appended"
+ f"Phase titled '{title}' already exists; the new phase will still be added"
  )

  audit_log(
@@ -976,27 +1569,31 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
  tool="authoring",
  action=action,
  spec_id=spec_id,
- assumption_type=assumption_type,
+ title=title,
+ task_count=len(tasks),
  dry_run=dry_run,
+ link_previous=link_previous,
  )

  metric_key = _metric_name(action)

  if dry_run:
  _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
- data = {
- "spec_id": spec_id,
- "assumption_id": "(preview)",
- "text": text,
- "type": assumption_type,
- "dry_run": True,
- "note": "Dry run - no changes made",
- }
- if author:
- data["author"] = author
+ preview_tasks = [
+ {"task_id": "(preview)", "title": t.get("title", ""), "type": t.get("type", "")}
+ for t in tasks
+ ]
  return asdict(
  success_response(
- data=data,
+ data={
+ "spec_id": spec_id,
+ "phase_id": "(preview)",
+ "title": title,
+ "tasks_created": preview_tasks,
+ "total_tasks": len(tasks),
+ "dry_run": True,
+ "note": "Dry run - no changes made",
+ },
  warnings=warnings or None,
  request_id=request_id,
  )
@@ -1004,15 +1601,20 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:

  start_time = time.perf_counter()
  try:
- result, error = add_assumption(
+ result, error = add_phase_bulk(
  spec_id=spec_id,
- text=text,
- assumption_type=assumption_type,
- author=author,
+ phase_title=title,
+ tasks=tasks,
+ phase_description=description,
+ phase_purpose=purpose,
+ phase_estimated_hours=estimated_hours,
+ metadata_defaults=metadata_defaults,
+ position=position,
+ link_previous=link_previous,
  specs_dir=specs_dir,
  )
  except Exception as exc: # pragma: no cover - defensive guard
- logger.exception("Unexpected error adding assumption")
+ logger.exception("Unexpected error in phase-add-bulk")
  _metrics.counter(metric_key, labels={"status": "error"})
  return asdict(
  error_response(
@@ -1029,7 +1631,8 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:

  if error:
  _metrics.counter(metric_key, labels={"status": "error"})
- if "not found" in error.lower():
+ lowered = error.lower()
+ if "specification" in lowered and "not found" in lowered:
  return asdict(
  error_response(
  f"Specification '{spec_id}' not found",
@@ -1039,305 +1642,1772 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
  request_id=request_id,
  )
  )
+ if "task at index" in lowered:
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.VALIDATION_ERROR,
+ error_type=ErrorType.VALIDATION,
+ remediation="Check each task has valid type and title",
+ request_id=request_id,
+ )
+ )
  return asdict(
  error_response(
- f"Failed to add assumption: {error}",
+ f"Failed to add phase with tasks: {error}",
  error_code=ErrorCode.INTERNAL_ERROR,
  error_type=ErrorType.INTERNAL,
- remediation="Check that the spec exists",
+ remediation="Check input values and retry",
  request_id=request_id,
  )
  )

- data = {
- "spec_id": spec_id,
- "assumption_id": result.get("assumption_id") if result else None,
+ _metrics.counter(metric_key, labels={"status": "success"})
+ return asdict(
+ success_response(
+ data={"spec_id": spec_id, "dry_run": False, **(result or {})},
+ warnings=warnings or None,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ request_id=request_id,
+ )
+ )
+
+
+ def _handle_phase_template(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle phase-template action: list/show/apply phase templates."""
+ request_id = _request_id()
+ action = "phase-template"
+
+ template_action = payload.get("template_action")
+ if not isinstance(template_action, str) or not template_action.strip():
+ return _validation_error(
+ field="template_action",
+ action=action,
+ message="Provide one of: list, show, apply",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ template_action = template_action.strip().lower()
+ if template_action not in ("list", "show", "apply"):
+ return _validation_error(
+ field="template_action",
+ action=action,
+ message="template_action must be one of: list, show, apply",
+ request_id=request_id,
+ remediation="Use list, show, or apply",
+ )
+
+ template_name = payload.get("template_name")
+ if template_action in ("show", "apply"):
+ if not isinstance(template_name, str) or not template_name.strip():
+ return _validation_error(
+ field="template_name",
+ action=action,
+ message="Provide a template name",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ template_name = template_name.strip()
+ if template_name not in PHASE_TEMPLATES:
+ return asdict(
+ error_response(
+ f"Phase template '{template_name}' not found",
+ error_code=ErrorCode.NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation=f"Use template_action='list' to see available templates. Valid: {', '.join(PHASE_TEMPLATES)}",
+ request_id=request_id,
+ )
+ )
+
+ data: Dict[str, Any] = {"action": template_action}
+
+ if template_action == "list":
+ data["templates"] = [
+ {
+ "name": "planning",
+ "description": "Requirements gathering and initial planning phase",
+ "tasks": 2,
+ "estimated_hours": 4,
+ },
+ {
+ "name": "implementation",
+ "description": "Core development and feature implementation phase",
+ "tasks": 2,
+ "estimated_hours": 8,
+ },
+ {
+ "name": "testing",
+ "description": "Comprehensive testing and quality assurance phase",
+ "tasks": 2,
+ "estimated_hours": 6,
+ },
+ {
+ "name": "security",
+ "description": "Security audit and hardening phase",
+ "tasks": 2,
+ "estimated_hours": 6,
+ },
+ {
+ "name": "documentation",
+ "description": "Technical documentation and knowledge capture phase",
+ "tasks": 2,
+ "estimated_hours": 4,
+ },
+ ]
+ data["total_count"] = len(data["templates"])
+ data["note"] = "All templates include automatic verification scaffolding (run-tests + fidelity)"
+ return asdict(success_response(data=data, request_id=request_id))
+
+ elif template_action == "show":
+ try:
+ template_struct = get_phase_template_structure(template_name)
+ data["template_name"] = template_name
+ data["content"] = {
+ "name": template_name,
+ "title": template_struct["title"],
+ "description": template_struct["description"],
+ "purpose": template_struct["purpose"],
+ "estimated_hours": template_struct["estimated_hours"],
+ "tasks": template_struct["tasks"],
+ "includes_verification": template_struct["includes_verification"],
+ }
+ data["usage"] = (
+ f"Use authoring(action='phase-template', template_action='apply', "
+ f"template_name='{template_name}', spec_id='your-spec-id') to apply this template"
+ )
+ return asdict(success_response(data=data, request_id=request_id))
+ except ValueError as exc:
+ return asdict(
+ error_response(
+ str(exc),
+ error_code=ErrorCode.NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ request_id=request_id,
+ )
+ )
+
+ else: # apply
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide the target spec_id to apply the template to",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ spec_id = spec_id.strip()
+
+ # Optional parameters for apply
+ category = payload.get("category", "implementation")
+ if not isinstance(category, str):
+ return _validation_error(
+ field="category",
+ action=action,
+ message="Category must be a string",
+ request_id=request_id,
+ )
+ category = category.strip()
+ if category and category not in CATEGORIES:
+ return _validation_error(
+ field="category",
+ action=action,
+ message=f"Category must be one of: {', '.join(CATEGORIES)}",
+ request_id=request_id,
+ )
+
+ position = payload.get("position")
+ if position is not None:
+ if isinstance(position, bool) or not isinstance(position, int):
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be an integer",
+ request_id=request_id,
+ )
+ if position < 0:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be >= 0",
+ request_id=request_id,
+ )
+
+ link_previous = payload.get("link_previous", True)
+ if not isinstance(link_previous, bool):
+ return _validation_error(
+ field="link_previous",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ )
+
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ )
+
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ template_name=template_name,
+ dry_run=dry_run,
+ link_previous=link_previous,
+ )
+
+ metric_key = _metric_name(action)
+
+ if dry_run:
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
+ template_struct = get_phase_template_structure(template_name, category)
+ return asdict(
+ success_response(
+ data={
+ "spec_id": spec_id,
+ "template_applied": template_name,
+ "phase_id": "(preview)",
+ "title": template_struct["title"],
+ "tasks_created": [
+ {"task_id": "(preview)", "title": t["title"], "type": "task"}
+ for t in template_struct["tasks"]
+ ],
+ "total_tasks": len(template_struct["tasks"]),
+ "dry_run": True,
+ "note": "Dry run - no changes made. Verification scaffolding will be auto-added.",
+ },
+ request_id=request_id,
+ )
+ )
+
+ start_time = time.perf_counter()
+ try:
+ result, error = apply_phase_template(
+ spec_id=spec_id,
+ template=template_name,
+ specs_dir=specs_dir,
+ category=category,
+ position=position,
+ link_previous=link_previous,
+ )
+ except Exception as exc: # pragma: no cover - defensive guard
+ logger.exception("Unexpected error in phase-template apply")
+ _metrics.counter(metric_key, labels={"status": "error"})
+ return asdict(
+ error_response(
+ sanitize_error_message(exc, context="authoring"),
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check logs for details",
+ request_id=request_id,
+ )
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
+
+ if error:
+ _metrics.counter(metric_key, labels={"status": "error"})
+ lowered = error.lower()
+ if "specification" in lowered and "not found" in lowered:
+ return asdict(
+ error_response(
+ f"Specification '{spec_id}' not found",
+ error_code=ErrorCode.SPEC_NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation='Verify the spec ID via spec(action="list")',
+ request_id=request_id,
+ )
+ )
+ if "invalid phase template" in lowered:
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.VALIDATION_ERROR,
+ error_type=ErrorType.VALIDATION,
+ remediation=f"Valid templates: {', '.join(PHASE_TEMPLATES)}",
+ request_id=request_id,
+ )
+ )
+ return asdict(
+ error_response(
+ f"Failed to apply phase template: {error}",
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check input values and retry",
+ request_id=request_id,
+ )
+ )
+
+ _metrics.counter(metric_key, labels={"status": "success"})
+ return asdict(
+ success_response(
+ data={"spec_id": spec_id, "dry_run": False, **(result or {})},
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ request_id=request_id,
+ )
+ )
+
+
+ def _handle_phase_move(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle phase-move action: reorder a phase within spec-root children."""
+ request_id = _request_id()
+ action = "phase-move"
+
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide a non-empty spec_id parameter",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation='Use spec(action="list") to find available spec IDs',
+ )
+ spec_id = spec_id.strip()
+
+ phase_id = payload.get("phase_id")
+ if not isinstance(phase_id, str) or not phase_id.strip():
+ return _validation_error(
+ field="phase_id",
+ action=action,
+ message="Provide the phase identifier (e.g., phase-1)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Specify a phase ID like phase-1 or phase-2",
+ )
+ phase_id = phase_id.strip()
+
+ position = payload.get("position")
+ if position is None:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Provide the target position (1-based index)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Specify position as a positive integer (1 = first)",
+ )
+ if isinstance(position, bool) or not isinstance(position, int):
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be an integer",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Provide position as an integer, e.g. position=2",
+ )
+ if position < 1:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be a positive integer (1-based)",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Use 1 for first position, 2 for second, etc.",
+ )
+
+ link_previous = payload.get("link_previous", True)
+ if not isinstance(link_previous, bool):
+ return _validation_error(
+ field="link_previous",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Use true or false for link_previous",
+ )
+
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Use true or false for dry_run",
+ )
+
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ remediation="Provide a valid filesystem path string",
+ code=ErrorCode.INVALID_FORMAT,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ phase_id=phase_id,
+ position=position,
+ link_previous=link_previous,
+ dry_run=dry_run,
+ )
+
+ metric_key = _metric_name(action)
+ start_time = time.perf_counter()
+
+ try:
+ result, error = move_phase(
+ spec_id=spec_id,
+ phase_id=phase_id,
+ position=position,
+ link_previous=link_previous,
+ dry_run=dry_run,
+ specs_dir=specs_dir,
+ )
+ except Exception as exc: # pragma: no cover - defensive guard
+ logger.exception("Unexpected error moving phase")
+ _metrics.counter(metric_key, labels={"status": "error"})
+ return asdict(
+ error_response(
+ sanitize_error_message(exc, context="authoring"),
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check logs for details",
+ request_id=request_id,
+ )
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
+
+ if error:
+ _metrics.counter(metric_key, labels={"status": "error"})
+ lowered = error.lower()
+ if "specification" in lowered and "not found" in lowered:
+ return asdict(
+ error_response(
+ f"Specification '{spec_id}' not found",
+ error_code=ErrorCode.SPEC_NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation='Verify the spec ID via spec(action="list")',
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ if "phase" in lowered and "not found" in lowered:
+ return asdict(
+ error_response(
+ f"Phase '{phase_id}' not found in spec",
+ error_code=ErrorCode.PHASE_NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation="Confirm the phase exists in the hierarchy",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ if "not a phase" in lowered:
+ return asdict(
+ error_response(
+ f"Node '{phase_id}' is not a phase",
+ error_code=ErrorCode.VALIDATION_ERROR,
+ error_type=ErrorType.VALIDATION,
+ remediation="Provide a valid phase ID (e.g., phase-1)",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ if "invalid position" in lowered or "must be" in lowered:
2141
+ return asdict(
2142
+ error_response(
2143
+ error,
2144
+ error_code=ErrorCode.VALIDATION_ERROR,
2145
+ error_type=ErrorType.VALIDATION,
2146
+ remediation="Provide a valid 1-based position within range",
2147
+ request_id=request_id,
2148
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2149
+ )
2150
+ )
2151
+ return asdict(
2152
+ error_response(
2153
+ f"Failed to move phase: {error}",
2154
+ error_code=ErrorCode.INTERNAL_ERROR,
2155
+ error_type=ErrorType.INTERNAL,
2156
+ remediation="Check input values and retry",
2157
+ request_id=request_id,
2158
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2159
+ )
2160
+ )
2161
+
2162
+ _metrics.counter(metric_key, labels={"status": "success"})
2163
+ return asdict(
2164
+ success_response(
2165
+ data=result or {},
2166
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2167
+ request_id=request_id,
2168
+ )
2169
+ )
2170
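A minimal example of a phase-move request shaped the way the validation above expects it; the spec ID is a made-up example, and the local assertion mirrors the handler's integer-but-not-boolean check.

```python
payload = {
    "spec_id": "user-auth-2024",   # made-up example ID
    "phase_id": "phase-3",
    "position": 1,                 # 1-based: 1 = move to the front
    "link_previous": True,         # re-link the moved phase to its new predecessor
    "dry_run": True,               # preview the reorder without writing
}

# The handler rejects bools masquerading as ints (isinstance(True, int) is True in Python),
# so an equivalent caller-side sanity check looks like this:
position = payload["position"]
assert isinstance(position, int) and not isinstance(position, bool) and position >= 1
```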
+
2171
+
2172
+ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
2173
+ request_id = _request_id()
2174
+ action = "phase-remove"
2175
+
2176
+ spec_id = payload.get("spec_id")
2177
+ if not isinstance(spec_id, str) or not spec_id.strip():
2178
+ return _validation_error(
2179
+ field="spec_id",
2180
+ action=action,
2181
+ message="Provide a non-empty spec_id parameter",
2182
+ request_id=request_id,
2183
+ code=ErrorCode.MISSING_REQUIRED,
2184
+ )
2185
+ spec_id = spec_id.strip()
2186
+
2187
+ phase_id = payload.get("phase_id")
2188
+ if not isinstance(phase_id, str) or not phase_id.strip():
2189
+ return _validation_error(
2190
+ field="phase_id",
2191
+ action=action,
2192
+ message="Provide the phase identifier (e.g., phase-1)",
2193
+ request_id=request_id,
2194
+ code=ErrorCode.MISSING_REQUIRED,
2195
+ )
2196
+ phase_id = phase_id.strip()
2197
+
2198
+ force = payload.get("force", False)
2199
+ if not isinstance(force, bool):
2200
+ return _validation_error(
2201
+ field="force",
2202
+ action=action,
2203
+ message="Expected a boolean value",
2204
+ request_id=request_id,
2205
+ )
2206
+
2207
+ dry_run = payload.get("dry_run", False)
2208
+ if not isinstance(dry_run, bool):
2209
+ return _validation_error(
2210
+ field="dry_run",
2211
+ action=action,
2212
+ message="Expected a boolean value",
2213
+ request_id=request_id,
2214
+ )
2215
+
2216
+ path = payload.get("path")
2217
+ if path is not None and not isinstance(path, str):
2218
+ return _validation_error(
2219
+ field="path",
2220
+ action=action,
2221
+ message="Workspace path must be a string",
2222
+ request_id=request_id,
2223
+ )
2224
+
2225
+ specs_dir = _resolve_specs_dir(config, path)
2226
+ if specs_dir is None:
2227
+ return _specs_directory_missing_error(request_id)
2228
+
2229
+ audit_log(
2230
+ "tool_invocation",
2231
+ tool="authoring",
2232
+ action=action,
2233
+ spec_id=spec_id,
2234
+ phase_id=phase_id,
2235
+ force=force,
2236
+ dry_run=dry_run,
2237
+ )
2238
+
2239
+ metric_key = _metric_name(action)
2240
+ if dry_run:
2241
+ _metrics.counter(
2242
+ metric_key, labels={"status": "success", "force": str(force).lower()}
2243
+ )
2244
+ return asdict(
2245
+ success_response(
2246
+ data={
2247
+ "spec_id": spec_id,
2248
+ "phase_id": phase_id,
2249
+ "force": force,
2250
+ "dry_run": True,
2251
+ "note": "Dry run - no changes made",
2252
+ },
2253
+ request_id=request_id,
2254
+ )
2255
+ )
2256
+
2257
+ start_time = time.perf_counter()
2258
+ try:
2259
+ result, error = remove_phase(
2260
+ spec_id=spec_id,
2261
+ phase_id=phase_id,
2262
+ force=force,
2263
+ specs_dir=specs_dir,
2264
+ )
2265
+ except Exception as exc: # pragma: no cover - defensive guard
2266
+ logger.exception("Unexpected error removing phase")
2267
+ _metrics.counter(metric_key, labels={"status": "error"})
2268
+ return asdict(
2269
+ error_response(
2270
+ sanitize_error_message(exc, context="authoring"),
2271
+ error_code=ErrorCode.INTERNAL_ERROR,
2272
+ error_type=ErrorType.INTERNAL,
2273
+ remediation="Check logs for details",
2274
+ request_id=request_id,
2275
+ )
2276
+ )
2277
+
2278
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2279
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2280
+
2281
+ if error:
2282
+ _metrics.counter(metric_key, labels={"status": "error"})
2283
+ lowered = error.lower()
2284
+ if "spec" in lowered and "not found" in lowered:
2285
+ return asdict(
2286
+ error_response(
2287
+ f"Specification '{spec_id}' not found",
2288
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2289
+ error_type=ErrorType.NOT_FOUND,
2290
+ remediation='Verify the spec ID via spec(action="list")',
2291
+ request_id=request_id,
2292
+ )
2293
+ )
2294
+ if "phase" in lowered and "not found" in lowered:
2295
+ return asdict(
2296
+ error_response(
2297
+ f"Phase '{phase_id}' not found in spec",
2298
+ error_code=ErrorCode.PHASE_NOT_FOUND,
2299
+ error_type=ErrorType.NOT_FOUND,
2300
+ remediation="Confirm the phase exists in the hierarchy",
2301
+ request_id=request_id,
2302
+ )
2303
+ )
2304
+ if "not a phase" in lowered:
2305
+ return asdict(
2306
+ error_response(
2307
+ f"Node '{phase_id}' is not a phase",
2308
+ error_code=ErrorCode.VALIDATION_ERROR,
2309
+ error_type=ErrorType.VALIDATION,
2310
+ remediation="Use task-remove for non-phase nodes",
2311
+ request_id=request_id,
2312
+ )
2313
+ )
2314
+ if "non-completed" in lowered or "has" in lowered and "task" in lowered:
2315
+ return asdict(
2316
+ error_response(
2317
+ f"Phase '{phase_id}' has non-completed tasks. Use force=True to remove anyway",
2318
+ error_code=ErrorCode.CONFLICT,
2319
+ error_type=ErrorType.CONFLICT,
2320
+ remediation="Set force=True to remove active phases",
2321
+ request_id=request_id,
2322
+ )
2323
+ )
2324
+ return asdict(
2325
+ error_response(
2326
+ f"Failed to remove phase: {error}",
2327
+ error_code=ErrorCode.INTERNAL_ERROR,
2328
+ error_type=ErrorType.INTERNAL,
2329
+ remediation="Check input values and retry",
2330
+ request_id=request_id,
2331
+ )
2332
+ )
2333
+
2334
+ _metrics.counter(
2335
+ metric_key, labels={"status": "success", "force": str(force).lower()}
2336
+ )
2337
+ return asdict(
2338
+ success_response(
2339
+ data={"spec_id": spec_id, "dry_run": False, **(result or {})},
2340
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2341
+ request_id=request_id,
2342
+ )
2343
+ )
2344
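A client-side sketch of the escalation flow implied by the CONFLICT branch above: attempt a plain remove first and retry with force=True only after explicit confirmation. `call` is a stand-in for whatever binding invokes the authoring tool, and the envelope field names here are assumptions.

```python
def remove_phase_with_confirmation(call, spec_id: str, phase_id: str) -> dict:
    """Try a plain phase-remove; escalate to force=True only on an explicit yes."""
    result = call(action="phase-remove", spec_id=spec_id, phase_id=phase_id)
    error = result.get("error") or {}
    if error.get("code") == "CONFLICT":  # phase still has non-completed tasks
        answer = input(f"{phase_id} has non-completed tasks; remove anyway? [y/N] ")
        if answer.strip().lower() == "y":
            result = call(action="phase-remove", spec_id=spec_id,
                          phase_id=phase_id, force=True)
    return result
```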
+
2345
+
2346
+ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
2347
+ request_id = _request_id()
2348
+ action = "assumption-add"
2349
+
2350
+ spec_id = payload.get("spec_id")
2351
+ if not isinstance(spec_id, str) or not spec_id.strip():
2352
+ return _validation_error(
2353
+ field="spec_id",
2354
+ action=action,
2355
+ message="Provide a non-empty spec_id parameter",
2356
+ request_id=request_id,
2357
+ code=ErrorCode.MISSING_REQUIRED,
2358
+ )
2359
+ spec_id = spec_id.strip()
2360
+
2361
+ text = payload.get("text")
2362
+ if not isinstance(text, str) or not text.strip():
2363
+ return _validation_error(
2364
+ field="text",
2365
+ action=action,
2366
+ message="Provide the assumption text",
2367
+ request_id=request_id,
2368
+ code=ErrorCode.MISSING_REQUIRED,
2369
+ )
2370
+ text = text.strip()
2371
+
2372
+ assumption_type = payload.get("assumption_type") or "constraint"
2373
+ if assumption_type not in ASSUMPTION_TYPES:
2374
+ return _validation_error(
2375
+ field="assumption_type",
2376
+ action=action,
2377
+ message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
2378
+ request_id=request_id,
2379
+ )
2380
+
2381
+ author = payload.get("author")
2382
+ if author is not None and not isinstance(author, str):
2383
+ return _validation_error(
2384
+ field="author",
2385
+ action=action,
2386
+ message="Author must be a string",
2387
+ request_id=request_id,
2388
+ )
2389
+
2390
+ dry_run = payload.get("dry_run", False)
2391
+ if not isinstance(dry_run, bool):
2392
+ return _validation_error(
2393
+ field="dry_run",
2394
+ action=action,
2395
+ message="Expected a boolean value",
2396
+ request_id=request_id,
2397
+ )
2398
+
2399
+ path = payload.get("path")
2400
+ if path is not None and not isinstance(path, str):
2401
+ return _validation_error(
2402
+ field="path",
2403
+ action=action,
2404
+ message="Workspace path must be a string",
2405
+ request_id=request_id,
2406
+ )
2407
+
2408
+ specs_dir = _resolve_specs_dir(config, path)
2409
+ if specs_dir is None:
2410
+ return _specs_directory_missing_error(request_id)
2411
+
2412
+ warnings: List[str] = []
2413
+ if _assumption_exists(spec_id, specs_dir, text):
2414
+ warnings.append(
2415
+ "An assumption with identical text already exists; another entry will be appended"
2416
+ )
2417
+
2418
+ audit_log(
2419
+ "tool_invocation",
2420
+ tool="authoring",
2421
+ action=action,
2422
+ spec_id=spec_id,
2423
+ assumption_type=assumption_type,
2424
+ dry_run=dry_run,
2425
+ )
2426
+
2427
+ metric_key = _metric_name(action)
2428
+
2429
+ if dry_run:
2430
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
2431
+ data = {
2432
+ "spec_id": spec_id,
2433
+ "assumption_id": "(preview)",
2434
+ "text": text,
2435
+ "type": assumption_type,
2436
+ "dry_run": True,
2437
+ "note": "Dry run - no changes made",
2438
+ }
2439
+ if author:
2440
+ data["author"] = author
2441
+ return asdict(
2442
+ success_response(
2443
+ data=data,
2444
+ warnings=warnings or None,
2445
+ request_id=request_id,
2446
+ )
2447
+ )
2448
+
2449
+ start_time = time.perf_counter()
2450
+ try:
2451
+ result, error = add_assumption(
2452
+ spec_id=spec_id,
2453
+ text=text,
2454
+ assumption_type=assumption_type,
2455
+ author=author,
2456
+ specs_dir=specs_dir,
2457
+ )
2458
+ except Exception as exc: # pragma: no cover - defensive guard
2459
+ logger.exception("Unexpected error adding assumption")
2460
+ _metrics.counter(metric_key, labels={"status": "error"})
2461
+ return asdict(
2462
+ error_response(
2463
+ sanitize_error_message(exc, context="authoring"),
2464
+ error_code=ErrorCode.INTERNAL_ERROR,
2465
+ error_type=ErrorType.INTERNAL,
2466
+ remediation="Check logs for details",
2467
+ request_id=request_id,
2468
+ )
2469
+ )
2470
+
2471
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2472
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2473
+
2474
+ if error:
2475
+ _metrics.counter(metric_key, labels={"status": "error"})
2476
+ if "not found" in error.lower():
2477
+ return asdict(
2478
+ error_response(
2479
+ f"Specification '{spec_id}' not found",
2480
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2481
+ error_type=ErrorType.NOT_FOUND,
2482
+ remediation='Verify the spec ID via spec(action="list")',
2483
+ request_id=request_id,
2484
+ )
2485
+ )
2486
+ return asdict(
2487
+ error_response(
2488
+ f"Failed to add assumption: {error}",
2489
+ error_code=ErrorCode.INTERNAL_ERROR,
2490
+ error_type=ErrorType.INTERNAL,
2491
+ remediation="Check that the spec exists",
2492
+ request_id=request_id,
2493
+ )
2494
+ )
2495
+
2496
+ data = {
2497
+ "spec_id": spec_id,
2498
+ "assumption_id": result.get("assumption_id") if result else None,
1055
2499
  "text": text,
1056
2500
  "type": assumption_type,
1057
2501
  "dry_run": False,
1058
2502
  }
1059
- if author:
1060
- data["author"] = author
2503
+ if author:
2504
+ data["author"] = author
2505
+
2506
+ _metrics.counter(metric_key, labels={"status": "success"})
2507
+ return asdict(
2508
+ success_response(
2509
+ data=data,
2510
+ warnings=warnings or None,
2511
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2512
+ request_id=request_id,
2513
+ )
2514
+ )
2515
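Because the handler above only warns about duplicate assumption text and still appends a new entry, strict de-duplication is the caller's job. A small sketch, where `existing` stands in for the assumptions returned by assumption-list and the field names are assumptions:

```python
def should_add(existing: list, text: str) -> bool:
    """Return False when an identical (stripped) assumption text already exists."""
    normalized = text.strip()
    return all(a.get("text", "").strip() != normalized for a in existing)

existing = [{"text": "Postgres 15 is available in all environments"}]
print(should_add(existing, "Postgres 15 is available in all environments "))  # False
print(should_add(existing, "Redis is available for caching"))                 # True
```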
+
2516
+
2517
+ def _handle_assumption_list(*, config: ServerConfig, **payload: Any) -> dict:
2518
+ request_id = _request_id()
2519
+ action = "assumption-list"
2520
+
2521
+ spec_id = payload.get("spec_id")
2522
+ if not isinstance(spec_id, str) or not spec_id.strip():
2523
+ return _validation_error(
2524
+ field="spec_id",
2525
+ action=action,
2526
+ message="Provide a non-empty spec_id parameter",
2527
+ request_id=request_id,
2528
+ code=ErrorCode.MISSING_REQUIRED,
2529
+ )
2530
+ spec_id = spec_id.strip()
2531
+
2532
+ assumption_type = payload.get("assumption_type")
2533
+ if assumption_type is not None and assumption_type not in ASSUMPTION_TYPES:
2534
+ return _validation_error(
2535
+ field="assumption_type",
2536
+ action=action,
2537
+ message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
2538
+ request_id=request_id,
2539
+ )
2540
+
2541
+ path = payload.get("path")
2542
+ if path is not None and not isinstance(path, str):
2543
+ return _validation_error(
2544
+ field="path",
2545
+ action=action,
2546
+ message="Workspace path must be a string",
2547
+ request_id=request_id,
2548
+ )
2549
+
2550
+ specs_dir = _resolve_specs_dir(config, path)
2551
+ if specs_dir is None:
2552
+ return _specs_directory_missing_error(request_id)
2553
+
2554
+ audit_log(
2555
+ "tool_invocation",
2556
+ tool="authoring",
2557
+ action=action,
2558
+ spec_id=spec_id,
2559
+ assumption_type=assumption_type,
2560
+ )
2561
+
2562
+ metric_key = _metric_name(action)
2563
+ start_time = time.perf_counter()
2564
+ try:
2565
+ result, error = list_assumptions(
2566
+ spec_id=spec_id,
2567
+ assumption_type=assumption_type,
2568
+ specs_dir=specs_dir,
2569
+ )
2570
+ except Exception as exc: # pragma: no cover - defensive guard
2571
+ logger.exception("Unexpected error listing assumptions")
2572
+ _metrics.counter(metric_key, labels={"status": "error"})
2573
+ return asdict(
2574
+ error_response(
2575
+ sanitize_error_message(exc, context="authoring"),
2576
+ error_code=ErrorCode.INTERNAL_ERROR,
2577
+ error_type=ErrorType.INTERNAL,
2578
+ remediation="Check logs for details",
2579
+ request_id=request_id,
2580
+ )
2581
+ )
2582
+
2583
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2584
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2585
+
2586
+ if error:
2587
+ _metrics.counter(metric_key, labels={"status": "error"})
2588
+ if "not found" in error.lower():
2589
+ return asdict(
2590
+ error_response(
2591
+ f"Specification '{spec_id}' not found",
2592
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2593
+ error_type=ErrorType.NOT_FOUND,
2594
+ remediation='Verify the spec ID via spec(action="list")',
2595
+ request_id=request_id,
2596
+ )
2597
+ )
2598
+ return asdict(
2599
+ error_response(
2600
+ f"Failed to list assumptions: {error}",
2601
+ error_code=ErrorCode.INTERNAL_ERROR,
2602
+ error_type=ErrorType.INTERNAL,
2603
+ remediation="Check that the spec exists",
2604
+ request_id=request_id,
2605
+ )
2606
+ )
2607
+
2608
+ warnings: List[str] = []
2609
+ if assumption_type:
2610
+ warnings.append(
2611
+ "assumption_type filter is advisory only; all assumptions are returned"
2612
+ )
2613
+
2614
+ _metrics.counter(metric_key, labels={"status": "success"})
2615
+ return asdict(
2616
+ success_response(
2617
+ data=result or {"spec_id": spec_id, "assumptions": [], "total_count": 0},
2618
+ warnings=warnings or None,
2619
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2620
+ request_id=request_id,
2621
+ )
2622
+ )
2623
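Because the assumption_type filter is advisory only (hence the warning above), callers that need a single category filter the returned list themselves. The per-assumption field names below are assumptions about the core payload:

```python
response_data = {
    "spec_id": "user-auth-2024",  # made-up example
    "assumptions": [
        {"id": "assumption-1", "type": "constraint", "text": "Single-region deployment only"},
        {"id": "assumption-2", "type": "risk", "text": "OAuth provider enforces rate limits"},
    ],
    "total_count": 2,
}

constraints = [a for a in response_data["assumptions"] if a.get("type") == "constraint"]
print(len(constraints))  # 1
```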
+
2624
+
2625
+ def _handle_revision_add(*, config: ServerConfig, **payload: Any) -> dict:
2626
+ request_id = _request_id()
2627
+ action = "revision-add"
2628
+
2629
+ spec_id = payload.get("spec_id")
2630
+ if not isinstance(spec_id, str) or not spec_id.strip():
2631
+ return _validation_error(
2632
+ field="spec_id",
2633
+ action=action,
2634
+ message="Provide a non-empty spec_id parameter",
2635
+ request_id=request_id,
2636
+ code=ErrorCode.MISSING_REQUIRED,
2637
+ )
2638
+ spec_id = spec_id.strip()
2639
+
2640
+ version = payload.get("version")
2641
+ if not isinstance(version, str) or not version.strip():
2642
+ return _validation_error(
2643
+ field="version",
2644
+ action=action,
2645
+ message="Provide the revision version (e.g., 1.1)",
2646
+ request_id=request_id,
2647
+ code=ErrorCode.MISSING_REQUIRED,
2648
+ )
2649
+ version = version.strip()
2650
+
2651
+ changes = payload.get("changes")
2652
+ if not isinstance(changes, str) or not changes.strip():
2653
+ return _validation_error(
2654
+ field="changes",
2655
+ action=action,
2656
+ message="Provide a summary of changes",
2657
+ request_id=request_id,
2658
+ code=ErrorCode.MISSING_REQUIRED,
2659
+ )
2660
+ changes = changes.strip()
2661
+
2662
+ author = payload.get("author")
2663
+ if author is not None and not isinstance(author, str):
2664
+ return _validation_error(
2665
+ field="author",
2666
+ action=action,
2667
+ message="Author must be a string",
2668
+ request_id=request_id,
2669
+ )
2670
+
2671
+ dry_run = payload.get("dry_run", False)
2672
+ if not isinstance(dry_run, bool):
2673
+ return _validation_error(
2674
+ field="dry_run",
2675
+ action=action,
2676
+ message="Expected a boolean value",
2677
+ request_id=request_id,
2678
+ )
2679
+
2680
+ path = payload.get("path")
2681
+ if path is not None and not isinstance(path, str):
2682
+ return _validation_error(
2683
+ field="path",
2684
+ action=action,
2685
+ message="Workspace path must be a string",
2686
+ request_id=request_id,
2687
+ )
2688
+
2689
+ specs_dir = _resolve_specs_dir(config, path)
2690
+ if specs_dir is None:
2691
+ return _specs_directory_missing_error(request_id)
2692
+
2693
+ audit_log(
2694
+ "tool_invocation",
2695
+ tool="authoring",
2696
+ action=action,
2697
+ spec_id=spec_id,
2698
+ version=version,
2699
+ dry_run=dry_run,
2700
+ )
2701
+
2702
+ metric_key = _metric_name(action)
2703
+ if dry_run:
2704
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
2705
+ data = {
2706
+ "spec_id": spec_id,
2707
+ "version": version,
2708
+ "changes": changes,
2709
+ "dry_run": True,
2710
+ "note": "Dry run - no changes made",
2711
+ }
2712
+ if author:
2713
+ data["author"] = author
2714
+ return asdict(
2715
+ success_response(
2716
+ data=data,
2717
+ request_id=request_id,
2718
+ )
2719
+ )
2720
+
2721
+ start_time = time.perf_counter()
2722
+ try:
2723
+ result, error = add_revision(
2724
+ spec_id=spec_id,
2725
+ version=version,
2726
+ changelog=changes,
2727
+ author=author,
2728
+ specs_dir=specs_dir,
2729
+ )
2730
+ except Exception as exc: # pragma: no cover - defensive guard
2731
+ logger.exception("Unexpected error adding revision")
2732
+ _metrics.counter(metric_key, labels={"status": "error"})
2733
+ return asdict(
2734
+ error_response(
2735
+ sanitize_error_message(exc, context="authoring"),
2736
+ error_code=ErrorCode.INTERNAL_ERROR,
2737
+ error_type=ErrorType.INTERNAL,
2738
+ remediation="Check logs for details",
2739
+ request_id=request_id,
2740
+ )
2741
+ )
2742
+
2743
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2744
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2745
+
2746
+ if error:
2747
+ _metrics.counter(metric_key, labels={"status": "error"})
2748
+ if "not found" in error.lower():
2749
+ return asdict(
2750
+ error_response(
2751
+ f"Specification '{spec_id}' not found",
2752
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2753
+ error_type=ErrorType.NOT_FOUND,
2754
+ remediation='Verify the spec ID via spec(action="list")',
2755
+ request_id=request_id,
2756
+ )
2757
+ )
2758
+ return asdict(
2759
+ error_response(
2760
+ f"Failed to add revision: {error}",
2761
+ error_code=ErrorCode.INTERNAL_ERROR,
2762
+ error_type=ErrorType.INTERNAL,
2763
+ remediation="Check that the spec exists",
2764
+ request_id=request_id,
2765
+ )
2766
+ )
2767
+
2768
+ data = {
2769
+ "spec_id": spec_id,
2770
+ "version": version,
2771
+ "changes": changes,
2772
+ "dry_run": False,
2773
+ }
2774
+ if author:
2775
+ data["author"] = author
2776
+ if result and result.get("date"):
2777
+ data["date"] = result["date"]
2778
+
2779
+ _metrics.counter(metric_key, labels={"status": "success"})
2780
+ return asdict(
2781
+ success_response(
2782
+ data=data,
2783
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2784
+ request_id=request_id,
2785
+ )
2786
+ )
2787
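A sketch of a revision-add payload. Note that the tool-level `changes` field is forwarded to `add_revision(changelog=...)`, so callers supply `changes`, not `changelog`; all values are made-up examples.

```python
payload = {
    "spec_id": "user-auth-2024",
    "version": "1.1",
    "changes": "Split phase-2 into backend and frontend tracks",
    "author": "jdoe",    # optional; echoed back in the response data
    "dry_run": True,     # preview the entry without writing it
}
```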
+
2788
+
2789
+ # Validation constants for intake
2790
+ _INTAKE_TITLE_MAX_LEN = 140
2791
+ _INTAKE_DESC_MAX_LEN = 2000
2792
+ _INTAKE_TAG_MAX_LEN = 32
2793
+ _INTAKE_TAG_MAX_COUNT = 20
2794
+ _INTAKE_SOURCE_MAX_LEN = 100
2795
+ _INTAKE_REQUESTER_MAX_LEN = 100
2796
+ _INTAKE_IDEMPOTENCY_KEY_MAX_LEN = 64
2797
+ _INTAKE_PRIORITY_VALUES = ("p0", "p1", "p2", "p3", "p4")
2798
+ _INTAKE_TAG_PATTERN = "^[a-z0-9_-]+$"
2799
+ _TAG_REGEX = re.compile(_INTAKE_TAG_PATTERN)
2800
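A quick check against the tag rules defined above (lowercase alphanumerics, hyphens, and underscores; at most 32 characters per tag and 20 tags per item):

```python
import re

_TAG_RE = re.compile(r"^[a-z0-9_-]+$")

def valid_tag(tag: str) -> bool:
    tag = tag.strip().lower()  # the handler lowercases before matching
    return bool(tag) and len(tag) <= 32 and _TAG_RE.match(tag) is not None

print([valid_tag(t) for t in ("auth", "API-v2", "bad tag", "x" * 33)])
# [True, True, False, False]  ("API-v2" normalizes to "api-v2")
```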
+
2801
+
2802
+ def _handle_intake_add(*, config: ServerConfig, **payload: Any) -> dict:
2803
+ """Add a new intake item to the bikelane queue."""
2804
+ request_id = _request_id()
2805
+ action = "intake-add"
2806
+
2807
+ # Check feature flag
2808
+ blocked = _intake_feature_flag_blocked(request_id)
2809
+ if blocked:
2810
+ return blocked
2811
+
2812
+ # Validate title (required, 1-140 chars)
2813
+ title = payload.get("title")
2814
+ if not isinstance(title, str) or not title.strip():
2815
+ return _validation_error(
2816
+ field="title",
2817
+ action=action,
2818
+ message="Provide a non-empty title (1-140 characters)",
2819
+ request_id=request_id,
2820
+ code=ErrorCode.MISSING_REQUIRED,
2821
+ )
2822
+ title = title.strip()
2823
+ if len(title) > _INTAKE_TITLE_MAX_LEN:
2824
+ return _validation_error(
2825
+ field="title",
2826
+ action=action,
2827
+ message=f"Title exceeds maximum length of {_INTAKE_TITLE_MAX_LEN} characters",
2828
+ request_id=request_id,
2829
+ code=ErrorCode.VALIDATION_ERROR,
2830
+ remediation=f"Shorten title to {_INTAKE_TITLE_MAX_LEN} characters or less",
2831
+ )
2832
+
2833
+ # Validate description (optional, max 2000 chars)
2834
+ description = payload.get("description")
2835
+ if description is not None:
2836
+ if not isinstance(description, str):
2837
+ return _validation_error(
2838
+ field="description",
2839
+ action=action,
2840
+ message="Description must be a string",
2841
+ request_id=request_id,
2842
+ code=ErrorCode.INVALID_FORMAT,
2843
+ )
2844
+ description = description.strip() or None
2845
+ if description and len(description) > _INTAKE_DESC_MAX_LEN:
2846
+ return _validation_error(
2847
+ field="description",
2848
+ action=action,
2849
+ message=f"Description exceeds maximum length of {_INTAKE_DESC_MAX_LEN} characters",
2850
+ request_id=request_id,
2851
+ code=ErrorCode.VALIDATION_ERROR,
2852
+ remediation=f"Shorten description to {_INTAKE_DESC_MAX_LEN} characters or less",
2853
+ )
2854
+
2855
+ # Validate priority (optional, enum p0-p4, default p2)
2856
+ priority = payload.get("priority", "p2")
2857
+ if not isinstance(priority, str):
2858
+ return _validation_error(
2859
+ field="priority",
2860
+ action=action,
2861
+ message="Priority must be a string",
2862
+ request_id=request_id,
2863
+ code=ErrorCode.INVALID_FORMAT,
2864
+ )
2865
+ priority = priority.strip().lower()
2866
+ if priority not in _INTAKE_PRIORITY_VALUES:
2867
+ return _validation_error(
2868
+ field="priority",
2869
+ action=action,
2870
+ message=f"Priority must be one of: {', '.join(_INTAKE_PRIORITY_VALUES)}",
2871
+ request_id=request_id,
2872
+ code=ErrorCode.VALIDATION_ERROR,
2873
+ remediation="Use p0 (highest) through p4 (lowest), default is p2",
2874
+ )
2875
+
2876
+ # Validate tags (optional, max 20 items, each 1-32 chars, lowercase pattern)
2877
+ tags = payload.get("tags", [])
2878
+ if tags is None:
2879
+ tags = []
2880
+ if not isinstance(tags, list):
2881
+ return _validation_error(
2882
+ field="tags",
2883
+ action=action,
2884
+ message="Tags must be a list of strings",
2885
+ request_id=request_id,
2886
+ code=ErrorCode.INVALID_FORMAT,
2887
+ )
2888
+ if len(tags) > _INTAKE_TAG_MAX_COUNT:
2889
+ return _validation_error(
2890
+ field="tags",
2891
+ action=action,
2892
+ message=f"Maximum {_INTAKE_TAG_MAX_COUNT} tags allowed",
2893
+ request_id=request_id,
2894
+ code=ErrorCode.VALIDATION_ERROR,
2895
+ )
2896
+ validated_tags = []
2897
+ for i, tag in enumerate(tags):
2898
+ if not isinstance(tag, str):
2899
+ return _validation_error(
2900
+ field=f"tags[{i}]",
2901
+ action=action,
2902
+ message="Each tag must be a string",
2903
+ request_id=request_id,
2904
+ code=ErrorCode.INVALID_FORMAT,
2905
+ )
2906
+ tag = tag.strip().lower()
2907
+ if not tag:
2908
+ continue
2909
+ if len(tag) > _INTAKE_TAG_MAX_LEN:
2910
+ return _validation_error(
2911
+ field=f"tags[{i}]",
2912
+ action=action,
2913
+ message=f"Tag exceeds maximum length of {_INTAKE_TAG_MAX_LEN} characters",
2914
+ request_id=request_id,
2915
+ code=ErrorCode.VALIDATION_ERROR,
2916
+ )
2917
+ if not _TAG_REGEX.match(tag):
2918
+ return _validation_error(
2919
+ field=f"tags[{i}]",
2920
+ action=action,
2921
+ message=f"Tag must match pattern {_INTAKE_TAG_PATTERN} (lowercase alphanumeric, hyphens, underscores)",
2922
+ request_id=request_id,
2923
+ code=ErrorCode.INVALID_FORMAT,
2924
+ )
2925
+ validated_tags.append(tag)
2926
+ tags = validated_tags
2927
+
2928
+ # Validate source (optional, max 100 chars)
2929
+ source = payload.get("source")
2930
+ if source is not None:
2931
+ if not isinstance(source, str):
2932
+ return _validation_error(
2933
+ field="source",
2934
+ action=action,
2935
+ message="Source must be a string",
2936
+ request_id=request_id,
2937
+ code=ErrorCode.INVALID_FORMAT,
2938
+ )
2939
+ source = source.strip() or None
2940
+ if source and len(source) > _INTAKE_SOURCE_MAX_LEN:
2941
+ return _validation_error(
2942
+ field="source",
2943
+ action=action,
2944
+ message=f"Source exceeds maximum length of {_INTAKE_SOURCE_MAX_LEN} characters",
2945
+ request_id=request_id,
2946
+ code=ErrorCode.VALIDATION_ERROR,
2947
+ )
2948
+
2949
+ # Validate requester (optional, max 100 chars)
2950
+ requester = payload.get("requester")
2951
+ if requester is not None:
2952
+ if not isinstance(requester, str):
2953
+ return _validation_error(
2954
+ field="requester",
2955
+ action=action,
2956
+ message="Requester must be a string",
2957
+ request_id=request_id,
2958
+ code=ErrorCode.INVALID_FORMAT,
2959
+ )
2960
+ requester = requester.strip() or None
2961
+ if requester and len(requester) > _INTAKE_REQUESTER_MAX_LEN:
2962
+ return _validation_error(
2963
+ field="requester",
2964
+ action=action,
2965
+ message=f"Requester exceeds maximum length of {_INTAKE_REQUESTER_MAX_LEN} characters",
2966
+ request_id=request_id,
2967
+ code=ErrorCode.VALIDATION_ERROR,
2968
+ )
2969
+
2970
+ # Validate idempotency_key (optional, max 64 chars)
2971
+ idempotency_key = payload.get("idempotency_key")
2972
+ if idempotency_key is not None:
2973
+ if not isinstance(idempotency_key, str):
2974
+ return _validation_error(
2975
+ field="idempotency_key",
2976
+ action=action,
2977
+ message="Idempotency key must be a string",
2978
+ request_id=request_id,
2979
+ code=ErrorCode.INVALID_FORMAT,
2980
+ )
2981
+ idempotency_key = idempotency_key.strip() or None
2982
+ if idempotency_key and len(idempotency_key) > _INTAKE_IDEMPOTENCY_KEY_MAX_LEN:
2983
+ return _validation_error(
2984
+ field="idempotency_key",
2985
+ action=action,
2986
+ message=f"Idempotency key exceeds maximum length of {_INTAKE_IDEMPOTENCY_KEY_MAX_LEN} characters",
2987
+ request_id=request_id,
2988
+ code=ErrorCode.VALIDATION_ERROR,
2989
+ )
2990
+
2991
+ # Validate dry_run
2992
+ dry_run = payload.get("dry_run", False)
2993
+ if not isinstance(dry_run, bool):
2994
+ return _validation_error(
2995
+ field="dry_run",
2996
+ action=action,
2997
+ message="dry_run must be a boolean",
2998
+ request_id=request_id,
2999
+ code=ErrorCode.INVALID_FORMAT,
3000
+ )
3001
+
3002
+ # Validate path
3003
+ path = payload.get("path")
3004
+ if path is not None and not isinstance(path, str):
3005
+ return _validation_error(
3006
+ field="path",
3007
+ action=action,
3008
+ message="path must be a string",
3009
+ request_id=request_id,
3010
+ code=ErrorCode.INVALID_FORMAT,
3011
+ )
3012
+
3013
+ # Resolve specs directory
3014
+ specs_dir = _resolve_specs_dir(config, path)
3015
+ if specs_dir is None:
3016
+ return _specs_directory_missing_error(request_id)
3017
+
3018
+ # Audit log
3019
+ audit_log(
3020
+ "tool_invocation",
3021
+ tool="authoring",
3022
+ action=action,
3023
+ title=title[:100], # Truncate for logging
3024
+ dry_run=dry_run,
3025
+ )
3026
+
3027
+ metric_key = _metric_name(action)
3028
+ start_time = time.perf_counter()
3029
+
3030
+ try:
3031
+ # Get bikelane_dir from config (allows customization via TOML or env var)
3032
+ bikelane_dir = config.get_bikelane_dir(specs_dir)
3033
+ store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
3034
+ item, was_duplicate, lock_wait_ms = store.add(
3035
+ title=title,
3036
+ description=description,
3037
+ priority=priority,
3038
+ tags=tags,
3039
+ source=source,
3040
+ requester=requester,
3041
+ idempotency_key=idempotency_key,
3042
+ dry_run=dry_run,
3043
+ )
3044
+ except LockAcquisitionError:
3045
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3046
+ _metrics.counter(metric_key, labels={"status": "error"})
3047
+ return asdict(
3048
+ error_response(
3049
+ "Failed to acquire file lock within timeout. Resource is busy.",
3050
+ error_code=ErrorCode.RESOURCE_BUSY,
3051
+ error_type=ErrorType.UNAVAILABLE,
3052
+ remediation="Retry after a moment",
3053
+ request_id=request_id,
3054
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
3055
+ )
3056
+ )
3057
+ except Exception as exc:
3058
+ logger.exception("Unexpected error adding intake item")
3059
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3060
+ _metrics.counter(metric_key, labels={"status": "error"})
3061
+ return asdict(
3062
+ error_response(
3063
+ sanitize_error_message(exc, context="authoring.intake-add"),
3064
+ error_code=ErrorCode.INTERNAL_ERROR,
3065
+ error_type=ErrorType.INTERNAL,
3066
+ remediation="Check logs for details",
3067
+ request_id=request_id,
3068
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
3069
+ )
3070
+ )
3071
+
3072
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3073
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
3074
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
3075
+
3076
+ data = {
3077
+ "item": item.to_dict(),
3078
+ "intake_path": store.intake_path,
3079
+ "was_duplicate": was_duplicate,
3080
+ }
3081
+
3082
+ meta_extra = {}
3083
+ if dry_run:
3084
+ meta_extra["dry_run"] = True
1061
3085
 
1062
- _metrics.counter(metric_key, labels={"status": "success"})
1063
3086
  return asdict(
1064
3087
  success_response(
1065
3088
  data=data,
1066
- warnings=warnings or None,
1067
- telemetry={"duration_ms": round(elapsed_ms, 2)},
3089
+ telemetry={"duration_ms": round(elapsed_ms, 2), "lock_wait_ms": round(lock_wait_ms, 2)},
1068
3090
  request_id=request_id,
3091
+ meta=meta_extra,
1069
3092
  )
1070
3093
  )
1071
3094
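A local sketch of the idempotency contract surfaced via `was_duplicate`. This is not the package's IntakeStore; it only illustrates that resubmitting with the same idempotency_key should return the original item instead of creating a second one.

```python
_store = {}

def add_intake(title: str, idempotency_key=None):
    if idempotency_key and idempotency_key in _store:
        return _store[idempotency_key], True          # was_duplicate=True
    item = {"title": title, "status": "new"}
    if idempotency_key:
        _store[idempotency_key] = item
    return item, False

item, dup = add_intake("Add SSO support", idempotency_key="ticket-4821")
again, dup2 = add_intake("Add SSO support", idempotency_key="ticket-4821")
print(dup, dup2, item is again)  # False True True
```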
 
1072
3095
 
1073
- def _handle_assumption_list(*, config: ServerConfig, **payload: Any) -> dict:
3096
+ # Intake list constants (from intake.py)
3097
+ _INTAKE_LIST_DEFAULT_LIMIT = 50
3098
+ _INTAKE_LIST_MAX_LIMIT = 200
3099
+
3100
+
3101
+ def _handle_intake_list(*, config: ServerConfig, **payload: Any) -> dict:
3102
+ """List intake items with status='new' in FIFO order with pagination."""
1074
3103
  request_id = _request_id()
1075
- action = "assumption-list"
3104
+ action = "intake-list"
1076
3105
 
1077
- spec_id = payload.get("spec_id")
1078
- if not isinstance(spec_id, str) or not spec_id.strip():
1079
- return _validation_error(
1080
- field="spec_id",
1081
- action=action,
1082
- message="Provide a non-empty spec_id parameter",
1083
- request_id=request_id,
1084
- code=ErrorCode.MISSING_REQUIRED,
1085
- )
1086
- spec_id = spec_id.strip()
3106
+ # Check feature flag
3107
+ blocked = _intake_feature_flag_blocked(request_id)
3108
+ if blocked:
3109
+ return blocked
1087
3110
 
1088
- assumption_type = payload.get("assumption_type")
1089
- if assumption_type is not None and assumption_type not in ASSUMPTION_TYPES:
1090
- return _validation_error(
1091
- field="assumption_type",
1092
- action=action,
1093
- message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
1094
- request_id=request_id,
1095
- )
3111
+ # Validate limit (optional, default 50, range 1-200)
3112
+ limit = payload.get("limit", _INTAKE_LIST_DEFAULT_LIMIT)
3113
+ if limit is not None:
3114
+ if not isinstance(limit, int):
3115
+ return _validation_error(
3116
+ field="limit",
3117
+ action=action,
3118
+ message="limit must be an integer",
3119
+ request_id=request_id,
3120
+ code=ErrorCode.INVALID_FORMAT,
3121
+ )
3122
+ if limit < 1 or limit > _INTAKE_LIST_MAX_LIMIT:
3123
+ return _validation_error(
3124
+ field="limit",
3125
+ action=action,
3126
+ message=f"limit must be between 1 and {_INTAKE_LIST_MAX_LIMIT}",
3127
+ request_id=request_id,
3128
+ code=ErrorCode.VALIDATION_ERROR,
3129
+ remediation=f"Use a value between 1 and {_INTAKE_LIST_MAX_LIMIT} (default: {_INTAKE_LIST_DEFAULT_LIMIT})",
3130
+ )
3131
+
3132
+ # Validate cursor (optional string)
3133
+ cursor = payload.get("cursor")
3134
+ if cursor is not None:
3135
+ if not isinstance(cursor, str):
3136
+ return _validation_error(
3137
+ field="cursor",
3138
+ action=action,
3139
+ message="cursor must be a string",
3140
+ request_id=request_id,
3141
+ code=ErrorCode.INVALID_FORMAT,
3142
+ )
3143
+ cursor = cursor.strip() or None
1096
3144
 
3145
+ # Validate path (optional workspace override)
1097
3146
  path = payload.get("path")
1098
3147
  if path is not None and not isinstance(path, str):
1099
3148
  return _validation_error(
1100
3149
  field="path",
1101
3150
  action=action,
1102
- message="Workspace path must be a string",
3151
+ message="path must be a string",
1103
3152
  request_id=request_id,
3153
+ code=ErrorCode.INVALID_FORMAT,
1104
3154
  )
1105
3155
 
3156
+ # Resolve specs directory
1106
3157
  specs_dir = _resolve_specs_dir(config, path)
1107
3158
  if specs_dir is None:
1108
3159
  return _specs_directory_missing_error(request_id)
1109
3160
 
3161
+ # Audit log
1110
3162
  audit_log(
1111
3163
  "tool_invocation",
1112
3164
  tool="authoring",
1113
3165
  action=action,
1114
- spec_id=spec_id,
1115
- assumption_type=assumption_type,
3166
+ limit=limit,
3167
+ has_cursor=cursor is not None,
1116
3168
  )
1117
3169
 
1118
3170
  metric_key = _metric_name(action)
1119
3171
  start_time = time.perf_counter()
3172
+
1120
3173
  try:
1121
- result, error = list_assumptions(
1122
- spec_id=spec_id,
1123
- assumption_type=assumption_type,
1124
- specs_dir=specs_dir,
1125
- )
1126
- except Exception as exc: # pragma: no cover - defensive guard
1127
- logger.exception("Unexpected error listing assumptions")
3174
+ # Get bikelane_dir from config (allows customization via TOML or env var)
3175
+ bikelane_dir = config.get_bikelane_dir(specs_dir)
3176
+ store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
3177
+ items, total_count, next_cursor, has_more, lock_wait_ms = store.list_new(
3178
+ cursor=cursor,
3179
+ limit=limit,
3180
+ )
3181
+ except LockAcquisitionError:
3182
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
1128
3183
  _metrics.counter(metric_key, labels={"status": "error"})
1129
3184
  return asdict(
1130
3185
  error_response(
1131
- sanitize_error_message(exc, context="authoring"),
1132
- error_code=ErrorCode.INTERNAL_ERROR,
1133
- error_type=ErrorType.INTERNAL,
1134
- remediation="Check logs for details",
3186
+ "Failed to acquire file lock within timeout. Resource is busy.",
3187
+ error_code=ErrorCode.RESOURCE_BUSY,
3188
+ error_type=ErrorType.UNAVAILABLE,
3189
+ remediation="Retry after a moment",
1135
3190
  request_id=request_id,
3191
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1136
3192
  )
1137
3193
  )
1138
-
1139
- elapsed_ms = (time.perf_counter() - start_time) * 1000
1140
- _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
1141
-
1142
- if error:
3194
+ except Exception as exc:
3195
+ logger.exception("Unexpected error listing intake items")
3196
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
1143
3197
  _metrics.counter(metric_key, labels={"status": "error"})
1144
- if "not found" in error.lower():
1145
- return asdict(
1146
- error_response(
1147
- f"Specification '{spec_id}' not found",
1148
- error_code=ErrorCode.SPEC_NOT_FOUND,
1149
- error_type=ErrorType.NOT_FOUND,
1150
- remediation='Verify the spec ID via spec(action="list")',
1151
- request_id=request_id,
1152
- )
1153
- )
1154
3198
  return asdict(
1155
3199
  error_response(
1156
- f"Failed to list assumptions: {error}",
3200
+ sanitize_error_message(exc, context="authoring.intake-list"),
1157
3201
  error_code=ErrorCode.INTERNAL_ERROR,
1158
3202
  error_type=ErrorType.INTERNAL,
1159
- remediation="Check that the spec exists",
3203
+ remediation="Check logs for details",
1160
3204
  request_id=request_id,
3205
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1161
3206
  )
1162
3207
  )
1163
3208
 
1164
- warnings: List[str] = []
1165
- if assumption_type:
1166
- warnings.append(
1167
- "assumption_type filter is advisory only; all assumptions are returned"
1168
- )
1169
-
3209
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3210
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
1170
3211
  _metrics.counter(metric_key, labels={"status": "success"})
3212
+
3213
+ data = {
3214
+ "items": [item.to_dict() for item in items],
3215
+ "total_count": total_count,
3216
+ "intake_path": store.intake_path,
3217
+ }
3218
+
3219
+ # Build pagination metadata
3220
+ pagination = None
3221
+ if has_more or cursor is not None:
3222
+ pagination = {
3223
+ "cursor": next_cursor,
3224
+ "has_more": has_more,
3225
+ "page_size": limit,
3226
+ }
3227
+
1171
3228
  return asdict(
1172
3229
  success_response(
1173
- data=result or {"spec_id": spec_id, "assumptions": [], "total_count": 0},
1174
- warnings=warnings or None,
1175
- telemetry={"duration_ms": round(elapsed_ms, 2)},
3230
+ data=data,
3231
+ pagination=pagination,
3232
+ telemetry={
3233
+ "duration_ms": round(elapsed_ms, 2),
3234
+ "lock_wait_ms": round(lock_wait_ms, 2),
3235
+ },
1176
3236
  request_id=request_id,
1177
3237
  )
1178
3238
  )
1179
3239
 
1180
3240
 
1181
- def _handle_revision_add(*, config: ServerConfig, **payload: Any) -> dict:
3241
+ # Intake dismiss constants
3242
+ _INTAKE_DISMISS_REASON_MAX_LEN = 200
3243
+
3244
+
3245
+ def _handle_intake_dismiss(*, config: ServerConfig, **payload: Any) -> dict:
3246
+ """Dismiss an intake item by changing its status to 'dismissed'."""
1182
3247
  request_id = _request_id()
1183
- action = "revision-add"
3248
+ action = "intake-dismiss"
1184
3249
 
1185
- spec_id = payload.get("spec_id")
1186
- if not isinstance(spec_id, str) or not spec_id.strip():
1187
- return _validation_error(
1188
- field="spec_id",
1189
- action=action,
1190
- message="Provide a non-empty spec_id parameter",
1191
- request_id=request_id,
1192
- code=ErrorCode.MISSING_REQUIRED,
1193
- )
1194
- spec_id = spec_id.strip()
3250
+ # Check feature flag
3251
+ blocked = _intake_feature_flag_blocked(request_id)
3252
+ if blocked:
3253
+ return blocked
1195
3254
 
1196
- version = payload.get("version")
1197
- if not isinstance(version, str) or not version.strip():
3255
+ # Validate intake_id (required, must match pattern)
3256
+ intake_id = payload.get("intake_id")
3257
+ if not isinstance(intake_id, str) or not intake_id.strip():
1198
3258
  return _validation_error(
1199
- field="version",
3259
+ field="intake_id",
1200
3260
  action=action,
1201
- message="Provide the revision version (e.g., 1.1)",
3261
+ message="Provide a valid intake_id",
1202
3262
  request_id=request_id,
1203
3263
  code=ErrorCode.MISSING_REQUIRED,
1204
3264
  )
1205
- version = version.strip()
1206
-
1207
- changes = payload.get("changes")
1208
- if not isinstance(changes, str) or not changes.strip():
3265
+ intake_id = intake_id.strip()
3266
+ if not INTAKE_ID_PATTERN.match(intake_id):
1209
3267
  return _validation_error(
1210
- field="changes",
3268
+ field="intake_id",
1211
3269
  action=action,
1212
- message="Provide a summary of changes",
3270
+ message="intake_id must match pattern intake-<uuid>",
1213
3271
  request_id=request_id,
1214
- code=ErrorCode.MISSING_REQUIRED,
3272
+ code=ErrorCode.INVALID_FORMAT,
3273
+ remediation="Use format: intake-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
1215
3274
  )
1216
- changes = changes.strip()
1217
3275
 
1218
- author = payload.get("author")
1219
- if author is not None and not isinstance(author, str):
1220
- return _validation_error(
1221
- field="author",
1222
- action=action,
1223
- message="Author must be a string",
1224
- request_id=request_id,
1225
- )
3276
+ # Validate reason (optional, max 200 chars)
3277
+ reason = payload.get("reason")
3278
+ if reason is not None:
3279
+ if not isinstance(reason, str):
3280
+ return _validation_error(
3281
+ field="reason",
3282
+ action=action,
3283
+ message="reason must be a string",
3284
+ request_id=request_id,
3285
+ code=ErrorCode.INVALID_FORMAT,
3286
+ )
3287
+ reason = reason.strip() or None
3288
+ if reason and len(reason) > _INTAKE_DISMISS_REASON_MAX_LEN:
3289
+ return _validation_error(
3290
+ field="reason",
3291
+ action=action,
3292
+ message=f"reason exceeds maximum length of {_INTAKE_DISMISS_REASON_MAX_LEN} characters",
3293
+ request_id=request_id,
3294
+ code=ErrorCode.VALIDATION_ERROR,
3295
+ remediation=f"Shorten reason to {_INTAKE_DISMISS_REASON_MAX_LEN} characters or less",
3296
+ )
1226
3297
 
3298
+ # Validate dry_run
1227
3299
  dry_run = payload.get("dry_run", False)
1228
3300
  if not isinstance(dry_run, bool):
1229
3301
  return _validation_error(
1230
3302
  field="dry_run",
1231
3303
  action=action,
1232
- message="Expected a boolean value",
3304
+ message="dry_run must be a boolean",
1233
3305
  request_id=request_id,
3306
+ code=ErrorCode.INVALID_FORMAT,
1234
3307
  )
1235
3308
 
3309
+ # Validate path
1236
3310
  path = payload.get("path")
1237
3311
  if path is not None and not isinstance(path, str):
1238
3312
  return _validation_error(
1239
3313
  field="path",
1240
3314
  action=action,
1241
- message="Workspace path must be a string",
3315
+ message="path must be a string",
1242
3316
  request_id=request_id,
3317
+ code=ErrorCode.INVALID_FORMAT,
1243
3318
  )
1244
3319
 
3320
+ # Resolve specs directory
1245
3321
  specs_dir = _resolve_specs_dir(config, path)
1246
3322
  if specs_dir is None:
1247
3323
  return _specs_directory_missing_error(request_id)
1248
3324
 
3325
+ # Audit log
1249
3326
  audit_log(
1250
3327
  "tool_invocation",
1251
3328
  tool="authoring",
1252
3329
  action=action,
1253
- spec_id=spec_id,
1254
- version=version,
3330
+ intake_id=intake_id,
1255
3331
  dry_run=dry_run,
1256
3332
  )
1257
3333
 
1258
3334
  metric_key = _metric_name(action)
1259
- if dry_run:
1260
- _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
1261
- data = {
1262
- "spec_id": spec_id,
1263
- "version": version,
1264
- "changes": changes,
1265
- "dry_run": True,
1266
- "note": "Dry run - no changes made",
1267
- }
1268
- if author:
1269
- data["author"] = author
3335
+ start_time = time.perf_counter()
3336
+
3337
+ try:
3338
+ # Get bikelane_dir from config (allows customization via TOML or env var)
3339
+ bikelane_dir = config.get_bikelane_dir(specs_dir)
3340
+ store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
3341
+ item, lock_wait_ms = store.dismiss(
3342
+ intake_id=intake_id,
3343
+ reason=reason,
3344
+ dry_run=dry_run,
3345
+ )
3346
+ except LockAcquisitionError:
3347
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3348
+ _metrics.counter(metric_key, labels={"status": "error"})
1270
3349
  return asdict(
1271
- success_response(
1272
- data=data,
3350
+ error_response(
3351
+ "Failed to acquire file lock within timeout. Resource is busy.",
3352
+ error_code=ErrorCode.RESOURCE_BUSY,
3353
+ error_type=ErrorType.UNAVAILABLE,
3354
+ remediation="Retry after a moment",
1273
3355
  request_id=request_id,
3356
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1274
3357
  )
1275
3358
  )
1276
-
1277
- start_time = time.perf_counter()
1278
- try:
1279
- result, error = add_revision(
1280
- spec_id=spec_id,
1281
- version=version,
1282
- changelog=changes,
1283
- author=author,
1284
- specs_dir=specs_dir,
1285
- )
1286
- except Exception as exc: # pragma: no cover - defensive guard
1287
- logger.exception("Unexpected error adding revision")
3359
+ except Exception as exc:
3360
+ logger.exception("Unexpected error dismissing intake item")
3361
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
1288
3362
  _metrics.counter(metric_key, labels={"status": "error"})
1289
3363
  return asdict(
1290
3364
  error_response(
1291
- sanitize_error_message(exc, context="authoring"),
3365
+ sanitize_error_message(exc, context="authoring.intake-dismiss"),
1292
3366
  error_code=ErrorCode.INTERNAL_ERROR,
1293
3367
  error_type=ErrorType.INTERNAL,
1294
3368
  remediation="Check logs for details",
1295
3369
  request_id=request_id,
3370
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1296
3371
  )
1297
3372
  )
1298
3373
 
1299
3374
  elapsed_ms = (time.perf_counter() - start_time) * 1000
1300
- _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
1301
3375
 
1302
- if error:
1303
- _metrics.counter(metric_key, labels={"status": "error"})
1304
- if "not found" in error.lower():
1305
- return asdict(
1306
- error_response(
1307
- f"Specification '{spec_id}' not found",
1308
- error_code=ErrorCode.SPEC_NOT_FOUND,
1309
- error_type=ErrorType.NOT_FOUND,
1310
- remediation='Verify the spec ID via spec(action="list")',
1311
- request_id=request_id,
1312
- )
1313
- )
3376
+ # Handle not found case
3377
+ if item is None:
3378
+ _metrics.counter(metric_key, labels={"status": "not_found"})
1314
3379
  return asdict(
1315
3380
  error_response(
1316
- f"Failed to add revision: {error}",
1317
- error_code=ErrorCode.INTERNAL_ERROR,
1318
- error_type=ErrorType.INTERNAL,
1319
- remediation="Check that the spec exists",
3381
+ f"Intake item not found: {intake_id}",
3382
+ error_code=ErrorCode.NOT_FOUND,
3383
+ error_type=ErrorType.NOT_FOUND,
3384
+ remediation="Verify the intake_id exists using intake-list action",
1320
3385
  request_id=request_id,
3386
+ telemetry={"duration_ms": round(elapsed_ms, 2), "lock_wait_ms": round(lock_wait_ms, 2)},
1321
3387
  )
1322
3388
  )
1323
3389
 
3390
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
3391
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
3392
+
1324
3393
  data = {
1325
- "spec_id": spec_id,
1326
- "version": version,
1327
- "changes": changes,
1328
- "dry_run": False,
3394
+ "item": item.to_dict(),
3395
+ "intake_path": store.intake_path,
1329
3396
  }
1330
- if author:
1331
- data["author"] = author
1332
- if result and result.get("date"):
1333
- data["date"] = result["date"]
1334
3397
 
1335
- _metrics.counter(metric_key, labels={"status": "success"})
3398
+ meta_extra = {}
3399
+ if dry_run:
3400
+ meta_extra["dry_run"] = True
3401
+
1336
3402
  return asdict(
1337
3403
  success_response(
1338
3404
  data=data,
1339
- telemetry={"duration_ms": round(elapsed_ms, 2)},
3405
+ telemetry={
3406
+ "duration_ms": round(elapsed_ms, 2),
3407
+ "lock_wait_ms": round(lock_wait_ms, 2),
3408
+ },
1340
3409
  request_id=request_id,
3410
+ meta=meta_extra,
1341
3411
  )
1342
3412
  )
1343
3413
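The dismiss handler requires IDs shaped like intake-<uuid>. A caller-side pre-check can lean on the standard uuid module; the real INTAKE_ID_PATTERN lives in the intake module and may be stricter, so treat this as an approximation.

```python
import uuid

def looks_like_intake_id(value: str) -> bool:
    prefix, _, rest = value.partition("-")
    if prefix != "intake" or not rest:
        return False
    try:
        uuid.UUID(rest)
    except ValueError:
        return False
    return True

print(looks_like_intake_id(f"intake-{uuid.uuid4()}"))  # True
print(looks_like_intake_id("intake-123"))              # False
```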
 
@@ -1363,12 +3433,48 @@ _AUTHORING_ROUTER = ActionRouter(
1363
3433
  summary=_ACTION_SUMMARY["spec-update-frontmatter"],
1364
3434
  aliases=("spec_update_frontmatter",),
1365
3435
  ),
3436
+ ActionDefinition(
3437
+ name="spec-find-replace",
3438
+ handler=_handle_spec_find_replace,
3439
+ summary=_ACTION_SUMMARY["spec-find-replace"],
3440
+ aliases=("spec_find_replace",),
3441
+ ),
3442
+ ActionDefinition(
3443
+ name="spec-rollback",
3444
+ handler=_handle_spec_rollback,
3445
+ summary=_ACTION_SUMMARY["spec-rollback"],
3446
+ aliases=("spec_rollback",),
3447
+ ),
1366
3448
  ActionDefinition(
1367
3449
  name="phase-add",
1368
3450
  handler=_handle_phase_add,
1369
3451
  summary=_ACTION_SUMMARY["phase-add"],
1370
3452
  aliases=("phase_add",),
1371
3453
  ),
3454
+ ActionDefinition(
3455
+ name="phase-add-bulk",
3456
+ handler=_handle_phase_add_bulk,
3457
+ summary=_ACTION_SUMMARY["phase-add-bulk"],
3458
+ aliases=("phase_add_bulk",),
3459
+ ),
3460
+ ActionDefinition(
3461
+ name="phase-template",
3462
+ handler=_handle_phase_template,
3463
+ summary=_ACTION_SUMMARY["phase-template"],
3464
+ aliases=("phase_template",),
3465
+ ),
3466
+ ActionDefinition(
3467
+ name="phase-move",
3468
+ handler=_handle_phase_move,
3469
+ summary=_ACTION_SUMMARY["phase-move"],
3470
+ aliases=("phase_move",),
3471
+ ),
3472
+ ActionDefinition(
3473
+ name="phase-update-metadata",
3474
+ handler=_handle_phase_update_metadata,
3475
+ summary=_ACTION_SUMMARY["phase-update-metadata"],
3476
+ aliases=("phase_update_metadata",),
3477
+ ),
1372
3478
  ActionDefinition(
1373
3479
  name="phase-remove",
1374
3480
  handler=_handle_phase_remove,
@@ -1393,6 +3499,24 @@ _AUTHORING_ROUTER = ActionRouter(
1393
3499
  summary=_ACTION_SUMMARY["revision-add"],
1394
3500
  aliases=("revision_add",),
1395
3501
  ),
3502
+ ActionDefinition(
3503
+ name="intake-add",
3504
+ handler=_handle_intake_add,
3505
+ summary=_ACTION_SUMMARY["intake-add"],
3506
+ aliases=("intake_add",),
3507
+ ),
3508
+ ActionDefinition(
3509
+ name="intake-list",
3510
+ handler=_handle_intake_list,
3511
+ summary=_ACTION_SUMMARY["intake-list"],
3512
+ aliases=("intake_list",),
3513
+ ),
3514
+ ActionDefinition(
3515
+ name="intake-dismiss",
3516
+ handler=_handle_intake_dismiss,
3517
+ summary=_ACTION_SUMMARY["intake-dismiss"],
3518
+ aliases=("intake_dismiss",),
3519
+ ),
1396
3520
  ],
1397
3521
  )
1398
3522
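Each ActionDefinition above registers an underscore alias (e.g. intake_add for intake-add), so both spellings reach the same handler. A minimal sketch of that normalization convention; ActionRouter's actual lookup may differ.

```python
def canonical_action(name: str) -> str:
    return name.strip().lower().replace("_", "-")

assert canonical_action("intake_add") == "intake-add"
assert canonical_action("phase_update_metadata") == "phase-update-metadata"
```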
 
@@ -1430,6 +3554,7 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1430
3554
  name: Optional[str] = None,
1431
3555
  template: Optional[str] = None,
1432
3556
  category: Optional[str] = None,
3557
+ mission: Optional[str] = None,
1433
3558
  template_action: Optional[str] = None,
1434
3559
  template_name: Optional[str] = None,
1435
3560
  key: Optional[str] = None,
@@ -1447,8 +3572,23 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1447
3572
  author: Optional[str] = None,
1448
3573
  version: Optional[str] = None,
1449
3574
  changes: Optional[str] = None,
3575
+ tasks: Optional[List[Dict[str, Any]]] = None,
3576
+ phase: Optional[Dict[str, Any]] = None,
3577
+ metadata_defaults: Optional[Dict[str, Any]] = None,
1450
3578
  dry_run: bool = False,
1451
3579
  path: Optional[str] = None,
3580
+ # spec-find-replace parameters
3581
+ find: Optional[str] = None,
3582
+ replace: Optional[str] = None,
3583
+ scope: Optional[str] = None,
3584
+ use_regex: bool = False,
3585
+ case_sensitive: bool = True,
3586
+ # intake parameters
3587
+ priority: Optional[str] = None,
3588
+ tags: Optional[List[str]] = None,
3589
+ source: Optional[str] = None,
3590
+ requester: Optional[str] = None,
3591
+ idempotency_key: Optional[str] = None,
1452
3592
  ) -> dict:
1453
3593
  """Execute authoring workflows via the action router."""
1454
3594
 
@@ -1457,6 +3597,7 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1457
3597
  "name": name,
1458
3598
  "template": template,
1459
3599
  "category": category,
3600
+ "mission": mission,
1460
3601
  "template_action": template_action,
1461
3602
  "template_name": template_name,
1462
3603
  "key": key,
@@ -1474,8 +3615,23 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1474
3615
  "author": author,
1475
3616
  "version": version,
1476
3617
  "changes": changes,
3618
+ "tasks": tasks,
3619
+ "phase": phase,
3620
+ "metadata_defaults": metadata_defaults,
1477
3621
  "dry_run": dry_run,
1478
3622
  "path": path,
3623
+ # spec-find-replace parameters
3624
+ "find": find,
3625
+ "replace": replace,
3626
+ "scope": scope,
3627
+ "use_regex": use_regex,
3628
+ "case_sensitive": case_sensitive,
3629
+ # intake parameters
3630
+ "priority": priority,
3631
+ "tags": tags,
3632
+ "source": source,
3633
+ "requester": requester,
3634
+ "idempotency_key": idempotency_key,
1479
3635
  }
1480
3636
  return _dispatch_authoring_action(action=action, payload=payload, config=config)
1481
3637
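The tool packs every keyword argument into a single payload dict and dispatches on action, leaving each handler to read only the keys it needs and ignore the rest. A stand-alone sketch of that shape (the real entry point is `_dispatch_authoring_action`; the names below are stand-ins):

```python
from typing import Any, Callable, Dict

HANDLERS: Dict[str, Callable[..., dict]] = {
    "intake-add": lambda **p: {"title": p.get("title"), "priority": p.get("priority") or "p2"},
}

def dispatch(action: str, payload: Dict[str, Any]) -> dict:
    handler = HANDLERS.get(action.strip().lower().replace("_", "-"))
    if handler is None:
        return {"error": f"Unknown action: {action}"}
    return handler(**payload)

print(dispatch("intake_add", {"title": "Add SSO support", "priority": None, "tags": ["auth"]}))
# {'title': 'Add SSO support', 'priority': 'p2'}
```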