foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/__init__.py +0 -13
  3. foundry_mcp/cli/commands/plan.py +10 -3
  4. foundry_mcp/cli/commands/review.py +19 -4
  5. foundry_mcp/cli/commands/session.py +1 -8
  6. foundry_mcp/cli/commands/specs.py +38 -208
  7. foundry_mcp/cli/context.py +39 -0
  8. foundry_mcp/cli/output.py +3 -3
  9. foundry_mcp/config.py +615 -11
  10. foundry_mcp/core/ai_consultation.py +146 -9
  11. foundry_mcp/core/batch_operations.py +1196 -0
  12. foundry_mcp/core/discovery.py +7 -7
  13. foundry_mcp/core/error_store.py +2 -2
  14. foundry_mcp/core/intake.py +933 -0
  15. foundry_mcp/core/llm_config.py +28 -2
  16. foundry_mcp/core/metrics_store.py +2 -2
  17. foundry_mcp/core/naming.py +25 -2
  18. foundry_mcp/core/progress.py +70 -0
  19. foundry_mcp/core/prometheus.py +0 -13
  20. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  21. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  22. foundry_mcp/core/prompts/plan_review.py +5 -1
  23. foundry_mcp/core/providers/__init__.py +12 -0
  24. foundry_mcp/core/providers/base.py +39 -0
  25. foundry_mcp/core/providers/claude.py +51 -48
  26. foundry_mcp/core/providers/codex.py +70 -60
  27. foundry_mcp/core/providers/cursor_agent.py +25 -47
  28. foundry_mcp/core/providers/detectors.py +34 -7
  29. foundry_mcp/core/providers/gemini.py +69 -58
  30. foundry_mcp/core/providers/opencode.py +101 -47
  31. foundry_mcp/core/providers/package-lock.json +4 -4
  32. foundry_mcp/core/providers/package.json +1 -1
  33. foundry_mcp/core/providers/validation.py +128 -0
  34. foundry_mcp/core/research/__init__.py +68 -0
  35. foundry_mcp/core/research/memory.py +528 -0
  36. foundry_mcp/core/research/models.py +1220 -0
  37. foundry_mcp/core/research/providers/__init__.py +40 -0
  38. foundry_mcp/core/research/providers/base.py +242 -0
  39. foundry_mcp/core/research/providers/google.py +507 -0
  40. foundry_mcp/core/research/providers/perplexity.py +442 -0
  41. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  42. foundry_mcp/core/research/providers/tavily.py +383 -0
  43. foundry_mcp/core/research/workflows/__init__.py +25 -0
  44. foundry_mcp/core/research/workflows/base.py +298 -0
  45. foundry_mcp/core/research/workflows/chat.py +271 -0
  46. foundry_mcp/core/research/workflows/consensus.py +539 -0
  47. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  48. foundry_mcp/core/research/workflows/ideate.py +682 -0
  49. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  50. foundry_mcp/core/responses.py +690 -0
  51. foundry_mcp/core/spec.py +2439 -236
  52. foundry_mcp/core/task.py +1205 -31
  53. foundry_mcp/core/testing.py +512 -123
  54. foundry_mcp/core/validation.py +319 -43
  55. foundry_mcp/dashboard/components/charts.py +0 -57
  56. foundry_mcp/dashboard/launcher.py +11 -0
  57. foundry_mcp/dashboard/views/metrics.py +25 -35
  58. foundry_mcp/dashboard/views/overview.py +1 -65
  59. foundry_mcp/resources/specs.py +25 -25
  60. foundry_mcp/schemas/intake-schema.json +89 -0
  61. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  62. foundry_mcp/server.py +0 -14
  63. foundry_mcp/tools/unified/__init__.py +39 -18
  64. foundry_mcp/tools/unified/authoring.py +2371 -248
  65. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  66. foundry_mcp/tools/unified/environment.py +434 -32
  67. foundry_mcp/tools/unified/error.py +18 -1
  68. foundry_mcp/tools/unified/lifecycle.py +8 -0
  69. foundry_mcp/tools/unified/plan.py +133 -2
  70. foundry_mcp/tools/unified/provider.py +0 -40
  71. foundry_mcp/tools/unified/research.py +1283 -0
  72. foundry_mcp/tools/unified/review.py +374 -17
  73. foundry_mcp/tools/unified/review_helpers.py +16 -1
  74. foundry_mcp/tools/unified/server.py +9 -24
  75. foundry_mcp/tools/unified/spec.py +367 -0
  76. foundry_mcp/tools/unified/task.py +1664 -30
  77. foundry_mcp/tools/unified/test.py +69 -8
  78. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
  79. foundry_mcp-0.8.10.dist-info/RECORD +153 -0
  80. foundry_mcp/cli/flags.py +0 -266
  81. foundry_mcp/core/feature_flags.py +0 -592
  82. foundry_mcp-0.3.3.dist-info/RECORD +0 -135
  83. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  84. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  85. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
@@ -3,6 +3,7 @@
  from __future__ import annotations

  import logging
+ import re
  import time
  from dataclasses import asdict
  from pathlib import Path
@@ -12,6 +13,7 @@ from mcp.server.fastmcp import FastMCP

  from foundry_mcp.config import ServerConfig
  from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
+ from foundry_mcp.core.intake import IntakeStore, LockAcquisitionError, INTAKE_ID_PATTERN
  from foundry_mcp.core.naming import canonical_tool
  from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
  from foundry_mcp.core.responses import (
@@ -24,17 +26,27 @@ from foundry_mcp.core.responses import (
  from foundry_mcp.core.spec import (
  ASSUMPTION_TYPES,
  CATEGORIES,
+ PHASE_TEMPLATES,
  TEMPLATES,
  add_assumption,
  add_phase,
+ add_phase_bulk,
  add_revision,
+ apply_phase_template,
  create_spec,
+ find_replace_in_spec,
  find_specs_directory,
+ generate_spec_data,
+ get_phase_template_structure,
  list_assumptions,
  load_spec,
+ move_phase,
  remove_phase,
+ rollback_spec,
  update_frontmatter,
+ update_phase_metadata,
  )
+ from foundry_mcp.core.validation import validate_spec
  from foundry_mcp.tools.unified.router import (
  ActionDefinition,
  ActionRouter,
@@ -48,11 +60,20 @@ _ACTION_SUMMARY = {
  "spec-create": "Scaffold a new SDD specification",
  "spec-template": "List/show/apply spec templates",
  "spec-update-frontmatter": "Update a top-level metadata field",
+ "spec-find-replace": "Find and replace text across spec titles and descriptions",
+ "spec-rollback": "Restore a spec from a backup timestamp",
  "phase-add": "Add a new phase under spec-root with verification scaffolding",
+ "phase-add-bulk": "Add a phase with pre-defined tasks in a single atomic operation",
+ "phase-template": "List/show/apply phase templates to add pre-configured phases",
+ "phase-move": "Reorder a phase within spec-root children",
+ "phase-update-metadata": "Update metadata fields of an existing phase",
  "phase-remove": "Remove an existing phase (and optionally dependents)",
  "assumption-add": "Append an assumption entry to spec metadata",
  "assumption-list": "List recorded assumptions for a spec",
  "revision-add": "Record a revision entry in the spec history",
+ "intake-add": "Capture a new work idea in the bikelane intake queue",
+ "intake-list": "List new intake items awaiting triage in FIFO order",
+ "intake-dismiss": "Dismiss an intake item from the triage queue",
  }


@@ -159,7 +180,7 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  code=ErrorCode.MISSING_REQUIRED,
  )

- template = payload.get("template") or "medium"
+ template = payload.get("template") or "empty"
  if not isinstance(template, str):
  return _validation_error(
  field="template",
@@ -168,14 +189,14 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  request_id=request_id,
  code=ErrorCode.INVALID_FORMAT,
  )
- template = template.strip() or "medium"
+ template = template.strip() or "empty"
  if template not in TEMPLATES:
  return _validation_error(
  field="template",
  action=action,
- message=f"Template must be one of: {', '.join(TEMPLATES)}",
+ message=f"Only 'empty' template is supported. Use phase templates to add structure.",
  request_id=request_id,
- remediation=f"Use one of: {', '.join(TEMPLATES)}",
+ remediation="Use template='empty' and add phases via phase-add-bulk or phase-template apply",
  )

  category = payload.get("category") or "implementation"
@@ -197,6 +218,16 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  remediation=f"Use one of: {', '.join(CATEGORIES)}",
  )

+ mission = payload.get("mission")
+ if mission is not None and not isinstance(mission, str):
+ return _validation_error(
+ field="mission",
+ action=action,
+ message="mission must be a string",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ )
+
  dry_run = payload.get("dry_run", False)
  if dry_run is not None and not isinstance(dry_run, bool):
  return _validation_error(
@@ -222,14 +253,49 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  return _specs_directory_missing_error(request_id)

  if dry_run:
+ # Generate spec data for preflight validation
+ spec_data, gen_error = generate_spec_data(
+ name=name.strip(),
+ template=template,
+ category=category,
+ mission=mission,
+ )
+ if gen_error:
+ return _validation_error(
+ field="spec",
+ action=action,
+ message=gen_error,
+ request_id=request_id,
+ code=ErrorCode.VALIDATION_ERROR,
+ )
+
+ # Run full validation on generated spec
+ validation_result = validate_spec(spec_data)
+ diagnostics = [
+ {
+ "code": d.code,
+ "message": d.message,
+ "severity": d.severity,
+ "location": d.location,
+ "suggested_fix": d.suggested_fix,
+ }
+ for d in validation_result.diagnostics
+ ]
+
  return asdict(
  success_response(
  data={
  "name": name.strip(),
+ "spec_id": spec_data["spec_id"],
  "template": template,
  "category": category,
+ "mission": mission.strip() if isinstance(mission, str) else None,
  "dry_run": True,
- "note": "Dry run - no changes made",
+ "is_valid": validation_result.is_valid,
+ "error_count": validation_result.error_count,
+ "warning_count": validation_result.warning_count,
+ "diagnostics": diagnostics,
+ "note": "Preflight validation complete - no changes made",
  },
  request_id=request_id,
  )
@@ -249,6 +315,7 @@ def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
  name=name.strip(),
  template=template,
  category=category,
+ mission=mission,
  specs_dir=specs_dir,
  )
  elapsed_ms = (time.perf_counter() - start_time) * 1000
@@ -349,38 +416,34 @@ def _handle_spec_template(*, config: ServerConfig, **payload: Any) -> dict:
  if template_action == "list":
  data["templates"] = [
  {
- "name": "simple",
- "description": "Minimal spec with 1 phase and basic tasks",
- },
- {
- "name": "medium",
- "description": "Standard spec with 2-3 phases (default)",
- },
- {
- "name": "complex",
- "description": "Multi-phase spec with groups and subtasks",
- },
- {
- "name": "security",
- "description": "Security-focused spec with audit tasks",
+ "name": "empty",
+ "description": "Blank spec with no phases - use phase templates to add structure",
  },
  ]
- data["total_count"] = len(data["templates"])
+ data["phase_templates"] = [
+ {"name": t, "description": f"Add {t} phase structure"}
+ for t in PHASE_TEMPLATES
+ ]
+ data["total_count"] = 1
+ data["message"] = "Use 'empty' template, then add phases via phase-add-bulk or phase-template apply"
  elif template_action == "show":
  data["template_name"] = template_name
  data["content"] = {
  "name": template_name,
- "description": f"Template structure for '{template_name}' specs",
- "usage": f"Use authoring(action='spec-create', template='{template_name}') to create a spec",
+ "description": "Blank spec with no phases",
+ "usage": "Use authoring(action='spec-create', name='your-spec') to create, then add phases",
+ "phase_templates": list(PHASE_TEMPLATES),
  }
  else:
  data["template_name"] = template_name
  data["generated"] = {
  "template": template_name,
- "message": f"Use authoring(action='spec-create', template='{template_name}') to create a new spec",
+ "message": "Use spec-create to create an empty spec, then add phases",
  }
  data["instructions"] = (
- f"Call authoring(action='spec-create', name='your-spec-name', template='{template_name}')"
+ "1. Create spec: authoring(action='spec-create', name='your-spec-name')\n"
+ "2. Add phases: authoring(action='phase-template', template_action='apply', "
+ "template_name='planning', spec_id='...')"
  )

  return asdict(success_response(data=data, request_id=request_id))
@@ -515,6 +578,324 @@ def _handle_spec_update_frontmatter(*, config: ServerConfig, **payload: Any) ->
  )


+ # Valid scopes for find-replace
+ _FIND_REPLACE_SCOPES = {"all", "titles", "descriptions"}
+
+
+ def _handle_spec_find_replace(*, config: ServerConfig, **payload: Any) -> dict:
+ """Find and replace text across spec hierarchy nodes.
+
+ Supports literal or regex find/replace across titles and/or descriptions.
+ Returns a preview in dry_run mode, or applies changes and returns a summary.
+ """
+ request_id = _request_id()
+ action = "spec-find-replace"
+
+ # Required: spec_id
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide a non-empty spec_id parameter",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Pass the spec identifier to authoring",
+ )
+ spec_id = spec_id.strip()
+
+ # Required: find
+ find = payload.get("find")
+ if not isinstance(find, str) or not find:
+ return _validation_error(
+ field="find",
+ action=action,
+ message="Provide a non-empty find pattern",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Specify the text or regex pattern to find",
+ )
+
+ # Required: replace (can be empty string to delete matches)
+ replace = payload.get("replace")
+ if replace is None:
+ return _validation_error(
+ field="replace",
+ action=action,
+ message="Provide a replace value (use empty string to delete matches)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Provide a replacement string (use empty string to delete)",
+ )
+ if not isinstance(replace, str):
+ return _validation_error(
+ field="replace",
+ action=action,
+ message="replace must be a string",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Provide a string value for replace parameter",
+ )
+
+ # Optional: scope (default: "all")
+ scope = payload.get("scope", "all")
+ if not isinstance(scope, str) or scope not in _FIND_REPLACE_SCOPES:
+ return _validation_error(
+ field="scope",
+ action=action,
+ message=f"scope must be one of: {sorted(_FIND_REPLACE_SCOPES)}",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation=f"Use one of: {sorted(_FIND_REPLACE_SCOPES)}",
+ )
+
+ # Optional: use_regex (default: False)
+ use_regex = payload.get("use_regex", False)
+ if not isinstance(use_regex, bool):
+ return _validation_error(
+ field="use_regex",
+ action=action,
+ message="use_regex must be a boolean",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Set use_regex to true or false",
+ )
+
+ # Optional: case_sensitive (default: True)
+ case_sensitive = payload.get("case_sensitive", True)
+ if not isinstance(case_sensitive, bool):
+ return _validation_error(
+ field="case_sensitive",
+ action=action,
+ message="case_sensitive must be a boolean",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Set case_sensitive to true or false",
+ )
+
+ # Optional: dry_run (default: False)
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="dry_run must be a boolean",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Set dry_run to true or false",
+ )
+
+ # Optional: path (workspace)
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ find=find[:50] + "..." if len(find) > 50 else find,
+ use_regex=use_regex,
+ dry_run=dry_run,
+ )
+
+ metric_key = _metric_name(action)
+ start_time = time.perf_counter()
+
+ try:
+ result, error = find_replace_in_spec(
+ spec_id,
+ find,
+ replace,
+ scope=scope,
+ use_regex=use_regex,
+ case_sensitive=case_sensitive,
+ dry_run=dry_run,
+ specs_dir=specs_dir,
+ )
+ except Exception as exc: # pragma: no cover - defensive guard
+ logger.exception("Unexpected error in spec find-replace")
+ _metrics.counter(metric_key, labels={"status": "error"})
+ return asdict(
+ error_response(
+ sanitize_error_message(exc, context="authoring"),
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check logs for details",
+ request_id=request_id,
+ )
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
+
+ if error:
+ _metrics.counter(metric_key, labels={"status": "error"})
+ # Map error types
+ if "not found" in error.lower():
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation="Check spec_id value",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ if "invalid regex" in error.lower():
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.INVALID_FORMAT,
+ error_type=ErrorType.VALIDATION,
+ remediation="Check regex syntax",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.VALIDATION_ERROR,
+ error_type=ErrorType.VALIDATION,
+ remediation="Check find and replace parameters",
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
+ return asdict(
+ success_response(
+ data=result,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ request_id=request_id,
+ )
+ )
+
+
+ def _handle_spec_rollback(*, config: ServerConfig, **payload: Any) -> dict:
+ """Restore a spec from a backup timestamp."""
+ request_id = _request_id()
+ action = "spec-rollback"
+
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide a non-empty spec_id parameter",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ spec_id = spec_id.strip()
+
+ timestamp = payload.get("version") # Use 'version' parameter for timestamp
+ if not isinstance(timestamp, str) or not timestamp.strip():
+ return _validation_error(
+ field="version",
+ action=action,
+ message="Provide the backup timestamp to restore (use spec history to list)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ timestamp = timestamp.strip()
+
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ )
+
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ timestamp=timestamp,
+ dry_run=dry_run,
+ )
+
+ metric_key = _metric_name(action)
+ start_time = time.perf_counter()
+
+ result = rollback_spec(
+ spec_id=spec_id,
+ timestamp=timestamp,
+ specs_dir=specs_dir,
+ dry_run=dry_run,
+ create_backup=True,
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+
+ if not result.get("success"):
+ _metrics.counter(metric_key, labels={"status": "error"})
+ error_msg = result.get("error", "Unknown error during rollback")
+
+ # Determine error code based on error message
+ if "not found" in error_msg.lower():
+ error_code = ErrorCode.NOT_FOUND
+ error_type = ErrorType.NOT_FOUND
+ remediation = "Use spec(action='history') to list available backups"
+ else:
+ error_code = ErrorCode.INTERNAL_ERROR
+ error_type = ErrorType.INTERNAL
+ remediation = "Check spec and backup file permissions"
+
+ return asdict(
+ error_response(
+ error_msg,
+ error_code=error_code,
+ error_type=error_type,
+ remediation=remediation,
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
+ return asdict(
+ success_response(
+ spec_id=spec_id,
+ timestamp=timestamp,
+ dry_run=dry_run,
+ restored_from=result.get("restored_from"),
+ backup_created=result.get("backup_created"),
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+
+
  def _handle_phase_add(*, config: ServerConfig, **payload: Any) -> dict:
  request_id = _request_id()
  action = "phase-add"
@@ -725,9 +1106,10 @@ def _handle_phase_add(*, config: ServerConfig, **payload: Any) -> dict:
  )


- def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
+ def _handle_phase_update_metadata(*, config: ServerConfig, **payload: Any) -> dict:
+ """Update metadata fields of an existing phase."""
  request_id = _request_id()
- action = "phase-remove"
+ action = "phase-update-metadata"

  spec_id = payload.get("spec_id")
  if not isinstance(spec_id, str) or not spec_id.strip():
@@ -735,6 +1117,7 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  field="spec_id",
  action=action,
  message="Provide a non-empty spec_id parameter",
+ remediation="Pass the spec identifier to authoring",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
@@ -745,27 +1128,79 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  return _validation_error(
  field="phase_id",
  action=action,
- message="Provide the phase identifier (e.g., phase-1)",
+ message="Provide a non-empty phase_id parameter",
+ remediation="Pass the phase identifier (e.g., 'phase-1')",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
  phase_id = phase_id.strip()

- force = payload.get("force", False)
- if not isinstance(force, bool):
+ # Extract optional metadata fields
+ estimated_hours = payload.get("estimated_hours")
+ description = payload.get("description")
+ purpose = payload.get("purpose")
+
+ # Validate at least one field is provided
+ has_update = any(v is not None for v in [estimated_hours, description, purpose])
+ if not has_update:
  return _validation_error(
- field="force",
+ field="metadata",
  action=action,
- message="Expected a boolean value",
+ message="At least one metadata field must be provided",
+ remediation="Include estimated_hours, description, or purpose",
  request_id=request_id,
+ code=ErrorCode.VALIDATION_FAILED,
  )

- dry_run = payload.get("dry_run", False)
+ # Validate estimated_hours if provided
+ if estimated_hours is not None:
+ if isinstance(estimated_hours, bool) or not isinstance(
+ estimated_hours, (int, float)
+ ):
+ return _validation_error(
+ field="estimated_hours",
+ action=action,
+ message="Provide a numeric value",
+ remediation="Set estimated_hours to a number >= 0",
+ request_id=request_id,
+ )
+ if estimated_hours < 0:
+ return _validation_error(
+ field="estimated_hours",
+ action=action,
+ message="Value must be non-negative",
+ remediation="Set hours to zero or greater",
+ request_id=request_id,
+ )
+ estimated_hours = float(estimated_hours)
+
+ # Validate description if provided
+ if description is not None and not isinstance(description, str):
+ return _validation_error(
+ field="description",
+ action=action,
+ message="Description must be a string",
+ remediation="Provide a text description",
+ request_id=request_id,
+ )
+
+ # Validate purpose if provided
+ if purpose is not None and not isinstance(purpose, str):
+ return _validation_error(
+ field="purpose",
+ action=action,
+ message="Purpose must be a string",
+ remediation="Provide a text purpose",
+ request_id=request_id,
+ )
+
+ dry_run = payload.get("dry_run", False)
  if not isinstance(dry_run, bool):
  return _validation_error(
  field="dry_run",
  action=action,
  message="Expected a boolean value",
+ remediation="Set dry_run to true or false",
  request_id=request_id,
  )

@@ -775,6 +1210,7 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  field="path",
  action=action,
  message="Workspace path must be a string",
+ remediation="Provide a valid workspace path",
  request_id=request_id,
  )

@@ -788,38 +1224,24 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  action=action,
  spec_id=spec_id,
  phase_id=phase_id,
- force=force,
  dry_run=dry_run,
  )

  metric_key = _metric_name(action)
- if dry_run:
- _metrics.counter(
- metric_key, labels={"status": "success", "force": str(force).lower()}
- )
- return asdict(
- success_response(
- data={
- "spec_id": spec_id,
- "phase_id": phase_id,
- "force": force,
- "dry_run": True,
- "note": "Dry run - no changes made",
- },
- request_id=request_id,
- )
- )
-
  start_time = time.perf_counter()
+
  try:
- result, error = remove_phase(
+ result, error = update_phase_metadata(
  spec_id=spec_id,
  phase_id=phase_id,
- force=force,
+ estimated_hours=estimated_hours,
+ description=description,
+ purpose=purpose,
+ dry_run=dry_run,
  specs_dir=specs_dir,
  )
  except Exception as exc: # pragma: no cover - defensive guard
- logger.exception("Unexpected error removing phase")
+ logger.exception("Unexpected error updating phase metadata")
  _metrics.counter(metric_key, labels={"status": "error"})
  return asdict(
  error_response(
@@ -837,7 +1259,7 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  if error:
  _metrics.counter(metric_key, labels={"status": "error"})
  lowered = error.lower()
- if "spec" in lowered and "not found" in lowered:
+ if "specification" in lowered and "not found" in lowered:
  return asdict(
  error_response(
  f"Specification '{spec_id}' not found",
@@ -850,10 +1272,10 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  if "phase" in lowered and "not found" in lowered:
  return asdict(
  error_response(
- f"Phase '{phase_id}' not found in spec",
- error_code=ErrorCode.PHASE_NOT_FOUND,
+ f"Phase '{phase_id}' not found in spec '{spec_id}'",
+ error_code=ErrorCode.TASK_NOT_FOUND,
  error_type=ErrorType.NOT_FOUND,
- remediation="Confirm the phase exists in the hierarchy",
+ remediation='Verify the phase ID via task(action="query")',
  request_id=request_id,
  )
  )
@@ -861,25 +1283,15 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  return asdict(
  error_response(
  f"Node '{phase_id}' is not a phase",
- error_code=ErrorCode.VALIDATION_ERROR,
+ error_code=ErrorCode.VALIDATION_FAILED,
  error_type=ErrorType.VALIDATION,
- remediation="Use task-remove for non-phase nodes",
- request_id=request_id,
- )
- )
- if "non-completed" in lowered or "has" in lowered and "task" in lowered:
- return asdict(
- error_response(
- f"Phase '{phase_id}' has non-completed tasks. Use force=True to remove anyway",
- error_code=ErrorCode.CONFLICT,
- error_type=ErrorType.CONFLICT,
- remediation="Set force=True to remove active phases",
+ remediation="Provide a valid phase ID (e.g., 'phase-1')",
  request_id=request_id,
  )
  )
  return asdict(
  error_response(
- f"Failed to remove phase: {error}",
+ f"Failed to update phase metadata: {error}",
  error_code=ErrorCode.INTERNAL_ERROR,
  error_type=ErrorType.INTERNAL,
  remediation="Check input values and retry",
@@ -887,59 +1299,206 @@ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
  )
  )

- _metrics.counter(
- metric_key, labels={"status": "success", "force": str(force).lower()}
- )
+ _metrics.counter(metric_key, labels={"status": "success"})
  return asdict(
  success_response(
- data={"spec_id": spec_id, "dry_run": False, **(result or {})},
+ data={"spec_id": spec_id, "phase_id": phase_id, **(result or {})},
  telemetry={"duration_ms": round(elapsed_ms, 2)},
  request_id=request_id,
  )
  )


- def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
+ def _handle_phase_add_bulk(*, config: ServerConfig, **payload: Any) -> dict:
  request_id = _request_id()
- action = "assumption-add"
+ action = "phase-add-bulk"

+ # Validate spec_id
  spec_id = payload.get("spec_id")
  if not isinstance(spec_id, str) or not spec_id.strip():
  return _validation_error(
  field="spec_id",
  action=action,
  message="Provide a non-empty spec_id parameter",
+ remediation="Pass the spec identifier to authoring",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
  spec_id = spec_id.strip()

- text = payload.get("text")
- if not isinstance(text, str) or not text.strip():
+ # Require macro format: {phase: {...}, tasks: [...]}
+ phase_obj = payload.get("phase")
+ if not isinstance(phase_obj, dict):
  return _validation_error(
- field="text",
+ field="phase",
  action=action,
- message="Provide the assumption text",
+ message="Provide a phase object with metadata",
+ remediation="Use macro format: {phase: {title: '...', description: '...'}, tasks: [...]}",
  request_id=request_id,
  code=ErrorCode.MISSING_REQUIRED,
  )
- text = text.strip()

- assumption_type = payload.get("assumption_type") or "constraint"
- if assumption_type not in ASSUMPTION_TYPES:
+ # Extract phase metadata from nested object
+ title = phase_obj.get("title")
+ if not isinstance(title, str) or not title.strip():
  return _validation_error(
- field="assumption_type",
+ field="phase.title",
  action=action,
- message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
+ message="Provide a non-empty phase title",
+ remediation="Include phase.title in the phase object",
  request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
  )
+ title = title.strip()

- author = payload.get("author")
- if author is not None and not isinstance(author, str):
+ # Validate tasks array
+ tasks = payload.get("tasks")
+ if not tasks or not isinstance(tasks, list) or len(tasks) == 0:
  return _validation_error(
- field="author",
+ field="tasks",
  action=action,
- message="Author must be a string",
+ message="Provide at least one task definition",
+ remediation="Include a tasks array with type and title for each task",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+
+ # Validate each task in the array
+ valid_task_types = {"task", "verify"}
+ for idx, task_def in enumerate(tasks):
+ if not isinstance(task_def, dict):
+ return _validation_error(
+ field=f"tasks[{idx}]",
+ action=action,
+ message="Each task must be a dictionary",
+ request_id=request_id,
+ )
+
+ task_type = task_def.get("type")
+ if not task_type or task_type not in valid_task_types:
+ return _validation_error(
+ field=f"tasks[{idx}].type",
+ action=action,
+ message="Task type must be 'task' or 'verify'",
+ remediation="Set type to 'task' or 'verify'",
+ request_id=request_id,
+ )
+
+ task_title = task_def.get("title")
+ if not task_title or not isinstance(task_title, str) or not task_title.strip():
+ return _validation_error(
+ field=f"tasks[{idx}].title",
+ action=action,
+ message="Each task must have a non-empty title",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+
+ est_hours = task_def.get("estimated_hours")
+ if est_hours is not None:
+ if isinstance(est_hours, bool) or not isinstance(est_hours, (int, float)):
+ return _validation_error(
+ field=f"tasks[{idx}].estimated_hours",
+ action=action,
+ message="estimated_hours must be a number",
+ request_id=request_id,
+ )
+ if est_hours < 0:
+ return _validation_error(
+ field=f"tasks[{idx}].estimated_hours",
+ action=action,
+ message="estimated_hours must be non-negative",
+ request_id=request_id,
+ )
+
+ # Validate optional phase metadata (from phase object)
+ description = phase_obj.get("description")
+ if description is not None and not isinstance(description, str):
+ return _validation_error(
+ field="phase.description",
+ action=action,
+ message="Description must be a string",
+ request_id=request_id,
+ )
+
+ purpose = phase_obj.get("purpose")
+ if purpose is not None and not isinstance(purpose, str):
+ return _validation_error(
+ field="phase.purpose",
+ action=action,
+ message="Purpose must be a string",
+ request_id=request_id,
+ )
+
+ estimated_hours = phase_obj.get("estimated_hours")
+ if estimated_hours is not None:
+ if isinstance(estimated_hours, bool) or not isinstance(
+ estimated_hours, (int, float)
+ ):
+ return _validation_error(
+ field="phase.estimated_hours",
+ action=action,
+ message="Provide a numeric value",
+ request_id=request_id,
+ )
+ if estimated_hours < 0:
+ return _validation_error(
+ field="phase.estimated_hours",
+ action=action,
+ message="Value must be non-negative",
+ remediation="Set hours to zero or greater",
+ request_id=request_id,
+ )
+ estimated_hours = float(estimated_hours)
+
+ # Handle metadata_defaults from both top-level and phase object
+ # Top-level serves as base, phase-level overrides
+ top_level_defaults = payload.get("metadata_defaults")
+ if top_level_defaults is not None and not isinstance(top_level_defaults, dict):
+ return _validation_error(
+ field="metadata_defaults",
+ action=action,
+ message="metadata_defaults must be a dictionary",
+ request_id=request_id,
+ )
+
+ phase_level_defaults = phase_obj.get("metadata_defaults")
+ if phase_level_defaults is not None and not isinstance(phase_level_defaults, dict):
+ return _validation_error(
+ field="phase.metadata_defaults",
+ action=action,
+ message="metadata_defaults must be a dictionary",
+ request_id=request_id,
+ )
+
+ # Merge: top-level as base, phase-level overrides
+ metadata_defaults = None
+ if top_level_defaults or phase_level_defaults:
+ metadata_defaults = {**(top_level_defaults or {}), **(phase_level_defaults or {})}
+
+ position = payload.get("position")
+ if position is not None:
+ if isinstance(position, bool) or not isinstance(position, int):
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be an integer",
+ request_id=request_id,
+ )
+ if position < 0:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be >= 0",
+ request_id=request_id,
+ )
+
+ link_previous = payload.get("link_previous", True)
+ if not isinstance(link_previous, bool):
+ return _validation_error(
+ field="link_previous",
+ action=action,
+ message="Expected a boolean value",
  request_id=request_id,
  )

@@ -965,10 +1524,11 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
  if specs_dir is None:
  return _specs_directory_missing_error(request_id)

+ # Check for duplicate phase title (warning only)
  warnings: List[str] = []
- if _assumption_exists(spec_id, specs_dir, text):
+ if _phase_exists(spec_id, specs_dir, title):
  warnings.append(
- "An assumption with identical text already exists; another entry will be appended"
+ f"Phase titled '{title}' already exists; the new phase will still be added"
  )

  audit_log(
@@ -976,27 +1536,31 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
  tool="authoring",
  action=action,
  spec_id=spec_id,
- assumption_type=assumption_type,
+ title=title,
+ task_count=len(tasks),
  dry_run=dry_run,
+ link_previous=link_previous,
  )

  metric_key = _metric_name(action)

  if dry_run:
  _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
- data = {
- "spec_id": spec_id,
- "assumption_id": "(preview)",
- "text": text,
- "type": assumption_type,
- "dry_run": True,
- "note": "Dry run - no changes made",
- }
- if author:
- data["author"] = author
+ preview_tasks = [
+ {"task_id": "(preview)", "title": t.get("title", ""), "type": t.get("type", "")}
+ for t in tasks
+ ]
  return asdict(
  success_response(
- data=data,
+ data={
+ "spec_id": spec_id,
+ "phase_id": "(preview)",
+ "title": title,
+ "tasks_created": preview_tasks,
+ "total_tasks": len(tasks),
+ "dry_run": True,
+ "note": "Dry run - no changes made",
+ },
  warnings=warnings or None,
  request_id=request_id,
  )
@@ -1004,15 +1568,20 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:

  start_time = time.perf_counter()
  try:
- result, error = add_assumption(
+ result, error = add_phase_bulk(
  spec_id=spec_id,
- text=text,
- assumption_type=assumption_type,
- author=author,
+ phase_title=title,
+ tasks=tasks,
+ phase_description=description,
+ phase_purpose=purpose,
+ phase_estimated_hours=estimated_hours,
+ metadata_defaults=metadata_defaults,
+ position=position,
+ link_previous=link_previous,
  specs_dir=specs_dir,
  )
  except Exception as exc: # pragma: no cover - defensive guard
- logger.exception("Unexpected error adding assumption")
+ logger.exception("Unexpected error in phase-add-bulk")
  _metrics.counter(metric_key, labels={"status": "error"})
  return asdict(
  error_response(
@@ -1029,7 +1598,8 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:

  if error:
  _metrics.counter(metric_key, labels={"status": "error"})
- if "not found" in error.lower():
+ lowered = error.lower()
+ if "specification" in lowered and "not found" in lowered:
  return asdict(
  error_response(
  f"Specification '{spec_id}' not found",
@@ -1039,30 +1609,30 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
  request_id=request_id,
  )
  )
+ if "task at index" in lowered:
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.VALIDATION_ERROR,
+ error_type=ErrorType.VALIDATION,
+ remediation="Check each task has valid type and title",
+ request_id=request_id,
+ )
+ )
  return asdict(
  error_response(
- f"Failed to add assumption: {error}",
+ f"Failed to add phase with tasks: {error}",
  error_code=ErrorCode.INTERNAL_ERROR,
  error_type=ErrorType.INTERNAL,
- remediation="Check that the spec exists",
+ remediation="Check input values and retry",
  request_id=request_id,
  )
  )

- data = {
- "spec_id": spec_id,
- "assumption_id": result.get("assumption_id") if result else None,
- "text": text,
- "type": assumption_type,
- "dry_run": False,
- }
- if author:
- data["author"] = author
-
  _metrics.counter(metric_key, labels={"status": "success"})
  return asdict(
  success_response(
- data=data,
+ data={"spec_id": spec_id, "dry_run": False, **(result or {})},
  warnings=warnings or None,
  telemetry={"duration_ms": round(elapsed_ms, 2)},
  request_id=request_id,
@@ -1070,9 +1640,958 @@ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
  )


- def _handle_assumption_list(*, config: ServerConfig, **payload: Any) -> dict:
+ def _handle_phase_template(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle phase-template action: list/show/apply phase templates."""
  request_id = _request_id()
- action = "assumption-list"
+ action = "phase-template"
+
+ template_action = payload.get("template_action")
+ if not isinstance(template_action, str) or not template_action.strip():
+ return _validation_error(
+ field="template_action",
+ action=action,
+ message="Provide one of: list, show, apply",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ template_action = template_action.strip().lower()
+ if template_action not in ("list", "show", "apply"):
+ return _validation_error(
+ field="template_action",
+ action=action,
+ message="template_action must be one of: list, show, apply",
+ request_id=request_id,
+ remediation="Use list, show, or apply",
+ )
+
+ template_name = payload.get("template_name")
+ if template_action in ("show", "apply"):
+ if not isinstance(template_name, str) or not template_name.strip():
+ return _validation_error(
+ field="template_name",
+ action=action,
+ message="Provide a template name",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ template_name = template_name.strip()
+ if template_name not in PHASE_TEMPLATES:
+ return asdict(
+ error_response(
+ f"Phase template '{template_name}' not found",
+ error_code=ErrorCode.NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation=f"Use template_action='list' to see available templates. Valid: {', '.join(PHASE_TEMPLATES)}",
+ request_id=request_id,
+ )
+ )
+
+ data: Dict[str, Any] = {"action": template_action}
+
+ if template_action == "list":
+ data["templates"] = [
+ {
+ "name": "planning",
+ "description": "Requirements gathering and initial planning phase",
+ "tasks": 2,
+ "estimated_hours": 4,
+ },
+ {
+ "name": "implementation",
+ "description": "Core development and feature implementation phase",
+ "tasks": 2,
+ "estimated_hours": 8,
+ },
+ {
+ "name": "testing",
+ "description": "Comprehensive testing and quality assurance phase",
+ "tasks": 2,
+ "estimated_hours": 6,
+ },
+ {
+ "name": "security",
+ "description": "Security audit and hardening phase",
+ "tasks": 2,
+ "estimated_hours": 6,
+ },
+ {
+ "name": "documentation",
+ "description": "Technical documentation and knowledge capture phase",
+ "tasks": 2,
+ "estimated_hours": 4,
+ },
+ ]
+ data["total_count"] = len(data["templates"])
+ data["note"] = "All templates include automatic verification scaffolding (run-tests + fidelity)"
+ return asdict(success_response(data=data, request_id=request_id))
+
+ elif template_action == "show":
+ try:
+ template_struct = get_phase_template_structure(template_name)
+ data["template_name"] = template_name
+ data["content"] = {
+ "name": template_name,
+ "title": template_struct["title"],
+ "description": template_struct["description"],
+ "purpose": template_struct["purpose"],
+ "estimated_hours": template_struct["estimated_hours"],
+ "tasks": template_struct["tasks"],
+ "includes_verification": template_struct["includes_verification"],
+ }
+ data["usage"] = (
+ f"Use authoring(action='phase-template', template_action='apply', "
+ f"template_name='{template_name}', spec_id='your-spec-id') to apply this template"
+ )
+ return asdict(success_response(data=data, request_id=request_id))
+ except ValueError as exc:
+ return asdict(
+ error_response(
+ str(exc),
+ error_code=ErrorCode.NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ request_id=request_id,
+ )
+ )
+
+ else:  # apply
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide the target spec_id to apply the template to",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ )
+ spec_id = spec_id.strip()
+
+ # Optional parameters for apply
+ category = payload.get("category", "implementation")
+ if not isinstance(category, str):
+ return _validation_error(
+ field="category",
+ action=action,
+ message="Category must be a string",
+ request_id=request_id,
+ )
+ category = category.strip()
+ if category and category not in CATEGORIES:
+ return _validation_error(
+ field="category",
+ action=action,
+ message=f"Category must be one of: {', '.join(CATEGORIES)}",
+ request_id=request_id,
+ )
+
+ position = payload.get("position")
+ if position is not None:
+ if isinstance(position, bool) or not isinstance(position, int):
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be an integer",
+ request_id=request_id,
+ )
+ if position < 0:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be >= 0",
+ request_id=request_id,
+ )
+
+ link_previous = payload.get("link_previous", True)
+ if not isinstance(link_previous, bool):
+ return _validation_error(
+ field="link_previous",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ )
+
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ )
+
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ template_name=template_name,
+ dry_run=dry_run,
+ link_previous=link_previous,
+ )
+
+ metric_key = _metric_name(action)
+
+ if dry_run:
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
+ template_struct = get_phase_template_structure(template_name, category)
+ return asdict(
+ success_response(
+ data={
+ "spec_id": spec_id,
+ "template_applied": template_name,
+ "phase_id": "(preview)",
+ "title": template_struct["title"],
+ "tasks_created": [
+ {"task_id": "(preview)", "title": t["title"], "type": "task"}
+ for t in template_struct["tasks"]
+ ],
+ "total_tasks": len(template_struct["tasks"]),
+ "dry_run": True,
+ "note": "Dry run - no changes made. Verification scaffolding will be auto-added.",
+ },
+ request_id=request_id,
+ )
+ )
+
+ start_time = time.perf_counter()
+ try:
+ result, error = apply_phase_template(
+ spec_id=spec_id,
+ template=template_name,
+ specs_dir=specs_dir,
+ category=category,
+ position=position,
+ link_previous=link_previous,
+ )
+ except Exception as exc: # pragma: no cover - defensive guard
+ logger.exception("Unexpected error in phase-template apply")
+ _metrics.counter(metric_key, labels={"status": "error"})
+ return asdict(
+ error_response(
+ sanitize_error_message(exc, context="authoring"),
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check logs for details",
+ request_id=request_id,
+ )
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
+
+ if error:
+ _metrics.counter(metric_key, labels={"status": "error"})
+ lowered = error.lower()
+ if "specification" in lowered and "not found" in lowered:
+ return asdict(
+ error_response(
+ f"Specification '{spec_id}' not found",
+ error_code=ErrorCode.SPEC_NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation='Verify the spec ID via spec(action="list")',
+ request_id=request_id,
+ )
+ )
+ if "invalid phase template" in lowered:
+ return asdict(
+ error_response(
+ error,
+ error_code=ErrorCode.VALIDATION_ERROR,
+ error_type=ErrorType.VALIDATION,
+ remediation=f"Valid templates: {', '.join(PHASE_TEMPLATES)}",
+ request_id=request_id,
+ )
+ )
+ return asdict(
+ error_response(
+ f"Failed to apply phase template: {error}",
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check input values and retry",
+ request_id=request_id,
+ )
+ )
+
+ _metrics.counter(metric_key, labels={"status": "success"})
+ return asdict(
+ success_response(
+ data={"spec_id": spec_id, "dry_run": False, **(result or {})},
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ request_id=request_id,
+ )
+ )
+
+
+ def _handle_phase_move(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle phase-move action: reorder a phase within spec-root children."""
+ request_id = _request_id()
+ action = "phase-move"
+
+ spec_id = payload.get("spec_id")
+ if not isinstance(spec_id, str) or not spec_id.strip():
+ return _validation_error(
+ field="spec_id",
+ action=action,
+ message="Provide a non-empty spec_id parameter",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation='Use spec(action="list") to find available spec IDs',
+ )
+ spec_id = spec_id.strip()
+
+ phase_id = payload.get("phase_id")
+ if not isinstance(phase_id, str) or not phase_id.strip():
+ return _validation_error(
+ field="phase_id",
+ action=action,
+ message="Provide the phase identifier (e.g., phase-1)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Specify a phase ID like phase-1 or phase-2",
+ )
+ phase_id = phase_id.strip()
+
+ position = payload.get("position")
+ if position is None:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Provide the target position (1-based index)",
+ request_id=request_id,
+ code=ErrorCode.MISSING_REQUIRED,
+ remediation="Specify position as a positive integer (1 = first)",
+ )
+ if isinstance(position, bool) or not isinstance(position, int):
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be an integer",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Provide position as an integer, e.g. position=2",
+ )
+ if position < 1:
+ return _validation_error(
+ field="position",
+ action=action,
+ message="Position must be a positive integer (1-based)",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Use 1 for first position, 2 for second, etc.",
+ )
+
+ link_previous = payload.get("link_previous", True)
+ if not isinstance(link_previous, bool):
+ return _validation_error(
+ field="link_previous",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Use true or false for link_previous",
+ )
+
+ dry_run = payload.get("dry_run", False)
+ if not isinstance(dry_run, bool):
+ return _validation_error(
+ field="dry_run",
+ action=action,
+ message="Expected a boolean value",
+ request_id=request_id,
+ code=ErrorCode.INVALID_FORMAT,
+ remediation="Use true or false for dry_run",
+ )
+
+ path = payload.get("path")
+ if path is not None and not isinstance(path, str):
+ return _validation_error(
+ field="path",
+ action=action,
+ message="Workspace path must be a string",
+ request_id=request_id,
+ remediation="Provide a valid filesystem path string",
+ code=ErrorCode.INVALID_FORMAT,
+ )
+
+ specs_dir = _resolve_specs_dir(config, path)
+ if specs_dir is None:
+ return _specs_directory_missing_error(request_id)
+
+ audit_log(
+ "tool_invocation",
+ tool="authoring",
+ action=action,
+ spec_id=spec_id,
+ phase_id=phase_id,
+ position=position,
+ link_previous=link_previous,
+ dry_run=dry_run,
+ )
+
+ metric_key = _metric_name(action)
+ start_time = time.perf_counter()
+
+ try:
+ result, error = move_phase(
+ spec_id=spec_id,
+ phase_id=phase_id,
+ position=position,
+ link_previous=link_previous,
+ dry_run=dry_run,
+ specs_dir=specs_dir,
+ )
+ except Exception as exc: # pragma: no cover - defensive guard
+ logger.exception("Unexpected error moving phase")
+ _metrics.counter(metric_key, labels={"status": "error"})
+ return asdict(
+ error_response(
+ sanitize_error_message(exc, context="authoring"),
+ error_code=ErrorCode.INTERNAL_ERROR,
+ error_type=ErrorType.INTERNAL,
+ remediation="Check logs for details",
+ request_id=request_id,
+ )
+ )
+
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
+
+ if error:
+ _metrics.counter(metric_key, labels={"status": "error"})
+ lowered = error.lower()
+ if "specification" in lowered and "not found" in lowered:
+ return asdict(
+ error_response(
+ f"Specification '{spec_id}' not found",
+ error_code=ErrorCode.SPEC_NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
+ remediation='Verify the spec ID via spec(action="list")',
+ request_id=request_id,
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
+ )
+ )
+ if "phase" in lowered and "not found" in lowered:
+ return asdict(
+ error_response(
+ f"Phase '{phase_id}' not found in spec",
+ error_code=ErrorCode.PHASE_NOT_FOUND,
+ error_type=ErrorType.NOT_FOUND,
2091
+ remediation="Confirm the phase exists in the hierarchy",
2092
+ request_id=request_id,
2093
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2094
+ )
2095
+ )
2096
+ if "not a phase" in lowered:
2097
+ return asdict(
2098
+ error_response(
2099
+ f"Node '{phase_id}' is not a phase",
2100
+ error_code=ErrorCode.VALIDATION_ERROR,
2101
+ error_type=ErrorType.VALIDATION,
2102
+ remediation="Provide a valid phase ID (e.g., phase-1)",
2103
+ request_id=request_id,
2104
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2105
+ )
2106
+ )
2107
+ if "invalid position" in lowered or "must be" in lowered:
2108
+ return asdict(
2109
+ error_response(
2110
+ error,
2111
+ error_code=ErrorCode.VALIDATION_ERROR,
2112
+ error_type=ErrorType.VALIDATION,
2113
+ remediation="Provide a valid 1-based position within range",
2114
+ request_id=request_id,
2115
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2116
+ )
2117
+ )
2118
+ return asdict(
2119
+ error_response(
2120
+ f"Failed to move phase: {error}",
2121
+ error_code=ErrorCode.INTERNAL_ERROR,
2122
+ error_type=ErrorType.INTERNAL,
2123
+ remediation="Check input values and retry",
2124
+ request_id=request_id,
2125
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2126
+ )
2127
+ )
2128
+
2129
+ _metrics.counter(metric_key, labels={"status": "success"})
2130
+ return asdict(
2131
+ success_response(
2132
+ data=result or {},
2133
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2134
+ request_id=request_id,
2135
+ )
2136
+ )
2137
+
2138
+
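The position guard in _handle_phase_move checks isinstance(position, bool) before accepting an int because bool is a subclass of int in Python; without it, position=True would sail through as position 1. A hypothetical payload for the action (IDs are placeholders) is sketched below:

# bool passes isinstance(..., int), which is why the handler rejects it explicitly.
assert isinstance(True, int)

payload = {
    "spec_id": "example-spec",   # placeholder ID
    "phase_id": "phase-3",
    "position": 1,               # 1-based: move the phase to the front
    "link_previous": True,       # default; controls linking to the preceding phase
    "dry_run": True,             # preview the move without writing
}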
2139
+ def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle phase-remove action: remove a phase from the spec hierarchy."""
2140
+ request_id = _request_id()
2141
+ action = "phase-remove"
2142
+
2143
+ spec_id = payload.get("spec_id")
2144
+ if not isinstance(spec_id, str) or not spec_id.strip():
2145
+ return _validation_error(
2146
+ field="spec_id",
2147
+ action=action,
2148
+ message="Provide a non-empty spec_id parameter",
2149
+ request_id=request_id,
2150
+ code=ErrorCode.MISSING_REQUIRED,
2151
+ )
2152
+ spec_id = spec_id.strip()
2153
+
2154
+ phase_id = payload.get("phase_id")
2155
+ if not isinstance(phase_id, str) or not phase_id.strip():
2156
+ return _validation_error(
2157
+ field="phase_id",
2158
+ action=action,
2159
+ message="Provide the phase identifier (e.g., phase-1)",
2160
+ request_id=request_id,
2161
+ code=ErrorCode.MISSING_REQUIRED,
2162
+ )
2163
+ phase_id = phase_id.strip()
2164
+
2165
+ force = payload.get("force", False)
2166
+ if not isinstance(force, bool):
2167
+ return _validation_error(
2168
+ field="force",
2169
+ action=action,
2170
+ message="Expected a boolean value",
2171
+ request_id=request_id,
2172
+ )
2173
+
2174
+ dry_run = payload.get("dry_run", False)
2175
+ if not isinstance(dry_run, bool):
2176
+ return _validation_error(
2177
+ field="dry_run",
2178
+ action=action,
2179
+ message="Expected a boolean value",
2180
+ request_id=request_id,
2181
+ )
2182
+
2183
+ path = payload.get("path")
2184
+ if path is not None and not isinstance(path, str):
2185
+ return _validation_error(
2186
+ field="path",
2187
+ action=action,
2188
+ message="Workspace path must be a string",
2189
+ request_id=request_id,
2190
+ )
2191
+
2192
+ specs_dir = _resolve_specs_dir(config, path)
2193
+ if specs_dir is None:
2194
+ return _specs_directory_missing_error(request_id)
2195
+
2196
+ audit_log(
2197
+ "tool_invocation",
2198
+ tool="authoring",
2199
+ action=action,
2200
+ spec_id=spec_id,
2201
+ phase_id=phase_id,
2202
+ force=force,
2203
+ dry_run=dry_run,
2204
+ )
2205
+
2206
+ metric_key = _metric_name(action)
2207
+ if dry_run:
2208
+ _metrics.counter(
2209
+ metric_key, labels={"status": "success", "force": str(force).lower()}
2210
+ )
2211
+ return asdict(
2212
+ success_response(
2213
+ data={
2214
+ "spec_id": spec_id,
2215
+ "phase_id": phase_id,
2216
+ "force": force,
2217
+ "dry_run": True,
2218
+ "note": "Dry run - no changes made",
2219
+ },
2220
+ request_id=request_id,
2221
+ )
2222
+ )
2223
+
2224
+ start_time = time.perf_counter()
2225
+ try:
2226
+ result, error = remove_phase(
2227
+ spec_id=spec_id,
2228
+ phase_id=phase_id,
2229
+ force=force,
2230
+ specs_dir=specs_dir,
2231
+ )
2232
+ except Exception as exc: # pragma: no cover - defensive guard
2233
+ logger.exception("Unexpected error removing phase")
2234
+ _metrics.counter(metric_key, labels={"status": "error"})
2235
+ return asdict(
2236
+ error_response(
2237
+ sanitize_error_message(exc, context="authoring"),
2238
+ error_code=ErrorCode.INTERNAL_ERROR,
2239
+ error_type=ErrorType.INTERNAL,
2240
+ remediation="Check logs for details",
2241
+ request_id=request_id,
2242
+ )
2243
+ )
2244
+
2245
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2246
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2247
+
2248
+ if error:
2249
+ _metrics.counter(metric_key, labels={"status": "error"})
2250
+ lowered = error.lower()
2251
+ if "spec" in lowered and "not found" in lowered:
2252
+ return asdict(
2253
+ error_response(
2254
+ f"Specification '{spec_id}' not found",
2255
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2256
+ error_type=ErrorType.NOT_FOUND,
2257
+ remediation='Verify the spec ID via spec(action="list")',
2258
+ request_id=request_id,
2259
+ )
2260
+ )
2261
+ if "phase" in lowered and "not found" in lowered:
2262
+ return asdict(
2263
+ error_response(
2264
+ f"Phase '{phase_id}' not found in spec",
2265
+ error_code=ErrorCode.PHASE_NOT_FOUND,
2266
+ error_type=ErrorType.NOT_FOUND,
2267
+ remediation="Confirm the phase exists in the hierarchy",
2268
+ request_id=request_id,
2269
+ )
2270
+ )
2271
+ if "not a phase" in lowered:
2272
+ return asdict(
2273
+ error_response(
2274
+ f"Node '{phase_id}' is not a phase",
2275
+ error_code=ErrorCode.VALIDATION_ERROR,
2276
+ error_type=ErrorType.VALIDATION,
2277
+ remediation="Use task-remove for non-phase nodes",
2278
+ request_id=request_id,
2279
+ )
2280
+ )
2281
+ if "non-completed" in lowered or "has" in lowered and "task" in lowered:
2282
+ return asdict(
2283
+ error_response(
2284
+ f"Phase '{phase_id}' has non-completed tasks. Use force=True to remove anyway",
2285
+ error_code=ErrorCode.CONFLICT,
2286
+ error_type=ErrorType.CONFLICT,
2287
+ remediation="Set force=True to remove active phases",
2288
+ request_id=request_id,
2289
+ )
2290
+ )
2291
+ return asdict(
2292
+ error_response(
2293
+ f"Failed to remove phase: {error}",
2294
+ error_code=ErrorCode.INTERNAL_ERROR,
2295
+ error_type=ErrorType.INTERNAL,
2296
+ remediation="Check input values and retry",
2297
+ request_id=request_id,
2298
+ )
2299
+ )
2300
+
2301
+ _metrics.counter(
2302
+ metric_key, labels={"status": "success", "force": str(force).lower()}
2303
+ )
2304
+ return asdict(
2305
+ success_response(
2306
+ data={"spec_id": spec_id, "dry_run": False, **(result or {})},
2307
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2308
+ request_id=request_id,
2309
+ )
2310
+ )
2311
+
2312
+
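The CONFLICT branch above fires when remove_phase reports non-completed tasks under the phase, and the stated remediation is to retry with force=True. A hypothetical retry flow (transport call elided, IDs are placeholders):

first_try = {"action": "phase-remove", "spec_id": "example-spec", "phase_id": "phase-2"}
# If the response carries error_type CONFLICT ("has non-completed tasks"),
# the documented remediation is to resubmit with force set:
retry = {**first_try, "force": True}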
2313
+ def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle assumption-add action: record an assumption on the spec."""
2314
+ request_id = _request_id()
2315
+ action = "assumption-add"
2316
+
2317
+ spec_id = payload.get("spec_id")
2318
+ if not isinstance(spec_id, str) or not spec_id.strip():
2319
+ return _validation_error(
2320
+ field="spec_id",
2321
+ action=action,
2322
+ message="Provide a non-empty spec_id parameter",
2323
+ request_id=request_id,
2324
+ code=ErrorCode.MISSING_REQUIRED,
2325
+ )
2326
+ spec_id = spec_id.strip()
2327
+
2328
+ text = payload.get("text")
2329
+ if not isinstance(text, str) or not text.strip():
2330
+ return _validation_error(
2331
+ field="text",
2332
+ action=action,
2333
+ message="Provide the assumption text",
2334
+ request_id=request_id,
2335
+ code=ErrorCode.MISSING_REQUIRED,
2336
+ )
2337
+ text = text.strip()
2338
+
2339
+ assumption_type = payload.get("assumption_type") or "constraint"
2340
+ if assumption_type not in ASSUMPTION_TYPES:
2341
+ return _validation_error(
2342
+ field="assumption_type",
2343
+ action=action,
2344
+ message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
2345
+ request_id=request_id,
2346
+ )
2347
+
2348
+ author = payload.get("author")
2349
+ if author is not None and not isinstance(author, str):
2350
+ return _validation_error(
2351
+ field="author",
2352
+ action=action,
2353
+ message="Author must be a string",
2354
+ request_id=request_id,
2355
+ )
2356
+
2357
+ dry_run = payload.get("dry_run", False)
2358
+ if not isinstance(dry_run, bool):
2359
+ return _validation_error(
2360
+ field="dry_run",
2361
+ action=action,
2362
+ message="Expected a boolean value",
2363
+ request_id=request_id,
2364
+ )
2365
+
2366
+ path = payload.get("path")
2367
+ if path is not None and not isinstance(path, str):
2368
+ return _validation_error(
2369
+ field="path",
2370
+ action=action,
2371
+ message="Workspace path must be a string",
2372
+ request_id=request_id,
2373
+ )
2374
+
2375
+ specs_dir = _resolve_specs_dir(config, path)
2376
+ if specs_dir is None:
2377
+ return _specs_directory_missing_error(request_id)
2378
+
2379
+ warnings: List[str] = []
2380
+ if _assumption_exists(spec_id, specs_dir, text):
2381
+ warnings.append(
2382
+ "An assumption with identical text already exists; another entry will be appended"
2383
+ )
2384
+
2385
+ audit_log(
2386
+ "tool_invocation",
2387
+ tool="authoring",
2388
+ action=action,
2389
+ spec_id=spec_id,
2390
+ assumption_type=assumption_type,
2391
+ dry_run=dry_run,
2392
+ )
2393
+
2394
+ metric_key = _metric_name(action)
2395
+
2396
+ if dry_run:
2397
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
2398
+ data = {
2399
+ "spec_id": spec_id,
2400
+ "assumption_id": "(preview)",
2401
+ "text": text,
2402
+ "type": assumption_type,
2403
+ "dry_run": True,
2404
+ "note": "Dry run - no changes made",
2405
+ }
2406
+ if author:
2407
+ data["author"] = author
2408
+ return asdict(
2409
+ success_response(
2410
+ data=data,
2411
+ warnings=warnings or None,
2412
+ request_id=request_id,
2413
+ )
2414
+ )
2415
+
2416
+ start_time = time.perf_counter()
2417
+ try:
2418
+ result, error = add_assumption(
2419
+ spec_id=spec_id,
2420
+ text=text,
2421
+ assumption_type=assumption_type,
2422
+ author=author,
2423
+ specs_dir=specs_dir,
2424
+ )
2425
+ except Exception as exc: # pragma: no cover - defensive guard
2426
+ logger.exception("Unexpected error adding assumption")
2427
+ _metrics.counter(metric_key, labels={"status": "error"})
2428
+ return asdict(
2429
+ error_response(
2430
+ sanitize_error_message(exc, context="authoring"),
2431
+ error_code=ErrorCode.INTERNAL_ERROR,
2432
+ error_type=ErrorType.INTERNAL,
2433
+ remediation="Check logs for details",
2434
+ request_id=request_id,
2435
+ )
2436
+ )
2437
+
2438
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2439
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2440
+
2441
+ if error:
2442
+ _metrics.counter(metric_key, labels={"status": "error"})
2443
+ if "not found" in error.lower():
2444
+ return asdict(
2445
+ error_response(
2446
+ f"Specification '{spec_id}' not found",
2447
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2448
+ error_type=ErrorType.NOT_FOUND,
2449
+ remediation='Verify the spec ID via spec(action="list")',
2450
+ request_id=request_id,
2451
+ )
2452
+ )
2453
+ return asdict(
2454
+ error_response(
2455
+ f"Failed to add assumption: {error}",
2456
+ error_code=ErrorCode.INTERNAL_ERROR,
2457
+ error_type=ErrorType.INTERNAL,
2458
+ remediation="Check that the spec exists",
2459
+ request_id=request_id,
2460
+ )
2461
+ )
2462
+
2463
+ data = {
2464
+ "spec_id": spec_id,
2465
+ "assumption_id": result.get("assumption_id") if result else None,
2466
+ "text": text,
2467
+ "type": assumption_type,
2468
+ "dry_run": False,
2469
+ }
2470
+ if author:
2471
+ data["author"] = author
2472
+
2473
+ _metrics.counter(metric_key, labels={"status": "success"})
2474
+ return asdict(
2475
+ success_response(
2476
+ data=data,
2477
+ warnings=warnings or None,
2478
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2479
+ request_id=request_id,
2480
+ )
2481
+ )
2482
+
2483
+
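assumption_type defaults to "constraint" and must be one of ASSUMPTION_TYPES (defined earlier in the module); duplicate text is not rejected, it only adds a warning to the response. A hypothetical assumption-add payload:

payload = {
    "action": "assumption-add",
    "spec_id": "example-spec",                   # placeholder ID
    "text": "All timestamps are stored in UTC",  # example assumption text
    "assumption_type": "constraint",             # default when omitted
    "author": "alice",                           # optional
    "dry_run": False,
}
# Re-sending identical text appends another entry and returns a duplicate warning.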
2484
+ def _handle_assumption_list(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle assumption-list action: list assumptions recorded on the spec."""
2485
+ request_id = _request_id()
2486
+ action = "assumption-list"
2487
+
2488
+ spec_id = payload.get("spec_id")
2489
+ if not isinstance(spec_id, str) or not spec_id.strip():
2490
+ return _validation_error(
2491
+ field="spec_id",
2492
+ action=action,
2493
+ message="Provide a non-empty spec_id parameter",
2494
+ request_id=request_id,
2495
+ code=ErrorCode.MISSING_REQUIRED,
2496
+ )
2497
+ spec_id = spec_id.strip()
2498
+
2499
+ assumption_type = payload.get("assumption_type")
2500
+ if assumption_type is not None and assumption_type not in ASSUMPTION_TYPES:
2501
+ return _validation_error(
2502
+ field="assumption_type",
2503
+ action=action,
2504
+ message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
2505
+ request_id=request_id,
2506
+ )
2507
+
2508
+ path = payload.get("path")
2509
+ if path is not None and not isinstance(path, str):
2510
+ return _validation_error(
2511
+ field="path",
2512
+ action=action,
2513
+ message="Workspace path must be a string",
2514
+ request_id=request_id,
2515
+ )
2516
+
2517
+ specs_dir = _resolve_specs_dir(config, path)
2518
+ if specs_dir is None:
2519
+ return _specs_directory_missing_error(request_id)
2520
+
2521
+ audit_log(
2522
+ "tool_invocation",
2523
+ tool="authoring",
2524
+ action=action,
2525
+ spec_id=spec_id,
2526
+ assumption_type=assumption_type,
2527
+ )
2528
+
2529
+ metric_key = _metric_name(action)
2530
+ start_time = time.perf_counter()
2531
+ try:
2532
+ result, error = list_assumptions(
2533
+ spec_id=spec_id,
2534
+ assumption_type=assumption_type,
2535
+ specs_dir=specs_dir,
2536
+ )
2537
+ except Exception as exc: # pragma: no cover - defensive guard
2538
+ logger.exception("Unexpected error listing assumptions")
2539
+ _metrics.counter(metric_key, labels={"status": "error"})
2540
+ return asdict(
2541
+ error_response(
2542
+ sanitize_error_message(exc, context="authoring"),
2543
+ error_code=ErrorCode.INTERNAL_ERROR,
2544
+ error_type=ErrorType.INTERNAL,
2545
+ remediation="Check logs for details",
2546
+ request_id=request_id,
2547
+ )
2548
+ )
2549
+
2550
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2551
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2552
+
2553
+ if error:
2554
+ _metrics.counter(metric_key, labels={"status": "error"})
2555
+ if "not found" in error.lower():
2556
+ return asdict(
2557
+ error_response(
2558
+ f"Specification '{spec_id}' not found",
2559
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2560
+ error_type=ErrorType.NOT_FOUND,
2561
+ remediation='Verify the spec ID via spec(action="list")',
2562
+ request_id=request_id,
2563
+ )
2564
+ )
2565
+ return asdict(
2566
+ error_response(
2567
+ f"Failed to list assumptions: {error}",
2568
+ error_code=ErrorCode.INTERNAL_ERROR,
2569
+ error_type=ErrorType.INTERNAL,
2570
+ remediation="Check that the spec exists",
2571
+ request_id=request_id,
2572
+ )
2573
+ )
2574
+
2575
+ warnings: List[str] = []
2576
+ if assumption_type:
2577
+ warnings.append(
2578
+ "assumption_type filter is advisory only; all assumptions are returned"
2579
+ )
2580
+
2581
+ _metrics.counter(metric_key, labels={"status": "success"})
2582
+ return asdict(
2583
+ success_response(
2584
+ data=result or {"spec_id": spec_id, "assumptions": [], "total_count": 0},
2585
+ warnings=warnings or None,
2586
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2587
+ request_id=request_id,
2588
+ )
2589
+ )
2590
+
2591
+
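When list_assumptions returns no payload the handler falls back to an empty envelope, and the assumption_type filter is advisory only, so any hard filtering has to happen on the caller's side. A sketch of that, assuming each assumption entry carries the "type" key used by assumption-add:

empty = {"spec_id": "example-spec", "assumptions": [], "total_count": 0}  # fallback shape above

# Caller-side filtering, since the server returns every assumption regardless of type:
constraints = [a for a in empty["assumptions"] if a.get("type") == "constraint"]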
2592
+ def _handle_revision_add(*, config: ServerConfig, **payload: Any) -> dict:
+ """Handle revision-add action: append an entry to the spec revision history."""
2593
+ request_id = _request_id()
2594
+ action = "revision-add"
1076
2595
 
1077
2596
  spec_id = payload.get("spec_id")
1078
2597
  if not isinstance(spec_id, str) or not spec_id.strip():
@@ -1085,259 +2604,777 @@ def _handle_assumption_list(*, config: ServerConfig, **payload: Any) -> dict:
1085
2604
  )
1086
2605
  spec_id = spec_id.strip()
1087
2606
 
1088
- assumption_type = payload.get("assumption_type")
1089
- if assumption_type is not None and assumption_type not in ASSUMPTION_TYPES:
2607
+ version = payload.get("version")
2608
+ if not isinstance(version, str) or not version.strip():
1090
2609
  return _validation_error(
1091
- field="assumption_type",
2610
+ field="version",
2611
+ action=action,
2612
+ message="Provide the revision version (e.g., 1.1)",
2613
+ request_id=request_id,
2614
+ code=ErrorCode.MISSING_REQUIRED,
2615
+ )
2616
+ version = version.strip()
2617
+
2618
+ changes = payload.get("changes")
2619
+ if not isinstance(changes, str) or not changes.strip():
2620
+ return _validation_error(
2621
+ field="changes",
2622
+ action=action,
2623
+ message="Provide a summary of changes",
2624
+ request_id=request_id,
2625
+ code=ErrorCode.MISSING_REQUIRED,
2626
+ )
2627
+ changes = changes.strip()
2628
+
2629
+ author = payload.get("author")
2630
+ if author is not None and not isinstance(author, str):
2631
+ return _validation_error(
2632
+ field="author",
2633
+ action=action,
2634
+ message="Author must be a string",
2635
+ request_id=request_id,
2636
+ )
2637
+
2638
+ dry_run = payload.get("dry_run", False)
2639
+ if not isinstance(dry_run, bool):
2640
+ return _validation_error(
2641
+ field="dry_run",
2642
+ action=action,
2643
+ message="Expected a boolean value",
2644
+ request_id=request_id,
2645
+ )
2646
+
2647
+ path = payload.get("path")
2648
+ if path is not None and not isinstance(path, str):
2649
+ return _validation_error(
2650
+ field="path",
2651
+ action=action,
2652
+ message="Workspace path must be a string",
2653
+ request_id=request_id,
2654
+ )
2655
+
2656
+ specs_dir = _resolve_specs_dir(config, path)
2657
+ if specs_dir is None:
2658
+ return _specs_directory_missing_error(request_id)
2659
+
2660
+ audit_log(
2661
+ "tool_invocation",
2662
+ tool="authoring",
2663
+ action=action,
2664
+ spec_id=spec_id,
2665
+ version=version,
2666
+ dry_run=dry_run,
2667
+ )
2668
+
2669
+ metric_key = _metric_name(action)
2670
+ if dry_run:
2671
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
2672
+ data = {
2673
+ "spec_id": spec_id,
2674
+ "version": version,
2675
+ "changes": changes,
2676
+ "dry_run": True,
2677
+ "note": "Dry run - no changes made",
2678
+ }
2679
+ if author:
2680
+ data["author"] = author
2681
+ return asdict(
2682
+ success_response(
2683
+ data=data,
2684
+ request_id=request_id,
2685
+ )
2686
+ )
2687
+
2688
+ start_time = time.perf_counter()
2689
+ try:
2690
+ result, error = add_revision(
2691
+ spec_id=spec_id,
2692
+ version=version,
2693
+ changelog=changes,
2694
+ author=author,
2695
+ specs_dir=specs_dir,
2696
+ )
2697
+ except Exception as exc: # pragma: no cover - defensive guard
2698
+ logger.exception("Unexpected error adding revision")
2699
+ _metrics.counter(metric_key, labels={"status": "error"})
2700
+ return asdict(
2701
+ error_response(
2702
+ sanitize_error_message(exc, context="authoring"),
2703
+ error_code=ErrorCode.INTERNAL_ERROR,
2704
+ error_type=ErrorType.INTERNAL,
2705
+ remediation="Check logs for details",
2706
+ request_id=request_id,
2707
+ )
2708
+ )
2709
+
2710
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
2711
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
2712
+
2713
+ if error:
2714
+ _metrics.counter(metric_key, labels={"status": "error"})
2715
+ if "not found" in error.lower():
2716
+ return asdict(
2717
+ error_response(
2718
+ f"Specification '{spec_id}' not found",
2719
+ error_code=ErrorCode.SPEC_NOT_FOUND,
2720
+ error_type=ErrorType.NOT_FOUND,
2721
+ remediation='Verify the spec ID via spec(action="list")',
2722
+ request_id=request_id,
2723
+ )
2724
+ )
2725
+ return asdict(
2726
+ error_response(
2727
+ f"Failed to add revision: {error}",
2728
+ error_code=ErrorCode.INTERNAL_ERROR,
2729
+ error_type=ErrorType.INTERNAL,
2730
+ remediation="Check that the spec exists",
2731
+ request_id=request_id,
2732
+ )
2733
+ )
2734
+
2735
+ data = {
2736
+ "spec_id": spec_id,
2737
+ "version": version,
2738
+ "changes": changes,
2739
+ "dry_run": False,
2740
+ }
2741
+ if author:
2742
+ data["author"] = author
2743
+ if result and result.get("date"):
2744
+ data["date"] = result["date"]
2745
+
2746
+ _metrics.counter(metric_key, labels={"status": "success"})
2747
+ return asdict(
2748
+ success_response(
2749
+ data=data,
2750
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
2751
+ request_id=request_id,
2752
+ )
2753
+ )
2754
+
2755
+
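revision-add maps the public changes parameter onto add_revision's changelog keyword and echoes back the date reported by the core helper when one is present. A hypothetical payload:

payload = {
    "action": "revision-add",
    "spec_id": "example-spec",                             # placeholder ID
    "version": "1.1",
    "changes": "Clarified rollout plan; added a hardening phase",
    "author": "alice",                                     # optional
    "dry_run": True,                                       # preview the entry without writing
}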
2756
+ # Validation constants for intake
2757
+ _INTAKE_TITLE_MAX_LEN = 140
2758
+ _INTAKE_DESC_MAX_LEN = 2000
2759
+ _INTAKE_TAG_MAX_LEN = 32
2760
+ _INTAKE_TAG_MAX_COUNT = 20
2761
+ _INTAKE_SOURCE_MAX_LEN = 100
2762
+ _INTAKE_REQUESTER_MAX_LEN = 100
2763
+ _INTAKE_IDEMPOTENCY_KEY_MAX_LEN = 64
2764
+ _INTAKE_PRIORITY_VALUES = ("p0", "p1", "p2", "p3", "p4")
2765
+ _INTAKE_TAG_PATTERN = "^[a-z0-9_-]+$"
2766
+ _TAG_REGEX = re.compile(_INTAKE_TAG_PATTERN)
2767
+
2768
+
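The intake tag rules above boil down to: at most 20 tags, each at most 32 characters, lowercase letters, digits, hyphens and underscores only, with blank tags silently dropped. A standalone sketch of the same checks (not the module's own helper):

import re

TAG_PATTERN = re.compile(r"^[a-z0-9_-]+$")

def tag_problems(tags):
    problems = []
    if len(tags) > 20:
        problems.append("more than 20 tags")
    for tag in tags:
        tag = tag.strip().lower()
        if not tag:
            continue  # blank tags are skipped, mirroring the handler
        if len(tag) > 32:
            problems.append(f"tag too long: {tag!r}")
        elif not TAG_PATTERN.match(tag):
            problems.append(f"invalid characters: {tag!r}")
    return problems

print(tag_problems(["backend", "needs review"]))  # -> ["invalid characters: 'needs review'"]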
2769
+ def _handle_intake_add(*, config: ServerConfig, **payload: Any) -> dict:
2770
+ """Add a new intake item to the bikelane queue."""
2771
+ request_id = _request_id()
2772
+ action = "intake-add"
2773
+
2774
+ # Check feature flag
2775
+ blocked = _intake_feature_flag_blocked(request_id)
2776
+ if blocked:
2777
+ return blocked
2778
+
2779
+ # Validate title (required, 1-140 chars)
2780
+ title = payload.get("title")
2781
+ if not isinstance(title, str) or not title.strip():
2782
+ return _validation_error(
2783
+ field="title",
2784
+ action=action,
2785
+ message="Provide a non-empty title (1-140 characters)",
2786
+ request_id=request_id,
2787
+ code=ErrorCode.MISSING_REQUIRED,
2788
+ )
2789
+ title = title.strip()
2790
+ if len(title) > _INTAKE_TITLE_MAX_LEN:
2791
+ return _validation_error(
2792
+ field="title",
2793
+ action=action,
2794
+ message=f"Title exceeds maximum length of {_INTAKE_TITLE_MAX_LEN} characters",
2795
+ request_id=request_id,
2796
+ code=ErrorCode.VALIDATION_ERROR,
2797
+ remediation=f"Shorten title to {_INTAKE_TITLE_MAX_LEN} characters or less",
2798
+ )
2799
+
2800
+ # Validate description (optional, max 2000 chars)
2801
+ description = payload.get("description")
2802
+ if description is not None:
2803
+ if not isinstance(description, str):
2804
+ return _validation_error(
2805
+ field="description",
2806
+ action=action,
2807
+ message="Description must be a string",
2808
+ request_id=request_id,
2809
+ code=ErrorCode.INVALID_FORMAT,
2810
+ )
2811
+ description = description.strip() or None
2812
+ if description and len(description) > _INTAKE_DESC_MAX_LEN:
2813
+ return _validation_error(
2814
+ field="description",
2815
+ action=action,
2816
+ message=f"Description exceeds maximum length of {_INTAKE_DESC_MAX_LEN} characters",
2817
+ request_id=request_id,
2818
+ code=ErrorCode.VALIDATION_ERROR,
2819
+ remediation=f"Shorten description to {_INTAKE_DESC_MAX_LEN} characters or less",
2820
+ )
2821
+
2822
+ # Validate priority (optional, enum p0-p4, default p2)
2823
+ priority = payload.get("priority", "p2")
2824
+ if not isinstance(priority, str):
2825
+ return _validation_error(
2826
+ field="priority",
2827
+ action=action,
2828
+ message="Priority must be a string",
2829
+ request_id=request_id,
2830
+ code=ErrorCode.INVALID_FORMAT,
2831
+ )
2832
+ priority = priority.strip().lower()
2833
+ if priority not in _INTAKE_PRIORITY_VALUES:
2834
+ return _validation_error(
2835
+ field="priority",
2836
+ action=action,
2837
+ message=f"Priority must be one of: {', '.join(_INTAKE_PRIORITY_VALUES)}",
2838
+ request_id=request_id,
2839
+ code=ErrorCode.VALIDATION_ERROR,
2840
+ remediation="Use p0 (highest) through p4 (lowest), default is p2",
2841
+ )
2842
+
2843
+ # Validate tags (optional, max 20 items, each 1-32 chars, lowercase pattern)
2844
+ tags = payload.get("tags", [])
2845
+ if tags is None:
2846
+ tags = []
2847
+ if not isinstance(tags, list):
2848
+ return _validation_error(
2849
+ field="tags",
2850
+ action=action,
2851
+ message="Tags must be a list of strings",
2852
+ request_id=request_id,
2853
+ code=ErrorCode.INVALID_FORMAT,
2854
+ )
2855
+ if len(tags) > _INTAKE_TAG_MAX_COUNT:
2856
+ return _validation_error(
2857
+ field="tags",
2858
+ action=action,
2859
+ message=f"Maximum {_INTAKE_TAG_MAX_COUNT} tags allowed",
2860
+ request_id=request_id,
2861
+ code=ErrorCode.VALIDATION_ERROR,
2862
+ )
2863
+ validated_tags = []
2864
+ for i, tag in enumerate(tags):
2865
+ if not isinstance(tag, str):
2866
+ return _validation_error(
2867
+ field=f"tags[{i}]",
2868
+ action=action,
2869
+ message="Each tag must be a string",
2870
+ request_id=request_id,
2871
+ code=ErrorCode.INVALID_FORMAT,
2872
+ )
2873
+ tag = tag.strip().lower()
2874
+ if not tag:
2875
+ continue
2876
+ if len(tag) > _INTAKE_TAG_MAX_LEN:
2877
+ return _validation_error(
2878
+ field=f"tags[{i}]",
2879
+ action=action,
2880
+ message=f"Tag exceeds maximum length of {_INTAKE_TAG_MAX_LEN} characters",
2881
+ request_id=request_id,
2882
+ code=ErrorCode.VALIDATION_ERROR,
2883
+ )
2884
+ if not _TAG_REGEX.match(tag):
2885
+ return _validation_error(
2886
+ field=f"tags[{i}]",
2887
+ action=action,
2888
+ message=f"Tag must match pattern {_INTAKE_TAG_PATTERN} (lowercase alphanumeric, hyphens, underscores)",
2889
+ request_id=request_id,
2890
+ code=ErrorCode.INVALID_FORMAT,
2891
+ )
2892
+ validated_tags.append(tag)
2893
+ tags = validated_tags
2894
+
2895
+ # Validate source (optional, max 100 chars)
2896
+ source = payload.get("source")
2897
+ if source is not None:
2898
+ if not isinstance(source, str):
2899
+ return _validation_error(
2900
+ field="source",
2901
+ action=action,
2902
+ message="Source must be a string",
2903
+ request_id=request_id,
2904
+ code=ErrorCode.INVALID_FORMAT,
2905
+ )
2906
+ source = source.strip() or None
2907
+ if source and len(source) > _INTAKE_SOURCE_MAX_LEN:
2908
+ return _validation_error(
2909
+ field="source",
2910
+ action=action,
2911
+ message=f"Source exceeds maximum length of {_INTAKE_SOURCE_MAX_LEN} characters",
2912
+ request_id=request_id,
2913
+ code=ErrorCode.VALIDATION_ERROR,
2914
+ )
2915
+
2916
+ # Validate requester (optional, max 100 chars)
2917
+ requester = payload.get("requester")
2918
+ if requester is not None:
2919
+ if not isinstance(requester, str):
2920
+ return _validation_error(
2921
+ field="requester",
2922
+ action=action,
2923
+ message="Requester must be a string",
2924
+ request_id=request_id,
2925
+ code=ErrorCode.INVALID_FORMAT,
2926
+ )
2927
+ requester = requester.strip() or None
2928
+ if requester and len(requester) > _INTAKE_REQUESTER_MAX_LEN:
2929
+ return _validation_error(
2930
+ field="requester",
2931
+ action=action,
2932
+ message=f"Requester exceeds maximum length of {_INTAKE_REQUESTER_MAX_LEN} characters",
2933
+ request_id=request_id,
2934
+ code=ErrorCode.VALIDATION_ERROR,
2935
+ )
2936
+
2937
+ # Validate idempotency_key (optional, max 64 chars)
2938
+ idempotency_key = payload.get("idempotency_key")
2939
+ if idempotency_key is not None:
2940
+ if not isinstance(idempotency_key, str):
2941
+ return _validation_error(
2942
+ field="idempotency_key",
2943
+ action=action,
2944
+ message="Idempotency key must be a string",
2945
+ request_id=request_id,
2946
+ code=ErrorCode.INVALID_FORMAT,
2947
+ )
2948
+ idempotency_key = idempotency_key.strip() or None
2949
+ if idempotency_key and len(idempotency_key) > _INTAKE_IDEMPOTENCY_KEY_MAX_LEN:
2950
+ return _validation_error(
2951
+ field="idempotency_key",
2952
+ action=action,
2953
+ message=f"Idempotency key exceeds maximum length of {_INTAKE_IDEMPOTENCY_KEY_MAX_LEN} characters",
2954
+ request_id=request_id,
2955
+ code=ErrorCode.VALIDATION_ERROR,
2956
+ )
2957
+
2958
+ # Validate dry_run
2959
+ dry_run = payload.get("dry_run", False)
2960
+ if not isinstance(dry_run, bool):
2961
+ return _validation_error(
2962
+ field="dry_run",
1092
2963
  action=action,
1093
- message=f"Must be one of: {', '.join(ASSUMPTION_TYPES)}",
2964
+ message="dry_run must be a boolean",
1094
2965
  request_id=request_id,
2966
+ code=ErrorCode.INVALID_FORMAT,
1095
2967
  )
1096
2968
 
2969
+ # Validate path
1097
2970
  path = payload.get("path")
1098
2971
  if path is not None and not isinstance(path, str):
1099
2972
  return _validation_error(
1100
2973
  field="path",
1101
2974
  action=action,
1102
- message="Workspace path must be a string",
2975
+ message="path must be a string",
1103
2976
  request_id=request_id,
2977
+ code=ErrorCode.INVALID_FORMAT,
1104
2978
  )
1105
2979
 
2980
+ # Resolve specs directory
1106
2981
  specs_dir = _resolve_specs_dir(config, path)
1107
2982
  if specs_dir is None:
1108
2983
  return _specs_directory_missing_error(request_id)
1109
2984
 
2985
+ # Audit log
1110
2986
  audit_log(
1111
2987
  "tool_invocation",
1112
2988
  tool="authoring",
1113
2989
  action=action,
1114
- spec_id=spec_id,
1115
- assumption_type=assumption_type,
2990
+ title=title[:100], # Truncate for logging
2991
+ dry_run=dry_run,
1116
2992
  )
1117
2993
 
1118
2994
  metric_key = _metric_name(action)
1119
2995
  start_time = time.perf_counter()
2996
+
1120
2997
  try:
1121
- result, error = list_assumptions(
1122
- spec_id=spec_id,
1123
- assumption_type=assumption_type,
1124
- specs_dir=specs_dir,
2998
+ # Get bikelane_dir from config (allows customization via TOML or env var)
2999
+ bikelane_dir = config.get_bikelane_dir(specs_dir)
3000
+ store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
3001
+ item, was_duplicate, lock_wait_ms = store.add(
3002
+ title=title,
3003
+ description=description,
3004
+ priority=priority,
3005
+ tags=tags,
3006
+ source=source,
3007
+ requester=requester,
3008
+ idempotency_key=idempotency_key,
3009
+ dry_run=dry_run,
3010
+ )
3011
+ except LockAcquisitionError:
3012
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3013
+ _metrics.counter(metric_key, labels={"status": "error"})
3014
+ return asdict(
3015
+ error_response(
3016
+ "Failed to acquire file lock within timeout. Resource is busy.",
3017
+ error_code=ErrorCode.RESOURCE_BUSY,
3018
+ error_type=ErrorType.UNAVAILABLE,
3019
+ remediation="Retry after a moment",
3020
+ request_id=request_id,
3021
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
3022
+ )
1125
3023
  )
1126
- except Exception as exc: # pragma: no cover - defensive guard
1127
- logger.exception("Unexpected error listing assumptions")
3024
+ except Exception as exc:
3025
+ logger.exception("Unexpected error adding intake item")
3026
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
1128
3027
  _metrics.counter(metric_key, labels={"status": "error"})
1129
3028
  return asdict(
1130
3029
  error_response(
1131
- sanitize_error_message(exc, context="authoring"),
3030
+ sanitize_error_message(exc, context="authoring.intake-add"),
1132
3031
  error_code=ErrorCode.INTERNAL_ERROR,
1133
3032
  error_type=ErrorType.INTERNAL,
1134
3033
  remediation="Check logs for details",
1135
3034
  request_id=request_id,
3035
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1136
3036
  )
1137
3037
  )
1138
3038
 
1139
3039
  elapsed_ms = (time.perf_counter() - start_time) * 1000
1140
3040
  _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
3041
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
1141
3042
 
1142
- if error:
3043
+ data = {
3044
+ "item": item.to_dict(),
3045
+ "intake_path": store.intake_path,
3046
+ "was_duplicate": was_duplicate,
3047
+ }
3048
+
3049
+ meta_extra = {}
3050
+ if dry_run:
3051
+ meta_extra["dry_run"] = True
3052
+
3053
+ return asdict(
3054
+ success_response(
3055
+ data=data,
3056
+ telemetry={"duration_ms": round(elapsed_ms, 2), "lock_wait_ms": round(lock_wait_ms, 2)},
3057
+ request_id=request_id,
3058
+ meta=meta_extra,
3059
+ )
3060
+ )
3061
+
3062
+
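IntakeStore.add returns a was_duplicate flag alongside the stored item, and intake-add accepts an idempotency_key, which suggests that resubmitting the same key is the safe way to retry a request; treat that reading as an inference from the fields shown here. A hypothetical payload within the validated limits:

payload = {
    "action": "intake-add",
    "title": "Investigate flaky lock timeouts",           # required, <= 140 chars
    "description": "Seen intermittently under CI load.",  # optional, <= 2000 chars
    "priority": "p1",                                      # p0..p4, default p2
    "tags": ["intake", "locking"],                         # lowercase, <= 20 tags
    "source": "ci",                                        # optional, <= 100 chars
    "requester": "alice",                                  # optional, <= 100 chars
    "idempotency_key": "flaky-lock-timeouts-001",          # optional, <= 64 chars
}
# Success data carries item, intake_path and was_duplicate; telemetry adds lock_wait_ms.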
3063
+ # Intake list constants (from intake.py)
3064
+ _INTAKE_LIST_DEFAULT_LIMIT = 50
3065
+ _INTAKE_LIST_MAX_LIMIT = 200
3066
+
3067
+
3068
+ def _handle_intake_list(*, config: ServerConfig, **payload: Any) -> dict:
3069
+ """List intake items with status='new' in FIFO order with pagination."""
3070
+ request_id = _request_id()
3071
+ action = "intake-list"
3072
+
3073
+ # Check feature flag
3074
+ blocked = _intake_feature_flag_blocked(request_id)
3075
+ if blocked:
3076
+ return blocked
3077
+
3078
+ # Validate limit (optional, default 50, range 1-200)
3079
+ limit = payload.get("limit", _INTAKE_LIST_DEFAULT_LIMIT)
3080
+ if limit is not None:
3081
+ if not isinstance(limit, int):
3082
+ return _validation_error(
3083
+ field="limit",
3084
+ action=action,
3085
+ message="limit must be an integer",
3086
+ request_id=request_id,
3087
+ code=ErrorCode.INVALID_FORMAT,
3088
+ )
3089
+ if limit < 1 or limit > _INTAKE_LIST_MAX_LIMIT:
3090
+ return _validation_error(
3091
+ field="limit",
3092
+ action=action,
3093
+ message=f"limit must be between 1 and {_INTAKE_LIST_MAX_LIMIT}",
3094
+ request_id=request_id,
3095
+ code=ErrorCode.VALIDATION_ERROR,
3096
+ remediation=f"Use a value between 1 and {_INTAKE_LIST_MAX_LIMIT} (default: {_INTAKE_LIST_DEFAULT_LIMIT})",
3097
+ )
3098
+
3099
+ # Validate cursor (optional string)
3100
+ cursor = payload.get("cursor")
3101
+ if cursor is not None:
3102
+ if not isinstance(cursor, str):
3103
+ return _validation_error(
3104
+ field="cursor",
3105
+ action=action,
3106
+ message="cursor must be a string",
3107
+ request_id=request_id,
3108
+ code=ErrorCode.INVALID_FORMAT,
3109
+ )
3110
+ cursor = cursor.strip() or None
3111
+
3112
+ # Validate path (optional workspace override)
3113
+ path = payload.get("path")
3114
+ if path is not None and not isinstance(path, str):
3115
+ return _validation_error(
3116
+ field="path",
3117
+ action=action,
3118
+ message="path must be a string",
3119
+ request_id=request_id,
3120
+ code=ErrorCode.INVALID_FORMAT,
3121
+ )
3122
+
3123
+ # Resolve specs directory
3124
+ specs_dir = _resolve_specs_dir(config, path)
3125
+ if specs_dir is None:
3126
+ return _specs_directory_missing_error(request_id)
3127
+
3128
+ # Audit log
3129
+ audit_log(
3130
+ "tool_invocation",
3131
+ tool="authoring",
3132
+ action=action,
3133
+ limit=limit,
3134
+ has_cursor=cursor is not None,
3135
+ )
3136
+
3137
+ metric_key = _metric_name(action)
3138
+ start_time = time.perf_counter()
3139
+
3140
+ try:
3141
+ # Get bikelane_dir from config (allows customization via TOML or env var)
3142
+ bikelane_dir = config.get_bikelane_dir(specs_dir)
3143
+ store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
3144
+ items, total_count, next_cursor, has_more, lock_wait_ms = store.list_new(
3145
+ cursor=cursor,
3146
+ limit=limit,
3147
+ )
3148
+ except LockAcquisitionError:
3149
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
1143
3150
  _metrics.counter(metric_key, labels={"status": "error"})
1144
- if "not found" in error.lower():
1145
- return asdict(
1146
- error_response(
1147
- f"Specification '{spec_id}' not found",
1148
- error_code=ErrorCode.SPEC_NOT_FOUND,
1149
- error_type=ErrorType.NOT_FOUND,
1150
- remediation='Verify the spec ID via spec(action="list")',
1151
- request_id=request_id,
1152
- )
3151
+ return asdict(
3152
+ error_response(
3153
+ "Failed to acquire file lock within timeout. Resource is busy.",
3154
+ error_code=ErrorCode.RESOURCE_BUSY,
3155
+ error_type=ErrorType.UNAVAILABLE,
3156
+ remediation="Retry after a moment",
3157
+ request_id=request_id,
3158
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1153
3159
  )
3160
+ )
3161
+ except Exception as exc:
3162
+ logger.exception("Unexpected error listing intake items")
3163
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3164
+ _metrics.counter(metric_key, labels={"status": "error"})
1154
3165
  return asdict(
1155
3166
  error_response(
1156
- f"Failed to list assumptions: {error}",
3167
+ sanitize_error_message(exc, context="authoring.intake-list"),
1157
3168
  error_code=ErrorCode.INTERNAL_ERROR,
1158
3169
  error_type=ErrorType.INTERNAL,
1159
- remediation="Check that the spec exists",
3170
+ remediation="Check logs for details",
1160
3171
  request_id=request_id,
3172
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1161
3173
  )
1162
3174
  )
1163
3175
 
1164
- warnings: List[str] = []
1165
- if assumption_type:
1166
- warnings.append(
1167
- "assumption_type filter is advisory only; all assumptions are returned"
1168
- )
1169
-
3176
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3177
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
1170
3178
  _metrics.counter(metric_key, labels={"status": "success"})
3179
+
3180
+ data = {
3181
+ "items": [item.to_dict() for item in items],
3182
+ "total_count": total_count,
3183
+ "intake_path": store.intake_path,
3184
+ }
3185
+
3186
+ # Build pagination metadata
3187
+ pagination = None
3188
+ if has_more or cursor is not None:
3189
+ pagination = {
3190
+ "cursor": next_cursor,
3191
+ "has_more": has_more,
3192
+ "page_size": limit,
3193
+ }
3194
+
1171
3195
  return asdict(
1172
3196
  success_response(
1173
- data=result or {"spec_id": spec_id, "assumptions": [], "total_count": 0},
1174
- warnings=warnings or None,
1175
- telemetry={"duration_ms": round(elapsed_ms, 2)},
3197
+ data=data,
3198
+ pagination=pagination,
3199
+ telemetry={
3200
+ "duration_ms": round(elapsed_ms, 2),
3201
+ "lock_wait_ms": round(lock_wait_ms, 2),
3202
+ },
1176
3203
  request_id=request_id,
1177
3204
  )
1178
3205
  )
1179
3206
 
1180
3207
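intake-list caps limit at 200 (default 50) and only attaches a pagination block when a cursor is in play. A sketch of draining the queue page by page, assuming a generic call_authoring transport helper and that the pagination block sits at the top level of the response dict:

def drain_intake(call_authoring):
    # call_authoring is an assumed helper that sends a payload and returns the envelope as a dict.
    items, cursor = [], None
    while True:
        resp = call_authoring({"action": "intake-list", "limit": 50, "cursor": cursor})
        items.extend(resp["data"]["items"])
        page = resp.get("pagination") or {}
        if not page.get("has_more"):
            return items
        cursor = page["cursor"]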
 
1181
- def _handle_revision_add(*, config: ServerConfig, **payload: Any) -> dict:
3208
+ # Intake dismiss constants
3209
+ _INTAKE_DISMISS_REASON_MAX_LEN = 200
3210
+
3211
+
3212
+ def _handle_intake_dismiss(*, config: ServerConfig, **payload: Any) -> dict:
3213
+ """Dismiss an intake item by changing its status to 'dismissed'."""
1182
3214
  request_id = _request_id()
1183
- action = "revision-add"
3215
+ action = "intake-dismiss"
1184
3216
 
1185
- spec_id = payload.get("spec_id")
1186
- if not isinstance(spec_id, str) or not spec_id.strip():
1187
- return _validation_error(
1188
- field="spec_id",
1189
- action=action,
1190
- message="Provide a non-empty spec_id parameter",
1191
- request_id=request_id,
1192
- code=ErrorCode.MISSING_REQUIRED,
1193
- )
1194
- spec_id = spec_id.strip()
3217
+ # Check feature flag
3218
+ blocked = _intake_feature_flag_blocked(request_id)
3219
+ if blocked:
3220
+ return blocked
1195
3221
 
1196
- version = payload.get("version")
1197
- if not isinstance(version, str) or not version.strip():
3222
+ # Validate intake_id (required, must match pattern)
3223
+ intake_id = payload.get("intake_id")
3224
+ if not isinstance(intake_id, str) or not intake_id.strip():
1198
3225
  return _validation_error(
1199
- field="version",
3226
+ field="intake_id",
1200
3227
  action=action,
1201
- message="Provide the revision version (e.g., 1.1)",
3228
+ message="Provide a valid intake_id",
1202
3229
  request_id=request_id,
1203
3230
  code=ErrorCode.MISSING_REQUIRED,
1204
3231
  )
1205
- version = version.strip()
1206
-
1207
- changes = payload.get("changes")
1208
- if not isinstance(changes, str) or not changes.strip():
3232
+ intake_id = intake_id.strip()
3233
+ if not INTAKE_ID_PATTERN.match(intake_id):
1209
3234
  return _validation_error(
1210
- field="changes",
3235
+ field="intake_id",
1211
3236
  action=action,
1212
- message="Provide a summary of changes",
3237
+ message="intake_id must match pattern intake-<uuid>",
1213
3238
  request_id=request_id,
1214
- code=ErrorCode.MISSING_REQUIRED,
3239
+ code=ErrorCode.INVALID_FORMAT,
3240
+ remediation="Use format: intake-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
1215
3241
  )
1216
- changes = changes.strip()
1217
3242
 
1218
- author = payload.get("author")
1219
- if author is not None and not isinstance(author, str):
1220
- return _validation_error(
1221
- field="author",
1222
- action=action,
1223
- message="Author must be a string",
1224
- request_id=request_id,
1225
- )
3243
+ # Validate reason (optional, max 200 chars)
3244
+ reason = payload.get("reason")
3245
+ if reason is not None:
3246
+ if not isinstance(reason, str):
3247
+ return _validation_error(
3248
+ field="reason",
3249
+ action=action,
3250
+ message="reason must be a string",
3251
+ request_id=request_id,
3252
+ code=ErrorCode.INVALID_FORMAT,
3253
+ )
3254
+ reason = reason.strip() or None
3255
+ if reason and len(reason) > _INTAKE_DISMISS_REASON_MAX_LEN:
3256
+ return _validation_error(
3257
+ field="reason",
3258
+ action=action,
3259
+ message=f"reason exceeds maximum length of {_INTAKE_DISMISS_REASON_MAX_LEN} characters",
3260
+ request_id=request_id,
3261
+ code=ErrorCode.VALIDATION_ERROR,
3262
+ remediation=f"Shorten reason to {_INTAKE_DISMISS_REASON_MAX_LEN} characters or less",
3263
+ )
1226
3264
 
3265
+ # Validate dry_run
1227
3266
  dry_run = payload.get("dry_run", False)
1228
3267
  if not isinstance(dry_run, bool):
1229
3268
  return _validation_error(
1230
3269
  field="dry_run",
1231
3270
  action=action,
1232
- message="Expected a boolean value",
3271
+ message="dry_run must be a boolean",
1233
3272
  request_id=request_id,
3273
+ code=ErrorCode.INVALID_FORMAT,
1234
3274
  )
1235
3275
 
3276
+ # Validate path
1236
3277
  path = payload.get("path")
1237
3278
  if path is not None and not isinstance(path, str):
1238
3279
  return _validation_error(
1239
3280
  field="path",
1240
3281
  action=action,
1241
- message="Workspace path must be a string",
3282
+ message="path must be a string",
1242
3283
  request_id=request_id,
3284
+ code=ErrorCode.INVALID_FORMAT,
1243
3285
  )
1244
3286
 
3287
+ # Resolve specs directory
1245
3288
  specs_dir = _resolve_specs_dir(config, path)
1246
3289
  if specs_dir is None:
1247
3290
  return _specs_directory_missing_error(request_id)
1248
3291
 
3292
+ # Audit log
1249
3293
  audit_log(
1250
3294
  "tool_invocation",
1251
3295
  tool="authoring",
1252
3296
  action=action,
1253
- spec_id=spec_id,
1254
- version=version,
3297
+ intake_id=intake_id,
1255
3298
  dry_run=dry_run,
1256
3299
  )
1257
3300
 
1258
3301
  metric_key = _metric_name(action)
1259
- if dry_run:
1260
- _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
1261
- data = {
1262
- "spec_id": spec_id,
1263
- "version": version,
1264
- "changes": changes,
1265
- "dry_run": True,
1266
- "note": "Dry run - no changes made",
1267
- }
1268
- if author:
1269
- data["author"] = author
3302
+ start_time = time.perf_counter()
3303
+
3304
+ try:
3305
+ # Get bikelane_dir from config (allows customization via TOML or env var)
3306
+ bikelane_dir = config.get_bikelane_dir(specs_dir)
3307
+ store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
3308
+ item, lock_wait_ms = store.dismiss(
3309
+ intake_id=intake_id,
3310
+ reason=reason,
3311
+ dry_run=dry_run,
3312
+ )
3313
+ except LockAcquisitionError:
3314
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
3315
+ _metrics.counter(metric_key, labels={"status": "error"})
1270
3316
  return asdict(
1271
- success_response(
1272
- data=data,
3317
+ error_response(
3318
+ "Failed to acquire file lock within timeout. Resource is busy.",
3319
+ error_code=ErrorCode.RESOURCE_BUSY,
3320
+ error_type=ErrorType.UNAVAILABLE,
3321
+ remediation="Retry after a moment",
1273
3322
  request_id=request_id,
3323
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1274
3324
  )
1275
3325
  )
1276
-
1277
- start_time = time.perf_counter()
1278
- try:
1279
- result, error = add_revision(
1280
- spec_id=spec_id,
1281
- version=version,
1282
- changelog=changes,
1283
- author=author,
1284
- specs_dir=specs_dir,
1285
- )
1286
- except Exception as exc: # pragma: no cover - defensive guard
1287
- logger.exception("Unexpected error adding revision")
3326
+ except Exception as exc:
3327
+ logger.exception("Unexpected error dismissing intake item")
3328
+ elapsed_ms = (time.perf_counter() - start_time) * 1000
1288
3329
  _metrics.counter(metric_key, labels={"status": "error"})
1289
3330
  return asdict(
1290
3331
  error_response(
1291
- sanitize_error_message(exc, context="authoring"),
3332
+ sanitize_error_message(exc, context="authoring.intake-dismiss"),
1292
3333
  error_code=ErrorCode.INTERNAL_ERROR,
1293
3334
  error_type=ErrorType.INTERNAL,
1294
3335
  remediation="Check logs for details",
1295
3336
  request_id=request_id,
3337
+ telemetry={"duration_ms": round(elapsed_ms, 2)},
1296
3338
  )
1297
3339
  )
1298
3340
 
1299
3341
  elapsed_ms = (time.perf_counter() - start_time) * 1000
1300
- _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
1301
3342
 
1302
- if error:
1303
- _metrics.counter(metric_key, labels={"status": "error"})
1304
- if "not found" in error.lower():
1305
- return asdict(
1306
- error_response(
1307
- f"Specification '{spec_id}' not found",
1308
- error_code=ErrorCode.SPEC_NOT_FOUND,
1309
- error_type=ErrorType.NOT_FOUND,
1310
- remediation='Verify the spec ID via spec(action="list")',
1311
- request_id=request_id,
1312
- )
1313
- )
3343
+ # Handle not found case
3344
+ if item is None:
3345
+ _metrics.counter(metric_key, labels={"status": "not_found"})
1314
3346
  return asdict(
1315
3347
  error_response(
1316
- f"Failed to add revision: {error}",
1317
- error_code=ErrorCode.INTERNAL_ERROR,
1318
- error_type=ErrorType.INTERNAL,
1319
- remediation="Check that the spec exists",
3348
+ f"Intake item not found: {intake_id}",
3349
+ error_code=ErrorCode.NOT_FOUND,
3350
+ error_type=ErrorType.NOT_FOUND,
3351
+ remediation="Verify the intake_id exists using intake-list action",
1320
3352
  request_id=request_id,
3353
+ telemetry={"duration_ms": round(elapsed_ms, 2), "lock_wait_ms": round(lock_wait_ms, 2)},
1321
3354
  )
1322
3355
  )
1323
3356
 
3357
+ _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
3358
+ _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
3359
+
1324
3360
  data = {
1325
- "spec_id": spec_id,
1326
- "version": version,
1327
- "changes": changes,
1328
- "dry_run": False,
3361
+ "item": item.to_dict(),
3362
+ "intake_path": store.intake_path,
1329
3363
  }
1330
- if author:
1331
- data["author"] = author
1332
- if result and result.get("date"):
1333
- data["date"] = result["date"]
1334
3364
 
1335
- _metrics.counter(metric_key, labels={"status": "success"})
3365
+ meta_extra = {}
3366
+ if dry_run:
3367
+ meta_extra["dry_run"] = True
3368
+
1336
3369
  return asdict(
1337
3370
  success_response(
1338
3371
  data=data,
1339
- telemetry={"duration_ms": round(elapsed_ms, 2)},
3372
+ telemetry={
3373
+ "duration_ms": round(elapsed_ms, 2),
3374
+ "lock_wait_ms": round(lock_wait_ms, 2),
3375
+ },
1340
3376
  request_id=request_id,
3377
+ meta=meta_extra,
1341
3378
  )
1342
3379
  )
1343
3380
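intake-dismiss insists on IDs of the form intake-<uuid> and maps a missing item onto a NOT_FOUND response rather than raising. A hypothetical two-step flow (the item-ID field name in the listing is assumed):

listing = {"action": "intake-list", "limit": 1}
# ...send listing and read the oldest item's ID from data["items"][0]...
dismissal = {
    "action": "intake-dismiss",
    "intake_id": "intake-123e4567-e89b-12d3-a456-426614174000",  # placeholder UUID
    "reason": "Duplicate of an existing spec",                   # optional, <= 200 chars
    "dry_run": False,
}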
 
@@ -1363,12 +3400,48 @@ _AUTHORING_ROUTER = ActionRouter(
1363
3400
  summary=_ACTION_SUMMARY["spec-update-frontmatter"],
1364
3401
  aliases=("spec_update_frontmatter",),
1365
3402
  ),
3403
+ ActionDefinition(
3404
+ name="spec-find-replace",
3405
+ handler=_handle_spec_find_replace,
3406
+ summary=_ACTION_SUMMARY["spec-find-replace"],
3407
+ aliases=("spec_find_replace",),
3408
+ ),
3409
+ ActionDefinition(
3410
+ name="spec-rollback",
3411
+ handler=_handle_spec_rollback,
3412
+ summary=_ACTION_SUMMARY["spec-rollback"],
3413
+ aliases=("spec_rollback",),
3414
+ ),
1366
3415
  ActionDefinition(
1367
3416
  name="phase-add",
1368
3417
  handler=_handle_phase_add,
1369
3418
  summary=_ACTION_SUMMARY["phase-add"],
1370
3419
  aliases=("phase_add",),
1371
3420
  ),
3421
+ ActionDefinition(
3422
+ name="phase-add-bulk",
3423
+ handler=_handle_phase_add_bulk,
3424
+ summary=_ACTION_SUMMARY["phase-add-bulk"],
3425
+ aliases=("phase_add_bulk",),
3426
+ ),
3427
+ ActionDefinition(
3428
+ name="phase-template",
3429
+ handler=_handle_phase_template,
3430
+ summary=_ACTION_SUMMARY["phase-template"],
3431
+ aliases=("phase_template",),
3432
+ ),
3433
+ ActionDefinition(
3434
+ name="phase-move",
3435
+ handler=_handle_phase_move,
3436
+ summary=_ACTION_SUMMARY["phase-move"],
3437
+ aliases=("phase_move",),
3438
+ ),
3439
+ ActionDefinition(
3440
+ name="phase-update-metadata",
3441
+ handler=_handle_phase_update_metadata,
3442
+ summary=_ACTION_SUMMARY["phase-update-metadata"],
3443
+ aliases=("phase_update_metadata",),
3444
+ ),
1372
3445
  ActionDefinition(
1373
3446
  name="phase-remove",
1374
3447
  handler=_handle_phase_remove,
@@ -1393,6 +3466,24 @@ _AUTHORING_ROUTER = ActionRouter(
1393
3466
  summary=_ACTION_SUMMARY["revision-add"],
1394
3467
  aliases=("revision_add",),
1395
3468
  ),
3469
+ ActionDefinition(
3470
+ name="intake-add",
3471
+ handler=_handle_intake_add,
3472
+ summary=_ACTION_SUMMARY["intake-add"],
3473
+ aliases=("intake_add",),
3474
+ ),
3475
+ ActionDefinition(
3476
+ name="intake-list",
3477
+ handler=_handle_intake_list,
3478
+ summary=_ACTION_SUMMARY["intake-list"],
3479
+ aliases=("intake_list",),
3480
+ ),
3481
+ ActionDefinition(
3482
+ name="intake-dismiss",
3483
+ handler=_handle_intake_dismiss,
3484
+ summary=_ACTION_SUMMARY["intake-dismiss"],
3485
+ aliases=("intake_dismiss",),
3486
+ ),
1396
3487
  ],
1397
3488
  )
1398
3489
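Each ActionDefinition above registers an underscore alias, so intake_add and intake-add resolve to the same handler. A minimal stand-in for that aliasing (the real ActionRouter is defined elsewhere in the package):

handlers = {}

def register(name, handler, aliases=()):
    # Register the canonical name and every alias against the same callable.
    for key in (name, *aliases):
        handlers[key] = handler

register("intake-add", lambda **kw: {"ok": True}, aliases=("intake_add",))
assert handlers["intake-add"] is handlers["intake_add"]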
 
@@ -1430,6 +3521,7 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1430
3521
  name: Optional[str] = None,
1431
3522
  template: Optional[str] = None,
1432
3523
  category: Optional[str] = None,
3524
+ mission: Optional[str] = None,
1433
3525
  template_action: Optional[str] = None,
1434
3526
  template_name: Optional[str] = None,
1435
3527
  key: Optional[str] = None,
@@ -1447,8 +3539,23 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1447
3539
  author: Optional[str] = None,
1448
3540
  version: Optional[str] = None,
1449
3541
  changes: Optional[str] = None,
3542
+ tasks: Optional[List[Dict[str, Any]]] = None,
3543
+ phase: Optional[Dict[str, Any]] = None,
3544
+ metadata_defaults: Optional[Dict[str, Any]] = None,
1450
3545
  dry_run: bool = False,
1451
3546
  path: Optional[str] = None,
3547
+ # spec-find-replace parameters
3548
+ find: Optional[str] = None,
3549
+ replace: Optional[str] = None,
3550
+ scope: Optional[str] = None,
3551
+ use_regex: bool = False,
3552
+ case_sensitive: bool = True,
3553
+ # intake parameters
3554
+ priority: Optional[str] = None,
3555
+ tags: Optional[List[str]] = None,
3556
+ source: Optional[str] = None,
3557
+ requester: Optional[str] = None,
3558
+ idempotency_key: Optional[str] = None,
1452
3559
  ) -> dict:
1453
3560
  """Execute authoring workflows via the action router."""
1454
3561
 
@@ -1457,6 +3564,7 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1457
3564
  "name": name,
1458
3565
  "template": template,
1459
3566
  "category": category,
3567
+ "mission": mission,
1460
3568
  "template_action": template_action,
1461
3569
  "template_name": template_name,
1462
3570
  "key": key,
@@ -1474,8 +3582,23 @@ def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
1474
3582
  "author": author,
1475
3583
  "version": version,
1476
3584
  "changes": changes,
3585
+ "tasks": tasks,
3586
+ "phase": phase,
3587
+ "metadata_defaults": metadata_defaults,
1477
3588
  "dry_run": dry_run,
1478
3589
  "path": path,
3590
+ # spec-find-replace parameters
3591
+ "find": find,
3592
+ "replace": replace,
3593
+ "scope": scope,
3594
+ "use_regex": use_regex,
3595
+ "case_sensitive": case_sensitive,
3596
+ # intake parameters
3597
+ "priority": priority,
3598
+ "tags": tags,
3599
+ "source": source,
3600
+ "requester": requester,
3601
+ "idempotency_key": idempotency_key,
1479
3602
  }
1480
3603
  return _dispatch_authoring_action(action=action, payload=payload, config=config)
1481
3604
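The tool function exposes a single flat keyword surface, folds everything into one payload dict, and leaves it to each handler to pull out the fields it understands via payload.get; parameters left at None are simply ignored by handlers that do not use them. The dispatch shape in miniature:

def handle_example(*, config=None, **payload):
    # Handlers read only the keys they care about; extra keys are harmless.
    spec_id = payload.get("spec_id")
    dry_run = payload.get("dry_run", False)
    return {"spec_id": spec_id, "dry_run": dry_run}

print(handle_example(config=None, **{"spec_id": "example-spec", "dry_run": True, "priority": None}))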