foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,3620 @@
|
|
|
1
|
+
"""Unified authoring tool backed by ActionRouter and shared validation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import re
|
|
7
|
+
import time
|
|
8
|
+
from dataclasses import asdict
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any, Dict, List, Optional
|
|
11
|
+
|
|
12
|
+
from mcp.server.fastmcp import FastMCP
|
|
13
|
+
|
|
14
|
+
from foundry_mcp.config import ServerConfig
|
|
15
|
+
from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
|
|
16
|
+
from foundry_mcp.core.intake import IntakeStore, LockAcquisitionError, INTAKE_ID_PATTERN
|
|
17
|
+
from foundry_mcp.core.naming import canonical_tool
|
|
18
|
+
from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
|
|
19
|
+
from foundry_mcp.core.responses import (
|
|
20
|
+
ErrorCode,
|
|
21
|
+
ErrorType,
|
|
22
|
+
error_response,
|
|
23
|
+
sanitize_error_message,
|
|
24
|
+
success_response,
|
|
25
|
+
)
|
|
26
|
+
from foundry_mcp.core.spec import (
|
|
27
|
+
CATEGORIES,
|
|
28
|
+
PHASE_TEMPLATES,
|
|
29
|
+
TEMPLATES,
|
|
30
|
+
add_assumption,
|
|
31
|
+
add_phase,
|
|
32
|
+
add_phase_bulk,
|
|
33
|
+
add_revision,
|
|
34
|
+
apply_phase_template,
|
|
35
|
+
create_spec,
|
|
36
|
+
find_replace_in_spec,
|
|
37
|
+
find_specs_directory,
|
|
38
|
+
generate_spec_data,
|
|
39
|
+
get_phase_template_structure,
|
|
40
|
+
list_assumptions,
|
|
41
|
+
load_spec,
|
|
42
|
+
move_phase,
|
|
43
|
+
remove_phase,
|
|
44
|
+
rollback_spec,
|
|
45
|
+
update_frontmatter,
|
|
46
|
+
update_phase_metadata,
|
|
47
|
+
)
|
|
48
|
+
from foundry_mcp.core.validation import validate_spec
|
|
49
|
+
from foundry_mcp.tools.unified.router import (
|
|
50
|
+
ActionDefinition,
|
|
51
|
+
ActionRouter,
|
|
52
|
+
ActionRouterError,
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
# Module-level logger and shared metrics sink for all authoring handlers.
logger = logging.getLogger(__name__)
_metrics = get_metrics()

# One-line human-readable summary per supported authoring action.
# Keys are the action slugs accepted by the ActionRouter (dash-separated);
# presumably surfaced as router/tool documentation — confirm against
# ActionDefinition usage elsewhere in this file.
_ACTION_SUMMARY = {
    "spec-create": "Scaffold a new SDD specification",
    "spec-template": "List/show/apply spec templates",
    "spec-update-frontmatter": "Update a top-level metadata field",
    "spec-find-replace": "Find and replace text across spec titles and descriptions",
    "spec-rollback": "Restore a spec from a backup timestamp",
    "phase-add": "Add a new phase under spec-root with verification scaffolding",
    "phase-add-bulk": "Add a phase with pre-defined tasks in a single atomic operation",
    "phase-template": "List/show/apply phase templates to add pre-configured phases",
    "phase-move": "Reorder a phase within spec-root children",
    "phase-update-metadata": "Update metadata fields of an existing phase",
    "phase-remove": "Remove an existing phase (and optionally dependents)",
    "assumption-add": "Append an assumption entry to spec metadata",
    "assumption-list": "List recorded assumptions for a spec",
    "revision-add": "Record a revision entry in the spec history",
    "intake-add": "Capture a new work idea in the bikelane intake queue",
    "intake-list": "List new intake items awaiting triage in FIFO order",
    "intake-dismiss": "Dismiss an intake item from the triage queue",
}
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _metric_name(action: str) -> str:
|
|
80
|
+
return f"authoring.{action.replace('-', '_')}"
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _request_id() -> str:
    """Return the active correlation id, minting an authoring-scoped one if absent."""
    existing = get_correlation_id()
    if existing:
        return existing
    return generate_correlation_id(prefix="authoring")
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _validation_error(
    *,
    field: str,
    action: str,
    message: str,
    request_id: str,
    code: ErrorCode = ErrorCode.VALIDATION_ERROR,
    remediation: Optional[str] = None,
) -> dict:
    """Build a serialized validation-error envelope for a single bad field.

    The action is qualified as ``authoring.<action>`` in both the message
    and the structured details so callers can pinpoint which dispatch failed.
    """
    qualified = f"authoring.{action}"
    response = error_response(
        f"Invalid field '{field}' for {qualified}: {message}",
        error_code=code,
        error_type=ErrorType.VALIDATION,
        remediation=remediation,
        details={"field": field, "action": qualified},
        request_id=request_id,
    )
    return asdict(response)
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def _specs_directory_missing_error(request_id: str) -> dict:
    """Return the serialized NOT_FOUND envelope for an unresolvable specs directory."""
    response = error_response(
        "No specs directory found. Use specs_dir parameter or set SDD_SPECS_DIR.",
        error_code=ErrorCode.NOT_FOUND,
        error_type=ErrorType.NOT_FOUND,
        remediation="Use --specs-dir or set SDD_SPECS_DIR",
        request_id=request_id,
    )
    return asdict(response)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def _resolve_specs_dir(config: ServerConfig, path: Optional[str]) -> Optional[Path]:
    """Resolve the specs directory for a request.

    Precedence: an explicit ``path`` argument, then the configured
    ``config.specs_dir``, then auto-discovery. Any failure is logged and
    reported as ``None`` rather than raised.
    """
    try:
        if path:
            return find_specs_directory(path)
        configured = config.specs_dir
        if configured:
            return configured
        return find_specs_directory()
    except Exception:  # pragma: no cover - defensive guard
        logger.exception("Failed to resolve specs directory", extra={"path": path})
        return None
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
def _phase_exists(spec_id: str, specs_dir: Path, title: str) -> bool:
    """Report whether the spec already contains a phase with this title.

    Titles are compared after stripping whitespace and casefolding. Any
    failure to load or inspect the spec is treated as "no duplicate" so
    authoring can proceed (the failure is logged).
    """
    try:
        spec_data = load_spec(spec_id, specs_dir)
    except Exception:  # pragma: no cover - defensive guard
        logger.exception(
            "Failed to inspect spec for duplicate phases", extra={"spec_id": spec_id}
        )
        return False

    if not spec_data:
        return False

    hierarchy = spec_data.get("hierarchy", {})
    if not isinstance(hierarchy, dict):
        return False

    wanted = title.strip().casefold()
    for node in hierarchy.values():
        # Only dict nodes explicitly typed as phases participate in the check.
        if not isinstance(node, dict) or node.get("type") != "phase":
            continue
        candidate = str(node.get("title", "")).strip().casefold()
        if candidate and candidate == wanted:
            return True
    return False
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def _assumption_exists(spec_id: str, specs_dir: Path, text: str) -> bool:
    """Report whether the spec already records an assumption with this text.

    Matching is whitespace-trimmed and case-insensitive; lookup failures
    (error result or empty payload) count as "not present".
    """
    result, error = list_assumptions(spec_id=spec_id, specs_dir=specs_dir)
    if error or not result:
        return False

    wanted = text.strip().casefold()
    recorded = (
        str(entry.get("text", "")).strip().casefold()
        for entry in result.get("assumptions", [])
    )
    # Empty candidate strings never match, mirroring the non-empty guard.
    return any(candidate and candidate == wanted for candidate in recorded)
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def _handle_spec_create(*, config: ServerConfig, **payload: Any) -> dict:
    """Handle the ``spec-create`` authoring action.

    Validates the payload (name, template, category, mission, dry_run, path),
    then either runs a preflight validation (``dry_run=True``) or creates the
    spec on disk via :func:`create_spec`. Returns a serialized
    success/error response dict in all cases; never raises for bad input.

    Fix vs. original: the "only 'empty' template" message was an f-string
    with no placeholders (ruff F541); the prefix is dropped.
    """
    request_id = _request_id()
    action = "spec-create"

    # --- name: required, non-empty string ---
    name = payload.get("name")
    if not isinstance(name, str) or not name.strip():
        return _validation_error(
            field="name",
            action=action,
            message="Provide a non-empty specification name",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )

    # --- template: optional string, defaults to "empty" ---
    template = payload.get("template") or "empty"
    if not isinstance(template, str):
        return _validation_error(
            field="template",
            action=action,
            message="template must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    template = template.strip() or "empty"
    if template not in TEMPLATES:
        return _validation_error(
            field="template",
            action=action,
            message="Only 'empty' template is supported. Use phase templates to add structure.",
            request_id=request_id,
            remediation="Use template='empty' and add phases via phase-add-bulk or phase-template apply",
        )

    # --- category: optional string, defaults to "implementation" ---
    category = payload.get("category") or "implementation"
    if not isinstance(category, str):
        return _validation_error(
            field="category",
            action=action,
            message="category must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    category = category.strip() or "implementation"
    if category not in CATEGORIES:
        return _validation_error(
            field="category",
            action=action,
            message=f"Category must be one of: {', '.join(CATEGORIES)}",
            request_id=request_id,
            remediation=f"Use one of: {', '.join(CATEGORIES)}",
        )

    # --- mission: optional free-text string ---
    mission = payload.get("mission")
    if mission is not None and not isinstance(mission, str):
        return _validation_error(
            field="mission",
            action=action,
            message="mission must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # --- dry_run: optional bool ---
    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # --- path: optional explicit specs directory ---
    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    if dry_run:
        # Preflight: generate the spec in memory and validate it without
        # writing anything to disk.
        spec_data, gen_error = generate_spec_data(
            name=name.strip(),
            template=template,
            category=category,
            mission=mission,
        )
        if gen_error:
            return _validation_error(
                field="spec",
                action=action,
                message=gen_error,
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
            )

        # Run full validation on the generated spec and flatten diagnostics
        # into plain dicts for the response payload.
        validation_result = validate_spec(spec_data)
        diagnostics = [
            {
                "code": d.code,
                "message": d.message,
                "severity": d.severity,
                "location": d.location,
                "suggested_fix": d.suggested_fix,
            }
            for d in validation_result.diagnostics
        ]

        return asdict(
            success_response(
                data={
                    "name": name.strip(),
                    "spec_id": spec_data["spec_id"],
                    "template": template,
                    "category": category,
                    "mission": mission.strip() if isinstance(mission, str) else None,
                    "dry_run": True,
                    "is_valid": validation_result.is_valid,
                    "error_count": validation_result.error_count,
                    "warning_count": validation_result.warning_count,
                    "diagnostics": diagnostics,
                    "note": "Preflight validation complete - no changes made",
                },
                request_id=request_id,
            )
        )

    # Real creation path: audited and timed.
    start_time = time.perf_counter()
    audit_log(
        "tool_invocation",
        tool="authoring",
        action="spec_create",
        name=name.strip(),
        template=template,
        category=category,
    )

    result, error = create_spec(
        name=name.strip(),
        template=template,
        category=category,
        mission=mission,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start_time) * 1000
    metric_key = _metric_name(action)
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        lowered = error.lower()
        # Duplicate specs map to a CONFLICT response; everything else is
        # reported as an internal failure.
        if "already exists" in lowered:
            return asdict(
                error_response(
                    f"A specification with name '{name.strip()}' already exists",
                    error_code=ErrorCode.DUPLICATE_ENTRY,
                    error_type=ErrorType.CONFLICT,
                    remediation="Use a different name or update the existing spec",
                    request_id=request_id,
                    telemetry={"duration_ms": round(elapsed_ms, 2)},
                )
            )
        return asdict(
            error_response(
                f"Failed to create specification: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check that the specs directory is writable",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    data: Dict[str, Any] = {
        "spec_id": (result or {}).get("spec_id"),
        "spec_path": (result or {}).get("spec_path"),
        "template": template,
        "category": category,
        "name": name.strip(),
    }
    if result and result.get("structure"):
        data["structure"] = result["structure"]

    _metrics.counter(metric_key, labels={"status": "success"})
    return asdict(
        success_response(
            data=data,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
def _handle_spec_template(*, config: ServerConfig, **payload: Any) -> dict:
    """Handle the ``spec-template`` authoring action (list/show/apply).

    Only the ``empty`` spec template exists; ``show`` and ``apply`` point
    callers toward phase templates for adding structure. Always returns a
    serialized success/error response dict.
    """
    request_id = _request_id()
    action = "spec-template"

    # template_action: required, one of list/show/apply (case-insensitive).
    template_action = payload.get("template_action")
    if not isinstance(template_action, str) or not template_action.strip():
        return _validation_error(
            field="template_action",
            action=action,
            message="Provide one of: list, show, apply",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    template_action = template_action.strip().lower()
    if template_action not in ("list", "show", "apply"):
        return _validation_error(
            field="template_action",
            action=action,
            message="template_action must be one of: list, show, apply",
            request_id=request_id,
            remediation="Use list, show, or apply",
        )

    # template_name: required (and must be a known template) only for
    # show/apply; "list" ignores it entirely.
    template_name = payload.get("template_name")
    if template_action in ("show", "apply"):
        if not isinstance(template_name, str) or not template_name.strip():
            return _validation_error(
                field="template_name",
                action=action,
                message="Provide a template name",
                request_id=request_id,
                code=ErrorCode.MISSING_REQUIRED,
            )
        template_name = template_name.strip()
        if template_name not in TEMPLATES:
            return asdict(
                error_response(
                    f"Template '{template_name}' not found",
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation=f"Use template_action='list' to see available templates. Valid: {', '.join(TEMPLATES)}",
                    request_id=request_id,
                )
            )

    data: Dict[str, Any] = {"action": template_action}
    if template_action == "list":
        # Spec templates are hard-coded to the single "empty" entry;
        # phase templates come from PHASE_TEMPLATES.
        data["templates"] = [
            {
                "name": "empty",
                "description": "Blank spec with no phases - use phase templates to add structure",
            },
        ]
        data["phase_templates"] = [
            {"name": t, "description": f"Add {t} phase structure"}
            for t in PHASE_TEMPLATES
        ]
        data["total_count"] = 1
        data["message"] = "Use 'empty' template, then add phases via phase-add-bulk or phase-template apply"
    elif template_action == "show":
        data["template_name"] = template_name
        data["content"] = {
            "name": template_name,
            "description": "Blank spec with no phases",
            "usage": "Use authoring(action='spec-create', name='your-spec') to create, then add phases",
            "phase_templates": list(PHASE_TEMPLATES),
        }
    else:
        # "apply" does not actually materialize anything; it returns
        # instructions for the two-step create-then-add-phases flow.
        data["template_name"] = template_name
        data["generated"] = {
            "template": template_name,
            "message": "Use spec-create to create an empty spec, then add phases",
        }
        data["instructions"] = (
            "1. Create spec: authoring(action='spec-create', name='your-spec-name')\n"
            "2. Add phases: authoring(action='phase-template', template_action='apply', "
            "template_name='planning', spec_id='...')"
        )

    return asdict(success_response(data=data, request_id=request_id))
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
def _handle_spec_update_frontmatter(*, config: ServerConfig, **payload: Any) -> dict:
    """Handle the ``spec-update-frontmatter`` authoring action.

    Validates spec_id/key/value (plus optional dry_run and path), resolves
    the specs directory, then delegates to :func:`update_frontmatter`.
    Emits per-action timing and success/error counters. Always returns a
    serialized response dict.
    """
    request_id = _request_id()
    action = "spec-update-frontmatter"

    # spec_id: required, non-empty string.
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )

    # key: required, non-empty metadata field name.
    key = payload.get("key")
    if not isinstance(key, str) or not key.strip():
        return _validation_error(
            field="key",
            action=action,
            message="Provide a non-empty metadata key",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )

    # value: required; None is rejected but any other type is allowed here
    # (type checks are presumably done by update_frontmatter itself — confirm).
    value = payload.get("value")
    if value is None:
        return _validation_error(
            field="value",
            action=action,
            message="Provide a value",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    # Dry-run short-circuits before touching disk: echo the would-be change.
    if dry_run:
        return asdict(
            success_response(
                data={
                    "spec_id": spec_id.strip(),
                    "key": key.strip(),
                    "value": value,
                    "dry_run": True,
                    "note": "Dry run - no changes made",
                },
                request_id=request_id,
            )
        )

    start_time = time.perf_counter()
    result, error = update_frontmatter(
        spec_id=spec_id.strip(),
        key=key.strip(),
        value=value,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start_time) * 1000
    metric_key = _metric_name(action)
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error or not result:
        _metrics.counter(metric_key, labels={"status": "error"})
        # Error classification is string-based on the helper's message text.
        lowered = (error or "").lower()
        if "not found" in lowered and "spec" in lowered:
            return asdict(
                error_response(
                    f"Specification '{spec_id.strip()}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID exists using spec(action="list")',
                    request_id=request_id,
                    telemetry={"duration_ms": round(elapsed_ms, 2)},
                )
            )
        # "use dedicated" signals a list-typed field that must go through
        # assumption-add / revision-add instead of frontmatter.
        if "use dedicated" in lowered:
            return asdict(
                error_response(
                    error or "Invalid metadata key",
                    error_code=ErrorCode.VALIDATION_ERROR,
                    error_type=ErrorType.VALIDATION,
                    remediation="Use authoring(action='assumption-add') or authoring(action='revision-add') for list fields",
                    request_id=request_id,
                    telemetry={"duration_ms": round(elapsed_ms, 2)},
                )
            )
        return asdict(
            error_response(
                error or "Failed to update frontmatter",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation="Provide a valid key and value",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    _metrics.counter(metric_key, labels={"status": "success"})
    return asdict(
        success_response(
            data=result,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
578
|
+
|
|
579
|
+
|
|
580
|
+
# Valid scopes for find-replace: which node fields a spec-find-replace
# operation is allowed to touch ("all" covers both titles and descriptions).
_FIND_REPLACE_SCOPES = {"all", "titles", "descriptions"}
|
|
582
|
+
|
|
583
|
+
|
|
584
|
+
def _handle_spec_find_replace(*, config: ServerConfig, **payload: Any) -> dict:
    """Find and replace text across spec hierarchy nodes.

    Supports literal or regex find/replace across titles and/or descriptions.
    Returns a preview in dry_run mode, or applies changes and returns a summary.

    Payload fields:
        spec_id (str, required): Identifier of the spec to edit.
        find (str, required): Literal text or regex pattern to search for.
        replace (str, required): Replacement text; an empty string deletes
            matches. Must be present (None is rejected) but may be "".
        scope (str, optional): One of _FIND_REPLACE_SCOPES; defaults to "all".
        use_regex (bool, optional): Treat `find` as a regex. Defaults to False.
        case_sensitive (bool, optional): Defaults to True.
        dry_run (bool, optional): Preview only, no changes. Defaults to False.
        path (str, optional): Workspace path used to resolve the specs dir.

    Returns:
        dict: Serialized success or error response envelope (via asdict).
    """
    request_id = _request_id()
    action = "spec-find-replace"

    # Required: spec_id
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
            remediation="Pass the spec identifier to authoring",
        )
    spec_id = spec_id.strip()

    # Required: find
    find = payload.get("find")
    if not isinstance(find, str) or not find:
        return _validation_error(
            field="find",
            action=action,
            message="Provide a non-empty find pattern",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
            remediation="Specify the text or regex pattern to find",
        )

    # Required: replace (can be empty string to delete matches).
    # None and non-strings are rejected separately so each case gets a
    # distinct error code (MISSING_REQUIRED vs INVALID_FORMAT).
    replace = payload.get("replace")
    if replace is None:
        return _validation_error(
            field="replace",
            action=action,
            message="Provide a replace value (use empty string to delete matches)",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
            remediation="Provide a replacement string (use empty string to delete)",
        )
    if not isinstance(replace, str):
        return _validation_error(
            field="replace",
            action=action,
            message="replace must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation="Provide a string value for replace parameter",
        )

    # Optional: scope (default: "all")
    scope = payload.get("scope", "all")
    if not isinstance(scope, str) or scope not in _FIND_REPLACE_SCOPES:
        return _validation_error(
            field="scope",
            action=action,
            message=f"scope must be one of: {sorted(_FIND_REPLACE_SCOPES)}",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation=f"Use one of: {sorted(_FIND_REPLACE_SCOPES)}",
        )

    # Optional: use_regex (default: False)
    use_regex = payload.get("use_regex", False)
    if not isinstance(use_regex, bool):
        return _validation_error(
            field="use_regex",
            action=action,
            message="use_regex must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation="Set use_regex to true or false",
        )

    # Optional: case_sensitive (default: True)
    case_sensitive = payload.get("case_sensitive", True)
    if not isinstance(case_sensitive, bool):
        return _validation_error(
            field="case_sensitive",
            action=action,
            message="case_sensitive must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation="Set case_sensitive to true or false",
        )

    # Optional: dry_run (default: False)
    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation="Set dry_run to true or false",
        )

    # Optional: path (workspace)
    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        # Truncate long patterns to keep the audit entry compact.
        find=find[:50] + "..." if len(find) > 50 else find,
        use_regex=use_regex,
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)
    start_time = time.perf_counter()

    # Delegate the actual search/replace; any unexpected exception is
    # converted into a sanitized INTERNAL error response.
    try:
        result, error = find_replace_in_spec(
            spec_id,
            find,
            replace,
            scope=scope,
            use_regex=use_regex,
            case_sensitive=case_sensitive,
            dry_run=dry_run,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error in spec find-replace")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        # Map error types by substring-matching the helper's error message;
        # unmatched errors fall through to a generic VALIDATION response.
        if "not found" in error.lower():
            return asdict(
                error_response(
                    error,
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Check spec_id value",
                    request_id=request_id,
                    telemetry={"duration_ms": round(elapsed_ms, 2)},
                )
            )
        if "invalid regex" in error.lower():
            return asdict(
                error_response(
                    error,
                    error_code=ErrorCode.INVALID_FORMAT,
                    error_type=ErrorType.VALIDATION,
                    remediation="Check regex syntax",
                    request_id=request_id,
                    telemetry={"duration_ms": round(elapsed_ms, 2)},
                )
            )
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation="Check find and replace parameters",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
    return asdict(
        success_response(
            data=result,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
786
|
+
|
|
787
|
+
|
|
788
|
+
def _handle_spec_rollback(*, config: ServerConfig, **payload: Any) -> dict:
    """Restore a spec from a backup timestamp.

    Payload fields:
        spec_id (str, required): Identifier of the spec to restore.
        version (str, required): Backup timestamp to restore from (use spec
            history to list available backups).
        dry_run (bool, optional): Preview the rollback without modifying
            files. Defaults to False.
        path (str, optional): Workspace path used to resolve the specs dir.

    Returns:
        dict: Serialized success or error response envelope (via asdict).
    """
    request_id = _request_id()
    action = "spec-rollback"

    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = spec_id.strip()

    timestamp = payload.get("version")  # Use 'version' parameter for timestamp
    if not isinstance(timestamp, str) or not timestamp.strip():
        return _validation_error(
            field="version",
            action=action,
            message="Provide the backup timestamp to restore (use spec history to list)",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    timestamp = timestamp.strip()

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        timestamp=timestamp,
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)
    start_time = time.perf_counter()

    # Defensive guard for parity with the other authoring handlers: an
    # unexpected exception must surface as a sanitized INTERNAL error
    # response rather than propagating to the caller.
    try:
        result = rollback_spec(
            spec_id=spec_id,
            timestamp=timestamp,
            specs_dir=specs_dir,
            dry_run=dry_run,
            create_backup=True,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error during spec rollback")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    # Record the duration timer for parity with the sibling handlers.
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if not result.get("success"):
        _metrics.counter(metric_key, labels={"status": "error"})
        # `or` (rather than a .get default) also covers an explicit
        # {"error": None}, which would otherwise crash on .lower() below.
        error_msg = result.get("error") or "Unknown error during rollback"

        # Determine error code based on error message
        if "not found" in error_msg.lower():
            error_code = ErrorCode.NOT_FOUND
            error_type = ErrorType.NOT_FOUND
            remediation = "Use spec(action='history') to list available backups"
        else:
            error_code = ErrorCode.INTERNAL_ERROR
            error_type = ErrorType.INTERNAL
            remediation = "Check spec and backup file permissions"

        return asdict(
            error_response(
                error_msg,
                error_code=error_code,
                error_type=error_type,
                remediation=remediation,
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})
    return asdict(
        success_response(
            spec_id=spec_id,
            timestamp=timestamp,
            dry_run=dry_run,
            restored_from=result.get("restored_from"),
            backup_created=result.get("backup_created"),
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
    )
|
|
896
|
+
|
|
897
|
+
|
|
898
|
+
def _handle_phase_add(*, config: ServerConfig, **payload: Any) -> dict:
    """Add a new phase to an existing spec.

    Validates the payload, warns (without blocking) on duplicate phase
    titles, supports a dry_run preview, and delegates to add_phase.

    Payload fields:
        spec_id (str, required): Identifier of the target spec.
        title (str, required): Title for the new phase.
        description (str, optional), purpose (str, optional): Phase metadata.
        estimated_hours (int|float, optional): Non-negative estimate.
        position (int, optional): Zero-based insertion index.
        link_previous (bool, optional): Defaults to True.
        dry_run (bool, optional): Preview only, no changes. Defaults to False.
        path (str, optional): Workspace path used to resolve the specs dir.

    Returns:
        dict: Serialized success or error response envelope (via asdict).
    """
    request_id = _request_id()
    action = "phase-add"

    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            remediation="Pass the spec identifier to authoring",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = spec_id.strip()

    title = payload.get("title")
    if not isinstance(title, str) or not title.strip():
        return _validation_error(
            field="title",
            action=action,
            message="Provide a non-empty phase title",
            remediation="Include a descriptive title for the new phase",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    title = title.strip()

    description = payload.get("description")
    if description is not None and not isinstance(description, str):
        return _validation_error(
            field="description",
            action=action,
            message="Description must be a string",
            request_id=request_id,
        )
    purpose = payload.get("purpose")
    if purpose is not None and not isinstance(purpose, str):
        return _validation_error(
            field="purpose",
            action=action,
            message="Purpose must be a string",
            request_id=request_id,
        )

    estimated_hours = payload.get("estimated_hours")
    if estimated_hours is not None:
        # bool is a subclass of int, so reject it explicitly before the
        # numeric isinstance check.
        if isinstance(estimated_hours, bool) or not isinstance(
            estimated_hours, (int, float)
        ):
            return _validation_error(
                field="estimated_hours",
                action=action,
                message="Provide a numeric value",
                request_id=request_id,
            )
        if estimated_hours < 0:
            return _validation_error(
                field="estimated_hours",
                action=action,
                message="Value must be non-negative",
                remediation="Set hours to zero or greater",
                request_id=request_id,
            )
        estimated_hours = float(estimated_hours)

    position = payload.get("position")
    if position is not None:
        # Same bool-vs-int guard as estimated_hours above.
        if isinstance(position, bool) or not isinstance(position, int):
            return _validation_error(
                field="position",
                action=action,
                message="Position must be an integer",
                request_id=request_id,
            )
        if position < 0:
            return _validation_error(
                field="position",
                action=action,
                message="Position must be >= 0",
                request_id=request_id,
            )

    link_previous = payload.get("link_previous", True)
    if not isinstance(link_previous, bool):
        return _validation_error(
            field="link_previous",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    # A duplicate title is allowed but flagged so callers can notice it.
    warnings: List[str] = []
    if _phase_exists(spec_id, specs_dir, title):
        warnings.append(
            f"Phase titled '{title}' already exists; the new phase will still be added"
        )

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        title=title,
        dry_run=dry_run,
        link_previous=link_previous,
    )

    metric_key = _metric_name(action)

    # Dry run short-circuits before calling add_phase: nothing is written
    # and no duration timer is recorded.
    if dry_run:
        _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
        return asdict(
            success_response(
                data={
                    "spec_id": spec_id,
                    "phase_id": "(preview)",
                    "title": title,
                    "dry_run": True,
                    "note": "Dry run - no changes made",
                },
                warnings=warnings or None,
                request_id=request_id,
            )
        )

    start_time = time.perf_counter()
    try:
        result, error = add_phase(
            spec_id=spec_id,
            title=title,
            description=description,
            purpose=purpose,
            estimated_hours=estimated_hours,
            position=position,
            link_previous=link_previous,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error adding phase")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        # Map the helper's error message onto a specific response; anything
        # unmatched becomes a generic INTERNAL error.
        lowered = error.lower()
        if "specification" in lowered and "not found" in lowered:
            return asdict(
                error_response(
                    f"Specification '{spec_id}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID via spec(action="list")',
                    request_id=request_id,
                )
            )
        return asdict(
            error_response(
                f"Failed to add phase: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check input values and retry",
                request_id=request_id,
            )
        )

    _metrics.counter(metric_key, labels={"status": "success"})
    return asdict(
        success_response(
            data={"spec_id": spec_id, "dry_run": False, **(result or {})},
            warnings=warnings or None,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
1106
|
+
|
|
1107
|
+
|
|
1108
|
+
def _handle_phase_update_metadata(*, config: ServerConfig, **payload: Any) -> dict:
    """Update metadata fields of an existing phase.

    Requires at least one of estimated_hours, description, or purpose, and
    delegates the actual update to update_phase_metadata.

    Payload fields:
        spec_id (str, required): Identifier of the target spec.
        phase_id (str, required): Phase identifier (e.g. "phase-1").
        estimated_hours (int|float, optional): Non-negative estimate.
        description (str, optional), purpose (str, optional): Phase metadata.
        dry_run (bool, optional): Preview only, no changes. Defaults to False.
        path (str, optional): Workspace path used to resolve the specs dir.

    Returns:
        dict: Serialized success or error response envelope (via asdict).
    """
    request_id = _request_id()
    action = "phase-update-metadata"

    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            remediation="Pass the spec identifier to authoring",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = spec_id.strip()

    phase_id = payload.get("phase_id")
    if not isinstance(phase_id, str) or not phase_id.strip():
        return _validation_error(
            field="phase_id",
            action=action,
            message="Provide a non-empty phase_id parameter",
            remediation="Pass the phase identifier (e.g., 'phase-1')",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    phase_id = phase_id.strip()

    # Extract optional metadata fields
    estimated_hours = payload.get("estimated_hours")
    description = payload.get("description")
    purpose = payload.get("purpose")

    # Validate at least one field is provided
    has_update = any(v is not None for v in [estimated_hours, description, purpose])
    if not has_update:
        return _validation_error(
            field="metadata",
            action=action,
            message="At least one metadata field must be provided",
            remediation="Include estimated_hours, description, or purpose",
            request_id=request_id,
            code=ErrorCode.VALIDATION_FAILED,
        )

    # Validate estimated_hours if provided.
    # bool is a subclass of int, so reject it explicitly before the
    # numeric isinstance check.
    if estimated_hours is not None:
        if isinstance(estimated_hours, bool) or not isinstance(
            estimated_hours, (int, float)
        ):
            return _validation_error(
                field="estimated_hours",
                action=action,
                message="Provide a numeric value",
                remediation="Set estimated_hours to a number >= 0",
                request_id=request_id,
            )
        if estimated_hours < 0:
            return _validation_error(
                field="estimated_hours",
                action=action,
                message="Value must be non-negative",
                remediation="Set hours to zero or greater",
                request_id=request_id,
            )
        estimated_hours = float(estimated_hours)

    # Validate description if provided
    if description is not None and not isinstance(description, str):
        return _validation_error(
            field="description",
            action=action,
            message="Description must be a string",
            remediation="Provide a text description",
            request_id=request_id,
        )

    # Validate purpose if provided
    if purpose is not None and not isinstance(purpose, str):
        return _validation_error(
            field="purpose",
            action=action,
            message="Purpose must be a string",
            remediation="Provide a text purpose",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="Expected a boolean value",
            remediation="Set dry_run to true or false",
            request_id=request_id,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            remediation="Provide a valid workspace path",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        phase_id=phase_id,
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)
    start_time = time.perf_counter()

    # Delegate; unexpected exceptions become sanitized INTERNAL responses.
    try:
        result, error = update_phase_metadata(
            spec_id=spec_id,
            phase_id=phase_id,
            estimated_hours=estimated_hours,
            description=description,
            purpose=purpose,
            dry_run=dry_run,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error updating phase metadata")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        # Map the helper's error message onto a specific response; order
        # matters ("specification not found" is checked before "phase not
        # found"), and anything unmatched becomes a generic INTERNAL error.
        lowered = error.lower()
        if "specification" in lowered and "not found" in lowered:
            return asdict(
                error_response(
                    f"Specification '{spec_id}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID via spec(action="list")',
                    request_id=request_id,
                )
            )
        if "phase" in lowered and "not found" in lowered:
            return asdict(
                error_response(
                    f"Phase '{phase_id}' not found in spec '{spec_id}'",
                    error_code=ErrorCode.TASK_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the phase ID via task(action="query")',
                    request_id=request_id,
                )
            )
        if "not a phase" in lowered:
            return asdict(
                error_response(
                    f"Node '{phase_id}' is not a phase",
                    error_code=ErrorCode.VALIDATION_FAILED,
                    error_type=ErrorType.VALIDATION,
                    remediation="Provide a valid phase ID (e.g., 'phase-1')",
                    request_id=request_id,
                )
            )
        return asdict(
            error_response(
                f"Failed to update phase metadata: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check input values and retry",
                request_id=request_id,
            )
        )

    _metrics.counter(metric_key, labels={"status": "success"})
    return asdict(
        success_response(
            data={"spec_id": spec_id, "phase_id": phase_id, **(result or {})},
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
1309
|
+
|
|
1310
|
+
|
|
1311
|
+
def _handle_phase_add_bulk(*, config: ServerConfig, **payload: Any) -> dict:
    """Validate the phase-add-bulk payload and create a phase plus its tasks
    in one operation via ``add_phase_bulk``.

    Expects the macro format ``{phase: {title, description?, purpose?,
    estimated_hours?, metadata_defaults?}, tasks: [{type, title, ...}]}``.
    Returns a JSON-serializable success or error envelope (as a plain dict).
    """
    request_id = _request_id()
    action = "phase-add-bulk"

    # Validate spec_id
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            remediation="Pass the spec identifier to authoring",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = spec_id.strip()

    # Require macro format: {phase: {...}, tasks: [...]}
    phase_obj = payload.get("phase")
    if not isinstance(phase_obj, dict):
        return _validation_error(
            field="phase",
            action=action,
            message="Provide a phase object with metadata",
            remediation="Use macro format: {phase: {title: '...', description: '...'}, tasks: [...]}",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )

    # Extract phase metadata from nested object
    title = phase_obj.get("title")
    if not isinstance(title, str) or not title.strip():
        return _validation_error(
            field="phase.title",
            action=action,
            message="Provide a non-empty phase title",
            remediation="Include phase.title in the phase object",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    title = title.strip()

    # Validate tasks array
    tasks = payload.get("tasks")
    if not tasks or not isinstance(tasks, list) or len(tasks) == 0:
        return _validation_error(
            field="tasks",
            action=action,
            message="Provide at least one task definition",
            remediation="Include a tasks array with type and title for each task",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )

    # Validate each task in the array
    valid_task_types = {"task", "verify"}
    for idx, task_def in enumerate(tasks):
        if not isinstance(task_def, dict):
            return _validation_error(
                field=f"tasks[{idx}]",
                action=action,
                message="Each task must be a dictionary",
                request_id=request_id,
            )

        task_type = task_def.get("type")
        if not task_type or task_type not in valid_task_types:
            return _validation_error(
                field=f"tasks[{idx}].type",
                action=action,
                message="Task type must be 'task' or 'verify'",
                remediation="Set type to 'task' or 'verify'",
                request_id=request_id,
            )

        task_title = task_def.get("title")
        if not task_title or not isinstance(task_title, str) or not task_title.strip():
            return _validation_error(
                field=f"tasks[{idx}].title",
                action=action,
                message="Each task must have a non-empty title",
                request_id=request_id,
                code=ErrorCode.MISSING_REQUIRED,
            )

        est_hours = task_def.get("estimated_hours")
        if est_hours is not None:
            # bool is an int subclass, so it must be excluded explicitly.
            if isinstance(est_hours, bool) or not isinstance(est_hours, (int, float)):
                return _validation_error(
                    field=f"tasks[{idx}].estimated_hours",
                    action=action,
                    message="estimated_hours must be a number",
                    request_id=request_id,
                )
            if est_hours < 0:
                return _validation_error(
                    field=f"tasks[{idx}].estimated_hours",
                    action=action,
                    message="estimated_hours must be non-negative",
                    request_id=request_id,
                )

    # Validate optional phase metadata (from phase object)
    description = phase_obj.get("description")
    if description is not None and not isinstance(description, str):
        return _validation_error(
            field="phase.description",
            action=action,
            message="Description must be a string",
            request_id=request_id,
        )

    purpose = phase_obj.get("purpose")
    if purpose is not None and not isinstance(purpose, str):
        return _validation_error(
            field="phase.purpose",
            action=action,
            message="Purpose must be a string",
            request_id=request_id,
        )

    estimated_hours = phase_obj.get("estimated_hours")
    if estimated_hours is not None:
        # Reject bool explicitly (bool subclasses int), then require numeric.
        if isinstance(estimated_hours, bool) or not isinstance(
            estimated_hours, (int, float)
        ):
            return _validation_error(
                field="phase.estimated_hours",
                action=action,
                message="Provide a numeric value",
                request_id=request_id,
            )
        if estimated_hours < 0:
            return _validation_error(
                field="phase.estimated_hours",
                action=action,
                message="Value must be non-negative",
                remediation="Set hours to zero or greater",
                request_id=request_id,
            )
        # Normalize to float so downstream storage sees a consistent type.
        estimated_hours = float(estimated_hours)

    # Handle metadata_defaults from both top-level and phase object
    # Top-level serves as base, phase-level overrides
    top_level_defaults = payload.get("metadata_defaults")
    if top_level_defaults is not None and not isinstance(top_level_defaults, dict):
        return _validation_error(
            field="metadata_defaults",
            action=action,
            message="metadata_defaults must be a dictionary",
            request_id=request_id,
        )

    phase_level_defaults = phase_obj.get("metadata_defaults")
    if phase_level_defaults is not None and not isinstance(phase_level_defaults, dict):
        return _validation_error(
            field="phase.metadata_defaults",
            action=action,
            message="metadata_defaults must be a dictionary",
            request_id=request_id,
        )

    # Merge: top-level as base, phase-level overrides
    metadata_defaults = None
    if top_level_defaults or phase_level_defaults:
        metadata_defaults = {**(top_level_defaults or {}), **(phase_level_defaults or {})}

    # Optional insertion position (0-based here; note phase-move uses 1-based).
    position = payload.get("position")
    if position is not None:
        if isinstance(position, bool) or not isinstance(position, int):
            return _validation_error(
                field="position",
                action=action,
                message="Position must be an integer",
                request_id=request_id,
            )
        if position < 0:
            return _validation_error(
                field="position",
                action=action,
                message="Position must be >= 0",
                request_id=request_id,
            )

    # Optional boolean flags: dependency linking and preview mode.
    link_previous = payload.get("link_previous", True)
    if not isinstance(link_previous, bool):
        return _validation_error(
            field="link_previous",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    # Check for duplicate phase title (warning only)
    warnings: List[str] = []
    if _phase_exists(spec_id, specs_dir, title):
        warnings.append(
            f"Phase titled '{title}' already exists; the new phase will still be added"
        )

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        title=title,
        task_count=len(tasks),
        dry_run=dry_run,
        link_previous=link_previous,
    )

    metric_key = _metric_name(action)

    # dry_run short-circuits before any mutation: report a preview payload only.
    if dry_run:
        _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
        preview_tasks = [
            {"task_id": "(preview)", "title": t.get("title", ""), "type": t.get("type", "")}
            for t in tasks
        ]
        return asdict(
            success_response(
                data={
                    "spec_id": spec_id,
                    "phase_id": "(preview)",
                    "title": title,
                    "tasks_created": preview_tasks,
                    "total_tasks": len(tasks),
                    "dry_run": True,
                    "note": "Dry run - no changes made",
                },
                warnings=warnings or None,
                request_id=request_id,
            )
        )

    start_time = time.perf_counter()
    try:
        result, error = add_phase_bulk(
            spec_id=spec_id,
            phase_title=title,
            tasks=tasks,
            phase_description=description,
            phase_purpose=purpose,
            phase_estimated_hours=estimated_hours,
            metadata_defaults=metadata_defaults,
            position=position,
            link_previous=link_previous,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error in phase-add-bulk")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    # Map known error substrings from the core helper onto specific
    # error codes; anything unrecognized becomes INTERNAL_ERROR.
    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        lowered = error.lower()
        if "specification" in lowered and "not found" in lowered:
            return asdict(
                error_response(
                    f"Specification '{spec_id}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID via spec(action="list")',
                    request_id=request_id,
                )
            )
        if "task at index" in lowered:
            return asdict(
                error_response(
                    error,
                    error_code=ErrorCode.VALIDATION_ERROR,
                    error_type=ErrorType.VALIDATION,
                    remediation="Check each task has valid type and title",
                    request_id=request_id,
                )
            )
        return asdict(
            error_response(
                f"Failed to add phase with tasks: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check input values and retry",
                request_id=request_id,
            )
        )

    _metrics.counter(metric_key, labels={"status": "success"})
    return asdict(
        success_response(
            data={"spec_id": spec_id, "dry_run": False, **(result or {})},
            warnings=warnings or None,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
def _handle_phase_template(*, config: ServerConfig, **payload: Any) -> dict:
    """Handle phase-template action: list/show/apply phase templates.

    ``template_action`` selects the sub-action:
    - ``list``: return the inline catalog of available templates.
    - ``show``: return the structure of one named template.
    - ``apply``: add the named template's phase/tasks to a spec
      (supports dry_run preview).
    Returns a JSON-serializable success or error envelope (as a plain dict).
    """
    request_id = _request_id()
    action = "phase-template"

    template_action = payload.get("template_action")
    if not isinstance(template_action, str) or not template_action.strip():
        return _validation_error(
            field="template_action",
            action=action,
            message="Provide one of: list, show, apply",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    template_action = template_action.strip().lower()
    if template_action not in ("list", "show", "apply"):
        return _validation_error(
            field="template_action",
            action=action,
            message="template_action must be one of: list, show, apply",
            request_id=request_id,
            remediation="Use list, show, or apply",
        )

    # template_name is only required for show/apply; it must be a key
    # of the PHASE_TEMPLATES registry.
    template_name = payload.get("template_name")
    if template_action in ("show", "apply"):
        if not isinstance(template_name, str) or not template_name.strip():
            return _validation_error(
                field="template_name",
                action=action,
                message="Provide a template name",
                request_id=request_id,
                code=ErrorCode.MISSING_REQUIRED,
            )
        template_name = template_name.strip()
        if template_name not in PHASE_TEMPLATES:
            return asdict(
                error_response(
                    f"Phase template '{template_name}' not found",
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation=f"Use template_action='list' to see available templates. Valid: {', '.join(PHASE_TEMPLATES)}",
                    request_id=request_id,
                )
            )

    data: Dict[str, Any] = {"action": template_action}

    if template_action == "list":
        # Catalog is described inline here; show/apply read the full
        # structure via get_phase_template_structure.
        data["templates"] = [
            {
                "name": "planning",
                "description": "Requirements gathering and initial planning phase",
                "tasks": 2,
                "estimated_hours": 4,
            },
            {
                "name": "implementation",
                "description": "Core development and feature implementation phase",
                "tasks": 2,
                "estimated_hours": 8,
            },
            {
                "name": "testing",
                "description": "Comprehensive testing and quality assurance phase",
                "tasks": 2,
                "estimated_hours": 6,
            },
            {
                "name": "security",
                "description": "Security audit and hardening phase",
                "tasks": 2,
                "estimated_hours": 6,
            },
            {
                "name": "documentation",
                "description": "Technical documentation and knowledge capture phase",
                "tasks": 2,
                "estimated_hours": 4,
            },
        ]
        data["total_count"] = len(data["templates"])
        data["note"] = "All templates include automatic verification scaffolding (run-tests + fidelity)"
        return asdict(success_response(data=data, request_id=request_id))

    elif template_action == "show":
        try:
            template_struct = get_phase_template_structure(template_name)
            data["template_name"] = template_name
            data["content"] = {
                "name": template_name,
                "title": template_struct["title"],
                "description": template_struct["description"],
                "purpose": template_struct["purpose"],
                "estimated_hours": template_struct["estimated_hours"],
                "tasks": template_struct["tasks"],
                "includes_verification": template_struct["includes_verification"],
            }
            data["usage"] = (
                f"Use authoring(action='phase-template', template_action='apply', "
                f"template_name='{template_name}', spec_id='your-spec-id') to apply this template"
            )
            return asdict(success_response(data=data, request_id=request_id))
        except ValueError as exc:
            # get_phase_template_structure raises ValueError for unknown names.
            return asdict(
                error_response(
                    str(exc),
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    request_id=request_id,
                )
            )

    else:  # apply
        spec_id = payload.get("spec_id")
        if not isinstance(spec_id, str) or not spec_id.strip():
            return _validation_error(
                field="spec_id",
                action=action,
                message="Provide the target spec_id to apply the template to",
                request_id=request_id,
                code=ErrorCode.MISSING_REQUIRED,
            )
        spec_id = spec_id.strip()

        # Optional parameters for apply
        category = payload.get("category", "implementation")
        if not isinstance(category, str):
            return _validation_error(
                field="category",
                action=action,
                message="Category must be a string",
                request_id=request_id,
            )
        category = category.strip()
        if category and category not in CATEGORIES:
            return _validation_error(
                field="category",
                action=action,
                message=f"Category must be one of: {', '.join(CATEGORIES)}",
                request_id=request_id,
            )

        position = payload.get("position")
        if position is not None:
            # bool is an int subclass, so exclude it explicitly.
            if isinstance(position, bool) or not isinstance(position, int):
                return _validation_error(
                    field="position",
                    action=action,
                    message="Position must be an integer",
                    request_id=request_id,
                )
            if position < 0:
                return _validation_error(
                    field="position",
                    action=action,
                    message="Position must be >= 0",
                    request_id=request_id,
                )

        link_previous = payload.get("link_previous", True)
        if not isinstance(link_previous, bool):
            return _validation_error(
                field="link_previous",
                action=action,
                message="Expected a boolean value",
                request_id=request_id,
            )

        dry_run = payload.get("dry_run", False)
        if not isinstance(dry_run, bool):
            return _validation_error(
                field="dry_run",
                action=action,
                message="Expected a boolean value",
                request_id=request_id,
            )

        path = payload.get("path")
        if path is not None and not isinstance(path, str):
            return _validation_error(
                field="path",
                action=action,
                message="Workspace path must be a string",
                request_id=request_id,
            )

        specs_dir = _resolve_specs_dir(config, path)
        if specs_dir is None:
            return _specs_directory_missing_error(request_id)

        audit_log(
            "tool_invocation",
            tool="authoring",
            action=action,
            spec_id=spec_id,
            template_name=template_name,
            dry_run=dry_run,
            link_previous=link_previous,
        )

        metric_key = _metric_name(action)

        # dry_run previews the template structure without mutating the spec.
        if dry_run:
            _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
            template_struct = get_phase_template_structure(template_name, category)
            return asdict(
                success_response(
                    data={
                        "spec_id": spec_id,
                        "template_applied": template_name,
                        "phase_id": "(preview)",
                        "title": template_struct["title"],
                        "tasks_created": [
                            {"task_id": "(preview)", "title": t["title"], "type": "task"}
                            for t in template_struct["tasks"]
                        ],
                        "total_tasks": len(template_struct["tasks"]),
                        "dry_run": True,
                        "note": "Dry run - no changes made. Verification scaffolding will be auto-added.",
                    },
                    request_id=request_id,
                )
            )

        start_time = time.perf_counter()
        try:
            result, error = apply_phase_template(
                spec_id=spec_id,
                template=template_name,
                specs_dir=specs_dir,
                category=category,
                position=position,
                link_previous=link_previous,
            )
        except Exception as exc:  # pragma: no cover - defensive guard
            logger.exception("Unexpected error in phase-template apply")
            _metrics.counter(metric_key, labels={"status": "error"})
            return asdict(
                error_response(
                    sanitize_error_message(exc, context="authoring"),
                    error_code=ErrorCode.INTERNAL_ERROR,
                    error_type=ErrorType.INTERNAL,
                    remediation="Check logs for details",
                    request_id=request_id,
                )
            )

        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

        # Map known error substrings onto specific error codes; anything
        # unrecognized becomes INTERNAL_ERROR.
        if error:
            _metrics.counter(metric_key, labels={"status": "error"})
            lowered = error.lower()
            if "specification" in lowered and "not found" in lowered:
                return asdict(
                    error_response(
                        f"Specification '{spec_id}' not found",
                        error_code=ErrorCode.SPEC_NOT_FOUND,
                        error_type=ErrorType.NOT_FOUND,
                        remediation='Verify the spec ID via spec(action="list")',
                        request_id=request_id,
                    )
                )
            if "invalid phase template" in lowered:
                return asdict(
                    error_response(
                        error,
                        error_code=ErrorCode.VALIDATION_ERROR,
                        error_type=ErrorType.VALIDATION,
                        remediation=f"Valid templates: {', '.join(PHASE_TEMPLATES)}",
                        request_id=request_id,
                    )
                )
            return asdict(
                error_response(
                    f"Failed to apply phase template: {error}",
                    error_code=ErrorCode.INTERNAL_ERROR,
                    error_type=ErrorType.INTERNAL,
                    remediation="Check input values and retry",
                    request_id=request_id,
                )
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data={"spec_id": spec_id, "dry_run": False, **(result or {})},
                telemetry={"duration_ms": round(elapsed_ms, 2)},
                request_id=request_id,
            )
        )
def _handle_phase_move(*, config: ServerConfig, **payload: Any) -> dict:
    """Handle phase-move action: reorder a phase within spec-root children.

    Validates the payload, delegates to ``move_phase``, and translates its
    error strings into typed error envelopes. Returns a plain response dict.
    """
    request_id = _request_id()
    action = "phase-move"

    def _reject(field: str, message: str, code: Any, remediation: str) -> dict:
        # Uniform shortcut for this handler's validation failures.
        return _validation_error(
            field=field,
            action=action,
            message=message,
            request_id=request_id,
            code=code,
            remediation=remediation,
        )

    spec_id = payload.get("spec_id")
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return _reject(
            "spec_id",
            "Provide a non-empty spec_id parameter",
            ErrorCode.MISSING_REQUIRED,
            'Use spec(action="list") to find available spec IDs',
        )
    spec_id = spec_id.strip()

    phase_id = payload.get("phase_id")
    if not (isinstance(phase_id, str) and phase_id.strip()):
        return _reject(
            "phase_id",
            "Provide the phase identifier (e.g., phase-1)",
            ErrorCode.MISSING_REQUIRED,
            "Specify a phase ID like phase-1 or phase-2",
        )
    phase_id = phase_id.strip()

    # Position is required and 1-based; reject bool explicitly since it
    # subclasses int.
    position = payload.get("position")
    if position is None:
        return _reject(
            "position",
            "Provide the target position (1-based index)",
            ErrorCode.MISSING_REQUIRED,
            "Specify position as a positive integer (1 = first)",
        )
    if isinstance(position, bool) or not isinstance(position, int):
        return _reject(
            "position",
            "Position must be an integer",
            ErrorCode.INVALID_FORMAT,
            "Provide position as an integer, e.g. position=2",
        )
    if position < 1:
        return _reject(
            "position",
            "Position must be a positive integer (1-based)",
            ErrorCode.INVALID_FORMAT,
            "Use 1 for first position, 2 for second, etc.",
        )

    # Optional boolean flags.
    link_previous = payload.get("link_previous", True)
    if not isinstance(link_previous, bool):
        return _reject(
            "link_previous",
            "Expected a boolean value",
            ErrorCode.INVALID_FORMAT,
            "Use true or false for link_previous",
        )

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _reject(
            "dry_run",
            "Expected a boolean value",
            ErrorCode.INVALID_FORMAT,
            "Use true or false for dry_run",
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _reject(
            "path",
            "Workspace path must be a string",
            ErrorCode.INVALID_FORMAT,
            "Provide a valid filesystem path string",
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        phase_id=phase_id,
        position=position,
        link_previous=link_previous,
        dry_run=dry_run,
    )

    metric = _metric_name(action)
    t0 = time.perf_counter()

    try:
        result, error = move_phase(
            spec_id=spec_id,
            phase_id=phase_id,
            position=position,
            link_previous=link_previous,
            dry_run=dry_run,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error moving phase")
        _metrics.counter(metric, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    duration_ms = (time.perf_counter() - t0) * 1000
    _metrics.timer(metric + ".duration_ms", duration_ms)
    telemetry = {"duration_ms": round(duration_ms, 2)}

    if error:
        _metrics.counter(metric, labels={"status": "error"})
        err_text = error.lower()

        def _fail(message: str, code: Any, etype: Any, remediation: str) -> dict:
            # All error branches share the same envelope shape and telemetry.
            return asdict(
                error_response(
                    message,
                    error_code=code,
                    error_type=etype,
                    remediation=remediation,
                    request_id=request_id,
                    telemetry=telemetry,
                )
            )

        # Translate the core helper's error text into typed error codes;
        # unrecognized errors fall through to INTERNAL_ERROR.
        if "specification" in err_text and "not found" in err_text:
            return _fail(
                f"Specification '{spec_id}' not found",
                ErrorCode.SPEC_NOT_FOUND,
                ErrorType.NOT_FOUND,
                'Verify the spec ID via spec(action="list")',
            )
        if "phase" in err_text and "not found" in err_text:
            return _fail(
                f"Phase '{phase_id}' not found in spec",
                ErrorCode.PHASE_NOT_FOUND,
                ErrorType.NOT_FOUND,
                "Confirm the phase exists in the hierarchy",
            )
        if "not a phase" in err_text:
            return _fail(
                f"Node '{phase_id}' is not a phase",
                ErrorCode.VALIDATION_ERROR,
                ErrorType.VALIDATION,
                "Provide a valid phase ID (e.g., phase-1)",
            )
        if "invalid position" in err_text or "must be" in err_text:
            return _fail(
                error,
                ErrorCode.VALIDATION_ERROR,
                ErrorType.VALIDATION,
                "Provide a valid 1-based position within range",
            )
        return _fail(
            f"Failed to move phase: {error}",
            ErrorCode.INTERNAL_ERROR,
            ErrorType.INTERNAL,
            "Check input values and retry",
        )

    _metrics.counter(metric, labels={"status": "success"})
    return asdict(
        success_response(
            data=result or {},
            telemetry=telemetry,
            request_id=request_id,
        )
    )
def _handle_phase_remove(*, config: ServerConfig, **payload: Any) -> dict:
    """Remove a phase from a specification.

    Validates ``spec_id``, ``phase_id``, optional ``force``/``dry_run``
    booleans and an optional workspace ``path``, then delegates to
    ``remove_phase``. Errors returned by the helper are mapped onto typed
    error responses by substring-matching the error text; success and
    failure are both recorded via ``_metrics`` counters and a duration
    timer.

    Returns:
        dict: A serialized success or error response envelope.
    """
    request_id = _request_id()
    action = "phase-remove"

    # --- Input validation (each failure returns a structured error) ---
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = spec_id.strip()

    phase_id = payload.get("phase_id")
    if not isinstance(phase_id, str) or not phase_id.strip():
        return _validation_error(
            field="phase_id",
            action=action,
            message="Provide the phase identifier (e.g., phase-1)",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    phase_id = phase_id.strip()

    # force=True allows removal of phases that still have active tasks.
    force = payload.get("force", False)
    if not isinstance(force, bool):
        return _validation_error(
            field="force",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        phase_id=phase_id,
        force=force,
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)
    # Dry run: report what would happen without touching the spec.
    if dry_run:
        _metrics.counter(
            metric_key, labels={"status": "success", "force": str(force).lower()}
        )
        return asdict(
            success_response(
                data={
                    "spec_id": spec_id,
                    "phase_id": phase_id,
                    "force": force,
                    "dry_run": True,
                    "note": "Dry run - no changes made",
                },
                request_id=request_id,
            )
        )

    start_time = time.perf_counter()
    try:
        result, error = remove_phase(
            spec_id=spec_id,
            phase_id=phase_id,
            force=force,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error removing phase")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    # --- Map helper error strings onto typed error responses.
    # Checks are ordered most-specific first; first match wins.
    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        lowered = error.lower()
        if "spec" in lowered and "not found" in lowered:
            return asdict(
                error_response(
                    f"Specification '{spec_id}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID via spec(action="list")',
                    request_id=request_id,
                )
            )
        if "phase" in lowered and "not found" in lowered:
            return asdict(
                error_response(
                    f"Phase '{phase_id}' not found in spec",
                    error_code=ErrorCode.PHASE_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Confirm the phase exists in the hierarchy",
                    request_id=request_id,
                )
            )
        if "not a phase" in lowered:
            return asdict(
                error_response(
                    f"Node '{phase_id}' is not a phase",
                    error_code=ErrorCode.VALIDATION_ERROR,
                    error_type=ErrorType.VALIDATION,
                    remediation="Use task-remove for non-phase nodes",
                    request_id=request_id,
                )
            )
        # NOTE(review): precedence here is `"non-completed" in lowered OR
        # ("has" in lowered AND "task" in lowered)` — `and` binds tighter
        # than `or`. Looks intended, but confirm against remove_phase's
        # actual error wording.
        if "non-completed" in lowered or "has" in lowered and "task" in lowered:
            return asdict(
                error_response(
                    f"Phase '{phase_id}' has non-completed tasks. Use force=True to remove anyway",
                    error_code=ErrorCode.CONFLICT,
                    error_type=ErrorType.CONFLICT,
                    remediation="Set force=True to remove active phases",
                    request_id=request_id,
                )
            )
        # Fallback for unrecognized error text.
        return asdict(
            error_response(
                f"Failed to remove phase: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check input values and retry",
                request_id=request_id,
            )
        )

    _metrics.counter(
        metric_key, labels={"status": "success", "force": str(force).lower()}
    )
    return asdict(
        success_response(
            data={"spec_id": spec_id, "dry_run": False, **(result or {})},
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
2310
|
+
|
|
2311
|
+
|
|
2312
|
+
def _handle_assumption_add(*, config: ServerConfig, **payload: Any) -> dict:
    """Add an assumption entry to a specification.

    Validates ``spec_id`` and ``text`` (both required), plus optional
    ``assumption_type``, ``author``, ``dry_run`` and workspace ``path``.
    Duplicate-text detection only produces a warning — the entry is still
    appended. Delegates to ``add_assumption`` and wraps the result in the
    standard response envelope with metrics and timing telemetry.

    Returns:
        dict: A serialized success or error response envelope.
    """
    request_id = _request_id()
    action = "assumption-add"

    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = spec_id.strip()

    text = payload.get("text")
    if not isinstance(text, str) or not text.strip():
        return _validation_error(
            field="text",
            action=action,
            message="Provide the assumption text",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    text = text.strip()

    assumption_type = payload.get("assumption_type")  # Optional, any string accepted

    author = payload.get("author")
    if author is not None and not isinstance(author, str):
        return _validation_error(
            field="author",
            action=action,
            message="Author must be a string",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    # Duplicate text is allowed; we only warn so callers can deduplicate.
    warnings: List[str] = []
    if _assumption_exists(spec_id, specs_dir, text):
        warnings.append(
            "An assumption with identical text already exists; another entry will be appended"
        )

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        assumption_type=assumption_type,
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)

    # Dry run: echo the would-be entry without writing anything.
    if dry_run:
        _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
        data = {
            "spec_id": spec_id,
            "assumption_id": "(preview)",
            "text": text,
            "type": assumption_type,
            "dry_run": True,
            "note": "Dry run - no changes made",
        }
        if author:
            data["author"] = author
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )

    start_time = time.perf_counter()
    try:
        result, error = add_assumption(
            spec_id=spec_id,
            text=text,
            assumption_type=assumption_type,
            author=author,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error adding assumption")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        # Substring match on the helper's error text; "not found" is
        # treated as a missing spec.
        if "not found" in error.lower():
            return asdict(
                error_response(
                    f"Specification '{spec_id}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID via spec(action="list")',
                    request_id=request_id,
                )
            )
        return asdict(
            error_response(
                f"Failed to add assumption: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check that the spec exists",
                request_id=request_id,
            )
        )

    data = {
        "spec_id": spec_id,
        "assumption_id": result.get("assumption_id") if result else None,
        "text": text,
        "type": assumption_type,
        "dry_run": False,
    }
    if author:
        data["author"] = author

    _metrics.counter(metric_key, labels={"status": "success"})
    return asdict(
        success_response(
            data=data,
            warnings=warnings or None,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
2474
|
+
|
|
2475
|
+
|
|
2476
|
+
def _handle_assumption_list(*, config: ServerConfig, **payload: Any) -> dict:
    """List the assumptions recorded on a specification.

    Validates ``spec_id`` (required) and optional ``path``, resolves the
    specs directory, delegates to ``list_assumptions`` and wraps the
    outcome in the standard response envelope with timing telemetry. The
    ``assumption_type`` filter is advisory only; a warning is attached
    when it is supplied.
    """
    request_id = _request_id()
    action = "assumption-list"

    raw_spec = payload.get("spec_id")
    if not (isinstance(raw_spec, str) and raw_spec.strip()):
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = raw_spec.strip()

    # Optional filter; any string is accepted (advisory only — see warning below).
    assumption_type = payload.get("assumption_type")

    workspace = payload.get("path")
    if workspace is not None and not isinstance(workspace, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        assumption_type=assumption_type,
    )

    metric_key = _metric_name(action)
    started = time.perf_counter()
    try:
        result, error = list_assumptions(
            spec_id=spec_id,
            assumption_type=assumption_type,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error listing assumptions")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - started) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        # "not found" in the helper's error text means the spec is missing.
        if "not found" in error.lower():
            return asdict(
                error_response(
                    f"Specification '{spec_id}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID via spec(action="list")',
                    request_id=request_id,
                )
            )
        return asdict(
            error_response(
                f"Failed to list assumptions: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check that the spec exists",
                request_id=request_id,
            )
        )

    warnings: List[str] = []
    if assumption_type:
        warnings.append(
            "assumption_type filter is advisory only; all assumptions are returned"
        )

    _metrics.counter(metric_key, labels={"status": "success"})
    empty_payload = {"spec_id": spec_id, "assumptions": [], "total_count": 0}
    return asdict(
        success_response(
            data=result or empty_payload,
            warnings=warnings or None,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
2575
|
+
|
|
2576
|
+
|
|
2577
|
+
def _handle_revision_add(*, config: ServerConfig, **payload: Any) -> dict:
    """Append a revision entry to a specification's changelog.

    Requires ``spec_id``, ``version`` and ``changes``; accepts optional
    ``author``, ``dry_run`` and workspace ``path``. Delegates to
    ``add_revision`` (note: the ``changes`` input is forwarded as the
    helper's ``changelog`` keyword) and wraps the outcome in the standard
    response envelope with metrics and timing telemetry.

    Returns:
        dict: A serialized success or error response envelope.
    """
    request_id = _request_id()
    action = "revision-add"

    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec_id parameter",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    spec_id = spec_id.strip()

    version = payload.get("version")
    if not isinstance(version, str) or not version.strip():
        return _validation_error(
            field="version",
            action=action,
            message="Provide the revision version (e.g., 1.1)",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    version = version.strip()

    changes = payload.get("changes")
    if not isinstance(changes, str) or not changes.strip():
        return _validation_error(
            field="changes",
            action=action,
            message="Provide a summary of changes",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    changes = changes.strip()

    author = payload.get("author")
    if author is not None and not isinstance(author, str):
        return _validation_error(
            field="author",
            action=action,
            message="Author must be a string",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
        )

    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="Workspace path must be a string",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        spec_id=spec_id,
        version=version,
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)
    # Dry run: echo the would-be revision without writing anything.
    if dry_run:
        _metrics.counter(metric_key, labels={"status": "success", "dry_run": "true"})
        data = {
            "spec_id": spec_id,
            "version": version,
            "changes": changes,
            "dry_run": True,
            "note": "Dry run - no changes made",
        }
        if author:
            data["author"] = author
        return asdict(
            success_response(
                data=data,
                request_id=request_id,
            )
        )

    start_time = time.perf_counter()
    try:
        result, error = add_revision(
            spec_id=spec_id,
            version=version,
            changelog=changes,
            author=author,
            specs_dir=specs_dir,
        )
    except Exception as exc:  # pragma: no cover - defensive guard
        logger.exception("Unexpected error adding revision")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)

    if error:
        _metrics.counter(metric_key, labels={"status": "error"})
        # Substring match on the helper's error text.
        if "not found" in error.lower():
            return asdict(
                error_response(
                    f"Specification '{spec_id}' not found",
                    error_code=ErrorCode.SPEC_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation='Verify the spec ID via spec(action="list")',
                    request_id=request_id,
                )
            )
        return asdict(
            error_response(
                f"Failed to add revision: {error}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check that the spec exists",
                request_id=request_id,
            )
        )

    data = {
        "spec_id": spec_id,
        "version": version,
        "changes": changes,
        "dry_run": False,
    }
    if author:
        data["author"] = author
    # Surface the helper-assigned revision date when provided.
    if result and result.get("date"):
        data["date"] = result["date"]

    _metrics.counter(metric_key, labels={"status": "success"})
    return asdict(
        success_response(
            data=data,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
            request_id=request_id,
        )
    )
|
|
2739
|
+
|
|
2740
|
+
|
|
2741
|
+
# Validation constants for intake
# Field limits enforced by _handle_intake_add; changing them changes the
# public validation contract of the intake-add action.
_INTAKE_TITLE_MAX_LEN = 140  # max characters for a title
_INTAKE_DESC_MAX_LEN = 2000  # max characters for the optional description
_INTAKE_TAG_MAX_LEN = 32  # max characters per tag
_INTAKE_TAG_MAX_COUNT = 20  # max number of tags per item
_INTAKE_SOURCE_MAX_LEN = 100  # max characters for the optional source
_INTAKE_REQUESTER_MAX_LEN = 100  # max characters for the optional requester
_INTAKE_IDEMPOTENCY_KEY_MAX_LEN = 64  # max characters for the idempotency key
# Canonical priority values, highest (p0) to lowest (p4).
_INTAKE_PRIORITY_VALUES = ("p0", "p1", "p2", "p3", "p4")
# Human-readable aliases accepted on input and normalized to p0-p4.
_INTAKE_PRIORITY_ALIASES = {
    "critical": "p0",
    "highest": "p0",
    "high": "p1",
    "medium": "p2",
    "normal": "p2",
    "low": "p3",
    "lowest": "p4",
}
# Tags must be lowercase alphanumerics plus hyphen/underscore.
_INTAKE_TAG_PATTERN = "^[a-z0-9_-]+$"
_TAG_REGEX = re.compile(_INTAKE_TAG_PATTERN)
|
|
2761
|
+
|
|
2762
|
+
|
|
2763
|
+
def _intake_feature_flag_blocked(request_id: str) -> Optional[dict]:
|
|
2764
|
+
"""Check if intake tools are blocked by feature flag. Returns None to allow."""
|
|
2765
|
+
# Feature flags disabled - always allow
|
|
2766
|
+
return None
|
|
2767
|
+
|
|
2768
|
+
|
|
2769
|
+
def _handle_intake_add(*, config: ServerConfig, **payload: Any) -> dict:
    """Add a new intake item to the bikelane queue.

    Validates and normalizes every user-supplied field (title required;
    description, priority, tags, source, requester, idempotency_key and
    dry_run optional), then writes through ``IntakeStore.add``. Lock
    contention maps to a RESOURCE_BUSY error; any other failure maps to a
    sanitized INTERNAL_ERROR. Success responses include lock-wait and
    duration telemetry.

    Returns:
        dict: A serialized success or error response envelope.
    """
    request_id = _request_id()
    action = "intake-add"

    # Check feature flag
    blocked = _intake_feature_flag_blocked(request_id)
    if blocked:
        return blocked

    # Validate title (required, 1-140 chars)
    title = payload.get("title")
    if not isinstance(title, str) or not title.strip():
        return _validation_error(
            field="title",
            action=action,
            message="Provide a non-empty title (1-140 characters)",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    title = title.strip()
    if len(title) > _INTAKE_TITLE_MAX_LEN:
        return _validation_error(
            field="title",
            action=action,
            message=f"Title exceeds maximum length of {_INTAKE_TITLE_MAX_LEN} characters",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
            remediation=f"Shorten title to {_INTAKE_TITLE_MAX_LEN} characters or less",
        )

    # Validate description (optional, max 2000 chars)
    description = payload.get("description")
    if description is not None:
        if not isinstance(description, str):
            return _validation_error(
                field="description",
                action=action,
                message="Description must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        # Whitespace-only descriptions collapse to None (treated as absent).
        description = description.strip() or None
        if description and len(description) > _INTAKE_DESC_MAX_LEN:
            return _validation_error(
                field="description",
                action=action,
                message=f"Description exceeds maximum length of {_INTAKE_DESC_MAX_LEN} characters",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
                remediation=f"Shorten description to {_INTAKE_DESC_MAX_LEN} characters or less",
            )

    # Validate priority (optional, enum p0-p4, default p2)
    # Handle both missing key AND explicit null from JSON
    priority = payload.get("priority")
    if priority is None:
        priority = "p2"  # Default for both missing and explicit null
    elif not isinstance(priority, str):
        return _validation_error(
            field="priority",
            action=action,
            message=f"Priority must be a string. Valid values: {', '.join(_INTAKE_PRIORITY_VALUES)}",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation=f"Use {', '.join(_INTAKE_PRIORITY_VALUES)} or aliases: {', '.join(_INTAKE_PRIORITY_ALIASES.keys())}",
        )

    priority = priority.strip().lower()

    # Map human-readable aliases to canonical values
    if priority in _INTAKE_PRIORITY_ALIASES:
        priority = _INTAKE_PRIORITY_ALIASES[priority]

    if priority not in _INTAKE_PRIORITY_VALUES:
        return _validation_error(
            field="priority",
            action=action,
            message=f"Priority must be one of: {', '.join(_INTAKE_PRIORITY_VALUES)}",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
            remediation=f"Use p0-p4 or aliases like 'high', 'medium', 'low'. Default is p2 (medium).",
        )

    # Validate tags (optional, max 20 items, each 1-32 chars, lowercase pattern)
    tags = payload.get("tags", [])
    if tags is None:
        tags = []
    if not isinstance(tags, list):
        return _validation_error(
            field="tags",
            action=action,
            message="Tags must be a list of strings",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if len(tags) > _INTAKE_TAG_MAX_COUNT:
        return _validation_error(
            field="tags",
            action=action,
            message=f"Maximum {_INTAKE_TAG_MAX_COUNT} tags allowed",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
        )
    validated_tags = []
    for i, tag in enumerate(tags):
        if not isinstance(tag, str):
            return _validation_error(
                field=f"tags[{i}]",
                action=action,
                message="Each tag must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        tag = tag.strip().lower()
        # Empty tags (after trimming) are silently dropped, not rejected.
        if not tag:
            continue
        if len(tag) > _INTAKE_TAG_MAX_LEN:
            return _validation_error(
                field=f"tags[{i}]",
                action=action,
                message=f"Tag exceeds maximum length of {_INTAKE_TAG_MAX_LEN} characters",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
            )
        if not _TAG_REGEX.match(tag):
            return _validation_error(
                field=f"tags[{i}]",
                action=action,
                message=f"Tag must match pattern {_INTAKE_TAG_PATTERN} (lowercase alphanumeric, hyphens, underscores)",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        validated_tags.append(tag)
    tags = validated_tags

    # Validate source (optional, max 100 chars)
    source = payload.get("source")
    if source is not None:
        if not isinstance(source, str):
            return _validation_error(
                field="source",
                action=action,
                message="Source must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        source = source.strip() or None
        if source and len(source) > _INTAKE_SOURCE_MAX_LEN:
            return _validation_error(
                field="source",
                action=action,
                message=f"Source exceeds maximum length of {_INTAKE_SOURCE_MAX_LEN} characters",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
            )

    # Validate requester (optional, max 100 chars)
    requester = payload.get("requester")
    if requester is not None:
        if not isinstance(requester, str):
            return _validation_error(
                field="requester",
                action=action,
                message="Requester must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        requester = requester.strip() or None
        if requester and len(requester) > _INTAKE_REQUESTER_MAX_LEN:
            return _validation_error(
                field="requester",
                action=action,
                message=f"Requester exceeds maximum length of {_INTAKE_REQUESTER_MAX_LEN} characters",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
            )

    # Validate idempotency_key (optional, max 64 chars)
    idempotency_key = payload.get("idempotency_key")
    if idempotency_key is not None:
        if not isinstance(idempotency_key, str):
            return _validation_error(
                field="idempotency_key",
                action=action,
                message="Idempotency key must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        idempotency_key = idempotency_key.strip() or None
        if idempotency_key and len(idempotency_key) > _INTAKE_IDEMPOTENCY_KEY_MAX_LEN:
            return _validation_error(
                field="idempotency_key",
                action=action,
                message=f"Idempotency key exceeds maximum length of {_INTAKE_IDEMPOTENCY_KEY_MAX_LEN} characters",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
            )

    # Validate dry_run
    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Validate path
    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Resolve specs directory
    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    # Audit log
    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        title=title[:100],  # Truncate for logging
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)
    start_time = time.perf_counter()

    try:
        # Get bikelane_dir from config (allows customization via TOML or env var)
        bikelane_dir = config.get_bikelane_dir(specs_dir)
        store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
        # NOTE(review): store.add appears to honor dry_run and
        # idempotency_key itself (was_duplicate in the return) — confirm
        # against IntakeStore's documentation.
        item, was_duplicate, lock_wait_ms = store.add(
            title=title,
            description=description,
            priority=priority,
            tags=tags,
            source=source,
            requester=requester,
            idempotency_key=idempotency_key,
            dry_run=dry_run,
        )
    except LockAcquisitionError:
        # The store could not take its file lock in time; advise retry.
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                "Failed to acquire file lock within timeout. Resource is busy.",
                error_code=ErrorCode.RESOURCE_BUSY,
                error_type=ErrorType.UNAVAILABLE,
                remediation="Retry after a moment",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )
    except Exception as exc:
        logger.exception("Unexpected error adding intake item")
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring.intake-add"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
    _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})

    data = {
        "item": item.to_dict(),
        "intake_path": store.intake_path,
        "was_duplicate": was_duplicate,
    }

    meta_extra = {}
    if dry_run:
        meta_extra["dry_run"] = True

    return asdict(
        success_response(
            data=data,
            telemetry={"duration_ms": round(elapsed_ms, 2), "lock_wait_ms": round(lock_wait_ms, 2)},
            request_id=request_id,
            meta=meta_extra,
        )
    )
|
|
3071
|
+
|
|
3072
|
+
|
|
3073
|
+
# Intake list constants (from intake.py)
# Pagination bounds enforced by _handle_intake_list.
_INTAKE_LIST_DEFAULT_LIMIT = 50  # page size when "limit" is omitted
_INTAKE_LIST_MAX_LIMIT = 200  # hard cap on a single page
|
|
3076
|
+
|
|
3077
|
+
|
|
3078
|
+
def _handle_intake_list(*, config: ServerConfig, **payload: Any) -> dict:
    """List intake items with status='new' in FIFO order with pagination.

    Payload keys:
        limit: optional int page size (default 50, range 1-200).
        cursor: optional opaque pagination cursor from a previous call.
        path: optional workspace override for resolving the specs directory.

    Returns a serialized success/error response dict.
    """
    request_id = _request_id()
    action = "intake-list"

    # Check feature flag
    blocked = _intake_feature_flag_blocked(request_id)
    if blocked:
        return blocked

    # Validate limit (optional, default 50, range 1-200).
    # An explicit None means "use the default" — previously None slipped past
    # validation and was forwarded raw to the store. Booleans are rejected
    # explicitly because bool is a subclass of int in Python.
    limit = payload.get("limit")
    if limit is None:
        limit = _INTAKE_LIST_DEFAULT_LIMIT
    if isinstance(limit, bool) or not isinstance(limit, int):
        return _validation_error(
            field="limit",
            action=action,
            message="limit must be an integer",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if limit < 1 or limit > _INTAKE_LIST_MAX_LIMIT:
        return _validation_error(
            field="limit",
            action=action,
            message=f"limit must be between 1 and {_INTAKE_LIST_MAX_LIMIT}",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
            remediation=f"Use a value between 1 and {_INTAKE_LIST_MAX_LIMIT} (default: {_INTAKE_LIST_DEFAULT_LIMIT})",
        )

    # Validate cursor (optional string); blank strings collapse to None.
    cursor = payload.get("cursor")
    if cursor is not None:
        if not isinstance(cursor, str):
            return _validation_error(
                field="cursor",
                action=action,
                message="cursor must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        cursor = cursor.strip() or None

    # Validate path (optional workspace override)
    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Resolve specs directory
    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    # Audit log
    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        limit=limit,
        has_cursor=cursor is not None,
    )

    metric_key = _metric_name(action)
    start_time = time.perf_counter()

    try:
        # Get bikelane_dir from config (allows customization via TOML or env var)
        bikelane_dir = config.get_bikelane_dir(specs_dir)
        store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
        items, total_count, next_cursor, has_more, lock_wait_ms = store.list_new(
            cursor=cursor,
            limit=limit,
        )
    except LockAcquisitionError:
        # Another process holds the intake file lock; advise a retry.
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                "Failed to acquire file lock within timeout. Resource is busy.",
                error_code=ErrorCode.RESOURCE_BUSY,
                error_type=ErrorType.UNAVAILABLE,
                remediation="Retry after a moment",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )
    except Exception as exc:
        # Top-level boundary: log the traceback, return a sanitized message.
        logger.exception("Unexpected error listing intake items")
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring.intake-list"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000
    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
    _metrics.counter(metric_key, labels={"status": "success"})

    data = {
        "items": [item.to_dict() for item in items],
        "total_count": total_count,
        "intake_path": store.intake_path,
    }

    # Build pagination metadata only when it carries information: either more
    # pages exist, or the caller is already paging (sent a cursor).
    pagination = None
    if has_more or cursor is not None:
        pagination = {
            "cursor": next_cursor,
            "has_more": has_more,
            "page_size": limit,
        }

    return asdict(
        success_response(
            data=data,
            pagination=pagination,
            telemetry={
                "duration_ms": round(elapsed_ms, 2),
                "lock_wait_ms": round(lock_wait_ms, 2),
            },
            request_id=request_id,
        )
    )
|
|
3217
|
+
|
|
3218
|
+
# Intake dismiss constants
# Maximum accepted length (in characters) for the optional dismissal reason.
_INTAKE_DISMISS_REASON_MAX_LEN = 200
|
3222
|
+
def _handle_intake_dismiss(*, config: ServerConfig, **payload: Any) -> dict:
    """Dismiss an intake item by changing its status to 'dismissed'."""
    request_id = _request_id()
    action = "intake-dismiss"

    # Check feature flag
    blocked = _intake_feature_flag_blocked(request_id)
    if blocked:
        return blocked

    # Validate intake_id (required, must match pattern)
    intake_id = payload.get("intake_id")
    if not isinstance(intake_id, str) or not intake_id.strip():
        return _validation_error(
            field="intake_id",
            action=action,
            message="Provide a valid intake_id",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
        )
    intake_id = intake_id.strip()
    if not INTAKE_ID_PATTERN.match(intake_id):
        return _validation_error(
            field="intake_id",
            action=action,
            message="intake_id must match pattern intake-<uuid>",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation="Use format: intake-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        )

    # Validate reason (optional, max 200 chars); a blank reason collapses to
    # None, so the length check only applies to a non-empty trimmed string.
    reason = payload.get("reason")
    if reason is not None:
        if not isinstance(reason, str):
            return _validation_error(
                field="reason",
                action=action,
                message="reason must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        reason = reason.strip() or None
        if reason and len(reason) > _INTAKE_DISMISS_REASON_MAX_LEN:
            return _validation_error(
                field="reason",
                action=action,
                message=f"reason exceeds maximum length of {_INTAKE_DISMISS_REASON_MAX_LEN} characters",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
                remediation=f"Shorten reason to {_INTAKE_DISMISS_REASON_MAX_LEN} characters or less",
            )

    # Validate dry_run
    dry_run = payload.get("dry_run", False)
    if not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Validate path (optional workspace override)
    path = payload.get("path")
    if path is not None and not isinstance(path, str):
        return _validation_error(
            field="path",
            action=action,
            message="path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Resolve specs directory
    specs_dir = _resolve_specs_dir(config, path)
    if specs_dir is None:
        return _specs_directory_missing_error(request_id)

    # Audit log
    audit_log(
        "tool_invocation",
        tool="authoring",
        action=action,
        intake_id=intake_id,
        dry_run=dry_run,
    )

    metric_key = _metric_name(action)
    start_time = time.perf_counter()

    try:
        # Get bikelane_dir from config (allows customization via TOML or env var)
        bikelane_dir = config.get_bikelane_dir(specs_dir)
        store = IntakeStore(specs_dir, bikelane_dir=bikelane_dir)
        item, lock_wait_ms = store.dismiss(
            intake_id=intake_id,
            reason=reason,
            dry_run=dry_run,
        )
    except LockAcquisitionError:
        # Another process holds the intake file lock; advise a retry.
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                "Failed to acquire file lock within timeout. Resource is busy.",
                error_code=ErrorCode.RESOURCE_BUSY,
                error_type=ErrorType.UNAVAILABLE,
                remediation="Retry after a moment",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )
    except Exception as exc:
        # Top-level boundary: log the traceback, return a sanitized message.
        logger.exception("Unexpected error dismissing intake item")
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                sanitize_error_message(exc, context="authoring.intake-dismiss"),
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check logs for details",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    elapsed_ms = (time.perf_counter() - start_time) * 1000

    # Handle not found case: store.dismiss returned no item for this id.
    if item is None:
        _metrics.counter(metric_key, labels={"status": "not_found"})
        return asdict(
            error_response(
                f"Intake item not found: {intake_id}",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Verify the intake_id exists using intake-list action",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2), "lock_wait_ms": round(lock_wait_ms, 2)},
            )
        )

    _metrics.timer(metric_key + ".duration_ms", elapsed_ms)
    _metrics.counter(metric_key, labels={"status": "success", "dry_run": str(dry_run).lower()})

    data = {
        "item": item.to_dict(),
        "intake_path": store.intake_path,
    }

    # Surface dry_run in response meta only when it was requested.
    meta_extra = {}
    if dry_run:
        meta_extra["dry_run"] = True

    return asdict(
        success_response(
            data=data,
            telemetry={
                "duration_ms": round(elapsed_ms, 2),
                "lock_wait_ms": round(lock_wait_ms, 2),
            },
            request_id=request_id,
            meta=meta_extra,
        )
    )
|
3392
|
+
# Router for the consolidated "authoring" tool. Every action is registered
# under its canonical kebab-case name plus one snake_case alias derived from
# it (e.g. "spec-create" is also reachable as "spec_create").
_AUTHORING_ROUTER = ActionRouter(
    tool_name="authoring",
    actions=[
        ActionDefinition(
            name=_action_name,
            handler=_action_handler,
            summary=_ACTION_SUMMARY[_action_name],
            aliases=(_action_name.replace("-", "_"),),
        )
        for _action_name, _action_handler in (
            ("spec-create", _handle_spec_create),
            ("spec-template", _handle_spec_template),
            ("spec-update-frontmatter", _handle_spec_update_frontmatter),
            ("spec-find-replace", _handle_spec_find_replace),
            ("spec-rollback", _handle_spec_rollback),
            ("phase-add", _handle_phase_add),
            ("phase-add-bulk", _handle_phase_add_bulk),
            ("phase-template", _handle_phase_template),
            ("phase-move", _handle_phase_move),
            ("phase-update-metadata", _handle_phase_update_metadata),
            ("phase-remove", _handle_phase_remove),
            ("assumption-add", _handle_assumption_add),
            ("assumption-list", _handle_assumption_list),
            ("revision-add", _handle_revision_add),
            ("intake-add", _handle_intake_add),
            ("intake-list", _handle_intake_list),
            ("intake-dismiss", _handle_intake_dismiss),
        )
    ],
)
|
3501
|
+
def _dispatch_authoring_action(
    *, action: str, payload: Dict[str, Any], config: ServerConfig
) -> dict:
    """Route an authoring request through the shared action router.

    Unknown actions are translated into a structured validation error that
    lists every allowed action, instead of letting ActionRouterError escape.
    """
    try:
        return _AUTHORING_ROUTER.dispatch(action=action, config=config, **payload)
    except ActionRouterError as exc:
        allowed = ", ".join(exc.allowed_actions)
        unknown_action = error_response(
            f"Unsupported authoring action '{action}'. Allowed actions: {allowed}",
            error_code=ErrorCode.VALIDATION_ERROR,
            error_type=ErrorType.VALIDATION,
            remediation=f"Use one of: {allowed}",
            request_id=_request_id(),
        )
        return asdict(unknown_action)
|
3520
|
+
def register_unified_authoring_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated authoring tool.

    Defines a single MCP tool named "authoring" whose flat parameter list is
    the union of every routed action's inputs; the selected ``action`` decides
    which parameters are actually consumed by the handler.
    """

    @canonical_tool(
        mcp,
        canonical_name="authoring",
    )
    @mcp_tool(tool_name="authoring", emit_metrics=True, audit=True)
    def authoring(
        action: str,
        spec_id: Optional[str] = None,
        name: Optional[str] = None,
        template: Optional[str] = None,
        category: Optional[str] = None,
        mission: Optional[str] = None,
        template_action: Optional[str] = None,
        template_name: Optional[str] = None,
        key: Optional[str] = None,
        value: Optional[str] = None,
        title: Optional[str] = None,
        description: Optional[str] = None,
        purpose: Optional[str] = None,
        estimated_hours: Optional[float] = None,
        position: Optional[int] = None,
        link_previous: bool = True,
        phase_id: Optional[str] = None,
        force: bool = False,
        text: Optional[str] = None,
        assumption_type: Optional[str] = None,
        author: Optional[str] = None,
        version: Optional[str] = None,
        changes: Optional[str] = None,
        tasks: Optional[List[Dict[str, Any]]] = None,
        phase: Optional[Dict[str, Any]] = None,
        metadata_defaults: Optional[Dict[str, Any]] = None,
        dry_run: bool = False,
        path: Optional[str] = None,
        # spec-find-replace parameters
        find: Optional[str] = None,
        replace: Optional[str] = None,
        scope: Optional[str] = None,
        use_regex: bool = False,
        case_sensitive: bool = True,
        # intake parameters
        priority: Optional[str] = None,
        tags: Optional[List[str]] = None,
        source: Optional[str] = None,
        requester: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> dict:
        """Execute authoring workflows via the action router."""

        # Forward every parameter; each handler validates and picks out only
        # the keys relevant to its action (unused keys arrive as None/defaults).
        payload = {
            "spec_id": spec_id,
            "name": name,
            "template": template,
            "category": category,
            "mission": mission,
            "template_action": template_action,
            "template_name": template_name,
            "key": key,
            "value": value,
            "title": title,
            "description": description,
            "purpose": purpose,
            "estimated_hours": estimated_hours,
            "position": position,
            "link_previous": link_previous,
            "phase_id": phase_id,
            "force": force,
            "text": text,
            "assumption_type": assumption_type,
            "author": author,
            "version": version,
            "changes": changes,
            "tasks": tasks,
            "phase": phase,
            "metadata_defaults": metadata_defaults,
            "dry_run": dry_run,
            "path": path,
            # spec-find-replace parameters
            "find": find,
            "replace": replace,
            "scope": scope,
            "use_regex": use_regex,
            "case_sensitive": case_sensitive,
            # intake parameters
            "priority": priority,
            "tags": tags,
            "source": source,
            "requester": requester,
            "idempotency_key": idempotency_key,
        }
        return _dispatch_authoring_action(action=action, payload=payload, config=config)

    logger.debug("Registered unified authoring tool")
|
3618
|
+
# Public API: only the registration entry point is exported.
__all__ = [
    "register_unified_authoring_tool",
]