foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,3846 @@
|
|
|
1
|
+
"""Unified task router with validation, pagination, and shared delegates."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import re
|
|
7
|
+
import time
|
|
8
|
+
from dataclasses import asdict
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
11
|
+
|
|
12
|
+
from mcp.server.fastmcp import FastMCP
|
|
13
|
+
|
|
14
|
+
from foundry_mcp.config import ServerConfig
|
|
15
|
+
from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
|
|
16
|
+
from foundry_mcp.core.naming import canonical_tool
|
|
17
|
+
from foundry_mcp.core.observability import get_metrics, mcp_tool
|
|
18
|
+
from foundry_mcp.core.pagination import (
|
|
19
|
+
CursorError,
|
|
20
|
+
decode_cursor,
|
|
21
|
+
encode_cursor,
|
|
22
|
+
normalize_page_size,
|
|
23
|
+
paginated_response,
|
|
24
|
+
)
|
|
25
|
+
from foundry_mcp.core.progress import (
|
|
26
|
+
get_progress_summary,
|
|
27
|
+
list_phases,
|
|
28
|
+
sync_computed_fields,
|
|
29
|
+
update_parent_status,
|
|
30
|
+
)
|
|
31
|
+
from foundry_mcp.core.responses import (
|
|
32
|
+
ErrorCode,
|
|
33
|
+
ErrorType,
|
|
34
|
+
error_response,
|
|
35
|
+
success_response,
|
|
36
|
+
)
|
|
37
|
+
from foundry_mcp.core.spec import find_specs_directory, load_spec, save_spec
|
|
38
|
+
from foundry_mcp.core.journal import (
|
|
39
|
+
add_journal_entry,
|
|
40
|
+
get_blocker_info,
|
|
41
|
+
list_blocked_tasks,
|
|
42
|
+
mark_blocked,
|
|
43
|
+
unblock as unblock_task,
|
|
44
|
+
update_task_status,
|
|
45
|
+
)
|
|
46
|
+
from foundry_mcp.core.task import (
|
|
47
|
+
add_task,
|
|
48
|
+
batch_update_tasks,
|
|
49
|
+
check_dependencies,
|
|
50
|
+
get_next_task,
|
|
51
|
+
manage_task_dependency,
|
|
52
|
+
move_task,
|
|
53
|
+
prepare_task as core_prepare_task,
|
|
54
|
+
remove_task,
|
|
55
|
+
REQUIREMENT_TYPES,
|
|
56
|
+
update_estimate,
|
|
57
|
+
update_task_metadata,
|
|
58
|
+
update_task_requirements,
|
|
59
|
+
)
|
|
60
|
+
from foundry_mcp.core.batch_operations import (
|
|
61
|
+
prepare_batch_context,
|
|
62
|
+
start_batch,
|
|
63
|
+
complete_batch,
|
|
64
|
+
reset_batch,
|
|
65
|
+
DEFAULT_MAX_TASKS,
|
|
66
|
+
DEFAULT_TOKEN_BUDGET,
|
|
67
|
+
STALE_TASK_THRESHOLD_HOURS,
|
|
68
|
+
)
|
|
69
|
+
from foundry_mcp.cli.context import (
|
|
70
|
+
AutonomousSession,
|
|
71
|
+
get_context_tracker,
|
|
72
|
+
)
|
|
73
|
+
from foundry_mcp.core.validation import (
|
|
74
|
+
VALID_VERIFICATION_TYPES,
|
|
75
|
+
VERIFICATION_TYPE_MAPPING,
|
|
76
|
+
)
|
|
77
|
+
from foundry_mcp.tools.unified.router import (
|
|
78
|
+
ActionDefinition,
|
|
79
|
+
ActionRouter,
|
|
80
|
+
ActionRouterError,
|
|
81
|
+
)
|
|
82
|
+
|
|
83
|
+
# Module-level logger and shared metrics sink for all task actions.
logger = logging.getLogger(__name__)
_metrics = get_metrics()

# Pagination defaults for task list responses (presumably consumed by list
# handlers outside this chunk — confirm against the full file).
_TASK_DEFAULT_PAGE_SIZE = 25
_TASK_MAX_PAGE_SIZE = 100
# Result counts above this trigger a "consider pagination" warning
# (see _pagination_warnings).
_TASK_WARNING_THRESHOLD = 75
# Task statuses accepted by status filters/updates — assumed to mirror the
# core task model; verify against foundry_mcp.core.task.
_ALLOWED_STATUS = {"pending", "in_progress", "completed", "blocked"}
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _request_id() -> str:
    """Return the active correlation ID, minting a task-scoped one if absent."""
    existing = get_correlation_id()
    if existing:
        return existing
    return generate_correlation_id(prefix="task")
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def _metric(action: str) -> str:
|
|
97
|
+
return f"unified_tools.task.{action.replace('-', '_')}"
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _specs_dir_missing_error(request_id: str) -> dict:
    """Build the standard NOT_FOUND payload for an unresolvable specs directory."""
    err = error_response(
        "No specs directory found. Use --specs-dir or set SDD_SPECS_DIR.",
        error_code=ErrorCode.NOT_FOUND,
        error_type=ErrorType.NOT_FOUND,
        remediation="Set SDD_SPECS_DIR or invoke with --specs-dir",
        request_id=request_id,
    )
    return asdict(err)
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def _validation_error(
    *,
    field: str,
    action: str,
    message: str,
    request_id: str,
    code: ErrorCode = ErrorCode.MISSING_REQUIRED,
    remediation: Optional[str] = None,
) -> dict:
    """Build a validation-error payload for a bad ``task.<action>`` parameter.

    Falls back to a generic "provide a valid value" remediation when the
    caller supplies none.
    """
    remediation = remediation or f"Provide a valid '{field}' value"
    err = error_response(
        f"Invalid field '{field}' for task.{action}: {message}",
        error_code=code,
        error_type=ErrorType.VALIDATION,
        remediation=remediation,
        details={"field": field, "action": f"task.{action}"},
        request_id=request_id,
    )
    return asdict(err)
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def _resolve_specs_dir(
    config: ServerConfig, workspace: Optional[str]
) -> Optional[Path]:
    """Resolve the specs directory: explicit workspace > config value > discovery.

    Returns ``None`` (after logging) if resolution raises for any reason.
    """
    try:
        if workspace:
            return find_specs_directory(workspace)

        configured = getattr(config, "specs_dir", None)
        if isinstance(configured, Path):
            return configured
        if isinstance(configured, str) and configured.strip():
            return Path(configured)

        # Nothing configured — fall back to auto-discovery from CWD.
        return find_specs_directory()
    except Exception:  # pragma: no cover - defensive guard
        logger.exception(
            "Failed to resolve specs directory", extra={"workspace": workspace}
        )
        return None
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def _load_spec_data(
    spec_id: str, specs_dir: Optional[Path], request_id: str
) -> Tuple[Optional[Dict[str, Any]], Optional[dict]]:
    """Load a spec by ID, returning ``(spec_data, error_payload)``.

    Exactly one element is non-None: the loaded spec on success, or a
    ready-to-return error dict on failure (missing dir or unknown spec).
    """
    if specs_dir is None:
        return None, _specs_dir_missing_error(request_id)

    loaded = load_spec(spec_id, specs_dir)
    if loaded is not None:
        return loaded, None

    missing = error_response(
        f"Spec not found: {spec_id}",
        error_code=ErrorCode.SPEC_NOT_FOUND,
        error_type=ErrorType.NOT_FOUND,
        remediation='Verify the spec ID via spec(action="list")',
        request_id=request_id,
    )
    return None, asdict(missing)
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def _attach_meta(
|
|
176
|
+
response: dict,
|
|
177
|
+
*,
|
|
178
|
+
request_id: str,
|
|
179
|
+
duration_ms: Optional[float] = None,
|
|
180
|
+
warnings: Optional[List[str]] = None,
|
|
181
|
+
) -> dict:
|
|
182
|
+
meta = response.setdefault("meta", {"version": "response-v2"})
|
|
183
|
+
meta["request_id"] = request_id
|
|
184
|
+
if warnings:
|
|
185
|
+
existing = list(meta.get("warnings") or [])
|
|
186
|
+
existing.extend(warnings)
|
|
187
|
+
meta["warnings"] = existing
|
|
188
|
+
if duration_ms is not None:
|
|
189
|
+
telemetry = dict(meta.get("telemetry") or {})
|
|
190
|
+
telemetry["duration_ms"] = round(duration_ms, 2)
|
|
191
|
+
meta["telemetry"] = telemetry
|
|
192
|
+
return response
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
def _filter_hierarchy(
|
|
196
|
+
hierarchy: Dict[str, Any],
|
|
197
|
+
max_depth: int,
|
|
198
|
+
include_metadata: bool,
|
|
199
|
+
) -> Dict[str, Any]:
|
|
200
|
+
result: Dict[str, Any] = {}
|
|
201
|
+
|
|
202
|
+
for node_id, node_data in hierarchy.items():
|
|
203
|
+
node_depth = node_id.count("-") if node_id != "spec-root" else 0
|
|
204
|
+
if max_depth > 0 and node_depth > max_depth:
|
|
205
|
+
continue
|
|
206
|
+
|
|
207
|
+
filtered_node: Dict[str, Any] = {
|
|
208
|
+
"type": node_data.get("type"),
|
|
209
|
+
"title": node_data.get("title"),
|
|
210
|
+
"status": node_data.get("status"),
|
|
211
|
+
}
|
|
212
|
+
if "children" in node_data:
|
|
213
|
+
filtered_node["children"] = node_data["children"]
|
|
214
|
+
if "parent" in node_data:
|
|
215
|
+
filtered_node["parent"] = node_data["parent"]
|
|
216
|
+
|
|
217
|
+
if include_metadata:
|
|
218
|
+
if "metadata" in node_data:
|
|
219
|
+
filtered_node["metadata"] = node_data["metadata"]
|
|
220
|
+
if "dependencies" in node_data:
|
|
221
|
+
filtered_node["dependencies"] = node_data["dependencies"]
|
|
222
|
+
|
|
223
|
+
result[node_id] = filtered_node
|
|
224
|
+
|
|
225
|
+
return result
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def _pagination_warnings(total_count: int, has_more: bool) -> List[str]:
    """Build advisory warnings for large or truncated task result sets."""
    notes: List[str] = []
    if total_count > _TASK_WARNING_THRESHOLD:
        notes.append(
            f"{total_count} results returned; consider using pagination to limit payload size."
        )
    if has_more:
        notes.append("Additional results available. Follow the cursor to continue.")
    return notes
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
def _handle_prepare(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Handle the task "prepare" action: build implementation context for a task.

    Validates ``spec_id`` (required) and ``task_id`` (optional), resolves the
    specs directory, then delegates to :func:`core_prepare_task`. Emits a
    duration timer and a success counter, and stamps request metadata on the
    delegate's response.
    """
    request_id = _request_id()
    action = "prepare"
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    task_id = payload.get("task_id")
    if task_id is not None and (not isinstance(task_id, str) or not task_id.strip()):
        return _validation_error(
            field="task_id",
            action=action,
            message="task_id must be a non-empty string",
            request_id=request_id,
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    # Fix: strip task_id before the lookup, mirroring the spec_id handling and
    # the start-batch handler (which strips every task ID). Previously a task
    # ID with surrounding whitespace passed validation but was forwarded raw.
    result = core_prepare_task(
        spec_id=spec_id.strip(),
        specs_dir=specs_dir,
        task_id=task_id.strip() if task_id is not None else None,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return _attach_meta(result, request_id=request_id, duration_ms=elapsed_ms)
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def _handle_prepare_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """
    Handle prepare-batch action for parallel task execution.

    Returns multiple independent tasks with context for parallel implementation.

    Validates spec_id (required), max_tasks (positive int), and token_budget
    (int >= 1000), resolves the specs directory, then delegates to
    prepare_batch_context. On delegate error, returns an OPERATION_FAILED
    payload; on success, emits timer/counter metrics and returns the batch
    context with any delegate warnings attached to the response meta.
    """
    request_id = _request_id()
    action = "prepare-batch"
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    # Optional parameters with defaults
    max_tasks = payload.get("max_tasks", DEFAULT_MAX_TASKS)
    if not isinstance(max_tasks, int) or max_tasks < 1:
        return _validation_error(
            field="max_tasks",
            action=action,
            message="max_tasks must be a positive integer",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
        )

    # Floor of 1000 guards against budgets too small to hold any task context.
    token_budget = payload.get("token_budget", DEFAULT_TOKEN_BUDGET)
    if not isinstance(token_budget, int) or token_budget < 1000:
        return _validation_error(
            field="token_budget",
            action=action,
            message="token_budget must be an integer >= 1000",
            request_id=request_id,
            code=ErrorCode.VALIDATION_ERROR,
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    result, error = prepare_batch_context(
        spec_id=spec_id.strip(),
        max_tasks=max_tasks,
        token_budget=token_budget,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error:
        # Only the error counter is bumped here; no timer on the failure path.
        _metrics.counter(_metric(action), labels={"status": "error"})
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.OPERATION_FAILED,
                error_type=ErrorType.VALIDATION,
                request_id=request_id,
            )
        )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    # Build response with batch context
    response = success_response(
        spec_id=spec_id.strip(),
        tasks=result.get("tasks", []),
        task_count=result.get("task_count", 0),
        spec_complete=result.get("spec_complete", False),
        all_blocked=result.get("all_blocked", False),
        stale_tasks=result.get("stale_tasks", []),
        dependency_graph=result.get("dependency_graph", {}),
        token_estimate=result.get("token_estimate", 0),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )

    # Delegate-produced warnings (if any) are surfaced via meta.warnings.
    warnings = result.get("warnings", [])
    return _attach_meta(
        asdict(response),
        request_id=request_id,
        duration_ms=elapsed_ms,
        warnings=warnings if warnings else None,
    )
|
|
361
|
+
|
|
362
|
+
|
|
363
|
+
def _handle_start_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """
    Handle start-batch action for atomically starting multiple tasks.

    Validates all tasks can be started before making any changes.

    Requires spec_id and a non-empty list of string task_ids. Task IDs are
    stripped before being handed to start_batch. On delegate error, partial
    results (if any) are attached to the error details; on success, emits
    metrics and returns the started task list with timestamps.
    """
    request_id = _request_id()
    action = "start-batch"
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    task_ids = payload.get("task_ids")
    if not isinstance(task_ids, list) or not task_ids:
        return _validation_error(
            field="task_ids",
            action=action,
            message="Provide a non-empty list of task IDs",
            request_id=request_id,
        )

    # Validate all task_ids are strings
    for i, tid in enumerate(task_ids):
        if not isinstance(tid, str) or not tid.strip():
            return _validation_error(
                field=f"task_ids[{i}]",
                action=action,
                message="Each task ID must be a non-empty string",
                request_id=request_id,
                code=ErrorCode.VALIDATION_ERROR,
            )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    result, error = start_batch(
        spec_id=spec_id.strip(),
        task_ids=[tid.strip() for tid in task_ids],
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        # Include partial results in error response
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.OPERATION_FAILED,
                error_type=ErrorType.VALIDATION,
                request_id=request_id,
                details=result if result else None,
            )
        )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    response = success_response(
        spec_id=spec_id.strip(),
        started=result.get("started", []),
        started_count=result.get("started_count", 0),
        started_at=result.get("started_at"),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    return _attach_meta(asdict(response), request_id=request_id, duration_ms=elapsed_ms)
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
def _handle_complete_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Handle complete-batch action for completing multiple tasks with partial failure support."""
    request_id = _request_id()
    action = "complete-batch"

    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    completions = payload.get("completions")
    if not isinstance(completions, list) or not completions:
        return _validation_error(
            field="completions",
            action=action,
            message="Provide a non-empty list of completions",
            request_id=request_id,
        )

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    began = time.perf_counter()
    result, error = complete_batch(
        spec_id=spec_id.strip(),
        completions=completions,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - began) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        # Partial results (if any) ride along in the error details.
        failure = error_response(
            error,
            error_code=ErrorCode.OPERATION_FAILED,
            error_type=ErrorType.VALIDATION,
            request_id=request_id,
            details=result if result else None,
        )
        return asdict(failure)

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    response = success_response(
        spec_id=spec_id.strip(),
        results=result.get("results", {}),
        completed_count=result.get("completed_count", 0),
        failed_count=result.get("failed_count", 0),
        total_processed=result.get("total_processed", 0),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    return _attach_meta(asdict(response), request_id=request_id, duration_ms=elapsed_ms)
|
|
478
|
+
|
|
479
|
+
|
|
480
|
+
def _handle_reset_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """
    Handle reset-batch action for resetting stale or specified in_progress tasks.

    Resets tasks back to pending status and clears started_at timestamp.
    If task_ids not provided, finds stale tasks automatically based on threshold.

    Requires spec_id. Optional task_ids (list of non-empty strings, stripped
    before use) and threshold_hours (positive number, defaults to
    STALE_TASK_THRESHOLD_HOURS). Delegates to reset_batch; partial results
    (if any) are attached to the error details on failure.
    """
    request_id = _request_id()
    action = "reset-batch"
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    # Optional: specific task IDs to reset
    task_ids = payload.get("task_ids")
    if task_ids is not None:
        if not isinstance(task_ids, list):
            return _validation_error(
                field="task_ids",
                action=action,
                message="task_ids must be a list of strings",
                request_id=request_id,
            )
        # Validate all task_ids are strings
        for i, tid in enumerate(task_ids):
            if not isinstance(tid, str) or not tid.strip():
                return _validation_error(
                    field=f"task_ids[{i}]",
                    action=action,
                    message="Each task ID must be a non-empty string",
                    request_id=request_id,
                    code=ErrorCode.VALIDATION_ERROR,
                )
        task_ids = [tid.strip() for tid in task_ids]

    # Optional: threshold in hours for stale detection
    threshold_hours = payload.get("threshold_hours", STALE_TASK_THRESHOLD_HOURS)
    if not isinstance(threshold_hours, (int, float)) or threshold_hours <= 0:
        return _validation_error(
            field="threshold_hours",
            action=action,
            message="threshold_hours must be a positive number",
            request_id=request_id,
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    result, error = reset_batch(
        spec_id=spec_id.strip(),
        task_ids=task_ids,
        threshold_hours=float(threshold_hours),
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.OPERATION_FAILED,
                error_type=ErrorType.VALIDATION,
                request_id=request_id,
                details=result if result else None,
            )
        )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})

    response = success_response(
        spec_id=spec_id.strip(),
        reset=result.get("reset", []),
        reset_count=result.get("reset_count", 0),
        errors=result.get("errors"),
        message=result.get("message"),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    return _attach_meta(asdict(response), request_id=request_id, duration_ms=elapsed_ms)
|
|
569
|
+
|
|
570
|
+
|
|
571
|
+
def _handle_next(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Handle the task "next" action: pick the next actionable task in a spec.

    Loads the spec and asks get_next_task for a candidate. When none exists,
    the response distinguishes "spec complete" (no pending tasks, at least one
    completed) from "no actionable tasks" (e.g. everything blocked).

    NOTE(review): unlike sibling handlers, this one emits no duration timer
    metric and does not route through _attach_meta — telemetry is passed
    directly into success_response instead. Confirm this asymmetry is intended.
    """
    request_id = _request_id()
    action = "next"
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None  # narrow Optional

    start = time.perf_counter()
    next_task = get_next_task(spec_data)
    elapsed_ms = (time.perf_counter() - start) * 1000
    telemetry = {"duration_ms": round(elapsed_ms, 2)}

    if next_task:
        # get_next_task yields a (task_id, task_data) pair.
        task_id, task_data = next_task
        response = success_response(
            spec_id=spec_id.strip(),
            found=True,
            task_id=task_id,
            title=task_data.get("title", ""),
            type=task_data.get("type", "task"),
            status=task_data.get("status", "pending"),
            metadata=task_data.get("metadata", {}),
            request_id=request_id,
            telemetry=telemetry,
        )
    else:
        # No candidate: summarize the hierarchy to explain why.
        hierarchy = spec_data.get("hierarchy", {})
        all_tasks = [
            node
            for node in hierarchy.values()
            if node.get("type") in {"task", "subtask", "verify"}
        ]
        completed = sum(1 for node in all_tasks if node.get("status") == "completed")
        pending = sum(1 for node in all_tasks if node.get("status") == "pending")
        response = success_response(
            spec_id=spec_id.strip(),
            found=False,
            spec_complete=pending == 0 and completed > 0,
            message="All tasks completed"
            if pending == 0 and completed > 0
            else "No actionable tasks (tasks may be blocked)",
            request_id=request_id,
            telemetry=telemetry,
        )

    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
|
|
629
|
+
|
|
630
|
+
|
|
631
|
+
def _handle_info(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Return the raw hierarchy node for a single task.

    Validates ``spec_id`` and ``task_id`` from *payload*, loads the spec
    from the resolved specs directory, and responds with the task node
    exactly as stored in the hierarchy.
    """
    req_id = _request_id()
    action = "info"

    raw_spec = payload.get("spec_id")
    raw_task = payload.get("task_id")

    # Both identifiers must be non-empty strings.
    for field_name, raw_value, label in (
        ("spec_id", raw_spec, "spec"),
        ("task_id", raw_task, "task"),
    ):
        if not isinstance(raw_value, str) or not raw_value.strip():
            return _validation_error(
                field=field_name,
                action=action,
                message=f"Provide a non-empty {label} identifier",
                request_id=req_id,
            )

    spec_key = raw_spec.strip()
    task_key = raw_task.strip()

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    spec_data, load_error = _load_spec_data(spec_key, specs_dir, req_id)
    if load_error:
        return load_error
    assert spec_data is not None  # narrow Optional after the error check

    node = spec_data.get("hierarchy", {}).get(task_key)
    if node is None:
        failure = error_response(
            f"Task not found: {task_key}",
            error_code=ErrorCode.TASK_NOT_FOUND,
            error_type=ErrorType.NOT_FOUND,
            remediation="Verify the task ID exists in the hierarchy",
            request_id=req_id,
        )
        return asdict(failure)

    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(
        success_response(
            spec_id=spec_key,
            task_id=task_key,
            task=node,
            request_id=req_id,
        )
    )
def _handle_check_deps(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Report dependency readiness for a single task.

    Delegates the actual analysis to ``check_dependencies`` and returns
    its result verbatim, annotated with the spec id and a duration
    telemetry entry.
    """
    req_id = _request_id()
    action = "check-deps"

    raw_spec = payload.get("spec_id")
    raw_task = payload.get("task_id")

    # Both identifiers must be non-empty strings.
    for field_name, raw_value, label in (
        ("spec_id", raw_spec, "spec"),
        ("task_id", raw_task, "task"),
    ):
        if not isinstance(raw_value, str) or not raw_value.strip():
            return _validation_error(
                field=field_name,
                action=action,
                message=f"Provide a non-empty {label} identifier",
                request_id=req_id,
            )

    spec_key = raw_spec.strip()
    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    spec_data, load_error = _load_spec_data(spec_key, specs_dir, req_id)
    if load_error:
        return load_error
    assert spec_data is not None  # narrow Optional after the error check

    timer_start = time.perf_counter()
    deps = check_dependencies(spec_data, raw_task.strip())
    duration_ms = (time.perf_counter() - timer_start) * 1000

    result = success_response(
        **deps,
        spec_id=spec_key,
        request_id=req_id,
        telemetry={"duration_ms": round(duration_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", duration_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(result)
def _handle_progress(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Summarize completion progress for a hierarchy node.

    Defaults to the spec root; when ``include_phases`` is true (the
    default) the summary is augmented with the per-phase breakdown.
    """
    req_id = _request_id()
    action = "progress"

    raw_spec = payload.get("spec_id")
    raw_node = payload.get("node_id", "spec-root")
    include_phases = payload.get("include_phases", True)

    if not isinstance(raw_spec, str) or not raw_spec.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=req_id,
        )
    if not isinstance(raw_node, str) or not raw_node.strip():
        return _validation_error(
            field="node_id",
            action=action,
            message="Provide a non-empty node identifier",
            request_id=req_id,
        )
    if not isinstance(include_phases, bool):
        return _validation_error(
            field="include_phases",
            action=action,
            message="Expected a boolean value",
            request_id=req_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    spec_data, load_error = _load_spec_data(raw_spec.strip(), specs_dir, req_id)
    if load_error:
        return load_error
    assert spec_data is not None  # narrow Optional after the error check

    summary = get_progress_summary(spec_data, raw_node.strip())
    if include_phases:
        summary["phases"] = list_phases(spec_data)

    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(success_response(**summary, request_id=req_id))
def _handle_list(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """List task-like nodes (task/subtask/verify) of a spec, paginated.

    Supports an exact ``status_filter``, an ``include_completed`` toggle,
    and opaque cursor pagination ordered by node id. Returns a paginated
    response dict; validation failures return structured error dicts.
    """
    request_id = _request_id()
    action = "list"
    spec_id = payload.get("spec_id")
    status_filter = payload.get("status_filter")
    include_completed = payload.get("include_completed", True)
    limit = payload.get("limit")
    cursor = payload.get("cursor")

    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if status_filter is not None:
        if not isinstance(status_filter, str) or status_filter not in _ALLOWED_STATUS:
            return _validation_error(
                field="status_filter",
                action=action,
                message=f"Status must be one of: {sorted(_ALLOWED_STATUS)}",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
    if not isinstance(include_completed, bool):
        return _validation_error(
            field="include_completed",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Clamp the requested page size to the task-endpoint bounds.
    page_size = normalize_page_size(
        limit,
        default=_TASK_DEFAULT_PAGE_SIZE,
        maximum=_TASK_MAX_PAGE_SIZE,
    )

    # An opaque cursor carries the last node id of the previous page.
    start_after_id = None
    if cursor:
        try:
            cursor_data = decode_cursor(cursor)
            start_after_id = cursor_data.get("last_id")
        except CursorError as exc:
            return asdict(
                error_response(
                    f"Invalid cursor: {exc.reason or exc}",
                    error_code=ErrorCode.INVALID_FORMAT,
                    error_type=ErrorType.VALIDATION,
                    request_id=request_id,
                )
            )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None  # narrow Optional after the error check

    start = time.perf_counter()
    hierarchy = spec_data.get("hierarchy", {})
    tasks: List[Dict[str, Any]] = []
    for node_id, node in hierarchy.items():
        # Only leaf-like executable nodes are listed; phases/groups skipped.
        if node.get("type") not in {"task", "subtask", "verify"}:
            continue
        status = node.get("status", "pending")
        if status_filter and status != status_filter:
            continue
        if not include_completed and status == "completed":
            continue
        tasks.append(
            {
                "id": node_id,
                "title": node.get("title", "Untitled"),
                "type": node.get("type", "task"),
                "status": status,
                "icon": node.get("icon"),
                "file_path": node.get("metadata", {}).get("file_path"),
                "parent": node.get("parent"),
            }
        )

    # Deterministic order is required for stable cursor pagination.
    tasks.sort(key=lambda item: item.get("id", ""))
    total_count = len(tasks)

    # Resume after the cursor position; an unknown id falls through and the
    # page starts from the beginning.
    if start_after_id:
        try:
            start_index = next(
                i for i, task in enumerate(tasks) if task.get("id") == start_after_id
            )
            tasks = tasks[start_index + 1 :]
        except StopIteration:
            pass

    # Fetch one extra row to detect whether another page exists.
    page_tasks = tasks[: page_size + 1]
    has_more = len(page_tasks) > page_size
    if has_more:
        page_tasks = page_tasks[:page_size]

    next_cursor = None
    if has_more and page_tasks:
        next_cursor = encode_cursor({"last_id": page_tasks[-1].get("id")})

    # Fix: the original computed the elapsed time and discarded it
    # ("timing placeholder"); report it like the sibling query handler does.
    elapsed_ms = (time.perf_counter() - start) * 1000
    warnings = _pagination_warnings(total_count, has_more)
    response = paginated_response(
        data={
            "spec_id": spec_id.strip(),
            "tasks": page_tasks,
            "count": len(page_tasks),
        },
        cursor=next_cursor,
        has_more=has_more,
        page_size=page_size,
        total_count=total_count,
        warnings=warnings or None,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.counter(_metric(action), labels={"status": "success"})
    return response
def _handle_query(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Query hierarchy nodes by status and/or parent, paginated.

    Unlike the list handler this does not restrict by node type: every
    hierarchy entry matching the filters is returned. Results are sorted
    by task id and paginated with an opaque cursor.
    """
    request_id = _request_id()
    action = "query"
    spec_id = payload.get("spec_id")
    status = payload.get("status")  # optional exact-match filter
    parent = payload.get("parent")  # optional parent-id filter
    limit = payload.get("limit")
    cursor = payload.get("cursor")

    # --- input validation -------------------------------------------------
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if status is not None:
        if not isinstance(status, str) or status not in _ALLOWED_STATUS:
            return _validation_error(
                field="status",
                action=action,
                message=f"Status must be one of: {sorted(_ALLOWED_STATUS)}",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
    if parent is not None and (not isinstance(parent, str) or not parent.strip()):
        return _validation_error(
            field="parent",
            action=action,
            message="Parent must be a non-empty string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Clamp the requested page size to the task-endpoint bounds.
    page_size = normalize_page_size(
        limit,
        default=_TASK_DEFAULT_PAGE_SIZE,
        maximum=_TASK_MAX_PAGE_SIZE,
    )

    # An opaque cursor carries the last task id of the previous page.
    start_after_id = None
    if cursor:
        try:
            cursor_data = decode_cursor(cursor)
            start_after_id = cursor_data.get("last_id")
        except CursorError as exc:
            return asdict(
                error_response(
                    f"Invalid cursor: {exc.reason or exc}",
                    error_code=ErrorCode.INVALID_FORMAT,
                    error_type=ErrorType.VALIDATION,
                    request_id=request_id,
                )
            )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None  # narrow Optional after the error check

    start = time.perf_counter()
    hierarchy = spec_data.get("hierarchy", {})
    tasks: List[Dict[str, Any]] = []
    for task_id, task_data in hierarchy.items():
        if status and task_data.get("status") != status:
            continue
        if parent and task_data.get("parent") != parent:
            continue
        tasks.append(
            {
                "task_id": task_id,
                "title": task_data.get("title", task_id),
                "status": task_data.get("status", "pending"),
                "type": task_data.get("type", "task"),
                "parent": task_data.get("parent"),
            }
        )

    # Deterministic order is required for stable cursor pagination.
    tasks.sort(key=lambda item: item.get("task_id", ""))
    total_count = len(tasks)

    # Resume after the cursor position; an unknown id falls through and the
    # page starts from the beginning.
    if start_after_id:
        try:
            start_index = next(
                i
                for i, task in enumerate(tasks)
                if task.get("task_id") == start_after_id
            )
            tasks = tasks[start_index + 1 :]
        except StopIteration:
            pass

    # Fetch one extra row to detect whether another page exists.
    page_tasks = tasks[: page_size + 1]
    has_more = len(page_tasks) > page_size
    if has_more:
        page_tasks = page_tasks[:page_size]

    next_cursor = None
    if has_more and page_tasks:
        next_cursor = encode_cursor({"last_id": page_tasks[-1].get("task_id")})

    elapsed_ms = (time.perf_counter() - start) * 1000
    warnings = _pagination_warnings(total_count, has_more)
    response = paginated_response(
        data={
            "spec_id": spec_id.strip(),
            "tasks": page_tasks,
            "count": len(page_tasks),
        },
        cursor=next_cursor,
        has_more=has_more,
        page_size=page_size,
        total_count=total_count,
        warnings=warnings or None,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.counter(_metric(action), labels={"status": "success"})
    return response
def _handle_hierarchy(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Return a depth-limited view of the spec hierarchy, paginated by node id."""
    request_id = _request_id()
    action = "hierarchy"
    spec_id = payload.get("spec_id")
    max_depth = payload.get("max_depth", 2)  # 0-10; depth of nodes to include
    include_metadata = payload.get("include_metadata", False)
    limit = payload.get("limit")
    cursor = payload.get("cursor")

    # --- input validation -------------------------------------------------
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(max_depth, int) or max_depth < 0 or max_depth > 10:
        return _validation_error(
            field="max_depth",
            action=action,
            message="max_depth must be between 0 and 10",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if not isinstance(include_metadata, bool):
        return _validation_error(
            field="include_metadata",
            action=action,
            message="Expected a boolean value",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Clamp the requested page size to the task-endpoint bounds.
    page_size = normalize_page_size(
        limit,
        default=_TASK_DEFAULT_PAGE_SIZE,
        maximum=_TASK_MAX_PAGE_SIZE,
    )

    # An opaque cursor carries the last node id of the previous page.
    start_after_id = None
    if cursor:
        try:
            cursor_data = decode_cursor(cursor)
            start_after_id = cursor_data.get("last_id")
        except CursorError as exc:
            return asdict(
                error_response(
                    f"Invalid cursor: {exc.reason or exc}",
                    error_code=ErrorCode.INVALID_FORMAT,
                    error_type=ErrorType.VALIDATION,
                    request_id=request_id,
                )
            )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None  # narrow Optional after the error check

    start = time.perf_counter()
    full_hierarchy = spec_data.get("hierarchy", {})
    filtered = _filter_hierarchy(full_hierarchy, max_depth, include_metadata)
    # Node ids sorted lexically so cursor pagination is deterministic.
    sorted_ids = sorted(filtered.keys())

    # NOTE: unlike the list/query handlers, an unknown cursor id here resets
    # to the first page instead of silently continuing past it.
    if start_after_id:
        try:
            start_index = sorted_ids.index(start_after_id) + 1
        except ValueError:
            start_index = 0
    else:
        start_index = 0

    # Take one extra id to detect whether another page follows.
    page_ids = sorted_ids[start_index : start_index + page_size + 1]
    has_more = len(page_ids) > page_size
    if has_more:
        page_ids = page_ids[:page_size]

    hierarchy_page = {node_id: filtered[node_id] for node_id in page_ids}
    next_cursor = None
    if has_more and page_ids:
        next_cursor = encode_cursor({"last_id": page_ids[-1]})

    elapsed_ms = (time.perf_counter() - start) * 1000
    warnings = _pagination_warnings(len(filtered), has_more)
    response = paginated_response(
        data={
            "spec_id": spec_id.strip(),
            "hierarchy": hierarchy_page,
            "node_count": len(hierarchy_page),
            "total_nodes": len(filtered),
            "filters_applied": {
                "max_depth": max_depth,
                "include_metadata": include_metadata,
            },
        },
        cursor=next_cursor,
        has_more=has_more,
        page_size=page_size,
        total_count=len(filtered),
        warnings=warnings or None,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.counter(_metric(action), labels={"status": "success"})
    return response
def _handle_update_status(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Set a task's status, roll the change up to its parent, and persist."""
    request_id = _request_id()
    action = "update-status"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    status = payload.get("status")
    note = payload.get("note")  # optional journal note

    # --- input validation -------------------------------------------------
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(task_id, str) or not task_id.strip():
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if not isinstance(status, str) or status not in _ALLOWED_STATUS:
        return _validation_error(
            field="status",
            action=action,
            message=f"Status must be one of: {sorted(_ALLOWED_STATUS)}",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if note is not None and (not isinstance(note, str) or not note.strip()):
        return _validation_error(
            field="note",
            action=action,
            message="note must be a non-empty string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None  # narrow Optional after the error check

    hierarchy = spec_data.get("hierarchy", {})
    task_key = task_id.strip()
    if task_key not in hierarchy:
        return asdict(
            error_response(
                f"Task not found: {task_key}",
                error_code=ErrorCode.TASK_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Verify the task ID exists in the hierarchy",
                request_id=request_id,
            )
        )

    start = time.perf_counter()
    # note=None here: the note is recorded as a journal entry below instead.
    updated = update_task_status(spec_data, task_key, status, note=None)
    if not updated:
        return asdict(
            error_response(
                f"Failed to update task status for {task_key}",
                error_code=ErrorCode.CONFLICT,
                error_type=ErrorType.CONFLICT,
                remediation="Confirm the task exists and the status is valid",
                request_id=request_id,
            )
        )

    # Recompute the parent chain's derived status.
    # NOTE(review): unlike _handle_start/_handle_complete this path does not
    # call sync_computed_fields — confirm whether that is intentional.
    update_parent_status(spec_data, task_key)

    if note:
        add_journal_entry(
            spec_data,
            title=f"Status changed to {status}",
            content=note,
            entry_type="status_change",
            task_id=task_key,
            author="foundry-mcp",
        )

    # Persist the mutated spec; failure here loses the in-memory change.
    if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
        return asdict(
            error_response(
                "Failed to save spec",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check filesystem permissions and retry",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start) * 1000
    response = success_response(
        spec_id=spec_id.strip(),
        task_id=task_key,
        new_status=status,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
def _handle_start(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Mark a task in_progress after verifying its dependencies are satisfied."""
    request_id = _request_id()
    action = "start"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    note = payload.get("note")  # optional journal note

    # --- input validation -------------------------------------------------
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(task_id, str) or not task_id.strip():
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if note is not None and (not isinstance(note, str) or not note.strip()):
        return _validation_error(
            field="note",
            action=action,
            message="note must be a non-empty string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None  # narrow Optional after the error check

    start = time.perf_counter()
    # Refuse to start while any dependency is unfinished.
    deps = check_dependencies(spec_data, task_id.strip())
    if not deps.get("can_start", False):
        blockers = [
            b.get("title", b.get("id", ""))
            for b in (deps.get("blocked_by") or [])
            if isinstance(b, dict)
        ]
        return asdict(
            error_response(
                "Task is blocked by: " + ", ".join([b for b in blockers if b]),
                error_code=ErrorCode.CONFLICT,
                error_type=ErrorType.CONFLICT,
                remediation="Resolve blocking tasks then retry",
                details={"blocked_by": deps.get("blocked_by")},
                request_id=request_id,
            )
        )

    # note=None here: the note is recorded as a journal entry below instead.
    updated = update_task_status(spec_data, task_id.strip(), "in_progress", note=None)
    if not updated:
        return asdict(
            error_response(
                f"Failed to start task: {task_id.strip()}",
                error_code=ErrorCode.CONFLICT,
                error_type=ErrorType.CONFLICT,
                remediation="Confirm the task exists and is not blocked",
                request_id=request_id,
            )
        )

    # Roll the status change up to ancestors and refresh derived fields.
    update_parent_status(spec_data, task_id.strip())
    sync_computed_fields(spec_data)

    if note:
        add_journal_entry(
            spec_data,
            title=f"Task Started: {task_id.strip()}",
            content=note,
            entry_type="status_change",
            task_id=task_id.strip(),
            author="foundry-mcp",
        )

    # Persist the mutated spec; failure here loses the in-memory change.
    if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
        return asdict(
            error_response(
                "Failed to save spec",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check filesystem permissions and retry",
                request_id=request_id,
            )
        )

    # Re-read the node so the response reflects post-update metadata.
    task_data = spec_data.get("hierarchy", {}).get(task_id.strip(), {})
    started_at = task_data.get("metadata", {}).get("started_at")
    elapsed_ms = (time.perf_counter() - start) * 1000
    response = success_response(
        spec_id=spec_id.strip(),
        task_id=task_id.strip(),
        started_at=started_at,
        title=task_data.get("title", ""),
        type=task_data.get("type", "task"),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
def _handle_complete(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Mark a task completed, journal the note, persist, and suggest a commit.

    The mandatory ``completion_note`` is recorded as a journal entry.
    When git integration is enabled, the configured commit cadence
    ("task" or "phase") determines whether the response suggests a commit.
    """
    request_id = _request_id()
    action = "complete"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    completion_note = payload.get("completion_note")  # required

    # --- input validation -------------------------------------------------
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(task_id, str) or not task_id.strip():
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if not isinstance(completion_note, str) or not completion_note.strip():
        return _validation_error(
            field="completion_note",
            action=action,
            message="Provide a non-empty completion note",
            request_id=request_id,
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None  # narrow Optional after the error check

    start = time.perf_counter()
    # note=None here: the completion note is journaled separately below.
    updated = update_task_status(spec_data, task_id.strip(), "completed", note=None)
    if not updated:
        return asdict(
            error_response(
                f"Failed to complete task: {task_id.strip()}",
                error_code=ErrorCode.CONFLICT,
                error_type=ErrorType.CONFLICT,
                remediation="Confirm the task exists and is not already completed",
                request_id=request_id,
            )
        )

    # Roll the status change up to ancestors and refresh derived fields.
    update_parent_status(spec_data, task_id.strip())
    sync_computed_fields(spec_data)

    # Re-read the node so metadata reflects the completion we just applied.
    task_data = spec_data.get("hierarchy", {}).get(task_id.strip(), {})

    # Determine if commit is suggested based on git cadence config
    suggest_commit = False
    commit_scope: Optional[str] = None
    commit_message_hint: Optional[str] = None

    if config.git.enabled:
        cadence = config.git.commit_cadence
        hierarchy = spec_data.get("hierarchy", {})

        if cadence == "task":
            # Per-task cadence: every completion suggests a commit.
            suggest_commit = True
            commit_scope = "task"
            commit_message_hint = f"task: {task_data.get('title', task_id.strip())}"
        elif cadence == "phase":
            # Check if parent phase just completed
            parent_id = task_data.get("parent")
            if parent_id:
                parent_data = hierarchy.get(parent_id, {})
                # Only suggest commit if parent is a phase and is now completed
                if (
                    parent_data.get("type") == "phase"
                    and parent_data.get("status") == "completed"
                ):
                    suggest_commit = True
                    commit_scope = "phase"
                    commit_message_hint = (
                        f"phase: {parent_data.get('title', parent_id)}"
                    )
    # Journal the completion note (always present — validated above).
    add_journal_entry(
        spec_data,
        title=f"Task Completed: {task_data.get('title', task_id.strip())}",
        content=completion_note,
        entry_type="status_change",
        task_id=task_id.strip(),
        author="foundry-mcp",
    )

    # Persist the mutated spec; failure here loses the in-memory change.
    if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
        return asdict(
            error_response(
                "Failed to save spec",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check filesystem permissions and retry",
                request_id=request_id,
            )
        )

    completed_at = task_data.get("metadata", {}).get("completed_at")
    progress = get_progress_summary(spec_data)
    elapsed_ms = (time.perf_counter() - start) * 1000
    response = success_response(
        spec_id=spec_id.strip(),
        task_id=task_id.strip(),
        completed_at=completed_at,
        progress={
            "completed_tasks": progress.get("completed_tasks", 0),
            "total_tasks": progress.get("total_tasks", 0),
            "percentage": progress.get("percentage", 0),
        },
        suggest_commit=suggest_commit,
        commit_scope=commit_scope,
        commit_message_hint=commit_message_hint,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
def _handle_block(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Mark a task as blocked, journal the blocker, and persist the spec.

    Validates the payload field by field, flags the task via ``mark_blocked``,
    appends a ``blocker`` journal entry, saves the spec, and returns a
    serialized response envelope with timing telemetry.
    """
    request_id = _request_id()
    action = "block"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    reason = payload.get("reason")
    blocker_type = payload.get("blocker_type", "dependency")
    ticket = payload.get("ticket")

    allowed_types = {"dependency", "technical", "resource", "decision"}

    # Reject malformed input one field at a time, in payload order.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not (isinstance(task_id, str) and task_id.strip()):
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if not (isinstance(reason, str) and reason.strip()):
        return _validation_error(
            field="reason",
            action=action,
            message="Provide a non-empty blocker reason",
            request_id=request_id,
        )
    if not isinstance(blocker_type, str) or blocker_type not in allowed_types:
        return _validation_error(
            field="blocker_type",
            action=action,
            message=f"blocker_type must be one of: {sorted(allowed_types)}",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if ticket is not None and not isinstance(ticket, str):
        return _validation_error(
            field="ticket",
            action=action,
            message="ticket must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    spec_key = spec_id.strip()
    task_key = task_id.strip()

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    spec_data, load_error = _load_spec_data(spec_key, specs_dir, request_id)
    if load_error:
        return load_error
    assert spec_data is not None

    start = time.perf_counter()
    did_block = mark_blocked(
        spec_data,
        task_key,
        reason.strip(),
        blocker_type=blocker_type,
        ticket=ticket,
    )
    if not did_block:
        return asdict(
            error_response(
                f"Task not found: {task_key}",
                error_code=ErrorCode.TASK_NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Verify the task ID exists in the hierarchy",
                request_id=request_id,
            )
        )

    # Journal first, then refresh derived fields before saving.
    ticket_suffix = f" [Ticket: {ticket}]" if ticket else ""
    add_journal_entry(
        spec_data,
        title=f"Task Blocked: {task_key}",
        content=f"Blocker ({blocker_type}): {reason.strip()}" + ticket_suffix,
        entry_type="blocker",
        task_id=task_key,
        author="foundry-mcp",
    )
    sync_computed_fields(spec_data)

    if specs_dir is None or not save_spec(spec_key, spec_data, specs_dir):
        return asdict(
            error_response(
                "Failed to save spec",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check filesystem permissions and retry",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start) * 1000
    result = success_response(
        spec_id=spec_key,
        task_id=task_key,
        blocker_type=blocker_type,
        reason=reason.strip(),
        ticket=ticket,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(result)
|
|
1579
|
+
|
|
1580
|
+
|
|
1581
|
+
def _handle_unblock(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Clear a task's blocked state, journal the resolution, and persist.

    Fails with a CONFLICT error if the task is not currently blocked.
    ``resolution`` is optional; when omitted, the literal text
    "Blocker resolved" is recorded instead.
    """
    request_id = _request_id()
    action = "unblock"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    resolution = payload.get("resolution")

    # Input validation — identifiers are required, resolution is optional.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not (isinstance(task_id, str) and task_id.strip()):
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if resolution is not None and (
        not isinstance(resolution, str) or not resolution.strip()
    ):
        return _validation_error(
            field="resolution",
            action=action,
            message="resolution must be a non-empty string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    spec_key = spec_id.strip()
    task_key = task_id.strip()

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    spec_data, load_error = _load_spec_data(spec_key, specs_dir, request_id)
    if load_error:
        return load_error
    assert spec_data is not None

    start = time.perf_counter()
    # Capture the blocker details up front so the response can echo them.
    blocker = get_blocker_info(spec_data, task_key)
    if blocker is None:
        return asdict(
            error_response(
                f"Task {task_key} is not blocked",
                error_code=ErrorCode.CONFLICT,
                error_type=ErrorType.CONFLICT,
                remediation="Confirm the task is currently blocked before unblocking",
                request_id=request_id,
            )
        )

    if not unblock_task(spec_data, task_key, resolution):
        return asdict(
            error_response(
                f"Failed to unblock task: {task_key}",
                error_code=ErrorCode.CONFLICT,
                error_type=ErrorType.CONFLICT,
                remediation="Confirm the task exists and is currently blocked",
                request_id=request_id,
            )
        )

    # Validation guarantees resolution is None or a non-empty string.
    resolved_note = resolution.strip() if isinstance(resolution, str) else None
    add_journal_entry(
        spec_data,
        title=f"Task Unblocked: {task_key}",
        content=f"Resolved: {resolved_note or 'Blocker resolved'}",
        entry_type="note",
        task_id=task_key,
        author="foundry-mcp",
    )
    sync_computed_fields(spec_data)

    if specs_dir is None or not save_spec(spec_key, spec_data, specs_dir):
        return asdict(
            error_response(
                "Failed to save spec",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check filesystem permissions and retry",
                request_id=request_id,
            )
        )

    elapsed_ms = (time.perf_counter() - start) * 1000
    result = success_response(
        spec_id=spec_key,
        task_id=task_key,
        previous_blocker={
            "type": blocker.blocker_type,
            "description": blocker.description,
        },
        resolution=resolved_note or "Blocker resolved",
        new_status="pending",
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(result)
|
|
1683
|
+
|
|
1684
|
+
|
|
1685
|
+
def _handle_list_blocked(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """List a spec's blocked tasks with cursor-based pagination.

    Tasks are sorted by ``task_id`` so cursors remain stable across calls.
    The cursor encodes the last task id of the previous page; an unknown
    cursor id silently restarts from the beginning of the list.

    Fix: emit the ``.duration_ms`` timer metric on success, matching every
    other task handler (previously only the counter was recorded).
    """
    request_id = _request_id()
    action = "list-blocked"
    spec_id = payload.get("spec_id")
    cursor = payload.get("cursor")
    limit = payload.get("limit")

    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    page_size = normalize_page_size(
        limit,
        default=_TASK_DEFAULT_PAGE_SIZE,
        maximum=_TASK_MAX_PAGE_SIZE,
    )

    start_after_id = None
    if cursor:
        try:
            cursor_data = decode_cursor(cursor)
            start_after_id = cursor_data.get("last_id")
        except CursorError as exc:
            return asdict(
                error_response(
                    f"Invalid cursor: {exc.reason or exc}",
                    error_code=ErrorCode.INVALID_FORMAT,
                    error_type=ErrorType.VALIDATION,
                    request_id=request_id,
                )
            )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None

    start = time.perf_counter()
    blocked_tasks = list_blocked_tasks(spec_data)
    # Stable ordering is what makes the task-id cursor meaningful.
    blocked_tasks.sort(key=lambda entry: entry.get("task_id", ""))
    total_count = len(blocked_tasks)

    if start_after_id:
        # Resume after the cursor's task; an unmatched id leaves the list
        # untouched (restart from the top) rather than erroring.
        start_index = next(
            (
                i
                for i, entry in enumerate(blocked_tasks)
                if entry.get("task_id") == start_after_id
            ),
            None,
        )
        if start_index is not None:
            blocked_tasks = blocked_tasks[start_index + 1 :]

    # Fetch one extra entry to detect whether a further page exists.
    page_tasks = blocked_tasks[: page_size + 1]
    has_more = len(page_tasks) > page_size
    if has_more:
        page_tasks = page_tasks[:page_size]

    next_cursor = None
    if has_more and page_tasks:
        next_cursor = encode_cursor({"last_id": page_tasks[-1].get("task_id")})

    elapsed_ms = (time.perf_counter() - start) * 1000
    warnings = _pagination_warnings(total_count, has_more)
    response = paginated_response(
        data={
            "spec_id": spec_id.strip(),
            "count": len(page_tasks),
            "blocked_tasks": page_tasks,
        },
        cursor=next_cursor,
        has_more=has_more,
        page_size=page_size,
        total_count=total_count,
        warnings=warnings or None,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    # Record both timer and counter, consistent with the sibling handlers.
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return response
|
|
1771
|
+
|
|
1772
|
+
|
|
1773
|
+
def _handle_add(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Add a task node under a parent in a spec's hierarchy.

    Validates all payload fields (including research-specific fields when
    ``task_type == "research"``), then either previews the operation
    (``dry_run``) or delegates to ``add_task`` and returns its result in a
    serialized response envelope.  Accepts ``phase_id`` as an alias for
    ``parent``.
    """
    request_id = _request_id()
    action = "add"
    spec_id = payload.get("spec_id")
    parent = payload.get("parent")
    phase_id = payload.get("phase_id")  # Alias for parent

    # Use phase_id as parent if parent not provided
    if parent is None and phase_id is not None:
        parent = phase_id

    title = payload.get("title")
    description = payload.get("description")
    task_type = payload.get("task_type", "task")
    estimated_hours = payload.get("estimated_hours")
    position = payload.get("position")
    file_path = payload.get("file_path")

    # Research-specific parameters
    research_type = payload.get("research_type")
    blocking_mode = payload.get("blocking_mode")
    query = payload.get("query")

    # Field-by-field validation; the first failing field wins.
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(parent, str) or not parent.strip():
        return _validation_error(
            field="parent",
            action=action,
            message="Provide a non-empty parent node identifier",
            request_id=request_id,
        )
    if not isinstance(title, str) or not title.strip():
        return _validation_error(
            field="title",
            action=action,
            message="Provide a non-empty task title",
            request_id=request_id,
        )
    if description is not None and not isinstance(description, str):
        return _validation_error(
            field="description",
            action=action,
            message="description must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if not isinstance(task_type, str):
        return _validation_error(
            field="task_type",
            action=action,
            message="task_type must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if estimated_hours is not None and not isinstance(estimated_hours, (int, float)):
        return _validation_error(
            field="estimated_hours",
            action=action,
            message="estimated_hours must be a number",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if position is not None and (not isinstance(position, int) or position < 0):
        return _validation_error(
            field="position",
            action=action,
            message="position must be a non-negative integer",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if file_path is not None and not isinstance(file_path, str):
        return _validation_error(
            field="file_path",
            action=action,
            message="file_path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Validate research-specific parameters when task_type is "research"
    if task_type == "research":
        # Local import keeps the research vocabulary out of the module's
        # import-time dependencies for non-research calls.
        from foundry_mcp.core.validation import VALID_RESEARCH_TYPES, RESEARCH_BLOCKING_MODES

        # Each research field gets a type check first, then a value check.
        if research_type is not None and not isinstance(research_type, str):
            return _validation_error(
                field="research_type",
                action=action,
                message="research_type must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        if research_type and research_type not in VALID_RESEARCH_TYPES:
            return _validation_error(
                field="research_type",
                action=action,
                message=f"Must be one of: {', '.join(sorted(VALID_RESEARCH_TYPES))}",
                request_id=request_id,
            )
        if blocking_mode is not None and not isinstance(blocking_mode, str):
            return _validation_error(
                field="blocking_mode",
                action=action,
                message="blocking_mode must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        if blocking_mode and blocking_mode not in RESEARCH_BLOCKING_MODES:
            return _validation_error(
                field="blocking_mode",
                action=action,
                message=f"Must be one of: {', '.join(sorted(RESEARCH_BLOCKING_MODES))}",
                request_id=request_id,
            )
        if query is not None and not isinstance(query, str):
            return _validation_error(
                field="query",
                action=action,
                message="query must be a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    if dry_run_bool:
        # Dry run: only verify the parent exists and echo the would-be
        # inputs back; nothing is written.
        spec_data, spec_error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
        if spec_error:
            return spec_error

        hierarchy = (spec_data or {}).get("hierarchy", {})
        parent_node = (
            hierarchy.get(parent.strip()) if isinstance(hierarchy, dict) else None
        )
        if not isinstance(parent_node, dict):
            elapsed_ms = (time.perf_counter() - start) * 1000
            return asdict(
                error_response(
                    f"Parent node '{parent.strip()}' not found",
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Verify the parent node ID exists in the specification",
                    request_id=request_id,
                    telemetry={"duration_ms": round(elapsed_ms, 2)},
                )
            )

        elapsed_ms = (time.perf_counter() - start) * 1000
        dry_run_data: Dict[str, Any] = {
            "spec_id": spec_id.strip(),
            "parent": parent.strip(),
            "title": title.strip(),
            "task_type": task_type,
            "position": position,
            "file_path": file_path.strip() if file_path else None,
            "dry_run": True,
        }
        # Include research parameters in dry_run response
        if task_type == "research":
            dry_run_data["research_type"] = research_type
            dry_run_data["blocking_mode"] = blocking_mode
            dry_run_data["query"] = query
        response = success_response(
            data=dry_run_data,
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
        _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
        _metrics.counter(
            _metric(action), labels={"status": "success", "dry_run": "true"}
        )
        return asdict(response)

    result, error = add_task(
        spec_id=spec_id.strip(),
        parent_id=parent.strip(),
        title=title.strip(),
        description=description,
        task_type=task_type,
        estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
        position=position,
        file_path=file_path,
        specs_dir=specs_dir,
        # Research-specific parameters
        research_type=research_type,
        blocking_mode=blocking_mode,
        query=query,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        # Map the helper's free-text error onto NOT_FOUND vs VALIDATION
        # based on its wording.
        code = (
            ErrorCode.NOT_FOUND
            if "not found" in (error or "").lower()
            else ErrorCode.VALIDATION_ERROR
        )
        err_type = (
            ErrorType.NOT_FOUND if code == ErrorCode.NOT_FOUND else ErrorType.VALIDATION
        )
        return asdict(
            error_response(
                error or "Failed to add task",
                error_code=code,
                error_type=err_type,
                remediation="Verify parent/task inputs and retry",
                request_id=request_id,
            )
        )

    response = success_response(
        **result,
        spec_id=spec_id.strip(),
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
|
|
2011
|
+
|
|
2012
|
+
|
|
2013
|
+
def _handle_remove(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Remove a task from a spec, optionally cascading to its children.

    With ``dry_run`` set, only confirms the task exists and echoes the
    requested operation back without modifying the spec.
    """
    request_id = _request_id()
    action = "remove"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    cascade = payload.get("cascade", False)

    # Validate required identifiers and flags before touching any state.
    if not (isinstance(spec_id, str) and spec_id.strip()):
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not (isinstance(task_id, str) and task_id.strip()):
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if not isinstance(cascade, bool):
        return _validation_error(
            field="cascade",
            action=action,
            message="cascade must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    spec_key = spec_id.strip()
    task_key = task_id.strip()

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    if dry_run_bool:
        # Preview only: check the task exists, write nothing.
        spec_data, spec_error = _load_spec_data(spec_key, specs_dir, request_id)
        if spec_error:
            return spec_error

        hierarchy = (spec_data or {}).get("hierarchy", {})
        node = hierarchy.get(task_key) if isinstance(hierarchy, dict) else None
        if not isinstance(node, dict):
            elapsed_ms = (time.perf_counter() - start) * 1000
            return asdict(
                error_response(
                    f"Task '{task_key}' not found",
                    error_code=ErrorCode.TASK_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Verify the task ID exists in the specification",
                    request_id=request_id,
                    telemetry={"duration_ms": round(elapsed_ms, 2)},
                )
            )

        elapsed_ms = (time.perf_counter() - start) * 1000
        preview = success_response(
            data={
                "spec_id": spec_key,
                "task_id": task_key,
                "cascade": cascade,
                "dry_run": True,
            },
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
        _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
        _metrics.counter(
            _metric(action), labels={"status": "success", "dry_run": "true"}
        )
        return asdict(preview)

    result, error = remove_task(
        spec_id=spec_key,
        task_id=task_key,
        cascade=cascade,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        # Classify the helper's error text as NOT_FOUND vs VALIDATION.
        not_found = "not found" in (error or "").lower()
        code = ErrorCode.NOT_FOUND if not_found else ErrorCode.VALIDATION_ERROR
        err_type = ErrorType.NOT_FOUND if not_found else ErrorType.VALIDATION
        return asdict(
            error_response(
                error or "Failed to remove task",
                error_code=code,
                error_type=err_type,
                remediation="Verify the task ID and cascade flag",
                request_id=request_id,
            )
        )

    outcome = success_response(
        **result,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(outcome)
|
|
2132
|
+
|
|
2133
|
+
|
|
2134
|
+
def _handle_update_estimate(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Update a task's estimated hours and/or complexity rating.

    At least one of ``estimated_hours`` or ``complexity`` must be supplied.
    Complexity strings are normalized to lowercase.  With ``dry_run`` set,
    returns the current and proposed values without writing anything;
    otherwise delegates to ``update_estimate``.
    """
    request_id = _request_id()
    action = "update-estimate"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    estimated_hours = payload.get("estimated_hours")
    complexity = payload.get("complexity")

    # Identifier and type validation; first failing field wins.
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(task_id, str) or not task_id.strip():
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if estimated_hours is not None and not isinstance(estimated_hours, (int, float)):
        return _validation_error(
            field="estimated_hours",
            action=action,
            message="estimated_hours must be a number",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    if complexity is not None and not isinstance(complexity, str):
        return _validation_error(
            field="complexity",
            action=action,
            message="complexity must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    # Lowercase the complexity; a whitespace-only string collapses to None.
    normalized_complexity: Optional[str] = None
    if isinstance(complexity, str):
        normalized_complexity = complexity.strip().lower() or None

    # Nothing to update if both fields are absent after normalization.
    if estimated_hours is None and normalized_complexity is None:
        return _validation_error(
            field="estimated_hours",
            action=action,
            message="Provide estimated_hours and/or complexity",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
            remediation="Provide hours and/or complexity to update",
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    if dry_run_bool:
        # Preview only: report current values alongside the proposed ones.
        spec_data, spec_error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
        if spec_error:
            return spec_error

        hierarchy = (spec_data or {}).get("hierarchy", {})
        task = hierarchy.get(task_id.strip()) if isinstance(hierarchy, dict) else None
        if not isinstance(task, dict):
            return asdict(
                error_response(
                    f"Task '{task_id.strip()}' not found",
                    error_code=ErrorCode.TASK_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Verify the task ID exists in the specification",
                    request_id=request_id,
                )
            )

        # Tolerate a malformed/missing metadata entry on the task node.
        metadata_candidate = task.get("metadata")
        if isinstance(metadata_candidate, dict):
            metadata: Dict[str, Any] = metadata_candidate
        else:
            metadata = {}
        data: Dict[str, Any] = {
            "spec_id": spec_id.strip(),
            "task_id": task_id.strip(),
            "dry_run": True,
            "previous_hours": metadata.get("estimated_hours"),
            "previous_complexity": metadata.get("complexity"),
        }
        # Only echo fields the caller actually asked to change.
        if estimated_hours is not None:
            data["hours"] = float(estimated_hours)
        if normalized_complexity is not None:
            data["complexity"] = normalized_complexity

        elapsed_ms = (time.perf_counter() - start) * 1000
        response = success_response(
            data=data,
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
        _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
        _metrics.counter(
            _metric(action), labels={"status": "success", "dry_run": "true"}
        )
        return asdict(response)

    result, error = update_estimate(
        spec_id=spec_id.strip(),
        task_id=task_id.strip(),
        estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
        complexity=normalized_complexity,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        # Map the helper's free-text error onto NOT_FOUND vs VALIDATION.
        code = (
            ErrorCode.NOT_FOUND
            if "not found" in (error or "").lower()
            else ErrorCode.VALIDATION_ERROR
        )
        err_type = (
            ErrorType.NOT_FOUND if code == ErrorCode.NOT_FOUND else ErrorType.VALIDATION
        )
        return asdict(
            error_response(
                error or "Failed to update estimate",
                error_code=code,
                error_type=err_type,
                remediation="Provide estimated_hours and/or a valid complexity",
                request_id=request_id,
            )
        )

    response = success_response(
        **result,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
|
|
2287
|
+
|
|
2288
|
+
|
|
2289
|
+
def _handle_update_metadata(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Update metadata fields on a single task.

    Validates the payload, then either simulates the update (``dry_run=True``)
    by reporting which fields would change, or delegates the write to
    ``update_task_metadata``. Returns a serialized success/error response
    envelope with duration telemetry; emits timer/counter metrics on success.
    """
    request_id = _request_id()
    action = "update-metadata"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")

    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(task_id, str) or not task_id.strip():
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    custom_metadata = payload.get("custom_metadata")
    if custom_metadata is not None and not isinstance(custom_metadata, dict):
        return _validation_error(
            field="custom_metadata",
            action=action,
            message="custom_metadata must be an object",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
            remediation="Provide custom_metadata as a JSON object",
        )

    acceptance_criteria = payload.get("acceptance_criteria")
    if acceptance_criteria is not None and not isinstance(acceptance_criteria, list):
        return _validation_error(
            field="acceptance_criteria",
            action=action,
            message="acceptance_criteria must be a list of strings",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Single source of truth for the simple (non-custom) metadata fields,
    # in the order they are reported back for dry-run previews. This
    # replaces two previously duplicated hand-written field lists.
    metadata_fields = (
        "title",
        "file_path",
        "description",
        "acceptance_criteria",
        "task_category",
        "actual_hours",
        "status_note",
        "verification_type",
        "command",
    )
    has_update = any(
        payload.get(name) is not None for name in metadata_fields
    ) or bool(custom_metadata)
    if not has_update:
        return _validation_error(
            field="title",
            action=action,
            message="Provide at least one field to update",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
            remediation="Provide title, file_path, description, acceptance_criteria, task_category, actual_hours, status_note, verification_type, command, and/or custom_metadata",
        )

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()
    if dry_run_bool:
        # Dry-run: load the spec just far enough to confirm the task exists
        # and report which fields would change, without writing anything.
        spec_data, spec_error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
        if spec_error:
            return spec_error

        hierarchy = (spec_data or {}).get("hierarchy", {})
        task = hierarchy.get(task_id.strip()) if isinstance(hierarchy, dict) else None
        if not isinstance(task, dict):
            return asdict(
                error_response(
                    f"Task '{task_id.strip()}' not found",
                    error_code=ErrorCode.TASK_NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Verify the task ID exists in the specification",
                    request_id=request_id,
                )
            )

        # Preserve field order from metadata_fields, then append custom
        # metadata keys sorted for deterministic output.
        fields_updated: List[str] = [
            name for name in metadata_fields if payload.get(name) is not None
        ]
        if custom_metadata:
            fields_updated.extend(sorted(custom_metadata.keys()))

        elapsed_ms = (time.perf_counter() - start) * 1000
        response = success_response(
            data={
                "spec_id": spec_id.strip(),
                "task_id": task_id.strip(),
                "fields_updated": fields_updated,
                "dry_run": True,
            },
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
        _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
        _metrics.counter(
            _metric(action), labels={"status": "success", "dry_run": "true"}
        )
        return asdict(response)

    result, error = update_task_metadata(
        spec_id=spec_id.strip(),
        task_id=task_id.strip(),
        title=payload.get("title"),
        file_path=payload.get("file_path"),
        description=payload.get("description"),
        acceptance_criteria=acceptance_criteria,
        task_category=payload.get("task_category"),
        actual_hours=payload.get("actual_hours"),
        status_note=payload.get("status_note"),
        verification_type=payload.get("verification_type"),
        command=payload.get("command"),
        custom_metadata=custom_metadata,
        # Always False on this path: the dry-run case returned above.
        dry_run=dry_run_bool,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        code = (
            ErrorCode.NOT_FOUND
            if "not found" in (error or "").lower()
            else ErrorCode.VALIDATION_ERROR
        )
        err_type = (
            ErrorType.NOT_FOUND if code == ErrorCode.NOT_FOUND else ErrorType.VALIDATION
        )
        return asdict(
            error_response(
                error or "Failed to update metadata",
                error_code=code,
                error_type=err_type,
                remediation="Provide at least one metadata field to update",
                request_id=request_id,
            )
        )

    response = success_response(
        **result,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
|
|
2474
|
+
|
|
2475
|
+
|
|
2476
|
+
def _handle_move(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Move a task to a new position or parent.

    Supports two modes:
    1. Reorder within parent: only specify position (new_parent=None)
    2. Reparent to different phase/task: specify new_parent, optionally position

    Updates task counts on affected parents. Prevents circular references.
    Emits warnings for cross-phase moves that might affect dependencies.
    """
    request_id = _request_id()
    action = "move"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    new_parent = payload.get("parent")  # Target parent (phase or task ID)
    position = payload.get("position")  # 1-based position in children list

    # Validate required fields
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(task_id, str) or not task_id.strip():
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )

    # Validate optional new_parent
    if new_parent is not None and (
        not isinstance(new_parent, str) or not new_parent.strip()
    ):
        return _validation_error(
            field="parent",
            action=action,
            message="parent must be a non-empty string if provided",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Validate optional position (must be positive integer)
    # NOTE: isinstance(True, int) is True in Python, so a boolean position
    # would pass this check — presumably filtered upstream; TODO confirm.
    if position is not None:
        if not isinstance(position, int) or position < 1:
            return _validation_error(
                field="position",
                action=action,
                message="position must be a positive integer (1-based)",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    # Validate dry_run
    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()

    # Call the core move_task function
    result, error, warnings = move_task(
        spec_id=spec_id.strip(),
        task_id=task_id.strip(),
        new_parent=new_parent.strip() if new_parent else None,
        position=position,
        dry_run=dry_run_bool,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        # Determine appropriate error code based on error message.
        # The substring checks below are order-sensitive: "invalid position"
        # must be tested before the broader "invalid" needle, and "not found"
        # is checked first so lookup failures win over validation matches.
        error_lower = (error or "").lower()
        if "not found" in error_lower:
            code = ErrorCode.TASK_NOT_FOUND
            err_type = ErrorType.NOT_FOUND
            remediation = "Verify the task ID and parent ID exist in the specification"
        elif "circular" in error_lower:
            code = ErrorCode.CIRCULAR_DEPENDENCY
            err_type = ErrorType.CONFLICT
            remediation = "Task cannot be moved under its own descendants"
        elif "invalid position" in error_lower:
            code = ErrorCode.INVALID_POSITION
            err_type = ErrorType.VALIDATION
            remediation = "Specify a valid position within the children list"
        elif "cannot move" in error_lower or "invalid" in error_lower:
            code = ErrorCode.INVALID_PARENT
            err_type = ErrorType.VALIDATION
            remediation = "Specify a valid phase, group, or task as the target parent"
        else:
            code = ErrorCode.VALIDATION_ERROR
            err_type = ErrorType.VALIDATION
            remediation = "Check task ID, parent, and position parameters"

        return asdict(
            error_response(
                error or "Failed to move task",
                error_code=code,
                error_type=err_type,
                remediation=remediation,
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    # Build success response with warnings if any
    response = success_response(
        **result,
        request_id=request_id,
        warnings=warnings if warnings else None,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(
        _metric(action),
        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
    )
    return asdict(response)
|
|
2610
|
+
|
|
2611
|
+
|
|
2612
|
+
def _handle_add_dependency(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Add a dependency relationship between two tasks.

    Manages blocks, blocked_by, and depends relationships.
    Updates both source and target tasks atomically.

    Dependency types:
    - blocks: Source task blocks target (target cannot start until source completes)
    - blocked_by: Source task is blocked by target (source cannot start until target completes)
    - depends: Soft dependency (informational, doesn't block)
    """
    request_id = _request_id()
    action = "add-dependency"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")  # Source task
    target_id = payload.get("target_id")  # Target task
    dependency_type = payload.get("dependency_type", "blocks")

    # Required string identifiers, validated in a fixed order so the
    # first missing one is the one reported to the caller.
    required = (
        ("spec_id", spec_id, "Provide a non-empty spec identifier"),
        ("task_id", task_id, "Provide a non-empty source task identifier"),
        ("target_id", target_id, "Provide a non-empty target task identifier"),
    )
    for field_name, value, message in required:
        if not isinstance(value, str) or not value.strip():
            return _validation_error(
                field=field_name,
                action=action,
                message=message,
                request_id=request_id,
            )

    # The relationship kind must be one of the known dependency types.
    valid_types = ("blocks", "blocked_by", "depends")
    if dependency_type not in valid_types:
        return _validation_error(
            field="dependency_type",
            action=action,
            message=f"Must be one of: {', '.join(valid_types)}",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()

    # Delegate the actual mutation to the core dependency manager.
    result, error = manage_task_dependency(
        spec_id=spec_id.strip(),
        source_task_id=task_id.strip(),
        target_task_id=target_id.strip(),
        dependency_type=dependency_type,
        action="add",
        dry_run=dry_run_bool,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        # Classify the failure from the error text. The first matching
        # needle wins, so the rule order below is significant.
        reason = (error or "").lower()
        rules = (
            ("not found", ErrorCode.TASK_NOT_FOUND, ErrorType.NOT_FOUND,
             "Verify both task IDs exist in the specification"),
            ("circular", ErrorCode.CIRCULAR_DEPENDENCY, ErrorType.CONFLICT,
             "This dependency would create a cycle"),
            ("itself", ErrorCode.SELF_REFERENCE, ErrorType.VALIDATION,
             "A task cannot depend on itself"),
            ("already exists", ErrorCode.DUPLICATE_ENTRY, ErrorType.CONFLICT,
             "This dependency already exists"),
        )
        code, err_type, remediation = next(
            (
                (rule_code, rule_type, rule_hint)
                for needle, rule_code, rule_type, rule_hint in rules
                if needle in reason
            ),
            (ErrorCode.VALIDATION_ERROR, ErrorType.VALIDATION,
             "Check task IDs and dependency type"),
        )

        return asdict(
            error_response(
                error or "Failed to add dependency",
                error_code=code,
                error_type=err_type,
                remediation=remediation,
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    response = success_response(
        **result,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(
        _metric(action),
        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
    )
    return asdict(response)
|
|
2742
|
+
|
|
2743
|
+
|
|
2744
|
+
def _handle_remove_dependency(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Remove a dependency relationship between two tasks.

    Removes blocks, blocked_by, or depends relationships.
    Updates both source and target tasks atomically for reciprocal relationships.
    """
    request_id = _request_id()
    action = "remove-dependency"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")  # Source task
    target_id = payload.get("target_id")  # Target task
    dependency_type = payload.get("dependency_type", "blocks")

    # Required string identifiers, checked in a fixed order so the first
    # invalid one determines the reported field.
    required = (
        ("spec_id", spec_id, "Provide a non-empty spec identifier"),
        ("task_id", task_id, "Provide a non-empty source task identifier"),
        ("target_id", target_id, "Provide a non-empty target task identifier"),
    )
    for field_name, value, message in required:
        if not isinstance(value, str) or not value.strip():
            return _validation_error(
                field=field_name,
                action=action,
                message=message,
                request_id=request_id,
            )

    # The relationship kind must be one of the known dependency types.
    valid_types = ("blocks", "blocked_by", "depends")
    if dependency_type not in valid_types:
        return _validation_error(
            field="dependency_type",
            action=action,
            message=f"Must be one of: {', '.join(valid_types)}",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    specs_dir = _resolve_specs_dir(config, payload.get("workspace"))
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()

    # Delegate the actual mutation to the core dependency manager.
    result, error = manage_task_dependency(
        spec_id=spec_id.strip(),
        source_task_id=task_id.strip(),
        target_task_id=target_id.strip(),
        dependency_type=dependency_type,
        action="remove",
        dry_run=dry_run_bool,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        # Classify the failure from the error text. Rule order matters:
        # "does not exist" (missing relationship) must be matched before
        # the broader "not found" (missing task/spec).
        reason = (error or "").lower()
        rules = (
            ("does not exist", ErrorCode.DEPENDENCY_NOT_FOUND, ErrorType.NOT_FOUND,
             "This dependency does not exist"),
            ("not found", ErrorCode.TASK_NOT_FOUND, ErrorType.NOT_FOUND,
             "Verify both task IDs exist in the specification"),
        )
        code, err_type, remediation = next(
            (
                (rule_code, rule_type, rule_hint)
                for needle, rule_code, rule_type, rule_hint in rules
                if needle in reason
            ),
            (ErrorCode.VALIDATION_ERROR, ErrorType.VALIDATION,
             "Check task IDs and dependency type"),
        )

        return asdict(
            error_response(
                error or "Failed to remove dependency",
                error_code=code,
                error_type=err_type,
                remediation=remediation,
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    response = success_response(
        **result,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(
        _metric(action),
        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
    )
    return asdict(response)
|
|
2863
|
+
|
|
2864
|
+
|
|
2865
|
+
def _handle_add_requirement(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Add a structured requirement to a task's metadata.

    Requirements are stored in metadata.requirements as a list of objects:
    [{"id": "req-1", "type": "acceptance", "text": "..."}, ...]

    Each requirement has:
    - id: Auto-generated unique ID (e.g., "req-1", "req-2")
    - type: Requirement type (acceptance, technical, constraint)
    - text: Requirement description text
    """
    request_id = _request_id()
    action = "add-requirement"
    spec_id = payload.get("spec_id")
    task_id = payload.get("task_id")
    requirement_type = payload.get("requirement_type")
    text = payload.get("text")

    # Validate required fields
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    if not isinstance(task_id, str) or not task_id.strip():
        return _validation_error(
            field="task_id",
            action=action,
            message="Provide a non-empty task identifier",
            request_id=request_id,
        )
    if not isinstance(requirement_type, str) or not requirement_type.strip():
        return _validation_error(
            field="requirement_type",
            action=action,
            message="Provide a requirement type",
            request_id=request_id,
        )

    # Validate requirement_type against the allowed set (normalized to
    # lowercase so callers may pass any casing).
    requirement_type_lower = requirement_type.lower().strip()
    if requirement_type_lower not in REQUIREMENT_TYPES:
        return _validation_error(
            field="requirement_type",
            action=action,
            message=f"Must be one of: {', '.join(REQUIREMENT_TYPES)}",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # Validate text
    if not isinstance(text, str) or not text.strip():
        return _validation_error(
            field="text",
            action=action,
            message="Provide non-empty requirement text",
            request_id=request_id,
        )

    # Validate dry_run
    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    dry_run_bool = bool(dry_run)

    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    start = time.perf_counter()

    # Call the core function
    result, error = update_task_requirements(
        spec_id=spec_id.strip(),
        task_id=task_id.strip(),
        action="add",
        requirement_type=requirement_type_lower,
        text=text.strip(),
        dry_run=dry_run_bool,
        specs_dir=specs_dir,
    )
    elapsed_ms = (time.perf_counter() - start) * 1000

    if error or result is None:
        # Determine appropriate error code based on error message.
        # "not found" is further split: mentions of "specification" map to
        # SPEC_NOT_FOUND, anything else to TASK_NOT_FOUND.
        error_lower = (error or "").lower()
        if "not found" in error_lower:
            if "specification" in error_lower:
                code = ErrorCode.SPEC_NOT_FOUND
                err_type = ErrorType.NOT_FOUND
                remediation = "Verify the spec ID exists"
            else:
                code = ErrorCode.TASK_NOT_FOUND
                err_type = ErrorType.NOT_FOUND
                remediation = "Verify the task ID exists in the specification"
        elif "maximum" in error_lower or "limit" in error_lower:
            code = ErrorCode.LIMIT_EXCEEDED
            err_type = ErrorType.VALIDATION
            remediation = "Remove some requirements before adding new ones"
        elif "requirement_type" in error_lower:
            code = ErrorCode.INVALID_FORMAT
            err_type = ErrorType.VALIDATION
            remediation = f"Use one of: {', '.join(REQUIREMENT_TYPES)}"
        else:
            code = ErrorCode.VALIDATION_ERROR
            err_type = ErrorType.VALIDATION
            remediation = "Check task ID and requirement fields"

        return asdict(
            error_response(
                error or "Failed to add requirement",
                error_code=code,
                error_type=err_type,
                remediation=remediation,
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    # Build success response
    response = success_response(
        **result,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(
        _metric(action),
        labels={"status": "success", "dry_run": str(dry_run_bool).lower()},
    )
    return asdict(response)
|
|
3005
|
+
|
|
3006
|
+
|
|
3007
|
+
# Hierarchy node types recognized by the batch-operation handlers below.
_VALID_NODE_TYPES = {"task", "verify", "phase", "subtask"}
# Note: VALID_VERIFICATION_TYPES imported from foundry_mcp.core.validation
|
|
3009
|
+
|
|
3010
|
+
|
|
3011
|
+
def _match_nodes_for_batch(
|
|
3012
|
+
hierarchy: Dict[str, Any],
|
|
3013
|
+
*,
|
|
3014
|
+
phase_id: Optional[str] = None,
|
|
3015
|
+
pattern: Optional[str] = None,
|
|
3016
|
+
node_type: Optional[str] = None,
|
|
3017
|
+
) -> List[str]:
|
|
3018
|
+
"""Filter nodes by phase_id, regex pattern on title/id, and/or node_type.
|
|
3019
|
+
|
|
3020
|
+
All provided filters are combined with AND logic.
|
|
3021
|
+
Returns list of matching node IDs.
|
|
3022
|
+
"""
|
|
3023
|
+
matched: List[str] = []
|
|
3024
|
+
compiled_pattern = None
|
|
3025
|
+
if pattern:
|
|
3026
|
+
try:
|
|
3027
|
+
compiled_pattern = re.compile(pattern, re.IGNORECASE)
|
|
3028
|
+
except re.error:
|
|
3029
|
+
return [] # Invalid regex returns empty
|
|
3030
|
+
|
|
3031
|
+
for node_id, node_data in hierarchy.items():
|
|
3032
|
+
if node_id == "spec-root":
|
|
3033
|
+
continue
|
|
3034
|
+
|
|
3035
|
+
# Filter by node_type if specified
|
|
3036
|
+
if node_type and node_data.get("type") != node_type:
|
|
3037
|
+
continue
|
|
3038
|
+
|
|
3039
|
+
# Filter by phase_id if specified (must be under that phase)
|
|
3040
|
+
if phase_id:
|
|
3041
|
+
node_parent = node_data.get("parent")
|
|
3042
|
+
# Direct children of the phase
|
|
3043
|
+
if node_parent != phase_id:
|
|
3044
|
+
# Check if it's a nested child (e.g., subtask under task under phase)
|
|
3045
|
+
parent_node = hierarchy.get(node_parent, {})
|
|
3046
|
+
if parent_node.get("parent") != phase_id:
|
|
3047
|
+
continue
|
|
3048
|
+
|
|
3049
|
+
# Filter by regex pattern on title or node_id
|
|
3050
|
+
if compiled_pattern:
|
|
3051
|
+
title = node_data.get("title", "")
|
|
3052
|
+
if not (compiled_pattern.search(title) or compiled_pattern.search(node_id)):
|
|
3053
|
+
continue
|
|
3054
|
+
|
|
3055
|
+
matched.append(node_id)
|
|
3056
|
+
|
|
3057
|
+
return sorted(matched)
|
|
3058
|
+
|
|
3059
|
+
|
|
3060
|
+
def _handle_metadata_batch(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Batch update metadata across multiple tasks matching specified criteria.

    Filters (combined with AND logic):
    - status_filter: Filter by task status (pending, in_progress, completed, blocked)
    - parent_filter: Filter by parent node ID (e.g., phase-1, task-2-1)
    - pattern: Regex pattern to match task titles/IDs

    Legacy filters (deprecated, use parent_filter instead):
    - phase_id: Alias for parent_filter

    Metadata fields supported:
    - description, file_path, estimated_hours, category, labels, owners
    - update_metadata: Dict for custom metadata fields (verification_type, command, etc.)

    Returns a response-v2 envelope dict; validation failures short-circuit
    with a structured validation error before any filesystem work happens.
    """
    request_id = _request_id()
    action = "metadata-batch"
    start = time.perf_counter()

    # Required: spec_id
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )
    spec_id = spec_id.strip()

    # Extract filter parameters
    status_filter = payload.get("status_filter")
    parent_filter = payload.get("parent_filter")
    phase_id = payload.get("phase_id")  # Legacy alias for parent_filter
    pattern = payload.get("pattern")

    # Use phase_id as parent_filter if parent_filter not provided (backwards compat)
    if parent_filter is None and phase_id is not None:
        parent_filter = phase_id

    # Validate status_filter against the module-level allowed-status set.
    if status_filter is not None:
        if not isinstance(status_filter, str) or status_filter not in _ALLOWED_STATUS:
            return _validation_error(
                field="status_filter",
                action=action,
                message=f"status_filter must be one of: {sorted(_ALLOWED_STATUS)}",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    # Validate parent_filter
    if parent_filter is not None:
        if not isinstance(parent_filter, str) or not parent_filter.strip():
            return _validation_error(
                field="parent_filter",
                action=action,
                message="parent_filter must be a non-empty string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        parent_filter = parent_filter.strip()

    # Validate pattern: must be a non-blank string AND a compilable regex.
    if pattern is not None:
        if not isinstance(pattern, str) or not pattern.strip():
            return _validation_error(
                field="pattern",
                action=action,
                message="pattern must be a non-empty string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        try:
            re.compile(pattern)
        except re.error as exc:
            return _validation_error(
                field="pattern",
                action=action,
                message=f"Invalid regex pattern: {exc}",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        pattern = pattern.strip()

    # At least one filter must be provided (prevents accidental update-all).
    if not any([status_filter, parent_filter, pattern]):
        return _validation_error(
            field="status_filter",
            action=action,
            message="Provide at least one filter: status_filter, parent_filter, or pattern",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
            remediation="Specify status_filter, parent_filter (or phase_id), and/or pattern to target tasks",
        )

    # Extract metadata fields
    description = payload.get("description")
    file_path = payload.get("file_path")
    estimated_hours = payload.get("estimated_hours")
    category = payload.get("category")
    labels = payload.get("labels")
    owners = payload.get("owners")
    update_metadata = payload.get("update_metadata")  # Dict for custom fields
    dry_run = payload.get("dry_run", False)

    # Validate metadata fields (type checks only; semantics are the helper's job)
    if description is not None and not isinstance(description, str):
        return _validation_error(
            field="description",
            action=action,
            message="description must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    if file_path is not None and not isinstance(file_path, str):
        return _validation_error(
            field="file_path",
            action=action,
            message="file_path must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    if estimated_hours is not None:
        if not isinstance(estimated_hours, (int, float)) or estimated_hours < 0:
            return _validation_error(
                field="estimated_hours",
                action=action,
                message="estimated_hours must be a non-negative number",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    if category is not None and not isinstance(category, str):
        return _validation_error(
            field="category",
            action=action,
            message="category must be a string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    if labels is not None:
        if not isinstance(labels, dict) or not all(
            isinstance(k, str) and isinstance(v, str) for k, v in labels.items()
        ):
            return _validation_error(
                field="labels",
                action=action,
                message="labels must be a dict with string keys and values",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    if owners is not None:
        if not isinstance(owners, list) or not all(isinstance(o, str) for o in owners):
            return _validation_error(
                field="owners",
                action=action,
                message="owners must be a list of strings",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    if update_metadata is not None and not isinstance(update_metadata, dict):
        return _validation_error(
            field="update_metadata",
            action=action,
            message="update_metadata must be a dict",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # At least one metadata field must be provided.
    # NOTE: update_metadata is checked by truthiness here (an empty dict
    # deliberately does not count as a metadata update).
    has_metadata = any([
        description is not None,
        file_path is not None,
        estimated_hours is not None,
        category is not None,
        labels is not None,
        owners is not None,
        update_metadata,
    ])
    if not has_metadata:
        return _validation_error(
            field="description",
            action=action,
            message="Provide at least one metadata field to update",
            request_id=request_id,
            code=ErrorCode.MISSING_REQUIRED,
            remediation="Specify description, file_path, estimated_hours, category, labels, owners, or update_metadata",
        )

    # Resolve specs directory
    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    if specs_dir is None:
        return _specs_dir_missing_error(request_id)

    # Delegate the actual matching + mutation to the core helper.
    result, error = batch_update_tasks(
        spec_id,
        status_filter=status_filter,
        parent_filter=parent_filter,
        pattern=pattern,
        description=description,
        file_path=file_path,
        estimated_hours=float(estimated_hours) if estimated_hours is not None else None,
        category=category,
        labels=labels,
        owners=owners,
        custom_metadata=update_metadata,
        dry_run=bool(dry_run),
        specs_dir=specs_dir,
    )

    elapsed_ms = (time.perf_counter() - start) * 1000

    if error:
        _metrics.counter(_metric(action), labels={"status": "error"})
        # Map helper errors to response-v2 format.
        # NOTE(review): mapping is heuristic, keyed on substrings of the
        # helper's message text — keep these in sync with batch_update_tasks.
        if "not found" in error.lower():
            return asdict(
                error_response(
                    error,
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Check spec_id and parent_filter values",
                    request_id=request_id,
                )
            )
        if "at least one" in error.lower() or "must be" in error.lower():
            return asdict(
                error_response(
                    error,
                    error_code=ErrorCode.VALIDATION_ERROR,
                    error_type=ErrorType.VALIDATION,
                    remediation="Check filter and metadata parameters",
                    request_id=request_id,
                )
            )
        return asdict(
            error_response(
                error,
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check filesystem permissions and retry",
                request_id=request_id,
            )
        )

    assert result is not None

    # Build response with response-v2 envelope
    warnings: List[str] = result.get("warnings", [])
    # Warn on very broad updates, but don't duplicate helper warnings.
    if result["matched_count"] > _TASK_WARNING_THRESHOLD and not warnings:
        warnings.append(
            f"Updated {result['matched_count']} tasks; consider using more specific filters."
        )

    response = success_response(
        spec_id=result["spec_id"],
        matched_count=result["matched_count"],
        updated_count=result["updated_count"],
        skipped_count=result.get("skipped_count", 0),
        nodes=result["nodes"],
        filters=result["filters"],
        metadata_applied=result["metadata_applied"],
        dry_run=result["dry_run"],
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )

    # Warnings and skipped-task details ride in the envelope post-hoc.
    response_dict = asdict(response)
    if warnings:
        meta = response_dict.setdefault("meta", {})
        meta["warnings"] = warnings
    if result.get("skipped_tasks"):
        response_dict["data"]["skipped_tasks"] = result["skipped_tasks"]

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return response_dict
|
|
3354
|
+
|
|
3355
|
+
|
|
3356
|
+
def _handle_fix_verification_types(
    *, config: ServerConfig, payload: Dict[str, Any]
) -> dict:
    """Normalize verification types across every verify node in a spec.

    Repairs applied per verify node:
    - missing verification_type      -> "run-tests" (default)
    - legacy value                   -> mapped via VERIFICATION_TYPE_MAPPING
    - unrecognized value             -> "manual" (fallback)

    When dry_run is true, fixes are reported but never persisted.
    """
    request_id = _request_id()
    action = "fix-verification-types"

    # spec_id is mandatory and must be non-blank.
    spec_id = payload.get("spec_id")
    if not isinstance(spec_id, str) or not spec_id.strip():
        return _validation_error(
            field="spec_id",
            action=action,
            message="Provide a non-empty spec identifier",
            request_id=request_id,
        )

    dry_run = payload.get("dry_run", False)
    if dry_run is not None and not isinstance(dry_run, bool):
        return _validation_error(
            field="dry_run",
            action=action,
            message="dry_run must be a boolean",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    preview_only = bool(dry_run)

    # Load the spec from the resolved workspace.
    workspace = payload.get("workspace")
    specs_dir = _resolve_specs_dir(config, workspace)
    spec_data, error = _load_spec_data(spec_id.strip(), specs_dir, request_id)
    if error:
        return error
    assert spec_data is not None

    start = time.perf_counter()
    hierarchy = spec_data.get("hierarchy", {})

    def _classify(value: Optional[str]) -> Optional[tuple]:
        """Return (issue, replacement) when *value* needs fixing, else None."""
        if value is None:
            return ("missing", "run-tests")
        if value in VALID_VERIFICATION_TYPES:
            return None
        remapped = VERIFICATION_TYPE_MAPPING.get(value)
        if remapped:
            return ("legacy", remapped)
        return ("invalid", "manual")

    # Walk verify nodes, recording (and optionally applying) each fix.
    fixes: List[Dict[str, Any]] = []
    for node_id, node_data in hierarchy.items():
        if node_data.get("type") != "verify":
            continue

        current = node_data.get("metadata", {}).get("verification_type")
        verdict = _classify(current)
        if verdict is None:
            continue
        issue, replacement = verdict

        fixes.append(
            {
                "node_id": node_id,
                "title": node_data.get("title", ""),
                "issue": issue,
                "old_value": current,
                "new_value": replacement,
            }
        )
        if not preview_only:
            # Mutate the node in place, creating the metadata dict if absent.
            node_data.setdefault("metadata", {})["verification_type"] = replacement

    # Persist only when something changed and this is not a preview.
    if not preview_only and fixes:
        if specs_dir is None or not save_spec(spec_id.strip(), spec_data, specs_dir):
            return asdict(
                error_response(
                    "Failed to save spec after fixing verification types",
                    error_code=ErrorCode.INTERNAL_ERROR,
                    error_type=ErrorType.INTERNAL,
                    remediation="Check filesystem permissions and retry",
                    request_id=request_id,
                )
            )

    elapsed_ms = (time.perf_counter() - start) * 1000

    # Tally fixes per issue category for the summary block.
    tally = {"missing": 0, "legacy": 0, "invalid": 0}
    for fix in fixes:
        tally[fix["issue"]] += 1

    response = success_response(
        spec_id=spec_id.strip(),
        total_fixes=len(fixes),
        applied_count=len(fixes) if not preview_only else 0,
        fixes=fixes,
        summary={
            "missing_set_to_run_tests": tally["missing"],
            "legacy_mapped": tally["legacy"],
            "invalid_set_to_manual": tally["invalid"],
        },
        valid_types=sorted(VALID_VERIFICATION_TYPES),
        legacy_mappings=VERIFICATION_TYPE_MAPPING,
        dry_run=preview_only,
        request_id=request_id,
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )

    _metrics.timer(_metric(action) + ".duration_ms", elapsed_ms)
    _metrics.counter(_metric(action), labels={"status": "success"})
    return asdict(response)
|
|
3494
|
+
|
|
3495
|
+
|
|
3496
|
+
def _handle_session_config(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
    """Read or mutate the ephemeral autonomous-session preferences.

    Payload keys:
        get: when truthy, report the current session state without changes.
        auto_mode: boolean enabling (true) or disabling (false) autonomous
            mode; transitions update timestamps, counters, and pause reason.

    Returns:
        The session configuration (including autonomous state) in a
        response-v2 envelope.
    """
    from datetime import datetime, timezone

    request_id = _request_id()
    action = "session-config"
    start = time.perf_counter()

    read_only = payload.get("get", False)
    requested_mode = payload.get("auto_mode")

    # Make sure a session exists and carries an autonomous sub-state.
    session = get_context_tracker().get_or_create_session()
    if session.autonomous is None:
        session.autonomous = AutonomousSession()

    if read_only:
        elapsed_ms = (time.perf_counter() - start) * 1000
        _metrics.counter(_metric(action), labels={"status": "success", "operation": "get"})
        return asdict(
            success_response(
                session_id=session.session_id,
                autonomous=session.autonomous.to_dict(),
                message="Current session configuration",
                request_id=request_id,
                telemetry={"duration_ms": round(elapsed_ms, 2)},
            )
        )

    if requested_mode is not None:
        if not isinstance(requested_mode, bool):
            return _validation_error(
                field="auto_mode",
                action=action,
                message="auto_mode must be a boolean (true/false)",
                request_id=request_id,
            )

        was_enabled = session.autonomous.enabled
        session.autonomous.enabled = requested_mode

        if requested_mode and not was_enabled:
            # Transition off -> on: stamp start time, reset progress state.
            session.autonomous.started_at = (
                datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
            )
            session.autonomous.tasks_completed = 0
            session.autonomous.pause_reason = None
        elif was_enabled and not requested_mode:
            # Transition on -> off: record that the user paused it.
            session.autonomous.pause_reason = "user"

    elapsed_ms = (time.perf_counter() - start) * 1000
    _metrics.counter(_metric(action), labels={"status": "success", "operation": "set"})
    return asdict(
        success_response(
            session_id=session.session_id,
            autonomous=session.autonomous.to_dict(),
            message="Autonomous mode enabled" if session.autonomous.enabled else "Autonomous mode disabled",
            request_id=request_id,
            telemetry={"duration_ms": round(elapsed_ms, 2)},
        )
    )
|
|
3576
|
+
|
|
3577
|
+
|
|
3578
|
+
# Registry of every action the consolidated "task" tool supports. Each entry
# binds an action name to its handler plus a one-line summary used in help
# output. Order here is presentation order only; dispatch is by name.
_ACTION_DEFINITIONS = [
    ActionDefinition(
        name="prepare",
        handler=_handle_prepare,
        summary="Prepare next actionable task context",
    ),
    ActionDefinition(
        name="prepare-batch",
        handler=_handle_prepare_batch,
        summary="Prepare multiple independent tasks for parallel execution",
    ),
    ActionDefinition(
        name="start-batch",
        handler=_handle_start_batch,
        summary="Atomically start multiple tasks as in_progress",
    ),
    ActionDefinition(
        name="complete-batch",
        handler=_handle_complete_batch,
        summary="Complete multiple tasks with partial failure support",
    ),
    ActionDefinition(
        name="reset-batch",
        handler=_handle_reset_batch,
        summary="Reset stale or specified in_progress tasks to pending",
    ),
    ActionDefinition(
        name="next", handler=_handle_next, summary="Return the next actionable task"
    ),
    ActionDefinition(
        name="info", handler=_handle_info, summary="Fetch task metadata by ID"
    ),
    ActionDefinition(
        name="check-deps",
        handler=_handle_check_deps,
        summary="Analyze task dependencies and blockers",
    ),
    ActionDefinition(name="start", handler=_handle_start, summary="Start a task"),
    ActionDefinition(
        name="complete", handler=_handle_complete, summary="Complete a task"
    ),
    ActionDefinition(
        name="update-status",
        handler=_handle_update_status,
        summary="Update task status",
    ),
    ActionDefinition(name="block", handler=_handle_block, summary="Block a task"),
    ActionDefinition(name="unblock", handler=_handle_unblock, summary="Unblock a task"),
    ActionDefinition(
        name="list-blocked",
        handler=_handle_list_blocked,
        summary="List blocked tasks",
    ),
    ActionDefinition(name="add", handler=_handle_add, summary="Add a task"),
    ActionDefinition(name="remove", handler=_handle_remove, summary="Remove a task"),
    ActionDefinition(
        name="move",
        handler=_handle_move,
        summary="Move task to new position or parent",
    ),
    ActionDefinition(
        name="add-dependency",
        handler=_handle_add_dependency,
        summary="Add a dependency between two tasks",
    ),
    ActionDefinition(
        name="remove-dependency",
        handler=_handle_remove_dependency,
        summary="Remove a dependency between two tasks",
    ),
    ActionDefinition(
        name="add-requirement",
        handler=_handle_add_requirement,
        summary="Add a structured requirement to a task",
    ),
    ActionDefinition(
        name="update-estimate",
        handler=_handle_update_estimate,
        summary="Update estimated effort",
    ),
    ActionDefinition(
        name="update-metadata",
        handler=_handle_update_metadata,
        summary="Update task metadata fields",
    ),
    ActionDefinition(
        name="metadata-batch",
        handler=_handle_metadata_batch,
        summary="Batch update metadata across multiple nodes matching filters",
    ),
    ActionDefinition(
        name="fix-verification-types",
        handler=_handle_fix_verification_types,
        summary="Fix invalid/missing verification types across verify nodes",
    ),
    ActionDefinition(
        name="progress",
        handler=_handle_progress,
        summary="Summarize completion metrics for a node",
    ),
    ActionDefinition(
        name="list",
        handler=_handle_list,
        summary="List tasks with pagination and optional filters",
    ),
    ActionDefinition(
        name="query",
        handler=_handle_query,
        summary="Query tasks by status or parent",
    ),
    ActionDefinition(
        name="hierarchy",
        handler=_handle_hierarchy,
        summary="Return paginated hierarchy slices",
    ),
    ActionDefinition(
        name="session-config",
        handler=_handle_session_config,
        summary="Get/set autonomous session configuration",
    ),
]

# Router instance used by _dispatch_task_action to resolve action names.
_TASK_ROUTER = ActionRouter(tool_name="task", actions=_ACTION_DEFINITIONS)
|
|
3701
|
+
|
|
3702
|
+
|
|
3703
|
+
def _dispatch_task_action(
    *, action: str, payload: Dict[str, Any], config: ServerConfig
) -> dict:
    """Route a task action to its registered handler via _TASK_ROUTER.

    Unknown action names are converted into a validation-error envelope
    listing the supported actions, rather than letting ActionRouterError
    propagate to the caller.
    """
    try:
        return _TASK_ROUTER.dispatch(action=action, config=config, payload=payload)
    except ActionRouterError as exc:
        allowed = ", ".join(exc.allowed_actions)
        failure = error_response(
            f"Unsupported task action '{action}'. Allowed actions: {allowed}",
            error_code=ErrorCode.VALIDATION_ERROR,
            error_type=ErrorType.VALIDATION,
            remediation=f"Use one of: {allowed}",
            request_id=_request_id(),
        )
        return asdict(failure)
|
|
3720
|
+
|
|
3721
|
+
|
|
3722
|
+
def register_unified_task_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated task tool.

    Defines a single ``task`` tool on *mcp* whose first argument selects a
    sub-action; all remaining keyword arguments are collected into a payload
    dict and routed through ``_dispatch_task_action``.

    Args:
        mcp: FastMCP server instance the tool is attached to.
        config: Server configuration captured by the closure and passed to
            every dispatched action handler.
    """

    @canonical_tool(
        mcp,
        canonical_name="task",
    )
    @mcp_tool(tool_name="task", emit_metrics=True, audit=True)
    def task(
        action: str,
        spec_id: Optional[str] = None,
        task_id: Optional[str] = None,
        task_ids: Optional[List[str]] = None,
        workspace: Optional[str] = None,
        status_filter: Optional[str] = None,
        include_completed: bool = True,
        node_id: str = "spec-root",
        include_phases: bool = True,
        cursor: Optional[str] = None,
        limit: Optional[int] = None,
        parent: Optional[str] = None,
        status: Optional[str] = None,
        note: Optional[str] = None,
        completion_note: Optional[str] = None,
        reason: Optional[str] = None,
        blocker_type: str = "dependency",
        ticket: Optional[str] = None,
        resolution: Optional[str] = None,
        title: Optional[str] = None,
        description: Optional[str] = None,
        acceptance_criteria: Optional[List[str]] = None,
        task_type: str = "task",
        estimated_hours: Optional[float] = None,
        position: Optional[int] = None,
        cascade: bool = False,
        complexity: Optional[str] = None,
        file_path: Optional[str] = None,
        task_category: Optional[str] = None,
        actual_hours: Optional[float] = None,
        status_note: Optional[str] = None,
        verification_type: Optional[str] = None,
        command: Optional[str] = None,
        custom_metadata: Optional[Dict[str, Any]] = None,
        dry_run: bool = False,
        max_depth: int = 2,
        include_metadata: bool = False,
        # metadata-batch specific parameters
        phase_id: Optional[str] = None,
        pattern: Optional[str] = None,
        node_type: Optional[str] = None,
        owners: Optional[List[str]] = None,
        labels: Optional[Dict[str, str]] = None,
        category: Optional[str] = None,
        parent_filter: Optional[str] = None,
        update_metadata: Optional[Dict[str, Any]] = None,
        # session-config specific parameters
        get: bool = False,
        auto_mode: Optional[bool] = None,
        # complete-batch specific parameters
        completions: Optional[List[Dict[str, Any]]] = None,
        # reset-batch specific parameters
        threshold_hours: Optional[float] = None,
    ) -> dict:
        """Unified entry point for all task sub-actions.

        *action* selects the handler (see ``_ACTION_DEFINITIONS``); every
        other parameter is forwarded verbatim in the payload dict, and each
        handler picks out only the keys it needs.  Unused parameters are
        simply carried along as ``None``/default values.
        """
        # NOTE(review): keys here must mirror the parameter names exactly —
        # handlers look the values up by these string keys.
        payload = {
            "spec_id": spec_id,
            "task_id": task_id,
            "task_ids": task_ids,
            "workspace": workspace,
            "status_filter": status_filter,
            "include_completed": include_completed,
            "node_id": node_id,
            "include_phases": include_phases,
            "cursor": cursor,
            "limit": limit,
            "parent": parent,
            "status": status,
            "note": note,
            "completion_note": completion_note,
            "reason": reason,
            "blocker_type": blocker_type,
            "ticket": ticket,
            "resolution": resolution,
            "title": title,
            "description": description,
            "acceptance_criteria": acceptance_criteria,
            "task_type": task_type,
            "estimated_hours": estimated_hours,
            "position": position,
            "cascade": cascade,
            "complexity": complexity,
            "file_path": file_path,
            "task_category": task_category,
            "actual_hours": actual_hours,
            "status_note": status_note,
            "verification_type": verification_type,
            "command": command,
            "custom_metadata": custom_metadata,
            "dry_run": dry_run,
            "max_depth": max_depth,
            "include_metadata": include_metadata,
            # metadata-batch specific
            "phase_id": phase_id,
            "pattern": pattern,
            "node_type": node_type,
            "owners": owners,
            "labels": labels,
            "category": category,
            "parent_filter": parent_filter,
            "update_metadata": update_metadata,
            # session-config specific
            "get": get,
            "auto_mode": auto_mode,
            # complete-batch specific
            "completions": completions,
            # reset-batch specific
            "threshold_hours": threshold_hours,
        }
        return _dispatch_task_action(action=action, payload=payload, config=config)

    logger.debug("Registered unified task tool")
|
|
3842
|
+
|
|
3843
|
+
|
|
3844
|
+
# Public API of this module: only the registration entry point is exported.
__all__ = [
    "register_unified_task_tool",
]
|