foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,2357 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Validation operations for SDD spec files.
|
|
3
|
+
Provides spec validation, auto-fix capabilities, and statistics.
|
|
4
|
+
|
|
5
|
+
Security Note:
|
|
6
|
+
This module uses size limits from foundry_mcp.core.security to protect
|
|
7
|
+
against resource exhaustion attacks. See docs/mcp_best_practices/04-validation-input-hygiene.md
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from difflib import get_close_matches
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any, Dict, List, Optional, Callable
|
|
14
|
+
import json
|
|
15
|
+
import re
|
|
16
|
+
import copy
|
|
17
|
+
from datetime import datetime, timezone
|
|
18
|
+
|
|
19
|
+
from foundry_mcp.core.security import (
|
|
20
|
+
MAX_INPUT_SIZE,
|
|
21
|
+
MAX_ARRAY_LENGTH,
|
|
22
|
+
MAX_STRING_LENGTH,
|
|
23
|
+
MAX_NESTED_DEPTH,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
# Validation result data structures
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class Diagnostic:
    """
    Structured diagnostic for MCP consumption.

    Provides a machine-readable format for validation findings
    that can be easily processed by MCP tools. Instances are
    accumulated on a ValidationResult by the validator functions
    in this module.
    """

    code: str  # Stable machine-readable code (e.g., "MISSING_FILE_PATH", "INVALID_STATUS")
    message: str  # Human-readable description of the finding
    severity: str  # One of "error", "warning", "info" — errors make the spec invalid
    category: str  # Grouping bucket (e.g., "metadata", "structure", "counts", "security")
    location: Optional[str] = None  # Node ID or field path where the issue occurred, if known
    suggested_fix: Optional[str] = None  # Short human-readable remediation hint
    auto_fixable: bool = False  # True when an automated FixAction can resolve this finding
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class ValidationResult:
    """
    Complete validation result for a spec file.

    Aggregates all diagnostics produced by validate_spec() along with
    per-severity counts. A spec is valid when error_count == 0
    (warnings and infos do not invalidate it).
    """

    spec_id: str  # The spec's identifier, or "unknown" when it could not be read
    is_valid: bool  # True when no error-severity diagnostics were recorded
    diagnostics: List[Diagnostic] = field(default_factory=list)  # All findings, in discovery order
    error_count: int = 0  # Number of diagnostics with severity == "error"
    warning_count: int = 0  # Number of diagnostics with severity == "warning"
    info_count: int = 0  # Number of diagnostics with any other severity (treated as info)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@dataclass
class FixAction:
    """
    Represents a candidate auto-fix operation.

    Produced for auto-fixable diagnostics; the `apply` callable mutates
    the spec data dict in place when the action is executed.
    """

    id: str  # Unique identifier for this fix action
    description: str  # Human-readable summary of what the fix does
    category: str  # Category matching the originating diagnostic
    severity: str  # Severity of the issue being fixed
    auto_apply: bool  # True when the fix is safe to apply without confirmation
    preview: str  # Human-readable preview of the change
    apply: Callable[[Dict[str, Any]], None]  # Mutates the spec data dict in place
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
@dataclass
class FixReport:
    """
    Outcome of applying a set of fix actions.

    Records which actions were applied or skipped, plus before/after
    snapshots of the spec data for auditing or rollback.
    """

    spec_path: Optional[str] = None  # Path of the spec file that was fixed, if on disk
    backup_path: Optional[str] = None  # Path of the backup written before applying fixes
    applied_actions: List[FixAction] = field(default_factory=list)  # Actions that were executed
    skipped_actions: List[FixAction] = field(default_factory=list)  # Actions not executed (e.g., not auto_apply)
    before_state: Optional[Dict[str, Any]] = None  # Spec data snapshot before fixes
    after_state: Optional[Dict[str, Any]] = None  # Spec data snapshot after fixes
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@dataclass
class SpecStats:
    """
    Statistics for a spec file.

    Summary metrics computed over a spec's hierarchy: node counts,
    status distribution, depth, and progress/coverage ratios.
    """

    spec_id: str  # The spec's identifier
    title: str  # Spec title
    version: str  # Spec version string
    status: str  # Overall spec status
    totals: Dict[str, int] = field(default_factory=dict)  # Counts per node type
    status_counts: Dict[str, int] = field(default_factory=dict)  # Counts per status value
    max_depth: int = 0  # Deepest nesting level in the hierarchy
    avg_tasks_per_phase: float = 0.0  # Mean task count across phases
    verification_coverage: float = 0.0  # Fraction of tasks with verification, 0.0-1.0
    progress: float = 0.0  # Completion fraction, 0.0-1.0
    file_size_kb: float = 0.0  # On-disk size of the spec file in kilobytes
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
# Constants

# NOTE(review): STATUS_FIELDS has the same members as VALID_STATUSES below —
# likely a historical alias. Both are kept because either may be imported
# by other modules; confirm before consolidating.
STATUS_FIELDS = {"pending", "in_progress", "completed", "blocked"}
# Allowed values for a node's "type" field.
VALID_NODE_TYPES = {"spec", "phase", "group", "task", "subtask", "verify", "research"}
# Allowed values for a node's "status" field.
VALID_STATUSES = {"pending", "in_progress", "completed", "blocked"}
# Allowed values for a task node's "task_category" field.
VALID_TASK_CATEGORIES = {
    "investigation",
    "implementation",
    "refactoring",
    "decision",
    "research",
}
# Canonical verification type values.
VALID_VERIFICATION_TYPES = {"run-tests", "fidelity", "manual"}

# Legacy to canonical verification type mapping
VERIFICATION_TYPE_MAPPING = {
    "test": "run-tests",
    "auto": "run-tests",
}

# Research node constants
VALID_RESEARCH_TYPES = {"chat", "consensus", "thinkdeep", "ideate", "deep-research"}
VALID_RESEARCH_RESULTS = {"completed", "inconclusive", "blocked", "cancelled"}
RESEARCH_BLOCKING_MODES = {"none", "soft", "hard"}

# Common field name typos/alternatives, used to suggest the intended field
# when validation encounters an unknown key.
FIELD_NAME_SUGGESTIONS = {
    "category": "task_category",
    "type": "node type or verification_type",
    "desc": "description",
    "details": "description",
}
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def _suggest_value(value: str, valid_values: set, n: int = 1) -> Optional[str]:
|
|
145
|
+
"""
|
|
146
|
+
Suggest a close match for an invalid value.
|
|
147
|
+
|
|
148
|
+
Args:
|
|
149
|
+
value: The invalid value provided
|
|
150
|
+
valid_values: Set of valid values to match against
|
|
151
|
+
n: Number of suggestions to return (default 1)
|
|
152
|
+
|
|
153
|
+
Returns:
|
|
154
|
+
Suggestion string like "did you mean 'X'?" or None if no close match
|
|
155
|
+
"""
|
|
156
|
+
if not value:
|
|
157
|
+
return None
|
|
158
|
+
matches = get_close_matches(value.lower(), [v.lower() for v in valid_values], n=n, cutoff=0.6)
|
|
159
|
+
if matches:
|
|
160
|
+
# Find the original-case version of the match
|
|
161
|
+
for v in valid_values:
|
|
162
|
+
if v.lower() == matches[0]:
|
|
163
|
+
return f"did you mean '{v}'?"
|
|
164
|
+
return f"did you mean '{matches[0]}'?"
|
|
165
|
+
return None
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
# Validation functions
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def _requires_rich_task_fields(spec_data: Dict[str, Any]) -> bool:
|
|
172
|
+
"""Check if spec requires rich task fields based on explicit complexity metadata."""
|
|
173
|
+
metadata = spec_data.get("metadata", {})
|
|
174
|
+
if not isinstance(metadata, dict):
|
|
175
|
+
return False
|
|
176
|
+
|
|
177
|
+
# Only check explicit complexity metadata (template no longer indicates complexity)
|
|
178
|
+
complexity = metadata.get("complexity")
|
|
179
|
+
if isinstance(complexity, str) and complexity.strip().lower() in {
|
|
180
|
+
"medium",
|
|
181
|
+
"complex",
|
|
182
|
+
"high",
|
|
183
|
+
}:
|
|
184
|
+
return True
|
|
185
|
+
|
|
186
|
+
return False
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def validate_spec_input(
    raw_input: str | bytes,
    *,
    max_size: Optional[int] = None,
) -> tuple[Optional[Dict[str, Any]], Optional[ValidationResult]]:
    """
    Validate and parse raw spec input with size checks.

    Performs size validation before JSON parsing to prevent resource
    exhaustion attacks from oversized payloads.

    Args:
        raw_input: Raw JSON string or bytes to validate
        max_size: Maximum allowed size in bytes (default: MAX_INPUT_SIZE)

    Returns:
        Tuple of (parsed_data, error_result):
        - On success: (dict, None)
        - On failure: (None, ValidationResult with error)

    Example:
        >>> spec_data, error = validate_spec_input(json_string)
        >>> if error:
        ...     return error_response(error.diagnostics[0].message)
        >>> result = validate_spec(spec_data)
    """

    def _failure(diag: Diagnostic) -> tuple[None, ValidationResult]:
        # Wrap a single error diagnostic in a failed ValidationResult.
        res = ValidationResult(spec_id="unknown", is_valid=False, error_count=1)
        res.diagnostics.append(diag)
        return None, res

    limit = MAX_INPUT_SIZE if max_size is None else max_size

    # Measure the payload in bytes regardless of input type.
    payload = raw_input.encode("utf-8") if isinstance(raw_input, str) else raw_input

    if len(payload) > limit:
        return _failure(
            Diagnostic(
                code="INPUT_TOO_LARGE",
                message=f"Input size ({len(payload):,} bytes) exceeds maximum allowed ({limit:,} bytes)",
                severity="error",
                category="security",
                suggested_fix=f"Reduce input size to under {limit:,} bytes",
            )
        )

    # Parse the JSON payload, decoding bytes as UTF-8 first.
    try:
        text = raw_input.decode("utf-8") if isinstance(raw_input, bytes) else raw_input
        spec_data = json.loads(text)
    except json.JSONDecodeError as e:
        return _failure(
            Diagnostic(
                code="INVALID_JSON",
                message=f"Failed to parse JSON: {e}",
                severity="error",
                category="structure",
            )
        )

    # The top-level value must be a JSON object.
    if not isinstance(spec_data, dict):
        return _failure(
            Diagnostic(
                code="INVALID_SPEC_TYPE",
                message=f"Spec must be a JSON object, got {type(spec_data).__name__}",
                severity="error",
                category="structure",
            )
        )

    return spec_data, None
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
def validate_spec(spec_data: Dict[str, Any]) -> ValidationResult:
    """
    Validate a spec file and return structured diagnostics.

    Args:
        spec_data: Parsed JSON spec data

    Returns:
        ValidationResult with all diagnostics

    Note:
        For raw JSON input, use validate_spec_input() first to perform
        size validation before parsing.
    """
    result = ValidationResult(
        spec_id=spec_data.get("spec_id", "unknown"),
        is_valid=True,
    )

    # Defense in depth: size/depth limits are checked before anything else.
    _validate_size_limits(spec_data, result)

    # Top-level structure and required fields.
    _validate_structure(spec_data, result)

    # Hierarchy-level checks only run when a hierarchy is present.
    hierarchy = spec_data.get("hierarchy", {})
    if hierarchy:
        _validate_hierarchy(hierarchy, result)
        _validate_nodes(hierarchy, result)
        _validate_task_counts(hierarchy, result)
        _validate_dependencies(hierarchy, result)
        _validate_metadata(spec_data, hierarchy, result)

    # Tally diagnostics by severity; anything that is neither error nor
    # warning counts as info.
    severities = [diag.severity for diag in result.diagnostics]
    result.error_count = severities.count("error")
    result.warning_count = severities.count("warning")
    result.info_count = len(severities) - result.error_count - result.warning_count

    result.is_valid = result.error_count == 0
    return result
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
def _iter_valid_nodes(
    hierarchy: Dict[str, Any],
    result: ValidationResult,
    report_invalid: bool = True,
):
    """
    Iterate over hierarchy yielding only valid (dict) nodes.

    Args:
        hierarchy: The hierarchy dict to iterate
        result: ValidationResult to append errors to
        report_invalid: Whether to report invalid nodes as errors (default True,
            set False if already reported by another function)

    Yields:
        Tuples of (node_id, node) where node is a valid dict
    """
    for node_id, node in hierarchy.items():
        if isinstance(node, dict):
            yield node_id, node
            continue
        # Non-dict entries are skipped; optionally record them as errors
        # (callers pass report_invalid=False to avoid duplicate diagnostics).
        if report_invalid:
            result.diagnostics.append(
                Diagnostic(
                    code="INVALID_NODE_STRUCTURE",
                    message=f"Node '{node_id}' is not a valid object (got {type(node).__name__})",
                    severity="error",
                    category="node",
                    location=str(node_id),
                    suggested_fix="Ensure all hierarchy values are valid node objects",
                )
            )
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def _validate_size_limits(spec_data: Dict[str, Any], result: ValidationResult) -> None:
    """Validate size limits on spec data structures (defense in depth).

    Appends warning-severity diagnostics (category "security") for
    excessive nesting depth and oversized arrays. Warnings do not make
    the spec invalid; they flag likely resource-exhaustion risks.
    """

    def count_items(obj: Any, depth: int = 0) -> tuple[int, int]:
        """Count total items and max depth in nested structure."""
        # Stop descending past the limit; the returned depth (> MAX_NESTED_DEPTH)
        # is enough for the caller to flag the violation.
        if depth > MAX_NESTED_DEPTH:
            return 0, depth

        if isinstance(obj, dict):
            total = len(obj)
            max_d = depth
            for v in obj.values():
                sub_count, sub_depth = count_items(v, depth + 1)
                total += sub_count
                max_d = max(max_d, sub_depth)
            return total, max_d
        elif isinstance(obj, list):
            total = len(obj)
            max_d = depth
            for item in obj:
                sub_count, sub_depth = count_items(item, depth + 1)
                total += sub_count
                max_d = max(max_d, sub_depth)
            return total, max_d
        else:
            # Scalar leaf: one item at the current depth.
            return 1, depth

    # Check hierarchy nesting depth
    hierarchy = spec_data.get("hierarchy", {})
    if hierarchy:
        _, max_depth = count_items(hierarchy)
        if max_depth > MAX_NESTED_DEPTH:
            result.diagnostics.append(
                Diagnostic(
                    code="EXCESSIVE_NESTING",
                    message=f"Hierarchy nesting depth ({max_depth}) exceeds maximum ({MAX_NESTED_DEPTH})",
                    severity="warning",
                    category="security",
                    suggested_fix="Flatten hierarchy structure to reduce nesting depth",
                )
            )

        # Check array lengths in common locations
        # NOTE(review): hierarchy is keyed by node id elsewhere in this module,
        # so a top-level "children" key likely never exists here — this probably
        # intended hierarchy["spec-root"]["children"]. Confirm before changing,
        # as fixing it would start emitting new warnings.
        children = hierarchy.get("children", [])
        if len(children) > MAX_ARRAY_LENGTH:
            result.diagnostics.append(
                Diagnostic(
                    code="EXCESSIVE_ARRAY_LENGTH",
                    message=f"Root children array ({len(children)} items) exceeds maximum ({MAX_ARRAY_LENGTH})",
                    severity="warning",
                    category="security",
                    location="hierarchy.children",
                    suggested_fix="Split large phase/task lists into smaller groups",
                )
            )

    # Check journal array length
    journal = spec_data.get("journal", [])
    if len(journal) > MAX_ARRAY_LENGTH:
        result.diagnostics.append(
            Diagnostic(
                code="EXCESSIVE_JOURNAL_LENGTH",
                message=f"Journal array ({len(journal)} entries) exceeds maximum ({MAX_ARRAY_LENGTH})",
                severity="warning",
                category="security",
                location="journal",
                suggested_fix="Archive old journal entries or split into separate files",
            )
        )
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
def _validate_structure(spec_data: Dict[str, Any], result: ValidationResult) -> None:
    """Validate top-level structure and required fields.

    Checks required top-level keys, spec_id format, ISO-8601 timestamps,
    the mission requirement for medium/complex/high specs, and that the
    hierarchy is a non-empty dict. Appends diagnostics to `result`.
    """
    required_fields = ["spec_id", "generated", "last_updated", "hierarchy"]

    for field_name in required_fields:
        if field_name not in spec_data:
            result.diagnostics.append(
                Diagnostic(
                    code="MISSING_REQUIRED_FIELD",
                    message=f"Missing required field '{field_name}'",
                    severity="error",
                    category="structure",
                    suggested_fix=f"Add required field '{field_name}' to spec",
                    auto_fixable=False,
                )
            )

    # Validate spec_id format
    # NOTE: _is_valid_spec_id is defined elsewhere in this module.
    spec_id = spec_data.get("spec_id")
    if spec_id and not _is_valid_spec_id(spec_id):
        result.diagnostics.append(
            Diagnostic(
                code="INVALID_SPEC_ID_FORMAT",
                message=f"spec_id '{spec_id}' doesn't follow format: {{feature}}-{{YYYY-MM-DD}}-{{nnn}}",
                severity="warning",
                category="structure",
                location="spec_id",
            )
        )

    # Validate date fields (warning only; auto-fix normalizes the format).
    for field_name in ["generated", "last_updated"]:
        value = spec_data.get(field_name)
        if value and not _is_valid_iso8601(value):
            result.diagnostics.append(
                Diagnostic(
                    code="INVALID_DATE_FORMAT",
                    message=f"'{field_name}' should be in ISO 8601 format",
                    severity="warning",
                    category="structure",
                    location=field_name,
                    suggested_fix="Normalize timestamp to ISO 8601 format",
                    auto_fixable=True,
                )
            )

    # Medium/complex/high specs must declare a mission statement.
    if _requires_rich_task_fields(spec_data):
        metadata = spec_data.get("metadata", {})
        mission = metadata.get("mission") if isinstance(metadata, dict) else None
        if not isinstance(mission, str) or not mission.strip():
            result.diagnostics.append(
                Diagnostic(
                    code="MISSING_MISSION",
                    message="Spec metadata.mission is required when complexity is medium/complex/high",
                    severity="error",
                    category="metadata",
                    location="metadata.mission",
                    suggested_fix="Set metadata.mission to a concise goal statement",
                    auto_fixable=False,
                )
            )

    # Check hierarchy is dict (missing hierarchy is already reported above).
    hierarchy = spec_data.get("hierarchy")
    if hierarchy is not None and not isinstance(hierarchy, dict):
        result.diagnostics.append(
            Diagnostic(
                code="INVALID_HIERARCHY_TYPE",
                message="'hierarchy' must be a dictionary",
                severity="error",
                category="structure",
            )
        )
    elif hierarchy is not None and len(hierarchy) == 0:
        result.diagnostics.append(
            Diagnostic(
                code="EMPTY_HIERARCHY",
                message="'hierarchy' is empty",
                severity="error",
                category="structure",
            )
        )
|
|
514
|
+
|
|
515
|
+
|
|
516
|
+
def _validate_hierarchy(hierarchy: Dict[str, Any], result: ValidationResult) -> None:
    """Validate hierarchy integrity: parent/child references, no orphans, no cycles.

    Runs five checks in order: spec-root presence, root parent nullity,
    parent references, child references (bidirectional consistency),
    then reachability (orphans) and a DFS cycle check. All findings are
    appended to `result` as category "hierarchy" diagnostics.
    """
    # Check spec-root exists; without it no further checks make sense.
    if "spec-root" not in hierarchy:
        result.diagnostics.append(
            Diagnostic(
                code="MISSING_SPEC_ROOT",
                message="Missing 'spec-root' node in hierarchy",
                severity="error",
                category="hierarchy",
            )
        )
        return

    # NOTE(review): assumes the root entry is a dict — a non-dict value here
    # would raise AttributeError on .get(). Confirm callers guarantee this.
    root = hierarchy["spec-root"]
    if root.get("parent") is not None:
        result.diagnostics.append(
            Diagnostic(
                code="INVALID_ROOT_PARENT",
                message="'spec-root' must have parent: null",
                severity="error",
                category="hierarchy",
                location="spec-root",
                suggested_fix="Set spec-root parent to null",
                auto_fixable=True,
            )
        )

    # Validate parent references (invalid nodes reported here once).
    for node_id, node in _iter_valid_nodes(hierarchy, result):
        parent_id = node.get("parent")

        # Only spec-root may have a null parent.
        if node_id != "spec-root" and parent_id is None:
            result.diagnostics.append(
                Diagnostic(
                    code="NULL_PARENT",
                    message=f"Node '{node_id}' has null parent (only spec-root should)",
                    severity="error",
                    category="hierarchy",
                    location=node_id,
                )
            )

        if parent_id and parent_id not in hierarchy:
            result.diagnostics.append(
                Diagnostic(
                    code="MISSING_PARENT",
                    message=f"Node '{node_id}' references non-existent parent '{parent_id}'",
                    severity="error",
                    category="hierarchy",
                    location=node_id,
                )
            )

    # Validate child references (report_invalid=False: bad nodes were
    # already reported in the parent-reference pass above).
    for node_id, node in _iter_valid_nodes(hierarchy, result, report_invalid=False):
        children = node.get("children", [])

        if not isinstance(children, list):
            result.diagnostics.append(
                Diagnostic(
                    code="INVALID_CHILDREN_TYPE",
                    message=f"Node '{node_id}' children field must be a list",
                    severity="error",
                    category="hierarchy",
                    location=node_id,
                )
            )
            continue

        for child_id in children:
            if child_id not in hierarchy:
                result.diagnostics.append(
                    Diagnostic(
                        code="MISSING_CHILD",
                        message=f"Node '{node_id}' references non-existent child '{child_id}'",
                        severity="error",
                        category="hierarchy",
                        location=node_id,
                    )
                )
            else:
                # NOTE(review): child_node may be a non-dict hierarchy value;
                # .get() would raise in that case — confirm upstream filtering.
                child_node = hierarchy[child_id]
                if child_node.get("parent") != node_id:
                    result.diagnostics.append(
                        Diagnostic(
                            code="PARENT_CHILD_MISMATCH",
                            message=f"'{node_id}' lists '{child_id}' as child, but '{child_id}' has parent='{child_node.get('parent')}'",
                            severity="error",
                            category="hierarchy",
                            location=node_id,
                            suggested_fix="Align parent references with children list",
                            auto_fixable=True,
                        )
                    )

    # Check for orphaned nodes: everything must be reachable from spec-root
    # by following children links.
    reachable = set()

    def traverse(node_id: str) -> None:
        # Depth-first reachability walk; the `reachable` guard also
        # prevents infinite recursion on cyclic graphs.
        if node_id in reachable:
            return
        reachable.add(node_id)
        node = hierarchy.get(node_id, {})
        for child_id in node.get("children", []):
            if child_id in hierarchy:
                traverse(child_id)

    traverse("spec-root")

    orphaned = set(hierarchy.keys()) - reachable
    if orphaned:
        orphan_list = ", ".join(sorted(orphaned))
        result.diagnostics.append(
            Diagnostic(
                code="ORPHANED_NODES",
                message=f"Found {len(orphaned)} orphaned node(s) not reachable from spec-root: {orphan_list}",
                severity="error",
                category="hierarchy",
                suggested_fix="Attach orphaned nodes to spec-root or remove them",
                auto_fixable=True,
            )
        )

    # Check for cycles with a recursive DFS and an explicit recursion stack.
    visited = set()
    rec_stack = set()

    def has_cycle(node_id: str) -> bool:
        visited.add(node_id)
        rec_stack.add(node_id)

        node = hierarchy.get(node_id, {})
        for child_id in node.get("children", []):
            if child_id not in visited:
                if has_cycle(child_id):
                    return True
            elif child_id in rec_stack:
                # Back edge to an ancestor on the current DFS path.
                return True

        rec_stack.remove(node_id)
        return False

    if has_cycle("spec-root"):
        result.diagnostics.append(
            Diagnostic(
                code="CYCLE_DETECTED",
                message="Cycle detected in hierarchy tree",
                severity="error",
                category="hierarchy",
            )
        )
|
|
668
|
+
|
|
669
|
+
|
|
670
|
+
def _validate_nodes(hierarchy: Dict[str, Any], result: ValidationResult) -> None:
    """Validate node structure and required fields.

    For every structurally valid node this checks:
      * presence of all required fields,
      * that ``type`` and ``status`` are recognized values,
      * that ``title`` is non-empty,
      * that ``dependencies`` (when present) is a dict whose
        blocks/blocked_by/depends entries are lists.

    One Diagnostic is appended to ``result`` per problem found.
    """

    def emit(**kwargs: Any) -> None:
        # Shorthand for appending a Diagnostic to the shared result.
        result.diagnostics.append(Diagnostic(**kwargs))

    required_fields = [
        "type",
        "title",
        "status",
        "parent",
        "children",
        "total_tasks",
        "completed_tasks",
        "metadata",
    ]

    for node_id, node in _iter_valid_nodes(hierarchy, result, report_invalid=False):
        # All required fields must be present.
        for field_name in (name for name in required_fields if name not in node):
            emit(
                code="MISSING_NODE_FIELD",
                message=f"Node '{node_id}' missing required field '{field_name}'",
                severity="error",
                category="node",
                location=node_id,
                suggested_fix="Add missing required fields with sensible defaults",
                auto_fixable=True,
            )

        # Node type, when set, must be one of the recognized values.
        node_type = node.get("type")
        if node_type and node_type not in VALID_NODE_TYPES:
            hint = _suggest_value(node_type, VALID_NODE_TYPES)
            message = f"Node '{node_id}' has invalid type '{node_type}'"
            if hint:
                message = f"{message}; {hint}"
            emit(
                code="INVALID_NODE_TYPE",
                message=message,
                severity="error",
                category="node",
                location=node_id,
                suggested_fix=f"Valid types: {', '.join(sorted(VALID_NODE_TYPES))}",
                auto_fixable=True,
            )

        # Status, when set, must be one of the recognized values.
        status = node.get("status")
        if status and status not in VALID_STATUSES:
            hint = _suggest_value(status, VALID_STATUSES)
            message = f"Node '{node_id}' has invalid status '{status}'"
            if hint:
                message = f"{message}; {hint}"
            emit(
                code="INVALID_STATUS",
                message=message,
                severity="error",
                category="node",
                location=node_id,
                suggested_fix=f"Valid statuses: {', '.join(sorted(VALID_STATUSES))}",
                auto_fixable=True,
            )

        # Title, when present, must not be blank.
        title = node.get("title")
        if title is not None and not str(title).strip():
            emit(
                code="EMPTY_TITLE",
                message=f"Node '{node_id}' has empty title",
                severity="warning",
                category="node",
                location=node_id,
                suggested_fix="Generate title from node ID",
                auto_fixable=True,
            )

        # Dependencies, when present, must be a dict of lists.
        if "dependencies" in node:
            deps = node["dependencies"]
            if not isinstance(deps, dict):
                emit(
                    code="INVALID_DEPENDENCIES_TYPE",
                    message=f"Node '{node_id}' dependencies must be a dictionary",
                    severity="error",
                    category="dependency",
                    location=node_id,
                    suggested_fix="Create dependencies dict with blocks/blocked_by/depends arrays",
                    auto_fixable=True,
                )
            else:
                for dep_key in ("blocks", "blocked_by", "depends"):
                    if dep_key in deps and not isinstance(deps[dep_key], list):
                        emit(
                            code="INVALID_DEPENDENCY_FIELD",
                            message=f"Node '{node_id}' dependencies.{dep_key} must be a list",
                            severity="error",
                            category="dependency",
                            location=node_id,
                        )
|
781
|
+
def _validate_task_counts(hierarchy: Dict[str, Any], result: ValidationResult) -> None:
    """Validate task count accuracy and propagation.

    Checks three invariants for every structurally valid node:
      * ``completed_tasks`` never exceeds ``total_tasks``,
      * a parent's counts equal the sum of its children's counts,
      * leaf task-like nodes (task/subtask/verify) carry total_tasks == 1.

    Non-integer counts and a non-list ``children`` value are coerced to
    0 / empty here so a malformed spec produces diagnostics instead of a
    TypeError (e.g. comparing a str count against an int, or summing one).

    Args:
        hierarchy: Mapping of node ID to node dict.
        result: ValidationResult whose diagnostics list is appended to.
    """

    def as_count(value: Any) -> int:
        # bool is a subclass of int; exclude it so True/False are not
        # silently treated as counts.
        return value if isinstance(value, int) and not isinstance(value, bool) else 0

    for node_id, node in _iter_valid_nodes(hierarchy, result, report_invalid=False):
        total_tasks = as_count(node.get("total_tasks", 0))
        completed_tasks = as_count(node.get("completed_tasks", 0))
        children = node.get("children", [])
        if not isinstance(children, list):
            # A non-list children value is reported by _validate_nodes;
            # treat it as empty here to keep this pass crash-free.
            children = []

        # Completed can't exceed total.
        if completed_tasks > total_tasks:
            result.diagnostics.append(
                Diagnostic(
                    code="COMPLETED_EXCEEDS_TOTAL",
                    message=f"Node '{node_id}' has completed_tasks ({completed_tasks}) > total_tasks ({total_tasks})",
                    severity="error",
                    category="counts",
                    location=node_id,
                    suggested_fix="Recalculate total/completed task rollups for parent nodes",
                    auto_fixable=True,
                )
            )

        # If node has children, verify counts match the sum over children.
        if children:
            child_total = 0
            child_completed = 0

            for child_id in children:
                if child_id in hierarchy:
                    child_node = hierarchy[child_id]
                    if not isinstance(child_node, dict):
                        # Malformed child entries are reported elsewhere.
                        continue
                    child_total += as_count(child_node.get("total_tasks", 0))
                    child_completed += as_count(child_node.get("completed_tasks", 0))

            if total_tasks != child_total:
                result.diagnostics.append(
                    Diagnostic(
                        code="TOTAL_TASKS_MISMATCH",
                        message=f"Node '{node_id}' total_tasks ({total_tasks}) doesn't match sum of children ({child_total})",
                        severity="error",
                        category="counts",
                        location=node_id,
                        suggested_fix="Recalculate total/completed task rollups",
                        auto_fixable=True,
                    )
                )

            if completed_tasks != child_completed:
                result.diagnostics.append(
                    Diagnostic(
                        code="COMPLETED_TASKS_MISMATCH",
                        message=f"Node '{node_id}' completed_tasks ({completed_tasks}) doesn't match sum of children ({child_completed})",
                        severity="error",
                        category="counts",
                        location=node_id,
                        suggested_fix="Recalculate total/completed task rollups",
                        auto_fixable=True,
                    )
                )
        else:
            # Leaf task-like nodes should count as exactly one task.
            node_type = node.get("type")
            if node_type in ["task", "subtask", "verify"]:
                if total_tasks != 1:
                    result.diagnostics.append(
                        Diagnostic(
                            code="INVALID_LEAF_COUNT",
                            message=f"Leaf node '{node_id}' (type={node_type}) should have total_tasks=1, has {total_tasks}",
                            severity="warning",
                            category="counts",
                            location=node_id,
                            suggested_fix="Set leaf node total_tasks to 1",
                            auto_fixable=True,
                        )
                    )
|
+
def _validate_dependencies(hierarchy: Dict[str, Any], result: ValidationResult) -> None:
    """Validate dependency graph and bidirectional consistency.

    Reports references to nodes that do not exist and blocks/blocked_by
    pairs that are not mirrored on both sides.

    Dependency fields that are not lists are skipped here: iterating them
    directly would either raise TypeError (e.g. an int) or, for strings,
    yield nonsense per-character entries. The structural problem itself is
    reported as INVALID_DEPENDENCY_FIELD by _validate_nodes.

    Args:
        hierarchy: Mapping of node ID to node dict.
        result: ValidationResult whose diagnostics list is appended to.
    """

    def dep_list(deps: Dict[str, Any], key: str) -> List[Any]:
        # Return the dependency list for `key`, or [] when missing/malformed.
        value = deps.get(key, [])
        return value if isinstance(value, list) else []

    for node_id, node in _iter_valid_nodes(hierarchy, result, report_invalid=False):
        deps = node.get("dependencies")
        if not isinstance(deps, dict):
            # Missing or malformed container; nothing to cross-check.
            continue

        # Every referenced node must exist in the hierarchy.
        for dep_type in ["blocks", "blocked_by", "depends"]:
            for dep_id in dep_list(deps, dep_type):
                if dep_id not in hierarchy:
                    result.diagnostics.append(
                        Diagnostic(
                            code="MISSING_DEPENDENCY_TARGET",
                            message=f"Node '{node_id}' {dep_type} references non-existent node '{dep_id}'",
                            severity="error",
                            category="dependency",
                            location=node_id,
                        )
                    )

        # blocks must be mirrored by blocked_by on the target node.
        for blocked_id in dep_list(deps, "blocks"):
            if blocked_id in hierarchy:
                blocked_deps = hierarchy[blocked_id].get("dependencies", {})
                if isinstance(blocked_deps, dict):
                    if node_id not in dep_list(blocked_deps, "blocked_by"):
                        result.diagnostics.append(
                            Diagnostic(
                                code="BIDIRECTIONAL_INCONSISTENCY",
                                message=f"'{node_id}' blocks '{blocked_id}', but '{blocked_id}' doesn't list '{node_id}' in blocked_by",
                                severity="error",
                                category="dependency",
                                location=node_id,
                                suggested_fix="Synchronize bidirectional dependency relationships",
                                auto_fixable=True,
                            )
                        )

        # blocked_by must be mirrored by blocks on the blocker node.
        for blocker_id in dep_list(deps, "blocked_by"):
            if blocker_id in hierarchy:
                blocker_deps = hierarchy[blocker_id].get("dependencies", {})
                if isinstance(blocker_deps, dict):
                    if node_id not in dep_list(blocker_deps, "blocks"):
                        result.diagnostics.append(
                            Diagnostic(
                                code="BIDIRECTIONAL_INCONSISTENCY",
                                message=f"'{node_id}' blocked_by '{blocker_id}', but '{blocker_id}' doesn't list '{node_id}' in blocks",
                                severity="error",
                                category="dependency",
                                location=node_id,
                                suggested_fix="Synchronize bidirectional dependency relationships",
                                auto_fixable=True,
                            )
                        )
|
+
def _validate_metadata(
    spec_data: Dict[str, Any],
    hierarchy: Dict[str, Any],
    result: ValidationResult,
) -> None:
    """Validate type-specific metadata requirements.

    Verify nodes must carry a valid ``verification_type``. Task nodes are
    checked for a valid ``task_category`` and — when the spec requires rich
    task fields — a description and non-empty ``acceptance_criteria``.
    Implementation/refactoring tasks must additionally name a real
    ``file_path``.

    Args:
        spec_data: Full spec document (used to decide rich-field strictness).
        hierarchy: Mapping of node ID to node dict.
        result: ValidationResult whose diagnostics list is appended to.
    """
    # Whether this spec's template demands description/category/criteria
    # on every task node.
    requires_rich_tasks = _requires_rich_task_fields(spec_data)

    def _nonempty_string(value: Any) -> bool:
        # True only for str values with visible content.
        return isinstance(value, str) and bool(value.strip())

    def _has_description(metadata: Dict[str, Any]) -> bool:
        # Either `description` or `details` (string or list of strings)
        # satisfies the description requirement.
        if _nonempty_string(metadata.get("description")):
            return True
        details = metadata.get("details")
        if _nonempty_string(details):
            return True
        if isinstance(details, list):
            return any(_nonempty_string(item) for item in details)
        return False

    for node_id, node in _iter_valid_nodes(hierarchy, result, report_invalid=False):
        node_type = node.get("type")
        metadata = node.get("metadata", {})

        # Malformed metadata container: report and skip the per-type checks,
        # which all assume a dict.
        if not isinstance(metadata, dict):
            result.diagnostics.append(
                Diagnostic(
                    code="INVALID_METADATA_TYPE",
                    message=f"Node '{node_id}' metadata must be a dictionary",
                    severity="error",
                    category="metadata",
                    location=node_id,
                )
            )
            continue

        # Verify nodes: verification_type is mandatory and constrained.
        if node_type == "verify":
            verification_type = metadata.get("verification_type")

            if not verification_type:
                result.diagnostics.append(
                    Diagnostic(
                        code="MISSING_VERIFICATION_TYPE",
                        message=f"Verify node '{node_id}' missing metadata.verification_type",
                        severity="error",
                        category="metadata",
                        location=node_id,
                        suggested_fix="Set verification_type to 'run-tests', 'fidelity', or 'manual'",
                        auto_fixable=True,
                    )
                )
            elif verification_type not in VALID_VERIFICATION_TYPES:
                hint = _suggest_value(verification_type, VALID_VERIFICATION_TYPES)
                msg = f"Verify node '{node_id}' has invalid verification_type '{verification_type}'"
                if hint:
                    msg += f"; {hint}"
                result.diagnostics.append(
                    Diagnostic(
                        code="INVALID_VERIFICATION_TYPE",
                        message=msg,
                        severity="error",
                        category="metadata",
                        location=node_id,
                        suggested_fix=f"Valid types: {', '.join(sorted(VALID_VERIFICATION_TYPES))}",
                        auto_fixable=True,
                    )
                )

        # Task nodes: category, description, and acceptance criteria checks.
        if node_type == "task":
            # Normalize the category for comparison; only a non-blank string counts.
            raw_task_category = metadata.get("task_category")
            task_category = None
            if isinstance(raw_task_category, str) and raw_task_category.strip():
                task_category = raw_task_category.strip().lower()

            # Check for common field name typo: 'category' instead of 'task_category'
            if task_category is None and "category" in metadata and "task_category" not in metadata:
                result.diagnostics.append(
                    Diagnostic(
                        code="UNKNOWN_FIELD",
                        message=f"Task node '{node_id}' has unknown field 'category'; did you mean 'task_category'?",
                        severity="warning",
                        category="metadata",
                        location=node_id,
                        suggested_fix="Rename 'category' to 'task_category'",
                        auto_fixable=False,
                    )
                )

            if task_category is not None and task_category not in VALID_TASK_CATEGORIES:
                hint = _suggest_value(task_category, VALID_TASK_CATEGORIES)
                msg = f"Task node '{node_id}' has invalid task_category '{task_category}'"
                if hint:
                    msg += f"; {hint}"
                result.diagnostics.append(
                    Diagnostic(
                        code="INVALID_TASK_CATEGORY",
                        message=msg,
                        severity="error",
                        category="metadata",
                        location=node_id,
                        suggested_fix=f"Valid categories: {', '.join(sorted(VALID_TASK_CATEGORIES))}",
                        auto_fixable=False,  # Disabled: manual fix required
                    )
                )

            if requires_rich_tasks and task_category is None:
                result.diagnostics.append(
                    Diagnostic(
                        code="MISSING_TASK_CATEGORY",
                        message=f"Task node '{node_id}' missing metadata.task_category",
                        severity="error",
                        category="metadata",
                        location=node_id,
                        suggested_fix="Set metadata.task_category to a valid category",
                        auto_fixable=False,
                    )
                )

            if requires_rich_tasks and not _has_description(metadata):
                result.diagnostics.append(
                    Diagnostic(
                        code="MISSING_TASK_DESCRIPTION",
                        message=f"Task node '{node_id}' missing metadata.description",
                        severity="error",
                        category="metadata",
                        location=node_id,
                        suggested_fix="Provide metadata.description (or details) for the task",
                        auto_fixable=False,
                    )
                )

            if requires_rich_tasks:
                # acceptance_criteria must be a non-empty list of non-empty strings.
                acceptance_criteria = metadata.get("acceptance_criteria")
                if acceptance_criteria is None:
                    result.diagnostics.append(
                        Diagnostic(
                            code="MISSING_ACCEPTANCE_CRITERIA",
                            message=f"Task node '{node_id}' missing metadata.acceptance_criteria",
                            severity="error",
                            category="metadata",
                            location=node_id,
                            suggested_fix="Provide a non-empty acceptance_criteria list",
                            auto_fixable=False,
                        )
                    )
                elif not isinstance(acceptance_criteria, list):
                    result.diagnostics.append(
                        Diagnostic(
                            code="INVALID_ACCEPTANCE_CRITERIA",
                            message=(
                                f"Task node '{node_id}' metadata.acceptance_criteria must be a list of strings"
                            ),
                            severity="error",
                            category="metadata",
                            location=node_id,
                            suggested_fix="Provide acceptance_criteria as an array of strings",
                            auto_fixable=False,
                        )
                    )
                elif not acceptance_criteria:
                    result.diagnostics.append(
                        Diagnostic(
                            code="MISSING_ACCEPTANCE_CRITERIA",
                            message=f"Task node '{node_id}' must include at least one acceptance criterion",
                            severity="error",
                            category="metadata",
                            location=node_id,
                            suggested_fix="Add at least one acceptance criterion",
                            auto_fixable=False,
                        )
                    )
                else:
                    # A list exists: flag blank/non-string entries.
                    invalid_items = [
                        idx
                        for idx, item in enumerate(acceptance_criteria)
                        if not _nonempty_string(item)
                    ]
                    if invalid_items:
                        result.diagnostics.append(
                            Diagnostic(
                                code="INVALID_ACCEPTANCE_CRITERIA",
                                message=(
                                    f"Task node '{node_id}' has invalid acceptance_criteria entries"
                                ),
                                severity="error",
                                category="metadata",
                                location=node_id,
                                suggested_fix="Ensure acceptance_criteria contains non-empty strings",
                                auto_fixable=False,
                            )
                        )

            # Fall back to the legacy 'category' field when deciding whether
            # a file_path is required.
            category_for_file_path = task_category
            if category_for_file_path is None:
                legacy_category = metadata.get("category")
                if isinstance(legacy_category, str) and legacy_category.strip():
                    category_for_file_path = legacy_category.strip().lower()

            # file_path required for implementation and refactoring.
            # Do not auto-generate placeholder paths; the authoring agent/user must
            # provide a real path in the target codebase.
            if category_for_file_path in ["implementation", "refactoring"]:
                file_path = metadata.get("file_path")
                if not _nonempty_string(file_path):
                    result.diagnostics.append(
                        Diagnostic(
                            code="MISSING_FILE_PATH",
                            message=f"Task node '{node_id}' with category '{category_for_file_path}' missing metadata.file_path",
                            severity="error",
                            category="metadata",
                            location=node_id,
                            suggested_fix=(
                                "Set metadata.file_path to the real repo-relative path of the primary file impacted"
                            ),
                            auto_fixable=False,
                        )
                    )
|
1143
|
+
# Fix action functions
|
|
1144
|
+
|
|
1145
|
+
|
|
1146
|
+
def get_fix_actions(
    result: ValidationResult, spec_data: Dict[str, Any]
) -> List[FixAction]:
    """
    Generate fix actions from validation diagnostics.

    Only diagnostics flagged auto_fixable produce actions, and actions
    sharing an id (e.g. the single shared counts-recalculation action)
    are de-duplicated, keeping the first occurrence.

    Args:
        result: ValidationResult with diagnostics
        spec_data: Original spec data

    Returns:
        List of FixAction objects that can be applied
    """
    hierarchy = spec_data.get("hierarchy", {})
    actions: List[FixAction] = []
    seen_ids = set()

    for diag in result.diagnostics:
        if not diag.auto_fixable:
            continue
        action = _build_fix_action(diag, spec_data, hierarchy)
        if action is None or action.id in seen_ids:
            continue
        seen_ids.add(action.id)
        actions.append(action)

    return actions
1175
|
+
def _build_fix_action(
    diag: Diagnostic, spec_data: Dict[str, Any], hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build a fix action for a diagnostic.

    Returns None for diagnostics without an auto-fix (including
    INVALID_TASK_CATEGORY, whose auto-fix is intentionally disabled in
    favor of manual correction).
    """
    # All count-related problems funnel into one shared recalculation fix.
    if diag.code in {
        "TOTAL_TASKS_MISMATCH",
        "COMPLETED_TASKS_MISMATCH",
        "COMPLETED_EXCEEDS_TOTAL",
        "INVALID_LEAF_COUNT",
    }:
        return _build_counts_fix(diag, spec_data)

    if diag.code == "INVALID_DATE_FORMAT":
        return _build_date_fix(diag, spec_data)

    # Remaining builders all take (diag, hierarchy); dispatch by code.
    builders = {
        "PARENT_CHILD_MISMATCH": _build_hierarchy_align_fix,
        "ORPHANED_NODES": _build_orphan_fix,
        "INVALID_ROOT_PARENT": _build_root_parent_fix,
        "MISSING_NODE_FIELD": _build_missing_fields_fix,
        "INVALID_NODE_TYPE": _build_type_normalize_fix,
        "INVALID_STATUS": _build_status_normalize_fix,
        "EMPTY_TITLE": _build_title_generate_fix,
        "BIDIRECTIONAL_INCONSISTENCY": _build_bidirectional_fix,
        "INVALID_DEPENDENCIES_TYPE": _build_deps_structure_fix,
        "MISSING_VERIFICATION_TYPE": _build_verification_type_fix,
        "INVALID_VERIFICATION_TYPE": _build_invalid_verification_type_fix,
    }
    builder = builders.get(diag.code)
    if builder is not None:
        return builder(diag, hierarchy)

    # INVALID_TASK_CATEGORY auto-fix disabled - manual correction required
    # if code == "INVALID_TASK_CATEGORY":
    #     return _build_task_category_fix(diag, hierarchy)

    return None
|
+
def _build_date_fix(diag: Diagnostic, spec_data: Dict[str, Any]) -> Optional[FixAction]:
    """Build fix for date normalization.

    The diagnostic's location names the top-level timestamp field whose
    value should be rewritten in ISO 8601 form.
    """
    field_name = diag.location
    if not field_name:
        return None

    def apply(data: Dict[str, Any]) -> None:
        # Only overwrite when normalization actually produced a value.
        normalized = _normalize_timestamp(data.get(field_name))
        if normalized:
            data[field_name] = normalized

    return FixAction(
        id=f"date.normalize:{field_name}",
        description=f"Normalize {field_name} to ISO 8601",
        category="structure",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Normalize timestamp field: {field_name}",
        apply=apply,
    )
1255
|
+
def _build_hierarchy_align_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for parent/child alignment.

    Recovers the parent and child IDs from the diagnostic message and
    makes the child's parent pointer agree with the parent's children list.
    """
    parsed = re.search(r"'([^']+)' lists '([^']+)' as child", diag.message)
    if parsed is None:
        return None

    parent_id, child_id = parsed.group(1), parsed.group(2)

    def apply(data: Dict[str, Any]) -> None:
        hier = data.get("hierarchy", {})
        parent = hier.get(parent_id)
        child = hier.get(child_id)
        if not parent or not child:
            return
        siblings = parent.setdefault("children", [])
        if child_id not in siblings:
            siblings.append(child_id)
        child["parent"] = parent_id

    label = f"Align {child_id} parent reference with {parent_id}"
    return FixAction(
        id=f"hierarchy.align:{parent_id}->{child_id}",
        description=label,
        category="hierarchy",
        severity=diag.severity,
        auto_apply=True,
        preview=label,
        apply=apply,
    )
1288
|
+
def _build_orphan_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for orphaned nodes.

    Recovers the comma-separated orphan IDs from the diagnostic message
    and reattaches each one to spec-root.
    """
    parsed = re.search(r"not reachable from spec-root:\s*(.+)$", diag.message)
    if parsed is None:
        return None

    orphan_ids = [part.strip() for part in parsed.group(1).split(",")]

    def apply(data: Dict[str, Any]) -> None:
        hier = data.get("hierarchy", {})
        root = hier.get("spec-root")
        if not root:
            return

        root_children = root.setdefault("children", [])
        for orphan_id in orphan_ids:
            if orphan_id not in hier:
                continue
            hier[orphan_id]["parent"] = "spec-root"
            if orphan_id not in root_children:
                root_children.append(orphan_id)

    count = len(orphan_ids)
    label = f"Attach {count} orphaned node(s) to spec-root"
    return FixAction(
        id=f"hierarchy.attach_orphans:{count}",
        description=label,
        category="hierarchy",
        severity=diag.severity,
        auto_apply=True,
        preview=label,
        apply=apply,
    )
|
1323
|
+
def _build_root_parent_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for spec-root having non-null parent."""

    def apply(data: Dict[str, Any]) -> None:
        # The root node, by definition, has no parent.
        root = data.get("hierarchy", {}).get("spec-root")
        if root:
            root["parent"] = None

    label = "Set spec-root parent to null"
    return FixAction(
        id="hierarchy.fix_root_parent",
        description=label,
        category="hierarchy",
        severity=diag.severity,
        auto_apply=True,
        preview=label,
        apply=apply,
    )
|
1345
|
+
def _build_missing_fields_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for missing node fields.

    The apply callback fills in every required field the node lacks. The
    parent is derived from whichever node already lists this node as a
    child (falling back to spec-root), so the fix cannot introduce a
    PARENT_CHILD_MISMATCH.
    """
    node_id = diag.location
    if not node_id:
        return None

    def apply(data: Dict[str, Any]) -> None:
        hier = data.get("hierarchy", {})
        node = hier.get(node_id)
        if not node:
            return

        node.setdefault("type", "task")
        if "title" not in node:
            # Same derivation as _build_title_generate_fix: treat both '-'
            # and '_' as word separators (previously '_' was left in the
            # generated title, making the two fixes inconsistent).
            node["title"] = node_id.replace("-", " ").replace("_", " ").title()
        node.setdefault("status", "pending")
        if "parent" not in node:
            # Find actual parent by checking which node lists this node as a child.
            # This prevents regression where we set parent="spec-root" but the node
            # is actually in another node's children list (causing PARENT_CHILD_MISMATCH).
            actual_parent = "spec-root"  # fallback if not found in any children list
            for other_id, other_node in hier.items():
                if not isinstance(other_node, dict):
                    continue
                children = other_node.get("children", [])
                if isinstance(children, list) and node_id in children:
                    actual_parent = other_id
                    break
            node["parent"] = actual_parent
        node.setdefault("children", [])
        if "total_tasks" not in node:
            # Leaf-like types count as one task; container types start at zero.
            node["total_tasks"] = (
                1 if node.get("type") in {"task", "subtask", "verify"} else 0
            )
        node.setdefault("completed_tasks", 0)
        node.setdefault("metadata", {})

    return FixAction(
        id=f"node.add_missing_fields:{node_id}",
        description=f"Add missing fields to {node_id}",
        category="node",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Add missing required fields to {node_id}",
        apply=apply,
    )
|
1400
|
+
def _build_type_normalize_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for invalid node type."""
    node_id = diag.location
    if not node_id:
        return None

    def apply(data: Dict[str, Any]) -> None:
        # Rewrite the type in place via the shared normalizer.
        target = data.get("hierarchy", {}).get(node_id)
        if target:
            target["type"] = _normalize_node_type(target.get("type", ""))

    return FixAction(
        id=f"node.normalize_type:{node_id}",
        description=f"Normalize type for {node_id}",
        category="node",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Normalize node type for {node_id}",
        apply=apply,
    )
|
1426
|
+
def _build_status_normalize_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for invalid status."""
    node_id = diag.location
    if not node_id:
        return None

    def apply(data: Dict[str, Any]) -> None:
        # Rewrite the status in place via the shared normalizer.
        target = data.get("hierarchy", {}).get(node_id)
        if target:
            target["status"] = _normalize_status(target.get("status"))

    return FixAction(
        id=f"status.normalize:{node_id}",
        description=f"Normalize status for {node_id}",
        category="node",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Normalize status for {node_id}",
        apply=apply,
    )
|
1452
|
+
def _build_title_generate_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for empty title."""
    node_id = diag.location
    if not node_id:
        return None

    def apply(data: Dict[str, Any]) -> None:
        # Derive a human-readable title from the ID's words ('-' and '_'
        # both act as separators).
        target = data.get("hierarchy", {}).get(node_id)
        if target:
            target["title"] = node_id.replace("-", " ").replace("_", " ").title()

    return FixAction(
        id=f"node.generate_title:{node_id}",
        description=f"Generate title for {node_id}",
        category="node",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Generate title from node ID for {node_id}",
        apply=apply,
    )
|
1478
|
+
def _build_counts_fix(
    diag: Diagnostic, spec_data: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for task count issues.

    All count diagnostics share one action (id "counts.recalculate") that
    recomputes every rollup in the hierarchy; get_fix_actions de-duplicates
    them down to a single entry.
    """
    return FixAction(
        id="counts.recalculate",
        description="Recalculate task count rollups",
        category="counts",
        severity=diag.severity,
        auto_apply=True,
        preview="Recalculate total/completed task rollups across the hierarchy",
        apply=lambda data: _recalculate_counts(data),
    )
|
1497
|
+
def _build_bidirectional_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build a FixAction that makes a blocks/blocked_by pair mutually consistent."""
    # The node IDs are only available embedded in the diagnostic message,
    # phrased either as "X blocks Y" or as "Y blocked_by X".
    match = re.search(r"'([^']+)' blocks '([^']+)'", diag.message)
    if match:
        blocker_id, blocked_id = match.group(1), match.group(2)
    else:
        match = re.search(r"'([^']+)' blocked_by '([^']+)'", diag.message)
        if not match:
            # Message shape not recognized; nothing to fix.
            return None
        blocked_id, blocker_id = match.group(1), match.group(2)

    def _apply(data: Dict[str, Any]) -> None:
        hier = data.get("hierarchy", {})
        blocker = hier.get(blocker_id)
        blocked = hier.get(blocked_id)
        if not blocker or not blocked:
            return

        # Replace malformed dependency containers wholesale, then make sure
        # each of the three lists exists on both sides.
        for side in (blocker, blocked):
            if not isinstance(side.get("dependencies"), dict):
                side["dependencies"] = {"blocks": [], "blocked_by": [], "depends": []}
            for dep_key in ("blocks", "blocked_by", "depends"):
                side["dependencies"].setdefault(dep_key, [])

        # Record the relationship in both directions without duplicating it.
        forward = blocker["dependencies"]["blocks"]
        backward = blocked["dependencies"]["blocked_by"]
        if blocked_id not in forward:
            forward.append(blocked_id)
        if blocker_id not in backward:
            backward.append(blocker_id)

    return FixAction(
        id=f"dependency.sync_bidirectional:{blocker_id}-{blocked_id}",
        description=f"Sync bidirectional dependency: {blocker_id} blocks {blocked_id}",
        category="dependency",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Sync bidirectional dependency: {blocker_id} blocks {blocked_id}",
        apply=_apply,
    )
|
1552
|
+
def _build_deps_structure_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build a FixAction that installs an empty dependencies container on a node."""
    node_id = diag.location
    if not node_id:
        return None

    def _apply(data: Dict[str, Any]) -> None:
        target = data.get("hierarchy", {}).get(node_id)
        if not target:
            return
        # Only overwrite when the field is absent or has the wrong shape.
        if not isinstance(target.get("dependencies"), dict):
            target["dependencies"] = {"blocks": [], "blocked_by": [], "depends": []}

    return FixAction(
        id=f"dependency.create_structure:{node_id}",
        description=f"Create dependencies structure for {node_id}",
        category="dependency",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Create dependencies structure for {node_id}",
        apply=_apply,
    )
|
1579
|
+
def _build_verification_type_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build a FixAction that supplies a default verification type."""
    node_id = diag.location
    if not node_id:
        return None

    def _apply(data: Dict[str, Any]) -> None:
        target = data.get("hierarchy", {}).get(node_id)
        if not target:
            return
        meta = target.setdefault("metadata", {})
        # Leave an existing value alone; only fill the default when missing.
        meta.setdefault("verification_type", "run-tests")

    return FixAction(
        id=f"metadata.fix_verification_type:{node_id}",
        description=f"Set verification_type to 'run-tests' for {node_id}",
        category="metadata",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Set verification_type to 'run-tests' for {node_id}",
        apply=_apply,
    )
|
1607
|
+
def _build_invalid_verification_type_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build fix for invalid verification type by mapping to a canonical value.

    Args:
        diag: Diagnostic whose ``location`` names the offending node.
        hierarchy: Spec hierarchy (unused directly; the fix re-reads it from
            the data passed to ``apply`` so it operates on live state).

    Returns:
        A FixAction, or None when the diagnostic carries no node location.
    """
    node_id = diag.location
    if not node_id:
        return None

    def apply(data: Dict[str, Any]) -> None:
        hier = data.get("hierarchy", {})
        node = hier.get(node_id)
        if not node:
            return
        # Use setdefault so the metadata dict is attached to the node: the
        # previous `node.get("metadata", {})` returned a detached dict when
        # the key was missing, and the fix was silently lost.
        metadata = node.setdefault("metadata", {})
        current_type = metadata.get("verification_type", "")

        # Map legacy values to canonical
        mapped_type = VERIFICATION_TYPE_MAPPING.get(current_type)
        if mapped_type:
            metadata["verification_type"] = mapped_type
        elif current_type not in VALID_VERIFICATION_TYPES:
            metadata["verification_type"] = "manual"  # safe fallback for unknown values

    return FixAction(
        id=f"metadata.fix_invalid_verification_type:{node_id}",
        description=f"Map verification_type to canonical value for {node_id}",
        category="metadata",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Map legacy verification_type to canonical value for {node_id}",
        apply=apply,
    )
|
1641
|
+
# NOTE: We intentionally do not auto-fix missing `metadata.file_path`.
|
|
1642
|
+
# It must be a real repo-relative path in the target workspace.
|
|
1643
|
+
|
|
1644
|
+
|
|
1645
|
+
def _build_task_category_fix(
    diag: Diagnostic, hierarchy: Dict[str, Any]
) -> Optional[FixAction]:
    """Build a FixAction that resets an invalid task category to the default."""
    node_id = diag.location
    if not node_id:
        return None

    def _apply(data: Dict[str, Any]) -> None:
        target = data.get("hierarchy", {}).get(node_id)
        if not target:
            return
        # "implementation" is the conservative default category.
        target.setdefault("metadata", {})["task_category"] = "implementation"

    return FixAction(
        id=f"metadata.fix_task_category:{node_id}",
        description=f"Set task_category to 'implementation' for {node_id}",
        category="metadata",
        severity=diag.severity,
        auto_apply=True,
        preview=f"Set task_category to 'implementation' for {node_id}",
        apply=_apply,
    )
|
1673
|
+
def apply_fixes(
    actions: List[FixAction],
    spec_path: str,
    *,
    dry_run: bool = False,
    create_backup: bool = True,
    capture_diff: bool = False,
) -> FixReport:
    """
    Apply fix actions to a spec file.

    Args:
        actions: List of FixAction objects to apply
        spec_path: Path to spec file
        dry_run: If True, don't actually save changes
        create_backup: If True, create backup before modifying
        capture_diff: If True, capture before/after state

    Returns:
        FixReport with results
    """
    report = FixReport(spec_path=spec_path)

    # Dry runs report every action as skipped without touching the file.
    if dry_run:
        report.skipped_actions.extend(actions)
        return report

    try:
        # Spec files are JSON and therefore UTF-8; pin the encoding so reads
        # do not depend on the platform's locale default.
        with open(spec_path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except (OSError, json.JSONDecodeError):
        # Unreadable or malformed spec: return the empty report (best effort).
        return report

    # Capture before state for diffing.
    if capture_diff:
        report.before_state = copy.deepcopy(data)

    # Create a sibling .json.backup copy before mutating anything.
    if create_backup:
        backup_path = Path(spec_path).with_suffix(".json.backup")
        try:
            with open(backup_path, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
            report.backup_path = str(backup_path)
        except OSError:
            # Backup failure is non-fatal; continue without one.
            pass

    # Apply each action independently; a failing action is recorded as
    # skipped rather than aborting the batch.
    for action in actions:
        try:
            action.apply(data)
            report.applied_actions.append(action)
        except Exception:
            report.skipped_actions.append(action)

    # Recalculate counts after all fixes so rollups stay consistent.
    if report.applied_actions:
        _recalculate_counts(data)

    # Capture after state for diffing.
    if capture_diff:
        report.after_state = copy.deepcopy(data)

    # Save changes. A write failure is deliberately ignored (best-effort
    # save, matching the backup policy above).
    try:
        with open(spec_path, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2)
    except OSError:
        pass

    return report
|
1745
|
+
|
|
1746
|
+
# Statistics functions
|
|
1747
|
+
|
|
1748
|
+
|
|
1749
|
+
def calculate_stats(
    spec_data: Dict[str, Any], file_path: Optional[str] = None
) -> SpecStats:
    """
    Calculate statistics for a spec file.

    Args:
        spec_data: Parsed JSON spec data
        file_path: Optional path to spec file for size calculation

    Returns:
        SpecStats with calculated metrics
    """
    hierarchy = spec_data.get("hierarchy", {}) or {}

    totals = {
        "nodes": len(hierarchy),
        "tasks": 0,
        "phases": 0,
        "verifications": 0,
    }
    status_counts = {name: 0 for name in STATUS_FIELDS}
    max_depth = 0

    def walk(current_id: str, level: int) -> None:
        """Depth-first pass accumulating type totals, statuses, and depth."""
        nonlocal max_depth
        entry = hierarchy.get(current_id, {})
        kind = entry.get("type")

        max_depth = max(max_depth, level)

        if kind in {"task", "subtask"}:
            totals["tasks"] += 1
            normalized = (
                entry.get("status", "").lower().replace(" ", "_").replace("-", "_")
            )
            if normalized in status_counts:
                status_counts[normalized] += 1
        elif kind == "phase":
            totals["phases"] += 1
        elif kind == "verify":
            totals["verifications"] += 1

        for child in entry.get("children", []) or []:
            # Only descend into children that actually exist in the hierarchy.
            if child in hierarchy:
                walk(child, level + 1)

    if "spec-root" in hierarchy:
        walk("spec-root", 0)

    total_tasks = totals["tasks"]
    # Guard against zero phases so the average never divides by zero.
    avg_tasks_per_phase = round(total_tasks / (totals["phases"] or 1), 2)

    root = hierarchy.get("spec-root", {})
    root_total = root.get("total_tasks", total_tasks)
    root_done = root.get("completed_tasks", 0)

    verification_coverage = (
        totals["verifications"] / total_tasks if total_tasks else 0.0
    )
    progress = root_done / root_total if root_total else 0.0

    file_size = 0.0
    if file_path:
        try:
            file_size = Path(file_path).stat().st_size / 1024
        except OSError:
            file_size = 0.0

    return SpecStats(
        spec_id=spec_data.get("spec_id", "unknown"),
        title=spec_data.get("title", ""),
        version=spec_data.get("version", ""),
        status=root.get("status", "unknown"),
        totals=totals,
        status_counts=status_counts,
        max_depth=max_depth,
        avg_tasks_per_phase=avg_tasks_per_phase,
        verification_coverage=verification_coverage,
        progress=progress,
        file_size_kb=file_size,
    )
|
1832
|
+
# Helper functions
|
|
1833
|
+
|
|
1834
|
+
|
|
1835
|
+
def _is_valid_spec_id(spec_id: str) -> bool:
|
|
1836
|
+
"""Check if spec_id follows the recommended format."""
|
|
1837
|
+
pattern = r"^[a-z0-9-]+-\d{4}-\d{2}-\d{2}-\d{3}$"
|
|
1838
|
+
return bool(re.match(pattern, spec_id))
|
|
1839
|
+
|
|
1840
|
+
|
|
1841
|
+
def _is_valid_iso8601(value: str) -> bool:
|
|
1842
|
+
"""Check if value is valid ISO 8601 date."""
|
|
1843
|
+
try:
|
|
1844
|
+
# Try parsing with Z suffix
|
|
1845
|
+
if value.endswith("Z"):
|
|
1846
|
+
datetime.fromisoformat(value.replace("Z", "+00:00"))
|
|
1847
|
+
else:
|
|
1848
|
+
datetime.fromisoformat(value)
|
|
1849
|
+
return True
|
|
1850
|
+
except ValueError:
|
|
1851
|
+
return False
|
|
1852
|
+
|
|
1853
|
+
|
|
1854
|
+
def _normalize_timestamp(value: Any) -> Optional[str]:
|
|
1855
|
+
"""Normalize timestamp to ISO 8601 format."""
|
|
1856
|
+
if not value:
|
|
1857
|
+
return None
|
|
1858
|
+
|
|
1859
|
+
text = str(value).strip()
|
|
1860
|
+
candidate = text.replace("Z", "")
|
|
1861
|
+
|
|
1862
|
+
for fmt in ("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", "%Y-%m-%dT%H:%M"):
|
|
1863
|
+
try:
|
|
1864
|
+
dt = datetime.strptime(candidate, fmt)
|
|
1865
|
+
return dt.replace(tzinfo=timezone.utc).isoformat().replace("+00:00", "Z")
|
|
1866
|
+
except ValueError:
|
|
1867
|
+
continue
|
|
1868
|
+
|
|
1869
|
+
try:
|
|
1870
|
+
dt = datetime.fromisoformat(text.replace("Z", "+00:00"))
|
|
1871
|
+
return dt.astimezone(timezone.utc).isoformat().replace("+00:00", "Z")
|
|
1872
|
+
except ValueError:
|
|
1873
|
+
return None
|
|
1874
|
+
|
|
1875
|
+
|
|
1876
|
+
def _normalize_status(value: Any) -> str:
    """Normalize a status value to one of the canonical status names."""
    if not value:
        return "pending"

    # Lowercase and unify separators so "In Progress" and "in-progress" agree.
    text = str(value).strip().lower().replace("-", "_").replace(" ", "_")
    aliases = {
        "inprogress": "in_progress",
        "in__progress": "in_progress",
        "todo": "pending",
        "to_do": "pending",
        "complete": "completed",
        "done": "completed",
    }
    canonical = aliases.get(text, text)

    # Anything still unrecognized falls back to "pending".
    return canonical if canonical in VALID_STATUSES else "pending"
|
1897
|
+
|
|
1898
|
+
def _normalize_node_type(value: Any) -> str:
    """Normalize a node type value to one of the canonical node types."""
    if not value:
        return "task"

    # Lowercase and unify separators before alias lookup.
    text = str(value).strip().lower().replace(" ", "_").replace("-", "_")
    aliases = {
        "tasks": "task",
        "sub_task": "subtask",
        "verification": "verify",
        "validate": "verify",
    }
    canonical = aliases.get(text, text)

    # Unknown types degrade to the most generic kind.
    return canonical if canonical in VALID_NODE_TYPES else "task"
|
1917
|
+
|
|
1918
|
+
def _recalculate_counts(spec_data: Dict[str, Any]) -> None:
|
|
1919
|
+
"""Recalculate task counts for all nodes in hierarchy."""
|
|
1920
|
+
hierarchy = spec_data.get("hierarchy", {})
|
|
1921
|
+
if not hierarchy:
|
|
1922
|
+
return
|
|
1923
|
+
|
|
1924
|
+
# Process bottom-up: leaves first, then parents
|
|
1925
|
+
def calculate_node(node_id: str) -> tuple:
|
|
1926
|
+
"""Return (total_tasks, completed_tasks) for a node."""
|
|
1927
|
+
node = hierarchy.get(node_id, {})
|
|
1928
|
+
children = node.get("children", [])
|
|
1929
|
+
node_type = node.get("type", "")
|
|
1930
|
+
status = node.get("status", "")
|
|
1931
|
+
|
|
1932
|
+
if not children:
|
|
1933
|
+
# Leaf node
|
|
1934
|
+
if node_type in {"task", "subtask", "verify"}:
|
|
1935
|
+
total = 1
|
|
1936
|
+
completed = 1 if status == "completed" else 0
|
|
1937
|
+
else:
|
|
1938
|
+
total = 0
|
|
1939
|
+
completed = 0
|
|
1940
|
+
else:
|
|
1941
|
+
# Parent node: sum children
|
|
1942
|
+
total = 0
|
|
1943
|
+
completed = 0
|
|
1944
|
+
for child_id in children:
|
|
1945
|
+
if child_id in hierarchy:
|
|
1946
|
+
child_total, child_completed = calculate_node(child_id)
|
|
1947
|
+
total += child_total
|
|
1948
|
+
completed += child_completed
|
|
1949
|
+
|
|
1950
|
+
node["total_tasks"] = total
|
|
1951
|
+
node["completed_tasks"] = completed
|
|
1952
|
+
return total, completed
|
|
1953
|
+
|
|
1954
|
+
if "spec-root" in hierarchy:
|
|
1955
|
+
calculate_node("spec-root")
|
|
1956
|
+
|
|
1957
|
+
|
|
1958
|
+
# Verification management functions

# Valid verification results accepted by add_verification(); results are
# upper-cased before comparison against this tuple.
VERIFICATION_RESULTS = ("PASSED", "FAILED", "PARTIAL")
|
|
1964
|
+
def add_verification(
    spec_data: Dict[str, Any],
    verify_id: str,
    result: str,
    command: Optional[str] = None,
    output: Optional[str] = None,
    issues: Optional[str] = None,
    notes: Optional[str] = None,
) -> tuple[bool, Optional[str]]:
    """
    Add verification result to a verify node.

    Records verification results including test outcomes, command output,
    and issues found during verification. The node's metadata receives a
    capped history list plus quick-access `last_result` / `last_verified_at`
    fields.

    Args:
        spec_data: The loaded spec data dict (modified in place).
        verify_id: Verification node ID (e.g., verify-1-1).
        result: Verification result (PASSED, FAILED, PARTIAL).
        command: Optional command that was run for verification.
        output: Optional command output or test results.
        issues: Optional issues found during verification.
        notes: Optional additional notes about the verification.

    Returns:
        Tuple of (success, error_message).
        On success: (True, None)
        On failure: (False, "error message")
    """
    # Validate result against the canonical tuple (case-insensitive).
    result_upper = result.upper().strip()
    if result_upper not in VERIFICATION_RESULTS:
        return (
            False,
            f"Invalid result '{result}'. Must be one of: {', '.join(VERIFICATION_RESULTS)}",
        )

    # Get hierarchy
    hierarchy = spec_data.get("hierarchy")
    if not hierarchy or not isinstance(hierarchy, dict):
        return False, "Invalid spec data: missing or invalid hierarchy"

    # Find the verify node
    node = hierarchy.get(verify_id)
    if node is None:
        return False, f"Verification node '{verify_id}' not found"

    # Validate node type: only 'verify' nodes carry verification history.
    node_type = node.get("type")
    if node_type != "verify":
        return False, f"Node '{verify_id}' is type '{node_type}', expected 'verify'"

    # Get or create metadata
    metadata = node.get("metadata")
    if metadata is None:
        metadata = {}
        node["metadata"] = metadata

    # Build verification result entry; timestamp is UTC with a "Z" suffix.
    verification_entry: Dict[str, Any] = {
        "result": result_upper,
        "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
    }

    if command:
        verification_entry["command"] = command.strip()

    if output:
        # Truncate output if very long so spec files stay a manageable size.
        max_output_len = MAX_STRING_LENGTH
        output_text = output.strip()
        if len(output_text) > max_output_len:
            output_text = output_text[:max_output_len] + "\n... (truncated)"
        verification_entry["output"] = output_text

    if issues:
        verification_entry["issues"] = issues.strip()

    if notes:
        verification_entry["notes"] = notes.strip()

    # Add to verification history (keep last N entries). A non-list value
    # is treated as corrupt and replaced with a fresh history.
    verification_history = metadata.get("verification_history", [])
    if not isinstance(verification_history, list):
        verification_history = []

    verification_history.append(verification_entry)

    # Keep only last 10 entries
    if len(verification_history) > 10:
        verification_history = verification_history[-10:]

    metadata["verification_history"] = verification_history

    # Update latest result fields for quick access
    metadata["last_result"] = result_upper
    metadata["last_verified_at"] = verification_entry["timestamp"]

    return True, None
|
2063
|
+
|
|
2064
|
+
|
|
2065
|
+
def execute_verification(
    spec_data: Dict[str, Any],
    verify_id: str,
    record: bool = False,
    timeout: int = 300,
    cwd: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Execute verification command and capture results.

    Runs the verification command defined in a verify node's metadata
    and captures output, exit code, and result status.

    Args:
        spec_data: The loaded spec data dict.
        verify_id: Verification node ID (e.g., verify-1-1).
        record: If True, automatically record result to spec using add_verification().
        timeout: Command timeout in seconds (default: 300).
        cwd: Working directory for command execution (default: current directory).

    Returns:
        Dict with execution results:
        - success: Whether execution completed (not result status)
        - spec_id: The specification ID
        - verify_id: The verification ID
        - result: Execution result (PASSED, FAILED, PARTIAL)
        - command: Command that was executed
        - output: Combined stdout/stderr output
        - exit_code: Command exit code
        - recorded: Whether result was recorded to spec
        - error: Error message if execution failed

    Example:
        >>> result = execute_verification(spec_data, "verify-1-1", record=True)
        >>> if result["success"]:
        ...     print(f"Verification {result['result']}: {result['exit_code']}")
    """
    import subprocess

    # Pre-populated response skeleton; fields are filled in as stages pass.
    response: Dict[str, Any] = {
        "success": False,
        "spec_id": spec_data.get("spec_id", "unknown"),
        "verify_id": verify_id,
        "result": None,
        "command": None,
        "output": None,
        "exit_code": None,
        "recorded": False,
        "error": None,
    }

    # Get hierarchy
    hierarchy = spec_data.get("hierarchy")
    if not hierarchy or not isinstance(hierarchy, dict):
        response["error"] = "Invalid spec data: missing or invalid hierarchy"
        return response

    # Find the verify node
    node = hierarchy.get(verify_id)
    if node is None:
        response["error"] = f"Verification node '{verify_id}' not found"
        return response

    # Validate node type
    node_type = node.get("type")
    if node_type != "verify":
        response["error"] = (
            f"Node '{verify_id}' is type '{node_type}', expected 'verify'"
        )
        return response

    # Get command from metadata
    metadata = node.get("metadata", {})
    command = metadata.get("command")

    if not command:
        response["error"] = f"No command defined in verify node '{verify_id}' metadata"
        return response

    response["command"] = command

    # Execute the command.
    # NOTE(review): shell=True runs the spec-provided command string through
    # the shell; this assumes spec files are trusted input — confirm upstream.
    try:
        proc = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=timeout,
            cwd=cwd,
        )

        exit_code = proc.returncode
        stdout = proc.stdout or ""
        stderr = proc.stderr or ""

        # Combine output; stderr is labeled so it stays distinguishable.
        output_parts = []
        if stdout.strip():
            output_parts.append(stdout.strip())
        if stderr.strip():
            output_parts.append(f"[stderr]\n{stderr.strip()}")
        output = "\n".join(output_parts) if output_parts else "(no output)"

        # Truncate if too long
        if len(output) > MAX_STRING_LENGTH:
            output = output[:MAX_STRING_LENGTH] + "\n... (truncated)"

        response["exit_code"] = exit_code
        response["output"] = output

        # Determine result based on exit code (0 => PASSED, else FAILED).
        if exit_code == 0:
            result = "PASSED"
        else:
            result = "FAILED"

        response["result"] = result
        response["success"] = True

        # Optionally record result to spec
        if record:
            record_success, record_error = add_verification(
                spec_data=spec_data,
                verify_id=verify_id,
                result=result,
                command=command,
                output=output,
            )
            if record_success:
                response["recorded"] = True
            else:
                response["recorded"] = False
                # Don't fail the whole operation, just note the recording failed
                if response.get("error"):
                    response["error"] += f"; Recording failed: {record_error}"
                else:
                    response["error"] = f"Recording failed: {record_error}"

    except subprocess.TimeoutExpired:
        # Timeout is reported with a sentinel exit code of -1.
        response["error"] = f"Command timed out after {timeout} seconds"
        response["result"] = "FAILED"
        response["exit_code"] = -1
        response["output"] = f"Command timed out after {timeout} seconds"

    except subprocess.SubprocessError as e:
        response["error"] = f"Command execution failed: {e}"
        response["result"] = "FAILED"

    except Exception as e:
        # Broad catch keeps a single verification failure from crashing the
        # caller; the error is surfaced in the response instead.
        response["error"] = f"Unexpected error: {e}"
        response["result"] = "FAILED"

    return response
|
2219
|
+
|
|
2220
|
+
|
|
2221
|
+
def format_verification_summary(
|
|
2222
|
+
verification_data: Dict[str, Any] | List[Dict[str, Any]],
|
|
2223
|
+
) -> Dict[str, Any]:
|
|
2224
|
+
"""
|
|
2225
|
+
Format verification results into a human-readable summary.
|
|
2226
|
+
|
|
2227
|
+
Processes verification results (from execute_verification or JSON input)
|
|
2228
|
+
and produces a structured summary with counts and formatted text.
|
|
2229
|
+
|
|
2230
|
+
Args:
|
|
2231
|
+
verification_data: Either:
|
|
2232
|
+
- A single verification result dict (from execute_verification)
|
|
2233
|
+
- A list of verification result dicts
|
|
2234
|
+
- A dict with "verifications" key containing a list
|
|
2235
|
+
|
|
2236
|
+
Returns:
|
|
2237
|
+
Dict with formatted summary:
|
|
2238
|
+
- summary: Human-readable summary text
|
|
2239
|
+
- total_verifications: Total number of verifications
|
|
2240
|
+
- passed: Number of passed verifications
|
|
2241
|
+
- failed: Number of failed verifications
|
|
2242
|
+
- partial: Number of partial verifications
|
|
2243
|
+
- results: List of individual result summaries
|
|
2244
|
+
|
|
2245
|
+
Example:
|
|
2246
|
+
>>> results = [
|
|
2247
|
+
... execute_verification(spec_data, "verify-1"),
|
|
2248
|
+
... execute_verification(spec_data, "verify-2"),
|
|
2249
|
+
... ]
|
|
2250
|
+
>>> summary = format_verification_summary(results)
|
|
2251
|
+
>>> print(summary["summary"])
|
|
2252
|
+
"""
|
|
2253
|
+
# Normalize input to a list of verification results
|
|
2254
|
+
verifications: List[Dict[str, Any]] = []
|
|
2255
|
+
|
|
2256
|
+
if isinstance(verification_data, list):
|
|
2257
|
+
verifications = verification_data
|
|
2258
|
+
elif isinstance(verification_data, dict):
|
|
2259
|
+
if "verifications" in verification_data:
|
|
2260
|
+
verifications = verification_data.get("verifications", [])
|
|
2261
|
+
else:
|
|
2262
|
+
# Single verification result
|
|
2263
|
+
verifications = [verification_data]
|
|
2264
|
+
|
|
2265
|
+
# Count results by type
|
|
2266
|
+
passed = 0
|
|
2267
|
+
failed = 0
|
|
2268
|
+
partial = 0
|
|
2269
|
+
results: List[Dict[str, Any]] = []
|
|
2270
|
+
|
|
2271
|
+
for v in verifications:
|
|
2272
|
+
if not isinstance(v, dict):
|
|
2273
|
+
continue
|
|
2274
|
+
|
|
2275
|
+
result = (v.get("result") or "").upper()
|
|
2276
|
+
verify_id = v.get("verify_id", "unknown")
|
|
2277
|
+
command = v.get("command", "")
|
|
2278
|
+
output = v.get("output", "")
|
|
2279
|
+
error = v.get("error")
|
|
2280
|
+
|
|
2281
|
+
# Count by result type
|
|
2282
|
+
if result == "PASSED":
|
|
2283
|
+
passed += 1
|
|
2284
|
+
status_icon = "✓"
|
|
2285
|
+
elif result == "FAILED":
|
|
2286
|
+
failed += 1
|
|
2287
|
+
status_icon = "✗"
|
|
2288
|
+
elif result == "PARTIAL":
|
|
2289
|
+
partial += 1
|
|
2290
|
+
status_icon = "◐"
|
|
2291
|
+
else:
|
|
2292
|
+
status_icon = "?"
|
|
2293
|
+
|
|
2294
|
+
# Build individual result summary
|
|
2295
|
+
result_entry: Dict[str, Any] = {
|
|
2296
|
+
"verify_id": verify_id,
|
|
2297
|
+
"result": result or "UNKNOWN",
|
|
2298
|
+
"status_icon": status_icon,
|
|
2299
|
+
"command": command,
|
|
2300
|
+
}
|
|
2301
|
+
|
|
2302
|
+
if error:
|
|
2303
|
+
result_entry["error"] = error
|
|
2304
|
+
|
|
2305
|
+
# Truncate output for summary
|
|
2306
|
+
if output:
|
|
2307
|
+
output_preview = output[:200].strip()
|
|
2308
|
+
if len(output) > 200:
|
|
2309
|
+
output_preview += "..."
|
|
2310
|
+
result_entry["output_preview"] = output_preview
|
|
2311
|
+
|
|
2312
|
+
results.append(result_entry)
|
|
2313
|
+
|
|
2314
|
+
# Calculate totals
|
|
2315
|
+
total = len(results)
|
|
2316
|
+
|
|
2317
|
+
# Build summary text
|
|
2318
|
+
summary_lines = []
|
|
2319
|
+
summary_lines.append(f"Verification Summary: {total} total")
|
|
2320
|
+
summary_lines.append(f" ✓ Passed: {passed}")
|
|
2321
|
+
summary_lines.append(f" ✗ Failed: {failed}")
|
|
2322
|
+
if partial > 0:
|
|
2323
|
+
summary_lines.append(f" ◐ Partial: {partial}")
|
|
2324
|
+
summary_lines.append("")
|
|
2325
|
+
|
|
2326
|
+
# Add individual results
|
|
2327
|
+
if results:
|
|
2328
|
+
summary_lines.append("Results:")
|
|
2329
|
+
for r in results:
|
|
2330
|
+
icon = r["status_icon"]
|
|
2331
|
+
vid = r["verify_id"]
|
|
2332
|
+
res = r["result"]
|
|
2333
|
+
cmd = r.get("command", "")
|
|
2334
|
+
|
|
2335
|
+
line = f" {icon} {vid}: {res}"
|
|
2336
|
+
if cmd:
|
|
2337
|
+
# Truncate command for display
|
|
2338
|
+
cmd_display = cmd[:50]
|
|
2339
|
+
if len(cmd) > 50:
|
|
2340
|
+
cmd_display += "..."
|
|
2341
|
+
line += f" ({cmd_display})"
|
|
2342
|
+
|
|
2343
|
+
summary_lines.append(line)
|
|
2344
|
+
|
|
2345
|
+
if r.get("error"):
|
|
2346
|
+
summary_lines.append(f" Error: {r['error']}")
|
|
2347
|
+
|
|
2348
|
+
summary_text = "\n".join(summary_lines)
|
|
2349
|
+
|
|
2350
|
+
return {
|
|
2351
|
+
"summary": summary_text,
|
|
2352
|
+
"total_verifications": total,
|
|
2353
|
+
"passed": passed,
|
|
2354
|
+
"failed": failed,
|
|
2355
|
+
"partial": partial,
|
|
2356
|
+
"results": results,
|
|
2357
|
+
}
|