foundry-mcp 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -0
- foundry_mcp/cli/__init__.py +80 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +633 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +652 -0
- foundry_mcp/cli/commands/session.py +479 -0
- foundry_mcp/cli/commands/specs.py +856 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +259 -0
- foundry_mcp/cli/flags.py +266 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +850 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1636 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/feature_flags.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/journal.py +694 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1350 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +123 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +317 -0
- foundry_mcp/core/prometheus.py +577 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +546 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
- foundry_mcp/core/prompts/plan_review.py +623 -0
- foundry_mcp/core/providers/__init__.py +225 -0
- foundry_mcp/core/providers/base.py +476 -0
- foundry_mcp/core/providers/claude.py +460 -0
- foundry_mcp/core/providers/codex.py +619 -0
- foundry_mcp/core/providers/cursor_agent.py +642 -0
- foundry_mcp/core/providers/detectors.py +488 -0
- foundry_mcp/core/providers/gemini.py +405 -0
- foundry_mcp/core/providers/opencode.py +616 -0
- foundry_mcp/core/providers/opencode_wrapper.js +302 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +729 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +934 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +1650 -0
- foundry_mcp/core/task.py +1289 -0
- foundry_mcp/core/testing.py +450 -0
- foundry_mcp/core/validation.py +2081 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +234 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +289 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +174 -0
- foundry_mcp/dashboard/views/overview.py +160 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/sdd-spec-schema.json +386 -0
- foundry_mcp/server.py +164 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +71 -0
- foundry_mcp/tools/unified/authoring.py +1487 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +198 -0
- foundry_mcp/tools/unified/environment.py +939 -0
- foundry_mcp/tools/unified/error.py +462 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +632 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +745 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +629 -0
- foundry_mcp/tools/unified/review.py +685 -0
- foundry_mcp/tools/unified/review_helpers.py +299 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +580 -0
- foundry_mcp/tools/unified/spec.py +808 -0
- foundry_mcp/tools/unified/task.py +2202 -0
- foundry_mcp/tools/unified/test.py +370 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.3.3.dist-info/METADATA +337 -0
- foundry_mcp-0.3.3.dist-info/RECORD +135 -0
- foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
- foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,982 @@
|
|
|
1
|
+
"""Validation commands for SDD CLI.
|
|
2
|
+
|
|
3
|
+
Provides commands for spec validation, auto-fix, statistics, and reporting.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import time
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
import click
|
|
10
|
+
|
|
11
|
+
from foundry_mcp.cli.logging import cli_command, get_cli_logger
|
|
12
|
+
from foundry_mcp.cli.output import emit_error, emit_success
|
|
13
|
+
from foundry_mcp.cli.registry import get_context
|
|
14
|
+
from foundry_mcp.cli.resilience import (
|
|
15
|
+
MEDIUM_TIMEOUT,
|
|
16
|
+
with_sync_timeout,
|
|
17
|
+
handle_keyboard_interrupt,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
logger = get_cli_logger()
|
|
21
|
+
from foundry_mcp.core.spec import load_spec, find_spec_file
|
|
22
|
+
from foundry_mcp.core.validation import (
|
|
23
|
+
apply_fixes,
|
|
24
|
+
calculate_stats,
|
|
25
|
+
get_fix_actions,
|
|
26
|
+
validate_spec,
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@click.group("validate")
def validate_group() -> None:
    """Spec validation and fix commands."""
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@validate_group.command("check")
@click.argument("spec_id")
@click.pass_context
@cli_command("check")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Validation check timed out")
def validate_check_cmd(ctx: click.Context, spec_id: str) -> None:
    """Validate a specification and report diagnostics.

    SPEC_ID is the specification identifier.
    """
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # Without a specs directory there is nothing to validate; report and stop.
    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        return

    # Resolve and parse the spec document.
    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        emit_error(
            f"Specification not found: {spec_id}",
            code="SPEC_NOT_FOUND",
            error_type="not_found",
            remediation="Verify the spec ID exists using: sdd specs list",
            details={"spec_id": spec_id},
        )
        return

    # Run validation and flatten each diagnostic into a JSON-friendly dict.
    result = validate_spec(spec_data)
    diagnostics = [
        {
            "code": diag.code,
            "message": diag.message,
            "severity": diag.severity,
            "category": diag.category,
            "location": diag.location,
            "suggested_fix": diag.suggested_fix,
            "auto_fixable": diag.auto_fixable,
        }
        for diag in result.diagnostics
    ]

    emit_success(
        {
            "spec_id": result.spec_id,
            "is_valid": result.is_valid,
            "error_count": result.error_count,
            "warning_count": result.warning_count,
            "info_count": result.info_count,
            "diagnostics": diagnostics,
        }
    )
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
@validate_group.command("fix")
@click.argument("spec_id")
@click.option("--dry-run", is_flag=True, help="Preview fixes without applying.")
@click.option("--no-backup", is_flag=True, help="Skip creating backup file.")
@click.pass_context
@cli_command("fix")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Validation fix timed out")
def validate_fix_cmd(
    ctx: click.Context,
    spec_id: str,
    dry_run: bool,
    no_backup: bool,
) -> None:
    """Apply auto-fixes to a specification.

    SPEC_ID is the specification identifier.
    """
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # A specs directory is required before any lookup can happen.
    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        return

    # The on-disk path is needed because fixes are written back to the file.
    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        emit_error(
            f"Specification not found: {spec_id}",
            code="SPEC_NOT_FOUND",
            error_type="not_found",
            remediation="Verify the spec ID exists using: sdd specs list",
            details={"spec_id": spec_id},
        )
        return

    # Parse the spec; a path that exists but fails to load is an internal error.
    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        emit_error(
            f"Failed to load specification: {spec_id}",
            code="INTERNAL_ERROR",
            error_type="internal",
            remediation="Check that the spec file is valid JSON",
            details={"spec_id": spec_id},
        )
        return

    # Validate first, then derive concrete fix actions from the diagnostics.
    result = validate_spec(spec_data)
    actions = get_fix_actions(result, spec_data)

    if not actions:
        emit_success(
            {
                "spec_id": spec_id,
                "applied_count": 0,
                "skipped_count": 0,
                "message": "No auto-fixable issues found",
            }
        )
        return

    # Apply (or preview) the fixes against the spec file.
    report = apply_fixes(
        actions,
        str(spec_path),
        dry_run=dry_run,
        create_backup=not no_backup,
    )

    def _as_dict(action):
        # Flatten a fix action into the JSON-friendly shape used in output.
        return {
            "id": action.id,
            "description": action.description,
            "category": action.category,
        }

    applied = [_as_dict(a) for a in report.applied_actions]
    skipped = [_as_dict(a) for a in report.skipped_actions]

    emit_success(
        {
            "spec_id": spec_id,
            "dry_run": dry_run,
            "applied_count": len(applied),
            "skipped_count": len(skipped),
            "applied_actions": applied,
            "skipped_actions": skipped,
            "backup_path": report.backup_path,
        }
    )
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
@validate_group.command("stats")
@click.argument("spec_id")
@click.pass_context
@cli_command("stats")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Statistics calculation timed out")
def validate_stats_cmd(ctx: click.Context, spec_id: str) -> None:
    """Get statistics for a specification.

    SPEC_ID is the specification identifier.
    """
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # A specs directory is required before any lookup can happen.
    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        return

    # Locate the backing file: stats include file-size, so the path is needed
    # in addition to the parsed document.
    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        emit_error(
            f"Specification not found: {spec_id}",
            code="SPEC_NOT_FOUND",
            error_type="not_found",
            remediation="Verify the spec ID exists using: sdd specs list",
            details={"spec_id": spec_id},
        )
        return

    # Parse the spec document itself.
    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        emit_error(
            f"Failed to load specification: {spec_id}",
            code="INTERNAL_ERROR",
            error_type="internal",
            remediation="Check that the spec file is valid JSON",
            details={"spec_id": spec_id},
        )
        return

    stats = calculate_stats(spec_data, str(spec_path))

    # Coverage and progress are emitted as 0-100 percentages, rounded for display.
    payload = {
        "spec_id": stats.spec_id,
        "title": stats.title,
        "version": stats.version,
        "status": stats.status,
        "totals": stats.totals,
        "status_counts": stats.status_counts,
        "max_depth": stats.max_depth,
        "avg_tasks_per_phase": stats.avg_tasks_per_phase,
        "verification_coverage": round(stats.verification_coverage * 100, 1),
        "progress": round(stats.progress * 100, 1),
        "file_size_kb": round(stats.file_size_kb, 2),
    }
    emit_success(payload)
|
|
271
|
+
|
|
272
|
+
|
|
273
|
+
@validate_group.command("report")
@click.argument("spec_id")
@click.option(
    "--sections",
    "-s",
    default="all",
    help="Sections to include: validation,stats,health,all",
)
@click.pass_context
@cli_command("report")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Report generation timed out")
def validate_report_cmd(
    ctx: click.Context,
    spec_id: str,
    sections: str,
) -> None:
    """Generate a comprehensive report for a specification.

    SPEC_ID is the specification identifier.

    Combines validation, statistics, and health assessment into
    a single report suitable for review and documentation.
    """
    start_time = time.perf_counter()
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # A specs directory is required before any lookup can happen.
    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        return

    # Find spec path (file size is part of the statistics section).
    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        emit_error(
            f"Specification not found: {spec_id}",
            code="SPEC_NOT_FOUND",
            error_type="not_found",
            remediation="Verify the spec ID exists using: sdd specs list",
            details={"spec_id": spec_id},
        )
        return

    # Load spec
    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        emit_error(
            f"Failed to load specification: {spec_id}",
            code="INTERNAL_ERROR",
            error_type="internal",
            remediation="Check that the spec file is valid JSON",
            details={"spec_id": spec_id},
        )
        return

    # Parse requested sections; unknown section names are silently ignored.
    if sections.lower() == "all":
        requested_sections = {"validation", "stats", "health"}
    else:
        requested_sections = {
            s.strip()
            for s in sections.lower().split(",")
            if s.strip() in ("validation", "stats", "health")
        }

    output: dict = {
        "spec_id": spec_id,
        "sections": list(requested_sections),
    }

    # Validation results are needed for both the validation and health sections.
    result = None
    if "validation" in requested_sections or "health" in requested_sections:
        result = validate_spec(spec_data)

        if "validation" in requested_sections:
            diagnostics = [
                {
                    "code": diag.code,
                    "message": diag.message,
                    "severity": diag.severity,
                    "category": diag.category,
                    "location": diag.location,
                    "suggested_fix": diag.suggested_fix,
                    "auto_fixable": diag.auto_fixable,
                }
                for diag in result.diagnostics
            ]
            output["validation"] = {
                "is_valid": result.is_valid,
                "error_count": result.error_count,
                "warning_count": result.warning_count,
                "info_count": result.info_count,
                "diagnostics": diagnostics,
            }

    # Stats are needed for both the stats and health sections.
    stats = None
    if "stats" in requested_sections or "health" in requested_sections:
        stats = calculate_stats(spec_data, str(spec_path))

        if "stats" in requested_sections:
            output["statistics"] = {
                "title": stats.title,
                "version": stats.version,
                "status": stats.status,
                "totals": stats.totals,
                "status_counts": stats.status_counts,
                "max_depth": stats.max_depth,
                "avg_tasks_per_phase": stats.avg_tasks_per_phase,
                "verification_coverage": round(stats.verification_coverage * 100, 1),
                # NOTE(review): unlike the `stats` command this emits the raw
                # progress fraction, not a 0-100 percentage -- confirm intent.
                "progress": stats.progress,
                "file_size_kb": round(stats.file_size_kb, 2),
            }

    # Health section: a simple 0-100 score derived from validation and stats.
    if "health" in requested_sections:
        health_score = 100
        health_issues = []

        # BUGFIX: the health checks previously gated on output keys
        # ("validation" / "stats" in output), but stats were stored under
        # "statistics", so the coverage check never ran, and a health-only
        # request ignored both results entirely. Use the locally computed
        # `result` and `stats`, which are always available here.
        if result is not None:
            if not result.is_valid:
                health_issues.append(f"Validation errors: {result.error_count}")
                health_score -= min(30, result.error_count * 10)
            if result.warning_count > 5:
                health_issues.append(
                    f"High warning count: {result.warning_count}"
                )
                health_score -= min(20, result.warning_count * 2)

        if stats is not None:
            coverage_pct = round(stats.verification_coverage * 100, 1)
            if coverage_pct < 50:
                health_issues.append(
                    f"Low verification coverage: {coverage_pct}%"
                )
                health_score -= 10

        health_score = max(0, health_score)

        if health_score >= 80:
            health_status = "healthy"
        elif health_score >= 50:
            health_status = "needs_attention"
        else:
            health_status = "critical"

        output["health"] = {
            "score": health_score,
            "status": health_status,
            "issues": health_issues,
        }

    # Summary reflects only the sections that were emitted above.
    output["summary"] = {
        "spec_id": spec_id,
        "is_valid": output.get("validation", {}).get("is_valid", True),
        "error_count": output.get("validation", {}).get("error_count", 0),
        "warning_count": output.get("validation", {}).get("warning_count", 0),
        "health_score": output.get("health", {}).get("score", 100),
    }

    duration_ms = (time.perf_counter() - start_time) * 1000
    output["telemetry"] = {"duration_ms": round(duration_ms, 2)}

    emit_success(output)
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
@validate_group.command("analyze-deps")
@click.argument("spec_id")
@click.option(
    "--bottleneck-threshold",
    "-t",
    type=int,
    default=3,
    help="Minimum tasks blocked to flag as bottleneck.",
)
@click.option(
    "--limit",
    type=int,
    default=100,
    help="Maximum items to return per section.",
)
@click.pass_context
@cli_command("analyze-deps")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Dependency analysis timed out")
def validate_analyze_deps_cmd(
    ctx: click.Context,
    spec_id: str,
    bottleneck_threshold: int,
    limit: int,
) -> None:
    """Analyze dependency graph health for a specification.

    SPEC_ID is the specification identifier.

    Identifies blocking tasks, bottlenecks, circular dependencies,
    and the critical path for task completion.
    """
    start_time = time.perf_counter()
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # A specs directory is required before any lookup can happen.
    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        return

    # Load spec
    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        emit_error(
            f"Specification not found: {spec_id}",
            code="SPEC_NOT_FOUND",
            error_type="not_found",
            remediation="Verify the spec ID exists using: sdd specs list",
            details={"spec_id": spec_id},
        )
        return

    hierarchy = spec_data.get("hierarchy", {})

    # Collect all dependency relationships.
    all_deps = []
    blocks_count: dict = {}  # task_id -> count of tasks it blocks
    blocked_by_map: dict = {}  # task_id -> list of blockers

    for node_id, node in hierarchy.items():
        if node.get("type") not in ["task", "subtask", "verify", "phase"]:
            continue

        deps = node.get("dependencies", {})
        blocked_by = deps.get("blocked_by", [])
        blocks = deps.get("blocks", [])

        blocked_by_map[node_id] = blocked_by

        for blocker_id in blocked_by:
            all_deps.append(
                {
                    "from": blocker_id,
                    "to": node_id,
                    "type": "blocks",
                }
            )

        # Count outgoing "blocks" edges declared on this node.
        # NOTE(review): an edge declared on both sides (here and as
        # "blocked_by" on the other node) is counted twice -- confirm
        # whether specs ever declare both directions.
        blocks_count[node_id] = blocks_count.get(node_id, 0) + len(blocks)

        # Also count from the reverse "blocked_by" relationships.
        for blocker_id in blocked_by:
            blocks_count[blocker_id] = blocks_count.get(blocker_id, 0) + 1

    # Flag tasks that block at least `bottleneck_threshold` others,
    # most-blocking first, capped at `limit` entries.
    bottlenecks = []
    for task_id, count in sorted(blocks_count.items(), key=lambda x: -x[1]):
        if count >= bottleneck_threshold:
            task = hierarchy.get(task_id, {})
            bottlenecks.append(
                {
                    "id": task_id,
                    "title": task.get("title", ""),
                    "status": task.get("status", ""),
                    "blocks_count": count,
                }
            )
            if len(bottlenecks) >= limit:
                break

    # Detect circular dependencies via DFS over the blocked_by edges.
    circular_deps = []
    visited = set()
    rec_stack = set()

    def detect_cycle(node_id: str, path: list) -> Optional[list]:
        # Returns the cycle as a node list (closed: first == last), or None.
        if node_id in rec_stack:
            cycle_start = path.index(node_id)
            return path[cycle_start:] + [node_id]
        if node_id in visited:
            return None

        visited.add(node_id)
        rec_stack.add(node_id)
        path.append(node_id)

        # BUGFIX: rec_stack must be unwound even when a cycle propagates up
        # through this frame. Previously the early `return cycle` skipped
        # `rec_stack.remove(...)`, leaving stale entries that made later
        # traversals report false cycles (and could crash on path.index).
        try:
            for blocker_id in blocked_by_map.get(node_id, []):
                cycle = detect_cycle(blocker_id, path[:])
                if cycle:
                    return cycle
        finally:
            rec_stack.remove(node_id)
        return None

    for node_id in hierarchy:
        if node_id not in visited:
            cycle = detect_cycle(node_id, [])
            if cycle and cycle not in circular_deps:
                circular_deps.append(cycle)
                if len(circular_deps) >= limit:
                    break

    # Longest blocked_by chain ending at each node, memoized in `memo`.
    def get_chain_length(node_id: str, memo: dict) -> int:
        if node_id in memo:
            return memo[node_id]
        blockers = blocked_by_map.get(node_id, [])
        if not blockers:
            memo[node_id] = 1
            return 1
        max_blocker = max(get_chain_length(b, memo) for b in blockers)
        memo[node_id] = max_blocker + 1
        return memo[node_id]

    chain_lengths: dict = {}
    for node_id in hierarchy:
        if hierarchy.get(node_id, {}).get("type") in ["task", "subtask", "verify"]:
            try:
                get_chain_length(node_id, chain_lengths)
            except RecursionError:
                # Circular dependency: leave this node out of chain lengths.
                pass

    # Critical path: all nodes whose chain length equals the maximum.
    critical_path = []
    if chain_lengths:
        max_length = max(chain_lengths.values())
        for node_id, length in sorted(chain_lengths.items(), key=lambda x: -x[1]):
            if length == max_length:
                task = hierarchy.get(node_id, {})
                critical_path.append(
                    {
                        "id": node_id,
                        "title": task.get("title", ""),
                        "status": task.get("status", ""),
                        "chain_length": length,
                    }
                )
                if len(critical_path) >= limit:
                    break

    duration_ms = (time.perf_counter() - start_time) * 1000

    emit_success(
        {
            "spec_id": spec_id,
            "dependency_count": len(all_deps),
            "bottlenecks": bottlenecks,
            "bottleneck_threshold": bottleneck_threshold,
            "circular_deps": circular_deps,
            "has_circular_deps": len(circular_deps) > 0,
            "critical_path": critical_path,
            "max_chain_length": max(chain_lengths.values()) if chain_lengths else 0,
            "telemetry": {"duration_ms": round(duration_ms, 2)},
        }
    )
|
|
646
|
+
|
|
647
|
+
|
|
648
|
+
# Top-level validate command (alias for check)
@click.command("validate")
@click.argument("spec_id")
@click.option(
    "--fix", "auto_fix", is_flag=True, help="Auto-fix issues after validation."
)
@click.option(
    "--dry-run", is_flag=True, help="Preview fixes without applying (requires --fix)."
)
@click.option(
    "--preview", is_flag=True, help="Show summary only (counts and issue codes)."
)
@click.option(
    "--diff",
    "show_diff",
    is_flag=True,
    help="Show unified diff of changes (requires --fix).",
)
@click.option(
    "--select", "select_codes", help="Only fix selected issue codes (comma-separated)."
)
@click.pass_context
@cli_command("validate")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Validation timed out")
def validate_cmd(
    ctx: click.Context,
    spec_id: str,
    auto_fix: bool,
    dry_run: bool,
    preview: bool,
    show_diff: bool,
    select_codes: Optional[str],
) -> None:
    """Validate a specification and optionally apply fixes.

    SPEC_ID is the specification identifier.

    Emits a structured success payload with validation counts and either the
    full diagnostic list or, with --preview, a per-code summary.  With --fix,
    auto-fixable issues (optionally narrowed by --select) are applied and the
    fix outcome (and optional --diff output) is included in the payload.
    """
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # Guard: a specs directory must be resolvable before anything else.
    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        return

    # Find spec path
    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        emit_error(
            f"Specification not found: {spec_id}",
            code="SPEC_NOT_FOUND",
            error_type="not_found",
            remediation="Verify the spec ID exists using: sdd specs list",
            details={"spec_id": spec_id},
        )
        return

    # Load spec
    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        emit_error(
            f"Failed to load specification: {spec_id}",
            code="INTERNAL_ERROR",
            error_type="internal",
            remediation="Check that the spec file is valid JSON",
            details={"spec_id": spec_id},
        )
        return

    # Run validation
    result = validate_spec(spec_data)

    # Parse select codes if provided
    selected_codes = None
    if select_codes:
        selected_codes = {code.strip() for code in select_codes.split(",")}

    # Format diagnostics (filtered by select if provided).  The preview dict
    # is a strict subset of the full dict, so build one base and extend it.
    diagnostics = []
    for diag in result.diagnostics:
        if selected_codes and diag.code not in selected_codes:
            continue
        entry = {
            "code": diag.code,
            "severity": diag.severity,
            "auto_fixable": diag.auto_fixable,
        }
        if not preview:
            # Full mode carries the complete diagnostic payload.
            entry.update(
                {
                    "message": diag.message,
                    "category": diag.category,
                    "location": diag.location,
                    "suggested_fix": diag.suggested_fix,
                }
            )
        diagnostics.append(entry)

    output: dict = {
        "spec_id": result.spec_id,
        "is_valid": result.is_valid,
        "error_count": result.error_count,
        "warning_count": result.warning_count,
        "info_count": result.info_count,
        "preview": preview,
    }

    if not preview:
        output["diagnostics"] = diagnostics
    else:
        # Preview mode: collapse diagnostics into per-code counts.
        code_summary: dict = {}
        for diag in diagnostics:
            summary = code_summary.setdefault(
                diag["code"],
                {
                    "count": 0,
                    "severity": diag["severity"],
                    "auto_fixable": diag["auto_fixable"],
                },
            )
            summary["count"] += 1
        output["issue_summary"] = code_summary

    # Apply fixes if requested
    if auto_fix:
        actions = get_fix_actions(result, spec_data)

        # Honor --select for which fixes run, not just for reporting.
        if selected_codes:
            actions = [
                a
                for a in actions
                if a.id in selected_codes
                or any(code in a.id for code in selected_codes)
            ]

        if actions:
            # Snapshot the file before fixing so a diff can be computed.
            # Specs are JSON, so read them explicitly as UTF-8 rather than
            # relying on the platform default encoding.
            original_content = None
            if show_diff and spec_path:
                with open(spec_path, encoding="utf-8") as f:
                    original_content = f.read()

            report = apply_fixes(
                actions,
                str(spec_path),
                dry_run=dry_run,
                create_backup=True,
            )
            output["fix_applied"] = not dry_run
            output["fix_dry_run"] = dry_run
            output["fixes_count"] = len(report.applied_actions)
            output["fixes"] = [
                {"id": a.id, "description": a.description}
                for a in report.applied_actions
            ]
            output["backup_path"] = report.backup_path

            # Generate diff if requested (only meaningful once real changes
            # have been written to disk, hence the dry-run exclusion).
            if show_diff and spec_path and original_content and not dry_run:
                import difflib

                with open(spec_path, encoding="utf-8") as f:
                    new_content = f.read()
                output["diff"] = "".join(
                    difflib.unified_diff(
                        original_content.splitlines(keepends=True),
                        new_content.splitlines(keepends=True),
                        fromfile=f"{spec_id} (before)",
                        tofile=f"{spec_id} (after)",
                    )
                )
        else:
            # Keep the output schema consistent with the branch above so
            # consumers always see the same fix-related keys.
            output["fix_applied"] = False
            output["fix_dry_run"] = dry_run
            output["fixes_count"] = 0
            output["fixes"] = []
            output["backup_path"] = None

    emit_success(output)
|
|
839
|
+
|
|
840
|
+
|
|
841
|
+
# Top-level fix command (alias for validate fix)
@click.command("fix")
@click.argument("spec_id")
@click.option("--dry-run", is_flag=True, help="Preview fixes without applying.")
@click.option("--no-backup", is_flag=True, help="Skip creating backup file.")
@click.option("--diff", "show_diff", is_flag=True, help="Show unified diff of changes.")
@click.option(
    "--select", "select_codes", help="Only fix selected issue codes (comma-separated)."
)
@click.pass_context
@cli_command("fix")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Fix operation timed out")
def fix_cmd(
    ctx: click.Context,
    spec_id: str,
    dry_run: bool,
    no_backup: bool,
    show_diff: bool,
    select_codes: Optional[str],
) -> None:
    """Apply auto-fixes to a specification.

    SPEC_ID is the specification identifier.

    This is a top-level alias for `sdd validate fix`.
    """
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # Guard: a specs directory must be resolvable before anything else.
    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        return

    # Guard: the spec file itself must exist on disk.
    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        emit_error(
            f"Specification not found: {spec_id}",
            code="SPEC_NOT_FOUND",
            error_type="not_found",
            remediation="Verify the spec ID exists using: sdd specs list",
            details={"spec_id": spec_id},
        )
        return

    # Guard: the spec must load successfully.
    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        emit_error(
            f"Failed to load specification: {spec_id}",
            code="INTERNAL_ERROR",
            error_type="internal",
            remediation="Check that the spec file is valid JSON",
            details={"spec_id": spec_id},
        )
        return

    # Validate first, then derive the auto-fixable actions from the result.
    validation = validate_spec(spec_data)
    actions = get_fix_actions(validation, spec_data)

    # Narrow to user-selected issue codes when --select was given; an action
    # matches on exact id or on any selected code appearing within its id.
    if select_codes:
        wanted = {token.strip() for token in select_codes.split(",")}
        actions = [
            action
            for action in actions
            if action.id in wanted or any(code in action.id for code in wanted)
        ]

    # Nothing to do: report success with zero counts and bail out early.
    if not actions:
        suffix = " matching selection" if select_codes else ""
        emit_success(
            {
                "spec_id": spec_id,
                "applied_count": 0,
                "skipped_count": 0,
                "message": "No auto-fixable issues found" + suffix,
            }
        )
        return

    # Snapshot the file before fixing so a diff can be produced afterwards.
    before_text = None
    if show_diff and spec_path:
        with open(spec_path) as handle:
            before_text = handle.read()

    # Apply (or, under --dry-run, simulate) the fixes.
    report = apply_fixes(
        actions,
        str(spec_path),
        dry_run=dry_run,
        create_backup=not no_backup,
    )

    def _summarize(action) -> dict:
        # Compact JSON-friendly view of a single fix action.
        return {
            "id": action.id,
            "description": action.description,
            "category": action.category,
        }

    applied = [_summarize(action) for action in report.applied_actions]
    skipped = [_summarize(action) for action in report.skipped_actions]

    output: dict = {
        "spec_id": spec_id,
        "dry_run": dry_run,
        "applied_count": len(applied),
        "skipped_count": len(skipped),
        "applied_actions": applied,
        "skipped_actions": skipped,
        "backup_path": report.backup_path,
    }

    # A diff is only meaningful once real changes have landed on disk.
    if show_diff and spec_path and before_text and not dry_run:
        import difflib

        with open(spec_path) as handle:
            after_text = handle.read()
        output["diff"] = "".join(
            difflib.unified_diff(
                before_text.splitlines(keepends=True),
                after_text.splitlines(keepends=True),
                fromfile=f"{spec_id} (before)",
                tofile=f"{spec_id} (after)",
            )
        )

    emit_success(output)
|