foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/__init__.py +0 -13
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/session.py +1 -8
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/context.py +39 -0
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +615 -11
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/batch_operations.py +1196 -0
- foundry_mcp/core/discovery.py +7 -7
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +28 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/naming.py +25 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prometheus.py +0 -13
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/__init__.py +12 -0
- foundry_mcp/core/providers/base.py +39 -0
- foundry_mcp/core/providers/claude.py +51 -48
- foundry_mcp/core/providers/codex.py +70 -60
- foundry_mcp/core/providers/cursor_agent.py +25 -47
- foundry_mcp/core/providers/detectors.py +34 -7
- foundry_mcp/core/providers/gemini.py +69 -58
- foundry_mcp/core/providers/opencode.py +101 -47
- foundry_mcp/core/providers/package-lock.json +4 -4
- foundry_mcp/core/providers/package.json +1 -1
- foundry_mcp/core/providers/validation.py +128 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1220 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4020 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +690 -0
- foundry_mcp/core/spec.py +2439 -236
- foundry_mcp/core/task.py +1205 -31
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +319 -43
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +0 -14
- foundry_mcp/tools/unified/__init__.py +39 -18
- foundry_mcp/tools/unified/authoring.py +2371 -248
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +434 -32
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +133 -2
- foundry_mcp/tools/unified/provider.py +0 -40
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +374 -17
- foundry_mcp/tools/unified/review_helpers.py +16 -1
- foundry_mcp/tools/unified/server.py +9 -24
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1664 -30
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
- foundry_mcp-0.8.10.dist-info/RECORD +153 -0
- foundry_mcp/cli/flags.py +0 -266
- foundry_mcp/core/feature_flags.py +0 -592
- foundry_mcp-0.3.3.dist-info/RECORD +0 -135
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
foundry_mcp/core/spec.py
CHANGED
|
@@ -9,16 +9,59 @@ import shutil
|
|
|
9
9
|
import subprocess
|
|
10
10
|
from datetime import datetime, timezone
|
|
11
11
|
from pathlib import Path
|
|
12
|
-
from typing import Optional, Dict, Any, List, Tuple
|
|
12
|
+
from typing import Optional, Dict, Any, List, Tuple, Union
|
|
13
13
|
|
|
14
14
|
# Valid templates and categories for spec creation
|
|
15
|
-
|
|
15
|
+
# Note: Only 'empty' template is supported. Use phase templates to add structure.
|
|
16
|
+
TEMPLATES = ("empty",)
|
|
17
|
+
TEMPLATE_DESCRIPTIONS = {
|
|
18
|
+
"empty": "Blank spec with no phases - use phase templates to add structure",
|
|
19
|
+
}
|
|
16
20
|
CATEGORIES = ("investigation", "implementation", "refactoring", "decision", "research")
|
|
17
21
|
|
|
18
22
|
# Valid verification types for verify nodes
|
|
19
|
-
# -
|
|
23
|
+
# - run-tests: Automated tests via mcp__foundry-mcp__test-run
|
|
20
24
|
# - fidelity: Implementation-vs-spec comparison via mcp__foundry-mcp__spec-review-fidelity
|
|
21
|
-
|
|
25
|
+
# - manual: Manual verification steps
|
|
26
|
+
VERIFICATION_TYPES = ("run-tests", "fidelity", "manual")
|
|
27
|
+
|
|
28
|
+
# Valid phase templates for reusable phase structures
|
|
29
|
+
PHASE_TEMPLATES = ("planning", "implementation", "testing", "security", "documentation")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _requires_rich_task_fields(spec_data: Dict[str, Any]) -> bool:
|
|
33
|
+
"""Check if spec requires rich task fields based on explicit complexity metadata."""
|
|
34
|
+
metadata = spec_data.get("metadata", {})
|
|
35
|
+
if not isinstance(metadata, dict):
|
|
36
|
+
return False
|
|
37
|
+
|
|
38
|
+
# Only check explicit complexity metadata (template no longer indicates complexity)
|
|
39
|
+
complexity = metadata.get("complexity")
|
|
40
|
+
if isinstance(complexity, str) and complexity.strip().lower() in {
|
|
41
|
+
"medium",
|
|
42
|
+
"complex",
|
|
43
|
+
"high",
|
|
44
|
+
}:
|
|
45
|
+
return True
|
|
46
|
+
|
|
47
|
+
return False
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _normalize_acceptance_criteria(value: Any) -> Optional[List[str]]:
|
|
51
|
+
if value is None:
|
|
52
|
+
return None
|
|
53
|
+
if isinstance(value, str):
|
|
54
|
+
cleaned = value.strip()
|
|
55
|
+
return [cleaned] if cleaned else []
|
|
56
|
+
if isinstance(value, list):
|
|
57
|
+
cleaned_items = []
|
|
58
|
+
for item in value:
|
|
59
|
+
if isinstance(item, str):
|
|
60
|
+
cleaned = item.strip()
|
|
61
|
+
if cleaned:
|
|
62
|
+
cleaned_items.append(cleaned)
|
|
63
|
+
return cleaned_items
|
|
64
|
+
return []
|
|
22
65
|
|
|
23
66
|
|
|
24
67
|
def find_git_root() -> Optional[Path]:
|
|
@@ -161,6 +204,36 @@ def resolve_spec_file(
|
|
|
161
204
|
return find_spec_file(search_name, specs_dir)
|
|
162
205
|
|
|
163
206
|
|
|
207
|
+
def _migrate_spec_fields(spec_data: Dict[str, Any]) -> Dict[str, Any]:
|
|
208
|
+
"""
|
|
209
|
+
Migrate spec from dual-field format to canonical format.
|
|
210
|
+
|
|
211
|
+
Moves status, progress_percentage, and current_phase from metadata
|
|
212
|
+
to top-level (their canonical location). This handles specs created
|
|
213
|
+
before the field deduplication.
|
|
214
|
+
|
|
215
|
+
Args:
|
|
216
|
+
spec_data: Spec data dictionary (modified in place)
|
|
217
|
+
|
|
218
|
+
Returns:
|
|
219
|
+
The modified spec_data
|
|
220
|
+
"""
|
|
221
|
+
if not spec_data:
|
|
222
|
+
return spec_data
|
|
223
|
+
|
|
224
|
+
metadata = spec_data.get("metadata", {})
|
|
225
|
+
computed_fields = ("status", "progress_percentage", "current_phase")
|
|
226
|
+
|
|
227
|
+
for field in computed_fields:
|
|
228
|
+
# If field exists in metadata but not at top-level, migrate it
|
|
229
|
+
if field in metadata and field not in spec_data:
|
|
230
|
+
spec_data[field] = metadata[field]
|
|
231
|
+
# Remove from metadata (canonical location is top-level)
|
|
232
|
+
metadata.pop(field, None)
|
|
233
|
+
|
|
234
|
+
return spec_data
|
|
235
|
+
|
|
236
|
+
|
|
164
237
|
def load_spec(
|
|
165
238
|
spec_id: str, specs_dir: Optional[Path] = None
|
|
166
239
|
) -> Optional[Dict[str, Any]]:
|
|
@@ -181,7 +254,9 @@ def load_spec(
|
|
|
181
254
|
|
|
182
255
|
try:
|
|
183
256
|
with open(spec_file, "r") as f:
|
|
184
|
-
|
|
257
|
+
spec_data = json.load(f)
|
|
258
|
+
# Migrate old specs to canonical field locations
|
|
259
|
+
return _migrate_spec_fields(spec_data)
|
|
185
260
|
except (json.JSONDecodeError, IOError):
|
|
186
261
|
return None
|
|
187
262
|
|
|
@@ -234,13 +309,34 @@ def save_spec(
|
|
|
234
309
|
return False
|
|
235
310
|
|
|
236
311
|
|
|
237
|
-
|
|
312
|
+
# Default retention policy for versioned backups
|
|
313
|
+
DEFAULT_MAX_BACKUPS = 10
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
def backup_spec(
|
|
317
|
+
spec_id: str,
|
|
318
|
+
specs_dir: Optional[Path] = None,
|
|
319
|
+
max_backups: int = DEFAULT_MAX_BACKUPS,
|
|
320
|
+
) -> Optional[Path]:
|
|
238
321
|
"""
|
|
239
|
-
Create a backup
|
|
322
|
+
Create a versioned backup of the JSON spec file.
|
|
323
|
+
|
|
324
|
+
Creates timestamped backups in .backups/{spec_id}/ directory with a
|
|
325
|
+
configurable retention policy. Also maintains a latest.json copy for
|
|
326
|
+
quick access to the most recent backup.
|
|
327
|
+
|
|
328
|
+
Directory structure:
|
|
329
|
+
.backups/
|
|
330
|
+
└── {spec_id}/
|
|
331
|
+
├── 2025-12-26T18-20-13.456789.json # Timestamped backups (μs precision)
|
|
332
|
+
├── 2025-12-26T18-30-45.123456.json
|
|
333
|
+
└── latest.json # Copy of most recent
|
|
240
334
|
|
|
241
335
|
Args:
|
|
242
336
|
spec_id: Specification ID or path to spec file
|
|
243
337
|
specs_dir: Path to specs directory (optional, auto-detected if not provided)
|
|
338
|
+
max_backups: Maximum number of versioned backups to retain (default: 10).
|
|
339
|
+
Set to 0 for unlimited backups.
|
|
244
340
|
|
|
245
341
|
Returns:
|
|
246
342
|
Path to backup file if created, None otherwise
|
|
@@ -256,18 +352,556 @@ def backup_spec(spec_id: str, specs_dir: Optional[Path] = None) -> Optional[Path
|
|
|
256
352
|
if not specs_dir:
|
|
257
353
|
return None
|
|
258
354
|
|
|
259
|
-
|
|
260
|
-
|
|
355
|
+
# Create versioned backup directory: .backups/{spec_id}/
|
|
356
|
+
spec_backups_dir = specs_dir / ".backups" / spec_id
|
|
357
|
+
spec_backups_dir.mkdir(parents=True, exist_ok=True)
|
|
261
358
|
|
|
262
|
-
|
|
359
|
+
# Generate timestamp filename (ISO format with safe characters)
|
|
360
|
+
# Include full microseconds to handle rapid successive saves
|
|
361
|
+
now = datetime.now(timezone.utc)
|
|
362
|
+
timestamp = now.strftime("%Y-%m-%dT%H-%M-%S")
|
|
363
|
+
micros = now.strftime("%f") # Full 6-digit microseconds
|
|
364
|
+
backup_file = spec_backups_dir / f"{timestamp}.{micros}.json"
|
|
263
365
|
|
|
264
366
|
try:
|
|
367
|
+
# Create the timestamped backup
|
|
265
368
|
shutil.copy2(spec_file, backup_file)
|
|
369
|
+
|
|
370
|
+
# Update latest.json to point to the newest backup
|
|
371
|
+
latest_file = spec_backups_dir / "latest.json"
|
|
372
|
+
shutil.copy2(backup_file, latest_file)
|
|
373
|
+
|
|
374
|
+
# Apply retention policy
|
|
375
|
+
if max_backups > 0:
|
|
376
|
+
_apply_backup_retention(spec_backups_dir, max_backups)
|
|
377
|
+
|
|
266
378
|
return backup_file
|
|
267
379
|
except (IOError, OSError):
|
|
268
380
|
return None
|
|
269
381
|
|
|
270
382
|
|
|
383
|
+
def _apply_backup_retention(backups_dir: Path, max_backups: int) -> int:
|
|
384
|
+
"""
|
|
385
|
+
Apply retention policy by removing oldest backups exceeding the limit.
|
|
386
|
+
|
|
387
|
+
Args:
|
|
388
|
+
backups_dir: Path to the spec's backup directory
|
|
389
|
+
max_backups: Maximum number of backups to retain
|
|
390
|
+
|
|
391
|
+
Returns:
|
|
392
|
+
Number of backups deleted
|
|
393
|
+
"""
|
|
394
|
+
# List all timestamped backup files (exclude latest.json)
|
|
395
|
+
backup_files = sorted(
|
|
396
|
+
[
|
|
397
|
+
f for f in backups_dir.glob("*.json")
|
|
398
|
+
if f.name != "latest.json" and f.is_file()
|
|
399
|
+
],
|
|
400
|
+
key=lambda p: p.name, # Sort by filename (timestamp order)
|
|
401
|
+
)
|
|
402
|
+
|
|
403
|
+
deleted_count = 0
|
|
404
|
+
while len(backup_files) > max_backups:
|
|
405
|
+
oldest = backup_files.pop(0)
|
|
406
|
+
try:
|
|
407
|
+
oldest.unlink()
|
|
408
|
+
deleted_count += 1
|
|
409
|
+
except (IOError, OSError):
|
|
410
|
+
pass # Best effort deletion
|
|
411
|
+
|
|
412
|
+
return deleted_count
|
|
413
|
+
|
|
414
|
+
|
|
415
|
+
# Default pagination settings for backup listing
|
|
416
|
+
DEFAULT_BACKUP_PAGE_SIZE = 50
|
|
417
|
+
MAX_BACKUP_PAGE_SIZE = 100
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def list_spec_backups(
|
|
421
|
+
spec_id: str,
|
|
422
|
+
specs_dir: Optional[Path] = None,
|
|
423
|
+
cursor: Optional[str] = None,
|
|
424
|
+
limit: Optional[int] = None,
|
|
425
|
+
) -> Dict[str, Any]:
|
|
426
|
+
"""
|
|
427
|
+
List backups for a spec with cursor-based pagination.
|
|
428
|
+
|
|
429
|
+
Lists timestamped backup files chronologically (newest first) from the
|
|
430
|
+
.backups/{spec_id}/ directory. Returns file metadata including timestamp,
|
|
431
|
+
path, and size. Designed for use with spec.history action.
|
|
432
|
+
|
|
433
|
+
Args:
|
|
434
|
+
spec_id: Specification ID to list backups for
|
|
435
|
+
specs_dir: Base specs directory (uses find_specs_directory if None)
|
|
436
|
+
cursor: Pagination cursor from previous call (base64-encoded JSON)
|
|
437
|
+
limit: Maximum backups per page (default: 50, max: 100)
|
|
438
|
+
|
|
439
|
+
Returns:
|
|
440
|
+
Dict with structure:
|
|
441
|
+
{
|
|
442
|
+
"spec_id": str,
|
|
443
|
+
"backups": [
|
|
444
|
+
{
|
|
445
|
+
"timestamp": str, # ISO-ish format from filename
|
|
446
|
+
"file_path": str, # Absolute path to backup file
|
|
447
|
+
"file_size_bytes": int # File size
|
|
448
|
+
},
|
|
449
|
+
...
|
|
450
|
+
],
|
|
451
|
+
"count": int,
|
|
452
|
+
"pagination": {
|
|
453
|
+
"cursor": Optional[str],
|
|
454
|
+
"has_more": bool,
|
|
455
|
+
"page_size": int
|
|
456
|
+
}
|
|
457
|
+
}
|
|
458
|
+
|
|
459
|
+
Returns empty backups list if spec or backup directory doesn't exist.
|
|
460
|
+
"""
|
|
461
|
+
# Import pagination helpers
|
|
462
|
+
from foundry_mcp.core.pagination import (
|
|
463
|
+
CursorError,
|
|
464
|
+
decode_cursor,
|
|
465
|
+
encode_cursor,
|
|
466
|
+
normalize_page_size,
|
|
467
|
+
)
|
|
468
|
+
|
|
469
|
+
# Resolve specs directory
|
|
470
|
+
if specs_dir is None:
|
|
471
|
+
specs_dir = find_specs_directory()
|
|
472
|
+
|
|
473
|
+
# Normalize page size
|
|
474
|
+
page_size = normalize_page_size(
|
|
475
|
+
limit, default=DEFAULT_BACKUP_PAGE_SIZE, maximum=MAX_BACKUP_PAGE_SIZE
|
|
476
|
+
)
|
|
477
|
+
|
|
478
|
+
result: Dict[str, Any] = {
|
|
479
|
+
"spec_id": spec_id,
|
|
480
|
+
"backups": [],
|
|
481
|
+
"count": 0,
|
|
482
|
+
"pagination": {
|
|
483
|
+
"cursor": None,
|
|
484
|
+
"has_more": False,
|
|
485
|
+
"page_size": page_size,
|
|
486
|
+
},
|
|
487
|
+
}
|
|
488
|
+
|
|
489
|
+
if not specs_dir:
|
|
490
|
+
return result
|
|
491
|
+
|
|
492
|
+
# Locate backup directory: .backups/{spec_id}/
|
|
493
|
+
backups_dir = specs_dir / ".backups" / spec_id
|
|
494
|
+
if not backups_dir.is_dir():
|
|
495
|
+
return result
|
|
496
|
+
|
|
497
|
+
# List all timestamped backup files (exclude latest.json)
|
|
498
|
+
backup_files = sorted(
|
|
499
|
+
[
|
|
500
|
+
f
|
|
501
|
+
for f in backups_dir.glob("*.json")
|
|
502
|
+
if f.name != "latest.json" and f.is_file()
|
|
503
|
+
],
|
|
504
|
+
key=lambda p: p.name,
|
|
505
|
+
reverse=True, # Newest first
|
|
506
|
+
)
|
|
507
|
+
|
|
508
|
+
if not backup_files:
|
|
509
|
+
return result
|
|
510
|
+
|
|
511
|
+
# Handle cursor-based pagination
|
|
512
|
+
start_after_timestamp: Optional[str] = None
|
|
513
|
+
if cursor:
|
|
514
|
+
try:
|
|
515
|
+
cursor_data = decode_cursor(cursor)
|
|
516
|
+
start_after_timestamp = cursor_data.get("last_id")
|
|
517
|
+
except CursorError:
|
|
518
|
+
# Invalid cursor - return from beginning
|
|
519
|
+
pass
|
|
520
|
+
|
|
521
|
+
# Find start position based on cursor
|
|
522
|
+
if start_after_timestamp:
|
|
523
|
+
start_index = 0
|
|
524
|
+
for idx, backup_file in enumerate(backup_files):
|
|
525
|
+
# Filename without extension is the timestamp
|
|
526
|
+
timestamp = backup_file.stem
|
|
527
|
+
if timestamp == start_after_timestamp:
|
|
528
|
+
start_index = idx + 1
|
|
529
|
+
break
|
|
530
|
+
backup_files = backup_files[start_index:]
|
|
531
|
+
|
|
532
|
+
# Fetch one extra to check for more pages
|
|
533
|
+
page_files = backup_files[: page_size + 1]
|
|
534
|
+
has_more = len(page_files) > page_size
|
|
535
|
+
if has_more:
|
|
536
|
+
page_files = page_files[:page_size]
|
|
537
|
+
|
|
538
|
+
# Build backup entries with metadata
|
|
539
|
+
backups = []
|
|
540
|
+
for backup_file in page_files:
|
|
541
|
+
try:
|
|
542
|
+
file_stat = backup_file.stat()
|
|
543
|
+
backups.append(
|
|
544
|
+
{
|
|
545
|
+
"timestamp": backup_file.stem,
|
|
546
|
+
"file_path": str(backup_file.absolute()),
|
|
547
|
+
"file_size_bytes": file_stat.st_size,
|
|
548
|
+
}
|
|
549
|
+
)
|
|
550
|
+
except OSError:
|
|
551
|
+
# Skip files we can't stat
|
|
552
|
+
continue
|
|
553
|
+
|
|
554
|
+
# Generate next cursor if more pages exist
|
|
555
|
+
next_cursor = None
|
|
556
|
+
if has_more and backups:
|
|
557
|
+
next_cursor = encode_cursor({"last_id": backups[-1]["timestamp"]})
|
|
558
|
+
|
|
559
|
+
result["backups"] = backups
|
|
560
|
+
result["count"] = len(backups)
|
|
561
|
+
result["pagination"] = {
|
|
562
|
+
"cursor": next_cursor,
|
|
563
|
+
"has_more": has_more,
|
|
564
|
+
"page_size": page_size,
|
|
565
|
+
}
|
|
566
|
+
|
|
567
|
+
return result
|
|
568
|
+
|
|
569
|
+
|
|
570
|
+
# Default settings for diff operations
|
|
571
|
+
DEFAULT_DIFF_MAX_RESULTS = 100
|
|
572
|
+
|
|
573
|
+
|
|
574
|
+
def _load_spec_source(
|
|
575
|
+
source: Union[str, Path, Dict[str, Any]],
|
|
576
|
+
specs_dir: Optional[Path] = None,
|
|
577
|
+
) -> Optional[Dict[str, Any]]:
|
|
578
|
+
"""
|
|
579
|
+
Load a spec from various source types.
|
|
580
|
+
|
|
581
|
+
Args:
|
|
582
|
+
source: Spec ID, file path, or already-loaded dict
|
|
583
|
+
specs_dir: Base specs directory for ID lookups
|
|
584
|
+
|
|
585
|
+
Returns:
|
|
586
|
+
Loaded spec dict, or None if not found/invalid
|
|
587
|
+
"""
|
|
588
|
+
# Already a dict - return as-is
|
|
589
|
+
if isinstance(source, dict):
|
|
590
|
+
return source
|
|
591
|
+
|
|
592
|
+
# Path object or string path
|
|
593
|
+
source_path = Path(source) if isinstance(source, str) else source
|
|
594
|
+
|
|
595
|
+
# If it's an existing file path, load directly
|
|
596
|
+
if source_path.is_file():
|
|
597
|
+
try:
|
|
598
|
+
with open(source_path, "r") as f:
|
|
599
|
+
return json.load(f)
|
|
600
|
+
except (IOError, json.JSONDecodeError):
|
|
601
|
+
return None
|
|
602
|
+
|
|
603
|
+
# Otherwise treat as spec_id and use resolve_spec_file
|
|
604
|
+
if isinstance(source, str):
|
|
605
|
+
return load_spec(source, specs_dir)
|
|
606
|
+
|
|
607
|
+
return None
|
|
608
|
+
|
|
609
|
+
|
|
610
|
+
def _diff_node(
|
|
611
|
+
old_node: Dict[str, Any],
|
|
612
|
+
new_node: Dict[str, Any],
|
|
613
|
+
node_id: str,
|
|
614
|
+
) -> Optional[Dict[str, Any]]:
|
|
615
|
+
"""
|
|
616
|
+
Compare two nodes and return field-level changes.
|
|
617
|
+
|
|
618
|
+
Args:
|
|
619
|
+
old_node: Original node data
|
|
620
|
+
new_node: Updated node data
|
|
621
|
+
node_id: Node identifier for the result
|
|
622
|
+
|
|
623
|
+
Returns:
|
|
624
|
+
Dict with node info and field_changes list, or None if no changes
|
|
625
|
+
"""
|
|
626
|
+
# Fields to compare (excluding computed/transient fields)
|
|
627
|
+
compare_fields = ["title", "status", "type", "parent", "children", "metadata", "dependencies"]
|
|
628
|
+
|
|
629
|
+
field_changes = []
|
|
630
|
+
for field in compare_fields:
|
|
631
|
+
old_val = old_node.get(field)
|
|
632
|
+
new_val = new_node.get(field)
|
|
633
|
+
|
|
634
|
+
if old_val != new_val:
|
|
635
|
+
field_changes.append({
|
|
636
|
+
"field": field,
|
|
637
|
+
"old": old_val,
|
|
638
|
+
"new": new_val,
|
|
639
|
+
})
|
|
640
|
+
|
|
641
|
+
if not field_changes:
|
|
642
|
+
return None
|
|
643
|
+
|
|
644
|
+
return {
|
|
645
|
+
"node_id": node_id,
|
|
646
|
+
"type": new_node.get("type", old_node.get("type")),
|
|
647
|
+
"title": new_node.get("title", old_node.get("title")),
|
|
648
|
+
"field_changes": field_changes,
|
|
649
|
+
}
|
|
650
|
+
|
|
651
|
+
|
|
652
|
+
def diff_specs(
|
|
653
|
+
source: Union[str, Path, Dict[str, Any]],
|
|
654
|
+
target: Union[str, Path, Dict[str, Any]],
|
|
655
|
+
specs_dir: Optional[Path] = None,
|
|
656
|
+
max_results: Optional[int] = None,
|
|
657
|
+
) -> Dict[str, Any]:
|
|
658
|
+
"""
|
|
659
|
+
Compare two specs and categorize changes as added, removed, or modified.
|
|
660
|
+
|
|
661
|
+
Compares hierarchy nodes between source (base/older) and target (comparison/newer)
|
|
662
|
+
specs, identifying structural and content changes at the task level.
|
|
663
|
+
|
|
664
|
+
Args:
|
|
665
|
+
source: Base spec - spec_id, file path (including backup), or loaded dict
|
|
666
|
+
target: Comparison spec - spec_id, file path, or loaded dict
|
|
667
|
+
specs_dir: Base specs directory (auto-detected if None)
|
|
668
|
+
max_results: Maximum changes to return per category (default: 100)
|
|
669
|
+
|
|
670
|
+
Returns:
|
|
671
|
+
Dict with structure:
|
|
672
|
+
{
|
|
673
|
+
"summary": {
|
|
674
|
+
"added_count": int,
|
|
675
|
+
"removed_count": int,
|
|
676
|
+
"modified_count": int,
|
|
677
|
+
"total_changes": int
|
|
678
|
+
},
|
|
679
|
+
"changes": {
|
|
680
|
+
"added": [{"node_id": str, "type": str, "title": str}, ...],
|
|
681
|
+
"removed": [{"node_id": str, "type": str, "title": str}, ...],
|
|
682
|
+
"modified": [{
|
|
683
|
+
"node_id": str,
|
|
684
|
+
"type": str,
|
|
685
|
+
"title": str,
|
|
686
|
+
"field_changes": [{"field": str, "old": Any, "new": Any}, ...]
|
|
687
|
+
}, ...]
|
|
688
|
+
},
|
|
689
|
+
"partial": bool, # True if results truncated
|
|
690
|
+
"source_spec_id": Optional[str],
|
|
691
|
+
"target_spec_id": Optional[str]
|
|
692
|
+
}
|
|
693
|
+
|
|
694
|
+
Returns error structure if specs cannot be loaded:
|
|
695
|
+
{"error": str, "success": False}
|
|
696
|
+
"""
|
|
697
|
+
# Resolve specs directory
|
|
698
|
+
if specs_dir is None:
|
|
699
|
+
specs_dir = find_specs_directory()
|
|
700
|
+
|
|
701
|
+
# Load source spec
|
|
702
|
+
source_spec = _load_spec_source(source, specs_dir)
|
|
703
|
+
if source_spec is None:
|
|
704
|
+
return {
|
|
705
|
+
"error": f"Could not load source spec: {source}",
|
|
706
|
+
"success": False,
|
|
707
|
+
}
|
|
708
|
+
|
|
709
|
+
# Load target spec
|
|
710
|
+
target_spec = _load_spec_source(target, specs_dir)
|
|
711
|
+
if target_spec is None:
|
|
712
|
+
return {
|
|
713
|
+
"error": f"Could not load target spec: {target}",
|
|
714
|
+
"success": False,
|
|
715
|
+
}
|
|
716
|
+
|
|
717
|
+
# Get hierarchies
|
|
718
|
+
source_hierarchy = source_spec.get("hierarchy", {})
|
|
719
|
+
target_hierarchy = target_spec.get("hierarchy", {})
|
|
720
|
+
|
|
721
|
+
source_ids = set(source_hierarchy.keys())
|
|
722
|
+
target_ids = set(target_hierarchy.keys())
|
|
723
|
+
|
|
724
|
+
# Categorize changes
|
|
725
|
+
added_ids = target_ids - source_ids
|
|
726
|
+
removed_ids = source_ids - target_ids
|
|
727
|
+
common_ids = source_ids & target_ids
|
|
728
|
+
|
|
729
|
+
# Apply max_results limit
|
|
730
|
+
limit = max_results if max_results is not None else DEFAULT_DIFF_MAX_RESULTS
|
|
731
|
+
partial = False
|
|
732
|
+
|
|
733
|
+
# Build added list
|
|
734
|
+
added = []
|
|
735
|
+
for node_id in sorted(added_ids):
|
|
736
|
+
if len(added) >= limit:
|
|
737
|
+
partial = True
|
|
738
|
+
break
|
|
739
|
+
node = target_hierarchy[node_id]
|
|
740
|
+
added.append({
|
|
741
|
+
"node_id": node_id,
|
|
742
|
+
"type": node.get("type"),
|
|
743
|
+
"title": node.get("title"),
|
|
744
|
+
})
|
|
745
|
+
|
|
746
|
+
# Build removed list
|
|
747
|
+
removed = []
|
|
748
|
+
for node_id in sorted(removed_ids):
|
|
749
|
+
if len(removed) >= limit:
|
|
750
|
+
partial = True
|
|
751
|
+
break
|
|
752
|
+
node = source_hierarchy[node_id]
|
|
753
|
+
removed.append({
|
|
754
|
+
"node_id": node_id,
|
|
755
|
+
"type": node.get("type"),
|
|
756
|
+
"title": node.get("title"),
|
|
757
|
+
})
|
|
758
|
+
|
|
759
|
+
# Build modified list
|
|
760
|
+
modified = []
|
|
761
|
+
for node_id in sorted(common_ids):
|
|
762
|
+
if len(modified) >= limit:
|
|
763
|
+
partial = True
|
|
764
|
+
break
|
|
765
|
+
old_node = source_hierarchy[node_id]
|
|
766
|
+
new_node = target_hierarchy[node_id]
|
|
767
|
+
diff = _diff_node(old_node, new_node, node_id)
|
|
768
|
+
if diff:
|
|
769
|
+
modified.append(diff)
|
|
770
|
+
|
|
771
|
+
# Calculate actual counts (may exceed displayed if partial)
|
|
772
|
+
total_added = len(added_ids)
|
|
773
|
+
total_removed = len(removed_ids)
|
|
774
|
+
total_modified = sum(
|
|
775
|
+
1 for nid in common_ids
|
|
776
|
+
if _diff_node(source_hierarchy[nid], target_hierarchy[nid], nid)
|
|
777
|
+
) if not partial else len(modified) # Only count all if not already partial
|
|
778
|
+
|
|
779
|
+
return {
|
|
780
|
+
"summary": {
|
|
781
|
+
"added_count": total_added,
|
|
782
|
+
"removed_count": total_removed,
|
|
783
|
+
"modified_count": total_modified if not partial else len(modified),
|
|
784
|
+
"total_changes": total_added + total_removed + (total_modified if not partial else len(modified)),
|
|
785
|
+
},
|
|
786
|
+
"changes": {
|
|
787
|
+
"added": added,
|
|
788
|
+
"removed": removed,
|
|
789
|
+
"modified": modified,
|
|
790
|
+
},
|
|
791
|
+
"partial": partial,
|
|
792
|
+
"source_spec_id": source_spec.get("spec_id"),
|
|
793
|
+
"target_spec_id": target_spec.get("spec_id"),
|
|
794
|
+
}
|
|
795
|
+
|
|
796
|
+
|
|
797
|
+
def rollback_spec(
|
|
798
|
+
spec_id: str,
|
|
799
|
+
timestamp: str,
|
|
800
|
+
specs_dir: Optional[Path] = None,
|
|
801
|
+
dry_run: bool = False,
|
|
802
|
+
create_backup: bool = True,
|
|
803
|
+
) -> Dict[str, Any]:
|
|
804
|
+
"""
|
|
805
|
+
Restore a spec from a specific backup timestamp.
|
|
806
|
+
|
|
807
|
+
Creates a safety backup of the current state before rollback (by default),
|
|
808
|
+
then replaces the spec file with the contents from the specified backup.
|
|
809
|
+
|
|
810
|
+
Args:
|
|
811
|
+
spec_id: Specification ID to rollback
|
|
812
|
+
timestamp: Backup timestamp to restore (e.g., "2025-12-26T18-20-13.456789")
|
|
813
|
+
specs_dir: Base specs directory (auto-detected if None)
|
|
814
|
+
dry_run: If True, validate and return what would happen without changes
|
|
815
|
+
create_backup: If True (default), create safety backup before rollback
|
|
816
|
+
|
|
817
|
+
Returns:
|
|
818
|
+
Dict with structure:
|
|
819
|
+
{
|
|
820
|
+
"success": bool,
|
|
821
|
+
"spec_id": str,
|
|
822
|
+
"timestamp": str,
|
|
823
|
+
"dry_run": bool,
|
|
824
|
+
"backup_created": Optional[str], # Safety backup path
|
|
825
|
+
"restored_from": str, # Source backup path
|
|
826
|
+
"error": Optional[str] # Error if failed
|
|
827
|
+
}
|
|
828
|
+
"""
|
|
829
|
+
# Resolve specs directory
|
|
830
|
+
if specs_dir is None:
|
|
831
|
+
specs_dir = find_specs_directory()
|
|
832
|
+
|
|
833
|
+
result: Dict[str, Any] = {
|
|
834
|
+
"success": False,
|
|
835
|
+
"spec_id": spec_id,
|
|
836
|
+
"timestamp": timestamp,
|
|
837
|
+
"dry_run": dry_run,
|
|
838
|
+
"backup_created": None,
|
|
839
|
+
"restored_from": None,
|
|
840
|
+
"error": None,
|
|
841
|
+
}
|
|
842
|
+
|
|
843
|
+
if not specs_dir:
|
|
844
|
+
result["error"] = "Could not find specs directory"
|
|
845
|
+
return result
|
|
846
|
+
|
|
847
|
+
# Find current spec file
|
|
848
|
+
spec_file = find_spec_file(spec_id, specs_dir)
|
|
849
|
+
if not spec_file:
|
|
850
|
+
result["error"] = f"Spec '{spec_id}' not found"
|
|
851
|
+
return result
|
|
852
|
+
|
|
853
|
+
# Locate backup directory
|
|
854
|
+
backups_dir = specs_dir / ".backups" / spec_id
|
|
855
|
+
if not backups_dir.is_dir():
|
|
856
|
+
result["error"] = f"No backups directory for spec '{spec_id}'"
|
|
857
|
+
return result
|
|
858
|
+
|
|
859
|
+
# Find the backup file matching the timestamp
|
|
860
|
+
backup_file = backups_dir / f"{timestamp}.json"
|
|
861
|
+
if not backup_file.is_file():
|
|
862
|
+
result["error"] = f"Backup not found for timestamp '{timestamp}'"
|
|
863
|
+
return result
|
|
864
|
+
|
|
865
|
+
result["restored_from"] = str(backup_file)
|
|
866
|
+
|
|
867
|
+
# Validate backup is valid JSON
|
|
868
|
+
try:
|
|
869
|
+
with open(backup_file, "r") as f:
|
|
870
|
+
backup_data = json.load(f)
|
|
871
|
+
if not isinstance(backup_data, dict):
|
|
872
|
+
result["error"] = "Backup file is not a valid spec (not a JSON object)"
|
|
873
|
+
return result
|
|
874
|
+
except json.JSONDecodeError as e:
|
|
875
|
+
result["error"] = f"Backup file is not valid JSON: {e}"
|
|
876
|
+
return result
|
|
877
|
+
except IOError as e:
|
|
878
|
+
result["error"] = f"Could not read backup file: {e}"
|
|
879
|
+
return result
|
|
880
|
+
|
|
881
|
+
# dry_run - return success without making changes
|
|
882
|
+
if dry_run:
|
|
883
|
+
result["success"] = True
|
|
884
|
+
if create_backup:
|
|
885
|
+
result["backup_created"] = "(would be created)"
|
|
886
|
+
return result
|
|
887
|
+
|
|
888
|
+
# Create safety backup of current state before rollback
|
|
889
|
+
if create_backup:
|
|
890
|
+
safety_backup = backup_spec(spec_id, specs_dir)
|
|
891
|
+
if safety_backup:
|
|
892
|
+
result["backup_created"] = str(safety_backup)
|
|
893
|
+
|
|
894
|
+
# Perform rollback - copy backup to spec location
|
|
895
|
+
try:
|
|
896
|
+
shutil.copy2(backup_file, spec_file)
|
|
897
|
+
result["success"] = True
|
|
898
|
+
except (IOError, OSError) as e:
|
|
899
|
+
result["error"] = f"Failed to restore backup: {e}"
|
|
900
|
+
return result
|
|
901
|
+
|
|
902
|
+
return result
|
|
903
|
+
|
|
904
|
+
|
|
271
905
|
def _validate_spec_structure(spec_data: Dict[str, Any]) -> bool:
|
|
272
906
|
"""
|
|
273
907
|
Validate basic JSON spec file structure.
|
|
@@ -297,6 +931,7 @@ def _validate_spec_structure(spec_data: Dict[str, Any]) -> bool:
|
|
|
297
931
|
"in_progress",
|
|
298
932
|
"completed",
|
|
299
933
|
"blocked",
|
|
934
|
+
"failed",
|
|
300
935
|
]:
|
|
301
936
|
return False
|
|
302
937
|
|
|
@@ -684,73 +1319,444 @@ def add_phase(
|
|
|
684
1319
|
}, None
|
|
685
1320
|
|
|
686
1321
|
|
|
687
|
-
def
|
|
1322
|
+
def add_phase_bulk(
    spec_id: str,
    phase_title: str,
    tasks: List[Dict[str, Any]],
    phase_description: Optional[str] = None,
    phase_purpose: Optional[str] = None,
    phase_estimated_hours: Optional[float] = None,
    metadata_defaults: Optional[Dict[str, Any]] = None,
    position: Optional[int] = None,
    link_previous: bool = True,
    specs_dir: Optional[Path] = None,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """
    Add a new phase with pre-defined tasks in a single atomic operation.

    Creates a phase and all specified tasks/verify nodes without auto-generating
    verification scaffolding. This enables creating complete phase structures
    in one operation.

    Args:
        spec_id: Specification ID to mutate.
        phase_title: Phase title.
        tasks: List of task definitions, each containing:
            - type: "task" or "verify" (required)
            - title: Task title (required)
            - description: Optional description
            - acceptance_criteria: Optional list of acceptance criteria
            - task_category: Optional task category
            - file_path: Optional associated file path
            - estimated_hours: Optional time estimate
            - verification_type: Optional verification type for verify tasks
        phase_description: Optional phase description.
        phase_purpose: Optional purpose/goal metadata string.
        phase_estimated_hours: Optional estimated hours for the phase.
        metadata_defaults: Optional defaults applied to tasks missing explicit values.
            Supported keys: task_category, category, acceptance_criteria, estimated_hours
        position: Optional zero-based insertion index in spec-root children.
        link_previous: Whether to automatically block on the previous phase.
        specs_dir: Specs directory override.

    Returns:
        Tuple of (result_dict, error_message).
        On success: ({"phase_id": ..., "tasks_created": [...], ...}, None)
        On failure: (None, "error message")
    """
    # ---- Up-front validation (no state touched until all checks pass) ----
    # Validate required parameters
    if not spec_id or not spec_id.strip():
        return None, "Specification ID is required"

    if not phase_title or not phase_title.strip():
        return None, "Phase title is required"

    if not tasks or not isinstance(tasks, list) or len(tasks) == 0:
        return None, "At least one task definition is required"

    if phase_estimated_hours is not None and phase_estimated_hours < 0:
        return None, "phase_estimated_hours must be non-negative"

    phase_title = phase_title.strip()
    defaults = metadata_defaults or {}

    # Validate metadata_defaults values
    if defaults:
        default_est_hours = defaults.get("estimated_hours")
        if default_est_hours is not None:
            if not isinstance(default_est_hours, (int, float)) or default_est_hours < 0:
                return None, "metadata_defaults.estimated_hours must be a non-negative number"
        # "task_category" is preferred; "category" is the legacy key fallback.
        default_category = defaults.get("task_category")
        if default_category is None:
            default_category = defaults.get("category")
        if default_category is not None and not isinstance(default_category, str):
            return None, "metadata_defaults.task_category must be a string"
        default_acceptance = defaults.get("acceptance_criteria")
        if default_acceptance is not None and not isinstance(
            default_acceptance, (list, str)
        ):
            return None, "metadata_defaults.acceptance_criteria must be a list of strings"
        if isinstance(default_acceptance, list) and any(
            not isinstance(item, str) for item in default_acceptance
        ):
            return None, "metadata_defaults.acceptance_criteria must be a list of strings"

    # Validate each task definition before anything is created.
    valid_task_types = {"task", "verify"}
    for idx, task_def in enumerate(tasks):
        if not isinstance(task_def, dict):
            return None, f"Task at index {idx} must be a dictionary"

        task_type = task_def.get("type")
        if not task_type or task_type not in valid_task_types:
            return None, f"Task at index {idx} must have type 'task' or 'verify'"

        task_title = task_def.get("title")
        if not task_title or not isinstance(task_title, str) or not task_title.strip():
            return None, f"Task at index {idx} must have a non-empty title"

        est_hours = task_def.get("estimated_hours")
        if est_hours is not None:
            if not isinstance(est_hours, (int, float)) or est_hours < 0:
                return None, f"Task at index {idx} has invalid estimated_hours"

        task_category = task_def.get("task_category")
        if task_category is not None and not isinstance(task_category, str):
            return None, f"Task at index {idx} has invalid task_category"

        legacy_category = task_def.get("category")
        if legacy_category is not None and not isinstance(legacy_category, str):
            return None, f"Task at index {idx} has invalid category"

        acceptance_criteria = task_def.get("acceptance_criteria")
        if acceptance_criteria is not None and not isinstance(
            acceptance_criteria, (list, str)
        ):
            return None, f"Task at index {idx} has invalid acceptance_criteria"
        if isinstance(acceptance_criteria, list) and any(
            not isinstance(item, str) for item in acceptance_criteria
        ):
            return None, f"Task at index {idx} acceptance_criteria must be a list of strings"

    # ---- Locate and load the spec ----
    # Find specs directory
    if specs_dir is None:
        specs_dir = find_specs_directory()

    if specs_dir is None:
        return (
            None,
            "No specs directory found. Use specs_dir parameter or set SDD_SPECS_DIR.",
        )

    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        return None, f"Specification '{spec_id}' not found"

    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        return None, f"Failed to load specification '{spec_id}'"

    # When True, every "task" must carry description, acceptance_criteria,
    # and task_category (enforced per-task in the creation loop below).
    requires_rich_tasks = _requires_rich_task_fields(spec_data)

    hierarchy = spec_data.get("hierarchy", {})
    spec_root = hierarchy.get("spec-root")

    if spec_root is None:
        return None, "Specification root node 'spec-root' not found"

    if spec_root.get("type") not in {"spec", "root"}:
        return None, "Specification root node has invalid type"

    children = spec_root.get("children", []) or []
    if not isinstance(children, list):
        children = []

    # Clamp requested position into [0, len(children)]; default is append.
    insert_index = len(children)
    if position is not None and position >= 0:
        insert_index = min(position, len(children))

    # Generate phase ID
    phase_id, phase_num = _generate_phase_id(hierarchy)

    # Build phase metadata
    phase_metadata: Dict[str, Any] = {
        "purpose": (phase_purpose.strip() if phase_purpose else ""),
    }
    if phase_description:
        phase_metadata["description"] = phase_description.strip()
    if phase_estimated_hours is not None:
        phase_metadata["estimated_hours"] = phase_estimated_hours

    # Create phase node (without children initially)
    phase_node = {
        "type": "phase",
        "title": phase_title,
        "status": "pending",
        "parent": "spec-root",
        "children": [],
        "total_tasks": 0,
        "completed_tasks": 0,
        "metadata": phase_metadata,
        "dependencies": {
            "blocks": [],
            "blocked_by": [],
            "depends": [],
        },
    }

    hierarchy[phase_id] = phase_node

    # Insert phase into spec-root children
    if insert_index == len(children):
        children.append(phase_id)
    else:
        children.insert(insert_index, phase_id)
    spec_root["children"] = children

    # Link to previous phase if requested.
    # NOTE(review): linking only happens when the phase lands at the END of
    # spec-root's children (insert_index == len(children) - 1); a mid-list
    # insertion with link_previous=True creates no dependency edge — confirm
    # this is intentional.
    linked_phase_id: Optional[str] = None
    if link_previous and insert_index > 0 and insert_index == len(children) - 1:
        candidate = children[insert_index - 1]
        previous = hierarchy.get(candidate)
        if previous and previous.get("type") == "phase":
            linked_phase_id = candidate
            prev_deps = previous.setdefault(
                "dependencies",
                {"blocks": [], "blocked_by": [], "depends": []},
            )
            blocks = prev_deps.setdefault("blocks", [])
            if phase_id not in blocks:
                blocks.append(phase_id)
            phase_node["dependencies"]["blocked_by"].append(candidate)

    def _nonempty_string(value: Any) -> bool:
        """True if value is a string with non-whitespace content."""
        return isinstance(value, str) and bool(value.strip())

    def _extract_description(task_def: Dict[str, Any]) -> tuple[Optional[str], Any]:
        """Pick the metadata field ("description" or "details") and its cleaned
        value from a task definition; returns (None, None) when neither is usable.
        "description" (string) wins over "details" (string or list of strings)."""
        description = task_def.get("description")
        if _nonempty_string(description) and isinstance(description, str):
            return "description", description.strip()
        details = task_def.get("details")
        if _nonempty_string(details) and isinstance(details, str):
            return "details", details.strip()
        if isinstance(details, list):
            cleaned = [
                item.strip()
                for item in details
                if isinstance(item, str) and item.strip()
            ]
            if cleaned:
                return "details", cleaned
        return None, None

    # ---- Create tasks under the phase ----
    # NOTE(review): per-task validation failures below return after the phase
    # has already been inserted into the in-memory hierarchy; nothing is
    # persisted (save_spec runs only at the end), but spec_data is left
    # modified in memory — callers should not reuse it after an error.
    tasks_created: List[Dict[str, Any]] = []
    task_counter = 0
    verify_counter = 0

    for task_def in tasks:
        task_type = task_def["type"]
        task_title = task_def["title"].strip()

        # Generate task ID based on type; tasks and verifies number independently.
        if task_type == "verify":
            verify_counter += 1
            task_id = f"verify-{phase_num}-{verify_counter}"
        else:
            task_counter += 1
            task_id = f"task-{phase_num}-{task_counter}"

        # Build task metadata with defaults cascade
        task_metadata: Dict[str, Any] = {}

        # Apply description/details
        desc_field, desc_value = _extract_description(task_def)
        if desc_field and desc_value is not None:
            task_metadata[desc_field] = desc_value
        elif requires_rich_tasks and task_type == "task":
            return None, f"Task '{task_title}' missing description"

        # Apply file_path
        file_path = task_def.get("file_path")
        if file_path and isinstance(file_path, str):
            task_metadata["file_path"] = file_path.strip()

        # Apply estimated_hours (task-level overrides defaults)
        est_hours = task_def.get("estimated_hours")
        if est_hours is not None:
            task_metadata["estimated_hours"] = float(est_hours)
        elif defaults.get("estimated_hours") is not None:
            task_metadata["estimated_hours"] = float(defaults["estimated_hours"])

        normalized_category = None
        if task_type == "task":
            # Apply acceptance_criteria (task value, else metadata_defaults)
            raw_acceptance = task_def.get("acceptance_criteria")
            if raw_acceptance is None:
                raw_acceptance = defaults.get("acceptance_criteria")
            acceptance_criteria = _normalize_acceptance_criteria(raw_acceptance)
            if acceptance_criteria is not None:
                task_metadata["acceptance_criteria"] = acceptance_criteria
            if requires_rich_tasks:
                if raw_acceptance is None:
                    return None, f"Task '{task_title}' missing acceptance_criteria"
                if not acceptance_criteria:
                    return (
                        None,
                        f"Task '{task_title}' acceptance_criteria must include at least one entry",
                    )

            # Apply task_category from defaults if not specified
            category = task_def.get("task_category") or task_def.get("category")
            if category is None:
                category = defaults.get("task_category") or defaults.get("category")
            if category and isinstance(category, str):
                # Categories are stored lowercase and must be in CATEGORIES.
                normalized_category = category.strip().lower()
                if normalized_category not in CATEGORIES:
                    return (
                        None,
                        f"Task '{task_title}' has invalid task_category '{category}'",
                    )
                task_metadata["task_category"] = normalized_category
            if requires_rich_tasks and normalized_category is None:
                return None, f"Task '{task_title}' missing task_category"

            # Code-touching categories must name the file they touch.
            if normalized_category in {"implementation", "refactoring"}:
                if not _nonempty_string(task_metadata.get("file_path")):
                    return (
                        None,
                        f"Task '{task_title}' missing file_path for category '{normalized_category}'",
                    )

        # Apply verification_type for verify tasks.
        # NOTE(review): an unrecognized verification_type is silently dropped
        # rather than rejected — confirm that is the intended behavior.
        if task_type == "verify":
            verify_type = task_def.get("verification_type")
            if verify_type and verify_type in VERIFICATION_TYPES:
                task_metadata["verification_type"] = verify_type

        # Create task node
        task_node = {
            "type": task_type,
            "title": task_title,
            "status": "pending",
            "parent": phase_id,
            "children": [],
            "total_tasks": 1,
            "completed_tasks": 0,
            "metadata": task_metadata,
            "dependencies": {
                "blocks": [],
                "blocked_by": [],
                "depends": [],
            },
        }

        hierarchy[task_id] = task_node
        phase_node["children"].append(task_id)
        phase_node["total_tasks"] += 1

        tasks_created.append({
            "task_id": task_id,
            "title": task_title,
            "type": task_type,
        })

    # Update spec-root total_tasks
    total_tasks = spec_root.get("total_tasks", 0)
    spec_root["total_tasks"] = total_tasks + phase_node["total_tasks"]

    # Update spec-level estimated hours if provided (additive when a numeric
    # value already exists, otherwise set outright)
    if phase_estimated_hours is not None:
        spec_metadata = spec_data.setdefault("metadata", {})
        current_hours = spec_metadata.get("estimated_hours")
        if isinstance(current_hours, (int, float)):
            spec_metadata["estimated_hours"] = current_hours + phase_estimated_hours
        else:
            spec_metadata["estimated_hours"] = phase_estimated_hours

    # Save spec atomically
    saved = save_spec(spec_id, spec_data, specs_dir)
    if not saved:
        return None, "Failed to save specification"

    return {
        "spec_id": spec_id,
        "phase_id": phase_id,
        "title": phase_title,
        "position": insert_index,
        "linked_previous": linked_phase_id,
        "tasks_created": tasks_created,
        "total_tasks": len(tasks_created),
    }, None
|
|
1691
|
+
|
|
1692
|
+
|
|
1693
|
+
def _collect_descendants(hierarchy: Dict[str, Any], node_id: str) -> List[str]:
|
|
1694
|
+
"""
|
|
1695
|
+
Recursively collect all descendant node IDs for a given node.
|
|
1696
|
+
|
|
1697
|
+
Args:
|
|
1698
|
+
hierarchy: The spec hierarchy dict
|
|
1699
|
+
node_id: Starting node ID
|
|
1700
|
+
|
|
1701
|
+
Returns:
|
|
1702
|
+
List of all descendant node IDs (not including the starting node)
|
|
1703
|
+
"""
|
|
1704
|
+
descendants: List[str] = []
|
|
1705
|
+
node = hierarchy.get(node_id)
|
|
1706
|
+
if not node:
|
|
1707
|
+
return descendants
|
|
1708
|
+
|
|
1709
|
+
children = node.get("children", [])
|
|
1710
|
+
if not isinstance(children, list):
|
|
1711
|
+
return descendants
|
|
1712
|
+
|
|
1713
|
+
for child_id in children:
|
|
1714
|
+
descendants.append(child_id)
|
|
1715
|
+
descendants.extend(_collect_descendants(hierarchy, child_id))
|
|
1716
|
+
|
|
1717
|
+
return descendants
|
|
1718
|
+
|
|
1719
|
+
|
|
1720
|
+
def _count_tasks_in_subtree(
|
|
1721
|
+
hierarchy: Dict[str, Any], node_ids: List[str]
|
|
1722
|
+
) -> Tuple[int, int]:
|
|
1723
|
+
"""
|
|
1724
|
+
Count total and completed tasks in a list of nodes.
|
|
1725
|
+
|
|
1726
|
+
Args:
|
|
1727
|
+
hierarchy: The spec hierarchy dict
|
|
1728
|
+
node_ids: List of node IDs to count
|
|
1729
|
+
|
|
1730
|
+
Returns:
|
|
1731
|
+
Tuple of (total_count, completed_count)
|
|
1732
|
+
"""
|
|
1733
|
+
total = 0
|
|
1734
|
+
completed = 0
|
|
1735
|
+
|
|
1736
|
+
for node_id in node_ids:
|
|
1737
|
+
node = hierarchy.get(node_id)
|
|
1738
|
+
if not node:
|
|
1739
|
+
continue
|
|
1740
|
+
node_type = node.get("type")
|
|
1741
|
+
if node_type in ("task", "subtask", "verify"):
|
|
1742
|
+
total += 1
|
|
1743
|
+
if node.get("status") == "completed":
|
|
1744
|
+
completed += 1
|
|
1745
|
+
|
|
1746
|
+
return total, completed
|
|
1747
|
+
|
|
1748
|
+
|
|
1749
|
+
def _remove_dependency_references(
|
|
1750
|
+
hierarchy: Dict[str, Any], removed_ids: List[str]
|
|
1751
|
+
) -> None:
|
|
1752
|
+
"""
|
|
1753
|
+
Remove references to deleted nodes from all dependency lists.
|
|
1754
|
+
|
|
1755
|
+
Args:
|
|
1756
|
+
hierarchy: The spec hierarchy dict
|
|
1757
|
+
removed_ids: List of node IDs being removed
|
|
1758
|
+
"""
|
|
1759
|
+
removed_set = set(removed_ids)
|
|
754
1760
|
|
|
755
1761
|
for node_id, node in hierarchy.items():
|
|
756
1762
|
deps = node.get("dependencies")
|
|
@@ -971,26 +1977,519 @@ def remove_phase(
|
|
|
971
1977
|
return result, None
|
|
972
1978
|
|
|
973
1979
|
|
|
1980
|
+
def move_phase(
    spec_id: str,
    phase_id: str,
    position: int,
    link_previous: bool = True,
    dry_run: bool = False,
    specs_dir: Optional[Path] = None,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """
    Move a phase to a new position within spec-root's children.

    Supports reordering phases and optionally re-linking phase dependencies
    according to the link_previous pattern (each phase blocked by its predecessor).

    Args:
        spec_id: Specification ID containing the phase.
        phase_id: Phase ID to move (e.g., "phase-2").
        position: Target position (1-based index) in spec-root children.
        link_previous: If True, update dependencies to maintain the sequential
            blocking pattern. If False, preserve existing dependencies.
        dry_run: If True, validate and return preview without saving changes.
        specs_dir: Path to specs directory (auto-detected if not provided).

    Returns:
        Tuple of (result_dict, error_message).
        On success: ({"spec_id": ..., "phase_id": ..., "old_position": ..., "new_position": ..., ...}, None)
        On failure: (None, "error message")
    """
    # Validate inputs
    if not spec_id or not spec_id.strip():
        return None, "Specification ID is required"

    if not phase_id or not phase_id.strip():
        return None, "Phase ID is required"

    if not isinstance(position, int) or position < 1:
        return None, "Position must be a positive integer (1-based)"

    # Find specs directory
    if specs_dir is None:
        specs_dir = find_specs_directory()

    if specs_dir is None:
        return (
            None,
            "No specs directory found. Use specs_dir parameter or set SDD_SPECS_DIR.",
        )

    # Find and load the spec
    spec_path = find_spec_file(spec_id, specs_dir)
    if spec_path is None:
        return None, f"Specification '{spec_id}' not found"

    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        return None, f"Failed to load specification '{spec_id}'"

    hierarchy = spec_data.get("hierarchy", {})

    # Validate phase exists and is actually a phase node
    phase = hierarchy.get(phase_id)
    if phase is None:
        return None, f"Phase '{phase_id}' not found"

    node_type = phase.get("type")
    if node_type != "phase":
        return None, f"Node '{phase_id}' is not a phase (type: {node_type})"

    spec_root = hierarchy.get("spec-root")
    if spec_root is None:
        return None, "Specification root node 'spec-root' not found"

    children = spec_root.get("children", [])
    if not isinstance(children, list):
        children = []

    # Find current position
    try:
        old_index = children.index(phase_id)
    except ValueError:
        return None, f"Phase '{phase_id}' not found in spec-root children"

    # Convert to 0-based index for internal use
    new_index = position - 1

    if new_index < 0 or new_index >= len(children):
        return None, f"Invalid position {position}. Must be 1-{len(children)}"

    # No change needed if same position
    if old_index == new_index:
        return {
            "spec_id": spec_id,
            "phase_id": phase_id,
            "phase_title": phase.get("title", ""),
            "old_position": old_index + 1,
            "new_position": new_index + 1,
            "moved": False,
            "dry_run": dry_run,
            "message": "Phase is already at the specified position",
        }, None

    def _phase_at(index: int) -> Optional[str]:
        """Return children[index] if it refers to a phase node, else None."""
        candidate = children[index]
        if hierarchy.get(candidate, {}).get("type") == "phase":
            return candidate
        return None

    # Identify old neighbors for dependency cleanup
    old_prev_id = _phase_at(old_index - 1) if old_index > 0 else None
    old_next_id = _phase_at(old_index + 1) if old_index < len(children) - 1 else None

    # Perform the move in children list. After removal the list is one shorter,
    # so inserting at new_index lands the phase at 0-based index new_index in
    # the final list for both forward and backward moves.
    children.remove(phase_id)
    if new_index >= len(children):
        children.append(phase_id)
    else:
        children.insert(new_index, phase_id)

    # Identify new neighbors
    actual_new_index = children.index(phase_id)
    new_prev_id = _phase_at(actual_new_index - 1) if actual_new_index > 0 else None
    new_next_id = (
        _phase_at(actual_new_index + 1)
        if actual_new_index < len(children) - 1
        else None
    )

    # Track dependency changes for the audit trail in the result
    dependencies_updated: List[Dict[str, Any]] = []

    def _record(action: str, from_id: str, relationship: str, target: str) -> None:
        """Append one audit entry describing a dependency edge change."""
        dependencies_updated.append({
            "action": action,
            "from": from_id,
            "relationship": relationship,
            "target": target,
        })

    if link_previous:
        phase_deps = phase.setdefault(
            "dependencies", {"blocks": [], "blocked_by": [], "depends": []}
        )
        phase_blocked_by = phase_deps.setdefault("blocked_by", [])
        phase_blocks = phase_deps.setdefault("blocks", [])

        # 1. Remove this phase from old_prev's blocks list
        if old_prev_id:
            old_prev = hierarchy.get(old_prev_id)
            if old_prev:
                old_prev_blocks = old_prev.get("dependencies", {}).get("blocks", [])
                if phase_id in old_prev_blocks:
                    old_prev_blocks.remove(phase_id)
                    _record("removed", old_prev_id, "blocks", phase_id)

            # 2. Remove old_prev from this phase's blocked_by
            if old_prev_id in phase_blocked_by:
                phase_blocked_by.remove(old_prev_id)
                _record("removed", phase_id, "blocked_by", old_prev_id)

        # 3. Remove this phase from old_next's blocked_by
        if old_next_id:
            old_next = hierarchy.get(old_next_id)
            if old_next:
                old_next_blocked_by = old_next.get("dependencies", {}).get(
                    "blocked_by", []
                )
                if phase_id in old_next_blocked_by:
                    old_next_blocked_by.remove(phase_id)
                    _record("removed", old_next_id, "blocked_by", phase_id)

            # 4. Remove old_next from this phase's blocks
            if old_next_id in phase_blocks:
                phase_blocks.remove(old_next_id)
                _record("removed", phase_id, "blocks", old_next_id)

        # 5. Link old neighbors to each other (they were adjacent via this phase)
        if old_prev_id and old_next_id:
            old_prev = hierarchy.get(old_prev_id)
            old_next = hierarchy.get(old_next_id)
            if old_prev and old_next:
                old_prev_deps = old_prev.setdefault(
                    "dependencies", {"blocks": [], "blocked_by": [], "depends": []}
                )
                old_prev_blocks = old_prev_deps.setdefault("blocks", [])
                if old_next_id not in old_prev_blocks:
                    old_prev_blocks.append(old_next_id)
                    _record("added", old_prev_id, "blocks", old_next_id)

                old_next_deps = old_next.setdefault(
                    "dependencies", {"blocks": [], "blocked_by": [], "depends": []}
                )
                old_next_blocked_by = old_next_deps.setdefault("blocked_by", [])
                if old_prev_id not in old_next_blocked_by:
                    old_next_blocked_by.append(old_prev_id)
                    _record("added", old_next_id, "blocked_by", old_prev_id)

        # 6. New prev blocks this phase
        if new_prev_id:
            new_prev = hierarchy.get(new_prev_id)
            if new_prev:
                new_prev_deps = new_prev.setdefault(
                    "dependencies", {"blocks": [], "blocked_by": [], "depends": []}
                )
                new_prev_blocks = new_prev_deps.setdefault("blocks", [])
                if phase_id not in new_prev_blocks:
                    new_prev_blocks.append(phase_id)
                    _record("added", new_prev_id, "blocks", phase_id)

                # This phase is blocked by new prev
                if new_prev_id not in phase_blocked_by:
                    phase_blocked_by.append(new_prev_id)
                    _record("added", phase_id, "blocked_by", new_prev_id)

        # 7. This phase blocks new next
        if new_next_id:
            new_next = hierarchy.get(new_next_id)
            if new_next:
                if new_next_id not in phase_blocks:
                    phase_blocks.append(new_next_id)
                    _record("added", phase_id, "blocks", new_next_id)

                new_next_deps = new_next.setdefault(
                    "dependencies", {"blocks": [], "blocked_by": [], "depends": []}
                )
                new_next_blocked_by = new_next_deps.setdefault("blocked_by", [])
                if phase_id not in new_next_blocked_by:
                    new_next_blocked_by.append(phase_id)
                    _record("added", new_next_id, "blocked_by", phase_id)

        # 8. Remove the direct link between new prev and new next — it now goes
        # through this phase.
        # BUG FIX: this section previously read new_next_blocked_by, which was
        # only bound when new_next_id existed; moving a phase to the LAST
        # position (new_next_id is None) with a phase predecessor raised
        # UnboundLocalError. Both neighbors are now required and their
        # dependency lists are re-fetched here.
        if new_prev_id and new_next_id:
            new_prev = hierarchy.get(new_prev_id)
            new_next = hierarchy.get(new_next_id)
            if new_prev:
                new_prev_blocks = new_prev.get("dependencies", {}).get("blocks", [])
                if new_next_id in new_prev_blocks:
                    new_prev_blocks.remove(new_next_id)
                    _record("removed", new_prev_id, "blocks", new_next_id)
            if new_next:
                new_next_blocked_by = new_next.get("dependencies", {}).get(
                    "blocked_by", []
                )
                if new_prev_id in new_next_blocked_by:
                    new_next_blocked_by.remove(new_prev_id)
                    _record("removed", new_next_id, "blocked_by", new_prev_id)

    # Update spec-root children
    spec_root["children"] = children

    # Build result
    result: Dict[str, Any] = {
        "spec_id": spec_id,
        "phase_id": phase_id,
        "phase_title": phase.get("title", ""),
        "old_position": old_index + 1,
        "new_position": actual_new_index + 1,
        "moved": True,
        "link_previous": link_previous,
        "dry_run": dry_run,
    }

    if dependencies_updated:
        result["dependencies_updated"] = dependencies_updated

    if dry_run:
        result["message"] = "Dry run - changes not saved"
        return result, None

    # Save the spec
    saved = save_spec(spec_id, spec_data, specs_dir)
    if not saved:
        return None, "Failed to save specification"

    return result, None
|
|
2320
|
+
|
|
2321
|
+
|
|
2322
|
+
def update_phase_metadata(
    spec_id: str,
    phase_id: str,
    *,
    estimated_hours: Optional[float] = None,
    description: Optional[str] = None,
    purpose: Optional[str] = None,
    dry_run: bool = False,
    specs_dir: Optional[Path] = None,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """
    Update metadata fields of a phase in a specification.

    Supports updating estimated_hours, description, and purpose on a phase
    node. Each applied change records the previous value for auditing.

    Args:
        spec_id: Specification ID containing the phase.
        phase_id: Phase ID to update (e.g., "phase-1").
        estimated_hours: New estimated hours value (must be >= 0 if provided).
        description: New description text for the phase.
        purpose: New purpose text for the phase.
        dry_run: If True, validate and return preview without saving changes.
        specs_dir: Path to specs directory (auto-detected if not provided).

    Returns:
        Tuple of (result_dict, error_message).
        On success: ({"spec_id": ..., "phase_id": ..., "updates": [...], ...}, None)
        On failure: (None, "error message")
    """
    # --- Input validation (cheap checks first, before touching disk) ---
    if not (spec_id and spec_id.strip()):
        return None, "Specification ID is required"
    if not (phase_id and phase_id.strip()):
        return None, "Phase ID is required"

    if estimated_hours is not None:
        if not isinstance(estimated_hours, (int, float)):
            return None, "estimated_hours must be a number"
        if estimated_hours < 0:
            return None, "estimated_hours must be >= 0"

    if estimated_hours is None and description is None and purpose is None:
        return None, "At least one field (estimated_hours, description, purpose) must be provided"

    # --- Locate and load the spec ---
    if specs_dir is None:
        specs_dir = find_specs_directory()
    if specs_dir is None:
        return (
            None,
            "No specs directory found. Use specs_dir parameter or set SDD_SPECS_DIR.",
        )

    if find_spec_file(spec_id, specs_dir) is None:
        return None, f"Specification '{spec_id}' not found"

    spec_data = load_spec(spec_id, specs_dir)
    if spec_data is None:
        return None, f"Failed to load specification '{spec_id}'"

    # --- Resolve and validate the phase node ---
    phase = spec_data.get("hierarchy", {}).get(phase_id)
    if phase is None:
        return None, f"Phase '{phase_id}' not found"

    node_type = phase.get("type")
    if node_type != "phase":
        return None, f"Node '{phase_id}' is not a phase (type: {node_type})"

    phase_metadata = phase.setdefault("metadata", {})

    # Non-empty text fields are stripped of surrounding whitespace;
    # empty strings are kept as-is (they explicitly clear the field).
    if description:
        description = description.strip()
    if purpose:
        purpose = purpose.strip()

    # --- Apply changes, recording the prior value of each field ---
    applied: List[Dict[str, Any]] = []
    for field_name, new_value in (
        ("estimated_hours", estimated_hours),
        ("description", description),
        ("purpose", purpose),
    ):
        if new_value is None:
            continue
        applied.append({
            "field": field_name,
            "previous_value": phase_metadata.get(field_name),
            "new_value": new_value,
        })
        phase_metadata[field_name] = new_value

    result: Dict[str, Any] = {
        "spec_id": spec_id,
        "phase_id": phase_id,
        "phase_title": phase.get("title", ""),
        "updates": applied,
        "dry_run": dry_run,
    }

    if dry_run:
        result["message"] = "Dry run - changes not saved"
        return result, None

    if not save_spec(spec_id, spec_data, specs_dir):
        return None, "Failed to save specification"

    return result, None
|
|
2462
|
+
|
|
2463
|
+
|
|
974
2464
|
def get_template_structure(template: str, category: str) -> Dict[str, Any]:
|
|
975
2465
|
"""
|
|
976
2466
|
Get the hierarchical structure for a spec template.
|
|
977
2467
|
|
|
978
|
-
|
|
2468
|
+
Only the 'empty' template is supported. Use phase templates to add structure.
|
|
979
2469
|
|
|
980
2470
|
Args:
|
|
981
|
-
template: Template type (
|
|
2471
|
+
template: Template type (only 'empty' is valid).
|
|
982
2472
|
category: Default task category.
|
|
983
2473
|
|
|
984
2474
|
Returns:
|
|
985
2475
|
Hierarchy dict for the spec.
|
|
2476
|
+
|
|
2477
|
+
Raises:
|
|
2478
|
+
ValueError: If template is not 'empty'.
|
|
986
2479
|
"""
|
|
987
|
-
|
|
2480
|
+
if template != "empty":
|
|
2481
|
+
raise ValueError(
|
|
2482
|
+
f"Invalid template '{template}'. Only 'empty' template is supported. "
|
|
2483
|
+
f"Use phase templates (phase-add-bulk or phase-template apply) to add structure."
|
|
2484
|
+
)
|
|
2485
|
+
|
|
2486
|
+
return {
|
|
988
2487
|
"spec-root": {
|
|
989
2488
|
"type": "spec",
|
|
990
2489
|
"title": "", # Filled in later
|
|
991
2490
|
"status": "pending",
|
|
992
2491
|
"parent": None,
|
|
993
|
-
"children": [
|
|
2492
|
+
"children": [],
|
|
994
2493
|
"total_tasks": 0,
|
|
995
2494
|
"completed_tasks": 0,
|
|
996
2495
|
"metadata": {
|
|
@@ -1003,201 +2502,300 @@ def get_template_structure(template: str, category: str) -> Dict[str, Any]:
|
|
|
1003
2502
|
"depends": [],
|
|
1004
2503
|
},
|
|
1005
2504
|
},
|
|
1006
|
-
"phase-1": {
|
|
1007
|
-
"type": "phase",
|
|
1008
|
-
"title": "Planning & Discovery",
|
|
1009
|
-
"status": "pending",
|
|
1010
|
-
"parent": "spec-root",
|
|
1011
|
-
"children": ["task-1-1"],
|
|
1012
|
-
"total_tasks": 1,
|
|
1013
|
-
"completed_tasks": 0,
|
|
1014
|
-
"metadata": {
|
|
1015
|
-
"purpose": "Initial planning and requirements gathering",
|
|
1016
|
-
"estimated_hours": 2,
|
|
1017
|
-
},
|
|
1018
|
-
"dependencies": {
|
|
1019
|
-
"blocks": [],
|
|
1020
|
-
"blocked_by": [],
|
|
1021
|
-
"depends": [],
|
|
1022
|
-
},
|
|
1023
|
-
},
|
|
1024
|
-
"task-1-1": {
|
|
1025
|
-
"type": "task",
|
|
1026
|
-
"title": "Define requirements",
|
|
1027
|
-
"status": "pending",
|
|
1028
|
-
"parent": "phase-1",
|
|
1029
|
-
"children": [],
|
|
1030
|
-
"total_tasks": 1,
|
|
1031
|
-
"completed_tasks": 0,
|
|
1032
|
-
"metadata": {
|
|
1033
|
-
"details": "Document the requirements and acceptance criteria",
|
|
1034
|
-
"category": category,
|
|
1035
|
-
"estimated_hours": 1,
|
|
1036
|
-
},
|
|
1037
|
-
"dependencies": {
|
|
1038
|
-
"blocks": [],
|
|
1039
|
-
"blocked_by": [],
|
|
1040
|
-
"depends": [],
|
|
1041
|
-
},
|
|
1042
|
-
},
|
|
1043
2505
|
}
|
|
1044
2506
|
|
|
1045
|
-
# Add verification to phase-1 (all templates)
|
|
1046
|
-
_add_phase_verification(base_hierarchy, 1, "phase-1")
|
|
1047
|
-
base_hierarchy["spec-root"]["total_tasks"] = 3 # task + 2 verify
|
|
1048
2507
|
|
|
1049
|
-
|
|
1050
|
-
|
|
2508
|
+
def get_phase_template_structure(
    template: str, category: str = "implementation"
) -> Dict[str, Any]:
    """
    Get the structure definition for a phase template.

    Phase templates describe reusable phase layouts with pre-configured
    tasks. Every template also gets automatic verification scaffolding
    (run-tests + fidelity), signalled via the includes_verification flag.

    Args:
        template: Phase template type (planning, implementation, testing, security, documentation).
        category: Default task category for tasks in this phase.

    Returns:
        Dict with phase structure including:
        - title: Phase title
        - description: Phase description
        - purpose: Phase purpose for metadata
        - estimated_hours: Total estimated hours
        - tasks: List of task definitions (title, description, category, estimated_hours)
        - includes_verification: Always True (verification auto-added)
        - template_name: Echo of the requested template name
    """

    def _task(title: str, description: str, task_category: str,
              criterion: str, hours: int) -> Dict[str, Any]:
        # Each predefined task carries exactly one acceptance criterion.
        return {
            "title": title,
            "description": description,
            "task_category": task_category,
            "acceptance_criteria": [criterion],
            "estimated_hours": hours,
        }

    catalog: Dict[str, Dict[str, Any]] = {
        "planning": {
            "title": "Planning & Discovery",
            "description": "Requirements gathering, analysis, and initial planning",
            "purpose": "Define scope, requirements, and acceptance criteria",
            "estimated_hours": 4,
            "tasks": [
                _task(
                    "Define requirements",
                    "Document functional and non-functional requirements",
                    "investigation",
                    "Requirements are documented and reviewed",
                    2,
                ),
                _task(
                    "Design solution approach",
                    "Outline the technical approach and architecture decisions",
                    "investigation",
                    "Solution approach and key decisions are documented",
                    2,
                ),
            ],
        },
        "implementation": {
            "title": "Implementation",
            "description": "Core development and feature implementation",
            "purpose": "Build the primary functionality",
            "estimated_hours": 8,
            "tasks": [
                _task(
                    "Implement core functionality",
                    "Build the main features and business logic",
                    "investigation",
                    "Core functionality is implemented and verified",
                    6,
                ),
                _task(
                    "Add error handling",
                    "Implement error handling and edge cases",
                    "investigation",
                    "Error handling covers expected edge cases",
                    2,
                ),
            ],
        },
        "testing": {
            "title": "Testing & Validation",
            "description": "Comprehensive testing and quality assurance",
            "purpose": "Ensure code quality and correctness",
            "estimated_hours": 6,
            "tasks": [
                _task(
                    "Write unit tests",
                    "Create unit tests for individual components",
                    "investigation",
                    "Unit tests cover primary logic paths",
                    3,
                ),
                _task(
                    "Write integration tests",
                    "Create integration tests for component interactions",
                    "investigation",
                    "Integration tests cover critical workflows",
                    3,
                ),
            ],
        },
        "security": {
            "title": "Security Review",
            "description": "Security audit, vulnerability assessment, and hardening",
            "purpose": "Identify and remediate security vulnerabilities",
            "estimated_hours": 6,
            "tasks": [
                _task(
                    "Security audit",
                    "Review code for security vulnerabilities (OWASP Top 10)",
                    "investigation",
                    "Security findings are documented with severity",
                    3,
                ),
                _task(
                    "Security remediation",
                    "Fix identified vulnerabilities and harden implementation",
                    "investigation",
                    "Security findings are addressed or tracked",
                    3,
                ),
            ],
        },
        "documentation": {
            "title": "Documentation",
            "description": "Technical documentation and knowledge capture",
            "purpose": "Document the implementation for maintainability",
            "estimated_hours": 4,
            "tasks": [
                _task(
                    "Write API documentation",
                    "Document public APIs, parameters, and return values",
                    "research",
                    "API documentation is updated with current behavior",
                    2,
                ),
                _task(
                    "Write user guide",
                    "Create usage examples and integration guide",
                    "research",
                    "User guide includes usage examples",
                    2,
                ),
            ],
        },
    }

    if template not in catalog:
        raise ValueError(
            f"Invalid phase template '{template}'. Must be one of: {', '.join(PHASE_TEMPLATES)}"
        )

    # Shallow copy is sufficient: the catalog is rebuilt on every call,
    # so nested structures are never shared across invocations.
    structure = catalog[template].copy()
    structure["includes_verification"] = True
    structure["template_name"] = template
    return structure
|
|
1144
2672
|
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
|
|
1148
|
-
|
|
2673
|
+
|
|
2674
|
+
def apply_phase_template(
    spec_id: str,
    template: str,
    specs_dir: Optional[Path] = None,
    category: str = "implementation",
    position: Optional[int] = None,
    link_previous: bool = True,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """
    Apply a phase template to an existing spec.

    Expands the named template into a new phase with its pre-configured
    tasks, appends the standard verification scaffolding (run-tests +
    fidelity), and delegates atomic creation to add_phase_bulk.

    Args:
        spec_id: ID of the spec to add the phase to.
        template: Phase template name (planning, implementation, testing, security, documentation).
        specs_dir: Path to specs directory (auto-detected if not provided).
        category: Default task category for tasks (can be overridden by template).
        position: Position to insert phase (None = append at end).
        link_previous: Whether to link this phase to the previous one with dependencies.

    Returns:
        Tuple of (result_dict, error_message).
        On success: ({"phase_id": ..., "tasks_created": [...], ...}, None)
        On failure: (None, "error message")
    """
    # Reject unknown templates before doing any work.
    if template not in PHASE_TEMPLATES:
        return (
            None,
            f"Invalid phase template '{template}'. Must be one of: {', '.join(PHASE_TEMPLATES)}",
        )

    blueprint = get_phase_template_structure(template, category)

    # Expand template task definitions into add_phase_bulk task payloads.
    task_payloads = [
        {
            "type": "task",
            "title": entry["title"],
            "description": entry.get("description", ""),
            # Prefer the template's explicit category; fall back to the caller's default.
            "task_category": entry.get("task_category", entry.get("category", category)),
            "acceptance_criteria": entry.get("acceptance_criteria"),
            "estimated_hours": entry.get("estimated_hours", 1),
        }
        for entry in blueprint["tasks"]
    ]

    # Every templated phase ends with verification scaffolding.
    task_payloads += [
        {"type": "verify", "title": "Run tests", "verification_type": "run-tests"},
        {"type": "verify", "title": "Fidelity review", "verification_type": "fidelity"},
    ]

    result, error = add_phase_bulk(
        spec_id=spec_id,
        phase_title=blueprint["title"],
        tasks=task_payloads,
        specs_dir=specs_dir,
        phase_description=blueprint.get("description"),
        phase_purpose=blueprint.get("purpose"),
        phase_estimated_hours=blueprint.get("estimated_hours"),
        position=position,
        link_previous=link_previous,
    )
    if error:
        return None, error

    # Annotate the result with the template that produced this phase.
    if result:
        result["template_applied"] = template
        result["template_title"] = blueprint["title"]

    return result, None
|
|
2757
|
+
|
|
2758
|
+
|
|
2759
|
+
def generate_spec_data(
|
|
2760
|
+
name: str,
|
|
2761
|
+
template: str = "empty",
|
|
2762
|
+
category: str = "implementation",
|
|
2763
|
+
mission: Optional[str] = None,
|
|
2764
|
+
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
|
|
2765
|
+
"""
|
|
2766
|
+
Generate spec data structure without writing to disk.
|
|
2767
|
+
|
|
2768
|
+
Used for preflight validation (dry_run) and by create_spec.
|
|
2769
|
+
|
|
2770
|
+
Args:
|
|
2771
|
+
name: Human-readable name for the specification.
|
|
2772
|
+
template: Template type (only 'empty' is valid).
|
|
2773
|
+
category: Default task category.
|
|
2774
|
+
mission: Optional mission statement for the spec.
|
|
2775
|
+
|
|
2776
|
+
Returns:
|
|
2777
|
+
Tuple of (spec_data, error_message).
|
|
2778
|
+
On success: (dict, None)
|
|
2779
|
+
On failure: (None, "error message")
|
|
2780
|
+
"""
|
|
2781
|
+
# Validate template - only 'empty' is supported
|
|
2782
|
+
if template not in TEMPLATES:
|
|
1174
2783
|
return (
|
|
1175
2784
|
None,
|
|
1176
|
-
f"Invalid
|
|
2785
|
+
f"Invalid template '{template}'. Only 'empty' template is supported. "
|
|
2786
|
+
f"Use phase templates to add structure.",
|
|
1177
2787
|
)
|
|
1178
2788
|
|
|
1179
|
-
#
|
|
1180
|
-
if
|
|
1181
|
-
specs_dir = find_specs_directory()
|
|
1182
|
-
|
|
1183
|
-
if specs_dir is None:
|
|
2789
|
+
# Validate category
|
|
2790
|
+
if category not in CATEGORIES:
|
|
1184
2791
|
return (
|
|
1185
2792
|
None,
|
|
1186
|
-
"
|
|
2793
|
+
f"Invalid category '{category}'. Must be one of: {', '.join(CATEGORIES)}",
|
|
1187
2794
|
)
|
|
1188
2795
|
|
|
1189
|
-
# Ensure pending directory exists
|
|
1190
|
-
pending_dir = specs_dir / "pending"
|
|
1191
|
-
pending_dir.mkdir(parents=True, exist_ok=True)
|
|
1192
|
-
|
|
1193
2796
|
# Generate spec ID
|
|
1194
2797
|
spec_id = generate_spec_id(name)
|
|
1195
2798
|
|
|
1196
|
-
# Check if spec already exists
|
|
1197
|
-
spec_path = pending_dir / f"{spec_id}.json"
|
|
1198
|
-
if spec_path.exists():
|
|
1199
|
-
return None, f"Specification already exists: {spec_id}"
|
|
1200
|
-
|
|
1201
2799
|
# Generate spec structure
|
|
1202
2800
|
now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
|
|
1203
2801
|
hierarchy = get_template_structure(template, category)
|
|
@@ -1219,24 +2817,77 @@ def create_spec(
|
|
|
1219
2817
|
"last_updated": now,
|
|
1220
2818
|
"metadata": {
|
|
1221
2819
|
"description": "",
|
|
2820
|
+
"mission": mission.strip() if isinstance(mission, str) else "",
|
|
1222
2821
|
"objectives": [],
|
|
1223
|
-
"complexity": "
|
|
2822
|
+
"complexity": "low", # Complexity set via explicit metadata, not template
|
|
1224
2823
|
"estimated_hours": estimated_hours,
|
|
1225
2824
|
"assumptions": [],
|
|
1226
|
-
"status": "pending",
|
|
1227
2825
|
"owner": "",
|
|
1228
|
-
"progress_percentage": 0,
|
|
1229
|
-
"current_phase": "phase-1",
|
|
1230
2826
|
"category": category,
|
|
1231
2827
|
"template": template,
|
|
1232
2828
|
},
|
|
1233
2829
|
"progress_percentage": 0,
|
|
1234
2830
|
"status": "pending",
|
|
1235
|
-
"current_phase":
|
|
2831
|
+
"current_phase": None, # Empty template has no phases
|
|
1236
2832
|
"hierarchy": hierarchy,
|
|
1237
2833
|
"journal": [],
|
|
1238
2834
|
}
|
|
1239
2835
|
|
|
2836
|
+
return spec_data, None
|
|
2837
|
+
|
|
2838
|
+
|
|
2839
|
+
def create_spec(
|
|
2840
|
+
name: str,
|
|
2841
|
+
template: str = "empty",
|
|
2842
|
+
category: str = "implementation",
|
|
2843
|
+
mission: Optional[str] = None,
|
|
2844
|
+
specs_dir: Optional[Path] = None,
|
|
2845
|
+
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
|
|
2846
|
+
"""
|
|
2847
|
+
Create a new specification file from a template.
|
|
2848
|
+
|
|
2849
|
+
Args:
|
|
2850
|
+
name: Human-readable name for the specification.
|
|
2851
|
+
template: Template type (only 'empty' is valid). Use phase templates to add structure.
|
|
2852
|
+
category: Default task category. Default: implementation.
|
|
2853
|
+
mission: Optional mission statement for the spec.
|
|
2854
|
+
specs_dir: Path to specs directory (auto-detected if not provided).
|
|
2855
|
+
|
|
2856
|
+
Returns:
|
|
2857
|
+
Tuple of (result_dict, error_message).
|
|
2858
|
+
On success: ({"spec_id": ..., "spec_path": ..., ...}, None)
|
|
2859
|
+
On failure: (None, "error message")
|
|
2860
|
+
"""
|
|
2861
|
+
# Generate spec data (handles validation)
|
|
2862
|
+
spec_data, error = generate_spec_data(
|
|
2863
|
+
name=name,
|
|
2864
|
+
template=template,
|
|
2865
|
+
category=category,
|
|
2866
|
+
mission=mission,
|
|
2867
|
+
)
|
|
2868
|
+
if error or spec_data is None:
|
|
2869
|
+
return None, error or "Failed to generate spec data"
|
|
2870
|
+
|
|
2871
|
+
# Find specs directory
|
|
2872
|
+
if specs_dir is None:
|
|
2873
|
+
specs_dir = find_specs_directory()
|
|
2874
|
+
|
|
2875
|
+
if specs_dir is None:
|
|
2876
|
+
return (
|
|
2877
|
+
None,
|
|
2878
|
+
"No specs directory found. Use specs_dir parameter or set SDD_SPECS_DIR.",
|
|
2879
|
+
)
|
|
2880
|
+
|
|
2881
|
+
# Ensure pending directory exists
|
|
2882
|
+
pending_dir = specs_dir / "pending"
|
|
2883
|
+
pending_dir.mkdir(parents=True, exist_ok=True)
|
|
2884
|
+
|
|
2885
|
+
# Check if spec already exists
|
|
2886
|
+
spec_id = spec_data["spec_id"]
|
|
2887
|
+
spec_path = pending_dir / f"{spec_id}.json"
|
|
2888
|
+
if spec_path.exists():
|
|
2889
|
+
return None, f"Specification already exists: {spec_id}"
|
|
2890
|
+
|
|
1240
2891
|
# Write the spec file
|
|
1241
2892
|
try:
|
|
1242
2893
|
with open(spec_path, "w") as f:
|
|
@@ -1245,6 +2896,7 @@ def create_spec(
|
|
|
1245
2896
|
return None, f"Failed to write spec file: {e}"
|
|
1246
2897
|
|
|
1247
2898
|
# Count tasks and phases
|
|
2899
|
+
hierarchy = spec_data["hierarchy"]
|
|
1248
2900
|
task_count = sum(
|
|
1249
2901
|
1
|
|
1250
2902
|
for node in hierarchy.values()
|
|
@@ -1543,6 +3195,7 @@ def list_assumptions(
|
|
|
1543
3195
|
FRONTMATTER_KEYS = (
|
|
1544
3196
|
"title",
|
|
1545
3197
|
"description",
|
|
3198
|
+
"mission",
|
|
1546
3199
|
"objectives",
|
|
1547
3200
|
"complexity",
|
|
1548
3201
|
"estimated_hours",
|
|
@@ -1618,20 +3271,27 @@ def update_frontmatter(
|
|
|
1618
3271
|
if "metadata" not in spec_data:
|
|
1619
3272
|
spec_data["metadata"] = {}
|
|
1620
3273
|
|
|
1621
|
-
# Get previous value for result
|
|
1622
|
-
|
|
3274
|
+
# Get previous value for result (check appropriate location)
|
|
3275
|
+
if key in ("status", "progress_percentage", "current_phase"):
|
|
3276
|
+
previous_value = spec_data.get(key)
|
|
3277
|
+
else:
|
|
3278
|
+
previous_value = spec_data["metadata"].get(key)
|
|
1623
3279
|
|
|
1624
3280
|
# Process value based on type
|
|
1625
3281
|
if isinstance(value, str):
|
|
1626
3282
|
value = value.strip() if value else value
|
|
1627
3283
|
|
|
1628
|
-
#
|
|
1629
|
-
|
|
1630
|
-
|
|
1631
|
-
|
|
1632
|
-
# Some fields like title, status, progress_percentage exist at both levels
|
|
1633
|
-
if key in ("title", "status", "progress_percentage", "current_phase"):
|
|
3284
|
+
# Computed fields (status, progress_percentage, current_phase) are now
|
|
3285
|
+
# stored only at top-level. Title is kept in metadata for descriptive purposes.
|
|
3286
|
+
if key in ("status", "progress_percentage", "current_phase"):
|
|
3287
|
+
# Update top-level only (canonical location for computed fields)
|
|
1634
3288
|
spec_data[key] = value
|
|
3289
|
+
else:
|
|
3290
|
+
# Regular metadata field
|
|
3291
|
+
spec_data["metadata"][key] = value
|
|
3292
|
+
# Also sync title to top-level if updating it
|
|
3293
|
+
if key == "title":
|
|
3294
|
+
spec_data[key] = value
|
|
1635
3295
|
|
|
1636
3296
|
# Update last_updated
|
|
1637
3297
|
now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
|
|
@@ -1648,3 +3308,546 @@ def update_frontmatter(
|
|
|
1648
3308
|
"value": value,
|
|
1649
3309
|
"previous_value": previous_value,
|
|
1650
3310
|
}, None
|
|
3311
|
+
|
|
3312
|
+
|
|
3313
|
+
# Safety constraints for find/replace operations
|
|
3314
|
+
_FR_MAX_PATTERN_LENGTH = 256
|
|
3315
|
+
_FR_DEFAULT_MAX_REPLACEMENTS = 1000
|
|
3316
|
+
_FR_VALID_SCOPES = {"all", "titles", "descriptions"}
|
|
3317
|
+
_FR_MAX_SAMPLE_DIFFS = 10
|
|
3318
|
+
|
|
3319
|
+
|
|
3320
|
+
def find_replace_in_spec(
|
|
3321
|
+
spec_id: str,
|
|
3322
|
+
find: str,
|
|
3323
|
+
replace: str,
|
|
3324
|
+
*,
|
|
3325
|
+
scope: str = "all",
|
|
3326
|
+
use_regex: bool = False,
|
|
3327
|
+
case_sensitive: bool = True,
|
|
3328
|
+
dry_run: bool = False,
|
|
3329
|
+
max_replacements: int = _FR_DEFAULT_MAX_REPLACEMENTS,
|
|
3330
|
+
specs_dir: Optional[Path] = None,
|
|
3331
|
+
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
|
|
3332
|
+
"""
|
|
3333
|
+
Find and replace text across spec hierarchy nodes.
|
|
3334
|
+
|
|
3335
|
+
Performs literal or regex find/replace across titles and/or descriptions
|
|
3336
|
+
in a specification's hierarchy nodes.
|
|
3337
|
+
|
|
3338
|
+
Args:
|
|
3339
|
+
spec_id: Specification ID to modify.
|
|
3340
|
+
find: Text or regex pattern to find.
|
|
3341
|
+
replace: Replacement text (supports backreferences if use_regex=True).
|
|
3342
|
+
scope: Where to search - "all", "titles", or "descriptions".
|
|
3343
|
+
use_regex: If True, treat `find` as a regex pattern.
|
|
3344
|
+
case_sensitive: If False, perform case-insensitive matching.
|
|
3345
|
+
dry_run: If True, preview changes without modifying the spec.
|
|
3346
|
+
max_replacements: Maximum number of replacements (safety limit).
|
|
3347
|
+
specs_dir: Path to specs directory (auto-detected if not provided).
|
|
3348
|
+
|
|
3349
|
+
Returns:
|
|
3350
|
+
Tuple of (result_dict, error_message).
|
|
3351
|
+
On success: ({"spec_id": ..., "total_replacements": ..., ...}, None)
|
|
3352
|
+
On failure: (None, "error message")
|
|
3353
|
+
"""
|
|
3354
|
+
# Validate find pattern
|
|
3355
|
+
if not find or not isinstance(find, str):
|
|
3356
|
+
return None, "find must be a non-empty string"
|
|
3357
|
+
# Don't strip the pattern - use exactly what user provides (whitespace may be intentional)
|
|
3358
|
+
if not find.strip():
|
|
3359
|
+
return None, "find must be a non-empty string"
|
|
3360
|
+
if len(find) > _FR_MAX_PATTERN_LENGTH:
|
|
3361
|
+
return None, f"find pattern exceeds maximum length of {_FR_MAX_PATTERN_LENGTH} characters"
|
|
3362
|
+
|
|
3363
|
+
# Validate replace
|
|
3364
|
+
if replace is None:
|
|
3365
|
+
return None, "replace must be provided (use empty string to delete matches)"
|
|
3366
|
+
if not isinstance(replace, str):
|
|
3367
|
+
return None, "replace must be a string"
|
|
3368
|
+
|
|
3369
|
+
# Validate scope
|
|
3370
|
+
if scope not in _FR_VALID_SCOPES:
|
|
3371
|
+
return None, f"scope must be one of: {sorted(_FR_VALID_SCOPES)}"
|
|
3372
|
+
|
|
3373
|
+
# Validate max_replacements
|
|
3374
|
+
if not isinstance(max_replacements, int) or max_replacements <= 0:
|
|
3375
|
+
return None, "max_replacements must be a positive integer"
|
|
3376
|
+
|
|
3377
|
+
# Compile regex if needed
|
|
3378
|
+
compiled_pattern = None
|
|
3379
|
+
if use_regex:
|
|
3380
|
+
try:
|
|
3381
|
+
flags = 0 if case_sensitive else re.IGNORECASE
|
|
3382
|
+
compiled_pattern = re.compile(find, flags)
|
|
3383
|
+
except re.error as e:
|
|
3384
|
+
return None, f"Invalid regex pattern: {e}"
|
|
3385
|
+
else:
|
|
3386
|
+
# For literal search, prepare flags
|
|
3387
|
+
if not case_sensitive:
|
|
3388
|
+
# Create case-insensitive literal pattern
|
|
3389
|
+
compiled_pattern = re.compile(re.escape(find), re.IGNORECASE)
|
|
3390
|
+
|
|
3391
|
+
# Find specs directory
|
|
3392
|
+
if specs_dir is None:
|
|
3393
|
+
specs_dir = find_specs_directory()
|
|
3394
|
+
if specs_dir is None:
|
|
3395
|
+
return None, "No specs directory found"
|
|
3396
|
+
|
|
3397
|
+
# Load spec
|
|
3398
|
+
spec_path = find_spec_file(spec_id, specs_dir)
|
|
3399
|
+
if not spec_path:
|
|
3400
|
+
return None, f"Specification '{spec_id}' not found"
|
|
3401
|
+
spec_data = load_spec(spec_id, specs_dir)
|
|
3402
|
+
if not spec_data:
|
|
3403
|
+
return None, f"Failed to load specification '{spec_id}'"
|
|
3404
|
+
|
|
3405
|
+
hierarchy = spec_data.get("hierarchy", {})
|
|
3406
|
+
if not hierarchy:
|
|
3407
|
+
return {
|
|
3408
|
+
"spec_id": spec_id,
|
|
3409
|
+
"total_replacements": 0,
|
|
3410
|
+
"nodes_affected": 0,
|
|
3411
|
+
"changes": [],
|
|
3412
|
+
"dry_run": dry_run,
|
|
3413
|
+
"message": "No hierarchy nodes to process",
|
|
3414
|
+
}, None
|
|
3415
|
+
|
|
3416
|
+
# Track changes
|
|
3417
|
+
changes: List[Dict[str, Any]] = []
|
|
3418
|
+
total_replacements = 0
|
|
3419
|
+
nodes_affected = set()
|
|
3420
|
+
warnings: List[str] = []
|
|
3421
|
+
limit_reached = False
|
|
3422
|
+
|
|
3423
|
+
# Helper to perform replacement
|
|
3424
|
+
def do_replace(text: str) -> Tuple[str, int]:
|
|
3425
|
+
if compiled_pattern:
|
|
3426
|
+
new_text, count = compiled_pattern.subn(replace, text)
|
|
3427
|
+
return new_text, count
|
|
3428
|
+
else:
|
|
3429
|
+
# Case-sensitive literal replace
|
|
3430
|
+
count = text.count(find)
|
|
3431
|
+
new_text = text.replace(find, replace)
|
|
3432
|
+
return new_text, count
|
|
3433
|
+
|
|
3434
|
+
# Process hierarchy nodes
|
|
3435
|
+
for node_id, node_data in hierarchy.items():
|
|
3436
|
+
if node_id == "spec-root":
|
|
3437
|
+
continue
|
|
3438
|
+
if limit_reached:
|
|
3439
|
+
break
|
|
3440
|
+
|
|
3441
|
+
# Process title if in scope
|
|
3442
|
+
if scope in ("all", "titles"):
|
|
3443
|
+
title = node_data.get("title", "")
|
|
3444
|
+
if title and isinstance(title, str):
|
|
3445
|
+
new_title, count = do_replace(title)
|
|
3446
|
+
if count > 0:
|
|
3447
|
+
if total_replacements + count > max_replacements:
|
|
3448
|
+
count = max_replacements - total_replacements
|
|
3449
|
+
# Partial replacement not supported, skip this field
|
|
3450
|
+
warnings.append(
|
|
3451
|
+
f"max_replacements limit ({max_replacements}) reached"
|
|
3452
|
+
)
|
|
3453
|
+
limit_reached = True
|
|
3454
|
+
else:
|
|
3455
|
+
total_replacements += count
|
|
3456
|
+
nodes_affected.add(node_id)
|
|
3457
|
+
changes.append({
|
|
3458
|
+
"node_id": node_id,
|
|
3459
|
+
"field": "title",
|
|
3460
|
+
"old": title,
|
|
3461
|
+
"new": new_title,
|
|
3462
|
+
"replacement_count": count,
|
|
3463
|
+
})
|
|
3464
|
+
if not dry_run:
|
|
3465
|
+
node_data["title"] = new_title
|
|
3466
|
+
|
|
3467
|
+
# Process description if in scope
|
|
3468
|
+
if scope in ("all", "descriptions") and not limit_reached:
|
|
3469
|
+
metadata = node_data.get("metadata", {})
|
|
3470
|
+
if isinstance(metadata, dict):
|
|
3471
|
+
description = metadata.get("description", "")
|
|
3472
|
+
if description and isinstance(description, str):
|
|
3473
|
+
new_description, count = do_replace(description)
|
|
3474
|
+
if count > 0:
|
|
3475
|
+
if total_replacements + count > max_replacements:
|
|
3476
|
+
warnings.append(
|
|
3477
|
+
f"max_replacements limit ({max_replacements}) reached"
|
|
3478
|
+
)
|
|
3479
|
+
limit_reached = True
|
|
3480
|
+
else:
|
|
3481
|
+
total_replacements += count
|
|
3482
|
+
nodes_affected.add(node_id)
|
|
3483
|
+
changes.append({
|
|
3484
|
+
"node_id": node_id,
|
|
3485
|
+
"field": "description",
|
|
3486
|
+
"old": description,
|
|
3487
|
+
"new": new_description,
|
|
3488
|
+
"replacement_count": count,
|
|
3489
|
+
})
|
|
3490
|
+
if not dry_run:
|
|
3491
|
+
metadata["description"] = new_description
|
|
3492
|
+
|
|
3493
|
+
# Save if not dry_run and there were changes
|
|
3494
|
+
if not dry_run and total_replacements > 0:
|
|
3495
|
+
if not save_spec(spec_id, spec_data, specs_dir):
|
|
3496
|
+
return None, "Failed to save specification after replacements"
|
|
3497
|
+
|
|
3498
|
+
# Build result
|
|
3499
|
+
result: Dict[str, Any] = {
|
|
3500
|
+
"spec_id": spec_id,
|
|
3501
|
+
"total_replacements": total_replacements,
|
|
3502
|
+
"nodes_affected": len(nodes_affected),
|
|
3503
|
+
"dry_run": dry_run,
|
|
3504
|
+
"scope": scope,
|
|
3505
|
+
"find": find,
|
|
3506
|
+
"replace": replace,
|
|
3507
|
+
"use_regex": use_regex,
|
|
3508
|
+
"case_sensitive": case_sensitive,
|
|
3509
|
+
}
|
|
3510
|
+
|
|
3511
|
+
# Include sample diffs (limited)
|
|
3512
|
+
if changes:
|
|
3513
|
+
result["changes"] = changes[:_FR_MAX_SAMPLE_DIFFS]
|
|
3514
|
+
if len(changes) > _FR_MAX_SAMPLE_DIFFS:
|
|
3515
|
+
result["changes_truncated"] = True
|
|
3516
|
+
result["total_changes"] = len(changes)
|
|
3517
|
+
|
|
3518
|
+
if warnings:
|
|
3519
|
+
result["warnings"] = warnings
|
|
3520
|
+
|
|
3521
|
+
if total_replacements == 0:
|
|
3522
|
+
result["message"] = "No matches found"
|
|
3523
|
+
|
|
3524
|
+
return result, None
|
|
3525
|
+
|
|
3526
|
+
|
|
3527
|
+
# Completeness check constants: relative weights of each category in the
# overall 0-100 completeness score. Weights sum to 1.0; a category with no
# applicable nodes is excluded and the remaining weights are renormalized.
_CC_WEIGHT_TITLES = 0.20
_CC_WEIGHT_DESCRIPTIONS = 0.30
_CC_WEIGHT_FILE_PATHS = 0.25
_CC_WEIGHT_ESTIMATES = 0.25
|
|
3532
|
+
|
|
3533
|
+
|
|
3534
|
+
def check_spec_completeness(
    spec_id: str,
    *,
    specs_dir: Optional[Path] = None,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """
    Check spec completeness and calculate a score (0-100).

    Evaluates spec quality by checking for:
    - Empty titles
    - Missing task descriptions
    - Missing file_path for implementation/refactoring tasks
    - Missing estimated_hours

    Args:
        spec_id: Specification ID to check.
        specs_dir: Path to specs directory (auto-detected if not provided).

    Returns:
        Tuple of (result_dict, error_message).
        On success: ({"spec_id": ..., "completeness_score": ..., ...}, None)
        On failure: (None, "error message")
    """
    # Find specs directory
    if specs_dir is None:
        specs_dir = find_specs_directory()
        if specs_dir is None:
            return None, "No specs directory found"

    # Load spec
    spec_path = find_spec_file(spec_id, specs_dir)
    if not spec_path:
        return None, f"Specification '{spec_id}' not found"
    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return None, f"Failed to load specification '{spec_id}'"

    hierarchy = spec_data.get("hierarchy", {})
    if not hierarchy:
        # An empty spec has nothing to be incomplete about.
        return {
            "spec_id": spec_id,
            "completeness_score": 100,
            "categories": {},
            "issues": [],
            "message": "No hierarchy nodes to check",
        }, None

    # Helper: non-empty after stripping whitespace.
    def _nonempty_string(value: Any) -> bool:
        return isinstance(value, str) and bool(value.strip())

    # Helper: a node counts as described if it has a non-empty description,
    # a non-empty details string, or a details list with any non-empty item.
    def _has_description(metadata: Dict[str, Any]) -> bool:
        if _nonempty_string(metadata.get("description")):
            return True
        details = metadata.get("details")
        if _nonempty_string(details):
            return True
        if isinstance(details, list):
            return any(_nonempty_string(item) for item in details)
        return False

    # Helper: a valid estimate is a positive number. bool is explicitly
    # excluded because it is a subclass of int, so a JSON boolean
    # (estimated_hours: true) would otherwise count as a valid estimate.
    def _valid_estimate(value: Any) -> bool:
        return (
            isinstance(value, (int, float))
            and not isinstance(value, bool)
            and value > 0
        )

    # Per-category tallies and the list of concrete issues found.
    issues: List[Dict[str, Any]] = []
    categories: Dict[str, Dict[str, Any]] = {
        "titles": {"complete": 0, "total": 0, "score": 0.0},
        "descriptions": {"complete": 0, "total": 0, "score": 0.0},
        "file_paths": {"complete": 0, "total": 0, "score": 0.0},
        "estimates": {"complete": 0, "total": 0, "score": 0.0},
    }

    def _record(category: str, node_id: str, ok: bool, message: str) -> None:
        # Count one checked item; log an issue when it fails the check.
        categories[category]["total"] += 1
        if ok:
            categories[category]["complete"] += 1
        else:
            issues.append({
                "node_id": node_id,
                "category": category,
                "message": message,
            })

    # Check each node (the synthetic spec-root is never scored).
    for node_id, node in hierarchy.items():
        if node_id == "spec-root":
            continue
        if not isinstance(node, dict):
            continue

        node_type = node.get("type", "")
        title = node.get("title", "")
        metadata = node.get("metadata", {})
        if not isinstance(metadata, dict):
            metadata = {}

        # Title: required on all nodes.
        _record("titles", node_id, _nonempty_string(title), "Empty or missing title")

        # Description: required on task and verify nodes only.
        if node_type in ("task", "verify"):
            _record(
                "descriptions",
                node_id,
                _has_description(metadata),
                "Missing description",
            )

        # file_path: required when the task category implies touching a file.
        task_category = metadata.get("task_category", "")
        if task_category in ("implementation", "refactoring"):
            _record(
                "file_paths",
                node_id,
                _nonempty_string(metadata.get("file_path")),
                "Missing file_path for implementation task",
            )

        # estimated_hours: required on task nodes only.
        if node_type == "task":
            _record(
                "estimates",
                node_id,
                _valid_estimate(metadata.get("estimated_hours")),
                "Missing or invalid estimated_hours",
            )

    # Calculate category scores (fraction complete, rounded for display).
    for cat_data in categories.values():
        if cat_data["total"] > 0:
            cat_data["score"] = round(cat_data["complete"] / cat_data["total"], 2)
        else:
            cat_data["score"] = 1.0  # No items to check = complete

    # Weighted overall score, renormalized over the categories that actually
    # had items to check so empty categories neither help nor hurt.
    weights = {
        "titles": _CC_WEIGHT_TITLES,
        "descriptions": _CC_WEIGHT_DESCRIPTIONS,
        "file_paths": _CC_WEIGHT_FILE_PATHS,
        "estimates": _CC_WEIGHT_ESTIMATES,
    }
    weighted_score = 0.0
    total_weight = 0.0
    for name, weight in weights.items():
        if categories[name]["total"] > 0:
            weighted_score += categories[name]["score"] * weight
            total_weight += weight

    if total_weight > 0:
        completeness_score = int(round((weighted_score / total_weight) * 100))
    else:
        completeness_score = 100  # Nothing to check

    return {
        "spec_id": spec_id,
        "completeness_score": completeness_score,
        "categories": categories,
        "issues": issues,
        "issue_count": len(issues),
    }, None
|
|
3706
|
+
|
|
3707
|
+
|
|
3708
|
+
# Duplicate detection constants.
# _DD_DEFAULT_THRESHOLD: minimum similarity ratio (0.0-1.0) to flag a pair.
# _DD_MAX_PAIRS: cap on reported pairs to bound O(n^2) comparison output.
# _DD_VALID_SCOPES: accepted values for the `scope` argument.
_DD_DEFAULT_THRESHOLD = 0.8
_DD_MAX_PAIRS = 100
_DD_VALID_SCOPES = {"titles", "descriptions", "both"}
|
|
3712
|
+
|
|
3713
|
+
|
|
3714
|
+
def detect_duplicate_tasks(
    spec_id: str,
    *,
    scope: str = "titles",
    threshold: float = _DD_DEFAULT_THRESHOLD,
    max_pairs: int = _DD_MAX_PAIRS,
    specs_dir: Optional[Path] = None,
) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """
    Detect duplicate or near-duplicate tasks in a spec.

    Uses text similarity (difflib.SequenceMatcher ratio over lowercased,
    stripped text) to find tasks with similar titles or descriptions.

    Args:
        spec_id: Specification ID to check.
        scope: What to compare - "titles", "descriptions", or "both".
        threshold: Similarity threshold (0.0-1.0). Default 0.8.
        max_pairs: Maximum duplicate pairs to return. Default 100.
        specs_dir: Path to specs directory (auto-detected if not provided).

    Returns:
        Tuple of (result_dict, error_message).
        On success: ({"spec_id": ..., "duplicates": [...], ...}, None)
        On failure: (None, "error message")
    """
    from difflib import SequenceMatcher

    # Validate scope
    if scope not in _DD_VALID_SCOPES:
        return None, f"scope must be one of: {sorted(_DD_VALID_SCOPES)}"

    # Validate threshold
    if not isinstance(threshold, (int, float)) or not 0.0 <= threshold <= 1.0:
        return None, "threshold must be a number between 0.0 and 1.0"

    # Validate max_pairs
    if not isinstance(max_pairs, int) or max_pairs <= 0:
        return None, "max_pairs must be a positive integer"

    # Find specs directory
    if specs_dir is None:
        specs_dir = find_specs_directory()
        if specs_dir is None:
            return None, "No specs directory found"

    # Load spec
    spec_path = find_spec_file(spec_id, specs_dir)
    if not spec_path:
        return None, f"Specification '{spec_id}' not found"
    spec_data = load_spec(spec_id, specs_dir)
    if not spec_data:
        return None, f"Failed to load specification '{spec_id}'"

    hierarchy = spec_data.get("hierarchy", {})
    if not hierarchy:
        return {
            "spec_id": spec_id,
            "duplicates": [],
            "duplicate_count": 0,
            "scope": scope,
            "threshold": threshold,
            "message": "No hierarchy nodes to check",
        }, None

    # Collect task/verify nodes with normalized (lowercased, stripped) text
    # so comparisons are case- and whitespace-insensitive.
    nodes: List[Dict[str, Any]] = []
    for node_id, node in hierarchy.items():
        if node_id == "spec-root":
            continue
        if not isinstance(node, dict):
            continue
        if node.get("type", "") not in ("task", "verify"):
            continue

        title = node.get("title", "") or ""
        metadata = node.get("metadata", {})
        if not isinstance(metadata, dict):
            metadata = {}
        description = metadata.get("description", "") or ""

        nodes.append({
            "id": node_id,
            "title": title.strip().lower(),
            "description": description.strip().lower(),
        })

    def similarity(a: str, b: str) -> float:
        # Two empty strings are "not similar": missing text should not
        # produce a spurious 100% match.
        if not a or not b:
            return 0.0
        return SequenceMatcher(None, a, b).ratio()

    # Compare all unordered pairs, stopping once max_pairs matches are found.
    duplicates: List[Dict[str, Any]] = []
    truncated = False
    total_compared = 0

    for i, node_a in enumerate(nodes):
        if len(duplicates) >= max_pairs:
            truncated = True
            break
        for node_b in nodes[i + 1:]:
            # Check the cap BEFORE counting: a pair skipped by the limit was
            # never compared, so it must not inflate pairs_compared.
            if len(duplicates) >= max_pairs:
                truncated = True
                break
            total_compared += 1

            # Calculate similarity based on scope
            if scope == "titles":
                sim = similarity(node_a["title"], node_b["title"])
            elif scope == "descriptions":
                sim = similarity(node_a["description"], node_b["description"])
            else:  # both: a pair matching on either field is a duplicate
                title_sim = similarity(node_a["title"], node_b["title"])
                desc_sim = similarity(node_a["description"], node_b["description"])
                sim = max(title_sim, desc_sim)

            if sim >= threshold:
                duplicates.append({
                    "node_a": node_a["id"],
                    "node_b": node_b["id"],
                    "similarity": round(sim, 2),
                    "scope": scope,
                })

    result: Dict[str, Any] = {
        "spec_id": spec_id,
        "duplicates": duplicates,
        "duplicate_count": len(duplicates),
        "scope": scope,
        "threshold": threshold,
        "nodes_checked": len(nodes),
        "pairs_compared": total_compared,
    }

    if truncated:
        result["truncated"] = True
        result["warnings"] = [f"Results limited to {max_pairs} pairs"]

    return result, None
|