mindsystem-cc 3.20.0 → 3.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. package/README.md +9 -18
  2. package/agents/ms-mockup-designer.md +1 -1
  3. package/agents/ms-plan-checker.md +30 -30
  4. package/agents/ms-plan-writer.md +1 -1
  5. package/agents/ms-product-researcher.md +71 -0
  6. package/agents/ms-research-synthesizer.md +1 -1
  7. package/agents/ms-researcher.md +8 -8
  8. package/agents/ms-roadmapper.md +9 -13
  9. package/agents/ms-verifier.md +25 -117
  10. package/bin/install.js +68 -5
  11. package/commands/ms/add-phase.md +7 -8
  12. package/commands/ms/add-todo.md +3 -4
  13. package/commands/ms/adhoc.md +4 -5
  14. package/commands/ms/audit-milestone.md +15 -14
  15. package/commands/ms/complete-milestone.md +27 -24
  16. package/commands/ms/config.md +229 -0
  17. package/commands/ms/create-roadmap.md +3 -4
  18. package/commands/ms/debug.md +3 -4
  19. package/commands/ms/design-phase.md +11 -13
  20. package/commands/ms/discuss-phase.md +26 -22
  21. package/commands/ms/doctor.md +28 -205
  22. package/commands/ms/execute-phase.md +20 -12
  23. package/commands/ms/help.md +46 -39
  24. package/commands/ms/insert-phase.md +6 -7
  25. package/commands/ms/map-codebase.md +1 -2
  26. package/commands/ms/new-milestone.md +41 -19
  27. package/commands/ms/new-project.md +56 -47
  28. package/commands/ms/plan-milestone-gaps.md +7 -9
  29. package/commands/ms/plan-phase.md +4 -5
  30. package/commands/ms/progress.md +3 -4
  31. package/commands/ms/remove-phase.md +3 -4
  32. package/commands/ms/research-phase.md +11 -16
  33. package/commands/ms/research-project.md +19 -26
  34. package/commands/ms/review-design.md +4 -2
  35. package/commands/ms/verify-work.md +6 -8
  36. package/mindsystem/references/continuation-format.md +3 -3
  37. package/mindsystem/references/principles.md +1 -1
  38. package/mindsystem/references/routing/audit-result-routing.md +12 -11
  39. package/mindsystem/references/routing/between-milestones-routing.md +2 -2
  40. package/mindsystem/references/routing/milestone-complete-routing.md +1 -1
  41. package/mindsystem/references/routing/next-phase-routing.md +4 -2
  42. package/mindsystem/references/verification-patterns.md +0 -37
  43. package/mindsystem/templates/config.json +2 -1
  44. package/mindsystem/templates/context.md +7 -6
  45. package/mindsystem/templates/milestone-archive.md +5 -5
  46. package/mindsystem/templates/milestone-context.md +1 -1
  47. package/mindsystem/templates/milestone.md +9 -9
  48. package/mindsystem/templates/project.md +2 -2
  49. package/mindsystem/templates/research-subagent-prompt.md +3 -3
  50. package/mindsystem/templates/roadmap-milestone.md +14 -14
  51. package/mindsystem/templates/roadmap.md +10 -8
  52. package/mindsystem/templates/state.md +2 -2
  53. package/mindsystem/templates/verification-report.md +3 -26
  54. package/mindsystem/workflows/adhoc.md +1 -1
  55. package/mindsystem/workflows/complete-milestone.md +40 -75
  56. package/mindsystem/workflows/discuss-phase.md +141 -65
  57. package/mindsystem/workflows/doctor-fixes.md +273 -0
  58. package/mindsystem/workflows/execute-phase.md +9 -21
  59. package/mindsystem/workflows/execute-plan.md +3 -0
  60. package/mindsystem/workflows/map-codebase.md +6 -12
  61. package/mindsystem/workflows/mockup-generation.md +47 -23
  62. package/mindsystem/workflows/plan-phase.md +13 -6
  63. package/mindsystem/workflows/transition.md +2 -2
  64. package/mindsystem/workflows/verify-work.md +97 -70
  65. package/package.json +1 -1
  66. package/scripts/__pycache__/ms-tools.cpython-314.pyc +0 -0
  67. package/scripts/__pycache__/test_ms_tools.cpython-314-pytest-9.0.2.pyc +0 -0
  68. package/scripts/fixtures/scan-context/.planning/ROADMAP.md +16 -0
  69. package/scripts/fixtures/scan-context/.planning/adhoc/20260220-fix-token-SUMMARY.md +12 -0
  70. package/scripts/fixtures/scan-context/.planning/config.json +3 -0
  71. package/scripts/fixtures/scan-context/.planning/debug/resolved/token-bug.md +11 -0
  72. package/scripts/fixtures/scan-context/.planning/knowledge/auth.md +11 -0
  73. package/scripts/fixtures/scan-context/.planning/phases/02-infra/02-1-SUMMARY.md +20 -0
  74. package/scripts/fixtures/scan-context/.planning/phases/04-setup/04-1-SUMMARY.md +21 -0
  75. package/scripts/fixtures/scan-context/.planning/phases/05-auth/05-1-SUMMARY.md +28 -0
  76. package/scripts/fixtures/scan-context/.planning/todos/done/setup-db.md +10 -0
  77. package/scripts/fixtures/scan-context/.planning/todos/pending/add-logout.md +10 -0
  78. package/scripts/fixtures/scan-context/expected-output.json +257 -0
  79. package/scripts/ms-tools.py +2884 -0
  80. package/scripts/test_ms_tools.py +1622 -0
  81. package/agents/ms-flutter-code-quality.md +0 -169
  82. package/agents/ms-flutter-reviewer.md +0 -211
  83. package/agents/ms-flutter-simplifier.md +0 -79
  84. package/commands/ms/list-phase-assumptions.md +0 -56
  85. package/mindsystem/workflows/list-phase-assumptions.md +0 -178
  86. package/mindsystem/workflows/verify-phase.md +0 -625
  87. package/scripts/__pycache__/compare_mockups.cpython-314.pyc +0 -0
  88. package/scripts/archive-milestone-files.sh +0 -68
  89. package/scripts/archive-milestone-phases.sh +0 -138
  90. package/scripts/doctor-scan.sh +0 -402
  91. package/scripts/gather-milestone-stats.sh +0 -179
  92. package/scripts/generate-adhoc-patch.sh +0 -79
  93. package/scripts/generate-phase-patch.sh +0 -169
  94. package/scripts/scan-artifact-subsystems.sh +0 -55
  95. package/scripts/scan-planning-context.py +0 -839
  96. package/scripts/update-state.sh +0 -59
  97. package/scripts/validate-execution-order.sh +0 -104
  98. package/skills/flutter-code-quality/SKILL.md +0 -143
  99. package/skills/flutter-code-simplification/SKILL.md +0 -102
  100. package/skills/flutter-senior-review/AGENTS.md +0 -869
  101. package/skills/flutter-senior-review/SKILL.md +0 -205
  102. package/skills/flutter-senior-review/principles/dependencies-data-not-callbacks.md +0 -75
  103. package/skills/flutter-senior-review/principles/dependencies-provider-tree.md +0 -85
  104. package/skills/flutter-senior-review/principles/dependencies-temporal-coupling.md +0 -97
  105. package/skills/flutter-senior-review/principles/pragmatism-consistent-error-handling.md +0 -130
  106. package/skills/flutter-senior-review/principles/pragmatism-speculative-generality.md +0 -91
  107. package/skills/flutter-senior-review/principles/state-data-clumps.md +0 -64
  108. package/skills/flutter-senior-review/principles/state-invalid-states.md +0 -53
  109. package/skills/flutter-senior-review/principles/state-single-source-of-truth.md +0 -68
  110. package/skills/flutter-senior-review/principles/state-type-hierarchies.md +0 -75
  111. package/skills/flutter-senior-review/principles/structure-composition-over-config.md +0 -105
  112. package/skills/flutter-senior-review/principles/structure-shared-visual-patterns.md +0 -107
  113. package/skills/flutter-senior-review/principles/structure-wrapper-pattern.md +0 -90
@@ -0,0 +1,2884 @@
1
+ #!/usr/bin/env python3
2
+ # /// script
3
+ # requires-python = ">=3.10"
4
+ # dependencies = ["pyyaml"]
5
+ # ///
6
+ """Mindsystem CLI tools.
7
+
8
+ Single-file CLI with subcommands for all mechanical operations:
9
+ phase discovery, state updates, artifact counting, diagnostics,
10
+ patch generation, archival, and planning context scanning.
11
+ """
12
+
13
+ import argparse
14
+ import datetime
15
+ import json
16
+ import os
17
+ import re
18
+ import shutil
19
+ import subprocess
20
+ import sys
21
+ from pathlib import Path
22
+ from typing import Any
23
+
24
+ import yaml
25
+
26
+
27
+ # ---------------------------------------------------------------------------
28
+ # JSON encoder
29
+ # ---------------------------------------------------------------------------
30
+
31
+
32
class _SafeEncoder(json.JSONEncoder):
    """JSON encoder for YAML-native value types.

    yaml.safe_load can produce datetime.date / datetime.datetime objects
    that the stock json.dump rejects; emit those as ISO-8601 strings and
    defer everything else to the base class.
    """

    def default(self, o: object) -> Any:
        # Only dates/datetimes get special treatment; the base class
        # raises TypeError for anything else, as usual.
        if not isinstance(o, (datetime.date, datetime.datetime)):
            return super().default(o)
        return o.isoformat()
39
+
40
+
41
+ # ---------------------------------------------------------------------------
42
+ # Shared helpers
43
+ # ---------------------------------------------------------------------------
44
+
45
+
46
def find_git_root() -> Path:
    """Return the git repository root as a Path; exit(1) when unavailable.

    Failure covers both "not inside a repository" and "git binary missing".
    """
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            check=True,
            text=True,
            capture_output=True,
        )
    except (FileNotFoundError, subprocess.CalledProcessError):
        print("Error: Not in a git repository", file=sys.stderr)
        sys.exit(1)
    return Path(proc.stdout.strip())
59
+
60
+
61
def find_planning_dir() -> Path:
    """Return the .planning/ directory under the git root; exit(1) if absent."""
    candidate = find_git_root() / ".planning"
    if candidate.is_dir():
        return candidate
    print("Error: No .planning/ directory found", file=sys.stderr)
    sys.exit(1)
68
+
69
+
70
def slugify(name: str) -> str:
    """Turn a milestone name into a URL-safe slug.

    Lowercases, converts runs of whitespace/underscores to single hyphens,
    drops every remaining character outside [a-z0-9-], collapses repeated
    hyphens, and trims hyphens from both ends.
    """
    slug = re.sub(r"[\s_]+", "-", name.lower())
    slug = re.sub(r"[^a-z0-9-]", "", slug)
    return re.sub(r"-{2,}", "-", slug).strip("-")
82
+
83
+
84
def find_planning_dir_optional() -> Path | None:
    """Like find_planning_dir, but return None instead of exiting.

    None is returned when git is unavailable, the cwd is not inside a
    repository, or the repository has no .planning/ directory.
    """
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            check=True,
            text=True,
            capture_output=True,
        )
    except (FileNotFoundError, subprocess.CalledProcessError):
        return None
    candidate = Path(proc.stdout.strip()) / ".planning"
    if candidate.is_dir():
        return candidate
    return None
97
+
98
+
99
def normalize_phase(phase_str: str) -> str:
    """Zero-pad a phase number: '5' -> '05', '05' -> '05', '2.1' -> '02.1'.

    Inputs that are not plain integers or dotted decimals pass through
    unchanged.
    """
    parsed = re.match(r"^(\d+)(?:\.(\d+))?$", phase_str)
    if parsed is None:
        return phase_str
    whole = f"{int(parsed.group(1)):02d}"
    fraction = parsed.group(2)
    return f"{whole}.{fraction}" if fraction else whole
109
+
110
+
111
+ def find_phase_dir(planning: Path, phase: str) -> Path | None:
112
+ """Find the phase directory matching a normalized phase number."""
113
+ phases_dir = planning / "phases"
114
+ if not phases_dir.is_dir():
115
+ return None
116
+ matches = sorted(phases_dir.glob(f"{phase}-*"))
117
+ dirs = [m for m in matches if m.is_dir()]
118
+ return dirs[0] if dirs else None
119
+
120
+
121
def run_git(*args: str) -> str:
    """Run `git <args>` and return its stripped stdout.

    Raises subprocess.CalledProcessError on a non-zero exit status
    (check=True), matching callers that want failures to propagate.
    """
    proc = subprocess.run(
        ["git", *args],
        check=True,
        text=True,
        capture_output=True,
    )
    return proc.stdout.strip()
130
+
131
+
132
def parse_json_config(planning: Path) -> dict:
    """Load .planning/config.json; return {} when missing or unparseable."""
    path = planning / "config.json"
    if not path.is_file():
        return {}
    try:
        raw = path.read_text(encoding="utf-8")
        return json.loads(raw)
    except (OSError, json.JSONDecodeError):
        # Best-effort read: a broken or vanished config is treated as empty.
        return {}
141
+
142
+
143
def in_range(phase_num: str, start: int, end: int) -> bool:
    """True when a (possibly decimal) phase number falls in start..end.

    The upper bound is padded by 0.999 so decimal sub-phases such as '5.2'
    still count as part of phase 5 when end == 5. Non-numeric input is
    simply out of range.
    """
    try:
        value = float(phase_num)
    except ValueError:
        return False
    return start <= value <= end + 0.999
150
+
151
+
152
+ # ---------------------------------------------------------------------------
153
+ # YAML frontmatter parsing
154
+ # ---------------------------------------------------------------------------
155
+
156
# Frontmatter block: '---' on the first line, YAML body, closing '---'.
# Bug fix: the closing '---' previously had to be followed by a newline,
# so a file whose frontmatter ends exactly at EOF (no trailing newline)
# was silently treated as having no frontmatter. The terminator now
# accepts end-of-string as well.
_FRONTMATTER_RE = re.compile(r"\A---\s*\n(.*?\n)---\s*(?:\n|\Z)", re.DOTALL)


def parse_frontmatter(path: Path) -> dict[str, Any] | None:
    """Extract YAML frontmatter from a markdown file.

    Returns the parsed mapping ({} for an empty block), or None when the
    file is unreadable, has no frontmatter block, or the YAML is invalid.
    """
    try:
        text = path.read_text(encoding="utf-8", errors="replace")
    except OSError:
        return None

    match = _FRONTMATTER_RE.match(text)
    if not match:
        return None

    try:
        return yaml.safe_load(match.group(1)) or {}
    except yaml.YAMLError:
        return None
174
+
175
+
176
+ # ---------------------------------------------------------------------------
177
+ # Patch generation helpers (shared between generate-phase-patch and
178
+ # generate-adhoc-patch)
179
+ # ---------------------------------------------------------------------------
180
+
181
# Paths excluded from generated patches: planning docs, generated code,
# build output, and lockfiles.
PATCH_EXCLUSIONS = [
    # Documentation
    ".planning",
    # Flutter/Dart generated
    "*.g.dart",
    "*.freezed.dart",
    "*.gr.dart",
    "generated",
    ".dart_tool",
    # Next.js/TypeScript generated
    "node_modules",
    ".next",
    "dist",
    "build",
    "*.d.ts",
    ".turbo",
    # Common build artifacts
    "*.lock",
]


def build_exclude_pathspecs() -> list[str]:
    """Return PATCH_EXCLUSIONS as git ':!<pattern>' exclude pathspecs."""
    specs: list[str] = []
    for pattern in PATCH_EXCLUSIONS:
        specs.append(":!" + pattern)
    return specs
205
+
206
+
207
+ # ===================================================================
208
+ # Subcommand: update-state
209
+ # ===================================================================
210
+
211
+
212
def cmd_update_state(args: argparse.Namespace) -> None:
    """Update .planning/STATE.md Plan and Status lines.

    Contract:
      Args: completed (int), total (int)
      Output: text — confirmation message
      Exit codes: 0 = success, 1 = STATE.md missing or completed > total
      Side effects: writes STATE.md
    """
    completed, total = args.completed, args.total

    # Sanity check before touching any files.
    if completed > total:
        print(f"Error: Completed ({completed}) cannot exceed total ({total})", file=sys.stderr)
        sys.exit(1)

    state_file = find_git_root() / ".planning" / "STATE.md"
    if not state_file.is_file():
        print(f"Error: STATE.md not found at {state_file}", file=sys.stderr)
        sys.exit(1)

    content = state_file.read_text(encoding="utf-8")

    # Rewrite the Plan line with the new progress counts.
    content = re.sub(
        r"^Plan:.*$",
        f"Plan: {completed} of {total} complete in current phase",
        content,
        flags=re.MULTILINE,
    )

    # Status reflects whether every plan in the phase has been executed.
    status = (
        "All plans executed, pending verification"
        if completed == total
        else f"In progress — plan {completed} of {total} complete"
    )
    content = re.sub(r"^Status:.*$", f"Status: {status}", content, flags=re.MULTILINE)

    state_file.write_text(content, encoding="utf-8")
    print(f"STATE.md updated: {completed} of {total} plans complete")
252
+
253
+
254
+ # ===================================================================
255
+ # Subcommand: set-last-command
256
+ # ===================================================================
257
+
258
+
259
def cmd_set_last_command(args: argparse.Namespace) -> None:
    """Update .planning/STATE.md Last Command field with timestamp.

    Contract:
      Args: command_string (str) — e.g. "ms:plan-phase 10"
      Output: text — confirmation or warning
      Exit codes: 0 always (bookkeeping, not critical path)
      Side effects: writes STATE.md (if it exists)
    """
    command_string = args.command_string
    now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    new_line = f"Last Command: {command_string} | {now}"

    state_file = find_git_root() / ".planning" / "STATE.md"
    if not state_file.is_file():
        print("Warning: STATE.md not found, skipping Last Command update", file=sys.stderr)
        return

    text = state_file.read_text(encoding="utf-8")

    # Bug fix: new_line was previously passed to re.subn as a string
    # replacement, so backslashes in command_string were parsed as regex
    # escape sequences (corrupting the line or raising re.error). Callable
    # replacements insert the text literally.
    updated, count = re.subn(
        r"^Last Command:.*$",
        lambda _m: new_line,
        text,
        count=1,
        flags=re.MULTILINE,
    )

    if count == 0:
        # No existing Last Command line: insert one right after Status:.
        updated, count = re.subn(
            r"^Status:.*$",
            lambda m: f"{m.group(0)}\n{new_line}",
            text,
            count=1,
            flags=re.MULTILINE,
        )
        if count == 0:
            print("Warning: No 'Last Command:' or 'Status:' line found in STATE.md", file=sys.stderr)
            return

    state_file.write_text(updated, encoding="utf-8")
    print(f"STATE.md Last Command: {command_string} | {now}")
295
+
296
+
297
+ # ===================================================================
298
+ # Subcommand: validate-execution-order
299
+ # ===================================================================
300
+
301
+
302
def cmd_validate_execution_order(args: argparse.Namespace) -> None:
    """Validate EXECUTION-ORDER.md against plan files in a phase directory.

    Contract:
      Args: phase_dir (str) — path to phase directory
      Output: text — PASS/FAIL message with plan count and wave count
      Exit codes: 0 = all plans matched, 1 = mismatch or missing files
      Side effects: read-only
    """
    phase_dir = Path(args.phase_dir)
    if not phase_dir.is_dir():
        print(f"FAIL: Directory does not exist: {phase_dir}")
        sys.exit(1)

    exec_order = phase_dir / "EXECUTION-ORDER.md"
    if not exec_order.is_file():
        print(f"FAIL: EXECUTION-ORDER.md not found in {phase_dir}")
        sys.exit(1)

    # Plans that actually exist on disk.
    disk_plans = sorted(p.name for p in phase_dir.glob("*-PLAN.md"))
    if not disk_plans:
        print(f"FAIL: No *-PLAN.md files found in {phase_dir}")
        sys.exit(1)

    # Plan filenames referenced anywhere in EXECUTION-ORDER.md.
    exec_text = exec_order.read_text(encoding="utf-8")
    plan_pattern = re.compile(r"[0-9][0-9.]*-[0-9]+-PLAN\.md")
    order_plans = sorted(set(plan_pattern.findall(exec_text)))

    # Cross-check both directions: disk -> listing and listing -> disk.
    errors = [
        f" Missing from EXECUTION-ORDER.md: {plan}"
        for plan in disk_plans
        if plan not in order_plans
    ]
    errors += [
        f" Listed in EXECUTION-ORDER.md but file missing: {plan}"
        for plan in order_plans
        if plan not in disk_plans
    ]

    if errors:
        print("FAIL: Plan/execution-order mismatch")
        for err in errors:
            print(err)
        sys.exit(1)

    # Wave scan (warning only): flag a file touched by several plans in
    # the same wave, since those plans are meant to run in parallel.
    current_wave = ""
    wave_count = 0
    current_wave_files: set[str] = set()

    for line in exec_text.splitlines():
        header = re.match(r"^## Wave (\d+)", line)
        if header:
            current_wave = header.group(1)
            wave_count += 1
            current_wave_files = set()
            continue
        if not current_wave:
            continue
        found = plan_pattern.search(line)
        if not found:
            continue
        plan_path = phase_dir / found.group()
        if not plan_path.is_file():
            continue
        plan_text = plan_path.read_text(encoding="utf-8")
        for files_match in re.finditer(r"\*\*Files:\*\*(.+)", plan_text):
            for raw in files_match.group(1).replace("`", "").split(","):
                fpath = raw.strip()
                if not fpath:
                    continue
                if fpath in current_wave_files:
                    print(f"WARNING: File '{fpath}' appears in multiple plans within Wave {current_wave}")
                else:
                    current_wave_files.add(fpath)

    if wave_count == 0:
        print("FAIL: No '## Wave N' headers found in EXECUTION-ORDER.md")
        sys.exit(1)

    print(f"PASS: {len(disk_plans)} plans across {wave_count} waves")
383
+
384
+
385
+ # ===================================================================
386
+ # Subcommand: doctor-scan
387
+ # ===================================================================
388
+
389
+
390
def cmd_doctor_scan(args: argparse.Namespace) -> None:
    """Single-pass diagnostic scan of the .planning/ tree.

    Runs nine independent health checks — subsystem vocabulary, milestone
    directory structure, phase archival, knowledge files, phase summaries,
    PLAN cleanup, CLI wrappers, milestone naming, research API keys —
    printing a PASS/WARN/FAIL/SKIP section per check, then a summary.

    Contract:
      Args: (none)
      Output: text — per-check PASS/FAIL/SKIP status and summary
      Exit codes: 0 = scan completed, 2 = missing .planning/ or config.json
      Side effects: read-only
    """
    git_root = find_git_root()
    planning = git_root / ".planning"

    # Hard prerequisites: without .planning/ and a parseable config.json
    # none of the checks can run, so bail out early with exit code 2.
    if not planning.is_dir():
        print("Error: No .planning/ directory found")
        sys.exit(2)

    config_path = planning / "config.json"
    if not config_path.is_file():
        print(f"Error: No config.json found at {config_path}")
        sys.exit(2)

    try:
        config = json.loads(config_path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError):
        print(f"Error: Cannot parse {config_path}")
        sys.exit(2)

    # Well-known locations referenced by several checks below.
    milestones_file = planning / "MILESTONES.md"
    phases_dir = planning / "phases"
    milestones_dir = planning / "milestones"
    knowledge_dir = planning / "knowledge"

    # Running tallies for the final summary.
    pass_count = 0
    warn_count = 0
    fail_count = 0
    skip_count = 0
    failed_checks: list[str] = []

    def record(status: str, name: str) -> None:
        # Tally one check result; any status other than PASS/WARN/FAIL
        # counts as skipped. Failed check names feed the summary line.
        nonlocal pass_count, warn_count, fail_count, skip_count
        if status == "PASS":
            pass_count += 1
        elif status == "WARN":
            warn_count += 1
        elif status == "FAIL":
            fail_count += 1
            failed_checks.append(name)
        else:
            skip_count += 1

    def format_phase_prefix(phase: str) -> str:
        # Zero-pad the integer part ('5' -> '05', '2.1' -> '02.1');
        # mirrors normalize_phase but assumes numeric input.
        if "." in phase:
            int_part, dec_part = phase.split(".", 1)
            return f"{int(int_part):02d}.{dec_part}"
        return f"{int(phase):02d}"

    def parse_phase_numbers(line: str) -> list[str]:
        """Parse phase numbers from a 'Phases completed' line."""
        # An 'N-M' range expands to every integer in it; otherwise collect
        # all standalone (possibly decimal) numbers after the colon.
        range_match = re.search(r"(\d+)-(\d+)", line)
        if range_match:
            start, end = int(range_match.group(1)), int(range_match.group(2))
            return [str(i) for i in range(start, end + 1)]
        return re.findall(r"\d+(?:\.\d+)?", line.split(":")[-1] if ":" in line else line)

    subsystems = config.get("subsystems", [])
    subsystem_count = len(subsystems)

    # ---- CHECK 1: Subsystem Vocabulary ----
    # Every subsystem value used in artifacts must appear in the canonical
    # config.json list.
    print("=== Subsystem Vocabulary ===")
    if subsystem_count == 0:
        print("Status: FAIL")
        print("No subsystems array in config.json (or empty)")
        record("FAIL", "Subsystem Vocabulary")
    else:
        print(f"Subsystems: {subsystem_count} configured")
        for s in subsystems:
            print(f" - {s}")

        # Run artifact scan inline
        # NOTE(review): _scan_artifact_subsystem_values is defined elsewhere
        # in this file; presumably it returns subsystem values found in
        # artifact frontmatter — confirm against its definition.
        artifact_values = _scan_artifact_subsystem_values(planning)
        mismatches = [v for v in artifact_values if v not in subsystems]

        if mismatches:
            print("Status: FAIL")
            print(f"Artifact values not in canonical list: {' '.join(mismatches)}")
            record("FAIL", "Subsystem Vocabulary")
        else:
            print(f"Artifacts scanned: {len(artifact_values)} (all OK)")
            print("Status: PASS")
            record("PASS", "Subsystem Vocabulary")
    print()

    # ---- CHECK 2: Milestone Directory Structure ----
    # Completed milestones must live in per-version directories, not as
    # flat v*-*.md files (the old format).
    print("=== Milestone Directory Structure ===")
    if not milestones_dir.is_dir():
        # MILESTONES.md entries ('## ' headings) with no milestones/ dir
        # means archival never happened.
        if milestones_file.is_file() and any(
            line.startswith("## ")
            for line in milestones_file.read_text(encoding="utf-8").splitlines()
        ):
            print("Status: FAIL")
            print("MILESTONES.md has entries but no milestones/ directory")
            record("FAIL", "Milestone Directory Structure")
        else:
            print("Status: SKIP")
            print("No completed milestones")
            record("SKIP", "Milestone Directory Structure")
    else:
        flat_files = sorted(milestones_dir.glob("v*-*.md"))
        if flat_files:
            print("Status: FAIL")
            print(f"Found {len(flat_files)} flat file(s) in milestones/ (old format):")
            for f in flat_files:
                # Report whether the matching version directory already
                # exists, so the fix can be suggested precisely.
                version = re.match(r"(v[\d.]+)", f.name)
                ver = version.group(1) if version else "?"
                ver_dir = milestones_dir / ver
                if ver_dir.is_dir():
                    print(f" {f.name} → directory {ver}/ exists (can restructure)")
                else:
                    print(f" {f.name} → directory {ver}/ missing (need to create)")
            record("FAIL", "Milestone Directory Structure")
        else:
            ms_dirs = [d for d in milestones_dir.iterdir() if d.is_dir()]
            if not ms_dirs:
                print("Status: SKIP")
                print("No completed milestones")
                record("SKIP", "Milestone Directory Structure")
            else:
                print("Status: PASS")
                print(f"{len(ms_dirs)} milestone directories")
                record("PASS", "Milestone Directory Structure")
    print()

    # ---- CHECK 3: Phase Archival ----
    # Phases belonging to completed milestones should no longer sit in
    # .planning/phases/.
    print("=== Phase Archival ===")
    if not milestones_file.is_file():
        print("Status: SKIP")
        print("No completed milestones with phase ranges in MILESTONES.md")
        record("SKIP", "Phase Archival")
    else:
        ms_text = milestones_file.read_text(encoding="utf-8")
        phase_lines = [l for l in ms_text.splitlines() if "Phases completed" in l]
        if not phase_lines:
            print("Status: SKIP")
            print("No completed milestones with phase ranges in MILESTONES.md")
            record("SKIP", "Phase Archival")
        else:
            orphans: list[str] = []
            for line in phase_lines:
                for phase_num in parse_phase_numbers(line):
                    prefix = format_phase_prefix(phase_num)
                    if phases_dir.is_dir():
                        for d in phases_dir.glob(f"{prefix}-*/"):
                            if d.is_dir():
                                orphans.append(f" {d.name} (should be archived)")
            if orphans:
                print("Status: FAIL")
                print(f"Found {len(orphans)} orphaned phase directories from completed milestones:")
                for o in orphans:
                    print(o)
                record("FAIL", "Phase Archival")
            else:
                print("Status: PASS")
                print("All completed milestone phases are archived")
                record("PASS", "Phase Archival")
    print()

    # ---- CHECK 4: Knowledge Files ----
    # One knowledge/<subsystem>.md per configured subsystem; flag both
    # missing and orphaned files.
    print("=== Knowledge Files ===")
    if subsystem_count == 0:
        print("Status: SKIP")
        print("No subsystems configured — knowledge check requires subsystem vocabulary")
        record("SKIP", "Knowledge Files")
    elif not knowledge_dir.is_dir():
        print("Status: FAIL")
        print("Knowledge directory missing: .planning/knowledge/")
        print(f"Expected files for {subsystem_count} subsystems")
        record("FAIL", "Knowledge Files")
    else:
        missing = [s for s in subsystems if not (knowledge_dir / f"{s}.md").is_file()]
        orphaned = [
            f.stem
            for f in knowledge_dir.glob("*.md")
            if f.stem not in subsystems
        ]
        if missing or orphaned:
            present = subsystem_count - len(missing)
            print("Status: FAIL")
            print(f"Coverage: {present}/{subsystem_count} subsystems have knowledge files")
            if missing:
                print("Missing:")
                for m in missing:
                    print(f" {m}.md")
            if orphaned:
                print("Orphaned:")
                for o in orphaned:
                    print(f" {o}.md (not in subsystems list)")
            record("FAIL", "Knowledge Files")
        else:
            print("Status: PASS")
            print(f"All {subsystem_count} subsystems have knowledge files")
            record("PASS", "Knowledge Files")
    print()

    # ---- CHECK 5: Phase Summaries ----
    # Every archived milestone directory should carry PHASE-SUMMARIES.md.
    print("=== Phase Summaries ===")
    if not milestones_dir.is_dir():
        print("Status: SKIP")
        print("No milestones directory")
        record("SKIP", "Phase Summaries")
    else:
        ms_dirs = sorted(d for d in milestones_dir.iterdir() if d.is_dir())
        if not ms_dirs:
            print("Status: SKIP")
            print("No milestone directories")
            record("SKIP", "Phase Summaries")
        else:
            missing_summaries = [
                d.name for d in ms_dirs if not (d / "PHASE-SUMMARIES.md").is_file()
            ]
            if missing_summaries:
                print("Status: FAIL")
                print(f"Missing PHASE-SUMMARIES.md in {len(missing_summaries)} milestone(s):")
                for m in missing_summaries:
                    print(f" {m}/PHASE-SUMMARIES.md")
                record("FAIL", "Phase Summaries")
            else:
                print("Status: PASS")
                print(f"All {len(ms_dirs)} milestones have PHASE-SUMMARIES.md")
                record("PASS", "Phase Summaries")
    print()

    # ---- CHECK 6: PLAN Cleanup ----
    # *-PLAN.md files are working artifacts; completed phases (active or
    # archived) should not retain them.
    print("=== PLAN Cleanup ===")
    if not milestones_file.is_file():
        print("Status: SKIP")
        print("No completed milestones — active phase PLANs are expected")
        record("SKIP", "PLAN Cleanup")
    else:
        ms_text = milestones_file.read_text(encoding="utf-8")
        phase_lines = [l for l in ms_text.splitlines() if "Phases completed" in l]
        if not phase_lines:
            print("Status: SKIP")
            print("No completed milestones — active phase PLANs are expected")
            record("SKIP", "PLAN Cleanup")
        else:
            leftovers: list[str] = []
            # Leftover PLANs still sitting under .planning/phases/.
            for line in phase_lines:
                for phase_num in parse_phase_numbers(line):
                    prefix = format_phase_prefix(phase_num)
                    if phases_dir.is_dir():
                        for d in phases_dir.glob(f"{prefix}-*/"):
                            if d.is_dir():
                                for plan in d.glob("*-PLAN.md"):
                                    rel = plan.relative_to(planning)
                                    leftovers.append(f" {rel}")

            # Check archived milestone directories too
            if milestones_dir.is_dir():
                for ver_dir in milestones_dir.iterdir():
                    if not ver_dir.is_dir():
                        continue
                    archived_phases = ver_dir / "phases"
                    if archived_phases.is_dir():
                        for phase_d in archived_phases.iterdir():
                            if phase_d.is_dir():
                                for plan in phase_d.glob("*-PLAN.md"):
                                    rel = plan.relative_to(planning)
                                    leftovers.append(f" {rel}")

            if leftovers:
                print("Status: FAIL")
                print(f"Found {len(leftovers)} leftover PLAN file(s) in completed phases:")
                for l in leftovers:
                    print(l)
                record("FAIL", "PLAN Cleanup")
            else:
                print("Status: PASS")
                print("No leftover PLAN files in completed phases")
                record("PASS", "PLAN Cleanup")
    print()

    # ---- CHECK 7: CLI Wrappers ----
    # The installer generates these wrapper executables; all must resolve
    # on PATH.
    print("=== CLI Wrappers ===")
    wrapper_names = ["ms-tools", "ms-lookup", "ms-compare-mockups"]
    missing_wrappers = [w for w in wrapper_names if shutil.which(w) is None]
    if missing_wrappers:
        print("Status: FAIL")
        print(f"Not on PATH: {', '.join(missing_wrappers)}")
        print("Fix: re-run `npx mindsystem-cc` to regenerate wrappers and PATH hook")
        record("FAIL", "CLI Wrappers")
    else:
        print("Status: PASS")
        print(f"All {len(wrapper_names)} CLI wrappers found on PATH")
        record("PASS", "CLI Wrappers")
    print()

    # ---- CHECK 8: Milestone Naming Convention ----
    # Milestone directories should be name-based slugs, not version
    # prefixes.
    print("=== Milestone Naming Convention ===")
    if not milestones_dir.is_dir():
        print("Status: SKIP")
        print("No milestones directory")
        record("SKIP", "Milestone Naming Convention")
    else:
        ms_dirs = [d for d in milestones_dir.iterdir() if d.is_dir()]
        if not ms_dirs:
            print("Status: SKIP")
            print("No milestone directories")
            record("SKIP", "Milestone Naming Convention")
        else:
            # NOTE(review): _detect_versioned_milestone_dirs is defined
            # elsewhere in this file; it appears to return dicts with
            # 'path' and 'type' keys — confirm against its definition.
            versioned = _detect_versioned_milestone_dirs(planning)
            if versioned:
                print("Status: FAIL")
                print(f"Found {len(versioned)} version-prefixed milestone directories:")
                for v in versioned:
                    # 'path' looks like 'milestones/<dirname>'; show just
                    # the directory name.
                    dirname = v["path"].split("/", 1)[1] if "/" in v["path"] else v["path"]
                    print(f" {dirname} ({v['type']})")
                record("FAIL", "Milestone Naming Convention")
            else:
                print("Status: PASS")
                print("All milestone directories use name-based slugs")
                record("PASS", "Milestone Naming Convention")
    print()

    # ---- CHECK 9: Research API Keys ----
    # Optional keys; absence degrades research quality but is not an
    # error, hence WARN rather than FAIL.
    print("=== Research API Keys ===")
    c7_key = os.environ.get("CONTEXT7_API_KEY", "")
    pplx_key = os.environ.get("PERPLEXITY_API_KEY", "")
    if c7_key and pplx_key:
        print("Status: PASS")
        print("All research API keys configured")
        record("PASS", "Research API Keys")
    else:
        print("Status: WARN")
        missing_keys: list[str] = []
        if not c7_key:
            missing_keys.append("CONTEXT7_API_KEY")
            print("CONTEXT7_API_KEY: not set")
            print(" Enables: library documentation lookup via Context7")
            print(" Without: falls back to WebSearch/WebFetch (less authoritative)")
            print(" Set up: https://context7.com → copy API key → export CONTEXT7_API_KEY=<key>")
        if not pplx_key:
            missing_keys.append("PERPLEXITY_API_KEY")
            print("PERPLEXITY_API_KEY: not set")
            print(" Enables: deep research via Perplexity AI")
            print(" Without: falls back to WebSearch/WebFetch (less comprehensive)")
            print(" Set up: https://perplexity.ai/settings/api → copy API key → export PERPLEXITY_API_KEY=<key>")
        record("WARN", "Research API Keys")
    print()

    # ---- SUMMARY ----
    total = pass_count + warn_count + fail_count + skip_count
    print("=== Summary ===")
    print(f"Checks: {total} total, {pass_count} passed, {warn_count} warned, {fail_count} failed, {skip_count} skipped")

    if fail_count > 0:
        print(f"Issues: {', '.join(failed_checks)}")
    elif warn_count > 0:
        print("No failures — warnings are informational")
    else:
        print("All checks passed")
750
+
751
+
752
+ # ===================================================================
753
+ # Subcommand: gather-milestone-stats
754
+ # ===================================================================
755
+
756
+
757
def cmd_gather_milestone_stats(args: argparse.Namespace) -> None:
    """Gather milestone readiness status and statistics.

    Contract:
        Args: start_phase (int), end_phase (int)
        Output: text — readiness status (READY/NOT READY) and git stats
        Exit codes: 0 = success, 1 = start > end or phases dir missing
        Side effects: read-only
    """
    start = args.start_phase
    end = args.end_phase

    if start > end:
        print(f"Error: Start phase ({start}) cannot exceed end phase ({end})", file=sys.stderr)
        sys.exit(1)

    git_root = find_git_root()
    phases_dir = git_root / ".planning" / "phases"
    if not phases_dir.is_dir():
        print(f"Error: Phases directory not found at {phases_dir}", file=sys.stderr)
        sys.exit(1)

    # ---- READINESS ----
    # A phase is "ready" when every *-PLAN.md has a matching *-SUMMARY.md.
    print("=== Readiness ===")
    print()

    phase_count = 0
    plan_count = 0
    complete = 0
    incomplete_list: list[str] = []
    phase_details: list[str] = []

    for d in sorted(phases_dir.iterdir()):
        if not d.is_dir():
            continue
        dirname = d.name
        # Directory names follow "NN-slug"; split once for number and name.
        phase_num = dirname.split("-", 1)[0]
        phase_name = dirname.split("-", 1)[1] if "-" in dirname else dirname

        if in_range(phase_num, start, end):
            phase_count += 1
            phase_plans = 0
            phase_complete = 0

            for plan in sorted(d.glob("*-PLAN.md")):
                plan_count += 1
                phase_plans += 1
                plan_base = plan.name.replace("-PLAN.md", "")
                summary = d / f"{plan_base}-SUMMARY.md"
                if summary.is_file():
                    complete += 1
                    phase_complete += 1
                else:
                    incomplete_list.append(f" {dirname}/{plan.name}")

            phase_details.append(f"- Phase {phase_num}: {phase_name} ({phase_complete}/{phase_plans} plans)")

    print(f"Phases: {phase_count} (range {start}-{end})")
    print(f"Plans: {plan_count} total, {complete} complete")
    print()
    for detail in phase_details:
        print(detail)
    print()

    if complete == plan_count and plan_count > 0:
        print("Status: READY")
    else:
        incomplete = plan_count - complete
        print(f"Incomplete ({incomplete}):")
        for item in incomplete_list:
            print(item)
        print("Status: NOT READY")

    # ---- GIT STATS ----
    print()
    print("=== Git Stats ===")
    print()

    all_commits: list[str] = []

    # Integer phases: commit subjects follow the "type(NN-slug): ..." convention.
    for i in range(start, end + 1):
        phase = f"{i:02d}"
        try:
            out = run_git("log", "--all", "--format=%H %ai %s", f"--grep=({phase}-")
            if out:
                all_commits.extend(out.splitlines())
        except subprocess.CalledProcessError:
            pass

    # Decimal phases (e.g. 2.1) are only discoverable from directory names.
    for d in sorted(phases_dir.iterdir()):
        if not d.is_dir():
            continue
        phase_num = d.name.split("-", 1)[0]
        if "." in phase_num and in_range(phase_num, start, end):
            try:
                out = run_git("log", "--all", "--format=%H %ai %s", f"--grep=({phase_num}-")
                if out:
                    all_commits.extend(out.splitlines())
            except subprocess.CalledProcessError:
                pass

    # Deduplicate by hash, then order chronologically.
    seen: set[str] = set()
    unique_commits: list[str] = []
    for c in all_commits:
        hash_val = c.split()[0] if c.strip() else ""
        if hash_val and hash_val not in seen:
            seen.add(hash_val)
            unique_commits.append(c)
    # Sort by date AND time so same-day commits order correctly.
    unique_commits.sort(key=lambda x: " ".join(x.split()[1:3]))

    if unique_commits:
        commit_count = len(unique_commits)
        # Each line is "<hash> <date> <time> <tz> <subject>" (%H %ai %s), so
        # the subject starts at token index 4.  The previous maxsplit=3 left
        # the timezone offset glued onto the reported commit message.
        first = unique_commits[0].split(maxsplit=4)
        last = unique_commits[-1].split(maxsplit=4)
        first_hash, first_date = first[0], first[1]
        last_hash, last_date = last[0], last[1]
        first_msg = first[4] if len(first) > 4 else ""
        last_msg = last[4] if len(last) > 4 else ""

        try:
            d1 = datetime.date.fromisoformat(first_date)
            d2 = datetime.date.fromisoformat(last_date)
            days = (d2 - d1).days
        except ValueError:
            days = "?"

        print(f"Commits: {commit_count}")
        print(f"Git range: {first_hash[:7]}..{last_hash[:7]}")
        print(f"First: {first_date} — {first_msg}")
        print(f"Last: {last_date} — {last_msg}")
        print(f"Timeline: {days} days ({first_date} → {last_date})")

        try:
            diffstat = run_git("diff", "--shortstat", f"{first_hash}^..{last_hash}")
            if diffstat:
                print(f"Changes:{diffstat}")
        except subprocess.CalledProcessError:
            pass
    else:
        print("No commits found matching phase patterns (expected 'feat(XX-YY): ...')")
        print("Determine git range manually from git log")

    print()
903
+
904
+
905
+ # ===================================================================
906
+ # Subcommand: generate-phase-patch
907
+ # ===================================================================
908
+
909
+
910
def cmd_generate_phase_patch(args: argparse.Namespace) -> None:
    """Generate a patch file with implementation changes from a phase.

    Contract:
        Args: phase (str), --suffix (str, optional)
        Output: text — patch generation status and file path
        Exit codes: 0 = success (or no matching commits), 1 = git error
        Side effects: writes .patch file to phase directory
    """
    phase_input = args.phase
    suffix = args.suffix

    git_root = find_git_root()
    import os
    os.chdir(git_root)  # all git/path operations below are repo-relative

    # Normalize single-digit phases to two digits (e.g. "5" -> "05").
    if re.match(r"^\d$", phase_input):
        phase_number = f"{int(phase_input):02d}"
    else:
        phase_number = phase_input

    # Commit subjects follow "type(NN-slug): ..." — build a grep pattern.
    if suffix:
        if suffix == "uat-fixes":
            commit_pattern = f"\\({phase_number}-uat\\):"
            print(f"Generating UAT fixes patch for phase {phase_number}...")
        else:
            commit_pattern = f"\\({phase_number}-{suffix}\\):"
            print(f"Generating {suffix} patch for phase {phase_number}...")
    else:
        commit_pattern = f"\\({phase_number}-"
        print(f"Generating patch for phase {phase_number}...")

    # Find matching commits (git log emits newest first).
    try:
        log_output = run_git("log", "--oneline")
    except subprocess.CalledProcessError:
        print("Error: Failed to read git log", file=sys.stderr)
        sys.exit(1)

    phase_commits = []
    for line in log_output.splitlines():
        if re.search(commit_pattern, line):
            phase_commits.append(line.split()[0])

    if not phase_commits:
        print(f"No commits found matching pattern: {commit_pattern}")
        print("Patch skipped")
        return

    print(f"Found {len(phase_commits)} commit(s)")

    # Base = parent of the earliest matching commit; fall back to the repo's
    # root commit when that earliest commit has no parent.
    earliest_commit = phase_commits[-1]
    try:
        base_commit = run_git("rev-parse", f"{earliest_commit}^")
    except subprocess.CalledProcessError:
        base_commit = run_git("rev-list", "--max-parents=0", "HEAD")

    base_msg = run_git("log", "--oneline", "-1", base_commit)
    print(f"Base commit: {base_msg}")

    # Output directory: the phase's own directory when it exists.
    phases_dir = Path(".planning/phases")
    phase_dir_matches = sorted(phases_dir.glob(f"{phase_number}-*")) if phases_dir.is_dir() else []
    phase_dir = str(phase_dir_matches[0]) if phase_dir_matches else str(phases_dir)

    Path(phase_dir).mkdir(parents=True, exist_ok=True)
    print(f"Output directory: {phase_dir}/")

    # Determine output filename.
    if suffix:
        patch_file = f"{phase_dir}/{phase_number}-{suffix}.patch"
    else:
        patch_file = f"{phase_dir}/{phase_number}-changes.patch"

    # Generate diff, excluding planning/doc pathspecs.
    exclude_args = build_exclude_pathspecs()
    if suffix:
        latest_commit = phase_commits[0]
        diff_args = ["diff", base_commit, latest_commit, "--", "."] + exclude_args
    else:
        diff_args = ["diff", base_commit, "HEAD", "--", "."] + exclude_args

    result = subprocess.run(
        ["git"] + diff_args,
        capture_output=True,
        text=True,
    )
    # Previously a failed diff was silently treated as "no changes"; surface
    # it per the documented exit-code contract (1 = git error).
    if result.returncode != 0:
        print(f"Error: git diff failed: {result.stderr.strip()}", file=sys.stderr)
        sys.exit(1)
    patch_content = result.stdout

    if not patch_content.strip():
        print("No implementation changes outside excluded patterns")
        print("Patch skipped")
        return

    Path(patch_file).write_text(patch_content, encoding="utf-8")
    line_count = len(patch_content.splitlines())

    print()
    print(f"Generated: {patch_file} ({line_count} lines)")
    print()
    print(f"Review: cat {patch_file}")
    print(f"Apply: git apply {patch_file}")
    print(f"Discard: rm {patch_file}")
1016
+
1017
+
1018
+ # ===================================================================
1019
+ # Subcommand: generate-adhoc-patch
1020
+ # ===================================================================
1021
+
1022
+
1023
def cmd_generate_adhoc_patch(args: argparse.Namespace) -> None:
    """Generate a patch file from an adhoc commit.

    Contract:
        Args: commit (str) — commit hash, output (str) — output file path
        Output: text — patch generation status and file path
        Exit codes: 0 = success (or no changes), 1 = commit not found or git error
        Side effects: writes .patch file to output path
    """
    commit_hash = args.commit
    output_path = args.output

    git_root = find_git_root()
    import os
    os.chdir(git_root)  # diff paths and output path are repo-relative

    # Verify the commit exists before diffing against its parent.
    try:
        run_git("rev-parse", commit_hash)
    except subprocess.CalledProcessError:
        print(f"Error: Commit {commit_hash} not found", file=sys.stderr)
        sys.exit(1)

    exclude_args = build_exclude_pathspecs()
    diff_args = ["diff", f"{commit_hash}^", commit_hash, "--", "."] + exclude_args

    result = subprocess.run(
        ["git"] + diff_args,
        capture_output=True,
        text=True,
    )
    # A failed diff (e.g. a root commit with no parent) was previously
    # indistinguishable from "no changes"; report it explicitly.
    if result.returncode != 0:
        print(f"Error: git diff failed: {result.stderr.strip()}", file=sys.stderr)
        sys.exit(1)

    if not result.stdout.strip():
        print("No implementation changes outside excluded patterns")
        print("Patch skipped")
        return

    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    Path(output_path).write_text(result.stdout, encoding="utf-8")
    line_count = len(result.stdout.splitlines())
    print(f"Generated: {output_path} ({line_count} lines)")
1064
+
1065
+
1066
+ # ===================================================================
1067
+ # Subcommand: archive-milestone-phases
1068
+ # ===================================================================
1069
+
1070
+
1071
def cmd_archive_milestone_phases(args: argparse.Namespace) -> None:
    """Consolidate summaries, delete artifacts, move phase dirs to milestone archive.

    Contract:
        Args: start_phase (int), end_phase (int), milestone (str — slug)
        Output: text — per-stage counts and archive summary
        Exit codes: 0 = success, 1 = start > end or dirs missing
        Side effects: writes PHASE-SUMMARIES.md, deletes artifact files, moves phase dirs
    """
    start = args.start_phase
    end = args.end_phase
    milestone = args.milestone

    if start > end:
        print(f"Error: Start phase ({start}) cannot exceed end phase ({end})", file=sys.stderr)
        sys.exit(1)

    git_root = find_git_root()
    phases_dir = git_root / ".planning" / "phases"
    if not phases_dir.is_dir():
        print(f"Error: Phases directory not found at {phases_dir}", file=sys.stderr)
        sys.exit(1)

    # The milestone archive dir must already exist; an earlier workflow step
    # is responsible for creating it.
    milestone_dir = git_root / ".planning" / "milestones" / milestone
    if not milestone_dir.is_dir():
        print(f"Error: Milestone directory not found at {milestone_dir}", file=sys.stderr)
        print("Run archive_milestone step first to create it")
        sys.exit(1)

    # Stage 1: Consolidate summaries into one PHASE-SUMMARIES.md.  This must
    # run before stage 2, which deletes the individual *-SUMMARY.md files.
    summaries_file = milestone_dir / "PHASE-SUMMARIES.md"
    summary_count = 0
    lines = [f"# Phase Summaries: {milestone}", ""]

    for d in sorted(phases_dir.iterdir()):
        if not d.is_dir():
            continue
        dirname = d.name
        # Directory names follow "NN-slug"; split once for number and name.
        phase_num = dirname.split("-", 1)[0]
        phase_name = dirname.split("-", 1)[1] if "-" in dirname else dirname

        if in_range(phase_num, start, end):
            summary_files = sorted(d.glob("*-SUMMARY.md"))
            if summary_files:
                lines.append(f"## Phase {phase_num}: {phase_name}")
                lines.append("")
                for f in summary_files:
                    # e.g. "03-api-SUMMARY.md" -> plan id "03-api"
                    plan_id = f.stem.replace("-SUMMARY", "")
                    lines.append(f"### {plan_id}")
                    lines.append("")
                    lines.append(f.read_text(encoding="utf-8"))
                    lines.append("")
                    summary_count += 1

    summaries_file.write_text("\n".join(lines), encoding="utf-8")
    print(f"Stage 1: Consolidated {summary_count} summaries to PHASE-SUMMARIES.md")

    # Stage 2: Delete per-phase working artifacts (the SUMMARYs are now
    # redundant — consolidated above).
    deleted = 0
    artifact_patterns = [
        "*-CONTEXT.md", "*-DESIGN.md", "*-RESEARCH.md",
        "*-SUMMARY.md", "*-UAT.md", "*-VERIFICATION.md",
        "*-EXECUTION-ORDER.md",
    ]
    for d in sorted(phases_dir.iterdir()):
        if not d.is_dir():
            continue
        phase_num = d.name.split("-", 1)[0]
        if in_range(phase_num, start, end):
            for pattern in artifact_patterns:
                for f in d.glob(pattern):
                    f.unlink()
                    deleted += 1

    print(f"Stage 2: Deleted {deleted} artifact files")

    # Stage 3: Move the (now slimmed-down) phase directories into the
    # milestone archive.  sorted() materializes the listing first, so moving
    # entries while looping is safe.
    archive_phases = milestone_dir / "phases"
    archive_phases.mkdir(exist_ok=True)
    moved = 0

    for d in sorted(phases_dir.iterdir()):
        if not d.is_dir():
            continue
        phase_num = d.name.split("-", 1)[0]
        if in_range(phase_num, start, end):
            shutil.move(str(d), str(archive_phases / d.name))
            moved += 1

    print(f"Stage 3: Moved {moved} phase directories to milestones/{milestone}/phases/")
    print()
    print(f"Archive complete: {summary_count} summaries, {deleted} artifacts deleted, {moved} dirs moved")
1163
+
1164
+
1165
+ # ===================================================================
1166
+ # Subcommand: archive-milestone-files
1167
+ # ===================================================================
1168
+
1169
+
1170
def cmd_archive_milestone_files(args: argparse.Namespace) -> None:
    """Move optional milestone files to the milestone archive directory.

    Contract:
        Args: milestone (str) — milestone slug (e.g., mvp, push-notifications)
        Output: text — per-file archive status
        Exit codes: 0 = success, 1 = milestone directory missing
        Side effects: moves audit, context, and research files to milestone dir
    """
    milestone = args.milestone

    git_root = find_git_root()
    planning_dir = git_root / ".planning"
    milestone_dir = planning_dir / "milestones" / milestone

    if not milestone_dir.is_dir():
        print(f"Error: Milestone directory not found at {milestone_dir}", file=sys.stderr)
        print("Run archive_milestone step first to create it")
        sys.exit(1)

    # (source, destination name, display label, source-is-directory flag)
    candidates = [
        (planning_dir / "MILESTONE-AUDIT.md", "MILESTONE-AUDIT.md",
         "MILESTONE-AUDIT.md → MILESTONE-AUDIT.md", False),
        (planning_dir / "MILESTONE-CONTEXT.md", "CONTEXT.md",
         "MILESTONE-CONTEXT.md → CONTEXT.md", False),
        (planning_dir / "research", "research",
         "research/ → research/", True),
    ]

    archived = 0
    for src, dest_name, label, is_directory in candidates:
        present = src.is_dir() if is_directory else src.is_file()
        if not present:
            continue
        shutil.move(str(src), str(milestone_dir / dest_name))
        print(f"Archived: {label}")
        archived += 1

    if archived == 0:
        print("No optional files to archive (audit, context, research all absent)")
    else:
        print()
        print(f"Archived {archived} item(s) to milestones/{milestone}/")
1218
+
1219
+
1220
+ # ===================================================================
1221
+ # Subcommand: scan-artifact-subsystems
1222
+ # ===================================================================
1223
+
1224
+
1225
def _scan_artifact_subsystem_values(planning: Path) -> list[str]:
    """Extract all subsystem values from planning artifacts (helper for doctor-scan)."""
    # (subdirectory under .planning/, glob pattern within it)
    scan_globs = [
        ("phases", "*/*-SUMMARY.md"),
        ("adhoc", "*-SUMMARY.md"),
        ("debug", "*.md"),
        ("debug/resolved", "*.md"),
        ("todos/pending", "*.md"),
        ("todos/done", "*.md"),
    ]
    found: list[str] = []
    for subdir, pattern in scan_globs:
        root = planning / subdir
        if not root.is_dir():
            continue  # optional directory — silently absent
        for doc in sorted(root.glob(pattern)):
            meta = parse_frontmatter(doc)
            if meta and meta.get("subsystem"):
                found.append(meta["subsystem"])
    return found
1244
+
1245
+
1246
+ def _detect_versioned_milestone_dirs(planning: Path) -> list[dict]:
1247
+ """Detect v-prefixed milestone directories that need migration.
1248
+
1249
+ Returns list of dicts with keys: path, version, sub, type.
1250
+ - "standard": v-dir has .md files directly
1251
+ - "nested": v-dir has sub-directories (excluding phases/) and no direct .md files
1252
+ """
1253
+ milestones_dir = planning / "milestones"
1254
+ if not milestones_dir.is_dir():
1255
+ return []
1256
+
1257
+ v_pattern = re.compile(r"^v\d+")
1258
+ results: list[dict] = []
1259
+
1260
+ for entry in sorted(milestones_dir.iterdir()):
1261
+ if not entry.is_dir() or not v_pattern.match(entry.name):
1262
+ continue
1263
+
1264
+ version = entry.name
1265
+ has_md_files = any(f.suffix == ".md" for f in entry.iterdir() if f.is_file())
1266
+ sub_dirs = [
1267
+ d for d in entry.iterdir()
1268
+ if d.is_dir() and d.name != "phases"
1269
+ ]
1270
+
1271
+ if sub_dirs and not has_md_files:
1272
+ # Nested: each sub-dir is a separate entry
1273
+ for sub in sorted(sub_dirs):
1274
+ results.append({
1275
+ "path": f"milestones/{version}/{sub.name}",
1276
+ "version": version,
1277
+ "sub": sub.name,
1278
+ "type": "nested",
1279
+ })
1280
+ else:
1281
+ # Standard: v-dir itself is the milestone
1282
+ results.append({
1283
+ "path": f"milestones/{version}",
1284
+ "version": version,
1285
+ "sub": None,
1286
+ "type": "standard",
1287
+ })
1288
+
1289
+ return results
1290
+
1291
+
1292
def _parse_milestone_name_mapping(planning: Path) -> list[dict]:
    """Parse MILESTONES.md and PROJECT.md to build version→name→slug mapping.

    Returns list of dicts with keys: version, name, slug, and optionally current.
    """
    mapping: list[dict] = []

    # Shipped/started milestone headers from MILESTONES.md.
    ms_path = planning / "MILESTONES.md"
    if ms_path.is_file():
        header_pattern = re.compile(
            r"^## (v[\d.]+)\s+(.+?)\s*\((?:Shipped|Started):?\s*[^)]+\)",
            re.MULTILINE,
        )
        for hit in header_pattern.finditer(ms_path.read_text(encoding="utf-8")):
            title = hit.group(2).strip()
            mapping.append({
                "version": hit.group(1),
                "name": title,
                "slug": slugify(title),
            })

    # Current milestone from PROJECT.md (marked with current=True).
    proj_path = planning / "PROJECT.md"
    if proj_path.is_file():
        current_hit = re.search(
            r"^## Current Milestone:\s*(v[\d.]+)\s+(.+?)$",
            proj_path.read_text(encoding="utf-8"),
            re.MULTILINE,
        )
        if current_hit:
            title = current_hit.group(2).strip()
            mapping.append({
                "version": current_hit.group(1),
                "name": title,
                "slug": slugify(title),
                "current": True,
            })

    return mapping
1336
+
1337
+
1338
def cmd_scan_artifact_subsystems(args: argparse.Namespace) -> None:
    """Scan planning artifacts for subsystem YAML frontmatter values.

    Contract:
        Args: --values-only (flag, optional)
        Output: text — subsystem values grouped by artifact type
        Exit codes: 0 = success, 1 = .planning/ missing
        Side effects: read-only
    """
    planning = find_planning_dir()
    show_values_only = args.values_only

    # (section header, subdirectory under .planning/, glob pattern)
    sections = [
        ("Phase SUMMARYs", "phases", "*/*-SUMMARY.md"),
        ("Adhoc SUMMARYs", "adhoc", "*-SUMMARY.md"),
        ("Debug docs", "debug", "*.md"),
        ("Debug resolved", "debug/resolved", "*.md"),
        ("Pending Todos", "todos/pending", "*.md"),
        ("Done Todos", "todos/done", "*.md"),
    ]

    for title, subdir, glob_pat in sections:
        # Header is printed even when the directory is absent (empty section).
        print(f"=== {title} ===")
        base = planning / subdir
        if not base.is_dir():
            continue
        for doc in sorted(base.glob(glob_pat)):
            meta = parse_frontmatter(doc)
            value = meta.get("subsystem") if meta else None
            if not value:
                continue
            print(value if show_values_only else f"{doc}\t{value}")
1371
+
1372
+
1373
+ # ===================================================================
1374
+ # Subcommand: scan-milestone-naming
1375
+ # ===================================================================
1376
+
1377
+
1378
def cmd_scan_milestone_naming(args: argparse.Namespace) -> None:
    """Scan milestone directories for version-based naming needing migration.

    Contract:
        Args: (none)
        Output: JSON — versioned_dirs, name_mappings, current_milestone, needs_migration
        Exit codes: 0 = success, 2 = missing .planning/
        Side effects: read-only
    """
    planning = find_planning_dir()

    versioned = _detect_versioned_milestone_dirs(planning)
    mappings = _parse_milestone_name_mapping(planning)

    # Split the current milestone (flagged by the parser) from historical ones.
    current = None
    historical: list[dict] = []
    for entry in mappings:
        if entry.get("current"):
            current = {
                "version": entry["version"],
                "name": entry["name"],
                "slug": entry["slug"],
            }
        else:
            historical.append(entry)

    payload = {
        "versioned_dirs": versioned,
        "name_mappings": historical,
        "current_milestone": current,
        "needs_migration": len(versioned) > 0,
    }

    print(json.dumps(payload, indent=2))
1413
+
1414
+
1415
+ # ===================================================================
1416
+ # Subcommand: find-phase
1417
+ # ===================================================================
1418
+
1419
+
1420
def cmd_find_phase(args: argparse.Namespace) -> None:
    """Find phase directory and validate against roadmap.

    Contract:
        Args: phase (str) — phase number (e.g., 5, 05, 2.1)
        Output: JSON — {phase, dir, name, exists_in_roadmap}
        Exit codes: 0 = success, 1 = not in git repo
        Side effects: read-only
    """
    phase = normalize_phase(args.phase)

    git_root = find_git_root()
    planning = git_root / ".planning"

    info: dict[str, Any] = {
        "phase": phase,
        "dir": None,
        "name": None,
        "exists_in_roadmap": False,
    }

    if planning.is_dir():
        located = find_phase_dir(planning, phase)
        if located:
            info["dir"] = str(located.relative_to(git_root))
            # Directory names follow "NN-slug"; report the slug part.
            parts = located.name.split("-", 1)
            info["name"] = parts[1] if len(parts) > 1 else located.name

        # Cross-check against ROADMAP.md ("Phase XX:" or "Phase XX " mentions).
        roadmap_path = planning / "ROADMAP.md"
        if roadmap_path.is_file():
            text = roadmap_path.read_text(encoding="utf-8")
            if re.search(rf"Phase\s+{re.escape(phase)}[\s:]", text):
                info["exists_in_roadmap"] = True

    print(json.dumps(info, indent=2))
1459
+
1460
+
1461
+ # ===================================================================
1462
+ # Subcommand: list-artifacts
1463
+ # ===================================================================
1464
+
1465
+
1466
def cmd_list_artifacts(args: argparse.Namespace) -> None:
    """Count PLANs, SUMMARYs, and other artifacts per phase.

    Contract:
        Args: phase (str) — phase number
        Output: JSON — {phase, plans, summaries, has_context, has_design, ...}
        Exit codes: 0 = success, 1 = .planning/ missing
        Side effects: read-only
    """
    phase = normalize_phase(args.phase)
    planning = find_planning_dir()
    target = find_phase_dir(planning, phase)

    flag_types = ["context", "design", "research", "uat", "verification"]

    counts: dict[str, Any] = {"phase": phase, "plans": 0, "summaries": 0}
    for kind in flag_types:
        counts[f"has_{kind}"] = False
    counts["has_execution_order"] = False

    if target and target.is_dir():
        counts["plans"] = len(list(target.glob("*-PLAN.md")))
        counts["summaries"] = len(list(target.glob("*-SUMMARY.md")))
        for kind in flag_types:
            counts[f"has_{kind}"] = any(target.glob(f"*-{kind.upper()}.md"))
        # Unlike the suffixed artifacts, EXECUTION-ORDER.md is a fixed name.
        counts["has_execution_order"] = (target / "EXECUTION-ORDER.md").is_file()

    print(json.dumps(counts, indent=2))
1503
+
1504
+
1505
+ # ===================================================================
1506
+ # Subcommand: check-artifact
1507
+ # ===================================================================
1508
+
1509
+
1510
def cmd_check_artifact(args: argparse.Namespace) -> None:
    """Check if a specific artifact exists for a phase.

    Contract:
        Args: phase (str), type (str) — artifact type (CONTEXT, DESIGN, etc.)
        Output: JSON — {exists, path}
        Exit codes: 0 = success, 1 = .planning/ missing
        Side effects: read-only
    """
    phase = normalize_phase(args.phase)
    artifact_type = args.type.upper()
    planning = find_planning_dir()
    phase_dir = find_phase_dir(planning, phase)

    result: dict[str, Any] = {
        "exists": False,
        "path": None,
    }

    if phase_dir and phase_dir.is_dir():
        # Map artifact types to glob patterns (plain strings — the previous
        # f-prefixes had no placeholders and were meaningless).
        patterns = {
            "CONTEXT": "*-CONTEXT.md",
            "DESIGN": "*-DESIGN.md",
            "RESEARCH": "*-RESEARCH.md",
            "UAT": "*-UAT.md",
            "VERIFICATION": "*-VERIFICATION.md",
            "PLAN": "*-PLAN.md",
            "SUMMARY": "*-SUMMARY.md",
            "EXECUTION-ORDER": "EXECUTION-ORDER.md",
        }

        pattern = patterns.get(artifact_type)
        if pattern:
            matches = list(phase_dir.glob(pattern))
            if matches:
                result["exists"] = True
                # Report the first match, relative to the repo root.
                result["path"] = str(matches[0].relative_to(find_git_root()))

    json.dump(result, sys.stdout, indent=2)
    sys.stdout.write("\n")
1551
+
1552
+
1553
+ # ===================================================================
1554
+ # Subcommand: scan-planning-context
1555
+ # ===================================================================
1556
+
1557
+
1558
+ def _has_readiness_section(path: Path) -> bool:
1559
+ """Check if file has a non-empty '## Next Phase Readiness' section."""
1560
+ try:
1561
+ text = path.read_text(encoding="utf-8", errors="replace")
1562
+ except OSError:
1563
+ return False
1564
+
1565
+ idx = text.find("## Next Phase Readiness")
1566
+ if idx == -1:
1567
+ return False
1568
+
1569
+ after = text[idx + len("## Next Phase Readiness"):]
1570
+ next_heading = re.search(r"\n## ", after)
1571
+ section = after[:next_heading.start()] if next_heading else after
1572
+ stripped = section.strip().strip("-").strip()
1573
+ return len(stripped) > 0
1574
+
1575
+
1576
+ def _extract_phase_number(phase_str: str) -> int | None:
1577
+ """Extract integer phase number from phase string like '05-auth' or '05'."""
1578
+ match = re.match(r"^(\d+)", str(phase_str))
1579
+ return int(match.group(1)) if match else None
1580
+
1581
+
1582
+ def _is_adjacent_phase(target_num: int, candidate_num: int) -> bool:
1583
+ """Check if candidate is within 2 phases before target (N-1, N-2)."""
1584
+ diff = target_num - candidate_num
1585
+ return 1 <= diff <= 2
1586
+
1587
+
1588
+ def _score_summary(
1589
+ fm: dict[str, Any],
1590
+ target_phase: str,
1591
+ target_num: int | None,
1592
+ subsystems: list[str],
1593
+ keywords: list[str],
1594
+ ) -> tuple[str, list[str]]:
1595
+ """Score a SUMMARY's relevance to the target phase."""
1596
+ reasons: list[str] = []
1597
+ is_high = False
1598
+ is_medium = False
1599
+
1600
+ # HIGH signals
1601
+ affects = fm.get("affects", []) or []
1602
+ if isinstance(affects, str):
1603
+ affects = [affects]
1604
+ for a in affects:
1605
+ if target_phase in str(a):
1606
+ reasons.append(f"affects contains '{target_phase}'")
1607
+ is_high = True
1608
+
1609
+ fm_subsystem = fm.get("subsystem", "")
1610
+ if fm_subsystem and fm_subsystem in subsystems:
1611
+ reasons.append(f"same subsystem '{fm_subsystem}'")
1612
+ is_high = True
1613
+
1614
+ requires = fm.get("requires", []) or []
1615
+ if isinstance(requires, list):
1616
+ for req in requires:
1617
+ if isinstance(req, dict):
1618
+ req_phase = str(req.get("phase", ""))
1619
+ else:
1620
+ req_phase = str(req)
1621
+ if target_phase in req_phase:
1622
+ reasons.append(f"requires references '{target_phase}'")
1623
+ is_high = True
1624
+
1625
+ # MEDIUM signals
1626
+ fm_tags = fm.get("tags", []) or []
1627
+ if isinstance(fm_tags, str):
1628
+ fm_tags = [fm_tags]
1629
+ fm_tags_lower = {str(t).lower() for t in fm_tags}
1630
+ keywords_lower = {k.lower() for k in keywords}
1631
+ overlap = fm_tags_lower & keywords_lower
1632
+ if overlap:
1633
+ reasons.append(f"overlapping tags: {sorted(overlap)}")
1634
+ is_medium = True
1635
+
1636
+ fm_phase = fm.get("phase", "")
1637
+ candidate_num = _extract_phase_number(str(fm_phase))
1638
+ if target_num is not None and candidate_num is not None:
1639
+ if _is_adjacent_phase(target_num, candidate_num):
1640
+ reasons.append(f"adjacent phase (N-{target_num - candidate_num})")
1641
+ is_medium = True
1642
+
1643
+ if is_high:
1644
+ return ("HIGH", reasons)
1645
+ if is_medium:
1646
+ return ("MEDIUM", reasons)
1647
+ return ("LOW", reasons)
1648
+
1649
+
1650
+ def _resolve_transitive_requires(
1651
+ summaries: list[dict[str, Any]],
1652
+ target_phase: str,
1653
+ ) -> set[str]:
1654
+ """Find all phases transitively required by the target phase."""
1655
+ required: set[str] = set()
1656
+ for s in summaries:
1657
+ fm = s.get("frontmatter", {})
1658
+ phase_name = str(fm.get("phase", ""))
1659
+ affects = fm.get("affects", []) or []
1660
+ if isinstance(affects, str):
1661
+ affects = [affects]
1662
+ if any(target_phase in str(a) for a in affects):
1663
+ required.add(phase_name)
1664
+ requires = fm.get("requires", []) or []
1665
+ if isinstance(requires, list):
1666
+ for req in requires:
1667
+ if isinstance(req, dict):
1668
+ req_phase = str(req.get("phase", ""))
1669
+ else:
1670
+ req_phase = str(req)
1671
+ if req_phase:
1672
+ required.add(req_phase)
1673
+ return required
1674
+
1675
+
1676
+ def _scan_summaries(
1677
+ planning: Path,
1678
+ target_phase: str,
1679
+ target_num: int | None,
1680
+ subsystems: list[str],
1681
+ keywords: list[str],
1682
+ parse_errors: list[dict[str, str]],
1683
+ ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
1684
+ """Scan phase summary files and score relevance."""
1685
+ phases_dir = planning / "phases"
1686
+ source_info: dict[str, Any] = {"dir": str(phases_dir), "scanned": 0, "skipped": None}
1687
+
1688
+ if not phases_dir.is_dir():
1689
+ source_info["skipped"] = "directory not found"
1690
+ return [], source_info
1691
+
1692
+ summary_files = sorted(phases_dir.glob("*/*-SUMMARY.md"))
1693
+ if not summary_files:
1694
+ source_info["skipped"] = "no SUMMARY.md files found"
1695
+ return [], source_info
1696
+
1697
+ results: list[dict[str, Any]] = []
1698
+ for path in summary_files:
1699
+ source_info["scanned"] += 1
1700
+ fm = parse_frontmatter(path)
1701
+ if fm is None:
1702
+ parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
1703
+ continue
1704
+
1705
+ relevance, match_reasons = _score_summary(fm, target_phase, target_num, subsystems, keywords)
1706
+ readiness = _has_readiness_section(path)
1707
+
1708
+ results.append({
1709
+ "path": str(path),
1710
+ "frontmatter": fm,
1711
+ "relevance": relevance,
1712
+ "match_reasons": match_reasons,
1713
+ "has_readiness_warnings": readiness,
1714
+ })
1715
+
1716
+ transitive = _resolve_transitive_requires(results, target_phase)
1717
+ for entry in results:
1718
+ fm = entry["frontmatter"]
1719
+ phase_name = str(fm.get("phase", ""))
1720
+ if phase_name in transitive and entry["relevance"] != "HIGH":
1721
+ entry["relevance"] = "HIGH"
1722
+ entry["match_reasons"].append("in transitive requires chain")
1723
+
1724
+ return results, source_info
1725
+
1726
+
1727
+ def _scan_debug_docs(
1728
+ planning: Path,
1729
+ parse_errors: list[dict[str, str]],
1730
+ ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
1731
+ """Scan resolved debug documents for learnings."""
1732
+ resolved_dir = planning / "debug" / "resolved"
1733
+ source_info: dict[str, Any] = {"dir": str(resolved_dir), "scanned": 0, "skipped": None}
1734
+
1735
+ if not resolved_dir.is_dir():
1736
+ source_info["skipped"] = "directory not found"
1737
+ return [], source_info
1738
+
1739
+ results: list[dict[str, Any]] = []
1740
+ for path in sorted(resolved_dir.glob("*.md")):
1741
+ source_info["scanned"] += 1
1742
+ fm = parse_frontmatter(path)
1743
+ if fm is None:
1744
+ parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
1745
+ continue
1746
+
1747
+ results.append({
1748
+ "path": str(path),
1749
+ "slug": path.stem,
1750
+ "subsystem": fm.get("subsystem", ""),
1751
+ "root_cause": fm.get("root_cause", ""),
1752
+ "resolution": fm.get("resolution", ""),
1753
+ "tags": fm.get("tags", []) or [],
1754
+ "phase": fm.get("phase", ""),
1755
+ })
1756
+
1757
+ return results, source_info
1758
+
1759
+
1760
+ def _scan_adhoc_summaries(
1761
+ planning: Path,
1762
+ parse_errors: list[dict[str, str]],
1763
+ ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
1764
+ """Scan adhoc summary files for learnings."""
1765
+ adhoc_dir = planning / "adhoc"
1766
+ source_info: dict[str, Any] = {"dir": str(adhoc_dir), "scanned": 0, "skipped": None}
1767
+
1768
+ if not adhoc_dir.is_dir():
1769
+ source_info["skipped"] = "directory not found"
1770
+ return [], source_info
1771
+
1772
+ summary_files = sorted(adhoc_dir.glob("*-SUMMARY.md"))
1773
+ if not summary_files:
1774
+ source_info["skipped"] = "no adhoc SUMMARY.md files found"
1775
+ return [], source_info
1776
+
1777
+ results: list[dict[str, Any]] = []
1778
+ for path in summary_files:
1779
+ source_info["scanned"] += 1
1780
+ fm = parse_frontmatter(path)
1781
+ if fm is None:
1782
+ parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
1783
+ continue
1784
+
1785
+ learnings = fm.get("learnings", []) or []
1786
+ if isinstance(learnings, str):
1787
+ learnings = [learnings]
1788
+
1789
+ results.append({
1790
+ "path": str(path),
1791
+ "subsystem": fm.get("subsystem", ""),
1792
+ "learnings": learnings,
1793
+ "related_phase": fm.get("related_phase", ""),
1794
+ "tags": fm.get("tags", []) or [],
1795
+ })
1796
+
1797
+ return results, source_info
1798
+
1799
+
1800
+ def _scan_todos(
1801
+ planning: Path,
1802
+ subdir: str,
1803
+ parse_errors: list[dict[str, str]],
1804
+ ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
1805
+ """Scan todo files (done/ or pending/) for metadata."""
1806
+ todo_dir = planning / "todos" / subdir
1807
+ source_info: dict[str, Any] = {"dir": str(todo_dir), "scanned": 0, "skipped": None}
1808
+
1809
+ if not todo_dir.is_dir():
1810
+ source_info["skipped"] = "directory not found"
1811
+ return [], source_info
1812
+
1813
+ md_files = sorted(todo_dir.glob("*.md"))
1814
+ if not md_files:
1815
+ source_info["skipped"] = f"no .md files in {subdir}/"
1816
+ return [], source_info
1817
+
1818
+ results: list[dict[str, Any]] = []
1819
+ for path in md_files:
1820
+ source_info["scanned"] += 1
1821
+ fm = parse_frontmatter(path)
1822
+ if fm is None:
1823
+ parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
1824
+ continue
1825
+
1826
+ results.append({
1827
+ "path": str(path),
1828
+ "title": fm.get("title", path.stem),
1829
+ "subsystem": fm.get("subsystem", ""),
1830
+ "priority": fm.get("priority", ""),
1831
+ "phase_origin": fm.get("phase_origin", ""),
1832
+ })
1833
+
1834
+ return results, source_info
1835
+
1836
+
1837
+ def _scan_knowledge_files(
1838
+ planning: Path,
1839
+ subsystems: list[str],
1840
+ ) -> tuple[list[dict[str, Any]], dict[str, Any]]:
1841
+ """List knowledge files and match by subsystem."""
1842
+ knowledge_dir = planning / "knowledge"
1843
+ source_info: dict[str, Any] = {"dir": str(knowledge_dir), "scanned": 0, "skipped": None}
1844
+
1845
+ if not knowledge_dir.is_dir():
1846
+ source_info["skipped"] = "directory not found"
1847
+ return [], source_info
1848
+
1849
+ md_files = sorted(knowledge_dir.glob("*.md"))
1850
+ if not md_files:
1851
+ source_info["skipped"] = "no .md files in knowledge/"
1852
+ return [], source_info
1853
+
1854
+ subsystems_lower = {s.lower() for s in subsystems}
1855
+ results: list[dict[str, Any]] = []
1856
+ for path in md_files:
1857
+ source_info["scanned"] += 1
1858
+ file_subsystem = path.stem.lower()
1859
+ matched = file_subsystem in subsystems_lower
1860
+
1861
+ results.append({
1862
+ "path": str(path),
1863
+ "subsystem": path.stem,
1864
+ "matched": matched,
1865
+ })
1866
+
1867
+ return results, source_info
1868
+
1869
+
1870
+ def _aggregate_from_summaries(summaries: list[dict[str, Any]]) -> dict[str, list[str]]:
1871
+ """Aggregate tech stack, patterns, key files, decisions from HIGH+MEDIUM summaries."""
1872
+ tech_added: list[str] = []
1873
+ patterns: list[str] = []
1874
+ key_files_created: list[str] = []
1875
+ key_files_modified: list[str] = []
1876
+ key_decisions: list[str] = []
1877
+
1878
+ for entry in summaries:
1879
+ if entry["relevance"] == "LOW":
1880
+ continue
1881
+ fm = entry["frontmatter"]
1882
+
1883
+ ts = fm.get("tech-stack", {}) or {}
1884
+ if isinstance(ts, dict):
1885
+ added = ts.get("added", []) or []
1886
+ if isinstance(added, str):
1887
+ added = [added]
1888
+ tech_added.extend(str(a) for a in added)
1889
+ pat = ts.get("patterns", []) or []
1890
+ if isinstance(pat, str):
1891
+ pat = [pat]
1892
+ patterns.extend(str(p) for p in pat)
1893
+
1894
+ pe = fm.get("patterns-established", []) or []
1895
+ if isinstance(pe, str):
1896
+ pe = [pe]
1897
+ patterns.extend(str(p) for p in pe)
1898
+
1899
+ kf = fm.get("key-files", {}) or {}
1900
+ if isinstance(kf, dict):
1901
+ created = kf.get("created", []) or []
1902
+ if isinstance(created, str):
1903
+ created = [created]
1904
+ key_files_created.extend(str(f) for f in created)
1905
+ modified = kf.get("modified", []) or []
1906
+ if isinstance(modified, str):
1907
+ modified = [modified]
1908
+ key_files_modified.extend(str(f) for f in modified)
1909
+
1910
+ kd = fm.get("key-decisions", []) or []
1911
+ if isinstance(kd, str):
1912
+ kd = [kd]
1913
+ key_decisions.extend(str(d) for d in kd)
1914
+
1915
+ return {
1916
+ "tech_stack_added": sorted(set(tech_added)),
1917
+ "patterns_established": sorted(set(patterns)),
1918
+ "key_files_created": sorted(set(key_files_created)),
1919
+ "key_files_modified": sorted(set(key_files_modified)),
1920
+ "key_decisions": list(dict.fromkeys(key_decisions)),
1921
+ }
1922
+
1923
+
1924
def _format_markdown(output: dict[str, Any]) -> str:
    """Format scanner output as readable markdown for LLM consumption.

    Emits only the sections that have content, in a fixed order: aggregated
    patterns/tech/decisions/files, then debug and adhoc learnings, summary
    reading lists, knowledge files, pending todos, and finally scanner
    bookkeeping. Sections are joined with blank lines.
    """
    sections: list[str] = []
    agg = output.get("aggregated", {})

    patterns = agg.get("patterns_established", [])
    if patterns:
        lines = ["### Established Patterns"]
        lines.extend(f"- {p}" for p in patterns)
        sections.append("\n".join(lines))

    tech = agg.get("tech_stack_added", [])
    if tech:
        # Single comma-joined line rather than a bullet list.
        sections.append(f"### Tech Stack\n{', '.join(tech)}")

    decisions = agg.get("key_decisions", [])
    if decisions:
        lines = ["### Key Decisions"]
        lines.extend(f"- {d}" for d in decisions)
        sections.append("\n".join(lines))

    created = agg.get("key_files_created", [])
    modified = agg.get("key_files_modified", [])
    if created or modified:
        lines = ["### Key Files"]
        if created:
            lines.append("**Created:**")
            lines.extend(f"- `{f}`" for f in created)
        if modified:
            lines.append("**Modified:**")
            lines.extend(f"- `{f}`" for f in modified)
        sections.append("\n".join(lines))

    debug = output.get("debug_learnings", [])
    if debug:
        lines = ["### Debug Learnings"]
        for d in debug:
            slug = d.get("slug", "unknown")
            sub = d.get("subsystem", "")
            rc = d.get("root_cause", "")
            res = d.get("resolution", "")
            lines.append(f"- **{slug}** ({sub}): {rc} — Fix: {res}")
        sections.append("\n".join(lines))

    # Only adhoc entries that actually carry learnings are shown.
    adhoc_entries = [a for a in output.get("adhoc_learnings", []) if a.get("learnings")]
    if adhoc_entries:
        lines = ["### Adhoc Learnings"]
        for a in adhoc_entries:
            sub = a.get("subsystem", "")
            path = a.get("path", "")
            # Precedence note: parses as `(sub or Path(path).stem) if path else "unknown"`
            # — the filename stem is the fallback label when no subsystem is set.
            label = sub or Path(path).stem if path else "unknown"
            lines.append(f"- **{label}**")
            for learning in a["learnings"]:
                lines.append(f"  - {learning}")
        sections.append("\n".join(lines))

    summaries = output.get("summaries", [])
    # HIGH summaries with readiness warnings need a full read; every other
    # HIGH/MEDIUM summary is merely listed as relevant.
    needs_read = [s for s in summaries if s.get("relevance") == "HIGH" and s.get("has_readiness_warnings")]
    other_relevant = [s for s in summaries if s.get("relevance") in ("HIGH", "MEDIUM") and not s.get("has_readiness_warnings")]

    if needs_read:
        lines = ["### Summaries Needing Full Read"]
        lines.extend(f"- `{s['path']}`" for s in needs_read)
        sections.append("\n".join(lines))

    if other_relevant:
        lines = ["### Other Relevant Summaries"]
        lines.extend(f"- `{s['path']}` [{s.get('relevance', '')}]" for s in other_relevant)
        sections.append("\n".join(lines))

    matched_knowledge = [k for k in output.get("knowledge_files", []) if k.get("matched")]
    if matched_knowledge:
        lines = ["### Knowledge Files to Read"]
        lines.extend(f"- `{k['path']}`" for k in matched_knowledge)
        sections.append("\n".join(lines))

    todos = output.get("pending_todos", [])
    if todos:
        lines = ["### Pending Todos"]
        for t in todos:
            title = t.get("title", "untitled")
            priority = t.get("priority", "")
            sub = t.get("subsystem", "")
            path = t.get("path", "")
            lines.append(f"- **{title}** [{priority}] ({sub}) — `{path}`")
        sections.append("\n".join(lines))

    # Scanner Info is always emitted, even when everything else is empty.
    sources = output.get("sources", {})
    parse_errors = sources.get("parse_errors", [])
    info_lines = ["### Scanner Info"]
    for name, src in sources.items():
        if name == "parse_errors" or not isinstance(src, dict):
            continue
        scanned = src.get("scanned", 0)
        skipped = src.get("skipped")
        if skipped:
            info_lines.append(f"- {name}: skipped ({skipped})")
        else:
            info_lines.append(f"- {name}: {scanned} scanned")
    if parse_errors:
        info_lines.append("**Parse errors:**")
        for err in parse_errors:
            info_lines.append(f"- `{err.get('path', '')}`: {err.get('error', '')}")
    sections.append("\n".join(info_lines))

    return "\n\n".join(sections)
2030
+
2031
+
2032
def cmd_scan_planning_context(args: argparse.Namespace) -> None:
    """Scan .planning/ artifacts and score relevance for plan-phase context assembly.

    Contract:
        Args: --phase (str, required), --phase-name (str), --subsystem (repeatable), --keywords (csv), --json (flag)
        Output: JSON (--json) or markdown — scored summaries, learnings, todos, knowledge, aggregated context
        Exit codes: 0 = success (empty result if no .planning/)
        Side effects: read-only
    """
    phase = normalize_phase(args.phase)
    phase_name = args.phase_name.strip() if args.phase_name else ""
    subsystems = [s for s in (args.subsystems or []) if s]
    keywords = [k.strip() for k in (args.keywords or "").split(",") if k.strip()]

    if phase_name:
        # Words from the human-readable phase name (longer than 2 chars)
        # double as extra match keywords for tag overlap scoring.
        name_words = [w for w in re.split(r"[-_\s]+", phase_name) if len(w) > 2]
        keywords.extend(name_words)

    target_num = _extract_phase_number(phase)

    planning = find_planning_dir_optional()
    if planning is None:
        # No .planning/ directory: emit an empty-but-well-formed result so
        # callers can rely on the output schema either way.
        if args.json:
            # NOTE: the same empty_src dict object is shared by every source
            # key; safe here because the structure is only serialized.
            empty_src = {"dir": "", "scanned": 0, "skipped": ".planning/ not found"}
            output: dict[str, Any] = {
                "success": True,
                "target": {"phase": phase, "phase_name": phase_name, "subsystems": subsystems, "keywords": keywords},
                "sources": {
                    "summaries": empty_src, "debug_docs": empty_src, "adhoc_summaries": empty_src,
                    "completed_todos": empty_src, "pending_todos": empty_src, "knowledge_files": empty_src,
                    "parse_errors": [],
                },
                "summaries": [], "debug_learnings": [], "adhoc_learnings": [],
                "completed_todos": [], "pending_todos": [], "knowledge_files": [],
                "aggregated": {
                    "tech_stack_added": [], "patterns_established": [],
                    "key_files_created": [], "key_files_modified": [], "key_decisions": [],
                },
            }
            json.dump(output, sys.stdout, indent=2, cls=_SafeEncoder)
            sys.stdout.write("\n")
        else:
            print("No .planning/ directory found. No prior context available.")
        return

    # Shared accumulator: every scanner appends its unparseable files here.
    parse_errors: list[dict[str, str]] = []

    summaries, summaries_src = _scan_summaries(planning, phase, target_num, subsystems, keywords, parse_errors)
    debug_learnings, debug_src = _scan_debug_docs(planning, parse_errors)
    adhoc_learnings, adhoc_src = _scan_adhoc_summaries(planning, parse_errors)
    completed_todos, completed_src = _scan_todos(planning, "done", parse_errors)
    pending_todos, pending_src = _scan_todos(planning, "pending", parse_errors)
    knowledge_files, knowledge_src = _scan_knowledge_files(planning, subsystems)

    aggregated = _aggregate_from_summaries(summaries)

    output = {
        "success": True,
        "target": {"phase": phase, "phase_name": phase_name, "subsystems": subsystems, "keywords": keywords},
        "sources": {
            "summaries": summaries_src, "debug_docs": debug_src, "adhoc_summaries": adhoc_src,
            "completed_todos": completed_src, "pending_todos": pending_src,
            "knowledge_files": knowledge_src, "parse_errors": parse_errors,
        },
        "summaries": summaries,
        "debug_learnings": debug_learnings,
        "adhoc_learnings": adhoc_learnings,
        "completed_todos": completed_todos,
        "pending_todos": pending_todos,
        "knowledge_files": knowledge_files,
        "aggregated": aggregated,
    }

    if args.json:
        json.dump(output, sys.stdout, indent=2, cls=_SafeEncoder)
        sys.stdout.write("\n")
    else:
        print(_format_markdown(output))
2110
+
2111
+
2112
# ===================================================================
# UAT File Management
# ===================================================================

# Regexes for the UAT.md plain-text section format.
_TEST_HEADER_RE = re.compile(r"^###\s+(\d+)\.\s+(.+)$")  # e.g. "### 3. Login flow"
_BATCH_HEADER_RE = re.compile(r"^###\s+Batch\s+(\d+):\s+(.+)$")  # e.g. "### Batch 2: API"
_SECTION_HEADER_RE = re.compile(r"^##\s+(.+)$")  # e.g. "## Progress"
_KV_LINE_RE = re.compile(r"^(\w[\w_]*)\s*:\s*(.*)$")  # e.g. "status: pending"
_LIST_ITEM_START_RE = re.compile(r"^-\s+(\w[\w_]*)\s*:\s*(.*)$")  # e.g. "- test: 3"
_LIST_ITEM_CONT_RE = re.compile(r"^\s+(\w[\w_]*)\s*:\s*(.*)$")  # indented continuation line

# Fields that get quoted in serialization per section
_QUOTED_FIELDS = {
    "current_batch": {"name"},
    "fixes": {"description"},
    "assumptions": {"name", "expected", "reason"},
    "tests": {"reported"},
}
2130
+
2131
+
2132
+ def _ensure_quoted(value: str) -> str:
2133
+ """Add surrounding quotes if not already quoted."""
2134
+ if value.startswith('"') and value.endswith('"'):
2135
+ return value
2136
+ return f'"{value}"'
2137
+
2138
+
2139
class UATFile:
    """Internal representation of UAT.md for programmatic manipulation.

    The on-disk format is YAML frontmatter followed by ``##`` sections
    (Progress, Current Batch, Tests, Fixes Applied, Batches, Assumptions)
    whose bodies are plain ``key: value`` lines. All values are kept as
    strings internally; ``serialize()`` rebuilds the whole document in a
    fixed field order.
    """

    def __init__(self) -> None:
        self.frontmatter: dict[str, Any] = {}  # YAML frontmatter fields
        self.progress: dict[str, str] = {}  # derived counters (see recalc_progress)
        self.current_batch: dict[str, str] = {}  # mirror of the active batch
        self.tests: list[dict[str, str]] = []  # "### N. Name" entries
        self.fixes: list[dict[str, str]] = []  # "## Fixes Applied" list items
        self.batches: list[dict[str, str]] = []  # "### Batch N: Name" entries
        self.assumptions: list[dict[str, str]] = []  # "## Assumptions" list items

    # --- Parsing ---

    @classmethod
    def parse(cls, text: str) -> "UATFile":
        """Parse UAT.md text into structured representation.

        Unknown sections are ignored; malformed YAML frontmatter degrades
        to an empty dict rather than raising.
        """
        uat = cls()

        # Parse frontmatter
        fm_match = _FRONTMATTER_RE.match(text)
        if fm_match:
            try:
                uat.frontmatter = yaml.safe_load(fm_match.group(1)) or {}
            except yaml.YAMLError:
                uat.frontmatter = {}
            body = text[fm_match.end():]
        else:
            body = text

        # Split into sections by ## headers
        sections = cls._split_sections(body)

        for name, content in sections.items():
            if name == "Progress":
                uat.progress = cls._parse_kv_block(content)
            elif name == "Current Batch":
                uat.current_batch = cls._parse_kv_block(content)
            elif name == "Tests":
                uat.tests = cls._parse_tests(content)
            elif name == "Fixes Applied":
                uat.fixes = cls._parse_list_items(content)
            elif name == "Batches":
                uat.batches = cls._parse_batches(content)
            elif name == "Assumptions":
                uat.assumptions = cls._parse_list_items(content)

        return uat

    @staticmethod
    def _split_sections(body: str) -> dict[str, str]:
        """Split body text into {section_name: content} dict.

        Text before the first ``##`` header is discarded. Duplicate section
        names keep only the last occurrence.
        """
        sections: dict[str, str] = {}
        current_name: str | None = None
        current_lines: list[str] = []

        for line in body.splitlines():
            m = _SECTION_HEADER_RE.match(line)
            if m:
                # Flush the previous section before starting the next one.
                if current_name is not None:
                    sections[current_name] = "\n".join(current_lines)
                current_name = m.group(1).strip()
                current_lines = []
            else:
                current_lines.append(line)

        if current_name is not None:
            sections[current_name] = "\n".join(current_lines)

        return sections

    @staticmethod
    def _parse_kv_block(text: str) -> dict[str, str]:
        """Parse a block of key: value lines; non-matching lines are skipped."""
        result: dict[str, str] = {}
        for line in text.splitlines():
            m = _KV_LINE_RE.match(line.strip())
            if m:
                result[m.group(1)] = m.group(2).strip()
        return result

    @staticmethod
    def _parse_tests(text: str) -> list[dict[str, str]]:
        """Parse ### N. Name sections into test dicts.

        Each test dict gets "num" and "name" from the header plus any
        key: value lines that follow until the next header.
        """
        tests: list[dict[str, str]] = []
        current: dict[str, str] | None = None

        for line in text.splitlines():
            m = _TEST_HEADER_RE.match(line)
            if m:
                if current is not None:
                    tests.append(current)
                current = {"num": m.group(1), "name": m.group(2).strip()}
            elif current is not None:
                kv = _KV_LINE_RE.match(line.strip())
                if kv:
                    current[kv.group(1)] = kv.group(2).strip()

        if current is not None:
            tests.append(current)

        return tests

    @staticmethod
    def _parse_batches(text: str) -> list[dict[str, str]]:
        """Parse ### Batch N: Name sections into batch dicts.

        Same shape as _parse_tests but keyed off the Batch header regex.
        """
        batches: list[dict[str, str]] = []
        current: dict[str, str] | None = None

        for line in text.splitlines():
            m = _BATCH_HEADER_RE.match(line)
            if m:
                if current is not None:
                    batches.append(current)
                current = {"num": m.group(1), "name": m.group(2).strip()}
            elif current is not None:
                kv = _KV_LINE_RE.match(line.strip())
                if kv:
                    current[kv.group(1)] = kv.group(2).strip()

        if current is not None:
            batches.append(current)

        return batches

    @staticmethod
    def _parse_list_items(text: str) -> list[dict[str, str]]:
        """Parse - key: value list items (fixes, assumptions).

        A "- key: value" line starts a new item; indented "key: value"
        lines extend the current item.
        """
        items: list[dict[str, str]] = []
        current: dict[str, str] | None = None

        for line in text.splitlines():
            m = _LIST_ITEM_START_RE.match(line)
            if m:
                if current is not None:
                    items.append(current)
                current = {m.group(1): m.group(2).strip()}
            elif current is not None:
                m2 = _LIST_ITEM_CONT_RE.match(line)
                if m2:
                    current[m2.group(1)] = m2.group(2).strip()

        if current is not None:
            items.append(current)

        return items

    # --- Construction ---

    @classmethod
    def from_init_json(cls, data: dict, phase_name: str) -> "UATFile":
        """Construct from structured JSON input.

        Expects data with "tests" (each needing "name" and "expected"),
        "batches" (each needing "name" and "tests"), and optional "source";
        missing required keys raise KeyError. Batch 1 becomes the current
        batch and all tests start as [pending].
        """
        uat = cls()
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

        tests = data.get("tests", [])
        batches = data.get("batches", [])
        source = data.get("source", [])

        uat.frontmatter = {
            "status": "testing",
            "phase": phase_name,
            "source": source,
            "started": now,
            "updated": now,
            "current_batch": 1,
            "mocked_files": [],
            "pre_work_stash": None,
        }

        # Build tests
        for i, t in enumerate(tests, 1):
            uat.tests.append({
                "num": str(i),
                "name": t["name"],
                "expected": t["expected"],
                # Booleans are serialized lowercase ("true"/"false").
                "mock_required": str(t.get("mock_required", False)).lower(),
                "mock_type": t.get("mock_type") or "null",
                "result": "[pending]",
            })

        # Build batches
        for i, b in enumerate(batches, 1):
            # Test numbers are stored as a literal "[1, 2, 3]" string.
            test_nums = "[" + ", ".join(str(n) for n in b["tests"]) + "]"
            uat.batches.append({
                "num": str(i),
                "name": b["name"],
                "tests": test_nums,
                "status": "pending",
                "mock_type": b.get("mock_type") or "null",
            })

        # Set first batch as current
        if batches:
            first = batches[0]
            test_nums = "[" + ", ".join(str(n) for n in first["tests"]) + "]"
            uat.current_batch = {
                "batch": f"1 of {len(batches)}",
                "name": _ensure_quoted(first["name"]),
                "mock_type": first.get("mock_type") or "null",
                "tests": test_nums,
                "status": "pending",
            }

        uat.recalc_progress()
        return uat

    # --- Mutations ---

    def update_test(self, num: int, fields: dict[str, str]) -> None:
        """Update fields on test N; raises ValueError if N does not exist."""
        for t in self.tests:
            if t["num"] == str(num):
                t.update(fields)
                return
        raise ValueError(f"Test {num} not found")

    def update_batch(self, num: int, fields: dict[str, str]) -> None:
        """Update fields on batch N; raises ValueError if N does not exist."""
        for b in self.batches:
            if b["num"] == str(num):
                b.update(fields)
                return
        raise ValueError(f"Batch {num} not found")

    def update_session(self, fields: dict[str, str]) -> None:
        """Update frontmatter fields.

        Special cases: an empty string clears the field (to [] for
        mocked_files, None otherwise); mocked_files is split on commas;
        current_batch is coerced to int and mirrored into the Current
        Batch section.
        """
        for k, v in fields.items():
            if v == "":
                # Empty string means clear/null
                if k == "mocked_files":
                    self.frontmatter[k] = []
                else:
                    self.frontmatter[k] = None
            elif k == "mocked_files":
                self.frontmatter[k] = [f.strip() for f in v.split(",") if f.strip()]
            elif k == "current_batch":
                try:
                    batch_num = int(v)
                    self.frontmatter[k] = batch_num
                    self._sync_current_batch(batch_num)
                except ValueError:
                    # Non-numeric value: store as-is without syncing.
                    self.frontmatter[k] = v
            else:
                self.frontmatter[k] = v

    def _sync_current_batch(self, batch_num: int) -> None:
        """Sync Current Batch section when frontmatter current_batch changes.

        Silently does nothing when no batch with that number exists.
        """
        total = len(self.batches)
        for b in self.batches:
            if b["num"] == str(batch_num):
                name = b["name"]
                if not name.startswith('"'):
                    name = _ensure_quoted(name)
                self.current_batch = {
                    "batch": f"{batch_num} of {total}",
                    "name": name,
                    "mock_type": b.get("mock_type", "null"),
                    "tests": b.get("tests", "[]"),
                    "status": b.get("status", "pending"),
                }
                return

    def append_fix(self, fix_dict: dict) -> None:
        """Append to fixes. Update in-place if same test already has a fix.

        List values are rendered as "[a, b]" strings; the description
        field is force-quoted.
        """
        test_num = str(fix_dict.get("test", ""))
        converted: dict[str, str] = {}
        for k, v in fix_dict.items():
            if isinstance(v, list):
                converted[k] = "[" + ", ".join(str(x) for x in v) + "]"
            elif k == "description":
                converted[k] = _ensure_quoted(str(v))
            else:
                converted[k] = str(v)

        # Replace any existing fix for the same test number.
        for i, f in enumerate(self.fixes):
            if f.get("test") == test_num:
                self.fixes[i] = converted
                return
        self.fixes.append(converted)

    def append_assumption(self, assumption_dict: dict) -> None:
        """Append to assumptions, force-quoting name/expected/reason."""
        converted: dict[str, str] = {}
        for k, v in assumption_dict.items():
            s = str(v)
            if k in ("name", "expected", "reason"):
                s = _ensure_quoted(s)
            converted[k] = s
        self.assumptions.append(converted)

    # --- Progress ---

    def recalc_progress(self) -> None:
        """Derive all progress counters from test results.

        Counting rules: "[pending]" and "blocked" count as pending; an
        "issue" with fix_status "verified" counts as passed, while
        "investigating"/"applied" count as fixing. "tested" is everything
        that is not pending.
        """
        total = len(self.tests)
        pending = 0
        passed = 0
        issues = 0
        fixing = 0
        skipped = 0

        for t in self.tests:
            result = t.get("result", "[pending]")
            fix_status = t.get("fix_status", "")

            if result in ("[pending]", "blocked"):
                pending += 1
            elif result == "pass":
                passed += 1
            elif result == "issue":
                if fix_status == "verified":
                    # A verified fix upgrades the issue to a pass.
                    passed += 1
                elif fix_status in ("investigating", "applied"):
                    fixing += 1
                else:
                    issues += 1
            elif result == "skipped":
                skipped += 1

        tested = total - pending
        self.progress = {
            "total": str(total),
            "tested": str(tested),
            "passed": str(passed),
            "issues": str(issues),
            "fixing": str(fixing),
            "pending": str(pending),
            "skipped": str(skipped),
        }

    def progress_summary(self) -> str:
        """One-line summary for stdout, e.g. "3/5 (2 pass, 1 issue, 0 fixing, 0 skip)"."""
        p = self.progress
        return (
            f"{p.get('tested', '0')}/{p.get('total', '0')} "
            f"({p.get('passed', '0')} pass, {p.get('issues', '0')} issue, "
            f"{p.get('fixing', '0')} fixing, {p.get('skipped', '0')} skip)"
        )

    # --- Serialization ---

    def serialize(self) -> str:
        """Rebuild full file from internal state.

        Side effects: recalculates progress and stamps frontmatter
        "updated" with the current local time. Fields within each section
        are written in a fixed order; unknown fields are dropped.
        """
        self.recalc_progress()
        self.frontmatter["updated"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

        lines: list[str] = []

        # Frontmatter
        lines.append("---")
        fm_text = yaml.dump(
            self.frontmatter, default_flow_style=False, sort_keys=False,
        ).rstrip()
        lines.append(fm_text)
        lines.append("---")
        lines.append("")

        # Progress
        lines.append("## Progress")
        lines.append("")
        for k in ("total", "tested", "passed", "issues", "fixing", "pending", "skipped"):
            if k in self.progress:
                lines.append(f"{k}: {self.progress[k]}")
        lines.append("")

        # Current Batch
        lines.append("## Current Batch")
        lines.append("")
        for k in ("batch", "name", "mock_type", "tests", "status"):
            if k in self.current_batch:
                lines.append(f"{k}: {self.current_batch[k]}")
        lines.append("")

        # Tests
        lines.append("## Tests")
        lines.append("")
        test_field_order = (
            "expected", "mock_required", "mock_type", "result",
            "reported", "severity", "fix_status", "fix_commit",
            "retry_count", "reason",
        )
        for t in self.tests:
            lines.append(f"### {t['num']}. {t['name']}")
            for k in test_field_order:
                if k in t:
                    val = t[k]
                    if k in _QUOTED_FIELDS.get("tests", set()):
                        val = _ensure_quoted(val)
                    lines.append(f"{k}: {val}")
            lines.append("")

        # Fixes Applied
        lines.append("## Fixes Applied")
        lines.append("")
        fix_field_order = ("commit", "test", "description", "files")
        for fix in self.fixes:
            first = True
            for k in fix_field_order:
                if k in fix:
                    val = fix[k]
                    if k in _QUOTED_FIELDS.get("fixes", set()):
                        val = _ensure_quoted(val)
                    # "- " starts the list item; later fields are indented.
                    prefix = "- " if first else "  "
                    lines.append(f"{prefix}{k}: {val}")
                    first = False
        lines.append("")

        # Batches
        lines.append("## Batches")
        lines.append("")
        batch_field_order = ("tests", "status", "mock_type", "passed", "issues")
        for b in self.batches:
            lines.append(f"### Batch {b['num']}: {b['name']}")
            for k in batch_field_order:
                if k in b:
                    lines.append(f"{k}: {b[k]}")
            lines.append("")

        # Assumptions
        lines.append("## Assumptions")
        lines.append("")
        assumption_field_order = ("test", "name", "expected", "reason")
        for a in self.assumptions:
            first = True
            for k in assumption_field_order:
                if k in a:
                    val = a[k]
                    if k in _QUOTED_FIELDS.get("assumptions", set()):
                        val = _ensure_quoted(val)
                    prefix = "- " if first else "  "
                    lines.append(f"{prefix}{k}: {val}")
                    first = False
        lines.append("")

        return "\n".join(lines)
2575
+
2576
+
2577
+ # ===================================================================
2578
+ # Subcommand: uat-init
2579
+ # ===================================================================
2580
+
2581
+
2582
def cmd_uat_init(args: argparse.Namespace) -> None:
    """Create UAT.md from JSON stdin.

    Contract:
        Args: phase (str) — phase number
        Input: JSON on stdin with source, tests, batches
        Output: text — confirmation with path and counts
        Exit codes: 0 = success, 1 = invalid JSON
        Side effects: creates UAT.md file
    """
    phase = normalize_phase(args.phase)
    planning = find_planning_dir()

    raw = sys.stdin.read()
    try:
        payload = json.loads(raw)
    except json.JSONDecodeError as exc:
        print(f"Error: Invalid JSON input: {exc}", file=sys.stderr)
        sys.exit(1)

    target_dir = find_phase_dir(planning, phase)
    if target_dir is None:
        # No existing phase directory — create .planning/phases/<phase>/
        # (parents=True covers the phases/ level as well).
        target_dir = planning / "phases" / phase
        target_dir.mkdir(parents=True, exist_ok=True)

    phase_name = target_dir.name
    uat = UATFile.from_init_json(payload, phase_name)

    destination = target_dir / f"{phase_name}-UAT.md"
    destination.write_text(uat.serialize(), encoding="utf-8")

    print(
        f"Created {destination} with {len(uat.tests)} tests "
        f"in {len(uat.batches)} batches"
    )
2617
+
2618
+
2619
+ # ===================================================================
2620
+ # Subcommand: uat-update
2621
+ # ===================================================================
2622
+
2623
+
2624
def cmd_uat_update(args: argparse.Namespace) -> None:
    """Update UAT.md fields.

    Contract:
        Args: phase (str), mutually exclusive target flag, key=value pairs or JSON stdin
        Output: text — update label + progress summary
        Exit codes: 0 = success, 1 = file not found or invalid input
        Side effects: writes UAT.md
    """

    def _read_stdin_json() -> dict:
        # Shared by --append-fix / --append-assumption: both read one JSON
        # object from stdin and must fail with exit code 1 on bad input.
        try:
            return json.loads(sys.stdin.read())
        except json.JSONDecodeError as e:
            print(f"Error: Invalid JSON input: {e}", file=sys.stderr)
            sys.exit(1)

    def _describe(updates: dict[str, str]) -> str:
        # Human-readable "k=v, k=v" summary for the confirmation label.
        return ", ".join(f"{k}={v}" for k, v in updates.items())

    phase = normalize_phase(args.phase)
    planning = find_planning_dir()

    phase_dir = find_phase_dir(planning, phase)
    if phase_dir is None:
        print(f"Error: Phase directory not found for {phase}", file=sys.stderr)
        sys.exit(1)

    uat_path = phase_dir / f"{phase_dir.name}-UAT.md"
    if not uat_path.is_file():
        print(f"Error: UAT file not found: {uat_path}", file=sys.stderr)
        sys.exit(1)

    uat = UATFile.parse(uat_path.read_text(encoding="utf-8"))

    # Parse key=value pairs from remaining args. Malformed tokens used to be
    # silently dropped; warn on stderr so typos are visible to the caller.
    fields: dict[str, str] = {}
    for kv in (args.fields or []):
        if "=" in kv:
            k, v = kv.split("=", 1)
            fields[k] = v
        else:
            print(
                f"Warning: ignoring malformed field (expected key=value): {kv}",
                file=sys.stderr,
            )

    label = ""

    if args.test is not None:
        uat.update_test(args.test, fields)
        label = f"Updated test {args.test}: {_describe(fields)}"
    elif args.batch is not None:
        uat.update_batch(args.batch, fields)
        label = f"Updated batch {args.batch}: {_describe(fields)}"
    elif args.session:
        uat.update_session(fields)
        # Plain string here — the original used an f-string with no placeholders.
        label = "Updated session: " + _describe(fields)
    elif args.append_fix:
        fix_data = _read_stdin_json()
        uat.append_fix(fix_data)
        label = f"Appended fix for test {fix_data.get('test', '?')}"
    elif args.append_assumption:
        assumption_data = _read_stdin_json()
        uat.append_assumption(assumption_data)
        label = f"Appended assumption for test {assumption_data.get('test', '?')}"

    uat_path.write_text(uat.serialize(), encoding="utf-8")
    print(f"{label} | Progress: {uat.progress_summary()}")
2685
+
2686
+
2687
+ # ===================================================================
2688
+ # Subcommand: uat-status
2689
+ # ===================================================================
2690
+
2691
+
2692
def cmd_uat_status(args: argparse.Namespace) -> None:
    """Output UAT status as JSON.

    Contract:
        Args: phase (str) — phase number
        Output: JSON — compact status for LLM resume
        Exit codes: 0 = success, 1 = file not found
        Side effects: read-only
    """
    phase = normalize_phase(args.phase)
    planning = find_planning_dir()

    phase_dir = find_phase_dir(planning, phase)
    if phase_dir is None:
        print(f"Error: Phase directory not found for {phase}", file=sys.stderr)
        sys.exit(1)

    uat_path = phase_dir / f"{phase_dir.name}-UAT.md"
    if not uat_path.is_file():
        print(f"Error: UAT file not found: {uat_path}", file=sys.stderr)
        sys.exit(1)

    uat = UATFile.parse(uat_path.read_text(encoding="utf-8"))
    uat.recalc_progress()

    # Bucket tests: in-flight fixes (with details), plus bare numbers for
    # pending and blocked.
    fixing: list[dict] = []
    pending: list[int] = []
    blocked: list[int] = []

    for test in uat.tests:
        number = int(test["num"])
        outcome = test.get("result", "[pending]")
        fix_state = test.get("fix_status", "")

        if fix_state in ("investigating", "applied"):
            fixing.append({
                "num": number,
                "name": test["name"],
                "fix_status": fix_state,
                "fix_commit": test.get("fix_commit", ""),
                "retry_count": int(test.get("retry_count", "0")),
            })
        if outcome == "[pending]":
            pending.append(number)
        elif outcome == "blocked":
            blocked.append(number)

    fm = uat.frontmatter
    payload = {
        "status": fm.get("status", ""),
        "current_batch": fm.get("current_batch"),
        "total_batches": len(uat.batches),
        "progress": {k: int(v) for k, v in uat.progress.items()},
        "mocked_files": fm.get("mocked_files", []),
        "fixing_tests": fixing,
        "pending_tests": pending,
        "blocked_tests": blocked,
        "pre_work_stash": fm.get("pre_work_stash"),
        "path": str(uat_path),
    }

    json.dump(payload, sys.stdout, cls=_SafeEncoder)
    sys.stdout.write("\n")
2754
+
2755
+
2756
+ # ===================================================================
2757
+ # Argument parser setup
2758
+ # ===================================================================
2759
+
2760
+
2761
def build_parser() -> argparse.ArgumentParser:
    """Build the top-level ``ms-tools`` argument parser with all subcommands.

    Each subcommand binds its handler via ``set_defaults(func=...)``;
    ``main()`` dispatches by calling ``args.func(args)``. The local ``p``
    is deliberately reused for each subparser in turn.
    """
    parser = argparse.ArgumentParser(
        prog="ms-tools",
        description="Mindsystem CLI tools — unified subcommands for mechanical operations.",
    )
    subparsers = parser.add_subparsers(dest="command", required=True)

    # --- update-state ---
    p = subparsers.add_parser("update-state", help="Update STATE.md plan progress")
    p.add_argument("completed", type=int, help="Number of completed plans")
    p.add_argument("total", type=int, help="Total number of plans")
    p.set_defaults(func=cmd_update_state)

    # --- set-last-command ---
    p = subparsers.add_parser("set-last-command", help="Update STATE.md Last Command with timestamp")
    p.add_argument("command_string", help='Command that was run (e.g. "ms:plan-phase 10")')
    p.set_defaults(func=cmd_set_last_command)

    # --- validate-execution-order ---
    p = subparsers.add_parser("validate-execution-order", help="Validate EXECUTION-ORDER.md against plan files")
    p.add_argument("phase_dir", help="Phase directory path")
    p.set_defaults(func=cmd_validate_execution_order)

    # --- doctor-scan ---
    p = subparsers.add_parser("doctor-scan", help="Diagnostic scan of .planning/ tree")
    p.set_defaults(func=cmd_doctor_scan)

    # --- gather-milestone-stats ---
    p = subparsers.add_parser("gather-milestone-stats", help="Gather milestone readiness and git statistics")
    p.add_argument("start_phase", type=int, help="Start phase number")
    p.add_argument("end_phase", type=int, help="End phase number")
    p.set_defaults(func=cmd_gather_milestone_stats)

    # --- generate-phase-patch ---
    p = subparsers.add_parser("generate-phase-patch", help="Generate patch from phase commits")
    p.add_argument("phase", help="Phase number (e.g., 04 or 4)")
    p.add_argument("--suffix", default="", help="Filter commits and customize output filename")
    p.set_defaults(func=cmd_generate_phase_patch)

    # --- generate-adhoc-patch ---
    p = subparsers.add_parser("generate-adhoc-patch", help="Generate patch from an adhoc commit")
    p.add_argument("commit", help="Commit hash")
    p.add_argument("output", help="Output path for the patch file")
    p.set_defaults(func=cmd_generate_adhoc_patch)

    # --- archive-milestone-phases ---
    p = subparsers.add_parser("archive-milestone-phases", help="Archive phase dirs to milestone directory")
    p.add_argument("start_phase", type=int, help="Start phase number")
    p.add_argument("end_phase", type=int, help="End phase number")
    p.add_argument("milestone", help="Milestone slug (e.g., mvp, push-notifications)")
    p.set_defaults(func=cmd_archive_milestone_phases)

    # --- archive-milestone-files ---
    p = subparsers.add_parser("archive-milestone-files", help="Archive optional milestone files")
    p.add_argument("milestone", help="Milestone slug (e.g., mvp, push-notifications)")
    p.set_defaults(func=cmd_archive_milestone_files)

    # --- scan-artifact-subsystems ---
    p = subparsers.add_parser("scan-artifact-subsystems", help="Scan artifacts for subsystem values")
    p.add_argument("--values-only", action="store_true", help="Print only subsystem values")
    p.set_defaults(func=cmd_scan_artifact_subsystems)

    # --- scan-milestone-naming ---
    p = subparsers.add_parser("scan-milestone-naming", help="Scan for version-based milestone naming needing migration")
    p.set_defaults(func=cmd_scan_milestone_naming)

    # --- scan-planning-context ---
    p = subparsers.add_parser("scan-planning-context", help="Scan .planning/ and score relevance for plan-phase")
    p.add_argument("--phase", required=True, help='Phase number (e.g., "05" or "5" or "2.1")')
    p.add_argument("--phase-name", default="", help="Phase name for keyword matching")
    # Repeatable flag accumulates into args.subsystems (note dest override).
    p.add_argument("--subsystem", action="append", default=[], dest="subsystems", help="Subsystem(s) for matching (repeatable)")
    p.add_argument("--keywords", default="", help="Comma-separated keywords for tag matching")
    p.add_argument("--json", action="store_true", help="Output raw JSON (default: formatted markdown)")
    p.set_defaults(func=cmd_scan_planning_context)

    # --- find-phase ---
    p = subparsers.add_parser("find-phase", help="Find phase directory and validate against roadmap")
    p.add_argument("phase", help="Phase number (e.g., 5, 05, 2.1)")
    p.set_defaults(func=cmd_find_phase)

    # --- list-artifacts ---
    p = subparsers.add_parser("list-artifacts", help="Count artifacts per phase")
    p.add_argument("phase", help="Phase number")
    p.set_defaults(func=cmd_list_artifacts)

    # --- check-artifact ---
    p = subparsers.add_parser("check-artifact", help="Check if specific artifact exists")
    p.add_argument("phase", help="Phase number")
    p.add_argument("type", help="Artifact type (CONTEXT, DESIGN, RESEARCH, UAT, VERIFICATION, PLAN, SUMMARY, EXECUTION-ORDER)")
    p.set_defaults(func=cmd_check_artifact)

    # --- uat-init ---
    p = subparsers.add_parser("uat-init", help="Create UAT.md from JSON stdin")
    p.add_argument("phase", help="Phase number (e.g., 5, 05, 2.1)")
    p.set_defaults(func=cmd_uat_init)

    # --- uat-update ---
    p = subparsers.add_parser("uat-update", help="Update UAT.md fields")
    p.add_argument("phase", help="Phase number")
    # Exactly one update target must be chosen; argparse enforces this.
    group = p.add_mutually_exclusive_group(required=True)
    group.add_argument("--test", type=int, help="Test number to update")
    group.add_argument("--batch", type=int, help="Batch number to update")
    group.add_argument("--session", action="store_true", help="Update session/frontmatter fields")
    group.add_argument("--append-fix", action="store_true", help="Append fix (JSON from stdin)")
    group.add_argument("--append-assumption", action="store_true", help="Append assumption (JSON from stdin)")
    p.add_argument("fields", nargs="*", help="key=value pairs")
    p.set_defaults(func=cmd_uat_update)

    # --- uat-status ---
    p = subparsers.add_parser("uat-status", help="Output UAT status as JSON")
    p.add_argument("phase", help="Phase number")
    p.set_defaults(func=cmd_uat_status)

    return parser
2875
+
2876
+
2877
def main() -> None:
    """CLI entry point: parse arguments and dispatch to the chosen subcommand.

    Every subparser registers its handler through ``set_defaults(func=...)``,
    so ``args.func`` is always present here.
    """
    args = build_parser().parse_args()
    args.func(args)
2881
+
2882
+
2883
+ if __name__ == "__main__":
2884
+ main()