mindsystem-cc 3.14.0 → 3.16.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/agents/ms-codebase-researcher.md +105 -0
- package/agents/ms-consolidator.md +137 -286
- package/agents/ms-debugger.md +1 -0
- package/agents/ms-designer.md +1 -0
- package/agents/ms-executor.md +2 -1
- package/agents/ms-flutter-code-quality.md +1 -1
- package/agents/ms-flutter-reviewer.md +1 -0
- package/agents/ms-flutter-simplifier.md +1 -1
- package/agents/ms-integration-checker.md +1 -0
- package/agents/ms-plan-checker.md +17 -327
- package/agents/ms-researcher.md +25 -343
- package/agents/ms-roadmapper.md +10 -75
- package/agents/ms-verifier.md +33 -309
- package/agents/ms-verify-fixer.md +1 -0
- package/commands/ms/check-phase.md +24 -55
- package/commands/ms/complete-milestone.md +6 -25
- package/commands/ms/create-roadmap.md +3 -15
- package/commands/ms/design-phase.md +52 -15
- package/commands/ms/discuss-phase.md +7 -9
- package/commands/ms/doctor.md +224 -0
- package/commands/ms/execute-phase.md +22 -12
- package/commands/ms/help.md +11 -0
- package/commands/ms/new-milestone.md +3 -3
- package/commands/ms/plan-phase.md +1 -1
- package/commands/ms/research-phase.md +249 -85
- package/commands/ms/verify-work.md +1 -0
- package/mindsystem/references/plan-risk-assessment.md +1 -2
- package/mindsystem/templates/context.md +1 -11
- package/mindsystem/templates/discovery.md +2 -3
- package/mindsystem/templates/knowledge.md +99 -0
- package/mindsystem/templates/requirements.md +3 -61
- package/mindsystem/templates/research-comparison-output.md +50 -0
- package/mindsystem/templates/research-feasibility-output.md +43 -0
- package/mindsystem/templates/research-project-output.md +81 -0
- package/mindsystem/templates/research-subagent-prompt.md +164 -48
- package/mindsystem/templates/roadmap-milestone.md +67 -0
- package/mindsystem/templates/roadmap.md +2 -66
- package/mindsystem/workflows/complete-milestone.md +23 -140
- package/mindsystem/workflows/define-requirements.md +4 -8
- package/mindsystem/workflows/discuss-phase.md +25 -8
- package/mindsystem/workflows/execute-phase.md +34 -0
- package/mindsystem/workflows/execute-plan.md +8 -0
- package/mindsystem/workflows/plan-phase.md +40 -104
- package/mindsystem/workflows/verify-work.md +20 -0
- package/package.json +1 -1
- package/scripts/cleanup-phase-artifacts.sh +68 -0
- package/scripts/scan-artifact-subsystems.sh +55 -0
- package/scripts/scan-planning-context.py +689 -0
- package/mindsystem/templates/decisions.md +0 -145
- package/mindsystem/templates/learnings.md +0 -150
|
@@ -0,0 +1,689 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# /// script
|
|
3
|
+
# requires-python = ">=3.10"
|
|
4
|
+
# dependencies = ["pyyaml"]
|
|
5
|
+
# ///
|
|
6
|
+
"""Scan .planning/ artifacts and score relevance for plan-phase context assembly.
|
|
7
|
+
|
|
8
|
+
Deterministic collection and scoring of planning artifacts so the LLM
|
|
9
|
+
receives structured JSON and focuses on interpretation and judgment.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import argparse
|
|
13
|
+
import datetime
|
|
14
|
+
import json
|
|
15
|
+
import re
|
|
16
|
+
import subprocess
|
|
17
|
+
import sys
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
import yaml
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class _SafeEncoder(json.JSONEncoder):
|
|
25
|
+
"""Handle YAML types that json.dump can't serialize (date, datetime)."""
|
|
26
|
+
|
|
27
|
+
def default(self, o: object) -> Any:
|
|
28
|
+
if isinstance(o, (datetime.date, datetime.datetime)):
|
|
29
|
+
return o.isoformat()
|
|
30
|
+
return super().default(o)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# ---------------------------------------------------------------------------
|
|
34
|
+
# Git root / .planning discovery
|
|
35
|
+
# ---------------------------------------------------------------------------
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def find_planning_dir() -> Path | None:
    """Locate the .planning/ directory at the git repository root.

    Returns the directory Path, or None when git is unavailable, the cwd is
    not inside a repository, or the repo has no .planning/ directory.
    """
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            capture_output=True,
            text=True,
            check=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None
    candidate = Path(proc.stdout.strip()) / ".planning"
    return candidate if candidate.is_dir() else None
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
# ---------------------------------------------------------------------------
|
|
55
|
+
# YAML frontmatter parsing
|
|
56
|
+
# ---------------------------------------------------------------------------
|
|
57
|
+
|
|
58
|
+
_FRONTMATTER_RE = re.compile(r"\A---\s*\n(.*?\n)---\s*\n", re.DOTALL)


def parse_frontmatter(path: Path) -> dict[str, Any] | None:
    """Parse the YAML frontmatter block at the top of a markdown file.

    Returns the parsed mapping ({} when the block is empty), or None when the
    file is unreadable, has no frontmatter, or contains invalid YAML.
    """
    try:
        text = path.read_text(encoding="utf-8", errors="replace")
    except OSError:
        return None

    m = _FRONTMATTER_RE.match(text)
    if m is None:
        return None

    try:
        parsed = yaml.safe_load(m.group(1))
    except yaml.YAMLError:
        return None
    return parsed or {}
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def has_readiness_section(path: Path) -> bool:
    """Report whether *path* contains a non-empty '## Next Phase Readiness' section."""
    try:
        text = path.read_text(encoding="utf-8", errors="replace")
    except OSError:
        return False

    heading = "## Next Phase Readiness"
    pos = text.find(heading)
    if pos < 0:
        return False

    # The section body runs from the heading to the next '## ' heading (or EOF).
    body = text[pos + len(heading):]
    boundary = re.search(r"\n## ", body)
    if boundary:
        body = body[: boundary.start()]

    # Whitespace or bare dashes count as an empty placeholder, not content.
    return bool(body.strip().strip("-").strip())
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
# ---------------------------------------------------------------------------
|
|
102
|
+
# Phase number helpers
|
|
103
|
+
# ---------------------------------------------------------------------------
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def normalize_phase(phase_str: str) -> str:
    """Zero-pad the integer part of a phase id: '5' -> '05', '2.1' -> '02.1'.

    Input that does not look like a phase number is returned unchanged.
    """
    m = re.match(r"^(\d+)(?:\.(\d+))?$", phase_str)
    if m is None:
        return phase_str
    whole, frac = m.groups()
    padded = f"{int(whole):02d}"
    return f"{padded}.{frac}" if frac else padded
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def extract_phase_number(phase_str: str) -> int | None:
|
|
119
|
+
"""Extract integer phase number from phase string like '05-auth' or '05'."""
|
|
120
|
+
match = re.match(r"^(\d+)", str(phase_str))
|
|
121
|
+
return int(match.group(1)) if match else None
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def is_adjacent_phase(target_num: int, candidate_num: int) -> bool:
    """True when candidate is one or two phases before target (N-1 or N-2)."""
    return target_num - candidate_num in (1, 2)
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
# ---------------------------------------------------------------------------
|
|
131
|
+
# Relevance scoring
|
|
132
|
+
# ---------------------------------------------------------------------------
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def score_summary(
    fm: dict[str, Any],
    target_phase: str,
    target_num: int | None,
    subsystems: list[str],
    keywords: list[str],
) -> tuple[str, list[str]]:
    """Score one SUMMARY's frontmatter for relevance to the target phase.

    HIGH signals: target named in `affects`, shared subsystem, or target
    named in `requires`. MEDIUM signals: tag/keyword overlap or phase
    adjacency (N-1 / N-2). Returns (relevance, match_reasons) where
    relevance is 'HIGH', 'MEDIUM', or 'LOW'.
    """
    reasons: list[str] = []
    high = False
    medium = False

    # HIGH: the target phase appears in this summary's affects list.
    affects = fm.get("affects", []) or []
    if isinstance(affects, str):
        affects = [affects]
    for item in affects:
        if target_phase in str(item):
            reasons.append(f"affects contains '{target_phase}'")
            high = True

    # HIGH: summary belongs to one of the target's subsystems.
    subsystem = fm.get("subsystem", "")
    if subsystem and subsystem in subsystems:
        reasons.append(f"same subsystem '{subsystem}'")
        high = True

    # HIGH: the target phase appears in a requires entry (direct hop only;
    # transitive promotion happens at the caller level).
    requires = fm.get("requires", []) or []
    if isinstance(requires, list):
        for req in requires:
            req_phase = str(req.get("phase", "")) if isinstance(req, dict) else str(req)
            if target_phase in req_phase:
                reasons.append(f"requires references '{target_phase}'")
                high = True

    # MEDIUM: tags overlap the supplied keywords (case-insensitive).
    tags = fm.get("tags", []) or []
    if isinstance(tags, str):
        tags = [tags]
    shared = {str(t).lower() for t in tags} & {k.lower() for k in keywords}
    if shared:
        reasons.append(f"overlapping tags: {sorted(shared)}")
        medium = True

    # MEDIUM: summary comes from one of the two phases just before the target.
    candidate_num = extract_phase_number(str(fm.get("phase", "")))
    if target_num is not None and candidate_num is not None:
        if is_adjacent_phase(target_num, candidate_num):
            reasons.append(f"adjacent phase (N-{target_num - candidate_num})")
            medium = True

    if high:
        return "HIGH", reasons
    return ("MEDIUM", reasons) if medium else ("LOW", reasons)
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
# ---------------------------------------------------------------------------
|
|
208
|
+
# Transitive requires resolution
|
|
209
|
+
# ---------------------------------------------------------------------------
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def resolve_transitive_requires(
    summaries: list[dict[str, Any]],
    target_phase: str,
) -> set[str]:
    """Collect phases the target phase depends on, one requires-hop deep.

    Every summary whose `affects` list mentions the target contributes its
    own phase name plus every phase listed in its `requires` (a single hop;
    full transitive closure is left to the consumer of this output).

    Note: the original implementation also built a provides-index that was
    never read — that dead code has been removed; behavior is unchanged.

    Returns a set of phase-name strings.
    """
    required: set[str] = set()
    for summary in summaries:
        fm = summary.get("frontmatter", {})

        affects = fm.get("affects", []) or []
        if isinstance(affects, str):
            affects = [affects]
        if not any(target_phase in str(a) for a in affects):
            continue

        # This summary feeds the target: include it and its direct requires.
        required.add(str(fm.get("phase", "")))
        requires = fm.get("requires", []) or []
        if isinstance(requires, list):
            for req in requires:
                req_phase = str(req.get("phase", "")) if isinstance(req, dict) else str(req)
                if req_phase:
                    required.add(req_phase)

    return required
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
# ---------------------------------------------------------------------------
|
|
255
|
+
# Directory scanners
|
|
256
|
+
# ---------------------------------------------------------------------------
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def scan_summaries(
    planning: Path,
    target_phase: str,
    target_num: int | None,
    subsystems: list[str],
    keywords: list[str],
    parse_errors: list[dict[str, str]],
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
    """Scan phase SUMMARY files under .planning/phases/ and score relevance.

    Files without valid frontmatter are recorded in *parse_errors* and
    skipped. Returns (results, source_info) where source_info carries the
    directory, scan count, and skip reason (or None) for the JSON output.
    """
    phases_dir = planning / "phases"
    source_info: dict[str, Any] = {
        "dir": str(phases_dir),
        "scanned": 0,
        "skipped": None,
    }

    if not phases_dir.is_dir():
        source_info["skipped"] = "directory not found"
        return [], source_info

    summary_files = sorted(phases_dir.glob("*/*-SUMMARY.md"))
    if not summary_files:
        source_info["skipped"] = "no SUMMARY.md files found"
        return [], source_info

    results: list[dict[str, Any]] = []
    for path in summary_files:
        source_info["scanned"] += 1
        fm = parse_frontmatter(path)
        if fm is None:
            parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
            continue

        relevance, match_reasons = score_summary(
            fm, target_phase, target_num, subsystems, keywords
        )

        results.append(
            {
                "path": str(path),
                "frontmatter": fm,
                "relevance": relevance,
                "match_reasons": match_reasons,
                "has_readiness_warnings": has_readiness_section(path),
            }
        )

    # Promote anything in the (one-hop) requires chain to HIGH.
    # Fix: the appended reason was an f-string with no placeholders (F541);
    # it is now a plain string literal with identical runtime text.
    transitive = resolve_transitive_requires(results, target_phase)
    for entry in results:
        phase_name = str(entry["frontmatter"].get("phase", ""))
        if phase_name in transitive and entry["relevance"] != "HIGH":
            entry["relevance"] = "HIGH"
            entry["match_reasons"].append("in transitive requires chain")

    return results, source_info
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
def scan_debug_docs(
    planning: Path,
    parse_errors: list[dict[str, str]],
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
    """Collect learnings from resolved debug docs in .planning/debug/resolved/.

    Files without valid frontmatter are recorded in *parse_errors*.
    Returns (results, source_info) for the JSON output.
    """
    resolved_dir = planning / "debug" / "resolved"
    info: dict[str, Any] = {
        "dir": str(resolved_dir),
        "scanned": 0,
        "skipped": None,
    }

    if not resolved_dir.is_dir():
        info["skipped"] = "directory not found"
        return [], info

    docs: list[dict[str, Any]] = []
    for path in sorted(resolved_dir.glob("*.md")):
        info["scanned"] += 1
        fm = parse_frontmatter(path)
        if fm is None:
            parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
            continue

        docs.append(
            {
                "path": str(path),
                "subsystem": fm.get("subsystem", ""),
                "root_cause": fm.get("root_cause", ""),
                "resolution": fm.get("resolution", ""),
                "tags": fm.get("tags", []) or [],
                "phase": fm.get("phase", ""),
            }
        )

    return docs, info
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
def scan_adhoc_summaries(
    planning: Path,
    parse_errors: list[dict[str, str]],
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
    """Collect learnings from *-SUMMARY.md files under .planning/adhoc/.

    Files without valid frontmatter are recorded in *parse_errors*.
    Returns (results, source_info) for the JSON output.
    """
    adhoc_dir = planning / "adhoc"
    info: dict[str, Any] = {
        "dir": str(adhoc_dir),
        "scanned": 0,
        "skipped": None,
    }

    if not adhoc_dir.is_dir():
        info["skipped"] = "directory not found"
        return [], info

    summary_files = sorted(adhoc_dir.glob("*-SUMMARY.md"))
    if not summary_files:
        info["skipped"] = "no adhoc SUMMARY.md files found"
        return [], info

    entries: list[dict[str, Any]] = []
    for path in summary_files:
        info["scanned"] += 1
        fm = parse_frontmatter(path)
        if fm is None:
            parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
            continue

        learnings = fm.get("learnings", []) or []
        if isinstance(learnings, str):
            learnings = [learnings]

        entries.append(
            {
                "path": str(path),
                "subsystem": fm.get("subsystem", ""),
                "learnings": learnings,
                "related_phase": fm.get("related_phase", ""),
                "tags": fm.get("tags", []) or [],
            }
        )

    return entries, info
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def scan_todos(
    planning: Path,
    subdir: str,
    parse_errors: list[dict[str, str]],
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
    """Collect todo metadata from .planning/todos/<subdir>/ ('done' or 'pending').

    Files without valid frontmatter are recorded in *parse_errors*.
    Returns (results, source_info) for the JSON output.
    """
    todo_dir = planning / "todos" / subdir
    info: dict[str, Any] = {
        "dir": str(todo_dir),
        "scanned": 0,
        "skipped": None,
    }

    if not todo_dir.is_dir():
        info["skipped"] = "directory not found"
        return [], info

    md_files = sorted(todo_dir.glob("*.md"))
    if not md_files:
        info["skipped"] = f"no .md files in {subdir}/"
        return [], info

    entries: list[dict[str, Any]] = []
    for path in md_files:
        info["scanned"] += 1
        fm = parse_frontmatter(path)
        if fm is None:
            parse_errors.append({"path": str(path), "error": "no valid frontmatter"})
            continue

        entries.append(
            {
                "path": str(path),
                # Fall back to the filename stem when the todo has no title.
                "title": fm.get("title", path.stem),
                "subsystem": fm.get("subsystem", ""),
                "priority": fm.get("priority", ""),
                "phase_origin": fm.get("phase_origin", ""),
            }
        )

    return entries, info
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
def scan_knowledge_files(
    planning: Path,
    subsystems: list[str],
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
    """List .planning/knowledge/*.md files, flagging subsystem matches.

    A knowledge file's stem is its subsystem identifier; `matched` is True
    when it equals one of *subsystems* (case-insensitive).
    Returns (results, source_info) for the JSON output.
    """
    knowledge_dir = planning / "knowledge"
    info: dict[str, Any] = {
        "dir": str(knowledge_dir),
        "scanned": 0,
        "skipped": None,
    }

    if not knowledge_dir.is_dir():
        info["skipped"] = "directory not found"
        return [], info

    md_files = sorted(knowledge_dir.glob("*.md"))
    if not md_files:
        info["skipped"] = "no .md files in knowledge/"
        return [], info

    wanted = {s.lower() for s in subsystems}
    entries: list[dict[str, Any]] = []
    for path in md_files:
        info["scanned"] += 1
        entries.append(
            {
                "path": str(path),
                "subsystem": path.stem,
                "matched": path.stem.lower() in wanted,
            }
        )

    return entries, info
|
|
484
|
+
|
|
485
|
+
|
|
486
|
+
# ---------------------------------------------------------------------------
|
|
487
|
+
# Aggregation
|
|
488
|
+
# ---------------------------------------------------------------------------
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
def aggregate_from_summaries(
    summaries: list[dict[str, Any]],
) -> dict[str, list[str]]:
    """Aggregate tech stack, patterns, key files, and decisions from HIGH/MEDIUM summaries.

    LOW-relevance entries are ignored. List outputs are deduplicated;
    key_decisions preserve first-seen order, the rest are sorted.
    """

    def _listify(value: Any) -> list[Any]:
        # Frontmatter fields may be a scalar string or a list; normalize.
        return [value] if isinstance(value, str) else value

    tech_added: list[str] = []
    patterns: list[str] = []
    key_files_created: list[str] = []
    key_files_modified: list[str] = []
    key_decisions: list[str] = []

    for entry in summaries:
        if entry["relevance"] == "LOW":
            continue

        fm = entry["frontmatter"]

        # tech-stack.added / tech-stack.patterns
        ts = fm.get("tech-stack", {}) or {}
        if isinstance(ts, dict):
            tech_added.extend(str(a) for a in _listify(ts.get("added", []) or []))
            patterns.extend(str(p) for p in _listify(ts.get("patterns", []) or []))

        # patterns-established
        patterns.extend(str(p) for p in _listify(fm.get("patterns-established", []) or []))

        # key-files.created / key-files.modified
        kf = fm.get("key-files", {}) or {}
        if isinstance(kf, dict):
            key_files_created.extend(str(f) for f in _listify(kf.get("created", []) or []))
            key_files_modified.extend(str(f) for f in _listify(kf.get("modified", []) or []))

        # key-decisions
        key_decisions.extend(str(d) for d in _listify(fm.get("key-decisions", []) or []))

    return {
        "tech_stack_added": sorted(set(tech_added)),
        "patterns_established": sorted(set(patterns)),
        "key_files_created": sorted(set(key_files_created)),
        "key_files_modified": sorted(set(key_files_modified)),
        "key_decisions": list(dict.fromkeys(key_decisions)),  # dedupe, keep order
    }
|
|
553
|
+
|
|
554
|
+
|
|
555
|
+
# ---------------------------------------------------------------------------
|
|
556
|
+
# Main
|
|
557
|
+
# ---------------------------------------------------------------------------
|
|
558
|
+
|
|
559
|
+
|
|
560
|
+
def build_parser() -> argparse.ArgumentParser:
    """Construct the CLI argument parser for the scanner."""
    parser = argparse.ArgumentParser(
        description="Scan .planning/ artifacts and score relevance for plan-phase context assembly.",
    )
    parser.add_argument(
        "--phase", required=True, help='Phase number (e.g., "05" or "5" or "2.1")'
    )
    parser.add_argument(
        "--phase-name", default="", help="Phase name for keyword matching"
    )
    parser.add_argument(
        "--subsystem",
        action="append",
        default=[],
        dest="subsystems",
        help="Subsystem(s) for matching (repeatable)",
    )
    parser.add_argument(
        "--keywords", default="", help="Comma-separated keywords for tag matching"
    )
    return parser
|
|
587
|
+
|
|
588
|
+
|
|
589
|
+
def main() -> None:
    """CLI entry point: scan .planning/ sources and emit a JSON report on stdout.

    When no .planning/ directory exists, still emits valid JSON with every
    source marked skipped so downstream consumers need no special case.
    """
    parser = build_parser()
    args = parser.parse_args()

    phase = normalize_phase(args.phase)
    phase_name = args.phase_name.strip()
    subsystems = [s for s in args.subsystems if s]
    keywords = [k.strip() for k in args.keywords.split(",") if k.strip()]

    # Words of 3+ chars from the phase name double as matching keywords.
    if phase_name:
        keywords.extend(w for w in re.split(r"[-_\s]+", phase_name) if len(w) > 2)

    target_num = extract_phase_number(phase)

    # Shared "target" section — previously duplicated verbatim in both the
    # no-planning and normal output paths.
    target = {
        "phase": phase,
        "phase_name": phase_name,
        "subsystems": subsystems,
        "keywords": keywords,
    }

    planning = find_planning_dir()
    if planning is None:
        # One skipped-source template instead of six hand-copied literals;
        # dict(skipped) keeps each entry an independent object.
        skipped = {"dir": "", "scanned": 0, "skipped": ".planning/ not found"}
        output: dict[str, Any] = {
            "success": True,
            "target": target,
            "sources": {
                "summaries": dict(skipped),
                "debug_docs": dict(skipped),
                "adhoc_summaries": dict(skipped),
                "completed_todos": dict(skipped),
                "pending_todos": dict(skipped),
                "knowledge_files": dict(skipped),
                "parse_errors": [],
            },
            "summaries": [],
            "debug_learnings": [],
            "adhoc_learnings": [],
            "completed_todos": [],
            "pending_todos": [],
            "knowledge_files": [],
            "aggregated": {
                "tech_stack_added": [],
                "patterns_established": [],
                "key_files_created": [],
                "key_files_modified": [],
                "key_decisions": [],
            },
        }
        json.dump(output, sys.stdout, indent=2, cls=_SafeEncoder)
        sys.stdout.write("\n")
        return

    parse_errors: list[dict[str, str]] = []

    # Scan all sources.
    summaries, summaries_src = scan_summaries(
        planning, phase, target_num, subsystems, keywords, parse_errors
    )
    debug_learnings, debug_src = scan_debug_docs(planning, parse_errors)
    adhoc_learnings, adhoc_src = scan_adhoc_summaries(planning, parse_errors)
    completed_todos, completed_src = scan_todos(planning, "done", parse_errors)
    pending_todos, pending_src = scan_todos(planning, "pending", parse_errors)
    knowledge_files, knowledge_src = scan_knowledge_files(planning, subsystems)

    # Aggregate from HIGH+MEDIUM summaries.
    aggregated = aggregate_from_summaries(summaries)

    output = {
        "success": True,
        "target": target,
        "sources": {
            "summaries": summaries_src,
            "debug_docs": debug_src,
            "adhoc_summaries": adhoc_src,
            "completed_todos": completed_src,
            "pending_todos": pending_src,
            "knowledge_files": knowledge_src,
            "parse_errors": parse_errors,
        },
        "summaries": summaries,
        "debug_learnings": debug_learnings,
        "adhoc_learnings": adhoc_learnings,
        "completed_todos": completed_todos,
        "pending_todos": pending_todos,
        "knowledge_files": knowledge_files,
        "aggregated": aggregated,
    }

    json.dump(output, sys.stdout, indent=2, cls=_SafeEncoder)
    sys.stdout.write("\n")
|
|
686
|
+
|
|
687
|
+
|
|
688
|
+
# Script entry point when run directly (e.g. via `uv run` per the PEP 723 header).
if __name__ == "__main__":
    main()
|