prizmkit 1.1.6 → 1.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63) hide show
  1. package/bundled/VERSION.json +3 -3
  2. package/bundled/dev-pipeline/README.md +65 -65
  3. package/bundled/dev-pipeline/assets/feature-list-example.json +2 -2
  4. package/bundled/dev-pipeline/launch-bugfix-daemon.sh +11 -10
  5. package/bundled/dev-pipeline/launch-feature-daemon.sh +12 -11
  6. package/bundled/dev-pipeline/launch-refactor-daemon.sh +11 -10
  7. package/bundled/dev-pipeline/reset-bug.sh +305 -0
  8. package/bundled/dev-pipeline/reset-feature.sh +9 -8
  9. package/bundled/dev-pipeline/reset-refactor.sh +10 -9
  10. package/bundled/dev-pipeline/retry-bugfix.sh +7 -6
  11. package/bundled/dev-pipeline/retry-feature.sh +7 -6
  12. package/bundled/dev-pipeline/retry-refactor.sh +7 -6
  13. package/bundled/dev-pipeline/run-bugfix.sh +71 -23
  14. package/bundled/dev-pipeline/run-feature.sh +30 -21
  15. package/bundled/dev-pipeline/run-refactor.sh +21 -17
  16. package/bundled/dev-pipeline/scripts/cleanup-logs.py +2 -2
  17. package/bundled/dev-pipeline/scripts/detect-stuck.py +3 -3
  18. package/bundled/dev-pipeline/scripts/generate-bootstrap-prompt.py +26 -14
  19. package/bundled/dev-pipeline/scripts/generate-bugfix-prompt.py +6 -6
  20. package/bundled/dev-pipeline/scripts/generate-refactor-prompt.py +6 -6
  21. package/bundled/dev-pipeline/scripts/init-bugfix-pipeline.py +4 -4
  22. package/bundled/dev-pipeline/scripts/init-pipeline.py +26 -12
  23. package/bundled/dev-pipeline/scripts/init-refactor-pipeline.py +4 -4
  24. package/bundled/dev-pipeline/scripts/update-bug-status.py +10 -10
  25. package/bundled/dev-pipeline/scripts/update-feature-status.py +31 -31
  26. package/bundled/dev-pipeline/scripts/update-refactor-status.py +8 -8
  27. package/bundled/dev-pipeline/templates/bug-fix-list-schema.json +111 -31
  28. package/bundled/dev-pipeline/templates/feature-list-schema.json +91 -25
  29. package/bundled/dev-pipeline/templates/refactor-list-schema.json +107 -28
  30. package/bundled/dev-pipeline/tests/test_auto_skip.py +1 -1
  31. package/bundled/skills/_metadata.json +10 -2
  32. package/bundled/skills/app-planner/SKILL.md +24 -13
  33. package/bundled/skills/app-planner/references/project-brief-guide.md +1 -1
  34. package/bundled/skills/bug-fix-workflow/SKILL.md +7 -5
  35. package/bundled/skills/bug-planner/SKILL.md +80 -25
  36. package/bundled/skills/bug-planner/scripts/validate-bug-list.py +3 -3
  37. package/bundled/skills/bugfix-pipeline-launcher/SKILL.md +38 -33
  38. package/bundled/skills/feature-pipeline-launcher/SKILL.md +33 -33
  39. package/bundled/skills/feature-pipeline-launcher/scripts/preflight-check.py +3 -3
  40. package/bundled/skills/feature-planner/SKILL.md +96 -24
  41. package/bundled/skills/feature-planner/references/error-recovery.md +9 -9
  42. package/bundled/skills/feature-planner/scripts/validate-and-generate.py +25 -24
  43. package/bundled/skills/feature-workflow/SKILL.md +23 -20
  44. package/bundled/skills/prizmkit-committer/SKILL.md +1 -0
  45. package/bundled/skills/prizmkit-deploy/SKILL.md +1 -0
  46. package/bundled/skills/prizmkit-deploy/assets/deploy-template.md +1 -1
  47. package/bundled/skills/prizmkit-implement/SKILL.md +1 -1
  48. package/bundled/skills/prizmkit-implement/references/deploy-guide-protocol.md +4 -4
  49. package/bundled/skills/prizmkit-plan/SKILL.md +3 -3
  50. package/bundled/skills/prizmkit-retrospective/SKILL.md +40 -3
  51. package/bundled/skills/prizmkit-verify/SKILL.md +281 -0
  52. package/bundled/skills/prizmkit-verify/scripts/verify-light.py +402 -0
  53. package/bundled/skills/recovery-workflow/SKILL.md +15 -14
  54. package/bundled/skills/recovery-workflow/evals/evals.json +5 -5
  55. package/bundled/skills/recovery-workflow/scripts/detect-recovery-state.py +43 -10
  56. package/bundled/skills/refactor-pipeline-launcher/SKILL.md +38 -34
  57. package/bundled/skills/refactor-planner/SKILL.md +74 -24
  58. package/bundled/skills/refactor-planner/scripts/validate-and-generate-refactor.py +17 -17
  59. package/bundled/skills/refactor-workflow/SKILL.md +24 -20
  60. package/package.json +1 -1
  61. package/src/clean.js +4 -4
  62. package/src/gitignore-template.js +7 -8
  63. package/src/scaffold.js +4 -2
@@ -0,0 +1,402 @@
1
+ #!/usr/bin/env python3
2
+ """PrizmKit Light Verification Script — static checks for framework integrity.
3
+
4
+ Usage:
5
+ python3 verify-light.py # Run all rounds
6
+ python3 verify-light.py --round R1 # Run specific round
7
+ python3 verify-light.py --round R1 R2 R4 # Run multiple rounds
8
+ """
9
+
10
+ import argparse
11
+ import json
12
+ import os
13
+ import re
14
+ import subprocess
15
+ import sys
16
+ from pathlib import Path
17
+
18
# --- Configuration ---

# Repository root, derived from this script's own location:
# core/skills/prizmkit-skill/prizmkit-verify/scripts/verify-light.py → root (5 levels up)
PROJECT_ROOT = Path(__file__).resolve().parents[5]

# Layer directories inspected by the verification rounds (R1–R4).
SKILL_LAYER_DIR = PROJECT_ROOT / "core" / "skills" / "prizmkit-skill"
PIPELINE_LAYER_DIR = PROJECT_ROOT / "core" / "skills" / "orchestration-skill" / "pipelines"
WORKFLOW_LAYER_DIR = PROJECT_ROOT / "core" / "skills" / "orchestration-skill" / "workflows"
DEV_PIPELINE_DIR = PROJECT_ROOT / "dev-pipeline"
ALL_SKILLS_DIR = PROJECT_ROOT / "core" / "skills"

# Expected items per layer: R2 fails when any of these pipeline skills is
# missing; R3 does the same for the workflow skills.
EXPECTED_PIPELINE_SKILLS = {
    "app-planner", "feature-planner", "bug-planner", "refactor-planner",
    "feature-pipeline-launcher", "bugfix-pipeline-launcher", "refactor-pipeline-launcher",
}
EXPECTED_WORKFLOW_SKILLS = {
    "feature-workflow", "bug-fix-workflow", "refactor-workflow", "recovery-workflow",
}
36
+
37
+ # --- Helpers ---
38
+
39
class Result:
    """Accumulates PASS/WARN/FAIL outcomes across verification rounds.

    Counters track how many checks landed in each bucket; ``details`` keeps
    an ordered log of ``(severity, message)`` tuples for the final report.
    """

    def __init__(self):
        self.passed = 0
        self.warned = 0
        self.failed = 0
        self.details = []

    def _record(self, severity, counter_attr, msg):
        # Single bookkeeping path shared by ok/warn/fail.
        setattr(self, counter_attr, getattr(self, counter_attr) + 1)
        self.details.append((severity, msg))

    def ok(self, msg):
        self._record("PASS", "passed", msg)

    def warn(self, msg):
        self._record("WARN", "warned", msg)

    def fail(self, msg):
        self._record("FAIL", "failed", msg)

    @property
    def status(self):
        """Overall severity: any FAIL wins, then WARN, otherwise PASS."""
        if self.failed:
            return "FAIL"
        return "WARN" if self.warned else "PASS"

    def summary(self):
        """One-line counter summary, e.g. ``PASS=3 WARN=1 FAIL=0``."""
        return f"PASS={self.passed} WARN={self.warned} FAIL={self.failed}"
68
+
69
+
70
def parse_frontmatter(skill_md_path):
    """Parse the ``---``-delimited YAML frontmatter at the top of a SKILL.md.

    Only flat ``key: value`` lines are understood (values are stripped of
    surrounding single/double quotes); nested YAML is ignored. Returns an
    empty dict when no frontmatter block is present.
    """
    raw = skill_md_path.read_text(encoding="utf-8")
    header = re.match(r"^---\n(.*?)\n---", raw, re.DOTALL)
    if header is None:
        return {}
    parsed = {}
    for entry in header.group(1).split("\n"):
        key, sep, value = entry.partition(":")
        if sep:
            parsed[key.strip()] = value.strip().strip('"').strip("'")
    return parsed
82
+
83
+
84
def find_skill_dirs(base_dir):
    """Return the sorted child directories of *base_dir* that hold a SKILL.md."""
    candidates = [
        entry for entry in base_dir.iterdir()
        if entry.is_dir() and (entry / "SKILL.md").exists()
    ]
    return sorted(candidates)
90
+
91
+
92
def collect_all_skill_names():
    """Gather the names of every known skill directory across all categories.

    Looks one level under each category directory for skills (a dir holding
    a SKILL.md), and one level deeper inside container dirs such as
    ``pipelines/`` or ``workflows/`` that have no SKILL.md of their own.
    """
    names = set()
    for category_dir in ALL_SKILLS_DIR.iterdir():
        if not category_dir.is_dir() or category_dir.name.startswith("."):
            continue
        for entry in category_dir.iterdir():
            if not entry.is_dir():
                continue
            if (entry / "SKILL.md").exists():
                # Direct skill directory.
                names.add(entry.name)
            else:
                # Container dir: scan one nesting level down.
                for nested in entry.iterdir():
                    if nested.is_dir() and (nested / "SKILL.md").exists():
                        names.add(nested.name)
    return names
108
+
109
+
110
+ # --- Round Checks ---
111
+
112
def run_r1(result: Result):
    """R1: L1 Skill Layer Light checks.

    For every skill under prizmkit-skill/: validates SKILL.md frontmatter
    (name matches the directory, description present), resolves
    ``${SKILL_DIR}/...`` asset references against the skill directory, and
    checks ``/prizmkit-*`` cross-references against all known skill names.
    Then runs the project's ESLint and Ruff npm targets; lint failures are
    recorded as FAIL, while an unrunnable linter is only a WARN.
    """
    print("\n=== R1: L1 Skill Layer ===")
    skill_dirs = find_skill_dirs(SKILL_LAYER_DIR)
    all_skill_names = collect_all_skill_names()

    if not skill_dirs:
        result.fail("No skill directories found in prizmkit-skill/")
        return

    print(f" Found {len(skill_dirs)} skills")

    for skill_dir in skill_dirs:
        skill_name = skill_dir.name
        skill_md = skill_dir / "SKILL.md"

        # 1. Frontmatter validation
        fm = parse_frontmatter(skill_md)
        if not fm.get("name"):
            result.fail(f"[{skill_name}] Missing 'name' in frontmatter")
        elif fm["name"] != skill_name:
            result.fail(f"[{skill_name}] name '{fm['name']}' != dir '{skill_name}'")
        else:
            result.ok(f"[{skill_name}] Frontmatter name OK")

        if not fm.get("description"):
            result.fail(f"[{skill_name}] Missing 'description' in frontmatter")
        else:
            result.ok(f"[{skill_name}] Frontmatter description OK")

        # 2. ${SKILL_DIR} reference resolution — every referenced asset must
        # exist relative to the skill's own directory.
        content = skill_md.read_text(encoding="utf-8")
        refs = re.findall(r'\$\{SKILL_DIR\}/([^\s`"\')]+)', content)
        for ref in refs:
            resolved = skill_dir / ref
            if resolved.exists():
                result.ok(f"[{skill_name}] Asset ref '{ref}' exists")
            else:
                result.fail(f"[{skill_name}] Asset ref '{ref}' NOT FOUND at {resolved}")

        # 3. Cross-skill references — unknown targets are only a WARN because
        # the reference may point at a skill outside the scanned categories.
        cross_refs = re.findall(r'(?<!\w)/prizmkit-[\w-]+(?=[\s`"\',\)])', content)
        for ref in cross_refs:
            ref_name = ref.lstrip("/")
            if ref_name in all_skill_names:
                result.ok(f"[{skill_name}] Cross-ref '{ref_name}' exists")
            else:
                result.warn(f"[{skill_name}] Cross-ref '{ref_name}' not found as skill dir")

    # 4. ESLint
    try:
        lint_result = subprocess.run(
            ["npm", "run", "lint"], capture_output=True, text=True,
            cwd=str(PROJECT_ROOT), timeout=30,
        )
        output = lint_result.stdout + lint_result.stderr
        # BUG FIX: the previous pattern r"(\d+) error[^s]" could never match
        # the plural "N errors" that ESLint prints whenever 2+ errors exist,
        # so multi-error runs were reported as clean. Match both singular and
        # plural and take the largest reported count (the summary line).
        error_counts = [int(n) for n in re.findall(r"(\d+)\s+errors?\b", output)]
        errors = max(error_counts, default=0)
        if lint_result.returncode == 0 or errors == 0:
            result.ok("ESLint: 0 errors")
        else:
            result.fail(f"ESLint: {errors} errors found")
    except (subprocess.TimeoutExpired, FileNotFoundError) as e:
        result.warn(f"ESLint: could not run ({e})")

    # 5. Ruff
    try:
        ruff_result = subprocess.run(
            ["npm", "run", "lint:py"], capture_output=True, text=True,
            cwd=str(PROJECT_ROOT), timeout=30,
        )
        if ruff_result.returncode == 0:
            result.ok("Ruff: all checks passed")
        else:
            result.fail(f"Ruff: {ruff_result.stdout.strip()}")
    except (subprocess.TimeoutExpired, FileNotFoundError) as e:
        result.warn(f"Ruff: could not run ({e})")
187
+
188
+
189
def run_r2(result: Result):
    """R2: L2 Pipeline Layer Light checks.

    Verifies that every expected pipeline skill exists with valid
    frontmatter, that each scenario's JSON schema version (the ``$schema``
    const in the template) matches the ``EXPECTED_SCHEMA`` constant in the
    corresponding init script, and that the launcher run-*.sh scripts exist
    and are executable.
    """
    print("\n=== R2: L2 Pipeline Layer ===")
    skill_dirs = find_skill_dirs(PIPELINE_LAYER_DIR)
    found_names = {d.name for d in skill_dirs}

    # Check expected skills present
    for expected in EXPECTED_PIPELINE_SKILLS:
        if expected in found_names:
            result.ok(f"[{expected}] Skill exists")
        else:
            result.fail(f"[{expected}] Expected skill NOT FOUND")

    # Frontmatter validation: name must match the directory and a
    # description must be present.
    for skill_dir in skill_dirs:
        fm = parse_frontmatter(skill_dir / "SKILL.md")
        if fm.get("name") == skill_dir.name and fm.get("description"):
            result.ok(f"[{skill_dir.name}] Frontmatter OK")
        else:
            result.fail(f"[{skill_dir.name}] Frontmatter invalid: {fm}")

    # Schema version consistency: (scenario label, template schema file,
    # init script that declares the expected schema id).
    scenarios = [
        ("feature", "feature-list-schema.json", "init-pipeline.py"),
        ("bugfix", "bug-fix-list-schema.json", "init-bugfix-pipeline.py"),
        ("refactor", "refactor-list-schema.json", "init-refactor-pipeline.py"),
    ]
    for scenario, schema_file, init_script in scenarios:
        schema_path = DEV_PIPELINE_DIR / "templates" / schema_file
        init_path = DEV_PIPELINE_DIR / "scripts" / init_script

        if not schema_path.exists():
            result.fail(f"[{scenario}] Schema file missing: {schema_file}")
            continue
        if not init_path.exists():
            result.fail(f"[{scenario}] Init script missing: {init_script}")
            continue

        # Extract schema const value (properties.$schema.const in the template)
        try:
            schema_data = json.loads(schema_path.read_text())
            schema_const = (
                schema_data.get("properties", {})
                .get("$schema", {})
                .get("const", "NOT_FOUND")
            )
        except json.JSONDecodeError:
            result.fail(f"[{scenario}] Schema file is not valid JSON")
            continue

        # Extract expected schema from init script (EXPECTED_SCHEMA = "...")
        init_content = init_path.read_text()
        match = re.search(r'EXPECTED_SCHEMA\s*=\s*["\']([^"\']+)', init_content)
        init_expected = match.group(1) if match else "NOT_FOUND"

        if schema_const == init_expected:
            result.ok(f"[{scenario}] Schema version match: {schema_const}")
        else:
            result.fail(f"[{scenario}] Schema version MISMATCH: template={schema_const}, init={init_expected}")

    # Launcher script path references: missing scripts are FAIL, present but
    # non-executable only WARN (they can still be run via `bash script.sh`).
    for scenario, script in [("feature", "run-feature.sh"), ("bugfix", "run-bugfix.sh"), ("refactor", "run-refactor.sh")]:
        script_path = DEV_PIPELINE_DIR / script
        if script_path.exists() and os.access(script_path, os.X_OK):
            result.ok(f"[{scenario}] Script {script} exists and executable")
        elif script_path.exists():
            result.warn(f"[{scenario}] Script {script} exists but NOT executable")
        else:
            result.fail(f"[{scenario}] Script {script} NOT FOUND")
258
+
259
+
260
def run_r3(result: Result):
    """R3: L3 Workflow Layer Light checks.

    Verifies that every expected workflow skill exists with valid
    frontmatter, that backtick-quoted pipeline skill references
    (``*-planner`` / ``*-pipeline-launcher``) resolve to real pipeline
    skills, and that each workflow contains ``CP-*-N`` checkpoint markers.
    """
    print("\n=== R3: L3 Workflow Layer ===")
    skill_dirs = find_skill_dirs(WORKFLOW_LAYER_DIR)
    found_names = {d.name for d in skill_dirs}
    pipeline_names = {d.name for d in find_skill_dirs(PIPELINE_LAYER_DIR)}

    # Check expected skills
    for expected in EXPECTED_WORKFLOW_SKILLS:
        if expected in found_names:
            result.ok(f"[{expected}] Workflow exists")
        else:
            result.fail(f"[{expected}] Expected workflow NOT FOUND")

    for skill_dir in skill_dirs:
        skill_name = skill_dir.name
        skill_md = skill_dir / "SKILL.md"

        # Frontmatter: name must match the directory, description required.
        fm = parse_frontmatter(skill_md)
        if fm.get("name") == skill_name and fm.get("description"):
            result.ok(f"[{skill_name}] Frontmatter OK")
        else:
            result.fail(f"[{skill_name}] Frontmatter invalid")

        # Pipeline skill references — unresolved refs are only WARN since
        # the text may mention skills outside the pipeline layer.
        content = skill_md.read_text(encoding="utf-8")
        for pipeline_ref in re.findall(r'`([\w-]+-planner|[\w-]+-pipeline-launcher)`', content):
            if pipeline_ref in pipeline_names:
                result.ok(f"[{skill_name}] Pipeline ref '{pipeline_ref}' exists")
            else:
                result.warn(f"[{skill_name}] Pipeline ref '{pipeline_ref}' not found")

        # Checkpoint naming: presence of CP-<word>-<number> markers.
        checkpoints = re.findall(r'CP-\w+-\d+', content)
        if checkpoints:
            result.ok(f"[{skill_name}] {len(checkpoints)} checkpoints found (CP-*-N pattern)")
        else:
            result.warn(f"[{skill_name}] No checkpoint markers found")
299
+
300
+
301
def run_r4(result: Result):
    """R4: L4 Script Layer Light checks.

    Verifies that dev-pipeline shell scripts are executable, Python scripts
    byte-compile, the run-*.sh launchers reference the shared bash libs,
    JSON templates parse, and test files exist.
    """
    print("\n=== R4: L4 Script Layer ===")

    # 1. Shell scripts executable
    for sh_file in sorted(DEV_PIPELINE_DIR.glob("*.sh")):
        if os.access(sh_file, os.X_OK):
            result.ok(f"[{sh_file.name}] Executable")
        else:
            result.fail(f"[{sh_file.name}] NOT executable")

    # 2. Python scripts compile
    scripts_dir = DEV_PIPELINE_DIR / "scripts"
    if scripts_dir.exists():
        for py_file in sorted(scripts_dir.glob("*.py")):
            try:
                compile_proc = subprocess.run(
                    [sys.executable, "-m", "py_compile", str(py_file)],
                    capture_output=True, text=True, timeout=10,
                )
                # BUG FIX: the return code was previously ignored, so a
                # script with a syntax error was still reported "Compiles OK".
                if compile_proc.returncode == 0:
                    result.ok(f"[{py_file.name}] Compiles OK")
                else:
                    result.fail(f"[{py_file.name}] Does NOT compile: {compile_proc.stderr.strip()}")
            except subprocess.TimeoutExpired:
                result.warn(f"[{py_file.name}] Compile check timed out")

    # 3. Bash lib sourcing — each launcher must reference all shared libs.
    for run_script in ["run-feature.sh", "run-bugfix.sh", "run-refactor.sh"]:
        script_path = DEV_PIPELINE_DIR / run_script
        if not script_path.exists():
            result.fail(f"[{run_script}] NOT FOUND")
            continue
        content = script_path.read_text(encoding="utf-8")
        for lib in ["common.sh", "heartbeat.sh", "branch.sh"]:
            if lib in content:
                result.ok(f"[{run_script}] Sources {lib}")
            else:
                result.fail(f"[{run_script}] Does NOT source {lib}")

    # 4. JSON templates valid
    templates_dir = DEV_PIPELINE_DIR / "templates"
    if templates_dir.exists():
        for json_file in sorted(templates_dir.glob("*.json")):
            try:
                json.loads(json_file.read_text())
                result.ok(f"[{json_file.name}] Valid JSON")
            except json.JSONDecodeError as e:
                result.fail(f"[{json_file.name}] Invalid JSON: {e}")

    # 5. Test file existence — absence is only a WARN, not a FAIL.
    tests_dir = DEV_PIPELINE_DIR / "tests"
    if tests_dir.exists():
        test_files = list(tests_dir.glob("test_*.py"))
        if test_files:
            result.ok(f"Test files found: {len(test_files)}")
        else:
            result.warn("No test_*.py files found in dev-pipeline/tests/")
    else:
        result.warn("dev-pipeline/tests/ directory not found")
358
+
359
+
360
+ # --- Main ---
361
+
362
def main():
    """CLI entry point: run the requested verification rounds and report.

    Returns 0 when no check failed, 1 otherwise (used as the process exit
    code by the ``__main__`` guard below).
    """
    parser = argparse.ArgumentParser(description="PrizmKit Light Verification")
    parser.add_argument("--round", nargs="*", default=["R1", "R2", "R3", "R4"],
                        help="Rounds to run (R1, R2, R3, R4)")
    args = parser.parse_args()

    result = Result()
    dispatch = {"R1": run_r1, "R2": run_r2, "R3": run_r3, "R4": run_r4}

    for requested in args.round:
        round_name = requested.upper()
        runner = dispatch.get(round_name)
        if runner is not None:
            runner(result)
        else:
            print(f" Unknown round: {round_name}")

    # Summary banner
    banner = "=" * 60
    print("\n" + banner)
    print(f" RESULT: {result.status}")
    print(f" {result.summary()}")
    print(banner)

    # Echo failures and warnings from the ordered detail log.
    if result.failed > 0:
        print("\n FAILURES:")
        for status, msg in result.details:
            if status == "FAIL":
                print(f" {msg}")

    if result.warned > 0:
        print("\n WARNINGS:")
        for status, msg in result.details:
            if status == "WARN":
                print(f" {msg}")

    print()
    return 0 if result.failed == 0 else 1


if __name__ == "__main__":
    sys.exit(main())
@@ -25,8 +25,8 @@ User says:
25
25
  | Workflow | Branch Pattern | Key Artifacts |
26
26
  |----------|---------------|---------------|
27
27
  | bug-fix-workflow | `fix/<BUG_ID>-*` | `.prizmkit/bugfix/<BUG_ID>/fix-plan.md`, `fix-report.md` |
28
- | feature-workflow | `feat/*` | `feature-list.json`, `dev-pipeline/state/features/` |
29
- | refactor-workflow | `refactor/*` | `refactor-list.json`, `dev-pipeline/state/refactors/` |
28
+ | feature-workflow | `feat/*` | `.prizmkit/plans/feature-list.json`, `.prizmkit/state/features/features/` |
29
+ | refactor-workflow | `refactor/*` | `.prizmkit/plans/refactor-list.json`, `.prizmkit/state/refactor/refactors/` |
30
30
 
31
31
  ---
32
32
 
@@ -71,9 +71,9 @@ The script uses **priority-ordered signature matching**:
71
71
  1. Current branch matches fix/* → bug-fix-workflow
72
72
  2. .prizmkit/bugfix/ directory has content → bug-fix-workflow
73
73
  3. Current branch matches refactor/* → refactor-workflow
74
- 4. refactor-list.json exists → refactor-workflow
74
+ 4. .prizmkit/plans/refactor-list.json exists → refactor-workflow
75
75
  5. Current branch matches feat/* → feature-workflow
76
- 6. feature-list.json exists → feature-workflow
76
+ 6. .prizmkit/plans/feature-list.json exists → feature-workflow
77
77
  7. None of the above → no workflow detected
78
78
  ```
79
79
 
@@ -161,6 +161,7 @@ If the user declines, suggest alternatives:
161
161
  1. **Read the workflow's SKILL.md** from `core/skills/orchestration-skill/workflows/{workflow-type}/SKILL.md`
162
162
  2. **Read existing artifacts** to restore context — check in this order for the most efficient recovery:
163
163
  - If `session-summary.md` exists in the artifact directory → read it first. It provides a lightweight summary of completed tasks, key decisions, active TRAPS, and remaining work from the interrupted session.
164
+ > **Note**: `session-summary.md` is generated by the pipeline's bootstrap prompt system (`generate-bootstrap-prompt.py`) at the end of each AI CLI session. It may not exist if the session crashed before completion or if running in interactive (non-pipeline) mode. If absent, reconstruct context from the artifacts below.
164
165
  - Then read remaining artifacts: spec, plan, code diffs, bug descriptions, etc.
165
166
  3. **Read relevant `.prizm-docs/`** — load project context (L0 root, relevant L1). If `session-summary.md` was found, use its "Files Changed" section to focus L1/L2 loading on affected modules only.
166
167
 
@@ -174,7 +175,7 @@ Phase inference table:
174
175
 
175
176
  | Detected State | Resume From | Actions |
176
177
  |---------------|------------|---------|
177
- | On `fix/<BUG_ID>-*` branch, no artifacts | Phase 1: Deep Bug Diagnosis | Read bug description from `bug-fix-list.json`. Start interactive diagnosis Q&A |
178
+ | On `fix/<BUG_ID>-*` branch, no artifacts | Phase 1: Deep Bug Diagnosis | Read bug description from `.prizmkit/plans/bug-fix-list.json`. Start interactive diagnosis Q&A |
178
179
  | `fix-plan.md` exists, no code changes | Phase 4: Fix | Read fix-plan.md. Implement the fix following the plan |
179
180
  | `fix-plan.md` + code changes exist | Phase 5: Review | Invoke `/prizmkit-code-review` on all changes |
180
181
  | All docs + review passed | Phase 6: User Verification | Ask user to verify the fix works |
@@ -194,11 +195,11 @@ Phase inference table:
194
195
 
195
196
  | Detected State | Resume From | Actions |
196
197
  |---------------|------------|---------|
197
- | No `feature-list.json` | Phase 1: Brainstorm | Cannot recover conversation context. Start requirement clarification, but leverage any workspace content (README, existing code) for context |
198
- | `feature-list.json` exists, no pipeline state | Phase 3: Launch | Invoke `feature-pipeline-launcher` to start the pipeline |
199
- | `feature-list.json` + pipeline state exists | Phase 4: Monitor | Check pipeline status via `feature-pipeline-launcher` (Intent B: Check Status) |
198
+ | No `.prizmkit/plans/feature-list.json` | Phase 1: Brainstorm | Cannot recover conversation context. Start requirement clarification, but leverage any workspace content (README, existing code) for context |
199
+ | `.prizmkit/plans/feature-list.json` exists, no pipeline state | Phase 3: Launch | Invoke `feature-pipeline-launcher` to start the pipeline |
200
+ | `.prizmkit/plans/feature-list.json` + pipeline state exists | Phase 4: Monitor | Check pipeline status via `feature-pipeline-launcher` (Intent B: Check Status) |
200
201
 
201
- **Note**: Feature-workflow recovery is simpler because Phases 3-4 are pipeline-driven. The main recovery value is avoiding re-brainstorming (Phase 1) when `feature-list.json` already exists.
202
+ **Note**: Feature-workflow recovery is simpler because Phases 3-4 are pipeline-driven. The main recovery value is avoiding re-brainstorming (Phase 1) when `.prizmkit/plans/feature-list.json` already exists.
202
203
 
203
204
  ---
204
205
 
@@ -208,9 +209,9 @@ Phase inference table (mirrors feature-workflow):
208
209
 
209
210
  | Detected State | Resume From | Actions |
210
211
  |---------------|------------|---------|
211
- | No `refactor-list.json` | Phase 1: Brainstorm | Start refactoring goal clarification |
212
- | `refactor-list.json` exists, no pipeline state | Phase 3: Launch | Invoke `refactor-pipeline-launcher` to start the pipeline |
213
- | `refactor-list.json` + pipeline state exists | Phase 4: Monitor | Check pipeline status |
212
+ | No `.prizmkit/plans/refactor-list.json` | Phase 1: Brainstorm | Start refactoring goal clarification |
213
+ | `.prizmkit/plans/refactor-list.json` exists, no pipeline state | Phase 3: Launch | Invoke `refactor-pipeline-launcher` to start the pipeline |
214
+ | `.prizmkit/plans/refactor-list.json` + pipeline state exists | Phase 4: Monitor | Check pipeline status |
214
215
 
215
216
  ---
216
217
 
@@ -242,9 +243,9 @@ Recovery complete.
242
243
  | No workflow signature matches | Show guidance message, suggest original workflow skills |
243
244
  | Branch exists but artifacts are inconsistent | Trust git as ground truth, report discrepancy in detection report |
244
245
  | Test failures in existing code | Report in detection summary; user decides whether to continue |
245
- | Multiple workflows could match (e.g., on main but both feature-list.json and bug-fix artifacts exist) | Pick highest priority (bug-fix > refactor > feature), mention others in report |
246
+ | Multiple workflows could match (e.g., on main but both .prizmkit/plans/feature-list.json and bug-fix artifacts exist) | Pick highest priority (bug-fix > refactor > feature), mention others in report |
246
247
  | Detection script fails | Fall back to manual detection (run individual git/file checks in bash) |
247
- | Bug ID not found in bug-fix-list.json | Continue with branch-only context; note that full bug description is unavailable |
248
+ | Bug ID not found in .prizmkit/plans/bug-fix-list.json | Continue with branch-only context; note that full bug description is unavailable |
248
249
 
249
250
  ---
250
251
 
@@ -22,16 +22,16 @@
22
22
  "id": 3,
23
23
  "name": "feature-list-exists",
24
24
  "prompt": "resume",
25
- "expected_output": "Should auto-detect feature-workflow from feature-list.json. Should detect feature-list.json exists but no pipeline state → infer Phase 3 (Launch). Should invoke feature-pipeline-launcher to start the pipeline.",
26
- "setup_description": "On main branch, create feature-list.json with 3 features. No dev-pipeline/state/features/ directory.",
25
+ "expected_output": "Should auto-detect feature-workflow from .prizmkit/plans/feature-list.json. Should detect .prizmkit/plans/feature-list.json exists but no pipeline state → infer Phase 3 (Launch). Should invoke feature-pipeline-launcher to start the pipeline.",
26
+ "setup_description": "On main branch, create .prizmkit/plans/feature-list.json with 3 features. No .prizmkit/state/features/features/ directory.",
27
27
  "files": []
28
28
  },
29
29
  {
30
30
  "id": 4,
31
31
  "name": "refactor-monitoring",
32
32
  "prompt": "pick up where it left off",
33
- "expected_output": "Should auto-detect refactor-workflow from refactor-list.json + pipeline state. Should infer Phase 4 (Monitor). Should check pipeline status and report results.",
34
- "setup_description": "On main branch, create refactor-list.json with 2 items. Create dev-pipeline/state/refactors/ with status files showing 1 completed, 1 in-progress.",
33
+ "expected_output": "Should auto-detect refactor-workflow from .prizmkit/plans/refactor-list.json + pipeline state. Should infer Phase 4 (Monitor). Should check pipeline status and report results.",
34
+ "setup_description": "On main branch, create .prizmkit/plans/refactor-list.json with 2 items. Create .prizmkit/state/refactor/refactors/ with status files showing 1 completed, 1 in-progress.",
35
35
  "files": []
36
36
  },
37
37
  {
@@ -39,7 +39,7 @@
39
39
  "name": "no-workflow-detected",
40
40
  "prompt": "恢复",
41
41
  "expected_output": "Should detect no workflow signatures in workspace. Should display guidance message suggesting /feature-workflow, /bug-fix-workflow, or /refactor-workflow. Should NOT attempt any recovery actions.",
42
- "setup_description": "Clean workspace on main branch. No feature-list.json, no bug-fix artifacts, no refactor-list.json, no fix/* or feat/* or refactor/* branches.",
42
+ "setup_description": "Clean workspace on main branch. No .prizmkit/plans/feature-list.json, no bug-fix artifacts, no .prizmkit/plans/refactor-list.json, no fix/* or feat/* or refactor/* branches.",
43
43
  "files": []
44
44
  }
45
45
  ]
@@ -85,7 +85,14 @@ def detect_workflow_type(project_root):
85
85
  return ("refactor-workflow", {"branch": branch})
86
86
 
87
87
  # Priority 4: refactor-list.json exists → refactor-workflow
88
- if os.path.isfile(os.path.join(project_root, "refactor-list.json")):
88
+ # Check both new and old paths for backward compatibility
89
+ new_refactor = os.path.join(project_root, ".prizmkit", "plans", "refactor-list.json")
90
+ old_refactor = os.path.join(project_root, "refactor-list.json")
91
+ if os.path.isfile(new_refactor):
92
+ return ("refactor-workflow", {"branch": branch})
93
+ elif os.path.isfile(old_refactor):
94
+ print(f"⚠️ Migration notice: refactor-list.json found in root. "
95
+ f"Please move to .prizmkit/plans/refactor-list.json", file=sys.stderr)
89
96
  return ("refactor-workflow", {"branch": branch})
90
97
 
91
98
  # Priority 5: feat/* branch → feature-workflow
@@ -93,7 +100,14 @@ def detect_workflow_type(project_root):
93
100
  return ("feature-workflow", {"branch": branch})
94
101
 
95
102
  # Priority 6: feature-list.json exists → feature-workflow
96
- if os.path.isfile(os.path.join(project_root, "feature-list.json")):
103
+ # Check both new and old paths for backward compatibility
104
+ new_feature = os.path.join(project_root, ".prizmkit", "plans", "feature-list.json")
105
+ old_feature = os.path.join(project_root, "feature-list.json")
106
+ if os.path.isfile(new_feature):
107
+ return ("feature-workflow", {"branch": branch})
108
+ elif os.path.isfile(old_feature):
109
+ print(f"⚠️ Migration notice: feature-list.json found in root. "
110
+ f"Please move to .prizmkit/plans/feature-list.json", file=sys.stderr)
97
111
  return ("feature-workflow", {"branch": branch})
98
112
 
99
113
  # No match
@@ -164,10 +178,27 @@ def _infer_pipeline_workflow_phase(project_root, list_filename, state_subdir, wo
164
178
  No list file → Phase 1: Brainstorm
165
179
  List file, no pipeline state → Phase 3: Launch
166
180
  List file + pipeline state → Phase 4: Monitor
181
+
182
+ Checks new path (.prizmkit/plans/<list_filename>) first, then falls back
183
+ to old root-level path with a migration warning.
167
184
  """
168
- has_list = os.path.isfile(os.path.join(project_root, list_filename))
169
- state_dir = os.path.join(project_root, "dev-pipeline", "state", state_subdir)
170
- has_pipeline_state = os.path.isdir(state_dir) and bool(os.listdir(state_dir))
185
+ # Check new path first, then old path with fallback warning
186
+ new_list_path = os.path.join(project_root, ".prizmkit", "plans", list_filename)
187
+ old_list_path = os.path.join(project_root, list_filename)
188
+ has_list = os.path.isfile(new_list_path)
189
+ if not has_list and os.path.isfile(old_list_path):
190
+ has_list = True
191
+ print(f"⚠️ Migration notice: {list_filename} found in root. "
192
+ f"Please move to .prizmkit/plans/{list_filename}", file=sys.stderr)
193
+
194
+ # Check new state path first, then old path with fallback warning
195
+ new_state_dir = os.path.join(project_root, ".prizmkit", "state", state_subdir)
196
+ old_state_dir = os.path.join(project_root, "dev-pipeline", "state", state_subdir)
197
+ has_pipeline_state = os.path.isdir(new_state_dir) and bool(os.listdir(new_state_dir))
198
+ if not has_pipeline_state and os.path.isdir(old_state_dir) and bool(os.listdir(old_state_dir)):
199
+ has_pipeline_state = True
200
+ print(f"⚠️ Migration notice: pipeline state found at dev-pipeline/state/{state_subdir}. "
201
+ f"Please move to .prizmkit/state/{state_subdir}", file=sys.stderr)
171
202
 
172
203
  artifacts = {
173
204
  f"{workflow_label}_list_exists": has_list,
@@ -176,16 +207,16 @@ def _infer_pipeline_workflow_phase(project_root, list_filename, state_subdir, wo
176
207
 
177
208
  if has_list and has_pipeline_state:
178
209
  return 4, "Monitor", artifacts, \
179
- f"{list_filename} + pipeline state exist", \
210
+ f".prizmkit/plans/{list_filename} + pipeline state exist", \
180
211
  "check pipeline status and report results"
181
212
 
182
213
  if has_list:
183
214
  return 3, "Launch", artifacts, \
184
- f"{list_filename} exists, no pipeline state", \
215
+ f".prizmkit/plans/{list_filename} exists, no pipeline state", \
185
216
  "launch pipeline → monitor progress"
186
217
 
187
218
  return 1, "Brainstorm", artifacts, \
188
- f"no {list_filename} found", \
219
+ f"no .prizmkit/plans/{list_filename} found", \
189
220
  f"{workflow_label} goal clarification → plan → launch → monitor"
190
221
 
191
222
 
@@ -199,7 +230,7 @@ def infer_feature_phase(project_root):
199
230
  def infer_refactor_phase(project_root):
200
231
  """Infer refactor-workflow phase from artifacts and pipeline state."""
201
232
  return _infer_pipeline_workflow_phase(
202
- project_root, "refactor-list.json", "refactors", "refactor"
233
+ project_root, "refactor-list.json", "refactor", "refactor"
203
234
  )
204
235
 
205
236
 
@@ -256,6 +287,9 @@ def detect_code_changes(project_root, main_branch="main"):
256
287
  files that represent actual implementation work.
257
288
  """
258
289
  IGNORED_FILES = {
290
+ ".prizmkit/plans/feature-list.json",
291
+ ".prizmkit/plans/bug-fix-list.json",
292
+ ".prizmkit/plans/refactor-list.json",
259
293
  "feature-list.json",
260
294
  "bug-fix-list.json",
261
295
  "refactor-list.json",
@@ -265,7 +299,6 @@ def detect_code_changes(project_root, main_branch="main"):
265
299
  }
266
300
  IGNORED_PREFIXES = (
267
301
  ".prizmkit/",
268
- "dev-pipeline/state/",
269
302
  ".prizm-docs/",
270
303
  ".claude/",
271
304
  ".codebuddy/",