prizmkit 1.1.5 → 1.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundled/VERSION.json +3 -3
- package/bundled/dev-pipeline/README.md +1 -1
- package/bundled/dev-pipeline/assets/feature-list-example.json +2 -2
- package/bundled/dev-pipeline/reset-bug.sh +304 -0
- package/bundled/dev-pipeline/run-bugfix.sh +55 -8
- package/bundled/dev-pipeline/run-feature.sh +12 -4
- package/bundled/dev-pipeline/run-refactor.sh +5 -2
- package/bundled/dev-pipeline/scripts/init-pipeline.py +19 -5
- package/bundled/dev-pipeline/scripts/update-bug-status.py +2 -2
- package/bundled/dev-pipeline/scripts/update-feature-status.py +6 -6
- package/bundled/dev-pipeline/templates/bug-fix-list-schema.json +111 -31
- package/bundled/dev-pipeline/templates/feature-list-schema.json +5 -5
- package/bundled/dev-pipeline/templates/refactor-list-schema.json +107 -28
- package/bundled/dev-pipeline/tests/test_auto_skip.py +1 -1
- package/bundled/skills/_metadata.json +10 -2
- package/bundled/skills/app-planner/SKILL.md +14 -3
- package/bundled/skills/bug-fix-workflow/SKILL.md +2 -0
- package/bundled/skills/bug-planner/SKILL.md +59 -4
- package/bundled/skills/bugfix-pipeline-launcher/SKILL.md +9 -4
- package/bundled/skills/feature-planner/SKILL.md +73 -1
- package/bundled/skills/feature-planner/references/error-recovery.md +1 -1
- package/bundled/skills/feature-planner/scripts/validate-and-generate.py +7 -6
- package/bundled/skills/feature-workflow/SKILL.md +4 -1
- package/bundled/skills/prizmkit-committer/SKILL.md +1 -0
- package/bundled/skills/prizmkit-deploy/SKILL.md +1 -0
- package/bundled/skills/prizmkit-deploy/assets/deploy-template.md +1 -1
- package/bundled/skills/prizmkit-implement/SKILL.md +1 -1
- package/bundled/skills/prizmkit-implement/references/deploy-guide-protocol.md +4 -4
- package/bundled/skills/prizmkit-plan/SKILL.md +3 -3
- package/bundled/skills/prizmkit-retrospective/SKILL.md +40 -3
- package/bundled/skills/prizmkit-verify/SKILL.md +281 -0
- package/bundled/skills/prizmkit-verify/scripts/verify-light.py +402 -0
- package/bundled/skills/recovery-workflow/SKILL.md +1 -0
- package/bundled/skills/refactor-pipeline-launcher/SKILL.md +7 -3
- package/bundled/skills/refactor-planner/SKILL.md +51 -1
- package/bundled/skills/refactor-workflow/SKILL.md +4 -0
- package/package.json +1 -1
- package/src/scaffold.js +24 -12
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""PrizmKit Light Verification Script — static checks for framework integrity.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python3 verify-light.py # Run all rounds
|
|
6
|
+
python3 verify-light.py --round R1 # Run specific round
|
|
7
|
+
python3 verify-light.py --round R1 R2 R4 # Run multiple rounds
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import argparse
|
|
11
|
+
import json
|
|
12
|
+
import os
|
|
13
|
+
import re
|
|
14
|
+
import subprocess
|
|
15
|
+
import sys
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
|
|
18
|
+
# --- Configuration ---

# Repository root, derived from this file's on-disk location:
# <root>/core/skills/prizmkit-skill/prizmkit-verify/scripts/verify-light.py
# NOTE(review): assumes the script stays at exactly this depth — parents[5]
# will point at the wrong directory (or raise IndexError) if it is moved.
PROJECT_ROOT = Path(__file__).resolve().parents[5]  # core/skills/prizmkit-skill/prizmkit-verify/scripts/verify-light.py → root (5 levels up)

# Layer directories checked by rounds R1–R4 below.
SKILL_LAYER_DIR = PROJECT_ROOT / "core" / "skills" / "prizmkit-skill"          # L1: individual skills
PIPELINE_LAYER_DIR = PROJECT_ROOT / "core" / "skills" / "orchestration-skill" / "pipelines"  # L2: planners/launchers
WORKFLOW_LAYER_DIR = PROJECT_ROOT / "core" / "skills" / "orchestration-skill" / "workflows"  # L3: workflows
DEV_PIPELINE_DIR = PROJECT_ROOT / "dev-pipeline"   # L4: shell/python pipeline scripts and templates
ALL_SKILLS_DIR = PROJECT_ROOT / "core" / "skills"  # union of all skill categories (for cross-ref checks)

# Expected items per layer
# R2/R3 FAIL when any of these names is missing from its layer directory.
EXPECTED_PIPELINE_SKILLS = {
    "app-planner", "feature-planner", "bug-planner", "refactor-planner",
    "feature-pipeline-launcher", "bugfix-pipeline-launcher", "refactor-pipeline-launcher",
}
EXPECTED_WORKFLOW_SKILLS = {
    "feature-workflow", "bug-fix-workflow", "refactor-workflow", "recovery-workflow",
}
|
|
36
|
+
|
|
37
|
+
# --- Helpers ---
|
|
38
|
+
|
|
39
|
+
class Result:
    """Accumulates PASS/WARN/FAIL outcomes across all verification rounds."""

    def __init__(self):
        self.passed = 0
        self.warned = 0
        self.failed = 0
        self.details = []  # ordered list of (status, message) tuples

    def _record(self, label, msg):
        """Append one (label, message) entry to the detail log."""
        self.details.append((label, msg))

    def ok(self, msg):
        """Record a passing check."""
        self.passed += 1
        self._record("PASS", msg)

    def warn(self, msg):
        """Record a non-fatal issue."""
        self.warned += 1
        self._record("WARN", msg)

    def fail(self, msg):
        """Record a fatal issue."""
        self.failed += 1
        self._record("FAIL", msg)

    @property
    def status(self):
        """Overall status: any FAIL wins, then any WARN, otherwise PASS."""
        if self.failed:
            return "FAIL"
        return "WARN" if self.warned else "PASS"

    def summary(self):
        """One-line tally of all recorded outcomes."""
        return f"PASS={self.passed} WARN={self.warned} FAIL={self.failed}"
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def parse_frontmatter(skill_md_path):
    """Extract YAML frontmatter from SKILL.md.

    Returns a flat {key: value} dict from the leading ``---`` block, with
    surrounding double/single quotes stripped from values. Returns an empty
    dict when the file has no frontmatter block.
    """
    raw = skill_md_path.read_text(encoding="utf-8")
    block = re.match(r"^---\n(.*?)\n---", raw, re.DOTALL)
    if block is None:
        return {}
    pairs = {}
    for entry in block.group(1).split("\n"):
        if ":" not in entry:
            continue  # not a key: value line — skipped
        field, _, value = entry.partition(":")
        pairs[field.strip()] = value.strip().strip('"').strip("'")
    return pairs
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def find_skill_dirs(base_dir):
    """Find all directories containing SKILL.md under base_dir.

    Returns a sorted list of matching directories. Returns an empty list when
    base_dir does not exist (or is not a directory) — previously iterdir()
    raised FileNotFoundError, which crashed the whole verification run instead
    of letting the round record a clean FAIL.
    """
    if not base_dir.is_dir():
        return []
    return sorted(
        d for d in base_dir.iterdir()
        if d.is_dir() and (d / "SKILL.md").exists()
    )
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def collect_all_skill_names(base_dir=None):
    """Collect all known skill names across all categories.

    Scans ``base_dir`` (defaults to ALL_SKILLS_DIR for backward
    compatibility) two levels deep: direct skill directories inside each
    category, plus skills nested one level further (e.g. pipelines/,
    workflows/). A directory counts as a skill iff it contains SKILL.md.

    Returns an empty set when the scan root is missing — previously
    iterdir() raised FileNotFoundError and aborted the run.
    """
    root = ALL_SKILLS_DIR if base_dir is None else base_dir
    names = set()
    if not root.is_dir():
        return names
    for category_dir in root.iterdir():
        if not category_dir.is_dir() or category_dir.name.startswith("."):
            continue  # skip files and hidden dirs like .git
        for d in category_dir.iterdir():
            # Direct skill dirs
            if d.is_dir() and (d / "SKILL.md").exists():
                names.add(d.name)
            # Nested (pipelines/, workflows/)
            elif d.is_dir():
                for sub in d.iterdir():
                    if sub.is_dir() and (sub / "SKILL.md").exists():
                        names.add(sub.name)
    return names
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
# --- Round Checks ---
|
|
111
|
+
|
|
112
|
+
def run_r1(result: Result):
    """R1: L1 Skill Layer Light checks.

    For every skill under SKILL_LAYER_DIR, validates:
      1. frontmatter has a `name` matching the directory and a `description`,
      2. every ${SKILL_DIR}/... reference resolves to an existing file,
      3. every /prizmkit-* cross-reference names a known skill;
    then runs the project-wide ESLint and Ruff commands via npm.

    Outcomes are accumulated into `result`; nothing is returned.
    """
    print("\n=== R1: L1 Skill Layer ===")
    skill_dirs = find_skill_dirs(SKILL_LAYER_DIR)
    all_skill_names = collect_all_skill_names()

    if not skill_dirs:
        result.fail("No skill directories found in prizmkit-skill/")
        return

    print(f" Found {len(skill_dirs)} skills")

    for skill_dir in skill_dirs:
        skill_name = skill_dir.name
        skill_md = skill_dir / "SKILL.md"

        # 1. Frontmatter validation
        fm = parse_frontmatter(skill_md)
        if not fm.get("name"):
            result.fail(f"[{skill_name}] Missing 'name' in frontmatter")
        elif fm["name"] != skill_name:
            result.fail(f"[{skill_name}] name '{fm['name']}' != dir '{skill_name}'")
        else:
            result.ok(f"[{skill_name}] Frontmatter name OK")

        if not fm.get("description"):
            result.fail(f"[{skill_name}] Missing 'description' in frontmatter")
        else:
            result.ok(f"[{skill_name}] Frontmatter description OK")

        # 2. ${SKILL_DIR} reference resolution
        content = skill_md.read_text(encoding="utf-8")
        refs = re.findall(r'\$\{SKILL_DIR\}/([^\s`"\')]+)', content)
        for ref in refs:
            resolved = skill_dir / ref
            if resolved.exists():
                result.ok(f"[{skill_name}] Asset ref '{ref}' exists")
            else:
                result.fail(f"[{skill_name}] Asset ref '{ref}' NOT FOUND at {resolved}")

        # 3. Cross-skill references (warn only — may be prose, not a real link)
        cross_refs = re.findall(r'(?<!\w)/prizmkit-[\w-]+(?=[\s`"\',\)])', content)
        for ref in cross_refs:
            ref_name = ref.lstrip("/")
            if ref_name in all_skill_names:
                result.ok(f"[{skill_name}] Cross-ref '{ref_name}' exists")
            else:
                result.warn(f"[{skill_name}] Cross-ref '{ref_name}' not found as skill dir")

    # 4. ESLint (project-wide, via npm; downgraded to WARN if npm is unavailable)
    try:
        lint_result = subprocess.run(
            ["npm", "run", "lint"], capture_output=True, text=True,
            cwd=str(PROJECT_ROOT), timeout=30,
        )
        output = lint_result.stdout + lint_result.stderr
        # BUGFIX: the old pattern r"(\d+) error[^s]" matched only the singular
        # "1 error", and len(findall(...)) counted matches instead of summing
        # the captured numbers — so runs with 2+ errors reported 0 and passed.
        # Sum the counts from both "N error" and "N errors".
        errors = sum(int(n) for n in re.findall(r"(\d+) errors?\b", output))
        if lint_result.returncode == 0 or errors == 0:
            result.ok("ESLint: 0 errors")
        else:
            result.fail(f"ESLint: {errors} errors found")
    except (subprocess.TimeoutExpired, FileNotFoundError) as e:
        result.warn(f"ESLint: could not run ({e})")

    # 5. Ruff (python lint, via npm script)
    try:
        ruff_result = subprocess.run(
            ["npm", "run", "lint:py"], capture_output=True, text=True,
            cwd=str(PROJECT_ROOT), timeout=30,
        )
        if ruff_result.returncode == 0:
            result.ok("Ruff: all checks passed")
        else:
            result.fail(f"Ruff: {ruff_result.stdout.strip()}")
    except (subprocess.TimeoutExpired, FileNotFoundError) as e:
        result.warn(f"Ruff: could not run ({e})")
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def run_r2(result: Result):
    """R2: L2 Pipeline Layer Light checks.

    Verifies, accumulating into `result`:
      - every name in EXPECTED_PIPELINE_SKILLS exists under PIPELINE_LAYER_DIR,
      - each found skill's frontmatter name matches its directory and has a
        description,
      - for each scenario (feature/bugfix/refactor), the JSON schema template's
        `properties.$schema.const` matches the EXPECTED_SCHEMA constant in the
        corresponding init script,
      - each run-*.sh launcher exists and is executable (WARN if present but
        not executable).
    """
    print("\n=== R2: L2 Pipeline Layer ===")
    skill_dirs = find_skill_dirs(PIPELINE_LAYER_DIR)
    found_names = {d.name for d in skill_dirs}

    # Check expected skills present
    for expected in EXPECTED_PIPELINE_SKILLS:
        if expected in found_names:
            result.ok(f"[{expected}] Skill exists")
        else:
            result.fail(f"[{expected}] Expected skill NOT FOUND")

    # Frontmatter validation
    for skill_dir in skill_dirs:
        fm = parse_frontmatter(skill_dir / "SKILL.md")
        if fm.get("name") == skill_dir.name and fm.get("description"):
            result.ok(f"[{skill_dir.name}] Frontmatter OK")
        else:
            result.fail(f"[{skill_dir.name}] Frontmatter invalid: {fm}")

    # Schema version consistency: each (scenario, template schema, init script)
    # triple must agree on the schema version string.
    # NOTE(review): init-bugfix-pipeline.py / init-refactor-pipeline.py are
    # assumed to exist in dev-pipeline/scripts — confirm against the shipped
    # script names.
    scenarios = [
        ("feature", "feature-list-schema.json", "init-pipeline.py"),
        ("bugfix", "bug-fix-list-schema.json", "init-bugfix-pipeline.py"),
        ("refactor", "refactor-list-schema.json", "init-refactor-pipeline.py"),
    ]
    for scenario, schema_file, init_script in scenarios:
        schema_path = DEV_PIPELINE_DIR / "templates" / schema_file
        init_path = DEV_PIPELINE_DIR / "scripts" / init_script

        # Either file missing → FAIL and skip the version comparison.
        if not schema_path.exists():
            result.fail(f"[{scenario}] Schema file missing: {schema_file}")
            continue
        if not init_path.exists():
            result.fail(f"[{scenario}] Init script missing: {init_script}")
            continue

        # Extract schema const value (properties.$schema.const) from the template.
        try:
            schema_data = json.loads(schema_path.read_text())
            schema_const = (
                schema_data.get("properties", {})
                .get("$schema", {})
                .get("const", "NOT_FOUND")
            )
        except json.JSONDecodeError:
            result.fail(f"[{scenario}] Schema file is not valid JSON")
            continue

        # Extract expected schema from init script (EXPECTED_SCHEMA = "...").
        init_content = init_path.read_text()
        match = re.search(r'EXPECTED_SCHEMA\s*=\s*["\']([^"\']+)', init_content)
        init_expected = match.group(1) if match else "NOT_FOUND"

        # Note: two "NOT_FOUND" sentinels also compare equal and report a match.
        if schema_const == init_expected:
            result.ok(f"[{scenario}] Schema version match: {schema_const}")
        else:
            result.fail(f"[{scenario}] Schema version MISMATCH: template={schema_const}, init={init_expected}")

    # Launcher script path references: present + executable bit.
    for scenario, script in [("feature", "run-feature.sh"), ("bugfix", "run-bugfix.sh"), ("refactor", "run-refactor.sh")]:
        script_path = DEV_PIPELINE_DIR / script
        if script_path.exists() and os.access(script_path, os.X_OK):
            result.ok(f"[{scenario}] Script {script} exists and executable")
        elif script_path.exists():
            result.warn(f"[{scenario}] Script {script} exists but NOT executable")
        else:
            result.fail(f"[{scenario}] Script {script} NOT FOUND")
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
def run_r3(result: Result):
    """R3: L3 Workflow Layer Light checks.

    Verifies, accumulating into `result`:
      - every name in EXPECTED_WORKFLOW_SKILLS exists under WORKFLOW_LAYER_DIR,
      - each workflow's frontmatter name matches its directory and has a
        description,
      - backtick-quoted `*-planner` / `*-pipeline-launcher` references point at
        skills that exist in the pipeline layer (WARN otherwise),
      - each workflow mentions at least one CP-<name>-<N> checkpoint marker
        (WARN if none found).
    """
    print("\n=== R3: L3 Workflow Layer ===")
    skill_dirs = find_skill_dirs(WORKFLOW_LAYER_DIR)
    found_names = {d.name for d in skill_dirs}
    # Pipeline-layer names, used to validate cross-layer references below.
    pipeline_names = {d.name for d in find_skill_dirs(PIPELINE_LAYER_DIR)}

    # Check expected skills
    for expected in EXPECTED_WORKFLOW_SKILLS:
        if expected in found_names:
            result.ok(f"[{expected}] Workflow exists")
        else:
            result.fail(f"[{expected}] Expected workflow NOT FOUND")

    for skill_dir in skill_dirs:
        skill_name = skill_dir.name
        skill_md = skill_dir / "SKILL.md"

        # Frontmatter
        fm = parse_frontmatter(skill_md)
        if fm.get("name") == skill_name and fm.get("description"):
            result.ok(f"[{skill_name}] Frontmatter OK")
        else:
            result.fail(f"[{skill_name}] Frontmatter invalid")

        # Pipeline skill references (backtick-quoted planner/launcher names)
        content = skill_md.read_text(encoding="utf-8")
        for pipeline_ref in re.findall(r'`([\w-]+-planner|[\w-]+-pipeline-launcher)`', content):
            if pipeline_ref in pipeline_names:
                result.ok(f"[{skill_name}] Pipeline ref '{pipeline_ref}' exists")
            else:
                # WARN not FAIL: the mention may be prose rather than a live link.
                result.warn(f"[{skill_name}] Pipeline ref '{pipeline_ref}' not found")

        # Checkpoint naming (CP-<word>-<digits> markers; duplicates counted)
        checkpoints = re.findall(r'CP-\w+-\d+', content)
        if checkpoints:
            result.ok(f"[{skill_name}] {len(checkpoints)} checkpoints found (CP-*-N pattern)")
        else:
            result.warn(f"[{skill_name}] No checkpoint markers found")
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
def run_r4(result: Result):
    """R4: L4 Script Layer Light checks.

    Verifies dev-pipeline assets, accumulating into `result`:
      1. every *.sh in DEV_PIPELINE_DIR has the executable bit,
      2. every scripts/*.py byte-compiles with the running interpreter,
      3. each run-*.sh launcher mentions the shared bash libs
         (common.sh / heartbeat.sh / branch.sh),
      4. every templates/*.json parses as JSON,
      5. tests/test_*.py files exist (WARN if missing).
    """
    print("\n=== R4: L4 Script Layer ===")

    # 1. Shell scripts executable
    for sh_file in sorted(DEV_PIPELINE_DIR.glob("*.sh")):
        if os.access(sh_file, os.X_OK):
            result.ok(f"[{sh_file.name}] Executable")
        else:
            result.fail(f"[{sh_file.name}] NOT executable")

    # 2. Python scripts compile
    scripts_dir = DEV_PIPELINE_DIR / "scripts"
    if scripts_dir.exists():
        for py_file in sorted(scripts_dir.glob("*.py")):
            try:
                compile_result = subprocess.run(
                    [sys.executable, "-m", "py_compile", str(py_file)],
                    capture_output=True, text=True, timeout=10,
                )
                # BUGFIX: the return code was previously ignored, so scripts
                # with syntax errors were still reported as "Compiles OK"
                # (subprocess.run does not raise on non-zero exit without
                # check=True).
                if compile_result.returncode == 0:
                    result.ok(f"[{py_file.name}] Compiles OK")
                else:
                    result.fail(f"[{py_file.name}] Does NOT compile: {compile_result.stderr.strip()}")
            except subprocess.TimeoutExpired:
                result.warn(f"[{py_file.name}] Compile check timed out")

    # 3. Bash lib sourcing (substring check — does not parse the `source` line)
    for run_script in ["run-feature.sh", "run-bugfix.sh", "run-refactor.sh"]:
        script_path = DEV_PIPELINE_DIR / run_script
        if not script_path.exists():
            result.fail(f"[{run_script}] NOT FOUND")
            continue
        content = script_path.read_text(encoding="utf-8")
        for lib in ["common.sh", "heartbeat.sh", "branch.sh"]:
            if lib in content:
                result.ok(f"[{run_script}] Sources {lib}")
            else:
                result.fail(f"[{run_script}] Does NOT source {lib}")

    # 4. JSON templates valid
    templates_dir = DEV_PIPELINE_DIR / "templates"
    if templates_dir.exists():
        for json_file in sorted(templates_dir.glob("*.json")):
            try:
                json.loads(json_file.read_text())
                result.ok(f"[{json_file.name}] Valid JSON")
            except json.JSONDecodeError as e:
                result.fail(f"[{json_file.name}] Invalid JSON: {e}")

    # 5. Test file existence
    tests_dir = DEV_PIPELINE_DIR / "tests"
    if tests_dir.exists():
        test_files = list(tests_dir.glob("test_*.py"))
        if test_files:
            result.ok(f"Test files found: {len(test_files)}")
        else:
            result.warn("No test_*.py files found in dev-pipeline/tests/")
    else:
        result.warn("dev-pipeline/tests/ directory not found")
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
# --- Main ---
|
|
361
|
+
|
|
362
|
+
def main():
    """CLI entry point: run the requested rounds, print a report, return exit code.

    Returns 0 when no check failed, 1 otherwise (WARNs alone do not fail).
    """
    parser = argparse.ArgumentParser(description="PrizmKit Light Verification")
    parser.add_argument("--round", nargs="*", default=["R1", "R2", "R3", "R4"],
                        help="Rounds to run (R1, R2, R3, R4)")
    args = parser.parse_args()

    result = Result()
    runners = {"R1": run_r1, "R2": run_r2, "R3": run_r3, "R4": run_r4}

    # Round names are case-insensitive; unknown names are reported and skipped.
    for round_name in (r.upper() for r in args.round):
        runner = runners.get(round_name)
        if runner is None:
            print(f" Unknown round: {round_name}")
        else:
            runner(result)

    # Summary banner
    banner = "=" * 60
    print("\n" + banner)
    print(f" RESULT: {result.status}")
    print(f" {result.summary()}")
    print(banner)

    # Detail sections: failures first, then warnings (same order as recorded).
    for label, header in (("FAIL", "\n FAILURES:"), ("WARN", "\n WARNINGS:")):
        messages = [msg for status, msg in result.details if status == label]
        if messages:
            print(header)
            for msg in messages:
                print(f" {msg}")

    print()
    return 0 if result.failed == 0 else 1


if __name__ == "__main__":
    sys.exit(main())
|
|
@@ -161,6 +161,7 @@ If the user declines, suggest alternatives:
|
|
|
161
161
|
1. **Read the workflow's SKILL.md** from `core/skills/orchestration-skill/workflows/{workflow-type}/SKILL.md`
|
|
162
162
|
2. **Read existing artifacts** to restore context — check in this order for the most efficient recovery:
|
|
163
163
|
- If `session-summary.md` exists in the artifact directory → read it first. It provides a lightweight summary of completed tasks, key decisions, active TRAPS, and remaining work from the interrupted session.
|
|
164
|
+
> **Note**: `session-summary.md` is generated by the pipeline's bootstrap prompt system (`generate-bootstrap-prompt.py`) at the end of each AI CLI session. It may not exist if the session crashed before completion or if running in interactive (non-pipeline) mode. If absent, reconstruct context from the artifacts below.
|
|
164
165
|
- Then read remaining artifacts: spec, plan, code diffs, bug descriptions, etc.
|
|
165
166
|
3. **Read relevant `.prizm-docs/`** — load project context (L0 root, relevant L1). If `session-summary.md` was found, use its "Files Changed" section to focus L1/L2 loading on affected modules only.
|
|
166
167
|
|
|
@@ -135,16 +135,20 @@ Detect user intent from their message, then follow the corresponding workflow:
|
|
|
135
135
|
|
|
136
136
|
Use `AskUserQuestion` to present the following configuration choices. Each question is a separate selectable option:
|
|
137
137
|
|
|
138
|
-
**Question 1 —
|
|
138
|
+
**Question 1 — Critic review** (multiSelect: false):
|
|
139
|
+
- Off (default) — Skip adversarial review
|
|
140
|
+
- On — Enable critic review after refactoring (+3-8 min/refactor for critical/high complexity)
|
|
141
|
+
|
|
142
|
+
**Question 2 — Verbose logging** (multiSelect: false):
|
|
139
143
|
- On (default) — Detailed AI session logs including tool calls and subagent activity
|
|
140
144
|
- Off — Minimal logging
|
|
141
145
|
|
|
142
|
-
**Question
|
|
146
|
+
**Question 3 — Max retries** (multiSelect: false):
|
|
143
147
|
- 3 (default)
|
|
144
148
|
- 1
|
|
145
149
|
- 5
|
|
146
150
|
|
|
147
|
-
**Question
|
|
151
|
+
**Question 4 — Session timeout** (multiSelect: false):
|
|
148
152
|
- None (default) — No timeout
|
|
149
153
|
- 30 min — `SESSION_TIMEOUT=1800`
|
|
150
154
|
- 1 hour — `SESSION_TIMEOUT=3600`
|
|
@@ -302,7 +302,31 @@ If issues found, discuss with user and resolve before proceeding.
|
|
|
302
302
|
- `type` field must be one of: `extract`, `rename`, `restructure`, `simplify`, `decouple`, `migrate`
|
|
303
303
|
- Descriptions minimum 15 words (error). Recommended: 30/50/80 words for low/medium/high complexity (warning).
|
|
304
304
|
- `model` field is optional — omitting it means the pipeline uses $MODEL env or CLI default
|
|
305
|
-
- `scope` array
|
|
305
|
+
- `scope` object with nested structure: `files` array (target file paths) and `modules` array (module names)
|
|
306
|
+
|
|
307
|
+
## Adversarial Critic Review
|
|
308
|
+
|
|
309
|
+
All refactoring items support optional critic review for additional quality assurance. The critic mechanism helps validate that refactoring preserves behavior while improving code quality.
|
|
310
|
+
|
|
311
|
+
### Default Critic Behavior
|
|
312
|
+
|
|
313
|
+
| Priority | Complexity | `critic` | `critic_count` | Rationale |
|
|
314
|
+
|----------|-----------|----------|----------------|-----------|
|
|
315
|
+
| critical | high | `true` | `3` | Multi-critic voting for high-risk refactors |
|
|
316
|
+
| critical | medium/low | `true` | `1` | Single critic for critical-priority refactors |
|
|
317
|
+
| high | high | `true` | `1` | Single critic for high-complexity refactors |
|
|
318
|
+
| high | medium/low | `false` | (omitted) | Skip critic for simpler high-priority items |
|
|
319
|
+
| medium | any | `false` | (omitted) | Skip critic for medium-priority refactors |
|
|
320
|
+
| low | any | `false` | (omitted) | Skip critic for low-priority refactors |
|
|
321
|
+
|
|
322
|
+
- `critic: true` — Enable adversarial review after refactoring completion
|
|
323
|
+
- `critic_count: 1` — Single critic agent reviews the refactor
|
|
324
|
+
- `critic_count: 3` — Three critic agents vote on the refactor quality
|
|
325
|
+
- Critic verifies: behavior preservation strategy followed, code quality improved, no regressions, tests passing
|
|
326
|
+
|
|
327
|
+
**User Override**: During planning phases, users can opt to enable/disable critic on a per-item basis.
|
|
328
|
+
|
|
329
|
+
---
|
|
306
330
|
|
|
307
331
|
## Fast Path
|
|
308
332
|
|
|
@@ -342,6 +366,32 @@ AI: [Validates immediately]
|
|
|
342
366
|
AI: "Ready to proceed to dev-pipeline."
|
|
343
367
|
```
|
|
344
368
|
|
|
369
|
+
## Browser Verification
|
|
370
|
+
|
|
371
|
+
**Browser verification is a feature-pipeline capability only.** Refactors use `behavior_preservation` strategy instead to ensure no external behavior changes:
|
|
372
|
+
|
|
373
|
+
- `strategy: test-gate` — Rely on existing test suite. Pipeline runs tests before and after refactoring.
|
|
374
|
+
- `strategy: snapshot` — Compare behavior before/after refactoring using executable snapshots (outputs, API responses, side effects)
|
|
375
|
+
- `strategy: manual` — Require human verification that behavior is preserved
|
|
376
|
+
|
|
377
|
+
For refactors that modify UI code (e.g., component restructuring), the test-gate or snapshot strategy ensures visual appearance is preserved. You can optionally note browser verification needs in your description or acceptance criteria:
|
|
378
|
+
|
|
379
|
+
Example:
|
|
380
|
+
```
|
|
381
|
+
Refactor Title: Extract UserProfile component from AccountSettings
|
|
382
|
+
Type: extract
|
|
383
|
+
Strategy: snapshot
|
|
384
|
+
Acceptance Criteria:
|
|
385
|
+
1. UserProfile component renders identically to inline version (compare snapshots)
|
|
386
|
+
2. All props are correctly forwarded (unit tests pass)
|
|
387
|
+
3. No visual regression (screenshot comparison)
|
|
388
|
+
4. Component is reusable in other views
|
|
389
|
+
```
|
|
390
|
+
|
|
391
|
+
The refactor pipeline AI will use the snapshot strategy to verify external behavior is preserved during refactoring.
|
|
392
|
+
|
|
393
|
+
---
|
|
394
|
+
|
|
345
395
|
## Refactoring-Specific Features
|
|
346
396
|
|
|
347
397
|
### Behavior Preservation Check
|
|
@@ -235,6 +235,8 @@ Present this summary to the user and get explicit confirmation before proceeding
|
|
|
235
235
|
- Pass the structured goals summary as input — NOT the raw user conversation
|
|
236
236
|
- For new refactoring: standard planning mode
|
|
237
237
|
- For existing projects with `--incremental`: incremental planning mode
|
|
238
|
+
- **Input**: Markdown goals summary (refactor targets, scope, behavior preservation strategy)
|
|
239
|
+
- **Output**: `refactor-list.json` (schema: `dev-pipeline-refactor-list-v1`) containing `project_name`, `refactors[]` with id (R-NNN), title, description, scope, type, priority, complexity, behavior_preservation, acceptance_criteria, dependencies, status
|
|
238
240
|
|
|
239
241
|
2. **Interactive planning** (if refactor-planner requires clarification):
|
|
240
242
|
- Because Phase 1 was thorough, refactor-planner should need minimal clarification
|
|
@@ -267,9 +269,11 @@ Present this summary to the user and get explicit confirmation before proceeding
|
|
|
267
269
|
```
|
|
268
270
|
|
|
269
271
|
2. **Invoke `refactor-pipeline-launcher` skill**:
|
|
272
|
+
- **Input**: Path to validated `refactor-list.json`
|
|
270
273
|
- The launcher handles all prerequisites checks
|
|
271
274
|
- The launcher presents execution mode choices to the user (foreground/background/manual)
|
|
272
275
|
- Do NOT duplicate execution mode selection here — let the launcher handle it
|
|
276
|
+
- **Output**: PID/status, log file path, execution mode selected
|
|
273
277
|
- Returns PID/status and log file location
|
|
274
278
|
|
|
275
279
|
3. **Verify launch success**:
|
package/package.json
CHANGED
package/src/scaffold.js
CHANGED
|
@@ -414,7 +414,17 @@ export async function installSettings(platform, projectRoot, options, dryRun) {
|
|
|
414
414
|
'Bash(jq *)',
|
|
415
415
|
];
|
|
416
416
|
if (options.pipeline) {
|
|
417
|
-
|
|
417
|
+
// 动态扫描 pipeline 目录中的 .sh 入口脚本(run-*、launch-*),自动添加执行权限
|
|
418
|
+
const pipelineDir = getPipelineDir();
|
|
419
|
+
try {
|
|
420
|
+
const pipelineEntries = await fs.readdir(pipelineDir);
|
|
421
|
+
const entryScripts = pipelineEntries.filter(f => /^(run-|launch-|retry-|reset-).*\.sh$/.test(f));
|
|
422
|
+
for (const script of entryScripts) {
|
|
423
|
+
permissions.push(`Bash(./dev-pipeline/${script} *)`);
|
|
424
|
+
}
|
|
425
|
+
} catch {
|
|
426
|
+
// Fallback: if readdir fails, add no pipeline permissions
|
|
427
|
+
}
|
|
418
428
|
}
|
|
419
429
|
|
|
420
430
|
const settings = {
|
|
@@ -600,11 +610,15 @@ export function resolvePipelineFileList() {
|
|
|
600
610
|
const pipelineSource = getPipelineDir();
|
|
601
611
|
if (!pipelineSource || !fs.pathExistsSync(pipelineSource)) return [];
|
|
602
612
|
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
613
|
+
// 动态扫描,与 installPipeline() 保持一致的排除规则
|
|
614
|
+
const EXCLUDE = new Set(['tests', 'docs', '__pycache__', 'node_modules', '.DS_Store']);
|
|
615
|
+
let allEntries;
|
|
616
|
+
try {
|
|
617
|
+
allEntries = fs.readdirSync(pipelineSource);
|
|
618
|
+
} catch {
|
|
619
|
+
return [];
|
|
620
|
+
}
|
|
621
|
+
const items = allEntries.filter(name => !EXCLUDE.has(name) && !name.startsWith('.'));
|
|
608
622
|
|
|
609
623
|
const files = [];
|
|
610
624
|
for (const item of items) {
|
|
@@ -636,12 +650,10 @@ export async function installPipeline(projectRoot, dryRun, { forceOverwrite = fa
|
|
|
636
650
|
await fs.ensureDir(path.join(pipelineTarget, 'state'));
|
|
637
651
|
await fs.ensureDir(path.join(pipelineTarget, 'bugfix-state'));
|
|
638
652
|
|
|
639
|
-
//
|
|
640
|
-
const
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
'lib', 'scripts', 'templates', 'assets', 'README.md', '.gitignore',
|
|
644
|
-
];
|
|
653
|
+
// 动态扫描 bundled dev-pipeline 目录,排除不应安装到用户项目的内容
|
|
654
|
+
const EXCLUDE = new Set(['tests', 'docs', '__pycache__', 'node_modules', '.DS_Store']);
|
|
655
|
+
const allEntries = await fs.readdir(pipelineSource);
|
|
656
|
+
const items = allEntries.filter(name => !EXCLUDE.has(name) && !name.startsWith('.'));
|
|
645
657
|
|
|
646
658
|
let installedCount = 0;
|
|
647
659
|
const installedFiles = [];
|