prizmkit 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundled/VERSION.json +5 -0
- package/bundled/adapters/claude/agent-adapter.js +108 -0
- package/bundled/adapters/claude/command-adapter.js +104 -0
- package/bundled/adapters/claude/paths.js +35 -0
- package/bundled/adapters/claude/rules-adapter.js +77 -0
- package/bundled/adapters/claude/settings-adapter.js +73 -0
- package/bundled/adapters/claude/team-adapter.js +183 -0
- package/bundled/adapters/codebuddy/agent-adapter.js +43 -0
- package/bundled/adapters/codebuddy/paths.js +29 -0
- package/bundled/adapters/codebuddy/settings-adapter.js +47 -0
- package/bundled/adapters/codebuddy/skill-adapter.js +68 -0
- package/bundled/adapters/codebuddy/team-adapter.js +46 -0
- package/bundled/adapters/shared/frontmatter.js +77 -0
- package/bundled/agents/prizm-dev-team-coordinator.md +142 -0
- package/bundled/agents/prizm-dev-team-dev.md +99 -0
- package/bundled/agents/prizm-dev-team-pm.md +114 -0
- package/bundled/agents/prizm-dev-team-reviewer.md +119 -0
- package/bundled/dev-pipeline/README.md +482 -0
- package/bundled/dev-pipeline/assets/feature-list-example.json +147 -0
- package/bundled/dev-pipeline/assets/prizm-dev-team-integration.md +138 -0
- package/bundled/dev-pipeline/launch-bugfix-daemon.sh +425 -0
- package/bundled/dev-pipeline/launch-daemon.sh +549 -0
- package/bundled/dev-pipeline/reset-feature.sh +209 -0
- package/bundled/dev-pipeline/retry-bug.sh +344 -0
- package/bundled/dev-pipeline/retry-feature.sh +338 -0
- package/bundled/dev-pipeline/run-bugfix.sh +638 -0
- package/bundled/dev-pipeline/run.sh +845 -0
- package/bundled/dev-pipeline/scripts/check-session-status.py +158 -0
- package/bundled/dev-pipeline/scripts/detect-stuck.py +385 -0
- package/bundled/dev-pipeline/scripts/generate-bootstrap-prompt.py +598 -0
- package/bundled/dev-pipeline/scripts/generate-bugfix-prompt.py +402 -0
- package/bundled/dev-pipeline/scripts/init-bugfix-pipeline.py +294 -0
- package/bundled/dev-pipeline/scripts/init-dev-team.py +134 -0
- package/bundled/dev-pipeline/scripts/init-pipeline.py +335 -0
- package/bundled/dev-pipeline/scripts/update-bug-status.py +748 -0
- package/bundled/dev-pipeline/scripts/update-feature-status.py +1076 -0
- package/bundled/dev-pipeline/templates/bootstrap-prompt.md +262 -0
- package/bundled/dev-pipeline/templates/bug-fix-list-schema.json +159 -0
- package/bundled/dev-pipeline/templates/bugfix-bootstrap-prompt.md +291 -0
- package/bundled/dev-pipeline/templates/feature-list-schema.json +112 -0
- package/bundled/dev-pipeline/templates/session-status-schema.json +77 -0
- package/bundled/skills/_metadata.json +267 -0
- package/bundled/skills/app-planner/SKILL.md +580 -0
- package/bundled/skills/app-planner/assets/planning-guide.md +313 -0
- package/bundled/skills/app-planner/scripts/validate-and-generate.py +758 -0
- package/bundled/skills/bug-planner/SKILL.md +235 -0
- package/bundled/skills/bugfix-pipeline-launcher/SKILL.md +252 -0
- package/bundled/skills/dev-pipeline-launcher/SKILL.md +223 -0
- package/bundled/skills/prizm-kit/SKILL.md +151 -0
- package/bundled/skills/prizm-kit/assets/claude-md-template.md +38 -0
- package/bundled/skills/prizm-kit/assets/codebuddy-md-template.md +35 -0
- package/bundled/skills/prizm-kit/assets/hooks/prizm-commit-hook.json +15 -0
- package/bundled/skills/prizmkit-adr-manager/SKILL.md +68 -0
- package/bundled/skills/prizmkit-adr-manager/assets/adr-template.md +26 -0
- package/bundled/skills/prizmkit-analyze/SKILL.md +194 -0
- package/bundled/skills/prizmkit-api-doc-generator/SKILL.md +56 -0
- package/bundled/skills/prizmkit-bug-fix-workflow/SKILL.md +351 -0
- package/bundled/skills/prizmkit-bug-reproducer/SKILL.md +62 -0
- package/bundled/skills/prizmkit-ci-cd-generator/SKILL.md +54 -0
- package/bundled/skills/prizmkit-clarify/SKILL.md +52 -0
- package/bundled/skills/prizmkit-code-review/SKILL.md +70 -0
- package/bundled/skills/prizmkit-committer/SKILL.md +117 -0
- package/bundled/skills/prizmkit-db-migration/SKILL.md +65 -0
- package/bundled/skills/prizmkit-dependency-health/SKILL.md +123 -0
- package/bundled/skills/prizmkit-deployment-strategy/SKILL.md +58 -0
- package/bundled/skills/prizmkit-error-triage/SKILL.md +55 -0
- package/bundled/skills/prizmkit-implement/SKILL.md +47 -0
- package/bundled/skills/prizmkit-init/SKILL.md +156 -0
- package/bundled/skills/prizmkit-log-analyzer/SKILL.md +55 -0
- package/bundled/skills/prizmkit-monitoring-setup/SKILL.md +75 -0
- package/bundled/skills/prizmkit-onboarding-generator/SKILL.md +70 -0
- package/bundled/skills/prizmkit-perf-profiler/SKILL.md +55 -0
- package/bundled/skills/prizmkit-plan/SKILL.md +54 -0
- package/bundled/skills/prizmkit-plan/assets/plan-template.md +37 -0
- package/bundled/skills/prizmkit-prizm-docs/SKILL.md +140 -0
- package/bundled/skills/prizmkit-prizm-docs/assets/PRIZM-SPEC.md +943 -0
- package/bundled/skills/prizmkit-retrospective/SKILL.md +79 -0
- package/bundled/skills/prizmkit-security-audit/SKILL.md +130 -0
- package/bundled/skills/prizmkit-specify/SKILL.md +52 -0
- package/bundled/skills/prizmkit-specify/assets/spec-template.md +37 -0
- package/bundled/skills/prizmkit-summarize/SKILL.md +51 -0
- package/bundled/skills/prizmkit-summarize/assets/registry-template.md +18 -0
- package/bundled/skills/prizmkit-tasks/SKILL.md +50 -0
- package/bundled/skills/prizmkit-tasks/assets/tasks-template.md +21 -0
- package/bundled/skills/prizmkit-tech-debt-tracker/SKILL.md +139 -0
- package/bundled/team/prizm-dev-team.json +47 -0
- package/bundled/templates/claude-md-template.md +38 -0
- package/bundled/templates/codebuddy-md-template.md +35 -0
- package/package.json +2 -1
- package/src/scaffold.js +1 -1
|
@@ -0,0 +1,598 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Generate a session-specific bootstrap prompt from template and feature list.
|
|
3
|
+
|
|
4
|
+
Reads a bootstrap-prompt.md template and a feature-list.json, resolves all
|
|
5
|
+
{{PLACEHOLDER}} variables, handles conditional blocks, and writes the rendered
|
|
6
|
+
prompt to the specified output path.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python3 generate-bootstrap-prompt.py \
|
|
10
|
+
--feature-list <path> --feature-id <id> \
|
|
11
|
+
--session-id <id> --run-id <id> \
|
|
12
|
+
--retry-count <n> --resume-phase <n|null> \
|
|
13
|
+
--output <path>
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import argparse
|
|
17
|
+
import json
|
|
18
|
+
import os
|
|
19
|
+
import re
|
|
20
|
+
import sys
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
DEFAULT_MAX_RETRIES = 3
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def parse_args():
    """Parse and return the command-line arguments for this generator."""
    parser = argparse.ArgumentParser(
        description=(
            "Generate a session-specific bootstrap prompt from a template "
            "and feature-list.json."
        )
    )

    # Required arguments, declared table-style: (flag, help text).
    required_flags = [
        ("--feature-list", "Path to feature-list.json"),
        ("--feature-id", "Feature ID to generate prompt for (e.g. F-001)"),
        ("--session-id", "Session ID for this pipeline session"),
        ("--run-id", "Pipeline run ID"),
        ("--retry-count", "Current retry count"),
        ("--resume-phase", 'Phase to resume from, or "null" for fresh start'),
    ]
    for flag, help_text in required_flags:
        parser.add_argument(flag, required=True, help=help_text)

    parser.add_argument(
        "--state-dir",
        default=None,
        help="State directory path for reading previous session info",
    )
    parser.add_argument(
        "--output",
        required=True,
        help="Output path for the rendered prompt",
    )
    parser.add_argument(
        "--template",
        default=None,
        help=(
            "Custom template path. Defaults to "
            "{script_dir}/../templates/bootstrap-prompt.md"
        ),
    )
    parser.add_argument(
        "--mode",
        choices=["lite", "standard", "full"],
        default=None,
        help="Override pipeline mode (default: auto-detect from complexity)",
    )
    return parser.parse_args()
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def load_json_file(path):
    """Parse a JSON file and return a ``(data, error)`` pair.

    Exactly one element is None: on success the error is None; on any
    failure the data is None and the error is a human-readable message.
    """
    resolved = os.path.abspath(path)
    if not os.path.isfile(resolved):
        return None, "File not found: {}".format(resolved)
    try:
        with open(resolved, "r", encoding="utf-8") as handle:
            return json.load(handle), None
    except json.JSONDecodeError as exc:
        return None, "Invalid JSON: {}".format(str(exc))
    except IOError as exc:
        return None, "Cannot read file: {}".format(str(exc))
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def read_text_file(path):
    """Return a ``(text, error)`` pair for the file at *path*.

    On success the error is None; on failure the text is None and the
    error is a human-readable message.
    """
    resolved = os.path.abspath(path)
    if not os.path.isfile(resolved):
        return None, "File not found: {}".format(resolved)
    try:
        with open(resolved, "r", encoding="utf-8") as handle:
            return handle.read(), None
    except IOError as exc:
        return None, "Cannot read file: {}".format(str(exc))
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def find_feature(features, feature_id):
    """Return the feature dict whose "id" equals *feature_id*, else None.

    Non-dict entries in *features* are skipped.
    """
    matches = (
        entry for entry in features
        if isinstance(entry, dict) and entry.get("id") == feature_id
    )
    return next(matches, None)
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def compute_feature_slug(feature_id, title):
    """Compute the prizmkit feature slug: ###-kebab-case-name.

    e.g. F-001 + "Project Infrastructure Setup" ->
    "001-project-infrastructure-setup". The prizmkit skills use this slug
    to create per-feature directories.

    Generalized: digits are extracted from the ID wherever they appear
    (so "FEAT-12" works, not only "F-"/"f-" prefixes), and a title with
    no usable characters no longer yields a malformed trailing hyphen.
    """
    # Extract the numeric part of the ID (e.g. "F-001" -> "001") and pad
    # to 3 digits. Fall back to the raw ID if it contains no digits.
    digits = re.sub(r"\D", "", feature_id)
    numeric = (digits or feature_id).zfill(3)

    # Kebab-case the title: lowercase, strip punctuation, hyphenate spaces.
    slug = title.lower()
    slug = re.sub(r"[^a-z0-9\s-]", "", slug)    # remove non-alphanumeric
    slug = re.sub(r"[\s]+", "-", slug.strip())  # spaces to hyphens
    slug = re.sub(r"-+", "-", slug)             # collapse multiple hyphens
    slug = slug.strip("-")

    # An empty title would otherwise produce e.g. "001-"; return just the
    # zero-padded number in that case.
    if not slug:
        return numeric

    return "{}-{}".format(numeric, slug)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def format_acceptance_criteria(criteria):
    """Render acceptance criteria as a markdown bullet list.

    Falls back to a placeholder bullet when *criteria* is empty or None.
    """
    if not criteria:
        return "- (none specified)"
    return "\n".join("- {}".format(entry) for entry in criteria)
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def format_global_context(global_context):
    """Render the global_context mapping as a sorted markdown key/value list.

    Falls back to a placeholder bullet when the mapping is empty or None.
    """
    if not global_context:
        return "- (none specified)"
    return "\n".join(
        "- **{}**: {}".format(key, value)
        for key, value in sorted(global_context.items())
    )
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def get_completed_dependencies(features, feature):
    """List *feature*'s dependencies whose status is "completed".

    Returns a markdown bullet list, or a placeholder line when the
    feature has no dependencies / none are completed yet.
    """
    dep_ids = feature.get("dependencies", [])
    if not dep_ids:
        return "- (no dependencies)"

    # Index features by ID for O(1) dependency lookup.
    by_id = {
        entry["id"]: entry
        for entry in features
        if isinstance(entry, dict) and "id" in entry
    }

    bullets = []
    for dep_id in dep_ids:
        entry = by_id.get(dep_id)
        if entry and entry.get("status") == "completed":
            bullets.append("- {} - {} (completed)".format(
                dep_id, entry.get("title", "Untitled")
            ))

    return "\n".join(bullets) if bullets else "- (no completed dependencies yet)"
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def get_prev_session_status(state_dir, feature_id):
    """Summarise the previous pipeline session for *feature_id*.

    Reads <state_dir>/features/<id>/status.json to find the last session
    ID, then that session's session-status.json. Returns a one-line
    human-readable summary, or an "N/A (...)" string whenever usable
    previous-session information is missing or unreadable.
    """
    if not state_dir:
        return "N/A (first run)"

    feature_dir = os.path.join(state_dir, "features", feature_id)

    # The feature-level status file points at the most recent session.
    status_file = os.path.join(feature_dir, "status.json")
    if not os.path.isfile(status_file):
        return "N/A (first run)"
    try:
        with open(status_file, "r", encoding="utf-8") as handle:
            feature_status = json.load(handle)
    except (json.JSONDecodeError, IOError):
        return "N/A (could not read feature status)"

    prev_session = feature_status.get("last_session_id")
    if not prev_session:
        return "N/A (first run)"

    # Pull the detailed status written by that session.
    session_file = os.path.join(
        feature_dir, "sessions", prev_session, "session-status.json"
    )
    if not os.path.isfile(session_file):
        return "N/A (previous session status file not found)"
    try:
        with open(session_file, "r", encoding="utf-8") as handle:
            session_data = json.load(handle)
    except (json.JSONDecodeError, IOError):
        return "N/A (could not read previous session status)"

    summary = "{} (checkpoint: {}, last phase: {})".format(
        session_data.get("status", "unknown"),
        session_data.get("checkpoint_reached", "none"),
        session_data.get("current_phase", "unknown"),
    )
    failures = session_data.get("errors", [])
    if failures:
        summary += " — errors: {}".format("; ".join(str(e) for e in failures))
    return summary
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def resolve_project_root(script_dir):
    """Return the absolute project root, two levels above *script_dir*.

    The script lives at dev-pipeline/scripts/, so the project root is
    the grandparent of the script directory.
    """
    return os.path.abspath(os.path.dirname(os.path.dirname(script_dir)))
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def process_conditional_blocks(content, resume_phase):
    """Resolve {{IF_FRESH_START}} / {{IF_RESUME}} conditional blocks.

    When *resume_phase* is the literal string "null" the session is a
    fresh start: the FRESH_START block's body is kept (its marker tags
    are stripped) and the entire RESUME block is removed. For any other
    value the opposite applies.

    NOTE: {{IF_INIT_*}} and {{IF_MODE_*}} blocks are handled separately
    by process_mode_blocks(), not here (the original docstring claimed
    otherwise).
    """
    is_resume = resume_phase != "null"
    keep, drop = ("RESUME", "FRESH_START") if is_resume else ("FRESH_START", "RESUME")

    # Strip only the marker tags of the block we keep (plus a trailing
    # newline so no blank line is left behind) ...
    content = re.sub(r"\{\{IF_%s\}\}\n?" % keep, "", content)
    content = re.sub(r"\{\{END_IF_%s\}\}\n?" % keep, "", content)
    # ... and delete the other block wholesale, tags and body together.
    content = re.sub(
        r"\{\{IF_%s\}\}.*?\{\{END_IF_%s\}\}\n?" % (drop, drop),
        "", content, flags=re.DOTALL,
    )
    return content
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def process_mode_blocks(content, pipeline_mode, init_done):
    """Resolve {{IF_MODE_*}} and {{IF_INIT_*}} conditional blocks.

    The block matching *pipeline_mode* (and the init block matching
    *init_done*) is kept with its marker tags stripped; every other
    block is removed entirely, body included.
    """
    def strip_tags(text, open_tag, close_tag):
        # Keep the block body: drop only the marker tags (with the
        # trailing newline when present, so no blank line is left).
        for tag in (open_tag, close_tag):
            text = text.replace(tag + "\n", "").replace(tag, "")
        return text

    def drop_block(text, open_tag, close_tag):
        # Remove the whole block, tags and body together.
        pattern = re.escape(open_tag) + r".*?" + re.escape(close_tag) + r"\n?"
        return re.sub(pattern, "", text, flags=re.DOTALL)

    # Pipeline-mode blocks: keep exactly one of lite/standard/full.
    for candidate in ("lite", "standard", "full"):
        open_tag = "{{IF_MODE_" + candidate.upper() + "}}"
        close_tag = "{{END_IF_MODE_" + candidate.upper() + "}}"
        if candidate == pipeline_mode:
            content = strip_tags(content, open_tag, close_tag)
        else:
            content = drop_block(content, open_tag, close_tag)

    # Init blocks: keep DONE or NEEDED depending on init_done.
    if init_done:
        kept, dropped = "INIT_DONE", "INIT_NEEDED"
    else:
        kept, dropped = "INIT_NEEDED", "INIT_DONE"
    content = strip_tags(
        content, "{{IF_" + kept + "}}", "{{END_IF_" + kept + "}}"
    )
    content = drop_block(
        content, "{{IF_" + dropped + "}}", "{{END_IF_" + dropped + "}}"
    )

    return content
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
def detect_init_status(project_root):
    """Return True when both PrizmKit init marker files exist on disk."""
    markers = (
        os.path.join(project_root, ".prizm-docs", "root.prizm"),
        os.path.join(project_root, ".prizmkit", "config.json"),
    )
    return all(os.path.isfile(marker) for marker in markers)
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
def detect_existing_artifacts(project_root, feature_slug):
    """Report which planning artifacts exist under .prizmkit/specs/<slug>/.

    Returns a dict with keys: has_spec, has_plan, has_tasks, all_complete.
    """
    specs_dir = os.path.join(project_root, ".prizmkit", "specs", feature_slug)

    def present(filename):
        # One artifact file per planning stage, all in the same dir.
        return os.path.isfile(os.path.join(specs_dir, filename))

    result = {
        "has_spec": present("spec.md"),
        "has_plan": present("plan.md"),
        "has_tasks": present("tasks.md"),
    }
    result["all_complete"] = (
        result["has_spec"] and result["has_plan"] and result["has_tasks"]
    )
    return result
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
def determine_pipeline_mode(complexity):
    """Map an estimated_complexity value to a pipeline mode name.

    low -> lite, medium -> standard, high/critical -> full; anything
    unrecognised falls back to "standard".
    """
    if complexity == "low":
        return "lite"
    if complexity in ("high", "critical"):
        return "full"
    return "standard"
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
def build_replacements(args, feature, features, global_context, script_dir):
    """Build the full dict of placeholder -> replacement value.

    Returns ``(replacements, effective_resume)`` where *replacements*
    maps every ``{{PLACEHOLDER}}`` token used by the bootstrap template
    to its string value, and *effective_resume* is the resume phase after
    auto-detection (see below).
    """
    project_root = resolve_project_root(script_dir)

    # Resolve paths - platform-aware agent/team resolution.
    # PRIZMKIT_PLATFORM may force "claude" or "codebuddy"; empty means
    # auto-detect below.
    platform = os.environ.get("PRIZMKIT_PLATFORM", "")
    home_dir = os.path.expanduser("~")

    # Auto-detect platform if not set: presence of .claude/agents/ in the
    # project root selects Claude Code, otherwise CodeBuddy is assumed.
    if not platform:
        if os.path.isdir(os.path.join(project_root, ".claude", "agents")):
            platform = "claude"
        else:
            platform = "codebuddy"

    if platform == "claude":
        # Claude Code: agents in .claude/agents/, no native team config
        agents_dir = os.path.join(project_root, ".claude", "agents")
        team_config_path = os.path.join(
            project_root, ".claude", "team-info.json",
        )
    else:
        # CodeBuddy: agents in .codebuddy/agents/, team in ~/.codebuddy/teams/
        agents_dir = os.path.join(project_root, ".codebuddy", "agents")
        team_config_path = os.path.join(
            home_dir, ".codebuddy", "teams", "prizm-dev-team", "config.json",
        )

    # Agent definitions are .md files in the platform-specific agents dir.
    # NOTE(review): the paths are constructed unconditionally — no
    # existence check is performed here.
    coordinator_subagent = os.path.join(
        agents_dir, "prizm-dev-team-coordinator.md",
    )
    pm_subagent = os.path.join(
        agents_dir, "prizm-dev-team-pm.md",
    )
    dev_subagent = os.path.join(
        agents_dir, "prizm-dev-team-dev.md",
    )
    reviewer_subagent = os.path.join(
        agents_dir, "prizm-dev-team-reviewer.md",
    )
    # Validator scripts are always taken from dev-pipeline/scripts/ in the
    # project root (no fallback lookup is actually performed).
    validator_scripts_dir = os.path.join(project_root, "dev-pipeline", "scripts")
    init_script_path = os.path.join(validator_scripts_dir, "init-dev-team.py")

    # Session status path (relative to dev-pipeline/)
    session_status_path = os.path.join(
        "dev-pipeline", "state", "features", args.feature_id,
        "sessions", args.session_id, "session-status.json",
    )
    # Anchor the relative session path at the project root.
    session_status_abs = os.path.join(project_root, session_status_path)

    prev_status = get_prev_session_status(args.state_dir, args.feature_id)

    # Compute feature slug for per-feature directory naming
    feature_slug = compute_feature_slug(
        args.feature_id, feature.get("title", "")
    )

    # Detect project state: init markers and existing planning artifacts.
    init_done = detect_init_status(project_root)
    artifacts = detect_existing_artifacts(project_root, feature_slug)
    complexity = feature.get("estimated_complexity", "medium")
    # Explicit --mode wins; otherwise derive the mode from complexity.
    if args.mode:
        pipeline_mode = args.mode
    else:
        pipeline_mode = determine_pipeline_mode(complexity)

    # Auto-detect resume: if all planning artifacts exist and resume_phase
    # is "null" (fresh start), skip to Phase 6
    effective_resume = args.resume_phase
    if effective_resume == "null" and artifacts["all_complete"]:
        effective_resume = "6"

    replacements = {
        "{{RUN_ID}}": args.run_id,
        "{{SESSION_ID}}": args.session_id,
        "{{FEATURE_ID}}": args.feature_id,
        "{{FEATURE_TITLE}}": feature.get("title", ""),
        "{{RETRY_COUNT}}": str(args.retry_count),
        "{{MAX_RETRIES}}": str(DEFAULT_MAX_RETRIES),
        "{{PREV_SESSION_STATUS}}": prev_status,
        # NOTE: main() overwrites this entry with effective_resume after
        # this function returns, so the raw CLI value here is provisional.
        "{{RESUME_PHASE}}": args.resume_phase,
        "{{FEATURE_DESCRIPTION}}": feature.get("description", ""),
        "{{ACCEPTANCE_CRITERIA}}": format_acceptance_criteria(
            feature.get("acceptance_criteria", [])
        ),
        "{{COMPLETED_DEPENDENCIES}}": get_completed_dependencies(
            features, feature
        ),
        "{{GLOBAL_CONTEXT}}": format_global_context(global_context),
        "{{TEAM_CONFIG_PATH}}": team_config_path,
        "{{COORDINATOR_SUBAGENT_PATH}}": coordinator_subagent,
        "{{PM_SUBAGENT_PATH}}": pm_subagent,
        "{{DEV_SUBAGENT_PATH}}": dev_subagent,
        "{{REVIEWER_SUBAGENT_PATH}}": reviewer_subagent,
        "{{VALIDATOR_SCRIPTS_DIR}}": validator_scripts_dir,
        "{{INIT_SCRIPT_PATH}}": init_script_path,
        "{{SESSION_STATUS_PATH}}": session_status_abs,
        "{{PROJECT_ROOT}}": project_root,
        "{{FEATURE_SLUG}}": feature_slug,
        "{{PIPELINE_MODE}}": pipeline_mode,
        "{{COMPLEXITY}}": complexity,
        # Booleans are rendered as the strings "true"/"false" for the
        # template's benefit.
        "{{INIT_DONE}}": "true" if init_done else "false",
        "{{HAS_SPEC}}": "true" if artifacts["has_spec"] else "false",
        "{{HAS_PLAN}}": "true" if artifacts["has_plan"] else "false",
        "{{HAS_TASKS}}": "true" if artifacts["has_tasks"] else "false",
        "{{ARTIFACTS_COMPLETE}}": "true" if artifacts["all_complete"] else "false",
    }

    return replacements, effective_resume
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
def render_template(template_content, replacements, resume_phase):
    """Render the template: resolve conditional blocks, then placeholders."""
    # Fresh-start vs resume blocks first, then mode/init blocks.
    rendered = process_conditional_blocks(template_content, resume_phase)
    rendered = process_mode_blocks(
        rendered,
        replacements.get("{{PIPELINE_MODE}}", "standard"),
        replacements.get("{{INIT_DONE}}", "false") == "true",
    )
    # Finally substitute every {{PLACEHOLDER}} token with its value.
    for marker, value in replacements.items():
        rendered = rendered.replace(marker, value)
    return rendered
|
|
494
|
+
|
|
495
|
+
|
|
496
|
+
def write_output(output_path, content):
    """Write *content* to *output_path*, creating parent dirs as needed.

    Returns None on success, or an error-message string on failure.
    """
    target = os.path.abspath(output_path)
    parent = os.path.dirname(target)
    if parent and not os.path.isdir(parent):
        try:
            os.makedirs(parent, exist_ok=True)
        except OSError as exc:
            return "Cannot create output directory: {}".format(str(exc))
    try:
        with open(target, "w", encoding="utf-8") as handle:
            handle.write(content)
    except IOError as exc:
        return "Cannot write output file: {}".format(str(exc))
    return None
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
def main():
    """CLI entry point: load inputs, render the prompt, report JSON status.

    Prints a JSON object ``{"success": bool, ...}`` to stdout and exits
    with code 0 on success or 1 on any error.
    """
    args = parse_args()

    # Resolve script directory (used to locate the default template and
    # the project root).
    script_dir = os.path.dirname(os.path.abspath(__file__))

    # Resolve template path: explicit --template wins, otherwise the
    # bundled template next to this script is used.
    if args.template:
        template_path = args.template
    else:
        template_path = os.path.join(
            script_dir, "..", "templates", "bootstrap-prompt.md"
        )

    # Load template
    template_content, err = read_text_file(template_path)
    if err:
        output = {"success": False, "error": "Template error: {}".format(err)}
        print(json.dumps(output, indent=2, ensure_ascii=False))
        sys.exit(1)

    # Load feature list
    feature_list_data, err = load_json_file(args.feature_list)
    if err:
        output = {"success": False, "error": "Feature list error: {}".format(err)}
        print(json.dumps(output, indent=2, ensure_ascii=False))
        sys.exit(1)

    # Extract features array; must be a JSON list.
    features = feature_list_data.get("features")
    if not isinstance(features, list):
        output = {
            "success": False,
            "error": "Feature list does not contain a 'features' array",
        }
        print(json.dumps(output, indent=2, ensure_ascii=False))
        sys.exit(1)

    # Find the target feature
    feature = find_feature(features, args.feature_id)
    if feature is None:
        output = {
            "success": False,
            "error": "Feature '{}' not found in feature list".format(
                args.feature_id
            ),
        }
        print(json.dumps(output, indent=2, ensure_ascii=False))
        sys.exit(1)

    # Extract global context; a non-dict value is silently ignored.
    global_context = feature_list_data.get("global_context", {})
    if not isinstance(global_context, dict):
        global_context = {}

    # Build replacements
    replacements, effective_resume = build_replacements(
        args, feature, features, global_context, script_dir
    )

    # Update RESUME_PHASE in replacements to reflect auto-detection
    # (build_replacements may have promoted a fresh start to phase 6).
    replacements["{{RESUME_PHASE}}"] = effective_resume

    # Render the template
    rendered = render_template(
        template_content, replacements, effective_resume
    )

    # Write the output
    err = write_output(args.output, rendered)
    if err:
        output = {"success": False, "error": err}
        print(json.dumps(output, indent=2, ensure_ascii=False))
        sys.exit(1)

    # Success: report where the rendered prompt was written.
    output = {
        "success": True,
        "output_path": os.path.abspath(args.output),
    }
    print(json.dumps(output, indent=2, ensure_ascii=False))
    sys.exit(0)
|
|
595
|
+
|
|
596
|
+
|
|
597
|
+
# Standard script entry point: delegate to main(), which prints a JSON
# status object and exits with 0 on success, 1 on any error.
if __name__ == "__main__":
    main()
|