prizmkit 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/bundled/VERSION.json +5 -0
  2. package/bundled/adapters/claude/agent-adapter.js +108 -0
  3. package/bundled/adapters/claude/command-adapter.js +104 -0
  4. package/bundled/adapters/claude/paths.js +35 -0
  5. package/bundled/adapters/claude/rules-adapter.js +77 -0
  6. package/bundled/adapters/claude/settings-adapter.js +73 -0
  7. package/bundled/adapters/claude/team-adapter.js +183 -0
  8. package/bundled/adapters/codebuddy/agent-adapter.js +43 -0
  9. package/bundled/adapters/codebuddy/paths.js +29 -0
  10. package/bundled/adapters/codebuddy/settings-adapter.js +47 -0
  11. package/bundled/adapters/codebuddy/skill-adapter.js +68 -0
  12. package/bundled/adapters/codebuddy/team-adapter.js +46 -0
  13. package/bundled/adapters/shared/frontmatter.js +77 -0
  14. package/bundled/agents/prizm-dev-team-coordinator.md +142 -0
  15. package/bundled/agents/prizm-dev-team-dev.md +99 -0
  16. package/bundled/agents/prizm-dev-team-pm.md +114 -0
  17. package/bundled/agents/prizm-dev-team-reviewer.md +119 -0
  18. package/bundled/dev-pipeline/README.md +482 -0
  19. package/bundled/dev-pipeline/assets/feature-list-example.json +147 -0
  20. package/bundled/dev-pipeline/assets/prizm-dev-team-integration.md +138 -0
  21. package/bundled/dev-pipeline/launch-bugfix-daemon.sh +425 -0
  22. package/bundled/dev-pipeline/launch-daemon.sh +549 -0
  23. package/bundled/dev-pipeline/reset-feature.sh +209 -0
  24. package/bundled/dev-pipeline/retry-bug.sh +344 -0
  25. package/bundled/dev-pipeline/retry-feature.sh +338 -0
  26. package/bundled/dev-pipeline/run-bugfix.sh +638 -0
  27. package/bundled/dev-pipeline/run.sh +845 -0
  28. package/bundled/dev-pipeline/scripts/check-session-status.py +158 -0
  29. package/bundled/dev-pipeline/scripts/detect-stuck.py +385 -0
  30. package/bundled/dev-pipeline/scripts/generate-bootstrap-prompt.py +598 -0
  31. package/bundled/dev-pipeline/scripts/generate-bugfix-prompt.py +402 -0
  32. package/bundled/dev-pipeline/scripts/init-bugfix-pipeline.py +294 -0
  33. package/bundled/dev-pipeline/scripts/init-dev-team.py +134 -0
  34. package/bundled/dev-pipeline/scripts/init-pipeline.py +335 -0
  35. package/bundled/dev-pipeline/scripts/update-bug-status.py +748 -0
  36. package/bundled/dev-pipeline/scripts/update-feature-status.py +1076 -0
  37. package/bundled/dev-pipeline/templates/bootstrap-prompt.md +262 -0
  38. package/bundled/dev-pipeline/templates/bug-fix-list-schema.json +159 -0
  39. package/bundled/dev-pipeline/templates/bugfix-bootstrap-prompt.md +291 -0
  40. package/bundled/dev-pipeline/templates/feature-list-schema.json +112 -0
  41. package/bundled/dev-pipeline/templates/session-status-schema.json +77 -0
  42. package/bundled/skills/_metadata.json +267 -0
  43. package/bundled/skills/app-planner/SKILL.md +580 -0
  44. package/bundled/skills/app-planner/assets/planning-guide.md +313 -0
  45. package/bundled/skills/app-planner/scripts/validate-and-generate.py +758 -0
  46. package/bundled/skills/bug-planner/SKILL.md +235 -0
  47. package/bundled/skills/bugfix-pipeline-launcher/SKILL.md +252 -0
  48. package/bundled/skills/dev-pipeline-launcher/SKILL.md +223 -0
  49. package/bundled/skills/prizm-kit/SKILL.md +151 -0
  50. package/bundled/skills/prizm-kit/assets/claude-md-template.md +38 -0
  51. package/bundled/skills/prizm-kit/assets/codebuddy-md-template.md +35 -0
  52. package/bundled/skills/prizm-kit/assets/hooks/prizm-commit-hook.json +15 -0
  53. package/bundled/skills/prizmkit-adr-manager/SKILL.md +68 -0
  54. package/bundled/skills/prizmkit-adr-manager/assets/adr-template.md +26 -0
  55. package/bundled/skills/prizmkit-analyze/SKILL.md +194 -0
  56. package/bundled/skills/prizmkit-api-doc-generator/SKILL.md +56 -0
  57. package/bundled/skills/prizmkit-bug-fix-workflow/SKILL.md +351 -0
  58. package/bundled/skills/prizmkit-bug-reproducer/SKILL.md +62 -0
  59. package/bundled/skills/prizmkit-ci-cd-generator/SKILL.md +54 -0
  60. package/bundled/skills/prizmkit-clarify/SKILL.md +52 -0
  61. package/bundled/skills/prizmkit-code-review/SKILL.md +70 -0
  62. package/bundled/skills/prizmkit-committer/SKILL.md +117 -0
  63. package/bundled/skills/prizmkit-db-migration/SKILL.md +65 -0
  64. package/bundled/skills/prizmkit-dependency-health/SKILL.md +123 -0
  65. package/bundled/skills/prizmkit-deployment-strategy/SKILL.md +58 -0
  66. package/bundled/skills/prizmkit-error-triage/SKILL.md +55 -0
  67. package/bundled/skills/prizmkit-implement/SKILL.md +47 -0
  68. package/bundled/skills/prizmkit-init/SKILL.md +156 -0
  69. package/bundled/skills/prizmkit-log-analyzer/SKILL.md +55 -0
  70. package/bundled/skills/prizmkit-monitoring-setup/SKILL.md +75 -0
  71. package/bundled/skills/prizmkit-onboarding-generator/SKILL.md +70 -0
  72. package/bundled/skills/prizmkit-perf-profiler/SKILL.md +55 -0
  73. package/bundled/skills/prizmkit-plan/SKILL.md +54 -0
  74. package/bundled/skills/prizmkit-plan/assets/plan-template.md +37 -0
  75. package/bundled/skills/prizmkit-prizm-docs/SKILL.md +140 -0
  76. package/bundled/skills/prizmkit-prizm-docs/assets/PRIZM-SPEC.md +943 -0
  77. package/bundled/skills/prizmkit-retrospective/SKILL.md +79 -0
  78. package/bundled/skills/prizmkit-security-audit/SKILL.md +130 -0
  79. package/bundled/skills/prizmkit-specify/SKILL.md +52 -0
  80. package/bundled/skills/prizmkit-specify/assets/spec-template.md +37 -0
  81. package/bundled/skills/prizmkit-summarize/SKILL.md +51 -0
  82. package/bundled/skills/prizmkit-summarize/assets/registry-template.md +18 -0
  83. package/bundled/skills/prizmkit-tasks/SKILL.md +50 -0
  84. package/bundled/skills/prizmkit-tasks/assets/tasks-template.md +21 -0
  85. package/bundled/skills/prizmkit-tech-debt-tracker/SKILL.md +139 -0
  86. package/bundled/team/prizm-dev-team.json +47 -0
  87. package/bundled/templates/claude-md-template.md +38 -0
  88. package/bundled/templates/codebuddy-md-template.md +35 -0
  89. package/package.json +2 -1
  90. package/src/scaffold.js +1 -1
@@ -0,0 +1,402 @@
1
+ #!/usr/bin/env python3
2
+ """Generate a session-specific bug fix bootstrap prompt from template and bug-fix-list.json.
3
+
4
+ Reads the bugfix-bootstrap-prompt.md template and a bug-fix-list.json, resolves all
5
+ {{PLACEHOLDER}} variables, handles conditional blocks, and writes the rendered
6
+ prompt to the specified output path.
7
+
8
+ Usage:
9
+ python3 generate-bugfix-prompt.py \
10
+ --bug-list <path> --bug-id <id> \
11
+ --session-id <id> --run-id <id> \
12
+ --retry-count <n> --resume-phase <n|null> \
13
+ --state-dir <path> --output <path>
14
+ """
15
+
16
+ import argparse
17
+ import json
18
+ import os
19
+ import re
20
+ import sys
21
+
22
+
23
+ DEFAULT_MAX_RETRIES = 3
24
+
25
+
26
def parse_args():
    """Parse and return the command-line arguments for prompt generation."""
    parser = argparse.ArgumentParser(
        description=(
            "Generate a session-specific bug fix bootstrap prompt from a template "
            "and bug-fix-list.json."
        )
    )
    # Required identifiers: registered from a table to keep the list compact.
    required_opts = [
        ("--bug-list", "Path to bug-fix-list.json"),
        ("--bug-id", "Bug ID to generate prompt for (e.g. B-001)"),
        ("--session-id", "Session ID for this pipeline session"),
        ("--run-id", "Pipeline run ID"),
        ("--retry-count", "Current retry count"),
        ("--resume-phase", 'Phase to resume from, or "null" for fresh start'),
    ]
    for flag, help_text in required_opts:
        parser.add_argument(flag, required=True, help=help_text)
    # Optional / output options.
    parser.add_argument("--state-dir", default=None, help="State directory path for reading previous session info")
    parser.add_argument("--output", required=True, help="Output path for the rendered prompt")
    parser.add_argument("--template", default=None, help="Custom template path. Defaults to {script_dir}/../templates/bugfix-bootstrap-prompt.md")
    return parser.parse_args()
43
+
44
+
45
def load_json_file(path):
    """Load JSON from *path*.

    Returns a (data, error) pair: on success error is None; on failure
    data is None and error is a human-readable message.
    """
    abs_path = os.path.abspath(path)
    if not os.path.isfile(abs_path):
        return None, "File not found: {}".format(abs_path)
    try:
        with open(abs_path, "r", encoding="utf-8") as handle:
            return json.load(handle), None
    except json.JSONDecodeError as exc:
        return None, "Invalid JSON: {}".format(str(exc))
    except IOError as exc:
        return None, "Cannot read file: {}".format(str(exc))
58
+
59
+
60
def read_text_file(path):
    """Read *path* as UTF-8 text.

    Returns (content, None) on success or (None, error_message) on failure.
    """
    abs_path = os.path.abspath(path)
    if not os.path.isfile(abs_path):
        return None, "File not found: {}".format(abs_path)
    try:
        with open(abs_path, "r", encoding="utf-8") as handle:
            content = handle.read()
    except IOError as exc:
        return None, "Cannot read file: {}".format(str(exc))
    return content, None
70
+
71
+
72
def find_bug(bugs, bug_id):
    """Return the first bug dict whose "id" equals *bug_id*, or None."""
    matches = (
        entry for entry in bugs
        if isinstance(entry, dict) and entry.get("id") == bug_id
    )
    return next(matches, None)
78
+
79
+
80
def format_acceptance_criteria(criteria):
    """Render *criteria* as a markdown bullet list (placeholder when empty)."""
    if not criteria:
        return "- (none specified)"
    return "\n".join("- {}".format(item) for item in criteria)
88
+
89
+
90
def format_global_context(global_context):
    """Render *global_context* as a sorted markdown key/value bullet list."""
    if not global_context:
        return "- (none specified)"
    return "\n".join(
        "- **{}**: {}".format(key, value)
        for key, value in sorted(global_context.items())
    )
98
+
99
+
100
def format_error_source_details(error_source):
    """Render the bug's error_source object as markdown detail lines.

    Emits type-specific sections (stack trace, log snippet, failed test,
    reproduction steps) plus a generic error-message line when present.
    """
    if not error_source or not isinstance(error_source, dict):
        return "- (no error source details)"

    etype = error_source.get("type", "unknown")
    parts = []

    def fenced(label, text):
        # A labelled markdown section followed by a fenced code block.
        parts.extend([label, "```", text, "```"])

    if etype == "stack_trace" and error_source.get("stack_trace"):
        fenced("- **Stack Trace**:", error_source["stack_trace"])
    if error_source.get("error_message"):
        parts.append("- **Error Message**: {}".format(error_source["error_message"]))
    if etype == "log_pattern" and error_source.get("log_snippet"):
        fenced("- **Log Snippet**:", error_source["log_snippet"])
    if etype == "failed_test" and error_source.get("failed_test_path"):
        parts.append("- **Failed Test**: `{}`".format(error_source["failed_test_path"]))
    if etype == "user_report" and error_source.get("reproduction_steps"):
        parts.append("- **Reproduction Steps**:")
        for idx, step in enumerate(error_source["reproduction_steps"], 1):
            parts.append(" {}. {}".format(idx, step))

    if not parts:
        parts.append("- (no additional details)")
    return "\n".join(parts)
129
+
130
+
131
def format_environment(env):
    """Render the environment dict as a sorted key/value list.

    Falsy values are skipped; a placeholder is returned when nothing remains.
    """
    if not env or not isinstance(env, dict):
        return "- (not specified)"
    rendered = [
        "- **{}**: {}".format(key, value)
        for key, value in sorted(env.items())
        if value
    ]
    return "\n".join(rendered) if rendered else "- (not specified)"
142
+
143
+
144
def get_prev_session_status(state_dir, bug_id):
    """Summarize the previous session's outcome for *bug_id*, if any.

    Looks up the bug's status.json under *state_dir* to find the last
    session id, then reads that session's session-status.json.  Every
    missing or unreadable piece degrades to a distinct "N/A (...)" string.
    """
    if not state_dir:
        return "N/A (first run)"

    bug_dir = os.path.join(state_dir, "bugs", bug_id)
    status_file = os.path.join(bug_dir, "status.json")
    if not os.path.isfile(status_file):
        return "N/A (first run)"
    try:
        with open(status_file, "r", encoding="utf-8") as handle:
            bug_status = json.load(handle)
    except (json.JSONDecodeError, IOError):
        return "N/A (could not read bug status)"

    prev_id = bug_status.get("last_session_id")
    if not prev_id:
        return "N/A (first run)"

    session_file = os.path.join(bug_dir, "sessions", prev_id, "session-status.json")
    if not os.path.isfile(session_file):
        return "N/A (previous session status file not found)"
    try:
        with open(session_file, "r", encoding="utf-8") as handle:
            session = json.load(handle)
    except (json.JSONDecodeError, IOError):
        return "N/A (could not read previous session status)"

    summary = "{} (checkpoint: {}, last phase: {})".format(
        session.get("status", "unknown"),
        session.get("checkpoint_reached", "none"),
        session.get("current_phase", "unknown"),
    )
    error_list = session.get("errors", [])
    if error_list:
        summary += " — errors: {}".format("; ".join(str(item) for item in error_list))
    return summary
187
+
188
+
189
def resolve_project_root(script_dir):
    """Return the project root: the parent of the dev-pipeline/ directory
    that contains *script_dir* (i.e. two levels up), as an absolute path."""
    return os.path.abspath(os.path.dirname(os.path.dirname(script_dir)))
194
+
195
+
196
def build_replacements(args, bug, global_context, script_dir):
    """Build the full dict of {{PLACEHOLDER}} -> replacement string.

    Resolves platform-specific agent/team paths (Claude vs CodeBuddy),
    the session-status path and the previous-session summary, then maps
    every template placeholder to its concrete string value.

    Fixes over the original: the unused is_manual_or_hybrid local is
    removed (the conditional logic lives in process_conditional_blocks),
    and a whitespace-only title no longer raises IndexError when deriving
    the fix scope.
    """
    project_root = resolve_project_root(script_dir)

    # Platform-aware agent/team path resolution: an explicit env override
    # wins; otherwise detect Claude by the presence of .claude/agents.
    platform = os.environ.get("PRIZMKIT_PLATFORM", "")
    home_dir = os.path.expanduser("~")

    if not platform:
        if os.path.isdir(os.path.join(project_root, ".claude", "agents")):
            platform = "claude"
        else:
            platform = "codebuddy"

    if platform == "claude":
        agents_dir = os.path.join(project_root, ".claude", "agents")
        team_config_path = os.path.join(project_root, ".claude", "team-info.json")
    else:
        agents_dir = os.path.join(project_root, ".codebuddy", "agents")
        team_config_path = os.path.join(
            home_dir, ".codebuddy", "teams", "prizm-dev-team", "config.json"
        )

    dev_subagent = os.path.join(agents_dir, "prizm-dev-team-dev.md")
    reviewer_subagent = os.path.join(agents_dir, "prizm-dev-team-reviewer.md")

    # Where this session will write its status file.
    session_status_path = os.path.join(
        project_root, "dev-pipeline", "bugfix-state", "bugs", args.bug_id,
        "sessions", args.session_id, "session-status.json"
    )

    prev_status = get_prev_session_status(args.state_dir, args.bug_id)

    # Error source (its "type" drives the template's detail section).
    error_source = bug.get("error_source", {})
    error_type = error_source.get("type", "unknown") if isinstance(error_source, dict) else "unknown"

    # Determine fix scope from affected_modules, falling back to the first
    # word of the title.  Guard against empty/whitespace-only titles, which
    # previously raised IndexError on split()[0].
    affected_modules = bug.get("affected_modules", [])
    if affected_modules:
        fix_scope = affected_modules[0]
    else:
        title_words = (bug.get("title") or "").split()
        fix_scope = title_words[0].lower() if title_words else "unknown"

    vtype = bug.get("verification_type", "automated")

    replacements = {
        "{{RUN_ID}}": args.run_id,
        "{{SESSION_ID}}": args.session_id,
        "{{BUG_ID}}": args.bug_id,
        "{{BUG_TITLE}}": bug.get("title", ""),
        "{{SEVERITY}}": bug.get("severity", "medium"),
        "{{VERIFICATION_TYPE}}": vtype,
        "{{RETRY_COUNT}}": str(args.retry_count),
        "{{MAX_RETRIES}}": str(DEFAULT_MAX_RETRIES),
        "{{PREV_SESSION_STATUS}}": prev_status,
        "{{RESUME_PHASE}}": args.resume_phase,
        "{{BUG_DESCRIPTION}}": bug.get("description", ""),
        "{{ERROR_SOURCE_TYPE}}": error_type,
        "{{ERROR_SOURCE_DETAILS}}": format_error_source_details(error_source),
        "{{ACCEPTANCE_CRITERIA}}": format_acceptance_criteria(
            bug.get("acceptance_criteria", [])
        ),
        "{{AFFECTED_FEATURE}}": bug.get("affected_feature", "N/A"),
        "{{ENVIRONMENT}}": format_environment(bug.get("environment")),
        "{{GLOBAL_CONTEXT}}": format_global_context(global_context),
        "{{TEAM_CONFIG_PATH}}": team_config_path,
        "{{DEV_SUBAGENT_PATH}}": dev_subagent,
        "{{REVIEWER_SUBAGENT_PATH}}": reviewer_subagent,
        "{{SESSION_STATUS_PATH}}": session_status_path,
        "{{PROJECT_ROOT}}": project_root,
        "{{FIX_SCOPE}}": fix_scope,
        "{{TIMESTAMP}}": "",  # placeholder: the agent fills in the timestamp itself
    }

    return replacements
275
+
276
+
277
def process_conditional_blocks(content, bug):
    """Strip or unwrap {{IF_VERIFICATION_MANUAL_OR_HYBRID}} blocks.

    For manual/hybrid verification the markers are removed and the block
    body is kept; otherwise the whole block (markers included) is deleted.
    """
    keep_block = bug.get("verification_type", "automated") in ("manual", "hybrid")

    if keep_block:
        # Unwrap: drop just the marker lines/tokens, keep the body.
        for marker in ("{{IF_VERIFICATION_MANUAL_OR_HYBRID}}",
                       "{{END_IF_VERIFICATION_MANUAL_OR_HYBRID}}"):
            content = content.replace(marker + "\n", "")
            content = content.replace(marker, "")
        return content

    # Delete the whole conditional block, plus an optional trailing newline.
    return re.sub(
        r"\{\{IF_VERIFICATION_MANUAL_OR_HYBRID\}\}.*?\{\{END_IF_VERIFICATION_MANUAL_OR_HYBRID\}\}\n?",
        "",
        content,
        flags=re.DOTALL,
    )
295
+
296
+
297
def render_template(template_content, replacements, bug):
    """Render the template: conditional blocks first, then placeholder
    substitution of every {{PLACEHOLDER}} token."""
    rendered = process_conditional_blocks(template_content, bug)
    for token, value in replacements.items():
        rendered = rendered.replace(token, value)
    return rendered
307
+
308
+
309
def write_output(output_path, content):
    """Write *content* to *output_path*, creating parent dirs as needed.

    Returns None on success, or an error-message string on failure.
    """
    abs_path = os.path.abspath(output_path)
    parent = os.path.dirname(abs_path)
    if parent and not os.path.isdir(parent):
        try:
            os.makedirs(parent, exist_ok=True)
        except OSError as exc:
            return "Cannot create output directory: {}".format(str(exc))
    try:
        with open(abs_path, "w", encoding="utf-8") as handle:
            handle.write(content)
    except IOError as exc:
        return "Cannot write output file: {}".format(str(exc))
    return None
324
+
325
+
326
def _fail(message):
    """Print a failure JSON envelope to stdout and exit non-zero."""
    print(json.dumps({"success": False, "error": message},
                     indent=2, ensure_ascii=False))
    sys.exit(1)


def main():
    """Entry point: load the template and bug list, render, write the prompt.

    Emits a JSON result envelope on stdout ({"success": ..., ...}) and uses
    the exit code to signal success/failure to the calling shell scripts.
    The repeated print-and-exit error pattern is factored into _fail.
    """
    args = parse_args()

    # Resolve script directory (templates live next to it).
    script_dir = os.path.dirname(os.path.abspath(__file__))

    # Resolve template path; an explicit --template wins.
    if args.template:
        template_path = args.template
    else:
        template_path = os.path.join(
            script_dir, "..", "templates", "bugfix-bootstrap-prompt.md"
        )

    # Load template
    template_content, err = read_text_file(template_path)
    if err:
        _fail("Template error: {}".format(err))

    # Load bug fix list
    bug_list_data, err = load_json_file(args.bug_list)
    if err:
        _fail("Bug list error: {}".format(err))

    # Extract bugs array
    bugs = bug_list_data.get("bugs")
    if not isinstance(bugs, list):
        _fail("Bug fix list does not contain a 'bugs' array")

    # Find the target bug
    bug = find_bug(bugs, args.bug_id)
    if bug is None:
        _fail("Bug '{}' not found in bug fix list".format(args.bug_id))

    # Extract global context (tolerate a malformed value).
    global_context = bug_list_data.get("global_context", {})
    if not isinstance(global_context, dict):
        global_context = {}

    # Build replacements and render the template.
    replacements = build_replacements(args, bug, global_context, script_dir)
    rendered = render_template(template_content, replacements, bug)

    # Write the output
    err = write_output(args.output, rendered)
    if err:
        _fail(err)

    # Success envelope.
    print(json.dumps(
        {"success": True, "output_path": os.path.abspath(args.output)},
        indent=2, ensure_ascii=False,
    ))
    sys.exit(0)


if __name__ == "__main__":
    main()
@@ -0,0 +1,294 @@
1
+ #!/usr/bin/env python3
2
+ """Initialize the bug-fix pipeline state directory from a bug-fix-list.json file.
3
+
4
+ Validates the bug fix list schema, sorts by priority/severity, and creates
5
+ the state directory structure with pipeline and per-bug status files.
6
+
7
+ Usage:
8
+ python3 init-bugfix-pipeline.py --bug-list <path> --state-dir <path>
9
+ """
10
+
11
+ import argparse
12
+ import json
13
+ import os
14
+ import re
15
+ import sys
16
+ from datetime import datetime, timezone
17
+
18
+
19
+ EXPECTED_SCHEMA = "dev-pipeline-bug-fix-list-v1"
20
+ BUG_ID_PATTERN = re.compile(r"^B-\d{3}$")
21
+
22
+ REQUIRED_BUG_FIELDS = [
23
+ "id",
24
+ "title",
25
+ "description",
26
+ "severity",
27
+ "error_source",
28
+ "verification_type",
29
+ "acceptance_criteria",
30
+ "status",
31
+ ]
32
+
33
+ VALID_SEVERITIES = ["critical", "high", "medium", "low"]
34
+ VALID_VERIFICATION_TYPES = ["automated", "manual", "hybrid"]
35
+ VALID_STATUSES = [
36
+ "pending", "triaging", "reproducing", "fixing",
37
+ "verifying", "completed", "failed", "needs_info", "skipped",
38
+ ]
39
+
40
+
41
def parse_args():
    """Parse command-line arguments for pipeline-state initialization."""
    parser = argparse.ArgumentParser(
        description="Initialize bug-fix pipeline state from a bug-fix-list.json file."
    )
    # Both arguments are mandatory: the input list and the state location.
    parser.add_argument("--bug-list", required=True,
                        help="Path to the bug-fix-list.json file")
    parser.add_argument("--state-dir", required=True,
                        help="Path to the state directory to create/initialize")
    return parser.parse_args()
56
+
57
+
58
def load_bug_list(path):
    """Load the bug-fix-list JSON file.

    Returns (data, errors): errors is an empty list on success, otherwise a
    one-element list with a human-readable message and data is None.
    """
    abs_path = os.path.abspath(path)
    if not os.path.isfile(abs_path):
        return None, ["Bug fix list file not found: {}".format(abs_path)]
    try:
        with open(abs_path, "r", encoding="utf-8") as handle:
            return json.load(handle), []
    except json.JSONDecodeError as exc:
        return None, ["Invalid JSON in bug fix list: {}".format(str(exc))]
    except IOError as exc:
        return None, ["Cannot read bug fix list file: {}".format(str(exc))]
71
+
72
+
73
def validate_schema(data):
    """Validate the top-level structure of the bug fix list.

    Checks $schema, project_name, and the bugs array; returns a list of
    error strings (empty when the document is structurally valid).
    """
    errors = []

    # $schema must match the pipeline's expected identifier exactly.
    declared = data.get("$schema")
    if declared != EXPECTED_SCHEMA:
        errors.append(
            "Invalid $schema: expected '{}', got '{}'".format(EXPECTED_SCHEMA, declared)
        )

    # project_name: present and a non-empty string.
    if "project_name" not in data:
        errors.append("Missing required field: project_name")
    elif not isinstance(data["project_name"], str) or not data["project_name"].strip():
        errors.append("project_name must be a non-empty string")

    # bugs: present, an array, and non-empty.
    if "bugs" not in data:
        errors.append("Missing required field: bugs")
    elif not isinstance(data["bugs"], list):
        errors.append("bugs must be an array")
    elif not data["bugs"]:
        errors.append("bugs array must contain at least one bug")

    return errors
99
+
100
+
101
def validate_bugs(bugs):
    """Validate each bug object in the list.

    Returns (errors, seen_ids): the accumulated error strings and the set
    of well-formed, non-duplicate bug ids encountered.
    """
    errors = []
    seen_ids = set()

    for index, bug in enumerate(bugs):
        if not isinstance(bug, dict):
            errors.append("Bug at index {} is not an object".format(index))
            continue

        # All required fields must be present.
        for field in REQUIRED_BUG_FIELDS:
            if field not in bug:
                errors.append(
                    "Bug at index {} missing required field: {}".format(index, field)
                )

        bid = bug.get("id")
        # Label used in later messages: the id when available, else the index.
        label = bid or "index {}".format(index)

        # Bug id: must match B-NNN and be unique.
        if bid is not None:
            if not isinstance(bid, str) or not BUG_ID_PATTERN.match(bid):
                errors.append(
                    "Bug at index {} has invalid id '{}' "
                    "(must match B-NNN pattern)".format(index, bid)
                )
            elif bid in seen_ids:
                errors.append("Duplicate bug id: {}".format(bid))
            else:
                seen_ids.add(bid)

        # Enumerated-value fields share one message shape.
        for field, allowed in (
            ("severity", VALID_SEVERITIES),
            ("verification_type", VALID_VERIFICATION_TYPES),
            ("status", VALID_STATUSES),
        ):
            value = bug.get(field)
            if value is not None and value not in allowed:
                errors.append(
                    "Bug '{}' has invalid {} '{}' "
                    "(must be one of {})".format(label, field, value, allowed)
                )

        # error_source: must be an object carrying a "type" field.
        error_source = bug.get("error_source")
        if error_source is not None:
            if not isinstance(error_source, dict):
                errors.append(
                    "Bug '{}' error_source must be an object".format(label)
                )
            elif "type" not in error_source:
                errors.append(
                    "Bug '{}' error_source missing required field: type".format(label)
                )

        # acceptance_criteria: must be an array when present.
        criteria = bug.get("acceptance_criteria")
        if criteria is not None and not isinstance(criteria, list):
            errors.append(
                "Bug '{}' acceptance_criteria must be an array".format(label)
            )

    return errors, seen_ids
187
+
188
+
189
def _write_json(path, payload):
    """Dump *payload* as pretty UTF-8 JSON with a trailing newline."""
    with open(path, "w", encoding="utf-8") as handle:
        json.dump(payload, handle, indent=2, ensure_ascii=False)
        handle.write("\n")


def create_state_directory(state_dir, bug_list_path, bugs):
    """Create the state directory structure with pipeline.json and per-bug
    status files.

    Returns the absolute state-directory path.  Raises IOError/OSError on
    filesystem failures (the caller reports them).  Fix over the original:
    the clock is sampled once, so created_at and the run_id timestamp can
    no longer disagree when the two calls straddle a second boundary; the
    duplicated dump-with-newline code is factored into _write_json.
    """
    abs_state_dir = os.path.abspath(state_dir)
    abs_bug_list_path = os.path.abspath(bug_list_path)
    bugs_dir = os.path.join(abs_state_dir, "bugs")

    # Single UTC timestamp for both the ISO created_at and the run id.
    stamp = datetime.now(timezone.utc)
    now = stamp.strftime("%Y-%m-%dT%H:%M:%SZ")
    run_id = "bugfix-run-" + stamp.strftime("%Y%m%d-%H%M%S")

    # Create top-level state directory and bugs subdirectory.
    os.makedirs(abs_state_dir, exist_ok=True)
    os.makedirs(bugs_dir, exist_ok=True)

    # Write pipeline.json
    _write_json(os.path.join(abs_state_dir, "pipeline.json"), {
        "run_id": run_id,
        "pipeline_type": "bugfix",
        "status": "initialized",
        "bug_list_path": abs_bug_list_path,
        "created_at": now,
        "total_bugs": len(bugs),
        "completed_bugs": 0,
    })

    # Write per-bug status.json and create each sessions directory.
    for bug in bugs:
        if not isinstance(bug, dict):
            continue
        bid = bug.get("id")
        if bid is None:
            continue

        bug_dir = os.path.join(bugs_dir, bid)
        os.makedirs(os.path.join(bug_dir, "sessions"), exist_ok=True)

        _write_json(os.path.join(bug_dir, "status.json"), {
            "bug_id": bid,
            "status": "pending",
            "retry_count": 0,
            "max_retries": 3,
            "sessions": [],
            "last_session_id": None,
            "resume_from_phase": None,
            "created_at": now,
            "updated_at": now,
        })

    return abs_state_dir
246
+
247
+
248
def _fail_validation(errors):
    """Print an invalid-result JSON envelope and exit non-zero."""
    print(json.dumps({"valid": False, "errors": errors},
                     indent=2, ensure_ascii=False))
    sys.exit(1)


def main():
    """Entry point: validate the bug list and materialize the state directory.

    Emits a JSON result envelope ({"valid": ..., ...}) on stdout and signals
    the outcome via the exit code.  The repeated error-envelope pattern is
    factored into _fail_validation; the unused id set returned by
    validate_bugs is explicitly discarded.
    """
    args = parse_args()

    # Load bug fix list
    data, load_errors = load_bug_list(args.bug_list)
    if load_errors:
        _fail_validation(load_errors)

    # Validate top-level schema
    schema_errors = validate_schema(data)
    if schema_errors:
        _fail_validation(schema_errors)

    # Validate individual bugs (the id set is not needed here).
    bugs = data["bugs"]
    bug_errors, _ = validate_bugs(bugs)
    if bug_errors:
        _fail_validation(bug_errors)

    # Create state directory
    try:
        abs_state_dir = create_state_directory(
            args.state_dir, args.bug_list, bugs
        )
    except (IOError, OSError) as e:
        _fail_validation(["Failed to create state directory: {}".format(str(e))])

    # Success envelope.
    print(json.dumps(
        {"valid": True, "bugs_count": len(bugs), "state_dir": abs_state_dir},
        indent=2, ensure_ascii=False,
    ))
    sys.exit(0)


if __name__ == "__main__":
    main()