prizmkit 1.1.8 → 1.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/bundled/VERSION.json +3 -3
  2. package/bundled/adapters/codebuddy/skill-adapter.js +21 -7
  3. package/bundled/agents/prizm-dev-team-reviewer.md +53 -173
  4. package/bundled/dev-pipeline/.env.example +45 -0
  5. package/bundled/dev-pipeline/SCHEMA_ANALYSIS.md +535 -0
  6. package/bundled/dev-pipeline/assets/feature-list-example.json +0 -1
  7. package/bundled/dev-pipeline/launch-bugfix-daemon.sh +57 -12
  8. package/bundled/dev-pipeline/launch-feature-daemon.sh +3 -1
  9. package/bundled/dev-pipeline/launch-refactor-daemon.sh +57 -12
  10. package/bundled/dev-pipeline/lib/branch.sh +6 -1
  11. package/bundled/dev-pipeline/lib/common.sh +71 -0
  12. package/bundled/dev-pipeline/lib/heartbeat.sh +2 -2
  13. package/bundled/dev-pipeline/retry-bugfix.sh +60 -23
  14. package/bundled/dev-pipeline/retry-feature.sh +47 -12
  15. package/bundled/dev-pipeline/retry-refactor.sh +105 -23
  16. package/bundled/dev-pipeline/run-bugfix.sh +265 -44
  17. package/bundled/dev-pipeline/run-feature.sh +35 -1
  18. package/bundled/dev-pipeline/run-refactor.sh +376 -51
  19. package/bundled/dev-pipeline/scripts/check-session-status.py +24 -1
  20. package/bundled/dev-pipeline/scripts/detect-stuck.py +195 -85
  21. package/bundled/dev-pipeline/scripts/generate-bootstrap-prompt.py +31 -19
  22. package/bundled/dev-pipeline/scripts/generate-bugfix-prompt.py +19 -3
  23. package/bundled/dev-pipeline/scripts/generate-refactor-prompt.py +98 -11
  24. package/bundled/dev-pipeline/scripts/init-bugfix-pipeline.py +30 -5
  25. package/bundled/dev-pipeline/scripts/init-pipeline.py +3 -3
  26. package/bundled/dev-pipeline/scripts/init-refactor-pipeline.py +15 -4
  27. package/bundled/dev-pipeline/scripts/parse-stream-progress.py +1 -5
  28. package/bundled/dev-pipeline/scripts/patch-completion-notes.py +191 -0
  29. package/bundled/dev-pipeline/scripts/update-bug-status.py +159 -14
  30. package/bundled/dev-pipeline/scripts/update-feature-status.py +79 -37
  31. package/bundled/dev-pipeline/scripts/update-refactor-status.py +343 -13
  32. package/bundled/dev-pipeline/templates/agent-prompts/dev-fix.md +1 -1
  33. package/bundled/dev-pipeline/templates/agent-prompts/reviewer-review.md +7 -11
  34. package/bundled/dev-pipeline/templates/bootstrap-prompt.md +41 -7
  35. package/bundled/dev-pipeline/templates/bootstrap-tier1.md +27 -3
  36. package/bundled/dev-pipeline/templates/bootstrap-tier2.md +43 -19
  37. package/bundled/dev-pipeline/templates/bootstrap-tier3.md +54 -26
  38. package/bundled/dev-pipeline/templates/bug-fix-list-schema.json +5 -14
  39. package/bundled/dev-pipeline/templates/bugfix-bootstrap-prompt.md +36 -25
  40. package/bundled/dev-pipeline/templates/feature-list-schema.json +23 -11
  41. package/bundled/dev-pipeline/templates/refactor-bootstrap-prompt.md +270 -0
  42. package/bundled/dev-pipeline/templates/refactor-list-schema.json +10 -2
  43. package/bundled/dev-pipeline/templates/sections/context-budget-rules.md +3 -1
  44. package/bundled/dev-pipeline/templates/sections/critical-paths-agent.md +1 -0
  45. package/bundled/dev-pipeline/templates/sections/feature-context.md +2 -0
  46. package/bundled/dev-pipeline/templates/sections/phase-commit-full.md +29 -2
  47. package/bundled/dev-pipeline/templates/sections/phase-commit.md +22 -0
  48. package/bundled/dev-pipeline/templates/sections/phase-deploy-verification.md +2 -2
  49. package/bundled/dev-pipeline/templates/sections/phase-review-agent.md +8 -6
  50. package/bundled/dev-pipeline/templates/sections/phase-review-full.md +7 -5
  51. package/bundled/dev-pipeline/templates/sections/phase-specify-plan-full.md +3 -3
  52. package/bundled/skills/_metadata.json +5 -22
  53. package/bundled/skills/app-planner/SKILL.md +92 -66
  54. package/bundled/skills/app-planner/assets/app-design-guide.md +1 -1
  55. package/bundled/skills/app-planner/references/architecture-decisions.md +1 -1
  56. package/bundled/skills/app-planner/references/project-brief-guide.md +69 -66
  57. package/bundled/skills/bug-fix-workflow/SKILL.md +47 -4
  58. package/bundled/skills/bug-planner/SKILL.md +130 -188
  59. package/bundled/skills/bug-planner/assets/bug-confirmation-template.md +43 -0
  60. package/bundled/skills/bug-planner/references/critic-and-verification.md +44 -0
  61. package/bundled/skills/bug-planner/references/error-recovery.md +73 -0
  62. package/bundled/skills/bug-planner/references/input-formats.md +53 -0
  63. package/bundled/skills/bug-planner/references/schema-validation.md +25 -0
  64. package/bundled/skills/bug-planner/references/severity-rules.md +16 -0
  65. package/bundled/skills/bug-planner/scripts/validate-bug-list.py +1 -5
  66. package/bundled/skills/bugfix-pipeline-launcher/SKILL.md +5 -10
  67. package/bundled/skills/feature-pipeline-launcher/SKILL.md +16 -3
  68. package/bundled/skills/feature-planner/SKILL.md +33 -122
  69. package/bundled/skills/feature-planner/assets/evaluation-guide.md +1 -1
  70. package/bundled/skills/feature-planner/assets/planning-guide.md +21 -5
  71. package/bundled/skills/feature-planner/references/browser-interaction.md +2 -4
  72. package/bundled/skills/feature-planner/references/completeness-review.md +57 -0
  73. package/bundled/skills/feature-planner/references/error-recovery.md +15 -34
  74. package/bundled/skills/feature-planner/references/incremental-feature-planning.md +1 -1
  75. package/bundled/skills/feature-planner/references/new-project-planning.md +2 -2
  76. package/bundled/skills/feature-planner/scripts/validate-and-generate.py +1 -2
  77. package/bundled/skills/feature-workflow/SKILL.md +3 -4
  78. package/bundled/skills/prizm-kit/SKILL.md +39 -49
  79. package/bundled/skills/prizmkit-code-review/SKILL.md +51 -64
  80. package/bundled/skills/prizmkit-code-review/rules/dimensions.md +85 -0
  81. package/bundled/skills/prizmkit-code-review/rules/fix-strategy.md +11 -11
  82. package/bundled/skills/prizmkit-committer/SKILL.md +3 -31
  83. package/bundled/skills/prizmkit-deploy/SKILL.md +34 -31
  84. package/bundled/skills/prizmkit-deploy/assets/deploy-template.md +1 -1
  85. package/bundled/skills/prizmkit-implement/SKILL.md +35 -68
  86. package/bundled/skills/prizmkit-init/SKILL.md +112 -65
  87. package/bundled/skills/prizmkit-init/assets/project-brief-template.md +82 -0
  88. package/bundled/skills/prizmkit-plan/SKILL.md +120 -79
  89. package/bundled/skills/prizmkit-plan/assets/plan-template.md +28 -18
  90. package/bundled/skills/prizmkit-plan/assets/spec-template.md +28 -11
  91. package/bundled/skills/prizmkit-plan/references/clarify-guide.md +3 -3
  92. package/bundled/skills/prizmkit-plan/references/verification-checklist.md +60 -0
  93. package/bundled/skills/prizmkit-prizm-docs/SKILL.md +10 -81
  94. package/bundled/skills/prizmkit-prizm-docs/assets/{PRIZM-SPEC.md → prizm-docs-format.md} +41 -526
  95. package/bundled/skills/prizmkit-prizm-docs/references/op-init.md +46 -0
  96. package/bundled/skills/prizmkit-prizm-docs/references/op-rebuild.md +16 -0
  97. package/bundled/skills/prizmkit-prizm-docs/references/op-status.md +14 -0
  98. package/bundled/skills/prizmkit-prizm-docs/references/op-update.md +19 -0
  99. package/bundled/skills/prizmkit-prizm-docs/references/op-validate.md +17 -0
  100. package/bundled/skills/prizmkit-retrospective/SKILL.md +27 -65
  101. package/bundled/skills/prizmkit-retrospective/references/knowledge-injection-steps.md +3 -4
  102. package/bundled/skills/prizmkit-retrospective/references/structural-sync-steps.md +7 -25
  103. package/bundled/skills/recovery-workflow/SKILL.md +8 -8
  104. package/bundled/skills/refactor-pipeline-launcher/SKILL.md +17 -9
  105. package/bundled/skills/refactor-planner/SKILL.md +23 -41
  106. package/bundled/skills/refactor-workflow/SKILL.md +1 -2
  107. package/bundled/team/prizm-dev-team.json +1 -1
  108. package/bundled/{skills/prizm-kit/assets → templates}/project-memory-template.md +1 -1
  109. package/package.json +1 -1
  110. package/src/clean.js +0 -1
  111. package/src/gitignore-template.js +0 -1
  112. package/src/scaffold.js +10 -3
  113. package/bundled/dev-pipeline/templates/agent-prompts/reviewer-analyze.md +0 -5
  114. package/bundled/dev-pipeline/templates/sections/phase-analyze-agent.md +0 -19
  115. package/bundled/dev-pipeline/templates/sections/phase-analyze-full.md +0 -19
  116. package/bundled/skills/app-planner/references/project-conventions.md +0 -93
  117. package/bundled/skills/prizmkit-analyze/SKILL.md +0 -207
  118. package/bundled/skills/prizmkit-code-review/rules/dimensions-bugfix.md +0 -25
  119. package/bundled/skills/prizmkit-code-review/rules/dimensions-feature.md +0 -43
  120. package/bundled/skills/prizmkit-code-review/rules/dimensions-refactor.md +0 -25
  121. package/bundled/skills/prizmkit-implement/references/deploy-guide-protocol.md +0 -69
  122. package/bundled/skills/prizmkit-verify/SKILL.md +0 -281
  123. package/bundled/skills/prizmkit-verify/scripts/verify-light.py +0 -402
@@ -43,6 +43,8 @@ def parse_args():
43
43
  parser.add_argument("--state-dir", default=None, help="State directory (default: .prizmkit/state/bugfix)")
44
44
  parser.add_argument("--output", required=True, help="Output path for the rendered prompt")
45
45
  parser.add_argument("--template", default=None, help="Custom template path. Defaults to {script_dir}/../templates/bugfix-bootstrap-prompt.md")
46
+ parser.add_argument("--mode", default=None, help="Pipeline execution mode override: lite, standard, full")
47
+ parser.add_argument("--critic", default=None, help="Enable critic agent: true/false")
46
48
  return parser.parse_args()
47
49
 
48
50
 
@@ -254,7 +256,6 @@ def build_replacements(args, bug, global_context, script_dir):
254
256
  "{{ACCEPTANCE_CRITERIA}}": format_acceptance_criteria(
255
257
  bug.get("acceptance_criteria", [])
256
258
  ),
257
- "{{AFFECTED_FEATURE}}": bug.get("affected_feature", "N/A"),
258
259
  "{{ENVIRONMENT}}": format_environment(bug.get("environment")),
259
260
  "{{GLOBAL_CONTEXT}}": format_global_context(global_context, project_root),
260
261
  "{{TEAM_CONFIG_PATH}}": team_config_path,
@@ -263,7 +264,7 @@ def build_replacements(args, bug, global_context, script_dir):
263
264
  "{{SESSION_STATUS_PATH}}": session_status_path,
264
265
  "{{PROJECT_ROOT}}": project_root,
265
266
  "{{FIX_SCOPE}}": fix_scope,
266
- "{{TIMESTAMP}}": "", # 占位符,agent 自行填写时间戳
267
+ "{{TIMESTAMP}}": "", # Placeholder, agent fills in the timestamp
267
268
  }
268
269
 
269
270
  return replacements
@@ -280,7 +281,7 @@ def process_conditional_blocks(content, bug):
280
281
  content = content.replace("{{END_IF_VERIFICATION_MANUAL_OR_HYBRID}}\n", "")
281
282
  content = content.replace("{{END_IF_VERIFICATION_MANUAL_OR_HYBRID}}", "")
282
283
  else:
283
- # 删除整个条件块
284
+ # Remove the entire conditional block
284
285
  content = re.sub(
285
286
  r"\{\{IF_VERIFICATION_MANUAL_OR_HYBRID\}\}.*?\{\{END_IF_VERIFICATION_MANUAL_OR_HYBRID\}\}\n?",
286
287
  "", content, flags=re.DOTALL,
@@ -497,11 +498,26 @@ def main():
497
498
  json.dump(checkpoint, f, indent=2, ensure_ascii=False)
498
499
  LOGGER.info("Wrote bugfix checkpoint to %s", checkpoint_path)
499
500
 
501
+ # Resolve critic and mode
502
+ bug_critic = bug.get("critic", False)
503
+ if args.critic is not None:
504
+ critic_enabled = str(args.critic).lower() == "true"
505
+ else:
506
+ critic_enabled = bool(bug_critic)
507
+
508
+ pipeline_mode = args.mode or "standard"
509
+ agent_count = 5 if critic_enabled else 3
510
+
500
511
  # Success
512
+ bug_model = bug.get("model", "")
501
513
  output = {
502
514
  "success": True,
503
515
  "output_path": os.path.abspath(args.output),
504
516
  "checkpoint_path": checkpoint_path,
517
+ "model": bug_model,
518
+ "pipeline_mode": pipeline_mode,
519
+ "agent_count": agent_count,
520
+ "critic_enabled": critic_enabled,
505
521
  }
506
522
  print(json.dumps(output, indent=2, ensure_ascii=False))
507
523
  sys.exit(0)
@@ -16,6 +16,7 @@ Usage:
16
16
  import argparse
17
17
  import json
18
18
  import os
19
+ import re
19
20
  import sys
20
21
 
21
22
  from utils import enrich_global_context, load_json_file, setup_logging
@@ -42,6 +43,8 @@ def parse_args():
42
43
  parser.add_argument("--state-dir", default=None, help="State directory (default: .prizmkit/state/refactor)")
43
44
  parser.add_argument("--output", required=True, help="Output path for the rendered prompt")
44
45
  parser.add_argument("--template", default=None, help="Custom template path. Defaults to {script_dir}/../templates/refactor-bootstrap-prompt.md")
46
+ parser.add_argument("--mode", default=None, help="Pipeline execution mode override: lite, standard, full")
47
+ parser.add_argument("--critic", default=None, help="Enable critic agent: true/false")
45
48
  return parser.parse_args()
46
49
 
47
50
 
@@ -161,15 +164,42 @@ def format_behavior_preservation(bp):
161
164
  return "\n".join(lines)
162
165
 
163
166
 
164
- def format_dependencies(dependencies):
165
- """Format dependencies list as a markdown bullet list."""
167
+ def format_dependencies(dependencies, refactors=None):
168
+ """Format dependencies list as a markdown bullet list with completion context.
169
+
170
+ When refactors list is provided, look up completed dependencies and include
171
+ their completion_notes for rich context propagation.
172
+ """
166
173
  if not dependencies or not isinstance(dependencies, list):
167
174
  return "- (none)"
168
175
  if len(dependencies) == 0:
169
176
  return "- (none)"
177
+
178
+ # Build lookup map if refactors list is provided
179
+ refactor_map = {}
180
+ if refactors:
181
+ for r in refactors:
182
+ if isinstance(r, dict) and "id" in r:
183
+ refactor_map[r["id"]] = r
184
+
170
185
  lines = []
171
186
  for dep in dependencies:
172
- lines.append("- `{}`".format(dep))
187
+ dep_info = refactor_map.get(dep)
188
+ if dep_info and dep_info.get("status") == "completed":
189
+ header = "- **{}** — {} (completed)".format(
190
+ dep, dep_info.get("title", "Untitled")
191
+ )
192
+ notes = dep_info.get("completion_notes", [])
193
+ if notes and isinstance(notes, list):
194
+ note_lines = [
195
+ " - {}".format(n) for n in notes
196
+ if isinstance(n, str) and n.strip()
197
+ ]
198
+ if note_lines:
199
+ header += "\n" + "\n".join(note_lines)
200
+ lines.append(header)
201
+ else:
202
+ lines.append("- `{}`".format(dep))
173
203
  return "\n".join(lines)
174
204
 
175
205
 
@@ -219,7 +249,7 @@ def resolve_project_root(script_dir):
219
249
  return os.path.abspath(project_root)
220
250
 
221
251
 
222
- def build_replacements(args, refactor, global_context, script_dir):
252
+ def build_replacements(args, refactor, refactors, global_context, script_dir):
223
253
  """Build the full dict of placeholder -> replacement value."""
224
254
  project_root = resolve_project_root(script_dir)
225
255
 
@@ -260,7 +290,11 @@ def build_replacements(args, refactor, global_context, script_dir):
260
290
  bp = refactor.get("behavior_preservation", {})
261
291
  behavior_strategy = bp.get("strategy", "test-gate") if isinstance(bp, dict) else "test-gate"
262
292
  existing_tests = bp.get("existing_tests", []) if isinstance(bp, dict) else []
293
+ if not isinstance(existing_tests, list):
294
+ existing_tests = []
263
295
  new_tests_needed = bp.get("new_tests_needed", []) if isinstance(bp, dict) else []
296
+ if not isinstance(new_tests_needed, list):
297
+ new_tests_needed = []
264
298
 
265
299
  # Format existing tests
266
300
  if existing_tests:
@@ -296,7 +330,7 @@ def build_replacements(args, refactor, global_context, script_dir):
296
330
  refactor.get("acceptance_criteria", [])
297
331
  ),
298
332
  "{{DEPENDENCIES}}": format_dependencies(
299
- refactor.get("dependencies", [])
333
+ refactor.get("dependencies", []), refactors
300
334
  ),
301
335
  "{{GLOBAL_CONTEXT}}": format_global_context(global_context, project_root),
302
336
  "{{TEAM_CONFIG_PATH}}": team_config_path,
@@ -304,17 +338,55 @@ def build_replacements(args, refactor, global_context, script_dir):
304
338
  "{{REVIEWER_SUBAGENT_PATH}}": reviewer_subagent,
305
339
  "{{SESSION_STATUS_PATH}}": session_status_path,
306
340
  "{{PROJECT_ROOT}}": project_root,
341
+ "{{CHECKPOINT_PATH}}": os.path.join(
342
+ ".prizmkit", "refactor", args.refactor_id, "workflow-checkpoint.json",
343
+ ),
307
344
  "{{TIMESTAMP}}": "", # Placeholder — agent fills in timestamp
308
345
  }
309
346
 
310
347
  return replacements
311
348
 
312
349
 
313
- def render_template(template_content, replacements):
314
- """Render the template by replacing all {{PLACEHOLDER}} variables."""
315
- content = template_content
350
+ def process_conditional_blocks(content, resume_phase):
351
+ """Handle conditional blocks based on resume_phase.
352
+
353
+ - {{IF_RESUME}}...{{END_IF_RESUME}} — include only when resuming (resume_phase != "null")
354
+ - {{IF_FRESH_START}}...{{END_IF_FRESH_START}} — include only on fresh start (resume_phase == "null")
355
+ """
356
+ is_resume = resume_phase != "null"
357
+
358
+ if is_resume:
359
+ # Keep IF_RESUME content, strip markers
360
+ content = content.replace("{{IF_RESUME}}\n", "")
361
+ content = content.replace("{{IF_RESUME}}", "")
362
+ content = content.replace("{{END_IF_RESUME}}\n", "")
363
+ content = content.replace("{{END_IF_RESUME}}", "")
364
+ # Remove IF_FRESH_START blocks entirely
365
+ content = re.sub(
366
+ r"\{\{IF_FRESH_START\}\}.*?\{\{END_IF_FRESH_START\}\}\n?",
367
+ "", content, flags=re.DOTALL,
368
+ )
369
+ else:
370
+ # Keep IF_FRESH_START content, strip markers
371
+ content = content.replace("{{IF_FRESH_START}}\n", "")
372
+ content = content.replace("{{IF_FRESH_START}}", "")
373
+ content = content.replace("{{END_IF_FRESH_START}}\n", "")
374
+ content = content.replace("{{END_IF_FRESH_START}}", "")
375
+ # Remove IF_RESUME blocks entirely
376
+ content = re.sub(
377
+ r"\{\{IF_RESUME\}\}.*?\{\{END_IF_RESUME\}\}\n?",
378
+ "", content, flags=re.DOTALL,
379
+ )
380
+
381
+ return content
382
+
316
383
 
317
- # Replace all {{PLACEHOLDER}} variables
384
+ def render_template(template_content, replacements, resume_phase):
385
+ """Render the template by processing conditionals and replacing placeholders."""
386
+ # Step 1: Process conditional blocks
387
+ content = process_conditional_blocks(template_content, resume_phase)
388
+
389
+ # Step 2: Replace all {{PLACEHOLDER}} variables
318
390
  for placeholder, value in replacements.items():
319
391
  content = content.replace(placeholder, value)
320
392
 
@@ -388,20 +460,35 @@ def main():
388
460
  global_context = {}
389
461
 
390
462
  # Build replacements
391
- replacements = build_replacements(args, refactor, global_context, script_dir)
463
+ replacements = build_replacements(args, refactor, refactors, global_context, script_dir)
392
464
 
393
465
  # Render the template
394
- rendered = render_template(template_content, replacements)
466
+ rendered = render_template(template_content, replacements, args.resume_phase)
395
467
 
396
468
  # Write the output
397
469
  err = write_output(args.output, rendered)
398
470
  if err:
399
471
  emit_failure(err)
400
472
 
473
+ # Resolve critic and mode
474
+ refactor_critic = refactor.get("critic", False)
475
+ if args.critic is not None:
476
+ critic_enabled = str(args.critic).lower() == "true"
477
+ else:
478
+ critic_enabled = bool(refactor_critic)
479
+
480
+ pipeline_mode = args.mode or "standard"
481
+ agent_count = 5 if critic_enabled else 3
482
+
401
483
  # Success
484
+ refactor_model = refactor.get("model", "")
402
485
  output = {
403
486
  "success": True,
404
487
  "output_path": os.path.abspath(args.output),
488
+ "model": refactor_model,
489
+ "pipeline_mode": pipeline_mode,
490
+ "agent_count": agent_count,
491
+ "critic_enabled": critic_enabled,
405
492
  }
406
493
  print(json.dumps(output, indent=2, ensure_ascii=False))
407
494
  sys.exit(0)
@@ -33,8 +33,12 @@ REQUIRED_BUG_FIELDS = [
33
33
  VALID_SEVERITIES = ["critical", "high", "medium", "low"]
34
34
  VALID_VERIFICATION_TYPES = ["automated", "manual", "hybrid"]
35
35
  VALID_STATUSES = [
36
- "pending", "triaging", "reproducing", "fixing",
37
- "verifying", "completed", "failed", "needs_info", "skipped",
36
+ "pending", "in_progress", "completed", "failed",
37
+ "skipped", "needs_info",
38
+ ]
39
+ TERMINAL_STATUSES = {"completed", "failed", "skipped", "needs_info"}
40
+ VALID_ERROR_SOURCE_TYPES = [
41
+ "stack_trace", "user_report", "failed_test", "log_pattern", "monitoring_alert",
38
42
  ]
39
43
 
40
44
 
@@ -173,6 +177,17 @@ def validate_bugs(bugs):
173
177
  bid or "index {}".format(i)
174
178
  )
175
179
  )
180
+ else:
181
+ es_type = error_source["type"]
182
+ if es_type not in VALID_ERROR_SOURCE_TYPES:
183
+ # Warn but don't error — the pipeline can still attempt the fix
184
+ print(
185
+ "WARNING: Bug '{}' error_source.type '{}' is not one of {} "
186
+ "— pipeline will still attempt to process this bug".format(
187
+ bid or "index {}".format(i), es_type, VALID_ERROR_SOURCE_TYPES
188
+ ),
189
+ file=sys.stderr,
190
+ )
176
191
 
177
192
  # Validate acceptance_criteria is a list
178
193
  ac = bug.get("acceptance_criteria")
@@ -195,12 +210,18 @@ def create_state_directory(state_dir, bug_list_path, bugs):
195
210
  bugs_dir = os.path.join(abs_state_dir, "bugs")
196
211
 
197
212
  now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
198
- run_id = "bugfix-run-" + datetime.now(timezone.utc).strftime("%Y%m%d%H%M")
213
+ run_id = "bugfix-run-" + datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
199
214
 
200
215
  # Create top-level state directory
201
216
  os.makedirs(abs_state_dir, exist_ok=True)
202
217
  os.makedirs(bugs_dir, exist_ok=True)
203
218
 
219
+ # Count bugs already in terminal status at init time
220
+ completed_count = sum(
221
+ 1 for b in bugs
222
+ if isinstance(b, dict) and b.get("status") in TERMINAL_STATUSES
223
+ )
224
+
204
225
  # Write pipeline.json
205
226
  pipeline_state = {
206
227
  "run_id": run_id,
@@ -209,7 +230,7 @@ def create_state_directory(state_dir, bug_list_path, bugs):
209
230
  "bug_list_path": rel_bug_list_path,
210
231
  "created_at": now,
211
232
  "total_bugs": len(bugs),
212
- "completed_bugs": 0,
233
+ "completed_bugs": completed_count,
213
234
  }
214
235
  pipeline_path = os.path.join(abs_state_dir, "pipeline.json")
215
236
  with open(pipeline_path, "w", encoding="utf-8") as f:
@@ -228,9 +249,13 @@ def create_state_directory(state_dir, bug_list_path, bugs):
228
249
  sessions_dir = os.path.join(bug_dir, "sessions")
229
250
  os.makedirs(sessions_dir, exist_ok=True)
230
251
 
252
+ # Respect existing terminal status from bug-fix-list.json
253
+ bl_status = bug.get("status", "pending")
254
+ init_status = bl_status if bl_status in TERMINAL_STATUSES else "pending"
255
+
231
256
  bug_status = {
232
257
  "bug_id": bid,
233
- "status": "pending",
258
+ "status": init_status,
234
259
  "retry_count": 0,
235
260
  "max_retries": 3,
236
261
  "sessions": [],
@@ -18,8 +18,8 @@ from datetime import datetime, timezone
18
18
 
19
19
 
20
20
  EXPECTED_SCHEMA = "dev-pipeline-feature-list-v1"
21
- FEATURE_ID_PATTERN = re.compile(r"^F-\d{3}$")
22
- TERMINAL_STATUSES = {"completed", "failed", "skipped"}
21
+ FEATURE_ID_PATTERN = re.compile(r"^F-\d{3}(-[A-Z])?$")
22
+ TERMINAL_STATUSES = {"completed", "failed", "skipped", "split", "auto_skipped"}
23
23
  VALID_PRIORITIES = {"critical", "high", "medium", "low"}
24
24
 
25
25
  REQUIRED_FEATURE_FIELDS = [
@@ -245,7 +245,7 @@ def create_state_directory(state_dir, feature_list_path, features):
245
245
  features_dir = os.path.join(abs_state_dir, "features")
246
246
 
247
247
  now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
248
- run_id = "run-" + datetime.now(timezone.utc).strftime("%Y%m%d%H%M")
248
+ run_id = "run-" + datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
249
249
 
250
250
  # Create top-level state directory
251
251
  os.makedirs(abs_state_dir, exist_ok=True)
@@ -40,8 +40,9 @@ VALID_TYPES = ["extract", "rename", "restructure", "simplify", "decouple", "migr
40
40
  VALID_PRIORITIES = ["critical", "high", "medium", "low"]
41
41
  VALID_COMPLEXITIES = ["low", "medium", "high"]
42
42
  VALID_STATUSES = [
43
- "pending", "in_progress", "completed", "failed", "skipped",
43
+ "pending", "in_progress", "completed", "failed", "skipped", "auto_skipped",
44
44
  ]
45
+ TERMINAL_STATUSES = {"completed", "failed", "skipped", "auto_skipped"}
45
46
  VALID_BEHAVIOR_STRATEGIES = ["test-gate", "snapshot", "manual"]
46
47
 
47
48
 
@@ -285,12 +286,18 @@ def create_state_directory(state_dir, refactor_list_path, refactors):
285
286
  refactors_dir = os.path.join(abs_state_dir, "refactors")
286
287
 
287
288
  now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
288
- run_id = "refactor-run-" + datetime.now(timezone.utc).strftime("%Y%m%d%H%M")
289
+ run_id = "refactor-run-" + datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
289
290
 
290
291
  # Create top-level state directory
291
292
  os.makedirs(abs_state_dir, exist_ok=True)
292
293
  os.makedirs(refactors_dir, exist_ok=True)
293
294
 
295
+ # Count refactors already in terminal status at init time
296
+ completed_count = sum(
297
+ 1 for r in refactors
298
+ if isinstance(r, dict) and r.get("status") in TERMINAL_STATUSES
299
+ )
300
+
294
301
  # Write pipeline.json
295
302
  pipeline_state = {
296
303
  "run_id": run_id,
@@ -299,7 +306,7 @@ def create_state_directory(state_dir, refactor_list_path, refactors):
299
306
  "refactor_list_path": rel_refactor_list_path,
300
307
  "created_at": now,
301
308
  "total_refactors": len(refactors),
302
- "completed_refactors": 0,
309
+ "completed_refactors": completed_count,
303
310
  }
304
311
  pipeline_path = os.path.join(abs_state_dir, "pipeline.json")
305
312
  with open(pipeline_path, "w", encoding="utf-8") as f:
@@ -318,9 +325,13 @@ def create_state_directory(state_dir, refactor_list_path, refactors):
318
325
  sessions_dir = os.path.join(refactor_dir, "sessions")
319
326
  os.makedirs(sessions_dir, exist_ok=True)
320
327
 
328
+ # Respect existing terminal status from refactor-list.json
329
+ rl_status = refactor.get("status", "pending")
330
+ init_status = rl_status if rl_status in TERMINAL_STATUSES else "pending"
331
+
321
332
  refactor_status = {
322
333
  "refactor_id": rid,
323
- "status": "pending",
334
+ "status": init_status,
324
335
  "retry_count": 0,
325
336
  "max_retries": 3,
326
337
  "sessions": [],
@@ -28,7 +28,7 @@ from datetime import datetime, timezone
28
28
  # Ordered pipeline phases — index defines forward-only progression.
29
29
  # Phase detection is monotonic: once a phase is reached, earlier phases
30
30
  # cannot be re-entered (prevents false positives from file content mentions).
31
- PHASE_ORDER = ["plan", "analyze", "implement", "code-review", "retrospective", "commit"]
31
+ PHASE_ORDER = ["plan", "implement", "code-review", "retrospective", "commit"]
32
32
 
33
33
  # Keywords for phase detection.
34
34
  # "strong" keywords are skill invocations — high confidence, but still
@@ -40,10 +40,6 @@ PHASE_KEYWORDS = {
40
40
  "strong": ["prizmkit-plan"],
41
41
  "weak": ["spec.md", "plan.md", "specification", "architecture", "task checklist", "task breakdown", "gathering requirements"],
42
42
  },
43
- "analyze": {
44
- "strong": ["prizmkit-analyze"],
45
- "weak": ["cross-check", "consistency analysis", "analyzing"],
46
- },
47
43
  "implement": {
48
44
  "strong": ["prizmkit-implement"],
49
45
  "weak": ["implement", "writing code", "TDD", "coding"],
@@ -0,0 +1,191 @@
1
+ #!/usr/bin/env python3
2
+ """Patch completion_notes into feature-list.json, refactor-list.json, or bug-fix-list.json.
3
+
4
+ Reads a completion-summary.json file written by the AI session and patches
5
+ the corresponding item in the task list with the completion_notes field.
6
+
7
+ This enables rich dependency context propagation: when a downstream task's
8
+ bootstrap prompt is generated, it can read completion_notes from its
9
+ completed dependencies to understand what was built/changed.
10
+
11
+ Usage:
12
+ python3 patch-completion-notes.py \
13
+ --feature-list .prizmkit/plans/feature-list.json \
14
+ --feature-id F-001 \
15
+ --summary .prizmkit/specs/001-my-feature/completion-summary.json
16
+
17
+ python3 patch-completion-notes.py \
18
+ --refactor-list .prizmkit/plans/refactor-list.json \
19
+ --refactor-id R-001 \
20
+ --summary <path-to-summary>
21
+ """
22
+
23
+ import argparse
24
+ import json
25
+ import os
26
+ import sys
27
+
28
+ from utils import load_json_file, write_json_file, setup_logging
29
+
30
+ LOGGER = setup_logging("patch-completion-notes")
31
+
32
+
33
+ def parse_args():
34
+ parser = argparse.ArgumentParser(
35
+ description="Patch completion_notes into a task list from completion-summary.json."
36
+ )
37
+ parser.add_argument(
38
+ "--feature-list",
39
+ default=None,
40
+ help="Path to .prizmkit/plans/feature-list.json",
41
+ )
42
+ parser.add_argument(
43
+ "--refactor-list",
44
+ default=None,
45
+ help="Path to .prizmkit/plans/refactor-list.json",
46
+ )
47
+ parser.add_argument(
48
+ "--bug-list",
49
+ default=None,
50
+ help="Path to .prizmkit/plans/bug-fix-list.json",
51
+ )
52
+ parser.add_argument(
53
+ "--feature-id",
54
+ default=None,
55
+ help="Feature ID to patch (e.g. F-001)",
56
+ )
57
+ parser.add_argument(
58
+ "--refactor-id",
59
+ default=None,
60
+ help="Refactor ID to patch (e.g. R-001)",
61
+ )
62
+ parser.add_argument(
63
+ "--bug-id",
64
+ default=None,
65
+ help="Bug ID to patch (e.g. B-001)",
66
+ )
67
+ parser.add_argument(
68
+ "--summary",
69
+ required=True,
70
+ help="Path to completion-summary.json file",
71
+ )
72
+ return parser.parse_args()
73
+
74
+
75
+ def read_completion_notes(summary_path):
76
+ """Read completion_notes from a completion-summary.json file.
77
+
78
+ Returns a list of strings, or an empty list if the file is missing
79
+ or malformed.
80
+ """
81
+ if not os.path.isfile(summary_path):
82
+ LOGGER.warning("Summary file not found: %s", summary_path)
83
+ return []
84
+
85
+ data, err = load_json_file(summary_path)
86
+ if err:
87
+ LOGGER.warning("Failed to read summary: %s", err)
88
+ return []
89
+
90
+ notes = data.get("completion_notes", [])
91
+ if not isinstance(notes, list):
92
+ LOGGER.warning("completion_notes is not a list in %s", summary_path)
93
+ return []
94
+
95
+ # Filter: only keep non-empty strings
96
+ return [n for n in notes if isinstance(n, str) and n.strip()]
97
+
98
+
99
+ def patch_list(list_path, item_id, item_key, notes):
100
+ """Patch completion_notes into a task list JSON file.
101
+
102
+ Args:
103
+ list_path: Path to the JSON list file
104
+ item_id: ID of the item to patch (e.g. "F-001" or "R-001")
105
+ item_key: Key for the items array (e.g. "features" or "refactors")
106
+ notes: List of completion note strings
107
+ """
108
+ data, err = load_json_file(list_path)
109
+ if err:
110
+ LOGGER.error("Failed to read list: %s", err)
111
+ return False
112
+
113
+ items = data.get(item_key, [])
114
+ found = False
115
+ for item in items:
116
+ if isinstance(item, dict) and item.get("id") == item_id:
117
+ item["completion_notes"] = notes
118
+ found = True
119
+ break
120
+
121
+ if not found:
122
+ LOGGER.error("Item %s not found in %s", item_id, list_path)
123
+ return False
124
+
125
+ err = write_json_file(list_path, data)
126
+ if err:
127
+ LOGGER.error("Failed to write list: %s", err)
128
+ return False
129
+
130
+ LOGGER.info(
131
+ "Patched %d completion notes for %s in %s",
132
+ len(notes), item_id, list_path,
133
+ )
134
+ return True
135
+
136
+
137
+ def main():
138
+ args = parse_args()
139
+
140
+ # Determine mode: feature, refactor, or bug
141
+ if args.feature_list and args.feature_id:
142
+ list_path = args.feature_list
143
+ item_id = args.feature_id
144
+ item_key = "features"
145
+ elif args.refactor_list and args.refactor_id:
146
+ list_path = args.refactor_list
147
+ item_id = args.refactor_id
148
+ item_key = "refactors"
149
+ elif args.bug_list and args.bug_id:
150
+ list_path = args.bug_list
151
+ item_id = args.bug_id
152
+ item_key = "bugs"
153
+ else:
154
+ print(
155
+ "Error: must provide either (--feature-list + --feature-id) "
156
+ "or (--refactor-list + --refactor-id) "
157
+ "or (--bug-list + --bug-id)",
158
+ file=sys.stderr,
159
+ )
160
+ sys.exit(1)
161
+
162
+ # Read completion notes
163
+ notes = read_completion_notes(args.summary)
164
+ if not notes:
165
+ LOGGER.info("No completion notes to patch for %s", item_id)
166
+ sys.exit(0)
167
+
168
+ # Patch the list
169
+ if not patch_list(list_path, item_id, item_key, notes):
170
+ sys.exit(1)
171
+
172
+ # Output result
173
+ result = {
174
+ "item_id": item_id,
175
+ "notes_count": len(notes),
176
+ "list_path": os.path.abspath(list_path),
177
+ }
178
+ print(json.dumps(result, indent=2, ensure_ascii=False))
179
+
180
+
181
+ if __name__ == "__main__":
182
+ try:
183
+ main()
184
+ except KeyboardInterrupt:
185
+ sys.exit(130)
186
+ except SystemExit:
187
+ raise
188
+ except Exception as exc:
189
+ LOGGER.exception("Unhandled exception")
190
+ print("Error: {}".format(exc), file=sys.stderr)
191
+ sys.exit(1)