shipwright-cli 3.2.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (279)
  1. package/.claude/agents/code-reviewer.md +2 -0
  2. package/.claude/agents/devops-engineer.md +2 -0
  3. package/.claude/agents/doc-fleet-agent.md +2 -0
  4. package/.claude/agents/pipeline-agent.md +2 -0
  5. package/.claude/agents/shell-script-specialist.md +2 -0
  6. package/.claude/agents/test-specialist.md +2 -0
  7. package/.claude/hooks/agent-crash-capture.sh +32 -0
  8. package/.claude/hooks/post-tool-use.sh +3 -2
  9. package/.claude/hooks/pre-tool-use.sh +35 -3
  10. package/README.md +4 -4
  11. package/claude-code/hooks/config-change.sh +18 -0
  12. package/claude-code/hooks/instructions-reloaded.sh +7 -0
  13. package/claude-code/hooks/worktree-create.sh +25 -0
  14. package/claude-code/hooks/worktree-remove.sh +20 -0
  15. package/config/code-constitution.json +130 -0
  16. package/dashboard/middleware/auth.ts +134 -0
  17. package/dashboard/middleware/constants.ts +21 -0
  18. package/dashboard/public/index.html +2 -6
  19. package/dashboard/public/styles.css +100 -97
  20. package/dashboard/routes/auth.ts +38 -0
  21. package/dashboard/server.ts +66 -25
  22. package/dashboard/services/config.ts +26 -0
  23. package/dashboard/services/db.ts +118 -0
  24. package/dashboard/src/canvas/pixel-agent.ts +298 -0
  25. package/dashboard/src/canvas/pixel-sprites.ts +440 -0
  26. package/dashboard/src/canvas/shipyard-effects.ts +367 -0
  27. package/dashboard/src/canvas/shipyard-scene.ts +616 -0
  28. package/dashboard/src/canvas/submarine-layout.ts +267 -0
  29. package/dashboard/src/components/header.ts +8 -7
  30. package/dashboard/src/core/router.ts +1 -0
  31. package/dashboard/src/design/submarine-theme.ts +253 -0
  32. package/dashboard/src/main.ts +2 -0
  33. package/dashboard/src/types/api.ts +2 -1
  34. package/dashboard/src/views/activity.ts +2 -1
  35. package/dashboard/src/views/shipyard.ts +39 -0
  36. package/dashboard/types/index.ts +166 -0
  37. package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
  38. package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
  39. package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
  40. package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
  41. package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
  42. package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
  43. package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
  44. package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
  45. package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
  46. package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
  47. package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
  48. package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
  49. package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
  50. package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
  51. package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
  52. package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
  53. package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
  54. package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
  55. package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
  56. package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
  57. package/docs/research/RESEARCH_INDEX.md +439 -0
  58. package/docs/research/RESEARCH_SOURCES.md +440 -0
  59. package/docs/research/RESEARCH_SUMMARY.txt +275 -0
  60. package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
  61. package/package.json +2 -2
  62. package/scripts/lib/adaptive-model.sh +427 -0
  63. package/scripts/lib/adaptive-timeout.sh +316 -0
  64. package/scripts/lib/audit-trail.sh +309 -0
  65. package/scripts/lib/auto-recovery.sh +471 -0
  66. package/scripts/lib/bandit-selector.sh +431 -0
  67. package/scripts/lib/bootstrap.sh +104 -2
  68. package/scripts/lib/causal-graph.sh +455 -0
  69. package/scripts/lib/compat.sh +126 -0
  70. package/scripts/lib/compound-audit.sh +337 -0
  71. package/scripts/lib/constitutional.sh +454 -0
  72. package/scripts/lib/context-budget.sh +359 -0
  73. package/scripts/lib/convergence.sh +594 -0
  74. package/scripts/lib/cost-optimizer.sh +634 -0
  75. package/scripts/lib/daemon-adaptive.sh +10 -0
  76. package/scripts/lib/daemon-dispatch.sh +106 -17
  77. package/scripts/lib/daemon-failure.sh +34 -4
  78. package/scripts/lib/daemon-patrol.sh +23 -2
  79. package/scripts/lib/daemon-poll-github.sh +361 -0
  80. package/scripts/lib/daemon-poll-health.sh +299 -0
  81. package/scripts/lib/daemon-poll.sh +27 -611
  82. package/scripts/lib/daemon-state.sh +112 -66
  83. package/scripts/lib/daemon-triage.sh +10 -0
  84. package/scripts/lib/dod-scorecard.sh +442 -0
  85. package/scripts/lib/error-actionability.sh +300 -0
  86. package/scripts/lib/formal-spec.sh +461 -0
  87. package/scripts/lib/helpers.sh +177 -4
  88. package/scripts/lib/intent-analysis.sh +409 -0
  89. package/scripts/lib/loop-convergence.sh +350 -0
  90. package/scripts/lib/loop-iteration.sh +682 -0
  91. package/scripts/lib/loop-progress.sh +48 -0
  92. package/scripts/lib/loop-restart.sh +185 -0
  93. package/scripts/lib/memory-effectiveness.sh +506 -0
  94. package/scripts/lib/mutation-executor.sh +352 -0
  95. package/scripts/lib/outcome-feedback.sh +521 -0
  96. package/scripts/lib/pipeline-cli.sh +336 -0
  97. package/scripts/lib/pipeline-commands.sh +1216 -0
  98. package/scripts/lib/pipeline-detection.sh +100 -2
  99. package/scripts/lib/pipeline-execution.sh +897 -0
  100. package/scripts/lib/pipeline-github.sh +28 -3
  101. package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
  102. package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
  103. package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
  104. package/scripts/lib/pipeline-intelligence.sh +100 -1136
  105. package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
  106. package/scripts/lib/pipeline-quality-checks.sh +17 -715
  107. package/scripts/lib/pipeline-quality-gates.sh +563 -0
  108. package/scripts/lib/pipeline-stages-build.sh +730 -0
  109. package/scripts/lib/pipeline-stages-delivery.sh +965 -0
  110. package/scripts/lib/pipeline-stages-intake.sh +1133 -0
  111. package/scripts/lib/pipeline-stages-monitor.sh +407 -0
  112. package/scripts/lib/pipeline-stages-review.sh +1022 -0
  113. package/scripts/lib/pipeline-stages.sh +59 -2929
  114. package/scripts/lib/pipeline-state.sh +36 -5
  115. package/scripts/lib/pipeline-util.sh +487 -0
  116. package/scripts/lib/policy-learner.sh +438 -0
  117. package/scripts/lib/process-reward.sh +493 -0
  118. package/scripts/lib/project-detect.sh +649 -0
  119. package/scripts/lib/quality-profile.sh +334 -0
  120. package/scripts/lib/recruit-commands.sh +885 -0
  121. package/scripts/lib/recruit-learning.sh +739 -0
  122. package/scripts/lib/recruit-roles.sh +648 -0
  123. package/scripts/lib/reward-aggregator.sh +458 -0
  124. package/scripts/lib/rl-optimizer.sh +362 -0
  125. package/scripts/lib/root-cause.sh +427 -0
  126. package/scripts/lib/scope-enforcement.sh +445 -0
  127. package/scripts/lib/session-restart.sh +493 -0
  128. package/scripts/lib/skill-memory.sh +300 -0
  129. package/scripts/lib/skill-registry.sh +775 -0
  130. package/scripts/lib/spec-driven.sh +476 -0
  131. package/scripts/lib/test-helpers.sh +18 -7
  132. package/scripts/lib/test-holdout.sh +429 -0
  133. package/scripts/lib/test-optimizer.sh +511 -0
  134. package/scripts/shipwright-file-suggest.sh +45 -0
  135. package/scripts/skills/adversarial-quality.md +61 -0
  136. package/scripts/skills/api-design.md +44 -0
  137. package/scripts/skills/architecture-design.md +50 -0
  138. package/scripts/skills/brainstorming.md +43 -0
  139. package/scripts/skills/data-pipeline.md +44 -0
  140. package/scripts/skills/deploy-safety.md +64 -0
  141. package/scripts/skills/documentation.md +38 -0
  142. package/scripts/skills/frontend-design.md +45 -0
  143. package/scripts/skills/generated/.gitkeep +0 -0
  144. package/scripts/skills/generated/_refinements/.gitkeep +0 -0
  145. package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
  146. package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
  147. package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
  148. package/scripts/skills/generated/cli-version-management.md +29 -0
  149. package/scripts/skills/generated/collection-system-validation.md +99 -0
  150. package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
  151. package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
  152. package/scripts/skills/generated/test-parallelization-detection.md +65 -0
  153. package/scripts/skills/observability.md +79 -0
  154. package/scripts/skills/performance.md +48 -0
  155. package/scripts/skills/pr-quality.md +49 -0
  156. package/scripts/skills/product-thinking.md +43 -0
  157. package/scripts/skills/security-audit.md +49 -0
  158. package/scripts/skills/systematic-debugging.md +40 -0
  159. package/scripts/skills/testing-strategy.md +47 -0
  160. package/scripts/skills/two-stage-review.md +52 -0
  161. package/scripts/skills/validation-thoroughness.md +55 -0
  162. package/scripts/sw +9 -3
  163. package/scripts/sw-activity.sh +9 -2
  164. package/scripts/sw-adaptive.sh +2 -1
  165. package/scripts/sw-adversarial.sh +2 -1
  166. package/scripts/sw-architecture-enforcer.sh +3 -1
  167. package/scripts/sw-auth.sh +12 -2
  168. package/scripts/sw-autonomous.sh +5 -1
  169. package/scripts/sw-changelog.sh +4 -1
  170. package/scripts/sw-checkpoint.sh +2 -1
  171. package/scripts/sw-ci.sh +5 -1
  172. package/scripts/sw-cleanup.sh +4 -26
  173. package/scripts/sw-code-review.sh +10 -4
  174. package/scripts/sw-connect.sh +2 -1
  175. package/scripts/sw-context.sh +2 -1
  176. package/scripts/sw-cost.sh +48 -3
  177. package/scripts/sw-daemon.sh +66 -9
  178. package/scripts/sw-dashboard.sh +3 -1
  179. package/scripts/sw-db.sh +59 -16
  180. package/scripts/sw-decide.sh +8 -2
  181. package/scripts/sw-decompose.sh +360 -17
  182. package/scripts/sw-deps.sh +4 -1
  183. package/scripts/sw-developer-simulation.sh +4 -1
  184. package/scripts/sw-discovery.sh +325 -2
  185. package/scripts/sw-doc-fleet.sh +4 -1
  186. package/scripts/sw-docs-agent.sh +3 -1
  187. package/scripts/sw-docs.sh +2 -1
  188. package/scripts/sw-doctor.sh +453 -2
  189. package/scripts/sw-dora.sh +4 -1
  190. package/scripts/sw-durable.sh +4 -3
  191. package/scripts/sw-e2e-orchestrator.sh +17 -16
  192. package/scripts/sw-eventbus.sh +7 -1
  193. package/scripts/sw-evidence.sh +364 -12
  194. package/scripts/sw-feedback.sh +550 -9
  195. package/scripts/sw-fix.sh +20 -1
  196. package/scripts/sw-fleet-discover.sh +6 -2
  197. package/scripts/sw-fleet-viz.sh +4 -1
  198. package/scripts/sw-fleet.sh +5 -1
  199. package/scripts/sw-github-app.sh +16 -3
  200. package/scripts/sw-github-checks.sh +3 -2
  201. package/scripts/sw-github-deploy.sh +3 -2
  202. package/scripts/sw-github-graphql.sh +18 -7
  203. package/scripts/sw-guild.sh +5 -1
  204. package/scripts/sw-heartbeat.sh +5 -30
  205. package/scripts/sw-hello.sh +67 -0
  206. package/scripts/sw-hygiene.sh +6 -1
  207. package/scripts/sw-incident.sh +265 -1
  208. package/scripts/sw-init.sh +18 -2
  209. package/scripts/sw-instrument.sh +10 -2
  210. package/scripts/sw-intelligence.sh +42 -6
  211. package/scripts/sw-jira.sh +5 -1
  212. package/scripts/sw-launchd.sh +2 -1
  213. package/scripts/sw-linear.sh +4 -1
  214. package/scripts/sw-logs.sh +4 -1
  215. package/scripts/sw-loop.sh +432 -1128
  216. package/scripts/sw-memory.sh +356 -2
  217. package/scripts/sw-mission-control.sh +6 -1
  218. package/scripts/sw-model-router.sh +481 -26
  219. package/scripts/sw-otel.sh +13 -4
  220. package/scripts/sw-oversight.sh +14 -5
  221. package/scripts/sw-patrol-meta.sh +334 -0
  222. package/scripts/sw-pipeline-composer.sh +5 -1
  223. package/scripts/sw-pipeline-vitals.sh +2 -1
  224. package/scripts/sw-pipeline.sh +53 -2664
  225. package/scripts/sw-pm.sh +12 -5
  226. package/scripts/sw-pr-lifecycle.sh +2 -1
  227. package/scripts/sw-predictive.sh +7 -1
  228. package/scripts/sw-prep.sh +185 -2
  229. package/scripts/sw-ps.sh +5 -25
  230. package/scripts/sw-public-dashboard.sh +15 -3
  231. package/scripts/sw-quality.sh +2 -1
  232. package/scripts/sw-reaper.sh +8 -25
  233. package/scripts/sw-recruit.sh +156 -2303
  234. package/scripts/sw-regression.sh +19 -12
  235. package/scripts/sw-release-manager.sh +3 -1
  236. package/scripts/sw-release.sh +4 -1
  237. package/scripts/sw-remote.sh +3 -1
  238. package/scripts/sw-replay.sh +7 -1
  239. package/scripts/sw-retro.sh +158 -1
  240. package/scripts/sw-review-rerun.sh +3 -1
  241. package/scripts/sw-scale.sh +10 -3
  242. package/scripts/sw-security-audit.sh +6 -1
  243. package/scripts/sw-self-optimize.sh +6 -3
  244. package/scripts/sw-session.sh +9 -3
  245. package/scripts/sw-setup.sh +3 -1
  246. package/scripts/sw-stall-detector.sh +406 -0
  247. package/scripts/sw-standup.sh +15 -7
  248. package/scripts/sw-status.sh +3 -1
  249. package/scripts/sw-strategic.sh +4 -1
  250. package/scripts/sw-stream.sh +7 -1
  251. package/scripts/sw-swarm.sh +18 -6
  252. package/scripts/sw-team-stages.sh +13 -6
  253. package/scripts/sw-templates.sh +5 -29
  254. package/scripts/sw-testgen.sh +7 -1
  255. package/scripts/sw-tmux-pipeline.sh +4 -1
  256. package/scripts/sw-tmux-role-color.sh +2 -0
  257. package/scripts/sw-tmux-status.sh +1 -1
  258. package/scripts/sw-tmux.sh +3 -1
  259. package/scripts/sw-trace.sh +3 -1
  260. package/scripts/sw-tracker-github.sh +3 -0
  261. package/scripts/sw-tracker-jira.sh +3 -0
  262. package/scripts/sw-tracker-linear.sh +3 -0
  263. package/scripts/sw-tracker.sh +3 -1
  264. package/scripts/sw-triage.sh +2 -1
  265. package/scripts/sw-upgrade.sh +3 -1
  266. package/scripts/sw-ux.sh +5 -2
  267. package/scripts/sw-webhook.sh +3 -1
  268. package/scripts/sw-widgets.sh +3 -1
  269. package/scripts/sw-worktree.sh +15 -3
  270. package/scripts/test-skill-injection.sh +1233 -0
  271. package/templates/pipelines/autonomous.json +27 -3
  272. package/templates/pipelines/cost-aware.json +34 -8
  273. package/templates/pipelines/deployed.json +12 -0
  274. package/templates/pipelines/enterprise.json +12 -0
  275. package/templates/pipelines/fast.json +6 -0
  276. package/templates/pipelines/full.json +27 -3
  277. package/templates/pipelines/hotfix.json +6 -0
  278. package/templates/pipelines/standard.json +12 -0
  279. package/templates/pipelines/tdd.json +12 -0
@@ -1,4 +1,5 @@
1
1
  #!/usr/bin/env bash
2
+ # shellcheck disable=SC2034 # config vars used by sourced scripts and subshells
2
3
  # ╔═══════════════════════════════════════════════════════════════════════════╗
3
4
  # ║ shipwright pipeline — Autonomous Feature Delivery (Idea → Production) ║
4
5
  # ║ Full GitHub integration · Auto-detection · Task tracking · Metrics ║
@@ -12,7 +13,7 @@ unset CLAUDECODE 2>/dev/null || true
12
13
  trap '' HUP
13
14
  trap '' SIGPIPE
14
15
 
15
- VERSION="3.2.0"
16
+ VERSION="3.3.0"
16
17
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
17
18
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
18
19
 
@@ -40,12 +41,21 @@ fi
40
41
  [[ -f "$SCRIPT_DIR/lib/pipeline-github.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-github.sh"
41
42
  # shellcheck source=lib/pipeline-detection.sh
42
43
  [[ -f "$SCRIPT_DIR/lib/pipeline-detection.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-detection.sh"
44
+ # Adaptive Stage Timeout Engine (optional)
45
+ # shellcheck source=lib/adaptive-timeout.sh
46
+ [[ -f "$SCRIPT_DIR/lib/adaptive-timeout.sh" ]] && source "$SCRIPT_DIR/lib/adaptive-timeout.sh" 2>/dev/null || true
43
47
  # shellcheck source=lib/pipeline-quality-checks.sh
44
48
  [[ -f "$SCRIPT_DIR/lib/pipeline-quality-checks.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-quality-checks.sh"
45
49
  # shellcheck source=lib/pipeline-intelligence.sh
46
50
  [[ -f "$SCRIPT_DIR/lib/pipeline-intelligence.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-intelligence.sh"
47
51
  # shellcheck source=lib/pipeline-stages.sh
48
52
  [[ -f "$SCRIPT_DIR/lib/pipeline-stages.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-stages.sh"
53
+ # Audit trail for compliance-grade pipeline traceability
54
+ # shellcheck source=lib/audit-trail.sh
55
+ [[ -f "$SCRIPT_DIR/lib/audit-trail.sh" ]] && source "$SCRIPT_DIR/lib/audit-trail.sh" 2>/dev/null || true
56
+ # Root cause classifier for failure analysis and platform issue auto-creation
57
+ # shellcheck source=lib/root-cause.sh
58
+ [[ -f "$SCRIPT_DIR/lib/root-cause.sh" ]] && source "$SCRIPT_DIR/lib/root-cause.sh" 2>/dev/null || true
49
59
  PIPELINE_COVERAGE_THRESHOLD="${PIPELINE_COVERAGE_THRESHOLD:-60}"
50
60
  PIPELINE_QUALITY_GATE_THRESHOLD="${PIPELINE_QUALITY_GATE_THRESHOLD:-70}"
51
61
 
@@ -92,14 +102,25 @@ fi
92
102
  if [[ -f "$SCRIPT_DIR/sw-durable.sh" ]]; then
93
103
  source "$SCRIPT_DIR/sw-durable.sh"
94
104
  fi
95
- # shellcheck source=sw-db.sh — for db_save_checkpoint/db_load_checkpoint (durable workflows)
105
+ # shellcheck source=sw-db.sh
106
+ # for db_save_checkpoint/db_load_checkpoint (durable workflows)
96
107
  [[ -f "$SCRIPT_DIR/sw-db.sh" ]] && source "$SCRIPT_DIR/sw-db.sh"
97
108
  # Ensure DB schema exists so emit_event → db_add_event can write rows (CREATE IF NOT EXISTS is idempotent)
98
109
  if type init_schema >/dev/null 2>&1 && type check_sqlite3 >/dev/null 2>&1 && check_sqlite3 2>/dev/null; then
99
110
  init_schema 2>/dev/null || true
100
111
  fi
101
- # shellcheck source=sw-cost.sh — for cost_record persistence to costs.json + DB
112
+ # shellcheck source=sw-cost.sh
113
+ # for cost_record persistence to costs.json + DB
102
114
  [[ -f "$SCRIPT_DIR/sw-cost.sh" ]] && source "$SCRIPT_DIR/sw-cost.sh"
115
+ # shellcheck source=lib/cost-optimizer.sh
116
+ # for dynamic cost-performance pipeline optimization (budget checks, reductions, burst mode)
117
+ [[ -f "$SCRIPT_DIR/lib/cost-optimizer.sh" ]] && source "$SCRIPT_DIR/lib/cost-optimizer.sh"
118
+ # shellcheck source=lib/skill-registry.sh
119
+ # for skill_analyze_outcome (AI outcome learning)
120
+ [[ -f "$SCRIPT_DIR/lib/skill-registry.sh" ]] && source "$SCRIPT_DIR/lib/skill-registry.sh"
121
+ # shellcheck source=lib/skill-memory.sh
122
+ # for skill memory operations
123
+ [[ -f "$SCRIPT_DIR/lib/skill-memory.sh" ]] && source "$SCRIPT_DIR/lib/skill-memory.sh"
103
124
 
104
125
  # ─── GitHub API Modules (optional) ─────────────────────────────────────────
105
126
  # shellcheck source=sw-github-graphql.sh
@@ -109,145 +130,15 @@ fi
109
130
  # shellcheck source=sw-github-deploy.sh
110
131
  [[ -f "$SCRIPT_DIR/sw-github-deploy.sh" ]] && source "$SCRIPT_DIR/sw-github-deploy.sh"
111
132
 
112
- # Parse coverage percentage from test output — multi-framework patterns
113
- # Usage: parse_coverage_from_output <log_file>
114
- # Outputs coverage percentage or empty string
115
- parse_coverage_from_output() {
116
- local log_file="$1"
117
- [[ ! -f "$log_file" ]] && return
118
- local cov=""
119
- # Jest/Istanbul: "Statements : 85.5%"
120
- cov=$(grep -oE 'Statements\s*:\s*[0-9.]+' "$log_file" 2>/dev/null | grep -oE '[0-9.]+$' || true)
121
- # Istanbul table: "All files | 85.5"
122
- [[ -z "$cov" ]] && cov=$(grep -oE 'All files\s*\|\s*[0-9.]+' "$log_file" 2>/dev/null | grep -oE '[0-9.]+$' || true)
123
- # pytest-cov: "TOTAL 500 75 85%"
124
- [[ -z "$cov" ]] && cov=$(grep -oE 'TOTAL\s+[0-9]+\s+[0-9]+\s+[0-9]+%' "$log_file" 2>/dev/null | grep -oE '[0-9]+%' | tr -d '%' | tail -1 || true)
125
- # Vitest: "All files | 85.5 |"
126
- [[ -z "$cov" ]] && cov=$(grep -oE 'All files\s*\|\s*[0-9.]+\s*\|' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | head -1 || true)
127
- # Go coverage: "coverage: 85.5% of statements"
128
- [[ -z "$cov" ]] && cov=$(grep -oE 'coverage:\s*[0-9.]+%' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
129
- # Cargo tarpaulin: "85.50% coverage"
130
- [[ -z "$cov" ]] && cov=$(grep -oE '[0-9.]+%\s*coverage' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | head -1 || true)
131
- # Generic: "Coverage: 85.5%"
132
- [[ -z "$cov" ]] && cov=$(grep -oiE 'coverage:?\s*[0-9.]+%' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
133
- echo "$cov"
134
- }
135
-
136
- format_duration() {
137
- local secs="$1"
138
- if [[ "$secs" -ge 3600 ]]; then
139
- printf "%dh %dm %ds" $((secs/3600)) $((secs%3600/60)) $((secs%60))
140
- elif [[ "$secs" -ge 60 ]]; then
141
- printf "%dm %ds" $((secs/60)) $((secs%60))
142
- else
143
- printf "%ds" "$secs"
144
- fi
145
- }
146
-
147
- # Rotate event log if needed (standalone mode — daemon has its own rotation in poll loop)
148
- rotate_event_log_if_needed() {
149
- local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
150
- local max_lines=10000
151
- [[ ! -f "$events_file" ]] && return
152
- local lines
153
- lines=$(wc -l < "$events_file" 2>/dev/null || true)
154
- lines="${lines:-0}"
155
- if [[ "$lines" -gt "$max_lines" ]]; then
156
- local tmp="${events_file}.rotating"
157
- if tail -5000 "$events_file" > "$tmp" 2>/dev/null && mv "$tmp" "$events_file" 2>/dev/null; then
158
- info "Rotated events.jsonl: ${lines} -> 5000 lines"
159
- fi
160
- fi
161
- }
162
-
163
- _pipeline_compact_goal() {
164
- local goal="$1"
165
- local plan_file="${2:-}"
166
- local design_file="${3:-}"
167
- local compact="$goal"
168
-
169
- # Include plan summary (first 20 lines only)
170
- if [[ -n "$plan_file" && -f "$plan_file" ]]; then
171
- compact="${compact}
172
-
173
- ## Plan Summary
174
- $(head -20 "$plan_file" 2>/dev/null || true)
175
- [... full plan in .claude/pipeline-artifacts/plan.md]"
176
- fi
177
-
178
- # Include design key decisions only (grep for headers)
179
- if [[ -n "$design_file" && -f "$design_file" ]]; then
180
- compact="${compact}
181
-
182
- ## Key Design Decisions
183
- $(grep -E '^#{1,3} ' "$design_file" 2>/dev/null | head -10 || true)
184
- [... full design in .claude/pipeline-artifacts/design.md]"
185
- fi
186
-
187
- echo "$compact"
188
- }
189
-
190
- load_composed_pipeline() {
191
- local spec_file="$1"
192
- [[ ! -f "$spec_file" ]] && return 1
193
-
194
- # Read enabled stages from composed spec
195
- local composed_stages
196
- composed_stages=$(jq -r '.stages // [] | .[] | .id' "$spec_file" 2>/dev/null) || return 1
197
- [[ -z "$composed_stages" ]] && return 1
198
-
199
- # Override enabled stages
200
- COMPOSED_STAGES="$composed_stages"
201
-
202
- # Override per-stage settings
203
- local build_max
204
- build_max=$(jq -r '.stages[] | select(.id=="build") | .max_iterations // ""' "$spec_file" 2>/dev/null) || true
205
- [[ -n "$build_max" && "$build_max" != "null" ]] && COMPOSED_BUILD_ITERATIONS="$build_max"
206
-
207
- emit_event "pipeline.composed_loaded" "stages=$(echo "$composed_stages" | wc -l | tr -d ' ')"
208
- return 0
209
- }
210
-
211
- # ─── Token / Cost Parsing ─────────────────────────────────────────────────
212
- parse_claude_tokens() {
213
- local log_file="$1"
214
- local input_tok output_tok
215
- input_tok=$(grep -oE 'input[_ ]tokens?[: ]+[0-9,]+' "$log_file" 2>/dev/null | tail -1 | grep -oE '[0-9,]+' | tr -d ',' || echo "0")
216
- output_tok=$(grep -oE 'output[_ ]tokens?[: ]+[0-9,]+' "$log_file" 2>/dev/null | tail -1 | grep -oE '[0-9,]+' | tr -d ',' || echo "0")
217
-
218
- TOTAL_INPUT_TOKENS=$(( TOTAL_INPUT_TOKENS + ${input_tok:-0} ))
219
- TOTAL_OUTPUT_TOKENS=$(( TOTAL_OUTPUT_TOKENS + ${output_tok:-0} ))
220
- }
221
-
222
- # Estimate pipeline cost using historical averages from completed pipelines.
223
- # Falls back to per-stage estimates when no history exists.
224
- estimate_pipeline_cost() {
225
- local stages="$1"
226
- local stage_count
227
- stage_count=$(echo "$stages" | jq 'length' 2>/dev/null || echo "6")
228
- [[ ! "$stage_count" =~ ^[0-9]+$ ]] && stage_count=6
229
-
230
- local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
231
- local avg_input=0 avg_output=0
232
- if [[ -f "$events_file" ]]; then
233
- local hist
234
- hist=$(grep '"type":"pipeline.completed"' "$events_file" 2>/dev/null | tail -10)
235
- if [[ -n "$hist" ]]; then
236
- avg_input=$(echo "$hist" | jq -s -r '[.[] | .input_tokens // 0 | tonumber] | if length > 0 then (add / length | floor | tostring) else "0" end' 2>/dev/null | head -1)
237
- avg_output=$(echo "$hist" | jq -s -r '[.[] | .output_tokens // 0 | tonumber] | if length > 0 then (add / length | floor | tostring) else "0" end' 2>/dev/null | head -1)
238
- fi
239
- fi
240
- [[ ! "$avg_input" =~ ^[0-9]+$ ]] && avg_input=0
241
- [[ ! "$avg_output" =~ ^[0-9]+$ ]] && avg_output=0
242
-
243
- # Fall back to reasonable per-stage estimates only if no history
244
- if [[ "$avg_input" -eq 0 ]]; then
245
- avg_input=$(( stage_count * 8000 )) # More realistic: ~8K input per stage
246
- avg_output=$(( stage_count * 4000 )) # ~4K output per stage
247
- fi
248
-
249
- echo "{\"input_tokens\":${avg_input},\"output_tokens\":${avg_output}}"
250
- }
133
+ # ─── Pipeline Decomposed Modules ────────────────────────────────────────────
134
+ # shellcheck source=lib/pipeline-cli.sh
135
+ [[ -f "$SCRIPT_DIR/lib/pipeline-cli.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-cli.sh"
136
+ # shellcheck source=lib/pipeline-util.sh
137
+ [[ -f "$SCRIPT_DIR/lib/pipeline-util.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-util.sh"
138
+ # shellcheck source=lib/pipeline-execution.sh
139
+ [[ -f "$SCRIPT_DIR/lib/pipeline-execution.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-execution.sh"
140
+ # shellcheck source=lib/pipeline-commands.sh
141
+ [[ -f "$SCRIPT_DIR/lib/pipeline-commands.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-commands.sh"
251
142
 
252
143
  # ─── Defaults ───────────────────────────────────────────────────────────────
253
144
  GOAL=""
@@ -284,6 +175,8 @@ ORIGINAL_REPO_DIR=""
284
175
  REPO_OVERRIDE=""
285
176
  _cleanup_done=""
286
177
  PIPELINE_EXIT_CODE=1 # assume failure until run_pipeline succeeds
178
+ EFFORT_LEVEL_OVERRIDE="${SW_EFFORT_LEVEL:-}"
179
+ PIPELINE_FALLBACK_MODEL="${SW_FALLBACK_MODEL:-sonnet}"
287
180
 
288
181
  # GitHub metadata (populated during intake)
289
182
  ISSUE_LABELS=""
@@ -309,157 +202,33 @@ STATE_FILE=""
309
202
  ARTIFACTS_DIR=""
310
203
  TASKS_FILE=""
311
204
 
312
- # ─── Help ───────────────────────────────────────────────────────────────────
205
+ CURRENT_STAGE_ID=""
206
+
207
+ # Notification / webhook
208
+ SLACK_WEBHOOK=""
209
+ NOTIFICATION_ENABLED=false
210
+
211
+ # Placeholder to accumulate input/output tokens from all pipeline stages
212
+ TOTAL_INPUT_TOKENS=0
213
+ TOTAL_OUTPUT_TOKENS=0
313
214
 
314
- show_help() {
315
- echo -e "${CYAN}${BOLD}shipwright pipeline${RESET} — Autonomous Feature Delivery"
316
- echo ""
317
- echo -e "${BOLD}USAGE${RESET}"
318
- echo -e " ${CYAN}shipwright pipeline${RESET} <command> [options]"
319
- echo ""
320
- echo -e "${BOLD}COMMANDS${RESET}"
321
- echo -e " ${CYAN}start${RESET} --goal \"...\" Start a new pipeline"
322
- echo -e " ${CYAN}resume${RESET} Continue from last completed stage"
323
- echo -e " ${CYAN}status${RESET} Show pipeline progress dashboard"
324
- echo -e " ${CYAN}abort${RESET} Stop pipeline and mark aborted"
325
- echo -e " ${CYAN}list${RESET} Show available pipeline templates"
326
- echo -e " ${CYAN}show${RESET} <name> Display pipeline stages"
327
- echo ""
328
- echo -e "${BOLD}START OPTIONS${RESET}"
329
- echo -e " ${DIM}--goal \"description\"${RESET} What to build (required unless --issue)"
330
- echo -e " ${DIM}--issue <number>${RESET} Fetch goal from GitHub issue"
331
- echo -e " ${DIM}--repo <path>${RESET} Change to directory before running (must be a git repo)"
332
- echo -e " ${DIM}--local${RESET} Alias for --no-github --no-github-label (local-only mode)"
333
- echo -e " ${DIM}--pipeline <name>${RESET} Pipeline template (default: standard)"
334
- echo -e " ${DIM}--test-cmd \"command\"${RESET} Override test command (auto-detected if omitted)"
335
- echo -e " ${DIM}--model <model>${RESET} Override AI model (opus, sonnet, haiku)"
336
- echo -e " ${DIM}--agents <n>${RESET} Override agent count"
337
- echo -e " ${DIM}--skip-gates${RESET} Auto-approve all gates (fully autonomous)"
338
- echo -e " ${DIM}--headless${RESET} Full headless mode (skip gates, no prompts)"
339
- echo -e " ${DIM}--base <branch>${RESET} Base branch for PR (default: main)"
340
- echo -e " ${DIM}--reviewers \"a,b\"${RESET} Request PR reviewers (auto-detected if omitted)"
341
- echo -e " ${DIM}--labels \"a,b\"${RESET} Add labels to PR (inherited from issue if omitted)"
342
- echo -e " ${DIM}--no-github${RESET} Disable GitHub integration"
343
- echo -e " ${DIM}--no-github-label${RESET} Don't modify issue labels"
344
- echo -e " ${DIM}--ci${RESET} CI mode (skip gates, non-interactive)"
345
- echo -e " ${DIM}--ignore-budget${RESET} Skip budget enforcement checks"
346
- echo -e " ${DIM}--worktree [=name]${RESET} Run in isolated git worktree (parallel-safe)"
347
- echo -e " ${DIM}--dry-run${RESET} Show what would happen without executing"
348
- echo -e " ${DIM}--slack-webhook <url>${RESET} Send notifications to Slack"
349
- echo -e " ${DIM}--self-heal <n>${RESET} Build→test retry cycles on failure (default: 2)"
350
- echo -e " ${DIM}--max-iterations <n>${RESET} Override max build loop iterations"
351
- echo -e " ${DIM}--max-restarts <n>${RESET} Max session restarts in build loop"
352
- echo -e " ${DIM}--fast-test-cmd <cmd>${RESET} Fast/subset test for build loop"
353
- echo -e " ${DIM}--tdd${RESET} Test-first: generate tests before implementation"
354
- echo -e " ${DIM}--completed-stages \"a,b\"${RESET} Skip these stages (CI resume)"
355
- echo ""
356
- echo -e "${BOLD}STAGES${RESET} ${DIM}(configurable per pipeline template)${RESET}"
357
- echo -e " intake → plan → design → build → test → review → pr → deploy → validate → monitor"
358
- echo ""
359
- echo -e "${BOLD}GITHUB INTEGRATION${RESET} ${DIM}(automatic when gh CLI available)${RESET}"
360
- echo -e " • Issue intake: fetch metadata, labels, milestone, self-assign"
361
- echo -e " • Progress tracking: live updates posted as issue comments"
362
- echo -e " • Task checklist: plan posted as checkbox list on issue"
363
- echo -e " • PR creation: labels, milestone, reviewers auto-propagated"
364
- echo -e " • Issue lifecycle: labeled in-progress → closed on completion"
365
- echo ""
366
- echo -e "${BOLD}SELF-HEALING${RESET} ${DIM}(autonomous error recovery)${RESET}"
367
- echo -e " • Build→test feedback loop: failures feed back as build context"
368
- echo -e " • Configurable retry cycles (--self-heal N, default: 2)"
369
- echo -e " • Auto-rebase before PR: handles base branch drift"
370
- echo -e " • Signal-safe: Ctrl+C saves state for clean resume"
371
- echo -e " • Git stash/restore: protects uncommitted work"
372
- echo ""
373
- echo -e "${BOLD}AUTO-DETECTION${RESET} ${DIM}(zero-config for common setups)${RESET}"
374
- echo -e " • Test command: package.json, Makefile, Cargo.toml, go.mod, etc."
375
- echo -e " • Branch prefix: feat/, fix/, refactor/ based on task type"
376
- echo -e " • Reviewers: from CODEOWNERS or recent git contributors"
377
- echo -e " • Project type: language and framework detection"
378
- echo ""
379
- echo -e "${BOLD}NOTIFICATIONS${RESET} ${DIM}(team awareness)${RESET}"
380
- echo -e " • Slack: --slack-webhook <url>"
381
- echo -e " • Custom webhook: set SHIPWRIGHT_WEBHOOK_URL env var"
382
- echo -e " • Events: start, stage complete, failure, self-heal, done"
383
- echo ""
384
- echo -e "${BOLD}EXAMPLES${RESET}"
385
- echo -e " ${DIM}# From GitHub issue (fully autonomous)${RESET}"
386
- echo -e " ${DIM}shipwright pipeline start --issue 123 --skip-gates${RESET}"
387
- echo ""
388
- echo -e " ${DIM}# From inline goal${RESET}"
389
- echo -e " ${DIM}shipwright pipeline start --goal \"Add JWT authentication\"${RESET}"
390
- echo ""
391
- echo -e " ${DIM}# Hotfix with custom test command${RESET}"
392
- echo -e " ${DIM}shipwright pipeline start --issue 456 --pipeline hotfix --test-cmd \"pytest\"${RESET}"
393
- echo ""
394
- echo -e " ${DIM}# Full deployment pipeline with 3 agents${RESET}"
395
- echo -e " ${DIM}shipwright pipeline start --goal \"Build payment flow\" --pipeline full --agents 3${RESET}"
396
- echo ""
397
- echo -e " ${DIM}# Parallel pipeline in isolated worktree${RESET}"
398
- echo -e " ${DIM}shipwright pipeline start --issue 42 --worktree${RESET}"
399
- echo ""
400
- echo -e " ${DIM}# Resume / monitor / abort${RESET}"
401
- echo -e " ${DIM}shipwright pipeline resume${RESET}"
402
- echo -e " ${DIM}shipwright pipeline status${RESET}"
403
- echo -e " ${DIM}shipwright pipeline abort${RESET}"
404
- echo ""
405
- }
215
+ # Build-test retry limit (configurable via --self-heal)
216
+ BUILD_TEST_RETRIES="${BUILD_TEST_RETRIES:-2}"
406
217
 
407
- # ─── Argument Parsing ───────────────────────────────────────────────────────
218
+ # TDD mode flag (enable via --tdd or pipeline template)
219
+ TDD_ENABLED=false
220
+ PIPELINE_TDD=false
408
221
 
222
+ # ─── Argument Parsing (BEFORE other setup) ─────────────────────────────────
409
223
  SUBCOMMAND="${1:-help}"
410
224
  shift 2>/dev/null || true
411
225
 
412
- parse_args() {
413
- while [[ $# -gt 0 ]]; do
414
- case "$1" in
415
- --goal) GOAL="$2"; shift 2 ;;
416
- --issue) ISSUE_NUMBER="$2"; shift 2 ;;
417
- --repo) REPO_OVERRIDE="$2"; shift 2 ;;
418
- --local) NO_GITHUB=true; NO_GITHUB_LABEL=true; shift ;;
419
- --pipeline|--template) PIPELINE_NAME="$2"; shift 2 ;;
420
- --test-cmd) TEST_CMD="$2"; shift 2 ;;
421
- --model) MODEL="$2"; shift 2 ;;
422
- --agents) AGENTS="$2"; shift 2 ;;
423
- --skip-gates) SKIP_GATES=true; shift ;;
424
- --headless) HEADLESS=true; SKIP_GATES=true; shift ;;
425
- --base) BASE_BRANCH="$2"; shift 2 ;;
426
- --reviewers) REVIEWERS="$2"; shift 2 ;;
427
- --labels) LABELS="$2"; shift 2 ;;
428
- --no-github) NO_GITHUB=true; shift ;;
429
- --no-github-label) NO_GITHUB_LABEL=true; shift ;;
430
- --ci) CI_MODE=true; SKIP_GATES=true; shift ;;
431
- --ignore-budget) IGNORE_BUDGET=true; shift ;;
432
- --max-iterations) MAX_ITERATIONS_OVERRIDE="$2"; shift 2 ;;
433
- --completed-stages) COMPLETED_STAGES="$2"; shift 2 ;;
434
- --resume) RESUME_FROM_CHECKPOINT=true; shift ;;
435
- --worktree=*) AUTO_WORKTREE=true; WORKTREE_NAME="${1#--worktree=}"; WORKTREE_NAME="${WORKTREE_NAME//[^a-zA-Z0-9_-]/}"; if [[ -z "$WORKTREE_NAME" ]]; then error "Invalid worktree name (alphanumeric, hyphens, underscores only)"; exit 1; fi; shift ;;
436
- --worktree) AUTO_WORKTREE=true; shift ;;
437
- --dry-run) DRY_RUN=true; shift ;;
438
- --slack-webhook) SLACK_WEBHOOK="$2"; shift 2 ;;
439
- --self-heal) BUILD_TEST_RETRIES="${2:-3}"; shift 2 ;;
440
- --max-restarts)
441
- MAX_RESTARTS_OVERRIDE="$2"
442
- if ! [[ "$MAX_RESTARTS_OVERRIDE" =~ ^[0-9]+$ ]]; then
443
- error "--max-restarts must be numeric (got: $MAX_RESTARTS_OVERRIDE)"
444
- exit 1
445
- fi
446
- shift 2 ;;
447
-
448
- --fast-test-cmd) FAST_TEST_CMD_OVERRIDE="$2"; shift 2 ;;
449
- --tdd) TDD_ENABLED=true; shift ;;
450
- --help|-h) show_help; exit 0 ;;
451
- *)
452
- if [[ -z "$PIPELINE_NAME_ARG" ]]; then
453
- PIPELINE_NAME_ARG="$1"
454
- fi
455
- shift ;;
456
- esac
457
- done
458
- }
459
-
460
226
  PIPELINE_NAME_ARG=""
461
227
  parse_args "$@"
462
228
 
229
+ # Export effort and fallback variables so subprocesses can access them
230
+ export EFFORT_LEVEL_OVERRIDE PIPELINE_FALLBACK_MODEL
231
+
463
232
  # ─── Non-Interactive Detection ──────────────────────────────────────────────
464
233
  # When stdin is not a terminal (background, pipe, nohup, tmux send-keys),
465
234
  # auto-enable headless mode to prevent read prompts from killing the script.
@@ -474,2386 +243,6 @@ if [[ "$AUTO_WORKTREE" == "true" && "$SKIP_GATES" != "true" && ! -t 0 ]]; then
474
243
  SKIP_GATES=true
475
244
  fi
476
245
 
477
- # ─── Directory Setup ────────────────────────────────────────────────────────
478
-
479
- setup_dirs() {
480
- PROJECT_ROOT="$(git rev-parse --show-toplevel 2>/dev/null || pwd)"
481
- STATE_DIR="$PROJECT_ROOT/.claude"
482
- STATE_FILE="$STATE_DIR/pipeline-state.md"
483
- ARTIFACTS_DIR="$STATE_DIR/pipeline-artifacts"
484
- TASKS_FILE="$STATE_DIR/pipeline-tasks.md"
485
- mkdir -p "$STATE_DIR" "$ARTIFACTS_DIR"
486
- export SHIPWRIGHT_PIPELINE_ID="pipeline-$$-${ISSUE_NUMBER:-0}"
487
- }
488
-
489
- # ─── Pipeline Config Loading ───────────────────────────────────────────────
490
-
491
- find_pipeline_config() {
492
- local name="$1"
493
- local locations=(
494
- "$REPO_DIR/templates/pipelines/${name}.json"
495
- "${PROJECT_ROOT:-}/templates/pipelines/${name}.json"
496
- "$HOME/.shipwright/pipelines/${name}.json"
497
- )
498
- for loc in "${locations[@]}"; do
499
- if [[ -n "$loc" && -f "$loc" ]]; then
500
- echo "$loc"
501
- return 0
502
- fi
503
- done
504
- return 1
505
- }
506
-
507
- load_pipeline_config() {
508
- # Check for intelligence-composed pipeline first
509
- local composed_pipeline="${ARTIFACTS_DIR}/composed-pipeline.json"
510
- if [[ -f "$composed_pipeline" ]] && type composer_validate_pipeline >/dev/null 2>&1; then
511
- # Use composed pipeline if fresh (within cache TTL)
512
- local composed_cache_ttl
513
- composed_cache_ttl=$(_config_get_int "pipeline.composed_cache_ttl" 3600 2>/dev/null || echo 3600)
514
- local composed_age=99999
515
- local composed_mtime
516
- composed_mtime=$(file_mtime "$composed_pipeline")
517
- if [[ "$composed_mtime" -gt 0 ]]; then
518
- composed_age=$(( $(now_epoch) - composed_mtime ))
519
- fi
520
- if [[ "$composed_age" -lt "$composed_cache_ttl" ]]; then
521
- local validate_json
522
- validate_json=$(cat "$composed_pipeline" 2>/dev/null || echo "")
523
- if [[ -n "$validate_json" ]] && composer_validate_pipeline "$validate_json" 2>/dev/null; then
524
- PIPELINE_CONFIG="$composed_pipeline"
525
- info "Pipeline: ${BOLD}composed${RESET} ${DIM}(intelligence-driven)${RESET}"
526
- emit_event "pipeline.composed_loaded" "issue=${ISSUE_NUMBER:-0}"
527
- return
528
- fi
529
- fi
530
- fi
531
-
532
- PIPELINE_CONFIG=$(find_pipeline_config "$PIPELINE_NAME") || {
533
- error "Pipeline template not found: $PIPELINE_NAME"
534
- echo -e " Available templates: ${DIM}shipwright pipeline list${RESET}"
535
- exit 1
536
- }
537
- info "Pipeline: ${BOLD}$PIPELINE_NAME${RESET} ${DIM}($PIPELINE_CONFIG)${RESET}"
538
- # TDD from template (overridable by --tdd)
539
- [[ "$(jq -r '.tdd // false' "$PIPELINE_CONFIG" 2>/dev/null)" == "true" ]] && PIPELINE_TDD=true
540
- return 0
541
- }
542
-
543
- CURRENT_STAGE_ID=""
544
-
545
- # Notification / webhook
546
- SLACK_WEBHOOK=""
547
- NOTIFICATION_ENABLED=false
548
-
549
- # Self-healing
550
- BUILD_TEST_RETRIES=$(_config_get_int "pipeline.build_test_retries" 3 2>/dev/null || echo 3)
551
- STASHED_CHANGES=false
552
- SELF_HEAL_COUNT=0
553
-
554
- # ─── Cost Tracking ───────────────────────────────────────────────────────
555
- TOTAL_INPUT_TOKENS=0
556
- TOTAL_OUTPUT_TOKENS=0
557
- COST_MODEL_RATES='{"opus":{"input":15,"output":75},"sonnet":{"input":3,"output":15},"haiku":{"input":0.25,"output":1.25}}'
558
-
559
- # ─── Heartbeat ────────────────────────────────────────────────────────────────
560
- HEARTBEAT_PID=""
561
-
562
- start_heartbeat() {
563
- local job_id="${PIPELINE_NAME:-pipeline-$$}"
564
- (
565
- while true; do
566
- "$SCRIPT_DIR/sw-heartbeat.sh" write "$job_id" \
567
- --pid $$ \
568
- --issue "${ISSUE_NUMBER:-0}" \
569
- --stage "${CURRENT_STAGE_ID:-unknown}" \
570
- --iteration "0" \
571
- --activity "$(get_stage_description "${CURRENT_STAGE_ID:-}" 2>/dev/null || echo "Running pipeline")" 2>/dev/null || true
572
- sleep "$(_config_get_int "pipeline.heartbeat_interval" 30 2>/dev/null || echo 30)"
573
- done
574
- ) >/dev/null 2>&1 &
575
- HEARTBEAT_PID=$!
576
- }
577
-
578
- stop_heartbeat() {
579
- if [[ -n "${HEARTBEAT_PID:-}" ]]; then
580
- kill "$HEARTBEAT_PID" 2>/dev/null || true
581
- wait "$HEARTBEAT_PID" 2>/dev/null || true
582
- "$SCRIPT_DIR/sw-heartbeat.sh" clear "${PIPELINE_NAME:-pipeline-$$}" 2>/dev/null || true
583
- HEARTBEAT_PID=""
584
- fi
585
- }
586
-
587
- # ─── CI Helpers ───────────────────────────────────────────────────────────
588
-
589
- ci_push_partial_work() {
590
- [[ "${CI_MODE:-false}" != "true" ]] && return 0
591
- [[ -z "${ISSUE_NUMBER:-}" ]] && return 0
592
-
593
- local branch="shipwright/issue-${ISSUE_NUMBER}"
594
-
595
- # Only push if we have uncommitted changes
596
- if ! git diff --quiet 2>/dev/null || ! git diff --cached --quiet 2>/dev/null; then
597
- git add -A 2>/dev/null || true
598
- git commit -m "WIP: partial pipeline progress for #${ISSUE_NUMBER}" --no-verify 2>/dev/null || true
599
- fi
600
-
601
- # Push branch (create if needed, force to overwrite previous WIP)
602
- if ! git push origin "HEAD:refs/heads/$branch" --force 2>/dev/null; then
603
- warn "git push failed for $branch — remote may be out of sync"
604
- emit_event "pipeline.push_failed" "branch=$branch"
605
- fi
606
- }
607
-
608
- ci_post_stage_event() {
609
- [[ "${CI_MODE:-false}" != "true" ]] && return 0
610
- [[ -z "${ISSUE_NUMBER:-}" ]] && return 0
611
- [[ "${GH_AVAILABLE:-false}" != "true" ]] && return 0
612
-
613
- local stage="$1" status="$2" elapsed="${3:-0s}"
614
- local comment="<!-- SHIPWRIGHT-STAGE: ${stage}:${status}:${elapsed} -->"
615
- _timeout "$(_config_get_int "network.gh_timeout" 30 2>/dev/null || echo 30)" gh issue comment "$ISSUE_NUMBER" --body "$comment" 2>/dev/null || true
616
- }
617
-
618
- # ─── Signal Handling ───────────────────────────────────────────────────────
619
-
620
- cleanup_on_exit() {
621
- [[ "${_cleanup_done:-}" == "true" ]] && return 0
622
- _cleanup_done=true
623
- local exit_code=$?
624
-
625
- # Stop heartbeat writer
626
- stop_heartbeat
627
-
628
- # Save state if we were running
629
- if [[ "$PIPELINE_STATUS" == "running" && -n "$STATE_FILE" ]]; then
630
- PIPELINE_STATUS="interrupted"
631
- UPDATED_AT="$(now_iso)"
632
- write_state 2>/dev/null || true
633
- echo ""
634
- warn "Pipeline interrupted — state saved."
635
- echo -e " Resume: ${DIM}shipwright pipeline resume${RESET}"
636
-
637
- # Push partial work in CI mode so retries can pick it up
638
- ci_push_partial_work
639
- fi
640
-
641
- # Restore stashed changes
642
- if [[ "$STASHED_CHANGES" == "true" ]]; then
643
- git stash pop --quiet 2>/dev/null || true
644
- fi
645
-
646
- # Release durable pipeline lock
647
- if [[ -n "${_PIPELINE_LOCK_ID:-}" ]] && type release_lock >/dev/null 2>&1; then
648
- release_lock "$_PIPELINE_LOCK_ID" 2>/dev/null || true
649
- fi
650
-
651
- # Cancel lingering in_progress GitHub Check Runs
652
- pipeline_cancel_check_runs 2>/dev/null || true
653
-
654
- # Update GitHub
655
- if [[ -n "${ISSUE_NUMBER:-}" && "${GH_AVAILABLE:-false}" == "true" ]]; then
656
- if ! _timeout "$(_config_get_int "network.gh_timeout" 30 2>/dev/null || echo 30)" gh issue comment "$ISSUE_NUMBER" --body "⏸️ **Pipeline interrupted** at stage: ${CURRENT_STAGE_ID:-unknown}" 2>/dev/null; then
657
- warn "gh issue comment failed — status update may not have been posted"
658
- emit_event "pipeline.comment_failed" "issue=$ISSUE_NUMBER"
659
- fi
660
- fi
661
-
662
- exit "$exit_code"
663
- }
664
-
665
- trap cleanup_on_exit SIGINT SIGTERM
666
-
667
- # ─── Pre-flight Validation ─────────────────────────────────────────────────
668
-
669
- preflight_checks() {
670
- local errors=0
671
-
672
- echo -e "${PURPLE}${BOLD}━━━ Pre-flight Checks ━━━${RESET}"
673
- echo ""
674
-
675
- # 1. Required tools
676
- local required_tools=("git" "jq")
677
- local optional_tools=("gh" "claude" "bc" "curl")
678
-
679
- for tool in "${required_tools[@]}"; do
680
- if command -v "$tool" >/dev/null 2>&1; then
681
- echo -e " ${GREEN}✓${RESET} $tool"
682
- else
683
- echo -e " ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
684
- errors=$((errors + 1))
685
- fi
686
- done
687
-
688
- for tool in "${optional_tools[@]}"; do
689
- if command -v "$tool" >/dev/null 2>&1; then
690
- echo -e " ${GREEN}✓${RESET} $tool"
691
- else
692
- echo -e " ${DIM}○${RESET} $tool ${DIM}(optional — some features disabled)${RESET}"
693
- fi
694
- done
695
-
696
- # 2. Git state
697
- echo ""
698
- if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
699
- echo -e " ${GREEN}✓${RESET} Inside git repo"
700
- else
701
- echo -e " ${RED}✗${RESET} Not inside a git repository"
702
- errors=$((errors + 1))
703
- fi
704
-
705
- # Check for uncommitted changes — offer to stash
706
- local dirty_files
707
- dirty_files=$(git status --porcelain 2>/dev/null | wc -l | xargs)
708
- if [[ "$dirty_files" -gt 0 ]]; then
709
- echo -e " ${YELLOW}⚠${RESET} $dirty_files uncommitted change(s)"
710
- if [[ "$SKIP_GATES" == "true" ]]; then
711
- info "Auto-stashing uncommitted changes..."
712
- git stash push -m "sw-pipeline: auto-stash before pipeline" --quiet 2>/dev/null && STASHED_CHANGES=true
713
- if [[ "$STASHED_CHANGES" == "true" ]]; then
714
- echo -e " ${GREEN}✓${RESET} Changes stashed (will restore on exit)"
715
- fi
716
- else
717
- echo -e " ${DIM}Tip: Use --skip-gates to auto-stash, or commit/stash manually${RESET}"
718
- fi
719
- else
720
- echo -e " ${GREEN}✓${RESET} Working tree clean"
721
- fi
722
-
723
- # Check if base branch exists
724
- if git rev-parse --verify "$BASE_BRANCH" >/dev/null 2>&1; then
725
- echo -e " ${GREEN}✓${RESET} Base branch: $BASE_BRANCH"
726
- else
727
- echo -e " ${RED}✗${RESET} Base branch not found: $BASE_BRANCH"
728
- errors=$((errors + 1))
729
- fi
730
-
731
- # 3. GitHub auth (if gh available and not disabled)
732
- if [[ "$NO_GITHUB" != "true" ]] && command -v gh >/dev/null 2>&1; then
733
- if gh auth status >/dev/null 2>&1; then
734
- echo -e " ${GREEN}✓${RESET} GitHub authenticated"
735
- else
736
- echo -e " ${YELLOW}⚠${RESET} GitHub not authenticated (features disabled)"
737
- fi
738
- fi
739
-
740
- # 4. Claude CLI
741
- if command -v claude >/dev/null 2>&1; then
742
- echo -e " ${GREEN}✓${RESET} Claude CLI available"
743
- else
744
- echo -e " ${RED}✗${RESET} Claude CLI not found — plan/build stages will fail"
745
- errors=$((errors + 1))
746
- fi
747
-
748
- # 5. sw loop (needed for build stage)
749
- if [[ -x "$SCRIPT_DIR/sw-loop.sh" ]]; then
750
- echo -e " ${GREEN}✓${RESET} shipwright loop available"
751
- else
752
- echo -e " ${RED}✗${RESET} sw-loop.sh not found at $SCRIPT_DIR"
753
- errors=$((errors + 1))
754
- fi
755
-
756
- # 6. Disk space check (warn if < 1GB free)
757
- local free_space_kb
758
- free_space_kb=$(df -k "$PROJECT_ROOT" 2>/dev/null | tail -1 | awk '{print $4}')
759
- if [[ -n "$free_space_kb" ]] && [[ "$free_space_kb" -lt 1048576 ]] 2>/dev/null; then
760
- echo -e " ${YELLOW}⚠${RESET} Low disk space: $(( free_space_kb / 1024 ))MB free"
761
- fi
762
-
763
- echo ""
764
-
765
- if [[ "$errors" -gt 0 ]]; then
766
- error "Pre-flight failed: $errors error(s)"
767
- return 1
768
- fi
769
-
770
- success "Pre-flight passed"
771
- echo ""
772
- return 0
773
- }
774
-
775
- # ─── Notification Helpers ──────────────────────────────────────────────────
776
-
777
- notify() {
778
- local title="$1" message="$2" level="${3:-info}"
779
- local emoji
780
- case "$level" in
781
- success) emoji="✅" ;;
782
- error) emoji="❌" ;;
783
- warn) emoji="⚠️" ;;
784
- *) emoji="🔔" ;;
785
- esac
786
-
787
- # Slack webhook
788
- if [[ -n "${SLACK_WEBHOOK:-}" ]]; then
789
- local payload
790
- payload=$(jq -n \
791
- --arg text "${emoji} *${title}*\n${message}" \
792
- '{text: $text}')
793
- curl -sf --connect-timeout "$(_config_get_int "network.connect_timeout" 10 2>/dev/null || echo 10)" --max-time "$(_config_get_int "network.max_time" 60 2>/dev/null || echo 60)" -X POST -H 'Content-Type: application/json' \
794
- -d "$payload" "$SLACK_WEBHOOK" >/dev/null 2>&1 || true
795
- fi
796
-
797
- # Custom webhook (env var SHIPWRIGHT_WEBHOOK_URL)
798
- local _webhook_url="${SHIPWRIGHT_WEBHOOK_URL:-}"
799
- if [[ -n "$_webhook_url" ]]; then
800
- local payload
801
- payload=$(jq -n \
802
- --arg title "$title" --arg message "$message" \
803
- --arg level "$level" --arg pipeline "${PIPELINE_NAME:-}" \
804
- --arg goal "${GOAL:-}" --arg stage "${CURRENT_STAGE_ID:-}" \
805
- '{title:$title, message:$message, level:$level, pipeline:$pipeline, goal:$goal, stage:$stage}')
806
- curl -sf --connect-timeout 10 --max-time 30 -X POST -H 'Content-Type: application/json' \
807
- -d "$payload" "$_webhook_url" >/dev/null 2>&1 || true
808
- fi
809
- }
810
-
811
- # ─── Error Classification ──────────────────────────────────────────────────
812
- # Classifies errors to determine whether retrying makes sense.
813
- # Returns: "infrastructure", "logic", "configuration", or "unknown"
814
-
815
- classify_error() {
816
- local stage_id="$1"
817
- local log_file="${ARTIFACTS_DIR}/${stage_id}-results.log"
818
- [[ ! -f "$log_file" ]] && log_file="${ARTIFACTS_DIR}/test-results.log"
819
- [[ ! -f "$log_file" ]] && { echo "unknown"; return; }
820
-
821
- local log_tail
822
- log_tail=$(tail -50 "$log_file" 2>/dev/null || echo "")
823
-
824
- # Generate error signature for history lookup
825
- local error_sig
826
- error_sig=$(echo "$log_tail" | grep -iE 'error|fail|exception|fatal' 2>/dev/null | head -3 | cksum | awk '{print $1}' || echo "0")
827
-
828
- # Check classification history first (learned from previous runs)
829
- local class_history="${HOME}/.shipwright/optimization/error-classifications.json"
830
- if [[ -f "$class_history" ]]; then
831
- local cached_class
832
- cached_class=$(jq -r --arg sig "$error_sig" '.[$sig].classification // empty' "$class_history" 2>/dev/null || true)
833
- if [[ -n "$cached_class" && "$cached_class" != "null" ]]; then
834
- echo "$cached_class"
835
- return
836
- fi
837
- fi
838
-
839
- local classification="unknown"
840
-
841
- # Infrastructure errors: timeout, OOM, network — retry makes sense
842
- if echo "$log_tail" | grep -qiE 'timeout|timed out|ETIMEDOUT|ECONNREFUSED|ECONNRESET|network|socket hang up|OOM|out of memory|killed|signal 9|Cannot allocate memory'; then
843
- classification="infrastructure"
844
- # Configuration errors: missing env, wrong path — don't retry, escalate
845
- elif echo "$log_tail" | grep -qiE 'ENOENT|not found|No such file|command not found|MODULE_NOT_FOUND|Cannot find module|missing.*env|undefined variable|permission denied|EACCES'; then
846
- classification="configuration"
847
- # Logic errors: assertion failures, type errors — retry won't help without code change
848
- elif echo "$log_tail" | grep -qiE 'AssertionError|assert.*fail|Expected.*but.*got|TypeError|ReferenceError|SyntaxError|CompileError|type mismatch|cannot assign|incompatible type'; then
849
- classification="logic"
850
- # Build errors: compilation failures
851
- elif echo "$log_tail" | grep -qiE 'error\[E[0-9]+\]|error: aborting|FAILED.*compile|build failed|tsc.*error|eslint.*error'; then
852
- classification="logic"
853
- # Intelligence fallback: Claude classification for unknown errors
854
- elif [[ "$classification" == "unknown" ]] && type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1; then
855
- local ai_class
856
- ai_class=$(claude --print --output-format text -p "Classify this error as exactly one of: infrastructure, configuration, logic, unknown.
857
-
858
- Error output:
859
- $(echo "$log_tail" | tail -20)
860
-
861
- Reply with ONLY the classification word, nothing else." --model haiku < /dev/null 2>/dev/null || true)
862
- ai_class=$(echo "$ai_class" | tr -d '[:space:]' | tr '[:upper:]' '[:lower:]')
863
- case "$ai_class" in
864
- infrastructure|configuration|logic) classification="$ai_class" ;;
865
- esac
866
- fi
867
-
868
- # Map retry categories to shared taxonomy (from lib/compat.sh SW_ERROR_CATEGORIES)
869
- # Retry uses: infrastructure, configuration, logic, unknown
870
- # Shared uses: test_failure, build_error, lint_error, timeout, dependency, flaky, config, security, permission, unknown
871
- local canonical_category="unknown"
872
- case "$classification" in
873
- infrastructure) canonical_category="timeout" ;;
874
- configuration) canonical_category="config" ;;
875
- logic)
876
- case "$stage_id" in
877
- test) canonical_category="test_failure" ;;
878
- *) canonical_category="build_error" ;;
879
- esac
880
- ;;
881
- esac
882
-
883
- # Record classification for future runs (using both retry and canonical categories)
884
- if [[ -n "$error_sig" && "$error_sig" != "0" ]]; then
885
- local class_dir="${HOME}/.shipwright/optimization"
886
- mkdir -p "$class_dir" 2>/dev/null || true
887
- local tmp_class
888
- tmp_class="$(mktemp)"
889
- trap "rm -f '$tmp_class'" RETURN
890
- if [[ -f "$class_history" ]]; then
891
- jq --arg sig "$error_sig" --arg cls "$classification" --arg canon "$canonical_category" --arg stage "$stage_id" \
892
- '.[$sig] = {"classification": $cls, "canonical": $canon, "stage": $stage, "recorded_at": now}' \
893
- "$class_history" > "$tmp_class" 2>/dev/null && \
894
- mv "$tmp_class" "$class_history" || rm -f "$tmp_class"
895
- else
896
- jq -n --arg sig "$error_sig" --arg cls "$classification" --arg canon "$canonical_category" --arg stage "$stage_id" \
897
- '{($sig): {"classification": $cls, "canonical": $canon, "stage": $stage, "recorded_at": now}}' \
898
- > "$tmp_class" 2>/dev/null && \
899
- mv "$tmp_class" "$class_history" || rm -f "$tmp_class"
900
- fi
901
- fi
902
-
903
- echo "$classification"
904
- }
905
-
906
- # ─── Stage Runner ───────────────────────────────────────────────────────────
907
-
908
- run_stage_with_retry() {
909
- local stage_id="$1"
910
- local max_retries
911
- max_retries=$(jq -r --arg id "$stage_id" '(.stages[] | select(.id == $id) | .config.retries) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
912
- [[ -z "$max_retries" || "$max_retries" == "null" ]] && max_retries=0
913
-
914
- local attempt=0
915
- local prev_error_class=""
916
- while true; do
917
- if "stage_${stage_id}"; then
918
- return 0
919
- fi
920
-
921
- # Capture error_class and error snippet for stage.failed / pipeline.completed events
922
- local error_class
923
- error_class=$(classify_error "$stage_id")
924
- LAST_STAGE_ERROR_CLASS="$error_class"
925
- LAST_STAGE_ERROR=""
926
- local _log_file="${ARTIFACTS_DIR}/${stage_id}-results.log"
927
- [[ ! -f "$_log_file" ]] && _log_file="${ARTIFACTS_DIR}/test-results.log"
928
- if [[ -f "$_log_file" ]]; then
929
- LAST_STAGE_ERROR=$(tail -20 "$_log_file" 2>/dev/null | grep -iE 'error|fail|exception|fatal' 2>/dev/null | head -1 | cut -c1-200 || true)
930
- fi
931
-
932
- attempt=$((attempt + 1))
933
- if [[ "$attempt" -gt "$max_retries" ]]; then
934
- return 1
935
- fi
936
-
937
- # Classify done above; decide whether retry makes sense
938
-
939
- emit_event "retry.classified" \
940
- "issue=${ISSUE_NUMBER:-0}" \
941
- "stage=$stage_id" \
942
- "attempt=$attempt" \
943
- "error_class=$error_class"
944
-
945
- case "$error_class" in
946
- infrastructure)
947
- info "Error classified as infrastructure (timeout/network/OOM) — retry makes sense"
948
- ;;
949
- configuration)
950
- error "Error classified as configuration (missing env/path) — skipping retry, escalating"
951
- emit_event "retry.escalated" \
952
- "issue=${ISSUE_NUMBER:-0}" \
953
- "stage=$stage_id" \
954
- "reason=configuration_error"
955
- return 1
956
- ;;
957
- logic)
958
- if [[ "$error_class" == "$prev_error_class" ]]; then
959
- error "Error classified as logic (assertion/type error) with same class — retry won't help without code change"
960
- emit_event "retry.skipped" \
961
- "issue=${ISSUE_NUMBER:-0}" \
962
- "stage=$stage_id" \
963
- "reason=repeated_logic_error"
964
- return 1
965
- fi
966
- warn "Error classified as logic — retrying once in case build fixes it"
967
- ;;
968
- *)
969
- info "Error classification: unknown — retrying"
970
- ;;
971
- esac
972
- prev_error_class="$error_class"
973
-
974
- if type db_save_reasoning_trace >/dev/null 2>&1; then
975
- local job_id="${SHIPWRIGHT_PIPELINE_ID:-$$}"
976
- local error_msg="${LAST_STAGE_ERROR:-$error_class}"
977
- db_save_reasoning_trace "$job_id" "retry_reasoning" \
978
- "stage=$stage_id error=$error_msg" \
979
- "Stage failed, analyzing error pattern before retry" \
980
- "retry_strategy=self_heal" 0.6 2>/dev/null || true
981
- fi
982
-
983
- warn "Stage $stage_id failed (attempt $attempt/$((max_retries + 1)), class: $error_class) — retrying..."
984
- # Exponential backoff with jitter to avoid thundering herd
985
- local backoff=$((2 ** attempt))
986
- [[ "$backoff" -gt 16 ]] && backoff=16
987
- local jitter=$(( RANDOM % (backoff + 1) ))
988
- local total_sleep=$((backoff + jitter))
989
- info "Backing off ${total_sleep}s before retry..."
990
- sleep "$total_sleep"
991
- done
992
- }
993
-
994
- # ─── Self-Healing Build→Test Feedback Loop ─────────────────────────────────
995
- # When tests fail after a build, this captures the error and re-runs the build
996
- # with the error context, so Claude can fix the issue automatically.
997
-
998
# Runs the build and test stages as a coupled loop: when tests fail, the
# captured failure output is fed back into the build stage's goal so the
# next build attempt can fix it. Exits early when convergence heuristics
# detect no progress (same error repeated, or failure count not shrinking).
#
# Globals (read):  BUILD_TEST_RETRIES, STATE_FILE, ARTIFACTS_DIR, ISSUE_NUMBER,
#                  SCRIPT_DIR, GOAL, color vars (YELLOW/BOLD/CYAN/DIM/RESET)
# Globals (written): SELF_HEAL_COUNT, CURRENT_STAGE_ID, GOAL (restored after use)
# Returns: 0 when the test stage passes; 1 on build failure, convergence
#          stall, or cycle exhaustion.
self_healing_build_test() {
  local cycle=0
  local max_cycles="$BUILD_TEST_RETRIES"
  local last_test_error=""

  # Convergence tracking
  local prev_error_sig="" consecutive_same_error=0
  local prev_fail_count=0 zero_convergence_streak=0

  # Vitals-driven adaptive limit (preferred over static BUILD_TEST_RETRIES)
  if type pipeline_adaptive_limit >/dev/null 2>&1; then
    local _vitals_json=""
    if type pipeline_compute_vitals >/dev/null 2>&1; then
      _vitals_json=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
    fi
    local vitals_limit
    vitals_limit=$(pipeline_adaptive_limit "build_test" "$_vitals_json" 2>/dev/null) || true
    # Only accept a strictly positive integer limit from the helper.
    if [[ -n "$vitals_limit" && "$vitals_limit" =~ ^[0-9]+$ && "$vitals_limit" -gt 0 ]]; then
      info "Vitals-driven build-test limit: ${max_cycles} → ${vitals_limit}"
      max_cycles="$vitals_limit"
      emit_event "vitals.adaptive_limit" \
        "issue=${ISSUE_NUMBER:-0}" \
        "context=build_test" \
        "original=$BUILD_TEST_RETRIES" \
        "vitals_limit=$vitals_limit"
    fi
  # Fallback: intelligence-based adaptive limits
  elif type composer_estimate_iterations >/dev/null 2>&1; then
    local estimated
    estimated=$(composer_estimate_iterations \
      "${INTELLIGENCE_ANALYSIS:-{}}" \
      "${HOME}/.shipwright/optimization/iteration-model.json" 2>/dev/null || echo "")
    if [[ -n "$estimated" && "$estimated" =~ ^[0-9]+$ && "$estimated" -gt 0 ]]; then
      max_cycles="$estimated"
      emit_event "intelligence.adaptive_iterations" \
        "issue=${ISSUE_NUMBER:-0}" \
        "estimated=$estimated" \
        "original=$BUILD_TEST_RETRIES"
    fi
  fi

  # Fallback: adaptive cycle limits from optimization data
  # (only when neither adaptive path above changed max_cycles)
  if [[ "$max_cycles" == "$BUILD_TEST_RETRIES" ]]; then
    local _iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
    if [[ -f "$_iter_model" ]]; then
      local adaptive_bt_limit
      adaptive_bt_limit=$(pipeline_adaptive_cycles "$max_cycles" "build_test" "0" "-1" 2>/dev/null) || true
      if [[ -n "$adaptive_bt_limit" && "$adaptive_bt_limit" =~ ^[0-9]+$ && "$adaptive_bt_limit" -gt 0 && "$adaptive_bt_limit" != "$max_cycles" ]]; then
        info "Adaptive build-test cycles: ${max_cycles} → ${adaptive_bt_limit}"
        max_cycles="$adaptive_bt_limit"
      fi
    fi
  fi

  # Main loop: cycle is incremented at the top, so the loop body runs for
  # cycles 1 .. max_cycles+1 (the first cycle is the normal build, the rest
  # are self-healing retries).
  while [[ "$cycle" -le "$max_cycles" ]]; do
    cycle=$((cycle + 1))

    if [[ "$cycle" -gt 1 ]]; then
      SELF_HEAL_COUNT=$((SELF_HEAL_COUNT + 1))
      echo ""
      echo -e "${YELLOW}${BOLD}━━━ Self-Healing Cycle ${cycle}/$((max_cycles + 1)) ━━━${RESET}"
      info "Feeding test failure back to build loop..."

      if [[ -n "$ISSUE_NUMBER" ]]; then
        gh_comment_issue "$ISSUE_NUMBER" "🔄 **Self-healing cycle ${cycle}** — rebuilding with error context" 2>/dev/null || true
      fi

      # Reset build/test stage statuses for retry
      set_stage_status "build" "retrying"
      set_stage_status "test" "pending"
    fi

    # ── Run Build Stage ──
    echo ""
    echo -e "${CYAN}${BOLD}▸ Stage: build${RESET} ${DIM}[cycle ${cycle}]${RESET}"
    CURRENT_STAGE_ID="build"

    # Inject error context on retry cycles
    if [[ "$cycle" -gt 1 && -n "$last_test_error" ]]; then
      # Query memory for known fixes (first 3 lines of the error act as the key)
      local _memory_fix=""
      if type memory_closed_loop_inject >/dev/null 2>&1; then
        local _error_sig_short
        _error_sig_short=$(echo "$last_test_error" | head -3 || echo "")
        _memory_fix=$(memory_closed_loop_inject "$_error_sig_short" 2>/dev/null) || true
      fi

      local memory_prefix=""
      if [[ -n "$_memory_fix" ]]; then
        info "Memory suggests fix: $(echo "$_memory_fix" | head -1)"
        memory_prefix="KNOWN FIX (from past success): ${_memory_fix}

"
      fi

      # Temporarily augment the goal with error context; GOAL is restored
      # on every exit path below so later stages see the original goal.
      local original_goal="$GOAL"
      GOAL="$GOAL

${memory_prefix}IMPORTANT — Previous build attempt failed tests. Fix these errors:
$last_test_error

Focus on fixing the failing tests while keeping all passing tests working."

      update_status "running" "build"
      record_stage_start "build"

      if run_stage_with_retry "build"; then
        mark_stage_complete "build"
        local timing
        timing=$(get_stage_timing "build")
        success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
        # Emit a progress snapshot (diff size, changed files, last error)
        # when the helper is available and we are tied to an issue.
        if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
          local _diff_count
          _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
          local _snap_files _snap_error
          _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
          _snap_files="${_snap_files:-0}"
          _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
          _snap_error="${_snap_error:-}"
          pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
        fi
      else
        mark_stage_failed "build"
        GOAL="$original_goal"
        return 1
      fi
      GOAL="$original_goal"
    else
      # First cycle (or no captured error): run build with the original goal.
      update_status "running" "build"
      record_stage_start "build"

      if run_stage_with_retry "build"; then
        mark_stage_complete "build"
        local timing
        timing=$(get_stage_timing "build")
        success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
        if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
          local _diff_count
          _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
          local _snap_files _snap_error
          _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
          _snap_files="${_snap_files:-0}"
          _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
          _snap_error="${_snap_error:-}"
          pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
        fi
      else
        mark_stage_failed "build"
        return 1
      fi
    fi

    # ── Run Test Stage ──
    echo ""
    echo -e "${CYAN}${BOLD}▸ Stage: test${RESET} ${DIM}[cycle ${cycle}]${RESET}"
    CURRENT_STAGE_ID="test"
    update_status "running" "test"
    record_stage_start "test"

    if run_stage_with_retry "test"; then
      mark_stage_complete "test"
      local timing
      timing=$(get_stage_timing "test")
      success "Stage ${BOLD}test${RESET} complete ${DIM}(${timing})${RESET}"
      emit_event "convergence.tests_passed" \
        "issue=${ISSUE_NUMBER:-0}" \
        "cycle=$cycle"
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        local _diff_count
        _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
        local _snap_files _snap_error
        _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
        _snap_files="${_snap_files:-0}"
        _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
        _snap_error="${_snap_error:-}"
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-test}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
      fi
      # Record fix outcome when tests pass after a retry with memory injection (pipeline path)
      if [[ "$cycle" -gt 1 && -n "${last_test_error:-}" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
        local _sig
        _sig=$(echo "$last_test_error" | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//')
        [[ -n "$_sig" ]] && bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_sig" "true" "true" 2>/dev/null || true
      fi
      return 0 # Tests passed!
    fi

    # Tests failed — capture error for next cycle
    local test_log="$ARTIFACTS_DIR/test-results.log"
    last_test_error=$(tail -30 "$test_log" 2>/dev/null || echo "Test command failed with no output")
    mark_stage_failed "test"

    # ── Convergence Detection ──
    # Hash the error output to detect repeated failures
    local error_sig
    error_sig=$(echo "$last_test_error" | shasum -a 256 2>/dev/null | cut -c1-16 || echo "unknown")

    # Count failing tests (extract from common patterns)
    # NOTE(review): this counts lines matching fail/error anywhere in the
    # log, so it is a rough proxy for failure count, not an exact count.
    local current_fail_count=0
    current_fail_count=$(grep -ciE 'fail|error|FAIL' "$test_log" 2>/dev/null || true)
    current_fail_count="${current_fail_count:-0}"

    if [[ "$error_sig" == "$prev_error_sig" ]]; then
      consecutive_same_error=$((consecutive_same_error + 1))
    else
      consecutive_same_error=1
    fi
    prev_error_sig="$error_sig"

    # Check: same error 3 times consecutively → stuck
    if [[ "$consecutive_same_error" -ge 3 ]]; then
      error "Convergence: stuck on same error for 3 consecutive cycles — exiting early"
      emit_event "convergence.stuck" \
        "issue=${ISSUE_NUMBER:-0}" \
        "cycle=$cycle" \
        "error_sig=$error_sig" \
        "consecutive=$consecutive_same_error"
      notify "Build Convergence" "Stuck on unfixable error after ${cycle} cycles" "error"
      return 1
    fi

    # Track convergence rate: did we reduce failures?
    if [[ "$cycle" -gt 1 && "$prev_fail_count" -gt 0 ]]; then
      if [[ "$current_fail_count" -ge "$prev_fail_count" ]]; then
        zero_convergence_streak=$((zero_convergence_streak + 1))
      else
        zero_convergence_streak=0
      fi

      # Check: zero convergence for 2 consecutive iterations → plateau
      if [[ "$zero_convergence_streak" -ge 2 ]]; then
        error "Convergence: no progress for 2 consecutive cycles (${current_fail_count} failures remain) — exiting early"
        emit_event "convergence.plateau" \
          "issue=${ISSUE_NUMBER:-0}" \
          "cycle=$cycle" \
          "fail_count=$current_fail_count" \
          "streak=$zero_convergence_streak"
        notify "Build Convergence" "No progress after ${cycle} cycles — plateau reached" "error"
        return 1
      fi
    fi
    prev_fail_count="$current_fail_count"

    info "Convergence: error_sig=${error_sig:0:8} repeat=${consecutive_same_error} failures=${current_fail_count} no_progress=${zero_convergence_streak}"

    if [[ "$cycle" -le "$max_cycles" ]]; then
      warn "Tests failed — will attempt self-healing (cycle $((cycle + 1))/$((max_cycles + 1)))"
      notify "Self-Healing" "Tests failed on cycle ${cycle}, retrying..." "warn"
    fi
  done

  error "Self-healing exhausted after $((max_cycles + 1)) cycles"
  notify "Self-Healing Failed" "Tests still failing after $((max_cycles + 1)) build-test cycles" "error"
  return 1
}
1253
-
1254
- # ─── Auto-Rebase ──────────────────────────────────────────────────────────
1255
-
1256
# Sync the current branch with origin/$BASE_BRANCH.
# Strategy: rebase first; on conflict, unwind and fall back to a merge.
# A failed fetch is treated as benign (returns 0); only the case where
# both rebase AND merge fail returns 1.
auto_rebase() {
  info "Syncing with ${BASE_BRANCH}..."

  # Best-effort fetch — if the remote is unreachable, carry on without syncing.
  if ! git fetch origin "$BASE_BRANCH" --quiet 2>/dev/null; then
    warn "Could not fetch origin/${BASE_BRANCH}"
    return 0
  fi

  # Nothing to do when we already contain every upstream commit.
  local commits_behind
  commits_behind=$(git rev-list --count "HEAD..origin/${BASE_BRANCH}" 2>/dev/null || echo "0")
  if [[ "$commits_behind" -eq 0 ]]; then
    success "Already up to date with ${BASE_BRANCH}"
    return 0
  fi

  info "Rebasing onto origin/${BASE_BRANCH} ($commits_behind commits behind)..."
  if git rebase "origin/${BASE_BRANCH}" --quiet 2>/dev/null; then
    success "Rebase successful"
    return 0
  fi

  # Rebase hit conflicts — abort it and try a merge instead.
  warn "Rebase conflict detected — aborting rebase"
  git rebase --abort 2>/dev/null || true
  warn "Falling back to merge..."
  if git merge "origin/${BASE_BRANCH}" --no-edit --quiet 2>/dev/null; then
    success "Merge successful"
    return 0
  fi

  git merge --abort 2>/dev/null || true
  error "Both rebase and merge failed — manual intervention needed"
  return 1
}
1290
-
1291
# Main pipeline driver: iterates over the stages defined in $PIPELINE_CONFIG,
# honoring human skip/message directives, approval gates, budget limits,
# intelligence-based stage skipping, and per-stage model routing. Build+test
# are delegated to self_healing_build_test() when both are enabled.
#
# Globals (read): PIPELINE_CONFIG, ARTIFACTS_DIR, BUILD_TEST_RETRIES,
#   SKIP_GATES, IGNORE_BUDGET, SCRIPT_DIR, ISSUE_NUMBER, COMPLETED_STAGES,
#   color vars. Globals (written): CURRENT_STAGE_ID, CLAUDE_MODEL (exported),
#   PIPELINE_STAGES_PASSED, PIPELINE_SLOWEST_STAGE.
# Returns: 0 on full completion or a clean pause; 1 when any stage fails.
run_pipeline() {
  # Rotate event log if needed (standalone mode)
  rotate_event_log_if_needed

  local stages
  stages=$(jq -c '.stages[]' "$PIPELINE_CONFIG")

  local stage_count enabled_count
  stage_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
  enabled_count=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
  local completed=0

  # Check which stages are enabled to determine if we use the self-healing loop
  local build_enabled test_enabled
  build_enabled=$(jq -r '.stages[] | select(.id == "build") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null)
  test_enabled=$(jq -r '.stages[] | select(.id == "test") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null)
  local use_self_healing=false
  if [[ "$build_enabled" == "true" && "$test_enabled" == "true" && "$BUILD_TEST_RETRIES" -gt 0 ]]; then
    use_self_healing=true
  fi

  # fd 3 is used so stages inside the loop can freely read stdin (gates).
  while IFS= read -r -u 3 stage; do
    local id enabled gate
    id=$(echo "$stage" | jq -r '.id')
    enabled=$(echo "$stage" | jq -r '.enabled')
    gate=$(echo "$stage" | jq -r '.gate')

    CURRENT_STAGE_ID="$id"

    # Human intervention: check for skip-stage directive
    if [[ -f "$ARTIFACTS_DIR/skip-stage.txt" ]]; then
      local skip_list
      skip_list="$(cat "$ARTIFACTS_DIR/skip-stage.txt" 2>/dev/null || true)"
      if echo "$skip_list" | grep -qx "$id" 2>/dev/null; then
        info "Stage ${BOLD}${id}${RESET} skipped by human directive"
        emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=human_skip"
        # Remove this stage from the skip file (so the directive is one-shot)
        local tmp_skip
        tmp_skip="$(mktemp)"
        trap "rm -f '$tmp_skip'" RETURN
        grep -vx "$id" "$ARTIFACTS_DIR/skip-stage.txt" > "$tmp_skip" 2>/dev/null || true
        mv "$tmp_skip" "$ARTIFACTS_DIR/skip-stage.txt"
        continue
      fi
    fi

    # Human intervention: check for human message (displayed once, then removed)
    if [[ -f "$ARTIFACTS_DIR/human-message.txt" ]]; then
      local human_msg
      human_msg="$(cat "$ARTIFACTS_DIR/human-message.txt" 2>/dev/null || true)"
      if [[ -n "$human_msg" ]]; then
        echo ""
        echo -e " ${PURPLE}${BOLD}💬 Human message:${RESET} $human_msg"
        emit_event "pipeline.human_message" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "message=$human_msg"
        rm -f "$ARTIFACTS_DIR/human-message.txt"
      fi
    fi

    if [[ "$enabled" != "true" ]]; then
      echo -e " ${DIM}○ ${id} — skipped (disabled)${RESET}"
      continue
    fi

    # Intelligence: evaluate whether to skip this stage
    local skip_reason=""
    skip_reason=$(pipeline_should_skip_stage "$id" 2>/dev/null) || true
    if [[ -n "$skip_reason" ]]; then
      echo -e " ${DIM}○ ${id} — skipped (intelligence: ${skip_reason})${RESET}"
      set_stage_status "$id" "complete"
      completed=$((completed + 1))
      continue
    fi

    local stage_status
    stage_status=$(get_stage_status "$id")
    if [[ "$stage_status" == "complete" ]]; then
      echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— already complete${RESET}"
      completed=$((completed + 1))
      continue
    fi

    # CI resume: skip stages marked as completed from previous run
    # (COMPLETED_STAGES is a comma-separated list of stage ids)
    if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "$id"; then
      # Verify artifacts survived the merge — regenerate if missing
      if verify_stage_artifacts "$id"; then
        echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— skipped (CI resume)${RESET}"
        set_stage_status "$id" "complete"
        completed=$((completed + 1))
        emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
        continue
      else
        warn "Stage $id marked complete but artifacts missing — regenerating"
        emit_event "stage.artifact_miss" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
      fi
    fi

    # Self-healing build→test loop: when we hit build, run both together
    if [[ "$id" == "build" && "$use_self_healing" == "true" ]]; then
      # TDD: generate tests before build when enabled
      if [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
        stage_test_first || true
      fi
      # Gate check for build (interactive approval unless gates are skipped)
      local build_gate
      build_gate=$(echo "$stage" | jq -r '.gate')
      if [[ "$build_gate" == "approve" && "$SKIP_GATES" != "true" ]]; then
        show_stage_preview "build"
        local answer=""
        if [[ -t 0 ]]; then
          read -rp " Proceed with build+test (self-healing)? [Y/n] " answer || true
        fi
        if [[ "$answer" =~ ^[Nn] ]]; then
          update_status "paused" "build"
          info "Pipeline paused. Resume with: ${DIM}shipwright pipeline resume${RESET}"
          return 0
        fi
      fi

      if self_healing_build_test; then
        completed=$((completed + 2)) # Both build and test

        # Intelligence: reassess complexity after build+test
        local reassessment
        reassessment=$(pipeline_reassess_complexity 2>/dev/null) || true
        if [[ -n "$reassessment" && "$reassessment" != "as_expected" ]]; then
          info "Complexity reassessment: ${reassessment}"
        fi
      else
        update_status "failed" "test"
        error "Pipeline failed: build→test self-healing exhausted"
        return 1
      fi
      continue
    fi

    # TDD: generate tests before build when enabled (non-self-healing path)
    if [[ "$id" == "build" && "$use_self_healing" != "true" ]] && [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
      stage_test_first || true
    fi

    # Skip test if already handled by self-healing loop
    if [[ "$id" == "test" && "$use_self_healing" == "true" ]]; then
      stage_status=$(get_stage_status "test")
      if [[ "$stage_status" == "complete" ]]; then
        echo -e " ${GREEN}✓ test${RESET} ${DIM}— completed in build→test loop${RESET}"
      fi
      continue
    fi

    # Gate check
    if [[ "$gate" == "approve" && "$SKIP_GATES" != "true" ]]; then
      show_stage_preview "$id"
      local answer=""
      if [[ -t 0 ]]; then
        read -rp " Proceed with ${id}? [Y/n] " answer || true
      else
        # Non-interactive: auto-approve (shouldn't reach here if headless detection works)
        info "Non-interactive mode — auto-approving ${id}"
      fi
      if [[ "$answer" =~ ^[Nn] ]]; then
        update_status "paused" "$id"
        info "Pipeline paused at ${BOLD}$id${RESET}. Resume with: ${DIM}shipwright pipeline resume${RESET}"
        return 0
      fi
    fi

    # Budget enforcement check (skip with --ignore-budget).
    # sw-cost.sh check-budget exit code 2 means "daily budget exceeded".
    if [[ "$IGNORE_BUDGET" != "true" ]] && [[ -x "$SCRIPT_DIR/sw-cost.sh" ]]; then
      local budget_rc=0
      bash "$SCRIPT_DIR/sw-cost.sh" check-budget 2>/dev/null || budget_rc=$?
      if [[ "$budget_rc" -eq 2 ]]; then
        warn "Daily budget exceeded — pausing pipeline before stage ${BOLD}$id${RESET}"
        warn "Resume with --ignore-budget to override, or wait until tomorrow"
        emit_event "pipeline.budget_paused" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
        update_status "paused" "$id"
        return 0
      fi
    fi

    # Intelligence: per-stage model routing (UCB1 when DB has data, else A/B testing)
    local recommended_model="" from_ucb1=false
    if type ucb1_select_model >/dev/null 2>&1; then
      recommended_model=$(ucb1_select_model "$id" 2>/dev/null || echo "")
      [[ -n "$recommended_model" ]] && from_ucb1=true
    fi
    if [[ -z "$recommended_model" ]] && type intelligence_recommend_model >/dev/null 2>&1; then
      local stage_complexity="${INTELLIGENCE_COMPLEXITY:-5}"
      local budget_remaining=""
      if [[ -x "$SCRIPT_DIR/sw-cost.sh" ]]; then
        budget_remaining=$(bash "$SCRIPT_DIR/sw-cost.sh" remaining-budget 2>/dev/null || echo "")
      fi
      local recommended_json
      recommended_json=$(intelligence_recommend_model "$id" "$stage_complexity" "$budget_remaining" 2>/dev/null || echo "")
      recommended_model=$(echo "$recommended_json" | jq -r '.model // empty' 2>/dev/null || echo "")
    fi
    if [[ -n "$recommended_model" && "$recommended_model" != "null" ]]; then
      if [[ "$from_ucb1" == "true" ]]; then
        # UCB1 already balances exploration/exploitation — use directly
        export CLAUDE_MODEL="$recommended_model"
        emit_event "intelligence.model_ucb1" \
          "issue=${ISSUE_NUMBER:-0}" \
          "stage=$id" \
          "model=$recommended_model"
      else
        # A/B testing for intelligence recommendation
        local ab_ratio=20
        local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
        if [[ -f "$daemon_cfg" ]]; then
          local cfg_ratio
          cfg_ratio=$(jq -r '.intelligence.ab_test_ratio // 0.2' "$daemon_cfg" 2>/dev/null || echo "0.2")
          # Convert the configured fraction (e.g. 0.2) to a percentage (20).
          ab_ratio=$(awk -v r="$cfg_ratio" 'BEGIN{printf "%d", r * 100}' 2>/dev/null || echo "20")
        fi

        local routing_file="${HOME}/.shipwright/optimization/model-routing.json"
        local use_recommended=false
        local ab_group="control"

        # Graduate out of A/B testing once enough samples exist (>= 50).
        if [[ -f "$routing_file" ]]; then
          local stage_samples total_samples
          stage_samples=$(jq -r --arg s "$id" '.routes[$s].sonnet_samples // .[$s].sonnet_samples // 0' "$routing_file" 2>/dev/null || echo "0")
          total_samples=$(jq -r --arg s "$id" '((.routes[$s].sonnet_samples // .[$s].sonnet_samples // 0) + (.routes[$s].opus_samples // .[$s].opus_samples // 0))' "$routing_file" 2>/dev/null || echo "0")
          if [[ "${total_samples:-0}" -ge 50 ]]; then
            use_recommended=true
            ab_group="graduated"
          fi
        fi

        # Otherwise roll the dice: ab_ratio% of runs use the recommendation.
        if [[ "$use_recommended" != "true" ]]; then
          local roll=$((RANDOM % 100))
          if [[ "$roll" -lt "$ab_ratio" ]]; then
            use_recommended=true
            ab_group="experiment"
          fi
        fi

        if [[ "$use_recommended" == "true" ]]; then
          export CLAUDE_MODEL="$recommended_model"
        else
          export CLAUDE_MODEL="opus"
        fi

        emit_event "intelligence.model_ab" \
          "issue=${ISSUE_NUMBER:-0}" \
          "stage=$id" \
          "recommended=$recommended_model" \
          "applied=$CLAUDE_MODEL" \
          "ab_group=$ab_group" \
          "ab_ratio=$ab_ratio"
      fi
    fi

    echo ""
    echo -e "${CYAN}${BOLD}▸ Stage: ${id}${RESET} ${DIM}[$((completed + 1))/${enabled_count}]${RESET}"
    update_status "running" "$id"
    record_stage_start "$id"
    local stage_start_epoch
    stage_start_epoch=$(now_epoch)
    emit_event "stage.started" "issue=${ISSUE_NUMBER:-0}" "stage=$id"

    # Mark GitHub Check Run as in-progress
    if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update >/dev/null 2>&1; then
      gh_checks_stage_update "$id" "in_progress" "" "Stage $id started" 2>/dev/null || true
    fi

    local stage_model_used="${CLAUDE_MODEL:-${MODEL:-opus}}"
    if run_stage_with_retry "$id"; then
      mark_stage_complete "$id"
      completed=$((completed + 1))
      # Capture project pattern after intake (for memory context in later stages)
      if [[ "$id" == "intake" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
        (cd "$REPO_DIR" && bash "$SCRIPT_DIR/sw-memory.sh" pattern "project" "{}" 2>/dev/null) || true
      fi
      local timing stage_dur_s
      timing=$(get_stage_timing "$id")
      stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
      success "Stage ${BOLD}$id${RESET} complete ${DIM}(${timing})${RESET}"
      emit_event "stage.completed" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "duration_s=$stage_dur_s" "result=success"
      # Emit vitals snapshot on every stage transition (not just build/test)
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "" 2>/dev/null || true
      fi
      # Record model outcome for UCB1 learning (1 = success)
      type record_model_outcome >/dev/null 2>&1 && record_model_outcome "$stage_model_used" "$id" 1 "$stage_dur_s" 0 2>/dev/null || true
      # Broadcast discovery for cross-pipeline learning
      if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
        local _disc_cat _disc_patterns _disc_text
        _disc_cat="$id"
        case "$id" in
          plan) _disc_patterns="*.md"; _disc_text="Plan completed: ${GOAL:-goal}" ;;
          design) _disc_patterns="*.md,*.ts,*.tsx,*.js"; _disc_text="Design completed for ${GOAL:-goal}" ;;
          build) _disc_patterns="src/*,*.ts,*.tsx,*.js"; _disc_text="Build completed" ;;
          test) _disc_patterns="*.test.*,*_test.*"; _disc_text="Tests passed" ;;
          review) _disc_patterns="*.md,*.ts,*.tsx"; _disc_text="Review completed" ;;
          *) _disc_patterns="*"; _disc_text="Stage $id completed" ;;
        esac
        bash "$SCRIPT_DIR/sw-discovery.sh" broadcast "$_disc_cat" "$_disc_patterns" "$_disc_text" "" 2>/dev/null || true
      fi
      # Log model used for prediction feedback
      echo "${id}|${stage_model_used}|true" >> "${ARTIFACTS_DIR}/model-routing.log"
    else
      mark_stage_failed "$id"
      local stage_dur_s
      stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
      error "Pipeline failed at stage: ${BOLD}$id${RESET}"
      update_status "failed" "$id"
      emit_event "stage.failed" \
        "issue=${ISSUE_NUMBER:-0}" \
        "stage=$id" \
        "duration_s=$stage_dur_s" \
        "error=${LAST_STAGE_ERROR:-unknown}" \
        "error_class=${LAST_STAGE_ERROR_CLASS:-unknown}"
      # Emit vitals snapshot on failure too
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "${LAST_STAGE_ERROR:-unknown}" 2>/dev/null || true
      fi
      # Log model used for prediction feedback
      echo "${id}|${stage_model_used}|false" >> "${ARTIFACTS_DIR}/model-routing.log"
      # Record model outcome for UCB1 learning (0 = failure)
      type record_model_outcome >/dev/null 2>&1 && record_model_outcome "$stage_model_used" "$id" 0 "$stage_dur_s" 0 2>/dev/null || true
      # Cancel any remaining in_progress check runs
      pipeline_cancel_check_runs 2>/dev/null || true
      return 1
    fi
  done 3<<< "$stages"

  # Pipeline complete!
  update_status "complete" ""
  PIPELINE_STAGES_PASSED="$completed"
  PIPELINE_SLOWEST_STAGE=""
  if type get_slowest_stage >/dev/null 2>&1; then
    PIPELINE_SLOWEST_STAGE=$(get_slowest_stage 2>/dev/null || true)
  fi
  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  echo ""
  echo -e "${GREEN}${BOLD}═══════════════════════════════════════════════════════════════════${RESET}"
  success "Pipeline complete! ${completed}/${enabled_count} stages passed in ${total_dur:-unknown}"
  echo -e "${GREEN}${BOLD}═══════════════════════════════════════════════════════════════════${RESET}"

  # Show summary
  echo ""
  if [[ -f "$ARTIFACTS_DIR/pr-url.txt" ]]; then
    echo -e " ${BOLD}PR:${RESET} $(cat "$ARTIFACTS_DIR/pr-url.txt")"
  fi
  echo -e " ${BOLD}Branch:${RESET} $GIT_BRANCH"
  [[ -n "${GITHUB_ISSUE:-}" ]] && echo -e " ${BOLD}Issue:${RESET} $GITHUB_ISSUE"
  echo -e " ${BOLD}Duration:${RESET} $total_dur"
  echo -e " ${BOLD}Artifacts:${RESET} $ARTIFACTS_DIR/"
  echo ""

  # Capture learnings to memory (success or failure)
  if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
    bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
  fi

  # Final GitHub progress update
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local body
    body=$(gh_build_progress_body)
    gh_update_progress "$body"
  fi

  # Post-completion cleanup
  pipeline_post_completion_cleanup
}
1659
-
1660
- # ─── Post-Completion Cleanup ──────────────────────────────────────────────
1661
- # Cleans up transient artifacts after a successful pipeline run.
1662
-
1663
# Remove transient per-run artifacts after a successful pipeline run and
# reset the recorded pipeline status to "idle" so the next run starts clean.
#
# Globals (read): ARTIFACTS_DIR, STATE_FILE, ISSUE_NUMBER
# Side effects: deletes checkpoint/context and per-run intelligence files,
#   rewrites $STATE_FILE in place, and emits a "pipeline.cleanup" event
#   when anything was removed.
pipeline_post_completion_cleanup() {
  local cleaned=0

  # 1. Clear checkpoints and context files (they only matter for resume; pipeline is done)
  if [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
    local cp_count=0
    local cp_file
    # Both glob families share identical handling, so one loop covers them.
    for cp_file in "${ARTIFACTS_DIR}/checkpoints"/*-checkpoint.json \
                   "${ARTIFACTS_DIR}/checkpoints"/*-claude-context.json; do
      [[ -f "$cp_file" ]] || continue   # an unmatched glob leaves the literal pattern
      rm -f -- "$cp_file"
      cp_count=$((cp_count + 1))
    done
    cleaned=$((cleaned + cp_count))
  fi

  # 2. Clear per-run intelligence artifacts (not needed after completion)
  local intel_files=(
    "${ARTIFACTS_DIR}/classified-findings.json"
    "${ARTIFACTS_DIR}/reassessment.json"
    "${ARTIFACTS_DIR}/skip-stage.txt"
    "${ARTIFACTS_DIR}/human-message.txt"
  )
  local f
  for f in "${intel_files[@]}"; do
    if [[ -f "$f" ]]; then
      rm -f -- "$f"
      cleaned=$((cleaned + 1))
    fi
  done

  # 3. Clear stale pipeline state (mark as idle so next run starts clean)
  if [[ -f "$STATE_FILE" ]]; then
    # Reset status to idle (preserves the file for reference but unblocks new runs)
    local tmp_state
    tmp_state=$(mktemp)
    # BUGFIX: only replace the state file when sed succeeded AND produced
    # output. Previously a failed sed (masked by `|| true`) was followed by
    # an unconditional mv, clobbering $STATE_FILE with an empty temp file.
    if sed 's/^status: .*/status: idle/' "$STATE_FILE" > "$tmp_state" 2>/dev/null \
        && [[ -s "$tmp_state" ]]; then
      mv "$tmp_state" "$STATE_FILE"
    else
      rm -f -- "$tmp_state"
    fi
  fi

  if [[ "$cleaned" -gt 0 ]]; then
    emit_event "pipeline.cleanup" \
      "issue=${ISSUE_NUMBER:-0}" \
      "cleaned=$cleaned" \
      "type=post_completion"
  fi
}
1717
-
1718
- # Cancel any lingering in_progress GitHub Check Runs (called on abort/interrupt)
1719
- pipeline_cancel_check_runs() {
1720
- if [[ "${NO_GITHUB:-false}" == "true" ]]; then
1721
- return
1722
- fi
1723
-
1724
- if ! type gh_checks_stage_update >/dev/null 2>&1; then
1725
- return
1726
- fi
1727
-
1728
- local ids_file="${ARTIFACTS_DIR:-/dev/null}/check-run-ids.json"
1729
- [[ -f "$ids_file" ]] || return
1730
-
1731
- local stage
1732
- while IFS= read -r stage; do
1733
- [[ -z "$stage" ]] && continue
1734
- gh_checks_stage_update "$stage" "completed" "cancelled" "Pipeline interrupted" 2>/dev/null || true
1735
- done < <(jq -r 'keys[]' "$ids_file" 2>/dev/null || true)
1736
- }
1737
-
1738
- # ─── Worktree Isolation ───────────────────────────────────────────────────
1739
- # Creates a git worktree for parallel-safe pipeline execution
1740
-
1741
# Create an isolated git worktree (under .worktrees/) on a fresh
# pipeline/<name> branch and cd into it, so parallel pipelines don't
# touch the primary checkout. Any stale worktree/branch with the same
# name is forcibly removed first.
#
# Globals (read): WORKTREE_NAME, ISSUE_NUMBER, color vars
# Globals (written): ORIGINAL_REPO_DIR (for later cleanup), CLEANUP_WORKTREE
# Side effects: changes the current working directory into the worktree.
# Returns: 1 when the cd into the new worktree fails.
pipeline_setup_worktree() {
  local worktree_base=".worktrees"
  local name="${WORKTREE_NAME}"

  # Auto-generate name from issue number or timestamp
  if [[ -z "$name" ]]; then
    if [[ -n "${ISSUE_NUMBER:-}" ]]; then
      name="pipeline-issue-${ISSUE_NUMBER}"
    else
      name="pipeline-$(date +%s)"
    fi
  fi

  local worktree_path="${worktree_base}/${name}"
  local branch_name="pipeline/${name}"

  info "Setting up worktree: ${DIM}${worktree_path}${RESET}"

  # Ensure worktree base exists
  mkdir -p "$worktree_base"

  # Remove stale worktree if it exists (fall back to rm -rf when git
  # no longer recognizes the directory as a worktree)
  if [[ -d "$worktree_path" ]]; then
    warn "Worktree already exists — removing: ${worktree_path}"
    git worktree remove --force "$worktree_path" 2>/dev/null || rm -rf "$worktree_path"
  fi

  # Delete stale branch if it exists
  git branch -D "$branch_name" 2>/dev/null || true

  # Create worktree with new branch from current HEAD
  git worktree add -b "$branch_name" "$worktree_path" HEAD

  # Store original dir for cleanup, then cd into worktree
  ORIGINAL_REPO_DIR="$(pwd)"
  cd "$worktree_path" || { error "Failed to cd into worktree: $worktree_path"; return 1; }
  CLEANUP_WORKTREE=true

  success "Worktree ready: ${CYAN}${worktree_path}${RESET} (branch: ${branch_name})"
}
1781
-
1782
pipeline_cleanup_worktree() {
  # Tear down the worktree created by pipeline_setup_worktree.
  # No-op unless CLEANUP_WORKTREE=true. On success (PIPELINE_EXIT_CODE == 0)
  # the worktree, its local branch, and (optionally) the remote branch are
  # removed; on failure the worktree is preserved for inspection.
  #
  # Globals read: CLEANUP_WORKTREE, ORIGINAL_REPO_DIR, PIPELINE_EXIT_CODE, NO_GITHUB
  if [[ "${CLEANUP_WORKTREE:-false}" != "true" ]]; then
    return
  fi

  # Current directory is assumed to be the worktree (setup cd'd into it)
  local worktree_path
  worktree_path="$(pwd)"

  if [[ -n "${ORIGINAL_REPO_DIR:-}" && "$worktree_path" != "$ORIGINAL_REPO_DIR" ]]; then
    # Must leave the worktree before git can remove it; fall back to / if the
    # original directory is gone
    cd "$ORIGINAL_REPO_DIR" 2>/dev/null || cd /
    # Only clean up worktree on success — preserve on failure for inspection
    if [[ "${PIPELINE_EXIT_CODE:-1}" -eq 0 ]]; then
      info "Cleaning up worktree: ${DIM}${worktree_path}${RESET}"
      # Extract branch name before removing worktree — `git worktree list
      # --porcelain` only knows the branch while the worktree still exists.
      # NOTE(review): the path is interpolated into a grep regex; assumes it
      # contains no regex metacharacters and matches porcelain output exactly
      # (porcelain prints absolute paths) — confirm against setup's relative path
      local _wt_branch=""
      _wt_branch=$(git worktree list --porcelain 2>/dev/null | grep -A1 "worktree ${worktree_path}$" | grep "^branch " | sed 's|^branch refs/heads/||' || true)
      if ! git worktree remove --force "$worktree_path" 2>/dev/null; then
        warn "Failed to remove worktree at ${worktree_path} — may need manual cleanup"
      fi
      # Clean up the local branch
      if [[ -n "$_wt_branch" ]]; then
        if ! git branch -D "$_wt_branch" 2>/dev/null; then
          warn "Failed to delete local branch ${_wt_branch}"
        fi
      fi
      # Clean up the remote branch (if it was pushed); best-effort — the push
      # may never have happened
      if [[ -n "$_wt_branch" && "${NO_GITHUB:-}" != "true" ]]; then
        git push origin --delete "$_wt_branch" 2>/dev/null || true
      fi
    else
      warn "Pipeline failed — worktree preserved for inspection: ${DIM}${worktree_path}${RESET}"
      warn "Clean up manually: ${DIM}git worktree remove --force ${worktree_path}${RESET}"
    fi
  fi
}
1817
-
1818
- # ─── Dry Run Mode ───────────────────────────────────────────────────────────
1819
- # Shows what would happen without executing
1820
run_dry_run() {
  # Validate the pipeline without executing it: check config JSON, print a
  # per-stage table (enabled/gate/model), verify required tools, estimate
  # token usage/cost, and optionally validate an intelligence-composed
  # pipeline. Returns 1 if the config is missing/invalid or a required tool
  # is absent; 0 otherwise.
  #
  # Globals read: PIPELINE_CONFIG, MODEL, SKIP_GATES, COST_MODEL_RATES,
  #               ARTIFACTS_DIR, color vars (BLUE/BOLD/…)
  echo ""
  echo -e "${BLUE}${BOLD}━━━ Dry Run: Pipeline Validation ━━━${RESET}"
  echo ""

  # Validate pipeline config
  if [[ ! -f "$PIPELINE_CONFIG" ]]; then
    error "Pipeline config not found: $PIPELINE_CONFIG"
    return 1
  fi

  # Validate JSON structure (output discarded; only the exit status matters)
  local validate_json
  validate_json=$(jq . "$PIPELINE_CONFIG" 2>/dev/null) || {
    error "Pipeline config is not valid JSON: $PIPELINE_CONFIG"
    return 1
  }

  # Extract pipeline metadata
  local pipeline_name stages_count enabled_stages gated_stages
  pipeline_name=$(jq -r '.name // "unknown"' "$PIPELINE_CONFIG")
  stages_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
  enabled_stages=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
  gated_stages=$(jq '[.stages[] | select(.enabled == true and .gate == "approve")] | length' "$PIPELINE_CONFIG")

  # Build model (CLI --model override wins, else config default)
  local default_model stage_model
  default_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")
  stage_model="$MODEL"
  [[ -z "$stage_model" ]] && stage_model="$default_model"

  echo -e " ${BOLD}Pipeline:${RESET} $pipeline_name"
  echo -e " ${BOLD}Stages:${RESET} $enabled_stages enabled of $stages_count total"
  if [[ "$SKIP_GATES" == "true" ]]; then
    echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
  else
    echo -e " ${BOLD}Gates:${RESET} $gated_stages approval gate(s)"
  fi
  echo -e " ${BOLD}Model:${RESET} $stage_model"
  echo ""

  # Table header
  echo -e "${CYAN}${BOLD}Stage Enabled Gate Model${RESET}"
  echo -e "${CYAN}────────────────────────────────────────${RESET}"

  # List all stages, one compact JSON object per line from jq -c
  while IFS= read -r stage_json; do
    local stage_id stage_enabled stage_gate stage_config_model stage_model_display
    stage_id=$(echo "$stage_json" | jq -r '.id')
    stage_enabled=$(echo "$stage_json" | jq -r '.enabled')
    stage_gate=$(echo "$stage_json" | jq -r '.gate')

    # Determine stage model (config override or default); jq -r prints the
    # literal string "null" when the key is absent, hence the extra check
    stage_config_model=$(echo "$stage_json" | jq -r '.config.model // ""')
    if [[ -n "$stage_config_model" && "$stage_config_model" != "null" ]]; then
      stage_model_display="$stage_config_model"
    else
      stage_model_display="$default_model"
    fi

    # Format enabled
    local enabled_str
    if [[ "$stage_enabled" == "true" ]]; then
      enabled_str="${GREEN}yes${RESET}"
    else
      enabled_str="${DIM}no${RESET}"
    fi

    # Format gate (anything other than "approve" is treated as auto)
    local gate_str
    if [[ "$stage_enabled" == "true" ]]; then
      if [[ "$stage_gate" == "approve" ]]; then
        gate_str="${YELLOW}approve${RESET}"
      else
        gate_str="${GREEN}auto${RESET}"
      fi
    else
      gate_str="${DIM}—${RESET}"
    fi

    # NOTE: %-15s padding counts bytes, so ANSI escapes in the later columns
    # would skew alignment — only the plain stage_id column is padded
    printf "%-15s %s %s %s\n" "$stage_id" "$enabled_str" "$gate_str" "$stage_model_display"
  done < <(jq -c '.stages[]' "$PIPELINE_CONFIG")

  echo ""

  # Validate required tools
  echo -e "${BLUE}${BOLD}━━━ Tool Validation ━━━${RESET}"
  echo ""

  local tool_errors=0
  local required_tools=("git" "jq")
  local optional_tools=("gh" "claude" "bc")

  # Required tools: missing ones increment tool_errors and fail the dry run
  for tool in "${required_tools[@]}"; do
    if command -v "$tool" >/dev/null 2>&1; then
      echo -e " ${GREEN}✓${RESET} $tool"
    else
      echo -e " ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
      tool_errors=$((tool_errors + 1))
    fi
  done

  # Optional tools: reported but never fatal
  for tool in "${optional_tools[@]}"; do
    if command -v "$tool" >/dev/null 2>&1; then
      echo -e " ${GREEN}✓${RESET} $tool"
    else
      echo -e " ${DIM}○${RESET} $tool"
    fi
  done

  echo ""

  # Cost estimation: use historical averages from past pipelines when available
  echo -e "${BLUE}${BOLD}━━━ Estimated Resource Usage ━━━${RESET}"
  echo ""

  local stages_json
  stages_json=$(jq '[.stages[] | select(.enabled == true)]' "$PIPELINE_CONFIG" 2>/dev/null || echo "[]")
  local est
  est=$(estimate_pipeline_cost "$stages_json")
  local input_tokens_estimate output_tokens_estimate
  input_tokens_estimate=$(echo "$est" | jq -r '.input_tokens // 0')
  output_tokens_estimate=$(echo "$est" | jq -r '.output_tokens // 0')

  # Calculate cost based on selected model.
  # NOTE(review): $stage_model is interpolated directly into the jq filter —
  # assumes model names are plain identifiers (no dashes/dots); verify
  # against COST_MODEL_RATES keys
  local input_rate output_rate input_cost output_cost total_cost
  input_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.input // 3" 2>/dev/null || echo "3")
  output_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.output // 15" 2>/dev/null || echo "15")

  # Cost calculation: tokens per million * rate (awk for float math)
  input_cost=$(awk -v tokens="$input_tokens_estimate" -v rate="$input_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  output_cost=$(awk -v tokens="$output_tokens_estimate" -v rate="$output_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')

  echo -e " ${BOLD}Estimated Input Tokens:${RESET} ~$input_tokens_estimate"
  echo -e " ${BOLD}Estimated Output Tokens:${RESET} ~$output_tokens_estimate"
  echo -e " ${BOLD}Model Cost Rate:${RESET} $stage_model"
  echo -e " ${BOLD}Estimated Cost:${RESET} \$$total_cost USD"
  echo ""

  # Validate composed pipeline if intelligence is enabled (best-effort;
  # a validation failure is a warning, not an error)
  if [[ -f "$ARTIFACTS_DIR/composed-pipeline.json" ]] && type composer_validate_pipeline >/dev/null 2>&1; then
    echo -e "${BLUE}${BOLD}━━━ Intelligence-Composed Pipeline ━━━${RESET}"
    echo ""

    if composer_validate_pipeline "$(cat "$ARTIFACTS_DIR/composed-pipeline.json" 2>/dev/null || echo "")" 2>/dev/null; then
      echo -e " ${GREEN}✓${RESET} Composed pipeline is valid"
    else
      echo -e " ${YELLOW}⚠${RESET} Composed pipeline validation failed (will use template defaults)"
    fi
    echo ""
  fi

  # Final validation result
  if [[ "$tool_errors" -gt 0 ]]; then
    error "Dry run validation failed: $tool_errors required tool(s) missing"
    return 1
  fi

  success "Dry run validation passed"
  echo ""
  echo -e " To execute this pipeline: ${DIM}remove --dry-run flag${RESET}"
  echo ""
  return 0
}
1985
-
1986
- # ─── Reasoning Trace Generation ──────────────────────────────────────────────
1987
- # Multi-step autonomous reasoning traces for pipeline start (before stages run)
1988
-
1989
generate_reasoning_trace() {
  # Multi-step autonomous reasoning before any stage runs:
  #   1. analyze issue/goal complexity and risk
  #   2. retrieve similar past issues from semantic memory
  #   3. select a pipeline template via Thompson sampling
  #   4. predict likely failure modes from history
  # Traces are persisted via db_save_reasoning_trace and the chosen template
  # is exported as PIPELINE_TEMPLATE. Every external call is best-effort
  # (guarded by `type`, `|| true`, or `|| echo`), so this never aborts a run.
  #
  # Globals read: SHIPWRIGHT_PIPELINE_ID, ISSUE_NUMBER, GOAL, PIPELINE_TEMPLATE
  # Globals set:  PIPELINE_TEMPLATE (exported, only if previously unset)
  local job_id="${SHIPWRIGHT_PIPELINE_ID:-$$}"
  local issue="${ISSUE_NUMBER:-}"
  local goal="${GOAL:-}"
  # Declared at function scope so BOTH branches below assign locals.
  # The original only ran `local issue_json analysis` inside the first
  # branch, so the goal-only elif branch leaked both names into globals.
  local issue_json="" analysis=""

  # Step 1: Analyze issue complexity and risk
  local complexity="medium"
  local risk_score=50
  if [[ -n "$issue" ]] && type intelligence_analyze_issue >/dev/null 2>&1; then
    issue_json=$(gh issue view "$issue" --json number,title,body,labels 2>/dev/null || echo "{}")
    if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
      analysis=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "")
      if [[ -n "$analysis" ]]; then
        # Map numeric complexity (1-10 scale, default 5) onto low/medium/high
        local comp_num
        comp_num=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
        if [[ "$comp_num" -le 3 ]]; then
          complexity="low"
        elif [[ "$comp_num" -le 6 ]]; then
          complexity="medium"
        else
          complexity="high"
        fi
        # Risk is the inverse of predicted success probability
        risk_score=$((100 - $(echo "$analysis" | jq -r '.success_probability // 50' 2>/dev/null || echo "50")))
      fi
    fi
  elif [[ -n "$goal" ]]; then
    # No issue number — synthesize an issue-shaped JSON object from the goal
    issue_json=$(jq -n --arg title "${goal}" --arg body "" '{title: $title, body: $body, labels: []}')
    if type intelligence_analyze_issue >/dev/null 2>&1; then
      analysis=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "")
      if [[ -n "$analysis" ]]; then
        local comp_num
        comp_num=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
        if [[ "$comp_num" -le 3 ]]; then complexity="low"; elif [[ "$comp_num" -le 6 ]]; then complexity="medium"; else complexity="high"; fi
        risk_score=$((100 - $(echo "$analysis" | jq -r '.success_probability // 50' 2>/dev/null || echo "50")))
      fi
    fi
  fi

  # Step 2: Query similar past issues
  local similar_context=""
  if type memory_semantic_search >/dev/null 2>&1 && [[ -n "$goal" ]]; then
    similar_context=$(memory_semantic_search "$goal" "" 3 2>/dev/null || echo "")
  fi

  # Step 3: Select template using Thompson sampling (explicit --pipeline
  # via PIPELINE_TEMPLATE takes precedence; "standard" is the last resort)
  local selected_template="${PIPELINE_TEMPLATE:-}"
  if [[ -z "$selected_template" ]] && type thompson_select_template >/dev/null 2>&1; then
    selected_template=$(thompson_select_template "$complexity" 2>/dev/null || echo "standard")
  fi
  [[ -z "$selected_template" ]] && selected_template="standard"

  # Step 4: Predict failure modes from memory
  local failure_predictions=""
  if type memory_semantic_search >/dev/null 2>&1 && [[ -n "$goal" ]]; then
    failure_predictions=$(memory_semantic_search "failure error $goal" "" 3 2>/dev/null || echo "")
  fi

  # Save reasoning traces to DB (trailing numbers are confidence scores)
  if type db_save_reasoning_trace >/dev/null 2>&1; then
    db_save_reasoning_trace "$job_id" "complexity_analysis" \
      "issue=$issue goal=$goal" \
      "Analyzed complexity=$complexity risk=$risk_score" \
      "complexity=$complexity risk_score=$risk_score" 0.7 2>/dev/null || true

    db_save_reasoning_trace "$job_id" "template_selection" \
      "complexity=$complexity historical_outcomes" \
      "Thompson sampling over historical success rates" \
      "template=$selected_template" 0.8 2>/dev/null || true

    if [[ -n "$similar_context" && "$similar_context" != "[]" ]]; then
      db_save_reasoning_trace "$job_id" "similar_issues" \
        "$goal" \
        "Found similar past issues for context injection" \
        "$similar_context" 0.6 2>/dev/null || true
    fi

    if [[ -n "$failure_predictions" && "$failure_predictions" != "[]" ]]; then
      db_save_reasoning_trace "$job_id" "failure_prediction" \
        "$goal" \
        "Predicted potential failure modes from history" \
        "$failure_predictions" 0.5 2>/dev/null || true
    fi
  fi

  # Export for use by pipeline stages — never clobber a user-supplied template
  [[ -n "$selected_template" && -z "${PIPELINE_TEMPLATE:-}" ]] && export PIPELINE_TEMPLATE="$selected_template"

  emit_event "reasoning.trace" "job_id=$job_id" "complexity=$complexity" "risk=$risk_score" "template=${selected_template:-standard}" 2>/dev/null || true
}
2079
-
2080
- # ─── Subcommands ────────────────────────────────────────────────────────────
2081
-
2082
- pipeline_start() {
2083
- # Handle --repo flag: change to directory before running
2084
- if [[ -n "$REPO_OVERRIDE" ]]; then
2085
- if [[ ! -d "$REPO_OVERRIDE" ]]; then
2086
- error "Directory does not exist: $REPO_OVERRIDE"
2087
- exit 1
2088
- fi
2089
- if ! cd "$REPO_OVERRIDE" 2>/dev/null; then
2090
- error "Cannot cd to: $REPO_OVERRIDE"
2091
- exit 1
2092
- fi
2093
- if ! git rev-parse --show-toplevel >/dev/null 2>&1; then
2094
- error "Not a git repository: $REPO_OVERRIDE"
2095
- exit 1
2096
- fi
2097
- ORIGINAL_REPO_DIR="$(pwd)"
2098
- info "Using repository: $ORIGINAL_REPO_DIR"
2099
- fi
2100
-
2101
- # Bootstrap optimization & memory if cold start (before first intelligence use)
2102
- if [[ -f "$SCRIPT_DIR/lib/bootstrap.sh" ]]; then
2103
- source "$SCRIPT_DIR/lib/bootstrap.sh"
2104
- [[ ! -f "$HOME/.shipwright/optimization/iteration-model.json" ]] && bootstrap_optimization 2>/dev/null || true
2105
- [[ ! -f "$HOME/.shipwright/memory/patterns.json" ]] && bootstrap_memory 2>/dev/null || true
2106
- fi
2107
-
2108
- if [[ -z "$GOAL" && -z "$ISSUE_NUMBER" ]]; then
2109
- error "Must provide --goal or --issue"
2110
- echo -e " Example: ${DIM}shipwright pipeline start --goal \"Add JWT auth\"${RESET}"
2111
- echo -e " Example: ${DIM}shipwright pipeline start --issue 123${RESET}"
2112
- exit 1
2113
- fi
2114
-
2115
- if ! command -v jq >/dev/null 2>&1; then
2116
- error "jq is required. Install it: brew install jq"
2117
- exit 1
2118
- fi
2119
-
2120
- # Set up worktree isolation if requested
2121
- if [[ "$AUTO_WORKTREE" == "true" ]]; then
2122
- pipeline_setup_worktree
2123
- fi
2124
-
2125
- # Register worktree cleanup on exit (chain with existing cleanup)
2126
- if [[ "$CLEANUP_WORKTREE" == "true" ]]; then
2127
- trap 'pipeline_cleanup_worktree; cleanup_on_exit' SIGINT SIGTERM
2128
- trap 'pipeline_cleanup_worktree; cleanup_on_exit' EXIT
2129
- fi
2130
-
2131
- setup_dirs
2132
-
2133
- # Acquire durable lock to prevent concurrent pipelines on the same issue/goal
2134
- _PIPELINE_LOCK_ID=""
2135
- if type acquire_lock >/dev/null 2>&1; then
2136
- _PIPELINE_LOCK_ID="pipeline-${ISSUE_NUMBER:-goal-$$}"
2137
- if ! acquire_lock "$_PIPELINE_LOCK_ID" 5 2>/dev/null; then
2138
- error "Another pipeline is already running for this issue/goal"
2139
- echo -e " Wait for it to finish, or remove stale lock:"
2140
- echo -e " ${DIM}rm -rf ~/.shipwright/durable/locks/${_PIPELINE_LOCK_ID}.lock${RESET}"
2141
- _PIPELINE_LOCK_ID=""
2142
- exit 1
2143
- fi
2144
- fi
2145
-
2146
- # Generate reasoning trace (complexity analysis, template selection, failure predictions)
2147
- local user_specified_pipeline="$PIPELINE_NAME"
2148
- generate_reasoning_trace 2>/dev/null || true
2149
- if [[ -n "${PIPELINE_TEMPLATE:-}" && "$user_specified_pipeline" == "standard" ]]; then
2150
- PIPELINE_NAME="$PIPELINE_TEMPLATE"
2151
- fi
2152
-
2153
- # Check for existing pipeline
2154
- if [[ -f "$STATE_FILE" ]]; then
2155
- local existing_status
2156
- existing_status=$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)
2157
- if [[ "$existing_status" == "running" || "$existing_status" == "paused" || "$existing_status" == "interrupted" ]]; then
2158
- warn "A pipeline is already in progress (status: $existing_status)"
2159
- echo -e " Resume it: ${DIM}shipwright pipeline resume${RESET}"
2160
- echo -e " Abort it: ${DIM}shipwright pipeline abort${RESET}"
2161
- exit 1
2162
- fi
2163
- fi
2164
-
2165
- # Pre-flight checks
2166
- preflight_checks || exit 1
2167
-
2168
- # Initialize GitHub integration
2169
- gh_init
2170
-
2171
- load_pipeline_config
2172
-
2173
- # Checkpoint resume: when --resume is passed, try DB first, then file-based
2174
- checkpoint_stage=""
2175
- checkpoint_iteration=0
2176
- if $RESUME_FROM_CHECKPOINT && type db_load_checkpoint >/dev/null 2>&1; then
2177
- local saved_checkpoint
2178
- saved_checkpoint=$(db_load_checkpoint "pipeline-${SHIPWRIGHT_PIPELINE_ID:-$$}" 2>/dev/null || echo "")
2179
- if [[ -n "$saved_checkpoint" ]]; then
2180
- checkpoint_stage=$(echo "$saved_checkpoint" | jq -r '.stage // ""' 2>/dev/null || echo "")
2181
- if [[ -n "$checkpoint_stage" ]]; then
2182
- info "Resuming from DB checkpoint: stage=$checkpoint_stage"
2183
- checkpoint_iteration=$(echo "$saved_checkpoint" | jq -r '.iteration // 0' 2>/dev/null || echo "0")
2184
- # Build COMPLETED_STAGES: all enabled stages before checkpoint_stage
2185
- local enabled_list before_list=""
2186
- enabled_list=$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" 2>/dev/null) || true
2187
- local s
2188
- while IFS= read -r s; do
2189
- [[ -z "$s" ]] && continue
2190
- if [[ "$s" == "$checkpoint_stage" ]]; then
2191
- break
2192
- fi
2193
- [[ -n "$before_list" ]] && before_list="${before_list},${s}" || before_list="$s"
2194
- done <<< "$enabled_list"
2195
- if [[ -n "$before_list" ]]; then
2196
- COMPLETED_STAGES="${before_list}"
2197
- SELF_HEAL_COUNT="${checkpoint_iteration}"
2198
- fi
2199
- fi
2200
- fi
2201
- fi
2202
- if $RESUME_FROM_CHECKPOINT && [[ -z "$checkpoint_stage" ]] && [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
2203
- local cp_dir="${ARTIFACTS_DIR}/checkpoints"
2204
- local latest_cp="" latest_mtime=0
2205
- local f
2206
- for f in "$cp_dir"/*-checkpoint.json; do
2207
- [[ -f "$f" ]] || continue
2208
- local mtime
2209
- mtime=$(file_mtime "$f" 2>/dev/null || echo "0")
2210
- if [[ "${mtime:-0}" -gt "$latest_mtime" ]]; then
2211
- latest_mtime="${mtime}"
2212
- latest_cp="$f"
2213
- fi
2214
- done
2215
- if [[ -n "$latest_cp" && -x "$SCRIPT_DIR/sw-checkpoint.sh" ]]; then
2216
- checkpoint_stage="$(basename "$latest_cp" -checkpoint.json)"
2217
- local cp_json
2218
- cp_json="$("$SCRIPT_DIR/sw-checkpoint.sh" restore --stage "$checkpoint_stage" 2>/dev/null)" || true
2219
- if [[ -n "$cp_json" ]] && command -v jq >/dev/null 2>&1; then
2220
- checkpoint_iteration="$(echo "$cp_json" | jq -r '.iteration // 0' 2>/dev/null)" || checkpoint_iteration=0
2221
- info "Checkpoint resume: stage=${checkpoint_stage} iteration=${checkpoint_iteration}"
2222
- # Build COMPLETED_STAGES: all enabled stages before checkpoint_stage
2223
- local enabled_list before_list=""
2224
- enabled_list="$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" 2>/dev/null)" || true
2225
- local s
2226
- while IFS= read -r s; do
2227
- [[ -z "$s" ]] && continue
2228
- if [[ "$s" == "$checkpoint_stage" ]]; then
2229
- break
2230
- fi
2231
- [[ -n "$before_list" ]] && before_list="${before_list},${s}" || before_list="$s"
2232
- done <<< "$enabled_list"
2233
- if [[ -n "$before_list" ]]; then
2234
- COMPLETED_STAGES="${before_list}"
2235
- SELF_HEAL_COUNT="${checkpoint_iteration}"
2236
- fi
2237
- fi
2238
- fi
2239
- fi
2240
-
2241
- # Restore from state file if resuming (failed/interrupted pipeline); else initialize fresh
2242
- if $RESUME_FROM_CHECKPOINT && [[ -f "$STATE_FILE" ]]; then
2243
- local existing_status
2244
- existing_status="$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)"
2245
- if [[ "$existing_status" == "failed" || "$existing_status" == "interrupted" ]]; then
2246
- resume_state
2247
- else
2248
- initialize_state
2249
- fi
2250
- else
2251
- initialize_state
2252
- fi
2253
-
2254
- # CI resume: restore branch + goal context when intake is skipped
2255
- if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "intake"; then
2256
- # Intake was completed in a previous run — restore context
2257
- # The workflow merges the partial work branch, so code changes are on HEAD
2258
-
2259
- # Restore GOAL from issue if not already set
2260
- if [[ -z "$GOAL" && -n "$ISSUE_NUMBER" ]]; then
2261
- GOAL=$(_timeout "$(_config_get_int "network.gh_timeout" 30 2>/dev/null || echo 30)" gh issue view "$ISSUE_NUMBER" --json title -q .title 2>/dev/null || echo "Issue #${ISSUE_NUMBER}")
2262
- info "CI resume: goal from issue — ${GOAL}"
2263
- fi
2264
-
2265
- # Restore branch context
2266
- if [[ -z "$GIT_BRANCH" ]]; then
2267
- local ci_branch="ci/issue-${ISSUE_NUMBER}"
2268
- info "CI resume: creating branch ${ci_branch} from current HEAD"
2269
- if ! git checkout -b "$ci_branch" 2>/dev/null && ! git checkout "$ci_branch" 2>/dev/null; then
2270
- warn "CI resume: failed to create or checkout branch ${ci_branch}"
2271
- fi
2272
- GIT_BRANCH="$ci_branch"
2273
- elif [[ "$(git branch --show-current 2>/dev/null)" != "$GIT_BRANCH" ]]; then
2274
- info "CI resume: checking out branch ${GIT_BRANCH}"
2275
- if ! git checkout -b "$GIT_BRANCH" 2>/dev/null && ! git checkout "$GIT_BRANCH" 2>/dev/null; then
2276
- warn "CI resume: failed to create or checkout branch ${GIT_BRANCH}"
2277
- fi
2278
- fi
2279
- write_state 2>/dev/null || true
2280
- fi
2281
-
2282
- echo ""
2283
- echo -e "${PURPLE}${BOLD}╔═══════════════════════════════════════════════════════════════════╗${RESET}"
2284
- echo -e "${PURPLE}${BOLD}║ shipwright pipeline — Autonomous Feature Delivery ║${RESET}"
2285
- echo -e "${PURPLE}${BOLD}╚═══════════════════════════════════════════════════════════════════╝${RESET}"
2286
- echo ""
2287
-
2288
- # Comprehensive environment summary
2289
- if [[ -n "$GOAL" ]]; then
2290
- echo -e " ${BOLD}Goal:${RESET} $GOAL"
2291
- fi
2292
- if [[ -n "$ISSUE_NUMBER" ]]; then
2293
- echo -e " ${BOLD}Issue:${RESET} #$ISSUE_NUMBER"
2294
- fi
2295
-
2296
- echo -e " ${BOLD}Pipeline:${RESET} $PIPELINE_NAME"
2297
-
2298
- local enabled_stages
2299
- enabled_stages=$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" | tr '\n' ' ')
2300
- echo -e " ${BOLD}Stages:${RESET} $enabled_stages"
2301
-
2302
- local gate_count
2303
- gate_count=$(jq '[.stages[] | select(.gate == "approve" and .enabled == true)] | length' "$PIPELINE_CONFIG")
2304
- if [[ "$HEADLESS" == "true" ]]; then
2305
- echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (headless — non-interactive stdin detected)${RESET}"
2306
- elif [[ "$SKIP_GATES" == "true" ]]; then
2307
- echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
2308
- else
2309
- echo -e " ${BOLD}Gates:${RESET} ${gate_count} approval gate(s)"
2310
- fi
2311
-
2312
- echo -e " ${BOLD}Model:${RESET} ${MODEL:-$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")}"
2313
- echo -e " ${BOLD}Self-heal:${RESET} ${BUILD_TEST_RETRIES} retry cycle(s)"
2314
-
2315
- if [[ "$GH_AVAILABLE" == "true" ]]; then
2316
- echo -e " ${BOLD}GitHub:${RESET} ${GREEN}✓${RESET} ${DIM}${REPO_OWNER}/${REPO_NAME}${RESET}"
2317
- else
2318
- echo -e " ${BOLD}GitHub:${RESET} ${DIM}disabled${RESET}"
2319
- fi
2320
-
2321
- if [[ -n "$SLACK_WEBHOOK" ]]; then
2322
- echo -e " ${BOLD}Slack:${RESET} ${GREEN}✓${RESET} notifications enabled"
2323
- fi
2324
-
2325
- echo ""
2326
-
2327
- if [[ "$DRY_RUN" == "true" ]]; then
2328
- run_dry_run
2329
- return $?
2330
- fi
2331
-
2332
- # Capture predictions for feedback loop (intelligence → actuals → learning)
2333
- if type intelligence_analyze_issue >/dev/null 2>&1 && (type intelligence_estimate_iterations >/dev/null 2>&1 || type intelligence_predict_cost >/dev/null 2>&1); then
2334
- local issue_json="${INTELLIGENCE_ANALYSIS:-}"
2335
- if [[ -z "$issue_json" || "$issue_json" == "{}" ]]; then
2336
- if [[ -n "$ISSUE_NUMBER" ]]; then
2337
- issue_json=$(gh issue view "$ISSUE_NUMBER" --json number,title,body,labels 2>/dev/null || echo "{}")
2338
- else
2339
- issue_json=$(jq -n --arg title "${GOAL:-untitled}" --arg body "" '{title: $title, body: $body, labels: []}')
2340
- fi
2341
- if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
2342
- issue_json=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "{}")
2343
- fi
2344
- fi
2345
- if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
2346
- if type intelligence_estimate_iterations >/dev/null 2>&1; then
2347
- PREDICTED_ITERATIONS=$(intelligence_estimate_iterations "$issue_json" "" 2>/dev/null || echo "")
2348
- export PREDICTED_ITERATIONS
2349
- fi
2350
- if type intelligence_predict_cost >/dev/null 2>&1; then
2351
- local cost_json
2352
- cost_json=$(intelligence_predict_cost "$issue_json" "{}" 2>/dev/null || echo "{}")
2353
- PREDICTED_COST=$(echo "$cost_json" | jq -r '.estimated_cost_usd // empty' 2>/dev/null || echo "")
2354
- export PREDICTED_COST
2355
- fi
2356
- fi
2357
- fi
2358
-
2359
- # Start background heartbeat writer
2360
- start_heartbeat
2361
-
2362
- # Initialize GitHub Check Runs for all pipeline stages
2363
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_pipeline_start >/dev/null 2>&1; then
2364
- local head_sha
2365
- head_sha=$(git rev-parse HEAD 2>/dev/null || echo "")
2366
- if [[ -n "$head_sha" && -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2367
- local stages_json
2368
- stages_json=$(jq -c '[.stages[] | select(.enabled == true) | .id]' "$PIPELINE_CONFIG" 2>/dev/null || echo '[]')
2369
- gh_checks_pipeline_start "$REPO_OWNER" "$REPO_NAME" "$head_sha" "$stages_json" >/dev/null 2>/dev/null || true
2370
- info "GitHub Checks: created check runs for pipeline stages"
2371
- fi
2372
- fi
2373
-
2374
- # Send start notification
2375
- notify "Pipeline Started" "Goal: ${GOAL}\nPipeline: ${PIPELINE_NAME}" "info"
2376
-
2377
- emit_event "pipeline.started" \
2378
- "issue=${ISSUE_NUMBER:-0}" \
2379
- "template=${PIPELINE_NAME}" \
2380
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2381
- "machine=$(hostname 2>/dev/null || echo "unknown")" \
2382
- "pipeline=${PIPELINE_NAME}" \
2383
- "model=${MODEL:-opus}" \
2384
- "goal=${GOAL}"
2385
-
2386
- # Record pipeline run in SQLite for dashboard visibility
2387
- if type add_pipeline_run >/dev/null 2>&1; then
2388
- add_pipeline_run "${SHIPWRIGHT_PIPELINE_ID}" "${ISSUE_NUMBER:-0}" "${GOAL}" "${BRANCH:-}" "${PIPELINE_NAME}" 2>/dev/null || true
2389
- fi
2390
-
2391
- # Durable WAL: publish pipeline start event
2392
- if type publish_event >/dev/null 2>&1; then
2393
- publish_event "pipeline.started" "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"pipeline\":\"${PIPELINE_NAME}\",\"goal\":\"${GOAL:0:200}\"}" 2>/dev/null || true
2394
- fi
2395
-
2396
- run_pipeline
2397
- local exit_code=$?
2398
- PIPELINE_EXIT_CODE="$exit_code"
2399
-
2400
- # Compute total cost for pipeline.completed (prefer actual from Claude when available)
2401
- local model_key="${MODEL:-sonnet}"
2402
- local total_cost
2403
- if [[ -n "${TOTAL_COST_USD:-}" && "${TOTAL_COST_USD}" != "0" && "${TOTAL_COST_USD}" != "null" ]]; then
2404
- total_cost="${TOTAL_COST_USD}"
2405
- else
2406
- local input_cost output_cost
2407
- input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2408
- output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2409
- total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
2410
- fi
2411
-
2412
- # Send completion notification + event
2413
- local total_dur_s=""
2414
- [[ -n "$PIPELINE_START_EPOCH" ]] && total_dur_s=$(( $(now_epoch) - PIPELINE_START_EPOCH ))
2415
- if [[ "$exit_code" -eq 0 ]]; then
2416
- local total_dur=""
2417
- [[ -n "$total_dur_s" ]] && total_dur=$(format_duration "$total_dur_s")
2418
- local pr_url
2419
- pr_url=$(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo "")
2420
- notify "Pipeline Complete" "Goal: ${GOAL}\nDuration: ${total_dur:-unknown}\nPR: ${pr_url:-N/A}" "success"
2421
- emit_event "pipeline.completed" \
2422
- "issue=${ISSUE_NUMBER:-0}" \
2423
- "result=success" \
2424
- "duration_s=${total_dur_s:-0}" \
2425
- "iterations=$((SELF_HEAL_COUNT + 1))" \
2426
- "template=${PIPELINE_NAME}" \
2427
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2428
- "stages_passed=${PIPELINE_STAGES_PASSED:-0}" \
2429
- "slowest_stage=${PIPELINE_SLOWEST_STAGE:-}" \
2430
- "pr_url=${pr_url:-}" \
2431
- "agent_id=${PIPELINE_AGENT_ID}" \
2432
- "input_tokens=$TOTAL_INPUT_TOKENS" \
2433
- "output_tokens=$TOTAL_OUTPUT_TOKENS" \
2434
- "total_cost=$total_cost" \
2435
- "self_heal_count=$SELF_HEAL_COUNT"
2436
-
2437
- # Update pipeline run status in SQLite
2438
- if type update_pipeline_status >/dev/null 2>&1; then
2439
- update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "completed" "${PIPELINE_SLOWEST_STAGE:-}" "complete" "${total_dur_s:-0}" 2>/dev/null || true
2440
- fi
2441
-
2442
- # Auto-ingest pipeline outcome into recruit profiles
2443
- if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
2444
- bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
2445
- fi
2446
-
2447
- # Capture success patterns to memory (learn what works — parallel the failure path)
2448
- if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
2449
- bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
2450
- fi
2451
- # Update memory baselines with successful run metrics
2452
- if type memory_update_metrics >/dev/null 2>&1; then
2453
- memory_update_metrics "build_duration_s" "${total_dur_s:-0}" 2>/dev/null || true
2454
- memory_update_metrics "total_cost_usd" "${total_cost:-0}" 2>/dev/null || true
2455
- memory_update_metrics "iterations" "$((SELF_HEAL_COUNT + 1))" 2>/dev/null || true
2456
- fi
2457
-
2458
- # Record positive fix outcome if self-healing succeeded
2459
- if [[ "$SELF_HEAL_COUNT" -gt 0 && -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
2460
- local _success_sig
2461
- _success_sig=$(tail -30 "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//' || true)
2462
- if [[ -n "$_success_sig" ]]; then
2463
- bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_success_sig" "true" "true" 2>/dev/null || true
2464
- fi
2465
- fi
2466
- else
2467
- notify "Pipeline Failed" "Goal: ${GOAL}\nFailed at: ${CURRENT_STAGE_ID:-unknown}" "error"
2468
- emit_event "pipeline.completed" \
2469
- "issue=${ISSUE_NUMBER:-0}" \
2470
- "result=failure" \
2471
- "duration_s=${total_dur_s:-0}" \
2472
- "iterations=$((SELF_HEAL_COUNT + 1))" \
2473
- "template=${PIPELINE_NAME}" \
2474
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2475
- "failed_stage=${CURRENT_STAGE_ID:-unknown}" \
2476
- "error_class=${LAST_STAGE_ERROR_CLASS:-unknown}" \
2477
- "agent_id=${PIPELINE_AGENT_ID}" \
2478
- "input_tokens=$TOTAL_INPUT_TOKENS" \
2479
- "output_tokens=$TOTAL_OUTPUT_TOKENS" \
2480
- "total_cost=$total_cost" \
2481
- "self_heal_count=$SELF_HEAL_COUNT"
2482
-
2483
- # Update pipeline run status in SQLite
2484
- if type update_pipeline_status >/dev/null 2>&1; then
2485
- update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "failed" "${CURRENT_STAGE_ID:-unknown}" "failed" "${total_dur_s:-0}" 2>/dev/null || true
2486
- fi
2487
-
2488
- # Auto-ingest pipeline outcome into recruit profiles
2489
- if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
2490
- bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
2491
- fi
2492
-
2493
- # Capture failure learnings to memory
2494
- if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
2495
- bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
2496
- bash "$SCRIPT_DIR/sw-memory.sh" analyze-failure "$ARTIFACTS_DIR/.claude-tokens-${CURRENT_STAGE_ID:-build}.log" "${CURRENT_STAGE_ID:-unknown}" 2>/dev/null || true
2497
-
2498
- # Record negative fix outcome — memory suggested a fix but it didn't resolve the issue
2499
- # This closes the negative side of the fix-outcome feedback loop
2500
- if [[ "$SELF_HEAL_COUNT" -gt 0 ]]; then
2501
- local _fail_sig
2502
- _fail_sig=$(tail -30 "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//' || true)
2503
- if [[ -n "$_fail_sig" ]]; then
2504
- bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_fail_sig" "true" "false" 2>/dev/null || true
2505
- fi
2506
- fi
2507
- fi
2508
- fi
2509
-
2510
- # ── Prediction Validation Events ──
2511
- # Compare predicted vs actual outcomes for feedback loop calibration
2512
- local pipeline_success="false"
2513
- [[ "$exit_code" -eq 0 ]] && pipeline_success="true"
2514
-
2515
- # Complexity prediction vs actual iterations
2516
- emit_event "prediction.validated" \
2517
- "issue=${ISSUE_NUMBER:-0}" \
2518
- "predicted_complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2519
- "actual_iterations=$SELF_HEAL_COUNT" \
2520
- "success=$pipeline_success"
2521
-
2522
- # Close intelligence prediction feedback loop — validate predicted vs actual
2523
- if type intelligence_validate_prediction >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
2524
- intelligence_validate_prediction \
2525
- "$ISSUE_NUMBER" \
2526
- "${INTELLIGENCE_COMPLEXITY:-0}" \
2527
- "${SELF_HEAL_COUNT:-0}" \
2528
- "$pipeline_success" 2>/dev/null || true
2529
- fi
2530
-
2531
- # Validate iterations prediction against actuals (cost validation moved below after total_cost is computed)
2532
- local ACTUAL_ITERATIONS=$((SELF_HEAL_COUNT + 1))
2533
- if [[ -n "${PREDICTED_ITERATIONS:-}" ]] && type intelligence_validate_prediction >/dev/null 2>&1; then
2534
- intelligence_validate_prediction "iterations" "$PREDICTED_ITERATIONS" "$ACTUAL_ITERATIONS" 2>/dev/null || true
2535
- fi
2536
-
2537
- # Close predictive anomaly feedback loop — confirm whether flagged anomalies were real
2538
- if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
2539
- local _actual_failure="false"
2540
- [[ "$exit_code" -ne 0 ]] && _actual_failure="true"
2541
- # Confirm anomalies for build and test stages based on pipeline outcome
2542
- for _anomaly_stage in build test; do
2543
- bash "$SCRIPT_DIR/sw-predictive.sh" confirm-anomaly "$_anomaly_stage" "duration_s" "$_actual_failure" 2>/dev/null || true
2544
- done
2545
- fi
2546
-
2547
- # Template outcome tracking
2548
- emit_event "template.outcome" \
2549
- "issue=${ISSUE_NUMBER:-0}" \
2550
- "template=${PIPELINE_NAME}" \
2551
- "success=$pipeline_success" \
2552
- "duration_s=${total_dur_s:-0}" \
2553
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}"
2554
-
2555
- # Risk prediction vs actual failure
2556
- local predicted_risk="${INTELLIGENCE_RISK_SCORE:-0}"
2557
- emit_event "risk.outcome" \
2558
- "issue=${ISSUE_NUMBER:-0}" \
2559
- "predicted_risk=$predicted_risk" \
2560
- "actual_failure=$([[ "$exit_code" -ne 0 ]] && echo "true" || echo "false")"
2561
-
2562
- # Per-stage model outcome events (read from stage timings)
2563
- local routing_log="${ARTIFACTS_DIR}/model-routing.log"
2564
- if [[ -f "$routing_log" ]]; then
2565
- while IFS='|' read -r s_stage s_model s_success; do
2566
- [[ -z "$s_stage" ]] && continue
2567
- emit_event "model.outcome" \
2568
- "issue=${ISSUE_NUMBER:-0}" \
2569
- "stage=$s_stage" \
2570
- "model=$s_model" \
2571
- "success=$s_success"
2572
- done < "$routing_log"
2573
- fi
2574
-
2575
- # Record pipeline outcome for model routing feedback loop
2576
- if type optimize_analyze_outcome >/dev/null 2>&1; then
2577
- optimize_analyze_outcome "$STATE_FILE" 2>/dev/null || true
2578
- fi
2579
-
2580
- # Auto-learn after pipeline completion (non-blocking)
2581
- if type optimize_tune_templates &>/dev/null; then
2582
- (
2583
- optimize_tune_templates 2>/dev/null
2584
- optimize_learn_iterations 2>/dev/null
2585
- optimize_route_models 2>/dev/null
2586
- optimize_learn_risk_keywords 2>/dev/null
2587
- ) &
2588
- fi
2589
-
2590
- if type memory_finalize_pipeline >/dev/null 2>&1; then
2591
- memory_finalize_pipeline "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
2592
- fi
2593
-
2594
- # Broadcast discovery for cross-pipeline learning
2595
- if type broadcast_discovery >/dev/null 2>&1; then
2596
- local _disc_result="failure"
2597
- [[ "$exit_code" -eq 0 ]] && _disc_result="success"
2598
- local _disc_files=""
2599
- _disc_files=$(git diff --name-only HEAD~1 HEAD 2>/dev/null | head -20 | tr '\n' ',' || true)
2600
- broadcast_discovery "pipeline_${_disc_result}" "${_disc_files:-unknown}" \
2601
- "Pipeline ${_disc_result} for issue #${ISSUE_NUMBER:-0} (${PIPELINE_NAME:-unknown} template, stage=${CURRENT_STAGE_ID:-unknown})" \
2602
- "${_disc_result}" 2>/dev/null || true
2603
- fi
2604
-
2605
- # Emit cost event — prefer actual cost from Claude CLI when available
2606
- local model_key="${MODEL:-sonnet}"
2607
- local total_cost
2608
- if [[ -n "${TOTAL_COST_USD:-}" && "${TOTAL_COST_USD}" != "0" && "${TOTAL_COST_USD}" != "null" ]]; then
2609
- total_cost="${TOTAL_COST_USD}"
2610
- else
2611
- # Fallback: estimate from token counts and model rates
2612
- local input_cost output_cost
2613
- input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2614
- output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2615
- total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
2616
- fi
2617
-
2618
- emit_event "pipeline.cost" \
2619
- "input_tokens=$TOTAL_INPUT_TOKENS" \
2620
- "output_tokens=$TOTAL_OUTPUT_TOKENS" \
2621
- "model=$model_key" \
2622
- "cost_usd=$total_cost"
2623
-
2624
- # Persist cost entry to costs.json + SQLite (was missing — tokens accumulated but never written)
2625
- if type cost_record >/dev/null 2>&1; then
2626
- cost_record "$TOTAL_INPUT_TOKENS" "$TOTAL_OUTPUT_TOKENS" "$model_key" "pipeline" "${ISSUE_NUMBER:-}" 2>/dev/null || true
2627
- fi
2628
-
2629
- # Record pipeline outcome for Thompson sampling / outcome-based learning
2630
- if type db_record_outcome >/dev/null 2>&1; then
2631
- local _outcome_success=0
2632
- [[ "$exit_code" -eq 0 ]] && _outcome_success=1
2633
- local _outcome_complexity="medium"
2634
- [[ "${INTELLIGENCE_COMPLEXITY:-5}" -le 3 ]] && _outcome_complexity="low"
2635
- [[ "${INTELLIGENCE_COMPLEXITY:-5}" -ge 7 ]] && _outcome_complexity="high"
2636
- db_record_outcome \
2637
- "${SHIPWRIGHT_PIPELINE_ID:-pipeline-$$-${ISSUE_NUMBER:-0}}" \
2638
- "${ISSUE_NUMBER:-}" \
2639
- "${PIPELINE_NAME:-standard}" \
2640
- "$_outcome_success" \
2641
- "${total_dur_s:-0}" \
2642
- "${SELF_HEAL_COUNT:-0}" \
2643
- "${total_cost:-0}" \
2644
- "$_outcome_complexity" 2>/dev/null || true
2645
- fi
2646
-
2647
- # Validate cost prediction against actual (after total_cost is computed)
2648
- if [[ -n "${PREDICTED_COST:-}" ]] && type intelligence_validate_prediction >/dev/null 2>&1; then
2649
- intelligence_validate_prediction "cost" "$PREDICTED_COST" "$total_cost" 2>/dev/null || true
2650
- fi
2651
-
2652
- return $exit_code
2653
- }
2654
-
2655
# Resume an interrupted pipeline from its persisted state file.
# Re-creates the working directories, reloads saved state, then re-enters
# the main stage loop (run_pipeline skips stages already marked complete).
pipeline_resume() {
  setup_dirs
  resume_state
  printf '\n'
  run_pipeline
}
2661
-
2662
# Render a human-readable status report for the active pipeline.
# Reads $STATE_FILE, a markdown-ish state file with a frontmatter block
# delimited by "---" lines followed by a "stages:" map. Parsing is done
# line-by-line with parameter expansion — no YAML parser is required.
# Globals (read): STATE_FILE, ARTIFACTS_DIR, color vars (PURPLE, BOLD, …).
# Outputs: formatted status to stdout. Returns 0 even when no pipeline exists.
pipeline_status() {
  setup_dirs

  # No state file means nothing is running — print a hint and bail early.
  if [[ ! -f "$STATE_FILE" ]]; then
    info "No active pipeline."
    echo -e " Start one: ${DIM}shipwright pipeline start --goal \"...\"${RESET}"
    return
  fi

  echo ""
  echo -e "${PURPLE}${BOLD}━━━ Pipeline Status ━━━${RESET}"
  echo ""

  # Pass 1: extract frontmatter fields between the first pair of "---" lines.
  local p_name="" p_goal="" p_status="" p_branch="" p_stage="" p_started="" p_issue="" p_elapsed="" p_pr=""
  local in_frontmatter=false
  while IFS= read -r line; do
    if [[ "$line" == "---" ]]; then
      # Second "---" closes the frontmatter; first one opens it.
      if $in_frontmatter; then break; else in_frontmatter=true; continue; fi
    fi
    if $in_frontmatter; then
      # xargs trims surrounding whitespace; sed strips optional quoting on
      # free-text fields (goal, branch, issue) that may contain spaces.
      case "$line" in
        pipeline:*) p_name="$(echo "${line#pipeline:}" | xargs)" ;;
        goal:*) p_goal="$(echo "${line#goal:}" | sed 's/^ *"//;s/" *$//')" ;;
        status:*) p_status="$(echo "${line#status:}" | xargs)" ;;
        branch:*) p_branch="$(echo "${line#branch:}" | sed 's/^ *"//;s/" *$//')" ;;
        current_stage:*) p_stage="$(echo "${line#current_stage:}" | xargs)" ;;
        started_at:*) p_started="$(echo "${line#started_at:}" | xargs)" ;;
        issue:*) p_issue="$(echo "${line#issue:}" | sed 's/^ *"//;s/" *$//')" ;;
        elapsed:*) p_elapsed="$(echo "${line#elapsed:}" | xargs)" ;;
        pr_number:*) p_pr="$(echo "${line#pr_number:}" | xargs)" ;;
      esac
    fi
  done < "$STATE_FILE"

  # Map overall pipeline status to a colored glyph.
  local status_icon
  case "$p_status" in
    running) status_icon="${CYAN}●${RESET}" ;;
    complete) status_icon="${GREEN}✓${RESET}" ;;
    paused) status_icon="${YELLOW}⏸${RESET}" ;;
    interrupted) status_icon="${YELLOW}⚡${RESET}" ;;
    failed) status_icon="${RED}✗${RESET}" ;;
    aborted) status_icon="${RED}◼${RESET}" ;;
    *) status_icon="${DIM}○${RESET}" ;;
  esac

  echo -e " ${BOLD}Pipeline:${RESET} $p_name"
  echo -e " ${BOLD}Goal:${RESET} $p_goal"
  echo -e " ${BOLD}Status:${RESET} $status_icon $p_status"
  # Optional fields are only printed when present in the frontmatter.
  [[ -n "$p_branch" ]] && echo -e " ${BOLD}Branch:${RESET} $p_branch"
  [[ -n "$p_issue" ]] && echo -e " ${BOLD}Issue:${RESET} $p_issue"
  [[ -n "$p_pr" ]] && echo -e " ${BOLD}PR:${RESET} #$p_pr"
  [[ -n "$p_stage" ]] && echo -e " ${BOLD}Stage:${RESET} $p_stage"
  [[ -n "$p_started" ]] && echo -e " ${BOLD}Started:${RESET} $p_started"
  [[ -n "$p_elapsed" ]] && echo -e " ${BOLD}Elapsed:${RESET} $p_elapsed"

  echo ""
  echo -e " ${BOLD}Stages:${RESET}"

  # Pass 2: re-read the file and print each entry under "stages:".
  local in_stages=false
  while IFS= read -r line; do
    if [[ "$line" == "stages:" ]]; then
      in_stages=true; continue
    fi
    if $in_stages; then
      # Stage entries are indented; a "---" or a line NOT starting with a
      # literal space ends the stages map. NOTE(review): the quoted regex
      # ^" " matches only a leading space, not a tab — assumes the state
      # writer indents with spaces; confirm against write_state.
      if [[ "$line" == "---" || ! "$line" =~ ^" " ]]; then break; fi
      local trimmed
      trimmed="$(echo "$line" | xargs)"
      if [[ "$trimmed" == *":"* ]]; then
        # Split "stage_id: status" — the status strip assumes a space
        # after the colon ("#*: ").
        local sid="${trimmed%%:*}"
        local sst="${trimmed#*: }"
        local s_icon
        case "$sst" in
          complete) s_icon="${GREEN}✓${RESET}" ;;
          running) s_icon="${CYAN}●${RESET}" ;;
          failed) s_icon="${RED}✗${RESET}" ;;
          *) s_icon="${DIM}○${RESET}" ;;
        esac
        echo -e " $s_icon $sid"
      fi
    fi
  done < "$STATE_FILE"

  # Summarize artifact files, if any were produced.
  if [[ -d "$ARTIFACTS_DIR" ]]; then
    local artifact_count
    artifact_count=$(find "$ARTIFACTS_DIR" -type f 2>/dev/null | wc -l | xargs)
    if [[ "$artifact_count" -gt 0 ]]; then
      echo ""
      echo -e " ${BOLD}Artifacts:${RESET} ($artifact_count files)"
      # Display-only listing; output is indented for readability.
      ls "$ARTIFACTS_DIR" 2>/dev/null | sed 's/^/ /'
    fi
  fi
  echo ""
}
2755
-
2756
# Abort the active pipeline: flip the persisted state to "aborted" and, when
# an issue is linked, update GitHub (remove in-progress label, leave a comment).
# Globals (read):  STATE_FILE, ISSUE_NUMBER, CURRENT_STAGE_ID, DIM, RESET
# Globals (write): PIPELINE_STATUS (via write_state)
# Returns: 0; aborting a complete/already-aborted pipeline is a no-op.
pipeline_abort() {
  setup_dirs

  if [[ ! -f "$STATE_FILE" ]]; then
    info "No active pipeline to abort."
    return
  fi

  # Read current status straight from the state-file frontmatter.
  local current_status
  current_status=$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)

  if [[ "$current_status" == "complete" || "$current_status" == "aborted" ]]; then
    info "Pipeline already $current_status."
    return
  fi

  # Best-effort reload so write_state persists full context, then flip status.
  resume_state 2>/dev/null || true
  PIPELINE_STATUS="aborted"
  write_state

  # Update GitHub. Guard with ${ISSUE_NUMBER:-} so an unset variable cannot
  # trip `set -u` — consistent with every other ISSUE_NUMBER use in this file.
  if [[ -n "${ISSUE_NUMBER:-}" ]]; then
    gh_init
    gh_remove_label "$ISSUE_NUMBER" "pipeline/in-progress"
    # Use CURRENT_STAGE_ID — the variable the stage loop actually sets
    # (the previous ${CURRENT_STAGE:-unknown} referenced a name not used
    # anywhere else and always reported "unknown").
    gh_comment_issue "$ISSUE_NUMBER" "⏹️ **Pipeline aborted** at stage: ${CURRENT_STAGE_ID:-unknown}"
  fi

  warn "Pipeline aborted."
  echo -e " State saved at: ${DIM}$STATE_FILE${RESET}"
}
2786
-
2787
# List every available pipeline template: repo-local templates first, then
# user-level ones under ~/.shipwright/pipelines. For each template prints
# its name, description, enabled stage chain, and approval-gate count.
pipeline_list() {
  local search_dirs=(
    "$REPO_DIR/templates/pipelines"
    "$HOME/.shipwright/pipelines"
  )

  echo ""
  echo -e "${PURPLE}${BOLD}━━━ Pipeline Templates ━━━${RESET}"
  echo ""

  local any_template=false
  local tpl_dir tpl
  for tpl_dir in "${search_dirs[@]}"; do
    # Skip locations that don't exist (e.g. no user-level template dir).
    [[ -d "$tpl_dir" ]] || continue
    for tpl in "$tpl_dir"/*.json; do
      # Guard against the literal unmatched-glob pattern.
      [[ -f "$tpl" ]] || continue
      any_template=true
      local tpl_name tpl_desc tpl_chain tpl_gates
      tpl_name=$(jq -r '.name' "$tpl" 2>/dev/null)
      tpl_desc=$(jq -r '.description' "$tpl" 2>/dev/null)
      tpl_chain=$(jq -r '[.stages[] | select(.enabled == true) | .id] | join(" → ")' "$tpl" 2>/dev/null)
      tpl_gates=$(jq '[.stages[] | select(.gate == "approve" and .enabled == true)] | length' "$tpl" 2>/dev/null)
      echo -e " ${CYAN}${BOLD}$tpl_name${RESET}"
      echo -e " $tpl_desc"
      echo -e " ${DIM}$tpl_chain${RESET}"
      echo -e " ${DIM}(${tpl_gates} approval gates)${RESET}"
      echo ""
    done
  done

  if [[ "$any_template" != "true" ]]; then
    warn "No pipeline templates found."
    echo -e " Expected at: ${DIM}templates/pipelines/*.json${RESET}"
  fi
}
2822
-
2823
# Show one pipeline template in detail: description, default settings,
# the stage list with enabled/gate markers, and a GitHub-integration summary.
# Resolves the template name from PIPELINE_NAME_ARG, falling back to
# PIPELINE_NAME; exits 1 when no matching template file is found.
pipeline_show() {
  local tpl cfg
  tpl="${PIPELINE_NAME_ARG:-$PIPELINE_NAME}"

  if ! cfg=$(find_pipeline_config "$tpl"); then
    error "Pipeline template not found: $tpl"
    echo -e " Available: ${DIM}shipwright pipeline list${RESET}"
    exit 1
  fi

  echo ""
  echo -e "${PURPLE}${BOLD}━━━ Pipeline: $(jq -r '.name' "$cfg") ━━━${RESET}"
  echo -e " $(jq -r '.description' "$cfg")"
  echo ""

  echo -e "${BOLD} Defaults:${RESET}"
  jq -r '.defaults | to_entries[] | " \(.key): \(.value)"' "$cfg" 2>/dev/null
  echo ""

  # ✓ = enabled, ○ = disabled; gate/skip markers appended per stage.
  echo -e "${BOLD} Stages:${RESET}"
  jq -r '.stages[] |
    (if .enabled then " ✓" else " ○" end) +
    " \(.id)" +
    (if .gate == "approve" then " [gate: approve]" elif .gate == "skip" then " [skip]" else "" end)
  ' "$cfg" 2>/dev/null
  echo ""

  echo -e "${BOLD} GitHub Integration:${RESET}"
  echo -e " • Issue: self-assign, label lifecycle, progress comments"
  echo -e " • PR: labels, milestone, reviewers auto-propagated"
  echo -e " • Validation: auto-close issue on completion"
  echo ""
}
2856
-
2857
246
  # ─── Main ───────────────────────────────────────────────────────────────────
2858
247
 
2859
248
  case "$SUBCOMMAND" in