shipwright-cli 3.1.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (283) hide show
  1. package/.claude/agents/code-reviewer.md +2 -0
  2. package/.claude/agents/devops-engineer.md +2 -0
  3. package/.claude/agents/doc-fleet-agent.md +2 -0
  4. package/.claude/agents/pipeline-agent.md +2 -0
  5. package/.claude/agents/shell-script-specialist.md +2 -0
  6. package/.claude/agents/test-specialist.md +2 -0
  7. package/.claude/hooks/agent-crash-capture.sh +32 -0
  8. package/.claude/hooks/post-tool-use.sh +3 -2
  9. package/.claude/hooks/pre-tool-use.sh +35 -3
  10. package/README.md +22 -8
  11. package/claude-code/hooks/config-change.sh +18 -0
  12. package/claude-code/hooks/instructions-reloaded.sh +7 -0
  13. package/claude-code/hooks/worktree-create.sh +25 -0
  14. package/claude-code/hooks/worktree-remove.sh +20 -0
  15. package/config/code-constitution.json +130 -0
  16. package/config/defaults.json +25 -2
  17. package/config/policy.json +1 -1
  18. package/dashboard/middleware/auth.ts +134 -0
  19. package/dashboard/middleware/constants.ts +21 -0
  20. package/dashboard/public/index.html +8 -6
  21. package/dashboard/public/styles.css +176 -97
  22. package/dashboard/routes/auth.ts +38 -0
  23. package/dashboard/server.ts +117 -25
  24. package/dashboard/services/config.ts +26 -0
  25. package/dashboard/services/db.ts +118 -0
  26. package/dashboard/src/canvas/pixel-agent.ts +298 -0
  27. package/dashboard/src/canvas/pixel-sprites.ts +440 -0
  28. package/dashboard/src/canvas/shipyard-effects.ts +367 -0
  29. package/dashboard/src/canvas/shipyard-scene.ts +616 -0
  30. package/dashboard/src/canvas/submarine-layout.ts +267 -0
  31. package/dashboard/src/components/header.ts +8 -7
  32. package/dashboard/src/core/api.ts +5 -0
  33. package/dashboard/src/core/router.ts +1 -0
  34. package/dashboard/src/design/submarine-theme.ts +253 -0
  35. package/dashboard/src/main.ts +2 -0
  36. package/dashboard/src/types/api.ts +12 -1
  37. package/dashboard/src/views/activity.ts +2 -1
  38. package/dashboard/src/views/metrics.ts +69 -1
  39. package/dashboard/src/views/shipyard.ts +39 -0
  40. package/dashboard/types/index.ts +166 -0
  41. package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
  42. package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
  43. package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
  44. package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
  45. package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
  46. package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
  47. package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
  48. package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
  49. package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
  50. package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
  51. package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
  52. package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
  53. package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
  54. package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
  55. package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
  56. package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
  57. package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
  58. package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
  59. package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
  60. package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
  61. package/docs/research/RESEARCH_INDEX.md +439 -0
  62. package/docs/research/RESEARCH_SOURCES.md +440 -0
  63. package/docs/research/RESEARCH_SUMMARY.txt +275 -0
  64. package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
  65. package/package.json +2 -2
  66. package/scripts/lib/adaptive-model.sh +427 -0
  67. package/scripts/lib/adaptive-timeout.sh +316 -0
  68. package/scripts/lib/audit-trail.sh +309 -0
  69. package/scripts/lib/auto-recovery.sh +471 -0
  70. package/scripts/lib/bandit-selector.sh +431 -0
  71. package/scripts/lib/bootstrap.sh +104 -2
  72. package/scripts/lib/causal-graph.sh +455 -0
  73. package/scripts/lib/compat.sh +126 -0
  74. package/scripts/lib/compound-audit.sh +337 -0
  75. package/scripts/lib/constitutional.sh +454 -0
  76. package/scripts/lib/context-budget.sh +359 -0
  77. package/scripts/lib/convergence.sh +594 -0
  78. package/scripts/lib/cost-optimizer.sh +634 -0
  79. package/scripts/lib/daemon-adaptive.sh +14 -2
  80. package/scripts/lib/daemon-dispatch.sh +106 -17
  81. package/scripts/lib/daemon-failure.sh +34 -4
  82. package/scripts/lib/daemon-patrol.sh +25 -4
  83. package/scripts/lib/daemon-poll-github.sh +361 -0
  84. package/scripts/lib/daemon-poll-health.sh +299 -0
  85. package/scripts/lib/daemon-poll.sh +27 -611
  86. package/scripts/lib/daemon-state.sh +119 -66
  87. package/scripts/lib/daemon-triage.sh +10 -0
  88. package/scripts/lib/dod-scorecard.sh +442 -0
  89. package/scripts/lib/error-actionability.sh +300 -0
  90. package/scripts/lib/formal-spec.sh +461 -0
  91. package/scripts/lib/helpers.sh +180 -5
  92. package/scripts/lib/intent-analysis.sh +409 -0
  93. package/scripts/lib/loop-convergence.sh +350 -0
  94. package/scripts/lib/loop-iteration.sh +682 -0
  95. package/scripts/lib/loop-progress.sh +48 -0
  96. package/scripts/lib/loop-restart.sh +185 -0
  97. package/scripts/lib/memory-effectiveness.sh +506 -0
  98. package/scripts/lib/mutation-executor.sh +352 -0
  99. package/scripts/lib/outcome-feedback.sh +521 -0
  100. package/scripts/lib/pipeline-cli.sh +336 -0
  101. package/scripts/lib/pipeline-commands.sh +1216 -0
  102. package/scripts/lib/pipeline-detection.sh +101 -3
  103. package/scripts/lib/pipeline-execution.sh +897 -0
  104. package/scripts/lib/pipeline-github.sh +28 -3
  105. package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
  106. package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
  107. package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
  108. package/scripts/lib/pipeline-intelligence.sh +104 -1138
  109. package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
  110. package/scripts/lib/pipeline-quality-checks.sh +17 -711
  111. package/scripts/lib/pipeline-quality-gates.sh +563 -0
  112. package/scripts/lib/pipeline-stages-build.sh +730 -0
  113. package/scripts/lib/pipeline-stages-delivery.sh +965 -0
  114. package/scripts/lib/pipeline-stages-intake.sh +1133 -0
  115. package/scripts/lib/pipeline-stages-monitor.sh +407 -0
  116. package/scripts/lib/pipeline-stages-review.sh +1022 -0
  117. package/scripts/lib/pipeline-stages.sh +161 -2901
  118. package/scripts/lib/pipeline-state.sh +36 -5
  119. package/scripts/lib/pipeline-util.sh +487 -0
  120. package/scripts/lib/policy-learner.sh +438 -0
  121. package/scripts/lib/process-reward.sh +493 -0
  122. package/scripts/lib/project-detect.sh +649 -0
  123. package/scripts/lib/quality-profile.sh +334 -0
  124. package/scripts/lib/recruit-commands.sh +885 -0
  125. package/scripts/lib/recruit-learning.sh +739 -0
  126. package/scripts/lib/recruit-roles.sh +648 -0
  127. package/scripts/lib/reward-aggregator.sh +458 -0
  128. package/scripts/lib/rl-optimizer.sh +362 -0
  129. package/scripts/lib/root-cause.sh +427 -0
  130. package/scripts/lib/scope-enforcement.sh +445 -0
  131. package/scripts/lib/session-restart.sh +493 -0
  132. package/scripts/lib/skill-memory.sh +300 -0
  133. package/scripts/lib/skill-registry.sh +775 -0
  134. package/scripts/lib/spec-driven.sh +476 -0
  135. package/scripts/lib/test-helpers.sh +18 -7
  136. package/scripts/lib/test-holdout.sh +429 -0
  137. package/scripts/lib/test-optimizer.sh +511 -0
  138. package/scripts/shipwright-file-suggest.sh +45 -0
  139. package/scripts/skills/adversarial-quality.md +61 -0
  140. package/scripts/skills/api-design.md +44 -0
  141. package/scripts/skills/architecture-design.md +50 -0
  142. package/scripts/skills/brainstorming.md +43 -0
  143. package/scripts/skills/data-pipeline.md +44 -0
  144. package/scripts/skills/deploy-safety.md +64 -0
  145. package/scripts/skills/documentation.md +38 -0
  146. package/scripts/skills/frontend-design.md +45 -0
  147. package/scripts/skills/generated/.gitkeep +0 -0
  148. package/scripts/skills/generated/_refinements/.gitkeep +0 -0
  149. package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
  150. package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
  151. package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
  152. package/scripts/skills/generated/cli-version-management.md +29 -0
  153. package/scripts/skills/generated/collection-system-validation.md +99 -0
  154. package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
  155. package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
  156. package/scripts/skills/generated/test-parallelization-detection.md +65 -0
  157. package/scripts/skills/observability.md +79 -0
  158. package/scripts/skills/performance.md +48 -0
  159. package/scripts/skills/pr-quality.md +49 -0
  160. package/scripts/skills/product-thinking.md +43 -0
  161. package/scripts/skills/security-audit.md +49 -0
  162. package/scripts/skills/systematic-debugging.md +40 -0
  163. package/scripts/skills/testing-strategy.md +47 -0
  164. package/scripts/skills/two-stage-review.md +52 -0
  165. package/scripts/skills/validation-thoroughness.md +55 -0
  166. package/scripts/sw +9 -3
  167. package/scripts/sw-activity.sh +9 -8
  168. package/scripts/sw-adaptive.sh +8 -7
  169. package/scripts/sw-adversarial.sh +2 -1
  170. package/scripts/sw-architecture-enforcer.sh +3 -1
  171. package/scripts/sw-auth.sh +12 -2
  172. package/scripts/sw-autonomous.sh +5 -1
  173. package/scripts/sw-changelog.sh +4 -1
  174. package/scripts/sw-checkpoint.sh +2 -1
  175. package/scripts/sw-ci.sh +15 -6
  176. package/scripts/sw-cleanup.sh +4 -26
  177. package/scripts/sw-code-review.sh +45 -20
  178. package/scripts/sw-connect.sh +2 -1
  179. package/scripts/sw-context.sh +2 -1
  180. package/scripts/sw-cost.sh +107 -5
  181. package/scripts/sw-daemon.sh +71 -11
  182. package/scripts/sw-dashboard.sh +3 -1
  183. package/scripts/sw-db.sh +71 -20
  184. package/scripts/sw-decide.sh +8 -2
  185. package/scripts/sw-decompose.sh +360 -17
  186. package/scripts/sw-deps.sh +4 -1
  187. package/scripts/sw-developer-simulation.sh +4 -1
  188. package/scripts/sw-discovery.sh +378 -5
  189. package/scripts/sw-doc-fleet.sh +4 -1
  190. package/scripts/sw-docs-agent.sh +3 -1
  191. package/scripts/sw-docs.sh +2 -1
  192. package/scripts/sw-doctor.sh +453 -2
  193. package/scripts/sw-dora.sh +4 -1
  194. package/scripts/sw-durable.sh +12 -7
  195. package/scripts/sw-e2e-orchestrator.sh +17 -16
  196. package/scripts/sw-eventbus.sh +13 -4
  197. package/scripts/sw-evidence.sh +364 -12
  198. package/scripts/sw-feedback.sh +550 -9
  199. package/scripts/sw-fix.sh +20 -1
  200. package/scripts/sw-fleet-discover.sh +6 -2
  201. package/scripts/sw-fleet-viz.sh +9 -4
  202. package/scripts/sw-fleet.sh +5 -1
  203. package/scripts/sw-github-app.sh +18 -4
  204. package/scripts/sw-github-checks.sh +3 -2
  205. package/scripts/sw-github-deploy.sh +3 -2
  206. package/scripts/sw-github-graphql.sh +18 -7
  207. package/scripts/sw-guild.sh +5 -1
  208. package/scripts/sw-heartbeat.sh +5 -30
  209. package/scripts/sw-hello.sh +67 -0
  210. package/scripts/sw-hygiene.sh +10 -3
  211. package/scripts/sw-incident.sh +273 -5
  212. package/scripts/sw-init.sh +18 -2
  213. package/scripts/sw-instrument.sh +10 -2
  214. package/scripts/sw-intelligence.sh +44 -7
  215. package/scripts/sw-jira.sh +5 -1
  216. package/scripts/sw-launchd.sh +2 -1
  217. package/scripts/sw-linear.sh +4 -1
  218. package/scripts/sw-logs.sh +4 -1
  219. package/scripts/sw-loop.sh +436 -1076
  220. package/scripts/sw-memory.sh +357 -3
  221. package/scripts/sw-mission-control.sh +6 -1
  222. package/scripts/sw-model-router.sh +483 -27
  223. package/scripts/sw-otel.sh +15 -4
  224. package/scripts/sw-oversight.sh +14 -5
  225. package/scripts/sw-patrol-meta.sh +334 -0
  226. package/scripts/sw-pipeline-composer.sh +7 -1
  227. package/scripts/sw-pipeline-vitals.sh +12 -6
  228. package/scripts/sw-pipeline.sh +54 -2653
  229. package/scripts/sw-pm.sh +16 -8
  230. package/scripts/sw-pr-lifecycle.sh +2 -1
  231. package/scripts/sw-predictive.sh +17 -5
  232. package/scripts/sw-prep.sh +185 -2
  233. package/scripts/sw-ps.sh +5 -25
  234. package/scripts/sw-public-dashboard.sh +17 -4
  235. package/scripts/sw-quality.sh +14 -6
  236. package/scripts/sw-reaper.sh +8 -25
  237. package/scripts/sw-recruit.sh +156 -2303
  238. package/scripts/sw-regression.sh +19 -12
  239. package/scripts/sw-release-manager.sh +3 -1
  240. package/scripts/sw-release.sh +4 -1
  241. package/scripts/sw-remote.sh +3 -1
  242. package/scripts/sw-replay.sh +7 -1
  243. package/scripts/sw-retro.sh +158 -1
  244. package/scripts/sw-review-rerun.sh +3 -1
  245. package/scripts/sw-scale.sh +14 -5
  246. package/scripts/sw-security-audit.sh +6 -1
  247. package/scripts/sw-self-optimize.sh +173 -6
  248. package/scripts/sw-session.sh +9 -3
  249. package/scripts/sw-setup.sh +3 -1
  250. package/scripts/sw-stall-detector.sh +406 -0
  251. package/scripts/sw-standup.sh +15 -7
  252. package/scripts/sw-status.sh +3 -1
  253. package/scripts/sw-strategic.sh +14 -6
  254. package/scripts/sw-stream.sh +13 -4
  255. package/scripts/sw-swarm.sh +20 -7
  256. package/scripts/sw-team-stages.sh +13 -6
  257. package/scripts/sw-templates.sh +7 -31
  258. package/scripts/sw-testgen.sh +17 -6
  259. package/scripts/sw-tmux-pipeline.sh +4 -1
  260. package/scripts/sw-tmux-role-color.sh +2 -0
  261. package/scripts/sw-tmux-status.sh +1 -1
  262. package/scripts/sw-tmux.sh +37 -1
  263. package/scripts/sw-trace.sh +3 -1
  264. package/scripts/sw-tracker-github.sh +3 -0
  265. package/scripts/sw-tracker-jira.sh +3 -0
  266. package/scripts/sw-tracker-linear.sh +3 -0
  267. package/scripts/sw-tracker.sh +3 -1
  268. package/scripts/sw-triage.sh +3 -2
  269. package/scripts/sw-upgrade.sh +3 -1
  270. package/scripts/sw-ux.sh +5 -2
  271. package/scripts/sw-webhook.sh +5 -2
  272. package/scripts/sw-widgets.sh +9 -4
  273. package/scripts/sw-worktree.sh +15 -3
  274. package/scripts/test-skill-injection.sh +1233 -0
  275. package/templates/pipelines/autonomous.json +27 -3
  276. package/templates/pipelines/cost-aware.json +34 -8
  277. package/templates/pipelines/deployed.json +12 -0
  278. package/templates/pipelines/enterprise.json +12 -0
  279. package/templates/pipelines/fast.json +6 -0
  280. package/templates/pipelines/full.json +27 -3
  281. package/templates/pipelines/hotfix.json +6 -0
  282. package/templates/pipelines/standard.json +12 -0
  283. package/templates/pipelines/tdd.json +12 -0
@@ -1,4 +1,5 @@
1
1
  #!/usr/bin/env bash
2
+ # shellcheck disable=SC2034 # config vars used by sourced scripts and subshells
2
3
  # ╔═══════════════════════════════════════════════════════════════════════════╗
3
4
  # ║ shipwright pipeline — Autonomous Feature Delivery (Idea → Production) ║
4
5
  # ║ Full GitHub integration · Auto-detection · Task tracking · Metrics ║
@@ -10,8 +11,9 @@ trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
10
11
  unset CLAUDECODE 2>/dev/null || true
11
12
  # Ignore SIGHUP so tmux attach/detach doesn't kill long-running plan/design/review stages
12
13
  trap '' HUP
14
+ trap '' SIGPIPE
13
15
 
14
- VERSION="3.1.0"
16
+ VERSION="3.3.0"
15
17
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
16
18
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
17
19
 
@@ -39,12 +41,21 @@ fi
39
41
  [[ -f "$SCRIPT_DIR/lib/pipeline-github.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-github.sh"
40
42
  # shellcheck source=lib/pipeline-detection.sh
41
43
  [[ -f "$SCRIPT_DIR/lib/pipeline-detection.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-detection.sh"
44
+ # Adaptive Stage Timeout Engine (optional)
45
+ # shellcheck source=lib/adaptive-timeout.sh
46
+ [[ -f "$SCRIPT_DIR/lib/adaptive-timeout.sh" ]] && source "$SCRIPT_DIR/lib/adaptive-timeout.sh" 2>/dev/null || true
42
47
  # shellcheck source=lib/pipeline-quality-checks.sh
43
48
  [[ -f "$SCRIPT_DIR/lib/pipeline-quality-checks.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-quality-checks.sh"
44
49
  # shellcheck source=lib/pipeline-intelligence.sh
45
50
  [[ -f "$SCRIPT_DIR/lib/pipeline-intelligence.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-intelligence.sh"
46
51
  # shellcheck source=lib/pipeline-stages.sh
47
52
  [[ -f "$SCRIPT_DIR/lib/pipeline-stages.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-stages.sh"
53
+ # Audit trail for compliance-grade pipeline traceability
54
+ # shellcheck source=lib/audit-trail.sh
55
+ [[ -f "$SCRIPT_DIR/lib/audit-trail.sh" ]] && source "$SCRIPT_DIR/lib/audit-trail.sh" 2>/dev/null || true
56
+ # Root cause classifier for failure analysis and platform issue auto-creation
57
+ # shellcheck source=lib/root-cause.sh
58
+ [[ -f "$SCRIPT_DIR/lib/root-cause.sh" ]] && source "$SCRIPT_DIR/lib/root-cause.sh" 2>/dev/null || true
48
59
  PIPELINE_COVERAGE_THRESHOLD="${PIPELINE_COVERAGE_THRESHOLD:-60}"
49
60
  PIPELINE_QUALITY_GATE_THRESHOLD="${PIPELINE_QUALITY_GATE_THRESHOLD:-70}"
50
61
 
@@ -91,14 +102,25 @@ fi
91
102
  if [[ -f "$SCRIPT_DIR/sw-durable.sh" ]]; then
92
103
  source "$SCRIPT_DIR/sw-durable.sh"
93
104
  fi
94
- # shellcheck source=sw-db.sh — for db_save_checkpoint/db_load_checkpoint (durable workflows)
105
+ # shellcheck source=sw-db.sh
106
+ # for db_save_checkpoint/db_load_checkpoint (durable workflows)
95
107
  [[ -f "$SCRIPT_DIR/sw-db.sh" ]] && source "$SCRIPT_DIR/sw-db.sh"
96
108
  # Ensure DB schema exists so emit_event → db_add_event can write rows (CREATE IF NOT EXISTS is idempotent)
97
109
  if type init_schema >/dev/null 2>&1 && type check_sqlite3 >/dev/null 2>&1 && check_sqlite3 2>/dev/null; then
98
110
  init_schema 2>/dev/null || true
99
111
  fi
100
- # shellcheck source=sw-cost.sh — for cost_record persistence to costs.json + DB
112
+ # shellcheck source=sw-cost.sh
113
+ # for cost_record persistence to costs.json + DB
101
114
  [[ -f "$SCRIPT_DIR/sw-cost.sh" ]] && source "$SCRIPT_DIR/sw-cost.sh"
115
+ # shellcheck source=lib/cost-optimizer.sh
116
+ # for dynamic cost-performance pipeline optimization (budget checks, reductions, burst mode)
117
+ [[ -f "$SCRIPT_DIR/lib/cost-optimizer.sh" ]] && source "$SCRIPT_DIR/lib/cost-optimizer.sh"
118
+ # shellcheck source=lib/skill-registry.sh
119
+ # for skill_analyze_outcome (AI outcome learning)
120
+ [[ -f "$SCRIPT_DIR/lib/skill-registry.sh" ]] && source "$SCRIPT_DIR/lib/skill-registry.sh"
121
+ # shellcheck source=lib/skill-memory.sh
122
+ # for skill memory operations
123
+ [[ -f "$SCRIPT_DIR/lib/skill-memory.sh" ]] && source "$SCRIPT_DIR/lib/skill-memory.sh"
102
124
 
103
125
  # ─── GitHub API Modules (optional) ─────────────────────────────────────────
104
126
  # shellcheck source=sw-github-graphql.sh
@@ -108,144 +130,15 @@ fi
108
130
  # shellcheck source=sw-github-deploy.sh
109
131
  [[ -f "$SCRIPT_DIR/sw-github-deploy.sh" ]] && source "$SCRIPT_DIR/sw-github-deploy.sh"
110
132
 
111
- # Parse coverage percentage from test output — multi-framework patterns
112
- # Usage: parse_coverage_from_output <log_file>
113
- # Outputs coverage percentage or empty string
114
- parse_coverage_from_output() {
115
- local log_file="$1"
116
- [[ ! -f "$log_file" ]] && return
117
- local cov=""
118
- # Jest/Istanbul: "Statements : 85.5%"
119
- cov=$(grep -oE 'Statements\s*:\s*[0-9.]+' "$log_file" 2>/dev/null | grep -oE '[0-9.]+$' || true)
120
- # Istanbul table: "All files | 85.5"
121
- [[ -z "$cov" ]] && cov=$(grep -oE 'All files\s*\|\s*[0-9.]+' "$log_file" 2>/dev/null | grep -oE '[0-9.]+$' || true)
122
- # pytest-cov: "TOTAL 500 75 85%"
123
- [[ -z "$cov" ]] && cov=$(grep -oE 'TOTAL\s+[0-9]+\s+[0-9]+\s+[0-9]+%' "$log_file" 2>/dev/null | grep -oE '[0-9]+%' | tr -d '%' | tail -1 || true)
124
- # Vitest: "All files | 85.5 |"
125
- [[ -z "$cov" ]] && cov=$(grep -oE 'All files\s*\|\s*[0-9.]+\s*\|' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | head -1 || true)
126
- # Go coverage: "coverage: 85.5% of statements"
127
- [[ -z "$cov" ]] && cov=$(grep -oE 'coverage:\s*[0-9.]+%' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
128
- # Cargo tarpaulin: "85.50% coverage"
129
- [[ -z "$cov" ]] && cov=$(grep -oE '[0-9.]+%\s*coverage' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | head -1 || true)
130
- # Generic: "Coverage: 85.5%"
131
- [[ -z "$cov" ]] && cov=$(grep -oiE 'coverage:?\s*[0-9.]+%' "$log_file" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
132
- echo "$cov"
133
- }
134
-
135
- format_duration() {
136
- local secs="$1"
137
- if [[ "$secs" -ge 3600 ]]; then
138
- printf "%dh %dm %ds" $((secs/3600)) $((secs%3600/60)) $((secs%60))
139
- elif [[ "$secs" -ge 60 ]]; then
140
- printf "%dm %ds" $((secs/60)) $((secs%60))
141
- else
142
- printf "%ds" "$secs"
143
- fi
144
- }
145
-
146
- # Rotate event log if needed (standalone mode — daemon has its own rotation in poll loop)
147
- rotate_event_log_if_needed() {
148
- local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
149
- local max_lines=10000
150
- [[ ! -f "$events_file" ]] && return
151
- local lines
152
- lines=$(wc -l < "$events_file" 2>/dev/null || echo "0")
153
- if [[ "$lines" -gt "$max_lines" ]]; then
154
- local tmp="${events_file}.rotating"
155
- if tail -5000 "$events_file" > "$tmp" 2>/dev/null && mv "$tmp" "$events_file" 2>/dev/null; then
156
- info "Rotated events.jsonl: ${lines} -> 5000 lines"
157
- fi
158
- fi
159
- }
160
-
161
- _pipeline_compact_goal() {
162
- local goal="$1"
163
- local plan_file="${2:-}"
164
- local design_file="${3:-}"
165
- local compact="$goal"
166
-
167
- # Include plan summary (first 20 lines only)
168
- if [[ -n "$plan_file" && -f "$plan_file" ]]; then
169
- compact="${compact}
170
-
171
- ## Plan Summary
172
- $(head -20 "$plan_file" 2>/dev/null || true)
173
- [... full plan in .claude/pipeline-artifacts/plan.md]"
174
- fi
175
-
176
- # Include design key decisions only (grep for headers)
177
- if [[ -n "$design_file" && -f "$design_file" ]]; then
178
- compact="${compact}
179
-
180
- ## Key Design Decisions
181
- $(grep -E '^#{1,3} ' "$design_file" 2>/dev/null | head -10 || true)
182
- [... full design in .claude/pipeline-artifacts/design.md]"
183
- fi
184
-
185
- echo "$compact"
186
- }
187
-
188
- load_composed_pipeline() {
189
- local spec_file="$1"
190
- [[ ! -f "$spec_file" ]] && return 1
191
-
192
- # Read enabled stages from composed spec
193
- local composed_stages
194
- composed_stages=$(jq -r '.stages // [] | .[] | .id' "$spec_file" 2>/dev/null) || return 1
195
- [[ -z "$composed_stages" ]] && return 1
196
-
197
- # Override enabled stages
198
- COMPOSED_STAGES="$composed_stages"
199
-
200
- # Override per-stage settings
201
- local build_max
202
- build_max=$(jq -r '.stages[] | select(.id=="build") | .max_iterations // ""' "$spec_file" 2>/dev/null) || true
203
- [[ -n "$build_max" && "$build_max" != "null" ]] && COMPOSED_BUILD_ITERATIONS="$build_max"
204
-
205
- emit_event "pipeline.composed_loaded" "stages=$(echo "$composed_stages" | wc -l | tr -d ' ')"
206
- return 0
207
- }
208
-
209
- # ─── Token / Cost Parsing ─────────────────────────────────────────────────
210
- parse_claude_tokens() {
211
- local log_file="$1"
212
- local input_tok output_tok
213
- input_tok=$(grep -oE 'input[_ ]tokens?[: ]+[0-9,]+' "$log_file" 2>/dev/null | tail -1 | grep -oE '[0-9,]+' | tr -d ',' || echo "0")
214
- output_tok=$(grep -oE 'output[_ ]tokens?[: ]+[0-9,]+' "$log_file" 2>/dev/null | tail -1 | grep -oE '[0-9,]+' | tr -d ',' || echo "0")
215
-
216
- TOTAL_INPUT_TOKENS=$(( TOTAL_INPUT_TOKENS + ${input_tok:-0} ))
217
- TOTAL_OUTPUT_TOKENS=$(( TOTAL_OUTPUT_TOKENS + ${output_tok:-0} ))
218
- }
219
-
220
- # Estimate pipeline cost using historical averages from completed pipelines.
221
- # Falls back to per-stage estimates when no history exists.
222
- estimate_pipeline_cost() {
223
- local stages="$1"
224
- local stage_count
225
- stage_count=$(echo "$stages" | jq 'length' 2>/dev/null || echo "6")
226
- [[ ! "$stage_count" =~ ^[0-9]+$ ]] && stage_count=6
227
-
228
- local events_file="${EVENTS_FILE:-$HOME/.shipwright/events.jsonl}"
229
- local avg_input=0 avg_output=0
230
- if [[ -f "$events_file" ]]; then
231
- local hist
232
- hist=$(grep '"type":"pipeline.completed"' "$events_file" 2>/dev/null | tail -10)
233
- if [[ -n "$hist" ]]; then
234
- avg_input=$(echo "$hist" | jq -s -r '[.[] | .input_tokens // 0 | tonumber] | if length > 0 then (add / length | floor | tostring) else "0" end' 2>/dev/null | head -1)
235
- avg_output=$(echo "$hist" | jq -s -r '[.[] | .output_tokens // 0 | tonumber] | if length > 0 then (add / length | floor | tostring) else "0" end' 2>/dev/null | head -1)
236
- fi
237
- fi
238
- [[ ! "$avg_input" =~ ^[0-9]+$ ]] && avg_input=0
239
- [[ ! "$avg_output" =~ ^[0-9]+$ ]] && avg_output=0
240
-
241
- # Fall back to reasonable per-stage estimates only if no history
242
- if [[ "$avg_input" -eq 0 ]]; then
243
- avg_input=$(( stage_count * 8000 )) # More realistic: ~8K input per stage
244
- avg_output=$(( stage_count * 4000 )) # ~4K output per stage
245
- fi
246
-
247
- echo "{\"input_tokens\":${avg_input},\"output_tokens\":${avg_output}}"
248
- }
133
+ # ─── Pipeline Decomposed Modules ────────────────────────────────────────────
134
+ # shellcheck source=lib/pipeline-cli.sh
135
+ [[ -f "$SCRIPT_DIR/lib/pipeline-cli.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-cli.sh"
136
+ # shellcheck source=lib/pipeline-util.sh
137
+ [[ -f "$SCRIPT_DIR/lib/pipeline-util.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-util.sh"
138
+ # shellcheck source=lib/pipeline-execution.sh
139
+ [[ -f "$SCRIPT_DIR/lib/pipeline-execution.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-execution.sh"
140
+ # shellcheck source=lib/pipeline-commands.sh
141
+ [[ -f "$SCRIPT_DIR/lib/pipeline-commands.sh" ]] && source "$SCRIPT_DIR/lib/pipeline-commands.sh"
249
142
 
250
143
  # ─── Defaults ───────────────────────────────────────────────────────────────
251
144
  GOAL=""
@@ -282,6 +175,8 @@ ORIGINAL_REPO_DIR=""
282
175
  REPO_OVERRIDE=""
283
176
  _cleanup_done=""
284
177
  PIPELINE_EXIT_CODE=1 # assume failure until run_pipeline succeeds
178
+ EFFORT_LEVEL_OVERRIDE="${SW_EFFORT_LEVEL:-}"
179
+ PIPELINE_FALLBACK_MODEL="${SW_FALLBACK_MODEL:-sonnet}"
285
180
 
286
181
  # GitHub metadata (populated during intake)
287
182
  ISSUE_LABELS=""
@@ -307,157 +202,33 @@ STATE_FILE=""
307
202
  ARTIFACTS_DIR=""
308
203
  TASKS_FILE=""
309
204
 
310
- # ─── Help ───────────────────────────────────────────────────────────────────
205
+ CURRENT_STAGE_ID=""
206
+
207
+ # Notification / webhook
208
+ SLACK_WEBHOOK=""
209
+ NOTIFICATION_ENABLED=false
210
+
211
+ # Placeholder to accumulate input/output tokens from all pipeline stages
212
+ TOTAL_INPUT_TOKENS=0
213
+ TOTAL_OUTPUT_TOKENS=0
311
214
 
312
- show_help() {
313
- echo -e "${CYAN}${BOLD}shipwright pipeline${RESET} — Autonomous Feature Delivery"
314
- echo ""
315
- echo -e "${BOLD}USAGE${RESET}"
316
- echo -e " ${CYAN}shipwright pipeline${RESET} <command> [options]"
317
- echo ""
318
- echo -e "${BOLD}COMMANDS${RESET}"
319
- echo -e " ${CYAN}start${RESET} --goal \"...\" Start a new pipeline"
320
- echo -e " ${CYAN}resume${RESET} Continue from last completed stage"
321
- echo -e " ${CYAN}status${RESET} Show pipeline progress dashboard"
322
- echo -e " ${CYAN}abort${RESET} Stop pipeline and mark aborted"
323
- echo -e " ${CYAN}list${RESET} Show available pipeline templates"
324
- echo -e " ${CYAN}show${RESET} <name> Display pipeline stages"
325
- echo ""
326
- echo -e "${BOLD}START OPTIONS${RESET}"
327
- echo -e " ${DIM}--goal \"description\"${RESET} What to build (required unless --issue)"
328
- echo -e " ${DIM}--issue <number>${RESET} Fetch goal from GitHub issue"
329
- echo -e " ${DIM}--repo <path>${RESET} Change to directory before running (must be a git repo)"
330
- echo -e " ${DIM}--local${RESET} Alias for --no-github --no-github-label (local-only mode)"
331
- echo -e " ${DIM}--pipeline <name>${RESET} Pipeline template (default: standard)"
332
- echo -e " ${DIM}--test-cmd \"command\"${RESET} Override test command (auto-detected if omitted)"
333
- echo -e " ${DIM}--model <model>${RESET} Override AI model (opus, sonnet, haiku)"
334
- echo -e " ${DIM}--agents <n>${RESET} Override agent count"
335
- echo -e " ${DIM}--skip-gates${RESET} Auto-approve all gates (fully autonomous)"
336
- echo -e " ${DIM}--headless${RESET} Full headless mode (skip gates, no prompts)"
337
- echo -e " ${DIM}--base <branch>${RESET} Base branch for PR (default: main)"
338
- echo -e " ${DIM}--reviewers \"a,b\"${RESET} Request PR reviewers (auto-detected if omitted)"
339
- echo -e " ${DIM}--labels \"a,b\"${RESET} Add labels to PR (inherited from issue if omitted)"
340
- echo -e " ${DIM}--no-github${RESET} Disable GitHub integration"
341
- echo -e " ${DIM}--no-github-label${RESET} Don't modify issue labels"
342
- echo -e " ${DIM}--ci${RESET} CI mode (skip gates, non-interactive)"
343
- echo -e " ${DIM}--ignore-budget${RESET} Skip budget enforcement checks"
344
- echo -e " ${DIM}--worktree [=name]${RESET} Run in isolated git worktree (parallel-safe)"
345
- echo -e " ${DIM}--dry-run${RESET} Show what would happen without executing"
346
- echo -e " ${DIM}--slack-webhook <url>${RESET} Send notifications to Slack"
347
- echo -e " ${DIM}--self-heal <n>${RESET} Build→test retry cycles on failure (default: 2)"
348
- echo -e " ${DIM}--max-iterations <n>${RESET} Override max build loop iterations"
349
- echo -e " ${DIM}--max-restarts <n>${RESET} Max session restarts in build loop"
350
- echo -e " ${DIM}--fast-test-cmd <cmd>${RESET} Fast/subset test for build loop"
351
- echo -e " ${DIM}--tdd${RESET} Test-first: generate tests before implementation"
352
- echo -e " ${DIM}--completed-stages \"a,b\"${RESET} Skip these stages (CI resume)"
353
- echo ""
354
- echo -e "${BOLD}STAGES${RESET} ${DIM}(configurable per pipeline template)${RESET}"
355
- echo -e " intake → plan → design → build → test → review → pr → deploy → validate → monitor"
356
- echo ""
357
- echo -e "${BOLD}GITHUB INTEGRATION${RESET} ${DIM}(automatic when gh CLI available)${RESET}"
358
- echo -e " • Issue intake: fetch metadata, labels, milestone, self-assign"
359
- echo -e " • Progress tracking: live updates posted as issue comments"
360
- echo -e " • Task checklist: plan posted as checkbox list on issue"
361
- echo -e " • PR creation: labels, milestone, reviewers auto-propagated"
362
- echo -e " • Issue lifecycle: labeled in-progress → closed on completion"
363
- echo ""
364
- echo -e "${BOLD}SELF-HEALING${RESET} ${DIM}(autonomous error recovery)${RESET}"
365
- echo -e " • Build→test feedback loop: failures feed back as build context"
366
- echo -e " • Configurable retry cycles (--self-heal N, default: 2)"
367
- echo -e " • Auto-rebase before PR: handles base branch drift"
368
- echo -e " • Signal-safe: Ctrl+C saves state for clean resume"
369
- echo -e " • Git stash/restore: protects uncommitted work"
370
- echo ""
371
- echo -e "${BOLD}AUTO-DETECTION${RESET} ${DIM}(zero-config for common setups)${RESET}"
372
- echo -e " • Test command: package.json, Makefile, Cargo.toml, go.mod, etc."
373
- echo -e " • Branch prefix: feat/, fix/, refactor/ based on task type"
374
- echo -e " • Reviewers: from CODEOWNERS or recent git contributors"
375
- echo -e " • Project type: language and framework detection"
376
- echo ""
377
- echo -e "${BOLD}NOTIFICATIONS${RESET} ${DIM}(team awareness)${RESET}"
378
- echo -e " • Slack: --slack-webhook <url>"
379
- echo -e " • Custom webhook: set SHIPWRIGHT_WEBHOOK_URL env var"
380
- echo -e " • Events: start, stage complete, failure, self-heal, done"
381
- echo ""
382
- echo -e "${BOLD}EXAMPLES${RESET}"
383
- echo -e " ${DIM}# From GitHub issue (fully autonomous)${RESET}"
384
- echo -e " ${DIM}shipwright pipeline start --issue 123 --skip-gates${RESET}"
385
- echo ""
386
- echo -e " ${DIM}# From inline goal${RESET}"
387
- echo -e " ${DIM}shipwright pipeline start --goal \"Add JWT authentication\"${RESET}"
388
- echo ""
389
- echo -e " ${DIM}# Hotfix with custom test command${RESET}"
390
- echo -e " ${DIM}shipwright pipeline start --issue 456 --pipeline hotfix --test-cmd \"pytest\"${RESET}"
391
- echo ""
392
- echo -e " ${DIM}# Full deployment pipeline with 3 agents${RESET}"
393
- echo -e " ${DIM}shipwright pipeline start --goal \"Build payment flow\" --pipeline full --agents 3${RESET}"
394
- echo ""
395
- echo -e " ${DIM}# Parallel pipeline in isolated worktree${RESET}"
396
- echo -e " ${DIM}shipwright pipeline start --issue 42 --worktree${RESET}"
397
- echo ""
398
- echo -e " ${DIM}# Resume / monitor / abort${RESET}"
399
- echo -e " ${DIM}shipwright pipeline resume${RESET}"
400
- echo -e " ${DIM}shipwright pipeline status${RESET}"
401
- echo -e " ${DIM}shipwright pipeline abort${RESET}"
402
- echo ""
403
- }
215
+ # Build-test retry limit (configurable via --self-heal)
216
+ BUILD_TEST_RETRIES="${BUILD_TEST_RETRIES:-2}"
404
217
 
405
- # ─── Argument Parsing ───────────────────────────────────────────────────────
218
+ # TDD mode flag (enable via --tdd or pipeline template)
219
+ TDD_ENABLED=false
220
+ PIPELINE_TDD=false
406
221
 
222
+ # ─── Argument Parsing (BEFORE other setup) ─────────────────────────────────
407
223
  SUBCOMMAND="${1:-help}"
408
224
  shift 2>/dev/null || true
409
225
 
410
- parse_args() {
411
- while [[ $# -gt 0 ]]; do
412
- case "$1" in
413
- --goal) GOAL="$2"; shift 2 ;;
414
- --issue) ISSUE_NUMBER="$2"; shift 2 ;;
415
- --repo) REPO_OVERRIDE="$2"; shift 2 ;;
416
- --local) NO_GITHUB=true; NO_GITHUB_LABEL=true; shift ;;
417
- --pipeline|--template) PIPELINE_NAME="$2"; shift 2 ;;
418
- --test-cmd) TEST_CMD="$2"; shift 2 ;;
419
- --model) MODEL="$2"; shift 2 ;;
420
- --agents) AGENTS="$2"; shift 2 ;;
421
- --skip-gates) SKIP_GATES=true; shift ;;
422
- --headless) HEADLESS=true; SKIP_GATES=true; shift ;;
423
- --base) BASE_BRANCH="$2"; shift 2 ;;
424
- --reviewers) REVIEWERS="$2"; shift 2 ;;
425
- --labels) LABELS="$2"; shift 2 ;;
426
- --no-github) NO_GITHUB=true; shift ;;
427
- --no-github-label) NO_GITHUB_LABEL=true; shift ;;
428
- --ci) CI_MODE=true; SKIP_GATES=true; shift ;;
429
- --ignore-budget) IGNORE_BUDGET=true; shift ;;
430
- --max-iterations) MAX_ITERATIONS_OVERRIDE="$2"; shift 2 ;;
431
- --completed-stages) COMPLETED_STAGES="$2"; shift 2 ;;
432
- --resume) RESUME_FROM_CHECKPOINT=true; shift ;;
433
- --worktree=*) AUTO_WORKTREE=true; WORKTREE_NAME="${1#--worktree=}"; WORKTREE_NAME="${WORKTREE_NAME//[^a-zA-Z0-9_-]/}"; if [[ -z "$WORKTREE_NAME" ]]; then error "Invalid worktree name (alphanumeric, hyphens, underscores only)"; exit 1; fi; shift ;;
434
- --worktree) AUTO_WORKTREE=true; shift ;;
435
- --dry-run) DRY_RUN=true; shift ;;
436
- --slack-webhook) SLACK_WEBHOOK="$2"; shift 2 ;;
437
- --self-heal) BUILD_TEST_RETRIES="${2:-3}"; shift 2 ;;
438
- --max-restarts)
439
- MAX_RESTARTS_OVERRIDE="$2"
440
- if ! [[ "$MAX_RESTARTS_OVERRIDE" =~ ^[0-9]+$ ]]; then
441
- error "--max-restarts must be numeric (got: $MAX_RESTARTS_OVERRIDE)"
442
- exit 1
443
- fi
444
- shift 2 ;;
445
-
446
- --fast-test-cmd) FAST_TEST_CMD_OVERRIDE="$2"; shift 2 ;;
447
- --tdd) TDD_ENABLED=true; shift ;;
448
- --help|-h) show_help; exit 0 ;;
449
- *)
450
- if [[ -z "$PIPELINE_NAME_ARG" ]]; then
451
- PIPELINE_NAME_ARG="$1"
452
- fi
453
- shift ;;
454
- esac
455
- done
456
- }
457
-
458
226
  PIPELINE_NAME_ARG=""
459
227
  parse_args "$@"
460
228
 
229
+ # Export effort and fallback variables so subprocesses can access them
230
+ export EFFORT_LEVEL_OVERRIDE PIPELINE_FALLBACK_MODEL
231
+
461
232
  # ─── Non-Interactive Detection ──────────────────────────────────────────────
462
233
  # When stdin is not a terminal (background, pipe, nohup, tmux send-keys),
463
234
  # auto-enable headless mode to prevent read prompts from killing the script.
@@ -472,2376 +243,6 @@ if [[ "$AUTO_WORKTREE" == "true" && "$SKIP_GATES" != "true" && ! -t 0 ]]; then
472
243
  SKIP_GATES=true
473
244
  fi
474
245
 
475
- # ─── Directory Setup ────────────────────────────────────────────────────────
476
-
477
- setup_dirs() {
478
- PROJECT_ROOT="$(git rev-parse --show-toplevel 2>/dev/null || pwd)"
479
- STATE_DIR="$PROJECT_ROOT/.claude"
480
- STATE_FILE="$STATE_DIR/pipeline-state.md"
481
- ARTIFACTS_DIR="$STATE_DIR/pipeline-artifacts"
482
- TASKS_FILE="$STATE_DIR/pipeline-tasks.md"
483
- mkdir -p "$STATE_DIR" "$ARTIFACTS_DIR"
484
- export SHIPWRIGHT_PIPELINE_ID="pipeline-$$-${ISSUE_NUMBER:-0}"
485
- }
486
-
487
- # ─── Pipeline Config Loading ───────────────────────────────────────────────
488
-
489
- find_pipeline_config() {
490
- local name="$1"
491
- local locations=(
492
- "$REPO_DIR/templates/pipelines/${name}.json"
493
- "${PROJECT_ROOT:-}/templates/pipelines/${name}.json"
494
- "$HOME/.shipwright/pipelines/${name}.json"
495
- )
496
- for loc in "${locations[@]}"; do
497
- if [[ -n "$loc" && -f "$loc" ]]; then
498
- echo "$loc"
499
- return 0
500
- fi
501
- done
502
- return 1
503
- }
504
-
505
- load_pipeline_config() {
506
- # Check for intelligence-composed pipeline first
507
- local composed_pipeline="${ARTIFACTS_DIR}/composed-pipeline.json"
508
- if [[ -f "$composed_pipeline" ]] && type composer_validate_pipeline >/dev/null 2>&1; then
509
- # Use composed pipeline if fresh (< 1 hour old)
510
- local composed_age=99999
511
- local composed_mtime
512
- composed_mtime=$(file_mtime "$composed_pipeline")
513
- if [[ "$composed_mtime" -gt 0 ]]; then
514
- composed_age=$(( $(now_epoch) - composed_mtime ))
515
- fi
516
- if [[ "$composed_age" -lt 3600 ]]; then
517
- local validate_json
518
- validate_json=$(cat "$composed_pipeline" 2>/dev/null || echo "")
519
- if [[ -n "$validate_json" ]] && composer_validate_pipeline "$validate_json" 2>/dev/null; then
520
- PIPELINE_CONFIG="$composed_pipeline"
521
- info "Pipeline: ${BOLD}composed${RESET} ${DIM}(intelligence-driven)${RESET}"
522
- emit_event "pipeline.composed_loaded" "issue=${ISSUE_NUMBER:-0}"
523
- return
524
- fi
525
- fi
526
- fi
527
-
528
- PIPELINE_CONFIG=$(find_pipeline_config "$PIPELINE_NAME") || {
529
- error "Pipeline template not found: $PIPELINE_NAME"
530
- echo -e " Available templates: ${DIM}shipwright pipeline list${RESET}"
531
- exit 1
532
- }
533
- info "Pipeline: ${BOLD}$PIPELINE_NAME${RESET} ${DIM}($PIPELINE_CONFIG)${RESET}"
534
- # TDD from template (overridable by --tdd)
535
- [[ "$(jq -r '.tdd // false' "$PIPELINE_CONFIG" 2>/dev/null)" == "true" ]] && PIPELINE_TDD=true
536
- return 0
537
- }
538
-
539
- CURRENT_STAGE_ID=""
540
-
541
- # Notification / webhook
542
- SLACK_WEBHOOK=""
543
- NOTIFICATION_ENABLED=false
544
-
545
- # Self-healing
546
- BUILD_TEST_RETRIES=$(_config_get_int "pipeline.build_test_retries" 3 2>/dev/null || echo 3)
547
- STASHED_CHANGES=false
548
- SELF_HEAL_COUNT=0
549
-
550
- # ─── Cost Tracking ───────────────────────────────────────────────────────
551
- TOTAL_INPUT_TOKENS=0
552
- TOTAL_OUTPUT_TOKENS=0
553
- COST_MODEL_RATES='{"opus":{"input":15,"output":75},"sonnet":{"input":3,"output":15},"haiku":{"input":0.25,"output":1.25}}'
554
-
555
- # ─── Heartbeat ────────────────────────────────────────────────────────────────
556
- HEARTBEAT_PID=""
557
-
558
- start_heartbeat() {
559
- local job_id="${PIPELINE_NAME:-pipeline-$$}"
560
- (
561
- while true; do
562
- "$SCRIPT_DIR/sw-heartbeat.sh" write "$job_id" \
563
- --pid $$ \
564
- --issue "${ISSUE_NUMBER:-0}" \
565
- --stage "${CURRENT_STAGE_ID:-unknown}" \
566
- --iteration "0" \
567
- --activity "$(get_stage_description "${CURRENT_STAGE_ID:-}" 2>/dev/null || echo "Running pipeline")" 2>/dev/null || true
568
- sleep "$(_config_get_int "pipeline.heartbeat_interval" 30 2>/dev/null || echo 30)"
569
- done
570
- ) >/dev/null 2>&1 &
571
- HEARTBEAT_PID=$!
572
- }
573
-
574
- stop_heartbeat() {
575
- if [[ -n "${HEARTBEAT_PID:-}" ]]; then
576
- kill "$HEARTBEAT_PID" 2>/dev/null || true
577
- wait "$HEARTBEAT_PID" 2>/dev/null || true
578
- "$SCRIPT_DIR/sw-heartbeat.sh" clear "${PIPELINE_NAME:-pipeline-$$}" 2>/dev/null || true
579
- HEARTBEAT_PID=""
580
- fi
581
- }
582
-
583
- # ─── CI Helpers ───────────────────────────────────────────────────────────
584
-
585
- ci_push_partial_work() {
586
- [[ "${CI_MODE:-false}" != "true" ]] && return 0
587
- [[ -z "${ISSUE_NUMBER:-}" ]] && return 0
588
-
589
- local branch="shipwright/issue-${ISSUE_NUMBER}"
590
-
591
- # Only push if we have uncommitted changes
592
- if ! git diff --quiet 2>/dev/null || ! git diff --cached --quiet 2>/dev/null; then
593
- git add -A 2>/dev/null || true
594
- git commit -m "WIP: partial pipeline progress for #${ISSUE_NUMBER}" --no-verify 2>/dev/null || true
595
- fi
596
-
597
- # Push branch (create if needed, force to overwrite previous WIP)
598
- if ! git push origin "HEAD:refs/heads/$branch" --force 2>/dev/null; then
599
- warn "git push failed for $branch — remote may be out of sync"
600
- emit_event "pipeline.push_failed" "branch=$branch"
601
- fi
602
- }
603
-
604
- ci_post_stage_event() {
605
- [[ "${CI_MODE:-false}" != "true" ]] && return 0
606
- [[ -z "${ISSUE_NUMBER:-}" ]] && return 0
607
- [[ "${GH_AVAILABLE:-false}" != "true" ]] && return 0
608
-
609
- local stage="$1" status="$2" elapsed="${3:-0s}"
610
- local comment="<!-- SHIPWRIGHT-STAGE: ${stage}:${status}:${elapsed} -->"
611
- _timeout "$(_config_get_int "network.gh_timeout" 30 2>/dev/null || echo 30)" gh issue comment "$ISSUE_NUMBER" --body "$comment" 2>/dev/null || true
612
- }
613
-
614
- # ─── Signal Handling ───────────────────────────────────────────────────────
615
-
616
- cleanup_on_exit() {
617
- [[ "${_cleanup_done:-}" == "true" ]] && return 0
618
- _cleanup_done=true
619
- local exit_code=$?
620
-
621
- # Stop heartbeat writer
622
- stop_heartbeat
623
-
624
- # Save state if we were running
625
- if [[ "$PIPELINE_STATUS" == "running" && -n "$STATE_FILE" ]]; then
626
- PIPELINE_STATUS="interrupted"
627
- UPDATED_AT="$(now_iso)"
628
- write_state 2>/dev/null || true
629
- echo ""
630
- warn "Pipeline interrupted — state saved."
631
- echo -e " Resume: ${DIM}shipwright pipeline resume${RESET}"
632
-
633
- # Push partial work in CI mode so retries can pick it up
634
- ci_push_partial_work
635
- fi
636
-
637
- # Restore stashed changes
638
- if [[ "$STASHED_CHANGES" == "true" ]]; then
639
- git stash pop --quiet 2>/dev/null || true
640
- fi
641
-
642
- # Release durable pipeline lock
643
- if [[ -n "${_PIPELINE_LOCK_ID:-}" ]] && type release_lock >/dev/null 2>&1; then
644
- release_lock "$_PIPELINE_LOCK_ID" 2>/dev/null || true
645
- fi
646
-
647
- # Cancel lingering in_progress GitHub Check Runs
648
- pipeline_cancel_check_runs 2>/dev/null || true
649
-
650
- # Update GitHub
651
- if [[ -n "${ISSUE_NUMBER:-}" && "${GH_AVAILABLE:-false}" == "true" ]]; then
652
- if ! _timeout "$(_config_get_int "network.gh_timeout" 30 2>/dev/null || echo 30)" gh issue comment "$ISSUE_NUMBER" --body "⏸️ **Pipeline interrupted** at stage: ${CURRENT_STAGE_ID:-unknown}" 2>/dev/null; then
653
- warn "gh issue comment failed — status update may not have been posted"
654
- emit_event "pipeline.comment_failed" "issue=$ISSUE_NUMBER"
655
- fi
656
- fi
657
-
658
- exit "$exit_code"
659
- }
660
-
661
- trap cleanup_on_exit SIGINT SIGTERM
662
-
663
- # ─── Pre-flight Validation ─────────────────────────────────────────────────
664
-
665
- preflight_checks() {
666
- local errors=0
667
-
668
- echo -e "${PURPLE}${BOLD}━━━ Pre-flight Checks ━━━${RESET}"
669
- echo ""
670
-
671
- # 1. Required tools
672
- local required_tools=("git" "jq")
673
- local optional_tools=("gh" "claude" "bc" "curl")
674
-
675
- for tool in "${required_tools[@]}"; do
676
- if command -v "$tool" >/dev/null 2>&1; then
677
- echo -e " ${GREEN}✓${RESET} $tool"
678
- else
679
- echo -e " ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
680
- errors=$((errors + 1))
681
- fi
682
- done
683
-
684
- for tool in "${optional_tools[@]}"; do
685
- if command -v "$tool" >/dev/null 2>&1; then
686
- echo -e " ${GREEN}✓${RESET} $tool"
687
- else
688
- echo -e " ${DIM}○${RESET} $tool ${DIM}(optional — some features disabled)${RESET}"
689
- fi
690
- done
691
-
692
- # 2. Git state
693
- echo ""
694
- if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
695
- echo -e " ${GREEN}✓${RESET} Inside git repo"
696
- else
697
- echo -e " ${RED}✗${RESET} Not inside a git repository"
698
- errors=$((errors + 1))
699
- fi
700
-
701
- # Check for uncommitted changes — offer to stash
702
- local dirty_files
703
- dirty_files=$(git status --porcelain 2>/dev/null | wc -l | xargs)
704
- if [[ "$dirty_files" -gt 0 ]]; then
705
- echo -e " ${YELLOW}⚠${RESET} $dirty_files uncommitted change(s)"
706
- if [[ "$SKIP_GATES" == "true" ]]; then
707
- info "Auto-stashing uncommitted changes..."
708
- git stash push -m "sw-pipeline: auto-stash before pipeline" --quiet 2>/dev/null && STASHED_CHANGES=true
709
- if [[ "$STASHED_CHANGES" == "true" ]]; then
710
- echo -e " ${GREEN}✓${RESET} Changes stashed (will restore on exit)"
711
- fi
712
- else
713
- echo -e " ${DIM}Tip: Use --skip-gates to auto-stash, or commit/stash manually${RESET}"
714
- fi
715
- else
716
- echo -e " ${GREEN}✓${RESET} Working tree clean"
717
- fi
718
-
719
- # Check if base branch exists
720
- if git rev-parse --verify "$BASE_BRANCH" >/dev/null 2>&1; then
721
- echo -e " ${GREEN}✓${RESET} Base branch: $BASE_BRANCH"
722
- else
723
- echo -e " ${RED}✗${RESET} Base branch not found: $BASE_BRANCH"
724
- errors=$((errors + 1))
725
- fi
726
-
727
- # 3. GitHub auth (if gh available and not disabled)
728
- if [[ "$NO_GITHUB" != "true" ]] && command -v gh >/dev/null 2>&1; then
729
- if gh auth status >/dev/null 2>&1; then
730
- echo -e " ${GREEN}✓${RESET} GitHub authenticated"
731
- else
732
- echo -e " ${YELLOW}⚠${RESET} GitHub not authenticated (features disabled)"
733
- fi
734
- fi
735
-
736
- # 4. Claude CLI
737
- if command -v claude >/dev/null 2>&1; then
738
- echo -e " ${GREEN}✓${RESET} Claude CLI available"
739
- else
740
- echo -e " ${RED}✗${RESET} Claude CLI not found — plan/build stages will fail"
741
- errors=$((errors + 1))
742
- fi
743
-
744
- # 5. sw loop (needed for build stage)
745
- if [[ -x "$SCRIPT_DIR/sw-loop.sh" ]]; then
746
- echo -e " ${GREEN}✓${RESET} shipwright loop available"
747
- else
748
- echo -e " ${RED}✗${RESET} sw-loop.sh not found at $SCRIPT_DIR"
749
- errors=$((errors + 1))
750
- fi
751
-
752
- # 6. Disk space check (warn if < 1GB free)
753
- local free_space_kb
754
- free_space_kb=$(df -k "$PROJECT_ROOT" 2>/dev/null | tail -1 | awk '{print $4}')
755
- if [[ -n "$free_space_kb" ]] && [[ "$free_space_kb" -lt 1048576 ]] 2>/dev/null; then
756
- echo -e " ${YELLOW}⚠${RESET} Low disk space: $(( free_space_kb / 1024 ))MB free"
757
- fi
758
-
759
- echo ""
760
-
761
- if [[ "$errors" -gt 0 ]]; then
762
- error "Pre-flight failed: $errors error(s)"
763
- return 1
764
- fi
765
-
766
- success "Pre-flight passed"
767
- echo ""
768
- return 0
769
- }
770
-
771
- # ─── Notification Helpers ──────────────────────────────────────────────────
772
-
773
- notify() {
774
- local title="$1" message="$2" level="${3:-info}"
775
- local emoji
776
- case "$level" in
777
- success) emoji="✅" ;;
778
- error) emoji="❌" ;;
779
- warn) emoji="⚠️" ;;
780
- *) emoji="🔔" ;;
781
- esac
782
-
783
- # Slack webhook
784
- if [[ -n "${SLACK_WEBHOOK:-}" ]]; then
785
- local payload
786
- payload=$(jq -n \
787
- --arg text "${emoji} *${title}*\n${message}" \
788
- '{text: $text}')
789
- curl -sf --connect-timeout "$(_config_get_int "network.connect_timeout" 10 2>/dev/null || echo 10)" --max-time "$(_config_get_int "network.max_time" 60 2>/dev/null || echo 60)" -X POST -H 'Content-Type: application/json' \
790
- -d "$payload" "$SLACK_WEBHOOK" >/dev/null 2>&1 || true
791
- fi
792
-
793
- # Custom webhook (env var SHIPWRIGHT_WEBHOOK_URL)
794
- local _webhook_url="${SHIPWRIGHT_WEBHOOK_URL:-}"
795
- if [[ -n "$_webhook_url" ]]; then
796
- local payload
797
- payload=$(jq -n \
798
- --arg title "$title" --arg message "$message" \
799
- --arg level "$level" --arg pipeline "${PIPELINE_NAME:-}" \
800
- --arg goal "${GOAL:-}" --arg stage "${CURRENT_STAGE_ID:-}" \
801
- '{title:$title, message:$message, level:$level, pipeline:$pipeline, goal:$goal, stage:$stage}')
802
- curl -sf --connect-timeout 10 --max-time 30 -X POST -H 'Content-Type: application/json' \
803
- -d "$payload" "$_webhook_url" >/dev/null 2>&1 || true
804
- fi
805
- }
806
-
807
- # ─── Error Classification ──────────────────────────────────────────────────
808
- # Classifies errors to determine whether retrying makes sense.
809
- # Returns: "infrastructure", "logic", "configuration", or "unknown"
810
-
811
- classify_error() {
812
- local stage_id="$1"
813
- local log_file="${ARTIFACTS_DIR}/${stage_id}-results.log"
814
- [[ ! -f "$log_file" ]] && log_file="${ARTIFACTS_DIR}/test-results.log"
815
- [[ ! -f "$log_file" ]] && { echo "unknown"; return; }
816
-
817
- local log_tail
818
- log_tail=$(tail -50 "$log_file" 2>/dev/null || echo "")
819
-
820
- # Generate error signature for history lookup
821
- local error_sig
822
- error_sig=$(echo "$log_tail" | grep -iE 'error|fail|exception|fatal' 2>/dev/null | head -3 | cksum | awk '{print $1}' || echo "0")
823
-
824
- # Check classification history first (learned from previous runs)
825
- local class_history="${HOME}/.shipwright/optimization/error-classifications.json"
826
- if [[ -f "$class_history" ]]; then
827
- local cached_class
828
- cached_class=$(jq -r --arg sig "$error_sig" '.[$sig].classification // empty' "$class_history" 2>/dev/null || true)
829
- if [[ -n "$cached_class" && "$cached_class" != "null" ]]; then
830
- echo "$cached_class"
831
- return
832
- fi
833
- fi
834
-
835
- local classification="unknown"
836
-
837
- # Infrastructure errors: timeout, OOM, network — retry makes sense
838
- if echo "$log_tail" | grep -qiE 'timeout|timed out|ETIMEDOUT|ECONNREFUSED|ECONNRESET|network|socket hang up|OOM|out of memory|killed|signal 9|Cannot allocate memory'; then
839
- classification="infrastructure"
840
- # Configuration errors: missing env, wrong path — don't retry, escalate
841
- elif echo "$log_tail" | grep -qiE 'ENOENT|not found|No such file|command not found|MODULE_NOT_FOUND|Cannot find module|missing.*env|undefined variable|permission denied|EACCES'; then
842
- classification="configuration"
843
- # Logic errors: assertion failures, type errors — retry won't help without code change
844
- elif echo "$log_tail" | grep -qiE 'AssertionError|assert.*fail|Expected.*but.*got|TypeError|ReferenceError|SyntaxError|CompileError|type mismatch|cannot assign|incompatible type'; then
845
- classification="logic"
846
- # Build errors: compilation failures
847
- elif echo "$log_tail" | grep -qiE 'error\[E[0-9]+\]|error: aborting|FAILED.*compile|build failed|tsc.*error|eslint.*error'; then
848
- classification="logic"
849
- # Intelligence fallback: Claude classification for unknown errors
850
- elif [[ "$classification" == "unknown" ]] && type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1; then
851
- local ai_class
852
- ai_class=$(claude --print --output-format text -p "Classify this error as exactly one of: infrastructure, configuration, logic, unknown.
853
-
854
- Error output:
855
- $(echo "$log_tail" | tail -20)
856
-
857
- Reply with ONLY the classification word, nothing else." --model haiku < /dev/null 2>/dev/null || true)
858
- ai_class=$(echo "$ai_class" | tr -d '[:space:]' | tr '[:upper:]' '[:lower:]')
859
- case "$ai_class" in
860
- infrastructure|configuration|logic) classification="$ai_class" ;;
861
- esac
862
- fi
863
-
864
- # Map retry categories to shared taxonomy (from lib/compat.sh SW_ERROR_CATEGORIES)
865
- # Retry uses: infrastructure, configuration, logic, unknown
866
- # Shared uses: test_failure, build_error, lint_error, timeout, dependency, flaky, config, security, permission, unknown
867
- local canonical_category="unknown"
868
- case "$classification" in
869
- infrastructure) canonical_category="timeout" ;;
870
- configuration) canonical_category="config" ;;
871
- logic)
872
- case "$stage_id" in
873
- test) canonical_category="test_failure" ;;
874
- *) canonical_category="build_error" ;;
875
- esac
876
- ;;
877
- esac
878
-
879
- # Record classification for future runs (using both retry and canonical categories)
880
- if [[ -n "$error_sig" && "$error_sig" != "0" ]]; then
881
- local class_dir="${HOME}/.shipwright/optimization"
882
- mkdir -p "$class_dir" 2>/dev/null || true
883
- local tmp_class
884
- tmp_class="$(mktemp)"
885
- trap "rm -f '$tmp_class'" RETURN
886
- if [[ -f "$class_history" ]]; then
887
- jq --arg sig "$error_sig" --arg cls "$classification" --arg canon "$canonical_category" --arg stage "$stage_id" \
888
- '.[$sig] = {"classification": $cls, "canonical": $canon, "stage": $stage, "recorded_at": now}' \
889
- "$class_history" > "$tmp_class" 2>/dev/null && \
890
- mv "$tmp_class" "$class_history" || rm -f "$tmp_class"
891
- else
892
- jq -n --arg sig "$error_sig" --arg cls "$classification" --arg canon "$canonical_category" --arg stage "$stage_id" \
893
- '{($sig): {"classification": $cls, "canonical": $canon, "stage": $stage, "recorded_at": now}}' \
894
- > "$tmp_class" 2>/dev/null && \
895
- mv "$tmp_class" "$class_history" || rm -f "$tmp_class"
896
- fi
897
- fi
898
-
899
- echo "$classification"
900
- }
901
-
902
- # ─── Stage Runner ───────────────────────────────────────────────────────────
903
-
904
- run_stage_with_retry() {
905
- local stage_id="$1"
906
- local max_retries
907
- max_retries=$(jq -r --arg id "$stage_id" '(.stages[] | select(.id == $id) | .config.retries) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
908
- [[ -z "$max_retries" || "$max_retries" == "null" ]] && max_retries=0
909
-
910
- local attempt=0
911
- local prev_error_class=""
912
- while true; do
913
- if "stage_${stage_id}"; then
914
- return 0
915
- fi
916
-
917
- # Capture error_class and error snippet for stage.failed / pipeline.completed events
918
- local error_class
919
- error_class=$(classify_error "$stage_id")
920
- LAST_STAGE_ERROR_CLASS="$error_class"
921
- LAST_STAGE_ERROR=""
922
- local _log_file="${ARTIFACTS_DIR}/${stage_id}-results.log"
923
- [[ ! -f "$_log_file" ]] && _log_file="${ARTIFACTS_DIR}/test-results.log"
924
- if [[ -f "$_log_file" ]]; then
925
- LAST_STAGE_ERROR=$(tail -20 "$_log_file" 2>/dev/null | grep -iE 'error|fail|exception|fatal' 2>/dev/null | head -1 | cut -c1-200 || true)
926
- fi
927
-
928
- attempt=$((attempt + 1))
929
- if [[ "$attempt" -gt "$max_retries" ]]; then
930
- return 1
931
- fi
932
-
933
- # Classify done above; decide whether retry makes sense
934
-
935
- emit_event "retry.classified" \
936
- "issue=${ISSUE_NUMBER:-0}" \
937
- "stage=$stage_id" \
938
- "attempt=$attempt" \
939
- "error_class=$error_class"
940
-
941
- case "$error_class" in
942
- infrastructure)
943
- info "Error classified as infrastructure (timeout/network/OOM) — retry makes sense"
944
- ;;
945
- configuration)
946
- error "Error classified as configuration (missing env/path) — skipping retry, escalating"
947
- emit_event "retry.escalated" \
948
- "issue=${ISSUE_NUMBER:-0}" \
949
- "stage=$stage_id" \
950
- "reason=configuration_error"
951
- return 1
952
- ;;
953
- logic)
954
- if [[ "$error_class" == "$prev_error_class" ]]; then
955
- error "Error classified as logic (assertion/type error) with same class — retry won't help without code change"
956
- emit_event "retry.skipped" \
957
- "issue=${ISSUE_NUMBER:-0}" \
958
- "stage=$stage_id" \
959
- "reason=repeated_logic_error"
960
- return 1
961
- fi
962
- warn "Error classified as logic — retrying once in case build fixes it"
963
- ;;
964
- *)
965
- info "Error classification: unknown — retrying"
966
- ;;
967
- esac
968
- prev_error_class="$error_class"
969
-
970
- if type db_save_reasoning_trace >/dev/null 2>&1; then
971
- local job_id="${SHIPWRIGHT_PIPELINE_ID:-$$}"
972
- local error_msg="${LAST_STAGE_ERROR:-$error_class}"
973
- db_save_reasoning_trace "$job_id" "retry_reasoning" \
974
- "stage=$stage_id error=$error_msg" \
975
- "Stage failed, analyzing error pattern before retry" \
976
- "retry_strategy=self_heal" 0.6 2>/dev/null || true
977
- fi
978
-
979
- warn "Stage $stage_id failed (attempt $attempt/$((max_retries + 1)), class: $error_class) — retrying..."
980
- # Exponential backoff with jitter to avoid thundering herd
981
- local backoff=$((2 ** attempt))
982
- [[ "$backoff" -gt 16 ]] && backoff=16
983
- local jitter=$(( RANDOM % (backoff + 1) ))
984
- local total_sleep=$((backoff + jitter))
985
- info "Backing off ${total_sleep}s before retry..."
986
- sleep "$total_sleep"
987
- done
988
- }
989
-
990
- # ─── Self-Healing Build→Test Feedback Loop ─────────────────────────────────
991
- # When tests fail after a build, this captures the error and re-runs the build
992
- # with the error context, so Claude can fix the issue automatically.
993
-
994
# Self-healing build→test loop.
# Runs the build stage and then the test stage repeatedly (up to
# max_cycles + 1 iterations, since `cycle` is incremented at the top of the
# loop). On each retry the previous test failure is appended to the global
# GOAL so the build agent sees the errors it must fix. Exits early when
# convergence detection decides the loop is stuck or has plateaued.
#
# Globals read:    BUILD_TEST_RETRIES, STATE_FILE, ARTIFACTS_DIR,
#                  ISSUE_NUMBER, SCRIPT_DIR, INTELLIGENCE_ANALYSIS,
#                  color/style vars (YELLOW, BOLD, CYAN, DIM, RESET)
# Globals written: SELF_HEAL_COUNT, CURRENT_STAGE_ID, GOAL (temporarily
#                  augmented, restored after the build attempt)
# Returns: 0 once tests pass; 1 on build failure, convergence stall,
#          plateau, or exhausted retries.
self_healing_build_test() {
  local cycle=0
  local max_cycles="$BUILD_TEST_RETRIES"
  local last_test_error=""

  # Convergence tracking
  local prev_error_sig="" consecutive_same_error=0
  local prev_fail_count=0 zero_convergence_streak=0

  # Vitals-driven adaptive limit (preferred over static BUILD_TEST_RETRIES)
  if type pipeline_adaptive_limit >/dev/null 2>&1; then
    local _vitals_json=""
    if type pipeline_compute_vitals >/dev/null 2>&1; then
      _vitals_json=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
    fi
    local vitals_limit
    vitals_limit=$(pipeline_adaptive_limit "build_test" "$_vitals_json" 2>/dev/null) || true
    # Only accept a strictly positive integer from the helper.
    if [[ -n "$vitals_limit" && "$vitals_limit" =~ ^[0-9]+$ && "$vitals_limit" -gt 0 ]]; then
      info "Vitals-driven build-test limit: ${max_cycles} → ${vitals_limit}"
      max_cycles="$vitals_limit"
      emit_event "vitals.adaptive_limit" \
        "issue=${ISSUE_NUMBER:-0}" \
        "context=build_test" \
        "original=$BUILD_TEST_RETRIES" \
        "vitals_limit=$vitals_limit"
    fi
  # Fallback: intelligence-based adaptive limits
  elif type composer_estimate_iterations >/dev/null 2>&1; then
    local estimated
    estimated=$(composer_estimate_iterations \
      "${INTELLIGENCE_ANALYSIS:-{}}" \
      "${HOME}/.shipwright/optimization/iteration-model.json" 2>/dev/null || echo "")
    if [[ -n "$estimated" && "$estimated" =~ ^[0-9]+$ && "$estimated" -gt 0 ]]; then
      max_cycles="$estimated"
      emit_event "intelligence.adaptive_iterations" \
        "issue=${ISSUE_NUMBER:-0}" \
        "estimated=$estimated" \
        "original=$BUILD_TEST_RETRIES"
    fi
  fi

  # Fallback: adaptive cycle limits from optimization data
  # (only when neither path above changed max_cycles)
  if [[ "$max_cycles" == "$BUILD_TEST_RETRIES" ]]; then
    local _iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
    if [[ -f "$_iter_model" ]]; then
      local adaptive_bt_limit
      adaptive_bt_limit=$(pipeline_adaptive_cycles "$max_cycles" "build_test" "0" "-1" 2>/dev/null) || true
      if [[ -n "$adaptive_bt_limit" && "$adaptive_bt_limit" =~ ^[0-9]+$ && "$adaptive_bt_limit" -gt 0 && "$adaptive_bt_limit" != "$max_cycles" ]]; then
        info "Adaptive build-test cycles: ${max_cycles} → ${adaptive_bt_limit}"
        max_cycles="$adaptive_bt_limit"
      fi
    fi
  fi

  # NOTE: cycle is incremented before the body runs, so the loop executes
  # max_cycles + 1 times in total — the messaging below reflects that.
  while [[ "$cycle" -le "$max_cycles" ]]; do
    cycle=$((cycle + 1))

    if [[ "$cycle" -gt 1 ]]; then
      SELF_HEAL_COUNT=$((SELF_HEAL_COUNT + 1))
      echo ""
      echo -e "${YELLOW}${BOLD}━━━ Self-Healing Cycle ${cycle}/$((max_cycles + 1)) ━━━${RESET}"
      info "Feeding test failure back to build loop..."

      if [[ -n "$ISSUE_NUMBER" ]]; then
        gh_comment_issue "$ISSUE_NUMBER" "🔄 **Self-healing cycle ${cycle}** — rebuilding with error context" 2>/dev/null || true
      fi

      # Reset build/test stage statuses for retry
      set_stage_status "build" "retrying"
      set_stage_status "test" "pending"
    fi

    # ── Run Build Stage ──
    echo ""
    echo -e "${CYAN}${BOLD}▸ Stage: build${RESET} ${DIM}[cycle ${cycle}]${RESET}"
    CURRENT_STAGE_ID="build"

    # Inject error context on retry cycles
    if [[ "$cycle" -gt 1 && -n "$last_test_error" ]]; then
      # Query memory for known fixes
      local _memory_fix=""
      if type memory_closed_loop_inject >/dev/null 2>&1; then
        local _error_sig_short
        # First three lines of the failure are used as the lookup key.
        _error_sig_short=$(echo "$last_test_error" | head -3 || echo "")
        _memory_fix=$(memory_closed_loop_inject "$_error_sig_short" 2>/dev/null) || true
      fi

      local memory_prefix=""
      if [[ -n "$_memory_fix" ]]; then
        info "Memory suggests fix: $(echo "$_memory_fix" | head -1)"
        memory_prefix="KNOWN FIX (from past success): ${_memory_fix}

"
      fi

      # Temporarily augment the goal with error context
      local original_goal="$GOAL"
      GOAL="$GOAL

${memory_prefix}IMPORTANT — Previous build attempt failed tests. Fix these errors:
$last_test_error

Focus on fixing the failing tests while keeping all passing tests working."

      update_status "running" "build"
      record_stage_start "build"

      if run_stage_with_retry "build"; then
        mark_stage_complete "build"
        local timing
        timing=$(get_stage_timing "build")
        success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
        # Best-effort vitals snapshot: file count comes from the last
        # commit's diffstat summary line, last error from the error log.
        if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
          local _diff_count
          _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
          local _snap_files _snap_error
          _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
          _snap_files="${_snap_files:-0}"
          _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
          _snap_error="${_snap_error:-}"
          pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
        fi
      else
        mark_stage_failed "build"
        # Restore the un-augmented goal before bailing out.
        GOAL="$original_goal"
        return 1
      fi
      GOAL="$original_goal"
    else
      # First cycle: plain build with the original goal.
      update_status "running" "build"
      record_stage_start "build"

      if run_stage_with_retry "build"; then
        mark_stage_complete "build"
        local timing
        timing=$(get_stage_timing "build")
        success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
        if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
          local _diff_count
          _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
          local _snap_files _snap_error
          _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
          _snap_files="${_snap_files:-0}"
          _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
          _snap_error="${_snap_error:-}"
          pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
        fi
      else
        mark_stage_failed "build"
        return 1
      fi
    fi

    # ── Run Test Stage ──
    echo ""
    echo -e "${CYAN}${BOLD}▸ Stage: test${RESET} ${DIM}[cycle ${cycle}]${RESET}"
    CURRENT_STAGE_ID="test"
    update_status "running" "test"
    record_stage_start "test"

    if run_stage_with_retry "test"; then
      mark_stage_complete "test"
      local timing
      timing=$(get_stage_timing "test")
      success "Stage ${BOLD}test${RESET} complete ${DIM}(${timing})${RESET}"
      emit_event "convergence.tests_passed" \
        "issue=${ISSUE_NUMBER:-0}" \
        "cycle=$cycle"
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        local _diff_count
        _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
        local _snap_files _snap_error
        _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
        _snap_files="${_snap_files:-0}"
        _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
        _snap_error="${_snap_error:-}"
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-test}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
      fi
      # Record fix outcome when tests pass after a retry with memory injection (pipeline path)
      if [[ "$cycle" -gt 1 && -n "${last_test_error:-}" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
        local _sig
        # Same 3-line signature used for the memory lookup, flattened.
        _sig=$(echo "$last_test_error" | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//')
        [[ -n "$_sig" ]] && bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_sig" "true" "true" 2>/dev/null || true
      fi
      return 0 # Tests passed!
    fi

    # Tests failed — capture error for next cycle
    local test_log="$ARTIFACTS_DIR/test-results.log"
    last_test_error=$(tail -30 "$test_log" 2>/dev/null || echo "Test command failed with no output")
    mark_stage_failed "test"

    # ── Convergence Detection ──
    # Hash the error output to detect repeated failures
    local error_sig
    error_sig=$(echo "$last_test_error" | shasum -a 256 2>/dev/null | cut -c1-16 || echo "unknown")

    # Count failing tests (extract from common patterns)
    # NOTE(review): this counts log lines matching fail/error (case-insensitive),
    # not actual test cases — a proxy metric only.
    local current_fail_count=0
    current_fail_count=$(grep -ciE 'fail|error|FAIL' "$test_log" 2>/dev/null || true)
    current_fail_count="${current_fail_count:-0}"

    if [[ "$error_sig" == "$prev_error_sig" ]]; then
      consecutive_same_error=$((consecutive_same_error + 1))
    else
      consecutive_same_error=1
    fi
    prev_error_sig="$error_sig"

    # Check: same error 3 times consecutively → stuck
    if [[ "$consecutive_same_error" -ge 3 ]]; then
      error "Convergence: stuck on same error for 3 consecutive cycles — exiting early"
      emit_event "convergence.stuck" \
        "issue=${ISSUE_NUMBER:-0}" \
        "cycle=$cycle" \
        "error_sig=$error_sig" \
        "consecutive=$consecutive_same_error"
      notify "Build Convergence" "Stuck on unfixable error after ${cycle} cycles" "error"
      return 1
    fi

    # Track convergence rate: did we reduce failures?
    if [[ "$cycle" -gt 1 && "$prev_fail_count" -gt 0 ]]; then
      if [[ "$current_fail_count" -ge "$prev_fail_count" ]]; then
        zero_convergence_streak=$((zero_convergence_streak + 1))
      else
        zero_convergence_streak=0
      fi

      # Check: zero convergence for 2 consecutive iterations → plateau
      if [[ "$zero_convergence_streak" -ge 2 ]]; then
        error "Convergence: no progress for 2 consecutive cycles (${current_fail_count} failures remain) — exiting early"
        emit_event "convergence.plateau" \
          "issue=${ISSUE_NUMBER:-0}" \
          "cycle=$cycle" \
          "fail_count=$current_fail_count" \
          "streak=$zero_convergence_streak"
        notify "Build Convergence" "No progress after ${cycle} cycles — plateau reached" "error"
        return 1
      fi
    fi
    prev_fail_count="$current_fail_count"

    info "Convergence: error_sig=${error_sig:0:8} repeat=${consecutive_same_error} failures=${current_fail_count} no_progress=${zero_convergence_streak}"

    if [[ "$cycle" -le "$max_cycles" ]]; then
      warn "Tests failed — will attempt self-healing (cycle $((cycle + 1))/$((max_cycles + 1)))"
      notify "Self-Healing" "Tests failed on cycle ${cycle}, retrying..." "warn"
    fi
  done

  error "Self-healing exhausted after $((max_cycles + 1)) cycles"
  notify "Self-Healing Failed" "Tests still failing after $((max_cycles + 1)) build-test cycles" "error"
  return 1
}
1249
-
1250
- # ─── Auto-Rebase ──────────────────────────────────────────────────────────
1251
-
1252
# Bring the current branch up to date with BASE_BRANCH.
# Strategy: fetch (best-effort), rebase onto origin/BASE_BRANCH, and fall
# back to a merge when the rebase conflicts. A fetch failure is treated as
# non-fatal (returns 0); only a combined rebase+merge failure returns 1.
auto_rebase() {
  info "Syncing with ${BASE_BRANCH}..."

  # Fetch latest — network problems are not worth failing the pipeline over.
  if ! git fetch origin "$BASE_BRANCH" --quiet 2>/dev/null; then
    warn "Could not fetch origin/${BASE_BRANCH}"
    return 0
  fi

  # How far behind origin are we? Nothing to do when the answer is zero.
  local commits_behind
  commits_behind=$(git rev-list --count "HEAD..origin/${BASE_BRANCH}" 2>/dev/null || echo "0")

  if [[ "$commits_behind" -eq 0 ]]; then
    success "Already up to date with ${BASE_BRANCH}"
    return 0
  fi

  info "Rebasing onto origin/${BASE_BRANCH} ($commits_behind commits behind)..."
  if git rebase "origin/${BASE_BRANCH}" --quiet 2>/dev/null; then
    success "Rebase successful"
    return 0
  fi

  # Rebase hit conflicts — back out cleanly and try a merge instead.
  warn "Rebase conflict detected — aborting rebase"
  git rebase --abort 2>/dev/null || true
  warn "Falling back to merge..."
  if git merge "origin/${BASE_BRANCH}" --no-edit --quiet 2>/dev/null; then
    success "Merge successful"
  else
    git merge --abort 2>/dev/null || true
    error "Both rebase and merge failed — manual intervention needed"
    return 1
  fi
}
1286
-
1287
# Main pipeline driver.
# Iterates over the stages declared in PIPELINE_CONFIG (read from fd 3 so
# that stage commands are free to consume stdin), honouring: human
# skip/message directives, intelligence-based stage skipping, CI-resume
# state, approval gates, budget limits, and per-stage model routing. The
# build+test pair is delegated to self_healing_build_test when both stages
# are enabled and BUILD_TEST_RETRIES > 0.
# Returns: 0 on completion or deliberate pause; 1 on stage failure.
run_pipeline() {
  # Rotate event log if needed (standalone mode)
  rotate_event_log_if_needed

  local stages
  stages=$(jq -c '.stages[]' "$PIPELINE_CONFIG")

  local stage_count enabled_count
  stage_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
  enabled_count=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
  local completed=0

  # Check which stages are enabled to determine if we use the self-healing loop
  local build_enabled test_enabled
  build_enabled=$(jq -r '.stages[] | select(.id == "build") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null)
  test_enabled=$(jq -r '.stages[] | select(.id == "test") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null)
  local use_self_healing=false
  if [[ "$build_enabled" == "true" && "$test_enabled" == "true" && "$BUILD_TEST_RETRIES" -gt 0 ]]; then
    use_self_healing=true
  fi

  # Stage JSON objects arrive one per line on fd 3 (see `done 3<<< "$stages"`).
  while IFS= read -r -u 3 stage; do
    local id enabled gate
    id=$(echo "$stage" | jq -r '.id')
    enabled=$(echo "$stage" | jq -r '.enabled')
    gate=$(echo "$stage" | jq -r '.gate')

    CURRENT_STAGE_ID="$id"

    # Human intervention: check for skip-stage directive
    if [[ -f "$ARTIFACTS_DIR/skip-stage.txt" ]]; then
      local skip_list
      skip_list="$(cat "$ARTIFACTS_DIR/skip-stage.txt" 2>/dev/null || true)"
      if echo "$skip_list" | grep -qx "$id" 2>/dev/null; then
        info "Stage ${BOLD}${id}${RESET} skipped by human directive"
        emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=human_skip"
        # Remove this stage from the skip file
        local tmp_skip
        tmp_skip="$(mktemp)"
        # NOTE(review): a RETURN trap set inside a function persists after it
        # returns in bash, and this one is re-armed per skipped stage — the
        # temp file is already moved below, so the trap is effectively a
        # no-op, but the leak is worth confirming/cleaning up.
        trap "rm -f '$tmp_skip'" RETURN
        grep -vx "$id" "$ARTIFACTS_DIR/skip-stage.txt" > "$tmp_skip" 2>/dev/null || true
        mv "$tmp_skip" "$ARTIFACTS_DIR/skip-stage.txt"
        continue
      fi
    fi

    # Human intervention: check for human message
    if [[ -f "$ARTIFACTS_DIR/human-message.txt" ]]; then
      local human_msg
      human_msg="$(cat "$ARTIFACTS_DIR/human-message.txt" 2>/dev/null || true)"
      if [[ -n "$human_msg" ]]; then
        echo ""
        echo -e " ${PURPLE}${BOLD}💬 Human message:${RESET} $human_msg"
        emit_event "pipeline.human_message" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "message=$human_msg"
        # Message is consumed exactly once.
        rm -f "$ARTIFACTS_DIR/human-message.txt"
      fi
    fi

    if [[ "$enabled" != "true" ]]; then
      echo -e " ${DIM}○ ${id} — skipped (disabled)${RESET}"
      continue
    fi

    # Intelligence: evaluate whether to skip this stage
    local skip_reason=""
    skip_reason=$(pipeline_should_skip_stage "$id" 2>/dev/null) || true
    if [[ -n "$skip_reason" ]]; then
      echo -e " ${DIM}○ ${id} — skipped (intelligence: ${skip_reason})${RESET}"
      set_stage_status "$id" "complete"
      completed=$((completed + 1))
      continue
    fi

    local stage_status
    stage_status=$(get_stage_status "$id")
    if [[ "$stage_status" == "complete" ]]; then
      echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— already complete${RESET}"
      completed=$((completed + 1))
      continue
    fi

    # CI resume: skip stages marked as completed from previous run
    # (COMPLETED_STAGES is a comma-separated list of stage ids)
    if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "$id"; then
      # Verify artifacts survived the merge — regenerate if missing
      if verify_stage_artifacts "$id"; then
        echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— skipped (CI resume)${RESET}"
        set_stage_status "$id" "complete"
        completed=$((completed + 1))
        emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
        continue
      else
        warn "Stage $id marked complete but artifacts missing — regenerating"
        emit_event "stage.artifact_miss" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
      fi
    fi

    # Self-healing build→test loop: when we hit build, run both together
    if [[ "$id" == "build" && "$use_self_healing" == "true" ]]; then
      # TDD: generate tests before build when enabled
      if [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
        stage_test_first || true
      fi
      # Gate check for build
      local build_gate
      build_gate=$(echo "$stage" | jq -r '.gate')
      if [[ "$build_gate" == "approve" && "$SKIP_GATES" != "true" ]]; then
        show_stage_preview "build"
        local answer=""
        # Only prompt when stdin is a TTY; otherwise fall through (empty
        # answer is treated as approval).
        if [[ -t 0 ]]; then
          read -rp " Proceed with build+test (self-healing)? [Y/n] " answer || true
        fi
        if [[ "$answer" =~ ^[Nn] ]]; then
          update_status "paused" "build"
          info "Pipeline paused. Resume with: ${DIM}shipwright pipeline resume${RESET}"
          return 0
        fi
      fi

      if self_healing_build_test; then
        completed=$((completed + 2)) # Both build and test

        # Intelligence: reassess complexity after build+test
        local reassessment
        reassessment=$(pipeline_reassess_complexity 2>/dev/null) || true
        if [[ -n "$reassessment" && "$reassessment" != "as_expected" ]]; then
          info "Complexity reassessment: ${reassessment}"
        fi
      else
        update_status "failed" "test"
        error "Pipeline failed: build→test self-healing exhausted"
        return 1
      fi
      continue
    fi

    # TDD: generate tests before build when enabled (non-self-healing path)
    if [[ "$id" == "build" && "$use_self_healing" != "true" ]] && [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
      stage_test_first || true
    fi

    # Skip test if already handled by self-healing loop
    if [[ "$id" == "test" && "$use_self_healing" == "true" ]]; then
      stage_status=$(get_stage_status "test")
      if [[ "$stage_status" == "complete" ]]; then
        echo -e " ${GREEN}✓ test${RESET} ${DIM}— completed in build→test loop${RESET}"
      fi
      continue
    fi

    # Gate check
    if [[ "$gate" == "approve" && "$SKIP_GATES" != "true" ]]; then
      show_stage_preview "$id"
      local answer=""
      if [[ -t 0 ]]; then
        read -rp " Proceed with ${id}? [Y/n] " answer || true
      else
        # Non-interactive: auto-approve (shouldn't reach here if headless detection works)
        info "Non-interactive mode — auto-approving ${id}"
      fi
      if [[ "$answer" =~ ^[Nn] ]]; then
        update_status "paused" "$id"
        info "Pipeline paused at ${BOLD}$id${RESET}. Resume with: ${DIM}shipwright pipeline resume${RESET}"
        return 0
      fi
    fi

    # Budget enforcement check (skip with --ignore-budget)
    # Exit code 2 from sw-cost.sh check-budget means "budget exceeded".
    if [[ "$IGNORE_BUDGET" != "true" ]] && [[ -x "$SCRIPT_DIR/sw-cost.sh" ]]; then
      local budget_rc=0
      bash "$SCRIPT_DIR/sw-cost.sh" check-budget 2>/dev/null || budget_rc=$?
      if [[ "$budget_rc" -eq 2 ]]; then
        warn "Daily budget exceeded — pausing pipeline before stage ${BOLD}$id${RESET}"
        warn "Resume with --ignore-budget to override, or wait until tomorrow"
        emit_event "pipeline.budget_paused" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
        update_status "paused" "$id"
        return 0
      fi
    fi

    # Intelligence: per-stage model routing (UCB1 when DB has data, else A/B testing)
    local recommended_model="" from_ucb1=false
    if type ucb1_select_model >/dev/null 2>&1; then
      recommended_model=$(ucb1_select_model "$id" 2>/dev/null || echo "")
      [[ -n "$recommended_model" ]] && from_ucb1=true
    fi
    if [[ -z "$recommended_model" ]] && type intelligence_recommend_model >/dev/null 2>&1; then
      local stage_complexity="${INTELLIGENCE_COMPLEXITY:-5}"
      local budget_remaining=""
      if [[ -x "$SCRIPT_DIR/sw-cost.sh" ]]; then
        budget_remaining=$(bash "$SCRIPT_DIR/sw-cost.sh" remaining-budget 2>/dev/null || echo "")
      fi
      local recommended_json
      recommended_json=$(intelligence_recommend_model "$id" "$stage_complexity" "$budget_remaining" 2>/dev/null || echo "")
      recommended_model=$(echo "$recommended_json" | jq -r '.model // empty' 2>/dev/null || echo "")
    fi
    if [[ -n "$recommended_model" && "$recommended_model" != "null" ]]; then
      if [[ "$from_ucb1" == "true" ]]; then
        # UCB1 already balances exploration/exploitation — use directly
        export CLAUDE_MODEL="$recommended_model"
        emit_event "intelligence.model_ucb1" \
          "issue=${ISSUE_NUMBER:-0}" \
          "stage=$id" \
          "model=$recommended_model"
      else
        # A/B testing for intelligence recommendation
        # ab_ratio is a percentage (default 20%), derived from the
        # 0.0–1.0 ratio in daemon-config.json.
        local ab_ratio=20
        local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
        if [[ -f "$daemon_cfg" ]]; then
          local cfg_ratio
          cfg_ratio=$(jq -r '.intelligence.ab_test_ratio // 0.2' "$daemon_cfg" 2>/dev/null || echo "0.2")
          ab_ratio=$(awk -v r="$cfg_ratio" 'BEGIN{printf "%d", r * 100}' 2>/dev/null || echo "20")
        fi

        local routing_file="${HOME}/.shipwright/optimization/model-routing.json"
        local use_recommended=false
        local ab_group="control"

        # Graduate from A/B testing once ≥50 samples exist for this stage.
        if [[ -f "$routing_file" ]]; then
          local stage_samples total_samples
          stage_samples=$(jq -r --arg s "$id" '.routes[$s].sonnet_samples // .[$s].sonnet_samples // 0' "$routing_file" 2>/dev/null || echo "0")
          total_samples=$(jq -r --arg s "$id" '((.routes[$s].sonnet_samples // .[$s].sonnet_samples // 0) + (.routes[$s].opus_samples // .[$s].opus_samples // 0))' "$routing_file" 2>/dev/null || echo "0")
          if [[ "${total_samples:-0}" -ge 50 ]]; then
            use_recommended=true
            ab_group="graduated"
          fi
        fi

        if [[ "$use_recommended" != "true" ]]; then
          local roll=$((RANDOM % 100))
          if [[ "$roll" -lt "$ab_ratio" ]]; then
            use_recommended=true
            ab_group="experiment"
          fi
        fi

        if [[ "$use_recommended" == "true" ]]; then
          export CLAUDE_MODEL="$recommended_model"
        else
          export CLAUDE_MODEL="opus"
        fi

        emit_event "intelligence.model_ab" \
          "issue=${ISSUE_NUMBER:-0}" \
          "stage=$id" \
          "recommended=$recommended_model" \
          "applied=$CLAUDE_MODEL" \
          "ab_group=$ab_group" \
          "ab_ratio=$ab_ratio"
      fi
    fi

    echo ""
    echo -e "${CYAN}${BOLD}▸ Stage: ${id}${RESET} ${DIM}[$((completed + 1))/${enabled_count}]${RESET}"
    update_status "running" "$id"
    record_stage_start "$id"
    local stage_start_epoch
    stage_start_epoch=$(now_epoch)
    emit_event "stage.started" "issue=${ISSUE_NUMBER:-0}" "stage=$id"

    # Mark GitHub Check Run as in-progress
    if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update >/dev/null 2>&1; then
      gh_checks_stage_update "$id" "in_progress" "" "Stage $id started" 2>/dev/null || true
    fi

    local stage_model_used="${CLAUDE_MODEL:-${MODEL:-opus}}"
    if run_stage_with_retry "$id"; then
      mark_stage_complete "$id"
      completed=$((completed + 1))
      # Capture project pattern after intake (for memory context in later stages)
      if [[ "$id" == "intake" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
        (cd "$REPO_DIR" && bash "$SCRIPT_DIR/sw-memory.sh" pattern "project" "{}" 2>/dev/null) || true
      fi
      local timing stage_dur_s
      timing=$(get_stage_timing "$id")
      stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
      success "Stage ${BOLD}$id${RESET} complete ${DIM}(${timing})${RESET}"
      emit_event "stage.completed" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "duration_s=$stage_dur_s" "result=success"
      # Emit vitals snapshot on every stage transition (not just build/test)
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "" 2>/dev/null || true
      fi
      # Record model outcome for UCB1 learning
      type record_model_outcome >/dev/null 2>&1 && record_model_outcome "$stage_model_used" "$id" 1 "$stage_dur_s" 0 2>/dev/null || true
      # Broadcast discovery for cross-pipeline learning
      if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
        local _disc_cat _disc_patterns _disc_text
        _disc_cat="$id"
        case "$id" in
          plan) _disc_patterns="*.md"; _disc_text="Plan completed: ${GOAL:-goal}" ;;
          design) _disc_patterns="*.md,*.ts,*.tsx,*.js"; _disc_text="Design completed for ${GOAL:-goal}" ;;
          build) _disc_patterns="src/*,*.ts,*.tsx,*.js"; _disc_text="Build completed" ;;
          test) _disc_patterns="*.test.*,*_test.*"; _disc_text="Tests passed" ;;
          review) _disc_patterns="*.md,*.ts,*.tsx"; _disc_text="Review completed" ;;
          *) _disc_patterns="*"; _disc_text="Stage $id completed" ;;
        esac
        bash "$SCRIPT_DIR/sw-discovery.sh" broadcast "$_disc_cat" "$_disc_patterns" "$_disc_text" "" 2>/dev/null || true
      fi
      # Log model used for prediction feedback
      echo "${id}|${stage_model_used}|true" >> "${ARTIFACTS_DIR}/model-routing.log"
    else
      mark_stage_failed "$id"
      local stage_dur_s
      stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
      error "Pipeline failed at stage: ${BOLD}$id${RESET}"
      update_status "failed" "$id"
      emit_event "stage.failed" \
        "issue=${ISSUE_NUMBER:-0}" \
        "stage=$id" \
        "duration_s=$stage_dur_s" \
        "error=${LAST_STAGE_ERROR:-unknown}" \
        "error_class=${LAST_STAGE_ERROR_CLASS:-unknown}"
      # Emit vitals snapshot on failure too
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "${LAST_STAGE_ERROR:-unknown}" 2>/dev/null || true
      fi
      # Log model used for prediction feedback
      echo "${id}|${stage_model_used}|false" >> "${ARTIFACTS_DIR}/model-routing.log"
      # Record model outcome for UCB1 learning
      type record_model_outcome >/dev/null 2>&1 && record_model_outcome "$stage_model_used" "$id" 0 "$stage_dur_s" 0 2>/dev/null || true
      # Cancel any remaining in_progress check runs
      pipeline_cancel_check_runs 2>/dev/null || true
      return 1
    fi
  done 3<<< "$stages"

  # Pipeline complete!
  update_status "complete" ""
  PIPELINE_STAGES_PASSED="$completed"
  PIPELINE_SLOWEST_STAGE=""
  if type get_slowest_stage >/dev/null 2>&1; then
    PIPELINE_SLOWEST_STAGE=$(get_slowest_stage 2>/dev/null || true)
  fi
  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  echo ""
  echo -e "${GREEN}${BOLD}═══════════════════════════════════════════════════════════════════${RESET}"
  success "Pipeline complete! ${completed}/${enabled_count} stages passed in ${total_dur:-unknown}"
  echo -e "${GREEN}${BOLD}═══════════════════════════════════════════════════════════════════${RESET}"

  # Show summary
  echo ""
  if [[ -f "$ARTIFACTS_DIR/pr-url.txt" ]]; then
    echo -e " ${BOLD}PR:${RESET} $(cat "$ARTIFACTS_DIR/pr-url.txt")"
  fi
  echo -e " ${BOLD}Branch:${RESET} $GIT_BRANCH"
  [[ -n "${GITHUB_ISSUE:-}" ]] && echo -e " ${BOLD}Issue:${RESET} $GITHUB_ISSUE"
  echo -e " ${BOLD}Duration:${RESET} $total_dur"
  echo -e " ${BOLD}Artifacts:${RESET} $ARTIFACTS_DIR/"
  echo ""

  # Capture learnings to memory (success or failure)
  if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
    bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
  fi

  # Final GitHub progress update
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local body
    body=$(gh_build_progress_body)
    gh_update_progress "$body"
  fi

  # Post-completion cleanup
  pipeline_post_completion_cleanup
}
1655
-
1656
- # ─── Post-Completion Cleanup ──────────────────────────────────────────────
1657
- # Cleans up transient artifacts after a successful pipeline run.
1658
-
1659
# Remove transient per-run artifacts after a pipeline run completes and
# reset the state file's status to idle so the next run starts clean.
# Globals read: ARTIFACTS_DIR, STATE_FILE, ISSUE_NUMBER
# Emits a pipeline.cleanup event when anything was actually removed.
pipeline_post_completion_cleanup() {
  local cleaned=0

  # 1. Clear checkpoints and context files (they only matter for resume; pipeline is done)
  if [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
    local cp_count=0
    local cp_file
    # Both checkpoint flavours in one pass; the -f guard handles the
    # literal pattern an unmatched glob leaves behind.
    for cp_file in "${ARTIFACTS_DIR}/checkpoints"/*-checkpoint.json \
                   "${ARTIFACTS_DIR}/checkpoints"/*-claude-context.json; do
      [[ -f "$cp_file" ]] || continue
      rm -f "$cp_file"
      cp_count=$((cp_count + 1))
    done
    if [[ "$cp_count" -gt 0 ]]; then
      cleaned=$((cleaned + cp_count))
    fi
  fi

  # 2. Clear per-run intelligence artifacts (not needed after completion)
  local intel_files=(
    "${ARTIFACTS_DIR}/classified-findings.json"
    "${ARTIFACTS_DIR}/reassessment.json"
    "${ARTIFACTS_DIR}/skip-stage.txt"
    "${ARTIFACTS_DIR}/human-message.txt"
  )
  local f
  for f in "${intel_files[@]}"; do
    if [[ -f "$f" ]]; then
      rm -f "$f"
      cleaned=$((cleaned + 1))
    fi
  done

  # 3. Clear stale pipeline state (mark as idle so next run starts clean)
  if [[ -f "$STATE_FILE" ]]; then
    # Reset status to idle (preserves the file for reference but unblocks new runs)
    local tmp_state
    tmp_state=$(mktemp)
    # BUGFIX: only replace the state file when sed actually succeeds. The
    # previous code suppressed sed's failure and moved the temp file
    # unconditionally, which clobbered STATE_FILE with an empty file on
    # error. The old `trap … RETURN` is also gone: RETURN traps set inside
    # a bash function persist after it returns, so it leaked into callers.
    if sed 's/^status: .*/status: idle/' "$STATE_FILE" > "$tmp_state" 2>/dev/null; then
      mv "$tmp_state" "$STATE_FILE"
    else
      rm -f "$tmp_state"
    fi
  fi

  if [[ "$cleaned" -gt 0 ]]; then
    emit_event "pipeline.cleanup" \
      "issue=${ISSUE_NUMBER:-0}" \
      "cleaned=$cleaned" \
      "type=post_completion"
  fi
}
1713
-
1714
- # Cancel any lingering in_progress GitHub Check Runs (called on abort/interrupt)
1715
# Cancel any lingering in_progress GitHub Check Runs (called on abort/interrupt).
# No-ops (status 0) when GitHub is disabled or the checks helper is absent;
# returns non-zero only when the check-run id registry file is missing.
pipeline_cancel_check_runs() {
  # GitHub integration switched off → nothing to cancel.
  [[ "${NO_GITHUB:-false}" == "true" ]] && return
  # Helper not loaded → nothing we can do (still a clean exit).
  type gh_checks_stage_update >/dev/null 2>&1 || return 0

  local id_registry="${ARTIFACTS_DIR:-/dev/null}/check-run-ids.json"
  [[ -f "$id_registry" ]] || return

  # The registry's top-level keys are stage names; mark each as cancelled.
  local check_stage
  while IFS= read -r check_stage; do
    [[ -n "$check_stage" ]] || continue
    gh_checks_stage_update "$check_stage" "completed" "cancelled" "Pipeline interrupted" 2>/dev/null || true
  done < <(jq -r 'keys[]' "$id_registry" 2>/dev/null || true)
}
1733
-
1734
- # ─── Worktree Isolation ───────────────────────────────────────────────────
1735
- # Creates a git worktree for parallel-safe pipeline execution
1736
-
1737
# Create an isolated git worktree (with a fresh pipeline/<name> branch) for
# parallel-safe pipeline execution, then chdir into it.
# Globals read:    WORKTREE_NAME, ISSUE_NUMBER
# Globals written: ORIGINAL_REPO_DIR (where to return on cleanup),
#                  CLEANUP_WORKTREE (signals pipeline_cleanup_worktree)
# Returns: non-zero when the worktree cannot be entered.
pipeline_setup_worktree() {
  local tree_root=".worktrees"
  local tree_name="${WORKTREE_NAME}"

  # No explicit name? Derive one from the issue number, else a timestamp.
  if [[ -z "$tree_name" ]]; then
    if [[ -n "${ISSUE_NUMBER:-}" ]]; then
      tree_name="pipeline-issue-${ISSUE_NUMBER}"
    else
      tree_name="pipeline-$(date +%s)"
    fi
  fi

  local tree_path="${tree_root}/${tree_name}"
  local tree_branch="pipeline/${tree_name}"

  info "Setting up worktree: ${DIM}${tree_path}${RESET}"

  mkdir -p "$tree_root"

  # A leftover worktree from an earlier run is stale — clear it out first.
  if [[ -d "$tree_path" ]]; then
    warn "Worktree already exists — removing: ${tree_path}"
    git worktree remove --force "$tree_path" 2>/dev/null || rm -rf "$tree_path"
  fi

  # Likewise any stale branch of the same name.
  git branch -D "$tree_branch" 2>/dev/null || true

  # Fresh worktree + branch off the current HEAD.
  git worktree add -b "$tree_branch" "$tree_path" HEAD

  # Remember where we came from so cleanup can return, then move in.
  ORIGINAL_REPO_DIR="$(pwd)"
  cd "$tree_path" || { error "Failed to cd into worktree: $tree_path"; return 1; }
  CLEANUP_WORKTREE=true

  success "Worktree ready: ${CYAN}${tree_path}${RESET} (branch: ${tree_branch})"
}
1777
-
1778
# Tear down the worktree created by pipeline_setup_worktree.
# Runs only when CLEANUP_WORKTREE=true, and only removes the tree when the
# pipeline succeeded (PIPELINE_EXIT_CODE == 0); a failed run keeps the
# worktree around for post-mortem inspection.
pipeline_cleanup_worktree() {
  [[ "${CLEANUP_WORKTREE:-false}" == "true" ]] || return 0

  local tree_path
  tree_path="$(pwd)"

  if [[ -n "${ORIGINAL_REPO_DIR:-}" && "$tree_path" != "$ORIGINAL_REPO_DIR" ]]; then
    # Step back out of the worktree before removing it.
    cd "$ORIGINAL_REPO_DIR" 2>/dev/null || cd /
    # Only clean up worktree on success — preserve on failure for inspection
    if [[ "${PIPELINE_EXIT_CODE:-1}" -eq 0 ]]; then
      info "Cleaning up worktree: ${DIM}${tree_path}${RESET}"
      # Resolve the branch attached to this worktree before it disappears.
      local branch_ref=""
      branch_ref=$(git worktree list --porcelain 2>/dev/null | grep -A1 "worktree ${tree_path}$" | grep "^branch " | sed 's|^branch refs/heads/||' || true)
      git worktree remove --force "$tree_path" 2>/dev/null || true
      # Drop the local branch...
      if [[ -n "$branch_ref" ]]; then
        git branch -D "$branch_ref" 2>/dev/null || true
      fi
      # ...and the remote one, if it was ever pushed.
      if [[ -n "$branch_ref" && "${NO_GITHUB:-}" != "true" ]]; then
        git push origin --delete "$branch_ref" 2>/dev/null || true
      fi
    else
      warn "Pipeline failed — worktree preserved for inspection: ${DIM}${tree_path}${RESET}"
      warn "Clean up manually: ${DIM}git worktree remove --force ${tree_path}${RESET}"
    fi
  fi
}
1809
-
1810
- # ─── Dry Run Mode ───────────────────────────────────────────────────────────
1811
- # Shows what would happen without executing
1812
- run_dry_run() {
1813
- echo ""
1814
- echo -e "${BLUE}${BOLD}━━━ Dry Run: Pipeline Validation ━━━${RESET}"
1815
- echo ""
1816
-
1817
- # Validate pipeline config
1818
- if [[ ! -f "$PIPELINE_CONFIG" ]]; then
1819
- error "Pipeline config not found: $PIPELINE_CONFIG"
1820
- return 1
1821
- fi
1822
-
1823
- # Validate JSON structure
1824
- local validate_json
1825
- validate_json=$(jq . "$PIPELINE_CONFIG" 2>/dev/null) || {
1826
- error "Pipeline config is not valid JSON: $PIPELINE_CONFIG"
1827
- return 1
1828
- }
1829
-
1830
- # Extract pipeline metadata
1831
- local pipeline_name stages_count enabled_stages gated_stages
1832
- pipeline_name=$(jq -r '.name // "unknown"' "$PIPELINE_CONFIG")
1833
- stages_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
1834
- enabled_stages=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
1835
- gated_stages=$(jq '[.stages[] | select(.enabled == true and .gate == "approve")] | length' "$PIPELINE_CONFIG")
1836
-
1837
- # Build model (per-stage override or default)
1838
- local default_model stage_model
1839
- default_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")
1840
- stage_model="$MODEL"
1841
- [[ -z "$stage_model" ]] && stage_model="$default_model"
1842
-
1843
- echo -e " ${BOLD}Pipeline:${RESET} $pipeline_name"
1844
- echo -e " ${BOLD}Stages:${RESET} $enabled_stages enabled of $stages_count total"
1845
- if [[ "$SKIP_GATES" == "true" ]]; then
1846
- echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
1847
- else
1848
- echo -e " ${BOLD}Gates:${RESET} $gated_stages approval gate(s)"
1849
- fi
1850
- echo -e " ${BOLD}Model:${RESET} $stage_model"
1851
- echo ""
1852
-
1853
- # Table header
1854
- echo -e "${CYAN}${BOLD}Stage Enabled Gate Model${RESET}"
1855
- echo -e "${CYAN}────────────────────────────────────────${RESET}"
1856
-
1857
- # List all stages
1858
- while IFS= read -r stage_json; do
1859
- local stage_id stage_enabled stage_gate stage_config_model stage_model_display
1860
- stage_id=$(echo "$stage_json" | jq -r '.id')
1861
- stage_enabled=$(echo "$stage_json" | jq -r '.enabled')
1862
- stage_gate=$(echo "$stage_json" | jq -r '.gate')
1863
-
1864
- # Determine stage model (config override or default)
1865
- stage_config_model=$(echo "$stage_json" | jq -r '.config.model // ""')
1866
- if [[ -n "$stage_config_model" && "$stage_config_model" != "null" ]]; then
1867
- stage_model_display="$stage_config_model"
1868
- else
1869
- stage_model_display="$default_model"
1870
- fi
1871
-
1872
- # Format enabled
1873
- local enabled_str
1874
- if [[ "$stage_enabled" == "true" ]]; then
1875
- enabled_str="${GREEN}yes${RESET}"
1876
- else
1877
- enabled_str="${DIM}no${RESET}"
1878
- fi
1879
-
1880
- # Format gate
1881
- local gate_str
1882
- if [[ "$stage_enabled" == "true" ]]; then
1883
- if [[ "$stage_gate" == "approve" ]]; then
1884
- gate_str="${YELLOW}approve${RESET}"
1885
- else
1886
- gate_str="${GREEN}auto${RESET}"
1887
- fi
1888
- else
1889
- gate_str="${DIM}—${RESET}"
1890
- fi
1891
-
1892
- printf "%-15s %s %s %s\n" "$stage_id" "$enabled_str" "$gate_str" "$stage_model_display"
1893
- done < <(jq -c '.stages[]' "$PIPELINE_CONFIG")
1894
-
1895
- echo ""
1896
-
1897
- # Validate required tools
1898
- echo -e "${BLUE}${BOLD}━━━ Tool Validation ━━━${RESET}"
1899
- echo ""
1900
-
1901
- local tool_errors=0
1902
- local required_tools=("git" "jq")
1903
- local optional_tools=("gh" "claude" "bc")
1904
-
1905
- for tool in "${required_tools[@]}"; do
1906
- if command -v "$tool" >/dev/null 2>&1; then
1907
- echo -e " ${GREEN}✓${RESET} $tool"
1908
- else
1909
- echo -e " ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
1910
- tool_errors=$((tool_errors + 1))
1911
- fi
1912
- done
1913
-
1914
- for tool in "${optional_tools[@]}"; do
1915
- if command -v "$tool" >/dev/null 2>&1; then
1916
- echo -e " ${GREEN}✓${RESET} $tool"
1917
- else
1918
- echo -e " ${DIM}○${RESET} $tool"
1919
- fi
1920
- done
1921
-
1922
- echo ""
1923
-
1924
- # Cost estimation: use historical averages from past pipelines when available
1925
- echo -e "${BLUE}${BOLD}━━━ Estimated Resource Usage ━━━${RESET}"
1926
- echo ""
1927
-
1928
- local stages_json
1929
- stages_json=$(jq '[.stages[] | select(.enabled == true)]' "$PIPELINE_CONFIG" 2>/dev/null || echo "[]")
1930
- local est
1931
- est=$(estimate_pipeline_cost "$stages_json")
1932
- local input_tokens_estimate output_tokens_estimate
1933
- input_tokens_estimate=$(echo "$est" | jq -r '.input_tokens // 0')
1934
- output_tokens_estimate=$(echo "$est" | jq -r '.output_tokens // 0')
1935
-
1936
- # Calculate cost based on selected model
1937
- local input_rate output_rate input_cost output_cost total_cost
1938
- input_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.input // 3" 2>/dev/null || echo "3")
1939
- output_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.output // 15" 2>/dev/null || echo "15")
1940
-
1941
- # Cost calculation: tokens per million * rate
1942
- input_cost=$(awk -v tokens="$input_tokens_estimate" -v rate="$input_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
1943
- output_cost=$(awk -v tokens="$output_tokens_estimate" -v rate="$output_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
1944
- total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
1945
-
1946
- echo -e " ${BOLD}Estimated Input Tokens:${RESET} ~$input_tokens_estimate"
1947
- echo -e " ${BOLD}Estimated Output Tokens:${RESET} ~$output_tokens_estimate"
1948
- echo -e " ${BOLD}Model Cost Rate:${RESET} $stage_model"
1949
- echo -e " ${BOLD}Estimated Cost:${RESET} \$$total_cost USD"
1950
- echo ""
1951
-
1952
- # Validate composed pipeline if intelligence is enabled
1953
- if [[ -f "$ARTIFACTS_DIR/composed-pipeline.json" ]] && type composer_validate_pipeline >/dev/null 2>&1; then
1954
- echo -e "${BLUE}${BOLD}━━━ Intelligence-Composed Pipeline ━━━${RESET}"
1955
- echo ""
1956
-
1957
- if composer_validate_pipeline "$(cat "$ARTIFACTS_DIR/composed-pipeline.json" 2>/dev/null || echo "")" 2>/dev/null; then
1958
- echo -e " ${GREEN}✓${RESET} Composed pipeline is valid"
1959
- else
1960
- echo -e " ${YELLOW}⚠${RESET} Composed pipeline validation failed (will use template defaults)"
1961
- fi
1962
- echo ""
1963
- fi
1964
-
1965
- # Final validation result
1966
- if [[ "$tool_errors" -gt 0 ]]; then
1967
- error "Dry run validation failed: $tool_errors required tool(s) missing"
1968
- return 1
1969
- fi
1970
-
1971
- success "Dry run validation passed"
1972
- echo ""
1973
- echo -e " To execute this pipeline: ${DIM}remove --dry-run flag${RESET}"
1974
- echo ""
1975
- return 0
1976
- }
1977
-
1978
- # ─── Reasoning Trace Generation ──────────────────────────────────────────────
1979
- # Multi-step autonomous reasoning traces for pipeline start (before stages run)
1980
-
1981
- generate_reasoning_trace() {
1982
- local job_id="${SHIPWRIGHT_PIPELINE_ID:-$$}"
1983
- local issue="${ISSUE_NUMBER:-}"
1984
- local goal="${GOAL:-}"
1985
-
1986
- # Step 1: Analyze issue complexity and risk
1987
- local complexity="medium"
1988
- local risk_score=50
1989
- if [[ -n "$issue" ]] && type intelligence_analyze_issue >/dev/null 2>&1; then
1990
- local issue_json analysis
1991
- issue_json=$(gh issue view "$issue" --json number,title,body,labels 2>/dev/null || echo "{}")
1992
- if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
1993
- analysis=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "")
1994
- if [[ -n "$analysis" ]]; then
1995
- local comp_num
1996
- comp_num=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
1997
- if [[ "$comp_num" -le 3 ]]; then
1998
- complexity="low"
1999
- elif [[ "$comp_num" -le 6 ]]; then
2000
- complexity="medium"
2001
- else
2002
- complexity="high"
2003
- fi
2004
- risk_score=$((100 - $(echo "$analysis" | jq -r '.success_probability // 50' 2>/dev/null || echo "50")))
2005
- fi
2006
- fi
2007
- elif [[ -n "$goal" ]]; then
2008
- issue_json=$(jq -n --arg title "${goal}" --arg body "" '{title: $title, body: $body, labels: []}')
2009
- if type intelligence_analyze_issue >/dev/null 2>&1; then
2010
- analysis=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "")
2011
- if [[ -n "$analysis" ]]; then
2012
- local comp_num
2013
- comp_num=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
2014
- if [[ "$comp_num" -le 3 ]]; then complexity="low"; elif [[ "$comp_num" -le 6 ]]; then complexity="medium"; else complexity="high"; fi
2015
- risk_score=$((100 - $(echo "$analysis" | jq -r '.success_probability // 50' 2>/dev/null || echo "50")))
2016
- fi
2017
- fi
2018
- fi
2019
-
2020
- # Step 2: Query similar past issues
2021
- local similar_context=""
2022
- if type memory_semantic_search >/dev/null 2>&1 && [[ -n "$goal" ]]; then
2023
- similar_context=$(memory_semantic_search "$goal" "" 3 2>/dev/null || echo "")
2024
- fi
2025
-
2026
- # Step 3: Select template using Thompson sampling
2027
- local selected_template="${PIPELINE_TEMPLATE:-}"
2028
- if [[ -z "$selected_template" ]] && type thompson_select_template >/dev/null 2>&1; then
2029
- selected_template=$(thompson_select_template "$complexity" 2>/dev/null || echo "standard")
2030
- fi
2031
- [[ -z "$selected_template" ]] && selected_template="standard"
2032
-
2033
- # Step 4: Predict failure modes from memory
2034
- local failure_predictions=""
2035
- if type memory_semantic_search >/dev/null 2>&1 && [[ -n "$goal" ]]; then
2036
- failure_predictions=$(memory_semantic_search "failure error $goal" "" 3 2>/dev/null || echo "")
2037
- fi
2038
-
2039
- # Save reasoning traces to DB
2040
- if type db_save_reasoning_trace >/dev/null 2>&1; then
2041
- db_save_reasoning_trace "$job_id" "complexity_analysis" \
2042
- "issue=$issue goal=$goal" \
2043
- "Analyzed complexity=$complexity risk=$risk_score" \
2044
- "complexity=$complexity risk_score=$risk_score" 0.7 2>/dev/null || true
2045
-
2046
- db_save_reasoning_trace "$job_id" "template_selection" \
2047
- "complexity=$complexity historical_outcomes" \
2048
- "Thompson sampling over historical success rates" \
2049
- "template=$selected_template" 0.8 2>/dev/null || true
2050
-
2051
- if [[ -n "$similar_context" && "$similar_context" != "[]" ]]; then
2052
- db_save_reasoning_trace "$job_id" "similar_issues" \
2053
- "$goal" \
2054
- "Found similar past issues for context injection" \
2055
- "$similar_context" 0.6 2>/dev/null || true
2056
- fi
2057
-
2058
- if [[ -n "$failure_predictions" && "$failure_predictions" != "[]" ]]; then
2059
- db_save_reasoning_trace "$job_id" "failure_prediction" \
2060
- "$goal" \
2061
- "Predicted potential failure modes from history" \
2062
- "$failure_predictions" 0.5 2>/dev/null || true
2063
- fi
2064
- fi
2065
-
2066
- # Export for use by pipeline stages
2067
- [[ -n "$selected_template" && -z "${PIPELINE_TEMPLATE:-}" ]] && export PIPELINE_TEMPLATE="$selected_template"
2068
-
2069
- emit_event "reasoning.trace" "job_id=$job_id" "complexity=$complexity" "risk=$risk_score" "template=${selected_template:-standard}" 2>/dev/null || true
2070
- }
2071
-
2072
- # ─── Subcommands ────────────────────────────────────────────────────────────
2073
-
2074
- pipeline_start() {
2075
- # Handle --repo flag: change to directory before running
2076
- if [[ -n "$REPO_OVERRIDE" ]]; then
2077
- if [[ ! -d "$REPO_OVERRIDE" ]]; then
2078
- error "Directory does not exist: $REPO_OVERRIDE"
2079
- exit 1
2080
- fi
2081
- if ! cd "$REPO_OVERRIDE" 2>/dev/null; then
2082
- error "Cannot cd to: $REPO_OVERRIDE"
2083
- exit 1
2084
- fi
2085
- if ! git rev-parse --show-toplevel >/dev/null 2>&1; then
2086
- error "Not a git repository: $REPO_OVERRIDE"
2087
- exit 1
2088
- fi
2089
- ORIGINAL_REPO_DIR="$(pwd)"
2090
- info "Using repository: $ORIGINAL_REPO_DIR"
2091
- fi
2092
-
2093
- # Bootstrap optimization & memory if cold start (before first intelligence use)
2094
- if [[ -f "$SCRIPT_DIR/lib/bootstrap.sh" ]]; then
2095
- source "$SCRIPT_DIR/lib/bootstrap.sh"
2096
- [[ ! -f "$HOME/.shipwright/optimization/iteration-model.json" ]] && bootstrap_optimization 2>/dev/null || true
2097
- [[ ! -f "$HOME/.shipwright/memory/patterns.json" ]] && bootstrap_memory 2>/dev/null || true
2098
- fi
2099
-
2100
- if [[ -z "$GOAL" && -z "$ISSUE_NUMBER" ]]; then
2101
- error "Must provide --goal or --issue"
2102
- echo -e " Example: ${DIM}shipwright pipeline start --goal \"Add JWT auth\"${RESET}"
2103
- echo -e " Example: ${DIM}shipwright pipeline start --issue 123${RESET}"
2104
- exit 1
2105
- fi
2106
-
2107
- if ! command -v jq >/dev/null 2>&1; then
2108
- error "jq is required. Install it: brew install jq"
2109
- exit 1
2110
- fi
2111
-
2112
- # Set up worktree isolation if requested
2113
- if [[ "$AUTO_WORKTREE" == "true" ]]; then
2114
- pipeline_setup_worktree
2115
- fi
2116
-
2117
- # Register worktree cleanup on exit (chain with existing cleanup)
2118
- if [[ "$CLEANUP_WORKTREE" == "true" ]]; then
2119
- trap 'pipeline_cleanup_worktree; cleanup_on_exit' SIGINT SIGTERM
2120
- trap 'pipeline_cleanup_worktree; cleanup_on_exit' EXIT
2121
- fi
2122
-
2123
- setup_dirs
2124
-
2125
- # Acquire durable lock to prevent concurrent pipelines on the same issue/goal
2126
- _PIPELINE_LOCK_ID=""
2127
- if type acquire_lock >/dev/null 2>&1; then
2128
- _PIPELINE_LOCK_ID="pipeline-${ISSUE_NUMBER:-goal-$$}"
2129
- if ! acquire_lock "$_PIPELINE_LOCK_ID" 5 2>/dev/null; then
2130
- error "Another pipeline is already running for this issue/goal"
2131
- echo -e " Wait for it to finish, or remove stale lock:"
2132
- echo -e " ${DIM}rm -rf ~/.shipwright/durable/locks/${_PIPELINE_LOCK_ID}.lock${RESET}"
2133
- _PIPELINE_LOCK_ID=""
2134
- exit 1
2135
- fi
2136
- fi
2137
-
2138
- # Generate reasoning trace (complexity analysis, template selection, failure predictions)
2139
- local user_specified_pipeline="$PIPELINE_NAME"
2140
- generate_reasoning_trace 2>/dev/null || true
2141
- if [[ -n "${PIPELINE_TEMPLATE:-}" && "$user_specified_pipeline" == "standard" ]]; then
2142
- PIPELINE_NAME="$PIPELINE_TEMPLATE"
2143
- fi
2144
-
2145
- # Check for existing pipeline
2146
- if [[ -f "$STATE_FILE" ]]; then
2147
- local existing_status
2148
- existing_status=$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)
2149
- if [[ "$existing_status" == "running" || "$existing_status" == "paused" || "$existing_status" == "interrupted" ]]; then
2150
- warn "A pipeline is already in progress (status: $existing_status)"
2151
- echo -e " Resume it: ${DIM}shipwright pipeline resume${RESET}"
2152
- echo -e " Abort it: ${DIM}shipwright pipeline abort${RESET}"
2153
- exit 1
2154
- fi
2155
- fi
2156
-
2157
- # Pre-flight checks
2158
- preflight_checks || exit 1
2159
-
2160
- # Initialize GitHub integration
2161
- gh_init
2162
-
2163
- load_pipeline_config
2164
-
2165
- # Checkpoint resume: when --resume is passed, try DB first, then file-based
2166
- checkpoint_stage=""
2167
- checkpoint_iteration=0
2168
- if $RESUME_FROM_CHECKPOINT && type db_load_checkpoint >/dev/null 2>&1; then
2169
- local saved_checkpoint
2170
- saved_checkpoint=$(db_load_checkpoint "pipeline-${SHIPWRIGHT_PIPELINE_ID:-$$}" 2>/dev/null || echo "")
2171
- if [[ -n "$saved_checkpoint" ]]; then
2172
- checkpoint_stage=$(echo "$saved_checkpoint" | jq -r '.stage // ""' 2>/dev/null || echo "")
2173
- if [[ -n "$checkpoint_stage" ]]; then
2174
- info "Resuming from DB checkpoint: stage=$checkpoint_stage"
2175
- checkpoint_iteration=$(echo "$saved_checkpoint" | jq -r '.iteration // 0' 2>/dev/null || echo "0")
2176
- # Build COMPLETED_STAGES: all enabled stages before checkpoint_stage
2177
- local enabled_list before_list=""
2178
- enabled_list=$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" 2>/dev/null) || true
2179
- local s
2180
- while IFS= read -r s; do
2181
- [[ -z "$s" ]] && continue
2182
- if [[ "$s" == "$checkpoint_stage" ]]; then
2183
- break
2184
- fi
2185
- [[ -n "$before_list" ]] && before_list="${before_list},${s}" || before_list="$s"
2186
- done <<< "$enabled_list"
2187
- if [[ -n "$before_list" ]]; then
2188
- COMPLETED_STAGES="${before_list}"
2189
- SELF_HEAL_COUNT="${checkpoint_iteration}"
2190
- fi
2191
- fi
2192
- fi
2193
- fi
2194
- if $RESUME_FROM_CHECKPOINT && [[ -z "$checkpoint_stage" ]] && [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
2195
- local cp_dir="${ARTIFACTS_DIR}/checkpoints"
2196
- local latest_cp="" latest_mtime=0
2197
- local f
2198
- for f in "$cp_dir"/*-checkpoint.json; do
2199
- [[ -f "$f" ]] || continue
2200
- local mtime
2201
- mtime=$(file_mtime "$f" 2>/dev/null || echo "0")
2202
- if [[ "${mtime:-0}" -gt "$latest_mtime" ]]; then
2203
- latest_mtime="${mtime}"
2204
- latest_cp="$f"
2205
- fi
2206
- done
2207
- if [[ -n "$latest_cp" && -x "$SCRIPT_DIR/sw-checkpoint.sh" ]]; then
2208
- checkpoint_stage="$(basename "$latest_cp" -checkpoint.json)"
2209
- local cp_json
2210
- cp_json="$("$SCRIPT_DIR/sw-checkpoint.sh" restore --stage "$checkpoint_stage" 2>/dev/null)" || true
2211
- if [[ -n "$cp_json" ]] && command -v jq >/dev/null 2>&1; then
2212
- checkpoint_iteration="$(echo "$cp_json" | jq -r '.iteration // 0' 2>/dev/null)" || checkpoint_iteration=0
2213
- info "Checkpoint resume: stage=${checkpoint_stage} iteration=${checkpoint_iteration}"
2214
- # Build COMPLETED_STAGES: all enabled stages before checkpoint_stage
2215
- local enabled_list before_list=""
2216
- enabled_list="$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" 2>/dev/null)" || true
2217
- local s
2218
- while IFS= read -r s; do
2219
- [[ -z "$s" ]] && continue
2220
- if [[ "$s" == "$checkpoint_stage" ]]; then
2221
- break
2222
- fi
2223
- [[ -n "$before_list" ]] && before_list="${before_list},${s}" || before_list="$s"
2224
- done <<< "$enabled_list"
2225
- if [[ -n "$before_list" ]]; then
2226
- COMPLETED_STAGES="${before_list}"
2227
- SELF_HEAL_COUNT="${checkpoint_iteration}"
2228
- fi
2229
- fi
2230
- fi
2231
- fi
2232
-
2233
- # Restore from state file if resuming (failed/interrupted pipeline); else initialize fresh
2234
- if $RESUME_FROM_CHECKPOINT && [[ -f "$STATE_FILE" ]]; then
2235
- local existing_status
2236
- existing_status="$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)"
2237
- if [[ "$existing_status" == "failed" || "$existing_status" == "interrupted" ]]; then
2238
- resume_state
2239
- else
2240
- initialize_state
2241
- fi
2242
- else
2243
- initialize_state
2244
- fi
2245
-
2246
- # CI resume: restore branch + goal context when intake is skipped
2247
- if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "intake"; then
2248
- # Intake was completed in a previous run — restore context
2249
- # The workflow merges the partial work branch, so code changes are on HEAD
2250
-
2251
- # Restore GOAL from issue if not already set
2252
- if [[ -z "$GOAL" && -n "$ISSUE_NUMBER" ]]; then
2253
- GOAL=$(_timeout "$(_config_get_int "network.gh_timeout" 30 2>/dev/null || echo 30)" gh issue view "$ISSUE_NUMBER" --json title -q .title 2>/dev/null || echo "Issue #${ISSUE_NUMBER}")
2254
- info "CI resume: goal from issue — ${GOAL}"
2255
- fi
2256
-
2257
- # Restore branch context
2258
- if [[ -z "$GIT_BRANCH" ]]; then
2259
- local ci_branch="ci/issue-${ISSUE_NUMBER}"
2260
- info "CI resume: creating branch ${ci_branch} from current HEAD"
2261
- git checkout -b "$ci_branch" 2>/dev/null || git checkout "$ci_branch" 2>/dev/null || true
2262
- GIT_BRANCH="$ci_branch"
2263
- elif [[ "$(git branch --show-current 2>/dev/null)" != "$GIT_BRANCH" ]]; then
2264
- info "CI resume: checking out branch ${GIT_BRANCH}"
2265
- git checkout -b "$GIT_BRANCH" 2>/dev/null || git checkout "$GIT_BRANCH" 2>/dev/null || true
2266
- fi
2267
- write_state 2>/dev/null || true
2268
- fi
2269
-
2270
- echo ""
2271
- echo -e "${PURPLE}${BOLD}╔═══════════════════════════════════════════════════════════════════╗${RESET}"
2272
- echo -e "${PURPLE}${BOLD}║ shipwright pipeline — Autonomous Feature Delivery ║${RESET}"
2273
- echo -e "${PURPLE}${BOLD}╚═══════════════════════════════════════════════════════════════════╝${RESET}"
2274
- echo ""
2275
-
2276
- # Comprehensive environment summary
2277
- if [[ -n "$GOAL" ]]; then
2278
- echo -e " ${BOLD}Goal:${RESET} $GOAL"
2279
- fi
2280
- if [[ -n "$ISSUE_NUMBER" ]]; then
2281
- echo -e " ${BOLD}Issue:${RESET} #$ISSUE_NUMBER"
2282
- fi
2283
-
2284
- echo -e " ${BOLD}Pipeline:${RESET} $PIPELINE_NAME"
2285
-
2286
- local enabled_stages
2287
- enabled_stages=$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" | tr '\n' ' ')
2288
- echo -e " ${BOLD}Stages:${RESET} $enabled_stages"
2289
-
2290
- local gate_count
2291
- gate_count=$(jq '[.stages[] | select(.gate == "approve" and .enabled == true)] | length' "$PIPELINE_CONFIG")
2292
- if [[ "$HEADLESS" == "true" ]]; then
2293
- echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (headless — non-interactive stdin detected)${RESET}"
2294
- elif [[ "$SKIP_GATES" == "true" ]]; then
2295
- echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
2296
- else
2297
- echo -e " ${BOLD}Gates:${RESET} ${gate_count} approval gate(s)"
2298
- fi
2299
-
2300
- echo -e " ${BOLD}Model:${RESET} ${MODEL:-$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")}"
2301
- echo -e " ${BOLD}Self-heal:${RESET} ${BUILD_TEST_RETRIES} retry cycle(s)"
2302
-
2303
- if [[ "$GH_AVAILABLE" == "true" ]]; then
2304
- echo -e " ${BOLD}GitHub:${RESET} ${GREEN}✓${RESET} ${DIM}${REPO_OWNER}/${REPO_NAME}${RESET}"
2305
- else
2306
- echo -e " ${BOLD}GitHub:${RESET} ${DIM}disabled${RESET}"
2307
- fi
2308
-
2309
- if [[ -n "$SLACK_WEBHOOK" ]]; then
2310
- echo -e " ${BOLD}Slack:${RESET} ${GREEN}✓${RESET} notifications enabled"
2311
- fi
2312
-
2313
- echo ""
2314
-
2315
- if [[ "$DRY_RUN" == "true" ]]; then
2316
- run_dry_run
2317
- return $?
2318
- fi
2319
-
2320
- # Capture predictions for feedback loop (intelligence → actuals → learning)
2321
- if type intelligence_analyze_issue >/dev/null 2>&1 && (type intelligence_estimate_iterations >/dev/null 2>&1 || type intelligence_predict_cost >/dev/null 2>&1); then
2322
- local issue_json="${INTELLIGENCE_ANALYSIS:-}"
2323
- if [[ -z "$issue_json" || "$issue_json" == "{}" ]]; then
2324
- if [[ -n "$ISSUE_NUMBER" ]]; then
2325
- issue_json=$(gh issue view "$ISSUE_NUMBER" --json number,title,body,labels 2>/dev/null || echo "{}")
2326
- else
2327
- issue_json=$(jq -n --arg title "${GOAL:-untitled}" --arg body "" '{title: $title, body: $body, labels: []}')
2328
- fi
2329
- if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
2330
- issue_json=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "{}")
2331
- fi
2332
- fi
2333
- if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
2334
- if type intelligence_estimate_iterations >/dev/null 2>&1; then
2335
- PREDICTED_ITERATIONS=$(intelligence_estimate_iterations "$issue_json" "" 2>/dev/null || echo "")
2336
- export PREDICTED_ITERATIONS
2337
- fi
2338
- if type intelligence_predict_cost >/dev/null 2>&1; then
2339
- local cost_json
2340
- cost_json=$(intelligence_predict_cost "$issue_json" "{}" 2>/dev/null || echo "{}")
2341
- PREDICTED_COST=$(echo "$cost_json" | jq -r '.estimated_cost_usd // empty' 2>/dev/null || echo "")
2342
- export PREDICTED_COST
2343
- fi
2344
- fi
2345
- fi
2346
-
2347
- # Start background heartbeat writer
2348
- start_heartbeat
2349
-
2350
- # Initialize GitHub Check Runs for all pipeline stages
2351
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_pipeline_start >/dev/null 2>&1; then
2352
- local head_sha
2353
- head_sha=$(git rev-parse HEAD 2>/dev/null || echo "")
2354
- if [[ -n "$head_sha" && -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2355
- local stages_json
2356
- stages_json=$(jq -c '[.stages[] | select(.enabled == true) | .id]' "$PIPELINE_CONFIG" 2>/dev/null || echo '[]')
2357
- gh_checks_pipeline_start "$REPO_OWNER" "$REPO_NAME" "$head_sha" "$stages_json" >/dev/null 2>/dev/null || true
2358
- info "GitHub Checks: created check runs for pipeline stages"
2359
- fi
2360
- fi
2361
-
2362
- # Send start notification
2363
- notify "Pipeline Started" "Goal: ${GOAL}\nPipeline: ${PIPELINE_NAME}" "info"
2364
-
2365
- emit_event "pipeline.started" \
2366
- "issue=${ISSUE_NUMBER:-0}" \
2367
- "template=${PIPELINE_NAME}" \
2368
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2369
- "machine=$(hostname 2>/dev/null || echo "unknown")" \
2370
- "pipeline=${PIPELINE_NAME}" \
2371
- "model=${MODEL:-opus}" \
2372
- "goal=${GOAL}"
2373
-
2374
- # Record pipeline run in SQLite for dashboard visibility
2375
- if type add_pipeline_run >/dev/null 2>&1; then
2376
- add_pipeline_run "${SHIPWRIGHT_PIPELINE_ID}" "${ISSUE_NUMBER:-0}" "${GOAL}" "${BRANCH:-}" "${PIPELINE_NAME}" 2>/dev/null || true
2377
- fi
2378
-
2379
- # Durable WAL: publish pipeline start event
2380
- if type publish_event >/dev/null 2>&1; then
2381
- publish_event "pipeline.started" "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"pipeline\":\"${PIPELINE_NAME}\",\"goal\":\"${GOAL:0:200}\"}" 2>/dev/null || true
2382
- fi
2383
-
2384
- run_pipeline
2385
- local exit_code=$?
2386
- PIPELINE_EXIT_CODE="$exit_code"
2387
-
2388
- # Compute total cost for pipeline.completed (prefer actual from Claude when available)
2389
- local model_key="${MODEL:-sonnet}"
2390
- local total_cost
2391
- if [[ -n "${TOTAL_COST_USD:-}" && "${TOTAL_COST_USD}" != "0" && "${TOTAL_COST_USD}" != "null" ]]; then
2392
- total_cost="${TOTAL_COST_USD}"
2393
- else
2394
- local input_cost output_cost
2395
- input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2396
- output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2397
- total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
2398
- fi
2399
-
2400
- # Send completion notification + event
2401
- local total_dur_s=""
2402
- [[ -n "$PIPELINE_START_EPOCH" ]] && total_dur_s=$(( $(now_epoch) - PIPELINE_START_EPOCH ))
2403
- if [[ "$exit_code" -eq 0 ]]; then
2404
- local total_dur=""
2405
- [[ -n "$total_dur_s" ]] && total_dur=$(format_duration "$total_dur_s")
2406
- local pr_url
2407
- pr_url=$(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo "")
2408
- notify "Pipeline Complete" "Goal: ${GOAL}\nDuration: ${total_dur:-unknown}\nPR: ${pr_url:-N/A}" "success"
2409
- emit_event "pipeline.completed" \
2410
- "issue=${ISSUE_NUMBER:-0}" \
2411
- "result=success" \
2412
- "duration_s=${total_dur_s:-0}" \
2413
- "iterations=$((SELF_HEAL_COUNT + 1))" \
2414
- "template=${PIPELINE_NAME}" \
2415
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2416
- "stages_passed=${PIPELINE_STAGES_PASSED:-0}" \
2417
- "slowest_stage=${PIPELINE_SLOWEST_STAGE:-}" \
2418
- "pr_url=${pr_url:-}" \
2419
- "agent_id=${PIPELINE_AGENT_ID}" \
2420
- "input_tokens=$TOTAL_INPUT_TOKENS" \
2421
- "output_tokens=$TOTAL_OUTPUT_TOKENS" \
2422
- "total_cost=$total_cost" \
2423
- "self_heal_count=$SELF_HEAL_COUNT"
2424
-
2425
- # Update pipeline run status in SQLite
2426
- if type update_pipeline_status >/dev/null 2>&1; then
2427
- update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "completed" "${PIPELINE_SLOWEST_STAGE:-}" "complete" "${total_dur_s:-0}" 2>/dev/null || true
2428
- fi
2429
-
2430
- # Auto-ingest pipeline outcome into recruit profiles
2431
- if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
2432
- bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
2433
- fi
2434
-
2435
- # Capture success patterns to memory (learn what works — parallel the failure path)
2436
- if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
2437
- bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
2438
- fi
2439
- # Update memory baselines with successful run metrics
2440
- if type memory_update_metrics >/dev/null 2>&1; then
2441
- memory_update_metrics "build_duration_s" "${total_dur_s:-0}" 2>/dev/null || true
2442
- memory_update_metrics "total_cost_usd" "${total_cost:-0}" 2>/dev/null || true
2443
- memory_update_metrics "iterations" "$((SELF_HEAL_COUNT + 1))" 2>/dev/null || true
2444
- fi
2445
-
2446
- # Record positive fix outcome if self-healing succeeded
2447
- if [[ "$SELF_HEAL_COUNT" -gt 0 && -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
2448
- local _success_sig
2449
- _success_sig=$(tail -30 "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//' || true)
2450
- if [[ -n "$_success_sig" ]]; then
2451
- bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_success_sig" "true" "true" 2>/dev/null || true
2452
- fi
2453
- fi
2454
- else
2455
- notify "Pipeline Failed" "Goal: ${GOAL}\nFailed at: ${CURRENT_STAGE_ID:-unknown}" "error"
2456
- emit_event "pipeline.completed" \
2457
- "issue=${ISSUE_NUMBER:-0}" \
2458
- "result=failure" \
2459
- "duration_s=${total_dur_s:-0}" \
2460
- "iterations=$((SELF_HEAL_COUNT + 1))" \
2461
- "template=${PIPELINE_NAME}" \
2462
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2463
- "failed_stage=${CURRENT_STAGE_ID:-unknown}" \
2464
- "error_class=${LAST_STAGE_ERROR_CLASS:-unknown}" \
2465
- "agent_id=${PIPELINE_AGENT_ID}" \
2466
- "input_tokens=$TOTAL_INPUT_TOKENS" \
2467
- "output_tokens=$TOTAL_OUTPUT_TOKENS" \
2468
- "total_cost=$total_cost" \
2469
- "self_heal_count=$SELF_HEAL_COUNT"
2470
-
2471
- # Update pipeline run status in SQLite
2472
- if type update_pipeline_status >/dev/null 2>&1; then
2473
- update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "failed" "${CURRENT_STAGE_ID:-unknown}" "failed" "${total_dur_s:-0}" 2>/dev/null || true
2474
- fi
2475
-
2476
- # Auto-ingest pipeline outcome into recruit profiles
2477
- if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
2478
- bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
2479
- fi
2480
-
2481
- # Capture failure learnings to memory
2482
- if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
2483
- bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
2484
- bash "$SCRIPT_DIR/sw-memory.sh" analyze-failure "$ARTIFACTS_DIR/.claude-tokens-${CURRENT_STAGE_ID:-build}.log" "${CURRENT_STAGE_ID:-unknown}" 2>/dev/null || true
2485
-
2486
- # Record negative fix outcome — memory suggested a fix but it didn't resolve the issue
2487
- # This closes the negative side of the fix-outcome feedback loop
2488
- if [[ "$SELF_HEAL_COUNT" -gt 0 ]]; then
2489
- local _fail_sig
2490
- _fail_sig=$(tail -30 "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//' || true)
2491
- if [[ -n "$_fail_sig" ]]; then
2492
- bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_fail_sig" "true" "false" 2>/dev/null || true
2493
- fi
2494
- fi
2495
- fi
2496
- fi
2497
-
2498
- # ── Prediction Validation Events ──
2499
- # Compare predicted vs actual outcomes for feedback loop calibration
2500
- local pipeline_success="false"
2501
- [[ "$exit_code" -eq 0 ]] && pipeline_success="true"
2502
-
2503
- # Complexity prediction vs actual iterations
2504
- emit_event "prediction.validated" \
2505
- "issue=${ISSUE_NUMBER:-0}" \
2506
- "predicted_complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
2507
- "actual_iterations=$SELF_HEAL_COUNT" \
2508
- "success=$pipeline_success"
2509
-
2510
- # Close intelligence prediction feedback loop — validate predicted vs actual
2511
- if type intelligence_validate_prediction >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
2512
- intelligence_validate_prediction \
2513
- "$ISSUE_NUMBER" \
2514
- "${INTELLIGENCE_COMPLEXITY:-0}" \
2515
- "${SELF_HEAL_COUNT:-0}" \
2516
- "$pipeline_success" 2>/dev/null || true
2517
- fi
2518
-
2519
- # Validate iterations prediction against actuals (cost validation moved below after total_cost is computed)
2520
- local ACTUAL_ITERATIONS=$((SELF_HEAL_COUNT + 1))
2521
- if [[ -n "${PREDICTED_ITERATIONS:-}" ]] && type intelligence_validate_prediction >/dev/null 2>&1; then
2522
- intelligence_validate_prediction "iterations" "$PREDICTED_ITERATIONS" "$ACTUAL_ITERATIONS" 2>/dev/null || true
2523
- fi
2524
-
2525
- # Close predictive anomaly feedback loop — confirm whether flagged anomalies were real
2526
- if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
2527
- local _actual_failure="false"
2528
- [[ "$exit_code" -ne 0 ]] && _actual_failure="true"
2529
- # Confirm anomalies for build and test stages based on pipeline outcome
2530
- for _anomaly_stage in build test; do
2531
- bash "$SCRIPT_DIR/sw-predictive.sh" confirm-anomaly "$_anomaly_stage" "duration_s" "$_actual_failure" 2>/dev/null || true
2532
- done
2533
- fi
2534
-
2535
- # Template outcome tracking
2536
- emit_event "template.outcome" \
2537
- "issue=${ISSUE_NUMBER:-0}" \
2538
- "template=${PIPELINE_NAME}" \
2539
- "success=$pipeline_success" \
2540
- "duration_s=${total_dur_s:-0}" \
2541
- "complexity=${INTELLIGENCE_COMPLEXITY:-0}"
2542
-
2543
- # Risk prediction vs actual failure
2544
- local predicted_risk="${INTELLIGENCE_RISK_SCORE:-0}"
2545
- emit_event "risk.outcome" \
2546
- "issue=${ISSUE_NUMBER:-0}" \
2547
- "predicted_risk=$predicted_risk" \
2548
- "actual_failure=$([[ "$exit_code" -ne 0 ]] && echo "true" || echo "false")"
2549
-
2550
- # Per-stage model outcome events (read from stage timings)
2551
- local routing_log="${ARTIFACTS_DIR}/model-routing.log"
2552
- if [[ -f "$routing_log" ]]; then
2553
- while IFS='|' read -r s_stage s_model s_success; do
2554
- [[ -z "$s_stage" ]] && continue
2555
- emit_event "model.outcome" \
2556
- "issue=${ISSUE_NUMBER:-0}" \
2557
- "stage=$s_stage" \
2558
- "model=$s_model" \
2559
- "success=$s_success"
2560
- done < "$routing_log"
2561
- fi
2562
-
2563
- # Record pipeline outcome for model routing feedback loop
2564
- if type optimize_analyze_outcome >/dev/null 2>&1; then
2565
- optimize_analyze_outcome "$STATE_FILE" 2>/dev/null || true
2566
- fi
2567
-
2568
- # Auto-learn after pipeline completion (non-blocking)
2569
- if type optimize_tune_templates &>/dev/null; then
2570
- (
2571
- optimize_tune_templates 2>/dev/null
2572
- optimize_learn_iterations 2>/dev/null
2573
- optimize_route_models 2>/dev/null
2574
- optimize_learn_risk_keywords 2>/dev/null
2575
- ) &
2576
- fi
2577
-
2578
- if type memory_finalize_pipeline >/dev/null 2>&1; then
2579
- memory_finalize_pipeline "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
2580
- fi
2581
-
2582
- # Broadcast discovery for cross-pipeline learning
2583
- if type broadcast_discovery >/dev/null 2>&1; then
2584
- local _disc_result="failure"
2585
- [[ "$exit_code" -eq 0 ]] && _disc_result="success"
2586
- local _disc_files=""
2587
- _disc_files=$(git diff --name-only HEAD~1 HEAD 2>/dev/null | head -20 | tr '\n' ',' || true)
2588
- broadcast_discovery "pipeline_${_disc_result}" "${_disc_files:-unknown}" \
2589
- "Pipeline ${_disc_result} for issue #${ISSUE_NUMBER:-0} (${PIPELINE_NAME:-unknown} template, stage=${CURRENT_STAGE_ID:-unknown})" \
2590
- "${_disc_result}" 2>/dev/null || true
2591
- fi
2592
-
2593
- # Emit cost event — prefer actual cost from Claude CLI when available
2594
- local model_key="${MODEL:-sonnet}"
2595
- local total_cost
2596
- if [[ -n "${TOTAL_COST_USD:-}" && "${TOTAL_COST_USD}" != "0" && "${TOTAL_COST_USD}" != "null" ]]; then
2597
- total_cost="${TOTAL_COST_USD}"
2598
- else
2599
- # Fallback: estimate from token counts and model rates
2600
- local input_cost output_cost
2601
- input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2602
- output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
2603
- total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
2604
- fi
2605
-
2606
- emit_event "pipeline.cost" \
2607
- "input_tokens=$TOTAL_INPUT_TOKENS" \
2608
- "output_tokens=$TOTAL_OUTPUT_TOKENS" \
2609
- "model=$model_key" \
2610
- "cost_usd=$total_cost"
2611
-
2612
- # Persist cost entry to costs.json + SQLite (was missing — tokens accumulated but never written)
2613
- if type cost_record >/dev/null 2>&1; then
2614
- cost_record "$TOTAL_INPUT_TOKENS" "$TOTAL_OUTPUT_TOKENS" "$model_key" "pipeline" "${ISSUE_NUMBER:-}" 2>/dev/null || true
2615
- fi
2616
-
2617
- # Record pipeline outcome for Thompson sampling / outcome-based learning
2618
- if type db_record_outcome >/dev/null 2>&1; then
2619
- local _outcome_success=0
2620
- [[ "$exit_code" -eq 0 ]] && _outcome_success=1
2621
- local _outcome_complexity="medium"
2622
- [[ "${INTELLIGENCE_COMPLEXITY:-5}" -le 3 ]] && _outcome_complexity="low"
2623
- [[ "${INTELLIGENCE_COMPLEXITY:-5}" -ge 7 ]] && _outcome_complexity="high"
2624
- db_record_outcome \
2625
- "${SHIPWRIGHT_PIPELINE_ID:-pipeline-$$-${ISSUE_NUMBER:-0}}" \
2626
- "${ISSUE_NUMBER:-}" \
2627
- "${PIPELINE_NAME:-standard}" \
2628
- "$_outcome_success" \
2629
- "${total_dur_s:-0}" \
2630
- "${SELF_HEAL_COUNT:-0}" \
2631
- "${total_cost:-0}" \
2632
- "$_outcome_complexity" 2>/dev/null || true
2633
- fi
2634
-
2635
- # Validate cost prediction against actual (after total_cost is computed)
2636
- if [[ -n "${PREDICTED_COST:-}" ]] && type intelligence_validate_prediction >/dev/null 2>&1; then
2637
- intelligence_validate_prediction "cost" "$PREDICTED_COST" "$total_cost" 2>/dev/null || true
2638
- fi
2639
-
2640
- return $exit_code
2641
- }
2642
-
2643
- pipeline_resume() {
2644
- setup_dirs
2645
- resume_state
2646
- echo ""
2647
- run_pipeline
2648
- }
2649
-
2650
- pipeline_status() {
2651
- setup_dirs
2652
-
2653
- if [[ ! -f "$STATE_FILE" ]]; then
2654
- info "No active pipeline."
2655
- echo -e " Start one: ${DIM}shipwright pipeline start --goal \"...\"${RESET}"
2656
- return
2657
- fi
2658
-
2659
- echo ""
2660
- echo -e "${PURPLE}${BOLD}━━━ Pipeline Status ━━━${RESET}"
2661
- echo ""
2662
-
2663
- local p_name="" p_goal="" p_status="" p_branch="" p_stage="" p_started="" p_issue="" p_elapsed="" p_pr=""
2664
- local in_frontmatter=false
2665
- while IFS= read -r line; do
2666
- if [[ "$line" == "---" ]]; then
2667
- if $in_frontmatter; then break; else in_frontmatter=true; continue; fi
2668
- fi
2669
- if $in_frontmatter; then
2670
- case "$line" in
2671
- pipeline:*) p_name="$(echo "${line#pipeline:}" | xargs)" ;;
2672
- goal:*) p_goal="$(echo "${line#goal:}" | sed 's/^ *"//;s/" *$//')" ;;
2673
- status:*) p_status="$(echo "${line#status:}" | xargs)" ;;
2674
- branch:*) p_branch="$(echo "${line#branch:}" | sed 's/^ *"//;s/" *$//')" ;;
2675
- current_stage:*) p_stage="$(echo "${line#current_stage:}" | xargs)" ;;
2676
- started_at:*) p_started="$(echo "${line#started_at:}" | xargs)" ;;
2677
- issue:*) p_issue="$(echo "${line#issue:}" | sed 's/^ *"//;s/" *$//')" ;;
2678
- elapsed:*) p_elapsed="$(echo "${line#elapsed:}" | xargs)" ;;
2679
- pr_number:*) p_pr="$(echo "${line#pr_number:}" | xargs)" ;;
2680
- esac
2681
- fi
2682
- done < "$STATE_FILE"
2683
-
2684
- local status_icon
2685
- case "$p_status" in
2686
- running) status_icon="${CYAN}●${RESET}" ;;
2687
- complete) status_icon="${GREEN}✓${RESET}" ;;
2688
- paused) status_icon="${YELLOW}⏸${RESET}" ;;
2689
- interrupted) status_icon="${YELLOW}⚡${RESET}" ;;
2690
- failed) status_icon="${RED}✗${RESET}" ;;
2691
- aborted) status_icon="${RED}◼${RESET}" ;;
2692
- *) status_icon="${DIM}○${RESET}" ;;
2693
- esac
2694
-
2695
- echo -e " ${BOLD}Pipeline:${RESET} $p_name"
2696
- echo -e " ${BOLD}Goal:${RESET} $p_goal"
2697
- echo -e " ${BOLD}Status:${RESET} $status_icon $p_status"
2698
- [[ -n "$p_branch" ]] && echo -e " ${BOLD}Branch:${RESET} $p_branch"
2699
- [[ -n "$p_issue" ]] && echo -e " ${BOLD}Issue:${RESET} $p_issue"
2700
- [[ -n "$p_pr" ]] && echo -e " ${BOLD}PR:${RESET} #$p_pr"
2701
- [[ -n "$p_stage" ]] && echo -e " ${BOLD}Stage:${RESET} $p_stage"
2702
- [[ -n "$p_started" ]] && echo -e " ${BOLD}Started:${RESET} $p_started"
2703
- [[ -n "$p_elapsed" ]] && echo -e " ${BOLD}Elapsed:${RESET} $p_elapsed"
2704
-
2705
- echo ""
2706
- echo -e " ${BOLD}Stages:${RESET}"
2707
-
2708
- local in_stages=false
2709
- while IFS= read -r line; do
2710
- if [[ "$line" == "stages:" ]]; then
2711
- in_stages=true; continue
2712
- fi
2713
- if $in_stages; then
2714
- if [[ "$line" == "---" || ! "$line" =~ ^" " ]]; then break; fi
2715
- local trimmed
2716
- trimmed="$(echo "$line" | xargs)"
2717
- if [[ "$trimmed" == *":"* ]]; then
2718
- local sid="${trimmed%%:*}"
2719
- local sst="${trimmed#*: }"
2720
- local s_icon
2721
- case "$sst" in
2722
- complete) s_icon="${GREEN}✓${RESET}" ;;
2723
- running) s_icon="${CYAN}●${RESET}" ;;
2724
- failed) s_icon="${RED}✗${RESET}" ;;
2725
- *) s_icon="${DIM}○${RESET}" ;;
2726
- esac
2727
- echo -e " $s_icon $sid"
2728
- fi
2729
- fi
2730
- done < "$STATE_FILE"
2731
-
2732
- if [[ -d "$ARTIFACTS_DIR" ]]; then
2733
- local artifact_count
2734
- artifact_count=$(find "$ARTIFACTS_DIR" -type f 2>/dev/null | wc -l | xargs)
2735
- if [[ "$artifact_count" -gt 0 ]]; then
2736
- echo ""
2737
- echo -e " ${BOLD}Artifacts:${RESET} ($artifact_count files)"
2738
- ls "$ARTIFACTS_DIR" 2>/dev/null | sed 's/^/ /'
2739
- fi
2740
- fi
2741
- echo ""
2742
- }
2743
-
2744
- pipeline_abort() {
2745
- setup_dirs
2746
-
2747
- if [[ ! -f "$STATE_FILE" ]]; then
2748
- info "No active pipeline to abort."
2749
- return
2750
- fi
2751
-
2752
- local current_status
2753
- current_status=$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)
2754
-
2755
- if [[ "$current_status" == "complete" || "$current_status" == "aborted" ]]; then
2756
- info "Pipeline already $current_status."
2757
- return
2758
- fi
2759
-
2760
- resume_state 2>/dev/null || true
2761
- PIPELINE_STATUS="aborted"
2762
- write_state
2763
-
2764
- # Update GitHub
2765
- if [[ -n "$ISSUE_NUMBER" ]]; then
2766
- gh_init
2767
- gh_remove_label "$ISSUE_NUMBER" "pipeline/in-progress"
2768
- gh_comment_issue "$ISSUE_NUMBER" "⏹️ **Pipeline aborted** at stage: ${CURRENT_STAGE:-unknown}"
2769
- fi
2770
-
2771
- warn "Pipeline aborted."
2772
- echo -e " State saved at: ${DIM}$STATE_FILE${RESET}"
2773
- }
2774
-
2775
- pipeline_list() {
2776
- local locations=(
2777
- "$REPO_DIR/templates/pipelines"
2778
- "$HOME/.shipwright/pipelines"
2779
- )
2780
-
2781
- echo ""
2782
- echo -e "${PURPLE}${BOLD}━━━ Pipeline Templates ━━━${RESET}"
2783
- echo ""
2784
-
2785
- local found=false
2786
- for dir in "${locations[@]}"; do
2787
- if [[ -d "$dir" ]]; then
2788
- for f in "$dir"/*.json; do
2789
- [[ -f "$f" ]] || continue
2790
- found=true
2791
- local name desc stages_enabled gate_count
2792
- name=$(jq -r '.name' "$f" 2>/dev/null)
2793
- desc=$(jq -r '.description' "$f" 2>/dev/null)
2794
- stages_enabled=$(jq -r '[.stages[] | select(.enabled == true) | .id] | join(" → ")' "$f" 2>/dev/null)
2795
- gate_count=$(jq '[.stages[] | select(.gate == "approve" and .enabled == true)] | length' "$f" 2>/dev/null)
2796
- echo -e " ${CYAN}${BOLD}$name${RESET}"
2797
- echo -e " $desc"
2798
- echo -e " ${DIM}$stages_enabled${RESET}"
2799
- echo -e " ${DIM}(${gate_count} approval gates)${RESET}"
2800
- echo ""
2801
- done
2802
- fi
2803
- done
2804
-
2805
- if [[ "$found" != "true" ]]; then
2806
- warn "No pipeline templates found."
2807
- echo -e " Expected at: ${DIM}templates/pipelines/*.json${RESET}"
2808
- fi
2809
- }
2810
-
2811
- pipeline_show() {
2812
- local name="${PIPELINE_NAME_ARG:-$PIPELINE_NAME}"
2813
-
2814
- local config_file
2815
- config_file=$(find_pipeline_config "$name") || {
2816
- error "Pipeline template not found: $name"
2817
- echo -e " Available: ${DIM}shipwright pipeline list${RESET}"
2818
- exit 1
2819
- }
2820
-
2821
- echo ""
2822
- echo -e "${PURPLE}${BOLD}━━━ Pipeline: $(jq -r '.name' "$config_file") ━━━${RESET}"
2823
- echo -e " $(jq -r '.description' "$config_file")"
2824
- echo ""
2825
-
2826
- echo -e "${BOLD} Defaults:${RESET}"
2827
- jq -r '.defaults | to_entries[] | " \(.key): \(.value)"' "$config_file" 2>/dev/null
2828
- echo ""
2829
-
2830
- echo -e "${BOLD} Stages:${RESET}"
2831
- jq -r '.stages[] |
2832
- (if .enabled then " ✓" else " ○" end) +
2833
- " \(.id)" +
2834
- (if .gate == "approve" then " [gate: approve]" elif .gate == "skip" then " [skip]" else "" end)
2835
- ' "$config_file" 2>/dev/null
2836
- echo ""
2837
-
2838
- echo -e "${BOLD} GitHub Integration:${RESET}"
2839
- echo -e " • Issue: self-assign, label lifecycle, progress comments"
2840
- echo -e " • PR: labels, milestone, reviewers auto-propagated"
2841
- echo -e " • Validation: auto-close issue on completion"
2842
- echo ""
2843
- }
2844
-
2845
246
  # ─── Main ───────────────────────────────────────────────────────────────────
2846
247
 
2847
248
  case "$SUBCOMMAND" in