shipwright-cli 3.2.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (279)
  1. package/.claude/agents/code-reviewer.md +2 -0
  2. package/.claude/agents/devops-engineer.md +2 -0
  3. package/.claude/agents/doc-fleet-agent.md +2 -0
  4. package/.claude/agents/pipeline-agent.md +2 -0
  5. package/.claude/agents/shell-script-specialist.md +2 -0
  6. package/.claude/agents/test-specialist.md +2 -0
  7. package/.claude/hooks/agent-crash-capture.sh +32 -0
  8. package/.claude/hooks/post-tool-use.sh +3 -2
  9. package/.claude/hooks/pre-tool-use.sh +35 -3
  10. package/README.md +4 -4
  11. package/claude-code/hooks/config-change.sh +18 -0
  12. package/claude-code/hooks/instructions-reloaded.sh +7 -0
  13. package/claude-code/hooks/worktree-create.sh +25 -0
  14. package/claude-code/hooks/worktree-remove.sh +20 -0
  15. package/config/code-constitution.json +130 -0
  16. package/dashboard/middleware/auth.ts +134 -0
  17. package/dashboard/middleware/constants.ts +21 -0
  18. package/dashboard/public/index.html +2 -6
  19. package/dashboard/public/styles.css +100 -97
  20. package/dashboard/routes/auth.ts +38 -0
  21. package/dashboard/server.ts +66 -25
  22. package/dashboard/services/config.ts +26 -0
  23. package/dashboard/services/db.ts +118 -0
  24. package/dashboard/src/canvas/pixel-agent.ts +298 -0
  25. package/dashboard/src/canvas/pixel-sprites.ts +440 -0
  26. package/dashboard/src/canvas/shipyard-effects.ts +367 -0
  27. package/dashboard/src/canvas/shipyard-scene.ts +616 -0
  28. package/dashboard/src/canvas/submarine-layout.ts +267 -0
  29. package/dashboard/src/components/header.ts +8 -7
  30. package/dashboard/src/core/router.ts +1 -0
  31. package/dashboard/src/design/submarine-theme.ts +253 -0
  32. package/dashboard/src/main.ts +2 -0
  33. package/dashboard/src/types/api.ts +2 -1
  34. package/dashboard/src/views/activity.ts +2 -1
  35. package/dashboard/src/views/shipyard.ts +39 -0
  36. package/dashboard/types/index.ts +166 -0
  37. package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
  38. package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
  39. package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
  40. package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
  41. package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
  42. package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
  43. package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
  44. package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
  45. package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
  46. package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
  47. package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
  48. package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
  49. package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
  50. package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
  51. package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
  52. package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
  53. package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
  54. package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
  55. package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
  56. package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
  57. package/docs/research/RESEARCH_INDEX.md +439 -0
  58. package/docs/research/RESEARCH_SOURCES.md +440 -0
  59. package/docs/research/RESEARCH_SUMMARY.txt +275 -0
  60. package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
  61. package/package.json +2 -2
  62. package/scripts/lib/adaptive-model.sh +427 -0
  63. package/scripts/lib/adaptive-timeout.sh +316 -0
  64. package/scripts/lib/audit-trail.sh +309 -0
  65. package/scripts/lib/auto-recovery.sh +471 -0
  66. package/scripts/lib/bandit-selector.sh +431 -0
  67. package/scripts/lib/bootstrap.sh +104 -2
  68. package/scripts/lib/causal-graph.sh +455 -0
  69. package/scripts/lib/compat.sh +126 -0
  70. package/scripts/lib/compound-audit.sh +337 -0
  71. package/scripts/lib/constitutional.sh +454 -0
  72. package/scripts/lib/context-budget.sh +359 -0
  73. package/scripts/lib/convergence.sh +594 -0
  74. package/scripts/lib/cost-optimizer.sh +634 -0
  75. package/scripts/lib/daemon-adaptive.sh +10 -0
  76. package/scripts/lib/daemon-dispatch.sh +106 -17
  77. package/scripts/lib/daemon-failure.sh +34 -4
  78. package/scripts/lib/daemon-patrol.sh +23 -2
  79. package/scripts/lib/daemon-poll-github.sh +361 -0
  80. package/scripts/lib/daemon-poll-health.sh +299 -0
  81. package/scripts/lib/daemon-poll.sh +27 -611
  82. package/scripts/lib/daemon-state.sh +112 -66
  83. package/scripts/lib/daemon-triage.sh +10 -0
  84. package/scripts/lib/dod-scorecard.sh +442 -0
  85. package/scripts/lib/error-actionability.sh +300 -0
  86. package/scripts/lib/formal-spec.sh +461 -0
  87. package/scripts/lib/helpers.sh +177 -4
  88. package/scripts/lib/intent-analysis.sh +409 -0
  89. package/scripts/lib/loop-convergence.sh +350 -0
  90. package/scripts/lib/loop-iteration.sh +682 -0
  91. package/scripts/lib/loop-progress.sh +48 -0
  92. package/scripts/lib/loop-restart.sh +185 -0
  93. package/scripts/lib/memory-effectiveness.sh +506 -0
  94. package/scripts/lib/mutation-executor.sh +352 -0
  95. package/scripts/lib/outcome-feedback.sh +521 -0
  96. package/scripts/lib/pipeline-cli.sh +336 -0
  97. package/scripts/lib/pipeline-commands.sh +1216 -0
  98. package/scripts/lib/pipeline-detection.sh +100 -2
  99. package/scripts/lib/pipeline-execution.sh +897 -0
  100. package/scripts/lib/pipeline-github.sh +28 -3
  101. package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
  102. package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
  103. package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
  104. package/scripts/lib/pipeline-intelligence.sh +100 -1136
  105. package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
  106. package/scripts/lib/pipeline-quality-checks.sh +17 -715
  107. package/scripts/lib/pipeline-quality-gates.sh +563 -0
  108. package/scripts/lib/pipeline-stages-build.sh +730 -0
  109. package/scripts/lib/pipeline-stages-delivery.sh +965 -0
  110. package/scripts/lib/pipeline-stages-intake.sh +1133 -0
  111. package/scripts/lib/pipeline-stages-monitor.sh +407 -0
  112. package/scripts/lib/pipeline-stages-review.sh +1022 -0
  113. package/scripts/lib/pipeline-stages.sh +59 -2929
  114. package/scripts/lib/pipeline-state.sh +36 -5
  115. package/scripts/lib/pipeline-util.sh +487 -0
  116. package/scripts/lib/policy-learner.sh +438 -0
  117. package/scripts/lib/process-reward.sh +493 -0
  118. package/scripts/lib/project-detect.sh +649 -0
  119. package/scripts/lib/quality-profile.sh +334 -0
  120. package/scripts/lib/recruit-commands.sh +885 -0
  121. package/scripts/lib/recruit-learning.sh +739 -0
  122. package/scripts/lib/recruit-roles.sh +648 -0
  123. package/scripts/lib/reward-aggregator.sh +458 -0
  124. package/scripts/lib/rl-optimizer.sh +362 -0
  125. package/scripts/lib/root-cause.sh +427 -0
  126. package/scripts/lib/scope-enforcement.sh +445 -0
  127. package/scripts/lib/session-restart.sh +493 -0
  128. package/scripts/lib/skill-memory.sh +300 -0
  129. package/scripts/lib/skill-registry.sh +775 -0
  130. package/scripts/lib/spec-driven.sh +476 -0
  131. package/scripts/lib/test-helpers.sh +18 -7
  132. package/scripts/lib/test-holdout.sh +429 -0
  133. package/scripts/lib/test-optimizer.sh +511 -0
  134. package/scripts/shipwright-file-suggest.sh +45 -0
  135. package/scripts/skills/adversarial-quality.md +61 -0
  136. package/scripts/skills/api-design.md +44 -0
  137. package/scripts/skills/architecture-design.md +50 -0
  138. package/scripts/skills/brainstorming.md +43 -0
  139. package/scripts/skills/data-pipeline.md +44 -0
  140. package/scripts/skills/deploy-safety.md +64 -0
  141. package/scripts/skills/documentation.md +38 -0
  142. package/scripts/skills/frontend-design.md +45 -0
  143. package/scripts/skills/generated/.gitkeep +0 -0
  144. package/scripts/skills/generated/_refinements/.gitkeep +0 -0
  145. package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
  146. package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
  147. package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
  148. package/scripts/skills/generated/cli-version-management.md +29 -0
  149. package/scripts/skills/generated/collection-system-validation.md +99 -0
  150. package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
  151. package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
  152. package/scripts/skills/generated/test-parallelization-detection.md +65 -0
  153. package/scripts/skills/observability.md +79 -0
  154. package/scripts/skills/performance.md +48 -0
  155. package/scripts/skills/pr-quality.md +49 -0
  156. package/scripts/skills/product-thinking.md +43 -0
  157. package/scripts/skills/security-audit.md +49 -0
  158. package/scripts/skills/systematic-debugging.md +40 -0
  159. package/scripts/skills/testing-strategy.md +47 -0
  160. package/scripts/skills/two-stage-review.md +52 -0
  161. package/scripts/skills/validation-thoroughness.md +55 -0
  162. package/scripts/sw +9 -3
  163. package/scripts/sw-activity.sh +9 -2
  164. package/scripts/sw-adaptive.sh +2 -1
  165. package/scripts/sw-adversarial.sh +2 -1
  166. package/scripts/sw-architecture-enforcer.sh +3 -1
  167. package/scripts/sw-auth.sh +12 -2
  168. package/scripts/sw-autonomous.sh +5 -1
  169. package/scripts/sw-changelog.sh +4 -1
  170. package/scripts/sw-checkpoint.sh +2 -1
  171. package/scripts/sw-ci.sh +5 -1
  172. package/scripts/sw-cleanup.sh +4 -26
  173. package/scripts/sw-code-review.sh +10 -4
  174. package/scripts/sw-connect.sh +2 -1
  175. package/scripts/sw-context.sh +2 -1
  176. package/scripts/sw-cost.sh +48 -3
  177. package/scripts/sw-daemon.sh +66 -9
  178. package/scripts/sw-dashboard.sh +3 -1
  179. package/scripts/sw-db.sh +59 -16
  180. package/scripts/sw-decide.sh +8 -2
  181. package/scripts/sw-decompose.sh +360 -17
  182. package/scripts/sw-deps.sh +4 -1
  183. package/scripts/sw-developer-simulation.sh +4 -1
  184. package/scripts/sw-discovery.sh +325 -2
  185. package/scripts/sw-doc-fleet.sh +4 -1
  186. package/scripts/sw-docs-agent.sh +3 -1
  187. package/scripts/sw-docs.sh +2 -1
  188. package/scripts/sw-doctor.sh +453 -2
  189. package/scripts/sw-dora.sh +4 -1
  190. package/scripts/sw-durable.sh +4 -3
  191. package/scripts/sw-e2e-orchestrator.sh +17 -16
  192. package/scripts/sw-eventbus.sh +7 -1
  193. package/scripts/sw-evidence.sh +364 -12
  194. package/scripts/sw-feedback.sh +550 -9
  195. package/scripts/sw-fix.sh +20 -1
  196. package/scripts/sw-fleet-discover.sh +6 -2
  197. package/scripts/sw-fleet-viz.sh +4 -1
  198. package/scripts/sw-fleet.sh +5 -1
  199. package/scripts/sw-github-app.sh +16 -3
  200. package/scripts/sw-github-checks.sh +3 -2
  201. package/scripts/sw-github-deploy.sh +3 -2
  202. package/scripts/sw-github-graphql.sh +18 -7
  203. package/scripts/sw-guild.sh +5 -1
  204. package/scripts/sw-heartbeat.sh +5 -30
  205. package/scripts/sw-hello.sh +67 -0
  206. package/scripts/sw-hygiene.sh +6 -1
  207. package/scripts/sw-incident.sh +265 -1
  208. package/scripts/sw-init.sh +18 -2
  209. package/scripts/sw-instrument.sh +10 -2
  210. package/scripts/sw-intelligence.sh +42 -6
  211. package/scripts/sw-jira.sh +5 -1
  212. package/scripts/sw-launchd.sh +2 -1
  213. package/scripts/sw-linear.sh +4 -1
  214. package/scripts/sw-logs.sh +4 -1
  215. package/scripts/sw-loop.sh +432 -1128
  216. package/scripts/sw-memory.sh +356 -2
  217. package/scripts/sw-mission-control.sh +6 -1
  218. package/scripts/sw-model-router.sh +481 -26
  219. package/scripts/sw-otel.sh +13 -4
  220. package/scripts/sw-oversight.sh +14 -5
  221. package/scripts/sw-patrol-meta.sh +334 -0
  222. package/scripts/sw-pipeline-composer.sh +5 -1
  223. package/scripts/sw-pipeline-vitals.sh +2 -1
  224. package/scripts/sw-pipeline.sh +53 -2664
  225. package/scripts/sw-pm.sh +12 -5
  226. package/scripts/sw-pr-lifecycle.sh +2 -1
  227. package/scripts/sw-predictive.sh +7 -1
  228. package/scripts/sw-prep.sh +185 -2
  229. package/scripts/sw-ps.sh +5 -25
  230. package/scripts/sw-public-dashboard.sh +15 -3
  231. package/scripts/sw-quality.sh +2 -1
  232. package/scripts/sw-reaper.sh +8 -25
  233. package/scripts/sw-recruit.sh +156 -2303
  234. package/scripts/sw-regression.sh +19 -12
  235. package/scripts/sw-release-manager.sh +3 -1
  236. package/scripts/sw-release.sh +4 -1
  237. package/scripts/sw-remote.sh +3 -1
  238. package/scripts/sw-replay.sh +7 -1
  239. package/scripts/sw-retro.sh +158 -1
  240. package/scripts/sw-review-rerun.sh +3 -1
  241. package/scripts/sw-scale.sh +10 -3
  242. package/scripts/sw-security-audit.sh +6 -1
  243. package/scripts/sw-self-optimize.sh +6 -3
  244. package/scripts/sw-session.sh +9 -3
  245. package/scripts/sw-setup.sh +3 -1
  246. package/scripts/sw-stall-detector.sh +406 -0
  247. package/scripts/sw-standup.sh +15 -7
  248. package/scripts/sw-status.sh +3 -1
  249. package/scripts/sw-strategic.sh +4 -1
  250. package/scripts/sw-stream.sh +7 -1
  251. package/scripts/sw-swarm.sh +18 -6
  252. package/scripts/sw-team-stages.sh +13 -6
  253. package/scripts/sw-templates.sh +5 -29
  254. package/scripts/sw-testgen.sh +7 -1
  255. package/scripts/sw-tmux-pipeline.sh +4 -1
  256. package/scripts/sw-tmux-role-color.sh +2 -0
  257. package/scripts/sw-tmux-status.sh +1 -1
  258. package/scripts/sw-tmux.sh +3 -1
  259. package/scripts/sw-trace.sh +3 -1
  260. package/scripts/sw-tracker-github.sh +3 -0
  261. package/scripts/sw-tracker-jira.sh +3 -0
  262. package/scripts/sw-tracker-linear.sh +3 -0
  263. package/scripts/sw-tracker.sh +3 -1
  264. package/scripts/sw-triage.sh +2 -1
  265. package/scripts/sw-upgrade.sh +3 -1
  266. package/scripts/sw-ux.sh +5 -2
  267. package/scripts/sw-webhook.sh +3 -1
  268. package/scripts/sw-widgets.sh +3 -1
  269. package/scripts/sw-worktree.sh +15 -3
  270. package/scripts/test-skill-injection.sh +1233 -0
  271. package/templates/pipelines/autonomous.json +27 -3
  272. package/templates/pipelines/cost-aware.json +34 -8
  273. package/templates/pipelines/deployed.json +12 -0
  274. package/templates/pipelines/enterprise.json +12 -0
  275. package/templates/pipelines/fast.json +6 -0
  276. package/templates/pipelines/full.json +27 -3
  277. package/templates/pipelines/hotfix.json +6 -0
  278. package/templates/pipelines/standard.json +12 -0
  279. package/templates/pipelines/tdd.json +12 -0
@@ -0,0 +1,1216 @@
1
#!/usr/bin/env bash
# Module: pipeline-commands
# CLI commands: start, resume, status, abort, dry-run, reasoning trace, post-completion
set -euo pipefail

# Module guard — makes repeated `source` calls a no-op.
[[ -n "${_MODULE_PIPELINE_COMMANDS_LOADED:-}" ]] && return 0; _MODULE_PIPELINE_COMMANDS_LOADED=1

# ─── Defaults (needed if sourced independently) ──────────────────────────────
SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)}"
REPO_DIR="${REPO_DIR:-$(cd "$SCRIPT_DIR/.." && pwd)}"
PROJECT_ROOT="${PROJECT_ROOT:-$(git rev-parse --show-toplevel 2>/dev/null || pwd)}"
STATE_DIR="${STATE_DIR:-$PROJECT_ROOT/.claude}"
STATE_FILE="${STATE_FILE:-$STATE_DIR/pipeline-state.md}"
ARTIFACTS_DIR="${ARTIFACTS_DIR:-$STATE_DIR/pipeline-artifacts}"

# Variables that pipeline_start references (set by sw-pipeline.sh, defaults here for safety)
# BUG FIX: the old default was "'{\"opus\"...}'" — inside double quotes the
# single quotes are literal, so the default value started with a literal `'`
# and was not valid JSON; every later `jq` parse of COST_MODEL_RATES failed
# silently and cost estimation fell back to hard-coded rates. Build the
# default as plain JSON in a helper variable instead.
_DEFAULT_COST_MODEL_RATES='{"opus":{"input":15,"output":75},"sonnet":{"input":3,"output":15},"haiku":{"input":0.25,"output":1.25}}'
COST_MODEL_RATES="${COST_MODEL_RATES:-$_DEFAULT_COST_MODEL_RATES}"
SELF_HEAL_COUNT="${SELF_HEAL_COUNT:-0}"
TOTAL_INPUT_TOKENS="${TOTAL_INPUT_TOKENS:-0}"
TOTAL_OUTPUT_TOKENS="${TOTAL_OUTPUT_TOKENS:-0}"
STASHED_CHANGES="${STASHED_CHANGES:-false}"
PIPELINE_START_EPOCH="${PIPELINE_START_EPOCH:-}"
PIPELINE_STATUS="${PIPELINE_STATUS:-}"
PIPELINE_STAGES_PASSED="${PIPELINE_STAGES_PASSED:-}"
PIPELINE_SLOWEST_STAGE="${PIPELINE_SLOWEST_STAGE:-}"

# Ensure helpers are loaded; if helpers.sh is unavailable, install minimal
# logging/event shims so the module still works standalone.
[[ -f "$SCRIPT_DIR/lib/helpers.sh" ]] && source "$SCRIPT_DIR/lib/helpers.sh" 2>/dev/null || true
[[ "$(type -t info 2>/dev/null)" == "function" ]] || info() { echo "$*"; }
[[ "$(type -t warn 2>/dev/null)" == "function" ]] || warn() { echo "$*"; }
[[ "$(type -t error 2>/dev/null)" == "function" ]] || error() { echo "$*" >&2; }
[[ "$(type -t emit_event 2>/dev/null)" == "function" ]] || emit_event() { true; }
34
+
35
+ # ─── Post-Completion Cleanup ───────────────────────────────────────
36
# Remove run-scoped artifacts after a pipeline finishes and mark the persisted
# state file idle so the next run starts clean.
#
# Globals read: ARTIFACTS_DIR, STATE_FILE, ISSUE_NUMBER (optional)
# Side effects: deletes checkpoint/context/intelligence files; rewrites
#   STATE_FILE's `status:` line; emits a pipeline.cleanup event if anything
#   was removed.
pipeline_post_completion_cleanup() {
  local cleaned=0

  # 1. Clear checkpoints and context files (they only matter for resume; pipeline is done)
  if [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
    local cp_count=0
    local cp_file
    # nullglob is not set: an unmatched glob yields the literal pattern,
    # which the -f guard skips.
    for cp_file in "${ARTIFACTS_DIR}/checkpoints"/*-checkpoint.json; do
      [[ -f "$cp_file" ]] || continue
      rm -f "$cp_file"
      cp_count=$((cp_count + 1))
    done
    for cp_file in "${ARTIFACTS_DIR}/checkpoints"/*-claude-context.json; do
      [[ -f "$cp_file" ]] || continue
      rm -f "$cp_file"
      cp_count=$((cp_count + 1))
    done
    if [[ "$cp_count" -gt 0 ]]; then
      cleaned=$((cleaned + cp_count))
    fi
  fi

  # 2. Clear per-run intelligence artifacts (not needed after completion)
  local intel_files=(
    "${ARTIFACTS_DIR}/classified-findings.json"
    "${ARTIFACTS_DIR}/reassessment.json"
    "${ARTIFACTS_DIR}/skip-stage.txt"
    "${ARTIFACTS_DIR}/human-message.txt"
  )
  local f
  for f in "${intel_files[@]}"; do
    if [[ -f "$f" ]]; then
      rm -f "$f"
      cleaned=$((cleaned + 1))
    fi
  done

  # 3. Clear stale pipeline state (mark as idle so next run starts clean)
  if [[ -f "$STATE_FILE" ]]; then
    # Reset status to idle (preserves the file for reference but unblocks new runs)
    local tmp_state
    tmp_state=$(mktemp "${TMPDIR:-/tmp}/sw-state.XXXXXX") || { warn "mktemp failed for state reset"; return 0; }
    # shellcheck disable=SC2064 # intentional expansion at definition time
    trap "rm -f '$tmp_state'" RETURN
    # BUG FIX: the previous `sed ... || true` followed by an unconditional `mv`
    # could replace the state file with an empty temp file when sed failed.
    # Only install the rewritten copy when sed actually succeeded.
    if sed 's/^status: .*/status: idle/' "$STATE_FILE" > "$tmp_state" 2>/dev/null; then
      mv "$tmp_state" "$STATE_FILE"
    else
      warn "Failed to rewrite pipeline state — leaving $STATE_FILE untouched"
    fi
  fi

  if [[ "$cleaned" -gt 0 ]]; then
    emit_event "pipeline.cleanup" \
      "issue=${ISSUE_NUMBER:-0}" \
      "cleaned=$cleaned" \
      "type=post_completion"
  fi
}
91
+
92
+ # ─── Cancel GitHub Check Runs ──────────────────────────────────────
93
# Best-effort: mark every GitHub check run recorded for this pipeline as
# cancelled. Silently no-ops when GitHub integration is disabled, when the
# gh_checks_stage_update helper is not loaded, or when no check-run ids
# were recorded for this run.
pipeline_cancel_check_runs() {
  if [[ "${NO_GITHUB:-false}" == "true" ]]; then
    return
  fi

  type gh_checks_stage_update >/dev/null 2>&1 || return 0

  local run_ids_file="${ARTIFACTS_DIR:-/dev/null}/check-run-ids.json"
  [[ -f "$run_ids_file" ]] || return

  # The ids file maps stage name -> check-run id; we only need the keys.
  local check_stage
  while IFS= read -r check_stage; do
    [[ -n "$check_stage" ]] || continue
    gh_checks_stage_update "$check_stage" "completed" "cancelled" "Pipeline interrupted" 2>/dev/null || true
  done < <(jq -r 'keys[]' "$run_ids_file" 2>/dev/null || true)
}
111
+
112
+ # ─── Worktree Isolation ────────────────────────────────────────────
113
# Create an isolated git worktree (with a dedicated pipeline/<name> branch)
# and cd into it so pipeline work never dirties the main checkout.
#
# Globals read:    WORKTREE_NAME (optional), ISSUE_NUMBER (optional)
# Globals written: ORIGINAL_REPO_DIR, CLEANUP_WORKTREE
# Returns: 0 on success; 1 if the worktree cannot be entered. `git worktree
#   add` failing propagates under the file's `set -e`.
# NOTE(review): assumes the color variables (DIM/CYAN/RESET) are defined by
#   helpers.sh — confirm they are set before this runs under `set -u`.
pipeline_setup_worktree() {
  local worktree_base=".worktrees"
  # BUG FIX: WORKTREE_NAME may legitimately be unset (the --worktree-name flag
  # was not passed); the unguarded ${WORKTREE_NAME} aborted under `set -u`.
  local name="${WORKTREE_NAME:-}"

  # Auto-generate name from issue number or timestamp
  if [[ -z "$name" ]]; then
    if [[ -n "${ISSUE_NUMBER:-}" ]]; then
      name="pipeline-issue-${ISSUE_NUMBER}"
    else
      name="pipeline-$(date +%s)"
    fi
  fi

  local worktree_path="${worktree_base}/${name}"
  local branch_name="pipeline/${name}"

  info "Setting up worktree: ${DIM}${worktree_path}${RESET}"

  # Ensure worktree base exists
  mkdir -p "$worktree_base"

  # Remove stale worktree if it exists (fall back to rm -rf if git refuses)
  if [[ -d "$worktree_path" ]]; then
    warn "Worktree already exists — removing: ${worktree_path}"
    git worktree remove --force "$worktree_path" 2>/dev/null || rm -rf "$worktree_path"
  fi

  # Delete stale branch if it exists
  git branch -D "$branch_name" 2>/dev/null || true

  # Create worktree with new branch from current HEAD
  git worktree add -b "$branch_name" "$worktree_path" HEAD

  # Store original dir for cleanup, then cd into worktree
  ORIGINAL_REPO_DIR="$(pwd)"
  cd "$worktree_path" || { error "Failed to cd into worktree: $worktree_path"; return 1; }
  CLEANUP_WORKTREE=true

  success "Worktree ready: ${CYAN}${worktree_path}${RESET} (branch: ${branch_name})"
}
153
+
154
# Tear down the pipeline worktree created by pipeline_setup_worktree.
# On success (PIPELINE_EXIT_CODE == 0): cd back to the original repo, remove
# the worktree, delete its local branch, and best-effort delete the remote
# branch. On failure: leave the worktree in place for inspection and print
# manual-cleanup instructions.
#
# Globals read: CLEANUP_WORKTREE, ORIGINAL_REPO_DIR, PIPELINE_EXIT_CODE
#   (treated as failure when unset), NO_GITHUB.
pipeline_cleanup_worktree() {
  # No-op unless setup actually created a worktree for this run.
  if [[ "${CLEANUP_WORKTREE:-false}" != "true" ]]; then
    return
  fi

  local worktree_path
  worktree_path="$(pwd)"

  # Only act when we are actually inside the worktree (not the main repo).
  if [[ -n "${ORIGINAL_REPO_DIR:-}" && "$worktree_path" != "$ORIGINAL_REPO_DIR" ]]; then
    # Leave the worktree first — git refuses to remove the cwd's worktree.
    cd "$ORIGINAL_REPO_DIR" 2>/dev/null || cd /
    # Only clean up worktree on success — preserve on failure for inspection
    if [[ "${PIPELINE_EXIT_CODE:-1}" -eq 0 ]]; then
      info "Cleaning up worktree: ${DIM}${worktree_path}${RESET}"
      # Extract branch name before removing worktree
      # NOTE(review): the worktree path is used as a regex here; paths with
      # regex metacharacters (e.g. '+', '.') could mismatch — confirm, or
      # switch to a fixed-string match.
      local _wt_branch=""
      _wt_branch=$(git worktree list --porcelain 2>/dev/null | grep -A1 "worktree ${worktree_path}$" | grep "^branch " | sed 's|^branch refs/heads/||' || true)
      if ! git worktree remove --force "$worktree_path" 2>/dev/null; then
        warn "Failed to remove worktree at ${worktree_path} — may need manual cleanup"
      fi
      # Clean up the local branch
      if [[ -n "$_wt_branch" ]]; then
        if ! git branch -D "$_wt_branch" 2>/dev/null; then
          warn "Failed to delete local branch ${_wt_branch}"
        fi
      fi
      # Clean up the remote branch (if it was pushed)
      if [[ -n "$_wt_branch" && "${NO_GITHUB:-}" != "true" ]]; then
        git push origin --delete "$_wt_branch" 2>/dev/null || true
      fi
    else
      warn "Pipeline failed — worktree preserved for inspection: ${DIM}${worktree_path}${RESET}"
      warn "Clean up manually: ${DIM}git worktree remove --force ${worktree_path}${RESET}"
    fi
  fi
}
189
+
190
+ 
191
+
192
+ # ─── Dry-Run Mode ──────────────────────────────────────────────────
193
# Validate the pipeline without executing it: check config JSON, list stages
# with their gates/models, verify required tools, and estimate token cost.
#
# Globals read: PIPELINE_CONFIG, MODEL, SKIP_GATES, COST_MODEL_RATES,
#   ARTIFACTS_DIR, plus color variables from helpers.sh.
# Returns: 0 when validation passes; 1 on missing/invalid config or missing
#   required tools.
run_dry_run() {
  echo ""
  echo -e "${BLUE}${BOLD}━━━ Dry Run: Pipeline Validation ━━━${RESET}"
  echo ""

  # Validate pipeline config
  if [[ ! -f "$PIPELINE_CONFIG" ]]; then
    error "Pipeline config not found: $PIPELINE_CONFIG"
    return 1
  fi

  # Validate JSON structure. (FIX: the old code captured `jq .` output into an
  # unused variable — SC2034; `jq empty` validates without producing output.)
  if ! jq empty "$PIPELINE_CONFIG" 2>/dev/null; then
    error "Pipeline config is not valid JSON: $PIPELINE_CONFIG"
    return 1
  fi

  # Extract pipeline metadata
  local pipeline_name stages_count enabled_stages gated_stages
  pipeline_name=$(jq -r '.name // "unknown"' "$PIPELINE_CONFIG")
  stages_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
  enabled_stages=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
  gated_stages=$(jq '[.stages[] | select(.enabled == true and .gate == "approve")] | length' "$PIPELINE_CONFIG")

  # Build model (CLI override or config default). MODEL/SKIP_GATES are set by
  # sw-pipeline.sh; default them so independent sourcing survives `set -u`.
  local default_model stage_model
  default_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")
  stage_model="${MODEL:-}"
  [[ -z "$stage_model" ]] && stage_model="$default_model"

  echo -e " ${BOLD}Pipeline:${RESET} $pipeline_name"
  echo -e " ${BOLD}Stages:${RESET} $enabled_stages enabled of $stages_count total"
  if [[ "${SKIP_GATES:-false}" == "true" ]]; then
    echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
  else
    echo -e " ${BOLD}Gates:${RESET} $gated_stages approval gate(s)"
  fi
  echo -e " ${BOLD}Model:${RESET} $stage_model"
  echo ""

  # Table header
  echo -e "${CYAN}${BOLD}Stage Enabled Gate Model${RESET}"
  echo -e "${CYAN}────────────────────────────────────────${RESET}"

  # List all stages
  local stage_json
  while IFS= read -r stage_json; do
    local stage_id stage_enabled stage_gate stage_config_model stage_model_display
    stage_id=$(echo "$stage_json" | jq -r '.id')
    stage_enabled=$(echo "$stage_json" | jq -r '.enabled')
    stage_gate=$(echo "$stage_json" | jq -r '.gate')

    # Determine stage model (config override or default)
    stage_config_model=$(echo "$stage_json" | jq -r '.config.model // ""')
    if [[ -n "$stage_config_model" && "$stage_config_model" != "null" ]]; then
      stage_model_display="$stage_config_model"
    else
      stage_model_display="$default_model"
    fi

    # Format enabled
    local enabled_str
    if [[ "$stage_enabled" == "true" ]]; then
      enabled_str="${GREEN}yes${RESET}"
    else
      enabled_str="${DIM}no${RESET}"
    fi

    # Format gate (disabled stages show a dash)
    local gate_str
    if [[ "$stage_enabled" == "true" ]]; then
      if [[ "$stage_gate" == "approve" ]]; then
        gate_str="${YELLOW}approve${RESET}"
      else
        gate_str="${GREEN}auto${RESET}"
      fi
    else
      gate_str="${DIM}—${RESET}"
    fi

    printf "%-15s %s %s %s\n" "$stage_id" "$enabled_str" "$gate_str" "$stage_model_display"
  done < <(jq -c '.stages[]' "$PIPELINE_CONFIG")

  echo ""

  # Validate required tools
  echo -e "${BLUE}${BOLD}━━━ Tool Validation ━━━${RESET}"
  echo ""

  local tool_errors=0
  local required_tools=("git" "jq")
  local optional_tools=("gh" "claude" "bc")

  local tool
  for tool in "${required_tools[@]}"; do
    if command -v "$tool" >/dev/null 2>&1; then
      echo -e " ${GREEN}✓${RESET} $tool"
    else
      echo -e " ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
      tool_errors=$((tool_errors + 1))
    fi
  done

  for tool in "${optional_tools[@]}"; do
    if command -v "$tool" >/dev/null 2>&1; then
      echo -e " ${GREEN}✓${RESET} $tool"
    else
      echo -e " ${DIM}○${RESET} $tool"
    fi
  done

  echo ""

  # Cost estimation: use historical averages from past pipelines when available
  echo -e "${BLUE}${BOLD}━━━ Estimated Resource Usage ━━━${RESET}"
  echo ""

  local stages_json
  stages_json=$(jq '[.stages[] | select(.enabled == true)]' "$PIPELINE_CONFIG" 2>/dev/null || echo "[]")
  local est
  est=$(estimate_pipeline_cost "$stages_json")
  local input_tokens_estimate output_tokens_estimate
  input_tokens_estimate=$(echo "$est" | jq -r '.input_tokens // 0')
  output_tokens_estimate=$(echo "$est" | jq -r '.output_tokens // 0')

  # Calculate cost based on selected model ($/1M tokens; fall back to sonnet
  # rates when the model is missing from COST_MODEL_RATES)
  local input_rate output_rate input_cost output_cost total_cost
  input_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.input // 3" 2>/dev/null || echo "3")
  output_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.output // 15" 2>/dev/null || echo "15")

  # Cost calculation: tokens per million * rate
  input_cost=$(awk -v tokens="$input_tokens_estimate" -v rate="$input_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  output_cost=$(awk -v tokens="$output_tokens_estimate" -v rate="$output_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')

  echo -e " ${BOLD}Estimated Input Tokens:${RESET} ~$input_tokens_estimate"
  echo -e " ${BOLD}Estimated Output Tokens:${RESET} ~$output_tokens_estimate"
  echo -e " ${BOLD}Model Cost Rate:${RESET} $stage_model"
  echo -e " ${BOLD}Estimated Cost:${RESET} \$$total_cost USD"
  echo ""

  # Validate composed pipeline if intelligence is enabled
  if [[ -f "$ARTIFACTS_DIR/composed-pipeline.json" ]] && type composer_validate_pipeline >/dev/null 2>&1; then
    echo -e "${BLUE}${BOLD}━━━ Intelligence-Composed Pipeline ━━━${RESET}"
    echo ""

    if composer_validate_pipeline "$(cat "$ARTIFACTS_DIR/composed-pipeline.json" 2>/dev/null || echo "")" 2>/dev/null; then
      echo -e " ${GREEN}✓${RESET} Composed pipeline is valid"
    else
      echo -e " ${YELLOW}⚠${RESET} Composed pipeline validation failed (will use template defaults)"
    fi
    echo ""
  fi

  # Final validation result
  if [[ "$tool_errors" -gt 0 ]]; then
    error "Dry run validation failed: $tool_errors required tool(s) missing"
    return 1
  fi

  success "Dry run validation passed"
  echo ""
  echo -e " To execute this pipeline: ${DIM}remove --dry-run flag${RESET}"
  echo ""
  return 0
}
358
+
359
+ # ─── Reasoning Trace Generation ─────────────────────────────────────
360
# Record the reasoning behind pipeline setup decisions — complexity/risk
# analysis, template selection, similar-issue context, and predicted failure
# modes — before execution begins. Every collaborator (gh, intelligence_*,
# memory_*, thompson_*, db_*) is optional: each lookup degrades to a safe
# default when the function is not loaded, so this never blocks a run.
#
# Globals read: SHIPWRIGHT_PIPELINE_ID, ISSUE_NUMBER, GOAL, PIPELINE_TEMPLATE
# Globals written: exports PIPELINE_TEMPLATE when it was not already set.
generate_reasoning_trace() {
  local job_id="${SHIPWRIGHT_PIPELINE_ID:-$$}"
  local issue="${ISSUE_NUMBER:-}"
  local goal="${GOAL:-}"
  # FIX: declared local here — the goal-only branch previously assigned these
  # without `local`, leaking them into the global namespace.
  local issue_json="" analysis=""

  # Step 1: Analyze issue complexity and risk
  local complexity="medium"
  local risk_score=50
  if [[ -n "$issue" ]] && type intelligence_analyze_issue >/dev/null 2>&1; then
    issue_json=$(gh issue view "$issue" --json number,title,body,labels 2>/dev/null || echo "{}")
    if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
      analysis=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "")
      if [[ -n "$analysis" ]]; then
        local comp_num
        comp_num=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
        # Map the 1-10 complexity score onto low/medium/high buckets
        if [[ "$comp_num" -le 3 ]]; then
          complexity="low"
        elif [[ "$comp_num" -le 6 ]]; then
          complexity="medium"
        else
          complexity="high"
        fi
        # Risk is the inverse of predicted success probability
        risk_score=$((100 - $(echo "$analysis" | jq -r '.success_probability // 50' 2>/dev/null || echo "50")))
      fi
    fi
  elif [[ -n "$goal" ]]; then
    # No issue number: synthesize an issue-shaped JSON object from the goal
    issue_json=$(jq -n --arg title "${goal}" --arg body "" '{title: $title, body: $body, labels: []}')
    if type intelligence_analyze_issue >/dev/null 2>&1; then
      analysis=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "")
      if [[ -n "$analysis" ]]; then
        local comp_num
        comp_num=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
        if [[ "$comp_num" -le 3 ]]; then complexity="low"; elif [[ "$comp_num" -le 6 ]]; then complexity="medium"; else complexity="high"; fi
        risk_score=$((100 - $(echo "$analysis" | jq -r '.success_probability // 50' 2>/dev/null || echo "50")))
      fi
    fi
  fi

  # Step 2: Query similar past issues
  local similar_context=""
  if type memory_semantic_search >/dev/null 2>&1 && [[ -n "$goal" ]]; then
    similar_context=$(memory_semantic_search "$goal" "" 3 2>/dev/null || echo "")
  fi

  # Step 3: Select template using Thompson sampling
  local selected_template="${PIPELINE_TEMPLATE:-}"
  if [[ -z "$selected_template" ]] && type thompson_select_template >/dev/null 2>&1; then
    selected_template=$(thompson_select_template "$complexity" 2>/dev/null || echo "standard")
  fi
  [[ -z "$selected_template" ]] && selected_template="standard"

  # Step 4: Predict failure modes from memory
  local failure_predictions=""
  if type memory_semantic_search >/dev/null 2>&1 && [[ -n "$goal" ]]; then
    failure_predictions=$(memory_semantic_search "failure error $goal" "" 3 2>/dev/null || echo "")
  fi

  # Save reasoning traces to DB (best effort; DB layer may not be loaded)
  if type db_save_reasoning_trace >/dev/null 2>&1; then
    db_save_reasoning_trace "$job_id" "complexity_analysis" \
      "issue=$issue goal=$goal" \
      "Analyzed complexity=$complexity risk=$risk_score" \
      "complexity=$complexity risk_score=$risk_score" 0.7 2>/dev/null || true

    db_save_reasoning_trace "$job_id" "template_selection" \
      "complexity=$complexity historical_outcomes" \
      "Thompson sampling over historical success rates" \
      "template=$selected_template" 0.8 2>/dev/null || true

    if [[ -n "$similar_context" && "$similar_context" != "[]" ]]; then
      db_save_reasoning_trace "$job_id" "similar_issues" \
        "$goal" \
        "Found similar past issues for context injection" \
        "$similar_context" 0.6 2>/dev/null || true
    fi

    if [[ -n "$failure_predictions" && "$failure_predictions" != "[]" ]]; then
      db_save_reasoning_trace "$job_id" "failure_prediction" \
        "$goal" \
        "Predicted potential failure modes from history" \
        "$failure_predictions" 0.5 2>/dev/null || true
    fi
  fi

  # Export for use by pipeline stages
  [[ -n "$selected_template" && -z "${PIPELINE_TEMPLATE:-}" ]] && export PIPELINE_TEMPLATE="$selected_template"

  emit_event "reasoning.trace" "job_id=$job_id" "complexity=$complexity" "risk=$risk_score" "template=${selected_template:-standard}" 2>/dev/null || true
}
450
+
451
+ # ─── Main 'start' Command ──────────────────────────────────────────
452
+ pipeline_start() {
453
+ # Handle --repo flag: change to directory before running
454
+ if [[ -n "$REPO_OVERRIDE" ]]; then
455
+ if [[ ! -d "$REPO_OVERRIDE" ]]; then
456
+ error "Directory does not exist: $REPO_OVERRIDE"
457
+ exit 1
458
+ fi
459
+ if ! cd "$REPO_OVERRIDE" 2>/dev/null; then
460
+ error "Cannot cd to: $REPO_OVERRIDE"
461
+ exit 1
462
+ fi
463
+ if ! git rev-parse --show-toplevel >/dev/null 2>&1; then
464
+ error "Not a git repository: $REPO_OVERRIDE"
465
+ exit 1
466
+ fi
467
+ ORIGINAL_REPO_DIR="$(pwd)"
468
+ info "Using repository: $ORIGINAL_REPO_DIR"
469
+ fi
470
+
471
+ # Bootstrap optimization & memory if cold start (before first intelligence use)
472
+ if [[ -f "$SCRIPT_DIR/lib/bootstrap.sh" ]]; then
473
+ source "$SCRIPT_DIR/lib/bootstrap.sh"
474
+ [[ ! -f "$HOME/.shipwright/optimization/iteration-model.json" ]] && bootstrap_optimization 2>/dev/null || true
475
+ [[ ! -f "$HOME/.shipwright/memory/patterns.json" ]] && bootstrap_memory 2>/dev/null || true
476
+ fi
477
+
478
+ if [[ -z "$GOAL" && -z "$ISSUE_NUMBER" ]]; then
479
+ error "Must provide --goal or --issue"
480
+ echo -e " Example: ${DIM}shipwright pipeline start --goal \"Add JWT auth\"${RESET}"
481
+ echo -e " Example: ${DIM}shipwright pipeline start --issue 123${RESET}"
482
+ exit 1
483
+ fi
484
+
485
+ if ! command -v jq >/dev/null 2>&1; then
486
+ error "jq is required. Install it: brew install jq"
487
+ exit 1
488
+ fi
489
+
490
+ # Set up worktree isolation if requested
491
+ if [[ "$AUTO_WORKTREE" == "true" ]]; then
492
+ pipeline_setup_worktree
493
+ fi
494
+
495
+ # Register worktree cleanup on exit (chain with existing cleanup)
496
+ if [[ "$CLEANUP_WORKTREE" == "true" ]]; then
497
+ trap 'pipeline_cleanup_worktree; cleanup_on_exit' SIGINT SIGTERM
498
+ trap 'pipeline_cleanup_worktree; cleanup_on_exit' EXIT
499
+ fi
500
+
501
+ setup_dirs
502
+
503
+ # Acquire durable lock to prevent concurrent pipelines on the same issue/goal
504
+ _PIPELINE_LOCK_ID=""
505
+ if type acquire_lock >/dev/null 2>&1; then
506
+ _PIPELINE_LOCK_ID="pipeline-${ISSUE_NUMBER:-goal-$$}"
507
+ if ! acquire_lock "$_PIPELINE_LOCK_ID" 5 2>/dev/null; then
508
+ error "Another pipeline is already running for this issue/goal"
509
+ echo -e " Wait for it to finish, or remove stale lock:"
510
+ echo -e " ${DIM}rm -rf ~/.shipwright/durable/locks/${_PIPELINE_LOCK_ID}.lock${RESET}"
511
+ _PIPELINE_LOCK_ID=""
512
+ exit 1
513
+ fi
514
+ fi
515
+
516
+ # Generate reasoning trace (complexity analysis, template selection, failure predictions)
517
+ local user_specified_pipeline="$PIPELINE_NAME"
518
+ generate_reasoning_trace 2>/dev/null || true
519
+ if [[ -n "${PIPELINE_TEMPLATE:-}" && "$user_specified_pipeline" == "standard" ]]; then
520
+ PIPELINE_NAME="$PIPELINE_TEMPLATE"
521
+ fi
522
+
523
+ # Check for existing pipeline
524
+ if [[ -f "$STATE_FILE" ]]; then
525
+ local existing_status
526
+ existing_status=$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)
527
+ if [[ "$existing_status" == "running" || "$existing_status" == "paused" || "$existing_status" == "interrupted" ]]; then
528
+ warn "A pipeline is already in progress (status: $existing_status)"
529
+ echo -e " Resume it: ${DIM}shipwright pipeline resume${RESET}"
530
+ echo -e " Abort it: ${DIM}shipwright pipeline abort${RESET}"
531
+ exit 1
532
+ fi
533
+ fi
534
+
535
+ # Pre-flight checks
536
+ preflight_checks || exit 1
537
+
538
+ # Initialize GitHub integration
539
+ gh_init
540
+
541
+ load_pipeline_config
542
+
543
+ # Checkpoint resume: when --resume is passed, try DB first, then file-based
544
+ checkpoint_stage=""
545
+ checkpoint_iteration=0
546
+ if $RESUME_FROM_CHECKPOINT && type db_load_checkpoint >/dev/null 2>&1; then
547
+ local saved_checkpoint
548
+ saved_checkpoint=$(db_load_checkpoint "pipeline-${SHIPWRIGHT_PIPELINE_ID:-$$}" 2>/dev/null || echo "")
549
+ if [[ -n "$saved_checkpoint" ]]; then
550
+ checkpoint_stage=$(echo "$saved_checkpoint" | jq -r '.stage // ""' 2>/dev/null || echo "")
551
+ if [[ -n "$checkpoint_stage" ]]; then
552
+ info "Resuming from DB checkpoint: stage=$checkpoint_stage"
553
+ checkpoint_iteration=$(echo "$saved_checkpoint" | jq -r '.iteration // 0' 2>/dev/null || echo "0")
554
+ # Build COMPLETED_STAGES: all enabled stages before checkpoint_stage
555
+ local enabled_list before_list=""
556
+ enabled_list=$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" 2>/dev/null) || true
557
+ local s
558
+ while IFS= read -r s; do
559
+ [[ -z "$s" ]] && continue
560
+ if [[ "$s" == "$checkpoint_stage" ]]; then
561
+ break
562
+ fi
563
+ [[ -n "$before_list" ]] && before_list="${before_list},${s}" || before_list="$s"
564
+ done <<< "$enabled_list"
565
+ if [[ -n "$before_list" ]]; then
566
+ COMPLETED_STAGES="${before_list}"
567
+ SELF_HEAL_COUNT="${checkpoint_iteration}"
568
+ fi
569
+ fi
570
+ fi
571
+ fi
572
+ if $RESUME_FROM_CHECKPOINT && [[ -z "$checkpoint_stage" ]] && [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
573
+ local cp_dir="${ARTIFACTS_DIR}/checkpoints"
574
+ local latest_cp="" latest_mtime=0
575
+ local f
576
+ for f in "$cp_dir"/*-checkpoint.json; do
577
+ [[ -f "$f" ]] || continue
578
+ local mtime
579
+ mtime=$(file_mtime "$f" 2>/dev/null || echo "0")
580
+ if [[ "${mtime:-0}" -gt "$latest_mtime" ]]; then
581
+ latest_mtime="${mtime}"
582
+ latest_cp="$f"
583
+ fi
584
+ done
585
+ if [[ -n "$latest_cp" && -x "$SCRIPT_DIR/sw-checkpoint.sh" ]]; then
586
+ checkpoint_stage="$(basename "$latest_cp" -checkpoint.json)"
587
+ local cp_json
588
+ cp_json="$("$SCRIPT_DIR/sw-checkpoint.sh" restore --stage "$checkpoint_stage" 2>/dev/null)" || true
589
+ if [[ -n "$cp_json" ]] && command -v jq >/dev/null 2>&1; then
590
+ checkpoint_iteration="$(echo "$cp_json" | jq -r '.iteration // 0' 2>/dev/null)" || checkpoint_iteration=0
591
+ info "Checkpoint resume: stage=${checkpoint_stage} iteration=${checkpoint_iteration}"
592
+ # Build COMPLETED_STAGES: all enabled stages before checkpoint_stage
593
+ local enabled_list before_list=""
594
+ enabled_list="$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" 2>/dev/null)" || true
595
+ local s
596
+ while IFS= read -r s; do
597
+ [[ -z "$s" ]] && continue
598
+ if [[ "$s" == "$checkpoint_stage" ]]; then
599
+ break
600
+ fi
601
+ [[ -n "$before_list" ]] && before_list="${before_list},${s}" || before_list="$s"
602
+ done <<< "$enabled_list"
603
+ if [[ -n "$before_list" ]]; then
604
+ COMPLETED_STAGES="${before_list}"
605
+ SELF_HEAL_COUNT="${checkpoint_iteration}"
606
+ fi
607
+ fi
608
+ fi
609
+ fi
610
+
611
+ # Restore from state file if resuming (failed/interrupted pipeline); else initialize fresh
612
+ if $RESUME_FROM_CHECKPOINT && [[ -f "$STATE_FILE" ]]; then
613
+ local existing_status
614
+ existing_status="$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)"
615
+ if [[ "$existing_status" == "failed" || "$existing_status" == "interrupted" ]]; then
616
+ resume_state
617
+ else
618
+ initialize_state
619
+ fi
620
+ else
621
+ initialize_state
622
+ fi
623
+
624
+ # CI resume: restore branch + goal context when intake is skipped
625
+ if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "intake"; then
626
+ # Intake was completed in a previous run — restore context
627
+ # The workflow merges the partial work branch, so code changes are on HEAD
628
+
629
+ # Restore GOAL from issue if not already set
630
+ if [[ -z "$GOAL" && -n "$ISSUE_NUMBER" ]]; then
631
+ GOAL=$(_timeout "$(_config_get_int "network.gh_timeout" 30 2>/dev/null || echo 30)" gh issue view "$ISSUE_NUMBER" --json title --jq '.title' 2>/dev/null || echo "Issue #${ISSUE_NUMBER}")
632
+ info "CI resume: goal from issue — ${GOAL}"
633
+ fi
634
+
635
+ # Restore branch context
636
+ if [[ -z "$GIT_BRANCH" ]]; then
637
+ local ci_branch="ci/issue-${ISSUE_NUMBER}"
638
+ info "CI resume: creating branch ${ci_branch} from current HEAD"
639
+ if ! git checkout -b "$ci_branch" 2>/dev/null && ! git checkout "$ci_branch" 2>/dev/null; then
640
+ warn "CI resume: failed to create or checkout branch ${ci_branch}"
641
+ fi
642
+ GIT_BRANCH="$ci_branch"
643
+ elif [[ "$(git branch --show-current 2>/dev/null)" != "$GIT_BRANCH" ]]; then
644
+ info "CI resume: checking out branch ${GIT_BRANCH}"
645
+ if ! git checkout -b "$GIT_BRANCH" 2>/dev/null && ! git checkout "$GIT_BRANCH" 2>/dev/null; then
646
+ warn "CI resume: failed to create or checkout branch ${GIT_BRANCH}"
647
+ fi
648
+ fi
649
+ write_state 2>/dev/null || true
650
+ fi
651
+
652
+ echo ""
653
+ echo -e "${PURPLE}${BOLD}╔═══════════════════════════════════════════════════════════════════╗${RESET}"
654
+ echo -e "${PURPLE}${BOLD}║ shipwright pipeline — Autonomous Feature Delivery ║${RESET}"
655
+ echo -e "${PURPLE}${BOLD}╚═══════════════════════════════════════════════════════════════════╝${RESET}"
656
+ echo ""
657
+
658
+ # Comprehensive environment summary
659
+ if [[ -n "$GOAL" ]]; then
660
+ echo -e " ${BOLD}Goal:${RESET} $GOAL"
661
+ fi
662
+ if [[ -n "$ISSUE_NUMBER" ]]; then
663
+ echo -e " ${BOLD}Issue:${RESET} #$ISSUE_NUMBER"
664
+ fi
665
+
666
+ echo -e " ${BOLD}Pipeline:${RESET} $PIPELINE_NAME"
667
+
668
+ local enabled_stages
669
+ enabled_stages=$(jq -r '.stages[] | select(.enabled == true) | .id' "$PIPELINE_CONFIG" | tr '\n' ' ')
670
+ echo -e " ${BOLD}Stages:${RESET} $enabled_stages"
671
+
672
+ local gate_count
673
+ gate_count=$(jq '[.stages[] | select(.gate == "approve" and .enabled == true)] | length' "$PIPELINE_CONFIG")
674
+ if [[ "$HEADLESS" == "true" ]]; then
675
+ echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (headless — non-interactive stdin detected)${RESET}"
676
+ elif [[ "$SKIP_GATES" == "true" ]]; then
677
+ echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
678
+ else
679
+ echo -e " ${BOLD}Gates:${RESET} ${gate_count} approval gate(s)"
680
+ fi
681
+
682
+ echo -e " ${BOLD}Model:${RESET} ${MODEL:-$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")}"
683
+ echo -e " ${BOLD}Self-heal:${RESET} ${BUILD_TEST_RETRIES} retry cycle(s)"
684
+
685
+ if [[ "$GH_AVAILABLE" == "true" ]]; then
686
+ echo -e " ${BOLD}GitHub:${RESET} ${GREEN}✓${RESET} ${DIM}${REPO_OWNER}/${REPO_NAME}${RESET}"
687
+ else
688
+ echo -e " ${BOLD}GitHub:${RESET} ${DIM}disabled${RESET}"
689
+ fi
690
+
691
+ if [[ -n "$SLACK_WEBHOOK" ]]; then
692
+ echo -e " ${BOLD}Slack:${RESET} ${GREEN}✓${RESET} notifications enabled"
693
+ fi
694
+
695
+ echo ""
696
+
697
+ if [[ "$DRY_RUN" == "true" ]]; then
698
+ run_dry_run
699
+ return $?
700
+ fi
701
+
702
+ # Capture predictions for feedback loop (intelligence → actuals → learning)
703
+ if type intelligence_analyze_issue >/dev/null 2>&1 && (type intelligence_estimate_iterations >/dev/null 2>&1 || type intelligence_predict_cost >/dev/null 2>&1); then
704
+ local issue_json="${INTELLIGENCE_ANALYSIS:-}"
705
+ if [[ -z "$issue_json" || "$issue_json" == "{}" ]]; then
706
+ if [[ -n "$ISSUE_NUMBER" ]]; then
707
+ issue_json=$(gh issue view "$ISSUE_NUMBER" --json number,title,body,labels 2>/dev/null || echo "{}")
708
+ else
709
+ issue_json=$(jq -n --arg title "${GOAL:-untitled}" --arg body "" '{title: $title, body: $body, labels: []}')
710
+ fi
711
+ if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
712
+ issue_json=$(intelligence_analyze_issue "$issue_json" 2>/dev/null || echo "{}")
713
+ fi
714
+ fi
715
+ if [[ -n "$issue_json" && "$issue_json" != "{}" ]]; then
716
+ if type intelligence_estimate_iterations >/dev/null 2>&1; then
717
+ PREDICTED_ITERATIONS=$(intelligence_estimate_iterations "$issue_json" "" 2>/dev/null || echo "")
718
+ export PREDICTED_ITERATIONS
719
+ fi
720
+ if type intelligence_predict_cost >/dev/null 2>&1; then
721
+ local cost_json
722
+ cost_json=$(intelligence_predict_cost "$issue_json" "{}" 2>/dev/null || echo "{}")
723
+ PREDICTED_COST=$(echo "$cost_json" | jq -r '.estimated_cost_usd // empty' 2>/dev/null || echo "")
724
+ export PREDICTED_COST
725
+ fi
726
+ fi
727
+ fi
728
+
729
+ # Start background heartbeat writer
730
+ start_heartbeat
731
+
732
+ # Initialize GitHub Check Runs for all pipeline stages
733
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_pipeline_start >/dev/null 2>&1; then
734
+ local head_sha
735
+ head_sha=$(git rev-parse HEAD 2>/dev/null || echo "")
736
+ if [[ -n "$head_sha" && -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
737
+ local stages_json
738
+ stages_json=$(jq -c '[.stages[] | select(.enabled == true) | .id]' "$PIPELINE_CONFIG" 2>/dev/null || echo '[]')
739
+ gh_checks_pipeline_start "$REPO_OWNER" "$REPO_NAME" "$head_sha" "$stages_json" >/dev/null 2>/dev/null || true
740
+ info "GitHub Checks: created check runs for pipeline stages"
741
+ fi
742
+ fi
743
+
744
+ # Send start notification
745
+ notify "Pipeline Started" "Goal: ${GOAL}\nPipeline: ${PIPELINE_NAME}" "info"
746
+
747
+ emit_event "pipeline.started" \
748
+ "issue=${ISSUE_NUMBER:-0}" \
749
+ "template=${PIPELINE_NAME}" \
750
+ "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
751
+ "machine=$(hostname 2>/dev/null || echo "unknown")" \
752
+ "pipeline=${PIPELINE_NAME}" \
753
+ "model=${MODEL:-opus}" \
754
+ "goal=${GOAL}"
755
+
756
+ # Record pipeline run in SQLite for dashboard visibility
757
+ if type add_pipeline_run >/dev/null 2>&1; then
758
+ add_pipeline_run "${SHIPWRIGHT_PIPELINE_ID}" "${ISSUE_NUMBER:-0}" "${GOAL}" "${BRANCH:-}" "${PIPELINE_NAME}" 2>/dev/null || true
759
+ fi
760
+
761
+ # Durable WAL: publish pipeline start event
762
+ if type publish_event >/dev/null 2>&1; then
763
+ publish_event "pipeline.started" "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"pipeline\":\"${PIPELINE_NAME}\",\"goal\":\"${GOAL:0:200}\"}" 2>/dev/null || true
764
+ fi
765
+
766
+
767
+ run_pipeline
768
+ local exit_code=$?
769
+ PIPELINE_EXIT_CODE="$exit_code"
770
+
771
+ # Compute total cost for pipeline.completed (prefer actual from Claude when available)
772
+ local model_key="${MODEL:-sonnet}"
773
+ local total_cost
774
+ if [[ -n "${TOTAL_COST_USD:-}" && "${TOTAL_COST_USD}" != "0" && "${TOTAL_COST_USD}" != "null" ]]; then
775
+ total_cost="${TOTAL_COST_USD}"
776
+ else
777
+ local input_cost output_cost
778
+ input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
779
+ output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
780
+ total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
781
+ fi
782
+
783
+ # Send completion notification + event
784
+ local total_dur_s=""
785
+ [[ -n "$PIPELINE_START_EPOCH" ]] && total_dur_s=$(( $(now_epoch) - PIPELINE_START_EPOCH ))
786
+ if [[ "$exit_code" -eq 0 ]]; then
787
+ local total_dur=""
788
+ [[ -n "$total_dur_s" ]] && total_dur=$(format_duration "$total_dur_s")
789
+ local pr_url
790
+ pr_url=$(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo "")
791
+ notify "Pipeline Complete" "Goal: ${GOAL}\nDuration: ${total_dur:-unknown}\nPR: ${pr_url:-N/A}" "success"
792
+ emit_event "pipeline.completed" \
793
+ "issue=${ISSUE_NUMBER:-0}" \
794
+ "result=success" \
795
+ "duration_s=${total_dur_s:-0}" \
796
+ "iterations=$((SELF_HEAL_COUNT + 1))" \
797
+ "template=${PIPELINE_NAME}" \
798
+ "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
799
+ "stages_passed=${PIPELINE_STAGES_PASSED:-0}" \
800
+ "slowest_stage=${PIPELINE_SLOWEST_STAGE:-}" \
801
+ "pr_url=${pr_url:-}" \
802
+ "agent_id=${PIPELINE_AGENT_ID}" \
803
+ "input_tokens=$TOTAL_INPUT_TOKENS" \
804
+ "output_tokens=$TOTAL_OUTPUT_TOKENS" \
805
+ "total_cost=$total_cost" \
806
+ "self_heal_count=$SELF_HEAL_COUNT"
807
+
808
+ # Finalize audit trail
809
+ if type audit_finalize >/dev/null 2>&1; then
810
+ audit_finalize "success" || true
811
+ fi
812
+
813
+ # Update pipeline run status in SQLite
814
+ if type update_pipeline_status >/dev/null 2>&1; then
815
+ update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "completed" "${PIPELINE_SLOWEST_STAGE:-}" "complete" "${total_dur_s:-0}" 2>/dev/null || true
816
+ fi
817
+
818
+ # Auto-ingest pipeline outcome into recruit profiles
819
+ if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
820
+ bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
821
+ fi
822
+
823
+ # Capture success patterns to memory (learn what works — parallel the failure path)
824
+ if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
825
+ bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
826
+ fi
827
+ # Record RL episode for cross-session learning (Phase 7)
828
+ if type rl_record_from_pipeline >/dev/null 2>&1; then
829
+ rl_record_from_pipeline true "$((SELF_HEAL_COUNT + 1))" "${total_cost:-0}" \
830
+ "${INTELLIGENCE_LANGUAGE:-}" "${INTELLIGENCE_COMPLEXITY:-}" \
831
+ "${INTELLIGENCE_ISSUE_TYPE:-}" "[]" "[]" 2>/dev/null || true
832
+ fi
833
+ # Autoresearch RL Phase 8: aggregate rewards, update bandits, learn policy
834
+ if type reward_aggregate_pipeline >/dev/null 2>&1; then
835
+ reward_aggregate_pipeline "${PIPELINE_JOB_ID:-$$}" "${INTELLIGENCE_LANGUAGE:-unknown}" "${INTELLIGENCE_COMPLEXITY:-medium}" 2>/dev/null || true
836
+ fi
837
+ if type bandit_update >/dev/null 2>&1; then
838
+ bandit_update "model" "${CURRENT_STAGE_ID:-build}:${MODEL:-opus}" "success" 2>/dev/null || true
839
+ fi
840
+ if type policy_learn_from_history >/dev/null 2>&1; then
841
+ policy_learn_from_history 2>/dev/null || true
842
+ fi
843
+ # Update memory baselines with successful run metrics
844
+ if type memory_update_metrics >/dev/null 2>&1; then
845
+ memory_update_metrics "build_duration_s" "${total_dur_s:-0}" 2>/dev/null || true
846
+ memory_update_metrics "total_cost_usd" "${total_cost:-0}" 2>/dev/null || true
847
+ memory_update_metrics "iterations" "$((SELF_HEAL_COUNT + 1))" 2>/dev/null || true
848
+ fi
849
+
850
+ # Record positive fix outcome if self-healing succeeded
851
+ if [[ "$SELF_HEAL_COUNT" -gt 0 && -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
852
+ local _success_sig
853
+ _success_sig=$(tail -30 "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//' || true)
854
+ if [[ -n "$_success_sig" ]]; then
855
+ bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_success_sig" "true" "true" 2>/dev/null || true
856
+ fi
857
+ fi
858
+ else
859
+ notify "Pipeline Failed" "Goal: ${GOAL}\nFailed at: ${CURRENT_STAGE_ID:-unknown}" "error"
860
+ emit_event "pipeline.completed" \
861
+ "issue=${ISSUE_NUMBER:-0}" \
862
+ "result=failure" \
863
+ "duration_s=${total_dur_s:-0}" \
864
+ "iterations=$((SELF_HEAL_COUNT + 1))" \
865
+ "template=${PIPELINE_NAME}" \
866
+ "complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
867
+ "failed_stage=${CURRENT_STAGE_ID:-unknown}" \
868
+ "error_class=${LAST_STAGE_ERROR_CLASS:-unknown}" \
869
+ "agent_id=${PIPELINE_AGENT_ID}" \
870
+ "input_tokens=$TOTAL_INPUT_TOKENS" \
871
+ "output_tokens=$TOTAL_OUTPUT_TOKENS" \
872
+ "total_cost=$total_cost" \
873
+ "self_heal_count=$SELF_HEAL_COUNT"
874
+
875
+ # Finalize audit trail
876
+ if type audit_finalize >/dev/null 2>&1; then
877
+ audit_finalize "failure" || true
878
+ fi
879
+
880
+ # Update pipeline run status in SQLite
881
+ if type update_pipeline_status >/dev/null 2>&1; then
882
+ update_pipeline_status "${SHIPWRIGHT_PIPELINE_ID}" "failed" "${CURRENT_STAGE_ID:-unknown}" "failed" "${total_dur_s:-0}" 2>/dev/null || true
883
+ fi
884
+
885
+ # Auto-ingest pipeline outcome into recruit profiles
886
+ if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
887
+ bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
888
+ fi
889
+
890
+ # Record RL episode for cross-session learning (Phase 7 — failure case)
891
+ if type rl_record_from_pipeline >/dev/null 2>&1; then
892
+ rl_record_from_pipeline false "$((SELF_HEAL_COUNT + 1))" "${total_cost:-0}" \
893
+ "${INTELLIGENCE_LANGUAGE:-}" "${INTELLIGENCE_COMPLEXITY:-}" \
894
+ "${INTELLIGENCE_ISSUE_TYPE:-}" "[]" "[]" 2>/dev/null || true
895
+ fi
896
+ # Autoresearch RL Phase 8: aggregate rewards, update bandits, learn policy (failure case)
897
+ if type reward_aggregate_pipeline >/dev/null 2>&1; then
898
+ reward_aggregate_pipeline "${PIPELINE_JOB_ID:-$$}" "${INTELLIGENCE_LANGUAGE:-unknown}" "${INTELLIGENCE_COMPLEXITY:-medium}" 2>/dev/null || true
899
+ fi
900
+ if type bandit_update >/dev/null 2>&1; then
901
+ bandit_update "model" "${CURRENT_STAGE_ID:-build}:${MODEL:-opus}" "failure" 2>/dev/null || true
902
+ fi
903
+ if type policy_learn_from_history >/dev/null 2>&1; then
904
+ policy_learn_from_history 2>/dev/null || true
905
+ fi
906
+
907
+ # Capture failure learnings to memory
908
+ if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
909
+ bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
910
+ bash "$SCRIPT_DIR/sw-memory.sh" analyze-failure "$ARTIFACTS_DIR/.claude-tokens-${CURRENT_STAGE_ID:-build}.log" "${CURRENT_STAGE_ID:-unknown}" 2>/dev/null || true
911
+
912
+ # Record negative fix outcome — memory suggested a fix but it didn't resolve the issue
913
+ # This closes the negative side of the fix-outcome feedback loop
914
+ if [[ "$SELF_HEAL_COUNT" -gt 0 ]]; then
915
+ local _fail_sig
916
+ _fail_sig=$(tail -30 "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//' || true)
917
+ if [[ -n "$_fail_sig" ]]; then
918
+ bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_fail_sig" "true" "false" 2>/dev/null || true
919
+ fi
920
+ fi
921
+ fi
922
+ fi
923
+
924
+ # AI-powered outcome learning
925
+ if type skill_analyze_outcome >/dev/null 2>&1; then
926
+ local _failed_stage=""
927
+ local _error_ctx=""
928
+ if [[ "$exit_code" -ne 0 ]]; then
929
+ _failed_stage="${CURRENT_STAGE_ID:-unknown}"
930
+ _error_ctx=$(tail -30 "$ARTIFACTS_DIR/errors-collected.json" 2>/dev/null || true)
931
+ fi
932
+ local _outcome_result="success"
933
+ [[ "$exit_code" -ne 0 ]] && _outcome_result="failure"
934
+
935
+ if skill_analyze_outcome "$_outcome_result" "$ARTIFACTS_DIR" "$_failed_stage" "$_error_ctx" 2>/dev/null; then
936
+ info "Skill outcome analysis complete — learnings recorded"
937
+ fi
938
+ fi
939
+
940
+ # ── Prediction Validation Events ──
941
+ # Compare predicted vs actual outcomes for feedback loop calibration
942
+ local pipeline_success="false"
943
+ [[ "$exit_code" -eq 0 ]] && pipeline_success="true"
944
+
945
+ # Complexity prediction vs actual iterations
946
+ emit_event "prediction.validated" \
947
+ "issue=${ISSUE_NUMBER:-0}" \
948
+ "predicted_complexity=${INTELLIGENCE_COMPLEXITY:-0}" \
949
+ "actual_iterations=$SELF_HEAL_COUNT" \
950
+ "success=$pipeline_success"
951
+
952
+ # Close intelligence prediction feedback loop — validate predicted vs actual
953
+ if type intelligence_validate_prediction >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
954
+ intelligence_validate_prediction \
955
+ "$ISSUE_NUMBER" \
956
+ "${INTELLIGENCE_COMPLEXITY:-0}" \
957
+ "${SELF_HEAL_COUNT:-0}" \
958
+ "$pipeline_success" 2>/dev/null || true
959
+ fi
960
+
961
+ # Validate iterations prediction against actuals (cost validation moved below after total_cost is computed)
962
+ local ACTUAL_ITERATIONS=$((SELF_HEAL_COUNT + 1))
963
+ if [[ -n "${PREDICTED_ITERATIONS:-}" ]] && type intelligence_validate_prediction >/dev/null 2>&1; then
964
+ intelligence_validate_prediction "iterations" "$PREDICTED_ITERATIONS" "$ACTUAL_ITERATIONS" 2>/dev/null || true
965
+ fi
966
+
967
+ # Close predictive anomaly feedback loop — confirm whether flagged anomalies were real
968
+ if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
969
+ local _actual_failure="false"
970
+ [[ "$exit_code" -ne 0 ]] && _actual_failure="true"
971
+ # Confirm anomalies for build and test stages based on pipeline outcome
972
+ for _anomaly_stage in build test; do
973
+ bash "$SCRIPT_DIR/sw-predictive.sh" confirm-anomaly "$_anomaly_stage" "duration_s" "$_actual_failure" 2>/dev/null || true
974
+ done
975
+ fi
976
+
977
+ # Template outcome tracking
978
+ emit_event "template.outcome" \
979
+ "issue=${ISSUE_NUMBER:-0}" \
980
+ "template=${PIPELINE_NAME}" \
981
+ "success=$pipeline_success" \
982
+ "duration_s=${total_dur_s:-0}" \
983
+ "complexity=${INTELLIGENCE_COMPLEXITY:-0}"
984
+
985
+ # Risk prediction vs actual failure
986
+ local predicted_risk="${INTELLIGENCE_RISK_SCORE:-0}"
987
+ emit_event "risk.outcome" \
988
+ "issue=${ISSUE_NUMBER:-0}" \
989
+ "predicted_risk=$predicted_risk" \
990
+ "actual_failure=$([[ "$exit_code" -ne 0 ]] && echo "true" || echo "false")"
991
+
992
+ # Per-stage model outcome events (read from stage timings)
993
+ local routing_log="${ARTIFACTS_DIR}/model-routing.log"
994
+ if [[ -f "$routing_log" ]]; then
995
+ while IFS='|' read -r s_stage s_model s_success; do
996
+ [[ -z "$s_stage" ]] && continue
997
+ emit_event "model.outcome" \
998
+ "issue=${ISSUE_NUMBER:-0}" \
999
+ "stage=$s_stage" \
1000
+ "model=$s_model" \
1001
+ "success=$s_success"
1002
+ done < "$routing_log"
1003
+ fi
1004
+
1005
+ # Record pipeline outcome for model routing feedback loop
1006
+ if type optimize_analyze_outcome >/dev/null 2>&1; then
1007
+ optimize_analyze_outcome "$STATE_FILE" 2>/dev/null || true
1008
+ fi
1009
+
1010
+ # Auto-learn after pipeline completion (non-blocking)
1011
+ if type optimize_tune_templates &>/dev/null; then
1012
+ (
1013
+ optimize_tune_templates 2>/dev/null
1014
+ optimize_learn_iterations 2>/dev/null
1015
+ optimize_route_models 2>/dev/null
1016
+ optimize_learn_risk_keywords 2>/dev/null
1017
+ ) &
1018
+ fi
1019
+
1020
+ if type memory_finalize_pipeline >/dev/null 2>&1; then
1021
+ memory_finalize_pipeline "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
1022
+ fi
1023
+
1024
+ # Broadcast discovery for cross-pipeline learning
1025
+ if type broadcast_discovery >/dev/null 2>&1; then
1026
+ local _disc_result="failure"
1027
+ [[ "$exit_code" -eq 0 ]] && _disc_result="success"
1028
+ local _disc_files=""
1029
+ _disc_files=$(git diff --name-only HEAD~1 HEAD 2>/dev/null | head -20 | tr '\n' ',' || true)
1030
+ broadcast_discovery "pipeline_${_disc_result}" "${_disc_files:-unknown}" \
1031
+ "Pipeline ${_disc_result} for issue #${ISSUE_NUMBER:-0} (${PIPELINE_NAME:-unknown} template, stage=${CURRENT_STAGE_ID:-unknown})" \
1032
+ "${_disc_result}" 2>/dev/null || true
1033
+ fi
1034
+
1035
+ # Emit cost event — prefer actual cost from Claude CLI when available
1036
+ local model_key="${MODEL:-sonnet}"
1037
+ local total_cost
1038
+ if [[ -n "${TOTAL_COST_USD:-}" && "${TOTAL_COST_USD}" != "0" && "${TOTAL_COST_USD}" != "null" ]]; then
1039
+ total_cost="${TOTAL_COST_USD}"
1040
+ else
1041
+ # Fallback: estimate from token counts and model rates
1042
+ local input_cost output_cost
1043
+ input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
1044
+ output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
1045
+ total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
1046
+ fi
1047
+
1048
+ emit_event "pipeline.cost" \
1049
+ "input_tokens=$TOTAL_INPUT_TOKENS" \
1050
+ "output_tokens=$TOTAL_OUTPUT_TOKENS" \
1051
+ "model=$model_key" \
1052
+ "cost_usd=$total_cost"
1053
+
1054
+ # Persist cost entry to costs.json + SQLite (was missing — tokens accumulated but never written)
1055
+ if type cost_record >/dev/null 2>&1; then
1056
+ cost_record "$TOTAL_INPUT_TOKENS" "$TOTAL_OUTPUT_TOKENS" "$model_key" "pipeline" "${ISSUE_NUMBER:-}" 2>/dev/null || true
1057
+ fi
1058
+
1059
+ # Record pipeline outcome for Thompson sampling / outcome-based learning
1060
+ if type db_record_outcome >/dev/null 2>&1; then
1061
+ local _outcome_success=0
1062
+ [[ "$exit_code" -eq 0 ]] && _outcome_success=1
1063
+ local _outcome_complexity="medium"
1064
+ [[ "${INTELLIGENCE_COMPLEXITY:-5}" -le 3 ]] && _outcome_complexity="low"
1065
+ [[ "${INTELLIGENCE_COMPLEXITY:-5}" -ge 7 ]] && _outcome_complexity="high"
1066
+ db_record_outcome \
1067
+ "${SHIPWRIGHT_PIPELINE_ID:-pipeline-$$-${ISSUE_NUMBER:-0}}" \
1068
+ "${ISSUE_NUMBER:-}" \
1069
+ "${PIPELINE_NAME:-standard}" \
1070
+ "$_outcome_success" \
1071
+ "${total_dur_s:-0}" \
1072
+ "${SELF_HEAL_COUNT:-0}" \
1073
+ "${total_cost:-0}" \
1074
+ "$_outcome_complexity" 2>/dev/null || true
1075
+ fi
1076
+
1077
+ # Validate cost prediction against actual (after total_cost is computed)
1078
+ if [[ -n "${PREDICTED_COST:-}" ]] && type intelligence_validate_prediction >/dev/null 2>&1; then
1079
+ intelligence_validate_prediction "cost" "$PREDICTED_COST" "$total_cost" 2>/dev/null || true
1080
+ fi
1081
+
1082
+ return $exit_code
1083
+ }
1084
+
1085
+ # ─── Resume, Status, Abort Commands ────────────────────────────────
1086
pipeline_resume() {
  # Resume an existing pipeline run from its persisted state file.
  # Re-creates runtime directories, reloads saved progress via resume_state,
  # prints a separating blank line, then re-enters the stage loop.
  # Exit status is that of run_pipeline (the last command).
  # NOTE(review): unlike pipeline_start, no durable lock is acquired here —
  # confirm a resume cannot race a concurrent `pipeline start` on the same issue.
  setup_dirs
  resume_state
  echo ""
  run_pipeline
}
1092
+
1093
#######################################
# Print a human-readable status report for the current pipeline.
# Parses the YAML-like frontmatter of $STATE_FILE with plain bash
# (no yq dependency): first pass extracts scalar fields, second pass
# extracts the per-stage status map, then artifacts are listed.
# Globals:   STATE_FILE, ARTIFACTS_DIR (read);
#            color/format vars PURPLE, BOLD, DIM, CYAN, GREEN, YELLOW,
#            RED, RESET (read)
# Outputs:   formatted report on stdout
# Returns:   0 (early return when no state file exists)
#######################################
pipeline_status() {
  setup_dirs

  if [[ ! -f "$STATE_FILE" ]]; then
    info "No active pipeline."
    echo -e " Start one: ${DIM}shipwright pipeline start --goal \"...\"${RESET}"
    return
  fi

  echo ""
  echo -e "${PURPLE}${BOLD}━━━ Pipeline Status ━━━${RESET}"
  echo ""

  # Pass 1: scalar frontmatter fields between the first and second "---".
  local p_name="" p_goal="" p_status="" p_branch="" p_stage="" p_started="" p_issue="" p_elapsed="" p_pr=""
  local in_frontmatter=false
  while IFS= read -r line; do
    if [[ "$line" == "---" ]]; then
      # First "---" opens the frontmatter; second one closes it.
      if $in_frontmatter; then break; else in_frontmatter=true; continue; fi
    fi
    if $in_frontmatter; then
      # xargs trims surrounding whitespace; the sed variant additionally
      # strips a pair of surrounding double quotes for quoted values.
      case "$line" in
        pipeline:*) p_name="$(echo "${line#pipeline:}" | xargs)" ;;
        goal:*) p_goal="$(echo "${line#goal:}" | sed 's/^ *"//;s/" *$//')" ;;
        status:*) p_status="$(echo "${line#status:}" | xargs)" ;;
        branch:*) p_branch="$(echo "${line#branch:}" | sed 's/^ *"//;s/" *$//')" ;;
        current_stage:*) p_stage="$(echo "${line#current_stage:}" | xargs)" ;;
        started_at:*) p_started="$(echo "${line#started_at:}" | xargs)" ;;
        issue:*) p_issue="$(echo "${line#issue:}" | sed 's/^ *"//;s/" *$//')" ;;
        elapsed:*) p_elapsed="$(echo "${line#elapsed:}" | xargs)" ;;
        pr_number:*) p_pr="$(echo "${line#pr_number:}" | xargs)" ;;
      esac
    fi
  done < "$STATE_FILE"

  # Map overall pipeline status to a colored glyph.
  local status_icon
  case "$p_status" in
    running) status_icon="${CYAN}●${RESET}" ;;
    complete) status_icon="${GREEN}✓${RESET}" ;;
    paused) status_icon="${YELLOW}⏸${RESET}" ;;
    interrupted) status_icon="${YELLOW}⚡${RESET}" ;;
    failed) status_icon="${RED}✗${RESET}" ;;
    aborted) status_icon="${RED}◼${RESET}" ;;
    *) status_icon="${DIM}○${RESET}" ;;
  esac

  echo -e " ${BOLD}Pipeline:${RESET} $p_name"
  echo -e " ${BOLD}Goal:${RESET} $p_goal"
  echo -e " ${BOLD}Status:${RESET} $status_icon $p_status"
  # Optional fields are only shown when present in the state file.
  [[ -n "$p_branch" ]] && echo -e " ${BOLD}Branch:${RESET} $p_branch"
  [[ -n "$p_issue" ]] && echo -e " ${BOLD}Issue:${RESET} $p_issue"
  [[ -n "$p_pr" ]] && echo -e " ${BOLD}PR:${RESET} #$p_pr"
  [[ -n "$p_stage" ]] && echo -e " ${BOLD}Stage:${RESET} $p_stage"
  [[ -n "$p_started" ]] && echo -e " ${BOLD}Started:${RESET} $p_started"
  [[ -n "$p_elapsed" ]] && echo -e " ${BOLD}Elapsed:${RESET} $p_elapsed"

  echo ""
  echo -e " ${BOLD}Stages:${RESET}"

  # Pass 2: the "stages:" mapping — indented "<id>: <status>" lines.
  local in_stages=false
  while IFS= read -r line; do
    if [[ "$line" == "stages:" ]]; then
      in_stages=true; continue
    fi
    if $in_stages; then
      # The stages block ends at "---" or at the first non-indented line.
      if [[ "$line" == "---" || ! "$line" =~ ^" " ]]; then break; fi
      local trimmed
      trimmed="$(echo "$line" | xargs)"
      if [[ "$trimmed" == *":"* ]]; then
        local sid="${trimmed%%:*}"
        # NOTE(review): assumes the file always has "key: value" with a
        # space after the colon; otherwise sst keeps a leading ":".
        local sst="${trimmed#*: }"
        local s_icon
        case "$sst" in
          complete) s_icon="${GREEN}✓${RESET}" ;;
          running) s_icon="${CYAN}●${RESET}" ;;
          failed) s_icon="${RED}✗${RESET}" ;;
          *) s_icon="${DIM}○${RESET}" ;;
        esac
        echo -e " $s_icon $sid"
      fi
    fi
  done < "$STATE_FILE"

  # Artifact summary: count files, then list names for display only
  # (ls output is never parsed programmatically here).
  if [[ -d "$ARTIFACTS_DIR" ]]; then
    local artifact_count
    artifact_count=$(find "$ARTIFACTS_DIR" -type f 2>/dev/null | wc -l | xargs)
    if [[ "$artifact_count" -gt 0 ]]; then
      echo ""
      echo -e " ${BOLD}Artifacts:${RESET} ($artifact_count files)"
      ls "$ARTIFACTS_DIR" 2>/dev/null | sed 's/^/ /'
    fi
  fi
  echo ""
}
1186
+
1187
#######################################
# Abort the active pipeline: mark the persisted state as "aborted" and
# notify the linked GitHub issue (label removal + comment), if any.
# Globals:   STATE_FILE (read), PIPELINE_STATUS (written),
#            ISSUE_NUMBER, CURRENT_STAGE (read; may be unset when
#            resume_state fails)
# Outputs:   status messages via info/warn and stdout
# Returns:   0 always
#######################################
pipeline_abort() {
  setup_dirs

  if [[ ! -f "$STATE_FILE" ]]; then
    info "No active pipeline to abort."
    return
  fi

  # Read the current status directly from the state file's frontmatter;
  # head -1 guards against duplicate "status:" keys.
  local current_status
  current_status=$(sed -n 's/^status: *//p' "$STATE_FILE" | head -1)

  if [[ "$current_status" == "complete" || "$current_status" == "aborted" ]]; then
    info "Pipeline already $current_status."
    return
  fi

  # Best-effort restore of in-memory state so write_state keeps the other
  # fields intact; errors are intentionally ignored.
  resume_state 2>/dev/null || true
  PIPELINE_STATUS="aborted"
  write_state

  # Update GitHub. Use ${ISSUE_NUMBER:-} because a failed resume_state can
  # leave the variable unset, which would trip `set -u` on a bare test.
  if [[ -n "${ISSUE_NUMBER:-}" ]]; then
    gh_init
    gh_remove_label "$ISSUE_NUMBER" "pipeline/in-progress"
    gh_comment_issue "$ISSUE_NUMBER" "⏹️ **Pipeline aborted** at stage: ${CURRENT_STAGE:-unknown}"
  fi

  warn "Pipeline aborted."
  echo -e " State saved at: ${DIM}$STATE_FILE${RESET}"
}