shipwright-cli 3.1.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (283)
  1. package/.claude/agents/code-reviewer.md +2 -0
  2. package/.claude/agents/devops-engineer.md +2 -0
  3. package/.claude/agents/doc-fleet-agent.md +2 -0
  4. package/.claude/agents/pipeline-agent.md +2 -0
  5. package/.claude/agents/shell-script-specialist.md +2 -0
  6. package/.claude/agents/test-specialist.md +2 -0
  7. package/.claude/hooks/agent-crash-capture.sh +32 -0
  8. package/.claude/hooks/post-tool-use.sh +3 -2
  9. package/.claude/hooks/pre-tool-use.sh +35 -3
  10. package/README.md +22 -8
  11. package/claude-code/hooks/config-change.sh +18 -0
  12. package/claude-code/hooks/instructions-reloaded.sh +7 -0
  13. package/claude-code/hooks/worktree-create.sh +25 -0
  14. package/claude-code/hooks/worktree-remove.sh +20 -0
  15. package/config/code-constitution.json +130 -0
  16. package/config/defaults.json +25 -2
  17. package/config/policy.json +1 -1
  18. package/dashboard/middleware/auth.ts +134 -0
  19. package/dashboard/middleware/constants.ts +21 -0
  20. package/dashboard/public/index.html +8 -6
  21. package/dashboard/public/styles.css +176 -97
  22. package/dashboard/routes/auth.ts +38 -0
  23. package/dashboard/server.ts +117 -25
  24. package/dashboard/services/config.ts +26 -0
  25. package/dashboard/services/db.ts +118 -0
  26. package/dashboard/src/canvas/pixel-agent.ts +298 -0
  27. package/dashboard/src/canvas/pixel-sprites.ts +440 -0
  28. package/dashboard/src/canvas/shipyard-effects.ts +367 -0
  29. package/dashboard/src/canvas/shipyard-scene.ts +616 -0
  30. package/dashboard/src/canvas/submarine-layout.ts +267 -0
  31. package/dashboard/src/components/header.ts +8 -7
  32. package/dashboard/src/core/api.ts +5 -0
  33. package/dashboard/src/core/router.ts +1 -0
  34. package/dashboard/src/design/submarine-theme.ts +253 -0
  35. package/dashboard/src/main.ts +2 -0
  36. package/dashboard/src/types/api.ts +12 -1
  37. package/dashboard/src/views/activity.ts +2 -1
  38. package/dashboard/src/views/metrics.ts +69 -1
  39. package/dashboard/src/views/shipyard.ts +39 -0
  40. package/dashboard/types/index.ts +166 -0
  41. package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
  42. package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
  43. package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
  44. package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
  45. package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
  46. package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
  47. package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
  48. package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
  49. package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
  50. package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
  51. package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
  52. package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
  53. package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
  54. package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
  55. package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
  56. package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
  57. package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
  58. package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
  59. package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
  60. package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
  61. package/docs/research/RESEARCH_INDEX.md +439 -0
  62. package/docs/research/RESEARCH_SOURCES.md +440 -0
  63. package/docs/research/RESEARCH_SUMMARY.txt +275 -0
  64. package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
  65. package/package.json +2 -2
  66. package/scripts/lib/adaptive-model.sh +427 -0
  67. package/scripts/lib/adaptive-timeout.sh +316 -0
  68. package/scripts/lib/audit-trail.sh +309 -0
  69. package/scripts/lib/auto-recovery.sh +471 -0
  70. package/scripts/lib/bandit-selector.sh +431 -0
  71. package/scripts/lib/bootstrap.sh +104 -2
  72. package/scripts/lib/causal-graph.sh +455 -0
  73. package/scripts/lib/compat.sh +126 -0
  74. package/scripts/lib/compound-audit.sh +337 -0
  75. package/scripts/lib/constitutional.sh +454 -0
  76. package/scripts/lib/context-budget.sh +359 -0
  77. package/scripts/lib/convergence.sh +594 -0
  78. package/scripts/lib/cost-optimizer.sh +634 -0
  79. package/scripts/lib/daemon-adaptive.sh +14 -2
  80. package/scripts/lib/daemon-dispatch.sh +106 -17
  81. package/scripts/lib/daemon-failure.sh +34 -4
  82. package/scripts/lib/daemon-patrol.sh +25 -4
  83. package/scripts/lib/daemon-poll-github.sh +361 -0
  84. package/scripts/lib/daemon-poll-health.sh +299 -0
  85. package/scripts/lib/daemon-poll.sh +27 -611
  86. package/scripts/lib/daemon-state.sh +119 -66
  87. package/scripts/lib/daemon-triage.sh +10 -0
  88. package/scripts/lib/dod-scorecard.sh +442 -0
  89. package/scripts/lib/error-actionability.sh +300 -0
  90. package/scripts/lib/formal-spec.sh +461 -0
  91. package/scripts/lib/helpers.sh +180 -5
  92. package/scripts/lib/intent-analysis.sh +409 -0
  93. package/scripts/lib/loop-convergence.sh +350 -0
  94. package/scripts/lib/loop-iteration.sh +682 -0
  95. package/scripts/lib/loop-progress.sh +48 -0
  96. package/scripts/lib/loop-restart.sh +185 -0
  97. package/scripts/lib/memory-effectiveness.sh +506 -0
  98. package/scripts/lib/mutation-executor.sh +352 -0
  99. package/scripts/lib/outcome-feedback.sh +521 -0
  100. package/scripts/lib/pipeline-cli.sh +336 -0
  101. package/scripts/lib/pipeline-commands.sh +1216 -0
  102. package/scripts/lib/pipeline-detection.sh +101 -3
  103. package/scripts/lib/pipeline-execution.sh +897 -0
  104. package/scripts/lib/pipeline-github.sh +28 -3
  105. package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
  106. package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
  107. package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
  108. package/scripts/lib/pipeline-intelligence.sh +104 -1138
  109. package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
  110. package/scripts/lib/pipeline-quality-checks.sh +17 -711
  111. package/scripts/lib/pipeline-quality-gates.sh +563 -0
  112. package/scripts/lib/pipeline-stages-build.sh +730 -0
  113. package/scripts/lib/pipeline-stages-delivery.sh +965 -0
  114. package/scripts/lib/pipeline-stages-intake.sh +1133 -0
  115. package/scripts/lib/pipeline-stages-monitor.sh +407 -0
  116. package/scripts/lib/pipeline-stages-review.sh +1022 -0
  117. package/scripts/lib/pipeline-stages.sh +161 -2901
  118. package/scripts/lib/pipeline-state.sh +36 -5
  119. package/scripts/lib/pipeline-util.sh +487 -0
  120. package/scripts/lib/policy-learner.sh +438 -0
  121. package/scripts/lib/process-reward.sh +493 -0
  122. package/scripts/lib/project-detect.sh +649 -0
  123. package/scripts/lib/quality-profile.sh +334 -0
  124. package/scripts/lib/recruit-commands.sh +885 -0
  125. package/scripts/lib/recruit-learning.sh +739 -0
  126. package/scripts/lib/recruit-roles.sh +648 -0
  127. package/scripts/lib/reward-aggregator.sh +458 -0
  128. package/scripts/lib/rl-optimizer.sh +362 -0
  129. package/scripts/lib/root-cause.sh +427 -0
  130. package/scripts/lib/scope-enforcement.sh +445 -0
  131. package/scripts/lib/session-restart.sh +493 -0
  132. package/scripts/lib/skill-memory.sh +300 -0
  133. package/scripts/lib/skill-registry.sh +775 -0
  134. package/scripts/lib/spec-driven.sh +476 -0
  135. package/scripts/lib/test-helpers.sh +18 -7
  136. package/scripts/lib/test-holdout.sh +429 -0
  137. package/scripts/lib/test-optimizer.sh +511 -0
  138. package/scripts/shipwright-file-suggest.sh +45 -0
  139. package/scripts/skills/adversarial-quality.md +61 -0
  140. package/scripts/skills/api-design.md +44 -0
  141. package/scripts/skills/architecture-design.md +50 -0
  142. package/scripts/skills/brainstorming.md +43 -0
  143. package/scripts/skills/data-pipeline.md +44 -0
  144. package/scripts/skills/deploy-safety.md +64 -0
  145. package/scripts/skills/documentation.md +38 -0
  146. package/scripts/skills/frontend-design.md +45 -0
  147. package/scripts/skills/generated/.gitkeep +0 -0
  148. package/scripts/skills/generated/_refinements/.gitkeep +0 -0
  149. package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
  150. package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
  151. package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
  152. package/scripts/skills/generated/cli-version-management.md +29 -0
  153. package/scripts/skills/generated/collection-system-validation.md +99 -0
  154. package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
  155. package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
  156. package/scripts/skills/generated/test-parallelization-detection.md +65 -0
  157. package/scripts/skills/observability.md +79 -0
  158. package/scripts/skills/performance.md +48 -0
  159. package/scripts/skills/pr-quality.md +49 -0
  160. package/scripts/skills/product-thinking.md +43 -0
  161. package/scripts/skills/security-audit.md +49 -0
  162. package/scripts/skills/systematic-debugging.md +40 -0
  163. package/scripts/skills/testing-strategy.md +47 -0
  164. package/scripts/skills/two-stage-review.md +52 -0
  165. package/scripts/skills/validation-thoroughness.md +55 -0
  166. package/scripts/sw +9 -3
  167. package/scripts/sw-activity.sh +9 -8
  168. package/scripts/sw-adaptive.sh +8 -7
  169. package/scripts/sw-adversarial.sh +2 -1
  170. package/scripts/sw-architecture-enforcer.sh +3 -1
  171. package/scripts/sw-auth.sh +12 -2
  172. package/scripts/sw-autonomous.sh +5 -1
  173. package/scripts/sw-changelog.sh +4 -1
  174. package/scripts/sw-checkpoint.sh +2 -1
  175. package/scripts/sw-ci.sh +15 -6
  176. package/scripts/sw-cleanup.sh +4 -26
  177. package/scripts/sw-code-review.sh +45 -20
  178. package/scripts/sw-connect.sh +2 -1
  179. package/scripts/sw-context.sh +2 -1
  180. package/scripts/sw-cost.sh +107 -5
  181. package/scripts/sw-daemon.sh +71 -11
  182. package/scripts/sw-dashboard.sh +3 -1
  183. package/scripts/sw-db.sh +71 -20
  184. package/scripts/sw-decide.sh +8 -2
  185. package/scripts/sw-decompose.sh +360 -17
  186. package/scripts/sw-deps.sh +4 -1
  187. package/scripts/sw-developer-simulation.sh +4 -1
  188. package/scripts/sw-discovery.sh +378 -5
  189. package/scripts/sw-doc-fleet.sh +4 -1
  190. package/scripts/sw-docs-agent.sh +3 -1
  191. package/scripts/sw-docs.sh +2 -1
  192. package/scripts/sw-doctor.sh +453 -2
  193. package/scripts/sw-dora.sh +4 -1
  194. package/scripts/sw-durable.sh +12 -7
  195. package/scripts/sw-e2e-orchestrator.sh +17 -16
  196. package/scripts/sw-eventbus.sh +13 -4
  197. package/scripts/sw-evidence.sh +364 -12
  198. package/scripts/sw-feedback.sh +550 -9
  199. package/scripts/sw-fix.sh +20 -1
  200. package/scripts/sw-fleet-discover.sh +6 -2
  201. package/scripts/sw-fleet-viz.sh +9 -4
  202. package/scripts/sw-fleet.sh +5 -1
  203. package/scripts/sw-github-app.sh +18 -4
  204. package/scripts/sw-github-checks.sh +3 -2
  205. package/scripts/sw-github-deploy.sh +3 -2
  206. package/scripts/sw-github-graphql.sh +18 -7
  207. package/scripts/sw-guild.sh +5 -1
  208. package/scripts/sw-heartbeat.sh +5 -30
  209. package/scripts/sw-hello.sh +67 -0
  210. package/scripts/sw-hygiene.sh +10 -3
  211. package/scripts/sw-incident.sh +273 -5
  212. package/scripts/sw-init.sh +18 -2
  213. package/scripts/sw-instrument.sh +10 -2
  214. package/scripts/sw-intelligence.sh +44 -7
  215. package/scripts/sw-jira.sh +5 -1
  216. package/scripts/sw-launchd.sh +2 -1
  217. package/scripts/sw-linear.sh +4 -1
  218. package/scripts/sw-logs.sh +4 -1
  219. package/scripts/sw-loop.sh +436 -1076
  220. package/scripts/sw-memory.sh +357 -3
  221. package/scripts/sw-mission-control.sh +6 -1
  222. package/scripts/sw-model-router.sh +483 -27
  223. package/scripts/sw-otel.sh +15 -4
  224. package/scripts/sw-oversight.sh +14 -5
  225. package/scripts/sw-patrol-meta.sh +334 -0
  226. package/scripts/sw-pipeline-composer.sh +7 -1
  227. package/scripts/sw-pipeline-vitals.sh +12 -6
  228. package/scripts/sw-pipeline.sh +54 -2653
  229. package/scripts/sw-pm.sh +16 -8
  230. package/scripts/sw-pr-lifecycle.sh +2 -1
  231. package/scripts/sw-predictive.sh +17 -5
  232. package/scripts/sw-prep.sh +185 -2
  233. package/scripts/sw-ps.sh +5 -25
  234. package/scripts/sw-public-dashboard.sh +17 -4
  235. package/scripts/sw-quality.sh +14 -6
  236. package/scripts/sw-reaper.sh +8 -25
  237. package/scripts/sw-recruit.sh +156 -2303
  238. package/scripts/sw-regression.sh +19 -12
  239. package/scripts/sw-release-manager.sh +3 -1
  240. package/scripts/sw-release.sh +4 -1
  241. package/scripts/sw-remote.sh +3 -1
  242. package/scripts/sw-replay.sh +7 -1
  243. package/scripts/sw-retro.sh +158 -1
  244. package/scripts/sw-review-rerun.sh +3 -1
  245. package/scripts/sw-scale.sh +14 -5
  246. package/scripts/sw-security-audit.sh +6 -1
  247. package/scripts/sw-self-optimize.sh +173 -6
  248. package/scripts/sw-session.sh +9 -3
  249. package/scripts/sw-setup.sh +3 -1
  250. package/scripts/sw-stall-detector.sh +406 -0
  251. package/scripts/sw-standup.sh +15 -7
  252. package/scripts/sw-status.sh +3 -1
  253. package/scripts/sw-strategic.sh +14 -6
  254. package/scripts/sw-stream.sh +13 -4
  255. package/scripts/sw-swarm.sh +20 -7
  256. package/scripts/sw-team-stages.sh +13 -6
  257. package/scripts/sw-templates.sh +7 -31
  258. package/scripts/sw-testgen.sh +17 -6
  259. package/scripts/sw-tmux-pipeline.sh +4 -1
  260. package/scripts/sw-tmux-role-color.sh +2 -0
  261. package/scripts/sw-tmux-status.sh +1 -1
  262. package/scripts/sw-tmux.sh +37 -1
  263. package/scripts/sw-trace.sh +3 -1
  264. package/scripts/sw-tracker-github.sh +3 -0
  265. package/scripts/sw-tracker-jira.sh +3 -0
  266. package/scripts/sw-tracker-linear.sh +3 -0
  267. package/scripts/sw-tracker.sh +3 -1
  268. package/scripts/sw-triage.sh +3 -2
  269. package/scripts/sw-upgrade.sh +3 -1
  270. package/scripts/sw-ux.sh +5 -2
  271. package/scripts/sw-webhook.sh +5 -2
  272. package/scripts/sw-widgets.sh +9 -4
  273. package/scripts/sw-worktree.sh +15 -3
  274. package/scripts/test-skill-injection.sh +1233 -0
  275. package/templates/pipelines/autonomous.json +27 -3
  276. package/templates/pipelines/cost-aware.json +34 -8
  277. package/templates/pipelines/deployed.json +12 -0
  278. package/templates/pipelines/enterprise.json +12 -0
  279. package/templates/pipelines/fast.json +6 -0
  280. package/templates/pipelines/full.json +27 -3
  281. package/templates/pipelines/hotfix.json +6 -0
  282. package/templates/pipelines/standard.json +12 -0
  283. package/templates/pipelines/tdd.json +12 -0
@@ -1,8 +1,154 @@
1
- # pipeline-stages.sh — Stage implementations (intake, plan, build, test, review, compound_quality, pr, merge, deploy, validate, monitor) for sw-pipeline.sh
1
+ #!/bin/bash
2
+ # pipeline-stages.sh — Stage implementations loader
3
+ # Sources domain-specific stage modules (intake, build, review, delivery, monitor).
2
4
  # Source from sw-pipeline.sh. Requires all pipeline globals and state/github/detection/quality modules.
3
5
  [[ -n "${_PIPELINE_STAGES_LOADED:-}" ]] && return 0
4
6
  _PIPELINE_STAGES_LOADED=1
5
7
 
8
+ # Source skill registry for dynamic prompt injection
9
+ _SKILL_REGISTRY_SH="${SCRIPT_DIR}/lib/skill-registry.sh"
10
+ [[ -f "$_SKILL_REGISTRY_SH" ]] && source "$_SKILL_REGISTRY_SH"
11
+
12
+ # Source skill memory for learning system
13
+ _SKILL_MEMORY_SH="${SCRIPT_DIR}/lib/skill-memory.sh"
14
+ [[ -f "$_SKILL_MEMORY_SH" ]] && source "$_SKILL_MEMORY_SH"
15
+
16
+ # Source dark factory modules (test holdout, spec-driven, causal graph)
17
+ [[ -f "$SCRIPT_DIR/lib/test-holdout.sh" ]] && source "$SCRIPT_DIR/lib/test-holdout.sh" 2>/dev/null || true
18
+ [[ -f "$SCRIPT_DIR/lib/spec-driven.sh" ]] && source "$SCRIPT_DIR/lib/spec-driven.sh" 2>/dev/null || true
19
+ [[ -f "$SCRIPT_DIR/lib/causal-graph.sh" ]] && source "$SCRIPT_DIR/lib/causal-graph.sh" 2>/dev/null || true
20
+ [[ -f "$SCRIPT_DIR/lib/constitutional.sh" ]] && source "$SCRIPT_DIR/lib/constitutional.sh" 2>/dev/null || true
21
+ [[ -f "$SCRIPT_DIR/lib/formal-spec.sh" ]] && source "$SCRIPT_DIR/lib/formal-spec.sh" 2>/dev/null || true
22
+ [[ -f "$SCRIPT_DIR/lib/mutation-executor.sh" ]] && source "$SCRIPT_DIR/lib/mutation-executor.sh" 2>/dev/null || true
23
+ # Cross-session reinforcement learning optimizer (Phase 7)
24
+ [[ -f "$SCRIPT_DIR/lib/rl-optimizer.sh" ]] && source "$SCRIPT_DIR/lib/rl-optimizer.sh" 2>/dev/null || true
25
+ # Autoresearch RL modules (Phase 8): reward aggregation, bandit selection, policy learning
26
+ [[ -f "$SCRIPT_DIR/lib/reward-aggregator.sh" ]] && source "$SCRIPT_DIR/lib/reward-aggregator.sh" 2>/dev/null || true
27
+ [[ -f "$SCRIPT_DIR/lib/bandit-selector.sh" ]] && source "$SCRIPT_DIR/lib/bandit-selector.sh" 2>/dev/null || true
28
+ [[ -f "$SCRIPT_DIR/lib/policy-learner.sh" ]] && source "$SCRIPT_DIR/lib/policy-learner.sh" 2>/dev/null || true
29
+
30
+ # Defaults for variables normally set by sw-pipeline.sh (safe under set -u).
31
+ ARTIFACTS_DIR="${ARTIFACTS_DIR:-.claude/pipeline-artifacts}"
32
+ SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
33
+ PROJECT_ROOT="${PROJECT_ROOT:-$(pwd)}"
34
+ PIPELINE_CONFIG="${PIPELINE_CONFIG:-}"
35
+ PIPELINE_NAME="${PIPELINE_NAME:-pipeline}"
36
+ MODEL="${MODEL:-opus}"
37
+ BASE_BRANCH="${BASE_BRANCH:-main}"
38
+ NO_GITHUB="${NO_GITHUB:-false}"
39
+ ISSUE_NUMBER="${ISSUE_NUMBER:-}"
40
+ ISSUE_BODY="${ISSUE_BODY:-}"
41
+ ISSUE_LABELS="${ISSUE_LABELS:-}"
42
+ ISSUE_MILESTONE="${ISSUE_MILESTONE:-}"
43
+ GOAL="${GOAL:-}"
44
+ TASK_TYPE="${TASK_TYPE:-feature}"
45
+ INTELLIGENCE_ISSUE_TYPE="${INTELLIGENCE_ISSUE_TYPE:-backend}"
46
+ TEST_CMD="${TEST_CMD:-}"
47
+ GIT_BRANCH="${GIT_BRANCH:-}"
48
+ TASKS_FILE="${TASKS_FILE:-}"
49
+
50
+ # ─── Context pruning helpers ────────────────────────────────────────────────
51
+
52
+ # prune_context_section — Intelligently truncate a context section to fit a char budget.
53
+ # $1: section name (for logging/markers)
54
+ # $2: content string
55
+ # $3: max_chars (default 5000)
56
+ # For JSON content (starts with { or [): extracts summary fields via jq.
57
+ # For text content: sandwich approach — keeps first + last N lines.
58
+ # Outputs the (possibly truncated) content to stdout.
59
+ prune_context_section() {
60
+ local section_name="${1:-section}"
61
+ local content="${2:-}"
62
+ local max_chars="${3:-5000}"
63
+
64
+ [[ -z "$content" ]] && return 0
65
+
66
+ local content_len=${#content}
67
+ if [[ "$content_len" -le "$max_chars" ]]; then
68
+ printf '%s' "$content"
69
+ return 0
70
+ fi
71
+
72
+ # JSON content — try jq summary extraction
73
+ local first_char="${content:0:1}"
74
+ if [[ "$first_char" == "{" || "$first_char" == "[" ]]; then
75
+ local summary=""
76
+ # Try extracting summary/results fields
77
+ summary=$(printf '%s' "$content" | jq -r '
78
+ if type == "object" then
79
+ to_entries | map(
80
+ if (.value | type) == "array" then
81
+ "\(.key): \(.value | length) items"
82
+ elif (.value | type) == "object" then
83
+ "\(.key): \(.value | keys | join(", "))"
84
+ else
85
+ "\(.key): \(.value)"
86
+ end
87
+ ) | join("\n")
88
+ elif type == "array" then
89
+ .[:5] | map(tostring) | join("\n")
90
+ else . end
91
+ ' 2>/dev/null) || true
92
+
93
+ if [[ -n "$summary" && ${#summary} -le "$max_chars" ]]; then
94
+ printf '%s' "$summary"
95
+ return 0
96
+ fi
97
+ # jq failed or still too large — fall through to text truncation
98
+ fi
99
+
100
+ # Text content — sandwich approach (first N + last N lines)
101
+ local line_count=0
102
+ line_count=$(printf '%s\n' "$content" | wc -l | xargs)
103
+
104
+ # Calculate how many lines to keep from each end
105
+ # Approximate chars-per-line to figure out line budget
106
+ local avg_chars_per_line=80
107
+ if [[ "$line_count" -gt 0 ]]; then
108
+ avg_chars_per_line=$(( content_len / line_count ))
109
+ [[ "$avg_chars_per_line" -lt 20 ]] && avg_chars_per_line=20
110
+ fi
111
+ local total_lines_budget=$(( max_chars / avg_chars_per_line ))
112
+ [[ "$total_lines_budget" -lt 4 ]] && total_lines_budget=4
113
+ local half=$(( total_lines_budget / 2 ))
114
+
115
+ local head_part=""
116
+ local tail_part=""
117
+ head_part=$(printf '%s\n' "$content" | head -"$half")
118
+ tail_part=$(printf '%s\n' "$content" | tail -"$half")
119
+
120
+ printf '%s\n[... %s truncated: %d→%d chars ...]\n%s' \
121
+ "$head_part" "$section_name" "$content_len" "$max_chars" "$tail_part"
122
+ }
123
+
124
+ # guard_prompt_size — Warn and hard-truncate if prompt exceeds budget.
125
+ # $1: stage name (for logging)
126
+ # $2: prompt content
127
+ # $3: max_chars (default 100000)
128
+ # Outputs the (possibly truncated) prompt to stdout.
129
+ PIPELINE_PROMPT_BUDGET="${PIPELINE_PROMPT_BUDGET:-100000}"
130
+
131
+ guard_prompt_size() {
132
+ local stage_name="${1:-stage}"
133
+ local prompt="${2:-}"
134
+ local max_chars="${3:-$PIPELINE_PROMPT_BUDGET}"
135
+
136
+ local prompt_len=${#prompt}
137
+ if [[ "$prompt_len" -le "$max_chars" ]]; then
138
+ printf '%s' "$prompt"
139
+ return 0
140
+ fi
141
+
142
+ warn "${stage_name} prompt too large (${prompt_len} chars, budget ${max_chars}) — truncating"
143
+ emit_event "pipeline.prompt_truncated" \
144
+ "stage=$stage_name" \
145
+ "original=$prompt_len" \
146
+ "budget=$max_chars" 2>/dev/null || true
147
+
148
+ printf '%s\n\n... [CONTEXT TRUNCATED: %s prompt exceeded %d char budget. Focus on the goal and requirements.]' \
149
+ "${prompt:0:$max_chars}" "$stage_name" "$max_chars"
150
+ }
151
+
6
152
  # ─── Safe git helpers ────────────────────────────────────────────────────────
7
153
  # BASE_BRANCH may not exist locally (e.g. --local mode with no remote).
8
154
  # These helpers return empty output instead of crashing under set -euo pipefail.
@@ -40,2909 +186,23 @@ show_stage_preview() {
40
186
  echo ""
41
187
  }
42
188
 
43
- stage_intake() {
44
- CURRENT_STAGE_ID="intake"
45
- local project_lang
46
- project_lang=$(detect_project_lang)
47
- info "Project: ${BOLD}$project_lang${RESET}"
48
-
49
- # 1. Fetch issue metadata if --issue provided
50
- if [[ -n "$ISSUE_NUMBER" ]]; then
51
- local meta
52
- meta=$(gh_get_issue_meta "$ISSUE_NUMBER")
53
-
54
- if [[ -n "$meta" ]]; then
55
- GOAL=$(echo "$meta" | jq -r '.title // ""')
56
- ISSUE_BODY=$(echo "$meta" | jq -r '.body // ""')
57
- ISSUE_LABELS=$(echo "$meta" | jq -r '[.labels[].name] | join(",")' 2>/dev/null || true)
58
- ISSUE_MILESTONE=$(echo "$meta" | jq -r '.milestone.title // ""' 2>/dev/null || true)
59
- ISSUE_ASSIGNEES=$(echo "$meta" | jq -r '[.assignees[].login] | join(",")' 2>/dev/null || true)
60
- [[ "$ISSUE_MILESTONE" == "null" ]] && ISSUE_MILESTONE=""
61
- [[ "$ISSUE_LABELS" == "null" ]] && ISSUE_LABELS=""
62
- else
63
- # Fallback: just get title
64
- GOAL=$(gh issue view "$ISSUE_NUMBER" --json title -q .title 2>/dev/null) || {
65
- error "Failed to fetch issue #$ISSUE_NUMBER"
66
- return 1
67
- }
68
- fi
69
-
70
- GITHUB_ISSUE="#$ISSUE_NUMBER"
71
- info "Issue #$ISSUE_NUMBER: ${BOLD}$GOAL${RESET}"
72
-
73
- if [[ -n "$ISSUE_LABELS" ]]; then
74
- info "Labels: ${DIM}$ISSUE_LABELS${RESET}"
75
- fi
76
- if [[ -n "$ISSUE_MILESTONE" ]]; then
77
- info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
78
- fi
79
-
80
- # Self-assign
81
- gh_assign_self "$ISSUE_NUMBER"
82
-
83
- # Add in-progress label
84
- gh_add_labels "$ISSUE_NUMBER" "pipeline/in-progress"
85
- fi
86
-
87
- # 2. Detect task type
88
- TASK_TYPE=$(detect_task_type "$GOAL")
89
- local suggested_template
90
- suggested_template=$(template_for_type "$TASK_TYPE")
91
- info "Detected: ${BOLD}$TASK_TYPE${RESET} → team template: ${CYAN}$suggested_template${RESET}"
92
-
93
- # 3. Auto-detect test command if not provided
94
- if [[ -z "$TEST_CMD" ]]; then
95
- TEST_CMD=$(detect_test_cmd)
96
- if [[ -n "$TEST_CMD" ]]; then
97
- info "Auto-detected test: ${DIM}$TEST_CMD${RESET}"
98
- fi
99
- fi
100
-
101
- # 4. Create branch with smart prefix
102
- local prefix
103
- prefix=$(branch_prefix_for_type "$TASK_TYPE")
104
- local slug
105
- slug=$(echo "$GOAL" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | cut -c1-40)
106
- slug="${slug%-}"
107
- [[ -n "$ISSUE_NUMBER" ]] && slug="${slug}-${ISSUE_NUMBER}"
108
- GIT_BRANCH="${prefix}/${slug}"
109
-
110
- git checkout -b "$GIT_BRANCH" 2>/dev/null || {
111
- info "Branch $GIT_BRANCH exists, checking out"
112
- git checkout "$GIT_BRANCH" 2>/dev/null || true
113
- }
114
- success "Branch: ${BOLD}$GIT_BRANCH${RESET}"
115
-
116
- # 5. Post initial progress comment on GitHub issue
117
- if [[ -n "$ISSUE_NUMBER" ]]; then
118
- local body
119
- body=$(gh_build_progress_body)
120
- gh_post_progress "$ISSUE_NUMBER" "$body"
121
- fi
122
-
123
- # 6. Save artifacts
124
- save_artifact "intake.json" "$(jq -n \
125
- --arg goal "$GOAL" --arg type "$TASK_TYPE" \
126
- --arg template "$suggested_template" --arg branch "$GIT_BRANCH" \
127
- --arg issue "${GITHUB_ISSUE:-}" --arg lang "$project_lang" \
128
- --arg test_cmd "${TEST_CMD:-}" --arg labels "${ISSUE_LABELS:-}" \
129
- --arg milestone "${ISSUE_MILESTONE:-}" --arg body "${ISSUE_BODY:-}" \
130
- '{goal:$goal, type:$type, template:$template, branch:$branch,
131
- issue:$issue, language:$lang, test_cmd:$test_cmd,
132
- labels:$labels, milestone:$milestone, body:$body}')"
133
-
134
- log_stage "intake" "Goal: $GOAL
135
- Type: $TASK_TYPE → template: $suggested_template
136
- Branch: $GIT_BRANCH
137
- Language: $project_lang
138
- Test cmd: ${TEST_CMD:-none detected}"
139
- }
140
-
141
- stage_plan() {
142
- CURRENT_STAGE_ID="plan"
143
- local plan_file="$ARTIFACTS_DIR/plan.md"
144
-
145
- if ! command -v claude >/dev/null 2>&1; then
146
- error "Claude CLI not found — cannot generate plan"
147
- return 1
148
- fi
149
-
150
- info "Generating implementation plan..."
151
-
152
- # ── Gather context bundle (if context engine available) ──
153
- local context_script="${SCRIPT_DIR}/sw-context.sh"
154
- if [[ -x "$context_script" ]]; then
155
- "$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
156
- fi
157
-
158
- # Gather rich architecture context (call-graph, dependencies)
159
- local arch_context=""
160
- if type gather_architecture_context &>/dev/null; then
161
- arch_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
162
- fi
163
-
164
- # Build rich prompt with all available context
165
- local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.
166
-
167
- ## Goal
168
- ${GOAL}
169
- "
170
-
171
- # Add issue context
172
- if [[ -n "$ISSUE_BODY" ]]; then
173
- plan_prompt="${plan_prompt}
174
- ## Issue Description
175
- ${ISSUE_BODY}
176
- "
177
- fi
178
-
179
- # Inject architecture context (import graph, modules, test map)
180
- if [[ -n "$arch_context" ]]; then
181
- plan_prompt="${plan_prompt}
182
- ## Architecture Context
183
- ${arch_context}
184
- "
185
- fi
186
-
187
- # Inject context bundle from context engine (if available)
188
- local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
189
- if [[ -f "$_context_bundle" ]]; then
190
- local _cb_content
191
- _cb_content=$(cat "$_context_bundle" 2>/dev/null | head -100 || true)
192
- if [[ -n "$_cb_content" ]]; then
193
- plan_prompt="${plan_prompt}
194
- ## Pipeline Context
195
- ${_cb_content}
196
- "
197
- fi
198
- fi
199
-
200
- # Inject intelligence memory context for similar past plans
201
- if type intelligence_search_memory >/dev/null 2>&1; then
202
- local plan_memory
203
- plan_memory=$(intelligence_search_memory "plan stage for ${TASK_TYPE:-feature}: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
204
- if [[ -n "$plan_memory" && "$plan_memory" != *'"results":[]'* && "$plan_memory" != *'"error"'* ]]; then
205
- local memory_summary
206
- memory_summary=$(echo "$plan_memory" | jq -r '.results[]? | "- \(.)"' 2>/dev/null | head -10 || true)
207
- if [[ -n "$memory_summary" ]]; then
208
- plan_prompt="${plan_prompt}
209
- ## Historical Context (from previous pipelines)
210
- Previous similar issues were planned as:
211
- ${memory_summary}
212
- "
213
- fi
214
- fi
215
- fi
216
-
217
- # Self-aware pipeline: inject hint when plan stage has been failing recently
218
- local plan_hint
219
- plan_hint=$(get_stage_self_awareness_hint "plan" 2>/dev/null || true)
220
- if [[ -n "$plan_hint" ]]; then
221
- plan_prompt="${plan_prompt}
222
- ## Self-Assessment (recent plan stage performance)
223
- ${plan_hint}
224
- "
225
- fi
226
-
227
- # Inject cross-pipeline discoveries (from other concurrent/similar pipelines)
228
- if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
229
- local plan_discoveries
230
- plan_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.json" 2>/dev/null | head -20 || true)
231
- if [[ -n "$plan_discoveries" ]]; then
232
- plan_prompt="${plan_prompt}
233
- ## Discoveries from Other Pipelines
234
- ${plan_discoveries}
235
- "
236
- fi
237
- fi
238
-
239
- # Inject architecture patterns from intelligence layer
240
- local repo_hash_plan
241
- repo_hash_plan=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
242
- local arch_file_plan="${HOME}/.shipwright/memory/${repo_hash_plan}/architecture.json"
243
- if [[ -f "$arch_file_plan" ]]; then
244
- local arch_patterns
245
- arch_patterns=$(jq -r '
246
- "Language: \(.language // "unknown")",
247
- "Framework: \(.framework // "unknown")",
248
- "Patterns: \((.patterns // []) | join(", "))",
249
- "Rules: \((.rules // []) | join("; "))"
250
- ' "$arch_file_plan" 2>/dev/null || true)
251
- if [[ -n "$arch_patterns" ]]; then
252
- plan_prompt="${plan_prompt}
253
- ## Architecture Patterns
254
- ${arch_patterns}
255
- "
256
- fi
257
- fi
258
-
259
- # Task-type-specific guidance
260
- case "${TASK_TYPE:-feature}" in
261
- bug)
262
- plan_prompt="${plan_prompt}
263
- ## Task Type: Bug Fix
264
- Focus on: reproducing the bug, identifying root cause, minimal targeted fix, regression tests.
265
- " ;;
266
- refactor)
267
- plan_prompt="${plan_prompt}
268
- ## Task Type: Refactor
269
- Focus on: preserving all existing behavior, incremental changes, comprehensive test coverage.
270
- " ;;
271
- security)
272
- plan_prompt="${plan_prompt}
273
- ## Task Type: Security
274
- Focus on: threat modeling, OWASP top 10, input validation, authentication/authorization.
275
- " ;;
276
- esac
277
-
278
- # Add project context
279
- local project_lang
280
- project_lang=$(detect_project_lang)
281
- plan_prompt="${plan_prompt}
282
- ## Project Context
283
- - Language: ${project_lang}
284
- - Test command: ${TEST_CMD:-not configured}
285
- - Task type: ${TASK_TYPE:-feature}
286
-
287
- ## Required Output
288
- Create a Markdown plan with these sections:
289
-
290
- ### Files to Modify
291
- List every file to create or modify with full paths.
292
-
293
- ### Implementation Steps
294
- Numbered steps in order of execution. Be specific about what code to write.
295
-
296
- ### Task Checklist
297
- A checkbox list of discrete tasks that can be tracked:
298
- - [ ] Task 1: Description
299
- - [ ] Task 2: Description
300
- (Include 5-15 tasks covering the full implementation)
301
-
302
- ### Testing Approach
303
- How to verify the implementation works.
304
-
305
- ### Definition of Done
306
- Checklist of completion criteria.
307
- "
308
-
309
- local plan_model
310
- plan_model=$(jq -r --arg id "plan" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
311
- [[ -n "$MODEL" ]] && plan_model="$MODEL"
312
- [[ -z "$plan_model" || "$plan_model" == "null" ]] && plan_model="opus"
313
- # Intelligence model routing (when no explicit CLI --model override)
314
- if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
315
- plan_model="$CLAUDE_MODEL"
316
- fi
317
-
318
- local _token_log="${ARTIFACTS_DIR}/.claude-tokens-plan.log"
319
- claude --print --model "$plan_model" --max-turns 25 --dangerously-skip-permissions \
320
- "$plan_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
321
- parse_claude_tokens "$_token_log"
322
-
323
- # Claude may write to disk via tools instead of stdout — rescue those files
324
- local _plan_rescue
325
- for _plan_rescue in "${PROJECT_ROOT}/PLAN.md" "${PROJECT_ROOT}/plan.md" \
326
- "${PROJECT_ROOT}/implementation-plan.md"; do
327
- if [[ -s "$_plan_rescue" ]] && [[ $(wc -l < "$plan_file" 2>/dev/null | xargs) -lt 10 ]]; then
328
- info "Plan written to ${_plan_rescue} via tools — adopting as plan artifact"
329
- cat "$_plan_rescue" >> "$plan_file"
330
- rm -f "$_plan_rescue"
331
- break
332
- fi
333
- done
334
-
335
- if [[ ! -s "$plan_file" ]]; then
336
- error "Plan generation failed — empty output"
337
- return 1
338
- fi
339
-
340
- # Validate plan content — detect API/CLI errors masquerading as plans
341
- local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
342
- _plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
343
- if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
344
- error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
345
- return 1
346
- fi
347
-
348
- local line_count
349
- line_count=$(wc -l < "$plan_file" | xargs)
350
- if [[ "$line_count" -lt 3 ]]; then
351
- error "Plan too short (${line_count} lines) — likely an error, not a real plan"
352
- return 1
353
- fi
354
- info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"
355
-
356
- # Extract task checklist for GitHub issue and task tracking
357
- local checklist
358
- checklist=$(sed -n '/### Task Checklist/,/^###/p' "$plan_file" 2>/dev/null | \
359
- grep '^\s*- \[' | head -20)
360
-
361
- if [[ -z "$checklist" ]]; then
362
- # Fallback: extract any checkbox lines
363
- checklist=$(grep '^\s*- \[' "$plan_file" 2>/dev/null | head -20)
364
- fi
365
-
366
- # Write local task file for Claude Code build stage
367
- if [[ -n "$checklist" ]]; then
368
- cat > "$TASKS_FILE" <<TASKS_EOF
369
- # Pipeline Tasks — ${GOAL}
370
-
371
- ## Implementation Checklist
372
- ${checklist}
373
-
374
- ## Context
375
- - Pipeline: ${PIPELINE_NAME}
376
- - Branch: ${GIT_BRANCH}
377
- - Issue: ${GITHUB_ISSUE:-none}
378
- - Generated: $(now_iso)
379
- TASKS_EOF
380
- info "Task list: ${DIM}$TASKS_FILE${RESET} ($(echo "$checklist" | wc -l | xargs) tasks)"
381
- fi
382
-
383
- # Post plan + task checklist to GitHub issue
384
- if [[ -n "$ISSUE_NUMBER" ]]; then
385
- local plan_summary
386
- plan_summary=$(head -50 "$plan_file")
387
- local gh_body="## 📋 Implementation Plan
388
-
389
- <details>
390
- <summary>Click to expand full plan (${line_count} lines)</summary>
391
-
392
- ${plan_summary}
393
-
394
- </details>
395
- "
396
- if [[ -n "$checklist" ]]; then
397
- gh_body="${gh_body}
398
- ## ✅ Task Checklist
399
- ${checklist}
400
- "
401
- fi
402
-
403
- gh_body="${gh_body}
404
- ---
405
- _Generated by \`shipwright pipeline\` at $(now_iso)_"
406
-
407
- gh_comment_issue "$ISSUE_NUMBER" "$gh_body"
408
- info "Plan posted to issue #$ISSUE_NUMBER"
409
- fi
410
-
411
- # Push plan to wiki
412
- gh_wiki_page "Pipeline-Plan-${ISSUE_NUMBER:-inline}" "$(<"$plan_file")"
413
-
414
- # Generate Claude Code task list
415
- local cc_tasks_file="$PROJECT_ROOT/.claude/tasks.md"
416
- if [[ -n "$checklist" ]]; then
417
- cat > "$cc_tasks_file" <<CC_TASKS_EOF
418
- # Tasks — ${GOAL}
419
-
420
- ## Status: In Progress
421
- Pipeline: ${PIPELINE_NAME} | Branch: ${GIT_BRANCH}
422
-
423
- ## Checklist
424
- ${checklist}
425
-
426
- ## Notes
427
- - Generated from pipeline plan at $(now_iso)
428
- - Pipeline will update status as tasks complete
429
- CC_TASKS_EOF
430
- info "Claude Code tasks: ${DIM}$cc_tasks_file${RESET}"
431
- fi
432
-
433
- # Extract definition of done for quality gates
434
- sed -n '/[Dd]efinition [Oo]f [Dd]one/,/^#/p' "$plan_file" | head -20 > "$ARTIFACTS_DIR/dod.md" 2>/dev/null || true
435
-
436
- # ── Plan Validation Gate ──
437
- # Ask Claude to validate the plan before proceeding
438
- if command -v claude >/dev/null 2>&1 && [[ -s "$plan_file" ]]; then
439
- local validation_attempts=0
440
- local max_validation_attempts=2
441
- local plan_valid=false
442
-
443
- while [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; do
444
- validation_attempts=$((validation_attempts + 1))
445
- info "Validating plan (attempt ${validation_attempts}/${max_validation_attempts})..."
446
-
447
- # Build enriched validation prompt with learned context
448
- local validation_extra=""
449
-
450
- # Inject rejected plan history from memory
451
- if type intelligence_search_memory >/dev/null 2>&1; then
452
- local rejected_plans
453
- rejected_plans=$(intelligence_search_memory "rejected plan validation failures for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
454
- if [[ -n "$rejected_plans" ]]; then
455
- validation_extra="${validation_extra}
456
- ## Previously Rejected Plans
457
- These issues were found in past plan validations for similar tasks:
458
- ${rejected_plans}
459
- "
460
- fi
461
- fi
462
-
463
- # Inject repo conventions contextually
464
- local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
465
- if [[ -f "$claudemd" ]]; then
466
- local conventions_summary
467
- conventions_summary=$(head -100 "$claudemd" 2>/dev/null | grep -E '^##|^-|^\*' | head -15 || true)
468
- if [[ -n "$conventions_summary" ]]; then
469
- validation_extra="${validation_extra}
470
- ## Repo Conventions
471
- ${conventions_summary}
472
- "
473
- fi
474
- fi
475
-
476
- # Inject complexity estimate
477
- local complexity_hint=""
478
- if [[ -n "${INTELLIGENCE_COMPLEXITY:-}" && "${INTELLIGENCE_COMPLEXITY:-0}" -gt 0 ]]; then
479
- complexity_hint="This is estimated as complexity ${INTELLIGENCE_COMPLEXITY}/10. Plans for this complexity typically need ${INTELLIGENCE_COMPLEXITY} or more tasks."
480
- fi
481
-
482
- local validation_prompt="You are a plan validator. Review this implementation plan and determine if it is valid.
483
-
484
- ## Goal
485
- ${GOAL}
486
- ${complexity_hint:+
487
- ## Complexity Estimate
488
- ${complexity_hint}
489
- }
490
- ## Plan
491
- $(cat "$plan_file")
492
- ${validation_extra}
493
- Evaluate:
494
- 1. Are all requirements from the goal addressed?
495
- 2. Is the plan decomposed into clear, achievable tasks?
496
- 3. Are the implementation steps specific enough to execute?
497
-
498
- Respond with EXACTLY one of these on the first line:
499
- VALID: true
500
- VALID: false
501
-
502
- Then explain your reasoning briefly."
503
-
504
- local validation_model="${plan_model:-opus}"
505
- local validation_result
506
- validation_result=$(claude --print --output-format text -p "$validation_prompt" --model "$validation_model" < /dev/null 2>"${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log" || true)
507
- parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log"
508
-
509
- # Save validation result
510
- echo "$validation_result" > "$ARTIFACTS_DIR/plan-validation.md"
511
-
512
- if echo "$validation_result" | head -5 | grep -qi "VALID: true"; then
513
- success "Plan validation passed"
514
- plan_valid=true
515
- break
516
- fi
517
-
518
- warn "Plan validation failed (attempt ${validation_attempts}/${max_validation_attempts})"
519
-
520
- # Analyze failure mode to decide how to recover
521
- local failure_mode="unknown"
522
- local validation_lower
523
- validation_lower=$(echo "$validation_result" | tr '[:upper:]' '[:lower:]')
524
- if echo "$validation_lower" | grep -qE 'requirements? unclear|goal.*vague|ambiguous|underspecified'; then
525
- failure_mode="requirements_unclear"
526
- elif echo "$validation_lower" | grep -qE 'insufficient detail|not specific|too high.level|missing.*steps|lacks.*detail'; then
527
- failure_mode="insufficient_detail"
528
- elif echo "$validation_lower" | grep -qE 'scope too (large|broad)|too many|overly complex|break.*down'; then
529
- failure_mode="scope_too_large"
530
- fi
531
-
532
- emit_event "plan.validation_failure" \
533
- "issue=${ISSUE_NUMBER:-0}" \
534
- "attempt=$validation_attempts" \
535
- "failure_mode=$failure_mode"
536
-
537
- # Track repeated failures — escalate if stuck in a loop
538
- if [[ -f "$ARTIFACTS_DIR/.plan-failure-sig.txt" ]]; then
539
- local prev_sig
540
- prev_sig=$(cat "$ARTIFACTS_DIR/.plan-failure-sig.txt" 2>/dev/null || true)
541
- if [[ "$failure_mode" == "$prev_sig" && "$failure_mode" != "unknown" ]]; then
542
- warn "Same validation failure mode repeated ($failure_mode) — escalating"
543
- emit_event "plan.validation_escalated" \
544
- "issue=${ISSUE_NUMBER:-0}" \
545
- "failure_mode=$failure_mode"
546
- break
547
- fi
548
- fi
549
- echo "$failure_mode" > "$ARTIFACTS_DIR/.plan-failure-sig.txt"
550
-
551
- if [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; then
552
- info "Regenerating plan with validation feedback (mode: ${failure_mode})..."
553
-
554
- # Tailor regeneration prompt based on failure mode
555
- local failure_guidance=""
556
- case "$failure_mode" in
557
- requirements_unclear)
558
- failure_guidance="The validator found the requirements unclear. Add more specific acceptance criteria, input/output examples, and concrete success metrics." ;;
559
- insufficient_detail)
560
- failure_guidance="The validator found the plan lacks detail. Break each task into smaller, more specific implementation steps with exact file paths and function names." ;;
561
- scope_too_large)
562
- failure_guidance="The validator found the scope too large. Focus on the minimal viable implementation and defer non-essential features to follow-up tasks." ;;
563
- esac
564
-
565
- local regen_prompt="${plan_prompt}
566
-
567
- IMPORTANT: A previous plan was rejected by validation. Issues found:
568
- $(echo "$validation_result" | tail -20)
569
- ${failure_guidance:+
570
- GUIDANCE: ${failure_guidance}}
571
-
572
- Fix these issues in the new plan."
573
-
574
- claude --print --model "$plan_model" --max-turns 25 \
575
- "$regen_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
576
- parse_claude_tokens "$_token_log"
577
-
578
- line_count=$(wc -l < "$plan_file" | xargs)
579
- info "Regenerated plan: ${DIM}$plan_file${RESET} (${line_count} lines)"
580
- fi
581
- done
582
-
583
- if [[ "$plan_valid" != "true" ]]; then
584
- warn "Plan validation did not pass after ${max_validation_attempts} attempts — proceeding anyway"
585
- fi
586
-
587
- emit_event "plan.validated" \
588
- "issue=${ISSUE_NUMBER:-0}" \
589
- "valid=${plan_valid}" \
590
- "attempts=${validation_attempts}"
591
- fi
592
-
593
- log_stage "plan" "Generated plan.md (${line_count} lines, $(echo "$checklist" | wc -l | xargs) tasks)"
594
- }
595
-
596
- stage_design() {
597
- CURRENT_STAGE_ID="design"
598
- local plan_file="$ARTIFACTS_DIR/plan.md"
599
- local design_file="$ARTIFACTS_DIR/design.md"
600
-
601
- if [[ ! -s "$plan_file" ]]; then
602
- warn "No plan found — skipping design stage"
603
- return 0
604
- fi
605
-
606
- if ! command -v claude >/dev/null 2>&1; then
607
- error "Claude CLI not found — cannot generate design"
608
- return 1
609
- fi
610
-
611
- info "Generating Architecture Decision Record..."
612
-
613
- # Gather rich architecture context (call-graph, dependencies)
614
- local arch_struct_context=""
615
- if type gather_architecture_context &>/dev/null; then
616
- arch_struct_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
617
- fi
618
-
619
- # Memory integration — inject context if memory system available
620
- local memory_context=""
621
- if type intelligence_search_memory >/dev/null 2>&1; then
622
- local mem_dir="${HOME}/.shipwright/memory"
623
- memory_context=$(intelligence_search_memory "design stage architecture patterns for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
624
- fi
625
- if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
626
- memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "design" 2>/dev/null) || true
627
- fi
628
-
629
- # Inject cross-pipeline discoveries for design stage
630
- local design_discoveries=""
631
- if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
632
- design_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
633
- fi
634
-
635
- # Inject architecture model patterns if available
636
- local arch_context=""
637
- local repo_hash
638
- repo_hash=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
639
- local arch_model_file="${HOME}/.shipwright/memory/${repo_hash}/architecture.json"
640
- if [[ -f "$arch_model_file" ]]; then
641
- local arch_patterns
642
- arch_patterns=$(jq -r '
643
- [.patterns // [] | .[] | "- \(.name // "unnamed"): \(.description // "no description")"] | join("\n")
644
- ' "$arch_model_file" 2>/dev/null) || true
645
- local arch_layers
646
- arch_layers=$(jq -r '
647
- [.layers // [] | .[] | "- \(.name // "unnamed"): \(.path // "")"] | join("\n")
648
- ' "$arch_model_file" 2>/dev/null) || true
649
- if [[ -n "$arch_patterns" || -n "$arch_layers" ]]; then
650
- arch_context="Previous designs in this repo follow these patterns:
651
- ${arch_patterns:+Patterns:
652
- ${arch_patterns}
653
- }${arch_layers:+Layers:
654
- ${arch_layers}}"
655
- fi
656
- fi
657
-
658
- # Inject rejected design approaches and anti-patterns from memory
659
- local design_antipatterns=""
660
- if type intelligence_search_memory >/dev/null 2>&1; then
661
- local rejected_designs
662
- rejected_designs=$(intelligence_search_memory "rejected design approaches anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
663
- if [[ -n "$rejected_designs" ]]; then
664
- design_antipatterns="
665
- ## Rejected Approaches (from past reviews)
666
- These design approaches were rejected in past reviews. Avoid repeating them:
667
- ${rejected_designs}
668
- "
669
- fi
670
- fi
671
-
672
- # Build design prompt with plan + project context
673
- local project_lang
674
- project_lang=$(detect_project_lang)
675
-
676
- local design_prompt="You are a senior software architect. Review the implementation plan below and produce an Architecture Decision Record (ADR).
677
-
678
- ## Goal
679
- ${GOAL}
680
-
681
- ## Implementation Plan
682
- $(cat "$plan_file")
683
-
684
- ## Project Context
685
- - Language: ${project_lang}
686
- - Test command: ${TEST_CMD:-not configured}
687
- - Task type: ${TASK_TYPE:-feature}
688
- ${arch_struct_context:+
689
- ## Architecture Context (import graph, modules, test map)
690
- ${arch_struct_context}
691
- }${memory_context:+
692
- ## Historical Context (from memory)
693
- ${memory_context}
694
- }${arch_context:+
695
- ## Architecture Model (from previous designs)
696
- ${arch_context}
697
- }${design_antipatterns}${design_discoveries:+
698
- ## Discoveries from Other Pipelines
699
- ${design_discoveries}
700
- }
701
- ## Required Output — Architecture Decision Record
702
-
703
- Produce this EXACT format:
704
-
705
- # Design: ${GOAL}
706
-
707
- ## Context
708
- [What problem we're solving, constraints from the codebase]
709
-
710
- ## Decision
711
- [The chosen approach — be specific about patterns, data flow, error handling]
712
-
713
- ## Alternatives Considered
714
- 1. [Alternative A] — Pros: ... / Cons: ...
715
- 2. [Alternative B] — Pros: ... / Cons: ...
716
-
717
- ## Implementation Plan
718
- - Files to create: [list with full paths]
719
- - Files to modify: [list with full paths]
720
- - Dependencies: [new deps if any]
721
- - Risk areas: [fragile code, performance concerns]
722
-
723
- ## Validation Criteria
724
- - [ ] [How we'll know the design is correct — testable criteria]
725
- - [ ] [Additional validation items]
726
-
727
- Be concrete and specific. Reference actual file paths in the codebase. Consider edge cases and failure modes."
728
-
729
- local design_model
730
- design_model=$(jq -r --arg id "design" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
731
- [[ -n "$MODEL" ]] && design_model="$MODEL"
732
- [[ -z "$design_model" || "$design_model" == "null" ]] && design_model="opus"
733
- # Intelligence model routing (when no explicit CLI --model override)
734
- if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
735
- design_model="$CLAUDE_MODEL"
736
- fi
737
-
738
- local _token_log="${ARTIFACTS_DIR}/.claude-tokens-design.log"
739
- claude --print --model "$design_model" --max-turns 25 --dangerously-skip-permissions \
740
- "$design_prompt" < /dev/null > "$design_file" 2>"$_token_log" || true
741
- parse_claude_tokens "$_token_log"
742
-
743
- # Claude may write to disk via tools instead of stdout — rescue those files
744
- local _design_rescue
745
- for _design_rescue in "${PROJECT_ROOT}/design-adr.md" "${PROJECT_ROOT}/design.md" \
746
- "${PROJECT_ROOT}/ADR.md" "${PROJECT_ROOT}/DESIGN.md"; do
747
- if [[ -s "$_design_rescue" ]] && [[ $(wc -l < "$design_file" 2>/dev/null | xargs) -lt 10 ]]; then
748
- info "Design written to ${_design_rescue} via tools — adopting as design artifact"
749
- cat "$_design_rescue" >> "$design_file"
750
- rm -f "$_design_rescue"
751
- break
752
- fi
753
- done
754
-
755
- if [[ ! -s "$design_file" ]]; then
756
- error "Design generation failed — empty output"
757
- return 1
758
- fi
759
-
760
- # Validate design content — detect API/CLI errors masquerading as designs
761
- local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
762
- _design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
763
- if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
764
- error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
765
- return 1
766
- fi
767
-
768
- local line_count
769
- line_count=$(wc -l < "$design_file" | xargs)
770
- if [[ "$line_count" -lt 3 ]]; then
771
- error "Design too short (${line_count} lines) — likely an error, not a real design"
772
- return 1
773
- fi
774
- info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"
775
-
776
- # Extract file lists for build stage awareness
777
- local files_to_create files_to_modify
778
- files_to_create=$(sed -n '/Files to create/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
779
- files_to_modify=$(sed -n '/Files to modify/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
780
-
781
- if [[ -n "$files_to_create" || -n "$files_to_modify" ]]; then
782
- info "Design scope: ${DIM}$(echo "$files_to_create $files_to_modify" | grep -c '^\s*-' || true) file(s)${RESET}"
783
- fi
784
-
785
- # Post design to GitHub issue
786
- if [[ -n "$ISSUE_NUMBER" ]]; then
787
- local design_summary
788
- design_summary=$(head -60 "$design_file")
789
- gh_comment_issue "$ISSUE_NUMBER" "## 📐 Architecture Decision Record
790
-
791
- <details>
792
- <summary>Click to expand ADR (${line_count} lines)</summary>
793
-
794
- ${design_summary}
795
-
796
- </details>
797
-
798
- ---
799
- _Generated by \`shipwright pipeline\` design stage at $(now_iso)_"
800
- fi
801
-
802
- # Push design to wiki
803
- gh_wiki_page "Pipeline-Design-${ISSUE_NUMBER:-inline}" "$(<"$design_file")"
804
-
805
- log_stage "design" "Generated design.md (${line_count} lines)"
806
- }
807
-
808
- # ─── TDD: Generate tests before implementation ─────────────────────────────────
809
- stage_test_first() {
810
- CURRENT_STAGE_ID="test_first"
811
- info "Generating tests from requirements (TDD mode)"
812
-
813
- local plan_file="${ARTIFACTS_DIR}/plan.md"
814
- local goal_file="${PROJECT_ROOT}/.claude/goal.md"
815
- local requirements=""
816
- if [[ -f "$plan_file" ]]; then
817
- requirements=$(cat "$plan_file" 2>/dev/null || true)
818
- elif [[ -f "$goal_file" ]]; then
819
- requirements=$(cat "$goal_file" 2>/dev/null || true)
820
- else
821
- requirements="${GOAL:-}: ${ISSUE_BODY:-}"
822
- fi
823
-
824
- local tdd_prompt="You are writing tests BEFORE implementation (TDD).
825
-
826
- Based on the following plan/requirements, generate test files that define the expected behavior. These tests should FAIL initially (since the implementation doesn't exist yet) but define the correct interface and behavior.
827
-
828
- Requirements:
829
- ${requirements}
830
-
831
- Instructions:
832
- 1. Create test files for each component mentioned in the plan
833
- 2. Tests should verify the PUBLIC interface and expected behavior
834
- 3. Include edge cases and error handling tests
835
- 4. Tests should be runnable with the project's test framework
836
- 5. Mark tests that need implementation with clear TODO comments
837
- 6. Do NOT write implementation code — only tests
838
-
839
- Output format: For each test file, use a fenced code block with the file path as the language identifier (e.g. \`\`\`tests/auth.test.ts):
840
- \`\`\`path/to/test.test.ts
841
- // file content
842
- \`\`\`
843
-
844
- Create files in the appropriate project directories (e.g. tests/, __tests__/, src/**/*.test.ts) per project convention."
845
-
846
- local model="${CLAUDE_MODEL:-${MODEL:-sonnet}}"
847
- [[ -z "$model" || "$model" == "null" ]] && model="sonnet"
848
-
849
- local output=""
850
- output=$(echo "$tdd_prompt" | timeout 120 claude --print --model "$model" 2>/dev/null) || {
851
- warn "TDD test generation failed, falling back to standard build"
852
- return 1
853
- }
854
-
855
- # Parse output: extract fenced code blocks and write to files
856
- local wrote_any=false
857
- local block_path="" in_block=false block_content=""
858
- while IFS= read -r line; do
859
- if [[ "$line" =~ ^\`\`\`([a-zA-Z0-9_/\.\-]+)$ ]]; then
860
- if [[ -n "$block_path" && -n "$block_content" ]]; then
861
- local out_file="${PROJECT_ROOT}/${block_path}"
862
- local out_dir
863
- out_dir=$(dirname "$out_file")
864
- mkdir -p "$out_dir" 2>/dev/null || true
865
- if echo "$block_content" > "$out_file" 2>/dev/null; then
866
- wrote_any=true
867
- info " Wrote: $block_path"
868
- fi
869
- fi
870
- block_path="${BASH_REMATCH[1]}"
871
- block_content=""
872
- in_block=true
873
- elif [[ "$line" == "\`\`\`" && "$in_block" == "true" ]]; then
874
- if [[ -n "$block_path" && -n "$block_content" ]]; then
875
- local out_file="${PROJECT_ROOT}/${block_path}"
876
- local out_dir
877
- out_dir=$(dirname "$out_file")
878
- mkdir -p "$out_dir" 2>/dev/null || true
879
- if echo "$block_content" > "$out_file" 2>/dev/null; then
880
- wrote_any=true
881
- info " Wrote: $block_path"
882
- fi
883
- fi
884
- block_path=""
885
- block_content=""
886
- in_block=false
887
- elif [[ "$in_block" == "true" && -n "$block_path" ]]; then
888
- [[ -n "$block_content" ]] && block_content="${block_content}"$'\n'
889
- block_content="${block_content}${line}"
890
- fi
891
- done <<< "$output"
892
-
893
- # Flush last block if unclosed
894
- if [[ -n "$block_path" && -n "$block_content" ]]; then
895
- local out_file="${PROJECT_ROOT}/${block_path}"
896
- local out_dir
897
- out_dir=$(dirname "$out_file")
898
- mkdir -p "$out_dir" 2>/dev/null || true
899
- if echo "$block_content" > "$out_file" 2>/dev/null; then
900
- wrote_any=true
901
- info " Wrote: $block_path"
902
- fi
903
- fi
904
-
905
- if [[ "$wrote_any" == "true" ]]; then
906
- if (cd "$PROJECT_ROOT" && git diff --name-only 2>/dev/null | grep -qE 'test|spec'); then
907
- git add -A 2>/dev/null || true
908
- git commit -m "test: TDD - define expected behavior before implementation" 2>/dev/null || true
909
- emit_event "tdd.tests_generated" "{\"stage\":\"test_first\"}"
910
- fi
911
- success "TDD tests generated"
912
- else
913
- warn "No test files extracted from TDD output — check format"
914
- fi
915
-
916
- return 0
917
- }
918
-
919
- stage_build() {
920
- local plan_file="$ARTIFACTS_DIR/plan.md"
921
- local design_file="$ARTIFACTS_DIR/design.md"
922
- local dod_file="$ARTIFACTS_DIR/dod.md"
923
- local loop_args=()
924
-
925
- # Memory integration — inject context if memory system available
926
- local memory_context=""
927
- if type intelligence_search_memory >/dev/null 2>&1; then
928
- local mem_dir="${HOME}/.shipwright/memory"
929
- memory_context=$(intelligence_search_memory "build stage for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
930
- fi
931
- if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
932
- memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "build" 2>/dev/null) || true
933
- fi
934
-
935
- # Build enriched goal with compact context (avoids prompt bloat)
936
- local enriched_goal
937
- enriched_goal=$(_pipeline_compact_goal "$GOAL" "$plan_file" "$design_file")
938
-
939
- # TDD: when test_first ran, tell build to make existing tests pass
940
- if [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
941
- enriched_goal="${enriched_goal}
942
-
943
- IMPORTANT (TDD mode): Test files already exist and define the expected behavior. Write implementation code to make ALL tests pass. Do not delete or modify the test files."
944
- fi
945
-
946
- # Inject memory context
947
- if [[ -n "$memory_context" ]]; then
948
- enriched_goal="${enriched_goal}
949
-
950
- Historical context (lessons from previous pipelines):
951
- ${memory_context}"
952
- fi
953
-
954
- # Inject cross-pipeline discoveries for build stage
955
- if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
956
- local build_discoveries
957
- build_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "src/*,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
958
- if [[ -n "$build_discoveries" ]]; then
959
- enriched_goal="${enriched_goal}
960
-
961
- Discoveries from other pipelines:
962
- ${build_discoveries}"
963
- fi
964
- fi
965
-
966
- # Add task list context
967
- if [[ -s "$TASKS_FILE" ]]; then
968
- enriched_goal="${enriched_goal}
969
-
970
- Task tracking (check off items as you complete them):
971
- $(cat "$TASKS_FILE")"
972
- fi
973
-
974
- # Inject file hotspots from GitHub intelligence
975
- if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_file_change_frequency >/dev/null 2>&1; then
976
- local build_hotspots
977
- build_hotspots=$(gh_file_change_frequency 2>/dev/null | head -5 || true)
978
- if [[ -n "$build_hotspots" ]]; then
979
- enriched_goal="${enriched_goal}
980
-
981
- File hotspots (most frequently changed — review these carefully):
982
- ${build_hotspots}"
983
- fi
984
- fi
985
-
986
- # Inject security alerts context
987
- if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_security_alerts >/dev/null 2>&1; then
988
- local build_alerts
989
- build_alerts=$(gh_security_alerts 2>/dev/null | head -3 || true)
990
- if [[ -n "$build_alerts" ]]; then
991
- enriched_goal="${enriched_goal}
992
-
993
- Active security alerts (do not introduce new vulnerabilities):
994
- ${build_alerts}"
995
- fi
996
- fi
997
-
998
- # Inject coverage baseline
999
- local repo_hash_build
1000
- repo_hash_build=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
1001
- local coverage_file_build="${HOME}/.shipwright/baselines/${repo_hash_build}/coverage.json"
1002
- if [[ -f "$coverage_file_build" ]]; then
1003
- local coverage_baseline
1004
- coverage_baseline=$(jq -r '.coverage_percent // empty' "$coverage_file_build" 2>/dev/null || true)
1005
- if [[ -n "$coverage_baseline" ]]; then
1006
- enriched_goal="${enriched_goal}
1007
-
1008
- Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
1009
- fi
1010
- fi
1011
-
1012
- # Predictive: inject prevention hints when risk/memory patterns suggest build-stage failures
1013
- if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
1014
- local issue_json_build="{}"
1015
- [[ -n "${ISSUE_NUMBER:-}" ]] && issue_json_build=$(jq -n --arg title "${GOAL:-}" --arg num "${ISSUE_NUMBER:-}" '{title: $title, number: $num}')
1016
- local prevention_text
1017
- prevention_text=$(bash "$SCRIPT_DIR/sw-predictive.sh" inject-prevention "build" "$issue_json_build" 2>/dev/null || true)
1018
- if [[ -n "$prevention_text" ]]; then
1019
- enriched_goal="${enriched_goal}
1020
-
1021
- ${prevention_text}"
1022
- fi
1023
- fi
1024
-
1025
- loop_args+=("$enriched_goal")
1026
-
1027
- # Build loop args from pipeline config + CLI overrides
1028
- CURRENT_STAGE_ID="build"
1029
-
1030
- local test_cmd="${TEST_CMD}"
1031
- if [[ -z "$test_cmd" ]]; then
1032
- test_cmd=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.test_cmd) // .defaults.test_cmd // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
1033
- [[ "$test_cmd" == "null" ]] && test_cmd=""
1034
- fi
1035
- # Auto-detect if still empty
1036
- if [[ -z "$test_cmd" ]]; then
1037
- test_cmd=$(detect_test_cmd)
1038
- fi
1039
-
1040
- local max_iter
1041
- max_iter=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.max_iterations) // 20' "$PIPELINE_CONFIG" 2>/dev/null) || true
1042
- [[ -z "$max_iter" || "$max_iter" == "null" ]] && max_iter=20
1043
- # CLI --max-iterations override (from CI strategy engine)
1044
- [[ -n "${MAX_ITERATIONS_OVERRIDE:-}" ]] && max_iter="$MAX_ITERATIONS_OVERRIDE"
1045
-
1046
- local agents="${AGENTS}"
1047
- if [[ -z "$agents" ]]; then
1048
- agents=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.agents) // .defaults.agents // 1' "$PIPELINE_CONFIG" 2>/dev/null) || true
1049
- [[ -z "$agents" || "$agents" == "null" ]] && agents=1
1050
- fi
189
+ # ─── Load domain-specific stage modules ───────────────────────────────────────
1051
190
 
1052
- # Intelligence: suggest parallelism if design indicates independent work
1053
- if [[ "${agents:-1}" -le 1 ]] && [[ -s "$ARTIFACTS_DIR/design.md" ]]; then
1054
- local design_lower
1055
- design_lower=$(tr '[:upper:]' '[:lower:]' < "$ARTIFACTS_DIR/design.md" 2>/dev/null || true)
1056
- if echo "$design_lower" | grep -qE 'independent (files|modules|components|services)|separate (modules|packages|directories)|parallel|no shared state'; then
1057
- info "Design mentions independent modules — consider --agents 2 for parallelism"
1058
- emit_event "build.parallelism_suggested" "issue=${ISSUE_NUMBER:-0}" "current_agents=$agents"
1059
- fi
1060
- fi
191
+ # Load scope enforcement module for planned vs actual file tracking
192
+ _SCOPE_ENFORCEMENT_SH="${SCRIPT_DIR}/lib/scope-enforcement.sh"
193
+ [[ -f "$_SCOPE_ENFORCEMENT_SH" ]] && source "$_SCOPE_ENFORCEMENT_SH"
1061
194
 
1062
- local audit
1063
- audit=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.audit) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
1064
- local quality
1065
- quality=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.quality_gates) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
195
+ _PIPELINE_STAGES_INTAKE_SH="${SCRIPT_DIR}/lib/pipeline-stages-intake.sh"
196
+ [[ -f "$_PIPELINE_STAGES_INTAKE_SH" ]] && source "$_PIPELINE_STAGES_INTAKE_SH"
1066
197
 
1067
- local build_model="${MODEL}"
1068
- if [[ -z "$build_model" ]]; then
1069
- build_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
1070
- [[ -z "$build_model" || "$build_model" == "null" ]] && build_model="opus"
1071
- fi
1072
- # Intelligence model routing (when no explicit CLI --model override)
1073
- if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
1074
- build_model="$CLAUDE_MODEL"
1075
- fi
198
+ _PIPELINE_STAGES_BUILD_SH="${SCRIPT_DIR}/lib/pipeline-stages-build.sh"
199
+ [[ -f "$_PIPELINE_STAGES_BUILD_SH" ]] && source "$_PIPELINE_STAGES_BUILD_SH"
1076
200
 
1077
- # Recruit-powered model selection (when no explicit override)
1078
- if [[ -z "$MODEL" ]] && [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
1079
- local _recruit_goal="${GOAL:-}"
1080
- if [[ -n "$_recruit_goal" ]]; then
1081
- local _recruit_match
1082
- _recruit_match=$(bash "$SCRIPT_DIR/sw-recruit.sh" match --json "$_recruit_goal" 2>/dev/null) || true
1083
- if [[ -n "$_recruit_match" ]]; then
1084
- local _recruit_model
1085
- _recruit_model=$(echo "$_recruit_match" | jq -r '.model // ""' 2>/dev/null) || true
1086
- if [[ -n "$_recruit_model" && "$_recruit_model" != "null" && "$_recruit_model" != "" ]]; then
1087
- info "Recruit recommends model: ${CYAN}${_recruit_model}${RESET} for this task"
1088
- build_model="$_recruit_model"
1089
- fi
1090
- fi
1091
- fi
1092
- fi
1093
-
1094
- [[ -n "$test_cmd" && "$test_cmd" != "null" ]] && loop_args+=(--test-cmd "$test_cmd")
1095
- loop_args+=(--max-iterations "$max_iter")
1096
- loop_args+=(--model "$build_model")
1097
- [[ "$agents" -gt 1 ]] 2>/dev/null && loop_args+=(--agents "$agents")
1098
-
1099
- # Quality gates: always enabled in CI, otherwise from template config
1100
- if [[ "${CI_MODE:-false}" == "true" ]]; then
1101
- loop_args+=(--audit --audit-agent --quality-gates)
1102
- else
1103
- [[ "$audit" == "true" ]] && loop_args+=(--audit --audit-agent)
1104
- [[ "$quality" == "true" ]] && loop_args+=(--quality-gates)
1105
- fi
1106
-
1107
- # Session restart capability
1108
- [[ -n "${MAX_RESTARTS_OVERRIDE:-}" ]] && loop_args+=(--max-restarts "$MAX_RESTARTS_OVERRIDE")
1109
- # Fast test mode
1110
- [[ -n "${FAST_TEST_CMD_OVERRIDE:-}" ]] && loop_args+=(--fast-test-cmd "$FAST_TEST_CMD_OVERRIDE")
1111
-
1112
- # Definition of Done: use plan-extracted DoD if available
1113
- [[ -s "$dod_file" ]] && loop_args+=(--definition-of-done "$dod_file")
1114
-
1115
- # Checkpoint resume: when pipeline resumed from build-stage checkpoint, pass --resume to loop
1116
- if [[ "${RESUME_FROM_CHECKPOINT:-false}" == "true" && "${checkpoint_stage:-}" == "build" ]]; then
1117
- loop_args+=(--resume)
1118
- fi
1119
-
1120
- # Skip permissions — pipeline runs headlessly (claude -p) and has no terminal
1121
- # for interactive permission prompts. Without this flag, agents can't write files.
1122
- loop_args+=(--skip-permissions)
1123
-
1124
- info "Starting build loop: ${DIM}shipwright loop${RESET} (max ${max_iter} iterations, ${agents} agent(s))"
1125
-
1126
- # Post build start to GitHub
1127
- if [[ -n "$ISSUE_NUMBER" ]]; then
1128
- gh_comment_issue "$ISSUE_NUMBER" "🔨 **Build started** — \`shipwright loop\` with ${max_iter} max iterations, ${agents} agent(s), model: ${build_model}"
1129
- fi
1130
-
1131
- local _token_log="${ARTIFACTS_DIR}/.claude-tokens-build.log"
1132
- export PIPELINE_JOB_ID="${PIPELINE_NAME:-pipeline-$$}"
1133
- sw loop "${loop_args[@]}" < /dev/null 2>"$_token_log" || {
1134
- local _loop_exit=$?
1135
- parse_claude_tokens "$_token_log"
1136
-
1137
- # Detect context exhaustion from progress file
1138
- local _progress_file="${PWD}/.claude/loop-logs/progress.md"
1139
- if [[ -f "$_progress_file" ]]; then
1140
- local _prog_tests
1141
- _prog_tests=$(grep -oE 'Tests passing: (true|false)' "$_progress_file" 2>/dev/null | awk '{print $NF}' || echo "unknown")
1142
- if [[ "$_prog_tests" != "true" ]]; then
1143
- warn "Build loop exhausted with failing tests (context exhaustion)"
1144
- emit_event "pipeline.context_exhaustion" "issue=${ISSUE_NUMBER:-0}" "stage=build"
1145
- # Write flag for daemon retry logic
1146
- mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
1147
- echo "context_exhaustion" > "$ARTIFACTS_DIR/failure-reason.txt" 2>/dev/null || true
1148
- fi
1149
- fi
1150
-
1151
- error "Build loop failed"
1152
- return 1
1153
- }
1154
- parse_claude_tokens "$_token_log"
1155
-
1156
- # Read accumulated token counts from build loop (written by sw-loop.sh)
1157
- local _loop_token_file="${PROJECT_ROOT}/.claude/loop-logs/loop-tokens.json"
1158
- if [[ -f "$_loop_token_file" ]] && command -v jq >/dev/null 2>&1; then
1159
- local _loop_in _loop_out _loop_cost
1160
- _loop_in=$(jq -r '.input_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
1161
- _loop_out=$(jq -r '.output_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
1162
- _loop_cost=$(jq -r '.cost_usd // 0' "$_loop_token_file" 2>/dev/null || echo "0")
1163
- TOTAL_INPUT_TOKENS=$(( TOTAL_INPUT_TOKENS + ${_loop_in:-0} ))
1164
- TOTAL_OUTPUT_TOKENS=$(( TOTAL_OUTPUT_TOKENS + ${_loop_out:-0} ))
1165
- if [[ -n "$_loop_cost" && "$_loop_cost" != "0" && "$_loop_cost" != "null" ]]; then
1166
- TOTAL_COST_USD="${_loop_cost}"
1167
- fi
1168
- if [[ "${_loop_in:-0}" -gt 0 || "${_loop_out:-0}" -gt 0 ]]; then
1169
- info "Build loop tokens: in=${_loop_in} out=${_loop_out} cost=\$${_loop_cost:-0}"
1170
- fi
1171
- fi
1172
-
1173
- # Count commits made during build
1174
- local commit_count
1175
- commit_count=$(_safe_base_log --oneline | wc -l | xargs)
1176
- info "Build produced ${BOLD}$commit_count${RESET} commit(s)"
1177
-
1178
- # Commit quality evaluation when intelligence is enabled
1179
- if type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1 && [[ "${commit_count:-0}" -gt 0 ]]; then
1180
- local commit_msgs
1181
- commit_msgs=$(_safe_base_log --format="%s" | head -20)
1182
- local quality_score
1183
- quality_score=$(claude --print --output-format text -p "Rate the quality of these git commit messages on a scale of 0-100. Consider: focus (one thing per commit), clarity (describes the why), atomicity (small logical units). Reply with ONLY a number 0-100.
1184
-
1185
- Commit messages:
1186
- ${commit_msgs}" --model haiku < /dev/null 2>/dev/null || true)
1187
- quality_score=$(echo "$quality_score" | grep -oE '^[0-9]+' | head -1 || true)
1188
- if [[ -n "$quality_score" ]]; then
1189
- emit_event "build.commit_quality" \
1190
- "issue=${ISSUE_NUMBER:-0}" \
1191
- "score=$quality_score" \
1192
- "commit_count=$commit_count"
1193
- if [[ "$quality_score" -lt 40 ]] 2>/dev/null; then
1194
- warn "Commit message quality low (score: ${quality_score}/100)"
1195
- else
1196
- info "Commit quality score: ${quality_score}/100"
1197
- fi
1198
- fi
1199
- fi
1200
-
1201
- log_stage "build" "Build loop completed ($commit_count commits)"
1202
- }
1203
-
1204
- stage_test() {
1205
- CURRENT_STAGE_ID="test"
1206
- local test_cmd="${TEST_CMD}"
1207
- if [[ -z "$test_cmd" ]]; then
1208
- test_cmd=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.test_cmd) // .defaults.test_cmd // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
1209
- [[ -z "$test_cmd" || "$test_cmd" == "null" ]] && test_cmd=""
1210
- fi
1211
- # Auto-detect
1212
- if [[ -z "$test_cmd" ]]; then
1213
- test_cmd=$(detect_test_cmd)
1214
- fi
1215
- if [[ -z "$test_cmd" ]]; then
1216
- warn "No test command found — skipping test stage"
1217
- return 0
1218
- fi
1219
-
1220
- local coverage_min
1221
- coverage_min=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.coverage_min) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
1222
- [[ -z "$coverage_min" || "$coverage_min" == "null" ]] && coverage_min=0
1223
-
1224
- local test_log="$ARTIFACTS_DIR/test-results.log"
1225
-
1226
- info "Running tests: ${DIM}$test_cmd${RESET}"
1227
- local test_exit=0
1228
- bash -c "$test_cmd" > "$test_log" 2>&1 || test_exit=$?
1229
-
1230
- if [[ "$test_exit" -eq 0 ]]; then
1231
- success "Tests passed"
1232
- else
1233
- error "Tests failed (exit code: $test_exit)"
1234
- # Extract most relevant error section (assertion failures, stack traces)
1235
- local relevant_output=""
1236
- relevant_output=$(grep -A5 -E 'FAIL|AssertionError|Expected.*but.*got|Error:|panic:|assert' "$test_log" 2>/dev/null | tail -40 || true)
1237
- if [[ -z "$relevant_output" ]]; then
1238
- relevant_output=$(tail -40 "$test_log")
1239
- fi
1240
- echo "$relevant_output"
1241
-
1242
- # Post failure to GitHub with more context
1243
- if [[ -n "$ISSUE_NUMBER" ]]; then
1244
- local log_lines
1245
- log_lines=$(wc -l < "$test_log" 2>/dev/null || echo "0")
1246
- local log_excerpt
1247
- if [[ "$log_lines" -lt 60 ]]; then
1248
- log_excerpt="$(cat "$test_log" 2>/dev/null || true)"
1249
- else
1250
- log_excerpt="$(head -20 "$test_log" 2>/dev/null || true)
1251
- ... (${log_lines} lines total, showing head + tail) ...
1252
- $(tail -30 "$test_log" 2>/dev/null || true)"
1253
- fi
1254
- gh_comment_issue "$ISSUE_NUMBER" "❌ **Tests failed** (exit code: $test_exit, ${log_lines} lines)
1255
- \`\`\`
1256
- ${log_excerpt}
1257
- \`\`\`"
1258
- fi
1259
- return 1
1260
- fi
1261
-
1262
- # Coverage check — only enforce when coverage data is actually detected
1263
- local coverage=""
1264
- if [[ "$coverage_min" -gt 0 ]] 2>/dev/null; then
1265
- coverage=$(parse_coverage_from_output "$test_log")
1266
- if [[ -z "$coverage" ]]; then
1267
- # No coverage data found — skip enforcement (project may not have coverage tooling)
1268
- info "No coverage data detected — skipping coverage check (min: ${coverage_min}%)"
1269
- elif awk -v cov="$coverage" -v min="$coverage_min" 'BEGIN{exit !(cov < min)}' 2>/dev/null; then
1270
- warn "Coverage ${coverage}% below minimum ${coverage_min}%"
1271
- return 1
1272
- else
1273
- info "Coverage: ${coverage}% (min: ${coverage_min}%)"
1274
- fi
1275
- fi
1276
-
1277
- # Emit test.completed with coverage for adaptive learning
1278
- if [[ -n "$coverage" ]]; then
1279
- emit_event "test.completed" \
1280
- "issue=${ISSUE_NUMBER:-0}" \
1281
- "stage=test" \
1282
- "coverage=$coverage"
1283
- fi
1284
-
1285
- # Post test results to GitHub
1286
- if [[ -n "$ISSUE_NUMBER" ]]; then
1287
- local test_summary
1288
- test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
1289
- local cov_line=""
1290
- [[ -n "$coverage" ]] && cov_line="
1291
- **Coverage:** ${coverage}%"
1292
- gh_comment_issue "$ISSUE_NUMBER" "✅ **Tests passed**${cov_line}
1293
- <details>
1294
- <summary>Test output</summary>
1295
-
1296
- \`\`\`
1297
- ${test_summary}
1298
- \`\`\`
1299
- </details>"
1300
- fi
1301
-
1302
- # Write coverage summary for pre-deploy gate
1303
- local _cov_pct=0
1304
- if [[ -f "$ARTIFACTS_DIR/test-results.log" ]]; then
1305
- _cov_pct=$(grep -oE '[0-9]+%' "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -1 | tr -d '%' || true)
1306
- _cov_pct="${_cov_pct:-0}"
1307
- fi
1308
- local _cov_tmp
1309
- _cov_tmp=$(mktemp "${ARTIFACTS_DIR}/test-coverage.json.tmp.XXXXXX")
1310
- printf '{"coverage_pct":%d}' "${_cov_pct:-0}" > "$_cov_tmp" && mv "$_cov_tmp" "$ARTIFACTS_DIR/test-coverage.json" || rm -f "$_cov_tmp"
1311
-
1312
- log_stage "test" "Tests passed${coverage:+ (coverage: ${coverage}%)}"
1313
- }
1314
-
1315
- stage_review() {
1316
- CURRENT_STAGE_ID="review"
1317
- local diff_file="$ARTIFACTS_DIR/review-diff.patch"
1318
- local review_file="$ARTIFACTS_DIR/review.md"
1319
-
1320
- _safe_base_diff > "$diff_file" 2>/dev/null || true
1321
-
1322
- if [[ ! -s "$diff_file" ]]; then
1323
- warn "No diff found — skipping review"
1324
- return 0
1325
- fi
1326
-
1327
- if ! command -v claude >/dev/null 2>&1; then
1328
- warn "Claude CLI not found — skipping AI review"
1329
- return 0
1330
- fi
1331
-
1332
- local diff_stats
1333
- diff_stats=$(_safe_base_diff --stat | tail -1 || echo "")
1334
- info "Running AI code review... ${DIM}($diff_stats)${RESET}"
1335
-
1336
- # Semantic risk scoring when intelligence is enabled
1337
- if type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1; then
1338
- local diff_files
1339
- diff_files=$(_safe_base_diff --name-only || true)
1340
- local risk_score="low"
1341
- # Fast heuristic: flag high-risk file patterns
1342
- if echo "$diff_files" | grep -qiE 'migration|schema|auth|crypto|security|password|token|secret|\.env'; then
1343
- risk_score="high"
1344
- elif echo "$diff_files" | grep -qiE 'api|route|controller|middleware|hook'; then
1345
- risk_score="medium"
1346
- fi
1347
- emit_event "review.risk_assessed" \
1348
- "issue=${ISSUE_NUMBER:-0}" \
1349
- "risk=$risk_score" \
1350
- "files_changed=$(echo "$diff_files" | wc -l | xargs)"
1351
- if [[ "$risk_score" == "high" ]]; then
1352
- warn "High-risk changes detected (DB schema, auth, crypto, or secrets)"
1353
- fi
1354
- fi
1355
-
1356
- local review_model="${MODEL:-opus}"
1357
- # Intelligence model routing (when no explicit CLI --model override)
1358
- if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
1359
- review_model="$CLAUDE_MODEL"
1360
- fi
1361
-
1362
- # Build review prompt with project context
1363
- local review_prompt="You are a senior code reviewer. Review this git diff thoroughly.
1364
-
1365
- For each issue found, use this format:
1366
- - **[SEVERITY]** file:line — description
1367
-
1368
- Severity levels: Critical, Bug, Security, Warning, Suggestion
1369
-
1370
- Focus on:
1371
- 1. Logic bugs and edge cases
1372
- 2. Security vulnerabilities (injection, XSS, auth bypass, etc.)
1373
- 3. Error handling gaps
1374
- 4. Performance issues
1375
- 5. Missing validation
1376
- 6. Project convention violations (see conventions below)
1377
-
1378
- Be specific. Reference exact file paths and line numbers. Only flag genuine issues.
1379
- If no issues are found, write: \"Review clean — no issues found.\"
1380
- "
1381
-
1382
- # Inject previous review findings and anti-patterns from memory
1383
- if type intelligence_search_memory >/dev/null 2>&1; then
1384
- local review_memory
1385
- review_memory=$(intelligence_search_memory "code review findings anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
1386
- if [[ -n "$review_memory" ]]; then
1387
- review_prompt+="
1388
- ## Known Issues from Previous Reviews
1389
- These anti-patterns and issues have been found in past reviews of this codebase. Flag them if they recur:
1390
- ${review_memory}
1391
- "
1392
- fi
1393
- fi
1394
-
1395
- # Inject project conventions if CLAUDE.md exists
1396
- local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
1397
- if [[ -f "$claudemd" ]]; then
1398
- local conventions
1399
- conventions=$(grep -A2 'Common Pitfalls\|Shell Standards\|Bash 3.2' "$claudemd" 2>/dev/null | head -20 || true)
1400
- if [[ -n "$conventions" ]]; then
1401
- review_prompt+="
1402
- ## Project Conventions
1403
- ${conventions}
1404
- "
1405
- fi
1406
- fi
1407
-
1408
- # Inject CODEOWNERS focus areas for review
1409
- if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_codeowners >/dev/null 2>&1; then
1410
- local review_owners
1411
- review_owners=$(gh_codeowners 2>/dev/null | head -10 || true)
1412
- if [[ -n "$review_owners" ]]; then
1413
- review_prompt+="
1414
- ## Code Owners (focus areas)
1415
- ${review_owners}
1416
- "
1417
- fi
1418
- fi
1419
-
1420
- # Inject Definition of Done if present
1421
- local dod_file="$PROJECT_ROOT/.claude/DEFINITION-OF-DONE.md"
1422
- if [[ -f "$dod_file" ]]; then
1423
- review_prompt+="
1424
- ## Definition of Done (verify these)
1425
- $(cat "$dod_file")
1426
- "
1427
- fi
1428
-
1429
- review_prompt+="
1430
- ## Diff to Review
1431
- $(cat "$diff_file")"
1432
-
1433
- # Skip permissions — pipeline runs headlessly (claude -p) and has no terminal
1434
- # for interactive permission prompts. Same rationale as build stage (line ~1083).
1435
- local review_args=(--print --model "$review_model" --max-turns 25 --dangerously-skip-permissions)
1436
-
1437
- claude "${review_args[@]}" "$review_prompt" < /dev/null > "$review_file" 2>"${ARTIFACTS_DIR}/.claude-tokens-review.log" || true
1438
- parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-review.log"
1439
-
1440
- if [[ ! -s "$review_file" ]]; then
1441
- warn "Review produced no output — check ${ARTIFACTS_DIR}/.claude-tokens-review.log for errors"
1442
- return 0
1443
- fi
1444
-
1445
- # Extract severity counts — try JSON structure first, then grep fallback
1446
- local critical_count=0 bug_count=0 warning_count=0
1447
-
1448
- # Check if review output is structured JSON (e.g. from structured review tools)
1449
- local json_parsed=false
1450
- if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
1451
- local j_critical j_bug j_warning
1452
- j_critical=$(jq -r '.issues | map(select(.severity == "Critical")) | length' "$review_file" 2>/dev/null || echo "")
1453
- if [[ -n "$j_critical" && "$j_critical" != "null" ]]; then
1454
- critical_count="$j_critical"
1455
- bug_count=$(jq -r '.issues | map(select(.severity == "Bug" or .severity == "Security")) | length' "$review_file" 2>/dev/null || echo "0")
1456
- warning_count=$(jq -r '.issues | map(select(.severity == "Warning" or .severity == "Suggestion")) | length' "$review_file" 2>/dev/null || echo "0")
1457
- json_parsed=true
1458
- fi
1459
- fi
1460
-
1461
- # Grep fallback for markdown-formatted review output
1462
- if [[ "$json_parsed" != "true" ]]; then
1463
- critical_count=$(grep -ciE '\*\*\[?Critical\]?\*\*' "$review_file" 2>/dev/null || true)
1464
- critical_count="${critical_count:-0}"
1465
- bug_count=$(grep -ciE '\*\*\[?(Bug|Security)\]?\*\*' "$review_file" 2>/dev/null || true)
1466
- bug_count="${bug_count:-0}"
1467
- warning_count=$(grep -ciE '\*\*\[?(Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
1468
- warning_count="${warning_count:-0}"
1469
- fi
1470
- local total_issues=$((critical_count + bug_count + warning_count))
1471
-
1472
- if [[ "$critical_count" -gt 0 ]]; then
1473
- error "Review found ${BOLD}$critical_count critical${RESET} issue(s) — see $review_file"
1474
- elif [[ "$bug_count" -gt 0 ]]; then
1475
- warn "Review found $bug_count bug/security issue(s) — see ${DIM}$review_file${RESET}"
1476
- elif [[ "$total_issues" -gt 0 ]]; then
1477
- info "Review found $total_issues suggestion(s)"
1478
- else
1479
- success "Review clean"
1480
- fi
1481
-
1482
- # ── Oversight gate: pipeline review/quality stages block on verdict ──
1483
- if [[ -x "$SCRIPT_DIR/sw-oversight.sh" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
1484
- local reject_reason=""
1485
- local _sec_count
1486
- _sec_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
1487
- _sec_count="${_sec_count:-0}"
1488
- local _blocking=$((critical_count + _sec_count))
1489
- [[ "$_blocking" -gt 0 ]] && reject_reason="Review found ${_blocking} critical/security issue(s)"
1490
- if ! bash "$SCRIPT_DIR/sw-oversight.sh" gate --diff "$diff_file" --description "${GOAL:-Pipeline review}" --reject-if "$reject_reason" >/dev/null 2>&1; then
1491
- error "Oversight gate rejected — blocking pipeline"
1492
- emit_event "review.oversight_blocked" "issue=${ISSUE_NUMBER:-0}"
1493
- log_stage "review" "BLOCKED: oversight gate rejected"
1494
- return 1
1495
- fi
1496
- fi
1497
-
1498
- # ── Review Blocking Gate ──
1499
- # Block pipeline on critical/security issues unless compound_quality handles them
1500
- local security_count
1501
- security_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
1502
- security_count="${security_count:-0}"
1503
-
1504
- local blocking_issues=$((critical_count + security_count))
1505
-
1506
- if [[ "$blocking_issues" -gt 0 ]]; then
1507
- # Check if compound_quality stage is enabled — if so, let it handle issues
1508
- local compound_enabled="false"
1509
- if [[ -n "${PIPELINE_CONFIG:-}" && -f "${PIPELINE_CONFIG:-/dev/null}" ]]; then
1510
- compound_enabled=$(jq -r '.stages[] | select(.id == "compound_quality") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null) || true
1511
- [[ -z "$compound_enabled" || "$compound_enabled" == "null" ]] && compound_enabled="false"
1512
- fi
1513
-
1514
- # Check if this is a fast template (don't block fast pipelines)
1515
- local is_fast="false"
1516
- if [[ "${PIPELINE_NAME:-}" == "fast" || "${PIPELINE_NAME:-}" == "hotfix" ]]; then
1517
- is_fast="true"
1518
- fi
1519
-
1520
- if [[ "$compound_enabled" == "true" ]]; then
1521
- info "Review found ${blocking_issues} critical/security issue(s) — compound_quality stage will handle"
1522
- elif [[ "$is_fast" == "true" ]]; then
1523
- warn "Review found ${blocking_issues} critical/security issue(s) — fast template, not blocking"
1524
- elif [[ "${SKIP_GATES:-false}" == "true" ]]; then
1525
- warn "Review found ${blocking_issues} critical/security issue(s) — skip-gates mode, not blocking"
1526
- else
1527
- error "Review found ${BOLD}${blocking_issues} critical/security issue(s)${RESET} — blocking pipeline"
1528
- emit_event "review.blocked" \
1529
- "issue=${ISSUE_NUMBER:-0}" \
1530
- "critical=${critical_count}" \
1531
- "security=${security_count}"
1532
-
1533
- # Save blocking issues for self-healing context
1534
- grep -iE '\*\*\[?(Critical|Security)\]?\*\*' "$review_file" > "$ARTIFACTS_DIR/review-blockers.md" 2>/dev/null || true
1535
-
1536
- # Post review to GitHub before failing
1537
- if [[ -n "$ISSUE_NUMBER" ]]; then
1538
- local review_summary
1539
- review_summary=$(head -40 "$review_file")
1540
- gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review — ❌ Blocked
1541
-
1542
- **Stats:** $diff_stats
1543
- **Blocking issues:** ${blocking_issues} (${critical_count} critical, ${security_count} security)
1544
-
1545
- <details>
1546
- <summary>Review details</summary>
1547
-
1548
- ${review_summary}
1549
-
1550
- </details>
1551
-
1552
- _Pipeline will attempt self-healing rebuild._"
1553
- fi
1554
-
1555
- log_stage "review" "BLOCKED: $blocking_issues critical/security issues found"
1556
- return 1
1557
- fi
1558
- fi
1559
-
1560
- # Post review to GitHub issue
1561
- if [[ -n "$ISSUE_NUMBER" ]]; then
1562
- local review_summary
1563
- review_summary=$(head -40 "$review_file")
1564
- gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review
1565
-
1566
- **Stats:** $diff_stats
1567
- **Issues found:** $total_issues (${critical_count} critical, ${bug_count} bugs, ${warning_count} suggestions)
1568
-
1569
- <details>
1570
- <summary>Review details</summary>
1571
-
1572
- ${review_summary}
1573
-
1574
- </details>"
1575
- fi
1576
-
1577
- log_stage "review" "AI review complete ($total_issues issues: $critical_count critical, $bug_count bugs, $warning_count suggestions)"
1578
- }
1579
-
1580
- # ─── Compound Quality (fallback) ────────────────────────────────────────────
1581
- # Basic implementation: adversarial review, negative testing, e2e checks, DoD audit.
1582
- # If pipeline-intelligence.sh was sourced first, its enhanced version takes priority.
1583
- if ! type stage_compound_quality >/dev/null 2>&1; then
1584
- stage_compound_quality() {
1585
- CURRENT_STAGE_ID="compound_quality"
1586
-
1587
- # Read stage config from pipeline template
1588
- local cfg
1589
- cfg=$(jq -r '.stages[] | select(.id == "compound_quality") | .config // {}' "$PIPELINE_CONFIG" 2>/dev/null) || cfg="{}"
1590
-
1591
- local do_adversarial do_negative do_e2e do_dod max_cycles blocking
1592
- do_adversarial=$(echo "$cfg" | jq -r '.adversarial // false')
1593
- do_negative=$(echo "$cfg" | jq -r '.negative // false')
1594
- do_e2e=$(echo "$cfg" | jq -r '.e2e // false')
1595
- do_dod=$(echo "$cfg" | jq -r '.dod_audit // false')
1596
- max_cycles=$(echo "$cfg" | jq -r '.max_cycles // 1')
1597
- blocking=$(echo "$cfg" | jq -r '.compound_quality_blocking // false')
1598
-
1599
- local pass_count=0 fail_count=0 total=0
1600
- local compound_log="$ARTIFACTS_DIR/compound-quality.log"
1601
- : > "$compound_log"
1602
-
1603
- # ── Adversarial review ──
1604
- if [[ "$do_adversarial" == "true" ]]; then
1605
- total=$((total + 1))
1606
- info "Running adversarial review..."
1607
- if [[ -x "$SCRIPT_DIR/sw-adversarial.sh" ]]; then
1608
- if bash "$SCRIPT_DIR/sw-adversarial.sh" --repo "${REPO_DIR:-.}" >> "$compound_log" 2>&1; then
1609
- pass_count=$((pass_count + 1))
1610
- success "Adversarial review passed"
1611
- else
1612
- fail_count=$((fail_count + 1))
1613
- warn "Adversarial review found issues"
1614
- fi
1615
- else
1616
- warn "sw-adversarial.sh not found, skipping"
1617
- fi
1618
- fi
1619
-
1620
- # ── Negative / edge-case testing ──
1621
- if [[ "$do_negative" == "true" ]]; then
1622
- total=$((total + 1))
1623
- info "Running negative test pass..."
1624
- if [[ -n "${TEST_CMD:-}" ]]; then
1625
- if eval "$TEST_CMD" >> "$compound_log" 2>&1; then
1626
- pass_count=$((pass_count + 1))
1627
- success "Negative test pass passed"
1628
- else
1629
- fail_count=$((fail_count + 1))
1630
- warn "Negative test pass found failures"
1631
- fi
1632
- else
1633
- pass_count=$((pass_count + 1))
1634
- info "No test command configured, skipping negative tests"
1635
- fi
1636
- fi
1637
-
1638
- # ── E2E checks ──
1639
- if [[ "$do_e2e" == "true" ]]; then
1640
- total=$((total + 1))
1641
- info "Running e2e checks..."
1642
- if [[ -x "$SCRIPT_DIR/sw-e2e-orchestrator.sh" ]]; then
1643
- if bash "$SCRIPT_DIR/sw-e2e-orchestrator.sh" run >> "$compound_log" 2>&1; then
1644
- pass_count=$((pass_count + 1))
1645
- success "E2E checks passed"
1646
- else
1647
- fail_count=$((fail_count + 1))
1648
- warn "E2E checks found issues"
1649
- fi
1650
- else
1651
- pass_count=$((pass_count + 1))
1652
- info "sw-e2e-orchestrator.sh not found, skipping e2e"
1653
- fi
1654
- fi
1655
-
1656
- # ── Definition of Done audit ──
1657
- if [[ "$do_dod" == "true" ]]; then
1658
- total=$((total + 1))
1659
- info "Running definition-of-done audit..."
1660
- if [[ -x "$SCRIPT_DIR/sw-quality.sh" ]]; then
1661
- if bash "$SCRIPT_DIR/sw-quality.sh" validate >> "$compound_log" 2>&1; then
1662
- pass_count=$((pass_count + 1))
1663
- success "DoD audit passed"
1664
- else
1665
- fail_count=$((fail_count + 1))
1666
- warn "DoD audit found gaps"
1667
- fi
1668
- else
1669
- pass_count=$((pass_count + 1))
1670
- info "sw-quality.sh not found, skipping DoD audit"
1671
- fi
1672
- fi
1673
-
1674
- # ── Summary ──
1675
- log_stage "compound_quality" "Compound quality: $pass_count/$total checks passed, $fail_count failed"
1676
-
1677
- if [[ "$fail_count" -gt 0 && "$blocking" == "true" ]]; then
1678
- error "Compound quality gate failed: $fail_count of $total checks failed"
1679
- return 1
1680
- fi
1681
-
1682
- return 0
1683
- }
1684
- fi # end fallback stage_compound_quality
1685
-
1686
- stage_pr() {
1687
- CURRENT_STAGE_ID="pr"
1688
- local plan_file="$ARTIFACTS_DIR/plan.md"
1689
- local test_log="$ARTIFACTS_DIR/test-results.log"
1690
- local review_file="$ARTIFACTS_DIR/review.md"
1691
-
1692
- # ── Skip PR in local/no-github mode ──
1693
- if [[ "${NO_GITHUB:-false}" == "true" || "${SHIPWRIGHT_LOCAL:-}" == "1" || "${LOCAL_MODE:-false}" == "true" ]]; then
1694
- info "Skipping PR stage — running in local/no-github mode"
1695
- # Save a PR draft locally for reference
1696
- local branch_name
1697
- branch_name=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
1698
- local commit_count
1699
- commit_count=$(_safe_base_log --oneline | wc -l | xargs)
1700
- {
1701
- echo "# PR Draft (local mode)"
1702
- echo ""
1703
- echo "**Branch:** ${branch_name}"
1704
- echo "**Commits:** ${commit_count:-0}"
1705
- echo "**Goal:** ${GOAL:-N/A}"
1706
- echo ""
1707
- echo "## Changes"
1708
- _safe_base_diff --stat || true
1709
- } > ".claude/pr-draft.md" 2>/dev/null || true
1710
- emit_event "pr.skipped" "issue=${ISSUE_NUMBER:-0}" "reason=local_mode"
1711
- return 0
1712
- fi
1713
-
1714
- # ── PR Hygiene Checks (informational) ──
1715
- local hygiene_commit_count
1716
- hygiene_commit_count=$(_safe_base_log --oneline | wc -l | xargs)
1717
- hygiene_commit_count="${hygiene_commit_count:-0}"
1718
-
1719
- if [[ "$hygiene_commit_count" -gt 20 ]]; then
1720
- warn "PR has ${hygiene_commit_count} commits — consider squashing before merge"
1721
- fi
1722
-
1723
- # Check for WIP/fixup/squash commits (expanded patterns)
1724
- local wip_commits
1725
- wip_commits=$(_safe_base_log --oneline | grep -ciE '^[0-9a-f]+ (WIP|fixup!|squash!|TODO|HACK|TEMP|BROKEN|wip[:-]|temp[:-]|broken[:-]|do not merge)' || true)
1726
- wip_commits="${wip_commits:-0}"
1727
- if [[ "$wip_commits" -gt 0 ]]; then
1728
- warn "Branch has ${wip_commits} WIP/fixup/squash/temp commit(s) — consider cleaning up"
1729
- fi
1730
-
1731
- # ── PR Quality Gate: reject PRs with no real code changes ──
1732
- local real_files
1733
- real_files=$(_safe_base_diff --name-only | grep -v '^\.claude/' | grep -v '^\.github/' || true)
1734
- if [[ -z "$real_files" ]]; then
1735
- error "No real code changes detected — only pipeline artifacts (.claude/ logs)."
1736
- error "The build agent did not produce meaningful changes. Skipping PR creation."
1737
- emit_event "pr.rejected" "issue=${ISSUE_NUMBER:-0}" "reason=no_real_changes"
1738
- # Mark issue so auto-retry knows not to retry empty builds
1739
- if [[ -n "${ISSUE_NUMBER:-}" && "${ISSUE_NUMBER:-0}" != "0" ]]; then
1740
- gh issue comment "$ISSUE_NUMBER" --body "<!-- SHIPWRIGHT-NO-CHANGES: true -->" 2>/dev/null || true
1741
- fi
1742
- return 1
1743
- fi
1744
- local real_file_count
1745
- real_file_count=$(echo "$real_files" | wc -l | xargs)
1746
- info "PR quality gate: ${real_file_count} real file(s) changed"
1747
-
1748
- # Commit any uncommitted changes left by the build agent
1749
- if ! git diff --quiet 2>/dev/null || ! git diff --cached --quiet 2>/dev/null; then
1750
- info "Committing remaining uncommitted changes..."
1751
- git add -A 2>/dev/null || true
1752
- git commit -m "chore: pipeline cleanup — commit remaining build changes" --no-verify 2>/dev/null || true
1753
- fi
1754
-
1755
- # Auto-rebase onto latest base branch before PR
1756
- auto_rebase || {
1757
- warn "Rebase/merge failed — pushing as-is"
1758
- }
1759
-
1760
- # Push branch
1761
- info "Pushing branch: $GIT_BRANCH"
1762
- git push -u origin "$GIT_BRANCH" --force-with-lease 2>/dev/null || {
1763
- # Retry with regular push if force-with-lease fails (first push)
1764
- git push -u origin "$GIT_BRANCH" 2>/dev/null || {
1765
- error "Failed to push branch"
1766
- return 1
1767
- }
1768
- }
1769
-
1770
- # ── Developer Simulation (pre-PR review) ──
1771
- local simulation_summary=""
1772
- if type simulation_review >/dev/null 2>&1; then
1773
- local sim_enabled
1774
- sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1775
- # Also check daemon-config
1776
- local daemon_cfg=".claude/daemon-config.json"
1777
- if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
1778
- sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
1779
- fi
1780
- if [[ "$sim_enabled" == "true" ]]; then
1781
- info "Running developer simulation review..."
1782
- local diff_for_sim
1783
- diff_for_sim=$(_safe_base_diff || true)
1784
- if [[ -n "$diff_for_sim" ]]; then
1785
- local sim_result
1786
- sim_result=$(simulation_review "$diff_for_sim" "${GOAL:-}" 2>/dev/null || echo "")
1787
- if [[ -n "$sim_result" && "$sim_result" != *'"error"'* ]]; then
1788
- echo "$sim_result" > "$ARTIFACTS_DIR/simulation-review.json"
1789
- local sim_count
1790
- sim_count=$(echo "$sim_result" | jq 'length' 2>/dev/null || echo "0")
1791
- simulation_summary="**Developer simulation:** ${sim_count} reviewer concerns pre-addressed"
1792
- success "Simulation complete: ${sim_count} concerns found and addressed"
1793
- emit_event "simulation.complete" "issue=${ISSUE_NUMBER:-0}" "concerns=${sim_count}"
1794
- else
1795
- info "Simulation returned no actionable concerns"
1796
- fi
1797
- fi
1798
- fi
1799
- fi
1800
-
1801
- # ── Architecture Validation (pre-PR check) ──
1802
- local arch_summary=""
1803
- if type architecture_validate_changes >/dev/null 2>&1; then
1804
- local arch_enabled
1805
- arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1806
- local daemon_cfg=".claude/daemon-config.json"
1807
- if [[ "$arch_enabled" != "true" && -f "$daemon_cfg" ]]; then
1808
- arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
1809
- fi
1810
- if [[ "$arch_enabled" == "true" ]]; then
1811
- info "Validating architecture..."
1812
- local diff_for_arch
1813
- diff_for_arch=$(_safe_base_diff || true)
1814
- if [[ -n "$diff_for_arch" ]]; then
1815
- local arch_result
1816
- arch_result=$(architecture_validate_changes "$diff_for_arch" "" 2>/dev/null || echo "")
1817
- if [[ -n "$arch_result" && "$arch_result" != *'"error"'* ]]; then
1818
- echo "$arch_result" > "$ARTIFACTS_DIR/architecture-validation.json"
1819
- local violation_count
1820
- violation_count=$(echo "$arch_result" | jq '[.violations[]? | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
1821
- arch_summary="**Architecture validation:** ${violation_count} violations"
1822
- if [[ "$violation_count" -gt 0 ]]; then
1823
- warn "Architecture: ${violation_count} high/critical violations found"
1824
- else
1825
- success "Architecture validation passed"
1826
- fi
1827
- emit_event "architecture.validated" "issue=${ISSUE_NUMBER:-0}" "violations=${violation_count}"
1828
- else
1829
- info "Architecture validation returned no results"
1830
- fi
1831
- fi
1832
- fi
1833
- fi
1834
-
1835
- # Pre-PR diff gate — verify meaningful code changes exist (not just bookkeeping)
1836
- local real_changes
1837
- real_changes=$(_safe_base_diff --name-only \
1838
- -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
1839
- ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
1840
- ':!**/error-summary.json' | wc -l | xargs || echo "0")
1841
- if [[ "${real_changes:-0}" -eq 0 ]]; then
1842
- error "No meaningful code changes detected — only bookkeeping files modified"
1843
- error "Refusing to create PR with zero real changes"
1844
- return 1
1845
- fi
1846
- info "Pre-PR diff check: ${real_changes} real files changed"
1847
-
1848
- # Build PR title — prefer GOAL over plan file first line
1849
- # (plan file first line often contains Claude analysis text, not a clean title)
1850
- local pr_title=""
1851
- if [[ -n "${GOAL:-}" ]]; then
1852
- pr_title=$(echo "$GOAL" | cut -c1-70)
1853
- fi
1854
- if [[ -z "$pr_title" ]] && [[ -s "$plan_file" ]]; then
1855
- pr_title=$(head -1 "$plan_file" 2>/dev/null | sed 's/^#* *//' | cut -c1-70)
1856
- fi
1857
- [[ -z "$pr_title" ]] && pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
1858
-
1859
- # Sanitize: reject PR titles that look like error messages
1860
- if echo "$pr_title" | grep -qiE 'Invalid API|API key|authentication_error|rate_limit|CLI error|no useful output'; then
1861
- warn "PR title looks like an error message: $pr_title"
1862
- pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
1863
- fi
1864
-
1865
- # Build comprehensive PR body
1866
- local plan_summary=""
1867
- if [[ -s "$plan_file" ]]; then
1868
- plan_summary=$(head -20 "$plan_file" 2>/dev/null | tail -15)
1869
- fi
1870
-
1871
- local test_summary=""
1872
- if [[ -s "$test_log" ]]; then
1873
- test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
1874
- fi
1875
-
1876
- local review_summary=""
1877
- if [[ -s "$review_file" ]]; then
1878
- local total_issues=0
1879
- # Try JSON structured output first
1880
- if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
1881
- total_issues=$(jq -r '.issues | length' "$review_file" 2>/dev/null || echo "0")
1882
- fi
1883
- # Grep fallback for markdown
1884
- if [[ "${total_issues:-0}" -eq 0 ]]; then
1885
- total_issues=$(grep -ciE '\*\*\[?(Critical|Bug|Security|Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
1886
- total_issues="${total_issues:-0}"
1887
- fi
1888
- review_summary="**Code review:** $total_issues issues found"
1889
- fi
1890
-
1891
- local closes_line=""
1892
- [[ -n "${GITHUB_ISSUE:-}" ]] && closes_line="Closes ${GITHUB_ISSUE}"
1893
-
1894
- local diff_stats
1895
- diff_stats=$(_safe_base_diff --stat | tail -1 || echo "")
1896
-
1897
- local commit_count
1898
- commit_count=$(_safe_base_log --oneline | wc -l | xargs)
1899
-
1900
- local total_dur=""
1901
- if [[ -n "$PIPELINE_START_EPOCH" ]]; then
1902
- total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
1903
- fi
1904
-
1905
- local pr_body
1906
- pr_body="$(cat <<EOF
1907
- ## Summary
1908
- ${plan_summary:-$GOAL}
1909
-
1910
- ## Changes
1911
- ${diff_stats}
1912
- ${commit_count} commit(s) via \`shipwright pipeline\` (${PIPELINE_NAME})
1913
-
1914
- ## Test Results
1915
- \`\`\`
1916
- ${test_summary:-No test output}
1917
- \`\`\`
1918
-
1919
- ${review_summary}
1920
- ${simulation_summary}
1921
- ${arch_summary}
1922
-
1923
- ${closes_line}
1924
-
1925
- ---
1926
-
1927
- | Metric | Value |
1928
- |--------|-------|
1929
- | Pipeline | \`${PIPELINE_NAME}\` |
1930
- | Duration | ${total_dur:-—} |
1931
- | Model | ${MODEL:-opus} |
1932
- | Agents | ${AGENTS:-1} |
1933
-
1934
- Generated by \`shipwright pipeline\`
1935
- EOF
1936
- )"
1937
-
1938
- # Verify required evidence before PR (merge policy enforcement)
1939
- local risk_tier
1940
- risk_tier="low"
1941
- if [[ -f "$REPO_DIR/config/policy.json" ]]; then
1942
- local changed_files
1943
- changed_files=$(_safe_base_diff --name-only || true)
1944
- if [[ -n "$changed_files" ]]; then
1945
- local policy_file="$REPO_DIR/config/policy.json"
1946
- check_tier_match() {
1947
- local tier="$1"
1948
- local patterns
1949
- patterns=$(jq -r ".riskTierRules.${tier}[]? // empty" "$policy_file" 2>/dev/null)
1950
- [[ -z "$patterns" ]] && return 1
1951
- while IFS= read -r pattern; do
1952
- [[ -z "$pattern" ]] && continue
1953
- local regex
1954
- regex=$(echo "$pattern" | sed 's/\./\\./g; s/\*\*/DOUBLESTAR/g; s/\*/[^\/]*/g; s/DOUBLESTAR/.*/g')
1955
- while IFS= read -r file; do
1956
- [[ -z "$file" ]] && continue
1957
- if echo "$file" | grep -qE "^${regex}$"; then
1958
- return 0
1959
- fi
1960
- done <<< "$changed_files"
1961
- done <<< "$patterns"
1962
- return 1
1963
- }
1964
- check_tier_match "critical" && risk_tier="critical"
1965
- check_tier_match "high" && [[ "$risk_tier" != "critical" ]] && risk_tier="high"
1966
- check_tier_match "medium" && [[ "$risk_tier" != "critical" && "$risk_tier" != "high" ]] && risk_tier="medium"
1967
- fi
1968
- fi
1969
-
1970
- local required_evidence
1971
- required_evidence=$(jq -r ".mergePolicy.\"$risk_tier\".requiredEvidence // [] | .[]" "$REPO_DIR/config/policy.json" 2>/dev/null)
1972
-
1973
- if [[ -n "$required_evidence" ]]; then
1974
- local evidence_dir="$REPO_DIR/.claude/evidence"
1975
- local missing_evidence=()
1976
- while IFS= read -r etype; do
1977
- [[ -z "$etype" ]] && continue
1978
- local has_evidence=false
1979
- for f in "$evidence_dir"/*"$etype"*; do
1980
- [[ -f "$f" ]] && has_evidence=true && break
1981
- done
1982
- [[ "$has_evidence" != "true" ]] && missing_evidence+=("$etype")
1983
- done <<< "$required_evidence"
1984
-
1985
- if [[ ${#missing_evidence[@]} -gt 0 ]]; then
1986
- warn "Missing required evidence for $risk_tier tier: ${missing_evidence[*]}"
1987
- emit_event "evidence.missing" "{\"tier\":\"$risk_tier\",\"missing\":\"${missing_evidence[*]}\"}"
1988
- # Collect missing evidence
1989
- if [[ -x "$SCRIPT_DIR/sw-evidence.sh" ]]; then
1990
- for etype in "${missing_evidence[@]}"; do
1991
- (cd "$REPO_DIR" && bash "$SCRIPT_DIR/sw-evidence.sh" capture "$etype" 2>/dev/null) || warn "Failed to collect $etype evidence"
1992
- done
1993
- fi
1994
- fi
1995
- fi
1996
-
1997
- # Build gh pr create args
1998
- local pr_args=(--title "$pr_title" --body "$pr_body" --base "$BASE_BRANCH")
1999
-
2000
- # Propagate labels from issue + CLI
2001
- local all_labels="${LABELS}"
2002
- if [[ -n "$ISSUE_LABELS" ]]; then
2003
- if [[ -n "$all_labels" ]]; then
2004
- all_labels="${all_labels},${ISSUE_LABELS}"
2005
- else
2006
- all_labels="$ISSUE_LABELS"
2007
- fi
2008
- fi
2009
- if [[ -n "$all_labels" ]]; then
2010
- pr_args+=(--label "$all_labels")
2011
- fi
2012
-
2013
- # Auto-detect or use provided reviewers
2014
- local reviewers="${REVIEWERS}"
2015
- if [[ -z "$reviewers" ]]; then
2016
- reviewers=$(detect_reviewers)
2017
- fi
2018
- if [[ -n "$reviewers" ]]; then
2019
- pr_args+=(--reviewer "$reviewers")
2020
- info "Reviewers: ${DIM}$reviewers${RESET}"
2021
- fi
2022
-
2023
- # Propagate milestone
2024
- if [[ -n "$ISSUE_MILESTONE" ]]; then
2025
- pr_args+=(--milestone "$ISSUE_MILESTONE")
2026
- info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
2027
- fi
2028
-
2029
- # Check for existing open PR on this branch to avoid duplicates (issue #12)
2030
- local pr_url=""
2031
- local existing_pr
2032
- existing_pr=$(gh pr list --head "$GIT_BRANCH" --state open --json number,url --jq '.[0]' 2>/dev/null || echo "")
2033
- if [[ -n "$existing_pr" && "$existing_pr" != "null" ]]; then
2034
- local existing_pr_number existing_pr_url
2035
- existing_pr_number=$(echo "$existing_pr" | jq -r '.number' 2>/dev/null || echo "")
2036
- existing_pr_url=$(echo "$existing_pr" | jq -r '.url' 2>/dev/null || echo "")
2037
- info "Updating existing PR #$existing_pr_number instead of creating duplicate"
2038
- gh pr edit "$existing_pr_number" --title "$pr_title" --body "$pr_body" 2>/dev/null || true
2039
- pr_url="$existing_pr_url"
2040
- else
2041
- info "Creating PR..."
2042
- local pr_stderr pr_exit=0
2043
- pr_url=$(gh pr create "${pr_args[@]}" 2>/tmp/shipwright-pr-stderr.txt) || pr_exit=$?
2044
- pr_stderr=$(cat /tmp/shipwright-pr-stderr.txt 2>/dev/null || true)
2045
- rm -f /tmp/shipwright-pr-stderr.txt
2046
-
2047
- # gh pr create may return non-zero for reviewer issues but still create the PR
2048
- if [[ "$pr_exit" -ne 0 ]]; then
2049
- if [[ "$pr_url" == *"github.com"* ]]; then
2050
- # PR was created but something non-fatal failed (e.g., reviewer not found)
2051
- warn "PR created with warnings: ${pr_stderr:-unknown}"
2052
- else
2053
- error "PR creation failed: ${pr_stderr:-$pr_url}"
2054
- return 1
2055
- fi
2056
- fi
2057
- fi
2058
-
2059
- success "PR created: ${BOLD}$pr_url${RESET}"
2060
- echo "$pr_url" > "$ARTIFACTS_DIR/pr-url.txt"
2061
-
2062
- # Extract PR number
2063
- PR_NUMBER=$(echo "$pr_url" | grep -oE '[0-9]+$' || true)
2064
-
2065
- # ── Intelligent Reviewer Selection (GraphQL-enhanced) ──
2066
- if [[ "${NO_GITHUB:-false}" != "true" && -n "$PR_NUMBER" && -z "$reviewers" ]]; then
2067
- local reviewer_assigned=false
2068
-
2069
- # Try CODEOWNERS-based routing via GraphQL API
2070
- if type gh_codeowners >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2071
- local codeowners_json
2072
- codeowners_json=$(gh_codeowners "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
2073
- if [[ "$codeowners_json" != "[]" && -n "$codeowners_json" ]]; then
2074
- local changed_files
2075
- changed_files=$(_safe_base_diff --name-only || true)
2076
- if [[ -n "$changed_files" ]]; then
2077
- local co_reviewers
2078
- co_reviewers=$(echo "$codeowners_json" | jq -r '.[].owners[]' 2>/dev/null | sort -u | head -3 || true)
2079
- if [[ -n "$co_reviewers" ]]; then
2080
- local rev
2081
- while IFS= read -r rev; do
2082
- rev="${rev#@}"
2083
- [[ -n "$rev" ]] && gh pr edit "$PR_NUMBER" --add-reviewer "$rev" 2>/dev/null || true
2084
- done <<< "$co_reviewers"
2085
- info "Requested review from CODEOWNERS: $(echo "$co_reviewers" | tr '\n' ',' | sed 's/,$//')"
2086
- reviewer_assigned=true
2087
- fi
2088
- fi
2089
- fi
2090
- fi
2091
-
2092
- # Fallback: contributor-based routing via GraphQL API
2093
- if [[ "$reviewer_assigned" != "true" ]] && type gh_contributors >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2094
- local contributors_json
2095
- contributors_json=$(gh_contributors "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
2096
- local top_contributor
2097
- top_contributor=$(echo "$contributors_json" | jq -r '.[0].login // ""' 2>/dev/null || echo "")
2098
- local current_user
2099
- current_user=$(gh api user --jq '.login' 2>/dev/null || echo "")
2100
- if [[ -n "$top_contributor" && "$top_contributor" != "$current_user" ]]; then
2101
- gh pr edit "$PR_NUMBER" --add-reviewer "$top_contributor" 2>/dev/null || true
2102
- info "Requested review from top contributor: $top_contributor"
2103
- reviewer_assigned=true
2104
- fi
2105
- fi
2106
-
2107
- # Final fallback: auto-approve if no reviewers assigned
2108
- if [[ "$reviewer_assigned" != "true" ]]; then
2109
- gh pr review "$PR_NUMBER" --approve 2>/dev/null || warn "Could not auto-approve PR"
2110
- fi
2111
- fi
2112
-
2113
- # Update issue with PR link
2114
- if [[ -n "$ISSUE_NUMBER" ]]; then
2115
- gh_remove_label "$ISSUE_NUMBER" "pipeline/in-progress"
2116
- gh_add_labels "$ISSUE_NUMBER" "pipeline/pr-created"
2117
- gh_comment_issue "$ISSUE_NUMBER" "🎉 **PR created:** ${pr_url}
2118
-
2119
- Pipeline duration so far: ${total_dur:-unknown}"
2120
-
2121
- # Notify tracker of review/PR creation
2122
- "$SCRIPT_DIR/sw-tracker.sh" notify "review" "$ISSUE_NUMBER" "$pr_url" 2>/dev/null || true
2123
- fi
2124
-
2125
- # Wait for CI if configured
2126
- local wait_ci
2127
- wait_ci=$(jq -r --arg id "pr" '(.stages[] | select(.id == $id) | .config.wait_ci) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
2128
- if [[ "$wait_ci" == "true" ]]; then
2129
- info "Waiting for CI checks..."
2130
- gh pr checks --watch 2>/dev/null || warn "CI checks did not all pass"
2131
- fi
2132
-
2133
- log_stage "pr" "PR created: $pr_url (${reviewers:+reviewers: $reviewers})"
2134
- }
2135
-
2136
#######################################
# Merge stage: gate, wait for CI, then merge the PR for the current branch.
# Globals (read): NO_GITHUB, SKIP_GATES, SCRIPT_DIR, ARTIFACTS_DIR, GOAL,
#   ISSUE_NUMBER, HOME, REPO_OWNER, REPO_NAME, BASE_BRANCH, GIT_BRANCH,
#   PIPELINE_CONFIG, PROJECT_ROOT
# Outputs: progress via info/warn/error/success; events via emit_event;
#   stage log via log_stage; updates ~/.shipwright CI-time baselines.
# Returns: 0 on merge (or benign skip), 1 when a gate blocks or merge fails.
#######################################
stage_merge() {
  CURRENT_STAGE_ID="merge"

  if [[ "$NO_GITHUB" == "true" ]]; then
    info "Merge stage skipped (--no-github)"
    return 0
  fi

  # ── Oversight gate: merge block on verdict (diff + review criticals + goal) ──
  if [[ -x "$SCRIPT_DIR/sw-oversight.sh" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
    local merge_diff_file="${ARTIFACTS_DIR}/review-diff.patch"
    local merge_review_file="${ARTIFACTS_DIR}/review.md"
    # Reuse the review-stage diff if present; otherwise capture a fresh one.
    if [[ ! -s "$merge_diff_file" ]]; then
      _safe_base_diff > "$merge_diff_file" 2>/dev/null || true
    fi
    if [[ -s "$merge_diff_file" ]]; then
      local _merge_critical _merge_sec _merge_blocking _merge_reject
      # grep -c prints a count even on no match; || true guards a missing file
      # (stderr suppressed), with :-0 as the empty-output fallback.
      _merge_critical=$(grep -ciE '\*\*\[?Critical\]?\*\*' "$merge_review_file" 2>/dev/null || true)
      _merge_critical="${_merge_critical:-0}"
      _merge_sec=$(grep -ciE '\*\*\[?Security\]?\*\*' "$merge_review_file" 2>/dev/null || true)
      _merge_sec="${_merge_sec:-0}"
      _merge_blocking=$((${_merge_critical:-0} + ${_merge_sec:-0}))
      [[ "$_merge_blocking" -gt 0 ]] && _merge_reject="Review found ${_merge_blocking} critical/security issue(s)"
      if ! bash "$SCRIPT_DIR/sw-oversight.sh" gate --diff "$merge_diff_file" --description "${GOAL:-Pipeline merge}" --reject-if "${_merge_reject:-}" >/dev/null 2>&1; then
        error "Oversight gate rejected — blocking merge"
        emit_event "merge.oversight_blocked" "issue=${ISSUE_NUMBER:-0}"
        log_stage "merge" "BLOCKED: oversight gate rejected"
        return 1
      fi
    fi
  fi

  # ── Approval gates: block if merge requires approval and pending for this issue ──
  local ag_file="${HOME}/.shipwright/approval-gates.json"
  if [[ -f "$ag_file" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
    local ag_enabled ag_stages ag_pending_merge ag_issue_num
    ag_enabled=$(jq -r '.enabled // false' "$ag_file" 2>/dev/null || echo "false")
    ag_stages=$(jq -r '.stages // [] | if type == "array" then .[] else empty end' "$ag_file" 2>/dev/null || true)
    # Coerce ISSUE_NUMBER to a plain integer for jq --argjson.
    ag_issue_num=$(echo "${ISSUE_NUMBER:-0}" | awk '{print $1+0}')
    if [[ "$ag_enabled" == "true" ]] && echo "$ag_stages" | grep -qx "merge" 2>/dev/null; then
      local ha_file="${ARTIFACTS_DIR}/human-approval.txt"
      local ha_approved="false"
      if [[ -f "$ha_file" ]]; then
        ha_approved=$(jq -r --arg stage "merge" 'select(.stage == $stage) | .approved // false' "$ha_file" 2>/dev/null || echo "false")
      fi
      if [[ "$ha_approved" != "true" ]]; then
        # Register a pending approval request exactly once per issue+stage.
        ag_pending_merge=$(jq -r --argjson issue "$ag_issue_num" --arg stage "merge" \
          '[.pending[]? | select(.issue == $issue and .stage == $stage)] | length' "$ag_file" 2>/dev/null || echo "0")
        if [[ "${ag_pending_merge:-0}" -eq 0 ]]; then
          local req_at tmp_ag
          req_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || true)
          tmp_ag=$(mktemp "${HOME}/.shipwright/approval-gates.json.XXXXXX" 2>/dev/null || mktemp)
          # Write via temp file + mv so a failed jq never corrupts the gate file.
          jq --argjson issue "$ag_issue_num" --arg stage "merge" --arg requested "${req_at}" \
            '.pending += [{"issue": $issue, "stage": $stage, "requested_at": $requested}]' "$ag_file" > "$tmp_ag" 2>/dev/null && mv "$tmp_ag" "$ag_file" || rm -f "$tmp_ag"
        fi
        info "Merge requires approval — awaiting human approval via dashboard"
        emit_event "merge.approval_pending" "issue=${ISSUE_NUMBER:-0}"
        log_stage "merge" "BLOCKED: approval gate pending"
        return 1
      fi
    fi
  fi

  # ── Branch Protection Check ──
  if type gh_branch_protection >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
    local protection_json
    protection_json=$(gh_branch_protection "$REPO_OWNER" "$REPO_NAME" "${BASE_BRANCH:-main}" 2>/dev/null || echo '{"protected": false}')
    local is_protected
    is_protected=$(echo "$protection_json" | jq -r '.protected // false' 2>/dev/null || echo "false")
    if [[ "$is_protected" == "true" ]]; then
      local required_reviews
      required_reviews=$(echo "$protection_json" | jq -r '.required_pull_request_reviews.required_approving_review_count // 0' 2>/dev/null || echo "0")
      local required_checks
      required_checks=$(echo "$protection_json" | jq -r '[.required_status_checks.contexts // [] | .[]] | length' 2>/dev/null || echo "0")

      info "Branch protection: ${required_reviews} required review(s), ${required_checks} required check(s)"

      if [[ "$required_reviews" -gt 0 ]]; then
        # If the PR lacks the required approvals, leave it for manual merge.
        local prot_pr_number
        prot_pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")
        if [[ -n "$prot_pr_number" ]]; then
          local approvals
          approvals=$(gh pr view "$prot_pr_number" --json reviews --jq '[.reviews[] | select(.state == "APPROVED")] | length' 2>/dev/null || echo "0")
          if [[ "$approvals" -lt "$required_reviews" ]]; then
            warn "PR has $approvals approval(s), needs $required_reviews — skipping auto-merge"
            info "PR is ready for manual merge after required reviews"
            emit_event "merge.blocked" "issue=${ISSUE_NUMBER:-0}" "reason=insufficient_reviews" "have=$approvals" "need=$required_reviews"
            return 0
          fi
        fi
      fi
    fi
  fi

  # ── Stage configuration (pipeline config, with sane defaults) ──
  local merge_method wait_ci_timeout auto_delete_branch auto_merge auto_approve merge_strategy
  merge_method=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_method) // "squash"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$merge_method" || "$merge_method" == "null" ]] && merge_method="squash"
  wait_ci_timeout=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.wait_ci_timeout_s) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$wait_ci_timeout" || "$wait_ci_timeout" == "null" ]] && wait_ci_timeout=0

  # Adaptive CI timeout: 90th percentile of historical times × 1.5 safety margin
  if [[ "$wait_ci_timeout" -eq 0 ]] 2>/dev/null; then
    local repo_hash_ci
    repo_hash_ci=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
    local ci_times_file="${HOME}/.shipwright/baselines/${repo_hash_ci}/ci-times.json"
    if [[ -f "$ci_times_file" ]]; then
      local p90_time
      p90_time=$(jq '
        .times | sort |
        (length * 0.9 | floor) as $idx |
        .[$idx] // 600
      ' "$ci_times_file" 2>/dev/null || echo "0")
      if [[ -n "$p90_time" ]] && awk -v t="$p90_time" 'BEGIN{exit !(t > 0)}' 2>/dev/null; then
        # 1.5x safety margin, clamped to [120, 1800]
        wait_ci_timeout=$(awk -v p90="$p90_time" 'BEGIN{
          t = p90 * 1.5;
          if (t < 120) t = 120;
          if (t > 1800) t = 1800;
          printf "%d", t
        }')
      fi
    fi
    # Default fallback if no history
    [[ "$wait_ci_timeout" -eq 0 ]] && wait_ci_timeout=600
  fi
  auto_delete_branch=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_delete_branch) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_delete_branch" || "$auto_delete_branch" == "null" ]] && auto_delete_branch="true"
  auto_merge=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_merge) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_merge" || "$auto_merge" == "null" ]] && auto_merge="false"
  auto_approve=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_approve) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_approve" || "$auto_approve" == "null" ]] && auto_approve="false"
  merge_strategy=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_strategy) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$merge_strategy" || "$merge_strategy" == "null" ]] && merge_strategy=""
  # merge_strategy overrides merge_method if set (squash/merge/rebase)
  if [[ -n "$merge_strategy" ]]; then
    merge_method="$merge_strategy"
  fi

  # Find PR for current branch
  local pr_number
  pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")

  if [[ -z "$pr_number" ]]; then
    warn "No PR found for branch $GIT_BRANCH — skipping merge"
    return 0
  fi

  info "Found PR #${pr_number} for branch ${GIT_BRANCH}"

  # Wait for CI checks to pass
  info "Waiting for CI checks (timeout: ${wait_ci_timeout}s)..."
  local elapsed=0
  local check_interval=15

  while [[ "$elapsed" -lt "$wait_ci_timeout" ]]; do
    local check_status
    check_status=$(gh pr checks "$pr_number" --json 'bucket,name' --jq '[.[] | .bucket] | unique | sort' 2>/dev/null || echo '["pending"]')

    # FIX: succeed when every bucket is pass or skipping (previously only the
    # exact set ["pass"] counted, so any skipped check burned the full timeout).
    if echo "$check_status" | jq -e 'length > 0 and all(. == "pass" or . == "skipping")' >/dev/null 2>&1; then
      success "All CI checks passed"
      break
    fi

    # FIX: treat cancelled runs as failures too (previously "cancel" was
    # neither pass nor fail, so the loop spun until timeout).
    if echo "$check_status" | jq -e 'any(. == "fail" or . == "cancel")' >/dev/null 2>&1; then
      error "CI checks failed — aborting merge"
      return 1
    fi

    sleep "$check_interval"
    elapsed=$((elapsed + check_interval))
  done

  # Record CI wait time for adaptive timeout calculation
  if [[ "$elapsed" -gt 0 ]]; then
    local repo_hash_ci_rec
    repo_hash_ci_rec=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
    local ci_times_dir="${HOME}/.shipwright/baselines/${repo_hash_ci_rec}"
    local ci_times_rec_file="${ci_times_dir}/ci-times.json"
    mkdir -p "$ci_times_dir"
    local ci_history="[]"
    if [[ -f "$ci_times_rec_file" ]]; then
      ci_history=$(jq '.times // []' "$ci_times_rec_file" 2>/dev/null || echo "[]")
    fi
    local updated_ci
    # Keep only the 20 most recent samples.
    updated_ci=$(echo "$ci_history" | jq --arg t "$elapsed" '. + [($t | tonumber)] | .[-20:]' 2>/dev/null || echo "[$elapsed]")
    local tmp_ci
    tmp_ci=$(mktemp "${ci_times_dir}/ci-times.json.XXXXXX")
    # FIX: only replace the baseline file when jq succeeded; previously the mv
    # ran unconditionally and could clobber ci-times.json with an empty file.
    if jq -n --argjson times "$updated_ci" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
      '{times: $times, updated: $updated}' > "$tmp_ci" 2>/dev/null; then
      mv "$tmp_ci" "$ci_times_rec_file" 2>/dev/null || rm -f "$tmp_ci"
    else
      rm -f "$tmp_ci"
    fi
  fi

  if [[ "$elapsed" -ge "$wait_ci_timeout" ]]; then
    warn "CI check timeout (${wait_ci_timeout}s) — proceeding with merge anyway"
  fi

  # Auto-approve if configured (for branch protection requiring reviews)
  if [[ "$auto_approve" == "true" ]]; then
    info "Auto-approving PR #${pr_number}..."
    gh pr review "$pr_number" --approve 2>/dev/null || warn "Auto-approve failed (may need different permissions)"
  fi

  # Merge the PR
  if [[ "$auto_merge" == "true" ]]; then
    info "Enabling auto-merge for PR #${pr_number} (strategy: ${merge_method})..."
    local auto_merge_args=("pr" "merge" "$pr_number" "--auto" "--${merge_method}")
    if [[ "$auto_delete_branch" == "true" ]]; then
      auto_merge_args+=("--delete-branch")
    fi

    if gh "${auto_merge_args[@]}" 2>/dev/null; then
      success "Auto-merge enabled for PR #${pr_number} (strategy: ${merge_method})"
      emit_event "merge.auto_enabled" \
        "issue=${ISSUE_NUMBER:-0}" \
        "pr=$pr_number" \
        "strategy=$merge_method"
    else
      warn "Auto-merge not available — falling back to direct merge"
      # Fall through to direct merge below
      auto_merge="false"
    fi
  fi

  if [[ "$auto_merge" != "true" ]]; then
    info "Merging PR #${pr_number} (method: ${merge_method})..."
    local merge_args=("pr" "merge" "$pr_number" "--${merge_method}")
    if [[ "$auto_delete_branch" == "true" ]]; then
      merge_args+=("--delete-branch")
    fi

    if gh "${merge_args[@]}" 2>/dev/null; then
      success "PR #${pr_number} merged successfully"
    else
      error "Failed to merge PR #${pr_number}"
      return 1
    fi
  fi

  log_stage "merge" "PR #${pr_number} merged (strategy: ${merge_method}, auto_merge: ${auto_merge})"
}
2379
-
2380
- stage_deploy() {
2381
- CURRENT_STAGE_ID="deploy"
2382
- local staging_cmd
2383
- staging_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.staging_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2384
- [[ "$staging_cmd" == "null" ]] && staging_cmd=""
2385
-
2386
- local prod_cmd
2387
- prod_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.production_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2388
- [[ "$prod_cmd" == "null" ]] && prod_cmd=""
2389
-
2390
- local rollback_cmd
2391
- rollback_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2392
- [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""
2393
-
2394
- if [[ -z "$staging_cmd" && -z "$prod_cmd" ]]; then
2395
- warn "No deploy commands configured — skipping"
2396
- return 0
2397
- fi
2398
-
2399
- # Create GitHub deployment tracking
2400
- local gh_deploy_env="production"
2401
- [[ -n "$staging_cmd" && -z "$prod_cmd" ]] && gh_deploy_env="staging"
2402
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_start >/dev/null 2>&1; then
2403
- if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2404
- gh_deploy_pipeline_start "$REPO_OWNER" "$REPO_NAME" "${GIT_BRANCH:-HEAD}" "$gh_deploy_env" 2>/dev/null || true
2405
- info "GitHub Deployment: tracking as $gh_deploy_env"
2406
- fi
2407
- fi
2408
-
2409
- # ── Pre-deploy gates ──
2410
- local pre_deploy_ci
2411
- pre_deploy_ci=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_ci_status) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true
2412
-
2413
- if [[ "${pre_deploy_ci:-true}" == "true" && "${NO_GITHUB:-false}" != "true" && -n "${REPO_OWNER:-}" && -n "${REPO_NAME:-}" ]]; then
2414
- info "Pre-deploy gate: checking CI status..."
2415
- local ci_failures
2416
- ci_failures=$(gh api "repos/${REPO_OWNER}/${REPO_NAME}/commits/${GIT_BRANCH:-HEAD}/check-runs" \
2417
- --jq '[.check_runs[] | select(.conclusion != null and .conclusion != "success" and .conclusion != "skipped")] | length' 2>/dev/null || echo "0")
2418
- if [[ "${ci_failures:-0}" -gt 0 ]]; then
2419
- error "Pre-deploy gate FAILED: ${ci_failures} CI check(s) not passing"
2420
- [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: ${ci_failures} CI checks failing" 2>/dev/null || true
2421
- return 1
2422
- fi
2423
- success "Pre-deploy gate: all CI checks passing"
2424
- fi
2425
-
2426
- local pre_deploy_min_cov
2427
- pre_deploy_min_cov=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_min_coverage) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2428
- if [[ -n "${pre_deploy_min_cov:-}" && "${pre_deploy_min_cov}" != "null" && -f "$ARTIFACTS_DIR/test-coverage.json" ]]; then
2429
- local actual_cov
2430
- actual_cov=$(jq -r '.coverage_pct // 0' "$ARTIFACTS_DIR/test-coverage.json" 2>/dev/null || echo "0")
2431
- if [[ "${actual_cov:-0}" -lt "$pre_deploy_min_cov" ]]; then
2432
- error "Pre-deploy gate FAILED: coverage ${actual_cov}% < required ${pre_deploy_min_cov}%"
2433
- [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: coverage ${actual_cov}% below minimum ${pre_deploy_min_cov}%" 2>/dev/null || true
2434
- return 1
2435
- fi
2436
- success "Pre-deploy gate: coverage ${actual_cov}% >= ${pre_deploy_min_cov}%"
2437
- fi
2438
-
2439
- # Post deploy start to GitHub
2440
- if [[ -n "$ISSUE_NUMBER" ]]; then
2441
- gh_comment_issue "$ISSUE_NUMBER" "Deploy started"
2442
- fi
2443
-
2444
- # ── Deploy strategy ──
2445
- local deploy_strategy
2446
- deploy_strategy=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.deploy_strategy) // "direct"' "$PIPELINE_CONFIG" 2>/dev/null) || true
2447
- [[ "$deploy_strategy" == "null" ]] && deploy_strategy="direct"
2448
-
2449
- local canary_cmd promote_cmd switch_cmd health_url deploy_log
2450
- canary_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.canary_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2451
- [[ "$canary_cmd" == "null" ]] && canary_cmd=""
2452
- promote_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.promote_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2453
- [[ "$promote_cmd" == "null" ]] && promote_cmd=""
2454
- switch_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.switch_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2455
- [[ "$switch_cmd" == "null" ]] && switch_cmd=""
2456
- health_url=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
2457
- [[ "$health_url" == "null" ]] && health_url=""
2458
- deploy_log="$ARTIFACTS_DIR/deploy.log"
2459
-
2460
- case "$deploy_strategy" in
2461
- canary)
2462
- info "Canary deployment strategy..."
2463
- if [[ -z "$canary_cmd" ]]; then
2464
- warn "No canary_cmd configured — falling back to direct"
2465
- deploy_strategy="direct"
2466
- else
2467
- info "Deploying canary..."
2468
- bash -c "$canary_cmd" >> "$deploy_log" 2>&1 || { error "Canary deploy failed"; return 1; }
2469
-
2470
- if [[ -n "$health_url" ]]; then
2471
- local canary_healthy=0
2472
- local _chk
2473
- for _chk in 1 2 3; do
2474
- sleep 10
2475
- local _status
2476
- _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
2477
- if [[ "$_status" -ge 200 && "$_status" -lt 400 ]]; then
2478
- canary_healthy=$((canary_healthy + 1))
2479
- fi
2480
- done
2481
- if [[ "$canary_healthy" -lt 2 ]]; then
2482
- error "Canary health check failed ($canary_healthy/3 passed) — rolling back"
2483
- [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" 2>/dev/null || true
2484
- return 1
2485
- fi
2486
- success "Canary healthy ($canary_healthy/3 checks passed)"
2487
- fi
2488
-
2489
- info "Promoting canary to full deployment..."
2490
- if [[ -n "$promote_cmd" ]]; then
2491
- bash -c "$promote_cmd" >> "$deploy_log" 2>&1 || { error "Promote failed"; return 1; }
2492
- fi
2493
- success "Canary promoted"
2494
- fi
2495
- ;;
2496
- blue-green)
2497
- info "Blue-green deployment strategy..."
2498
- if [[ -z "$staging_cmd" || -z "$switch_cmd" ]]; then
2499
- warn "Blue-green requires staging_cmd + switch_cmd — falling back to direct"
2500
- deploy_strategy="direct"
2501
- else
2502
- info "Deploying to inactive environment..."
2503
- bash -c "$staging_cmd" >> "$deploy_log" 2>&1 || { error "Blue-green staging failed"; return 1; }
2504
-
2505
- if [[ -n "$health_url" ]]; then
2506
- local bg_healthy=0
2507
- local _chk
2508
- for _chk in 1 2 3; do
2509
- sleep 5
2510
- local _status
2511
- _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
2512
- [[ "$_status" -ge 200 && "$_status" -lt 400 ]] && bg_healthy=$((bg_healthy + 1))
2513
- done
2514
- if [[ "$bg_healthy" -lt 2 ]]; then
2515
- error "Blue-green health check failed — not switching"
2516
- return 1
2517
- fi
2518
- fi
2519
-
2520
- info "Switching traffic..."
2521
- bash -c "$switch_cmd" >> "$deploy_log" 2>&1 || { error "Traffic switch failed"; return 1; }
2522
- success "Blue-green switch complete"
2523
- fi
2524
- ;;
2525
- esac
2526
-
2527
- # ── Direct deployment (default or fallback) ──
2528
- if [[ "$deploy_strategy" == "direct" ]]; then
2529
- if [[ -n "$staging_cmd" ]]; then
2530
- info "Deploying to staging..."
2531
- bash -c "$staging_cmd" > "$ARTIFACTS_DIR/deploy-staging.log" 2>&1 || {
2532
- error "Staging deploy failed"
2533
- [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Staging deploy failed"
2534
- # Mark GitHub deployment as failed
2535
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
2536
- gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
2537
- fi
2538
- return 1
2539
- }
2540
- success "Staging deploy complete"
2541
- fi
2542
-
2543
- if [[ -n "$prod_cmd" ]]; then
2544
- info "Deploying to production..."
2545
- bash -c "$prod_cmd" > "$ARTIFACTS_DIR/deploy-prod.log" 2>&1 || {
2546
- error "Production deploy failed"
2547
- if [[ -n "$rollback_cmd" ]]; then
2548
- warn "Rolling back..."
2549
- bash -c "$rollback_cmd" 2>&1 || error "Rollback also failed!"
2550
- fi
2551
- [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Production deploy failed — rollback ${rollback_cmd:+attempted}"
2552
- # Mark GitHub deployment as failed
2553
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
2554
- gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
2555
- fi
2556
- return 1
2557
- }
2558
- success "Production deploy complete"
2559
- fi
2560
- fi
2561
-
2562
- if [[ -n "$ISSUE_NUMBER" ]]; then
2563
- gh_comment_issue "$ISSUE_NUMBER" "✅ **Deploy complete**"
2564
- gh_add_labels "$ISSUE_NUMBER" "deployed"
2565
- fi
2566
-
2567
- # Mark GitHub deployment as successful
2568
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
2569
- if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
2570
- gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" true "" 2>/dev/null || true
2571
- fi
2572
- fi
2573
-
2574
- log_stage "deploy" "Deploy complete"
2575
- }
2576
-
2577
# ─── Validate Stage ───────────────────────────────────────────────────────
# Post-deploy validation: runs the configured smoke tests, retries a health
# URL, then (optionally) closes the originating issue and publishes a
# pipeline report page to the repo wiki.
# Globals (read): PIPELINE_CONFIG, ARTIFACTS_DIR, ISSUE_NUMBER, GOAL,
#   GITHUB_ISSUE, GIT_BRANCH, PIPELINE_NAME, PIPELINE_START_EPOCH,
#   STAGE_TIMINGS
# Globals (written): CURRENT_STAGE_ID
# Returns: 0 on success, 1 when smoke tests or the health check fail.
stage_validate() {
  CURRENT_STAGE_ID="validate"

  # Stage settings from the pipeline template (jq "null" → treated as unset).
  local smoke_cmd health_url close_issue
  smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  health_url=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  close_issue=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.close_issue) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""
  [[ "$health_url" == "null" ]] && health_url=""

  # Smoke tests — a failure files an incident issue (best-effort) and aborts.
  if [[ -n "$smoke_cmd" ]]; then
    info "Running smoke tests..."
    if ! bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/smoke.log" 2>&1; then
      error "Smoke tests failed"
      if [[ -n "$ISSUE_NUMBER" ]]; then
        gh issue create --title "Deploy validation failed: $GOAL" \
          --label "incident" --body "Pipeline smoke tests failed after deploy.

Related issue: ${GITHUB_ISSUE}
Branch: ${GIT_BRANCH}
PR: $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'unknown')" 2>/dev/null || true
      fi
      return 1
    fi
    success "Smoke tests passed"
  fi

  # Health check: up to 5 attempts, 10s apart between retries.
  if [[ -n "$health_url" ]]; then
    info "Health check: $health_url"
    local healthy="false"
    local try
    for try in 1 2 3 4 5; do
      if curl -sf "$health_url" >/dev/null 2>&1; then
        healthy="true"
        success "Health check passed"
        break
      fi
      [[ "$try" -lt 5 ]] && { info "Retry ${try}/5..."; sleep 10; }
    done
    if [[ "$healthy" != "true" ]]; then
      error "Health check failed after 5 attempts"
      return 1
    fi
  fi

  # Compute total duration once for both issue close and wiki report
  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  # Close original issue with comprehensive summary (best-effort)
  if [[ "$close_issue" == "true" && -n "$ISSUE_NUMBER" ]]; then
    gh issue close "$ISSUE_NUMBER" --comment "## ✅ Complete — Deployed & Validated

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |

_Closed automatically by \`shipwright pipeline\`_" 2>/dev/null || true

    gh_remove_label "$ISSUE_NUMBER" "pipeline/pr-created"
    gh_add_labels "$ISSUE_NUMBER" "pipeline/complete"
    success "Issue #$ISSUE_NUMBER closed"
  fi

  # Push pipeline report to wiki
  local report="# Pipeline Report — ${GOAL}

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |
| Stages | $(echo "$STAGE_TIMINGS" | tr '|' '\n' | wc -l | xargs) completed |

## Stage Timings
$(echo "$STAGE_TIMINGS" | tr '|' '\n' | sed 's/^/- /')

## Artifacts
$(ls -1 "$ARTIFACTS_DIR" 2>/dev/null | sed 's/^/- /')

---
_Generated by \`shipwright pipeline\` at $(now_iso)_"
  gh_wiki_page "Pipeline-Report-${ISSUE_NUMBER:-inline}" "$report"

  log_stage "validate" "Validation complete"
}
2673
-
2674
# ─── Monitor Stage ────────────────────────────────────────────────────────
# Polls a health URL and/or a log command for `duration_minutes` after a
# deploy. Crossing `error_threshold` triggers feedback collection, an
# optional auto-rollback (verified with the validate stage's smoke tests),
# a GitHub hotfix issue, and a failing stage result. A clean run folds the
# observed duration/error counts into a per-repo baseline so future runs
# adapt their window and threshold.
# Globals (read): PIPELINE_CONFIG, ARTIFACTS_DIR, SCRIPT_DIR, PROJECT_ROOT,
#   ISSUE_NUMBER, GITHUB_ISSUE, GIT_BRANCH, GOAL, GH_AVAILABLE, HOME,
#   DIM, RESET
# Globals (written): CURRENT_STAGE_ID
# Returns: 0 when monitoring stays under the threshold (or is skipped),
#   1 when the error threshold is exceeded.
stage_monitor() {
  CURRENT_STAGE_ID="monitor"

  # Read config from pipeline template
  local duration_minutes health_url error_threshold log_pattern log_cmd rollback_cmd auto_rollback
  duration_minutes=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.duration_minutes) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$duration_minutes" || "$duration_minutes" == "null" ]] && duration_minutes=5
  health_url=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""
  error_threshold=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.error_threshold) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$error_threshold" || "$error_threshold" == "null" ]] && error_threshold=5

  # Adaptive monitor: use historical baselines if available
  local repo_hash
  repo_hash=$(echo "${PROJECT_ROOT:-$(pwd)}" | cksum | awk '{print $1}')
  local baseline_file="${HOME}/.shipwright/baselines/${repo_hash}/deploy-monitor.json"
  if [[ -f "$baseline_file" ]]; then
    local hist_duration hist_threshold
    hist_duration=$(jq -r '.p90_stabilization_minutes // empty' "$baseline_file" 2>/dev/null || true)
    hist_threshold=$(jq -r '.p90_error_threshold // empty' "$baseline_file" 2>/dev/null || true)
    if [[ -n "$hist_duration" && "$hist_duration" != "null" ]]; then
      duration_minutes="$hist_duration"
      info "Monitor duration: ${duration_minutes}m ${DIM}(from baseline)${RESET}"
    fi
    if [[ -n "$hist_threshold" && "$hist_threshold" != "null" ]]; then
      error_threshold="$hist_threshold"
      info "Error threshold: ${error_threshold} ${DIM}(from baseline)${RESET}"
    fi
  fi
  # Both values feed shell arithmetic below; a corrupt baseline or template
  # (float, stray string) would abort the arithmetic, so fall back to the
  # defaults unless they are plain non-negative integers.
  [[ "$duration_minutes" =~ ^[0-9]+$ ]] || duration_minutes=5
  [[ "$error_threshold" =~ ^[0-9]+$ ]] || error_threshold=5
  log_pattern=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_pattern) // "ERROR|FATAL|PANIC"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$log_pattern" || "$log_pattern" == "null" ]] && log_pattern="ERROR|FATAL|PANIC"
  log_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$log_cmd" == "null" ]] && log_cmd=""
  rollback_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""
  auto_rollback=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.auto_rollback) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_rollback" || "$auto_rollback" == "null" ]] && auto_rollback="false"

  if [[ -z "$health_url" && -z "$log_cmd" ]]; then
    warn "No health_url or log_cmd configured — skipping monitor stage"
    log_stage "monitor" "Skipped (no monitoring configured)"
    return 0
  fi

  local report_file="$ARTIFACTS_DIR/monitor-report.md"
  local deploy_log_file="$ARTIFACTS_DIR/deploy-logs.txt"
  : > "$deploy_log_file"
  local total_errors=0
  local poll_interval=30 # seconds between polls
  local total_polls=$(( (duration_minutes * 60) / poll_interval ))
  [[ "$total_polls" -lt 1 ]] && total_polls=1

  info "Post-deploy monitoring: ${duration_minutes}m (${total_polls} polls, threshold: ${error_threshold} errors)"

  emit_event "monitor.started" \
    "issue=${ISSUE_NUMBER:-0}" \
    "duration_minutes=$duration_minutes" \
    "error_threshold=$error_threshold"

  {
    echo "# Post-Deploy Monitor Report"
    echo ""
    echo "- Duration: ${duration_minutes} minutes"
    echo "- Health URL: ${health_url:-none}"
    echo "- Log command: ${log_cmd:-none}"
    echo "- Error threshold: ${error_threshold}"
    echo "- Auto-rollback: ${auto_rollback}"
    echo ""
    echo "## Poll Results"
    echo ""
  } > "$report_file"

  local poll=0
  local health_failures=0
  local log_errors=0
  while [[ "$poll" -lt "$total_polls" ]]; do
    poll=$((poll + 1))
    local poll_time
    poll_time=$(now_iso)

    # Health URL check
    if [[ -n "$health_url" ]]; then
      local http_status
      # BUGFIX: the previous form `$(curl -sf ... || echo "000")` captured
      # TWO lines on an HTTP error — curl prints its -w output even when -f
      # makes it exit non-zero, then the fallback echo appends "000" — which
      # broke the numeric [[ -ge ]] comparison below. Without -f curl exits 0
      # on HTTP errors and prints the real code; the || branch now only fires
      # on transport failures, and the value is sanitized to digits.
      http_status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null) || http_status="000"
      http_status="${http_status//[^0-9]/}"
      [[ -z "$http_status" ]] && http_status="000"
      if [[ "$http_status" -ge 200 && "$http_status" -lt 400 ]]; then
        echo "- [${poll_time}] Health: ✅ (HTTP ${http_status})" >> "$report_file"
      else
        health_failures=$((health_failures + 1))
        total_errors=$((total_errors + 1))
        echo "- [${poll_time}] Health: ❌ (HTTP ${http_status})" >> "$report_file"
        warn "Health check failed: HTTP ${http_status}"
      fi
    fi

    # Log command check (accumulate deploy logs for feedback collect)
    if [[ -n "$log_cmd" ]]; then
      local log_output
      log_output=$(bash -c "$log_cmd" 2>/dev/null || true)
      [[ -n "$log_output" ]] && echo "$log_output" >> "$deploy_log_file"
      local error_count=0
      if [[ -n "$log_output" ]]; then
        error_count=$(echo "$log_output" | grep -cE "$log_pattern" 2>/dev/null || true)
        error_count="${error_count:-0}"
      fi
      if [[ "$error_count" -gt 0 ]]; then
        log_errors=$((log_errors + error_count))
        total_errors=$((total_errors + error_count))
        echo "- [${poll_time}] Logs: ⚠️ ${error_count} error(s) matching '${log_pattern}'" >> "$report_file"
        warn "Log errors detected: ${error_count}"
      else
        echo "- [${poll_time}] Logs: ✅ clean" >> "$report_file"
      fi
    fi

    emit_event "monitor.check" \
      "issue=${ISSUE_NUMBER:-0}" \
      "poll=$poll" \
      "total_errors=$total_errors" \
      "health_failures=$health_failures"

    # Check threshold
    if [[ "$total_errors" -ge "$error_threshold" ]]; then
      error "Error threshold exceeded: ${total_errors} >= ${error_threshold}"

      echo "" >> "$report_file"
      echo "## ❌ THRESHOLD EXCEEDED" >> "$report_file"
      echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"

      emit_event "monitor.alert" \
        "issue=${ISSUE_NUMBER:-0}" \
        "total_errors=$total_errors" \
        "threshold=$error_threshold"

      # Feedback loop: collect deploy logs and optionally create issue
      if [[ -f "$deploy_log_file" ]] && [[ -s "$deploy_log_file" ]] && [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" collect "$deploy_log_file" 2>/dev/null) || true
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" create-issue 2>/dev/null) || true
      fi

      # Auto-rollback: feedback rollback (GitHub Deployments API) and/or config rollback_cmd
      if [[ "$auto_rollback" == "true" ]]; then
        warn "Auto-rolling back..."
        echo "" >> "$report_file"
        echo "## Rollback" >> "$report_file"

        # Trigger feedback rollback (calls sw-github-deploy.sh rollback)
        if [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
          (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" rollback production "Monitor threshold exceeded (${total_errors} errors)" >> "$report_file" 2>&1) || true
        fi

        if [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" >> "$report_file" 2>&1; then
          success "Rollback executed"
          echo "Rollback: ✅ success" >> "$report_file"

          # Post-rollback smoke test verification (reuses the validate stage's smoke_cmd)
          local smoke_cmd
          smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
          [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""

          if [[ -n "$smoke_cmd" ]]; then
            info "Verifying rollback with smoke tests..."
            if bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/rollback-smoke.log" 2>&1; then
              success "Rollback verified — smoke tests pass"
              echo "Rollback verification: ✅ smoke tests pass" >> "$report_file"
              emit_event "monitor.rollback_verified" \
                "issue=${ISSUE_NUMBER:-0}" \
                "status=pass"
            else
              error "Rollback verification FAILED — smoke tests still failing"
              echo "Rollback verification: ❌ smoke tests FAILED — manual intervention required" >> "$report_file"
              emit_event "monitor.rollback_verified" \
                "issue=${ISSUE_NUMBER:-0}" \
                "status=fail"
              if [[ -n "$ISSUE_NUMBER" ]]; then
                gh_comment_issue "$ISSUE_NUMBER" "🚨 **Rollback executed but verification failed** — smoke tests still failing after rollback. Manual intervention required.

Smoke command: \`${smoke_cmd}\`
Log: see \`pipeline-artifacts/rollback-smoke.log\`" 2>/dev/null || true
              fi
            fi
          fi
        else
          error "Rollback failed!"
          echo "Rollback: ❌ failed" >> "$report_file"
        fi

        emit_event "monitor.rollback" \
          "issue=${ISSUE_NUMBER:-0}" \
          "total_errors=$total_errors"

        # Post to GitHub
        if [[ -n "$ISSUE_NUMBER" ]]; then
          gh_comment_issue "$ISSUE_NUMBER" "🚨 **Auto-rollback triggered** — ${total_errors} errors exceeded threshold (${error_threshold})

Rollback command: \`${rollback_cmd}\`" 2>/dev/null || true

          # Create hotfix issue
          if [[ "$GH_AVAILABLE" == "true" ]]; then
            gh issue create \
              --title "Hotfix: Deploy regression for ${GOAL}" \
              --label "hotfix,incident" \
              --body "Auto-rollback triggered during post-deploy monitoring.

**Original issue:** ${GITHUB_ISSUE:-N/A}
**Errors detected:** ${total_errors}
**Threshold:** ${error_threshold}
**Branch:** ${GIT_BRANCH}

## Monitor Report
$(cat "$report_file")

---
_Created automatically by \`shipwright pipeline\` monitor stage_" 2>/dev/null || true
          fi
        fi
      fi

      log_stage "monitor" "Failed — ${total_errors} errors (threshold: ${error_threshold})"
      return 1
    fi

    # Sleep between polls (skip on last poll)
    if [[ "$poll" -lt "$total_polls" ]]; then
      sleep "$poll_interval"
    fi
  done

  # Monitoring complete — all clear
  echo "" >> "$report_file"
  echo "## ✅ Monitoring Complete" >> "$report_file"
  echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"
  echo "Health failures: ${health_failures}" >> "$report_file"
  echo "Log errors: ${log_errors}" >> "$report_file"

  success "Post-deploy monitoring clean (${total_errors} errors in ${duration_minutes}m)"

  # Proactive feedback collection: always collect deploy logs for trend analysis
  if [[ -f "$deploy_log_file" ]] && [[ -s "$deploy_log_file" ]] && [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
    (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" collect "$deploy_log_file" 2>/dev/null) || true
  fi

  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Post-deploy monitoring passed** — ${duration_minutes}m, ${total_errors} errors" 2>/dev/null || true
  fi

  log_stage "monitor" "Clean — ${total_errors} errors in ${duration_minutes}m"

  # Record baseline for adaptive monitoring on future runs
  local baseline_dir="${HOME}/.shipwright/baselines/${repo_hash}"
  mkdir -p "$baseline_dir" 2>/dev/null || true
  local baseline_tmp
  baseline_tmp="$(mktemp)"
  if [[ -f "${baseline_dir}/deploy-monitor.json" ]]; then
    # Append to history and recalculate p90
    jq --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '.history += [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}] |
       .p90_stabilization_minutes = ([.history[].duration_minutes] | sort | .[length * 9 / 10 | floor]) |
       .p90_error_threshold = (([.history[].errors] | sort | .[length * 9 / 10 | floor]) + 2) |
       .updated_at = now' \
      "${baseline_dir}/deploy-monitor.json" > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  else
    jq -n --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '{history: [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}],
        p90_stabilization_minutes: ($dur | tonumber),
        p90_error_threshold: (($errs | tonumber) + 2),
        updated_at: now}' \
      > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  fi
}
201
# Stage implementations live in split-out lib/ modules; source each one only
# when present so a partial install degrades gracefully. The `if` form is
# used instead of `[[ -f … ]] && source …` because the && form leaves a
# non-zero exit status when a module is absent — a hazard under `set -e`
# and when this file is itself sourced and the status propagates.
_PIPELINE_STAGES_REVIEW_SH="${SCRIPT_DIR}/lib/pipeline-stages-review.sh"
if [[ -f "$_PIPELINE_STAGES_REVIEW_SH" ]]; then
  source "$_PIPELINE_STAGES_REVIEW_SH"
fi

_PIPELINE_STAGES_DELIVERY_SH="${SCRIPT_DIR}/lib/pipeline-stages-delivery.sh"
if [[ -f "$_PIPELINE_STAGES_DELIVERY_SH" ]]; then
  source "$_PIPELINE_STAGES_DELIVERY_SH"
fi

_PIPELINE_STAGES_MONITOR_SH="${SCRIPT_DIR}/lib/pipeline-stages-monitor.sh"
if [[ -f "$_PIPELINE_STAGES_MONITOR_SH" ]]; then
  source "$_PIPELINE_STAGES_MONITOR_SH"
fi