shipwright-cli 2.2.0 → 2.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. package/README.md +15 -16
  2. package/config/policy.schema.json +104 -29
  3. package/docs/AGI-PLATFORM-PLAN.md +11 -7
  4. package/docs/AGI-WHATS-NEXT.md +26 -20
  5. package/docs/README.md +2 -0
  6. package/package.json +1 -1
  7. package/scripts/check-version-consistency.sh +72 -0
  8. package/scripts/lib/daemon-adaptive.sh +610 -0
  9. package/scripts/lib/daemon-dispatch.sh +489 -0
  10. package/scripts/lib/daemon-failure.sh +387 -0
  11. package/scripts/lib/daemon-patrol.sh +1113 -0
  12. package/scripts/lib/daemon-poll.sh +1202 -0
  13. package/scripts/lib/daemon-state.sh +550 -0
  14. package/scripts/lib/daemon-triage.sh +490 -0
  15. package/scripts/lib/helpers.sh +81 -1
  16. package/scripts/lib/pipeline-detection.sh +278 -0
  17. package/scripts/lib/pipeline-github.sh +196 -0
  18. package/scripts/lib/pipeline-intelligence.sh +1706 -0
  19. package/scripts/lib/pipeline-quality-checks.sh +1054 -0
  20. package/scripts/lib/pipeline-quality.sh +11 -0
  21. package/scripts/lib/pipeline-stages.sh +2508 -0
  22. package/scripts/lib/pipeline-state.sh +529 -0
  23. package/scripts/sw +26 -4
  24. package/scripts/sw-activity.sh +1 -1
  25. package/scripts/sw-adaptive.sh +2 -2
  26. package/scripts/sw-adversarial.sh +1 -1
  27. package/scripts/sw-architecture-enforcer.sh +1 -1
  28. package/scripts/sw-auth.sh +1 -1
  29. package/scripts/sw-autonomous.sh +1 -1
  30. package/scripts/sw-changelog.sh +1 -1
  31. package/scripts/sw-checkpoint.sh +1 -1
  32. package/scripts/sw-ci.sh +1 -1
  33. package/scripts/sw-cleanup.sh +1 -1
  34. package/scripts/sw-code-review.sh +1 -1
  35. package/scripts/sw-connect.sh +1 -1
  36. package/scripts/sw-context.sh +1 -1
  37. package/scripts/sw-cost.sh +1 -1
  38. package/scripts/sw-daemon.sh +52 -4816
  39. package/scripts/sw-dashboard.sh +1 -1
  40. package/scripts/sw-db.sh +1 -1
  41. package/scripts/sw-decompose.sh +1 -1
  42. package/scripts/sw-deps.sh +1 -1
  43. package/scripts/sw-developer-simulation.sh +1 -1
  44. package/scripts/sw-discovery.sh +1 -1
  45. package/scripts/sw-doc-fleet.sh +1 -1
  46. package/scripts/sw-docs-agent.sh +1 -1
  47. package/scripts/sw-docs.sh +1 -1
  48. package/scripts/sw-doctor.sh +42 -1
  49. package/scripts/sw-dora.sh +1 -1
  50. package/scripts/sw-durable.sh +1 -1
  51. package/scripts/sw-e2e-orchestrator.sh +1 -1
  52. package/scripts/sw-eventbus.sh +1 -1
  53. package/scripts/sw-feedback.sh +1 -1
  54. package/scripts/sw-fix.sh +1 -1
  55. package/scripts/sw-fleet-discover.sh +1 -1
  56. package/scripts/sw-fleet-viz.sh +3 -3
  57. package/scripts/sw-fleet.sh +1 -1
  58. package/scripts/sw-github-app.sh +1 -1
  59. package/scripts/sw-github-checks.sh +1 -1
  60. package/scripts/sw-github-deploy.sh +1 -1
  61. package/scripts/sw-github-graphql.sh +1 -1
  62. package/scripts/sw-guild.sh +1 -1
  63. package/scripts/sw-heartbeat.sh +1 -1
  64. package/scripts/sw-hygiene.sh +1 -1
  65. package/scripts/sw-incident.sh +1 -1
  66. package/scripts/sw-init.sh +1 -1
  67. package/scripts/sw-instrument.sh +1 -1
  68. package/scripts/sw-intelligence.sh +1 -1
  69. package/scripts/sw-jira.sh +1 -1
  70. package/scripts/sw-launchd.sh +1 -1
  71. package/scripts/sw-linear.sh +1 -1
  72. package/scripts/sw-logs.sh +1 -1
  73. package/scripts/sw-loop.sh +1 -1
  74. package/scripts/sw-memory.sh +1 -1
  75. package/scripts/sw-mission-control.sh +1 -1
  76. package/scripts/sw-model-router.sh +1 -1
  77. package/scripts/sw-otel.sh +4 -4
  78. package/scripts/sw-oversight.sh +1 -1
  79. package/scripts/sw-pipeline-composer.sh +1 -1
  80. package/scripts/sw-pipeline-vitals.sh +1 -1
  81. package/scripts/sw-pipeline.sh +23 -56
  82. package/scripts/sw-pipeline.sh.mock +7 -0
  83. package/scripts/sw-pm.sh +1 -1
  84. package/scripts/sw-pr-lifecycle.sh +1 -1
  85. package/scripts/sw-predictive.sh +1 -1
  86. package/scripts/sw-prep.sh +1 -1
  87. package/scripts/sw-ps.sh +1 -1
  88. package/scripts/sw-public-dashboard.sh +1 -1
  89. package/scripts/sw-quality.sh +1 -1
  90. package/scripts/sw-reaper.sh +1 -1
  91. package/scripts/sw-recruit.sh +9 -1
  92. package/scripts/sw-regression.sh +1 -1
  93. package/scripts/sw-release-manager.sh +1 -1
  94. package/scripts/sw-release.sh +1 -1
  95. package/scripts/sw-remote.sh +1 -1
  96. package/scripts/sw-replay.sh +1 -1
  97. package/scripts/sw-retro.sh +1 -1
  98. package/scripts/sw-scale.sh +8 -5
  99. package/scripts/sw-security-audit.sh +1 -1
  100. package/scripts/sw-self-optimize.sh +158 -7
  101. package/scripts/sw-session.sh +1 -1
  102. package/scripts/sw-setup.sh +1 -1
  103. package/scripts/sw-standup.sh +3 -3
  104. package/scripts/sw-status.sh +1 -1
  105. package/scripts/sw-strategic.sh +1 -1
  106. package/scripts/sw-stream.sh +8 -2
  107. package/scripts/sw-swarm.sh +7 -10
  108. package/scripts/sw-team-stages.sh +1 -1
  109. package/scripts/sw-templates.sh +1 -1
  110. package/scripts/sw-testgen.sh +1 -1
  111. package/scripts/sw-tmux-pipeline.sh +1 -1
  112. package/scripts/sw-tmux.sh +1 -1
  113. package/scripts/sw-trace.sh +1 -1
  114. package/scripts/sw-tracker.sh +24 -6
  115. package/scripts/sw-triage.sh +1 -1
  116. package/scripts/sw-upgrade.sh +1 -1
  117. package/scripts/sw-ux.sh +1 -1
  118. package/scripts/sw-webhook.sh +1 -1
  119. package/scripts/sw-widgets.sh +1 -1
  120. package/scripts/sw-worktree.sh +1 -1
@@ -0,0 +1,2508 @@
1
+ # pipeline-stages.sh — Stage implementations (intake, plan, build, test, review, pr, merge, deploy, validate, monitor) for sw-pipeline.sh
2
+ # Source from sw-pipeline.sh. Requires all pipeline globals and state/github/detection/quality modules.
3
# Include guard: bail out early if this module has already been sourced,
# so sourcing it twice is a harmless no-op.
if [[ -n "${_PIPELINE_STAGES_LOADED:-}" ]]; then
  return 0
fi
_PIPELINE_STAGES_LOADED=1
5
+
6
#######################################
# Print a short, colorized preview banner for a pipeline stage.
# Arguments: $1 - stage id (intake|plan|design|build|test|review|pr|merge|deploy|validate|monitor)
# Globals (read): PURPLE BOLD CYAN RESET - terminal color escape variables
# Outputs: banner plus a one-line description to stdout; unknown stage ids
#   print only the banner.
#######################################
show_stage_preview() {
  local stage_id="$1"
  local description=""
  case "$stage_id" in
    intake)   description=" Fetch issue, detect task type, create branch, self-assign" ;;
    plan)     description=" Generate plan via Claude, post task checklist to issue" ;;
    design)   description=" Generate Architecture Decision Record (ADR), evaluate alternatives" ;;
    build)    description=" Delegate to ${CYAN}shipwright loop${RESET} for autonomous building" ;;
    test)     description=" Run test suite and check coverage" ;;
    review)   description=" AI code review on the diff, post findings" ;;
    pr)       description=" Create GitHub PR with labels, reviewers, milestone" ;;
    merge)    description=" Wait for CI checks, merge PR, optionally delete branch" ;;
    deploy)   description=" Deploy to staging/production with rollback" ;;
    validate) description=" Smoke tests, health checks, close issue" ;;
    monitor)  description=" Post-deploy monitoring, health checks, auto-rollback" ;;
  esac
  echo ""
  echo -e "${PURPLE}${BOLD}━━━ Stage: ${stage_id} ━━━${RESET}"
  if [[ -n "$description" ]]; then
    echo -e "$description"
  fi
  echo ""
}
25
+
26
#######################################
# Stage: intake — bootstrap a pipeline run from its goal and/or GitHub issue.
# Fetches issue metadata (when ISSUE_NUMBER is set), detects task type and
# test command, creates/checks out a work branch, posts an initial progress
# comment, and writes intake.json to the artifacts directory.
# Globals (read):   ISSUE_NUMBER, TEST_CMD, PROJECT_ROOT (via helpers),
#                   color vars BOLD/DIM/CYAN/RESET
# Globals (written): CURRENT_STAGE_ID, GOAL, ISSUE_BODY, ISSUE_LABELS,
#                   ISSUE_MILESTONE, ISSUE_ASSIGNEES, GITHUB_ISSUE,
#                   TASK_TYPE, TEST_CMD, GIT_BRANCH
# Returns: 0 on success; 1 if the issue cannot be fetched at all.
#######################################
stage_intake() {
  CURRENT_STAGE_ID="intake"
  local project_lang
  project_lang=$(detect_project_lang)
  info "Project: ${BOLD}$project_lang${RESET}"

  # 1. Fetch issue metadata if --issue provided
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local meta
    # gh_get_issue_meta returns issue JSON with at least title/body/labels/
    # milestone/assignees fields (see jq filters below).
    meta=$(gh_get_issue_meta "$ISSUE_NUMBER")

    if [[ -n "$meta" ]]; then
      GOAL=$(echo "$meta" | jq -r '.title // ""')
      ISSUE_BODY=$(echo "$meta" | jq -r '.body // ""')
      ISSUE_LABELS=$(echo "$meta" | jq -r '[.labels[].name] | join(",")' 2>/dev/null || true)
      ISSUE_MILESTONE=$(echo "$meta" | jq -r '.milestone.title // ""' 2>/dev/null || true)
      ISSUE_ASSIGNEES=$(echo "$meta" | jq -r '[.assignees[].login] | join(",")' 2>/dev/null || true)
      # jq -r prints the literal string "null" for JSON null; normalize to "".
      [[ "$ISSUE_MILESTONE" == "null" ]] && ISSUE_MILESTONE=""
      [[ "$ISSUE_LABELS" == "null" ]] && ISSUE_LABELS=""
    else
      # Fallback: just get title
      GOAL=$(gh issue view "$ISSUE_NUMBER" --json title -q .title 2>/dev/null) || {
        error "Failed to fetch issue #$ISSUE_NUMBER"
        return 1
      }
    fi

    GITHUB_ISSUE="#$ISSUE_NUMBER"
    info "Issue #$ISSUE_NUMBER: ${BOLD}$GOAL${RESET}"

    if [[ -n "$ISSUE_LABELS" ]]; then
      info "Labels: ${DIM}$ISSUE_LABELS${RESET}"
    fi
    if [[ -n "$ISSUE_MILESTONE" ]]; then
      info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
    fi

    # Self-assign
    gh_assign_self "$ISSUE_NUMBER"

    # Add in-progress label
    gh_add_labels "$ISSUE_NUMBER" "pipeline/in-progress"
  fi

  # 2. Detect task type
  TASK_TYPE=$(detect_task_type "$GOAL")
  local suggested_template
  suggested_template=$(template_for_type "$TASK_TYPE")
  info "Detected: ${BOLD}$TASK_TYPE${RESET} → team template: ${CYAN}$suggested_template${RESET}"

  # 3. Auto-detect test command if not provided
  if [[ -z "$TEST_CMD" ]]; then
    TEST_CMD=$(detect_test_cmd)
    if [[ -n "$TEST_CMD" ]]; then
      info "Auto-detected test: ${DIM}$TEST_CMD${RESET}"
    fi
  fi

  # 4. Create branch with smart prefix
  local prefix
  prefix=$(branch_prefix_for_type "$TASK_TYPE")
  local slug
  # Slugify the goal: lowercase, non-alphanumerics → '-', squeeze repeated
  # dashes, cap at 40 chars, then strip any trailing dash.
  slug=$(echo "$GOAL" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | cut -c1-40)
  slug="${slug%-}"
  [[ -n "$ISSUE_NUMBER" ]] && slug="${slug}-${ISSUE_NUMBER}"
  GIT_BRANCH="${prefix}/${slug}"

  # Create the branch; if it already exists, fall back to checking it out.
  # Both failures are tolerated so reruns of the pipeline are idempotent.
  git checkout -b "$GIT_BRANCH" 2>/dev/null || {
    info "Branch $GIT_BRANCH exists, checking out"
    git checkout "$GIT_BRANCH" 2>/dev/null || true
  }
  success "Branch: ${BOLD}$GIT_BRANCH${RESET}"

  # 5. Post initial progress comment on GitHub issue
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local body
    body=$(gh_build_progress_body)
    gh_post_progress "$ISSUE_NUMBER" "$body"
  fi

  # 6. Save artifacts
  save_artifact "intake.json" "$(jq -n \
    --arg goal "$GOAL" --arg type "$TASK_TYPE" \
    --arg template "$suggested_template" --arg branch "$GIT_BRANCH" \
    --arg issue "${GITHUB_ISSUE:-}" --arg lang "$project_lang" \
    --arg test_cmd "${TEST_CMD:-}" --arg labels "${ISSUE_LABELS:-}" \
    --arg milestone "${ISSUE_MILESTONE:-}" --arg body "${ISSUE_BODY:-}" \
    '{goal:$goal, type:$type, template:$template, branch:$branch,
      issue:$issue, language:$lang, test_cmd:$test_cmd,
      labels:$labels, milestone:$milestone, body:$body}')"

  log_stage "intake" "Goal: $GOAL
Type: $TASK_TYPE → template: $suggested_template
Branch: $GIT_BRANCH
Language: $project_lang
Test cmd: ${TEST_CMD:-none detected}"
}
123
+
124
#######################################
# Stage: plan — generate an implementation plan via the Claude CLI, extract
# a task checklist, publish it (issue comment, wiki, local task files), and
# run an LLM-based validation gate with up to 2 attempts.
# Globals (read):   ARTIFACTS_DIR, SCRIPT_DIR, GOAL, ISSUE_BODY, ISSUE_NUMBER,
#                   TASK_TYPE, TEST_CMD, MODEL, CLAUDE_MODEL, PIPELINE_CONFIG,
#                   PROJECT_ROOT, TASKS_FILE, PIPELINE_NAME, GIT_BRANCH,
#                   GITHUB_ISSUE, INTELLIGENCE_COMPLEXITY, color vars
# Globals (written): CURRENT_STAGE_ID
# Outputs: plan.md, plan-validation.md, dod.md, .plan-failure-sig.txt and
#   token logs under $ARTIFACTS_DIR; $TASKS_FILE and .claude/tasks.md.
# Returns: 0 on success; 1 when plan generation fails or produces an
#   API/CLI error instead of a plan. Validation failure does NOT fail the
#   stage — the pipeline proceeds with a warning.
#######################################
stage_plan() {
  CURRENT_STAGE_ID="plan"
  local plan_file="$ARTIFACTS_DIR/plan.md"

  if ! command -v claude &>/dev/null; then
    error "Claude CLI not found — cannot generate plan"
    return 1
  fi

  info "Generating implementation plan..."

  # ── Gather context bundle (if context engine available) ──
  local context_script="${SCRIPT_DIR}/sw-context.sh"
  if [[ -x "$context_script" ]]; then
    "$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
  fi

  # Build rich prompt with all available context. Each optional section below
  # appends a markdown heading only when its source of context is available.
  local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.

## Goal
${GOAL}
"

  # Add issue context
  if [[ -n "$ISSUE_BODY" ]]; then
    plan_prompt="${plan_prompt}
## Issue Description
${ISSUE_BODY}
"
  fi

  # Inject context bundle from context engine (if available)
  local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
  if [[ -f "$_context_bundle" ]]; then
    local _cb_content
    # Cap at 100 lines to keep the prompt bounded.
    _cb_content=$(cat "$_context_bundle" 2>/dev/null | head -100 || true)
    if [[ -n "$_cb_content" ]]; then
      plan_prompt="${plan_prompt}
## Pipeline Context
${_cb_content}
"
    fi
  fi

  # Inject intelligence memory context for similar past plans
  # NOTE(review): `&>/dev/null 2>&1` — the 2>&1 is redundant after &>.
  if type intelligence_search_memory &>/dev/null 2>&1; then
    local plan_memory
    plan_memory=$(intelligence_search_memory "plan stage for ${TASK_TYPE:-feature}: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
    # Skip empty result sets and error payloads (substring checks on raw JSON).
    if [[ -n "$plan_memory" && "$plan_memory" != *'"results":[]'* && "$plan_memory" != *'"error"'* ]]; then
      local memory_summary
      memory_summary=$(echo "$plan_memory" | jq -r '.results[]? | "- \(.)"' 2>/dev/null | head -10 || true)
      if [[ -n "$memory_summary" ]]; then
        plan_prompt="${plan_prompt}
## Historical Context (from previous pipelines)
Previous similar issues were planned as:
${memory_summary}
"
      fi
    fi
  fi

  # Self-aware pipeline: inject hint when plan stage has been failing recently
  local plan_hint
  plan_hint=$(get_stage_self_awareness_hint "plan" 2>/dev/null || true)
  if [[ -n "$plan_hint" ]]; then
    plan_prompt="${plan_prompt}
## Self-Assessment (recent plan stage performance)
${plan_hint}
"
  fi

  # Inject cross-pipeline discoveries (from other concurrent/similar pipelines)
  if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
    local plan_discoveries
    plan_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.json" 2>/dev/null | head -20 || true)
    if [[ -n "$plan_discoveries" ]]; then
      plan_prompt="${plan_prompt}
## Discoveries from Other Pipelines
${plan_discoveries}
"
    fi
  fi

  # Inject architecture patterns from intelligence layer
  # Repo identity = first 12 hex chars of sha256 of the project root path.
  local repo_hash_plan
  repo_hash_plan=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local arch_file_plan="${HOME}/.shipwright/memory/${repo_hash_plan}/architecture.json"
  if [[ -f "$arch_file_plan" ]]; then
    local arch_patterns
    arch_patterns=$(jq -r '
      "Language: \(.language // "unknown")",
      "Framework: \(.framework // "unknown")",
      "Patterns: \((.patterns // []) | join(", "))",
      "Rules: \((.rules // []) | join("; "))"
    ' "$arch_file_plan" 2>/dev/null || true)
    if [[ -n "$arch_patterns" ]]; then
      plan_prompt="${plan_prompt}
## Architecture Patterns
${arch_patterns}
"
    fi
  fi

  # Task-type-specific guidance
  case "${TASK_TYPE:-feature}" in
    bug)
      plan_prompt="${plan_prompt}
## Task Type: Bug Fix
Focus on: reproducing the bug, identifying root cause, minimal targeted fix, regression tests.
" ;;
    refactor)
      plan_prompt="${plan_prompt}
## Task Type: Refactor
Focus on: preserving all existing behavior, incremental changes, comprehensive test coverage.
" ;;
    security)
      plan_prompt="${plan_prompt}
## Task Type: Security
Focus on: threat modeling, OWASP top 10, input validation, authentication/authorization.
" ;;
  esac

  # Add project context
  local project_lang
  project_lang=$(detect_project_lang)
  plan_prompt="${plan_prompt}
## Project Context
- Language: ${project_lang}
- Test command: ${TEST_CMD:-not configured}
- Task type: ${TASK_TYPE:-feature}

## Required Output
Create a Markdown plan with these sections:

### Files to Modify
List every file to create or modify with full paths.

### Implementation Steps
Numbered steps in order of execution. Be specific about what code to write.

### Task Checklist
A checkbox list of discrete tasks that can be tracked:
- [ ] Task 1: Description
- [ ] Task 2: Description
(Include 5-15 tasks covering the full implementation)

### Testing Approach
How to verify the implementation works.

### Definition of Done
Checklist of completion criteria.
"

  # Model resolution precedence: per-stage config < CLI --model override;
  # then fall back to "opus"; then CLAUDE_MODEL env wins only when no
  # explicit --model was given.
  local plan_model
  plan_model=$(jq -r --arg id "plan" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -n "$MODEL" ]] && plan_model="$MODEL"
  [[ -z "$plan_model" || "$plan_model" == "null" ]] && plan_model="opus"
  # Intelligence model routing (when no explicit CLI --model override)
  if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
    plan_model="$CLAUDE_MODEL"
  fi

  # stdout → plan file; stderr → token log (parsed for usage accounting).
  local _token_log="${ARTIFACTS_DIR}/.claude-tokens-plan.log"
  claude --print --model "$plan_model" --max-turns 25 \
    "$plan_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
  parse_claude_tokens "$_token_log"

  if [[ ! -s "$plan_file" ]]; then
    error "Plan generation failed — empty output"
    return 1
  fi

  # Validate plan content — detect API/CLI errors masquerading as plans
  local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
  _plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
  if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
    error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
    return 1
  fi

  local line_count
  line_count=$(wc -l < "$plan_file" | xargs)
  if [[ "$line_count" -lt 3 ]]; then
    error "Plan too short (${line_count} lines) — likely an error, not a real plan"
    return 1
  fi
  info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"

  # Extract task checklist for GitHub issue and task tracking:
  # checkbox lines between "### Task Checklist" and the next "###" heading.
  local checklist
  checklist=$(sed -n '/### Task Checklist/,/^###/p' "$plan_file" 2>/dev/null | \
    grep '^\s*- \[' | head -20)

  if [[ -z "$checklist" ]]; then
    # Fallback: extract any checkbox lines
    checklist=$(grep '^\s*- \[' "$plan_file" 2>/dev/null | head -20)
  fi

  # Write local task file for Claude Code build stage
  if [[ -n "$checklist" ]]; then
    cat > "$TASKS_FILE" <<TASKS_EOF
# Pipeline Tasks — ${GOAL}

## Implementation Checklist
${checklist}

## Context
- Pipeline: ${PIPELINE_NAME}
- Branch: ${GIT_BRANCH}
- Issue: ${GITHUB_ISSUE:-none}
- Generated: $(now_iso)
TASKS_EOF
    info "Task list: ${DIM}$TASKS_FILE${RESET} ($(echo "$checklist" | wc -l | xargs) tasks)"
  fi

  # Post plan + task checklist to GitHub issue
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local plan_summary
    plan_summary=$(head -50 "$plan_file")
    local gh_body="## 📋 Implementation Plan

<details>
<summary>Click to expand full plan (${line_count} lines)</summary>

${plan_summary}

</details>
"
    if [[ -n "$checklist" ]]; then
      gh_body="${gh_body}
## ✅ Task Checklist
${checklist}
"
    fi

    gh_body="${gh_body}
---
_Generated by \`shipwright pipeline\` at $(now_iso)_"

    gh_comment_issue "$ISSUE_NUMBER" "$gh_body"
    info "Plan posted to issue #$ISSUE_NUMBER"
  fi

  # Push plan to wiki
  gh_wiki_page "Pipeline-Plan-${ISSUE_NUMBER:-inline}" "$(<"$plan_file")"

  # Generate Claude Code task list
  local cc_tasks_file="$PROJECT_ROOT/.claude/tasks.md"
  if [[ -n "$checklist" ]]; then
    cat > "$cc_tasks_file" <<CC_TASKS_EOF
# Tasks — ${GOAL}

## Status: In Progress
Pipeline: ${PIPELINE_NAME} | Branch: ${GIT_BRANCH}

## Checklist
${checklist}

## Notes
- Generated from pipeline plan at $(now_iso)
- Pipeline will update status as tasks complete
CC_TASKS_EOF
    info "Claude Code tasks: ${DIM}$cc_tasks_file${RESET}"
  fi

  # Extract definition of done for quality gates
  sed -n '/[Dd]efinition [Oo]f [Dd]one/,/^#/p' "$plan_file" | head -20 > "$ARTIFACTS_DIR/dod.md" 2>/dev/null || true

  # ── Plan Validation Gate ──
  # Ask Claude to validate the plan before proceeding. Up to 2 attempts;
  # on failure the plan is regenerated with validator feedback. A repeated
  # identical failure mode breaks the loop early ("escalation").
  if command -v claude &>/dev/null && [[ -s "$plan_file" ]]; then
    local validation_attempts=0
    local max_validation_attempts=2
    local plan_valid=false

    while [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; do
      validation_attempts=$((validation_attempts + 1))
      info "Validating plan (attempt ${validation_attempts}/${max_validation_attempts})..."

      # Build enriched validation prompt with learned context
      local validation_extra=""

      # Inject rejected plan history from memory
      if type intelligence_search_memory &>/dev/null 2>&1; then
        local rejected_plans
        rejected_plans=$(intelligence_search_memory "rejected plan validation failures for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
        if [[ -n "$rejected_plans" ]]; then
          validation_extra="${validation_extra}
## Previously Rejected Plans
These issues were found in past plan validations for similar tasks:
${rejected_plans}
"
        fi
      fi

      # Inject repo conventions contextually
      local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
      if [[ -f "$claudemd" ]]; then
        local conventions_summary
        # Headings and bullet lines only, capped at 15 lines.
        conventions_summary=$(head -100 "$claudemd" 2>/dev/null | grep -E '^##|^-|^\*' | head -15 || true)
        if [[ -n "$conventions_summary" ]]; then
          validation_extra="${validation_extra}
## Repo Conventions
${conventions_summary}
"
        fi
      fi

      # Inject complexity estimate
      local complexity_hint=""
      if [[ -n "${INTELLIGENCE_COMPLEXITY:-}" && "${INTELLIGENCE_COMPLEXITY:-0}" -gt 0 ]]; then
        complexity_hint="This is estimated as complexity ${INTELLIGENCE_COMPLEXITY}/10. Plans for this complexity typically need ${INTELLIGENCE_COMPLEXITY} or more tasks."
      fi

      # The ${complexity_hint:+...} alternate expansion inserts the whole
      # "## Complexity Estimate" section only when the hint is non-empty.
      local validation_prompt="You are a plan validator. Review this implementation plan and determine if it is valid.

## Goal
${GOAL}
${complexity_hint:+
## Complexity Estimate
${complexity_hint}
}
## Plan
$(cat "$plan_file")
${validation_extra}
Evaluate:
1. Are all requirements from the goal addressed?
2. Is the plan decomposed into clear, achievable tasks?
3. Are the implementation steps specific enough to execute?

Respond with EXACTLY one of these on the first line:
VALID: true
VALID: false

Then explain your reasoning briefly."

      local validation_model="${plan_model:-opus}"
      local validation_result
      validation_result=$(claude --print --output-format text -p "$validation_prompt" --model "$validation_model" < /dev/null 2>"${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log" || true)
      parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log"

      # Save validation result
      echo "$validation_result" > "$ARTIFACTS_DIR/plan-validation.md"

      # Only the first 5 lines are checked for the verdict marker.
      if echo "$validation_result" | head -5 | grep -qi "VALID: true"; then
        success "Plan validation passed"
        plan_valid=true
        break
      fi

      warn "Plan validation failed (attempt ${validation_attempts}/${max_validation_attempts})"

      # Analyze failure mode to decide how to recover (keyword heuristics
      # over the lowercased validator output).
      local failure_mode="unknown"
      local validation_lower
      validation_lower=$(echo "$validation_result" | tr '[:upper:]' '[:lower:]')
      if echo "$validation_lower" | grep -qE 'requirements? unclear|goal.*vague|ambiguous|underspecified'; then
        failure_mode="requirements_unclear"
      elif echo "$validation_lower" | grep -qE 'insufficient detail|not specific|too high.level|missing.*steps|lacks.*detail'; then
        failure_mode="insufficient_detail"
      elif echo "$validation_lower" | grep -qE 'scope too (large|broad)|too many|overly complex|break.*down'; then
        failure_mode="scope_too_large"
      fi

      emit_event "plan.validation_failure" \
        "issue=${ISSUE_NUMBER:-0}" \
        "attempt=$validation_attempts" \
        "failure_mode=$failure_mode"

      # Track repeated failures — escalate if stuck in a loop
      if [[ -f "$ARTIFACTS_DIR/.plan-failure-sig.txt" ]]; then
        local prev_sig
        prev_sig=$(cat "$ARTIFACTS_DIR/.plan-failure-sig.txt" 2>/dev/null || true)
        if [[ "$failure_mode" == "$prev_sig" && "$failure_mode" != "unknown" ]]; then
          warn "Same validation failure mode repeated ($failure_mode) — escalating"
          emit_event "plan.validation_escalated" \
            "issue=${ISSUE_NUMBER:-0}" \
            "failure_mode=$failure_mode"
          break
        fi
      fi
      echo "$failure_mode" > "$ARTIFACTS_DIR/.plan-failure-sig.txt"

      if [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; then
        info "Regenerating plan with validation feedback (mode: ${failure_mode})..."

        # Tailor regeneration prompt based on failure mode
        local failure_guidance=""
        case "$failure_mode" in
          requirements_unclear)
            failure_guidance="The validator found the requirements unclear. Add more specific acceptance criteria, input/output examples, and concrete success metrics." ;;
          insufficient_detail)
            failure_guidance="The validator found the plan lacks detail. Break each task into smaller, more specific implementation steps with exact file paths and function names." ;;
          scope_too_large)
            failure_guidance="The validator found the scope too large. Focus on the minimal viable implementation and defer non-essential features to follow-up tasks." ;;
        esac

        local regen_prompt="${plan_prompt}

IMPORTANT: A previous plan was rejected by validation. Issues found:
$(echo "$validation_result" | tail -20)
${failure_guidance:+
GUIDANCE: ${failure_guidance}}

Fix these issues in the new plan."

        # Overwrites plan_file in place; token log is reused.
        claude --print --model "$plan_model" --max-turns 25 \
          "$regen_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
        parse_claude_tokens "$_token_log"

        line_count=$(wc -l < "$plan_file" | xargs)
        info "Regenerated plan: ${DIM}$plan_file${RESET} (${line_count} lines)"
      fi
    done

    if [[ "$plan_valid" != "true" ]]; then
      warn "Plan validation did not pass after ${max_validation_attempts} attempts — proceeding anyway"
    fi

    emit_event "plan.validated" \
      "issue=${ISSUE_NUMBER:-0}" \
      "valid=${plan_valid}" \
      "attempts=${validation_attempts}"
  fi

  log_stage "plan" "Generated plan.md (${line_count} lines, $(echo "$checklist" | wc -l | xargs) tasks)"
}
552
+
553
+ stage_design() {
554
+ CURRENT_STAGE_ID="design"
555
+ local plan_file="$ARTIFACTS_DIR/plan.md"
556
+ local design_file="$ARTIFACTS_DIR/design.md"
557
+
558
+ if [[ ! -s "$plan_file" ]]; then
559
+ warn "No plan found — skipping design stage"
560
+ return 0
561
+ fi
562
+
563
+ if ! command -v claude &>/dev/null; then
564
+ error "Claude CLI not found — cannot generate design"
565
+ return 1
566
+ fi
567
+
568
+ info "Generating Architecture Decision Record..."
569
+
570
+ # Memory integration — inject context if memory system available
571
+ local memory_context=""
572
+ if type intelligence_search_memory &>/dev/null 2>&1; then
573
+ local mem_dir="${HOME}/.shipwright/memory"
574
+ memory_context=$(intelligence_search_memory "design stage architecture patterns for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
575
+ fi
576
+ if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
577
+ memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "design" 2>/dev/null) || true
578
+ fi
579
+
580
+ # Inject cross-pipeline discoveries for design stage
581
+ local design_discoveries=""
582
+ if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
583
+ design_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
584
+ fi
585
+
586
+ # Inject architecture model patterns if available
587
+ local arch_context=""
588
+ local repo_hash
589
+ repo_hash=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
590
+ local arch_model_file="${HOME}/.shipwright/memory/${repo_hash}/architecture.json"
591
+ if [[ -f "$arch_model_file" ]]; then
592
+ local arch_patterns
593
+ arch_patterns=$(jq -r '
594
+ [.patterns // [] | .[] | "- \(.name // "unnamed"): \(.description // "no description")"] | join("\n")
595
+ ' "$arch_model_file" 2>/dev/null) || true
596
+ local arch_layers
597
+ arch_layers=$(jq -r '
598
+ [.layers // [] | .[] | "- \(.name // "unnamed"): \(.path // "")"] | join("\n")
599
+ ' "$arch_model_file" 2>/dev/null) || true
600
+ if [[ -n "$arch_patterns" || -n "$arch_layers" ]]; then
601
+ arch_context="Previous designs in this repo follow these patterns:
602
+ ${arch_patterns:+Patterns:
603
+ ${arch_patterns}
604
+ }${arch_layers:+Layers:
605
+ ${arch_layers}}"
606
+ fi
607
+ fi
608
+
609
+ # Inject rejected design approaches and anti-patterns from memory
610
+ local design_antipatterns=""
611
+ if type intelligence_search_memory &>/dev/null 2>&1; then
612
+ local rejected_designs
613
+ rejected_designs=$(intelligence_search_memory "rejected design approaches anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
614
+ if [[ -n "$rejected_designs" ]]; then
615
+ design_antipatterns="
616
+ ## Rejected Approaches (from past reviews)
617
+ These design approaches were rejected in past reviews. Avoid repeating them:
618
+ ${rejected_designs}
619
+ "
620
+ fi
621
+ fi
622
+
623
+ # Build design prompt with plan + project context
624
+ local project_lang
625
+ project_lang=$(detect_project_lang)
626
+
627
+ local design_prompt="You are a senior software architect. Review the implementation plan below and produce an Architecture Decision Record (ADR).
628
+
629
+ ## Goal
630
+ ${GOAL}
631
+
632
+ ## Implementation Plan
633
+ $(cat "$plan_file")
634
+
635
+ ## Project Context
636
+ - Language: ${project_lang}
637
+ - Test command: ${TEST_CMD:-not configured}
638
+ - Task type: ${TASK_TYPE:-feature}
639
+ ${memory_context:+
640
+ ## Historical Context (from memory)
641
+ ${memory_context}
642
+ }${arch_context:+
643
+ ## Architecture Model (from previous designs)
644
+ ${arch_context}
645
+ }${design_antipatterns}${design_discoveries:+
646
+ ## Discoveries from Other Pipelines
647
+ ${design_discoveries}
648
+ }
649
+ ## Required Output — Architecture Decision Record
650
+
651
+ Produce this EXACT format:
652
+
653
+ # Design: ${GOAL}
654
+
655
+ ## Context
656
+ [What problem we're solving, constraints from the codebase]
657
+
658
+ ## Decision
659
+ [The chosen approach — be specific about patterns, data flow, error handling]
660
+
661
+ ## Alternatives Considered
662
+ 1. [Alternative A] — Pros: ... / Cons: ...
663
+ 2. [Alternative B] — Pros: ... / Cons: ...
664
+
665
+ ## Implementation Plan
666
+ - Files to create: [list with full paths]
667
+ - Files to modify: [list with full paths]
668
+ - Dependencies: [new deps if any]
669
+ - Risk areas: [fragile code, performance concerns]
670
+
671
+ ## Validation Criteria
672
+ - [ ] [How we'll know the design is correct — testable criteria]
673
+ - [ ] [Additional validation items]
674
+
675
+ Be concrete and specific. Reference actual file paths in the codebase. Consider edge cases and failure modes."
676
+
677
+ local design_model
678
+ design_model=$(jq -r --arg id "design" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
679
+ [[ -n "$MODEL" ]] && design_model="$MODEL"
680
+ [[ -z "$design_model" || "$design_model" == "null" ]] && design_model="opus"
681
+ # Intelligence model routing (when no explicit CLI --model override)
682
+ if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
683
+ design_model="$CLAUDE_MODEL"
684
+ fi
685
+
686
+ local _token_log="${ARTIFACTS_DIR}/.claude-tokens-design.log"
687
+ claude --print --model "$design_model" --max-turns 25 \
688
+ "$design_prompt" < /dev/null > "$design_file" 2>"$_token_log" || true
689
+ parse_claude_tokens "$_token_log"
690
+
691
+ if [[ ! -s "$design_file" ]]; then
692
+ error "Design generation failed — empty output"
693
+ return 1
694
+ fi
695
+
696
+ # Validate design content — detect API/CLI errors masquerading as designs
697
+ local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
698
+ _design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
699
+ if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
700
+ error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
701
+ return 1
702
+ fi
703
+
704
+ local line_count
705
+ line_count=$(wc -l < "$design_file" | xargs)
706
+ if [[ "$line_count" -lt 3 ]]; then
707
+ error "Design too short (${line_count} lines) — likely an error, not a real design"
708
+ return 1
709
+ fi
710
+ info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"
711
+
712
+ # Extract file lists for build stage awareness
713
+ local files_to_create files_to_modify
714
+ files_to_create=$(sed -n '/Files to create/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
715
+ files_to_modify=$(sed -n '/Files to modify/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
716
+
717
+ if [[ -n "$files_to_create" || -n "$files_to_modify" ]]; then
718
+ info "Design scope: ${DIM}$(echo "$files_to_create $files_to_modify" | grep -c '^\s*-' || echo 0) file(s)${RESET}"
719
+ fi
720
+
721
+ # Post design to GitHub issue
722
+ if [[ -n "$ISSUE_NUMBER" ]]; then
723
+ local design_summary
724
+ design_summary=$(head -60 "$design_file")
725
+ gh_comment_issue "$ISSUE_NUMBER" "## 📐 Architecture Decision Record
726
+
727
+ <details>
728
+ <summary>Click to expand ADR (${line_count} lines)</summary>
729
+
730
+ ${design_summary}
731
+
732
+ </details>
733
+
734
+ ---
735
+ _Generated by \`shipwright pipeline\` design stage at $(now_iso)_"
736
+ fi
737
+
738
+ # Push design to wiki
739
+ gh_wiki_page "Pipeline-Design-${ISSUE_NUMBER:-inline}" "$(<"$design_file")"
740
+
741
+ log_stage "design" "Generated design.md (${line_count} lines)"
742
+ }
743
+
744
+ # stage_build — run the main implementation loop (`sw loop`) for the pipeline.
+ # Assembles an "enriched goal" prompt (compact plan/design context, memory
+ # lessons, cross-pipeline discoveries, task list, file hotspots, security
+ # alerts, coverage baseline, predictive prevention hints), resolves the test
+ # command / iteration cap / agent count / model from $PIPELINE_CONFIG with
+ # CLI and intelligence overrides, then invokes `sw loop` and records tokens,
+ # commit count and commit-message quality.
+ # Globals read: GOAL, ARTIFACTS_DIR, TASKS_FILE, SCRIPT_DIR, PIPELINE_CONFIG,
+ #   TEST_CMD, MODEL, AGENTS, CLAUDE_MODEL, CI_MODE, ISSUE_NUMBER, BASE_BRANCH,
+ #   PROJECT_ROOT, NO_GITHUB, MAX_ITERATIONS_OVERRIDE, MAX_RESTARTS_OVERRIDE,
+ #   FAST_TEST_CMD_OVERRIDE, PIPELINE_NAME.
+ # Globals written: CURRENT_STAGE_ID, PIPELINE_JOB_ID (exported),
+ #   TOTAL_INPUT_TOKENS, TOTAL_OUTPUT_TOKENS, TOTAL_COST_USD.
+ # Returns: 0 when the build loop succeeds; 1 when it fails (after writing
+ #   $ARTIFACTS_DIR/failure-reason.txt on detected context exhaustion).
+ stage_build() {
745
+ local plan_file="$ARTIFACTS_DIR/plan.md"
746
+ local design_file="$ARTIFACTS_DIR/design.md"
747
+ local dod_file="$ARTIFACTS_DIR/dod.md"
748
+ local loop_args=()
749
+
750
+ # Memory integration — inject context if memory system available
+ # (in-process helper first, then the sw-memory.sh script as fallback).
751
+ local memory_context=""
752
+ if type intelligence_search_memory &>/dev/null 2>&1; then
753
+ local mem_dir="${HOME}/.shipwright/memory"
754
+ memory_context=$(intelligence_search_memory "build stage for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
755
+ fi
756
+ if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
757
+ memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "build" 2>/dev/null) || true
758
+ fi
759
+
760
+ # Build enriched goal with compact context (avoids prompt bloat)
761
+ local enriched_goal
762
+ enriched_goal=$(_pipeline_compact_goal "$GOAL" "$plan_file" "$design_file")
763
+
764
+ # Inject memory context
765
+ if [[ -n "$memory_context" ]]; then
766
+ enriched_goal="${enriched_goal}
767
+
768
+ Historical context (lessons from previous pipelines):
769
+ ${memory_context}"
770
+ fi
771
+
772
+ # Inject cross-pipeline discoveries for build stage
773
+ if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
774
+ local build_discoveries
775
+ build_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "src/*,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
776
+ if [[ -n "$build_discoveries" ]]; then
777
+ enriched_goal="${enriched_goal}
778
+
779
+ Discoveries from other pipelines:
780
+ ${build_discoveries}"
781
+ fi
782
+ fi
783
+
784
+ # Add task list context
785
+ if [[ -s "$TASKS_FILE" ]]; then
786
+ enriched_goal="${enriched_goal}
787
+
788
+ Task tracking (check off items as you complete them):
789
+ $(cat "$TASKS_FILE")"
790
+ fi
791
+
792
+ # Inject file hotspots from GitHub intelligence
793
+ if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_file_change_frequency &>/dev/null 2>&1; then
794
+ local build_hotspots
795
+ build_hotspots=$(gh_file_change_frequency 2>/dev/null | head -5 || true)
796
+ if [[ -n "$build_hotspots" ]]; then
797
+ enriched_goal="${enriched_goal}
798
+
799
+ File hotspots (most frequently changed — review these carefully):
800
+ ${build_hotspots}"
801
+ fi
802
+ fi
803
+
804
+ # Inject security alerts context
805
+ if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_security_alerts &>/dev/null 2>&1; then
806
+ local build_alerts
807
+ build_alerts=$(gh_security_alerts 2>/dev/null | head -3 || true)
808
+ if [[ -n "$build_alerts" ]]; then
809
+ enriched_goal="${enriched_goal}
810
+
811
+ Active security alerts (do not introduce new vulnerabilities):
812
+ ${build_alerts}"
813
+ fi
814
+ fi
815
+
816
+ # Inject coverage baseline
+ # (baseline keyed by a 12-char SHA-256 prefix of the project root path).
817
+ local repo_hash_build
818
+ repo_hash_build=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
819
+ local coverage_file_build="${HOME}/.shipwright/baselines/${repo_hash_build}/coverage.json"
820
+ if [[ -f "$coverage_file_build" ]]; then
821
+ local coverage_baseline
822
+ coverage_baseline=$(jq -r '.coverage_percent // empty' "$coverage_file_build" 2>/dev/null || true)
823
+ if [[ -n "$coverage_baseline" ]]; then
824
+ enriched_goal="${enriched_goal}
825
+
826
+ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
827
+ fi
828
+ fi
829
+
830
+ # Predictive: inject prevention hints when risk/memory patterns suggest build-stage failures
831
+ if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
832
+ local issue_json_build="{}"
833
+ [[ -n "${ISSUE_NUMBER:-}" ]] && issue_json_build=$(jq -n --arg title "${GOAL:-}" --arg num "${ISSUE_NUMBER:-}" '{title: $title, number: $num}')
834
+ local prevention_text
835
+ prevention_text=$(bash "$SCRIPT_DIR/sw-predictive.sh" inject-prevention "build" "$issue_json_build" 2>/dev/null || true)
836
+ if [[ -n "$prevention_text" ]]; then
837
+ enriched_goal="${enriched_goal}
838
+
839
+ ${prevention_text}"
840
+ fi
841
+ fi
842
+
+ # The enriched goal is the loop's positional argument; flags follow below.
843
+ loop_args+=("$enriched_goal")
844
+
845
+ # Build loop args from pipeline config + CLI overrides
846
+ CURRENT_STAGE_ID="build"
847
+
+ # Test command resolution order: $TEST_CMD env/CLI → pipeline config → auto-detect.
848
+ local test_cmd="${TEST_CMD}"
849
+ if [[ -z "$test_cmd" ]]; then
850
+ test_cmd=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.test_cmd) // .defaults.test_cmd // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
851
+ [[ "$test_cmd" == "null" ]] && test_cmd=""
852
+ fi
853
+ # Auto-detect if still empty
854
+ if [[ -z "$test_cmd" ]]; then
855
+ test_cmd=$(detect_test_cmd)
856
+ fi
857
+
858
+ local max_iter
859
+ max_iter=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.max_iterations) // 20' "$PIPELINE_CONFIG" 2>/dev/null) || true
860
+ [[ -z "$max_iter" || "$max_iter" == "null" ]] && max_iter=20
861
+ # CLI --max-iterations override (from CI strategy engine)
862
+ [[ -n "${MAX_ITERATIONS_OVERRIDE:-}" ]] && max_iter="$MAX_ITERATIONS_OVERRIDE"
863
+
864
+ local agents="${AGENTS}"
865
+ if [[ -z "$agents" ]]; then
866
+ agents=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.agents) // .defaults.agents // 1' "$PIPELINE_CONFIG" 2>/dev/null) || true
867
+ [[ -z "$agents" || "$agents" == "null" ]] && agents=1
868
+ fi
869
+
870
+ # Intelligence: suggest parallelism if design indicates independent work
+ # (advisory only — emits an event, does not change the agent count).
871
+ if [[ "${agents:-1}" -le 1 ]] && [[ -s "$ARTIFACTS_DIR/design.md" ]]; then
872
+ local design_lower
873
+ design_lower=$(tr '[:upper:]' '[:lower:]' < "$ARTIFACTS_DIR/design.md" 2>/dev/null || true)
874
+ if echo "$design_lower" | grep -qE 'independent (files|modules|components|services)|separate (modules|packages|directories)|parallel|no shared state'; then
875
+ info "Design mentions independent modules — consider --agents 2 for parallelism"
876
+ emit_event "build.parallelism_suggested" "issue=${ISSUE_NUMBER:-0}" "current_agents=$agents"
877
+ fi
878
+ fi
879
+
880
+ local audit
881
+ audit=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.audit) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
882
+ local quality
883
+ quality=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.quality_gates) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
884
+
+ # Model resolution order: CLI --model → config default → CLAUDE_MODEL routing
+ # → recruit recommendation. Later steps only apply when $MODEL is unset.
885
+ local build_model="${MODEL}"
886
+ if [[ -z "$build_model" ]]; then
887
+ build_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
888
+ [[ -z "$build_model" || "$build_model" == "null" ]] && build_model="opus"
889
+ fi
890
+ # Intelligence model routing (when no explicit CLI --model override)
891
+ if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
892
+ build_model="$CLAUDE_MODEL"
893
+ fi
894
+
895
+ # Recruit-powered model selection (when no explicit override)
896
+ if [[ -z "$MODEL" ]] && [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
897
+ local _recruit_goal="${GOAL:-}"
898
+ if [[ -n "$_recruit_goal" ]]; then
899
+ local _recruit_match
900
+ _recruit_match=$(bash "$SCRIPT_DIR/sw-recruit.sh" match --json "$_recruit_goal" 2>/dev/null) || true
901
+ if [[ -n "$_recruit_match" ]]; then
902
+ local _recruit_model
903
+ _recruit_model=$(echo "$_recruit_match" | jq -r '.model // ""' 2>/dev/null) || true
904
+ if [[ -n "$_recruit_model" && "$_recruit_model" != "null" && "$_recruit_model" != "" ]]; then
905
+ info "Recruit recommends model: ${CYAN}${_recruit_model}${RESET} for this task"
906
+ build_model="$_recruit_model"
907
+ fi
908
+ fi
909
+ fi
910
+ fi
911
+
912
+ [[ -n "$test_cmd" && "$test_cmd" != "null" ]] && loop_args+=(--test-cmd "$test_cmd")
913
+ loop_args+=(--max-iterations "$max_iter")
914
+ loop_args+=(--model "$build_model")
+ # 2>/dev/null guards the numeric [[ -gt ]] against a non-numeric $agents.
915
+ [[ "$agents" -gt 1 ]] 2>/dev/null && loop_args+=(--agents "$agents")
916
+
917
+ # Quality gates: always enabled in CI, otherwise from template config
918
+ if [[ "${CI_MODE:-false}" == "true" ]]; then
919
+ loop_args+=(--audit --audit-agent --quality-gates)
920
+ else
921
+ [[ "$audit" == "true" ]] && loop_args+=(--audit --audit-agent)
922
+ [[ "$quality" == "true" ]] && loop_args+=(--quality-gates)
923
+ fi
924
+
925
+ # Session restart capability
926
+ [[ -n "${MAX_RESTARTS_OVERRIDE:-}" ]] && loop_args+=(--max-restarts "$MAX_RESTARTS_OVERRIDE")
927
+ # Fast test mode
928
+ [[ -n "${FAST_TEST_CMD_OVERRIDE:-}" ]] && loop_args+=(--fast-test-cmd "$FAST_TEST_CMD_OVERRIDE")
929
+
930
+ # Definition of Done: use plan-extracted DoD if available
931
+ [[ -s "$dod_file" ]] && loop_args+=(--definition-of-done "$dod_file")
932
+
933
+ # Skip permissions in CI (no interactive terminal)
934
+ [[ "${CI_MODE:-false}" == "true" ]] && loop_args+=(--skip-permissions)
935
+
936
+ info "Starting build loop: ${DIM}shipwright loop${RESET} (max ${max_iter} iterations, ${agents} agent(s))"
937
+
938
+ # Post build start to GitHub
939
+ if [[ -n "$ISSUE_NUMBER" ]]; then
940
+ gh_comment_issue "$ISSUE_NUMBER" "🔨 **Build started** — \`shipwright loop\` with ${max_iter} max iterations, ${agents} agent(s), model: ${build_model}"
941
+ fi
942
+
943
+ local _token_log="${ARTIFACTS_DIR}/.claude-tokens-build.log"
944
+ export PIPELINE_JOB_ID="${PIPELINE_NAME:-pipeline-$$}"
+ # Failure path: parse tokens, sniff the progress file for context exhaustion,
+ # flag it for the daemon's retry logic, then fail the stage.
945
+ sw loop "${loop_args[@]}" < /dev/null 2>"$_token_log" || {
946
+ # NOTE(review): _loop_exit is captured but not referenced below — confirm intent.
+ local _loop_exit=$?
947
+ parse_claude_tokens "$_token_log"
948
+
949
+ # Detect context exhaustion from progress file
950
+ local _progress_file="${PWD}/.claude/loop-logs/progress.md"
951
+ if [[ -f "$_progress_file" ]]; then
952
+ local _prog_tests
953
+ _prog_tests=$(grep -oE 'Tests passing: (true|false)' "$_progress_file" 2>/dev/null | awk '{print $NF}' || echo "unknown")
954
+ if [[ "$_prog_tests" != "true" ]]; then
955
+ warn "Build loop exhausted with failing tests (context exhaustion)"
956
+ emit_event "pipeline.context_exhaustion" "issue=${ISSUE_NUMBER:-0}" "stage=build"
957
+ # Write flag for daemon retry logic
958
+ mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
959
+ echo "context_exhaustion" > "$ARTIFACTS_DIR/failure-reason.txt" 2>/dev/null || true
960
+ fi
961
+ fi
962
+
963
+ error "Build loop failed"
964
+ return 1
965
+ }
966
+ parse_claude_tokens "$_token_log"
967
+
968
+ # Read accumulated token counts from build loop (written by sw-loop.sh)
969
+ local _loop_token_file="${PROJECT_ROOT}/.claude/loop-logs/loop-tokens.json"
970
+ if [[ -f "$_loop_token_file" ]] && command -v jq &>/dev/null; then
971
+ local _loop_in _loop_out _loop_cost
972
+ _loop_in=$(jq -r '.input_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
973
+ _loop_out=$(jq -r '.output_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
974
+ _loop_cost=$(jq -r '.cost_usd // 0' "$_loop_token_file" 2>/dev/null || echo "0")
975
+ TOTAL_INPUT_TOKENS=$(( TOTAL_INPUT_TOKENS + ${_loop_in:-0} ))
976
+ TOTAL_OUTPUT_TOKENS=$(( TOTAL_OUTPUT_TOKENS + ${_loop_out:-0} ))
977
+ # NOTE(review): cost is overwritten, not accumulated — presumably
+ # loop-tokens.json already holds a cumulative figure; confirm.
+ if [[ -n "$_loop_cost" && "$_loop_cost" != "0" && "$_loop_cost" != "null" ]]; then
978
+ TOTAL_COST_USD="${_loop_cost}"
979
+ fi
980
+ if [[ "${_loop_in:-0}" -gt 0 || "${_loop_out:-0}" -gt 0 ]]; then
981
+ info "Build loop tokens: in=${_loop_in} out=${_loop_out} cost=\$${_loop_cost:-0}"
982
+ fi
983
+ fi
984
+
985
+ # Count commits made during build
986
+ local commit_count
987
+ commit_count=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | wc -l | xargs)
988
+ info "Build produced ${BOLD}$commit_count${RESET} commit(s)"
989
+
990
+ # Commit quality evaluation when intelligence is enabled
+ # (haiku rates the last 20 commit subjects 0-100; result is event-logged).
991
+ if type intelligence_search_memory &>/dev/null 2>&1 && command -v claude &>/dev/null && [[ "${commit_count:-0}" -gt 0 ]]; then
992
+ local commit_msgs
993
+ commit_msgs=$(git log --format="%s" "${BASE_BRANCH}..HEAD" 2>/dev/null | head -20)
994
+ local quality_score
995
+ quality_score=$(claude --print --output-format text -p "Rate the quality of these git commit messages on a scale of 0-100. Consider: focus (one thing per commit), clarity (describes the why), atomicity (small logical units). Reply with ONLY a number 0-100.
996
+
997
+ Commit messages:
998
+ ${commit_msgs}" --model haiku < /dev/null 2>/dev/null || true)
999
+ quality_score=$(echo "$quality_score" | grep -oE '^[0-9]+' | head -1 || true)
1000
+ if [[ -n "$quality_score" ]]; then
1001
+ emit_event "build.commit_quality" \
1002
+ "issue=${ISSUE_NUMBER:-0}" \
1003
+ "score=$quality_score" \
1004
+ "commit_count=$commit_count"
1005
+ if [[ "$quality_score" -lt 40 ]] 2>/dev/null; then
1006
+ warn "Commit message quality low (score: ${quality_score}/100)"
1007
+ else
1008
+ info "Commit quality score: ${quality_score}/100"
1009
+ fi
1010
+ fi
1011
+ fi
1012
+
1013
+ log_stage "build" "Build loop completed ($commit_count commits)"
1014
+ }
1015
+
1016
+ # stage_test — execute the project's test command and gate the pipeline on it.
+ # Resolves the command from $TEST_CMD, pipeline config, or detect_test_cmd;
+ # skips the stage (return 0) when none is found. On failure it echoes the most
+ # relevant log excerpt and posts it to the GitHub issue; on success it can
+ # enforce a configured coverage minimum, posts results to GitHub, and writes
+ # $ARTIFACTS_DIR/test-coverage.json for the pre-deploy gate.
+ # Globals read: TEST_CMD, PIPELINE_CONFIG, ARTIFACTS_DIR, ISSUE_NUMBER.
+ # Globals written: CURRENT_STAGE_ID.
+ # Returns: 0 on pass or skip; 1 on test failure or coverage below minimum.
+ stage_test() {
1017
+ CURRENT_STAGE_ID="test"
1018
+ local test_cmd="${TEST_CMD}"
1019
+ if [[ -z "$test_cmd" ]]; then
1020
+ test_cmd=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.test_cmd) // .defaults.test_cmd // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
1021
+ [[ -z "$test_cmd" || "$test_cmd" == "null" ]] && test_cmd=""
1022
+ fi
1023
+ # Auto-detect
1024
+ if [[ -z "$test_cmd" ]]; then
1025
+ test_cmd=$(detect_test_cmd)
1026
+ fi
1027
+ if [[ -z "$test_cmd" ]]; then
1028
+ warn "No test command found — skipping test stage"
1029
+ return 0
1030
+ fi
1031
+
1032
+ local coverage_min
1033
+ coverage_min=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.coverage_min) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
1034
+ [[ -z "$coverage_min" || "$coverage_min" == "null" ]] && coverage_min=0
1035
+
1036
+ local test_log="$ARTIFACTS_DIR/test-results.log"
1037
+
1038
+ info "Running tests: ${DIM}$test_cmd${RESET}"
1039
+ local test_exit=0
+ # Run via `bash -c` so multi-word commands from config work; capture all output.
1040
+ bash -c "$test_cmd" > "$test_log" 2>&1 || test_exit=$?
1041
+
1042
+ if [[ "$test_exit" -eq 0 ]]; then
1043
+ success "Tests passed"
1044
+ else
1045
+ error "Tests failed (exit code: $test_exit)"
1046
+ # Extract most relevant error section (assertion failures, stack traces)
1047
+ local relevant_output=""
1048
+ relevant_output=$(grep -A5 -E 'FAIL|AssertionError|Expected.*but.*got|Error:|panic:|assert' "$test_log" 2>/dev/null | tail -40 || true)
1049
+ if [[ -z "$relevant_output" ]]; then
1050
+ relevant_output=$(tail -40 "$test_log")
1051
+ fi
1052
+ echo "$relevant_output"
1053
+
1054
+ # Post failure to GitHub with more context
+ # (full log when short; head + tail excerpt when long).
1055
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1056
+ local log_lines
1057
+ log_lines=$(wc -l < "$test_log" 2>/dev/null || echo "0")
1058
+ local log_excerpt
1059
+ if [[ "$log_lines" -lt 60 ]]; then
1060
+ log_excerpt="$(cat "$test_log" 2>/dev/null || true)"
1061
+ else
1062
+ log_excerpt="$(head -20 "$test_log" 2>/dev/null || true)
1063
+ ... (${log_lines} lines total, showing head + tail) ...
1064
+ $(tail -30 "$test_log" 2>/dev/null || true)"
1065
+ fi
1066
+ gh_comment_issue "$ISSUE_NUMBER" "❌ **Tests failed** (exit code: $test_exit, ${log_lines} lines)
1067
+ \`\`\`
1068
+ ${log_excerpt}
1069
+ \`\`\`"
1070
+ fi
1071
+ return 1
1072
+ fi
1073
+
1074
+ # Coverage check — only enforce when coverage data is actually detected
1075
+ local coverage=""
1076
+ if [[ "$coverage_min" -gt 0 ]] 2>/dev/null; then
1077
+ coverage=$(parse_coverage_from_output "$test_log")
1078
+ if [[ -z "$coverage" ]]; then
1079
+ # No coverage data found — skip enforcement (project may not have coverage tooling)
1080
+ info "No coverage data detected — skipping coverage check (min: ${coverage_min}%)"
1081
+ # awk handles the comparison so fractional coverage values work too.
+ elif awk -v cov="$coverage" -v min="$coverage_min" 'BEGIN{exit !(cov < min)}' 2>/dev/null; then
1082
+ warn "Coverage ${coverage}% below minimum ${coverage_min}%"
1083
+ return 1
1084
+ else
1085
+ info "Coverage: ${coverage}% (min: ${coverage_min}%)"
1086
+ fi
1087
+ fi
1088
+
1089
+ # Post test results to GitHub
1090
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1091
+ local test_summary
+ # sed strips ANSI color escapes so the GitHub comment stays readable.
1092
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
1093
+ local cov_line=""
1094
+ [[ -n "$coverage" ]] && cov_line="
1095
+ **Coverage:** ${coverage}%"
1096
+ gh_comment_issue "$ISSUE_NUMBER" "✅ **Tests passed**${cov_line}
1097
+ <details>
1098
+ <summary>Test output</summary>
1099
+
1100
+ \`\`\`
1101
+ ${test_summary}
1102
+ \`\`\`
1103
+ </details>"
1104
+ fi
1105
+
1106
+ # Write coverage summary for pre-deploy gate
1107
+ local _cov_pct=0
1108
+ if [[ -f "$ARTIFACTS_DIR/test-results.log" ]]; then
1109
+ # NOTE(review): takes the first "<n>%" token in the log as coverage — may
+ # match an unrelated percentage; confirm against parse_coverage_from_output.
+ _cov_pct=$(grep -oE '[0-9]+%' "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -1 | tr -d '%' || true)
1110
+ _cov_pct="${_cov_pct:-0}"
1111
+ fi
1112
+ local _cov_tmp
+ # Write via mktemp + mv so the gate never reads a half-written JSON file.
1113
+ _cov_tmp=$(mktemp "${ARTIFACTS_DIR}/test-coverage.json.tmp.XXXXXX")
1114
+ printf '{"coverage_pct":%d}' "${_cov_pct:-0}" > "$_cov_tmp" && mv "$_cov_tmp" "$ARTIFACTS_DIR/test-coverage.json" || rm -f "$_cov_tmp"
1115
+
1116
+ log_stage "test" "Tests passed${coverage:+ (coverage: ${coverage}%)}"
1117
+ }
1118
+
1119
+ # stage_review — AI code review of the branch diff plus blocking gates.
+ # Diffs BASE_BRANCH...GIT_BRANCH (falling back to HEAD~5), builds a review
+ # prompt enriched with memory findings, CLAUDE.md conventions, CODEOWNERS and
+ # the Definition of Done, runs `claude`, counts Critical/Bug/Security/Warning
+ # findings (structured-JSON output first, markdown grep fallback), then
+ # applies the oversight gate and the critical/security blocking gate before
+ # posting the review summary to the GitHub issue.
+ # Globals read: ARTIFACTS_DIR, BASE_BRANCH, GIT_BRANCH, MODEL, CLAUDE_MODEL,
+ #   GOAL, PROJECT_ROOT, NO_GITHUB, CI_MODE, SCRIPT_DIR, SKIP_GATES,
+ #   PIPELINE_CONFIG, PIPELINE_NAME, ISSUE_NUMBER.
+ # Globals written: CURRENT_STAGE_ID.
+ # Returns: 0 when the review passes or is skipped (no diff / no claude CLI /
+ #   empty output); 1 when the oversight gate or blocking gate rejects.
+ stage_review() {
1120
+ CURRENT_STAGE_ID="review"
1121
+ local diff_file="$ARTIFACTS_DIR/review-diff.patch"
1122
+ local review_file="$ARTIFACTS_DIR/review.md"
1123
+
+ # Prefer triple-dot diff against the base branch; fall back to last 5 commits.
1124
+ git diff "${BASE_BRANCH}...${GIT_BRANCH}" > "$diff_file" 2>/dev/null || \
1125
+ git diff HEAD~5 > "$diff_file" 2>/dev/null || true
1126
+
1127
+ if [[ ! -s "$diff_file" ]]; then
1128
+ warn "No diff found — skipping review"
1129
+ return 0
1130
+ fi
1131
+
1132
+ if ! command -v claude &>/dev/null; then
1133
+ warn "Claude CLI not found — skipping AI review"
1134
+ return 0
1135
+ fi
1136
+
1137
+ local diff_stats
1138
+ diff_stats=$(git diff --stat "${BASE_BRANCH}...${GIT_BRANCH}" 2>/dev/null | tail -1 || echo "")
1139
+ info "Running AI code review... ${DIM}($diff_stats)${RESET}"
1140
+
1141
+ # Semantic risk scoring when intelligence is enabled
1142
+ if type intelligence_search_memory &>/dev/null 2>&1 && command -v claude &>/dev/null; then
1143
+ local diff_files
1144
+ diff_files=$(git diff --name-only "${BASE_BRANCH}...${GIT_BRANCH}" 2>/dev/null || true)
1145
+ local risk_score="low"
1146
+ # Fast heuristic: flag high-risk file patterns
1147
+ if echo "$diff_files" | grep -qiE 'migration|schema|auth|crypto|security|password|token|secret|\.env'; then
1148
+ risk_score="high"
1149
+ elif echo "$diff_files" | grep -qiE 'api|route|controller|middleware|hook'; then
1150
+ risk_score="medium"
1151
+ fi
1152
+ emit_event "review.risk_assessed" \
1153
+ "issue=${ISSUE_NUMBER:-0}" \
1154
+ "risk=$risk_score" \
1155
+ "files_changed=$(echo "$diff_files" | wc -l | xargs)"
1156
+ if [[ "$risk_score" == "high" ]]; then
1157
+ warn "High-risk changes detected (DB schema, auth, crypto, or secrets)"
1158
+ fi
1159
+ fi
1160
+
1161
+ local review_model="${MODEL:-opus}"
1162
+ # Intelligence model routing (when no explicit CLI --model override)
1163
+ if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
1164
+ review_model="$CLAUDE_MODEL"
1165
+ fi
1166
+
1167
+ # Build review prompt with project context
+ # (the **[SEVERITY]** format below is what the counting greps match on).
1168
+ local review_prompt="You are a senior code reviewer. Review this git diff thoroughly.
1169
+
1170
+ For each issue found, use this format:
1171
+ - **[SEVERITY]** file:line — description
1172
+
1173
+ Severity levels: Critical, Bug, Security, Warning, Suggestion
1174
+
1175
+ Focus on:
1176
+ 1. Logic bugs and edge cases
1177
+ 2. Security vulnerabilities (injection, XSS, auth bypass, etc.)
1178
+ 3. Error handling gaps
1179
+ 4. Performance issues
1180
+ 5. Missing validation
1181
+ 6. Project convention violations (see conventions below)
1182
+
1183
+ Be specific. Reference exact file paths and line numbers. Only flag genuine issues.
1184
+ If no issues are found, write: \"Review clean — no issues found.\"
1185
+ "
1186
+
1187
+ # Inject previous review findings and anti-patterns from memory
1188
+ if type intelligence_search_memory &>/dev/null 2>&1; then
1189
+ local review_memory
1190
+ review_memory=$(intelligence_search_memory "code review findings anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
1191
+ if [[ -n "$review_memory" ]]; then
1192
+ review_prompt+="
1193
+ ## Known Issues from Previous Reviews
1194
+ These anti-patterns and issues have been found in past reviews of this codebase. Flag them if they recur:
1195
+ ${review_memory}
1196
+ "
1197
+ fi
1198
+ fi
1199
+
1200
+ # Inject project conventions if CLAUDE.md exists
1201
+ local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
1202
+ if [[ -f "$claudemd" ]]; then
1203
+ local conventions
1204
+ conventions=$(grep -A2 'Common Pitfalls\|Shell Standards\|Bash 3.2' "$claudemd" 2>/dev/null | head -20 || true)
1205
+ if [[ -n "$conventions" ]]; then
1206
+ review_prompt+="
1207
+ ## Project Conventions
1208
+ ${conventions}
1209
+ "
1210
+ fi
1211
+ fi
1212
+
1213
+ # Inject CODEOWNERS focus areas for review
1214
+ if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_codeowners &>/dev/null 2>&1; then
1215
+ local review_owners
1216
+ review_owners=$(gh_codeowners 2>/dev/null | head -10 || true)
1217
+ if [[ -n "$review_owners" ]]; then
1218
+ review_prompt+="
1219
+ ## Code Owners (focus areas)
1220
+ ${review_owners}
1221
+ "
1222
+ fi
1223
+ fi
1224
+
1225
+ # Inject Definition of Done if present
1226
+ local dod_file="$PROJECT_ROOT/.claude/DEFINITION-OF-DONE.md"
1227
+ if [[ -f "$dod_file" ]]; then
1228
+ review_prompt+="
1229
+ ## Definition of Done (verify these)
1230
+ $(cat "$dod_file")
1231
+ "
1232
+ fi
1233
+
+ # The diff goes last so the instructions and context precede it in the prompt.
1234
+ review_prompt+="
1235
+ ## Diff to Review
1236
+ $(cat "$diff_file")"
1237
+
1238
+ # Build claude args — add --dangerously-skip-permissions in CI
1239
+ local review_args=(--print --model "$review_model" --max-turns 25)
1240
+ if [[ "${CI_MODE:-false}" == "true" ]]; then
1241
+ review_args+=(--dangerously-skip-permissions)
1242
+ fi
1243
+
1244
+ claude "${review_args[@]}" "$review_prompt" < /dev/null > "$review_file" 2>"${ARTIFACTS_DIR}/.claude-tokens-review.log" || true
1245
+ parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-review.log"
1246
+
1247
+ if [[ ! -s "$review_file" ]]; then
1248
+ warn "Review produced no output — check ${ARTIFACTS_DIR}/.claude-tokens-review.log for errors"
1249
+ return 0
1250
+ fi
1251
+
1252
+ # Extract severity counts — try JSON structure first, then grep fallback
1253
+ local critical_count=0 bug_count=0 warning_count=0
1254
+
1255
+ # Check if review output is structured JSON (e.g. from structured review tools)
1256
+ local json_parsed=false
1257
+ if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
1258
+ local j_critical j_bug j_warning
1259
+ j_critical=$(jq -r '.issues | map(select(.severity == "Critical")) | length' "$review_file" 2>/dev/null || echo "")
1260
+ if [[ -n "$j_critical" && "$j_critical" != "null" ]]; then
1261
+ critical_count="$j_critical"
1262
+ bug_count=$(jq -r '.issues | map(select(.severity == "Bug" or .severity == "Security")) | length' "$review_file" 2>/dev/null || echo "0")
1263
+ warning_count=$(jq -r '.issues | map(select(.severity == "Warning" or .severity == "Suggestion")) | length' "$review_file" 2>/dev/null || echo "0")
1264
+ json_parsed=true
1265
+ fi
1266
+ fi
1267
+
1268
+ # Grep fallback for markdown-formatted review output
+ # (grep -c counts matching LINES, so one finding per line is assumed;
+ # `|| true` keeps set -e happy when grep finds nothing and exits 1).
1269
+ if [[ "$json_parsed" != "true" ]]; then
1270
+ critical_count=$(grep -ciE '\*\*\[?Critical\]?\*\*' "$review_file" 2>/dev/null || true)
1271
+ critical_count="${critical_count:-0}"
1272
+ bug_count=$(grep -ciE '\*\*\[?(Bug|Security)\]?\*\*' "$review_file" 2>/dev/null || true)
1273
+ bug_count="${bug_count:-0}"
1274
+ warning_count=$(grep -ciE '\*\*\[?(Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
1275
+ warning_count="${warning_count:-0}"
1276
+ fi
1277
+ local total_issues=$((critical_count + bug_count + warning_count))
1278
+
1279
+ if [[ "$critical_count" -gt 0 ]]; then
1280
+ error "Review found ${BOLD}$critical_count critical${RESET} issue(s) — see $review_file"
1281
+ elif [[ "$bug_count" -gt 0 ]]; then
1282
+ warn "Review found $bug_count bug/security issue(s) — see ${DIM}$review_file${RESET}"
1283
+ elif [[ "$total_issues" -gt 0 ]]; then
1284
+ info "Review found $total_issues suggestion(s)"
1285
+ else
1286
+ success "Review clean"
1287
+ fi
1288
+
1289
+ # ── Oversight gate: pipeline review/quality stages block on verdict ──
1290
+ if [[ -x "$SCRIPT_DIR/sw-oversight.sh" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
1291
+ local reject_reason=""
1292
+ local _sec_count
1293
+ _sec_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
1294
+ _sec_count="${_sec_count:-0}"
1295
+ local _blocking=$((critical_count + _sec_count))
1296
+ [[ "$_blocking" -gt 0 ]] && reject_reason="Review found ${_blocking} critical/security issue(s)"
1297
+ if ! bash "$SCRIPT_DIR/sw-oversight.sh" gate --diff "$diff_file" --description "${GOAL:-Pipeline review}" --reject-if "$reject_reason" >/dev/null 2>&1; then
1298
+ error "Oversight gate rejected — blocking pipeline"
1299
+ emit_event "review.oversight_blocked" "issue=${ISSUE_NUMBER:-0}"
1300
+ log_stage "review" "BLOCKED: oversight gate rejected"
1301
+ return 1
1302
+ fi
1303
+ fi
1304
+
1305
+ # ── Review Blocking Gate ──
1306
+ # Block pipeline on critical/security issues unless compound_quality handles them
1307
+ local security_count
1308
+ security_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
1309
+ security_count="${security_count:-0}"
1310
+
1311
+ local blocking_issues=$((critical_count + security_count))
1312
+
1313
+ if [[ "$blocking_issues" -gt 0 ]]; then
1314
+ # Check if compound_quality stage is enabled — if so, let it handle issues
1315
+ local compound_enabled="false"
1316
+ if [[ -n "${PIPELINE_CONFIG:-}" && -f "${PIPELINE_CONFIG:-/dev/null}" ]]; then
1317
+ compound_enabled=$(jq -r '.stages[] | select(.id == "compound_quality") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null) || true
1318
+ [[ -z "$compound_enabled" || "$compound_enabled" == "null" ]] && compound_enabled="false"
1319
+ fi
1320
+
1321
+ # Check if this is a fast template (don't block fast pipelines)
1322
+ local is_fast="false"
1323
+ if [[ "${PIPELINE_NAME:-}" == "fast" || "${PIPELINE_NAME:-}" == "hotfix" ]]; then
1324
+ is_fast="true"
1325
+ fi
1326
+
1327
+ if [[ "$compound_enabled" == "true" ]]; then
1328
+ info "Review found ${blocking_issues} critical/security issue(s) — compound_quality stage will handle"
1329
+ elif [[ "$is_fast" == "true" ]]; then
1330
+ warn "Review found ${blocking_issues} critical/security issue(s) — fast template, not blocking"
1331
+ elif [[ "${SKIP_GATES:-false}" == "true" ]]; then
1332
+ warn "Review found ${blocking_issues} critical/security issue(s) — skip-gates mode, not blocking"
1333
+ else
1334
+ error "Review found ${BOLD}${blocking_issues} critical/security issue(s)${RESET} — blocking pipeline"
1335
+ emit_event "review.blocked" \
1336
+ "issue=${ISSUE_NUMBER:-0}" \
1337
+ "critical=${critical_count}" \
1338
+ "security=${security_count}"
1339
+
1340
+ # Save blocking issues for self-healing context
1341
+ grep -iE '\*\*\[?(Critical|Security)\]?\*\*' "$review_file" > "$ARTIFACTS_DIR/review-blockers.md" 2>/dev/null || true
1342
+
1343
+ # Post review to GitHub before failing
1344
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1345
+ local review_summary
1346
+ review_summary=$(head -40 "$review_file")
1347
+ gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review — ❌ Blocked
1348
+
1349
+ **Stats:** $diff_stats
1350
+ **Blocking issues:** ${blocking_issues} (${critical_count} critical, ${security_count} security)
1351
+
1352
+ <details>
1353
+ <summary>Review details</summary>
1354
+
1355
+ ${review_summary}
1356
+
1357
+ </details>
1358
+
1359
+ _Pipeline will attempt self-healing rebuild._"
1360
+ fi
1361
+
1362
+ log_stage "review" "BLOCKED: $blocking_issues critical/security issues found"
1363
+ return 1
1364
+ fi
1365
+ fi
1366
+
1367
+ # Post review to GitHub issue
1368
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1369
+ local review_summary
1370
+ review_summary=$(head -40 "$review_file")
1371
+ gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review
1372
+
1373
+ **Stats:** $diff_stats
1374
+ **Issues found:** $total_issues (${critical_count} critical, ${bug_count} bugs, ${warning_count} suggestions)
1375
+
1376
+ <details>
1377
+ <summary>Review details</summary>
1378
+
1379
+ ${review_summary}
1380
+
1381
+ </details>"
1382
+ fi
1383
+
1384
+ log_stage "review" "AI review complete ($total_issues issues: $critical_count critical, $bug_count bugs, $warning_count suggestions)"
1385
+ }
1386
+
1387
# ──────────────────────────────────────────────────────────────────────
# stage_pr — push the work branch and open (or update) the GitHub PR.
#
# Globals read:  ARTIFACTS_DIR, BASE_BRANCH, GIT_BRANCH, GOAL, LABELS,
#                ISSUE_NUMBER, ISSUE_LABELS, ISSUE_MILESTONE, REVIEWERS,
#                PIPELINE_CONFIG, PIPELINE_NAME, PIPELINE_START_EPOCH,
#                REPO_OWNER, REPO_NAME, NO_GITHUB, MODEL, AGENTS, SCRIPT_DIR
# Globals set:   CURRENT_STAGE_ID, PR_NUMBER
# Artifacts:     $ARTIFACTS_DIR/pr-url.txt, simulation-review.json,
#                architecture-validation.json
# Returns:       0 on success; 1 when there are no real changes, the
#                push fails, or PR creation fails.
# ──────────────────────────────────────────────────────────────────────
stage_pr() {
  CURRENT_STAGE_ID="pr"
  local plan_file="$ARTIFACTS_DIR/plan.md"
  local test_log="$ARTIFACTS_DIR/test-results.log"
  local review_file="$ARTIFACTS_DIR/review.md"

  # ── PR Hygiene Checks (informational) ──
  local hygiene_commit_count
  hygiene_commit_count=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | wc -l | xargs)
  hygiene_commit_count="${hygiene_commit_count:-0}"

  if [[ "$hygiene_commit_count" -gt 20 ]]; then
    warn "PR has ${hygiene_commit_count} commits — consider squashing before merge"
  fi

  # Check for WIP/fixup/squash commits (expanded patterns)
  local wip_commits
  wip_commits=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | grep -ciE '^[0-9a-f]+ (WIP|fixup!|squash!|TODO|HACK|TEMP|BROKEN|wip[:-]|temp[:-]|broken[:-]|do not merge)' || true)
  wip_commits="${wip_commits:-0}"
  if [[ "$wip_commits" -gt 0 ]]; then
    warn "Branch has ${wip_commits} WIP/fixup/squash/temp commit(s) — consider cleaning up"
  fi

  # ── PR Quality Gate: reject PRs with no real code changes ──
  # "Real" = anything outside pipeline bookkeeping directories.
  local real_files
  real_files=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null | grep -v '^\.claude/' | grep -v '^\.github/' || true)
  if [[ -z "$real_files" ]]; then
    error "No real code changes detected — only pipeline artifacts (.claude/ logs)."
    error "The build agent did not produce meaningful changes. Skipping PR creation."
    emit_event "pr.rejected" "issue=${ISSUE_NUMBER:-0}" "reason=no_real_changes"
    # Mark issue so auto-retry knows not to retry empty builds
    if [[ -n "${ISSUE_NUMBER:-}" && "${ISSUE_NUMBER:-0}" != "0" ]]; then
      gh issue comment "$ISSUE_NUMBER" --body "<!-- SHIPWRIGHT-NO-CHANGES: true -->" 2>/dev/null || true
    fi
    return 1
  fi
  local real_file_count
  real_file_count=$(echo "$real_files" | wc -l | xargs)
  info "PR quality gate: ${real_file_count} real file(s) changed"

  # Commit any uncommitted changes left by the build agent
  if ! git diff --quiet 2>/dev/null || ! git diff --cached --quiet 2>/dev/null; then
    info "Committing remaining uncommitted changes..."
    git add -A 2>/dev/null || true
    git commit -m "chore: pipeline cleanup — commit remaining build changes" --no-verify 2>/dev/null || true
  fi

  # Auto-rebase onto latest base branch before PR
  auto_rebase || {
    warn "Rebase/merge failed — pushing as-is"
  }

  # Push branch
  info "Pushing branch: $GIT_BRANCH"
  git push -u origin "$GIT_BRANCH" --force-with-lease 2>/dev/null || {
    # Retry with regular push if force-with-lease fails (first push)
    git push -u origin "$GIT_BRANCH" 2>/dev/null || {
      error "Failed to push branch"
      return 1
    }
  }

  # ── Developer Simulation (pre-PR review) ──
  local simulation_summary=""
  if type simulation_review &>/dev/null 2>&1; then
    local sim_enabled
    sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
    # Also check daemon-config
    local daemon_cfg=".claude/daemon-config.json"
    if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
      sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
    fi
    if [[ "$sim_enabled" == "true" ]]; then
      info "Running developer simulation review..."
      local diff_for_sim
      diff_for_sim=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
      if [[ -n "$diff_for_sim" ]]; then
        local sim_result
        sim_result=$(simulation_review "$diff_for_sim" "${GOAL:-}" 2>/dev/null || echo "")
        if [[ -n "$sim_result" && "$sim_result" != *'"error"'* ]]; then
          echo "$sim_result" > "$ARTIFACTS_DIR/simulation-review.json"
          local sim_count
          sim_count=$(echo "$sim_result" | jq 'length' 2>/dev/null || echo "0")
          simulation_summary="**Developer simulation:** ${sim_count} reviewer concerns pre-addressed"
          success "Simulation complete: ${sim_count} concerns found and addressed"
          emit_event "simulation.complete" "issue=${ISSUE_NUMBER:-0}" "concerns=${sim_count}"
        else
          info "Simulation returned no actionable concerns"
        fi
      fi
    fi
  fi

  # ── Architecture Validation (pre-PR check) ──
  local arch_summary=""
  if type architecture_validate_changes &>/dev/null 2>&1; then
    local arch_enabled
    arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
    local daemon_cfg=".claude/daemon-config.json"
    if [[ "$arch_enabled" != "true" && -f "$daemon_cfg" ]]; then
      arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
    fi
    if [[ "$arch_enabled" == "true" ]]; then
      info "Validating architecture..."
      local diff_for_arch
      diff_for_arch=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
      if [[ -n "$diff_for_arch" ]]; then
        local arch_result
        arch_result=$(architecture_validate_changes "$diff_for_arch" "" 2>/dev/null || echo "")
        if [[ -n "$arch_result" && "$arch_result" != *'"error"'* ]]; then
          echo "$arch_result" > "$ARTIFACTS_DIR/architecture-validation.json"
          local violation_count
          violation_count=$(echo "$arch_result" | jq '[.violations[]? | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
          arch_summary="**Architecture validation:** ${violation_count} violations"
          if [[ "$violation_count" -gt 0 ]]; then
            warn "Architecture: ${violation_count} high/critical violations found"
          else
            success "Architecture validation passed"
          fi
          emit_event "architecture.validated" "issue=${ISSUE_NUMBER:-0}" "violations=${violation_count}"
        else
          info "Architecture validation returned no results"
        fi
      fi
    fi
  fi

  # Pre-PR diff gate — verify meaningful code changes exist (not just bookkeeping)
  local real_changes
  real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
    -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
    ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
    ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
  if [[ "${real_changes:-0}" -eq 0 ]]; then
    error "No meaningful code changes detected — only bookkeeping files modified"
    error "Refusing to create PR with zero real changes"
    return 1
  fi
  info "Pre-PR diff check: ${real_changes} real files changed"

  # Build PR title — prefer GOAL over plan file first line
  # (plan file first line often contains Claude analysis text, not a clean title)
  local pr_title=""
  if [[ -n "${GOAL:-}" ]]; then
    pr_title=$(echo "$GOAL" | cut -c1-70)
  fi
  if [[ -z "$pr_title" ]] && [[ -s "$plan_file" ]]; then
    pr_title=$(head -1 "$plan_file" 2>/dev/null | sed 's/^#* *//' | cut -c1-70)
  fi
  [[ -z "$pr_title" ]] && pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"

  # Sanitize: reject PR titles that look like error messages
  if echo "$pr_title" | grep -qiE 'Invalid API|API key|authentication_error|rate_limit|CLI error|no useful output'; then
    warn "PR title looks like an error message: $pr_title"
    pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
  fi

  # Build comprehensive PR body
  local plan_summary=""
  if [[ -s "$plan_file" ]]; then
    plan_summary=$(head -20 "$plan_file" 2>/dev/null | tail -15)
  fi

  local test_summary=""
  if [[ -s "$test_log" ]]; then
    # Strip ANSI color escapes so the PR body renders cleanly
    test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
  fi

  local review_summary=""
  if [[ -s "$review_file" ]]; then
    local total_issues=0
    # Try JSON structured output first
    if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
      total_issues=$(jq -r '.issues | length' "$review_file" 2>/dev/null || echo "0")
    fi
    # Grep fallback for markdown
    if [[ "${total_issues:-0}" -eq 0 ]]; then
      total_issues=$(grep -ciE '\*\*\[?(Critical|Bug|Security|Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
      total_issues="${total_issues:-0}"
    fi
    review_summary="**Code review:** $total_issues issues found"
  fi

  local closes_line=""
  [[ -n "${GITHUB_ISSUE:-}" ]] && closes_line="Closes ${GITHUB_ISSUE}"

  local diff_stats
  diff_stats=$(git diff --stat "${BASE_BRANCH}...${GIT_BRANCH}" 2>/dev/null | tail -1 || echo "")

  local commit_count
  commit_count=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | wc -l | xargs)

  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  local pr_body
  pr_body="$(cat <<EOF
## Summary
${plan_summary:-$GOAL}

## Changes
${diff_stats}
${commit_count} commit(s) via \`shipwright pipeline\` (${PIPELINE_NAME})

## Test Results
\`\`\`
${test_summary:-No test output}
\`\`\`

${review_summary}
${simulation_summary}
${arch_summary}

${closes_line}

---

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Duration | ${total_dur:-—} |
| Model | ${MODEL:-opus} |
| Agents | ${AGENTS:-1} |

Generated by \`shipwright pipeline\`
EOF
)"

  # Build gh pr create args
  local pr_args=(--title "$pr_title" --body "$pr_body" --base "$BASE_BRANCH")

  # Propagate labels from issue + CLI
  local all_labels="${LABELS}"
  if [[ -n "$ISSUE_LABELS" ]]; then
    if [[ -n "$all_labels" ]]; then
      all_labels="${all_labels},${ISSUE_LABELS}"
    else
      all_labels="$ISSUE_LABELS"
    fi
  fi
  if [[ -n "$all_labels" ]]; then
    pr_args+=(--label "$all_labels")
  fi

  # Auto-detect or use provided reviewers
  local reviewers="${REVIEWERS}"
  if [[ -z "$reviewers" ]]; then
    reviewers=$(detect_reviewers)
  fi
  if [[ -n "$reviewers" ]]; then
    pr_args+=(--reviewer "$reviewers")
    info "Reviewers: ${DIM}$reviewers${RESET}"
  fi

  # Propagate milestone
  if [[ -n "$ISSUE_MILESTONE" ]]; then
    pr_args+=(--milestone "$ISSUE_MILESTONE")
    info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
  fi

  # Check for existing open PR on this branch to avoid duplicates (issue #12)
  local pr_url=""
  local existing_pr
  existing_pr=$(gh pr list --head "$GIT_BRANCH" --state open --json number,url --jq '.[0]' 2>/dev/null || echo "")
  if [[ -n "$existing_pr" && "$existing_pr" != "null" ]]; then
    local existing_pr_number existing_pr_url
    existing_pr_number=$(echo "$existing_pr" | jq -r '.number' 2>/dev/null || echo "")
    existing_pr_url=$(echo "$existing_pr" | jq -r '.url' 2>/dev/null || echo "")
    info "Updating existing PR #$existing_pr_number instead of creating duplicate"
    gh pr edit "$existing_pr_number" --title "$pr_title" --body "$pr_body" 2>/dev/null || true
    pr_url="$existing_pr_url"
  else
    info "Creating PR..."
    # Use an unpredictable mktemp file for captured stderr (a fixed /tmp
    # path is a symlink/clobber hazard and races with concurrent pipelines)
    local pr_stderr_file
    pr_stderr_file=$(mktemp "${TMPDIR:-/tmp}/shipwright-pr-stderr.XXXXXX")
    local pr_stderr pr_exit=0
    pr_url=$(gh pr create "${pr_args[@]}" 2>"$pr_stderr_file") || pr_exit=$?
    pr_stderr=$(cat "$pr_stderr_file" 2>/dev/null || true)
    rm -f -- "$pr_stderr_file"

    # gh pr create may return non-zero for reviewer issues but still create the PR
    if [[ "$pr_exit" -ne 0 ]]; then
      if [[ "$pr_url" == *"github.com"* ]]; then
        # PR was created but something non-fatal failed (e.g., reviewer not found)
        warn "PR created with warnings: ${pr_stderr:-unknown}"
      else
        error "PR creation failed: ${pr_stderr:-$pr_url}"
        return 1
      fi
    fi
  fi

  success "PR created: ${BOLD}$pr_url${RESET}"
  echo "$pr_url" > "$ARTIFACTS_DIR/pr-url.txt"

  # Extract PR number
  PR_NUMBER=$(echo "$pr_url" | grep -oE '[0-9]+$' || true)

  # ── Intelligent Reviewer Selection (GraphQL-enhanced) ──
  # Only when no explicit reviewers were supplied or auto-detected above.
  if [[ "${NO_GITHUB:-false}" != "true" && -n "$PR_NUMBER" && -z "$reviewers" ]]; then
    local reviewer_assigned=false

    # Try CODEOWNERS-based routing via GraphQL API
    if type gh_codeowners &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      local codeowners_json
      codeowners_json=$(gh_codeowners "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
      if [[ "$codeowners_json" != "[]" && -n "$codeowners_json" ]]; then
        local changed_files
        changed_files=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
        if [[ -n "$changed_files" ]]; then
          local co_reviewers
          co_reviewers=$(echo "$codeowners_json" | jq -r '.[].owners[]' 2>/dev/null | sort -u | head -3 || true)
          if [[ -n "$co_reviewers" ]]; then
            local rev
            while IFS= read -r rev; do
              rev="${rev#@}"
              [[ -n "$rev" ]] && gh pr edit "$PR_NUMBER" --add-reviewer "$rev" 2>/dev/null || true
            done <<< "$co_reviewers"
            info "Requested review from CODEOWNERS: $(echo "$co_reviewers" | tr '\n' ',' | sed 's/,$//')"
            reviewer_assigned=true
          fi
        fi
      fi
    fi

    # Fallback: contributor-based routing via GraphQL API
    if [[ "$reviewer_assigned" != "true" ]] && type gh_contributors &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      local contributors_json
      contributors_json=$(gh_contributors "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
      local top_contributor
      top_contributor=$(echo "$contributors_json" | jq -r '.[0].login // ""' 2>/dev/null || echo "")
      local current_user
      current_user=$(gh api user --jq '.login' 2>/dev/null || echo "")
      # Never request a review from the PR author themselves
      if [[ -n "$top_contributor" && "$top_contributor" != "$current_user" ]]; then
        gh pr edit "$PR_NUMBER" --add-reviewer "$top_contributor" 2>/dev/null || true
        info "Requested review from top contributor: $top_contributor"
        reviewer_assigned=true
      fi
    fi

    # Final fallback: auto-approve if no reviewers assigned
    if [[ "$reviewer_assigned" != "true" ]]; then
      gh pr review "$PR_NUMBER" --approve 2>/dev/null || warn "Could not auto-approve PR"
    fi
  fi

  # Update issue with PR link
  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_remove_label "$ISSUE_NUMBER" "pipeline/in-progress"
    gh_add_labels "$ISSUE_NUMBER" "pipeline/pr-created"
    gh_comment_issue "$ISSUE_NUMBER" "🎉 **PR created:** ${pr_url}

Pipeline duration so far: ${total_dur:-unknown}"

    # Notify tracker of review/PR creation
    "$SCRIPT_DIR/sw-tracker.sh" notify "review" "$ISSUE_NUMBER" "$pr_url" 2>/dev/null || true
  fi

  # Wait for CI if configured
  local wait_ci
  wait_ci=$(jq -r --arg id "pr" '(.stages[] | select(.id == $id) | .config.wait_ci) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  if [[ "$wait_ci" == "true" ]]; then
    info "Waiting for CI checks..."
    gh pr checks --watch 2>/dev/null || warn "CI checks did not all pass"
  fi

  log_stage "pr" "PR created: $pr_url (${reviewers:+reviewers: $reviewers})"
}
1755
+
1756
# ──────────────────────────────────────────────────────────────────────
# stage_merge — wait for CI and merge the PR for the current branch.
#
# Honors branch protection (skips auto-merge when required approvals are
# missing), supports an adaptive CI-wait timeout derived from the p90 of
# recorded historical CI times, and records this run's CI wait back into
# ~/.shipwright/baselines/<repo-hash>/ci-times.json.
#
# Globals read:  NO_GITHUB, REPO_OWNER, REPO_NAME, BASE_BRANCH, GIT_BRANCH,
#                PIPELINE_CONFIG, PROJECT_ROOT, ISSUE_NUMBER
# Globals set:   CURRENT_STAGE_ID
# Returns:       0 on merge/auto-merge enabled or benign skip; 1 on CI
#                failure or merge failure.
# ──────────────────────────────────────────────────────────────────────
stage_merge() {
  CURRENT_STAGE_ID="merge"

  if [[ "$NO_GITHUB" == "true" ]]; then
    info "Merge stage skipped (--no-github)"
    return 0
  fi

  # ── Branch Protection Check ──
  if type gh_branch_protection &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
    local protection_json
    protection_json=$(gh_branch_protection "$REPO_OWNER" "$REPO_NAME" "${BASE_BRANCH:-main}" 2>/dev/null || echo '{"protected": false}')
    local is_protected
    is_protected=$(echo "$protection_json" | jq -r '.protected // false' 2>/dev/null || echo "false")
    if [[ "$is_protected" == "true" ]]; then
      local required_reviews
      required_reviews=$(echo "$protection_json" | jq -r '.required_pull_request_reviews.required_approving_review_count // 0' 2>/dev/null || echo "0")
      local required_checks
      required_checks=$(echo "$protection_json" | jq -r '[.required_status_checks.contexts // [] | .[]] | length' 2>/dev/null || echo "0")

      info "Branch protection: ${required_reviews} required review(s), ${required_checks} required check(s)"

      if [[ "$required_reviews" -gt 0 ]]; then
        # Check if PR has enough approvals
        local prot_pr_number
        prot_pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")
        if [[ -n "$prot_pr_number" ]]; then
          local approvals
          approvals=$(gh pr view "$prot_pr_number" --json reviews --jq '[.reviews[] | select(.state == "APPROVED")] | length' 2>/dev/null || echo "0")
          if [[ "$approvals" -lt "$required_reviews" ]]; then
            warn "PR has $approvals approval(s), needs $required_reviews — skipping auto-merge"
            info "PR is ready for manual merge after required reviews"
            emit_event "merge.blocked" "issue=${ISSUE_NUMBER:-0}" "reason=insufficient_reviews" "have=$approvals" "need=$required_reviews"
            return 0
          fi
        fi
      fi
    fi
  fi

  local merge_method wait_ci_timeout auto_delete_branch auto_merge auto_approve merge_strategy
  merge_method=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_method) // "squash"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$merge_method" || "$merge_method" == "null" ]] && merge_method="squash"
  wait_ci_timeout=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.wait_ci_timeout_s) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$wait_ci_timeout" || "$wait_ci_timeout" == "null" ]] && wait_ci_timeout=0

  # Adaptive CI timeout: 90th percentile of historical times × 1.5 safety margin
  if [[ "$wait_ci_timeout" -eq 0 ]] 2>/dev/null; then
    local repo_hash_ci
    repo_hash_ci=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
    local ci_times_file="${HOME}/.shipwright/baselines/${repo_hash_ci}/ci-times.json"
    if [[ -f "$ci_times_file" ]]; then
      local p90_time
      p90_time=$(jq '
        .times | sort |
        (length * 0.9 | floor) as $idx |
        .[$idx] // 600
      ' "$ci_times_file" 2>/dev/null || echo "0")
      if [[ -n "$p90_time" ]] && awk -v t="$p90_time" 'BEGIN{exit !(t > 0)}' 2>/dev/null; then
        # 1.5x safety margin, clamped to [120, 1800]
        wait_ci_timeout=$(awk -v p90="$p90_time" 'BEGIN{
          t = p90 * 1.5;
          if (t < 120) t = 120;
          if (t > 1800) t = 1800;
          printf "%d", t
        }')
      fi
    fi
    # Default fallback if no history
    [[ "$wait_ci_timeout" -eq 0 ]] && wait_ci_timeout=600
  fi
  auto_delete_branch=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_delete_branch) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_delete_branch" || "$auto_delete_branch" == "null" ]] && auto_delete_branch="true"
  auto_merge=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_merge) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_merge" || "$auto_merge" == "null" ]] && auto_merge="false"
  auto_approve=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_approve) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_approve" || "$auto_approve" == "null" ]] && auto_approve="false"
  merge_strategy=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_strategy) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$merge_strategy" || "$merge_strategy" == "null" ]] && merge_strategy=""
  # merge_strategy overrides merge_method if set (squash/merge/rebase)
  if [[ -n "$merge_strategy" ]]; then
    merge_method="$merge_strategy"
  fi

  # Find PR for current branch
  local pr_number
  pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")

  if [[ -z "$pr_number" ]]; then
    warn "No PR found for branch $GIT_BRANCH — skipping merge"
    return 0
  fi

  info "Found PR #${pr_number} for branch ${GIT_BRANCH}"

  # Wait for CI checks to pass
  info "Waiting for CI checks (timeout: ${wait_ci_timeout}s)..."
  local elapsed=0
  local check_interval=15

  while [[ "$elapsed" -lt "$wait_ci_timeout" ]]; do
    local check_status
    check_status=$(gh pr checks "$pr_number" --json 'bucket,name' --jq '[.[] | .bucket] | unique | sort' 2>/dev/null || echo '["pending"]')

    # If all checks passed (only "pass" in buckets)
    if echo "$check_status" | jq -e '. == ["pass"]' &>/dev/null; then
      success "All CI checks passed"
      break
    fi

    # If any check failed
    if echo "$check_status" | jq -e 'any(. == "fail")' &>/dev/null; then
      error "CI checks failed — aborting merge"
      return 1
    fi

    sleep "$check_interval"
    elapsed=$((elapsed + check_interval))
  done

  # Record CI wait time for adaptive timeout calculation.
  # BUGFIX: previously the jq output was moved over ci-times.json even when
  # jq failed, replacing the history file with an empty file. Now the file
  # is only replaced on a successful write, and the temp file is always
  # cleaned up.
  if [[ "$elapsed" -gt 0 ]]; then
    local repo_hash_ci_rec
    repo_hash_ci_rec=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
    local ci_times_dir="${HOME}/.shipwright/baselines/${repo_hash_ci_rec}"
    local ci_times_rec_file="${ci_times_dir}/ci-times.json"
    mkdir -p "$ci_times_dir"
    local ci_history="[]"
    if [[ -f "$ci_times_rec_file" ]]; then
      ci_history=$(jq '.times // []' "$ci_times_rec_file" 2>/dev/null || echo "[]")
    fi
    local updated_ci
    # Keep only the most recent 20 samples
    updated_ci=$(echo "$ci_history" | jq --arg t "$elapsed" '. + [($t | tonumber)] | .[-20:]' 2>/dev/null || echo "[$elapsed]")
    local tmp_ci
    tmp_ci=$(mktemp "${ci_times_dir}/ci-times.json.XXXXXX")
    if jq -n --argjson times "$updated_ci" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
      '{times: $times, updated: $updated}' > "$tmp_ci" 2>/dev/null; then
      mv "$tmp_ci" "$ci_times_rec_file" 2>/dev/null || rm -f -- "$tmp_ci"
    else
      rm -f -- "$tmp_ci"
    fi
  fi

  if [[ "$elapsed" -ge "$wait_ci_timeout" ]]; then
    warn "CI check timeout (${wait_ci_timeout}s) — proceeding with merge anyway"
  fi

  # Auto-approve if configured (for branch protection requiring reviews)
  if [[ "$auto_approve" == "true" ]]; then
    info "Auto-approving PR #${pr_number}..."
    gh pr review "$pr_number" --approve 2>/dev/null || warn "Auto-approve failed (may need different permissions)"
  fi

  # Merge the PR
  if [[ "$auto_merge" == "true" ]]; then
    info "Enabling auto-merge for PR #${pr_number} (strategy: ${merge_method})..."
    local auto_merge_args=("pr" "merge" "$pr_number" "--auto" "--${merge_method}")
    if [[ "$auto_delete_branch" == "true" ]]; then
      auto_merge_args+=("--delete-branch")
    fi

    if gh "${auto_merge_args[@]}" 2>/dev/null; then
      success "Auto-merge enabled for PR #${pr_number} (strategy: ${merge_method})"
      emit_event "merge.auto_enabled" \
        "issue=${ISSUE_NUMBER:-0}" \
        "pr=$pr_number" \
        "strategy=$merge_method"
    else
      warn "Auto-merge not available — falling back to direct merge"
      # Fall through to direct merge below
      auto_merge="false"
    fi
  fi

  if [[ "$auto_merge" != "true" ]]; then
    info "Merging PR #${pr_number} (method: ${merge_method})..."
    local merge_args=("pr" "merge" "$pr_number" "--${merge_method}")
    if [[ "$auto_delete_branch" == "true" ]]; then
      merge_args+=("--delete-branch")
    fi

    if gh "${merge_args[@]}" 2>/dev/null; then
      success "PR #${pr_number} merged successfully"
    else
      error "Failed to merge PR #${pr_number}"
      return 1
    fi
  fi

  log_stage "merge" "PR #${pr_number} merged (strategy: ${merge_method}, auto_merge: ${auto_merge})"
}
1944
+
1945
# ──────────────────────────────────────────────────────────────────────
# stage_deploy — run the configured deployment (direct, canary, or
# blue-green) with pre-deploy CI/coverage gates and GitHub Deployment
# status tracking.
#
# Stage config keys (under stages[id=="deploy"].config):
#   staging_cmd, production_cmd, rollback_cmd, deploy_strategy,
#   canary_cmd, promote_cmd, switch_cmd, health_url,
#   pre_deploy_ci_status, pre_deploy_min_coverage
#
# Globals read:  PIPELINE_CONFIG, ARTIFACTS_DIR, NO_GITHUB, REPO_OWNER,
#                REPO_NAME, GIT_BRANCH, ISSUE_NUMBER
# Globals set:   CURRENT_STAGE_ID
# Returns:       0 on success or benign skip; 1 on gate/deploy failure.
# ──────────────────────────────────────────────────────────────────────
stage_deploy() {
  CURRENT_STAGE_ID="deploy"
  local staging_cmd
  staging_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.staging_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$staging_cmd" == "null" ]] && staging_cmd=""

  local prod_cmd
  prod_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.production_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$prod_cmd" == "null" ]] && prod_cmd=""

  local rollback_cmd
  rollback_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""

  if [[ -z "$staging_cmd" && -z "$prod_cmd" ]]; then
    warn "No deploy commands configured — skipping"
    return 0
  fi

  # Create GitHub deployment tracking
  local gh_deploy_env="production"
  # Staging-only configuration tracks as the "staging" environment
  [[ -n "$staging_cmd" && -z "$prod_cmd" ]] && gh_deploy_env="staging"
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_start &>/dev/null 2>&1; then
    if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      gh_deploy_pipeline_start "$REPO_OWNER" "$REPO_NAME" "${GIT_BRANCH:-HEAD}" "$gh_deploy_env" 2>/dev/null || true
      info "GitHub Deployment: tracking as $gh_deploy_env"
    fi
  fi

  # ── Pre-deploy gates ──
  local pre_deploy_ci
  pre_deploy_ci=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_ci_status) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true

  if [[ "${pre_deploy_ci:-true}" == "true" && "${NO_GITHUB:-false}" != "true" && -n "${REPO_OWNER:-}" && -n "${REPO_NAME:-}" ]]; then
    info "Pre-deploy gate: checking CI status..."
    local ci_failures
    ci_failures=$(gh api "repos/${REPO_OWNER}/${REPO_NAME}/commits/${GIT_BRANCH:-HEAD}/check-runs" \
      --jq '[.check_runs[] | select(.conclusion != null and .conclusion != "success" and .conclusion != "skipped")] | length' 2>/dev/null || echo "0")
    if [[ "${ci_failures:-0}" -gt 0 ]]; then
      error "Pre-deploy gate FAILED: ${ci_failures} CI check(s) not passing"
      [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: ${ci_failures} CI checks failing" 2>/dev/null || true
      return 1
    fi
    success "Pre-deploy gate: all CI checks passing"
  fi

  local pre_deploy_min_cov
  pre_deploy_min_cov=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_min_coverage) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  if [[ -n "${pre_deploy_min_cov:-}" && "${pre_deploy_min_cov}" != "null" && -f "$ARTIFACTS_DIR/test-coverage.json" ]]; then
    local actual_cov
    actual_cov=$(jq -r '.coverage_pct // 0' "$ARTIFACTS_DIR/test-coverage.json" 2>/dev/null || echo "0")
    if [[ "${actual_cov:-0}" -lt "$pre_deploy_min_cov" ]]; then
      error "Pre-deploy gate FAILED: coverage ${actual_cov}% < required ${pre_deploy_min_cov}%"
      [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: coverage ${actual_cov}% below minimum ${pre_deploy_min_cov}%" 2>/dev/null || true
      return 1
    fi
    success "Pre-deploy gate: coverage ${actual_cov}% >= ${pre_deploy_min_cov}%"
  fi

  # Post deploy start to GitHub
  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "Deploy started"
  fi

  # ── Deploy strategy ──
  local deploy_strategy
  deploy_strategy=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.deploy_strategy) // "direct"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$deploy_strategy" == "null" ]] && deploy_strategy="direct"

  local canary_cmd promote_cmd switch_cmd health_url deploy_log
  canary_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.canary_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$canary_cmd" == "null" ]] && canary_cmd=""
  promote_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.promote_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$promote_cmd" == "null" ]] && promote_cmd=""
  switch_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.switch_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$switch_cmd" == "null" ]] && switch_cmd=""
  health_url=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""
  deploy_log="$ARTIFACTS_DIR/deploy.log"

  case "$deploy_strategy" in
    canary)
      info "Canary deployment strategy..."
      if [[ -z "$canary_cmd" ]]; then
        warn "No canary_cmd configured — falling back to direct"
        deploy_strategy="direct"
      else
        info "Deploying canary..."
        bash -c "$canary_cmd" >> "$deploy_log" 2>&1 || { error "Canary deploy failed"; return 1; }

        if [[ -n "$health_url" ]]; then
          # Require 2 of 3 health probes (10s apart) to report 2xx/3xx
          local canary_healthy=0
          local _chk
          for _chk in 1 2 3; do
            sleep 10
            local _status
            _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
            if [[ "$_status" -ge 200 && "$_status" -lt 400 ]]; then
              canary_healthy=$((canary_healthy + 1))
            fi
          done
          if [[ "$canary_healthy" -lt 2 ]]; then
            error "Canary health check failed ($canary_healthy/3 passed) — rolling back"
            [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" 2>/dev/null || true
            return 1
          fi
          success "Canary healthy ($canary_healthy/3 checks passed)"
        fi

        info "Promoting canary to full deployment..."
        if [[ -n "$promote_cmd" ]]; then
          bash -c "$promote_cmd" >> "$deploy_log" 2>&1 || { error "Promote failed"; return 1; }
        fi
        success "Canary promoted"
      fi
      ;;
    blue-green)
      info "Blue-green deployment strategy..."
      if [[ -z "$staging_cmd" || -z "$switch_cmd" ]]; then
        warn "Blue-green requires staging_cmd + switch_cmd — falling back to direct"
        deploy_strategy="direct"
      else
        info "Deploying to inactive environment..."
        bash -c "$staging_cmd" >> "$deploy_log" 2>&1 || { error "Blue-green staging failed"; return 1; }

        if [[ -n "$health_url" ]]; then
          # Require 2 of 3 health probes (5s apart) before switching traffic
          local bg_healthy=0
          local _chk
          for _chk in 1 2 3; do
            sleep 5
            local _status
            _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
            [[ "$_status" -ge 200 && "$_status" -lt 400 ]] && bg_healthy=$((bg_healthy + 1))
          done
          if [[ "$bg_healthy" -lt 2 ]]; then
            error "Blue-green health check failed — not switching"
            return 1
          fi
        fi

        info "Switching traffic..."
        bash -c "$switch_cmd" >> "$deploy_log" 2>&1 || { error "Traffic switch failed"; return 1; }
        success "Blue-green switch complete"
      fi
      ;;
  esac

  # ── Direct deployment (default or fallback) ──
  if [[ "$deploy_strategy" == "direct" ]]; then
    if [[ -n "$staging_cmd" ]]; then
      info "Deploying to staging..."
      bash -c "$staging_cmd" > "$ARTIFACTS_DIR/deploy-staging.log" 2>&1 || {
        error "Staging deploy failed"
        # "|| true" keeps set -e from aborting the handler when ISSUE_NUMBER is empty
        [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Staging deploy failed" || true
        # Mark GitHub deployment as failed
        if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
          if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
            gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
          fi
        fi
        return 1
      }
      success "Staging deploy complete"
    fi

    if [[ -n "$prod_cmd" ]]; then
      info "Deploying to production..."
      bash -c "$prod_cmd" > "$ARTIFACTS_DIR/deploy-prod.log" 2>&1 || {
        error "Production deploy failed"
        if [[ -n "$rollback_cmd" ]]; then
          warn "Rolling back..."
          bash -c "$rollback_cmd" 2>&1 || error "Rollback also failed!"
        fi
        # "|| true" keeps set -e from aborting the handler when ISSUE_NUMBER is empty
        [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Production deploy failed — rollback ${rollback_cmd:+attempted}" || true
        # Mark GitHub deployment as failed
        if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
          if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
            gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
          fi
        fi
        return 1
      }
      success "Production deploy complete"
    fi
  fi

  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Deploy complete**"
    gh_add_labels "$ISSUE_NUMBER" "deployed"
  fi

  # Mark GitHub deployment as successful
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
    if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" true "" 2>/dev/null || true
    fi
  fi

  log_stage "deploy" "Deploy complete"
}
2141
+
2142
stage_validate() {
  # Post-deploy validation stage: run smoke tests, health-check the deployed
  # service with retries, close the originating issue with a summary, and
  # publish a pipeline report to the repo wiki.
  # Globals (read): PIPELINE_CONFIG, ARTIFACTS_DIR, ISSUE_NUMBER, GOAL,
  #   GITHUB_ISSUE, GIT_BRANCH, PIPELINE_NAME, PIPELINE_START_EPOCH, STAGE_TIMINGS
  # Returns: 0 on success, 1 when smoke tests or the health check fail.
  CURRENT_STAGE_ID="validate"

  # Per-stage settings come from the "validate" entry of the pipeline template.
  # jq's `// ""` handles a missing key; the explicit "null" check handles a key
  # whose value is JSON null.
  local smoke_cmd
  smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""

  local health_url
  health_url=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""

  local close_issue
  close_issue=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.close_issue) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true

  # ── Smoke tests ──
  if [[ -n "$smoke_cmd" ]]; then
    info "Running smoke tests..."
    bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/smoke.log" 2>&1 || {
      error "Smoke tests failed"
      # File an incident issue so the failure is visible even after the
      # pipeline exits.
      if [[ -n "$ISSUE_NUMBER" ]]; then
        gh issue create --title "Deploy validation failed: $GOAL" \
          --label "incident" --body "Pipeline smoke tests failed after deploy.

Related issue: ${GITHUB_ISSUE}
Branch: ${GIT_BRANCH}
PR: $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'unknown')" 2>/dev/null || true
      fi
      return 1
    }
    success "Smoke tests passed"
  fi

  # ── Health check with retry (up to 5 attempts, 10s apart) ──
  if [[ -n "$health_url" ]]; then
    info "Health check: $health_url"
    local attempts=0
    while [[ $attempts -lt 5 ]]; do
      if curl -sf "$health_url" >/dev/null 2>&1; then
        success "Health check passed"
        break
      fi
      attempts=$((attempts + 1))
      [[ $attempts -lt 5 ]] && { info "Retry ${attempts}/5..."; sleep 10; }
    done
    # attempts only reaches 5 when every try failed (a success breaks early).
    if [[ $attempts -ge 5 ]]; then
      error "Health check failed after 5 attempts"
      return 1
    fi
  fi

  # Compute total duration once for both issue close and wiki report
  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  # ── Close original issue with comprehensive summary ──
  if [[ "$close_issue" == "true" && -n "$ISSUE_NUMBER" ]]; then
    gh issue close "$ISSUE_NUMBER" --comment "## ✅ Complete — Deployed & Validated

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |

_Closed automatically by \`shipwright pipeline\`_" 2>/dev/null || true

    gh_remove_label "$ISSUE_NUMBER" "pipeline/pr-created"
    gh_add_labels "$ISSUE_NUMBER" "pipeline/complete"
    success "Issue #$ISSUE_NUMBER closed"
  fi

  # ── Push pipeline report to wiki ──
  # Fix: an empty STAGE_TIMINGS used to be reported as "1 completed" because
  # `echo "" | wc -l` still emits one (empty) line. Count stages only when
  # there are timings recorded.
  local stage_count=0
  if [[ -n "$STAGE_TIMINGS" ]]; then
    stage_count=$(echo "$STAGE_TIMINGS" | tr '|' '\n' | wc -l | xargs)
  fi
  local report="# Pipeline Report — ${GOAL}

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |
| Stages | ${stage_count} completed |

## Stage Timings
$(echo "$STAGE_TIMINGS" | tr '|' '\n' | sed 's/^/- /')

## Artifacts
$(ls -1 "$ARTIFACTS_DIR" 2>/dev/null | sed 's/^/- /')

---
_Generated by \`shipwright pipeline\` at $(now_iso)_"
  gh_wiki_page "Pipeline-Report-${ISSUE_NUMBER:-inline}" "$report"

  log_stage "validate" "Validation complete"
}
2238
+
2239
stage_monitor() {
  # Post-deploy monitoring stage: poll a health URL and/or a log command for a
  # configured window, tally errors, and on threshold breach optionally
  # auto-rollback (with smoke-test verification) and file a hotfix issue.
  # On a clean run, record a baseline so future runs can adapt duration and
  # threshold to this repo's history.
  # Globals (read): PIPELINE_CONFIG, ARTIFACTS_DIR, PROJECT_ROOT, SCRIPT_DIR,
  #   ISSUE_NUMBER, GITHUB_ISSUE, GOAL, GIT_BRANCH, GH_AVAILABLE, DIM, RESET
  # Returns: 0 when the monitoring window completes under the error threshold,
  #   1 when the threshold is exceeded.
  CURRENT_STAGE_ID="monitor"

  # Read config from pipeline template
  local duration_minutes health_url error_threshold log_pattern log_cmd rollback_cmd auto_rollback
  duration_minutes=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.duration_minutes) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$duration_minutes" || "$duration_minutes" == "null" ]] && duration_minutes=5
  health_url=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""
  error_threshold=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.error_threshold) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$error_threshold" || "$error_threshold" == "null" ]] && error_threshold=5

  # Adaptive monitor: use historical baselines if available. The repo is keyed
  # by a cksum of its path so baselines from different checkouts don't collide.
  local repo_hash
  repo_hash=$(echo "${PROJECT_ROOT:-$(pwd)}" | cksum | awk '{print $1}')
  local baseline_file="${HOME}/.shipwright/baselines/${repo_hash}/deploy-monitor.json"
  if [[ -f "$baseline_file" ]]; then
    local hist_duration hist_threshold
    hist_duration=$(jq -r '.p90_stabilization_minutes // empty' "$baseline_file" 2>/dev/null || true)
    hist_threshold=$(jq -r '.p90_error_threshold // empty' "$baseline_file" 2>/dev/null || true)
    if [[ -n "$hist_duration" && "$hist_duration" != "null" ]]; then
      duration_minutes="$hist_duration"
      info "Monitor duration: ${duration_minutes}m ${DIM}(from baseline)${RESET}"
    fi
    if [[ -n "$hist_threshold" && "$hist_threshold" != "null" ]]; then
      error_threshold="$hist_threshold"
      info "Error threshold: ${error_threshold} ${DIM}(from baseline)${RESET}"
    fi
  fi
  log_pattern=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_pattern) // "ERROR|FATAL|PANIC"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$log_pattern" || "$log_pattern" == "null" ]] && log_pattern="ERROR|FATAL|PANIC"
  log_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$log_cmd" == "null" ]] && log_cmd=""
  rollback_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""
  auto_rollback=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.auto_rollback) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_rollback" || "$auto_rollback" == "null" ]] && auto_rollback="false"

  # Nothing to monitor without at least one probe configured.
  if [[ -z "$health_url" && -z "$log_cmd" ]]; then
    warn "No health_url or log_cmd configured — skipping monitor stage"
    log_stage "monitor" "Skipped (no monitoring configured)"
    return 0
  fi

  local report_file="$ARTIFACTS_DIR/monitor-report.md"
  local deploy_log_file="$ARTIFACTS_DIR/deploy-logs.txt"
  : > "$deploy_log_file"
  local total_errors=0
  local poll_interval=30 # seconds between polls
  local total_polls=$(( (duration_minutes * 60) / poll_interval ))
  [[ "$total_polls" -lt 1 ]] && total_polls=1

  info "Post-deploy monitoring: ${duration_minutes}m (${total_polls} polls, threshold: ${error_threshold} errors)"

  emit_event "monitor.started" \
    "issue=${ISSUE_NUMBER:-0}" \
    "duration_minutes=$duration_minutes" \
    "error_threshold=$error_threshold"

  # Report header
  {
    echo "# Post-Deploy Monitor Report"
    echo ""
    echo "- Duration: ${duration_minutes} minutes"
    echo "- Health URL: ${health_url:-none}"
    echo "- Log command: ${log_cmd:-none}"
    echo "- Error threshold: ${error_threshold}"
    echo "- Auto-rollback: ${auto_rollback}"
    echo ""
    echo "## Poll Results"
    echo ""
  } > "$report_file"

  local poll=0
  local health_failures=0
  local log_errors=0
  while [[ "$poll" -lt "$total_polls" ]]; do
    poll=$((poll + 1))
    local poll_time
    poll_time=$(now_iso)

    # Health URL check
    if [[ -n "$health_url" ]]; then
      local http_status
      # Fix: the previous `curl -sf ... || echo "000"` corrupted the status —
      # with -f, curl still prints the -w write-out on HTTP errors (and prints
      # "000" itself on connection failures), so the fallback echo CONCATENATED
      # ("404" + "000" => "404000"). Drop -f and only substitute "000" when
      # curl itself fails, matching the blue-green check pattern above.
      http_status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null) || http_status="000"
      if [[ "$http_status" -ge 200 && "$http_status" -lt 400 ]]; then
        echo "- [${poll_time}] Health: ✅ (HTTP ${http_status})" >> "$report_file"
      else
        health_failures=$((health_failures + 1))
        total_errors=$((total_errors + 1))
        echo "- [${poll_time}] Health: ❌ (HTTP ${http_status})" >> "$report_file"
        warn "Health check failed: HTTP ${http_status}"
      fi
    fi

    # Log command check (accumulate deploy logs for feedback collect)
    if [[ -n "$log_cmd" ]]; then
      local log_output
      log_output=$(bash -c "$log_cmd" 2>/dev/null || true)
      [[ -n "$log_output" ]] && echo "$log_output" >> "$deploy_log_file"
      local error_count=0
      if [[ -n "$log_output" ]]; then
        # grep -c prints "0" and exits 1 on no match; || true keeps set -e calm.
        error_count=$(echo "$log_output" | grep -cE "$log_pattern" 2>/dev/null || true)
        error_count="${error_count:-0}"
      fi
      if [[ "$error_count" -gt 0 ]]; then
        log_errors=$((log_errors + error_count))
        total_errors=$((total_errors + error_count))
        echo "- [${poll_time}] Logs: ⚠️ ${error_count} error(s) matching '${log_pattern}'" >> "$report_file"
        warn "Log errors detected: ${error_count}"
      else
        echo "- [${poll_time}] Logs: ✅ clean" >> "$report_file"
      fi
    fi

    emit_event "monitor.check" \
      "issue=${ISSUE_NUMBER:-0}" \
      "poll=$poll" \
      "total_errors=$total_errors" \
      "health_failures=$health_failures"

    # Check threshold
    if [[ "$total_errors" -ge "$error_threshold" ]]; then
      error "Error threshold exceeded: ${total_errors} >= ${error_threshold}"

      echo "" >> "$report_file"
      echo "## ❌ THRESHOLD EXCEEDED" >> "$report_file"
      echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"

      emit_event "monitor.alert" \
        "issue=${ISSUE_NUMBER:-0}" \
        "total_errors=$total_errors" \
        "threshold=$error_threshold"

      # Feedback loop: collect deploy logs and optionally create issue
      if [[ -f "$deploy_log_file" ]] && [[ -s "$deploy_log_file" ]] && [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" collect "$deploy_log_file" 2>/dev/null) || true
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" create-issue 2>/dev/null) || true
      fi

      # Auto-rollback: feedback rollback (GitHub Deployments API) and/or config rollback_cmd
      if [[ "$auto_rollback" == "true" ]]; then
        warn "Auto-rolling back..."
        echo "" >> "$report_file"
        echo "## Rollback" >> "$report_file"

        # Trigger feedback rollback (calls sw-github-deploy.sh rollback)
        if [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
          (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" rollback production "Monitor threshold exceeded (${total_errors} errors)" >> "$report_file" 2>&1) || true
        fi

        if [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" >> "$report_file" 2>&1; then
          success "Rollback executed"
          echo "Rollback: ✅ success" >> "$report_file"

          # Post-rollback smoke test verification (reuses the validate stage's
          # smoke_cmd from the pipeline template).
          local smoke_cmd
          smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
          [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""

          if [[ -n "$smoke_cmd" ]]; then
            info "Verifying rollback with smoke tests..."
            if bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/rollback-smoke.log" 2>&1; then
              success "Rollback verified — smoke tests pass"
              echo "Rollback verification: ✅ smoke tests pass" >> "$report_file"
              emit_event "monitor.rollback_verified" \
                "issue=${ISSUE_NUMBER:-0}" \
                "status=pass"
            else
              error "Rollback verification FAILED — smoke tests still failing"
              echo "Rollback verification: ❌ smoke tests FAILED — manual intervention required" >> "$report_file"
              emit_event "monitor.rollback_verified" \
                "issue=${ISSUE_NUMBER:-0}" \
                "status=fail"
              if [[ -n "$ISSUE_NUMBER" ]]; then
                gh_comment_issue "$ISSUE_NUMBER" "🚨 **Rollback executed but verification failed** — smoke tests still failing after rollback. Manual intervention required.

Smoke command: \`${smoke_cmd}\`
Log: see \`pipeline-artifacts/rollback-smoke.log\`" 2>/dev/null || true
              fi
            fi
          fi
        else
          # NOTE: an empty rollback_cmd also lands here (nothing was executed).
          error "Rollback failed!"
          echo "Rollback: ❌ failed" >> "$report_file"
        fi

        emit_event "monitor.rollback" \
          "issue=${ISSUE_NUMBER:-0}" \
          "total_errors=$total_errors"

        # Post to GitHub
        if [[ -n "$ISSUE_NUMBER" ]]; then
          gh_comment_issue "$ISSUE_NUMBER" "🚨 **Auto-rollback triggered** — ${total_errors} errors exceeded threshold (${error_threshold})

Rollback command: \`${rollback_cmd}\`" 2>/dev/null || true

          # Create hotfix issue
          if [[ "$GH_AVAILABLE" == "true" ]]; then
            gh issue create \
              --title "Hotfix: Deploy regression for ${GOAL}" \
              --label "hotfix,incident" \
              --body "Auto-rollback triggered during post-deploy monitoring.

**Original issue:** ${GITHUB_ISSUE:-N/A}
**Errors detected:** ${total_errors}
**Threshold:** ${error_threshold}
**Branch:** ${GIT_BRANCH}

## Monitor Report
$(cat "$report_file")

---
_Created automatically by \`shipwright pipeline\` monitor stage_" 2>/dev/null || true
          fi
        fi
      fi

      log_stage "monitor" "Failed — ${total_errors} errors (threshold: ${error_threshold})"
      return 1
    fi

    # Sleep between polls (skip on last poll)
    if [[ "$poll" -lt "$total_polls" ]]; then
      sleep "$poll_interval"
    fi
  done

  # Monitoring complete — all clear
  echo "" >> "$report_file"
  echo "## ✅ Monitoring Complete" >> "$report_file"
  echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"
  echo "Health failures: ${health_failures}" >> "$report_file"
  echo "Log errors: ${log_errors}" >> "$report_file"

  success "Post-deploy monitoring clean (${total_errors} errors in ${duration_minutes}m)"

  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Post-deploy monitoring passed** — ${duration_minutes}m, ${total_errors} errors" 2>/dev/null || true
  fi

  log_stage "monitor" "Clean — ${total_errors} errors in ${duration_minutes}m"

  # Record baseline for adaptive monitoring on future runs. Write through a
  # temp file so a failed jq never truncates the existing baseline.
  local baseline_dir="${HOME}/.shipwright/baselines/${repo_hash}"
  mkdir -p "$baseline_dir" 2>/dev/null || true
  local baseline_tmp
  baseline_tmp="$(mktemp)"
  if [[ -f "${baseline_dir}/deploy-monitor.json" ]]; then
    # Append to history and recalculate p90
    jq --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '.history += [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}] |
       .p90_stabilization_minutes = ([.history[].duration_minutes] | sort | .[length * 9 / 10 | floor]) |
       .p90_error_threshold = (([.history[].errors] | sort | .[length * 9 / 10 | floor]) + 2) |
       .updated_at = now' \
      "${baseline_dir}/deploy-monitor.json" > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  else
    jq -n --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '{history: [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}],
        p90_stabilization_minutes: ($dur | tonumber),
        p90_error_threshold: (($errs | tonumber) + 2),
        updated_at: now}' \
      > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  fi
}
2505
+
2506
+ # ─── Multi-Dimensional Quality Checks ─────────────────────────────────────
2507
+ # Beyond tests: security, bundle size, perf regression, API compat, coverage
2508
+