shipwright-cli 2.2.0 → 2.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/README.md +4 -4
  2. package/config/policy.schema.json +104 -29
  3. package/docs/AGI-PLATFORM-PLAN.md +8 -4
  4. package/docs/AGI-WHATS-NEXT.md +23 -20
  5. package/package.json +1 -1
  6. package/scripts/lib/helpers.sh +30 -1
  7. package/scripts/lib/pipeline-detection.sh +278 -0
  8. package/scripts/lib/pipeline-github.sh +196 -0
  9. package/scripts/lib/pipeline-intelligence.sh +1712 -0
  10. package/scripts/lib/pipeline-quality-checks.sh +1052 -0
  11. package/scripts/lib/pipeline-quality.sh +11 -0
  12. package/scripts/lib/pipeline-stages.sh +2488 -0
  13. package/scripts/lib/pipeline-state.sh +529 -0
  14. package/scripts/sw +1 -1
  15. package/scripts/sw-activity.sh +1 -1
  16. package/scripts/sw-adaptive.sh +1 -1
  17. package/scripts/sw-adversarial.sh +1 -1
  18. package/scripts/sw-architecture-enforcer.sh +1 -1
  19. package/scripts/sw-auth.sh +1 -1
  20. package/scripts/sw-autonomous.sh +1 -1
  21. package/scripts/sw-changelog.sh +1 -1
  22. package/scripts/sw-checkpoint.sh +1 -1
  23. package/scripts/sw-ci.sh +1 -1
  24. package/scripts/sw-cleanup.sh +1 -1
  25. package/scripts/sw-code-review.sh +1 -1
  26. package/scripts/sw-connect.sh +1 -1
  27. package/scripts/sw-context.sh +1 -1
  28. package/scripts/sw-cost.sh +1 -1
  29. package/scripts/sw-daemon.sh +1 -1
  30. package/scripts/sw-dashboard.sh +1 -1
  31. package/scripts/sw-db.sh +1 -1
  32. package/scripts/sw-decompose.sh +1 -1
  33. package/scripts/sw-deps.sh +1 -1
  34. package/scripts/sw-developer-simulation.sh +1 -1
  35. package/scripts/sw-discovery.sh +1 -1
  36. package/scripts/sw-doc-fleet.sh +1 -1
  37. package/scripts/sw-docs-agent.sh +1 -1
  38. package/scripts/sw-docs.sh +1 -1
  39. package/scripts/sw-doctor.sh +1 -1
  40. package/scripts/sw-dora.sh +1 -1
  41. package/scripts/sw-durable.sh +1 -1
  42. package/scripts/sw-e2e-orchestrator.sh +1 -1
  43. package/scripts/sw-eventbus.sh +1 -1
  44. package/scripts/sw-feedback.sh +1 -1
  45. package/scripts/sw-fix.sh +1 -1
  46. package/scripts/sw-fleet-discover.sh +1 -1
  47. package/scripts/sw-fleet-viz.sh +1 -1
  48. package/scripts/sw-fleet.sh +1 -1
  49. package/scripts/sw-github-app.sh +1 -1
  50. package/scripts/sw-github-checks.sh +1 -1
  51. package/scripts/sw-github-deploy.sh +1 -1
  52. package/scripts/sw-github-graphql.sh +1 -1
  53. package/scripts/sw-guild.sh +1 -1
  54. package/scripts/sw-heartbeat.sh +1 -1
  55. package/scripts/sw-hygiene.sh +1 -1
  56. package/scripts/sw-incident.sh +1 -1
  57. package/scripts/sw-init.sh +1 -1
  58. package/scripts/sw-instrument.sh +1 -1
  59. package/scripts/sw-intelligence.sh +1 -1
  60. package/scripts/sw-jira.sh +1 -1
  61. package/scripts/sw-launchd.sh +1 -1
  62. package/scripts/sw-linear.sh +1 -1
  63. package/scripts/sw-logs.sh +1 -1
  64. package/scripts/sw-loop.sh +1 -1
  65. package/scripts/sw-memory.sh +1 -1
  66. package/scripts/sw-mission-control.sh +1 -1
  67. package/scripts/sw-model-router.sh +1 -1
  68. package/scripts/sw-otel.sh +1 -1
  69. package/scripts/sw-oversight.sh +1 -1
  70. package/scripts/sw-pipeline-composer.sh +1 -1
  71. package/scripts/sw-pipeline-vitals.sh +1 -1
  72. package/scripts/sw-pipeline.sh +8 -1
  73. package/scripts/sw-pm.sh +1 -1
  74. package/scripts/sw-pr-lifecycle.sh +1 -1
  75. package/scripts/sw-predictive.sh +1 -1
  76. package/scripts/sw-prep.sh +1 -1
  77. package/scripts/sw-ps.sh +1 -1
  78. package/scripts/sw-public-dashboard.sh +1 -1
  79. package/scripts/sw-quality.sh +1 -1
  80. package/scripts/sw-reaper.sh +1 -1
  81. package/scripts/sw-regression.sh +1 -1
  82. package/scripts/sw-release-manager.sh +1 -1
  83. package/scripts/sw-release.sh +1 -1
  84. package/scripts/sw-remote.sh +1 -1
  85. package/scripts/sw-replay.sh +1 -1
  86. package/scripts/sw-retro.sh +1 -1
  87. package/scripts/sw-scale.sh +1 -1
  88. package/scripts/sw-security-audit.sh +1 -1
  89. package/scripts/sw-self-optimize.sh +1 -1
  90. package/scripts/sw-session.sh +1 -1
  91. package/scripts/sw-setup.sh +1 -1
  92. package/scripts/sw-standup.sh +1 -1
  93. package/scripts/sw-status.sh +1 -1
  94. package/scripts/sw-strategic.sh +1 -1
  95. package/scripts/sw-stream.sh +1 -1
  96. package/scripts/sw-swarm.sh +1 -1
  97. package/scripts/sw-team-stages.sh +1 -1
  98. package/scripts/sw-templates.sh +1 -1
  99. package/scripts/sw-testgen.sh +1 -1
  100. package/scripts/sw-tmux-pipeline.sh +1 -1
  101. package/scripts/sw-tmux.sh +1 -1
  102. package/scripts/sw-trace.sh +1 -1
  103. package/scripts/sw-tracker.sh +1 -1
  104. package/scripts/sw-triage.sh +1 -1
  105. package/scripts/sw-upgrade.sh +1 -1
  106. package/scripts/sw-ux.sh +1 -1
  107. package/scripts/sw-webhook.sh +1 -1
  108. package/scripts/sw-widgets.sh +1 -1
  109. package/scripts/sw-worktree.sh +1 -1
@@ -0,0 +1,2488 @@
1
# pipeline-stages.sh — Stage implementations (intake, plan, build, test, review, pr, merge, deploy, validate, monitor) for sw-pipeline.sh
# Source from sw-pipeline.sh. Requires all pipeline globals and state/github/detection/quality modules.
# Double-source guard: bail out early if this module was already loaded.
if [[ -n "${_PIPELINE_STAGES_LOADED:-}" ]]; then
  return 0
fi
_PIPELINE_STAGES_LOADED=1
5
+
6
#######################################
# Intake stage: resolve the goal (optionally from a GitHub issue), detect
# task type / test command, create a work branch, and save intake artifacts.
# Globals:   ISSUE_NUMBER (read), GOAL, ISSUE_BODY, ISSUE_LABELS,
#            ISSUE_MILESTONE, ISSUE_ASSIGNEES, GITHUB_ISSUE, TASK_TYPE,
#            TEST_CMD, GIT_BRANCH, CURRENT_STAGE_ID (written)
# Outputs:   progress via info/success; artifacts under $ARTIFACTS_DIR
# Returns:   0 on success, 1 if the issue cannot be fetched
#######################################
stage_intake() {
  CURRENT_STAGE_ID="intake"
  local project_lang
  project_lang=$(detect_project_lang)
  info "Project: ${BOLD}$project_lang${RESET}"

  # 1. Fetch issue metadata if --issue provided
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local meta
    meta=$(gh_get_issue_meta "$ISSUE_NUMBER")

    if [[ -n "$meta" ]]; then
      GOAL=$(echo "$meta" | jq -r '.title // ""')
      ISSUE_BODY=$(echo "$meta" | jq -r '.body // ""')
      ISSUE_LABELS=$(echo "$meta" | jq -r '[.labels[].name] | join(",")' 2>/dev/null || true)
      ISSUE_MILESTONE=$(echo "$meta" | jq -r '.milestone.title // ""' 2>/dev/null || true)
      ISSUE_ASSIGNEES=$(echo "$meta" | jq -r '[.assignees[].login] | join(",")' 2>/dev/null || true)
      # jq can emit the literal string "null" for missing fields — normalize to empty.
      [[ "$ISSUE_MILESTONE" == "null" ]] && ISSUE_MILESTONE=""
      [[ "$ISSUE_LABELS" == "null" ]] && ISSUE_LABELS=""
    else
      # Fallback: just get title
      GOAL=$(gh issue view "$ISSUE_NUMBER" --json title -q .title 2>/dev/null) || {
        error "Failed to fetch issue #$ISSUE_NUMBER"
        return 1
      }
    fi

    GITHUB_ISSUE="#$ISSUE_NUMBER"
    info "Issue #$ISSUE_NUMBER: ${BOLD}$GOAL${RESET}"

    if [[ -n "$ISSUE_LABELS" ]]; then
      info "Labels: ${DIM}$ISSUE_LABELS${RESET}"
    fi
    if [[ -n "$ISSUE_MILESTONE" ]]; then
      info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
    fi

    # Self-assign
    gh_assign_self "$ISSUE_NUMBER"

    # Add in-progress label
    gh_add_labels "$ISSUE_NUMBER" "pipeline/in-progress"
  fi

  # 2. Detect task type
  TASK_TYPE=$(detect_task_type "$GOAL")
  local suggested_template
  suggested_template=$(template_for_type "$TASK_TYPE")
  info "Detected: ${BOLD}$TASK_TYPE${RESET} → team template: ${CYAN}$suggested_template${RESET}"

  # 3. Auto-detect test command if not provided
  if [[ -z "$TEST_CMD" ]]; then
    TEST_CMD=$(detect_test_cmd)
    if [[ -n "$TEST_CMD" ]]; then
      info "Auto-detected test: ${DIM}$TEST_CMD${RESET}"
    fi
  fi

  # 4. Create branch with smart prefix.
  # printf (not echo) so a goal starting with "-" is not eaten as an option.
  local prefix
  prefix=$(branch_prefix_for_type "$TASK_TYPE")
  local slug
  slug=$(printf '%s' "$GOAL" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | cut -c1-40)
  # Trim both edge hyphens (a goal like "[bug] crash" otherwise yields "-bug-crash",
  # producing branch names that look like CLI flags); guarantee a non-empty slug so
  # the ref never degenerates to "prefix/" (invalid) or "prefix/-N".
  slug="${slug%-}"
  slug="${slug#-}"
  [[ -z "$slug" ]] && slug="task"
  [[ -n "$ISSUE_NUMBER" ]] && slug="${slug}-${ISSUE_NUMBER}"
  GIT_BRANCH="${prefix}/${slug}"

  git checkout -b "$GIT_BRANCH" 2>/dev/null || {
    info "Branch $GIT_BRANCH exists, checking out"
    git checkout "$GIT_BRANCH" 2>/dev/null || true
  }
  success "Branch: ${BOLD}$GIT_BRANCH${RESET}"

  # 5. Post initial progress comment on GitHub issue
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local body
    body=$(gh_build_progress_body)
    gh_post_progress "$ISSUE_NUMBER" "$body"
  fi

  # 6. Save artifacts
  save_artifact "intake.json" "$(jq -n \
    --arg goal "$GOAL" --arg type "$TASK_TYPE" \
    --arg template "$suggested_template" --arg branch "$GIT_BRANCH" \
    --arg issue "${GITHUB_ISSUE:-}" --arg lang "$project_lang" \
    --arg test_cmd "${TEST_CMD:-}" --arg labels "${ISSUE_LABELS:-}" \
    --arg milestone "${ISSUE_MILESTONE:-}" --arg body "${ISSUE_BODY:-}" \
    '{goal:$goal, type:$type, template:$template, branch:$branch,
      issue:$issue, language:$lang, test_cmd:$test_cmd,
      labels:$labels, milestone:$milestone, body:$body}')"

  log_stage "intake" "Goal: $GOAL
Type: $TASK_TYPE → template: $suggested_template
Branch: $GIT_BRANCH
Language: $project_lang
Test cmd: ${TEST_CMD:-none detected}"
}
103
+
104
#######################################
# Plan stage: build a context-rich prompt, generate an implementation plan
# with the claude CLI, validate it (up to 2 attempts, regenerating with
# feedback), extract the task checklist, and publish plan + checklist to
# the GitHub issue and wiki.
# Globals:   GOAL, ISSUE_BODY, TASK_TYPE, TEST_CMD, MODEL, CLAUDE_MODEL,
#            ARTIFACTS_DIR, SCRIPT_DIR, PROJECT_ROOT, PIPELINE_CONFIG,
#            PIPELINE_NAME, GIT_BRANCH, GITHUB_ISSUE, ISSUE_NUMBER,
#            TASKS_FILE (read); CURRENT_STAGE_ID (written)
# Outputs:   plan.md, plan-validation.md, dod.md, task files
# Returns:   0 on success, 1 on generation failure
#######################################
stage_plan() {
  CURRENT_STAGE_ID="plan"
  local plan_file="$ARTIFACTS_DIR/plan.md"

  if ! command -v claude &>/dev/null; then
    error "Claude CLI not found — cannot generate plan"
    return 1
  fi

  info "Generating implementation plan..."

  # ── Gather context bundle (if context engine available) ──
  local context_script="${SCRIPT_DIR}/sw-context.sh"
  if [[ -x "$context_script" ]]; then
    "$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
  fi

  # Build rich prompt with all available context
  local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.

## Goal
${GOAL}
"

  # Add issue context
  if [[ -n "$ISSUE_BODY" ]]; then
    plan_prompt="${plan_prompt}
## Issue Description
${ISSUE_BODY}
"
  fi

  # Inject context bundle from context engine (if available)
  local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
  if [[ -f "$_context_bundle" ]]; then
    local _cb_content
    _cb_content=$(head -100 "$_context_bundle" 2>/dev/null || true)
    if [[ -n "$_cb_content" ]]; then
      plan_prompt="${plan_prompt}
## Pipeline Context
${_cb_content}
"
    fi
  fi

  # Inject intelligence memory context for similar past plans
  if type intelligence_search_memory &>/dev/null; then
    local plan_memory
    plan_memory=$(intelligence_search_memory "plan stage for ${TASK_TYPE:-feature}: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
    if [[ -n "$plan_memory" && "$plan_memory" != *'"results":[]'* && "$plan_memory" != *'"error"'* ]]; then
      local memory_summary
      memory_summary=$(echo "$plan_memory" | jq -r '.results[]? | "- \(.)"' 2>/dev/null | head -10 || true)
      if [[ -n "$memory_summary" ]]; then
        plan_prompt="${plan_prompt}
## Historical Context (from previous pipelines)
Previous similar issues were planned as:
${memory_summary}
"
      fi
    fi
  fi

  # Self-aware pipeline: inject hint when plan stage has been failing recently
  local plan_hint
  plan_hint=$(get_stage_self_awareness_hint "plan" 2>/dev/null || true)
  if [[ -n "$plan_hint" ]]; then
    plan_prompt="${plan_prompt}
## Self-Assessment (recent plan stage performance)
${plan_hint}
"
  fi

  # Inject cross-pipeline discoveries (from other concurrent/similar pipelines)
  if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
    local plan_discoveries
    plan_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.json" 2>/dev/null | head -20 || true)
    if [[ -n "$plan_discoveries" ]]; then
      plan_prompt="${plan_prompt}
## Discoveries from Other Pipelines
${plan_discoveries}
"
    fi
  fi

  # Inject architecture patterns from intelligence layer
  local repo_hash_plan
  repo_hash_plan=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local arch_file_plan="${HOME}/.shipwright/memory/${repo_hash_plan}/architecture.json"
  if [[ -f "$arch_file_plan" ]]; then
    local arch_patterns
    arch_patterns=$(jq -r '
      "Language: \(.language // "unknown")",
      "Framework: \(.framework // "unknown")",
      "Patterns: \((.patterns // []) | join(", "))",
      "Rules: \((.rules // []) | join("; "))"
    ' "$arch_file_plan" 2>/dev/null || true)
    if [[ -n "$arch_patterns" ]]; then
      plan_prompt="${plan_prompt}
## Architecture Patterns
${arch_patterns}
"
    fi
  fi

  # Task-type-specific guidance
  case "${TASK_TYPE:-feature}" in
    bug)
      plan_prompt="${plan_prompt}
## Task Type: Bug Fix
Focus on: reproducing the bug, identifying root cause, minimal targeted fix, regression tests.
" ;;
    refactor)
      plan_prompt="${plan_prompt}
## Task Type: Refactor
Focus on: preserving all existing behavior, incremental changes, comprehensive test coverage.
" ;;
    security)
      plan_prompt="${plan_prompt}
## Task Type: Security
Focus on: threat modeling, OWASP top 10, input validation, authentication/authorization.
" ;;
  esac

  # Add project context
  local project_lang
  project_lang=$(detect_project_lang)
  plan_prompt="${plan_prompt}
## Project Context
- Language: ${project_lang}
- Test command: ${TEST_CMD:-not configured}
- Task type: ${TASK_TYPE:-feature}

## Required Output
Create a Markdown plan with these sections:

### Files to Modify
List every file to create or modify with full paths.

### Implementation Steps
Numbered steps in order of execution. Be specific about what code to write.

### Task Checklist
A checkbox list of discrete tasks that can be tracked:
- [ ] Task 1: Description
- [ ] Task 2: Description
(Include 5-15 tasks covering the full implementation)

### Testing Approach
How to verify the implementation works.

### Definition of Done
Checklist of completion criteria.
"

  # Model resolution order: pipeline config → CLI --model → default → router.
  local plan_model
  plan_model=$(jq -r --arg id "plan" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -n "$MODEL" ]] && plan_model="$MODEL"
  [[ -z "$plan_model" || "$plan_model" == "null" ]] && plan_model="opus"
  # Intelligence model routing (when no explicit CLI --model override)
  if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
    plan_model="$CLAUDE_MODEL"
  fi

  local _token_log="${ARTIFACTS_DIR}/.claude-tokens-plan.log"
  claude --print --model "$plan_model" --max-turns 25 \
    "$plan_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
  parse_claude_tokens "$_token_log"

  if [[ ! -s "$plan_file" ]]; then
    error "Plan generation failed — empty output"
    return 1
  fi

  # Validate plan content — detect API/CLI errors masquerading as plans
  local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
  _plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
  if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
    error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
    return 1
  fi

  local line_count
  line_count=$(wc -l < "$plan_file" | xargs)
  if [[ "$line_count" -lt 3 ]]; then
    error "Plan too short (${line_count} lines) — likely an error, not a real plan"
    return 1
  fi
  info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"

  # Extract task checklist for GitHub issue and task tracking.
  # [[:space:]] (not \s): \s is a GNU grep extension and silently matches
  # nothing useful on BSD/macOS grep, which would leave the checklist empty.
  local checklist
  checklist=$(sed -n '/### Task Checklist/,/^###/p' "$plan_file" 2>/dev/null | \
    grep -E '^[[:space:]]*- \[' | head -20)

  if [[ -z "$checklist" ]]; then
    # Fallback: extract any checkbox lines
    checklist=$(grep -E '^[[:space:]]*- \[' "$plan_file" 2>/dev/null | head -20)
  fi

  # Write local task file for Claude Code build stage
  if [[ -n "$checklist" ]]; then
    cat > "$TASKS_FILE" <<TASKS_EOF
# Pipeline Tasks — ${GOAL}

## Implementation Checklist
${checklist}

## Context
- Pipeline: ${PIPELINE_NAME}
- Branch: ${GIT_BRANCH}
- Issue: ${GITHUB_ISSUE:-none}
- Generated: $(now_iso)
TASKS_EOF
    info "Task list: ${DIM}$TASKS_FILE${RESET} ($(echo "$checklist" | wc -l | xargs) tasks)"
  fi

  # Post plan + task checklist to GitHub issue
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local plan_summary
    plan_summary=$(head -50 "$plan_file")
    local gh_body="## 📋 Implementation Plan

<details>
<summary>Click to expand full plan (${line_count} lines)</summary>

${plan_summary}

</details>
"
    if [[ -n "$checklist" ]]; then
      gh_body="${gh_body}
## ✅ Task Checklist
${checklist}
"
    fi

    gh_body="${gh_body}
---
_Generated by \`shipwright pipeline\` at $(now_iso)_"

    gh_comment_issue "$ISSUE_NUMBER" "$gh_body"
    info "Plan posted to issue #$ISSUE_NUMBER"
  fi

  # Push plan to wiki
  gh_wiki_page "Pipeline-Plan-${ISSUE_NUMBER:-inline}" "$(<"$plan_file")"

  # Generate Claude Code task list
  local cc_tasks_file="$PROJECT_ROOT/.claude/tasks.md"
  if [[ -n "$checklist" ]]; then
    cat > "$cc_tasks_file" <<CC_TASKS_EOF
# Tasks — ${GOAL}

## Status: In Progress
Pipeline: ${PIPELINE_NAME} | Branch: ${GIT_BRANCH}

## Checklist
${checklist}

## Notes
- Generated from pipeline plan at $(now_iso)
- Pipeline will update status as tasks complete
CC_TASKS_EOF
    info "Claude Code tasks: ${DIM}$cc_tasks_file${RESET}"
  fi

  # Extract definition of done for quality gates
  sed -n '/[Dd]efinition [Oo]f [Dd]one/,/^#/p' "$plan_file" | head -20 > "$ARTIFACTS_DIR/dod.md" 2>/dev/null || true

  # ── Plan Validation Gate ──
  # Ask Claude to validate the plan before proceeding
  if command -v claude &>/dev/null && [[ -s "$plan_file" ]]; then
    local validation_attempts=0
    local max_validation_attempts=2
    local plan_valid=false

    while [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; do
      validation_attempts=$((validation_attempts + 1))
      info "Validating plan (attempt ${validation_attempts}/${max_validation_attempts})..."

      # Build enriched validation prompt with learned context
      local validation_extra=""

      # Inject rejected plan history from memory
      if type intelligence_search_memory &>/dev/null; then
        local rejected_plans
        rejected_plans=$(intelligence_search_memory "rejected plan validation failures for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
        if [[ -n "$rejected_plans" ]]; then
          validation_extra="${validation_extra}
## Previously Rejected Plans
These issues were found in past plan validations for similar tasks:
${rejected_plans}
"
        fi
      fi

      # Inject repo conventions contextually
      local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
      if [[ -f "$claudemd" ]]; then
        local conventions_summary
        conventions_summary=$(head -100 "$claudemd" 2>/dev/null | grep -E '^##|^-|^\*' | head -15 || true)
        if [[ -n "$conventions_summary" ]]; then
          validation_extra="${validation_extra}
## Repo Conventions
${conventions_summary}
"
        fi
      fi

      # Inject complexity estimate
      local complexity_hint=""
      if [[ -n "${INTELLIGENCE_COMPLEXITY:-}" && "${INTELLIGENCE_COMPLEXITY:-0}" -gt 0 ]]; then
        complexity_hint="This is estimated as complexity ${INTELLIGENCE_COMPLEXITY}/10. Plans for this complexity typically need ${INTELLIGENCE_COMPLEXITY} or more tasks."
      fi

      local validation_prompt="You are a plan validator. Review this implementation plan and determine if it is valid.

## Goal
${GOAL}
${complexity_hint:+
## Complexity Estimate
${complexity_hint}
}
## Plan
$(cat "$plan_file")
${validation_extra}
Evaluate:
1. Are all requirements from the goal addressed?
2. Is the plan decomposed into clear, achievable tasks?
3. Are the implementation steps specific enough to execute?

Respond with EXACTLY one of these on the first line:
VALID: true
VALID: false

Then explain your reasoning briefly."

      local validation_model="${plan_model:-opus}"
      local validation_result
      validation_result=$(claude --print --output-format text -p "$validation_prompt" --model "$validation_model" < /dev/null 2>"${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log" || true)
      parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log"

      # Save validation result
      echo "$validation_result" > "$ARTIFACTS_DIR/plan-validation.md"

      if echo "$validation_result" | head -5 | grep -qi "VALID: true"; then
        success "Plan validation passed"
        plan_valid=true
        break
      fi

      warn "Plan validation failed (attempt ${validation_attempts}/${max_validation_attempts})"

      # Analyze failure mode to decide how to recover
      local failure_mode="unknown"
      local validation_lower
      validation_lower=$(echo "$validation_result" | tr '[:upper:]' '[:lower:]')
      if echo "$validation_lower" | grep -qE 'requirements? unclear|goal.*vague|ambiguous|underspecified'; then
        failure_mode="requirements_unclear"
      elif echo "$validation_lower" | grep -qE 'insufficient detail|not specific|too high.level|missing.*steps|lacks.*detail'; then
        failure_mode="insufficient_detail"
      elif echo "$validation_lower" | grep -qE 'scope too (large|broad)|too many|overly complex|break.*down'; then
        failure_mode="scope_too_large"
      fi

      emit_event "plan.validation_failure" \
        "issue=${ISSUE_NUMBER:-0}" \
        "attempt=$validation_attempts" \
        "failure_mode=$failure_mode"

      # Track repeated failures — escalate if stuck in a loop
      if [[ -f "$ARTIFACTS_DIR/.plan-failure-sig.txt" ]]; then
        local prev_sig
        prev_sig=$(cat "$ARTIFACTS_DIR/.plan-failure-sig.txt" 2>/dev/null || true)
        if [[ "$failure_mode" == "$prev_sig" && "$failure_mode" != "unknown" ]]; then
          warn "Same validation failure mode repeated ($failure_mode) — escalating"
          emit_event "plan.validation_escalated" \
            "issue=${ISSUE_NUMBER:-0}" \
            "failure_mode=$failure_mode"
          break
        fi
      fi
      echo "$failure_mode" > "$ARTIFACTS_DIR/.plan-failure-sig.txt"

      if [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; then
        info "Regenerating plan with validation feedback (mode: ${failure_mode})..."

        # Tailor regeneration prompt based on failure mode
        local failure_guidance=""
        case "$failure_mode" in
          requirements_unclear)
            failure_guidance="The validator found the requirements unclear. Add more specific acceptance criteria, input/output examples, and concrete success metrics." ;;
          insufficient_detail)
            failure_guidance="The validator found the plan lacks detail. Break each task into smaller, more specific implementation steps with exact file paths and function names." ;;
          scope_too_large)
            failure_guidance="The validator found the scope too large. Focus on the minimal viable implementation and defer non-essential features to follow-up tasks." ;;
        esac

        local regen_prompt="${plan_prompt}

IMPORTANT: A previous plan was rejected by validation. Issues found:
$(echo "$validation_result" | tail -20)
${failure_guidance:+
GUIDANCE: ${failure_guidance}}

Fix these issues in the new plan."

        claude --print --model "$plan_model" --max-turns 25 \
          "$regen_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
        parse_claude_tokens "$_token_log"

        line_count=$(wc -l < "$plan_file" | xargs)
        info "Regenerated plan: ${DIM}$plan_file${RESET} (${line_count} lines)"
      fi
    done

    if [[ "$plan_valid" != "true" ]]; then
      warn "Plan validation did not pass after ${max_validation_attempts} attempts — proceeding anyway"
    fi

    emit_event "plan.validated" \
      "issue=${ISSUE_NUMBER:-0}" \
      "valid=${plan_valid}" \
      "attempts=${validation_attempts}"
  fi

  log_stage "plan" "Generated plan.md (${line_count} lines, $(echo "$checklist" | wc -l | xargs) tasks)"
}
532
+
533
#######################################
# Design stage: turn the generated plan into an Architecture Decision
# Record (ADR) via the claude CLI, enriched with memory/discovery context,
# then publish it to the GitHub issue and wiki.
# Globals:   GOAL, TASK_TYPE, TEST_CMD, MODEL, CLAUDE_MODEL, ARTIFACTS_DIR,
#            SCRIPT_DIR, PROJECT_ROOT, PIPELINE_CONFIG, ISSUE_NUMBER (read);
#            CURRENT_STAGE_ID (written)
# Outputs:   design.md under $ARTIFACTS_DIR
# Returns:   0 on success (or when skipped for missing plan), 1 on failure
#######################################
stage_design() {
  CURRENT_STAGE_ID="design"
  local plan_file="$ARTIFACTS_DIR/plan.md"
  local design_file="$ARTIFACTS_DIR/design.md"

  if [[ ! -s "$plan_file" ]]; then
    warn "No plan found — skipping design stage"
    return 0
  fi

  if ! command -v claude &>/dev/null; then
    error "Claude CLI not found — cannot generate design"
    return 1
  fi

  info "Generating Architecture Decision Record..."

  # Memory integration — inject context if memory system available
  local memory_context=""
  if type intelligence_search_memory &>/dev/null; then
    local mem_dir="${HOME}/.shipwright/memory"
    memory_context=$(intelligence_search_memory "design stage architecture patterns for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
  fi
  if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
    memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "design" 2>/dev/null) || true
  fi

  # Inject cross-pipeline discoveries for design stage
  local design_discoveries=""
  if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
    design_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
  fi

  # Inject architecture model patterns if available
  local arch_context=""
  local repo_hash
  repo_hash=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local arch_model_file="${HOME}/.shipwright/memory/${repo_hash}/architecture.json"
  if [[ -f "$arch_model_file" ]]; then
    local arch_patterns
    arch_patterns=$(jq -r '
      [.patterns // [] | .[] | "- \(.name // "unnamed"): \(.description // "no description")"] | join("\n")
    ' "$arch_model_file" 2>/dev/null) || true
    local arch_layers
    arch_layers=$(jq -r '
      [.layers // [] | .[] | "- \(.name // "unnamed"): \(.path // "")"] | join("\n")
    ' "$arch_model_file" 2>/dev/null) || true
    if [[ -n "$arch_patterns" || -n "$arch_layers" ]]; then
      arch_context="Previous designs in this repo follow these patterns:
${arch_patterns:+Patterns:
${arch_patterns}
}${arch_layers:+Layers:
${arch_layers}}"
    fi
  fi

  # Inject rejected design approaches and anti-patterns from memory
  local design_antipatterns=""
  if type intelligence_search_memory &>/dev/null; then
    local rejected_designs
    rejected_designs=$(intelligence_search_memory "rejected design approaches anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
    if [[ -n "$rejected_designs" ]]; then
      design_antipatterns="
## Rejected Approaches (from past reviews)
These design approaches were rejected in past reviews. Avoid repeating them:
${rejected_designs}
"
    fi
  fi

  # Build design prompt with plan + project context
  local project_lang
  project_lang=$(detect_project_lang)

  local design_prompt="You are a senior software architect. Review the implementation plan below and produce an Architecture Decision Record (ADR).

## Goal
${GOAL}

## Implementation Plan
$(cat "$plan_file")

## Project Context
- Language: ${project_lang}
- Test command: ${TEST_CMD:-not configured}
- Task type: ${TASK_TYPE:-feature}
${memory_context:+
## Historical Context (from memory)
${memory_context}
}${arch_context:+
## Architecture Model (from previous designs)
${arch_context}
}${design_antipatterns}${design_discoveries:+
## Discoveries from Other Pipelines
${design_discoveries}
}
## Required Output — Architecture Decision Record

Produce this EXACT format:

# Design: ${GOAL}

## Context
[What problem we're solving, constraints from the codebase]

## Decision
[The chosen approach — be specific about patterns, data flow, error handling]

## Alternatives Considered
1. [Alternative A] — Pros: ... / Cons: ...
2. [Alternative B] — Pros: ... / Cons: ...

## Implementation Plan
- Files to create: [list with full paths]
- Files to modify: [list with full paths]
- Dependencies: [new deps if any]
- Risk areas: [fragile code, performance concerns]

## Validation Criteria
- [ ] [How we'll know the design is correct — testable criteria]
- [ ] [Additional validation items]

Be concrete and specific. Reference actual file paths in the codebase. Consider edge cases and failure modes."

  # Model resolution order: pipeline config → CLI --model → default → router.
  local design_model
  design_model=$(jq -r --arg id "design" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -n "$MODEL" ]] && design_model="$MODEL"
  [[ -z "$design_model" || "$design_model" == "null" ]] && design_model="opus"
  # Intelligence model routing (when no explicit CLI --model override)
  if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
    design_model="$CLAUDE_MODEL"
  fi

  local _token_log="${ARTIFACTS_DIR}/.claude-tokens-design.log"
  claude --print --model "$design_model" --max-turns 25 \
    "$design_prompt" < /dev/null > "$design_file" 2>"$_token_log" || true
  parse_claude_tokens "$_token_log"

  if [[ ! -s "$design_file" ]]; then
    error "Design generation failed — empty output"
    return 1
  fi

  # Validate design content — detect API/CLI errors masquerading as designs
  local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
  _design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
  if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
    error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
    return 1
  fi

  local line_count
  line_count=$(wc -l < "$design_file" | xargs)
  if [[ "$line_count" -lt 3 ]]; then
    error "Design too short (${line_count} lines) — likely an error, not a real design"
    return 1
  fi
  info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"

  # Extract file lists for build stage awareness.
  # [[:space:]] (not \s): \s is a GNU grep extension unsupported on BSD/macOS grep.
  local files_to_create files_to_modify
  files_to_create=$(sed -n '/Files to create/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^[[:space:]]*-' | head -20 || true)
  files_to_modify=$(sed -n '/Files to modify/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^[[:space:]]*-' | head -20 || true)

  if [[ -n "$files_to_create" || -n "$files_to_modify" ]]; then
    # printf with explicit newlines: joining the two lists with a space would
    # merge the last line of one with the first of the other, undercounting by one.
    info "Design scope: ${DIM}$(printf '%s\n%s\n' "$files_to_create" "$files_to_modify" | grep -c '^[[:space:]]*-' || echo 0) file(s)${RESET}"
  fi

  # Post design to GitHub issue
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local design_summary
    design_summary=$(head -60 "$design_file")
    gh_comment_issue "$ISSUE_NUMBER" "## 📐 Architecture Decision Record

<details>
<summary>Click to expand ADR (${line_count} lines)</summary>

${design_summary}

</details>

---
_Generated by \`shipwright pipeline\` design stage at $(now_iso)_"
  fi

  # Push design to wiki
  gh_wiki_page "Pipeline-Design-${ISSUE_NUMBER:-inline}" "$(<"$design_file")"

  log_stage "design" "Generated design.md (${line_count} lines)"
}
723
+
724
#######################################
# Build stage: assemble an enriched goal prompt and drive the `sw loop`
# build agent with options resolved from the pipeline config and CLI
# overrides. Context injected into the prompt (each section best-effort,
# skipped silently when its source is unavailable): memory lessons,
# cross-pipeline discoveries, the task list, GitHub file hotspots and
# security alerts, a coverage baseline, and predictive prevention hints.
# Globals read: GOAL ARTIFACTS_DIR TASKS_FILE PIPELINE_CONFIG MODEL AGENTS
#   TEST_CMD BASE_BRANCH ISSUE_NUMBER SCRIPT_DIR PROJECT_ROOT PIPELINE_NAME
#   and CI/override env vars (CI_MODE, MAX_ITERATIONS_OVERRIDE, …)
# Globals written: CURRENT_STAGE_ID PIPELINE_JOB_ID TOTAL_INPUT_TOKENS
#   TOTAL_OUTPUT_TOKENS TOTAL_COST_USD
# Returns: 0 on success; 1 when the build loop fails
#######################################
stage_build() {
  local plan_file="$ARTIFACTS_DIR/plan.md"
  local design_file="$ARTIFACTS_DIR/design.md"
  local dod_file="$ARTIFACTS_DIR/dod.md"
  local loop_args=()

  # Memory integration — inject context if memory system available.
  # Prefer the sourced intelligence function; fall back to the sw-memory.sh
  # helper script when the function is absent or returned nothing.
  local memory_context=""
  if type intelligence_search_memory &>/dev/null 2>&1; then
    local mem_dir="${HOME}/.shipwright/memory"
    memory_context=$(intelligence_search_memory "build stage for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
  fi
  if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
    memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "build" 2>/dev/null) || true
  fi

  # Build enriched goal with compact context (avoids prompt bloat)
  local enriched_goal
  enriched_goal=$(_pipeline_compact_goal "$GOAL" "$plan_file" "$design_file")

  # Inject memory context
  if [[ -n "$memory_context" ]]; then
    enriched_goal="${enriched_goal}

Historical context (lessons from previous pipelines):
${memory_context}"
  fi

  # Inject cross-pipeline discoveries for build stage
  if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
    local build_discoveries
    build_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "src/*,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
    if [[ -n "$build_discoveries" ]]; then
      enriched_goal="${enriched_goal}

Discoveries from other pipelines:
${build_discoveries}"
    fi
  fi

  # Add task list context
  if [[ -s "$TASKS_FILE" ]]; then
    enriched_goal="${enriched_goal}

Task tracking (check off items as you complete them):
$(cat "$TASKS_FILE")"
  fi

  # Inject file hotspots from GitHub intelligence
  if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_file_change_frequency &>/dev/null 2>&1; then
    local build_hotspots
    build_hotspots=$(gh_file_change_frequency 2>/dev/null | head -5 || true)
    if [[ -n "$build_hotspots" ]]; then
      enriched_goal="${enriched_goal}

File hotspots (most frequently changed — review these carefully):
${build_hotspots}"
    fi
  fi

  # Inject security alerts context
  if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_security_alerts &>/dev/null 2>&1; then
    local build_alerts
    build_alerts=$(gh_security_alerts 2>/dev/null | head -3 || true)
    if [[ -n "$build_alerts" ]]; then
      enriched_goal="${enriched_goal}

Active security alerts (do not introduce new vulnerabilities):
${build_alerts}"
    fi
  fi

  # Inject coverage baseline (keyed by a 12-char hash of the repo path)
  local repo_hash_build
  repo_hash_build=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local coverage_file_build="${HOME}/.shipwright/baselines/${repo_hash_build}/coverage.json"
  if [[ -f "$coverage_file_build" ]]; then
    local coverage_baseline
    coverage_baseline=$(jq -r '.coverage_percent // empty' "$coverage_file_build" 2>/dev/null || true)
    if [[ -n "$coverage_baseline" ]]; then
      enriched_goal="${enriched_goal}

Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
    fi
  fi

  # Predictive: inject prevention hints when risk/memory patterns suggest build-stage failures
  if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
    local issue_json_build="{}"
    [[ -n "${ISSUE_NUMBER:-}" ]] && issue_json_build=$(jq -n --arg title "${GOAL:-}" --arg num "${ISSUE_NUMBER:-}" '{title: $title, number: $num}')
    local prevention_text
    prevention_text=$(bash "$SCRIPT_DIR/sw-predictive.sh" inject-prevention "build" "$issue_json_build" 2>/dev/null || true)
    if [[ -n "$prevention_text" ]]; then
      enriched_goal="${enriched_goal}

${prevention_text}"
    fi
  fi

  # The enriched goal is the first (positional) argument to `sw loop`.
  loop_args+=("$enriched_goal")

  # Build loop args from pipeline config + CLI overrides
  CURRENT_STAGE_ID="build"

  # Test command: CLI override → stage config → defaults → auto-detect.
  local test_cmd="${TEST_CMD}"
  if [[ -z "$test_cmd" ]]; then
    test_cmd=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.test_cmd) // .defaults.test_cmd // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
    [[ "$test_cmd" == "null" ]] && test_cmd=""
  fi
  # Auto-detect if still empty
  if [[ -z "$test_cmd" ]]; then
    test_cmd=$(detect_test_cmd)
  fi

  local max_iter
  max_iter=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.max_iterations) // 20' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$max_iter" || "$max_iter" == "null" ]] && max_iter=20
  # CLI --max-iterations override (from CI strategy engine)
  [[ -n "${MAX_ITERATIONS_OVERRIDE:-}" ]] && max_iter="$MAX_ITERATIONS_OVERRIDE"

  local agents="${AGENTS}"
  if [[ -z "$agents" ]]; then
    agents=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.agents) // .defaults.agents // 1' "$PIPELINE_CONFIG" 2>/dev/null) || true
    [[ -z "$agents" || "$agents" == "null" ]] && agents=1
  fi

  # Intelligence: suggest parallelism if design indicates independent work
  # (advisory only — emits an event and a hint, does not change agent count)
  if [[ "${agents:-1}" -le 1 ]] && [[ -s "$ARTIFACTS_DIR/design.md" ]]; then
    local design_lower
    design_lower=$(tr '[:upper:]' '[:lower:]' < "$ARTIFACTS_DIR/design.md" 2>/dev/null || true)
    if echo "$design_lower" | grep -qE 'independent (files|modules|components|services)|separate (modules|packages|directories)|parallel|no shared state'; then
      info "Design mentions independent modules — consider --agents 2 for parallelism"
      emit_event "build.parallelism_suggested" "issue=${ISSUE_NUMBER:-0}" "current_agents=$agents"
    fi
  fi

  local audit
  audit=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.audit) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  local quality
  quality=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.quality_gates) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true

  # Model resolution precedence: CLI --model → config default → "opus",
  # then CLAUDE_MODEL routing, then recruit recommendation (both only when
  # no explicit CLI override was given).
  local build_model="${MODEL}"
  if [[ -z "$build_model" ]]; then
    build_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
    [[ -z "$build_model" || "$build_model" == "null" ]] && build_model="opus"
  fi
  # Intelligence model routing (when no explicit CLI --model override)
  if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
    build_model="$CLAUDE_MODEL"
  fi

  # Recruit-powered model selection (when no explicit override)
  if [[ -z "$MODEL" ]] && [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
    local _recruit_goal="${GOAL:-}"
    if [[ -n "$_recruit_goal" ]]; then
      local _recruit_match
      _recruit_match=$(bash "$SCRIPT_DIR/sw-recruit.sh" match --json "$_recruit_goal" 2>/dev/null) || true
      if [[ -n "$_recruit_match" ]]; then
        local _recruit_model
        _recruit_model=$(echo "$_recruit_match" | jq -r '.model // ""' 2>/dev/null) || true
        if [[ -n "$_recruit_model" && "$_recruit_model" != "null" && "$_recruit_model" != "" ]]; then
          info "Recruit recommends model: ${CYAN}${_recruit_model}${RESET} for this task"
          build_model="$_recruit_model"
        fi
      fi
    fi
  fi

  [[ -n "$test_cmd" && "$test_cmd" != "null" ]] && loop_args+=(--test-cmd "$test_cmd")
  loop_args+=(--max-iterations "$max_iter")
  loop_args+=(--model "$build_model")
  # 2>/dev/null guards the numeric comparison against a non-numeric $agents
  [[ "$agents" -gt 1 ]] 2>/dev/null && loop_args+=(--agents "$agents")

  # Quality gates: always enabled in CI, otherwise from template config
  if [[ "${CI_MODE:-false}" == "true" ]]; then
    loop_args+=(--audit --audit-agent --quality-gates)
  else
    [[ "$audit" == "true" ]] && loop_args+=(--audit --audit-agent)
    [[ "$quality" == "true" ]] && loop_args+=(--quality-gates)
  fi

  # Session restart capability
  [[ -n "${MAX_RESTARTS_OVERRIDE:-}" ]] && loop_args+=(--max-restarts "$MAX_RESTARTS_OVERRIDE")
  # Fast test mode
  [[ -n "${FAST_TEST_CMD_OVERRIDE:-}" ]] && loop_args+=(--fast-test-cmd "$FAST_TEST_CMD_OVERRIDE")

  # Definition of Done: use plan-extracted DoD if available
  [[ -s "$dod_file" ]] && loop_args+=(--definition-of-done "$dod_file")

  # Skip permissions in CI (no interactive terminal)
  [[ "${CI_MODE:-false}" == "true" ]] && loop_args+=(--skip-permissions)

  info "Starting build loop: ${DIM}shipwright loop${RESET} (max ${max_iter} iterations, ${agents} agent(s))"

  # Post build start to GitHub
  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "🔨 **Build started** — \`shipwright loop\` with ${max_iter} max iterations, ${agents} agent(s), model: ${build_model}"
  fi

  local _token_log="${ARTIFACTS_DIR}/.claude-tokens-build.log"
  export PIPELINE_JOB_ID="${PIPELINE_NAME:-pipeline-$$}"
  # stdin redirected from /dev/null so the loop cannot block on a prompt;
  # stderr captured for token accounting.
  sw loop "${loop_args[@]}" < /dev/null 2>"$_token_log" || {
    # NOTE(review): _loop_exit is captured but not currently used below.
    local _loop_exit=$?
    parse_claude_tokens "$_token_log"

    # Detect context exhaustion from progress file
    local _progress_file="${PWD}/.claude/loop-logs/progress.md"
    if [[ -f "$_progress_file" ]]; then
      local _prog_tests
      _prog_tests=$(grep -oE 'Tests passing: (true|false)' "$_progress_file" 2>/dev/null | awk '{print $NF}' || echo "unknown")
      if [[ "$_prog_tests" != "true" ]]; then
        warn "Build loop exhausted with failing tests (context exhaustion)"
        emit_event "pipeline.context_exhaustion" "issue=${ISSUE_NUMBER:-0}" "stage=build"
        # Write flag for daemon retry logic
        mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
        echo "context_exhaustion" > "$ARTIFACTS_DIR/failure-reason.txt" 2>/dev/null || true
      fi
    fi

    error "Build loop failed"
    return 1
  }
  parse_claude_tokens "$_token_log"

  # Read accumulated token counts from build loop (written by sw-loop.sh)
  local _loop_token_file="${PROJECT_ROOT}/.claude/loop-logs/loop-tokens.json"
  if [[ -f "$_loop_token_file" ]] && command -v jq &>/dev/null; then
    local _loop_in _loop_out _loop_cost
    _loop_in=$(jq -r '.input_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
    _loop_out=$(jq -r '.output_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
    _loop_cost=$(jq -r '.cost_usd // 0' "$_loop_token_file" 2>/dev/null || echo "0")
    TOTAL_INPUT_TOKENS=$(( TOTAL_INPUT_TOKENS + ${_loop_in:-0} ))
    TOTAL_OUTPUT_TOKENS=$(( TOTAL_OUTPUT_TOKENS + ${_loop_out:-0} ))
    # Cost is replaced (not accumulated) — the loop file holds the total.
    if [[ -n "$_loop_cost" && "$_loop_cost" != "0" && "$_loop_cost" != "null" ]]; then
      TOTAL_COST_USD="${_loop_cost}"
    fi
    if [[ "${_loop_in:-0}" -gt 0 || "${_loop_out:-0}" -gt 0 ]]; then
      info "Build loop tokens: in=${_loop_in} out=${_loop_out} cost=\$${_loop_cost:-0}"
    fi
  fi

  # Count commits made during build
  local commit_count
  commit_count=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | wc -l | xargs)
  info "Build produced ${BOLD}$commit_count${RESET} commit(s)"

  # Commit quality evaluation when intelligence is enabled (haiku scores
  # the last 20 commit messages 0-100; result is emitted as an event)
  if type intelligence_search_memory &>/dev/null 2>&1 && command -v claude &>/dev/null && [[ "${commit_count:-0}" -gt 0 ]]; then
    local commit_msgs
    commit_msgs=$(git log --format="%s" "${BASE_BRANCH}..HEAD" 2>/dev/null | head -20)
    local quality_score
    quality_score=$(claude --print --output-format text -p "Rate the quality of these git commit messages on a scale of 0-100. Consider: focus (one thing per commit), clarity (describes the why), atomicity (small logical units). Reply with ONLY a number 0-100.

Commit messages:
${commit_msgs}" --model haiku < /dev/null 2>/dev/null || true)
    # Keep only a leading integer; model chatter would otherwise break -lt
    quality_score=$(echo "$quality_score" | grep -oE '^[0-9]+' | head -1 || true)
    if [[ -n "$quality_score" ]]; then
      emit_event "build.commit_quality" \
        "issue=${ISSUE_NUMBER:-0}" \
        "score=$quality_score" \
        "commit_count=$commit_count"
      if [[ "$quality_score" -lt 40 ]] 2>/dev/null; then
        warn "Commit message quality low (score: ${quality_score}/100)"
      else
        info "Commit quality score: ${quality_score}/100"
      fi
    fi
  fi

  log_stage "build" "Build loop completed ($commit_count commits)"
}
995
+
996
#######################################
# Test stage: resolve the test command (CLI override → stage config →
# config defaults → auto-detection), run it, optionally enforce a coverage
# minimum, report results to the GitHub issue, and persist a coverage
# summary JSON for the pre-deploy gate.
# Globals read: TEST_CMD PIPELINE_CONFIG ARTIFACTS_DIR ISSUE_NUMBER DIM RESET
# Globals written: CURRENT_STAGE_ID
# Outputs: $ARTIFACTS_DIR/test-results.log, $ARTIFACTS_DIR/test-coverage.json
# Returns: 0 on pass or skip, 1 on test failure or coverage below minimum
#######################################
stage_test() {
  CURRENT_STAGE_ID="test"
  local test_cmd="${TEST_CMD}"
  if [[ -z "$test_cmd" ]]; then
    test_cmd=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.test_cmd) // .defaults.test_cmd // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
    [[ -z "$test_cmd" || "$test_cmd" == "null" ]] && test_cmd=""
  fi
  # Auto-detect
  if [[ -z "$test_cmd" ]]; then
    test_cmd=$(detect_test_cmd)
  fi
  if [[ -z "$test_cmd" ]]; then
    warn "No test command found — skipping test stage"
    return 0
  fi

  local coverage_min
  coverage_min=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.coverage_min) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$coverage_min" || "$coverage_min" == "null" ]] && coverage_min=0

  local test_log="$ARTIFACTS_DIR/test-results.log"

  info "Running tests: ${DIM}$test_cmd${RESET}"
  local test_exit=0
  bash -c "$test_cmd" > "$test_log" 2>&1 || test_exit=$?

  if [[ "$test_exit" -eq 0 ]]; then
    success "Tests passed"
  else
    error "Tests failed (exit code: $test_exit)"
    # Extract most relevant error section (assertion failures, stack traces)
    local relevant_output=""
    relevant_output=$(grep -A5 -E 'FAIL|AssertionError|Expected.*but.*got|Error:|panic:|assert' "$test_log" 2>/dev/null | tail -40 || true)
    if [[ -z "$relevant_output" ]]; then
      relevant_output=$(tail -40 "$test_log")
    fi
    echo "$relevant_output"

    # Post failure to GitHub with more context (full log when short,
    # head + tail excerpt when long)
    if [[ -n "$ISSUE_NUMBER" ]]; then
      local log_lines
      log_lines=$(wc -l < "$test_log" 2>/dev/null || echo "0")
      local log_excerpt
      if [[ "$log_lines" -lt 60 ]]; then
        log_excerpt="$(cat "$test_log" 2>/dev/null || true)"
      else
        log_excerpt="$(head -20 "$test_log" 2>/dev/null || true)
... (${log_lines} lines total, showing head + tail) ...
$(tail -30 "$test_log" 2>/dev/null || true)"
      fi
      gh_comment_issue "$ISSUE_NUMBER" "❌ **Tests failed** (exit code: $test_exit, ${log_lines} lines)
\`\`\`
${log_excerpt}
\`\`\`"
    fi
    return 1
  fi

  # Coverage check — only enforce when coverage data is actually detected
  local coverage=""
  if [[ "$coverage_min" -gt 0 ]] 2>/dev/null; then
    coverage=$(parse_coverage_from_output "$test_log")
    if [[ -z "$coverage" ]]; then
      # No coverage data found — skip enforcement (project may not have coverage tooling)
      info "No coverage data detected — skipping coverage check (min: ${coverage_min}%)"
    elif awk -v cov="$coverage" -v min="$coverage_min" 'BEGIN{exit !(cov < min)}' 2>/dev/null; then
      warn "Coverage ${coverage}% below minimum ${coverage_min}%"
      return 1
    else
      info "Coverage: ${coverage}% (min: ${coverage_min}%)"
    fi
  fi

  # Post test results to GitHub (tail of log, ANSI colors stripped)
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local test_summary
    test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
    local cov_line=""
    [[ -n "$coverage" ]] && cov_line="
**Coverage:** ${coverage}%"
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Tests passed**${cov_line}
<details>
<summary>Test output</summary>

\`\`\`
${test_summary}
\`\`\`
</details>"
  fi

  # Write coverage summary for pre-deploy gate.
  # FIX: prefer the value parsed by parse_coverage_from_output above —
  # the previous behavior grabbed the FIRST "NN%" token anywhere in the raw
  # log, which could be an unrelated percentage (progress bar, pass rate).
  local _cov_pct=0
  if [[ -n "$coverage" ]]; then
    # Truncate a possible float (e.g. "85.3") to an integer for printf %d
    _cov_pct="${coverage%%.*}"
  elif [[ -f "$test_log" ]]; then
    # Fallback heuristic when enforcement didn't run: first percentage in log
    _cov_pct=$(grep -oE '[0-9]+%' "$test_log" 2>/dev/null | head -1 | tr -d '%' || true)
  fi
  # Guarantee a plain integer so printf '%d' cannot fail
  [[ "$_cov_pct" =~ ^[0-9]+$ ]] || _cov_pct=0
  # Atomic write: render to a temp file, then move into place
  local _cov_tmp
  _cov_tmp=$(mktemp "${ARTIFACTS_DIR}/test-coverage.json.tmp.XXXXXX")
  printf '{"coverage_pct":%d}' "${_cov_pct:-0}" > "$_cov_tmp" && mv "$_cov_tmp" "$ARTIFACTS_DIR/test-coverage.json" || rm -f "$_cov_tmp"

  log_stage "test" "Tests passed${coverage:+ (coverage: ${coverage}%)}"
}
1098
+
1099
#######################################
# Review stage: AI code review of the branch diff with contextual prompt
# enrichment, severity extraction, and two blocking gates (oversight script
# and a critical/security gate). Posts results to the GitHub issue.
# Globals read: ARTIFACTS_DIR BASE_BRANCH GIT_BRANCH MODEL CLAUDE_MODEL
#   GOAL ISSUE_NUMBER SCRIPT_DIR PROJECT_ROOT PIPELINE_CONFIG PIPELINE_NAME
#   CI_MODE SKIP_GATES NO_GITHUB and color vars
# Globals written: CURRENT_STAGE_ID
# Outputs: review-diff.patch, review.md, review-blockers.md (when blocked)
# Returns: 0 on clean/soft outcomes, 1 when a gate blocks the pipeline
#######################################
stage_review() {
  CURRENT_STAGE_ID="review"
  local diff_file="$ARTIFACTS_DIR/review-diff.patch"
  local review_file="$ARTIFACTS_DIR/review.md"

  # Diff against base branch; fall back to the last 5 commits when the
  # three-dot range is unavailable (e.g. detached or missing base ref)
  git diff "${BASE_BRANCH}...${GIT_BRANCH}" > "$diff_file" 2>/dev/null || \
    git diff HEAD~5 > "$diff_file" 2>/dev/null || true

  if [[ ! -s "$diff_file" ]]; then
    warn "No diff found — skipping review"
    return 0
  fi

  if ! command -v claude &>/dev/null; then
    warn "Claude CLI not found — skipping AI review"
    return 0
  fi

  local diff_stats
  diff_stats=$(git diff --stat "${BASE_BRANCH}...${GIT_BRANCH}" 2>/dev/null | tail -1 || echo "")
  info "Running AI code review... ${DIM}($diff_stats)${RESET}"

  # Semantic risk scoring when intelligence is enabled
  if type intelligence_search_memory &>/dev/null 2>&1 && command -v claude &>/dev/null; then
    local diff_files
    diff_files=$(git diff --name-only "${BASE_BRANCH}...${GIT_BRANCH}" 2>/dev/null || true)
    local risk_score="low"
    # Fast heuristic: flag high-risk file patterns
    if echo "$diff_files" | grep -qiE 'migration|schema|auth|crypto|security|password|token|secret|\.env'; then
      risk_score="high"
    elif echo "$diff_files" | grep -qiE 'api|route|controller|middleware|hook'; then
      risk_score="medium"
    fi
    emit_event "review.risk_assessed" \
      "issue=${ISSUE_NUMBER:-0}" \
      "risk=$risk_score" \
      "files_changed=$(echo "$diff_files" | wc -l | xargs)"
    if [[ "$risk_score" == "high" ]]; then
      warn "High-risk changes detected (DB schema, auth, crypto, or secrets)"
    fi
  fi

  local review_model="${MODEL:-opus}"
  # Intelligence model routing (when no explicit CLI --model override)
  if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
    review_model="$CLAUDE_MODEL"
  fi

  # Build review prompt with project context. The severity keywords in the
  # prompt must match the grep patterns used for counting further below.
  local review_prompt="You are a senior code reviewer. Review this git diff thoroughly.

For each issue found, use this format:
- **[SEVERITY]** file:line — description

Severity levels: Critical, Bug, Security, Warning, Suggestion

Focus on:
1. Logic bugs and edge cases
2. Security vulnerabilities (injection, XSS, auth bypass, etc.)
3. Error handling gaps
4. Performance issues
5. Missing validation
6. Project convention violations (see conventions below)

Be specific. Reference exact file paths and line numbers. Only flag genuine issues.
If no issues are found, write: \"Review clean — no issues found.\"
"

  # Inject previous review findings and anti-patterns from memory
  if type intelligence_search_memory &>/dev/null 2>&1; then
    local review_memory
    review_memory=$(intelligence_search_memory "code review findings anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
    if [[ -n "$review_memory" ]]; then
      review_prompt+="
## Known Issues from Previous Reviews
These anti-patterns and issues have been found in past reviews of this codebase. Flag them if they recur:
${review_memory}
"
    fi
  fi

  # Inject project conventions if CLAUDE.md exists
  local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
  if [[ -f "$claudemd" ]]; then
    local conventions
    conventions=$(grep -A2 'Common Pitfalls\|Shell Standards\|Bash 3.2' "$claudemd" 2>/dev/null | head -20 || true)
    if [[ -n "$conventions" ]]; then
      review_prompt+="
## Project Conventions
${conventions}
"
    fi
  fi

  # Inject CODEOWNERS focus areas for review
  if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_codeowners &>/dev/null 2>&1; then
    local review_owners
    review_owners=$(gh_codeowners 2>/dev/null | head -10 || true)
    if [[ -n "$review_owners" ]]; then
      review_prompt+="
## Code Owners (focus areas)
${review_owners}
"
    fi
  fi

  # Inject Definition of Done if present
  local dod_file="$PROJECT_ROOT/.claude/DEFINITION-OF-DONE.md"
  if [[ -f "$dod_file" ]]; then
    review_prompt+="
## Definition of Done (verify these)
$(cat "$dod_file")
"
  fi

  review_prompt+="
## Diff to Review
$(cat "$diff_file")"

  # Build claude args — add --dangerously-skip-permissions in CI
  local review_args=(--print --model "$review_model" --max-turns 25)
  if [[ "${CI_MODE:-false}" == "true" ]]; then
    review_args+=(--dangerously-skip-permissions)
  fi

  claude "${review_args[@]}" "$review_prompt" < /dev/null > "$review_file" 2>"${ARTIFACTS_DIR}/.claude-tokens-review.log" || true
  parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-review.log"

  if [[ ! -s "$review_file" ]]; then
    warn "Review produced no output — check ${ARTIFACTS_DIR}/.claude-tokens-review.log for errors"
    return 0
  fi

  # Extract severity counts — try JSON structure first, then grep fallback
  local critical_count=0 bug_count=0 warning_count=0

  # Check if review output is structured JSON (e.g. from structured review tools)
  local json_parsed=false
  if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
    local j_critical j_bug j_warning
    j_critical=$(jq -r '.issues | map(select(.severity == "Critical")) | length' "$review_file" 2>/dev/null || echo "")
    if [[ -n "$j_critical" && "$j_critical" != "null" ]]; then
      critical_count="$j_critical"
      bug_count=$(jq -r '.issues | map(select(.severity == "Bug" or .severity == "Security")) | length' "$review_file" 2>/dev/null || echo "0")
      warning_count=$(jq -r '.issues | map(select(.severity == "Warning" or .severity == "Suggestion")) | length' "$review_file" 2>/dev/null || echo "0")
      json_parsed=true
    fi
  fi

  # Grep fallback for markdown-formatted review output
  # (grep -c prints "0" even on no-match exit 1, hence the || true + :-0)
  if [[ "$json_parsed" != "true" ]]; then
    critical_count=$(grep -ciE '\*\*\[?Critical\]?\*\*' "$review_file" 2>/dev/null || true)
    critical_count="${critical_count:-0}"
    bug_count=$(grep -ciE '\*\*\[?(Bug|Security)\]?\*\*' "$review_file" 2>/dev/null || true)
    bug_count="${bug_count:-0}"
    warning_count=$(grep -ciE '\*\*\[?(Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
    warning_count="${warning_count:-0}"
  fi
  local total_issues=$((critical_count + bug_count + warning_count))

  if [[ "$critical_count" -gt 0 ]]; then
    error "Review found ${BOLD}$critical_count critical${RESET} issue(s) — see $review_file"
  elif [[ "$bug_count" -gt 0 ]]; then
    warn "Review found $bug_count bug/security issue(s) — see ${DIM}$review_file${RESET}"
  elif [[ "$total_issues" -gt 0 ]]; then
    info "Review found $total_issues suggestion(s)"
  else
    success "Review clean"
  fi

  # ── Oversight gate: pipeline review/quality stages block on verdict ──
  if [[ -x "$SCRIPT_DIR/sw-oversight.sh" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
    local reject_reason=""
    local _sec_count
    _sec_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
    _sec_count="${_sec_count:-0}"
    local _blocking=$((critical_count + _sec_count))
    [[ "$_blocking" -gt 0 ]] && reject_reason="Review found ${_blocking} critical/security issue(s)"
    if ! bash "$SCRIPT_DIR/sw-oversight.sh" gate --diff "$diff_file" --description "${GOAL:-Pipeline review}" --reject-if "$reject_reason" >/dev/null 2>&1; then
      error "Oversight gate rejected — blocking pipeline"
      emit_event "review.oversight_blocked" "issue=${ISSUE_NUMBER:-0}"
      log_stage "review" "BLOCKED: oversight gate rejected"
      return 1
    fi
  fi

  # ── Review Blocking Gate ──
  # Block pipeline on critical/security issues unless compound_quality handles them
  local security_count
  security_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
  security_count="${security_count:-0}"

  local blocking_issues=$((critical_count + security_count))

  if [[ "$blocking_issues" -gt 0 ]]; then
    # Check if compound_quality stage is enabled — if so, let it handle issues
    local compound_enabled="false"
    if [[ -n "${PIPELINE_CONFIG:-}" && -f "${PIPELINE_CONFIG:-/dev/null}" ]]; then
      compound_enabled=$(jq -r '.stages[] | select(.id == "compound_quality") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null) || true
      [[ -z "$compound_enabled" || "$compound_enabled" == "null" ]] && compound_enabled="false"
    fi

    # Check if this is a fast template (don't block fast pipelines)
    local is_fast="false"
    if [[ "${PIPELINE_NAME:-}" == "fast" || "${PIPELINE_NAME:-}" == "hotfix" ]]; then
      is_fast="true"
    fi

    if [[ "$compound_enabled" == "true" ]]; then
      info "Review found ${blocking_issues} critical/security issue(s) — compound_quality stage will handle"
    elif [[ "$is_fast" == "true" ]]; then
      warn "Review found ${blocking_issues} critical/security issue(s) — fast template, not blocking"
    elif [[ "${SKIP_GATES:-false}" == "true" ]]; then
      warn "Review found ${blocking_issues} critical/security issue(s) — skip-gates mode, not blocking"
    else
      error "Review found ${BOLD}${blocking_issues} critical/security issue(s)${RESET} — blocking pipeline"
      emit_event "review.blocked" \
        "issue=${ISSUE_NUMBER:-0}" \
        "critical=${critical_count}" \
        "security=${security_count}"

      # Save blocking issues for self-healing context
      grep -iE '\*\*\[?(Critical|Security)\]?\*\*' "$review_file" > "$ARTIFACTS_DIR/review-blockers.md" 2>/dev/null || true

      # Post review to GitHub before failing
      if [[ -n "$ISSUE_NUMBER" ]]; then
        local review_summary
        review_summary=$(head -40 "$review_file")
        gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review — ❌ Blocked

**Stats:** $diff_stats
**Blocking issues:** ${blocking_issues} (${critical_count} critical, ${security_count} security)

<details>
<summary>Review details</summary>

${review_summary}

</details>

_Pipeline will attempt self-healing rebuild._"
      fi

      log_stage "review" "BLOCKED: $blocking_issues critical/security issues found"
      return 1
    fi
  fi

  # Post review to GitHub issue
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local review_summary
    review_summary=$(head -40 "$review_file")
    gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review

**Stats:** $diff_stats
**Issues found:** $total_issues (${critical_count} critical, ${bug_count} bugs, ${warning_count} suggestions)

<details>
<summary>Review details</summary>

${review_summary}

</details>"
  fi

  log_stage "review" "AI review complete ($total_issues issues: $critical_count critical, $bug_count bugs, $warning_count suggestions)"
}
1366
+
1367
+ stage_pr() {
1368
+ CURRENT_STAGE_ID="pr"
1369
+ local plan_file="$ARTIFACTS_DIR/plan.md"
1370
+ local test_log="$ARTIFACTS_DIR/test-results.log"
1371
+ local review_file="$ARTIFACTS_DIR/review.md"
1372
+
1373
+ # ── PR Hygiene Checks (informational) ──
1374
+ local hygiene_commit_count
1375
+ hygiene_commit_count=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | wc -l | xargs)
1376
+ hygiene_commit_count="${hygiene_commit_count:-0}"
1377
+
1378
+ if [[ "$hygiene_commit_count" -gt 20 ]]; then
1379
+ warn "PR has ${hygiene_commit_count} commits — consider squashing before merge"
1380
+ fi
1381
+
1382
+ # Check for WIP/fixup/squash commits (expanded patterns)
1383
+ local wip_commits
1384
+ wip_commits=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | grep -ciE '^[0-9a-f]+ (WIP|fixup!|squash!|TODO|HACK|TEMP|BROKEN|wip[:-]|temp[:-]|broken[:-]|do not merge)' || true)
1385
+ wip_commits="${wip_commits:-0}"
1386
+ if [[ "$wip_commits" -gt 0 ]]; then
1387
+ warn "Branch has ${wip_commits} WIP/fixup/squash/temp commit(s) — consider cleaning up"
1388
+ fi
1389
+
1390
+ # ── PR Quality Gate: reject PRs with no real code changes ──
1391
+ local real_files
1392
+ real_files=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null | grep -v '^\.claude/' | grep -v '^\.github/' || true)
1393
+ if [[ -z "$real_files" ]]; then
1394
+ error "No real code changes detected — only pipeline artifacts (.claude/ logs)."
1395
+ error "The build agent did not produce meaningful changes. Skipping PR creation."
1396
+ emit_event "pr.rejected" "issue=${ISSUE_NUMBER:-0}" "reason=no_real_changes"
1397
+ # Mark issue so auto-retry knows not to retry empty builds
1398
+ if [[ -n "${ISSUE_NUMBER:-}" && "${ISSUE_NUMBER:-0}" != "0" ]]; then
1399
+ gh issue comment "$ISSUE_NUMBER" --body "<!-- SHIPWRIGHT-NO-CHANGES: true -->" 2>/dev/null || true
1400
+ fi
1401
+ return 1
1402
+ fi
1403
+ local real_file_count
1404
+ real_file_count=$(echo "$real_files" | wc -l | xargs)
1405
+ info "PR quality gate: ${real_file_count} real file(s) changed"
1406
+
1407
+ # Commit any uncommitted changes left by the build agent
1408
+ if ! git diff --quiet 2>/dev/null || ! git diff --cached --quiet 2>/dev/null; then
1409
+ info "Committing remaining uncommitted changes..."
1410
+ git add -A 2>/dev/null || true
1411
+ git commit -m "chore: pipeline cleanup — commit remaining build changes" --no-verify 2>/dev/null || true
1412
+ fi
1413
+
1414
+ # Auto-rebase onto latest base branch before PR
1415
+ auto_rebase || {
1416
+ warn "Rebase/merge failed — pushing as-is"
1417
+ }
1418
+
1419
+ # Push branch
1420
+ info "Pushing branch: $GIT_BRANCH"
1421
+ git push -u origin "$GIT_BRANCH" --force-with-lease 2>/dev/null || {
1422
+ # Retry with regular push if force-with-lease fails (first push)
1423
+ git push -u origin "$GIT_BRANCH" 2>/dev/null || {
1424
+ error "Failed to push branch"
1425
+ return 1
1426
+ }
1427
+ }
1428
+
1429
+ # ── Developer Simulation (pre-PR review) ──
1430
+ local simulation_summary=""
1431
+ if type simulation_review &>/dev/null 2>&1; then
1432
+ local sim_enabled
1433
+ sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1434
+ # Also check daemon-config
1435
+ local daemon_cfg=".claude/daemon-config.json"
1436
+ if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
1437
+ sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
1438
+ fi
1439
+ if [[ "$sim_enabled" == "true" ]]; then
1440
+ info "Running developer simulation review..."
1441
+ local diff_for_sim
1442
+ diff_for_sim=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
1443
+ if [[ -n "$diff_for_sim" ]]; then
1444
+ local sim_result
1445
+ sim_result=$(simulation_review "$diff_for_sim" "${GOAL:-}" 2>/dev/null || echo "")
1446
+ if [[ -n "$sim_result" && "$sim_result" != *'"error"'* ]]; then
1447
+ echo "$sim_result" > "$ARTIFACTS_DIR/simulation-review.json"
1448
+ local sim_count
1449
+ sim_count=$(echo "$sim_result" | jq 'length' 2>/dev/null || echo "0")
1450
+ simulation_summary="**Developer simulation:** ${sim_count} reviewer concerns pre-addressed"
1451
+ success "Simulation complete: ${sim_count} concerns found and addressed"
1452
+ emit_event "simulation.complete" "issue=${ISSUE_NUMBER:-0}" "concerns=${sim_count}"
1453
+ else
1454
+ info "Simulation returned no actionable concerns"
1455
+ fi
1456
+ fi
1457
+ fi
1458
+ fi
1459
+
1460
+ # ── Architecture Validation (pre-PR check) ──
1461
+ local arch_summary=""
1462
+ if type architecture_validate_changes &>/dev/null 2>&1; then
1463
+ local arch_enabled
1464
+ arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1465
+ local daemon_cfg=".claude/daemon-config.json"
1466
+ if [[ "$arch_enabled" != "true" && -f "$daemon_cfg" ]]; then
1467
+ arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
1468
+ fi
1469
+ if [[ "$arch_enabled" == "true" ]]; then
1470
+ info "Validating architecture..."
1471
+ local diff_for_arch
1472
+ diff_for_arch=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
1473
+ if [[ -n "$diff_for_arch" ]]; then
1474
+ local arch_result
1475
+ arch_result=$(architecture_validate_changes "$diff_for_arch" "" 2>/dev/null || echo "")
1476
+ if [[ -n "$arch_result" && "$arch_result" != *'"error"'* ]]; then
1477
+ echo "$arch_result" > "$ARTIFACTS_DIR/architecture-validation.json"
1478
+ local violation_count
1479
+ violation_count=$(echo "$arch_result" | jq '[.violations[]? | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
1480
+ arch_summary="**Architecture validation:** ${violation_count} violations"
1481
+ if [[ "$violation_count" -gt 0 ]]; then
1482
+ warn "Architecture: ${violation_count} high/critical violations found"
1483
+ else
1484
+ success "Architecture validation passed"
1485
+ fi
1486
+ emit_event "architecture.validated" "issue=${ISSUE_NUMBER:-0}" "violations=${violation_count}"
1487
+ else
1488
+ info "Architecture validation returned no results"
1489
+ fi
1490
+ fi
1491
+ fi
1492
+ fi
1493
+
1494
+ # Pre-PR diff gate — verify meaningful code changes exist (not just bookkeeping)
1495
+ local real_changes
1496
+ real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
1497
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
1498
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
1499
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
1500
+ if [[ "${real_changes:-0}" -eq 0 ]]; then
1501
+ error "No meaningful code changes detected — only bookkeeping files modified"
1502
+ error "Refusing to create PR with zero real changes"
1503
+ return 1
1504
+ fi
1505
+ info "Pre-PR diff check: ${real_changes} real files changed"
1506
+
1507
+ # Build PR title — prefer GOAL over plan file first line
1508
+ # (plan file first line often contains Claude analysis text, not a clean title)
1509
+ local pr_title=""
1510
+ if [[ -n "${GOAL:-}" ]]; then
1511
+ pr_title=$(echo "$GOAL" | cut -c1-70)
1512
+ fi
1513
+ if [[ -z "$pr_title" ]] && [[ -s "$plan_file" ]]; then
1514
+ pr_title=$(head -1 "$plan_file" 2>/dev/null | sed 's/^#* *//' | cut -c1-70)
1515
+ fi
1516
+ [[ -z "$pr_title" ]] && pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
1517
+
1518
+ # Sanitize: reject PR titles that look like error messages
1519
+ if echo "$pr_title" | grep -qiE 'Invalid API|API key|authentication_error|rate_limit|CLI error|no useful output'; then
1520
+ warn "PR title looks like an error message: $pr_title"
1521
+ pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
1522
+ fi
1523
+
1524
+ # Build comprehensive PR body
1525
+ local plan_summary=""
1526
+ if [[ -s "$plan_file" ]]; then
1527
+ plan_summary=$(head -20 "$plan_file" 2>/dev/null | tail -15)
1528
+ fi
1529
+
1530
+ local test_summary=""
1531
+ if [[ -s "$test_log" ]]; then
1532
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
1533
+ fi
1534
+
1535
+ local review_summary=""
1536
+ if [[ -s "$review_file" ]]; then
1537
+ local total_issues=0
1538
+ # Try JSON structured output first
1539
+ if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
1540
+ total_issues=$(jq -r '.issues | length' "$review_file" 2>/dev/null || echo "0")
1541
+ fi
1542
+ # Grep fallback for markdown
1543
+ if [[ "${total_issues:-0}" -eq 0 ]]; then
1544
+ total_issues=$(grep -ciE '\*\*\[?(Critical|Bug|Security|Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
1545
+ total_issues="${total_issues:-0}"
1546
+ fi
1547
+ review_summary="**Code review:** $total_issues issues found"
1548
+ fi
1549
+
1550
+ local closes_line=""
1551
+ [[ -n "${GITHUB_ISSUE:-}" ]] && closes_line="Closes ${GITHUB_ISSUE}"
1552
+
1553
+ local diff_stats
1554
+ diff_stats=$(git diff --stat "${BASE_BRANCH}...${GIT_BRANCH}" 2>/dev/null | tail -1 || echo "")
1555
+
1556
+ local commit_count
1557
+ commit_count=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | wc -l | xargs)
1558
+
1559
+ local total_dur=""
1560
+ if [[ -n "$PIPELINE_START_EPOCH" ]]; then
1561
+ total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
1562
+ fi
1563
+
1564
+ local pr_body
1565
+ pr_body="$(cat <<EOF
1566
+ ## Summary
1567
+ ${plan_summary:-$GOAL}
1568
+
1569
+ ## Changes
1570
+ ${diff_stats}
1571
+ ${commit_count} commit(s) via \`shipwright pipeline\` (${PIPELINE_NAME})
1572
+
1573
+ ## Test Results
1574
+ \`\`\`
1575
+ ${test_summary:-No test output}
1576
+ \`\`\`
1577
+
1578
+ ${review_summary}
1579
+ ${simulation_summary}
1580
+ ${arch_summary}
1581
+
1582
+ ${closes_line}
1583
+
1584
+ ---
1585
+
1586
+ | Metric | Value |
1587
+ |--------|-------|
1588
+ | Pipeline | \`${PIPELINE_NAME}\` |
1589
+ | Duration | ${total_dur:-—} |
1590
+ | Model | ${MODEL:-opus} |
1591
+ | Agents | ${AGENTS:-1} |
1592
+
1593
+ Generated by \`shipwright pipeline\`
1594
+ EOF
1595
+ )"
1596
+
1597
+ # Build gh pr create args
1598
+ local pr_args=(--title "$pr_title" --body "$pr_body" --base "$BASE_BRANCH")
1599
+
1600
+ # Propagate labels from issue + CLI
1601
+ local all_labels="${LABELS}"
1602
+ if [[ -n "$ISSUE_LABELS" ]]; then
1603
+ if [[ -n "$all_labels" ]]; then
1604
+ all_labels="${all_labels},${ISSUE_LABELS}"
1605
+ else
1606
+ all_labels="$ISSUE_LABELS"
1607
+ fi
1608
+ fi
1609
+ if [[ -n "$all_labels" ]]; then
1610
+ pr_args+=(--label "$all_labels")
1611
+ fi
1612
+
1613
+ # Auto-detect or use provided reviewers
1614
+ local reviewers="${REVIEWERS}"
1615
+ if [[ -z "$reviewers" ]]; then
1616
+ reviewers=$(detect_reviewers)
1617
+ fi
1618
+ if [[ -n "$reviewers" ]]; then
1619
+ pr_args+=(--reviewer "$reviewers")
1620
+ info "Reviewers: ${DIM}$reviewers${RESET}"
1621
+ fi
1622
+
1623
+ # Propagate milestone
1624
+ if [[ -n "$ISSUE_MILESTONE" ]]; then
1625
+ pr_args+=(--milestone "$ISSUE_MILESTONE")
1626
+ info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
1627
+ fi
1628
+
1629
+ # Check for existing open PR on this branch to avoid duplicates (issue #12)
1630
+ local pr_url=""
1631
+ local existing_pr
1632
+ existing_pr=$(gh pr list --head "$GIT_BRANCH" --state open --json number,url --jq '.[0]' 2>/dev/null || echo "")
1633
+ if [[ -n "$existing_pr" && "$existing_pr" != "null" ]]; then
1634
+ local existing_pr_number existing_pr_url
1635
+ existing_pr_number=$(echo "$existing_pr" | jq -r '.number' 2>/dev/null || echo "")
1636
+ existing_pr_url=$(echo "$existing_pr" | jq -r '.url' 2>/dev/null || echo "")
1637
+ info "Updating existing PR #$existing_pr_number instead of creating duplicate"
1638
+ gh pr edit "$existing_pr_number" --title "$pr_title" --body "$pr_body" 2>/dev/null || true
1639
+ pr_url="$existing_pr_url"
1640
+ else
1641
+ info "Creating PR..."
1642
+ local pr_stderr pr_exit=0
1643
+ pr_url=$(gh pr create "${pr_args[@]}" 2>/tmp/shipwright-pr-stderr.txt) || pr_exit=$?
1644
+ pr_stderr=$(cat /tmp/shipwright-pr-stderr.txt 2>/dev/null || true)
1645
+ rm -f /tmp/shipwright-pr-stderr.txt
1646
+
1647
+ # gh pr create may return non-zero for reviewer issues but still create the PR
1648
+ if [[ "$pr_exit" -ne 0 ]]; then
1649
+ if [[ "$pr_url" == *"github.com"* ]]; then
1650
+ # PR was created but something non-fatal failed (e.g., reviewer not found)
1651
+ warn "PR created with warnings: ${pr_stderr:-unknown}"
1652
+ else
1653
+ error "PR creation failed: ${pr_stderr:-$pr_url}"
1654
+ return 1
1655
+ fi
1656
+ fi
1657
+ fi
1658
+
1659
+ success "PR created: ${BOLD}$pr_url${RESET}"
1660
+ echo "$pr_url" > "$ARTIFACTS_DIR/pr-url.txt"
1661
+
1662
+ # Extract PR number
1663
+ PR_NUMBER=$(echo "$pr_url" | grep -oE '[0-9]+$' || true)
1664
+
1665
+ # ── Intelligent Reviewer Selection (GraphQL-enhanced) ──
1666
+ if [[ "${NO_GITHUB:-false}" != "true" && -n "$PR_NUMBER" && -z "$reviewers" ]]; then
1667
+ local reviewer_assigned=false
1668
+
1669
+ # Try CODEOWNERS-based routing via GraphQL API
1670
+ if type gh_codeowners &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1671
+ local codeowners_json
1672
+ codeowners_json=$(gh_codeowners "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
1673
+ if [[ "$codeowners_json" != "[]" && -n "$codeowners_json" ]]; then
1674
+ local changed_files
1675
+ changed_files=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
1676
+ if [[ -n "$changed_files" ]]; then
1677
+ local co_reviewers
1678
+ co_reviewers=$(echo "$codeowners_json" | jq -r '.[].owners[]' 2>/dev/null | sort -u | head -3 || true)
1679
+ if [[ -n "$co_reviewers" ]]; then
1680
+ local rev
1681
+ while IFS= read -r rev; do
1682
+ rev="${rev#@}"
1683
+ [[ -n "$rev" ]] && gh pr edit "$PR_NUMBER" --add-reviewer "$rev" 2>/dev/null || true
1684
+ done <<< "$co_reviewers"
1685
+ info "Requested review from CODEOWNERS: $(echo "$co_reviewers" | tr '\n' ',' | sed 's/,$//')"
1686
+ reviewer_assigned=true
1687
+ fi
1688
+ fi
1689
+ fi
1690
+ fi
1691
+
1692
+ # Fallback: contributor-based routing via GraphQL API
1693
+ if [[ "$reviewer_assigned" != "true" ]] && type gh_contributors &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1694
+ local contributors_json
1695
+ contributors_json=$(gh_contributors "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
1696
+ local top_contributor
1697
+ top_contributor=$(echo "$contributors_json" | jq -r '.[0].login // ""' 2>/dev/null || echo "")
1698
+ local current_user
1699
+ current_user=$(gh api user --jq '.login' 2>/dev/null || echo "")
1700
+ if [[ -n "$top_contributor" && "$top_contributor" != "$current_user" ]]; then
1701
+ gh pr edit "$PR_NUMBER" --add-reviewer "$top_contributor" 2>/dev/null || true
1702
+ info "Requested review from top contributor: $top_contributor"
1703
+ reviewer_assigned=true
1704
+ fi
1705
+ fi
1706
+
1707
+ # Final fallback: auto-approve if no reviewers assigned
1708
+ if [[ "$reviewer_assigned" != "true" ]]; then
1709
+ gh pr review "$PR_NUMBER" --approve 2>/dev/null || warn "Could not auto-approve PR"
1710
+ fi
1711
+ fi
1712
+
1713
+ # Update issue with PR link
1714
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1715
+ gh_remove_label "$ISSUE_NUMBER" "pipeline/in-progress"
1716
+ gh_add_labels "$ISSUE_NUMBER" "pipeline/pr-created"
1717
+ gh_comment_issue "$ISSUE_NUMBER" "🎉 **PR created:** ${pr_url}
1718
+
1719
+ Pipeline duration so far: ${total_dur:-unknown}"
1720
+
1721
+ # Notify tracker of review/PR creation
1722
+ "$SCRIPT_DIR/sw-tracker.sh" notify "review" "$ISSUE_NUMBER" "$pr_url" 2>/dev/null || true
1723
+ fi
1724
+
1725
+ # Wait for CI if configured
1726
+ local wait_ci
1727
+ wait_ci=$(jq -r --arg id "pr" '(.stages[] | select(.id == $id) | .config.wait_ci) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
1728
+ if [[ "$wait_ci" == "true" ]]; then
1729
+ info "Waiting for CI checks..."
1730
+ gh pr checks --watch 2>/dev/null || warn "CI checks did not all pass"
1731
+ fi
1732
+
1733
+ log_stage "pr" "PR created: $pr_url (${reviewers:+reviewers: $reviewers})"
1734
+ }
1735
+
1736
+ stage_merge() {
1737
+ CURRENT_STAGE_ID="merge"
1738
+
1739
+ if [[ "$NO_GITHUB" == "true" ]]; then
1740
+ info "Merge stage skipped (--no-github)"
1741
+ return 0
1742
+ fi
1743
+
1744
+ # ── Branch Protection Check ──
1745
+ if type gh_branch_protection &>/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
1746
+ local protection_json
1747
+ protection_json=$(gh_branch_protection "$REPO_OWNER" "$REPO_NAME" "${BASE_BRANCH:-main}" 2>/dev/null || echo '{"protected": false}')
1748
+ local is_protected
1749
+ is_protected=$(echo "$protection_json" | jq -r '.protected // false' 2>/dev/null || echo "false")
1750
+ if [[ "$is_protected" == "true" ]]; then
1751
+ local required_reviews
1752
+ required_reviews=$(echo "$protection_json" | jq -r '.required_pull_request_reviews.required_approving_review_count // 0' 2>/dev/null || echo "0")
1753
+ local required_checks
1754
+ required_checks=$(echo "$protection_json" | jq -r '[.required_status_checks.contexts // [] | .[]] | length' 2>/dev/null || echo "0")
1755
+
1756
+ info "Branch protection: ${required_reviews} required review(s), ${required_checks} required check(s)"
1757
+
1758
+ if [[ "$required_reviews" -gt 0 ]]; then
1759
+ # Check if PR has enough approvals
1760
+ local prot_pr_number
1761
+ prot_pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")
1762
+ if [[ -n "$prot_pr_number" ]]; then
1763
+ local approvals
1764
+ approvals=$(gh pr view "$prot_pr_number" --json reviews --jq '[.reviews[] | select(.state == "APPROVED")] | length' 2>/dev/null || echo "0")
1765
+ if [[ "$approvals" -lt "$required_reviews" ]]; then
1766
+ warn "PR has $approvals approval(s), needs $required_reviews — skipping auto-merge"
1767
+ info "PR is ready for manual merge after required reviews"
1768
+ emit_event "merge.blocked" "issue=${ISSUE_NUMBER:-0}" "reason=insufficient_reviews" "have=$approvals" "need=$required_reviews"
1769
+ return 0
1770
+ fi
1771
+ fi
1772
+ fi
1773
+ fi
1774
+ fi
1775
+
1776
+ local merge_method wait_ci_timeout auto_delete_branch auto_merge auto_approve merge_strategy
1777
+ merge_method=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_method) // "squash"' "$PIPELINE_CONFIG" 2>/dev/null) || true
1778
+ [[ -z "$merge_method" || "$merge_method" == "null" ]] && merge_method="squash"
1779
+ wait_ci_timeout=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.wait_ci_timeout_s) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
1780
+ [[ -z "$wait_ci_timeout" || "$wait_ci_timeout" == "null" ]] && wait_ci_timeout=0
1781
+
1782
+ # Adaptive CI timeout: 90th percentile of historical times × 1.5 safety margin
1783
+ if [[ "$wait_ci_timeout" -eq 0 ]] 2>/dev/null; then
1784
+ local repo_hash_ci
1785
+ repo_hash_ci=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
1786
+ local ci_times_file="${HOME}/.shipwright/baselines/${repo_hash_ci}/ci-times.json"
1787
+ if [[ -f "$ci_times_file" ]]; then
1788
+ local p90_time
1789
+ p90_time=$(jq '
1790
+ .times | sort |
1791
+ (length * 0.9 | floor) as $idx |
1792
+ .[$idx] // 600
1793
+ ' "$ci_times_file" 2>/dev/null || echo "0")
1794
+ if [[ -n "$p90_time" ]] && awk -v t="$p90_time" 'BEGIN{exit !(t > 0)}' 2>/dev/null; then
1795
+ # 1.5x safety margin, clamped to [120, 1800]
1796
+ wait_ci_timeout=$(awk -v p90="$p90_time" 'BEGIN{
1797
+ t = p90 * 1.5;
1798
+ if (t < 120) t = 120;
1799
+ if (t > 1800) t = 1800;
1800
+ printf "%d", t
1801
+ }')
1802
+ fi
1803
+ fi
1804
+ # Default fallback if no history
1805
+ [[ "$wait_ci_timeout" -eq 0 ]] && wait_ci_timeout=600
1806
+ fi
1807
+ auto_delete_branch=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_delete_branch) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true
1808
+ [[ -z "$auto_delete_branch" || "$auto_delete_branch" == "null" ]] && auto_delete_branch="true"
1809
+ auto_merge=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_merge) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
1810
+ [[ -z "$auto_merge" || "$auto_merge" == "null" ]] && auto_merge="false"
1811
+ auto_approve=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_approve) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
1812
+ [[ -z "$auto_approve" || "$auto_approve" == "null" ]] && auto_approve="false"
1813
+ merge_strategy=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_strategy) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
1814
+ [[ -z "$merge_strategy" || "$merge_strategy" == "null" ]] && merge_strategy=""
1815
+ # merge_strategy overrides merge_method if set (squash/merge/rebase)
1816
+ if [[ -n "$merge_strategy" ]]; then
1817
+ merge_method="$merge_strategy"
1818
+ fi
1819
+
1820
+ # Find PR for current branch
1821
+ local pr_number
1822
+ pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")
1823
+
1824
+ if [[ -z "$pr_number" ]]; then
1825
+ warn "No PR found for branch $GIT_BRANCH — skipping merge"
1826
+ return 0
1827
+ fi
1828
+
1829
+ info "Found PR #${pr_number} for branch ${GIT_BRANCH}"
1830
+
1831
+ # Wait for CI checks to pass
1832
+ info "Waiting for CI checks (timeout: ${wait_ci_timeout}s)..."
1833
+ local elapsed=0
1834
+ local check_interval=15
1835
+
1836
+ while [[ "$elapsed" -lt "$wait_ci_timeout" ]]; do
1837
+ local check_status
1838
+ check_status=$(gh pr checks "$pr_number" --json 'bucket,name' --jq '[.[] | .bucket] | unique | sort' 2>/dev/null || echo '["pending"]')
1839
+
1840
+ # If all checks passed (only "pass" in buckets)
1841
+ if echo "$check_status" | jq -e '. == ["pass"]' &>/dev/null; then
1842
+ success "All CI checks passed"
1843
+ break
1844
+ fi
1845
+
1846
+ # If any check failed
1847
+ if echo "$check_status" | jq -e 'any(. == "fail")' &>/dev/null; then
1848
+ error "CI checks failed — aborting merge"
1849
+ return 1
1850
+ fi
1851
+
1852
+ sleep "$check_interval"
1853
+ elapsed=$((elapsed + check_interval))
1854
+ done
1855
+
1856
+ # Record CI wait time for adaptive timeout calculation
1857
+ if [[ "$elapsed" -gt 0 ]]; then
1858
+ local repo_hash_ci_rec
1859
+ repo_hash_ci_rec=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
1860
+ local ci_times_dir="${HOME}/.shipwright/baselines/${repo_hash_ci_rec}"
1861
+ local ci_times_rec_file="${ci_times_dir}/ci-times.json"
1862
+ mkdir -p "$ci_times_dir"
1863
+ local ci_history="[]"
1864
+ if [[ -f "$ci_times_rec_file" ]]; then
1865
+ ci_history=$(jq '.times // []' "$ci_times_rec_file" 2>/dev/null || echo "[]")
1866
+ fi
1867
+ local updated_ci
1868
+ updated_ci=$(echo "$ci_history" | jq --arg t "$elapsed" '. + [($t | tonumber)] | .[-20:]' 2>/dev/null || echo "[$elapsed]")
1869
+ local tmp_ci
1870
+ tmp_ci=$(mktemp "${ci_times_dir}/ci-times.json.XXXXXX")
1871
+ jq -n --argjson times "$updated_ci" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
1872
+ '{times: $times, updated: $updated}' > "$tmp_ci" 2>/dev/null
1873
+ mv "$tmp_ci" "$ci_times_rec_file" 2>/dev/null || true
1874
+ fi
1875
+
1876
+ if [[ "$elapsed" -ge "$wait_ci_timeout" ]]; then
1877
+ warn "CI check timeout (${wait_ci_timeout}s) — proceeding with merge anyway"
1878
+ fi
1879
+
1880
+ # Auto-approve if configured (for branch protection requiring reviews)
1881
+ if [[ "$auto_approve" == "true" ]]; then
1882
+ info "Auto-approving PR #${pr_number}..."
1883
+ gh pr review "$pr_number" --approve 2>/dev/null || warn "Auto-approve failed (may need different permissions)"
1884
+ fi
1885
+
1886
+ # Merge the PR
1887
+ if [[ "$auto_merge" == "true" ]]; then
1888
+ info "Enabling auto-merge for PR #${pr_number} (strategy: ${merge_method})..."
1889
+ local auto_merge_args=("pr" "merge" "$pr_number" "--auto" "--${merge_method}")
1890
+ if [[ "$auto_delete_branch" == "true" ]]; then
1891
+ auto_merge_args+=("--delete-branch")
1892
+ fi
1893
+
1894
+ if gh "${auto_merge_args[@]}" 2>/dev/null; then
1895
+ success "Auto-merge enabled for PR #${pr_number} (strategy: ${merge_method})"
1896
+ emit_event "merge.auto_enabled" \
1897
+ "issue=${ISSUE_NUMBER:-0}" \
1898
+ "pr=$pr_number" \
1899
+ "strategy=$merge_method"
1900
+ else
1901
+ warn "Auto-merge not available — falling back to direct merge"
1902
+ # Fall through to direct merge below
1903
+ auto_merge="false"
1904
+ fi
1905
+ fi
1906
+
1907
+ if [[ "$auto_merge" != "true" ]]; then
1908
+ info "Merging PR #${pr_number} (method: ${merge_method})..."
1909
+ local merge_args=("pr" "merge" "$pr_number" "--${merge_method}")
1910
+ if [[ "$auto_delete_branch" == "true" ]]; then
1911
+ merge_args+=("--delete-branch")
1912
+ fi
1913
+
1914
+ if gh "${merge_args[@]}" 2>/dev/null; then
1915
+ success "PR #${pr_number} merged successfully"
1916
+ else
1917
+ error "Failed to merge PR #${pr_number}"
1918
+ return 1
1919
+ fi
1920
+ fi
1921
+
1922
+ log_stage "merge" "PR #${pr_number} merged (strategy: ${merge_method}, auto_merge: ${auto_merge})"
1923
+ }
1924
+
1925
#######################################
# Pipeline stage: deploy using the configured strategy.
# Flow: read deploy commands from config → start GitHub deployment tracking →
# pre-deploy gates (CI status, coverage) → run canary / blue-green / direct
# strategy → report result to the issue and GitHub deployment API.
# Globals (read): PIPELINE_CONFIG, ARTIFACTS_DIR, NO_GITHUB, REPO_OWNER,
#   REPO_NAME, GIT_BRANCH, ISSUE_NUMBER
# Globals (written): CURRENT_STAGE_ID
# Outputs: deploy command output captured under $ARTIFACTS_DIR/*.log;
#   status via info/warn/error/success helpers (defined elsewhere).
# Returns: 0 on success or benign skip (no commands configured);
#   1 on gate failure, deploy failure, or failed health checks.
#######################################
stage_deploy() {
  CURRENT_STAGE_ID="deploy"
  # Deploy commands are arbitrary shell strings from the pipeline config,
  # executed later via `bash -c`. jq failures / "null" collapse to "".
  local staging_cmd
  staging_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.staging_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$staging_cmd" == "null" ]] && staging_cmd=""

  local prod_cmd
  prod_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.production_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$prod_cmd" == "null" ]] && prod_cmd=""

  local rollback_cmd
  rollback_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""

  # Nothing configured → this stage is a no-op, not an error.
  if [[ -z "$staging_cmd" && -z "$prod_cmd" ]]; then
    warn "No deploy commands configured — skipping"
    return 0
  fi

  # Create GitHub deployment tracking
  # Environment is "staging" only when a staging command exists without a
  # production command; otherwise "production".
  local gh_deploy_env="production"
  [[ -n "$staging_cmd" && -z "$prod_cmd" ]] && gh_deploy_env="staging"
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_start &>/dev/null 2>&1; then
    if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      gh_deploy_pipeline_start "$REPO_OWNER" "$REPO_NAME" "${GIT_BRANCH:-HEAD}" "$gh_deploy_env" 2>/dev/null || true
      info "GitHub Deployment: tracking as $gh_deploy_env"
    fi
  fi

  # ── Pre-deploy gates ──
  # Gate 1: all CI check-runs on the branch head must be success/skipped
  # (enabled by default; disable via pre_deploy_ci_status=false).
  local pre_deploy_ci
  pre_deploy_ci=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_ci_status) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true

  if [[ "${pre_deploy_ci:-true}" == "true" && "${NO_GITHUB:-false}" != "true" && -n "${REPO_OWNER:-}" && -n "${REPO_NAME:-}" ]]; then
    info "Pre-deploy gate: checking CI status..."
    local ci_failures
    # Counts only completed, non-success, non-skipped check runs
    # (conclusion == null means still in progress and is not counted).
    ci_failures=$(gh api "repos/${REPO_OWNER}/${REPO_NAME}/commits/${GIT_BRANCH:-HEAD}/check-runs" \
      --jq '[.check_runs[] | select(.conclusion != null and .conclusion != "success" and .conclusion != "skipped")] | length' 2>/dev/null || echo "0")
    if [[ "${ci_failures:-0}" -gt 0 ]]; then
      error "Pre-deploy gate FAILED: ${ci_failures} CI check(s) not passing"
      [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: ${ci_failures} CI checks failing" 2>/dev/null || true
      return 1
    fi
    success "Pre-deploy gate: all CI checks passing"
  fi

  # Gate 2: minimum test coverage, if configured and a coverage artifact exists.
  # NOTE(review): the -lt comparison assumes integer coverage values — a float
  # in coverage_pct would break the [[ ]] arithmetic test; confirm upstream.
  local pre_deploy_min_cov
  pre_deploy_min_cov=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_min_coverage) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  if [[ -n "${pre_deploy_min_cov:-}" && "${pre_deploy_min_cov}" != "null" && -f "$ARTIFACTS_DIR/test-coverage.json" ]]; then
    local actual_cov
    actual_cov=$(jq -r '.coverage_pct // 0' "$ARTIFACTS_DIR/test-coverage.json" 2>/dev/null || echo "0")
    if [[ "${actual_cov:-0}" -lt "$pre_deploy_min_cov" ]]; then
      error "Pre-deploy gate FAILED: coverage ${actual_cov}% < required ${pre_deploy_min_cov}%"
      [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: coverage ${actual_cov}% below minimum ${pre_deploy_min_cov}%" 2>/dev/null || true
      return 1
    fi
    success "Pre-deploy gate: coverage ${actual_cov}% >= ${pre_deploy_min_cov}%"
  fi

  # Post deploy start to GitHub
  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "Deploy started"
  fi

  # ── Deploy strategy ──
  # "canary" and "blue-green" each fall back to "direct" when their required
  # commands are missing (they reassign deploy_strategy inside the case arm).
  local deploy_strategy
  deploy_strategy=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.deploy_strategy) // "direct"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$deploy_strategy" == "null" ]] && deploy_strategy="direct"

  local canary_cmd promote_cmd switch_cmd health_url deploy_log
  canary_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.canary_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$canary_cmd" == "null" ]] && canary_cmd=""
  promote_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.promote_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$promote_cmd" == "null" ]] && promote_cmd=""
  switch_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.switch_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$switch_cmd" == "null" ]] && switch_cmd=""
  health_url=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""
  deploy_log="$ARTIFACTS_DIR/deploy.log"

  case "$deploy_strategy" in
    canary)
      info "Canary deployment strategy..."
      if [[ -z "$canary_cmd" ]]; then
        warn "No canary_cmd configured — falling back to direct"
        deploy_strategy="direct"
      else
        info "Deploying canary..."
        bash -c "$canary_cmd" >> "$deploy_log" 2>&1 || { error "Canary deploy failed"; return 1; }

        # Health-probe the canary: 3 attempts, 10s apart; any 2xx/3xx counts
        # as healthy. Fewer than 2/3 healthy → rollback (best-effort) and fail.
        if [[ -n "$health_url" ]]; then
          local canary_healthy=0
          local _chk
          for _chk in 1 2 3; do
            sleep 10
            local _status
            _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
            if [[ "$_status" -ge 200 && "$_status" -lt 400 ]]; then
              canary_healthy=$((canary_healthy + 1))
            fi
          done
          if [[ "$canary_healthy" -lt 2 ]]; then
            error "Canary health check failed ($canary_healthy/3 passed) — rolling back"
            [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" 2>/dev/null || true
            return 1
          fi
          success "Canary healthy ($canary_healthy/3 checks passed)"
        fi

        info "Promoting canary to full deployment..."
        if [[ -n "$promote_cmd" ]]; then
          bash -c "$promote_cmd" >> "$deploy_log" 2>&1 || { error "Promote failed"; return 1; }
        fi
        success "Canary promoted"
      fi
      ;;
    blue-green)
      info "Blue-green deployment strategy..."
      if [[ -z "$staging_cmd" || -z "$switch_cmd" ]]; then
        warn "Blue-green requires staging_cmd + switch_cmd — falling back to direct"
        deploy_strategy="direct"
      else
        info "Deploying to inactive environment..."
        bash -c "$staging_cmd" >> "$deploy_log" 2>&1 || { error "Blue-green staging failed"; return 1; }

        # Health-probe before switching: 3 attempts, 5s apart, need 2/3 healthy.
        # On failure we do NOT switch traffic, so no rollback is needed here.
        if [[ -n "$health_url" ]]; then
          local bg_healthy=0
          local _chk
          for _chk in 1 2 3; do
            sleep 5
            local _status
            _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
            [[ "$_status" -ge 200 && "$_status" -lt 400 ]] && bg_healthy=$((bg_healthy + 1))
          done
          if [[ "$bg_healthy" -lt 2 ]]; then
            error "Blue-green health check failed — not switching"
            return 1
          fi
        fi

        info "Switching traffic..."
        bash -c "$switch_cmd" >> "$deploy_log" 2>&1 || { error "Traffic switch failed"; return 1; }
        success "Blue-green switch complete"
      fi
      ;;
  esac

  # ── Direct deployment (default or fallback) ──
  # Reached either because deploy_strategy was "direct" from the start or a
  # strategy arm above fell back to it.
  if [[ "$deploy_strategy" == "direct" ]]; then
    if [[ -n "$staging_cmd" ]]; then
      info "Deploying to staging..."
      bash -c "$staging_cmd" > "$ARTIFACTS_DIR/deploy-staging.log" 2>&1 || {
        error "Staging deploy failed"
        [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Staging deploy failed"
        # Mark GitHub deployment as failed
        if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
          gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
        fi
        return 1
      }
      success "Staging deploy complete"
    fi

    if [[ -n "$prod_cmd" ]]; then
      info "Deploying to production..."
      bash -c "$prod_cmd" > "$ARTIFACTS_DIR/deploy-prod.log" 2>&1 || {
        error "Production deploy failed"
        # Best-effort rollback; a rollback failure is logged but does not
        # change the return code (the stage fails either way).
        if [[ -n "$rollback_cmd" ]]; then
          warn "Rolling back..."
          bash -c "$rollback_cmd" 2>&1 || error "Rollback also failed!"
        fi
        [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Production deploy failed — rollback ${rollback_cmd:+attempted}"
        # Mark GitHub deployment as failed
        if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
          gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
        fi
        return 1
      }
      success "Production deploy complete"
    fi
  fi

  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Deploy complete**"
    gh_add_labels "$ISSUE_NUMBER" "deployed"
  fi

  # Mark GitHub deployment as successful
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
    if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" true "" 2>/dev/null || true
    fi
  fi

  log_stage "deploy" "Deploy complete"
}
2121
+
2122
# stage_validate — post-deploy validation stage.
#
# Reads its settings from the "validate" stage entry of $PIPELINE_CONFIG:
#   config.smoke_cmd   — shell command run as smoke test (log: $ARTIFACTS_DIR/smoke.log)
#   config.health_url  — URL polled with curl, up to 5 attempts, 10s apart
#   config.close_issue — when "true" and $ISSUE_NUMBER is set, closes the issue
#                        with a summary table
# On smoke failure: files an "incident" issue (if $ISSUE_NUMBER set) and returns 1.
# On health-check failure after 5 attempts: returns 1.
# On success: optionally closes the tracking issue, then pushes a pipeline
# report page to the repo wiki via gh_wiki_page.
# Globals read: PIPELINE_CONFIG, ARTIFACTS_DIR, ISSUE_NUMBER, GOAL, GITHUB_ISSUE,
#   GIT_BRANCH, PIPELINE_NAME, PIPELINE_START_EPOCH, STAGE_TIMINGS.
# Globals written: CURRENT_STAGE_ID.
stage_validate() {
  CURRENT_STAGE_ID="validate"

  # Pull stage config from the pipeline template; jq failures are tolerated
  # (|| true) and a literal "null" result is normalized to empty.
  local smoke_cmd
  smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""

  local health_url
  health_url=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""

  local close_issue
  close_issue=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.close_issue) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true

  # Smoke tests
  if [[ -n "$smoke_cmd" ]]; then
    info "Running smoke tests..."
    bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/smoke.log" 2>&1 || {
      error "Smoke tests failed"
      # File an incident issue so the failure is tracked even after the
      # pipeline exits. Errors from gh are swallowed deliberately.
      if [[ -n "$ISSUE_NUMBER" ]]; then
        gh issue create --title "Deploy validation failed: $GOAL" \
          --label "incident" --body "Pipeline smoke tests failed after deploy.

Related issue: ${GITHUB_ISSUE}
Branch: ${GIT_BRANCH}
PR: $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'unknown')" 2>/dev/null || true
      fi
      return 1
    }
    success "Smoke tests passed"
  fi

  # Health check with retry: up to 5 curl attempts, 10s apart.
  # On success the loop breaks with attempts < 5, so the post-loop
  # failure check ($attempts -ge 5) only fires when all attempts failed.
  if [[ -n "$health_url" ]]; then
    info "Health check: $health_url"
    local attempts=0
    while [[ $attempts -lt 5 ]]; do
      if curl -sf "$health_url" >/dev/null 2>&1; then
        success "Health check passed"
        break
      fi
      attempts=$((attempts + 1))
      [[ $attempts -lt 5 ]] && { info "Retry ${attempts}/5..."; sleep 10; }
    done
    if [[ $attempts -ge 5 ]]; then
      error "Health check failed after 5 attempts"
      return 1
    fi
  fi

  # Compute total duration once for both issue close and wiki report
  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  # Close original issue with comprehensive summary
  if [[ "$close_issue" == "true" && -n "$ISSUE_NUMBER" ]]; then
    gh issue close "$ISSUE_NUMBER" --comment "## ✅ Complete — Deployed & Validated

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |

_Closed automatically by \`shipwright pipeline\`_" 2>/dev/null || true

    # Swap the in-flight label for the terminal one.
    gh_remove_label "$ISSUE_NUMBER" "pipeline/pr-created"
    gh_add_labels "$ISSUE_NUMBER" "pipeline/complete"
    success "Issue #$ISSUE_NUMBER closed"
  fi

  # Push pipeline report to wiki.
  # NOTE(review): the "Stages" count uses `tr '|' '\n' | wc -l`, which reports
  # 1 even when STAGE_TIMINGS is empty — confirm STAGE_TIMINGS is always
  # non-empty by this stage.
  local report="# Pipeline Report — ${GOAL}

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |
| Stages | $(echo "$STAGE_TIMINGS" | tr '|' '\n' | wc -l | xargs) completed |

## Stage Timings
$(echo "$STAGE_TIMINGS" | tr '|' '\n' | sed 's/^/- /')

## Artifacts
$(ls -1 "$ARTIFACTS_DIR" 2>/dev/null | sed 's/^/- /')

---
_Generated by \`shipwright pipeline\` at $(now_iso)_"
  gh_wiki_page "Pipeline-Report-${ISSUE_NUMBER:-inline}" "$report"

  log_stage "validate" "Validation complete"
}
2218
+
2219
# stage_monitor — post-deploy monitoring stage.
#
# Polls a health URL and/or a log command for a configured window and fails
# the pipeline (return 1) when accumulated errors reach the threshold,
# optionally auto-rolling back. Reads the "monitor" stage config from
# $PIPELINE_CONFIG:
#   config.duration_minutes — monitoring window (default 5)
#   config.health_url       — URL polled via curl each cycle
#   config.error_threshold  — max accumulated errors before alert (default 5)
#   config.log_pattern      — regex counted as errors (default ERROR|FATAL|PANIC)
#   config.log_cmd          — command whose output is scanned/accumulated
#   config.rollback_cmd     — command run on auto-rollback
#   config.auto_rollback    — "true" to roll back when threshold exceeded
# Historical baselines (if present) override duration/threshold; a new
# baseline entry is recorded on a clean run.
# Globals read: PIPELINE_CONFIG, PROJECT_ROOT, ARTIFACTS_DIR, SCRIPT_DIR,
#   ISSUE_NUMBER, GITHUB_ISSUE, GOAL, GIT_BRANCH, GH_AVAILABLE, DIM, RESET, HOME.
# Globals written: CURRENT_STAGE_ID.
# Returns: 0 when the window completes under threshold (or nothing is
#   configured to monitor); 1 when the error threshold is exceeded.
stage_monitor() {
  CURRENT_STAGE_ID="monitor"

  # Read config from pipeline template; jq failures tolerated, "null"
  # normalized to empty / defaults.
  local duration_minutes health_url error_threshold log_pattern log_cmd rollback_cmd auto_rollback
  duration_minutes=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.duration_minutes) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$duration_minutes" || "$duration_minutes" == "null" ]] && duration_minutes=5
  health_url=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""
  error_threshold=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.error_threshold) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$error_threshold" || "$error_threshold" == "null" ]] && error_threshold=5

  # Adaptive monitor: use historical baselines if available.
  # repo_hash keys the baseline directory by project path.
  local repo_hash
  repo_hash=$(echo "${PROJECT_ROOT:-$(pwd)}" | cksum | awk '{print $1}')
  local baseline_file="${HOME}/.shipwright/baselines/${repo_hash}/deploy-monitor.json"
  if [[ -f "$baseline_file" ]]; then
    local hist_duration hist_threshold
    hist_duration=$(jq -r '.p90_stabilization_minutes // empty' "$baseline_file" 2>/dev/null || true)
    hist_threshold=$(jq -r '.p90_error_threshold // empty' "$baseline_file" 2>/dev/null || true)
    if [[ -n "$hist_duration" && "$hist_duration" != "null" ]]; then
      duration_minutes="$hist_duration"
      info "Monitor duration: ${duration_minutes}m ${DIM}(from baseline)${RESET}"
    fi
    if [[ -n "$hist_threshold" && "$hist_threshold" != "null" ]]; then
      error_threshold="$hist_threshold"
      info "Error threshold: ${error_threshold} ${DIM}(from baseline)${RESET}"
    fi
  fi
  log_pattern=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_pattern) // "ERROR|FATAL|PANIC"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$log_pattern" || "$log_pattern" == "null" ]] && log_pattern="ERROR|FATAL|PANIC"
  log_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$log_cmd" == "null" ]] && log_cmd=""
  rollback_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""
  auto_rollback=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.auto_rollback) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_rollback" || "$auto_rollback" == "null" ]] && auto_rollback="false"

  # Nothing to monitor — succeed without polling.
  if [[ -z "$health_url" && -z "$log_cmd" ]]; then
    warn "No health_url or log_cmd configured — skipping monitor stage"
    log_stage "monitor" "Skipped (no monitoring configured)"
    return 0
  fi

  local report_file="$ARTIFACTS_DIR/monitor-report.md"
  local deploy_log_file="$ARTIFACTS_DIR/deploy-logs.txt"
  : > "$deploy_log_file"
  local total_errors=0
  local poll_interval=30 # seconds between polls
  # Declaration split from assignment so arithmetic isn't masked by `local`.
  local total_polls
  total_polls=$(( (duration_minutes * 60) / poll_interval ))
  [[ "$total_polls" -lt 1 ]] && total_polls=1

  info "Post-deploy monitoring: ${duration_minutes}m (${total_polls} polls, threshold: ${error_threshold} errors)"

  emit_event "monitor.started" \
    "issue=${ISSUE_NUMBER:-0}" \
    "duration_minutes=$duration_minutes" \
    "error_threshold=$error_threshold"

  # Report header (truncates any previous report).
  {
    echo "# Post-Deploy Monitor Report"
    echo ""
    echo "- Duration: ${duration_minutes} minutes"
    echo "- Health URL: ${health_url:-none}"
    echo "- Log command: ${log_cmd:-none}"
    echo "- Error threshold: ${error_threshold}"
    echo "- Auto-rollback: ${auto_rollback}"
    echo ""
    echo "## Poll Results"
    echo ""
  } > "$report_file"

  local poll=0
  local health_failures=0
  local log_errors=0
  while [[ "$poll" -lt "$total_polls" ]]; do
    poll=$((poll + 1))
    local poll_time
    poll_time=$(now_iso)

    # Health URL check — any non-2xx/3xx status (or curl failure → "000")
    # counts as one error.
    if [[ -n "$health_url" ]]; then
      local http_status
      http_status=$(curl -sf -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "000")
      if [[ "$http_status" -ge 200 && "$http_status" -lt 400 ]]; then
        echo "- [${poll_time}] Health: ✅ (HTTP ${http_status})" >> "$report_file"
      else
        health_failures=$((health_failures + 1))
        total_errors=$((total_errors + 1))
        echo "- [${poll_time}] Health: ❌ (HTTP ${http_status})" >> "$report_file"
        warn "Health check failed: HTTP ${http_status}"
      fi
    fi

    # Log command check (accumulate deploy logs for feedback collect)
    if [[ -n "$log_cmd" ]]; then
      local log_output
      log_output=$(bash -c "$log_cmd" 2>/dev/null || true)
      [[ -n "$log_output" ]] && echo "$log_output" >> "$deploy_log_file"
      local error_count=0
      if [[ -n "$log_output" ]]; then
        # grep -c exits non-zero on no match; || true keeps its "0" output.
        error_count=$(echo "$log_output" | grep -cE "$log_pattern" 2>/dev/null || true)
        error_count="${error_count:-0}"
      fi
      if [[ "$error_count" -gt 0 ]]; then
        log_errors=$((log_errors + error_count))
        total_errors=$((total_errors + error_count))
        echo "- [${poll_time}] Logs: ⚠️ ${error_count} error(s) matching '${log_pattern}'" >> "$report_file"
        warn "Log errors detected: ${error_count}"
      else
        echo "- [${poll_time}] Logs: ✅ clean" >> "$report_file"
      fi
    fi

    emit_event "monitor.check" \
      "issue=${ISSUE_NUMBER:-0}" \
      "poll=$poll" \
      "total_errors=$total_errors" \
      "health_failures=$health_failures"

    # Check threshold
    if [[ "$total_errors" -ge "$error_threshold" ]]; then
      error "Error threshold exceeded: ${total_errors} >= ${error_threshold}"

      echo "" >> "$report_file"
      echo "## ❌ THRESHOLD EXCEEDED" >> "$report_file"
      echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"

      emit_event "monitor.alert" \
        "issue=${ISSUE_NUMBER:-0}" \
        "total_errors=$total_errors" \
        "threshold=$error_threshold"

      # Feedback loop: collect deploy logs and optionally create issue
      if [[ -f "$deploy_log_file" ]] && [[ -s "$deploy_log_file" ]] && [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" collect "$deploy_log_file" 2>/dev/null) || true
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" create-issue 2>/dev/null) || true
      fi

      # Auto-rollback: feedback rollback (GitHub Deployments API) and/or config rollback_cmd
      if [[ "$auto_rollback" == "true" ]]; then
        warn "Auto-rolling back..."
        echo "" >> "$report_file"
        echo "## Rollback" >> "$report_file"

        # Trigger feedback rollback (calls sw-github-deploy.sh rollback)
        if [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
          (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" rollback production "Monitor threshold exceeded (${total_errors} errors)" >> "$report_file" 2>&1) || true
        fi

        # BUGFIX: previously `[[ -n "$rollback_cmd" ]] && bash -c ... || else`
        # reported "Rollback failed!" when rollback_cmd was simply not
        # configured. Distinguish "not configured" from "command failed".
        if [[ -n "$rollback_cmd" ]]; then
          if bash -c "$rollback_cmd" >> "$report_file" 2>&1; then
            success "Rollback executed"
            echo "Rollback: ✅ success" >> "$report_file"

            # Post-rollback smoke test verification (reuses the validate
            # stage's smoke_cmd from the same pipeline config).
            local smoke_cmd
            smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
            [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""

            if [[ -n "$smoke_cmd" ]]; then
              info "Verifying rollback with smoke tests..."
              if bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/rollback-smoke.log" 2>&1; then
                success "Rollback verified — smoke tests pass"
                echo "Rollback verification: ✅ smoke tests pass" >> "$report_file"
                emit_event "monitor.rollback_verified" \
                  "issue=${ISSUE_NUMBER:-0}" \
                  "status=pass"
              else
                error "Rollback verification FAILED — smoke tests still failing"
                echo "Rollback verification: ❌ smoke tests FAILED — manual intervention required" >> "$report_file"
                emit_event "monitor.rollback_verified" \
                  "issue=${ISSUE_NUMBER:-0}" \
                  "status=fail"
                if [[ -n "$ISSUE_NUMBER" ]]; then
                  gh_comment_issue "$ISSUE_NUMBER" "🚨 **Rollback executed but verification failed** — smoke tests still failing after rollback. Manual intervention required.

Smoke command: \`${smoke_cmd}\`
Log: see \`pipeline-artifacts/rollback-smoke.log\`" 2>/dev/null || true
                fi
              fi
            fi
          else
            error "Rollback failed!"
            echo "Rollback: ❌ failed" >> "$report_file"
          fi
        else
          warn "No rollback_cmd configured — feedback rollback only"
          echo "Rollback: ⚠️ no rollback_cmd configured (feedback rollback only)" >> "$report_file"
        fi

        emit_event "monitor.rollback" \
          "issue=${ISSUE_NUMBER:-0}" \
          "total_errors=$total_errors"

        # Post to GitHub
        if [[ -n "$ISSUE_NUMBER" ]]; then
          gh_comment_issue "$ISSUE_NUMBER" "🚨 **Auto-rollback triggered** — ${total_errors} errors exceeded threshold (${error_threshold})

Rollback command: \`${rollback_cmd}\`" 2>/dev/null || true

          # Create hotfix issue
          if [[ "$GH_AVAILABLE" == "true" ]]; then
            gh issue create \
              --title "Hotfix: Deploy regression for ${GOAL}" \
              --label "hotfix,incident" \
              --body "Auto-rollback triggered during post-deploy monitoring.

**Original issue:** ${GITHUB_ISSUE:-N/A}
**Errors detected:** ${total_errors}
**Threshold:** ${error_threshold}
**Branch:** ${GIT_BRANCH}

## Monitor Report
$(cat "$report_file")

---
_Created automatically by \`shipwright pipeline\` monitor stage_" 2>/dev/null || true
          fi
        fi
      fi

      log_stage "monitor" "Failed — ${total_errors} errors (threshold: ${error_threshold})"
      return 1
    fi

    # Sleep between polls (skip on last poll)
    if [[ "$poll" -lt "$total_polls" ]]; then
      sleep "$poll_interval"
    fi
  done

  # Monitoring complete — all clear
  echo "" >> "$report_file"
  echo "## ✅ Monitoring Complete" >> "$report_file"
  echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"
  echo "Health failures: ${health_failures}" >> "$report_file"
  echo "Log errors: ${log_errors}" >> "$report_file"

  success "Post-deploy monitoring clean (${total_errors} errors in ${duration_minutes}m)"

  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Post-deploy monitoring passed** — ${duration_minutes}m, ${total_errors} errors" 2>/dev/null || true
  fi

  log_stage "monitor" "Clean — ${total_errors} errors in ${duration_minutes}m"

  # Record baseline for adaptive monitoring on future runs.
  # Write via mktemp + mv so a failed jq never corrupts the baseline file;
  # the trailing `|| rm -f` cleans up the temp file on any failure.
  local baseline_dir="${HOME}/.shipwright/baselines/${repo_hash}"
  mkdir -p "$baseline_dir" 2>/dev/null || true
  local baseline_tmp
  baseline_tmp="$(mktemp)"
  if [[ -f "${baseline_dir}/deploy-monitor.json" ]]; then
    # Append to history and recalculate p90
    jq --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '.history += [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}] |
       .p90_stabilization_minutes = ([.history[].duration_minutes] | sort | .[length * 9 / 10 | floor]) |
       .p90_error_threshold = (([.history[].errors] | sort | .[length * 9 / 10 | floor]) + 2) |
       .updated_at = now' \
      "${baseline_dir}/deploy-monitor.json" > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  else
    jq -n --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '{history: [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}],
        p90_stabilization_minutes: ($dur | tonumber),
        p90_error_threshold: (($errs | tonumber) + 2),
        updated_at: now}' \
      > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  fi
}
2485
+
2486
+ # ─── Multi-Dimensional Quality Checks ─────────────────────────────────────
2487
+ # Beyond tests: security, bundle size, perf regression, API compat, coverage
2488
+