shipwright-cli 3.2.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +2 -0
- package/.claude/agents/devops-engineer.md +2 -0
- package/.claude/agents/doc-fleet-agent.md +2 -0
- package/.claude/agents/pipeline-agent.md +2 -0
- package/.claude/agents/shell-script-specialist.md +2 -0
- package/.claude/agents/test-specialist.md +2 -0
- package/.claude/hooks/agent-crash-capture.sh +32 -0
- package/.claude/hooks/post-tool-use.sh +3 -2
- package/.claude/hooks/pre-tool-use.sh +35 -3
- package/README.md +4 -4
- package/claude-code/hooks/config-change.sh +18 -0
- package/claude-code/hooks/instructions-reloaded.sh +7 -0
- package/claude-code/hooks/worktree-create.sh +25 -0
- package/claude-code/hooks/worktree-remove.sh +20 -0
- package/config/code-constitution.json +130 -0
- package/dashboard/middleware/auth.ts +134 -0
- package/dashboard/middleware/constants.ts +21 -0
- package/dashboard/public/index.html +2 -6
- package/dashboard/public/styles.css +100 -97
- package/dashboard/routes/auth.ts +38 -0
- package/dashboard/server.ts +66 -25
- package/dashboard/services/config.ts +26 -0
- package/dashboard/services/db.ts +118 -0
- package/dashboard/src/canvas/pixel-agent.ts +298 -0
- package/dashboard/src/canvas/pixel-sprites.ts +440 -0
- package/dashboard/src/canvas/shipyard-effects.ts +367 -0
- package/dashboard/src/canvas/shipyard-scene.ts +616 -0
- package/dashboard/src/canvas/submarine-layout.ts +267 -0
- package/dashboard/src/components/header.ts +8 -7
- package/dashboard/src/core/router.ts +1 -0
- package/dashboard/src/design/submarine-theme.ts +253 -0
- package/dashboard/src/main.ts +2 -0
- package/dashboard/src/types/api.ts +2 -1
- package/dashboard/src/views/activity.ts +2 -1
- package/dashboard/src/views/shipyard.ts +39 -0
- package/dashboard/types/index.ts +166 -0
- package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
- package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
- package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
- package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
- package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
- package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
- package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
- package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
- package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
- package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
- package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
- package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
- package/docs/research/RESEARCH_INDEX.md +439 -0
- package/docs/research/RESEARCH_SOURCES.md +440 -0
- package/docs/research/RESEARCH_SUMMARY.txt +275 -0
- package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
- package/package.json +2 -2
- package/scripts/lib/adaptive-model.sh +427 -0
- package/scripts/lib/adaptive-timeout.sh +316 -0
- package/scripts/lib/audit-trail.sh +309 -0
- package/scripts/lib/auto-recovery.sh +471 -0
- package/scripts/lib/bandit-selector.sh +431 -0
- package/scripts/lib/bootstrap.sh +104 -2
- package/scripts/lib/causal-graph.sh +455 -0
- package/scripts/lib/compat.sh +126 -0
- package/scripts/lib/compound-audit.sh +337 -0
- package/scripts/lib/constitutional.sh +454 -0
- package/scripts/lib/context-budget.sh +359 -0
- package/scripts/lib/convergence.sh +594 -0
- package/scripts/lib/cost-optimizer.sh +634 -0
- package/scripts/lib/daemon-adaptive.sh +10 -0
- package/scripts/lib/daemon-dispatch.sh +106 -17
- package/scripts/lib/daemon-failure.sh +34 -4
- package/scripts/lib/daemon-patrol.sh +23 -2
- package/scripts/lib/daemon-poll-github.sh +361 -0
- package/scripts/lib/daemon-poll-health.sh +299 -0
- package/scripts/lib/daemon-poll.sh +27 -611
- package/scripts/lib/daemon-state.sh +112 -66
- package/scripts/lib/daemon-triage.sh +10 -0
- package/scripts/lib/dod-scorecard.sh +442 -0
- package/scripts/lib/error-actionability.sh +300 -0
- package/scripts/lib/formal-spec.sh +461 -0
- package/scripts/lib/helpers.sh +177 -4
- package/scripts/lib/intent-analysis.sh +409 -0
- package/scripts/lib/loop-convergence.sh +350 -0
- package/scripts/lib/loop-iteration.sh +682 -0
- package/scripts/lib/loop-progress.sh +48 -0
- package/scripts/lib/loop-restart.sh +185 -0
- package/scripts/lib/memory-effectiveness.sh +506 -0
- package/scripts/lib/mutation-executor.sh +352 -0
- package/scripts/lib/outcome-feedback.sh +521 -0
- package/scripts/lib/pipeline-cli.sh +336 -0
- package/scripts/lib/pipeline-commands.sh +1216 -0
- package/scripts/lib/pipeline-detection.sh +100 -2
- package/scripts/lib/pipeline-execution.sh +897 -0
- package/scripts/lib/pipeline-github.sh +28 -3
- package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
- package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
- package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
- package/scripts/lib/pipeline-intelligence.sh +100 -1136
- package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
- package/scripts/lib/pipeline-quality-checks.sh +17 -715
- package/scripts/lib/pipeline-quality-gates.sh +563 -0
- package/scripts/lib/pipeline-stages-build.sh +730 -0
- package/scripts/lib/pipeline-stages-delivery.sh +965 -0
- package/scripts/lib/pipeline-stages-intake.sh +1133 -0
- package/scripts/lib/pipeline-stages-monitor.sh +407 -0
- package/scripts/lib/pipeline-stages-review.sh +1022 -0
- package/scripts/lib/pipeline-stages.sh +59 -2929
- package/scripts/lib/pipeline-state.sh +36 -5
- package/scripts/lib/pipeline-util.sh +487 -0
- package/scripts/lib/policy-learner.sh +438 -0
- package/scripts/lib/process-reward.sh +493 -0
- package/scripts/lib/project-detect.sh +649 -0
- package/scripts/lib/quality-profile.sh +334 -0
- package/scripts/lib/recruit-commands.sh +885 -0
- package/scripts/lib/recruit-learning.sh +739 -0
- package/scripts/lib/recruit-roles.sh +648 -0
- package/scripts/lib/reward-aggregator.sh +458 -0
- package/scripts/lib/rl-optimizer.sh +362 -0
- package/scripts/lib/root-cause.sh +427 -0
- package/scripts/lib/scope-enforcement.sh +445 -0
- package/scripts/lib/session-restart.sh +493 -0
- package/scripts/lib/skill-memory.sh +300 -0
- package/scripts/lib/skill-registry.sh +775 -0
- package/scripts/lib/spec-driven.sh +476 -0
- package/scripts/lib/test-helpers.sh +18 -7
- package/scripts/lib/test-holdout.sh +429 -0
- package/scripts/lib/test-optimizer.sh +511 -0
- package/scripts/shipwright-file-suggest.sh +45 -0
- package/scripts/skills/adversarial-quality.md +61 -0
- package/scripts/skills/api-design.md +44 -0
- package/scripts/skills/architecture-design.md +50 -0
- package/scripts/skills/brainstorming.md +43 -0
- package/scripts/skills/data-pipeline.md +44 -0
- package/scripts/skills/deploy-safety.md +64 -0
- package/scripts/skills/documentation.md +38 -0
- package/scripts/skills/frontend-design.md +45 -0
- package/scripts/skills/generated/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
- package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
- package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
- package/scripts/skills/generated/cli-version-management.md +29 -0
- package/scripts/skills/generated/collection-system-validation.md +99 -0
- package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
- package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
- package/scripts/skills/generated/test-parallelization-detection.md +65 -0
- package/scripts/skills/observability.md +79 -0
- package/scripts/skills/performance.md +48 -0
- package/scripts/skills/pr-quality.md +49 -0
- package/scripts/skills/product-thinking.md +43 -0
- package/scripts/skills/security-audit.md +49 -0
- package/scripts/skills/systematic-debugging.md +40 -0
- package/scripts/skills/testing-strategy.md +47 -0
- package/scripts/skills/two-stage-review.md +52 -0
- package/scripts/skills/validation-thoroughness.md +55 -0
- package/scripts/sw +9 -3
- package/scripts/sw-activity.sh +9 -2
- package/scripts/sw-adaptive.sh +2 -1
- package/scripts/sw-adversarial.sh +2 -1
- package/scripts/sw-architecture-enforcer.sh +3 -1
- package/scripts/sw-auth.sh +12 -2
- package/scripts/sw-autonomous.sh +5 -1
- package/scripts/sw-changelog.sh +4 -1
- package/scripts/sw-checkpoint.sh +2 -1
- package/scripts/sw-ci.sh +5 -1
- package/scripts/sw-cleanup.sh +4 -26
- package/scripts/sw-code-review.sh +10 -4
- package/scripts/sw-connect.sh +2 -1
- package/scripts/sw-context.sh +2 -1
- package/scripts/sw-cost.sh +48 -3
- package/scripts/sw-daemon.sh +66 -9
- package/scripts/sw-dashboard.sh +3 -1
- package/scripts/sw-db.sh +59 -16
- package/scripts/sw-decide.sh +8 -2
- package/scripts/sw-decompose.sh +360 -17
- package/scripts/sw-deps.sh +4 -1
- package/scripts/sw-developer-simulation.sh +4 -1
- package/scripts/sw-discovery.sh +325 -2
- package/scripts/sw-doc-fleet.sh +4 -1
- package/scripts/sw-docs-agent.sh +3 -1
- package/scripts/sw-docs.sh +2 -1
- package/scripts/sw-doctor.sh +453 -2
- package/scripts/sw-dora.sh +4 -1
- package/scripts/sw-durable.sh +4 -3
- package/scripts/sw-e2e-orchestrator.sh +17 -16
- package/scripts/sw-eventbus.sh +7 -1
- package/scripts/sw-evidence.sh +364 -12
- package/scripts/sw-feedback.sh +550 -9
- package/scripts/sw-fix.sh +20 -1
- package/scripts/sw-fleet-discover.sh +6 -2
- package/scripts/sw-fleet-viz.sh +4 -1
- package/scripts/sw-fleet.sh +5 -1
- package/scripts/sw-github-app.sh +16 -3
- package/scripts/sw-github-checks.sh +3 -2
- package/scripts/sw-github-deploy.sh +3 -2
- package/scripts/sw-github-graphql.sh +18 -7
- package/scripts/sw-guild.sh +5 -1
- package/scripts/sw-heartbeat.sh +5 -30
- package/scripts/sw-hello.sh +67 -0
- package/scripts/sw-hygiene.sh +6 -1
- package/scripts/sw-incident.sh +265 -1
- package/scripts/sw-init.sh +18 -2
- package/scripts/sw-instrument.sh +10 -2
- package/scripts/sw-intelligence.sh +42 -6
- package/scripts/sw-jira.sh +5 -1
- package/scripts/sw-launchd.sh +2 -1
- package/scripts/sw-linear.sh +4 -1
- package/scripts/sw-logs.sh +4 -1
- package/scripts/sw-loop.sh +432 -1128
- package/scripts/sw-memory.sh +356 -2
- package/scripts/sw-mission-control.sh +6 -1
- package/scripts/sw-model-router.sh +481 -26
- package/scripts/sw-otel.sh +13 -4
- package/scripts/sw-oversight.sh +14 -5
- package/scripts/sw-patrol-meta.sh +334 -0
- package/scripts/sw-pipeline-composer.sh +5 -1
- package/scripts/sw-pipeline-vitals.sh +2 -1
- package/scripts/sw-pipeline.sh +53 -2664
- package/scripts/sw-pm.sh +12 -5
- package/scripts/sw-pr-lifecycle.sh +2 -1
- package/scripts/sw-predictive.sh +7 -1
- package/scripts/sw-prep.sh +185 -2
- package/scripts/sw-ps.sh +5 -25
- package/scripts/sw-public-dashboard.sh +15 -3
- package/scripts/sw-quality.sh +2 -1
- package/scripts/sw-reaper.sh +8 -25
- package/scripts/sw-recruit.sh +156 -2303
- package/scripts/sw-regression.sh +19 -12
- package/scripts/sw-release-manager.sh +3 -1
- package/scripts/sw-release.sh +4 -1
- package/scripts/sw-remote.sh +3 -1
- package/scripts/sw-replay.sh +7 -1
- package/scripts/sw-retro.sh +158 -1
- package/scripts/sw-review-rerun.sh +3 -1
- package/scripts/sw-scale.sh +10 -3
- package/scripts/sw-security-audit.sh +6 -1
- package/scripts/sw-self-optimize.sh +6 -3
- package/scripts/sw-session.sh +9 -3
- package/scripts/sw-setup.sh +3 -1
- package/scripts/sw-stall-detector.sh +406 -0
- package/scripts/sw-standup.sh +15 -7
- package/scripts/sw-status.sh +3 -1
- package/scripts/sw-strategic.sh +4 -1
- package/scripts/sw-stream.sh +7 -1
- package/scripts/sw-swarm.sh +18 -6
- package/scripts/sw-team-stages.sh +13 -6
- package/scripts/sw-templates.sh +5 -29
- package/scripts/sw-testgen.sh +7 -1
- package/scripts/sw-tmux-pipeline.sh +4 -1
- package/scripts/sw-tmux-role-color.sh +2 -0
- package/scripts/sw-tmux-status.sh +1 -1
- package/scripts/sw-tmux.sh +3 -1
- package/scripts/sw-trace.sh +3 -1
- package/scripts/sw-tracker-github.sh +3 -0
- package/scripts/sw-tracker-jira.sh +3 -0
- package/scripts/sw-tracker-linear.sh +3 -0
- package/scripts/sw-tracker.sh +3 -1
- package/scripts/sw-triage.sh +2 -1
- package/scripts/sw-upgrade.sh +3 -1
- package/scripts/sw-ux.sh +5 -2
- package/scripts/sw-webhook.sh +3 -1
- package/scripts/sw-widgets.sh +3 -1
- package/scripts/sw-worktree.sh +15 -3
- package/scripts/test-skill-injection.sh +1233 -0
- package/templates/pipelines/autonomous.json +27 -3
- package/templates/pipelines/cost-aware.json +34 -8
- package/templates/pipelines/deployed.json +12 -0
- package/templates/pipelines/enterprise.json +12 -0
- package/templates/pipelines/fast.json +6 -0
- package/templates/pipelines/full.json +27 -3
- package/templates/pipelines/hotfix.json +6 -0
- package/templates/pipelines/standard.json +12 -0
- package/templates/pipelines/tdd.json +12 -0
|
@@ -1,8 +1,52 @@
|
|
|
1
|
-
|
|
1
|
+
#\!/bin/bash
|
|
2
|
+
# pipeline-stages.sh — Stage implementations loader
|
|
3
|
+
# Sources domain-specific stage modules (intake, build, review, delivery, monitor).
|
|
2
4
|
# Source from sw-pipeline.sh. Requires all pipeline globals and state/github/detection/quality modules.
|
|
3
5
|
[[ -n "${_PIPELINE_STAGES_LOADED:-}" ]] && return 0
|
|
4
6
|
_PIPELINE_STAGES_LOADED=1
|
|
5
7
|
|
|
8
|
+
# Source skill registry for dynamic prompt injection
|
|
9
|
+
_SKILL_REGISTRY_SH="${SCRIPT_DIR}/lib/skill-registry.sh"
|
|
10
|
+
[[ -f "$_SKILL_REGISTRY_SH" ]] && source "$_SKILL_REGISTRY_SH"
|
|
11
|
+
|
|
12
|
+
# Source skill memory for learning system
|
|
13
|
+
_SKILL_MEMORY_SH="${SCRIPT_DIR}/lib/skill-memory.sh"
|
|
14
|
+
[[ -f "$_SKILL_MEMORY_SH" ]] && source "$_SKILL_MEMORY_SH"
|
|
15
|
+
|
|
16
|
+
# Source dark factory modules (test holdout, spec-driven, causal graph)
|
|
17
|
+
[[ -f "$SCRIPT_DIR/lib/test-holdout.sh" ]] && source "$SCRIPT_DIR/lib/test-holdout.sh" 2>/dev/null || true
|
|
18
|
+
[[ -f "$SCRIPT_DIR/lib/spec-driven.sh" ]] && source "$SCRIPT_DIR/lib/spec-driven.sh" 2>/dev/null || true
|
|
19
|
+
[[ -f "$SCRIPT_DIR/lib/causal-graph.sh" ]] && source "$SCRIPT_DIR/lib/causal-graph.sh" 2>/dev/null || true
|
|
20
|
+
[[ -f "$SCRIPT_DIR/lib/constitutional.sh" ]] && source "$SCRIPT_DIR/lib/constitutional.sh" 2>/dev/null || true
|
|
21
|
+
[[ -f "$SCRIPT_DIR/lib/formal-spec.sh" ]] && source "$SCRIPT_DIR/lib/formal-spec.sh" 2>/dev/null || true
|
|
22
|
+
[[ -f "$SCRIPT_DIR/lib/mutation-executor.sh" ]] && source "$SCRIPT_DIR/lib/mutation-executor.sh" 2>/dev/null || true
|
|
23
|
+
# Cross-session reinforcement learning optimizer (Phase 7)
|
|
24
|
+
[[ -f "$SCRIPT_DIR/lib/rl-optimizer.sh" ]] && source "$SCRIPT_DIR/lib/rl-optimizer.sh" 2>/dev/null || true
|
|
25
|
+
# Autoresearch RL modules (Phase 8): reward aggregation, bandit selection, policy learning
|
|
26
|
+
[[ -f "$SCRIPT_DIR/lib/reward-aggregator.sh" ]] && source "$SCRIPT_DIR/lib/reward-aggregator.sh" 2>/dev/null || true
|
|
27
|
+
[[ -f "$SCRIPT_DIR/lib/bandit-selector.sh" ]] && source "$SCRIPT_DIR/lib/bandit-selector.sh" 2>/dev/null || true
|
|
28
|
+
[[ -f "$SCRIPT_DIR/lib/policy-learner.sh" ]] && source "$SCRIPT_DIR/lib/policy-learner.sh" 2>/dev/null || true
|
|
29
|
+
|
|
30
|
+
# Defaults for variables normally set by sw-pipeline.sh (safe under set -u).
|
|
31
|
+
ARTIFACTS_DIR="${ARTIFACTS_DIR:-.claude/pipeline-artifacts}"
|
|
32
|
+
SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
|
|
33
|
+
PROJECT_ROOT="${PROJECT_ROOT:-$(pwd)}"
|
|
34
|
+
PIPELINE_CONFIG="${PIPELINE_CONFIG:-}"
|
|
35
|
+
PIPELINE_NAME="${PIPELINE_NAME:-pipeline}"
|
|
36
|
+
MODEL="${MODEL:-opus}"
|
|
37
|
+
BASE_BRANCH="${BASE_BRANCH:-main}"
|
|
38
|
+
NO_GITHUB="${NO_GITHUB:-false}"
|
|
39
|
+
ISSUE_NUMBER="${ISSUE_NUMBER:-}"
|
|
40
|
+
ISSUE_BODY="${ISSUE_BODY:-}"
|
|
41
|
+
ISSUE_LABELS="${ISSUE_LABELS:-}"
|
|
42
|
+
ISSUE_MILESTONE="${ISSUE_MILESTONE:-}"
|
|
43
|
+
GOAL="${GOAL:-}"
|
|
44
|
+
TASK_TYPE="${TASK_TYPE:-feature}"
|
|
45
|
+
INTELLIGENCE_ISSUE_TYPE="${INTELLIGENCE_ISSUE_TYPE:-backend}"
|
|
46
|
+
TEST_CMD="${TEST_CMD:-}"
|
|
47
|
+
GIT_BRANCH="${GIT_BRANCH:-}"
|
|
48
|
+
TASKS_FILE="${TASKS_FILE:-}"
|
|
49
|
+
|
|
6
50
|
# ─── Context pruning helpers ────────────────────────────────────────────────
|
|
7
51
|
|
|
8
52
|
# prune_context_section — Intelligently truncate a context section to fit a char budget.
|
|
@@ -142,2937 +186,23 @@ show_stage_preview() {
|
|
|
142
186
|
echo ""
|
|
143
187
|
}
|
|
144
188
|
|
|
145
|
-
|
|
146
|
-
CURRENT_STAGE_ID="intake"
|
|
147
|
-
local project_lang
|
|
148
|
-
project_lang=$(detect_project_lang)
|
|
149
|
-
info "Project: ${BOLD}$project_lang${RESET}"
|
|
150
|
-
|
|
151
|
-
# 1. Fetch issue metadata if --issue provided
|
|
152
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
153
|
-
local meta
|
|
154
|
-
meta=$(gh_get_issue_meta "$ISSUE_NUMBER")
|
|
155
|
-
|
|
156
|
-
if [[ -n "$meta" ]]; then
|
|
157
|
-
GOAL=$(echo "$meta" | jq -r '.title // ""')
|
|
158
|
-
ISSUE_BODY=$(echo "$meta" | jq -r '.body // ""')
|
|
159
|
-
ISSUE_LABELS=$(echo "$meta" | jq -r '[.labels[].name] | join(",")' 2>/dev/null || true)
|
|
160
|
-
ISSUE_MILESTONE=$(echo "$meta" | jq -r '.milestone.title // ""' 2>/dev/null || true)
|
|
161
|
-
ISSUE_ASSIGNEES=$(echo "$meta" | jq -r '[.assignees[].login] | join(",")' 2>/dev/null || true)
|
|
162
|
-
[[ "$ISSUE_MILESTONE" == "null" ]] && ISSUE_MILESTONE=""
|
|
163
|
-
[[ "$ISSUE_LABELS" == "null" ]] && ISSUE_LABELS=""
|
|
164
|
-
else
|
|
165
|
-
# Fallback: just get title
|
|
166
|
-
GOAL=$(gh issue view "$ISSUE_NUMBER" --json title -q .title 2>/dev/null) || {
|
|
167
|
-
error "Failed to fetch issue #$ISSUE_NUMBER"
|
|
168
|
-
return 1
|
|
169
|
-
}
|
|
170
|
-
fi
|
|
171
|
-
|
|
172
|
-
GITHUB_ISSUE="#$ISSUE_NUMBER"
|
|
173
|
-
info "Issue #$ISSUE_NUMBER: ${BOLD}$GOAL${RESET}"
|
|
174
|
-
|
|
175
|
-
if [[ -n "$ISSUE_LABELS" ]]; then
|
|
176
|
-
info "Labels: ${DIM}$ISSUE_LABELS${RESET}"
|
|
177
|
-
fi
|
|
178
|
-
if [[ -n "$ISSUE_MILESTONE" ]]; then
|
|
179
|
-
info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
|
|
180
|
-
fi
|
|
181
|
-
|
|
182
|
-
# Self-assign
|
|
183
|
-
gh_assign_self "$ISSUE_NUMBER"
|
|
184
|
-
|
|
185
|
-
# Add in-progress label
|
|
186
|
-
gh_add_labels "$ISSUE_NUMBER" "pipeline/in-progress"
|
|
187
|
-
fi
|
|
188
|
-
|
|
189
|
-
# 2. Detect task type
|
|
190
|
-
TASK_TYPE=$(detect_task_type "$GOAL")
|
|
191
|
-
local suggested_template
|
|
192
|
-
suggested_template=$(template_for_type "$TASK_TYPE")
|
|
193
|
-
info "Detected: ${BOLD}$TASK_TYPE${RESET} → team template: ${CYAN}$suggested_template${RESET}"
|
|
194
|
-
|
|
195
|
-
# 3. Auto-detect test command if not provided
|
|
196
|
-
if [[ -z "$TEST_CMD" ]]; then
|
|
197
|
-
TEST_CMD=$(detect_test_cmd)
|
|
198
|
-
if [[ -n "$TEST_CMD" ]]; then
|
|
199
|
-
info "Auto-detected test: ${DIM}$TEST_CMD${RESET}"
|
|
200
|
-
fi
|
|
201
|
-
fi
|
|
202
|
-
|
|
203
|
-
# 4. Create branch with smart prefix
|
|
204
|
-
local prefix
|
|
205
|
-
prefix=$(branch_prefix_for_type "$TASK_TYPE")
|
|
206
|
-
local slug
|
|
207
|
-
slug=$(echo "$GOAL" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/-/g' | sed 's/--*/-/g' | cut -c1-40)
|
|
208
|
-
slug="${slug%-}"
|
|
209
|
-
[[ -n "$ISSUE_NUMBER" ]] && slug="${slug}-${ISSUE_NUMBER}"
|
|
210
|
-
GIT_BRANCH="${prefix}/${slug}"
|
|
211
|
-
|
|
212
|
-
git checkout -b "$GIT_BRANCH" 2>/dev/null || {
|
|
213
|
-
info "Branch $GIT_BRANCH exists, checking out"
|
|
214
|
-
git checkout "$GIT_BRANCH" 2>/dev/null || true
|
|
215
|
-
}
|
|
216
|
-
success "Branch: ${BOLD}$GIT_BRANCH${RESET}"
|
|
217
|
-
|
|
218
|
-
# 5. Post initial progress comment on GitHub issue
|
|
219
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
220
|
-
local body
|
|
221
|
-
body=$(gh_build_progress_body)
|
|
222
|
-
gh_post_progress "$ISSUE_NUMBER" "$body"
|
|
223
|
-
fi
|
|
224
|
-
|
|
225
|
-
# 6. Save artifacts
|
|
226
|
-
save_artifact "intake.json" "$(jq -n \
|
|
227
|
-
--arg goal "$GOAL" --arg type "$TASK_TYPE" \
|
|
228
|
-
--arg template "$suggested_template" --arg branch "$GIT_BRANCH" \
|
|
229
|
-
--arg issue "${GITHUB_ISSUE:-}" --arg lang "$project_lang" \
|
|
230
|
-
--arg test_cmd "${TEST_CMD:-}" --arg labels "${ISSUE_LABELS:-}" \
|
|
231
|
-
--arg milestone "${ISSUE_MILESTONE:-}" --arg body "${ISSUE_BODY:-}" \
|
|
232
|
-
'{goal:$goal, type:$type, template:$template, branch:$branch,
|
|
233
|
-
issue:$issue, language:$lang, test_cmd:$test_cmd,
|
|
234
|
-
labels:$labels, milestone:$milestone, body:$body}')"
|
|
235
|
-
|
|
236
|
-
log_stage "intake" "Goal: $GOAL
|
|
237
|
-
Type: $TASK_TYPE → template: $suggested_template
|
|
238
|
-
Branch: $GIT_BRANCH
|
|
239
|
-
Language: $project_lang
|
|
240
|
-
Test cmd: ${TEST_CMD:-none detected}"
|
|
241
|
-
}
|
|
242
|
-
|
|
243
|
-
stage_plan() {
|
|
244
|
-
CURRENT_STAGE_ID="plan"
|
|
245
|
-
local plan_file="$ARTIFACTS_DIR/plan.md"
|
|
246
|
-
|
|
247
|
-
if ! command -v claude >/dev/null 2>&1; then
|
|
248
|
-
error "Claude CLI not found — cannot generate plan"
|
|
249
|
-
return 1
|
|
250
|
-
fi
|
|
251
|
-
|
|
252
|
-
info "Generating implementation plan..."
|
|
253
|
-
|
|
254
|
-
# ── Gather context bundle (if context engine available) ──
|
|
255
|
-
local context_script="${SCRIPT_DIR}/sw-context.sh"
|
|
256
|
-
if [[ -x "$context_script" ]]; then
|
|
257
|
-
"$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
|
|
258
|
-
fi
|
|
259
|
-
|
|
260
|
-
# Gather rich architecture context (call-graph, dependencies)
|
|
261
|
-
local arch_context=""
|
|
262
|
-
if type gather_architecture_context &>/dev/null; then
|
|
263
|
-
arch_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
|
|
264
|
-
fi
|
|
265
|
-
|
|
266
|
-
# Build rich prompt with all available context
|
|
267
|
-
local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.
|
|
268
|
-
|
|
269
|
-
## Goal
|
|
270
|
-
${GOAL}
|
|
271
|
-
"
|
|
272
|
-
|
|
273
|
-
# Add issue context
|
|
274
|
-
if [[ -n "$ISSUE_BODY" ]]; then
|
|
275
|
-
plan_prompt="${plan_prompt}
|
|
276
|
-
## Issue Description
|
|
277
|
-
${ISSUE_BODY}
|
|
278
|
-
"
|
|
279
|
-
fi
|
|
280
|
-
|
|
281
|
-
# Inject architecture context (import graph, modules, test map)
|
|
282
|
-
if [[ -n "$arch_context" ]]; then
|
|
283
|
-
arch_context=$(prune_context_section "architecture" "$arch_context" 5000)
|
|
284
|
-
plan_prompt="${plan_prompt}
|
|
285
|
-
## Architecture Context
|
|
286
|
-
${arch_context}
|
|
287
|
-
"
|
|
288
|
-
fi
|
|
289
|
-
|
|
290
|
-
# Inject context bundle from context engine (if available)
|
|
291
|
-
local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
|
|
292
|
-
if [[ -f "$_context_bundle" ]]; then
|
|
293
|
-
local _cb_content
|
|
294
|
-
_cb_content=$(cat "$_context_bundle" 2>/dev/null | head -100 || true)
|
|
295
|
-
_cb_content=$(prune_context_section "context-bundle" "$_cb_content" 8000)
|
|
296
|
-
if [[ -n "$_cb_content" ]]; then
|
|
297
|
-
plan_prompt="${plan_prompt}
|
|
298
|
-
## Pipeline Context
|
|
299
|
-
${_cb_content}
|
|
300
|
-
"
|
|
301
|
-
fi
|
|
302
|
-
fi
|
|
303
|
-
|
|
304
|
-
# Inject intelligence memory context for similar past plans
|
|
305
|
-
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
306
|
-
local plan_memory
|
|
307
|
-
plan_memory=$(intelligence_search_memory "plan stage for ${TASK_TYPE:-feature}: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
|
|
308
|
-
if [[ -n "$plan_memory" && "$plan_memory" != *'"results":[]'* && "$plan_memory" != *'"error"'* ]]; then
|
|
309
|
-
local memory_summary
|
|
310
|
-
memory_summary=$(echo "$plan_memory" | jq -r '.results[]? | "- \(.)"' 2>/dev/null | head -10 || true)
|
|
311
|
-
memory_summary=$(prune_context_section "memory" "$memory_summary" 10000)
|
|
312
|
-
if [[ -n "$memory_summary" ]]; then
|
|
313
|
-
plan_prompt="${plan_prompt}
|
|
314
|
-
## Historical Context (from previous pipelines)
|
|
315
|
-
Previous similar issues were planned as:
|
|
316
|
-
${memory_summary}
|
|
317
|
-
"
|
|
318
|
-
fi
|
|
319
|
-
fi
|
|
320
|
-
fi
|
|
321
|
-
|
|
322
|
-
# Self-aware pipeline: inject hint when plan stage has been failing recently
|
|
323
|
-
local plan_hint
|
|
324
|
-
plan_hint=$(get_stage_self_awareness_hint "plan" 2>/dev/null || true)
|
|
325
|
-
if [[ -n "$plan_hint" ]]; then
|
|
326
|
-
plan_prompt="${plan_prompt}
|
|
327
|
-
## Self-Assessment (recent plan stage performance)
|
|
328
|
-
${plan_hint}
|
|
329
|
-
"
|
|
330
|
-
fi
|
|
331
|
-
|
|
332
|
-
# Inject cross-pipeline discoveries (from other concurrent/similar pipelines)
|
|
333
|
-
if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
|
|
334
|
-
local plan_discoveries
|
|
335
|
-
plan_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.json" 2>/dev/null | head -20 || true)
|
|
336
|
-
plan_discoveries=$(prune_context_section "discoveries" "$plan_discoveries" 3000)
|
|
337
|
-
if [[ -n "$plan_discoveries" ]]; then
|
|
338
|
-
plan_prompt="${plan_prompt}
|
|
339
|
-
## Discoveries from Other Pipelines
|
|
340
|
-
${plan_discoveries}
|
|
341
|
-
"
|
|
342
|
-
fi
|
|
343
|
-
fi
|
|
344
|
-
|
|
345
|
-
# Inject architecture patterns from intelligence layer
|
|
346
|
-
local repo_hash_plan
|
|
347
|
-
repo_hash_plan=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
348
|
-
local arch_file_plan="${HOME}/.shipwright/memory/${repo_hash_plan}/architecture.json"
|
|
349
|
-
if [[ -f "$arch_file_plan" ]]; then
|
|
350
|
-
local arch_patterns
|
|
351
|
-
arch_patterns=$(jq -r '
|
|
352
|
-
"Language: \(.language // "unknown")",
|
|
353
|
-
"Framework: \(.framework // "unknown")",
|
|
354
|
-
"Patterns: \((.patterns // []) | join(", "))",
|
|
355
|
-
"Rules: \((.rules // []) | join("; "))"
|
|
356
|
-
' "$arch_file_plan" 2>/dev/null || true)
|
|
357
|
-
arch_patterns=$(prune_context_section "intelligence" "$arch_patterns" 5000)
|
|
358
|
-
if [[ -n "$arch_patterns" ]]; then
|
|
359
|
-
plan_prompt="${plan_prompt}
|
|
360
|
-
## Architecture Patterns
|
|
361
|
-
${arch_patterns}
|
|
362
|
-
"
|
|
363
|
-
fi
|
|
364
|
-
fi
|
|
365
|
-
|
|
366
|
-
# Task-type-specific guidance
|
|
367
|
-
case "${TASK_TYPE:-feature}" in
|
|
368
|
-
bug)
|
|
369
|
-
plan_prompt="${plan_prompt}
|
|
370
|
-
## Task Type: Bug Fix
|
|
371
|
-
Focus on: reproducing the bug, identifying root cause, minimal targeted fix, regression tests.
|
|
372
|
-
" ;;
|
|
373
|
-
refactor)
|
|
374
|
-
plan_prompt="${plan_prompt}
|
|
375
|
-
## Task Type: Refactor
|
|
376
|
-
Focus on: preserving all existing behavior, incremental changes, comprehensive test coverage.
|
|
377
|
-
" ;;
|
|
378
|
-
security)
|
|
379
|
-
plan_prompt="${plan_prompt}
|
|
380
|
-
## Task Type: Security
|
|
381
|
-
Focus on: threat modeling, OWASP top 10, input validation, authentication/authorization.
|
|
382
|
-
" ;;
|
|
383
|
-
esac
|
|
384
|
-
|
|
385
|
-
# Add project context
|
|
386
|
-
local project_lang
|
|
387
|
-
project_lang=$(detect_project_lang)
|
|
388
|
-
plan_prompt="${plan_prompt}
|
|
389
|
-
## Project Context
|
|
390
|
-
- Language: ${project_lang}
|
|
391
|
-
- Test command: ${TEST_CMD:-not configured}
|
|
392
|
-
- Task type: ${TASK_TYPE:-feature}
|
|
393
|
-
|
|
394
|
-
## Context Efficiency
|
|
395
|
-
- Batch independent tool calls in parallel when possible
|
|
396
|
-
- Read specific file sections (offset/limit) instead of entire large files
|
|
397
|
-
- Use targeted grep searches — avoid scanning entire codebases into context
|
|
398
|
-
- Delegate multi-file analysis to subagents when available
|
|
399
|
-
|
|
400
|
-
## Required Output
|
|
401
|
-
Create a Markdown plan with these sections:
|
|
402
|
-
|
|
403
|
-
### Files to Modify
|
|
404
|
-
List every file to create or modify with full paths.
|
|
405
|
-
|
|
406
|
-
### Implementation Steps
|
|
407
|
-
Numbered steps in order of execution. Be specific about what code to write.
|
|
408
|
-
|
|
409
|
-
### Task Checklist
|
|
410
|
-
A checkbox list of discrete tasks that can be tracked:
|
|
411
|
-
- [ ] Task 1: Description
|
|
412
|
-
- [ ] Task 2: Description
|
|
413
|
-
(Include 5-15 tasks covering the full implementation)
|
|
414
|
-
|
|
415
|
-
### Testing Approach
|
|
416
|
-
How to verify the implementation works.
|
|
417
|
-
|
|
418
|
-
### Definition of Done
|
|
419
|
-
Checklist of completion criteria.
|
|
420
|
-
"
|
|
421
|
-
|
|
422
|
-
# Guard total prompt size
|
|
423
|
-
plan_prompt=$(guard_prompt_size "plan" "$plan_prompt")
|
|
424
|
-
|
|
425
|
-
local plan_model
|
|
426
|
-
plan_model=$(jq -r --arg id "plan" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
427
|
-
[[ -n "$MODEL" ]] && plan_model="$MODEL"
|
|
428
|
-
[[ -z "$plan_model" || "$plan_model" == "null" ]] && plan_model="opus"
|
|
429
|
-
# Intelligence model routing (when no explicit CLI --model override)
|
|
430
|
-
if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
|
|
431
|
-
plan_model="$CLAUDE_MODEL"
|
|
432
|
-
fi
|
|
433
|
-
|
|
434
|
-
local _token_log="${ARTIFACTS_DIR}/.claude-tokens-plan.log"
|
|
435
|
-
claude --print --model "$plan_model" --max-turns 25 --dangerously-skip-permissions \
|
|
436
|
-
"$plan_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
|
|
437
|
-
parse_claude_tokens "$_token_log"
|
|
438
|
-
|
|
439
|
-
# Claude may write to disk via tools instead of stdout — rescue those files
|
|
440
|
-
local _plan_rescue
|
|
441
|
-
for _plan_rescue in "${PROJECT_ROOT}/PLAN.md" "${PROJECT_ROOT}/plan.md" \
|
|
442
|
-
"${PROJECT_ROOT}/implementation-plan.md"; do
|
|
443
|
-
if [[ -s "$_plan_rescue" ]] && [[ $(wc -l < "$plan_file" 2>/dev/null | xargs) -lt 10 ]]; then
|
|
444
|
-
info "Plan written to ${_plan_rescue} via tools — adopting as plan artifact"
|
|
445
|
-
cat "$_plan_rescue" >> "$plan_file"
|
|
446
|
-
rm -f "$_plan_rescue"
|
|
447
|
-
break
|
|
448
|
-
fi
|
|
449
|
-
done
|
|
450
|
-
|
|
451
|
-
if [[ ! -s "$plan_file" ]]; then
|
|
452
|
-
error "Plan generation failed — empty output"
|
|
453
|
-
return 1
|
|
454
|
-
fi
|
|
455
|
-
|
|
456
|
-
# Validate plan content — detect API/CLI errors masquerading as plans
|
|
457
|
-
local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
|
|
458
|
-
_plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
|
|
459
|
-
if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
|
|
460
|
-
error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
|
|
461
|
-
return 1
|
|
462
|
-
fi
|
|
463
|
-
|
|
464
|
-
local line_count
|
|
465
|
-
line_count=$(wc -l < "$plan_file" | xargs)
|
|
466
|
-
if [[ "$line_count" -lt 3 ]]; then
|
|
467
|
-
error "Plan too short (${line_count} lines) — likely an error, not a real plan"
|
|
468
|
-
return 1
|
|
469
|
-
fi
|
|
470
|
-
info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"
|
|
471
|
-
|
|
472
|
-
# Extract task checklist for GitHub issue and task tracking
|
|
473
|
-
local checklist
|
|
474
|
-
checklist=$(sed -n '/### Task Checklist/,/^###/p' "$plan_file" 2>/dev/null | \
|
|
475
|
-
grep '^\s*- \[' | head -20)
|
|
476
|
-
|
|
477
|
-
if [[ -z "$checklist" ]]; then
|
|
478
|
-
# Fallback: extract any checkbox lines
|
|
479
|
-
checklist=$(grep '^\s*- \[' "$plan_file" 2>/dev/null | head -20)
|
|
480
|
-
fi
|
|
481
|
-
|
|
482
|
-
# Write local task file for Claude Code build stage
|
|
483
|
-
if [[ -n "$checklist" ]]; then
|
|
484
|
-
cat > "$TASKS_FILE" <<TASKS_EOF
|
|
485
|
-
# Pipeline Tasks — ${GOAL}
|
|
486
|
-
|
|
487
|
-
## Implementation Checklist
|
|
488
|
-
${checklist}
|
|
489
|
-
|
|
490
|
-
## Context
|
|
491
|
-
- Pipeline: ${PIPELINE_NAME}
|
|
492
|
-
- Branch: ${GIT_BRANCH}
|
|
493
|
-
- Issue: ${GITHUB_ISSUE:-none}
|
|
494
|
-
- Generated: $(now_iso)
|
|
495
|
-
TASKS_EOF
|
|
496
|
-
info "Task list: ${DIM}$TASKS_FILE${RESET} ($(echo "$checklist" | wc -l | xargs) tasks)"
|
|
497
|
-
fi
|
|
498
|
-
|
|
499
|
-
# Post plan + task checklist to GitHub issue
|
|
500
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
501
|
-
local plan_summary
|
|
502
|
-
plan_summary=$(head -50 "$plan_file")
|
|
503
|
-
local gh_body="## 📋 Implementation Plan
|
|
504
|
-
|
|
505
|
-
<details>
|
|
506
|
-
<summary>Click to expand full plan (${line_count} lines)</summary>
|
|
507
|
-
|
|
508
|
-
${plan_summary}
|
|
509
|
-
|
|
510
|
-
</details>
|
|
511
|
-
"
|
|
512
|
-
if [[ -n "$checklist" ]]; then
|
|
513
|
-
gh_body="${gh_body}
|
|
514
|
-
## ✅ Task Checklist
|
|
515
|
-
${checklist}
|
|
516
|
-
"
|
|
517
|
-
fi
|
|
518
|
-
|
|
519
|
-
gh_body="${gh_body}
|
|
520
|
-
---
|
|
521
|
-
_Generated by \`shipwright pipeline\` at $(now_iso)_"
|
|
522
|
-
|
|
523
|
-
gh_comment_issue "$ISSUE_NUMBER" "$gh_body"
|
|
524
|
-
info "Plan posted to issue #$ISSUE_NUMBER"
|
|
525
|
-
fi
|
|
526
|
-
|
|
527
|
-
# Push plan to wiki
|
|
528
|
-
gh_wiki_page "Pipeline-Plan-${ISSUE_NUMBER:-inline}" "$(<"$plan_file")"
|
|
529
|
-
|
|
530
|
-
# Generate Claude Code task list
|
|
531
|
-
local cc_tasks_file="$PROJECT_ROOT/.claude/tasks.md"
|
|
532
|
-
if [[ -n "$checklist" ]]; then
|
|
533
|
-
cat > "$cc_tasks_file" <<CC_TASKS_EOF
|
|
534
|
-
# Tasks — ${GOAL}
|
|
535
|
-
|
|
536
|
-
## Status: In Progress
|
|
537
|
-
Pipeline: ${PIPELINE_NAME} | Branch: ${GIT_BRANCH}
|
|
538
|
-
|
|
539
|
-
## Checklist
|
|
540
|
-
${checklist}
|
|
541
|
-
|
|
542
|
-
## Notes
|
|
543
|
-
- Generated from pipeline plan at $(now_iso)
|
|
544
|
-
- Pipeline will update status as tasks complete
|
|
545
|
-
CC_TASKS_EOF
|
|
546
|
-
info "Claude Code tasks: ${DIM}$cc_tasks_file${RESET}"
|
|
547
|
-
fi
|
|
548
|
-
|
|
549
|
-
# Extract definition of done for quality gates
|
|
550
|
-
sed -n '/[Dd]efinition [Oo]f [Dd]one/,/^#/p' "$plan_file" | head -20 > "$ARTIFACTS_DIR/dod.md" 2>/dev/null || true
|
|
551
|
-
|
|
552
|
-
# ── Plan Validation Gate ──
|
|
553
|
-
# Ask Claude to validate the plan before proceeding
|
|
554
|
-
if command -v claude >/dev/null 2>&1 && [[ -s "$plan_file" ]]; then
|
|
555
|
-
local validation_attempts=0
|
|
556
|
-
local max_validation_attempts=2
|
|
557
|
-
local plan_valid=false
|
|
558
|
-
|
|
559
|
-
while [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; do
|
|
560
|
-
validation_attempts=$((validation_attempts + 1))
|
|
561
|
-
info "Validating plan (attempt ${validation_attempts}/${max_validation_attempts})..."
|
|
562
|
-
|
|
563
|
-
# Build enriched validation prompt with learned context
|
|
564
|
-
local validation_extra=""
|
|
565
|
-
|
|
566
|
-
# Inject rejected plan history from memory
|
|
567
|
-
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
568
|
-
local rejected_plans
|
|
569
|
-
rejected_plans=$(intelligence_search_memory "rejected plan validation failures for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
|
|
570
|
-
if [[ -n "$rejected_plans" ]]; then
|
|
571
|
-
validation_extra="${validation_extra}
|
|
572
|
-
## Previously Rejected Plans
|
|
573
|
-
These issues were found in past plan validations for similar tasks:
|
|
574
|
-
${rejected_plans}
|
|
575
|
-
"
|
|
576
|
-
fi
|
|
577
|
-
fi
|
|
578
|
-
|
|
579
|
-
# Inject repo conventions contextually
|
|
580
|
-
local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
|
|
581
|
-
if [[ -f "$claudemd" ]]; then
|
|
582
|
-
local conventions_summary
|
|
583
|
-
conventions_summary=$(head -100 "$claudemd" 2>/dev/null | grep -E '^##|^-|^\*' | head -15 || true)
|
|
584
|
-
if [[ -n "$conventions_summary" ]]; then
|
|
585
|
-
validation_extra="${validation_extra}
|
|
586
|
-
## Repo Conventions
|
|
587
|
-
${conventions_summary}
|
|
588
|
-
"
|
|
589
|
-
fi
|
|
590
|
-
fi
|
|
591
|
-
|
|
592
|
-
# Inject complexity estimate
|
|
593
|
-
local complexity_hint=""
|
|
594
|
-
if [[ -n "${INTELLIGENCE_COMPLEXITY:-}" && "${INTELLIGENCE_COMPLEXITY:-0}" -gt 0 ]]; then
|
|
595
|
-
complexity_hint="This is estimated as complexity ${INTELLIGENCE_COMPLEXITY}/10. Plans for this complexity typically need ${INTELLIGENCE_COMPLEXITY} or more tasks."
|
|
596
|
-
fi
|
|
597
|
-
|
|
598
|
-
local validation_prompt="You are a plan validator. Review this implementation plan and determine if it is valid.
|
|
599
|
-
|
|
600
|
-
## Goal
|
|
601
|
-
${GOAL}
|
|
602
|
-
${complexity_hint:+
|
|
603
|
-
## Complexity Estimate
|
|
604
|
-
${complexity_hint}
|
|
605
|
-
}
|
|
606
|
-
## Plan
|
|
607
|
-
$(cat "$plan_file")
|
|
608
|
-
${validation_extra}
|
|
609
|
-
Evaluate:
|
|
610
|
-
1. Are all requirements from the goal addressed?
|
|
611
|
-
2. Is the plan decomposed into clear, achievable tasks?
|
|
612
|
-
3. Are the implementation steps specific enough to execute?
|
|
613
|
-
|
|
614
|
-
Respond with EXACTLY one of these on the first line:
|
|
615
|
-
VALID: true
|
|
616
|
-
VALID: false
|
|
617
|
-
|
|
618
|
-
Then explain your reasoning briefly."
|
|
619
|
-
|
|
620
|
-
local validation_model="${plan_model:-opus}"
|
|
621
|
-
local validation_result
|
|
622
|
-
validation_result=$(claude --print --output-format text -p "$validation_prompt" --model "$validation_model" < /dev/null 2>"${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log" || true)
|
|
623
|
-
parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-plan-validate.log"
|
|
624
|
-
|
|
625
|
-
# Save validation result
|
|
626
|
-
echo "$validation_result" > "$ARTIFACTS_DIR/plan-validation.md"
|
|
627
|
-
|
|
628
|
-
if echo "$validation_result" | head -5 | grep -qi "VALID: true"; then
|
|
629
|
-
success "Plan validation passed"
|
|
630
|
-
plan_valid=true
|
|
631
|
-
break
|
|
632
|
-
fi
|
|
633
|
-
|
|
634
|
-
warn "Plan validation failed (attempt ${validation_attempts}/${max_validation_attempts})"
|
|
635
|
-
|
|
636
|
-
# Analyze failure mode to decide how to recover
|
|
637
|
-
local failure_mode="unknown"
|
|
638
|
-
local validation_lower
|
|
639
|
-
validation_lower=$(echo "$validation_result" | tr '[:upper:]' '[:lower:]')
|
|
640
|
-
if echo "$validation_lower" | grep -qE 'requirements? unclear|goal.*vague|ambiguous|underspecified'; then
|
|
641
|
-
failure_mode="requirements_unclear"
|
|
642
|
-
elif echo "$validation_lower" | grep -qE 'insufficient detail|not specific|too high.level|missing.*steps|lacks.*detail'; then
|
|
643
|
-
failure_mode="insufficient_detail"
|
|
644
|
-
elif echo "$validation_lower" | grep -qE 'scope too (large|broad)|too many|overly complex|break.*down'; then
|
|
645
|
-
failure_mode="scope_too_large"
|
|
646
|
-
fi
|
|
647
|
-
|
|
648
|
-
emit_event "plan.validation_failure" \
|
|
649
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
650
|
-
"attempt=$validation_attempts" \
|
|
651
|
-
"failure_mode=$failure_mode"
|
|
652
|
-
|
|
653
|
-
# Track repeated failures — escalate if stuck in a loop
|
|
654
|
-
if [[ -f "$ARTIFACTS_DIR/.plan-failure-sig.txt" ]]; then
|
|
655
|
-
local prev_sig
|
|
656
|
-
prev_sig=$(cat "$ARTIFACTS_DIR/.plan-failure-sig.txt" 2>/dev/null || true)
|
|
657
|
-
if [[ "$failure_mode" == "$prev_sig" && "$failure_mode" != "unknown" ]]; then
|
|
658
|
-
warn "Same validation failure mode repeated ($failure_mode) — escalating"
|
|
659
|
-
emit_event "plan.validation_escalated" \
|
|
660
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
661
|
-
"failure_mode=$failure_mode"
|
|
662
|
-
break
|
|
663
|
-
fi
|
|
664
|
-
fi
|
|
665
|
-
echo "$failure_mode" > "$ARTIFACTS_DIR/.plan-failure-sig.txt"
|
|
666
|
-
|
|
667
|
-
if [[ "$validation_attempts" -lt "$max_validation_attempts" ]]; then
|
|
668
|
-
info "Regenerating plan with validation feedback (mode: ${failure_mode})..."
|
|
669
|
-
|
|
670
|
-
# Tailor regeneration prompt based on failure mode
|
|
671
|
-
local failure_guidance=""
|
|
672
|
-
case "$failure_mode" in
|
|
673
|
-
requirements_unclear)
|
|
674
|
-
failure_guidance="The validator found the requirements unclear. Add more specific acceptance criteria, input/output examples, and concrete success metrics." ;;
|
|
675
|
-
insufficient_detail)
|
|
676
|
-
failure_guidance="The validator found the plan lacks detail. Break each task into smaller, more specific implementation steps with exact file paths and function names." ;;
|
|
677
|
-
scope_too_large)
|
|
678
|
-
failure_guidance="The validator found the scope too large. Focus on the minimal viable implementation and defer non-essential features to follow-up tasks." ;;
|
|
679
|
-
esac
|
|
680
|
-
|
|
681
|
-
local regen_prompt="${plan_prompt}
|
|
682
|
-
|
|
683
|
-
IMPORTANT: A previous plan was rejected by validation. Issues found:
|
|
684
|
-
$(echo "$validation_result" | tail -20)
|
|
685
|
-
${failure_guidance:+
|
|
686
|
-
GUIDANCE: ${failure_guidance}}
|
|
687
|
-
|
|
688
|
-
Fix these issues in the new plan."
|
|
689
|
-
|
|
690
|
-
claude --print --model "$plan_model" --max-turns 25 \
|
|
691
|
-
"$regen_prompt" < /dev/null > "$plan_file" 2>"$_token_log" || true
|
|
692
|
-
parse_claude_tokens "$_token_log"
|
|
693
|
-
|
|
694
|
-
line_count=$(wc -l < "$plan_file" | xargs)
|
|
695
|
-
info "Regenerated plan: ${DIM}$plan_file${RESET} (${line_count} lines)"
|
|
696
|
-
fi
|
|
697
|
-
done
|
|
698
|
-
|
|
699
|
-
if [[ "$plan_valid" != "true" ]]; then
|
|
700
|
-
warn "Plan validation did not pass after ${max_validation_attempts} attempts — proceeding anyway"
|
|
701
|
-
fi
|
|
702
|
-
|
|
703
|
-
emit_event "plan.validated" \
|
|
704
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
705
|
-
"valid=${plan_valid}" \
|
|
706
|
-
"attempts=${validation_attempts}"
|
|
707
|
-
fi
|
|
708
|
-
|
|
709
|
-
log_stage "plan" "Generated plan.md (${line_count} lines, $(echo "$checklist" | wc -l | xargs) tasks)"
|
|
710
|
-
}
|
|
711
|
-
|
|
712
|
-
stage_design() {
|
|
713
|
-
CURRENT_STAGE_ID="design"
|
|
714
|
-
local plan_file="$ARTIFACTS_DIR/plan.md"
|
|
715
|
-
local design_file="$ARTIFACTS_DIR/design.md"
|
|
716
|
-
|
|
717
|
-
if [[ ! -s "$plan_file" ]]; then
|
|
718
|
-
warn "No plan found — skipping design stage"
|
|
719
|
-
return 0
|
|
720
|
-
fi
|
|
721
|
-
|
|
722
|
-
if ! command -v claude >/dev/null 2>&1; then
|
|
723
|
-
error "Claude CLI not found — cannot generate design"
|
|
724
|
-
return 1
|
|
725
|
-
fi
|
|
726
|
-
|
|
727
|
-
info "Generating Architecture Decision Record..."
|
|
728
|
-
|
|
729
|
-
# Gather rich architecture context (call-graph, dependencies)
|
|
730
|
-
local arch_struct_context=""
|
|
731
|
-
if type gather_architecture_context &>/dev/null; then
|
|
732
|
-
arch_struct_context=$(gather_architecture_context "${PROJECT_ROOT:-.}" 2>/dev/null || true)
|
|
733
|
-
fi
|
|
734
|
-
arch_struct_context=$(prune_context_section "architecture" "$arch_struct_context" 5000)
|
|
735
|
-
|
|
736
|
-
# Memory integration — inject context if memory system available
|
|
737
|
-
local memory_context=""
|
|
738
|
-
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
739
|
-
local mem_dir="${HOME}/.shipwright/memory"
|
|
740
|
-
memory_context=$(intelligence_search_memory "design stage architecture patterns for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
|
|
741
|
-
fi
|
|
742
|
-
if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
|
|
743
|
-
memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "design" 2>/dev/null) || true
|
|
744
|
-
fi
|
|
745
|
-
memory_context=$(prune_context_section "memory" "$memory_context" 10000)
|
|
746
|
-
|
|
747
|
-
# Inject cross-pipeline discoveries for design stage
|
|
748
|
-
local design_discoveries=""
|
|
749
|
-
if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
|
|
750
|
-
design_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
|
|
751
|
-
fi
|
|
752
|
-
design_discoveries=$(prune_context_section "discoveries" "$design_discoveries" 3000)
|
|
753
|
-
|
|
754
|
-
# Inject architecture model patterns if available
|
|
755
|
-
local arch_context=""
|
|
756
|
-
local repo_hash
|
|
757
|
-
repo_hash=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
758
|
-
local arch_model_file="${HOME}/.shipwright/memory/${repo_hash}/architecture.json"
|
|
759
|
-
if [[ -f "$arch_model_file" ]]; then
|
|
760
|
-
local arch_patterns
|
|
761
|
-
arch_patterns=$(jq -r '
|
|
762
|
-
[.patterns // [] | .[] | "- \(.name // "unnamed"): \(.description // "no description")"] | join("\n")
|
|
763
|
-
' "$arch_model_file" 2>/dev/null) || true
|
|
764
|
-
local arch_layers
|
|
765
|
-
arch_layers=$(jq -r '
|
|
766
|
-
[.layers // [] | .[] | "- \(.name // "unnamed"): \(.path // "")"] | join("\n")
|
|
767
|
-
' "$arch_model_file" 2>/dev/null) || true
|
|
768
|
-
if [[ -n "$arch_patterns" || -n "$arch_layers" ]]; then
|
|
769
|
-
arch_context="Previous designs in this repo follow these patterns:
|
|
770
|
-
${arch_patterns:+Patterns:
|
|
771
|
-
${arch_patterns}
|
|
772
|
-
}${arch_layers:+Layers:
|
|
773
|
-
${arch_layers}}"
|
|
774
|
-
fi
|
|
775
|
-
fi
|
|
776
|
-
arch_context=$(prune_context_section "intelligence" "$arch_context" 5000)
|
|
777
|
-
|
|
778
|
-
# Inject rejected design approaches and anti-patterns from memory
|
|
779
|
-
local design_antipatterns=""
|
|
780
|
-
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
781
|
-
local rejected_designs
|
|
782
|
-
rejected_designs=$(intelligence_search_memory "rejected design approaches anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 3 2>/dev/null) || true
|
|
783
|
-
if [[ -n "$rejected_designs" ]]; then
|
|
784
|
-
rejected_designs=$(prune_context_section "antipatterns" "$rejected_designs" 5000)
|
|
785
|
-
design_antipatterns="
|
|
786
|
-
## Rejected Approaches (from past reviews)
|
|
787
|
-
These design approaches were rejected in past reviews. Avoid repeating them:
|
|
788
|
-
${rejected_designs}
|
|
789
|
-
"
|
|
790
|
-
fi
|
|
791
|
-
fi
|
|
792
|
-
|
|
793
|
-
# Build design prompt with plan + project context
|
|
794
|
-
local project_lang
|
|
795
|
-
project_lang=$(detect_project_lang)
|
|
796
|
-
|
|
797
|
-
local design_prompt="You are a senior software architect. Review the implementation plan below and produce an Architecture Decision Record (ADR).
|
|
798
|
-
|
|
799
|
-
## Goal
|
|
800
|
-
${GOAL}
|
|
801
|
-
|
|
802
|
-
## Implementation Plan
|
|
803
|
-
$(cat "$plan_file")
|
|
804
|
-
|
|
805
|
-
## Project Context
|
|
806
|
-
- Language: ${project_lang}
|
|
807
|
-
- Test command: ${TEST_CMD:-not configured}
|
|
808
|
-
- Task type: ${TASK_TYPE:-feature}
|
|
809
|
-
${arch_struct_context:+
|
|
810
|
-
## Architecture Context (import graph, modules, test map)
|
|
811
|
-
${arch_struct_context}
|
|
812
|
-
}${memory_context:+
|
|
813
|
-
## Historical Context (from memory)
|
|
814
|
-
${memory_context}
|
|
815
|
-
}${arch_context:+
|
|
816
|
-
## Architecture Model (from previous designs)
|
|
817
|
-
${arch_context}
|
|
818
|
-
}${design_antipatterns}${design_discoveries:+
|
|
819
|
-
## Discoveries from Other Pipelines
|
|
820
|
-
${design_discoveries}
|
|
821
|
-
}
|
|
822
|
-
## Required Output — Architecture Decision Record
|
|
823
|
-
|
|
824
|
-
Produce this EXACT format:
|
|
825
|
-
|
|
826
|
-
# Design: ${GOAL}
|
|
827
|
-
|
|
828
|
-
## Context
|
|
829
|
-
[What problem we're solving, constraints from the codebase]
|
|
830
|
-
|
|
831
|
-
## Decision
|
|
832
|
-
[The chosen approach — be specific about patterns, data flow, error handling]
|
|
833
|
-
|
|
834
|
-
## Alternatives Considered
|
|
835
|
-
1. [Alternative A] — Pros: ... / Cons: ...
|
|
836
|
-
2. [Alternative B] — Pros: ... / Cons: ...
|
|
837
|
-
|
|
838
|
-
## Implementation Plan
|
|
839
|
-
- Files to create: [list with full paths]
|
|
840
|
-
- Files to modify: [list with full paths]
|
|
841
|
-
- Dependencies: [new deps if any]
|
|
842
|
-
- Risk areas: [fragile code, performance concerns]
|
|
843
|
-
|
|
844
|
-
## Validation Criteria
|
|
845
|
-
- [ ] [How we'll know the design is correct — testable criteria]
|
|
846
|
-
- [ ] [Additional validation items]
|
|
847
|
-
|
|
848
|
-
Be concrete and specific. Reference actual file paths in the codebase. Consider edge cases and failure modes."
|
|
849
|
-
|
|
850
|
-
# Guard total prompt size
|
|
851
|
-
design_prompt=$(guard_prompt_size "design" "$design_prompt")
|
|
852
|
-
|
|
853
|
-
local design_model
|
|
854
|
-
design_model=$(jq -r --arg id "design" '(.stages[] | select(.id == $id) | .config.model) // .defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
855
|
-
[[ -n "$MODEL" ]] && design_model="$MODEL"
|
|
856
|
-
[[ -z "$design_model" || "$design_model" == "null" ]] && design_model="opus"
|
|
857
|
-
# Intelligence model routing (when no explicit CLI --model override)
|
|
858
|
-
if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
|
|
859
|
-
design_model="$CLAUDE_MODEL"
|
|
860
|
-
fi
|
|
861
|
-
|
|
862
|
-
local _token_log="${ARTIFACTS_DIR}/.claude-tokens-design.log"
|
|
863
|
-
claude --print --model "$design_model" --max-turns 25 --dangerously-skip-permissions \
|
|
864
|
-
"$design_prompt" < /dev/null > "$design_file" 2>"$_token_log" || true
|
|
865
|
-
parse_claude_tokens "$_token_log"
|
|
866
|
-
|
|
867
|
-
# Claude may write to disk via tools instead of stdout — rescue those files
|
|
868
|
-
local _design_rescue
|
|
869
|
-
for _design_rescue in "${PROJECT_ROOT}/design-adr.md" "${PROJECT_ROOT}/design.md" \
|
|
870
|
-
"${PROJECT_ROOT}/ADR.md" "${PROJECT_ROOT}/DESIGN.md"; do
|
|
871
|
-
if [[ -s "$_design_rescue" ]] && [[ $(wc -l < "$design_file" 2>/dev/null | xargs) -lt 10 ]]; then
|
|
872
|
-
info "Design written to ${_design_rescue} via tools — adopting as design artifact"
|
|
873
|
-
cat "$_design_rescue" >> "$design_file"
|
|
874
|
-
rm -f "$_design_rescue"
|
|
875
|
-
break
|
|
876
|
-
fi
|
|
877
|
-
done
|
|
878
|
-
|
|
879
|
-
if [[ ! -s "$design_file" ]]; then
|
|
880
|
-
error "Design generation failed — empty output"
|
|
881
|
-
return 1
|
|
882
|
-
fi
|
|
883
|
-
|
|
884
|
-
# Validate design content — detect API/CLI errors masquerading as designs
|
|
885
|
-
local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
|
|
886
|
-
_design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
|
|
887
|
-
if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
|
|
888
|
-
error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
|
|
889
|
-
return 1
|
|
890
|
-
fi
|
|
891
|
-
|
|
892
|
-
local line_count
|
|
893
|
-
line_count=$(wc -l < "$design_file" | xargs)
|
|
894
|
-
if [[ "$line_count" -lt 3 ]]; then
|
|
895
|
-
error "Design too short (${line_count} lines) — likely an error, not a real design"
|
|
896
|
-
return 1
|
|
897
|
-
fi
|
|
898
|
-
info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"
|
|
899
|
-
|
|
900
|
-
# Extract file lists for build stage awareness
|
|
901
|
-
local files_to_create files_to_modify
|
|
902
|
-
files_to_create=$(sed -n '/Files to create/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
|
|
903
|
-
files_to_modify=$(sed -n '/Files to modify/,/^-\|^#\|^$/p' "$design_file" 2>/dev/null | grep -E '^\s*-' | head -20 || true)
|
|
904
|
-
|
|
905
|
-
if [[ -n "$files_to_create" || -n "$files_to_modify" ]]; then
|
|
906
|
-
info "Design scope: ${DIM}$(echo "$files_to_create $files_to_modify" | grep -c '^\s*-' || true) file(s)${RESET}"
|
|
907
|
-
fi
|
|
908
|
-
|
|
909
|
-
# Post design to GitHub issue
|
|
910
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
911
|
-
local design_summary
|
|
912
|
-
design_summary=$(head -60 "$design_file")
|
|
913
|
-
gh_comment_issue "$ISSUE_NUMBER" "## 📐 Architecture Decision Record
|
|
914
|
-
|
|
915
|
-
<details>
|
|
916
|
-
<summary>Click to expand ADR (${line_count} lines)</summary>
|
|
917
|
-
|
|
918
|
-
${design_summary}
|
|
919
|
-
|
|
920
|
-
</details>
|
|
921
|
-
|
|
922
|
-
---
|
|
923
|
-
_Generated by \`shipwright pipeline\` design stage at $(now_iso)_"
|
|
924
|
-
fi
|
|
925
|
-
|
|
926
|
-
# Push design to wiki
|
|
927
|
-
gh_wiki_page "Pipeline-Design-${ISSUE_NUMBER:-inline}" "$(<"$design_file")"
|
|
928
|
-
|
|
929
|
-
log_stage "design" "Generated design.md (${line_count} lines)"
|
|
930
|
-
}
|
|
931
|
-
|
|
932
|
-
# ─── TDD: Generate tests before implementation ─────────────────────────────────
|
|
933
|
-
stage_test_first() {
|
|
934
|
-
CURRENT_STAGE_ID="test_first"
|
|
935
|
-
info "Generating tests from requirements (TDD mode)"
|
|
936
|
-
|
|
937
|
-
local plan_file="${ARTIFACTS_DIR}/plan.md"
|
|
938
|
-
local goal_file="${PROJECT_ROOT}/.claude/goal.md"
|
|
939
|
-
local requirements=""
|
|
940
|
-
if [[ -f "$plan_file" ]]; then
|
|
941
|
-
requirements=$(cat "$plan_file" 2>/dev/null || true)
|
|
942
|
-
elif [[ -f "$goal_file" ]]; then
|
|
943
|
-
requirements=$(cat "$goal_file" 2>/dev/null || true)
|
|
944
|
-
else
|
|
945
|
-
requirements="${GOAL:-}: ${ISSUE_BODY:-}"
|
|
946
|
-
fi
|
|
947
|
-
|
|
948
|
-
local tdd_prompt="You are writing tests BEFORE implementation (TDD).
|
|
949
|
-
|
|
950
|
-
Based on the following plan/requirements, generate test files that define the expected behavior. These tests should FAIL initially (since the implementation doesn't exist yet) but define the correct interface and behavior.
|
|
951
|
-
|
|
952
|
-
Requirements:
|
|
953
|
-
${requirements}
|
|
954
|
-
|
|
955
|
-
Instructions:
|
|
956
|
-
1. Create test files for each component mentioned in the plan
|
|
957
|
-
2. Tests should verify the PUBLIC interface and expected behavior
|
|
958
|
-
3. Include edge cases and error handling tests
|
|
959
|
-
4. Tests should be runnable with the project's test framework
|
|
960
|
-
5. Mark tests that need implementation with clear TODO comments
|
|
961
|
-
6. Do NOT write implementation code — only tests
|
|
962
|
-
|
|
963
|
-
Output format: For each test file, use a fenced code block with the file path as the language identifier (e.g. \`\`\`tests/auth.test.ts):
|
|
964
|
-
\`\`\`path/to/test.test.ts
|
|
965
|
-
// file content
|
|
966
|
-
\`\`\`
|
|
967
|
-
|
|
968
|
-
Create files in the appropriate project directories (e.g. tests/, __tests__/, src/**/*.test.ts) per project convention."
|
|
969
|
-
|
|
970
|
-
local model="${CLAUDE_MODEL:-${MODEL:-sonnet}}"
|
|
971
|
-
[[ -z "$model" || "$model" == "null" ]] && model="sonnet"
|
|
972
|
-
|
|
973
|
-
local output=""
|
|
974
|
-
output=$(echo "$tdd_prompt" | timeout 120 claude --print --model "$model" 2>/dev/null) || {
|
|
975
|
-
warn "TDD test generation failed, falling back to standard build"
|
|
976
|
-
return 1
|
|
977
|
-
}
|
|
978
|
-
|
|
979
|
-
# Parse output: extract fenced code blocks and write to files
|
|
980
|
-
local wrote_any=false
|
|
981
|
-
local block_path="" in_block=false block_content=""
|
|
982
|
-
while IFS= read -r line; do
|
|
983
|
-
if [[ "$line" =~ ^\`\`\`([a-zA-Z0-9_/\.\-]+)$ ]]; then
|
|
984
|
-
if [[ -n "$block_path" && -n "$block_content" ]]; then
|
|
985
|
-
local out_file="${PROJECT_ROOT}/${block_path}"
|
|
986
|
-
local out_dir
|
|
987
|
-
out_dir=$(dirname "$out_file")
|
|
988
|
-
mkdir -p "$out_dir" 2>/dev/null || true
|
|
989
|
-
if echo "$block_content" > "$out_file" 2>/dev/null; then
|
|
990
|
-
wrote_any=true
|
|
991
|
-
info " Wrote: $block_path"
|
|
992
|
-
fi
|
|
993
|
-
fi
|
|
994
|
-
block_path="${BASH_REMATCH[1]}"
|
|
995
|
-
block_content=""
|
|
996
|
-
in_block=true
|
|
997
|
-
elif [[ "$line" == "\`\`\`" && "$in_block" == "true" ]]; then
|
|
998
|
-
if [[ -n "$block_path" && -n "$block_content" ]]; then
|
|
999
|
-
local out_file="${PROJECT_ROOT}/${block_path}"
|
|
1000
|
-
local out_dir
|
|
1001
|
-
out_dir=$(dirname "$out_file")
|
|
1002
|
-
mkdir -p "$out_dir" 2>/dev/null || true
|
|
1003
|
-
if echo "$block_content" > "$out_file" 2>/dev/null; then
|
|
1004
|
-
wrote_any=true
|
|
1005
|
-
info " Wrote: $block_path"
|
|
1006
|
-
fi
|
|
1007
|
-
fi
|
|
1008
|
-
block_path=""
|
|
1009
|
-
block_content=""
|
|
1010
|
-
in_block=false
|
|
1011
|
-
elif [[ "$in_block" == "true" && -n "$block_path" ]]; then
|
|
1012
|
-
[[ -n "$block_content" ]] && block_content="${block_content}"$'\n'
|
|
1013
|
-
block_content="${block_content}${line}"
|
|
1014
|
-
fi
|
|
1015
|
-
done <<< "$output"
|
|
1016
|
-
|
|
1017
|
-
# Flush last block if unclosed
|
|
1018
|
-
if [[ -n "$block_path" && -n "$block_content" ]]; then
|
|
1019
|
-
local out_file="${PROJECT_ROOT}/${block_path}"
|
|
1020
|
-
local out_dir
|
|
1021
|
-
out_dir=$(dirname "$out_file")
|
|
1022
|
-
mkdir -p "$out_dir" 2>/dev/null || true
|
|
1023
|
-
if echo "$block_content" > "$out_file" 2>/dev/null; then
|
|
1024
|
-
wrote_any=true
|
|
1025
|
-
info " Wrote: $block_path"
|
|
1026
|
-
fi
|
|
1027
|
-
fi
|
|
1028
|
-
|
|
1029
|
-
if [[ "$wrote_any" == "true" ]]; then
|
|
1030
|
-
if (cd "$PROJECT_ROOT" && git diff --name-only 2>/dev/null | grep -qE 'test|spec'); then
|
|
1031
|
-
git add -A 2>/dev/null || true
|
|
1032
|
-
git commit -m "test: TDD - define expected behavior before implementation" 2>/dev/null || true
|
|
1033
|
-
emit_event "tdd.tests_generated" "{\"stage\":\"test_first\"}"
|
|
1034
|
-
fi
|
|
1035
|
-
success "TDD tests generated"
|
|
1036
|
-
else
|
|
1037
|
-
warn "No test files extracted from TDD output — check format"
|
|
1038
|
-
fi
|
|
1039
|
-
|
|
1040
|
-
return 0
|
|
1041
|
-
}
|
|
1042
|
-
|
|
1043
|
-
stage_build() {
|
|
1044
|
-
local plan_file="$ARTIFACTS_DIR/plan.md"
|
|
1045
|
-
local design_file="$ARTIFACTS_DIR/design.md"
|
|
1046
|
-
local dod_file="$ARTIFACTS_DIR/dod.md"
|
|
1047
|
-
local loop_args=()
|
|
1048
|
-
|
|
1049
|
-
# Memory integration — inject context if memory system available
|
|
1050
|
-
local memory_context=""
|
|
1051
|
-
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
1052
|
-
local mem_dir="${HOME}/.shipwright/memory"
|
|
1053
|
-
memory_context=$(intelligence_search_memory "build stage for: ${GOAL:-}" "$mem_dir" 5 2>/dev/null) || true
|
|
1054
|
-
fi
|
|
1055
|
-
if [[ -z "$memory_context" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
|
|
1056
|
-
memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "build" 2>/dev/null) || true
|
|
1057
|
-
fi
|
|
1058
|
-
|
|
1059
|
-
# Build enriched goal with compact context (avoids prompt bloat)
|
|
1060
|
-
local enriched_goal
|
|
1061
|
-
enriched_goal=$(_pipeline_compact_goal "$GOAL" "$plan_file" "$design_file")
|
|
1062
|
-
|
|
1063
|
-
# TDD: when test_first ran, tell build to make existing tests pass
|
|
1064
|
-
if [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
|
|
1065
|
-
enriched_goal="${enriched_goal}
|
|
1066
|
-
|
|
1067
|
-
IMPORTANT (TDD mode): Test files already exist and define the expected behavior. Write implementation code to make ALL tests pass. Do not delete or modify the test files."
|
|
1068
|
-
fi
|
|
1069
|
-
|
|
1070
|
-
# Inject memory context
|
|
1071
|
-
if [[ -n "$memory_context" ]]; then
|
|
1072
|
-
enriched_goal="${enriched_goal}
|
|
1073
|
-
|
|
1074
|
-
Historical context (lessons from previous pipelines):
|
|
1075
|
-
${memory_context}"
|
|
1076
|
-
fi
|
|
1077
|
-
|
|
1078
|
-
# Inject cross-pipeline discoveries for build stage
|
|
1079
|
-
if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
|
|
1080
|
-
local build_discoveries
|
|
1081
|
-
build_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "src/*,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
|
|
1082
|
-
if [[ -n "$build_discoveries" ]]; then
|
|
1083
|
-
enriched_goal="${enriched_goal}
|
|
1084
|
-
|
|
1085
|
-
Discoveries from other pipelines:
|
|
1086
|
-
${build_discoveries}"
|
|
1087
|
-
fi
|
|
1088
|
-
fi
|
|
1089
|
-
|
|
1090
|
-
# Add task list context
|
|
1091
|
-
if [[ -s "$TASKS_FILE" ]]; then
|
|
1092
|
-
enriched_goal="${enriched_goal}
|
|
1093
|
-
|
|
1094
|
-
Task tracking (check off items as you complete them):
|
|
1095
|
-
$(cat "$TASKS_FILE")"
|
|
1096
|
-
fi
|
|
1097
|
-
|
|
1098
|
-
# Inject file hotspots from GitHub intelligence
|
|
1099
|
-
if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_file_change_frequency >/dev/null 2>&1; then
|
|
1100
|
-
local build_hotspots
|
|
1101
|
-
build_hotspots=$(gh_file_change_frequency 2>/dev/null | head -5 || true)
|
|
1102
|
-
if [[ -n "$build_hotspots" ]]; then
|
|
1103
|
-
enriched_goal="${enriched_goal}
|
|
1104
|
-
|
|
1105
|
-
File hotspots (most frequently changed — review these carefully):
|
|
1106
|
-
${build_hotspots}"
|
|
1107
|
-
fi
|
|
1108
|
-
fi
|
|
1109
|
-
|
|
1110
|
-
# Inject security alerts context
|
|
1111
|
-
if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_security_alerts >/dev/null 2>&1; then
|
|
1112
|
-
local build_alerts
|
|
1113
|
-
build_alerts=$(gh_security_alerts 2>/dev/null | head -3 || true)
|
|
1114
|
-
if [[ -n "$build_alerts" ]]; then
|
|
1115
|
-
enriched_goal="${enriched_goal}
|
|
1116
|
-
|
|
1117
|
-
Active security alerts (do not introduce new vulnerabilities):
|
|
1118
|
-
${build_alerts}"
|
|
1119
|
-
fi
|
|
1120
|
-
fi
|
|
1121
|
-
|
|
1122
|
-
# Inject coverage baseline
|
|
1123
|
-
local repo_hash_build
|
|
1124
|
-
repo_hash_build=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
1125
|
-
local coverage_file_build="${HOME}/.shipwright/baselines/${repo_hash_build}/coverage.json"
|
|
1126
|
-
if [[ -f "$coverage_file_build" ]]; then
|
|
1127
|
-
local coverage_baseline
|
|
1128
|
-
coverage_baseline=$(jq -r '.coverage_percent // empty' "$coverage_file_build" 2>/dev/null || true)
|
|
1129
|
-
if [[ -n "$coverage_baseline" ]]; then
|
|
1130
|
-
enriched_goal="${enriched_goal}
|
|
1131
|
-
|
|
1132
|
-
Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
|
|
1133
|
-
fi
|
|
1134
|
-
fi
|
|
1135
|
-
|
|
1136
|
-
# Predictive: inject prevention hints when risk/memory patterns suggest build-stage failures
|
|
1137
|
-
if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
|
|
1138
|
-
local issue_json_build="{}"
|
|
1139
|
-
[[ -n "${ISSUE_NUMBER:-}" ]] && issue_json_build=$(jq -n --arg title "${GOAL:-}" --arg num "${ISSUE_NUMBER:-}" '{title: $title, number: $num}')
|
|
1140
|
-
local prevention_text
|
|
1141
|
-
prevention_text=$(bash "$SCRIPT_DIR/sw-predictive.sh" inject-prevention "build" "$issue_json_build" 2>/dev/null || true)
|
|
1142
|
-
if [[ -n "$prevention_text" ]]; then
|
|
1143
|
-
enriched_goal="${enriched_goal}
|
|
1144
|
-
|
|
1145
|
-
${prevention_text}"
|
|
1146
|
-
fi
|
|
1147
|
-
fi
|
|
1148
|
-
|
|
1149
|
-
loop_args+=("$enriched_goal")
|
|
1150
|
-
|
|
1151
|
-
# Build loop args from pipeline config + CLI overrides
|
|
1152
|
-
CURRENT_STAGE_ID="build"
|
|
189
|
+
# ─── Load domain-specific stage modules ───────────────────────────────────────
|
|
1153
190
|
|
|
1154
|
-
|
|
1155
|
-
|
|
1156
|
-
|
|
1157
|
-
[[ "$test_cmd" == "null" ]] && test_cmd=""
|
|
1158
|
-
fi
|
|
1159
|
-
# Auto-detect if still empty
|
|
1160
|
-
if [[ -z "$test_cmd" ]]; then
|
|
1161
|
-
test_cmd=$(detect_test_cmd)
|
|
1162
|
-
fi
|
|
1163
|
-
|
|
1164
|
-
local max_iter
|
|
1165
|
-
max_iter=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.max_iterations) // 20' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1166
|
-
[[ -z "$max_iter" || "$max_iter" == "null" ]] && max_iter=20
|
|
1167
|
-
# CLI --max-iterations override (from CI strategy engine)
|
|
1168
|
-
[[ -n "${MAX_ITERATIONS_OVERRIDE:-}" ]] && max_iter="$MAX_ITERATIONS_OVERRIDE"
|
|
1169
|
-
|
|
1170
|
-
local agents="${AGENTS}"
|
|
1171
|
-
if [[ -z "$agents" ]]; then
|
|
1172
|
-
agents=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.agents) // .defaults.agents // 1' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1173
|
-
[[ -z "$agents" || "$agents" == "null" ]] && agents=1
|
|
1174
|
-
fi
|
|
1175
|
-
|
|
1176
|
-
# Intelligence: suggest parallelism if design indicates independent work
|
|
1177
|
-
if [[ "${agents:-1}" -le 1 ]] && [[ -s "$ARTIFACTS_DIR/design.md" ]]; then
|
|
1178
|
-
local design_lower
|
|
1179
|
-
design_lower=$(tr '[:upper:]' '[:lower:]' < "$ARTIFACTS_DIR/design.md" 2>/dev/null || true)
|
|
1180
|
-
if echo "$design_lower" | grep -qE 'independent (files|modules|components|services)|separate (modules|packages|directories)|parallel|no shared state'; then
|
|
1181
|
-
info "Design mentions independent modules — consider --agents 2 for parallelism"
|
|
1182
|
-
emit_event "build.parallelism_suggested" "issue=${ISSUE_NUMBER:-0}" "current_agents=$agents"
|
|
1183
|
-
fi
|
|
1184
|
-
fi
|
|
191
|
+
# Load scope enforcement module for planned vs actual file tracking
|
|
192
|
+
_SCOPE_ENFORCEMENT_SH="${SCRIPT_DIR}/lib/scope-enforcement.sh"
|
|
193
|
+
[[ -f "$_SCOPE_ENFORCEMENT_SH" ]] && source "$_SCOPE_ENFORCEMENT_SH"
|
|
1185
194
|
|
|
1186
|
-
|
|
1187
|
-
|
|
1188
|
-
local quality
|
|
1189
|
-
quality=$(jq -r --arg id "build" '(.stages[] | select(.id == $id) | .config.quality_gates) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
195
|
+
_PIPELINE_STAGES_INTAKE_SH="${SCRIPT_DIR}/lib/pipeline-stages-intake.sh"
|
|
196
|
+
[[ -f "$_PIPELINE_STAGES_INTAKE_SH" ]] && source "$_PIPELINE_STAGES_INTAKE_SH"
|
|
1190
197
|
|
|
1191
|
-
|
|
1192
|
-
|
|
1193
|
-
build_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1194
|
-
[[ -z "$build_model" || "$build_model" == "null" ]] && build_model="opus"
|
|
1195
|
-
fi
|
|
1196
|
-
# Intelligence model routing (when no explicit CLI --model override)
|
|
1197
|
-
if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
|
|
1198
|
-
build_model="$CLAUDE_MODEL"
|
|
1199
|
-
fi
|
|
1200
|
-
|
|
1201
|
-
# Recruit-powered model selection (when no explicit override)
|
|
1202
|
-
if [[ -z "$MODEL" ]] && [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
|
|
1203
|
-
local _recruit_goal="${GOAL:-}"
|
|
1204
|
-
if [[ -n "$_recruit_goal" ]]; then
|
|
1205
|
-
local _recruit_match
|
|
1206
|
-
_recruit_match=$(bash "$SCRIPT_DIR/sw-recruit.sh" match --json "$_recruit_goal" 2>/dev/null) || true
|
|
1207
|
-
if [[ -n "$_recruit_match" ]]; then
|
|
1208
|
-
local _recruit_model
|
|
1209
|
-
_recruit_model=$(echo "$_recruit_match" | jq -r '.model // ""' 2>/dev/null) || true
|
|
1210
|
-
if [[ -n "$_recruit_model" && "$_recruit_model" != "null" && "$_recruit_model" != "" ]]; then
|
|
1211
|
-
info "Recruit recommends model: ${CYAN}${_recruit_model}${RESET} for this task"
|
|
1212
|
-
build_model="$_recruit_model"
|
|
1213
|
-
fi
|
|
1214
|
-
fi
|
|
1215
|
-
fi
|
|
1216
|
-
fi
|
|
1217
|
-
|
|
1218
|
-
[[ -n "$test_cmd" && "$test_cmd" != "null" ]] && loop_args+=(--test-cmd "$test_cmd")
|
|
1219
|
-
loop_args+=(--max-iterations "$max_iter")
|
|
1220
|
-
loop_args+=(--model "$build_model")
|
|
1221
|
-
[[ "$agents" -gt 1 ]] 2>/dev/null && loop_args+=(--agents "$agents")
|
|
1222
|
-
|
|
1223
|
-
# Quality gates: always enabled in CI, otherwise from template config
|
|
1224
|
-
if [[ "${CI_MODE:-false}" == "true" ]]; then
|
|
1225
|
-
loop_args+=(--audit --audit-agent --quality-gates)
|
|
1226
|
-
else
|
|
1227
|
-
[[ "$audit" == "true" ]] && loop_args+=(--audit --audit-agent)
|
|
1228
|
-
[[ "$quality" == "true" ]] && loop_args+=(--quality-gates)
|
|
1229
|
-
fi
|
|
198
|
+
_PIPELINE_STAGES_BUILD_SH="${SCRIPT_DIR}/lib/pipeline-stages-build.sh"
|
|
199
|
+
[[ -f "$_PIPELINE_STAGES_BUILD_SH" ]] && source "$_PIPELINE_STAGES_BUILD_SH"
|
|
1230
200
|
|
|
1231
|
-
|
|
1232
|
-
|
|
1233
|
-
# Fast test mode
|
|
1234
|
-
[[ -n "${FAST_TEST_CMD_OVERRIDE:-}" ]] && loop_args+=(--fast-test-cmd "$FAST_TEST_CMD_OVERRIDE")
|
|
1235
|
-
|
|
1236
|
-
# Definition of Done: use plan-extracted DoD if available
|
|
1237
|
-
[[ -s "$dod_file" ]] && loop_args+=(--definition-of-done "$dod_file")
|
|
1238
|
-
|
|
1239
|
-
# Checkpoint resume: when pipeline resumed from build-stage checkpoint, pass --resume to loop
|
|
1240
|
-
if [[ "${RESUME_FROM_CHECKPOINT:-false}" == "true" && "${checkpoint_stage:-}" == "build" ]]; then
|
|
1241
|
-
loop_args+=(--resume)
|
|
1242
|
-
fi
|
|
1243
|
-
|
|
1244
|
-
# Skip permissions — pipeline runs headlessly (claude -p) and has no terminal
|
|
1245
|
-
# for interactive permission prompts. Without this flag, agents can't write files.
|
|
1246
|
-
loop_args+=(--skip-permissions)
|
|
1247
|
-
|
|
1248
|
-
info "Starting build loop: ${DIM}shipwright loop${RESET} (max ${max_iter} iterations, ${agents} agent(s))"
|
|
1249
|
-
|
|
1250
|
-
# Post build start to GitHub
|
|
1251
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1252
|
-
gh_comment_issue "$ISSUE_NUMBER" "🔨 **Build started** — \`shipwright loop\` with ${max_iter} max iterations, ${agents} agent(s), model: ${build_model}"
|
|
1253
|
-
fi
|
|
1254
|
-
|
|
1255
|
-
local _token_log="${ARTIFACTS_DIR}/.claude-tokens-build.log"
|
|
1256
|
-
export PIPELINE_JOB_ID="${PIPELINE_NAME:-pipeline-$$}"
|
|
1257
|
-
sw loop "${loop_args[@]}" < /dev/null 2>"$_token_log" || {
|
|
1258
|
-
local _loop_exit=$?
|
|
1259
|
-
parse_claude_tokens "$_token_log"
|
|
1260
|
-
|
|
1261
|
-
# Detect context exhaustion from progress file
|
|
1262
|
-
local _progress_file="${PWD}/.claude/loop-logs/progress.md"
|
|
1263
|
-
if [[ -f "$_progress_file" ]]; then
|
|
1264
|
-
local _prog_tests
|
|
1265
|
-
_prog_tests=$(grep -oE 'Tests passing: (true|false)' "$_progress_file" 2>/dev/null | awk '{print $NF}' || echo "unknown")
|
|
1266
|
-
if [[ "$_prog_tests" != "true" ]]; then
|
|
1267
|
-
warn "Build loop exhausted with failing tests (context exhaustion)"
|
|
1268
|
-
emit_event "pipeline.context_exhaustion" "issue=${ISSUE_NUMBER:-0}" "stage=build"
|
|
1269
|
-
# Write flag for daemon retry logic
|
|
1270
|
-
mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
1271
|
-
echo "context_exhaustion" > "$ARTIFACTS_DIR/failure-reason.txt" 2>/dev/null || true
|
|
1272
|
-
fi
|
|
1273
|
-
fi
|
|
1274
|
-
|
|
1275
|
-
error "Build loop failed"
|
|
1276
|
-
return 1
|
|
1277
|
-
}
|
|
1278
|
-
parse_claude_tokens "$_token_log"
|
|
1279
|
-
|
|
1280
|
-
# Read accumulated token counts from build loop (written by sw-loop.sh)
|
|
1281
|
-
local _loop_token_file="${PROJECT_ROOT}/.claude/loop-logs/loop-tokens.json"
|
|
1282
|
-
if [[ -f "$_loop_token_file" ]] && command -v jq >/dev/null 2>&1; then
|
|
1283
|
-
local _loop_in _loop_out _loop_cost
|
|
1284
|
-
_loop_in=$(jq -r '.input_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
|
|
1285
|
-
_loop_out=$(jq -r '.output_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
|
|
1286
|
-
_loop_cost=$(jq -r '.cost_usd // 0' "$_loop_token_file" 2>/dev/null || echo "0")
|
|
1287
|
-
TOTAL_INPUT_TOKENS=$(( TOTAL_INPUT_TOKENS + ${_loop_in:-0} ))
|
|
1288
|
-
TOTAL_OUTPUT_TOKENS=$(( TOTAL_OUTPUT_TOKENS + ${_loop_out:-0} ))
|
|
1289
|
-
if [[ -n "$_loop_cost" && "$_loop_cost" != "0" && "$_loop_cost" != "null" ]]; then
|
|
1290
|
-
TOTAL_COST_USD="${_loop_cost}"
|
|
1291
|
-
fi
|
|
1292
|
-
if [[ "${_loop_in:-0}" -gt 0 || "${_loop_out:-0}" -gt 0 ]]; then
|
|
1293
|
-
info "Build loop tokens: in=${_loop_in} out=${_loop_out} cost=\$${_loop_cost:-0}"
|
|
1294
|
-
fi
|
|
1295
|
-
fi
|
|
1296
|
-
|
|
1297
|
-
# Count commits made during build
|
|
1298
|
-
local commit_count
|
|
1299
|
-
commit_count=$(_safe_base_log --oneline | wc -l | xargs)
|
|
1300
|
-
info "Build produced ${BOLD}$commit_count${RESET} commit(s)"
|
|
1301
|
-
|
|
1302
|
-
# Commit quality evaluation when intelligence is enabled
|
|
1303
|
-
if type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1 && [[ "${commit_count:-0}" -gt 0 ]]; then
|
|
1304
|
-
local commit_msgs
|
|
1305
|
-
commit_msgs=$(_safe_base_log --format="%s" | head -20)
|
|
1306
|
-
local quality_score
|
|
1307
|
-
quality_score=$(claude --print --output-format text -p "Rate the quality of these git commit messages on a scale of 0-100. Consider: focus (one thing per commit), clarity (describes the why), atomicity (small logical units). Reply with ONLY a number 0-100.
|
|
1308
|
-
|
|
1309
|
-
Commit messages:
|
|
1310
|
-
${commit_msgs}" --model haiku < /dev/null 2>/dev/null || true)
|
|
1311
|
-
quality_score=$(echo "$quality_score" | grep -oE '^[0-9]+' | head -1 || true)
|
|
1312
|
-
if [[ -n "$quality_score" ]]; then
|
|
1313
|
-
emit_event "build.commit_quality" \
|
|
1314
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
1315
|
-
"score=$quality_score" \
|
|
1316
|
-
"commit_count=$commit_count"
|
|
1317
|
-
if [[ "$quality_score" -lt 40 ]] 2>/dev/null; then
|
|
1318
|
-
warn "Commit message quality low (score: ${quality_score}/100)"
|
|
1319
|
-
else
|
|
1320
|
-
info "Commit quality score: ${quality_score}/100"
|
|
1321
|
-
fi
|
|
1322
|
-
fi
|
|
1323
|
-
fi
|
|
1324
|
-
|
|
1325
|
-
log_stage "build" "Build loop completed ($commit_count commits)"
|
|
1326
|
-
}
|
|
1327
|
-
|
|
1328
|
-
stage_test() {
|
|
1329
|
-
CURRENT_STAGE_ID="test"
|
|
1330
|
-
local test_cmd="${TEST_CMD}"
|
|
1331
|
-
if [[ -z "$test_cmd" ]]; then
|
|
1332
|
-
test_cmd=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.test_cmd) // .defaults.test_cmd // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1333
|
-
[[ -z "$test_cmd" || "$test_cmd" == "null" ]] && test_cmd=""
|
|
1334
|
-
fi
|
|
1335
|
-
# Auto-detect
|
|
1336
|
-
if [[ -z "$test_cmd" ]]; then
|
|
1337
|
-
test_cmd=$(detect_test_cmd)
|
|
1338
|
-
fi
|
|
1339
|
-
if [[ -z "$test_cmd" ]]; then
|
|
1340
|
-
warn "No test command found — skipping test stage"
|
|
1341
|
-
return 0
|
|
1342
|
-
fi
|
|
1343
|
-
|
|
1344
|
-
local coverage_min
|
|
1345
|
-
coverage_min=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.coverage_min) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1346
|
-
[[ -z "$coverage_min" || "$coverage_min" == "null" ]] && coverage_min=0
|
|
1347
|
-
|
|
1348
|
-
local test_log="$ARTIFACTS_DIR/test-results.log"
|
|
1349
|
-
|
|
1350
|
-
info "Running tests: ${DIM}$test_cmd${RESET}"
|
|
1351
|
-
local test_exit=0
|
|
1352
|
-
bash -c "$test_cmd" > "$test_log" 2>&1 || test_exit=$?
|
|
1353
|
-
|
|
1354
|
-
if [[ "$test_exit" -eq 0 ]]; then
|
|
1355
|
-
success "Tests passed"
|
|
1356
|
-
else
|
|
1357
|
-
error "Tests failed (exit code: $test_exit)"
|
|
1358
|
-
# Extract most relevant error section (assertion failures, stack traces)
|
|
1359
|
-
local relevant_output=""
|
|
1360
|
-
relevant_output=$(grep -A5 -E 'FAIL|AssertionError|Expected.*but.*got|Error:|panic:|assert' "$test_log" 2>/dev/null | tail -40 || true)
|
|
1361
|
-
if [[ -z "$relevant_output" ]]; then
|
|
1362
|
-
relevant_output=$(tail -40 "$test_log")
|
|
1363
|
-
fi
|
|
1364
|
-
echo "$relevant_output"
|
|
1365
|
-
|
|
1366
|
-
# Post failure to GitHub with more context
|
|
1367
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1368
|
-
local log_lines
|
|
1369
|
-
log_lines=$(wc -l < "$test_log" 2>/dev/null || true)
|
|
1370
|
-
log_lines="${log_lines:-0}"
|
|
1371
|
-
local log_excerpt
|
|
1372
|
-
if [[ "$log_lines" -lt 60 ]]; then
|
|
1373
|
-
log_excerpt="$(cat "$test_log" 2>/dev/null || true)"
|
|
1374
|
-
else
|
|
1375
|
-
log_excerpt="$(head -20 "$test_log" 2>/dev/null || true)
|
|
1376
|
-
... (${log_lines} lines total, showing head + tail) ...
|
|
1377
|
-
$(tail -30 "$test_log" 2>/dev/null || true)"
|
|
1378
|
-
fi
|
|
1379
|
-
gh_comment_issue "$ISSUE_NUMBER" "❌ **Tests failed** (exit code: $test_exit, ${log_lines} lines)
|
|
1380
|
-
\`\`\`
|
|
1381
|
-
${log_excerpt}
|
|
1382
|
-
\`\`\`"
|
|
1383
|
-
fi
|
|
1384
|
-
return 1
|
|
1385
|
-
fi
|
|
1386
|
-
|
|
1387
|
-
# Coverage check — only enforce when coverage data is actually detected
|
|
1388
|
-
local coverage=""
|
|
1389
|
-
if [[ "$coverage_min" -gt 0 ]] 2>/dev/null; then
|
|
1390
|
-
coverage=$(parse_coverage_from_output "$test_log")
|
|
1391
|
-
if [[ -z "$coverage" ]]; then
|
|
1392
|
-
# No coverage data found — skip enforcement (project may not have coverage tooling)
|
|
1393
|
-
info "No coverage data detected — skipping coverage check (min: ${coverage_min}%)"
|
|
1394
|
-
elif awk -v cov="$coverage" -v min="$coverage_min" 'BEGIN{exit !(cov < min)}' 2>/dev/null; then
|
|
1395
|
-
warn "Coverage ${coverage}% below minimum ${coverage_min}%"
|
|
1396
|
-
return 1
|
|
1397
|
-
else
|
|
1398
|
-
info "Coverage: ${coverage}% (min: ${coverage_min}%)"
|
|
1399
|
-
fi
|
|
1400
|
-
fi
|
|
1401
|
-
|
|
1402
|
-
# Emit test.completed with coverage for adaptive learning
|
|
1403
|
-
if [[ -n "$coverage" ]]; then
|
|
1404
|
-
emit_event "test.completed" \
|
|
1405
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
1406
|
-
"stage=test" \
|
|
1407
|
-
"coverage=$coverage"
|
|
1408
|
-
fi
|
|
1409
|
-
|
|
1410
|
-
# Post test results to GitHub
|
|
1411
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1412
|
-
local test_summary
|
|
1413
|
-
test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
|
|
1414
|
-
local cov_line=""
|
|
1415
|
-
[[ -n "$coverage" ]] && cov_line="
|
|
1416
|
-
**Coverage:** ${coverage}%"
|
|
1417
|
-
gh_comment_issue "$ISSUE_NUMBER" "✅ **Tests passed**${cov_line}
|
|
1418
|
-
<details>
|
|
1419
|
-
<summary>Test output</summary>
|
|
1420
|
-
|
|
1421
|
-
\`\`\`
|
|
1422
|
-
${test_summary}
|
|
1423
|
-
\`\`\`
|
|
1424
|
-
</details>"
|
|
1425
|
-
fi
|
|
1426
|
-
|
|
1427
|
-
# Write coverage summary for pre-deploy gate
|
|
1428
|
-
local _cov_pct=0
|
|
1429
|
-
if [[ -f "$ARTIFACTS_DIR/test-results.log" ]]; then
|
|
1430
|
-
_cov_pct=$(grep -oE '[0-9]+%' "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -1 | tr -d '%' || true)
|
|
1431
|
-
_cov_pct="${_cov_pct:-0}"
|
|
1432
|
-
fi
|
|
1433
|
-
local _cov_tmp
|
|
1434
|
-
_cov_tmp=$(mktemp "${ARTIFACTS_DIR}/test-coverage.json.tmp.XXXXXX")
|
|
1435
|
-
printf '{"coverage_pct":%d}' "${_cov_pct:-0}" > "$_cov_tmp" && mv "$_cov_tmp" "$ARTIFACTS_DIR/test-coverage.json" || rm -f "$_cov_tmp"
|
|
1436
|
-
|
|
1437
|
-
log_stage "test" "Tests passed${coverage:+ (coverage: ${coverage}%)}"
|
|
1438
|
-
}
|
|
1439
|
-
|
|
1440
|
-
stage_review() {
|
|
1441
|
-
CURRENT_STAGE_ID="review"
|
|
1442
|
-
local diff_file="$ARTIFACTS_DIR/review-diff.patch"
|
|
1443
|
-
local review_file="$ARTIFACTS_DIR/review.md"
|
|
1444
|
-
|
|
1445
|
-
_safe_base_diff > "$diff_file" 2>/dev/null || true
|
|
1446
|
-
|
|
1447
|
-
if [[ ! -s "$diff_file" ]]; then
|
|
1448
|
-
warn "No diff found — skipping review"
|
|
1449
|
-
return 0
|
|
1450
|
-
fi
|
|
1451
|
-
|
|
1452
|
-
if ! command -v claude >/dev/null 2>&1; then
|
|
1453
|
-
warn "Claude CLI not found — skipping AI review"
|
|
1454
|
-
return 0
|
|
1455
|
-
fi
|
|
1456
|
-
|
|
1457
|
-
local diff_stats
|
|
1458
|
-
diff_stats=$(_safe_base_diff --stat | tail -1 || echo "")
|
|
1459
|
-
info "Running AI code review... ${DIM}($diff_stats)${RESET}"
|
|
1460
|
-
|
|
1461
|
-
# Semantic risk scoring when intelligence is enabled
|
|
1462
|
-
if type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1; then
|
|
1463
|
-
local diff_files
|
|
1464
|
-
diff_files=$(_safe_base_diff --name-only || true)
|
|
1465
|
-
local risk_score="low"
|
|
1466
|
-
# Fast heuristic: flag high-risk file patterns
|
|
1467
|
-
if echo "$diff_files" | grep -qiE 'migration|schema|auth|crypto|security|password|token|secret|\.env'; then
|
|
1468
|
-
risk_score="high"
|
|
1469
|
-
elif echo "$diff_files" | grep -qiE 'api|route|controller|middleware|hook'; then
|
|
1470
|
-
risk_score="medium"
|
|
1471
|
-
fi
|
|
1472
|
-
emit_event "review.risk_assessed" \
|
|
1473
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
1474
|
-
"risk=$risk_score" \
|
|
1475
|
-
"files_changed=$(echo "$diff_files" | wc -l | xargs)"
|
|
1476
|
-
if [[ "$risk_score" == "high" ]]; then
|
|
1477
|
-
warn "High-risk changes detected (DB schema, auth, crypto, or secrets)"
|
|
1478
|
-
fi
|
|
1479
|
-
fi
|
|
1480
|
-
|
|
1481
|
-
local review_model="${MODEL:-opus}"
|
|
1482
|
-
# Intelligence model routing (when no explicit CLI --model override)
|
|
1483
|
-
if [[ -z "$MODEL" && -n "${CLAUDE_MODEL:-}" ]]; then
|
|
1484
|
-
review_model="$CLAUDE_MODEL"
|
|
1485
|
-
fi
|
|
1486
|
-
|
|
1487
|
-
# Build review prompt with project context
|
|
1488
|
-
local review_prompt="You are a senior code reviewer. Review this git diff thoroughly.
|
|
1489
|
-
|
|
1490
|
-
For each issue found, use this format:
|
|
1491
|
-
- **[SEVERITY]** file:line — description
|
|
1492
|
-
|
|
1493
|
-
Severity levels: Critical, Bug, Security, Warning, Suggestion
|
|
1494
|
-
|
|
1495
|
-
Focus on:
|
|
1496
|
-
1. Logic bugs and edge cases
|
|
1497
|
-
2. Security vulnerabilities (injection, XSS, auth bypass, etc.)
|
|
1498
|
-
3. Error handling gaps
|
|
1499
|
-
4. Performance issues
|
|
1500
|
-
5. Missing validation
|
|
1501
|
-
6. Project convention violations (see conventions below)
|
|
1502
|
-
|
|
1503
|
-
Be specific. Reference exact file paths and line numbers. Only flag genuine issues.
|
|
1504
|
-
If no issues are found, write: \"Review clean — no issues found.\"
|
|
1505
|
-
"
|
|
1506
|
-
|
|
1507
|
-
# Inject previous review findings and anti-patterns from memory
|
|
1508
|
-
if type intelligence_search_memory >/dev/null 2>&1; then
|
|
1509
|
-
local review_memory
|
|
1510
|
-
review_memory=$(intelligence_search_memory "code review findings anti-patterns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
|
|
1511
|
-
review_memory=$(prune_context_section "memory" "$review_memory" 10000)
|
|
1512
|
-
if [[ -n "$review_memory" ]]; then
|
|
1513
|
-
review_prompt+="
|
|
1514
|
-
## Known Issues from Previous Reviews
|
|
1515
|
-
These anti-patterns and issues have been found in past reviews of this codebase. Flag them if they recur:
|
|
1516
|
-
${review_memory}
|
|
1517
|
-
"
|
|
1518
|
-
fi
|
|
1519
|
-
fi
|
|
1520
|
-
|
|
1521
|
-
# Inject project conventions if CLAUDE.md exists
|
|
1522
|
-
local claudemd="$PROJECT_ROOT/.claude/CLAUDE.md"
|
|
1523
|
-
if [[ -f "$claudemd" ]]; then
|
|
1524
|
-
local conventions
|
|
1525
|
-
conventions=$(grep -A2 'Common Pitfalls\|Shell Standards\|Bash 3.2' "$claudemd" 2>/dev/null | head -20 || true)
|
|
1526
|
-
if [[ -n "$conventions" ]]; then
|
|
1527
|
-
review_prompt+="
|
|
1528
|
-
## Project Conventions
|
|
1529
|
-
${conventions}
|
|
1530
|
-
"
|
|
1531
|
-
fi
|
|
1532
|
-
fi
|
|
1533
|
-
|
|
1534
|
-
# Inject CODEOWNERS focus areas for review
|
|
1535
|
-
if [[ "${NO_GITHUB:-}" != "true" ]] && type gh_codeowners >/dev/null 2>&1; then
|
|
1536
|
-
local review_owners
|
|
1537
|
-
review_owners=$(gh_codeowners 2>/dev/null | head -10 || true)
|
|
1538
|
-
if [[ -n "$review_owners" ]]; then
|
|
1539
|
-
review_prompt+="
|
|
1540
|
-
## Code Owners (focus areas)
|
|
1541
|
-
${review_owners}
|
|
1542
|
-
"
|
|
1543
|
-
fi
|
|
1544
|
-
fi
|
|
1545
|
-
|
|
1546
|
-
# Inject Definition of Done if present
|
|
1547
|
-
local dod_file="$PROJECT_ROOT/.claude/DEFINITION-OF-DONE.md"
|
|
1548
|
-
if [[ -f "$dod_file" ]]; then
|
|
1549
|
-
review_prompt+="
|
|
1550
|
-
## Definition of Done (verify these)
|
|
1551
|
-
$(cat "$dod_file")
|
|
1552
|
-
"
|
|
1553
|
-
fi
|
|
1554
|
-
|
|
1555
|
-
review_prompt+="
|
|
1556
|
-
## Diff to Review
|
|
1557
|
-
$(cat "$diff_file")"
|
|
1558
|
-
|
|
1559
|
-
# Guard total prompt size
|
|
1560
|
-
review_prompt=$(guard_prompt_size "review" "$review_prompt")
|
|
1561
|
-
|
|
1562
|
-
# Skip permissions — pipeline runs headlessly (claude -p) and has no terminal
|
|
1563
|
-
# for interactive permission prompts. Same rationale as build stage (line ~1083).
|
|
1564
|
-
local review_args=(--print --model "$review_model" --max-turns 25 --dangerously-skip-permissions)
|
|
1565
|
-
|
|
1566
|
-
claude "${review_args[@]}" "$review_prompt" < /dev/null > "$review_file" 2>"${ARTIFACTS_DIR}/.claude-tokens-review.log" || true
|
|
1567
|
-
parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-review.log"
|
|
1568
|
-
|
|
1569
|
-
if [[ ! -s "$review_file" ]]; then
|
|
1570
|
-
warn "Review produced no output — check ${ARTIFACTS_DIR}/.claude-tokens-review.log for errors"
|
|
1571
|
-
return 0
|
|
1572
|
-
fi
|
|
1573
|
-
|
|
1574
|
-
# Extract severity counts — try JSON structure first, then grep fallback
|
|
1575
|
-
local critical_count=0 bug_count=0 warning_count=0
|
|
1576
|
-
|
|
1577
|
-
# Check if review output is structured JSON (e.g. from structured review tools)
|
|
1578
|
-
local json_parsed=false
|
|
1579
|
-
if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
|
|
1580
|
-
local j_critical j_bug j_warning
|
|
1581
|
-
j_critical=$(jq -r '.issues | map(select(.severity == "Critical")) | length' "$review_file" 2>/dev/null || echo "")
|
|
1582
|
-
if [[ -n "$j_critical" && "$j_critical" != "null" ]]; then
|
|
1583
|
-
critical_count="$j_critical"
|
|
1584
|
-
bug_count=$(jq -r '.issues | map(select(.severity == "Bug" or .severity == "Security")) | length' "$review_file" 2>/dev/null || echo "0")
|
|
1585
|
-
warning_count=$(jq -r '.issues | map(select(.severity == "Warning" or .severity == "Suggestion")) | length' "$review_file" 2>/dev/null || echo "0")
|
|
1586
|
-
json_parsed=true
|
|
1587
|
-
fi
|
|
1588
|
-
fi
|
|
1589
|
-
|
|
1590
|
-
# Grep fallback for markdown-formatted review output
|
|
1591
|
-
if [[ "$json_parsed" != "true" ]]; then
|
|
1592
|
-
critical_count=$(grep -ciE '\*\*\[?Critical\]?\*\*' "$review_file" 2>/dev/null || true)
|
|
1593
|
-
critical_count="${critical_count:-0}"
|
|
1594
|
-
bug_count=$(grep -ciE '\*\*\[?(Bug|Security)\]?\*\*' "$review_file" 2>/dev/null || true)
|
|
1595
|
-
bug_count="${bug_count:-0}"
|
|
1596
|
-
warning_count=$(grep -ciE '\*\*\[?(Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
|
|
1597
|
-
warning_count="${warning_count:-0}"
|
|
1598
|
-
fi
|
|
1599
|
-
local total_issues=$((critical_count + bug_count + warning_count))
|
|
1600
|
-
|
|
1601
|
-
if [[ "$critical_count" -gt 0 ]]; then
|
|
1602
|
-
error "Review found ${BOLD}$critical_count critical${RESET} issue(s) — see $review_file"
|
|
1603
|
-
elif [[ "$bug_count" -gt 0 ]]; then
|
|
1604
|
-
warn "Review found $bug_count bug/security issue(s) — see ${DIM}$review_file${RESET}"
|
|
1605
|
-
elif [[ "$total_issues" -gt 0 ]]; then
|
|
1606
|
-
info "Review found $total_issues suggestion(s)"
|
|
1607
|
-
else
|
|
1608
|
-
success "Review clean"
|
|
1609
|
-
fi
|
|
1610
|
-
|
|
1611
|
-
# ── Oversight gate: pipeline review/quality stages block on verdict ──
|
|
1612
|
-
if [[ -x "$SCRIPT_DIR/sw-oversight.sh" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
|
|
1613
|
-
local reject_reason=""
|
|
1614
|
-
local _sec_count
|
|
1615
|
-
_sec_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
|
|
1616
|
-
_sec_count="${_sec_count:-0}"
|
|
1617
|
-
local _blocking=$((critical_count + _sec_count))
|
|
1618
|
-
[[ "$_blocking" -gt 0 ]] && reject_reason="Review found ${_blocking} critical/security issue(s)"
|
|
1619
|
-
if ! bash "$SCRIPT_DIR/sw-oversight.sh" gate --diff "$diff_file" --description "${GOAL:-Pipeline review}" --reject-if "$reject_reason" >/dev/null 2>&1; then
|
|
1620
|
-
error "Oversight gate rejected — blocking pipeline"
|
|
1621
|
-
emit_event "review.oversight_blocked" "issue=${ISSUE_NUMBER:-0}"
|
|
1622
|
-
log_stage "review" "BLOCKED: oversight gate rejected"
|
|
1623
|
-
return 1
|
|
1624
|
-
fi
|
|
1625
|
-
fi
|
|
1626
|
-
|
|
1627
|
-
# ── Review Blocking Gate ──
|
|
1628
|
-
# Block pipeline on critical/security issues unless compound_quality handles them
|
|
1629
|
-
local security_count
|
|
1630
|
-
security_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
|
|
1631
|
-
security_count="${security_count:-0}"
|
|
1632
|
-
|
|
1633
|
-
local blocking_issues=$((critical_count + security_count))
|
|
1634
|
-
|
|
1635
|
-
if [[ "$blocking_issues" -gt 0 ]]; then
|
|
1636
|
-
# Check if compound_quality stage is enabled — if so, let it handle issues
|
|
1637
|
-
local compound_enabled="false"
|
|
1638
|
-
if [[ -n "${PIPELINE_CONFIG:-}" && -f "${PIPELINE_CONFIG:-/dev/null}" ]]; then
|
|
1639
|
-
compound_enabled=$(jq -r '.stages[] | select(.id == "compound_quality") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1640
|
-
[[ -z "$compound_enabled" || "$compound_enabled" == "null" ]] && compound_enabled="false"
|
|
1641
|
-
fi
|
|
1642
|
-
|
|
1643
|
-
# Check if this is a fast template (don't block fast pipelines)
|
|
1644
|
-
local is_fast="false"
|
|
1645
|
-
if [[ "${PIPELINE_NAME:-}" == "fast" || "${PIPELINE_NAME:-}" == "hotfix" ]]; then
|
|
1646
|
-
is_fast="true"
|
|
1647
|
-
fi
|
|
1648
|
-
|
|
1649
|
-
if [[ "$compound_enabled" == "true" ]]; then
|
|
1650
|
-
info "Review found ${blocking_issues} critical/security issue(s) — compound_quality stage will handle"
|
|
1651
|
-
elif [[ "$is_fast" == "true" ]]; then
|
|
1652
|
-
warn "Review found ${blocking_issues} critical/security issue(s) — fast template, not blocking"
|
|
1653
|
-
elif [[ "${SKIP_GATES:-false}" == "true" ]]; then
|
|
1654
|
-
warn "Review found ${blocking_issues} critical/security issue(s) — skip-gates mode, not blocking"
|
|
1655
|
-
else
|
|
1656
|
-
error "Review found ${BOLD}${blocking_issues} critical/security issue(s)${RESET} — blocking pipeline"
|
|
1657
|
-
emit_event "review.blocked" \
|
|
1658
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
1659
|
-
"critical=${critical_count}" \
|
|
1660
|
-
"security=${security_count}"
|
|
1661
|
-
|
|
1662
|
-
# Save blocking issues for self-healing context
|
|
1663
|
-
grep -iE '\*\*\[?(Critical|Security)\]?\*\*' "$review_file" > "$ARTIFACTS_DIR/review-blockers.md" 2>/dev/null || true
|
|
1664
|
-
|
|
1665
|
-
# Post review to GitHub before failing
|
|
1666
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1667
|
-
local review_summary
|
|
1668
|
-
review_summary=$(head -40 "$review_file")
|
|
1669
|
-
gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review — ❌ Blocked
|
|
1670
|
-
|
|
1671
|
-
**Stats:** $diff_stats
|
|
1672
|
-
**Blocking issues:** ${blocking_issues} (${critical_count} critical, ${security_count} security)
|
|
1673
|
-
|
|
1674
|
-
<details>
|
|
1675
|
-
<summary>Review details</summary>
|
|
1676
|
-
|
|
1677
|
-
${review_summary}
|
|
1678
|
-
|
|
1679
|
-
</details>
|
|
1680
|
-
|
|
1681
|
-
_Pipeline will attempt self-healing rebuild._"
|
|
1682
|
-
fi
|
|
1683
|
-
|
|
1684
|
-
log_stage "review" "BLOCKED: $blocking_issues critical/security issues found"
|
|
1685
|
-
return 1
|
|
1686
|
-
fi
|
|
1687
|
-
fi
|
|
1688
|
-
|
|
1689
|
-
# Post review to GitHub issue
|
|
1690
|
-
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1691
|
-
local review_summary
|
|
1692
|
-
review_summary=$(head -40 "$review_file")
|
|
1693
|
-
gh_comment_issue "$ISSUE_NUMBER" "## 🔍 Code Review
|
|
1694
|
-
|
|
1695
|
-
**Stats:** $diff_stats
|
|
1696
|
-
**Issues found:** $total_issues (${critical_count} critical, ${bug_count} bugs, ${warning_count} suggestions)
|
|
1697
|
-
|
|
1698
|
-
<details>
|
|
1699
|
-
<summary>Review details</summary>
|
|
1700
|
-
|
|
1701
|
-
${review_summary}
|
|
1702
|
-
|
|
1703
|
-
</details>"
|
|
1704
|
-
fi
|
|
1705
|
-
|
|
1706
|
-
log_stage "review" "AI review complete ($total_issues issues: $critical_count critical, $bug_count bugs, $warning_count suggestions)"
|
|
1707
|
-
}
|
|
1708
|
-
|
|
1709
|
-
# ─── Compound Quality (fallback) ────────────────────────────────────────────
|
|
1710
|
-
# Basic implementation: adversarial review, negative testing, e2e checks, DoD audit.
|
|
1711
|
-
# If pipeline-intelligence.sh was sourced first, its enhanced version takes priority.
|
|
1712
|
-
if ! type stage_compound_quality >/dev/null 2>&1; then
|
|
1713
|
-
stage_compound_quality() {
|
|
1714
|
-
CURRENT_STAGE_ID="compound_quality"
|
|
1715
|
-
|
|
1716
|
-
# Read stage config from pipeline template
|
|
1717
|
-
local cfg
|
|
1718
|
-
cfg=$(jq -r '.stages[] | select(.id == "compound_quality") | .config // {}' "$PIPELINE_CONFIG" 2>/dev/null) || cfg="{}"
|
|
1719
|
-
|
|
1720
|
-
local do_adversarial do_negative do_e2e do_dod max_cycles blocking
|
|
1721
|
-
do_adversarial=$(echo "$cfg" | jq -r '.adversarial // false')
|
|
1722
|
-
do_negative=$(echo "$cfg" | jq -r '.negative // false')
|
|
1723
|
-
do_e2e=$(echo "$cfg" | jq -r '.e2e // false')
|
|
1724
|
-
do_dod=$(echo "$cfg" | jq -r '.dod_audit // false')
|
|
1725
|
-
max_cycles=$(echo "$cfg" | jq -r '.max_cycles // 1')
|
|
1726
|
-
blocking=$(echo "$cfg" | jq -r '.compound_quality_blocking // false')
|
|
1727
|
-
|
|
1728
|
-
local pass_count=0 fail_count=0 total=0
|
|
1729
|
-
local compound_log="$ARTIFACTS_DIR/compound-quality.log"
|
|
1730
|
-
: > "$compound_log"
|
|
1731
|
-
|
|
1732
|
-
# ── Adversarial review ──
|
|
1733
|
-
if [[ "$do_adversarial" == "true" ]]; then
|
|
1734
|
-
total=$((total + 1))
|
|
1735
|
-
info "Running adversarial review..."
|
|
1736
|
-
if [[ -x "$SCRIPT_DIR/sw-adversarial.sh" ]]; then
|
|
1737
|
-
if bash "$SCRIPT_DIR/sw-adversarial.sh" --repo "${REPO_DIR:-.}" >> "$compound_log" 2>&1; then
|
|
1738
|
-
pass_count=$((pass_count + 1))
|
|
1739
|
-
success "Adversarial review passed"
|
|
1740
|
-
else
|
|
1741
|
-
fail_count=$((fail_count + 1))
|
|
1742
|
-
warn "Adversarial review found issues"
|
|
1743
|
-
fi
|
|
1744
|
-
else
|
|
1745
|
-
warn "sw-adversarial.sh not found, skipping"
|
|
1746
|
-
fi
|
|
1747
|
-
fi
|
|
1748
|
-
|
|
1749
|
-
# ── Negative / edge-case testing ──
|
|
1750
|
-
if [[ "$do_negative" == "true" ]]; then
|
|
1751
|
-
total=$((total + 1))
|
|
1752
|
-
info "Running negative test pass..."
|
|
1753
|
-
if [[ -n "${TEST_CMD:-}" ]]; then
|
|
1754
|
-
if eval "$TEST_CMD" >> "$compound_log" 2>&1; then
|
|
1755
|
-
pass_count=$((pass_count + 1))
|
|
1756
|
-
success "Negative test pass passed"
|
|
1757
|
-
else
|
|
1758
|
-
fail_count=$((fail_count + 1))
|
|
1759
|
-
warn "Negative test pass found failures"
|
|
1760
|
-
fi
|
|
1761
|
-
else
|
|
1762
|
-
pass_count=$((pass_count + 1))
|
|
1763
|
-
info "No test command configured, skipping negative tests"
|
|
1764
|
-
fi
|
|
1765
|
-
fi
|
|
1766
|
-
|
|
1767
|
-
# ── E2E checks ──
|
|
1768
|
-
if [[ "$do_e2e" == "true" ]]; then
|
|
1769
|
-
total=$((total + 1))
|
|
1770
|
-
info "Running e2e checks..."
|
|
1771
|
-
if [[ -x "$SCRIPT_DIR/sw-e2e-orchestrator.sh" ]]; then
|
|
1772
|
-
if bash "$SCRIPT_DIR/sw-e2e-orchestrator.sh" run >> "$compound_log" 2>&1; then
|
|
1773
|
-
pass_count=$((pass_count + 1))
|
|
1774
|
-
success "E2E checks passed"
|
|
1775
|
-
else
|
|
1776
|
-
fail_count=$((fail_count + 1))
|
|
1777
|
-
warn "E2E checks found issues"
|
|
1778
|
-
fi
|
|
1779
|
-
else
|
|
1780
|
-
pass_count=$((pass_count + 1))
|
|
1781
|
-
info "sw-e2e-orchestrator.sh not found, skipping e2e"
|
|
1782
|
-
fi
|
|
1783
|
-
fi
|
|
1784
|
-
|
|
1785
|
-
# ── Definition of Done audit ──
|
|
1786
|
-
if [[ "$do_dod" == "true" ]]; then
|
|
1787
|
-
total=$((total + 1))
|
|
1788
|
-
info "Running definition-of-done audit..."
|
|
1789
|
-
if [[ -x "$SCRIPT_DIR/sw-quality.sh" ]]; then
|
|
1790
|
-
if bash "$SCRIPT_DIR/sw-quality.sh" validate >> "$compound_log" 2>&1; then
|
|
1791
|
-
pass_count=$((pass_count + 1))
|
|
1792
|
-
success "DoD audit passed"
|
|
1793
|
-
else
|
|
1794
|
-
fail_count=$((fail_count + 1))
|
|
1795
|
-
warn "DoD audit found gaps"
|
|
1796
|
-
fi
|
|
1797
|
-
else
|
|
1798
|
-
pass_count=$((pass_count + 1))
|
|
1799
|
-
info "sw-quality.sh not found, skipping DoD audit"
|
|
1800
|
-
fi
|
|
1801
|
-
fi
|
|
1802
|
-
|
|
1803
|
-
# ── Summary ──
|
|
1804
|
-
log_stage "compound_quality" "Compound quality: $pass_count/$total checks passed, $fail_count failed"
|
|
1805
|
-
|
|
1806
|
-
if [[ "$fail_count" -gt 0 && "$blocking" == "true" ]]; then
|
|
1807
|
-
error "Compound quality gate failed: $fail_count of $total checks failed"
|
|
1808
|
-
return 1
|
|
1809
|
-
fi
|
|
1810
|
-
|
|
1811
|
-
return 0
|
|
1812
|
-
}
|
|
1813
|
-
fi # end fallback stage_compound_quality
|
|
1814
|
-
|
|
1815
|
-
#######################################
# Pipeline stage: create (or update) the GitHub pull request for the
# current branch, after a series of pre-PR gates.
# Flow: local-mode short-circuit → hygiene warnings → "real changes"
# gates → commit leftovers → rebase → push → optional simulation /
# architecture checks → build title+body → evidence policy check →
# create-or-update PR → reviewer routing → issue bookkeeping → optional
# CI wait.
# Globals (read):  ARTIFACTS_DIR, NO_GITHUB, SHIPWRIGHT_LOCAL, LOCAL_MODE,
#                  GOAL, ISSUE_NUMBER, GIT_BRANCH, BASE_BRANCH, REPO_DIR,
#                  SCRIPT_DIR, PIPELINE_CONFIG, PIPELINE_NAME, LABELS,
#                  ISSUE_LABELS, ISSUE_MILESTONE, REVIEWERS, REPO_OWNER,
#                  REPO_NAME, PIPELINE_START_EPOCH, MODEL, AGENTS,
#                  GITHUB_ISSUE, DIM, BOLD, RESET
# Globals (written): CURRENT_STAGE_ID, PR_NUMBER
# Helpers assumed defined elsewhere in this file: info/warn/error/success,
#   emit_event, log_stage, _safe_base_log, _safe_base_diff, auto_rebase,
#   detect_reviewers, gh_remove_label, gh_add_labels, gh_comment_issue,
#   format_duration, now_epoch (and optionally simulation_review,
#   architecture_validate_changes, gh_codeowners, gh_contributors).
# Returns: 0 on success or benign skip; 1 on hard failure (no real
#   changes, push failure, PR creation failure).
#######################################
stage_pr() {
  CURRENT_STAGE_ID="pr"
  # Artifacts produced by earlier stages; all are optional inputs here.
  local plan_file="$ARTIFACTS_DIR/plan.md"
  local test_log="$ARTIFACTS_DIR/test-results.log"
  local review_file="$ARTIFACTS_DIR/review.md"

  # ── Skip PR in local/no-github mode ──
  # Any of three flags puts us in local mode; we still write a PR draft
  # file so the operator can see what would have been opened.
  if [[ "${NO_GITHUB:-false}" == "true" || "${SHIPWRIGHT_LOCAL:-}" == "1" || "${LOCAL_MODE:-false}" == "true" ]]; then
    info "Skipping PR stage — running in local/no-github mode"
    # Save a PR draft locally for reference
    local branch_name
    branch_name=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
    local commit_count
    # xargs trims the whitespace wc pads with on some platforms (BSD wc).
    commit_count=$(_safe_base_log --oneline | wc -l | xargs)
    # Best-effort draft write; failure to write it must not fail the stage.
    {
      echo "# PR Draft (local mode)"
      echo ""
      echo "**Branch:** ${branch_name}"
      echo "**Commits:** ${commit_count:-0}"
      echo "**Goal:** ${GOAL:-N/A}"
      echo ""
      echo "## Changes"
      _safe_base_diff --stat || true
    } > ".claude/pr-draft.md" 2>/dev/null || true
    emit_event "pr.skipped" "issue=${ISSUE_NUMBER:-0}" "reason=local_mode"
    return 0
  fi

  # ── PR Hygiene Checks (informational) ──
  # These only warn; they never block PR creation.
  local hygiene_commit_count
  hygiene_commit_count=$(_safe_base_log --oneline | wc -l | xargs)
  hygiene_commit_count="${hygiene_commit_count:-0}"

  if [[ "$hygiene_commit_count" -gt 20 ]]; then
    warn "PR has ${hygiene_commit_count} commits — consider squashing before merge"
  fi

  # Check for WIP/fixup/squash commits (expanded patterns)
  # grep -c with || true: an empty match list is not an error here.
  local wip_commits
  wip_commits=$(_safe_base_log --oneline | grep -ciE '^[0-9a-f]+ (WIP|fixup!|squash!|TODO|HACK|TEMP|BROKEN|wip[:-]|temp[:-]|broken[:-]|do not merge)' || true)
  wip_commits="${wip_commits:-0}"
  if [[ "$wip_commits" -gt 0 ]]; then
    warn "Branch has ${wip_commits} WIP/fixup/squash/temp commit(s) — consider cleaning up"
  fi

  # ── PR Quality Gate: reject PRs with no real code changes ──
  # "Real" = anything outside the .claude/ and .github/ bookkeeping trees.
  local real_files
  real_files=$(_safe_base_diff --name-only | grep -v '^\.claude/' | grep -v '^\.github/' || true)
  if [[ -z "$real_files" ]]; then
    error "No real code changes detected — only pipeline artifacts (.claude/ logs)."
    error "The build agent did not produce meaningful changes. Skipping PR creation."
    emit_event "pr.rejected" "issue=${ISSUE_NUMBER:-0}" "reason=no_real_changes"
    # Mark issue so auto-retry knows not to retry empty builds
    if [[ -n "${ISSUE_NUMBER:-}" && "${ISSUE_NUMBER:-0}" != "0" ]]; then
      gh issue comment "$ISSUE_NUMBER" --body "<!-- SHIPWRIGHT-NO-CHANGES: true -->" 2>/dev/null || true
    fi
    return 1
  fi
  local real_file_count
  real_file_count=$(echo "$real_files" | wc -l | xargs)
  info "PR quality gate: ${real_file_count} real file(s) changed"

  # Commit any uncommitted changes left by the build agent
  # Checks both the worktree and the index; commit is best-effort and
  # bypasses hooks (--no-verify) since this is mechanical cleanup.
  if ! git diff --quiet 2>/dev/null || ! git diff --cached --quiet 2>/dev/null; then
    info "Committing remaining uncommitted changes..."
    git add -A 2>/dev/null || true
    git commit -m "chore: pipeline cleanup — commit remaining build changes" --no-verify 2>/dev/null || true
  fi

  # Auto-rebase onto latest base branch before PR
  # A failed rebase is tolerated: we push whatever state we have.
  auto_rebase || {
    warn "Rebase/merge failed — pushing as-is"
  }

  # Push branch
  # First attempt uses --force-with-lease (safe after the rebase above);
  # on failure fall back to a plain push, which covers the first-ever
  # push of a branch with no upstream lease to check.
  info "Pushing branch: $GIT_BRANCH"
  git push -u origin "$GIT_BRANCH" --force-with-lease 2>/dev/null || {
    # Retry with regular push if force-with-lease fails (first push)
    git push -u origin "$GIT_BRANCH" 2>/dev/null || {
      error "Failed to push branch"
      return 1
    }
  }

  # ── Developer Simulation (pre-PR review) ──
  # Optional feature: only runs if a simulation_review function is loaded
  # AND a config flag enables it. Output feeds the PR body summary.
  local simulation_summary=""
  if type simulation_review >/dev/null 2>&1; then
    local sim_enabled
    sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
    # Also check daemon-config
    local daemon_cfg=".claude/daemon-config.json"
    if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
      sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
    fi
    if [[ "$sim_enabled" == "true" ]]; then
      info "Running developer simulation review..."
      local diff_for_sim
      diff_for_sim=$(_safe_base_diff || true)
      if [[ -n "$diff_for_sim" ]]; then
        local sim_result
        sim_result=$(simulation_review "$diff_for_sim" "${GOAL:-}" 2>/dev/null || echo "")
        # A payload containing "error" is treated as a failed run.
        if [[ -n "$sim_result" && "$sim_result" != *'"error"'* ]]; then
          echo "$sim_result" > "$ARTIFACTS_DIR/simulation-review.json"
          local sim_count
          # NOTE(review): assumes the result is a JSON array — confirm
          # against simulation_review's contract.
          sim_count=$(echo "$sim_result" | jq 'length' 2>/dev/null || echo "0")
          simulation_summary="**Developer simulation:** ${sim_count} reviewer concerns pre-addressed"
          success "Simulation complete: ${sim_count} concerns found and addressed"
          emit_event "simulation.complete" "issue=${ISSUE_NUMBER:-0}" "concerns=${sim_count}"
        else
          info "Simulation returned no actionable concerns"
        fi
      fi
    fi
  fi

  # ── Architecture Validation (pre-PR check) ──
  # Same optional gating pattern as the simulation block above.
  # Violations only warn here; they do not block the PR.
  local arch_summary=""
  if type architecture_validate_changes >/dev/null 2>&1; then
    local arch_enabled
    arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
    local daemon_cfg=".claude/daemon-config.json"
    if [[ "$arch_enabled" != "true" && -f "$daemon_cfg" ]]; then
      arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
    fi
    if [[ "$arch_enabled" == "true" ]]; then
      info "Validating architecture..."
      local diff_for_arch
      diff_for_arch=$(_safe_base_diff || true)
      if [[ -n "$diff_for_arch" ]]; then
        local arch_result
        arch_result=$(architecture_validate_changes "$diff_for_arch" "" 2>/dev/null || echo "")
        if [[ -n "$arch_result" && "$arch_result" != *'"error"'* ]]; then
          echo "$arch_result" > "$ARTIFACTS_DIR/architecture-validation.json"
          local violation_count
          # Only critical/high severities are counted for the summary.
          violation_count=$(echo "$arch_result" | jq '[.violations[]? | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
          arch_summary="**Architecture validation:** ${violation_count} violations"
          if [[ "$violation_count" -gt 0 ]]; then
            warn "Architecture: ${violation_count} high/critical violations found"
          else
            success "Architecture validation passed"
          fi
          emit_event "architecture.validated" "issue=${ISSUE_NUMBER:-0}" "violations=${violation_count}"
        else
          info "Architecture validation returned no results"
        fi
      fi
    fi
  fi

  # Pre-PR diff gate — verify meaningful code changes exist (not just bookkeeping)
  # Stricter than the earlier gate: uses git pathspec excludes for the
  # known state/progress files rather than a .claude/ prefix filter.
  local real_changes
  real_changes=$(_safe_base_diff --name-only \
    -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
    ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
    ':!**/error-summary.json' | wc -l | xargs || true)
  real_changes="${real_changes:-0}"
  if [[ "${real_changes:-0}" -eq 0 ]]; then
    error "No meaningful code changes detected — only bookkeeping files modified"
    error "Refusing to create PR with zero real changes"
    return 1
  fi
  info "Pre-PR diff check: ${real_changes} real files changed"

  # Build PR title — prefer GOAL over plan file first line
  # (plan file first line often contains Claude analysis text, not a clean title)
  local pr_title=""
  if [[ -n "${GOAL:-}" ]]; then
    # Truncate to 70 chars to keep the title readable in PR lists.
    pr_title=$(echo "$GOAL" | cut -c1-70)
  fi
  if [[ -z "$pr_title" ]] && [[ -s "$plan_file" ]]; then
    # Strip leading markdown heading markers before truncating.
    pr_title=$(head -1 "$plan_file" 2>/dev/null | sed 's/^#* *//' | cut -c1-70)
  fi
  [[ -z "$pr_title" ]] && pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"

  # Sanitize: reject PR titles that look like error messages
  # (can happen when GOAL/plan were captured from a failed agent run).
  if echo "$pr_title" | grep -qiE 'Invalid API|API key|authentication_error|rate_limit|CLI error|no useful output'; then
    warn "PR title looks like an error message: $pr_title"
    pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
  fi

  # Build comprehensive PR body
  # Lines 2-16 of the plan (skip line 1, which became the title candidate).
  local plan_summary=""
  if [[ -s "$plan_file" ]]; then
    plan_summary=$(head -20 "$plan_file" 2>/dev/null | tail -15)
  fi

  local test_summary=""
  if [[ -s "$test_log" ]]; then
    # Last 10 lines, with ANSI color escape sequences stripped.
    test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
  fi

  local review_summary=""
  if [[ -s "$review_file" ]]; then
    local total_issues=0
    # Try JSON structured output first
    if head -1 "$review_file" 2>/dev/null | grep -q '^{' 2>/dev/null; then
      total_issues=$(jq -r '.issues | length' "$review_file" 2>/dev/null || echo "0")
    fi
    # Grep fallback for markdown
    if [[ "${total_issues:-0}" -eq 0 ]]; then
      total_issues=$(grep -ciE '\*\*\[?(Critical|Bug|Security|Warning|Suggestion)\]?\*\*' "$review_file" 2>/dev/null || true)
      total_issues="${total_issues:-0}"
    fi
    review_summary="**Code review:** $total_issues issues found"
  fi

  local closes_line=""
  [[ -n "${GITHUB_ISSUE:-}" ]] && closes_line="Closes ${GITHUB_ISSUE}"

  # Final summary line of `diff --stat` (files changed / insertions / deletions).
  local diff_stats
  diff_stats=$(_safe_base_diff --stat | tail -1 || echo "")

  local commit_count
  commit_count=$(_safe_base_log --oneline | wc -l | xargs)

  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  # Unquoted <<EOF heredoc: variables expand now; backticks are escaped
  # so they survive as literal markdown. Content must stay at column 0
  # (plain <<EOF does not strip indentation).
  local pr_body
  pr_body="$(cat <<EOF
## Summary
${plan_summary:-$GOAL}

## Changes
${diff_stats}
${commit_count} commit(s) via \`shipwright pipeline\` (${PIPELINE_NAME})

## Test Results
\`\`\`
${test_summary:-No test output}
\`\`\`

${review_summary}
${simulation_summary}
${arch_summary}

${closes_line}

---

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Duration | ${total_dur:-—} |
| Model | ${MODEL:-opus} |
| Agents | ${AGENTS:-1} |

Generated by \`shipwright pipeline\`
EOF
)"

  # Verify required evidence before PR (merge policy enforcement)
  # Classify the change into a risk tier by matching changed paths
  # against glob rules in config/policy.json; default tier is "low".
  local risk_tier
  risk_tier="low"
  if [[ -f "$REPO_DIR/config/policy.json" ]]; then
    local changed_files
    changed_files=$(_safe_base_diff --name-only || true)
    if [[ -n "$changed_files" ]]; then
      local policy_file="$REPO_DIR/config/policy.json"
      # Returns 0 if any changed file matches any glob pattern listed
      # under .riskTierRules.<tier> in the policy file.
      check_tier_match() {
        local tier="$1"
        local patterns
        patterns=$(jq -r ".riskTierRules.${tier}[]? // empty" "$policy_file" 2>/dev/null)
        [[ -z "$patterns" ]] && return 1
        while IFS= read -r pattern; do
          [[ -z "$pattern" ]] && continue
          local regex
          # Glob → ERE: escape dots, ** → .* (any depth), * → [^/]* (one
          # path segment). DOUBLESTAR is a placeholder so the single-star
          # substitution can't clobber the double-star one.
          regex=$(echo "$pattern" | sed 's/\./\\./g; s/\*\*/DOUBLESTAR/g; s/\*/[^\/]*/g; s/DOUBLESTAR/.*/g')
          while IFS= read -r file; do
            [[ -z "$file" ]] && continue
            if echo "$file" | grep -qE "^${regex}$"; then
              return 0
            fi
          done <<< "$changed_files"
        done <<< "$patterns"
        return 1
      }
      # Highest matching tier wins: critical > high > medium > low.
      check_tier_match "critical" && risk_tier="critical"
      check_tier_match "high" && [[ "$risk_tier" != "critical" ]] && risk_tier="high"
      check_tier_match "medium" && [[ "$risk_tier" != "critical" && "$risk_tier" != "high" ]] && risk_tier="medium"
    fi
  fi

  # Evidence types the policy requires for this tier (newline-separated).
  local required_evidence
  required_evidence=$(jq -r ".mergePolicy.\"$risk_tier\".requiredEvidence // [] | .[]" "$REPO_DIR/config/policy.json" 2>/dev/null)

  if [[ -n "$required_evidence" ]]; then
    local evidence_dir="$REPO_DIR/.claude/evidence"
    local missing_evidence=()
    while IFS= read -r etype; do
      [[ -z "$etype" ]] && continue
      local has_evidence=false
      # Any file in the evidence dir whose name contains the type counts.
      for f in "$evidence_dir"/*"$etype"*; do
        [[ -f "$f" ]] && has_evidence=true && break
      done
      [[ "$has_evidence" != "true" ]] && missing_evidence+=("$etype")
    done <<< "$required_evidence"

    if [[ ${#missing_evidence[@]} -gt 0 ]]; then
      warn "Missing required evidence for $risk_tier tier: ${missing_evidence[*]}"
      emit_event "evidence.missing" "{\"tier\":\"$risk_tier\",\"missing\":\"${missing_evidence[*]}\"}"
      # Collect missing evidence
      # Best-effort capture via helper script; missing evidence still
      # only warns here — it does not block PR creation.
      if [[ -x "$SCRIPT_DIR/sw-evidence.sh" ]]; then
        for etype in "${missing_evidence[@]}"; do
          (cd "$REPO_DIR" && bash "$SCRIPT_DIR/sw-evidence.sh" capture "$etype" 2>/dev/null) || warn "Failed to collect $etype evidence"
        done
      fi
    fi
  fi

  # Build gh pr create args
  local pr_args=(--title "$pr_title" --body "$pr_body" --base "$BASE_BRANCH")

  # Propagate labels from issue + CLI
  local all_labels="${LABELS}"
  if [[ -n "$ISSUE_LABELS" ]]; then
    if [[ -n "$all_labels" ]]; then
      all_labels="${all_labels},${ISSUE_LABELS}"
    else
      all_labels="$ISSUE_LABELS"
    fi
  fi
  if [[ -n "$all_labels" ]]; then
    pr_args+=(--label "$all_labels")
  fi

  # Auto-detect or use provided reviewers
  local reviewers="${REVIEWERS}"
  if [[ -z "$reviewers" ]]; then
    reviewers=$(detect_reviewers)
  fi
  if [[ -n "$reviewers" ]]; then
    pr_args+=(--reviewer "$reviewers")
    info "Reviewers: ${DIM}$reviewers${RESET}"
  fi

  # Propagate milestone
  if [[ -n "$ISSUE_MILESTONE" ]]; then
    pr_args+=(--milestone "$ISSUE_MILESTONE")
    info "Milestone: ${DIM}$ISSUE_MILESTONE${RESET}"
  fi

  # Check for existing open PR on this branch to avoid duplicates (issue #12)
  local pr_url=""
  local existing_pr
  existing_pr=$(gh pr list --head "$GIT_BRANCH" --state open --json number,url --jq '.[0]' 2>/dev/null || echo "")
  if [[ -n "$existing_pr" && "$existing_pr" != "null" ]]; then
    # Update the existing PR's title/body in place instead of opening a
    # second one.
    local existing_pr_number existing_pr_url
    existing_pr_number=$(echo "$existing_pr" | jq -r '.number' 2>/dev/null || echo "")
    existing_pr_url=$(echo "$existing_pr" | jq -r '.url' 2>/dev/null || echo "")
    info "Updating existing PR #$existing_pr_number instead of creating duplicate"
    gh pr edit "$existing_pr_number" --title "$pr_title" --body "$pr_body" 2>/dev/null || true
    pr_url="$existing_pr_url"
  else
    info "Creating PR..."
    local pr_stderr pr_exit=0
    # NOTE(review): fixed /tmp path — predictable temp name; two
    # concurrent pipelines on one host would collide. Consider mktemp.
    pr_url=$(gh pr create "${pr_args[@]}" 2>/tmp/shipwright-pr-stderr.txt) || pr_exit=$?
    pr_stderr=$(cat /tmp/shipwright-pr-stderr.txt 2>/dev/null || true)
    rm -f /tmp/shipwright-pr-stderr.txt

    # gh pr create may return non-zero for reviewer issues but still create the PR
    if [[ "$pr_exit" -ne 0 ]]; then
      if [[ "$pr_url" == *"github.com"* ]]; then
        # PR was created but something non-fatal failed (e.g., reviewer not found)
        warn "PR created with warnings: ${pr_stderr:-unknown}"
      else
        error "PR creation failed: ${pr_stderr:-$pr_url}"
        return 1
      fi
    fi
  fi

  success "PR created: ${BOLD}$pr_url${RESET}"
  echo "$pr_url" > "$ARTIFACTS_DIR/pr-url.txt"

  # Extract PR number
  # Trailing digits of the PR URL; exported via global for later stages.
  PR_NUMBER=$(echo "$pr_url" | grep -oE '[0-9]+$' || true)

  # ── Intelligent Reviewer Selection (GraphQL-enhanced) ──
  # Only runs when no reviewer was set above (CLI/detect_reviewers).
  if [[ "${NO_GITHUB:-false}" != "true" && -n "$PR_NUMBER" && -z "$reviewers" ]]; then
    local reviewer_assigned=false

    # Try CODEOWNERS-based routing via GraphQL API
    if type gh_codeowners >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      local codeowners_json
      codeowners_json=$(gh_codeowners "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
      if [[ "$codeowners_json" != "[]" && -n "$codeowners_json" ]]; then
        local changed_files
        changed_files=$(_safe_base_diff --name-only || true)
        if [[ -n "$changed_files" ]]; then
          local co_reviewers
          # Up to 3 unique owners from the CODEOWNERS payload.
          co_reviewers=$(echo "$codeowners_json" | jq -r '.[].owners[]' 2>/dev/null | sort -u | head -3 || true)
          if [[ -n "$co_reviewers" ]]; then
            local rev
            while IFS= read -r rev; do
              # Strip the leading '@' that CODEOWNERS entries carry.
              rev="${rev#@}"
              [[ -n "$rev" ]] && gh pr edit "$PR_NUMBER" --add-reviewer "$rev" 2>/dev/null || true
            done <<< "$co_reviewers"
            info "Requested review from CODEOWNERS: $(echo "$co_reviewers" | tr '\n' ',' | sed 's/,$//')"
            reviewer_assigned=true
          fi
        fi
      fi
    fi

    # Fallback: contributor-based routing via GraphQL API
    if [[ "$reviewer_assigned" != "true" ]] && type gh_contributors >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      local contributors_json
      contributors_json=$(gh_contributors "$REPO_OWNER" "$REPO_NAME" 2>/dev/null || echo "[]")
      local top_contributor
      top_contributor=$(echo "$contributors_json" | jq -r '.[0].login // ""' 2>/dev/null || echo "")
      local current_user
      current_user=$(gh api user --jq '.login' 2>/dev/null || echo "")
      # Can't request review from the PR author, so skip self.
      if [[ -n "$top_contributor" && "$top_contributor" != "$current_user" ]]; then
        gh pr edit "$PR_NUMBER" --add-reviewer "$top_contributor" 2>/dev/null || true
        info "Requested review from top contributor: $top_contributor"
        reviewer_assigned=true
      fi
    fi

    # Final fallback: auto-approve if no reviewers assigned
    if [[ "$reviewer_assigned" != "true" ]]; then
      gh pr review "$PR_NUMBER" --approve 2>/dev/null || warn "Could not auto-approve PR"
    fi
  fi

  # Update issue with PR link
  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_remove_label "$ISSUE_NUMBER" "pipeline/in-progress"
    gh_add_labels "$ISSUE_NUMBER" "pipeline/pr-created"
    gh_comment_issue "$ISSUE_NUMBER" "🎉 **PR created:** ${pr_url}

Pipeline duration so far: ${total_dur:-unknown}"

    # Notify tracker of review/PR creation
    "$SCRIPT_DIR/sw-tracker.sh" notify "review" "$ISSUE_NUMBER" "$pr_url" 2>/dev/null || true
  fi

  # Wait for CI if configured
  # Pull the pr stage's wait_ci flag out of the pipeline config.
  local wait_ci
  wait_ci=$(jq -r --arg id "pr" '(.stages[] | select(.id == $id) | .config.wait_ci) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  if [[ "$wait_ci" == "true" ]]; then
    info "Waiting for CI checks..."
    gh pr checks --watch 2>/dev/null || warn "CI checks did not all pass"
  fi

  log_stage "pr" "PR created: $pr_url (${reviewers:+reviewers: $reviewers})"
}
|
|
2265
|
-
|
|
2266
|
-
stage_merge() {
|
|
2267
|
-
CURRENT_STAGE_ID="merge"
|
|
2268
|
-
|
|
2269
|
-
if [[ "$NO_GITHUB" == "true" ]]; then
|
|
2270
|
-
info "Merge stage skipped (--no-github)"
|
|
2271
|
-
return 0
|
|
2272
|
-
fi
|
|
2273
|
-
|
|
2274
|
-
# ── Oversight gate: merge block on verdict (diff + review criticals + goal) ──
|
|
2275
|
-
if [[ -x "$SCRIPT_DIR/sw-oversight.sh" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
|
|
2276
|
-
local merge_diff_file="${ARTIFACTS_DIR}/review-diff.patch"
|
|
2277
|
-
local merge_review_file="${ARTIFACTS_DIR}/review.md"
|
|
2278
|
-
if [[ ! -s "$merge_diff_file" ]]; then
|
|
2279
|
-
_safe_base_diff > "$merge_diff_file" 2>/dev/null || true
|
|
2280
|
-
fi
|
|
2281
|
-
if [[ -s "$merge_diff_file" ]]; then
|
|
2282
|
-
local _merge_critical _merge_sec _merge_blocking _merge_reject
|
|
2283
|
-
_merge_critical=$(grep -ciE '\*\*\[?Critical\]?\*\*' "$merge_review_file" 2>/dev/null || true)
|
|
2284
|
-
_merge_critical="${_merge_critical:-0}"
|
|
2285
|
-
_merge_sec=$(grep -ciE '\*\*\[?Security\]?\*\*' "$merge_review_file" 2>/dev/null || true)
|
|
2286
|
-
_merge_sec="${_merge_sec:-0}"
|
|
2287
|
-
_merge_blocking=$((${_merge_critical:-0} + ${_merge_sec:-0}))
|
|
2288
|
-
[[ "$_merge_blocking" -gt 0 ]] && _merge_reject="Review found ${_merge_blocking} critical/security issue(s)"
|
|
2289
|
-
if ! bash "$SCRIPT_DIR/sw-oversight.sh" gate --diff "$merge_diff_file" --description "${GOAL:-Pipeline merge}" --reject-if "${_merge_reject:-}" >/dev/null 2>&1; then
|
|
2290
|
-
error "Oversight gate rejected — blocking merge"
|
|
2291
|
-
emit_event "merge.oversight_blocked" "issue=${ISSUE_NUMBER:-0}"
|
|
2292
|
-
log_stage "merge" "BLOCKED: oversight gate rejected"
|
|
2293
|
-
return 1
|
|
2294
|
-
fi
|
|
2295
|
-
fi
|
|
2296
|
-
fi
|
|
2297
|
-
|
|
2298
|
-
# ── Approval gates: block if merge requires approval and pending for this issue ──
|
|
2299
|
-
local ag_file="${HOME}/.shipwright/approval-gates.json"
|
|
2300
|
-
if [[ -f "$ag_file" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
|
|
2301
|
-
local ag_enabled ag_stages ag_pending_merge ag_issue_num
|
|
2302
|
-
ag_enabled=$(jq -r '.enabled // false' "$ag_file" 2>/dev/null || echo "false")
|
|
2303
|
-
ag_stages=$(jq -r '.stages // [] | if type == "array" then .[] else empty end' "$ag_file" 2>/dev/null || true)
|
|
2304
|
-
ag_issue_num=$(echo "${ISSUE_NUMBER:-0}" | awk '{print $1+0}')
|
|
2305
|
-
if [[ "$ag_enabled" == "true" ]] && echo "$ag_stages" | grep -qx "merge" 2>/dev/null; then
|
|
2306
|
-
local ha_file="${ARTIFACTS_DIR}/human-approval.txt"
|
|
2307
|
-
local ha_approved="false"
|
|
2308
|
-
if [[ -f "$ha_file" ]]; then
|
|
2309
|
-
ha_approved=$(jq -r --arg stage "merge" 'select(.stage == $stage) | .approved // false' "$ha_file" 2>/dev/null || echo "false")
|
|
2310
|
-
fi
|
|
2311
|
-
if [[ "$ha_approved" != "true" ]]; then
|
|
2312
|
-
ag_pending_merge=$(jq -r --argjson issue "$ag_issue_num" --arg stage "merge" \
|
|
2313
|
-
'[.pending[]? | select(.issue == $issue and .stage == $stage)] | length' "$ag_file" 2>/dev/null || echo "0")
|
|
2314
|
-
if [[ "${ag_pending_merge:-0}" -eq 0 ]]; then
|
|
2315
|
-
local req_at tmp_ag
|
|
2316
|
-
req_at=$(date -u +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || true)
|
|
2317
|
-
tmp_ag=$(mktemp "${HOME}/.shipwright/approval-gates.json.XXXXXX" 2>/dev/null || mktemp)
|
|
2318
|
-
jq --argjson issue "$ag_issue_num" --arg stage "merge" --arg requested "${req_at}" \
|
|
2319
|
-
'.pending += [{"issue": $issue, "stage": $stage, "requested_at": $requested}]' "$ag_file" > "$tmp_ag" 2>/dev/null && mv "$tmp_ag" "$ag_file" || rm -f "$tmp_ag"
|
|
2320
|
-
fi
|
|
2321
|
-
info "Merge requires approval — awaiting human approval via dashboard"
|
|
2322
|
-
emit_event "merge.approval_pending" "issue=${ISSUE_NUMBER:-0}"
|
|
2323
|
-
log_stage "merge" "BLOCKED: approval gate pending"
|
|
2324
|
-
return 1
|
|
2325
|
-
fi
|
|
2326
|
-
fi
|
|
2327
|
-
fi
|
|
2328
|
-
|
|
2329
|
-
# ── Branch Protection Check ──
|
|
2330
|
-
if type gh_branch_protection >/dev/null 2>&1 && [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
|
|
2331
|
-
local protection_json
|
|
2332
|
-
protection_json=$(gh_branch_protection "$REPO_OWNER" "$REPO_NAME" "${BASE_BRANCH:-main}" 2>/dev/null || echo '{"protected": false}')
|
|
2333
|
-
local is_protected
|
|
2334
|
-
is_protected=$(echo "$protection_json" | jq -r '.protected // false' 2>/dev/null || echo "false")
|
|
2335
|
-
if [[ "$is_protected" == "true" ]]; then
|
|
2336
|
-
local required_reviews
|
|
2337
|
-
required_reviews=$(echo "$protection_json" | jq -r '.required_pull_request_reviews.required_approving_review_count // 0' 2>/dev/null || echo "0")
|
|
2338
|
-
local required_checks
|
|
2339
|
-
required_checks=$(echo "$protection_json" | jq -r '[.required_status_checks.contexts // [] | .[]] | length' 2>/dev/null || echo "0")
|
|
2340
|
-
|
|
2341
|
-
info "Branch protection: ${required_reviews} required review(s), ${required_checks} required check(s)"
|
|
2342
|
-
|
|
2343
|
-
if [[ "$required_reviews" -gt 0 ]]; then
|
|
2344
|
-
# Check if PR has enough approvals
|
|
2345
|
-
local prot_pr_number
|
|
2346
|
-
prot_pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")
|
|
2347
|
-
if [[ -n "$prot_pr_number" ]]; then
|
|
2348
|
-
local approvals
|
|
2349
|
-
approvals=$(gh pr view "$prot_pr_number" --json reviews --jq '[.reviews[] | select(.state == "APPROVED")] | length' 2>/dev/null || echo "0")
|
|
2350
|
-
if [[ "$approvals" -lt "$required_reviews" ]]; then
|
|
2351
|
-
warn "PR has $approvals approval(s), needs $required_reviews — skipping auto-merge"
|
|
2352
|
-
info "PR is ready for manual merge after required reviews"
|
|
2353
|
-
emit_event "merge.blocked" "issue=${ISSUE_NUMBER:-0}" "reason=insufficient_reviews" "have=$approvals" "need=$required_reviews"
|
|
2354
|
-
return 0
|
|
2355
|
-
fi
|
|
2356
|
-
fi
|
|
2357
|
-
fi
|
|
2358
|
-
fi
|
|
2359
|
-
fi
|
|
2360
|
-
|
|
2361
|
-
local merge_method wait_ci_timeout auto_delete_branch auto_merge auto_approve merge_strategy
|
|
2362
|
-
merge_method=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_method) // "squash"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
2363
|
-
[[ -z "$merge_method" || "$merge_method" == "null" ]] && merge_method="squash"
|
|
2364
|
-
wait_ci_timeout=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.wait_ci_timeout_s) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
2365
|
-
[[ -z "$wait_ci_timeout" || "$wait_ci_timeout" == "null" ]] && wait_ci_timeout=0
|
|
2366
|
-
|
|
2367
|
-
# Adaptive CI timeout: 90th percentile of historical times × 1.5 safety margin
|
|
2368
|
-
if [[ "$wait_ci_timeout" -eq 0 ]] 2>/dev/null; then
|
|
2369
|
-
local repo_hash_ci
|
|
2370
|
-
repo_hash_ci=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
2371
|
-
local ci_times_file="${HOME}/.shipwright/baselines/${repo_hash_ci}/ci-times.json"
|
|
2372
|
-
if [[ -f "$ci_times_file" ]]; then
|
|
2373
|
-
local p90_time
|
|
2374
|
-
p90_time=$(jq '
|
|
2375
|
-
.times | sort |
|
|
2376
|
-
(length * 0.9 | floor) as $idx |
|
|
2377
|
-
.[$idx] // 600
|
|
2378
|
-
' "$ci_times_file" 2>/dev/null || echo "0")
|
|
2379
|
-
if [[ -n "$p90_time" ]] && awk -v t="$p90_time" 'BEGIN{exit !(t > 0)}' 2>/dev/null; then
|
|
2380
|
-
# 1.5x safety margin, clamped to [120, 1800]
|
|
2381
|
-
wait_ci_timeout=$(awk -v p90="$p90_time" 'BEGIN{
|
|
2382
|
-
t = p90 * 1.5;
|
|
2383
|
-
if (t < 120) t = 120;
|
|
2384
|
-
if (t > 1800) t = 1800;
|
|
2385
|
-
printf "%d", t
|
|
2386
|
-
}')
|
|
2387
|
-
fi
|
|
2388
|
-
fi
|
|
2389
|
-
# Default fallback if no history
|
|
2390
|
-
[[ "$wait_ci_timeout" -eq 0 ]] && wait_ci_timeout=600
|
|
2391
|
-
fi
|
|
2392
|
-
auto_delete_branch=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_delete_branch) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
2393
|
-
[[ -z "$auto_delete_branch" || "$auto_delete_branch" == "null" ]] && auto_delete_branch="true"
|
|
2394
|
-
auto_merge=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_merge) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
2395
|
-
[[ -z "$auto_merge" || "$auto_merge" == "null" ]] && auto_merge="false"
|
|
2396
|
-
auto_approve=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.auto_approve) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
2397
|
-
[[ -z "$auto_approve" || "$auto_approve" == "null" ]] && auto_approve="false"
|
|
2398
|
-
merge_strategy=$(jq -r --arg id "merge" '(.stages[] | select(.id == $id) | .config.merge_strategy) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
2399
|
-
[[ -z "$merge_strategy" || "$merge_strategy" == "null" ]] && merge_strategy=""
|
|
2400
|
-
# merge_strategy overrides merge_method if set (squash/merge/rebase)
|
|
2401
|
-
if [[ -n "$merge_strategy" ]]; then
|
|
2402
|
-
merge_method="$merge_strategy"
|
|
2403
|
-
fi
|
|
2404
|
-
|
|
2405
|
-
# Find PR for current branch
|
|
2406
|
-
local pr_number
|
|
2407
|
-
pr_number=$(gh pr list --head "$GIT_BRANCH" --json number --jq '.[0].number' 2>/dev/null || echo "")
|
|
2408
|
-
|
|
2409
|
-
if [[ -z "$pr_number" ]]; then
|
|
2410
|
-
warn "No PR found for branch $GIT_BRANCH — skipping merge"
|
|
2411
|
-
return 0
|
|
2412
|
-
fi
|
|
2413
|
-
|
|
2414
|
-
info "Found PR #${pr_number} for branch ${GIT_BRANCH}"
|
|
2415
|
-
|
|
2416
|
-
# Wait for CI checks to pass
|
|
2417
|
-
info "Waiting for CI checks (timeout: ${wait_ci_timeout}s)..."
|
|
2418
|
-
local elapsed=0
|
|
2419
|
-
local check_interval=15
|
|
2420
|
-
|
|
2421
|
-
while [[ "$elapsed" -lt "$wait_ci_timeout" ]]; do
|
|
2422
|
-
local check_status
|
|
2423
|
-
check_status=$(gh pr checks "$pr_number" --json 'bucket,name' --jq '[.[] | .bucket] | unique | sort' 2>/dev/null || echo '["pending"]')
|
|
2424
|
-
|
|
2425
|
-
# If all checks passed (only "pass" in buckets)
|
|
2426
|
-
if echo "$check_status" | jq -e '. == ["pass"]' >/dev/null 2>&1; then
|
|
2427
|
-
success "All CI checks passed"
|
|
2428
|
-
break
|
|
2429
|
-
fi
|
|
2430
|
-
|
|
2431
|
-
# If any check failed
|
|
2432
|
-
if echo "$check_status" | jq -e 'any(. == "fail")' >/dev/null 2>&1; then
|
|
2433
|
-
error "CI checks failed — aborting merge"
|
|
2434
|
-
return 1
|
|
2435
|
-
fi
|
|
2436
|
-
|
|
2437
|
-
sleep "$check_interval"
|
|
2438
|
-
elapsed=$((elapsed + check_interval))
|
|
2439
|
-
done
|
|
2440
|
-
|
|
2441
|
-
# Record CI wait time for adaptive timeout calculation
|
|
2442
|
-
if [[ "$elapsed" -gt 0 ]]; then
|
|
2443
|
-
local repo_hash_ci_rec
|
|
2444
|
-
repo_hash_ci_rec=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
2445
|
-
local ci_times_dir="${HOME}/.shipwright/baselines/${repo_hash_ci_rec}"
|
|
2446
|
-
local ci_times_rec_file="${ci_times_dir}/ci-times.json"
|
|
2447
|
-
mkdir -p "$ci_times_dir"
|
|
2448
|
-
local ci_history="[]"
|
|
2449
|
-
if [[ -f "$ci_times_rec_file" ]]; then
|
|
2450
|
-
ci_history=$(jq '.times // []' "$ci_times_rec_file" 2>/dev/null || echo "[]")
|
|
2451
|
-
fi
|
|
2452
|
-
local updated_ci
|
|
2453
|
-
updated_ci=$(echo "$ci_history" | jq --arg t "$elapsed" '. + [($t | tonumber)] | .[-20:]' 2>/dev/null || echo "[$elapsed]")
|
|
2454
|
-
local tmp_ci
|
|
2455
|
-
tmp_ci=$(mktemp "${ci_times_dir}/ci-times.json.XXXXXX")
|
|
2456
|
-
jq -n --argjson times "$updated_ci" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
|
|
2457
|
-
'{times: $times, updated: $updated}' > "$tmp_ci" 2>/dev/null
|
|
2458
|
-
mv "$tmp_ci" "$ci_times_rec_file" 2>/dev/null || true
|
|
2459
|
-
fi
|
|
2460
|
-
|
|
2461
|
-
if [[ "$elapsed" -ge "$wait_ci_timeout" ]]; then
|
|
2462
|
-
warn "CI check timeout (${wait_ci_timeout}s) — proceeding with merge anyway"
|
|
2463
|
-
fi
|
|
2464
|
-
|
|
2465
|
-
# Auto-approve if configured (for branch protection requiring reviews)
|
|
2466
|
-
if [[ "$auto_approve" == "true" ]]; then
|
|
2467
|
-
info "Auto-approving PR #${pr_number}..."
|
|
2468
|
-
gh pr review "$pr_number" --approve 2>/dev/null || warn "Auto-approve failed (may need different permissions)"
|
|
2469
|
-
fi
|
|
2470
|
-
|
|
2471
|
-
# Merge the PR
|
|
2472
|
-
if [[ "$auto_merge" == "true" ]]; then
|
|
2473
|
-
info "Enabling auto-merge for PR #${pr_number} (strategy: ${merge_method})..."
|
|
2474
|
-
local auto_merge_args=("pr" "merge" "$pr_number" "--auto" "--${merge_method}")
|
|
2475
|
-
if [[ "$auto_delete_branch" == "true" ]]; then
|
|
2476
|
-
auto_merge_args+=("--delete-branch")
|
|
2477
|
-
fi
|
|
2478
|
-
|
|
2479
|
-
if gh "${auto_merge_args[@]}" 2>/dev/null; then
|
|
2480
|
-
success "Auto-merge enabled for PR #${pr_number} (strategy: ${merge_method})"
|
|
2481
|
-
emit_event "merge.auto_enabled" \
|
|
2482
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
2483
|
-
"pr=$pr_number" \
|
|
2484
|
-
"strategy=$merge_method"
|
|
2485
|
-
else
|
|
2486
|
-
warn "Auto-merge not available — falling back to direct merge"
|
|
2487
|
-
# Fall through to direct merge below
|
|
2488
|
-
auto_merge="false"
|
|
2489
|
-
fi
|
|
2490
|
-
fi
|
|
2491
|
-
|
|
2492
|
-
if [[ "$auto_merge" != "true" ]]; then
|
|
2493
|
-
info "Merging PR #${pr_number} (method: ${merge_method})..."
|
|
2494
|
-
local merge_args=("pr" "merge" "$pr_number" "--${merge_method}")
|
|
2495
|
-
if [[ "$auto_delete_branch" == "true" ]]; then
|
|
2496
|
-
merge_args+=("--delete-branch")
|
|
2497
|
-
fi
|
|
2498
|
-
|
|
2499
|
-
if gh "${merge_args[@]}" 2>/dev/null; then
|
|
2500
|
-
success "PR #${pr_number} merged successfully"
|
|
2501
|
-
else
|
|
2502
|
-
error "Failed to merge PR #${pr_number}"
|
|
2503
|
-
return 1
|
|
2504
|
-
fi
|
|
2505
|
-
fi
|
|
2506
|
-
|
|
2507
|
-
log_stage "merge" "PR #${pr_number} merged (strategy: ${merge_method}, auto_merge: ${auto_merge})"
|
|
2508
|
-
}
|
|
2509
|
-
|
|
2510
|
-
#######################################
# Deploy stage: runs configured staging/production deploy commands with
# optional canary or blue-green strategies, pre-deploy CI/coverage gates,
# GitHub Deployment tracking, and rollback on production failure.
# Globals:   PIPELINE_CONFIG, ARTIFACTS_DIR, ISSUE_NUMBER, REPO_OWNER,
#            REPO_NAME, GIT_BRANCH, NO_GITHUB (read); CURRENT_STAGE_ID (written)
# Returns:   0 on success/skip, 1 on gate failure or deploy failure
#######################################
stage_deploy() {
  CURRENT_STAGE_ID="deploy"

  # Pull per-stage config from the pipeline template; jq failures fall back
  # to empty strings ("null" is normalized away as well).
  local staging_cmd
  staging_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.staging_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$staging_cmd" == "null" ]] && staging_cmd=""

  local prod_cmd
  prod_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.production_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$prod_cmd" == "null" ]] && prod_cmd=""

  local rollback_cmd
  rollback_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""

  # Nothing to do if neither environment has a deploy command.
  if [[ -z "$staging_cmd" && -z "$prod_cmd" ]]; then
    warn "No deploy commands configured — skipping"
    return 0
  fi

  # Create GitHub deployment tracking (staging-only config tracks as staging).
  local gh_deploy_env="production"
  [[ -n "$staging_cmd" && -z "$prod_cmd" ]] && gh_deploy_env="staging"
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_start >/dev/null 2>&1; then
    if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      gh_deploy_pipeline_start "$REPO_OWNER" "$REPO_NAME" "${GIT_BRANCH:-HEAD}" "$gh_deploy_env" 2>/dev/null || true
      info "GitHub Deployment: tracking as $gh_deploy_env"
    fi
  fi

  # ── Pre-deploy gates ──
  # Gate 1: all CI check-runs on the branch head must be passing (or skipped).
  local pre_deploy_ci
  pre_deploy_ci=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_ci_status) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true

  if [[ "${pre_deploy_ci:-true}" == "true" && "${NO_GITHUB:-false}" != "true" && -n "${REPO_OWNER:-}" && -n "${REPO_NAME:-}" ]]; then
    info "Pre-deploy gate: checking CI status..."
    local ci_failures
    ci_failures=$(gh api "repos/${REPO_OWNER}/${REPO_NAME}/commits/${GIT_BRANCH:-HEAD}/check-runs" \
      --jq '[.check_runs[] | select(.conclusion != null and .conclusion != "success" and .conclusion != "skipped")] | length' 2>/dev/null || echo "0")
    if [[ "${ci_failures:-0}" -gt 0 ]]; then
      error "Pre-deploy gate FAILED: ${ci_failures} CI check(s) not passing"
      [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: ${ci_failures} CI checks failing" 2>/dev/null || true
      return 1
    fi
    success "Pre-deploy gate: all CI checks passing"
  fi

  # Gate 2: minimum test coverage, read from the test stage's artifact.
  local pre_deploy_min_cov
  pre_deploy_min_cov=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_min_coverage) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  if [[ -n "${pre_deploy_min_cov:-}" && "${pre_deploy_min_cov}" != "null" && -f "$ARTIFACTS_DIR/test-coverage.json" ]]; then
    local actual_cov
    actual_cov=$(jq -r '.coverage_pct // 0' "$ARTIFACTS_DIR/test-coverage.json" 2>/dev/null || echo "0")
    # FIX: [[ -lt ]] is integer arithmetic; a fractional coverage value such as
    # "85.3" would make the comparison error out and the gate silently pass.
    # Truncate both sides to their integer part before comparing.
    local actual_cov_int="${actual_cov%%.*}"
    local min_cov_int="${pre_deploy_min_cov%%.*}"
    if [[ "${actual_cov_int:-0}" -lt "${min_cov_int:-0}" ]]; then
      error "Pre-deploy gate FAILED: coverage ${actual_cov}% < required ${pre_deploy_min_cov}%"
      [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: coverage ${actual_cov}% below minimum ${pre_deploy_min_cov}%" 2>/dev/null || true
      return 1
    fi
    success "Pre-deploy gate: coverage ${actual_cov}% >= ${pre_deploy_min_cov}%"
  fi

  # Post deploy start to GitHub
  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "Deploy started"
  fi

  # ── Deploy strategy ──
  local deploy_strategy
  deploy_strategy=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.deploy_strategy) // "direct"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$deploy_strategy" == "null" ]] && deploy_strategy="direct"

  local canary_cmd promote_cmd switch_cmd health_url deploy_log
  canary_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.canary_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$canary_cmd" == "null" ]] && canary_cmd=""
  promote_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.promote_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$promote_cmd" == "null" ]] && promote_cmd=""
  switch_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.switch_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$switch_cmd" == "null" ]] && switch_cmd=""
  health_url=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""
  deploy_log="$ARTIFACTS_DIR/deploy.log"

  case "$deploy_strategy" in
    canary)
      info "Canary deployment strategy..."
      if [[ -z "$canary_cmd" ]]; then
        warn "No canary_cmd configured — falling back to direct"
        deploy_strategy="direct"
      else
        info "Deploying canary..."
        bash -c "$canary_cmd" >> "$deploy_log" 2>&1 || { error "Canary deploy failed"; return 1; }

        # 3 health probes, 10s apart; need 2/3 healthy (2xx/3xx) to promote.
        if [[ -n "$health_url" ]]; then
          local canary_healthy=0
          local _chk
          for _chk in 1 2 3; do
            sleep 10
            local _status
            _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
            if [[ "$_status" -ge 200 && "$_status" -lt 400 ]]; then
              canary_healthy=$((canary_healthy + 1))
            fi
          done
          if [[ "$canary_healthy" -lt 2 ]]; then
            error "Canary health check failed ($canary_healthy/3 passed) — rolling back"
            [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" 2>/dev/null || true
            return 1
          fi
          success "Canary healthy ($canary_healthy/3 checks passed)"
        fi

        info "Promoting canary to full deployment..."
        if [[ -n "$promote_cmd" ]]; then
          bash -c "$promote_cmd" >> "$deploy_log" 2>&1 || { error "Promote failed"; return 1; }
        fi
        success "Canary promoted"
      fi
      ;;
    blue-green)
      info "Blue-green deployment strategy..."
      if [[ -z "$staging_cmd" || -z "$switch_cmd" ]]; then
        warn "Blue-green requires staging_cmd + switch_cmd — falling back to direct"
        deploy_strategy="direct"
      else
        info "Deploying to inactive environment..."
        bash -c "$staging_cmd" >> "$deploy_log" 2>&1 || { error "Blue-green staging failed"; return 1; }

        # 3 health probes, 5s apart; need 2/3 healthy before switching traffic.
        if [[ -n "$health_url" ]]; then
          local bg_healthy=0
          local _chk
          for _chk in 1 2 3; do
            sleep 5
            local _status
            _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
            [[ "$_status" -ge 200 && "$_status" -lt 400 ]] && bg_healthy=$((bg_healthy + 1))
          done
          if [[ "$bg_healthy" -lt 2 ]]; then
            error "Blue-green health check failed — not switching"
            return 1
          fi
        fi

        info "Switching traffic..."
        bash -c "$switch_cmd" >> "$deploy_log" 2>&1 || { error "Traffic switch failed"; return 1; }
        success "Blue-green switch complete"
      fi
      ;;
  esac

  # ── Direct deployment (default or fallback) ──
  if [[ "$deploy_strategy" == "direct" ]]; then
    if [[ -n "$staging_cmd" ]]; then
      info "Deploying to staging..."
      bash -c "$staging_cmd" > "$ARTIFACTS_DIR/deploy-staging.log" 2>&1 || {
        error "Staging deploy failed"
        [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Staging deploy failed"
        # Mark GitHub deployment as failed
        if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
          gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
        fi
        return 1
      }
      success "Staging deploy complete"
    fi

    if [[ -n "$prod_cmd" ]]; then
      info "Deploying to production..."
      bash -c "$prod_cmd" > "$ARTIFACTS_DIR/deploy-prod.log" 2>&1 || {
        error "Production deploy failed"
        # Best-effort rollback before reporting failure.
        if [[ -n "$rollback_cmd" ]]; then
          warn "Rolling back..."
          bash -c "$rollback_cmd" 2>&1 || error "Rollback also failed!"
        fi
        [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Production deploy failed — rollback ${rollback_cmd:+attempted}"
        # Mark GitHub deployment as failed
        if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
          gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
        fi
        return 1
      }
      success "Production deploy complete"
    fi
  fi

  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Deploy complete**"
    gh_add_labels "$ISSUE_NUMBER" "deployed"
  fi

  # Mark GitHub deployment as successful
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete >/dev/null 2>&1; then
    if [[ -n "$REPO_OWNER" && -n "$REPO_NAME" ]]; then
      gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" true "" 2>/dev/null || true
    fi
  fi

  log_stage "deploy" "Deploy complete"
}
|
|
2706
|
-
|
|
2707
|
-
#######################################
# Validate stage: post-deploy smoke tests, health-check with retry, optional
# issue close with summary, and a pipeline report pushed to the repo wiki.
# Globals:   PIPELINE_CONFIG, ARTIFACTS_DIR, ISSUE_NUMBER, GOAL, GITHUB_ISSUE,
#            GIT_BRANCH, PIPELINE_NAME, PIPELINE_START_EPOCH, STAGE_TIMINGS
#            (read); CURRENT_STAGE_ID (written)
# Returns:   0 on success, 1 when smoke tests or health checks fail
#######################################
stage_validate() {
  CURRENT_STAGE_ID="validate"
  # Stage config from the pipeline template; "null" normalized to empty.
  local smoke_cmd
  smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""

  local health_url
  health_url=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""

  # close_issue defaults to false; compared as the string "true" below.
  local close_issue
  close_issue=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.close_issue) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true

  # Smoke tests — on failure, file an incident issue and abort the stage.
  if [[ -n "$smoke_cmd" ]]; then
    info "Running smoke tests..."
    bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/smoke.log" 2>&1 || {
      error "Smoke tests failed"
      if [[ -n "$ISSUE_NUMBER" ]]; then
        gh issue create --title "Deploy validation failed: $GOAL" \
          --label "incident" --body "Pipeline smoke tests failed after deploy.

Related issue: ${GITHUB_ISSUE}
Branch: ${GIT_BRANCH}
PR: $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'unknown')" 2>/dev/null || true
      fi
      return 1
    }
    success "Smoke tests passed"
  fi

  # Health check with retry — up to 5 attempts, 10s apart.
  if [[ -n "$health_url" ]]; then
    info "Health check: $health_url"
    local attempts=0
    while [[ $attempts -lt 5 ]]; do
      if curl -sf "$health_url" >/dev/null 2>&1; then
        success "Health check passed"
        break
      fi
      attempts=$((attempts + 1))
      [[ $attempts -lt 5 ]] && { info "Retry ${attempts}/5..."; sleep 10; }
    done
    # attempts only reaches 5 when every attempt failed (break skips the bump).
    if [[ $attempts -ge 5 ]]; then
      error "Health check failed after 5 attempts"
      return 1
    fi
  fi

  # Compute total duration once for both issue close and wiki report
  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  # Close original issue with comprehensive summary
  if [[ "$close_issue" == "true" && -n "$ISSUE_NUMBER" ]]; then
    gh issue close "$ISSUE_NUMBER" --comment "## ✅ Complete — Deployed & Validated

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |

_Closed automatically by \`shipwright pipeline\`_" 2>/dev/null || true

    # Swap the tracking label from in-flight to complete.
    gh_remove_label "$ISSUE_NUMBER" "pipeline/pr-created"
    gh_add_labels "$ISSUE_NUMBER" "pipeline/complete"
    success "Issue #$ISSUE_NUMBER closed"
  fi

  # Push pipeline report to wiki
  # NOTE(review): STAGE_TIMINGS appears to be a '|'-separated list of
  # per-stage entries — confirm against where it is built.
  local report="# Pipeline Report — ${GOAL}

| Metric | Value |
|--------|-------|
| Pipeline | \`${PIPELINE_NAME}\` |
| Branch | \`${GIT_BRANCH}\` |
| PR | $(cat "$ARTIFACTS_DIR/pr-url.txt" 2>/dev/null || echo 'N/A') |
| Duration | ${total_dur:-unknown} |
| Stages | $(echo "$STAGE_TIMINGS" | tr '|' '\n' | wc -l | xargs) completed |

## Stage Timings
$(echo "$STAGE_TIMINGS" | tr '|' '\n' | sed 's/^/- /')

## Artifacts
$(ls -1 "$ARTIFACTS_DIR" 2>/dev/null | sed 's/^/- /')

---
_Generated by \`shipwright pipeline\` at $(now_iso)_"
  gh_wiki_page "Pipeline-Report-${ISSUE_NUMBER:-inline}" "$report"

  log_stage "validate" "Validation complete"
}
|
|
2803
|
-
|
|
2804
|
-
#######################################
# Monitor stage: polls a health URL and/or a log command after deploy,
# accumulates errors against a threshold, and on breach optionally triggers
# feedback collection, auto-rollback (with smoke-test verification), GitHub
# notifications, and a hotfix issue. Records an adaptive baseline on success.
# Globals:   PIPELINE_CONFIG, ARTIFACTS_DIR, SCRIPT_DIR, PROJECT_ROOT,
#            ISSUE_NUMBER, GOAL, GITHUB_ISSUE, GIT_BRANCH, GH_AVAILABLE,
#            DIM, RESET (read); CURRENT_STAGE_ID (written)
# Returns:   0 when monitoring stays under threshold, 1 on breach
#######################################
stage_monitor() {
  CURRENT_STAGE_ID="monitor"

  # Read config from pipeline template
  local duration_minutes health_url error_threshold log_pattern log_cmd rollback_cmd auto_rollback
  duration_minutes=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.duration_minutes) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$duration_minutes" || "$duration_minutes" == "null" ]] && duration_minutes=5
  health_url=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$health_url" == "null" ]] && health_url=""
  error_threshold=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.error_threshold) // 5' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$error_threshold" || "$error_threshold" == "null" ]] && error_threshold=5

  # Adaptive monitor: use historical baselines if available.
  # NOTE(review): baseline p90 values are assumed to be integers (they are
  # derived from integer history entries below) — a fractional value here
  # would break the total_polls arithmetic; confirm if the schema changes.
  local repo_hash
  repo_hash=$(echo "${PROJECT_ROOT:-$(pwd)}" | cksum | awk '{print $1}')
  local baseline_file="${HOME}/.shipwright/baselines/${repo_hash}/deploy-monitor.json"
  if [[ -f "$baseline_file" ]]; then
    local hist_duration hist_threshold
    hist_duration=$(jq -r '.p90_stabilization_minutes // empty' "$baseline_file" 2>/dev/null || true)
    hist_threshold=$(jq -r '.p90_error_threshold // empty' "$baseline_file" 2>/dev/null || true)
    if [[ -n "$hist_duration" && "$hist_duration" != "null" ]]; then
      duration_minutes="$hist_duration"
      info "Monitor duration: ${duration_minutes}m ${DIM}(from baseline)${RESET}"
    fi
    if [[ -n "$hist_threshold" && "$hist_threshold" != "null" ]]; then
      error_threshold="$hist_threshold"
      info "Error threshold: ${error_threshold} ${DIM}(from baseline)${RESET}"
    fi
  fi
  log_pattern=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_pattern) // "ERROR|FATAL|PANIC"' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$log_pattern" || "$log_pattern" == "null" ]] && log_pattern="ERROR|FATAL|PANIC"
  log_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.log_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$log_cmd" == "null" ]] && log_cmd=""
  rollback_cmd=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.rollback_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ "$rollback_cmd" == "null" ]] && rollback_cmd=""
  auto_rollback=$(jq -r --arg id "monitor" '(.stages[] | select(.id == $id) | .config.auto_rollback) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$auto_rollback" || "$auto_rollback" == "null" ]] && auto_rollback="false"

  # Nothing to watch — skip cleanly.
  if [[ -z "$health_url" && -z "$log_cmd" ]]; then
    warn "No health_url or log_cmd configured — skipping monitor stage"
    log_stage "monitor" "Skipped (no monitoring configured)"
    return 0
  fi

  local report_file="$ARTIFACTS_DIR/monitor-report.md"
  local deploy_log_file="$ARTIFACTS_DIR/deploy-logs.txt"
  : > "$deploy_log_file"
  local total_errors=0
  local poll_interval=30 # seconds between polls
  local total_polls=$(( (duration_minutes * 60) / poll_interval ))
  [[ "$total_polls" -lt 1 ]] && total_polls=1

  info "Post-deploy monitoring: ${duration_minutes}m (${total_polls} polls, threshold: ${error_threshold} errors)"

  emit_event "monitor.started" \
    "issue=${ISSUE_NUMBER:-0}" \
    "duration_minutes=$duration_minutes" \
    "error_threshold=$error_threshold"

  # Report header (overwrites any previous run's report).
  {
    echo "# Post-Deploy Monitor Report"
    echo ""
    echo "- Duration: ${duration_minutes} minutes"
    echo "- Health URL: ${health_url:-none}"
    echo "- Log command: ${log_cmd:-none}"
    echo "- Error threshold: ${error_threshold}"
    echo "- Auto-rollback: ${auto_rollback}"
    echo ""
    echo "## Poll Results"
    echo ""
  } > "$report_file"

  local poll=0
  local health_failures=0
  local log_errors=0
  while [[ "$poll" -lt "$total_polls" ]]; do
    poll=$((poll + 1))
    local poll_time
    poll_time=$(now_iso)

    # Health URL check
    if [[ -n "$health_url" ]]; then
      local http_status
      # FIX: was `curl -sf ... || echo "000"` — with --fail, curl still writes
      # the -w output on HTTP errors, so the fallback appended a second line
      # and broke the integer comparison below. Plain -s (matching the canary
      # and blue-green probes in stage_deploy) always yields one clean code.
      http_status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "000")
      if [[ "$http_status" -ge 200 && "$http_status" -lt 400 ]]; then
        echo "- [${poll_time}] Health: ✅ (HTTP ${http_status})" >> "$report_file"
      else
        health_failures=$((health_failures + 1))
        total_errors=$((total_errors + 1))
        echo "- [${poll_time}] Health: ❌ (HTTP ${http_status})" >> "$report_file"
        warn "Health check failed: HTTP ${http_status}"
      fi
    fi

    # Log command check (accumulate deploy logs for feedback collect)
    if [[ -n "$log_cmd" ]]; then
      local log_output
      log_output=$(bash -c "$log_cmd" 2>/dev/null || true)
      [[ -n "$log_output" ]] && echo "$log_output" >> "$deploy_log_file"
      local error_count=0
      if [[ -n "$log_output" ]]; then
        # grep -c prints 0 and exits non-zero on no match; || true keeps us alive.
        error_count=$(echo "$log_output" | grep -cE "$log_pattern" 2>/dev/null || true)
        error_count="${error_count:-0}"
      fi
      if [[ "$error_count" -gt 0 ]]; then
        log_errors=$((log_errors + error_count))
        total_errors=$((total_errors + error_count))
        echo "- [${poll_time}] Logs: ⚠️ ${error_count} error(s) matching '${log_pattern}'" >> "$report_file"
        warn "Log errors detected: ${error_count}"
      else
        echo "- [${poll_time}] Logs: ✅ clean" >> "$report_file"
      fi
    fi

    emit_event "monitor.check" \
      "issue=${ISSUE_NUMBER:-0}" \
      "poll=$poll" \
      "total_errors=$total_errors" \
      "health_failures=$health_failures"

    # Check threshold
    if [[ "$total_errors" -ge "$error_threshold" ]]; then
      error "Error threshold exceeded: ${total_errors} >= ${error_threshold}"

      echo "" >> "$report_file"
      echo "## ❌ THRESHOLD EXCEEDED" >> "$report_file"
      echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"

      emit_event "monitor.alert" \
        "issue=${ISSUE_NUMBER:-0}" \
        "total_errors=$total_errors" \
        "threshold=$error_threshold"

      # Feedback loop: collect deploy logs and optionally create issue
      if [[ -f "$deploy_log_file" ]] && [[ -s "$deploy_log_file" ]] && [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" collect "$deploy_log_file" 2>/dev/null) || true
        (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" create-issue 2>/dev/null) || true
      fi

      # Auto-rollback: feedback rollback (GitHub Deployments API) and/or config rollback_cmd
      if [[ "$auto_rollback" == "true" ]]; then
        warn "Auto-rolling back..."
        echo "" >> "$report_file"
        echo "## Rollback" >> "$report_file"

        # Trigger feedback rollback (calls sw-github-deploy.sh rollback)
        if [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
          (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" rollback production "Monitor threshold exceeded (${total_errors} errors)" >> "$report_file" 2>&1) || true
        fi

        if [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" >> "$report_file" 2>&1; then
          success "Rollback executed"
          echo "Rollback: ✅ success" >> "$report_file"

          # Post-rollback smoke test verification (reuses the validate stage's smoke_cmd)
          local smoke_cmd
          smoke_cmd=$(jq -r --arg id "validate" '(.stages[] | select(.id == $id) | .config.smoke_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
          [[ "$smoke_cmd" == "null" ]] && smoke_cmd=""

          if [[ -n "$smoke_cmd" ]]; then
            info "Verifying rollback with smoke tests..."
            if bash -c "$smoke_cmd" > "$ARTIFACTS_DIR/rollback-smoke.log" 2>&1; then
              success "Rollback verified — smoke tests pass"
              echo "Rollback verification: ✅ smoke tests pass" >> "$report_file"
              emit_event "monitor.rollback_verified" \
                "issue=${ISSUE_NUMBER:-0}" \
                "status=pass"
            else
              error "Rollback verification FAILED — smoke tests still failing"
              echo "Rollback verification: ❌ smoke tests FAILED — manual intervention required" >> "$report_file"
              emit_event "monitor.rollback_verified" \
                "issue=${ISSUE_NUMBER:-0}" \
                "status=fail"
              if [[ -n "$ISSUE_NUMBER" ]]; then
                gh_comment_issue "$ISSUE_NUMBER" "🚨 **Rollback executed but verification failed** — smoke tests still failing after rollback. Manual intervention required.

Smoke command: \`${smoke_cmd}\`
Log: see \`pipeline-artifacts/rollback-smoke.log\`" 2>/dev/null || true
              fi
            fi
          fi
        else
          error "Rollback failed!"
          echo "Rollback: ❌ failed" >> "$report_file"
        fi

        emit_event "monitor.rollback" \
          "issue=${ISSUE_NUMBER:-0}" \
          "total_errors=$total_errors"

        # Post to GitHub
        if [[ -n "$ISSUE_NUMBER" ]]; then
          gh_comment_issue "$ISSUE_NUMBER" "🚨 **Auto-rollback triggered** — ${total_errors} errors exceeded threshold (${error_threshold})

Rollback command: \`${rollback_cmd}\`" 2>/dev/null || true

          # Create hotfix issue
          if [[ "$GH_AVAILABLE" == "true" ]]; then
            gh issue create \
              --title "Hotfix: Deploy regression for ${GOAL}" \
              --label "hotfix,incident" \
              --body "Auto-rollback triggered during post-deploy monitoring.

**Original issue:** ${GITHUB_ISSUE:-N/A}
**Errors detected:** ${total_errors}
**Threshold:** ${error_threshold}
**Branch:** ${GIT_BRANCH}

## Monitor Report
$(cat "$report_file")

---
_Created automatically by \`shipwright pipeline\` monitor stage_" 2>/dev/null || true
          fi
        fi
      fi

      log_stage "monitor" "Failed — ${total_errors} errors (threshold: ${error_threshold})"
      return 1
    fi

    # Sleep between polls (skip on last poll)
    if [[ "$poll" -lt "$total_polls" ]]; then
      sleep "$poll_interval"
    fi
  done

  # Monitoring complete — all clear
  echo "" >> "$report_file"
  echo "## ✅ Monitoring Complete" >> "$report_file"
  echo "Total errors: ${total_errors} (threshold: ${error_threshold})" >> "$report_file"
  echo "Health failures: ${health_failures}" >> "$report_file"
  echo "Log errors: ${log_errors}" >> "$report_file"

  success "Post-deploy monitoring clean (${total_errors} errors in ${duration_minutes}m)"

  # Proactive feedback collection: always collect deploy logs for trend analysis
  if [[ -f "$deploy_log_file" ]] && [[ -s "$deploy_log_file" ]] && [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
    (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" collect "$deploy_log_file" 2>/dev/null) || true
  fi

  if [[ -n "$ISSUE_NUMBER" ]]; then
    gh_comment_issue "$ISSUE_NUMBER" "✅ **Post-deploy monitoring passed** — ${duration_minutes}m, ${total_errors} errors" 2>/dev/null || true
  fi

  log_stage "monitor" "Clean — ${total_errors} errors in ${duration_minutes}m"

  # Record baseline for adaptive monitoring on future runs
  local baseline_dir="${HOME}/.shipwright/baselines/${repo_hash}"
  mkdir -p "$baseline_dir" 2>/dev/null || true
  local baseline_tmp
  baseline_tmp="$(mktemp)"
  if [[ -f "${baseline_dir}/deploy-monitor.json" ]]; then
    # Append to history and recalculate p90 (atomic replace via temp file).
    jq --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '.history += [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}] |
      .p90_stabilization_minutes = ([.history[].duration_minutes] | sort | .[length * 9 / 10 | floor]) |
      .p90_error_threshold = (([.history[].errors] | sort | .[length * 9 / 10 | floor]) + 2) |
      .updated_at = now' \
      "${baseline_dir}/deploy-monitor.json" > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  else
    jq -n --arg dur "$duration_minutes" --arg errs "$total_errors" \
      '{history: [{"duration_minutes": ($dur | tonumber), "errors": ($errs | tonumber)}],
      p90_stabilization_minutes: ($dur | tonumber),
      p90_error_threshold: (($errs | tonumber) + 2),
      updated_at: now}' \
      > "$baseline_tmp" 2>/dev/null && \
      mv "$baseline_tmp" "${baseline_dir}/deploy-monitor.json" || rm -f "$baseline_tmp"
  fi
}
|
|
201
|
+
_PIPELINE_STAGES_REVIEW_SH="${SCRIPT_DIR}/lib/pipeline-stages-review.sh"
|
|
202
|
+
[[ -f "$_PIPELINE_STAGES_REVIEW_SH" ]] && source "$_PIPELINE_STAGES_REVIEW_SH"
|
|
3075
203
|
|
|
3076
|
-
|
|
3077
|
-
|
|
204
|
+
_PIPELINE_STAGES_DELIVERY_SH="${SCRIPT_DIR}/lib/pipeline-stages-delivery.sh"
|
|
205
|
+
[[ -f "$_PIPELINE_STAGES_DELIVERY_SH" ]] && source "$_PIPELINE_STAGES_DELIVERY_SH"
|
|
3078
206
|
|
|
207
|
+
_PIPELINE_STAGES_MONITOR_SH="${SCRIPT_DIR}/lib/pipeline-stages-monitor.sh"
|
|
208
|
+
[[ -f "$_PIPELINE_STAGES_MONITOR_SH" ]] && source "$_PIPELINE_STAGES_MONITOR_SH"
|