shipwright-cli 3.2.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +2 -0
- package/.claude/agents/devops-engineer.md +2 -0
- package/.claude/agents/doc-fleet-agent.md +2 -0
- package/.claude/agents/pipeline-agent.md +2 -0
- package/.claude/agents/shell-script-specialist.md +2 -0
- package/.claude/agents/test-specialist.md +2 -0
- package/.claude/hooks/agent-crash-capture.sh +32 -0
- package/.claude/hooks/post-tool-use.sh +3 -2
- package/.claude/hooks/pre-tool-use.sh +35 -3
- package/README.md +4 -4
- package/claude-code/hooks/config-change.sh +18 -0
- package/claude-code/hooks/instructions-reloaded.sh +7 -0
- package/claude-code/hooks/worktree-create.sh +25 -0
- package/claude-code/hooks/worktree-remove.sh +20 -0
- package/config/code-constitution.json +130 -0
- package/dashboard/middleware/auth.ts +134 -0
- package/dashboard/middleware/constants.ts +21 -0
- package/dashboard/public/index.html +2 -6
- package/dashboard/public/styles.css +100 -97
- package/dashboard/routes/auth.ts +38 -0
- package/dashboard/server.ts +66 -25
- package/dashboard/services/config.ts +26 -0
- package/dashboard/services/db.ts +118 -0
- package/dashboard/src/canvas/pixel-agent.ts +298 -0
- package/dashboard/src/canvas/pixel-sprites.ts +440 -0
- package/dashboard/src/canvas/shipyard-effects.ts +367 -0
- package/dashboard/src/canvas/shipyard-scene.ts +616 -0
- package/dashboard/src/canvas/submarine-layout.ts +267 -0
- package/dashboard/src/components/header.ts +8 -7
- package/dashboard/src/core/router.ts +1 -0
- package/dashboard/src/design/submarine-theme.ts +253 -0
- package/dashboard/src/main.ts +2 -0
- package/dashboard/src/types/api.ts +2 -1
- package/dashboard/src/views/activity.ts +2 -1
- package/dashboard/src/views/shipyard.ts +39 -0
- package/dashboard/types/index.ts +166 -0
- package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
- package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
- package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
- package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
- package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
- package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
- package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
- package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
- package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
- package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
- package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
- package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
- package/docs/research/RESEARCH_INDEX.md +439 -0
- package/docs/research/RESEARCH_SOURCES.md +440 -0
- package/docs/research/RESEARCH_SUMMARY.txt +275 -0
- package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
- package/package.json +2 -2
- package/scripts/lib/adaptive-model.sh +427 -0
- package/scripts/lib/adaptive-timeout.sh +316 -0
- package/scripts/lib/audit-trail.sh +309 -0
- package/scripts/lib/auto-recovery.sh +471 -0
- package/scripts/lib/bandit-selector.sh +431 -0
- package/scripts/lib/bootstrap.sh +104 -2
- package/scripts/lib/causal-graph.sh +455 -0
- package/scripts/lib/compat.sh +126 -0
- package/scripts/lib/compound-audit.sh +337 -0
- package/scripts/lib/constitutional.sh +454 -0
- package/scripts/lib/context-budget.sh +359 -0
- package/scripts/lib/convergence.sh +594 -0
- package/scripts/lib/cost-optimizer.sh +634 -0
- package/scripts/lib/daemon-adaptive.sh +10 -0
- package/scripts/lib/daemon-dispatch.sh +106 -17
- package/scripts/lib/daemon-failure.sh +34 -4
- package/scripts/lib/daemon-patrol.sh +23 -2
- package/scripts/lib/daemon-poll-github.sh +361 -0
- package/scripts/lib/daemon-poll-health.sh +299 -0
- package/scripts/lib/daemon-poll.sh +27 -611
- package/scripts/lib/daemon-state.sh +112 -66
- package/scripts/lib/daemon-triage.sh +10 -0
- package/scripts/lib/dod-scorecard.sh +442 -0
- package/scripts/lib/error-actionability.sh +300 -0
- package/scripts/lib/formal-spec.sh +461 -0
- package/scripts/lib/helpers.sh +177 -4
- package/scripts/lib/intent-analysis.sh +409 -0
- package/scripts/lib/loop-convergence.sh +350 -0
- package/scripts/lib/loop-iteration.sh +682 -0
- package/scripts/lib/loop-progress.sh +48 -0
- package/scripts/lib/loop-restart.sh +185 -0
- package/scripts/lib/memory-effectiveness.sh +506 -0
- package/scripts/lib/mutation-executor.sh +352 -0
- package/scripts/lib/outcome-feedback.sh +521 -0
- package/scripts/lib/pipeline-cli.sh +336 -0
- package/scripts/lib/pipeline-commands.sh +1216 -0
- package/scripts/lib/pipeline-detection.sh +100 -2
- package/scripts/lib/pipeline-execution.sh +897 -0
- package/scripts/lib/pipeline-github.sh +28 -3
- package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
- package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
- package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
- package/scripts/lib/pipeline-intelligence.sh +100 -1136
- package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
- package/scripts/lib/pipeline-quality-checks.sh +17 -715
- package/scripts/lib/pipeline-quality-gates.sh +563 -0
- package/scripts/lib/pipeline-stages-build.sh +730 -0
- package/scripts/lib/pipeline-stages-delivery.sh +965 -0
- package/scripts/lib/pipeline-stages-intake.sh +1133 -0
- package/scripts/lib/pipeline-stages-monitor.sh +407 -0
- package/scripts/lib/pipeline-stages-review.sh +1022 -0
- package/scripts/lib/pipeline-stages.sh +59 -2929
- package/scripts/lib/pipeline-state.sh +36 -5
- package/scripts/lib/pipeline-util.sh +487 -0
- package/scripts/lib/policy-learner.sh +438 -0
- package/scripts/lib/process-reward.sh +493 -0
- package/scripts/lib/project-detect.sh +649 -0
- package/scripts/lib/quality-profile.sh +334 -0
- package/scripts/lib/recruit-commands.sh +885 -0
- package/scripts/lib/recruit-learning.sh +739 -0
- package/scripts/lib/recruit-roles.sh +648 -0
- package/scripts/lib/reward-aggregator.sh +458 -0
- package/scripts/lib/rl-optimizer.sh +362 -0
- package/scripts/lib/root-cause.sh +427 -0
- package/scripts/lib/scope-enforcement.sh +445 -0
- package/scripts/lib/session-restart.sh +493 -0
- package/scripts/lib/skill-memory.sh +300 -0
- package/scripts/lib/skill-registry.sh +775 -0
- package/scripts/lib/spec-driven.sh +476 -0
- package/scripts/lib/test-helpers.sh +18 -7
- package/scripts/lib/test-holdout.sh +429 -0
- package/scripts/lib/test-optimizer.sh +511 -0
- package/scripts/shipwright-file-suggest.sh +45 -0
- package/scripts/skills/adversarial-quality.md +61 -0
- package/scripts/skills/api-design.md +44 -0
- package/scripts/skills/architecture-design.md +50 -0
- package/scripts/skills/brainstorming.md +43 -0
- package/scripts/skills/data-pipeline.md +44 -0
- package/scripts/skills/deploy-safety.md +64 -0
- package/scripts/skills/documentation.md +38 -0
- package/scripts/skills/frontend-design.md +45 -0
- package/scripts/skills/generated/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
- package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
- package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
- package/scripts/skills/generated/cli-version-management.md +29 -0
- package/scripts/skills/generated/collection-system-validation.md +99 -0
- package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
- package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
- package/scripts/skills/generated/test-parallelization-detection.md +65 -0
- package/scripts/skills/observability.md +79 -0
- package/scripts/skills/performance.md +48 -0
- package/scripts/skills/pr-quality.md +49 -0
- package/scripts/skills/product-thinking.md +43 -0
- package/scripts/skills/security-audit.md +49 -0
- package/scripts/skills/systematic-debugging.md +40 -0
- package/scripts/skills/testing-strategy.md +47 -0
- package/scripts/skills/two-stage-review.md +52 -0
- package/scripts/skills/validation-thoroughness.md +55 -0
- package/scripts/sw +9 -3
- package/scripts/sw-activity.sh +9 -2
- package/scripts/sw-adaptive.sh +2 -1
- package/scripts/sw-adversarial.sh +2 -1
- package/scripts/sw-architecture-enforcer.sh +3 -1
- package/scripts/sw-auth.sh +12 -2
- package/scripts/sw-autonomous.sh +5 -1
- package/scripts/sw-changelog.sh +4 -1
- package/scripts/sw-checkpoint.sh +2 -1
- package/scripts/sw-ci.sh +5 -1
- package/scripts/sw-cleanup.sh +4 -26
- package/scripts/sw-code-review.sh +10 -4
- package/scripts/sw-connect.sh +2 -1
- package/scripts/sw-context.sh +2 -1
- package/scripts/sw-cost.sh +48 -3
- package/scripts/sw-daemon.sh +66 -9
- package/scripts/sw-dashboard.sh +3 -1
- package/scripts/sw-db.sh +59 -16
- package/scripts/sw-decide.sh +8 -2
- package/scripts/sw-decompose.sh +360 -17
- package/scripts/sw-deps.sh +4 -1
- package/scripts/sw-developer-simulation.sh +4 -1
- package/scripts/sw-discovery.sh +325 -2
- package/scripts/sw-doc-fleet.sh +4 -1
- package/scripts/sw-docs-agent.sh +3 -1
- package/scripts/sw-docs.sh +2 -1
- package/scripts/sw-doctor.sh +453 -2
- package/scripts/sw-dora.sh +4 -1
- package/scripts/sw-durable.sh +4 -3
- package/scripts/sw-e2e-orchestrator.sh +17 -16
- package/scripts/sw-eventbus.sh +7 -1
- package/scripts/sw-evidence.sh +364 -12
- package/scripts/sw-feedback.sh +550 -9
- package/scripts/sw-fix.sh +20 -1
- package/scripts/sw-fleet-discover.sh +6 -2
- package/scripts/sw-fleet-viz.sh +4 -1
- package/scripts/sw-fleet.sh +5 -1
- package/scripts/sw-github-app.sh +16 -3
- package/scripts/sw-github-checks.sh +3 -2
- package/scripts/sw-github-deploy.sh +3 -2
- package/scripts/sw-github-graphql.sh +18 -7
- package/scripts/sw-guild.sh +5 -1
- package/scripts/sw-heartbeat.sh +5 -30
- package/scripts/sw-hello.sh +67 -0
- package/scripts/sw-hygiene.sh +6 -1
- package/scripts/sw-incident.sh +265 -1
- package/scripts/sw-init.sh +18 -2
- package/scripts/sw-instrument.sh +10 -2
- package/scripts/sw-intelligence.sh +42 -6
- package/scripts/sw-jira.sh +5 -1
- package/scripts/sw-launchd.sh +2 -1
- package/scripts/sw-linear.sh +4 -1
- package/scripts/sw-logs.sh +4 -1
- package/scripts/sw-loop.sh +432 -1128
- package/scripts/sw-memory.sh +356 -2
- package/scripts/sw-mission-control.sh +6 -1
- package/scripts/sw-model-router.sh +481 -26
- package/scripts/sw-otel.sh +13 -4
- package/scripts/sw-oversight.sh +14 -5
- package/scripts/sw-patrol-meta.sh +334 -0
- package/scripts/sw-pipeline-composer.sh +5 -1
- package/scripts/sw-pipeline-vitals.sh +2 -1
- package/scripts/sw-pipeline.sh +53 -2664
- package/scripts/sw-pm.sh +12 -5
- package/scripts/sw-pr-lifecycle.sh +2 -1
- package/scripts/sw-predictive.sh +7 -1
- package/scripts/sw-prep.sh +185 -2
- package/scripts/sw-ps.sh +5 -25
- package/scripts/sw-public-dashboard.sh +15 -3
- package/scripts/sw-quality.sh +2 -1
- package/scripts/sw-reaper.sh +8 -25
- package/scripts/sw-recruit.sh +156 -2303
- package/scripts/sw-regression.sh +19 -12
- package/scripts/sw-release-manager.sh +3 -1
- package/scripts/sw-release.sh +4 -1
- package/scripts/sw-remote.sh +3 -1
- package/scripts/sw-replay.sh +7 -1
- package/scripts/sw-retro.sh +158 -1
- package/scripts/sw-review-rerun.sh +3 -1
- package/scripts/sw-scale.sh +10 -3
- package/scripts/sw-security-audit.sh +6 -1
- package/scripts/sw-self-optimize.sh +6 -3
- package/scripts/sw-session.sh +9 -3
- package/scripts/sw-setup.sh +3 -1
- package/scripts/sw-stall-detector.sh +406 -0
- package/scripts/sw-standup.sh +15 -7
- package/scripts/sw-status.sh +3 -1
- package/scripts/sw-strategic.sh +4 -1
- package/scripts/sw-stream.sh +7 -1
- package/scripts/sw-swarm.sh +18 -6
- package/scripts/sw-team-stages.sh +13 -6
- package/scripts/sw-templates.sh +5 -29
- package/scripts/sw-testgen.sh +7 -1
- package/scripts/sw-tmux-pipeline.sh +4 -1
- package/scripts/sw-tmux-role-color.sh +2 -0
- package/scripts/sw-tmux-status.sh +1 -1
- package/scripts/sw-tmux.sh +3 -1
- package/scripts/sw-trace.sh +3 -1
- package/scripts/sw-tracker-github.sh +3 -0
- package/scripts/sw-tracker-jira.sh +3 -0
- package/scripts/sw-tracker-linear.sh +3 -0
- package/scripts/sw-tracker.sh +3 -1
- package/scripts/sw-triage.sh +2 -1
- package/scripts/sw-upgrade.sh +3 -1
- package/scripts/sw-ux.sh +5 -2
- package/scripts/sw-webhook.sh +3 -1
- package/scripts/sw-widgets.sh +3 -1
- package/scripts/sw-worktree.sh +15 -3
- package/scripts/test-skill-injection.sh +1233 -0
- package/templates/pipelines/autonomous.json +27 -3
- package/templates/pipelines/cost-aware.json +34 -8
- package/templates/pipelines/deployed.json +12 -0
- package/templates/pipelines/enterprise.json +12 -0
- package/templates/pipelines/fast.json +6 -0
- package/templates/pipelines/full.json +27 -3
- package/templates/pipelines/hotfix.json +6 -0
- package/templates/pipelines/standard.json +12 -0
- package/templates/pipelines/tdd.json +12 -0
|
@@ -0,0 +1,897 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# Module: pipeline-execution
# Execution orchestration: stage retry logic, self-healing build-test loop, main pipeline run
set -euo pipefail

# Module guard — make repeated `source` calls idempotent (return, not exit,
# because this file is always sourced, never executed directly).
[[ -n "${_MODULE_PIPELINE_EXECUTION_LOADED:-}" ]] && return 0; _MODULE_PIPELINE_EXECUTION_LOADED=1

# ─── Defaults (needed if sourced independently) ──────────────────────────────
# Each assignment honors a value already exported by the caller (sw-pipeline.sh)
# and only computes a fallback when unset.
SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)}"
REPO_DIR="${REPO_DIR:-$(cd "$SCRIPT_DIR/.." && pwd)}"
# Prefer the enclosing git worktree root; fall back to the current directory.
PROJECT_ROOT="${PROJECT_ROOT:-$(git rev-parse --show-toplevel 2>/dev/null || pwd)}"
STATE_DIR="${STATE_DIR:-$PROJECT_ROOT/.claude}"
STATE_FILE="${STATE_FILE:-$STATE_DIR/pipeline-state.md}"
ARTIFACTS_DIR="${ARTIFACTS_DIR:-$STATE_DIR/pipeline-artifacts}"

# Variables referenced by execution functions (set by sw-pipeline.sh, defaults here for safety)
BUILD_TEST_RETRIES="${BUILD_TEST_RETRIES:-2}"
SELF_HEAL_COUNT="${SELF_HEAL_COUNT:-0}"
STASHED_CHANGES="${STASHED_CHANGES:-false}"
NOTIFICATION_ENABLED="${NOTIFICATION_ENABLED:-false}"
HEARTBEAT_PID="${HEARTBEAT_PID:-}"

# Ensure helpers are loaded (best-effort: missing helpers.sh is tolerated).
[[ -f "$SCRIPT_DIR/lib/helpers.sh" ]] && source "$SCRIPT_DIR/lib/helpers.sh" 2>/dev/null || true
# Define no-frills fallbacks only when helpers.sh did not provide them,
# so this module stays usable when sourced in isolation.
[[ "$(type -t info 2>/dev/null)" == "function" ]] || info() { echo "$*"; }
[[ "$(type -t warn 2>/dev/null)" == "function" ]] || warn() { echo "$*"; }
[[ "$(type -t error 2>/dev/null)" == "function" ]] || error() { echo "$*" >&2; }
[[ "$(type -t emit_event 2>/dev/null)" == "function" ]] || emit_event() { true; }

# Ensure pipeline intelligence skip module is loaded (provides pipeline_should_skip_stage)
# SCRIPT_DIR may point to scripts/ or scripts/lib/ depending on how this module was sourced
if [[ -f "$SCRIPT_DIR/pipeline-intelligence-skip.sh" ]]; then
  source "$SCRIPT_DIR/pipeline-intelligence-skip.sh" 2>/dev/null || true
elif [[ -f "$SCRIPT_DIR/lib/pipeline-intelligence-skip.sh" ]]; then
  source "$SCRIPT_DIR/lib/pipeline-intelligence-skip.sh" 2>/dev/null || true
fi
# ─── Stage Execution with Retry Logic ──────────────────────────────
#######################################
# Run one pipeline stage, retrying with classification-aware policy.
# Globals:   PIPELINE_CONFIG (read), ARTIFACTS_DIR (read),
#            ISSUE_NUMBER (read, optional),
#            LAST_STAGE_ERROR / LAST_STAGE_ERROR_CLASS (written)
# Arguments: $1 - stage id; a function named "stage_<id>" must exist
# Returns:   0 when the stage passes (or a usable plan artifact already
#            exists for the "plan" stage); 1 when retries are exhausted
#            or the error class makes retrying pointless
#######################################
run_stage_with_retry() {
  local stage_id="$1"
  local max_retries
  # Per-stage retry budget from the pipeline config; missing/invalid → 0.
  max_retries=$(jq -r --arg id "$stage_id" '(.stages[] | select(.id == $id) | .config.retries) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$max_retries" || "$max_retries" == "null" ]] && max_retries=0

  local attempt=0
  local prev_error_class=""
  while true; do
    if "stage_${stage_id}"; then
      return 0
    fi

    # Capture error_class and error snippet for stage.failed / pipeline.completed events
    local error_class
    error_class=$(classify_error "$stage_id")
    LAST_STAGE_ERROR_CLASS="$error_class"
    LAST_STAGE_ERROR=""
    local _log_file="${ARTIFACTS_DIR}/${stage_id}-results.log"
    [[ ! -f "$_log_file" ]] && _log_file="${ARTIFACTS_DIR}/test-results.log"
    if [[ -f "$_log_file" ]]; then
      # First error-looking line of the log tail, truncated to 200 chars.
      LAST_STAGE_ERROR=$(tail -20 "$_log_file" 2>/dev/null | grep -iE 'error|fail|exception|fatal' 2>/dev/null | head -1 | cut -c1-200 || true)
    fi

    attempt=$((attempt + 1))

    # Critical fix: if plan stage already has a valid artifact, skip retry
    if [[ "$stage_id" == "plan" ]]; then
      local plan_artifact="${ARTIFACTS_DIR}/plan.md"
      if [[ -s "$plan_artifact" ]]; then
        local existing_lines
        existing_lines=$(wc -l < "$plan_artifact" 2>/dev/null | xargs)
        existing_lines="${existing_lines:-0}"
        if [[ "$existing_lines" -gt 10 ]]; then
          info "Plan already exists (${existing_lines} lines) — skipping retry, advancing"
          emit_event "retry.skipped_existing_artifact" \
            "issue=${ISSUE_NUMBER:-0}" \
            "stage=$stage_id" \
            "artifact_lines=$existing_lines"
          return 0
        fi
      fi
    fi

    if [[ "$attempt" -gt "$max_retries" ]]; then
      return 1
    fi

    # Classify done above; decide whether retry makes sense

    emit_event "retry.classified" \
      "issue=${ISSUE_NUMBER:-0}" \
      "stage=$stage_id" \
      "attempt=$attempt" \
      "error_class=$error_class"

    case "$error_class" in
      infrastructure)
        info "Error classified as infrastructure (timeout/network/OOM) — retry makes sense"
        ;;
      configuration)
        # Config errors are deterministic — a retry would fail identically.
        error "Error classified as configuration (missing env/path) — skipping retry, escalating"
        emit_event "retry.escalated" \
          "issue=${ISSUE_NUMBER:-0}" \
          "stage=$stage_id" \
          "reason=configuration_error"
        return 1
        ;;
      logic)
        # Allow exactly one retry for a logic error; a repeat of the same
        # class means the code itself must change first.
        if [[ "$error_class" == "$prev_error_class" ]]; then
          error "Error classified as logic (assertion/type error) with same class — retry won't help without code change"
          emit_event "retry.skipped" \
            "issue=${ISSUE_NUMBER:-0}" \
            "stage=$stage_id" \
            "reason=repeated_logic_error"
          return 1
        fi
        warn "Error classified as logic — retrying once in case build fixes it"
        ;;
      *)
        info "Error classification: unknown — retrying"
        ;;
    esac
    prev_error_class="$error_class"

    # Optionally persist the retry reasoning for later analysis (best-effort).
    if type db_save_reasoning_trace >/dev/null 2>&1; then
      local job_id="${SHIPWRIGHT_PIPELINE_ID:-$$}"
      local error_msg="${LAST_STAGE_ERROR:-$error_class}"
      db_save_reasoning_trace "$job_id" "retry_reasoning" \
        "stage=$stage_id error=$error_msg" \
        "Stage failed, analyzing error pattern before retry" \
        "retry_strategy=self_heal" 0.6 2>/dev/null || true
    fi

    warn "Stage $stage_id failed (attempt $attempt/$((max_retries + 1)), class: $error_class) — retrying..."
    # Exponential backoff with jitter to avoid thundering herd
    local backoff=$((2 ** attempt))
    [[ "$backoff" -gt 16 ]] && backoff=16
    local jitter=$(( RANDOM % (backoff + 1) ))
    local total_sleep=$((backoff + jitter))
    info "Backing off ${total_sleep}s before retry..."
    sleep "$total_sleep"

    # Write debugging context for the retry attempt to consume
    local _retry_ctx_file="${ARTIFACTS_DIR}/.retry-context-${stage_id}.md"
    {
      echo "## Previous Attempt Failed"
      echo ""
      echo "**Error classification:** ${error_class}"
      echo "**Attempt:** ${attempt} of $((max_retries + 1))"
      echo ""
      echo "### Error Output (last 30 lines)"
      echo '```'
      tail -30 "$_log_file" 2>/dev/null || echo "(no log available)"
      echo '```'
      echo ""
      # Check for existing artifacts that should be preserved
      local _existing_artifacts=""
      for _af in plan.md design.md test-results.log; do
        if [[ -s "${ARTIFACTS_DIR}/${_af}" ]]; then
          local _af_lines
          _af_lines=$(wc -l < "${ARTIFACTS_DIR}/${_af}" 2>/dev/null | xargs)
          # Literal "\n" escapes accumulate here; expanded via %b below.
          _existing_artifacts="${_existing_artifacts} - ${_af} (${_af_lines} lines)\n"
        fi
      done
      if [[ -n "$_existing_artifacts" ]]; then
        echo "### Existing Artifacts (PRESERVE these)"
        # %b expands the accumulated \n escapes (portable, unlike echo -e).
        printf '%b\n' "$_existing_artifacts"
        echo "These artifacts exist from previous successful stages. Use them as-is unless they are the source of the problem."
        echo ""
      fi
      # Adaptive: check if additional skills could help this retry
      if type skill_memory_get_recommendations >/dev/null 2>&1; then
        local _retry_skills
        _retry_skills=$(skill_memory_get_recommendations "${INTELLIGENCE_ISSUE_TYPE:-backend}" "$stage_id" 2>/dev/null || true)
        if [[ -n "$_retry_skills" ]]; then
          echo "### Skills Recommended by Learning System"
          echo "Based on historical success rates, these skills may improve the retry:"
          # Render the comma-separated list as markdown bullets.
          # (Bash expansion instead of `sed 's/,/\n- /g'`: BSD sed treats
          # \n in a replacement as a literal 'n', corrupting the output.)
          printf '%s\n' "- ${_retry_skills//,/$'\n'- }"
          echo ""
        fi
      fi

      echo "### Investigation Required"
      echo "Before attempting a fix:"
      echo "1. Read the error output above carefully"
      echo "2. Identify the ROOT CAUSE — not just the symptom"
      echo "3. If previous artifacts exist and are correct, build on them"
      echo "4. If previous artifacts are flawed, explain what's wrong before fixing"
    } > "$_retry_ctx_file" 2>/dev/null || true

    emit_event "retry.context_written" \
      "issue=${ISSUE_NUMBER:-0}" \
      "stage=$stage_id" \
      "attempt=$attempt" \
      "context_file=$_retry_ctx_file"
  done
}
# ─── Self-Healing Build→Test Feedback Loop ─────────────────────────
|
|
200
|
+
self_healing_build_test() {
|
|
201
|
+
local cycle=0
|
|
202
|
+
local max_cycles="$BUILD_TEST_RETRIES"
|
|
203
|
+
local last_test_error=""
|
|
204
|
+
|
|
205
|
+
# Convergence tracking
|
|
206
|
+
local prev_error_sig="" consecutive_same_error=0
|
|
207
|
+
local prev_fail_count=0 zero_convergence_streak=0
|
|
208
|
+
|
|
209
|
+
# Vitals-driven adaptive limit (preferred over static BUILD_TEST_RETRIES)
|
|
210
|
+
if type pipeline_adaptive_limit >/dev/null 2>&1; then
|
|
211
|
+
local _vitals_json=""
|
|
212
|
+
if type pipeline_compute_vitals >/dev/null 2>&1; then
|
|
213
|
+
_vitals_json=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
|
|
214
|
+
fi
|
|
215
|
+
local vitals_limit
|
|
216
|
+
vitals_limit=$(pipeline_adaptive_limit "build_test" "$_vitals_json" 2>/dev/null) || true
|
|
217
|
+
if [[ -n "$vitals_limit" && "$vitals_limit" =~ ^[0-9]+$ && "$vitals_limit" -gt 0 ]]; then
|
|
218
|
+
info "Vitals-driven build-test limit: ${max_cycles} → ${vitals_limit}"
|
|
219
|
+
max_cycles="$vitals_limit"
|
|
220
|
+
emit_event "vitals.adaptive_limit" \
|
|
221
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
222
|
+
"context=build_test" \
|
|
223
|
+
"original=$BUILD_TEST_RETRIES" \
|
|
224
|
+
"vitals_limit=$vitals_limit"
|
|
225
|
+
fi
|
|
226
|
+
# Fallback: intelligence-based adaptive limits
|
|
227
|
+
elif type composer_estimate_iterations >/dev/null 2>&1; then
|
|
228
|
+
local estimated
|
|
229
|
+
estimated=$(composer_estimate_iterations \
|
|
230
|
+
"${INTELLIGENCE_ANALYSIS:-{}}" \
|
|
231
|
+
"${HOME}/.shipwright/optimization/iteration-model.json" 2>/dev/null || echo "")
|
|
232
|
+
if [[ -n "$estimated" && "$estimated" =~ ^[0-9]+$ && "$estimated" -gt 0 ]]; then
|
|
233
|
+
max_cycles="$estimated"
|
|
234
|
+
emit_event "intelligence.adaptive_iterations" \
|
|
235
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
236
|
+
"estimated=$estimated" \
|
|
237
|
+
"original=$BUILD_TEST_RETRIES"
|
|
238
|
+
fi
|
|
239
|
+
fi
|
|
240
|
+
|
|
241
|
+
# Fallback: adaptive cycle limits from optimization data
|
|
242
|
+
if [[ "$max_cycles" == "$BUILD_TEST_RETRIES" ]]; then
|
|
243
|
+
local _iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
|
|
244
|
+
if [[ -f "$_iter_model" ]]; then
|
|
245
|
+
local adaptive_bt_limit
|
|
246
|
+
adaptive_bt_limit=$(pipeline_adaptive_cycles "$max_cycles" "build_test" "0" "-1" 2>/dev/null) || true
|
|
247
|
+
if [[ -n "$adaptive_bt_limit" && "$adaptive_bt_limit" =~ ^[0-9]+$ && "$adaptive_bt_limit" -gt 0 && "$adaptive_bt_limit" != "$max_cycles" ]]; then
|
|
248
|
+
info "Adaptive build-test cycles: ${max_cycles} → ${adaptive_bt_limit}"
|
|
249
|
+
max_cycles="$adaptive_bt_limit"
|
|
250
|
+
fi
|
|
251
|
+
fi
|
|
252
|
+
fi
|
|
253
|
+
|
|
254
|
+
while [[ "$cycle" -le "$max_cycles" ]]; do
|
|
255
|
+
cycle=$((cycle + 1))
|
|
256
|
+
|
|
257
|
+
if [[ "$cycle" -gt 1 ]]; then
|
|
258
|
+
SELF_HEAL_COUNT=$((SELF_HEAL_COUNT + 1))
|
|
259
|
+
echo ""
|
|
260
|
+
echo -e "${YELLOW}${BOLD}━━━ Self-Healing Cycle ${cycle}/$((max_cycles + 1)) ━━━${RESET}"
|
|
261
|
+
info "Feeding test failure back to build loop..."
|
|
262
|
+
|
|
263
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
264
|
+
gh_comment_issue "$ISSUE_NUMBER" "🔄 **Self-healing cycle ${cycle}** — rebuilding with error context" 2>/dev/null || true
|
|
265
|
+
fi
|
|
266
|
+
|
|
267
|
+
# Reset build/test stage statuses for retry
|
|
268
|
+
set_stage_status "build" "retrying"
|
|
269
|
+
set_stage_status "test" "pending"
|
|
270
|
+
fi
|
|
271
|
+
|
|
272
|
+
# ── Run Build Stage ──
|
|
273
|
+
echo ""
|
|
274
|
+
echo -e "${CYAN}${BOLD}▸ Stage: build${RESET} ${DIM}[cycle ${cycle}]${RESET}"
|
|
275
|
+
CURRENT_STAGE_ID="build"
|
|
276
|
+
|
|
277
|
+
# Inject error context on retry cycles
|
|
278
|
+
if [[ "$cycle" -gt 1 && -n "$last_test_error" ]]; then
|
|
279
|
+
# Query memory for known fixes
|
|
280
|
+
local _memory_fix=""
|
|
281
|
+
if type memory_closed_loop_inject >/dev/null 2>&1; then
|
|
282
|
+
local _error_sig_short
|
|
283
|
+
_error_sig_short=$(echo "$last_test_error" | head -3 || echo "")
|
|
284
|
+
_memory_fix=$(memory_closed_loop_inject "$_error_sig_short" 2>/dev/null) || true
|
|
285
|
+
fi
|
|
286
|
+
|
|
287
|
+
local memory_prefix=""
|
|
288
|
+
if [[ -n "$_memory_fix" ]]; then
|
|
289
|
+
info "Memory suggests fix: $(echo "$_memory_fix" | head -1)"
|
|
290
|
+
memory_prefix="KNOWN FIX (from past success): ${_memory_fix}
|
|
291
|
+
|
|
292
|
+
"
|
|
293
|
+
fi
|
|
294
|
+
|
|
295
|
+
# Temporarily augment the goal with error context
|
|
296
|
+
local original_goal="$GOAL"
|
|
297
|
+
GOAL="$GOAL
|
|
298
|
+
|
|
299
|
+
${memory_prefix}IMPORTANT — Previous build attempt failed tests. Fix these errors:
|
|
300
|
+
$last_test_error
|
|
301
|
+
|
|
302
|
+
Focus on fixing the failing tests while keeping all passing tests working."
|
|
303
|
+
|
|
304
|
+
update_status "running" "build"
|
|
305
|
+
record_stage_start "build"
|
|
306
|
+
type audit_emit >/dev/null 2>&1 && audit_emit "stage.start" "stage=build" || true
|
|
307
|
+
|
|
308
|
+
local build_start_epoch
|
|
309
|
+
build_start_epoch=$(date +%s)
|
|
310
|
+
if run_stage_with_retry "build"; then
|
|
311
|
+
mark_stage_complete "build"
|
|
312
|
+
local timing
|
|
313
|
+
timing=$(get_stage_timing "build")
|
|
314
|
+
local build_dur_s=$(( $(date +%s) - build_start_epoch ))
|
|
315
|
+
type audit_emit >/dev/null 2>&1 && audit_emit "stage.complete" "stage=build" "verdict=pass" "duration_s=${build_dur_s}" || true
|
|
316
|
+
success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
|
|
317
|
+
if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
318
|
+
local _diff_count
|
|
319
|
+
_diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
|
|
320
|
+
local _snap_files _snap_error
|
|
321
|
+
_snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
|
|
322
|
+
_snap_files="${_snap_files:-0}"
|
|
323
|
+
_snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
|
|
324
|
+
_snap_error="${_snap_error:-}"
|
|
325
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
|
|
326
|
+
fi
|
|
327
|
+
else
|
|
328
|
+
mark_stage_failed "build"
|
|
329
|
+
local build_dur_s=$(( $(date +%s) - build_start_epoch ))
|
|
330
|
+
type audit_emit >/dev/null 2>&1 && audit_emit "stage.complete" "stage=build" "verdict=fail" "duration_s=${build_dur_s}" || true
|
|
331
|
+
GOAL="$original_goal"
|
|
332
|
+
return 1
|
|
333
|
+
fi
|
|
334
|
+
GOAL="$original_goal"
|
|
335
|
+
else
|
|
336
|
+
update_status "running" "build"
|
|
337
|
+
record_stage_start "build"
|
|
338
|
+
type audit_emit >/dev/null 2>&1 && audit_emit "stage.start" "stage=build" || true
|
|
339
|
+
|
|
340
|
+
local build_start_epoch
|
|
341
|
+
build_start_epoch=$(date +%s)
|
|
342
|
+
if run_stage_with_retry "build"; then
|
|
343
|
+
mark_stage_complete "build"
|
|
344
|
+
local timing
|
|
345
|
+
timing=$(get_stage_timing "build")
|
|
346
|
+
local build_dur_s=$(( $(date +%s) - build_start_epoch ))
|
|
347
|
+
type audit_emit >/dev/null 2>&1 && audit_emit "stage.complete" "stage=build" "verdict=pass" "duration_s=${build_dur_s}" || true
|
|
348
|
+
success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
|
|
349
|
+
if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
350
|
+
local _diff_count
|
|
351
|
+
_diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
|
|
352
|
+
local _snap_files _snap_error
|
|
353
|
+
_snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
|
|
354
|
+
_snap_files="${_snap_files:-0}"
|
|
355
|
+
_snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
|
|
356
|
+
_snap_error="${_snap_error:-}"
|
|
357
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
|
|
358
|
+
fi
|
|
359
|
+
else
|
|
360
|
+
mark_stage_failed "build"
|
|
361
|
+
local build_dur_s=$(( $(date +%s) - build_start_epoch ))
|
|
362
|
+
type audit_emit >/dev/null 2>&1 && audit_emit "stage.complete" "stage=build" "verdict=fail" "duration_s=${build_dur_s}" || true
|
|
363
|
+
return 1
|
|
364
|
+
fi
|
|
365
|
+
fi
|
|
366
|
+
|
|
367
|
+
# ── Run Test Stage ──
|
|
368
|
+
echo ""
|
|
369
|
+
echo -e "${CYAN}${BOLD}▸ Stage: test${RESET} ${DIM}[cycle ${cycle}]${RESET}"
|
|
370
|
+
CURRENT_STAGE_ID="test"
|
|
371
|
+
update_status "running" "test"
|
|
372
|
+
record_stage_start "test"
|
|
373
|
+
|
|
374
|
+
if run_stage_with_retry "test"; then
|
|
375
|
+
mark_stage_complete "test"
|
|
376
|
+
local timing
|
|
377
|
+
timing=$(get_stage_timing "test")
|
|
378
|
+
success "Stage ${BOLD}test${RESET} complete ${DIM}(${timing})${RESET}"
|
|
379
|
+
emit_event "convergence.tests_passed" \
|
|
380
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
381
|
+
"cycle=$cycle"
|
|
382
|
+
if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
383
|
+
local _diff_count
|
|
384
|
+
_diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
|
|
385
|
+
local _snap_files _snap_error
|
|
386
|
+
_snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
|
|
387
|
+
_snap_files="${_snap_files:-0}"
|
|
388
|
+
_snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
|
|
389
|
+
_snap_error="${_snap_error:-}"
|
|
390
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-test}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
|
|
391
|
+
fi
|
|
392
|
+
# Record fix outcome when tests pass after a retry with memory injection (pipeline path)
|
|
393
|
+
if [[ "$cycle" -gt 1 && -n "${last_test_error:-}" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
|
|
394
|
+
local _sig
|
|
395
|
+
_sig=$(echo "$last_test_error" | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//')
|
|
396
|
+
[[ -n "$_sig" ]] && bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_sig" "true" "true" 2>/dev/null || true
|
|
397
|
+
fi
|
|
398
|
+
return 0 # Tests passed!
|
|
399
|
+
fi
|
|
400
|
+
|
|
401
|
+
# Tests failed — capture error for next cycle
|
|
402
|
+
local test_log="$ARTIFACTS_DIR/test-results.log"
|
|
403
|
+
last_test_error=$(tail -30 "$test_log" 2>/dev/null || echo "Test command failed with no output")
|
|
404
|
+
mark_stage_failed "test"
|
|
405
|
+
|
|
406
|
+
# ── Convergence Detection ──
|
|
407
|
+
# Hash the error output to detect repeated failures
|
|
408
|
+
local error_sig
|
|
409
|
+
error_sig=$(echo "$last_test_error" | shasum -a 256 2>/dev/null | cut -c1-16 || echo "unknown")
|
|
410
|
+
|
|
411
|
+
# Count failing tests (extract from common patterns)
|
|
412
|
+
local current_fail_count=0
|
|
413
|
+
current_fail_count=$(grep -ciE 'fail|error|FAIL' "$test_log" 2>/dev/null || true)
|
|
414
|
+
current_fail_count="${current_fail_count:-0}"
|
|
415
|
+
|
|
416
|
+
if [[ "$error_sig" == "$prev_error_sig" ]]; then
|
|
417
|
+
consecutive_same_error=$((consecutive_same_error + 1))
|
|
418
|
+
else
|
|
419
|
+
consecutive_same_error=1
|
|
420
|
+
fi
|
|
421
|
+
prev_error_sig="$error_sig"
|
|
422
|
+
|
|
423
|
+
# Check: same error 3 times consecutively → stuck
|
|
424
|
+
if [[ "$consecutive_same_error" -ge 3 ]]; then
|
|
425
|
+
error "Convergence: stuck on same error for 3 consecutive cycles — exiting early"
|
|
426
|
+
emit_event "convergence.stuck" \
|
|
427
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
428
|
+
"cycle=$cycle" \
|
|
429
|
+
"error_sig=$error_sig" \
|
|
430
|
+
"consecutive=$consecutive_same_error"
|
|
431
|
+
notify "Build Convergence" "Stuck on unfixable error after ${cycle} cycles" "error"
|
|
432
|
+
return 1
|
|
433
|
+
fi
|
|
434
|
+
|
|
435
|
+
# Track convergence rate: did we reduce failures?
|
|
436
|
+
if [[ "$cycle" -gt 1 && "$prev_fail_count" -gt 0 ]]; then
|
|
437
|
+
if [[ "$current_fail_count" -ge "$prev_fail_count" ]]; then
|
|
438
|
+
zero_convergence_streak=$((zero_convergence_streak + 1))
|
|
439
|
+
else
|
|
440
|
+
zero_convergence_streak=0
|
|
441
|
+
fi
|
|
442
|
+
|
|
443
|
+
# Check: zero convergence for 2 consecutive iterations → plateau
|
|
444
|
+
if [[ "$zero_convergence_streak" -ge 2 ]]; then
|
|
445
|
+
error "Convergence: no progress for 2 consecutive cycles (${current_fail_count} failures remain) — exiting early"
|
|
446
|
+
emit_event "convergence.plateau" \
|
|
447
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
448
|
+
"cycle=$cycle" \
|
|
449
|
+
"fail_count=$current_fail_count" \
|
|
450
|
+
"streak=$zero_convergence_streak"
|
|
451
|
+
notify "Build Convergence" "No progress after ${cycle} cycles — plateau reached" "error"
|
|
452
|
+
return 1
|
|
453
|
+
fi
|
|
454
|
+
fi
|
|
455
|
+
prev_fail_count="$current_fail_count"
|
|
456
|
+
|
|
457
|
+
info "Convergence: error_sig=${error_sig:0:8} repeat=${consecutive_same_error} failures=${current_fail_count} no_progress=${zero_convergence_streak}"
|
|
458
|
+
|
|
459
|
+
if [[ "$cycle" -le "$max_cycles" ]]; then
|
|
460
|
+
warn "Tests failed — will attempt self-healing (cycle $((cycle + 1))/$((max_cycles + 1)))"
|
|
461
|
+
notify "Self-Healing" "Tests failed on cycle ${cycle}, retrying..." "warn"
|
|
462
|
+
fi
|
|
463
|
+
done
|
|
464
|
+
|
|
465
|
+
error "Self-healing exhausted after $((max_cycles + 1)) cycles"
|
|
466
|
+
notify "Self-Healing Failed" "Tests still failing after $((max_cycles + 1)) build-test cycles" "error"
|
|
467
|
+
return 1
|
|
468
|
+
}
|
|
469
|
+
|
|
470
|
+
# ─── Auto-Rebase Before PR ─────────────────────────────────────────
# Bring the current branch up to date with origin/$BASE_BRANCH before a PR.
# Strategy: fetch, then rebase; on rebase conflict, abort and fall back to a
# merge. Globals: BASE_BRANCH (read). Returns 0 when in sync or when fetch is
# unavailable (best-effort); returns 1 only when both rebase and merge fail.
auto_rebase() {
  info "Syncing with ${BASE_BRANCH}..."

  # A failed fetch (offline, missing remote) is non-fatal by design.
  if ! git fetch origin "$BASE_BRANCH" --quiet 2>/dev/null; then
    warn "Could not fetch origin/${BASE_BRANCH}"
    return 0
  fi

  # How many commits is HEAD behind the base branch?
  local commits_behind
  commits_behind=$(git rev-list --count "HEAD..origin/${BASE_BRANCH}" 2>/dev/null || echo "0")

  if [[ "$commits_behind" -eq 0 ]]; then
    success "Already up to date with ${BASE_BRANCH}"
    return 0
  fi

  info "Rebasing onto origin/${BASE_BRANCH} ($commits_behind commits behind)..."
  if git rebase "origin/${BASE_BRANCH}" --quiet 2>/dev/null; then
    success "Rebase successful"
    return 0
  fi

  # Rebase hit conflicts — clean up the half-done rebase, then try a merge.
  warn "Rebase conflict detected — aborting rebase"
  git rebase --abort 2>/dev/null || true
  warn "Falling back to merge..."
  if ! git merge "origin/${BASE_BRANCH}" --no-edit --quiet 2>/dev/null; then
    git merge --abort 2>/dev/null || true
    error "Both rebase and merge failed — manual intervention needed"
    return 1
  fi
  success "Merge successful"
}
|
|
505
|
+
|
|
506
|
+
# ─── Main Pipeline Orchestration ───────────────────────────────────
#######################################
# Run every stage listed in $PIPELINE_CONFIG, in order.
# Per stage it honors human skip/message files, intelligence-based stage
# skipping, CI-resume state, approval gates, budget limits, and model routing.
# When both "build" and "test" stages are enabled (and BUILD_TEST_RETRIES > 0)
# they are run together via self_healing_build_test instead of individually.
# Globals (read):  PIPELINE_CONFIG, ARTIFACTS_DIR, SCRIPT_DIR, BUILD_TEST_RETRIES,
#                  SKIP_GATES, IGNORE_BUDGET, ISSUE_NUMBER, PROJECT_ROOT, color vars
# Globals (written): CURRENT_STAGE_ID, CLAUDE_MODEL (exported),
#                  PIPELINE_STAGES_PASSED, PIPELINE_SLOWEST_STAGE
# Returns: 0 on completion or deliberate pause; 1 on stage failure.
#######################################
run_pipeline() {
  # Rotate event log if needed (standalone mode)
  rotate_event_log_if_needed

  # Initialize audit trail for this pipeline run (only if the audit lib is loaded)
  if type audit_init >/dev/null 2>&1; then
    audit_init || true
  fi

  # One JSON object per line, consumed on fd 3 below so stage commands and
  # gate prompts can still read stdin.
  local stages
  stages=$(jq -c '.stages[]' "$PIPELINE_CONFIG" 2>/dev/null)

  local stage_count enabled_count
  # NOTE(review): stage_count is assigned but not referenced later in this
  # function — possibly leftover; confirm before removing.
  stage_count=$(jq '.stages | length' "$PIPELINE_CONFIG" 2>/dev/null)
  enabled_count=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG" 2>/dev/null)
  local completed=0

  # Check which stages are enabled to determine if we use the self-healing loop
  local build_enabled test_enabled
  build_enabled=$(jq -r '.stages[] | select(.id == "build") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null)
  test_enabled=$(jq -r '.stages[] | select(.id == "test") | .enabled' "$PIPELINE_CONFIG" 2>/dev/null)
  local use_self_healing=false
  if [[ "$build_enabled" == "true" && "$test_enabled" == "true" && "$BUILD_TEST_RETRIES" -gt 0 ]]; then
    use_self_healing=true
  fi

  # Read stage definitions on fd 3 (see "done 3<<<" below).
  while IFS= read -r -u 3 stage; do
    local id enabled gate
    id=$(echo "$stage" | jq -r '.id' 2>/dev/null)
    enabled=$(echo "$stage" | jq -r '.enabled' 2>/dev/null)
    gate=$(echo "$stage" | jq -r '.gate' 2>/dev/null)

    CURRENT_STAGE_ID="$id"

    # Human intervention: check for skip-stage directive
    if [[ -f "$ARTIFACTS_DIR/skip-stage.txt" ]]; then
      local skip_list
      skip_list="$(cat "$ARTIFACTS_DIR/skip-stage.txt" 2>/dev/null || true)"
      if echo "$skip_list" | grep -qx "$id" 2>/dev/null; then
        info "Stage ${BOLD}${id}${RESET} skipped by human directive"
        emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=human_skip"
        # Remove this stage from the skip file
        local tmp_skip
        tmp_skip="$(mktemp)" || { warn "mktemp failed"; continue; }
        # shellcheck disable=SC2064 # intentional expansion at definition time
        # NOTE(review): a RETURN trap set inside the loop fires only when
        # run_pipeline returns and each iteration overwrites it; here it is
        # just a safety net since mv consumes the temp file — confirm intent.
        trap "rm -f '$tmp_skip'" RETURN
        grep -vx "$id" "$ARTIFACTS_DIR/skip-stage.txt" > "$tmp_skip" 2>/dev/null || true
        mv "$tmp_skip" "$ARTIFACTS_DIR/skip-stage.txt"
        continue
      fi
    fi

    # Human intervention: check for human message (displayed once, then removed)
    if [[ -f "$ARTIFACTS_DIR/human-message.txt" ]]; then
      local human_msg
      human_msg="$(cat "$ARTIFACTS_DIR/human-message.txt" 2>/dev/null || true)"
      if [[ -n "$human_msg" ]]; then
        echo ""
        echo -e " ${PURPLE}${BOLD}💬 Human message:${RESET} $human_msg"
        emit_event "pipeline.human_message" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "message=$human_msg"
        rm -f "$ARTIFACTS_DIR/human-message.txt"
      fi
    fi

    if [[ "$enabled" != "true" ]]; then
      echo -e " ${DIM}○ ${id} — skipped (disabled)${RESET}"
      continue
    fi

    # Intelligence: evaluate whether to skip this stage (after intake, which populates ISSUE_LABELS)
    if [[ "$id" != "intake" ]] && type pipeline_should_skip_stage >/dev/null 2>&1; then
      local skip_reason=""
      skip_reason=$(pipeline_should_skip_stage "$id" 2>/dev/null) || true
      if [[ -n "$skip_reason" ]]; then
        echo -e " ${DIM}○ ${id} — skipped (intelligence: ${skip_reason})${RESET}"
        set_stage_status "$id" "complete"
        completed=$((completed + 1))
        continue
      fi
    fi

    # Resume support: stages already marked complete in state are not re-run.
    local stage_status
    stage_status=$(get_stage_status "$id")
    if [[ "$stage_status" == "complete" ]]; then
      echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— already complete${RESET}"
      completed=$((completed + 1))
      continue
    fi

    # CI resume: skip stages marked as completed from previous run
    # (COMPLETED_STAGES is a comma-separated list of stage ids)
    if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "$id"; then
      # Verify artifacts survived the merge — regenerate if missing
      if verify_stage_artifacts "$id"; then
        echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— skipped (CI resume)${RESET}"
        set_stage_status "$id" "complete"
        completed=$((completed + 1))
        emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
        continue
      else
        warn "Stage $id marked complete but artifacts missing — regenerating"
        emit_event "stage.artifact_miss" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
      fi
    fi

    # Self-healing build→test loop: when we hit build, run both together
    if [[ "$id" == "build" && "$use_self_healing" == "true" ]]; then
      # TDD: generate tests before build when enabled
      if [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
        stage_test_first || true
      fi
      # Gate check for build
      local build_gate
      build_gate=$(echo "$stage" | jq -r '.gate' 2>/dev/null)
      if [[ "$build_gate" == "approve" && "$SKIP_GATES" != "true" ]]; then
        show_stage_preview "build"
        local answer=""
        # Only prompt when stdin is a TTY; otherwise answer stays empty (auto-approve).
        if [[ -t 0 ]]; then
          read -rp " Proceed with build+test (self-healing)? [Y/n] " answer || true
        fi
        if [[ "$answer" =~ ^[Nn] ]]; then
          update_status "paused" "build"
          info "Pipeline paused. Resume with: ${DIM}shipwright pipeline resume${RESET}"
          return 0
        fi
      fi

      if self_healing_build_test; then
        completed=$((completed + 2)) # Both build and test

        # Intelligence: reassess complexity after build+test
        local reassessment
        reassessment=$(pipeline_reassess_complexity 2>/dev/null) || true
        if [[ -n "$reassessment" && "$reassessment" != "as_expected" ]]; then
          info "Complexity reassessment: ${reassessment}"
        fi
      else
        update_status "failed" "test"
        error "Pipeline failed: build→test self-healing exhausted"
        return 1
      fi
      continue
    fi

    # TDD: generate tests before build when enabled (non-self-healing path)
    if [[ "$id" == "build" && "$use_self_healing" != "true" ]] && [[ "${TDD_ENABLED:-false}" == "true" || "${PIPELINE_TDD:-}" == "true" ]]; then
      stage_test_first || true
    fi

    # Skip test if already handled by self-healing loop
    if [[ "$id" == "test" && "$use_self_healing" == "true" ]]; then
      stage_status=$(get_stage_status "test")
      if [[ "$stage_status" == "complete" ]]; then
        echo -e " ${GREEN}✓ test${RESET} ${DIM}— completed in build→test loop${RESET}"
      fi
      continue
    fi

    # Gate check
    if [[ "$gate" == "approve" && "$SKIP_GATES" != "true" ]]; then
      show_stage_preview "$id"
      local answer=""
      if [[ -t 0 ]]; then
        read -rp " Proceed with ${id}? [Y/n] " answer || true
      else
        # Non-interactive: auto-approve (shouldn't reach here if headless detection works)
        info "Non-interactive mode — auto-approving ${id}"
      fi
      if [[ "$answer" =~ ^[Nn] ]]; then
        update_status "paused" "$id"
        info "Pipeline paused at ${BOLD}$id${RESET}. Resume with: ${DIM}shipwright pipeline resume${RESET}"
        return 0
      fi
    fi

    # Budget enforcement check (skip with --ignore-budget)
    # sw-cost.sh check-budget exit code 2 means "daily budget exceeded".
    if [[ "$IGNORE_BUDGET" != "true" ]] && [[ -x "$SCRIPT_DIR/sw-cost.sh" ]]; then
      local budget_rc=0
      bash "$SCRIPT_DIR/sw-cost.sh" check-budget 2>/dev/null || budget_rc=$?
      if [[ "$budget_rc" -eq 2 ]]; then
        warn "Daily budget exceeded — pausing pipeline before stage ${BOLD}$id${RESET}"
        warn "Resume with --ignore-budget to override, or wait until tomorrow"
        emit_event "pipeline.budget_paused" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
        update_status "paused" "$id"
        return 0
      fi
    fi

    # Intelligence: per-stage model routing (UCB1 when DB has data, else A/B testing)
    local recommended_model="" from_ucb1=false
    if type ucb1_select_model >/dev/null 2>&1; then
      recommended_model=$(ucb1_select_model "$id" 2>/dev/null || echo "")
      [[ -n "$recommended_model" ]] && from_ucb1=true
    fi
    # Fallback recommendation path when UCB1 produced nothing.
    if [[ -z "$recommended_model" ]] && type intelligence_recommend_model >/dev/null 2>&1; then
      local stage_complexity="${INTELLIGENCE_COMPLEXITY:-5}"
      local budget_remaining=""
      if [[ -x "$SCRIPT_DIR/sw-cost.sh" ]]; then
        budget_remaining=$(bash "$SCRIPT_DIR/sw-cost.sh" remaining-budget 2>/dev/null || echo "")
      fi
      local recommended_json
      recommended_json=$(intelligence_recommend_model "$id" "$stage_complexity" "$budget_remaining" 2>/dev/null || echo "")
      recommended_model=$(echo "$recommended_json" | jq -r '.model // empty' 2>/dev/null || echo "")
    fi
    if [[ -n "$recommended_model" && "$recommended_model" != "null" ]]; then
      if [[ "$from_ucb1" == "true" ]]; then
        # UCB1 already balances exploration/exploitation — use directly
        export CLAUDE_MODEL="$recommended_model"
        emit_event "intelligence.model_ucb1" \
          "issue=${ISSUE_NUMBER:-0}" \
          "stage=$id" \
          "model=$recommended_model"
      else
        # A/B testing for intelligence recommendation
        # ab_ratio is a percentage (default 20%), derived from a 0..1 config value.
        local ab_ratio=20
        local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
        if [[ -f "$daemon_cfg" ]]; then
          local cfg_ratio
          cfg_ratio=$(jq -r '.intelligence.ab_test_ratio // 0.2' "$daemon_cfg" 2>/dev/null || echo "0.2")
          ab_ratio=$(awk -v r="$cfg_ratio" 'BEGIN{printf "%d", r * 100}' 2>/dev/null || echo "20")
        fi

        local routing_file="${HOME}/.shipwright/optimization/model-routing.json"
        local use_recommended=false
        local ab_group="control"

        # "Graduated": with >= 50 recorded samples for this stage, always use
        # the recommendation instead of A/B rolling.
        if [[ -f "$routing_file" ]]; then
          local stage_samples total_samples
          stage_samples=$(jq -r --arg s "$id" '.routes[$s].sonnet_samples // .[$s].sonnet_samples // 0' "$routing_file" 2>/dev/null || echo "0")
          total_samples=$(jq -r --arg s "$id" '((.routes[$s].sonnet_samples // .[$s].sonnet_samples // 0) + (.routes[$s].opus_samples // .[$s].opus_samples // 0))' "$routing_file" 2>/dev/null || echo "0")
          if [[ "${total_samples:-0}" -ge 50 ]]; then
            use_recommended=true
            ab_group="graduated"
          fi
        fi

        # Otherwise roll the dice: ab_ratio% of runs join the experiment group.
        if [[ "$use_recommended" != "true" ]]; then
          local roll=$((RANDOM % 100))
          if [[ "$roll" -lt "$ab_ratio" ]]; then
            use_recommended=true
            ab_group="experiment"
          fi
        fi

        if [[ "$use_recommended" == "true" ]]; then
          export CLAUDE_MODEL="$recommended_model"
        else
          export CLAUDE_MODEL="$(_smart_model default sonnet)"
        fi

        emit_event "intelligence.model_ab" \
          "issue=${ISSUE_NUMBER:-0}" \
          "stage=$id" \
          "recommended=$recommended_model" \
          "applied=$CLAUDE_MODEL" \
          "ab_group=$ab_group" \
          "ab_ratio=$ab_ratio"
      fi
    fi

    echo ""
    echo -e "${CYAN}${BOLD}▸ Stage: ${id}${RESET} ${DIM}[$((completed + 1))/${enabled_count}]${RESET}"
    update_status "running" "$id"
    record_stage_start "$id"
    local stage_start_epoch
    stage_start_epoch=$(now_epoch)
    emit_event "stage.started" "issue=${ISSUE_NUMBER:-0}" "stage=$id"

    # Mark GitHub Check Run as in-progress
    if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update >/dev/null 2>&1; then
      gh_checks_stage_update "$id" "in_progress" "" "Stage $id started" 2>/dev/null || true
    fi

    # Audit: stage start
    if type audit_emit >/dev/null 2>&1; then
      audit_emit "stage.start" "stage=$id" || true
    fi

    # Snapshot the model before running, so outcome logging below reports the
    # model actually used even if routing changes it next iteration.
    local stage_model_used="${CLAUDE_MODEL:-${MODEL:-opus}}"
    if run_stage_with_retry "$id"; then
      mark_stage_complete "$id"
      completed=$((completed + 1))
      # Capture project pattern after intake (for memory context in later stages)
      if [[ "$id" == "intake" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
        (cd "$REPO_DIR" && bash "$SCRIPT_DIR/sw-memory.sh" pattern "project" "{}" 2>/dev/null) || true
      fi
      local timing stage_dur_s
      timing=$(get_stage_timing "$id")
      stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
      success "Stage ${BOLD}$id${RESET} complete ${DIM}(${timing})${RESET}"
      emit_event "stage.completed" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "duration_s=$stage_dur_s" "result=success"
      # Audit: stage complete
      if type audit_emit >/dev/null 2>&1; then
        audit_emit "stage.complete" "stage=$id" "verdict=pass" \
          "duration_s=${stage_dur_s:-0}" || true
      fi
      # Emit vitals snapshot on every stage transition (not just build/test)
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "" 2>/dev/null || true
      fi
      # Record model outcome for UCB1 learning
      type record_model_outcome >/dev/null 2>&1 && record_model_outcome "$stage_model_used" "$id" 1 "$stage_dur_s" 0 2>/dev/null || true
      # Broadcast discovery for cross-pipeline learning
      if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
        local _disc_cat _disc_patterns _disc_text
        _disc_cat="$id"
        case "$id" in
          plan) _disc_patterns="*.md"; _disc_text="Plan completed: ${GOAL:-goal}" ;;
          design) _disc_patterns="*.md,*.ts,*.tsx,*.js"; _disc_text="Design completed for ${GOAL:-goal}" ;;
          build) _disc_patterns="src/*,*.ts,*.tsx,*.js"; _disc_text="Build completed" ;;
          test) _disc_patterns="*.test.*,*_test.*"; _disc_text="Tests passed" ;;
          review) _disc_patterns="*.md,*.ts,*.tsx"; _disc_text="Review completed" ;;
          *) _disc_patterns="*"; _disc_text="Stage $id completed" ;;
        esac
        bash "$SCRIPT_DIR/sw-discovery.sh" broadcast "$_disc_cat" "$_disc_patterns" "$_disc_text" "" 2>/dev/null || true
      fi
      # Log model used for prediction feedback
      echo "${id}|${stage_model_used}|true" >> "${ARTIFACTS_DIR}/model-routing.log"
    else
      mark_stage_failed "$id"
      local stage_dur_s
      stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
      error "Pipeline failed at stage: ${BOLD}$id${RESET}"
      update_status "failed" "$id"
      emit_event "stage.failed" \
        "issue=${ISSUE_NUMBER:-0}" \
        "stage=$id" \
        "duration_s=$stage_dur_s" \
        "error=${LAST_STAGE_ERROR:-unknown}" \
        "error_class=${LAST_STAGE_ERROR_CLASS:-unknown}"
      # Audit: stage failed
      if type audit_emit >/dev/null 2>&1; then
        audit_emit "stage.complete" "stage=$id" "verdict=fail" \
          "duration_s=${stage_dur_s:-0}" || true
      fi
      # Emit vitals snapshot on failure too
      if type pipeline_emit_progress_snapshot >/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
        pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "$id" "0" "0" "0" "${LAST_STAGE_ERROR:-unknown}" 2>/dev/null || true
      fi
      # Log model used for prediction feedback
      echo "${id}|${stage_model_used}|false" >> "${ARTIFACTS_DIR}/model-routing.log"
      # Record model outcome for UCB1 learning
      type record_model_outcome >/dev/null 2>&1 && record_model_outcome "$stage_model_used" "$id" 0 "$stage_dur_s" 0 2>/dev/null || true
      # Cancel any remaining in_progress check runs
      pipeline_cancel_check_runs 2>/dev/null || true
      return 1
    fi
  done 3<<< "$stages"

  # Pipeline complete!
  update_status "complete" ""
  PIPELINE_STAGES_PASSED="$completed"
  PIPELINE_SLOWEST_STAGE=""
  if type get_slowest_stage >/dev/null 2>&1; then
    PIPELINE_SLOWEST_STAGE=$(get_slowest_stage 2>/dev/null || true)
  fi
  local total_dur=""
  if [[ -n "$PIPELINE_START_EPOCH" ]]; then
    total_dur=$(format_duration $(( $(now_epoch) - PIPELINE_START_EPOCH )))
  fi

  echo ""
  echo -e "${GREEN}${BOLD}═══════════════════════════════════════════════════════════════════${RESET}"
  success "Pipeline complete! ${completed}/${enabled_count} stages passed in ${total_dur:-unknown}"
  echo -e "${GREEN}${BOLD}═══════════════════════════════════════════════════════════════════${RESET}"

  # Show summary
  echo ""
  if [[ -f "$ARTIFACTS_DIR/pr-url.txt" ]]; then
    echo -e " ${BOLD}PR:${RESET} $(cat "$ARTIFACTS_DIR/pr-url.txt")"
  fi
  echo -e " ${BOLD}Branch:${RESET} $GIT_BRANCH"
  [[ -n "${GITHUB_ISSUE:-}" ]] && echo -e " ${BOLD}Issue:${RESET} $GITHUB_ISSUE"
  echo -e " ${BOLD}Duration:${RESET} $total_dur"
  echo -e " ${BOLD}Artifacts:${RESET} $ARTIFACTS_DIR/"
  echo ""

  # Capture learnings to memory (success or failure)
  if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
    bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
  fi

  # Final GitHub progress update
  if [[ -n "$ISSUE_NUMBER" ]]; then
    local body
    body=$(gh_build_progress_body)
    gh_update_progress "$body"
  fi

  # Post-completion cleanup
  pipeline_post_completion_cleanup
}
|