shipwright-cli 1.9.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117) hide show
  1. package/.claude/hooks/post-tool-use.sh +12 -5
  2. package/README.md +114 -36
  3. package/completions/_shipwright +212 -32
  4. package/completions/shipwright.bash +97 -25
  5. package/docs/strategy/01-market-research.md +619 -0
  6. package/docs/strategy/02-mission-and-brand.md +587 -0
  7. package/docs/strategy/03-gtm-and-roadmap.md +759 -0
  8. package/docs/strategy/QUICK-START.txt +289 -0
  9. package/docs/strategy/README.md +172 -0
  10. package/package.json +4 -2
  11. package/scripts/sw +217 -2
  12. package/scripts/sw-activity.sh +500 -0
  13. package/scripts/sw-adaptive.sh +925 -0
  14. package/scripts/sw-adversarial.sh +1 -1
  15. package/scripts/sw-architecture-enforcer.sh +1 -1
  16. package/scripts/sw-auth.sh +613 -0
  17. package/scripts/sw-autonomous.sh +664 -0
  18. package/scripts/sw-changelog.sh +704 -0
  19. package/scripts/sw-checkpoint.sh +79 -1
  20. package/scripts/sw-ci.sh +602 -0
  21. package/scripts/sw-cleanup.sh +192 -7
  22. package/scripts/sw-code-review.sh +637 -0
  23. package/scripts/sw-connect.sh +1 -1
  24. package/scripts/sw-context.sh +605 -0
  25. package/scripts/sw-cost.sh +1 -1
  26. package/scripts/sw-daemon.sh +812 -138
  27. package/scripts/sw-dashboard.sh +1 -1
  28. package/scripts/sw-db.sh +540 -0
  29. package/scripts/sw-decompose.sh +539 -0
  30. package/scripts/sw-deps.sh +551 -0
  31. package/scripts/sw-developer-simulation.sh +1 -1
  32. package/scripts/sw-discovery.sh +412 -0
  33. package/scripts/sw-docs-agent.sh +539 -0
  34. package/scripts/sw-docs.sh +1 -1
  35. package/scripts/sw-doctor.sh +59 -1
  36. package/scripts/sw-dora.sh +615 -0
  37. package/scripts/sw-durable.sh +710 -0
  38. package/scripts/sw-e2e-orchestrator.sh +535 -0
  39. package/scripts/sw-eventbus.sh +393 -0
  40. package/scripts/sw-feedback.sh +471 -0
  41. package/scripts/sw-fix.sh +1 -1
  42. package/scripts/sw-fleet-discover.sh +567 -0
  43. package/scripts/sw-fleet-viz.sh +404 -0
  44. package/scripts/sw-fleet.sh +8 -1
  45. package/scripts/sw-github-app.sh +596 -0
  46. package/scripts/sw-github-checks.sh +1 -1
  47. package/scripts/sw-github-deploy.sh +1 -1
  48. package/scripts/sw-github-graphql.sh +1 -1
  49. package/scripts/sw-guild.sh +569 -0
  50. package/scripts/sw-heartbeat.sh +1 -1
  51. package/scripts/sw-hygiene.sh +559 -0
  52. package/scripts/sw-incident.sh +617 -0
  53. package/scripts/sw-init.sh +88 -1
  54. package/scripts/sw-instrument.sh +699 -0
  55. package/scripts/sw-intelligence.sh +1 -1
  56. package/scripts/sw-jira.sh +1 -1
  57. package/scripts/sw-launchd.sh +366 -31
  58. package/scripts/sw-linear.sh +1 -1
  59. package/scripts/sw-logs.sh +1 -1
  60. package/scripts/sw-loop.sh +507 -51
  61. package/scripts/sw-memory.sh +198 -3
  62. package/scripts/sw-mission-control.sh +487 -0
  63. package/scripts/sw-model-router.sh +545 -0
  64. package/scripts/sw-otel.sh +596 -0
  65. package/scripts/sw-oversight.sh +689 -0
  66. package/scripts/sw-pipeline-composer.sh +8 -8
  67. package/scripts/sw-pipeline-vitals.sh +1096 -0
  68. package/scripts/sw-pipeline.sh +2451 -180
  69. package/scripts/sw-pm.sh +693 -0
  70. package/scripts/sw-pr-lifecycle.sh +522 -0
  71. package/scripts/sw-predictive.sh +1 -1
  72. package/scripts/sw-prep.sh +1 -1
  73. package/scripts/sw-ps.sh +4 -3
  74. package/scripts/sw-public-dashboard.sh +798 -0
  75. package/scripts/sw-quality.sh +595 -0
  76. package/scripts/sw-reaper.sh +5 -3
  77. package/scripts/sw-recruit.sh +573 -0
  78. package/scripts/sw-regression.sh +642 -0
  79. package/scripts/sw-release-manager.sh +736 -0
  80. package/scripts/sw-release.sh +706 -0
  81. package/scripts/sw-remote.sh +1 -1
  82. package/scripts/sw-replay.sh +520 -0
  83. package/scripts/sw-retro.sh +691 -0
  84. package/scripts/sw-scale.sh +444 -0
  85. package/scripts/sw-security-audit.sh +505 -0
  86. package/scripts/sw-self-optimize.sh +109 -8
  87. package/scripts/sw-session.sh +31 -9
  88. package/scripts/sw-setup.sh +1 -1
  89. package/scripts/sw-standup.sh +712 -0
  90. package/scripts/sw-status.sh +192 -1
  91. package/scripts/sw-strategic.sh +658 -0
  92. package/scripts/sw-stream.sh +450 -0
  93. package/scripts/sw-swarm.sh +583 -0
  94. package/scripts/sw-team-stages.sh +511 -0
  95. package/scripts/sw-templates.sh +1 -1
  96. package/scripts/sw-testgen.sh +515 -0
  97. package/scripts/sw-tmux-pipeline.sh +554 -0
  98. package/scripts/sw-tmux.sh +1 -1
  99. package/scripts/sw-trace.sh +485 -0
  100. package/scripts/sw-tracker-github.sh +188 -0
  101. package/scripts/sw-tracker-jira.sh +172 -0
  102. package/scripts/sw-tracker-linear.sh +251 -0
  103. package/scripts/sw-tracker.sh +117 -2
  104. package/scripts/sw-triage.sh +603 -0
  105. package/scripts/sw-upgrade.sh +1 -1
  106. package/scripts/sw-ux.sh +677 -0
  107. package/scripts/sw-webhook.sh +627 -0
  108. package/scripts/sw-widgets.sh +530 -0
  109. package/scripts/sw-worktree.sh +1 -1
  110. package/templates/pipelines/autonomous.json +8 -1
  111. package/templates/pipelines/cost-aware.json +21 -0
  112. package/templates/pipelines/deployed.json +40 -6
  113. package/templates/pipelines/enterprise.json +16 -2
  114. package/templates/pipelines/fast.json +19 -0
  115. package/templates/pipelines/full.json +16 -2
  116. package/templates/pipelines/hotfix.json +19 -0
  117. package/templates/pipelines/standard.json +19 -0
@@ -6,7 +6,12 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="1.9.0"
9
+ # Allow spawning Claude CLI from within a Claude Code session (daemon, fleet, etc.)
10
+ unset CLAUDECODE 2>/dev/null || true
11
+ # Ignore SIGHUP so tmux attach/detach doesn't kill long-running plan/design/review stages
12
+ trap '' HUP
13
+
14
+ VERSION="2.0.0"
10
15
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
16
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
17
 
@@ -46,6 +51,10 @@ fi
46
51
  if [[ -f "$SCRIPT_DIR/sw-adversarial.sh" ]]; then
47
52
  source "$SCRIPT_DIR/sw-adversarial.sh"
48
53
  fi
54
+ # shellcheck source=sw-pipeline-vitals.sh
55
+ if [[ -f "$SCRIPT_DIR/sw-pipeline-vitals.sh" ]]; then
56
+ source "$SCRIPT_DIR/sw-pipeline-vitals.sh"
57
+ fi
49
58
 
50
59
  # ─── GitHub API Modules (optional) ─────────────────────────────────────────
51
60
  # shellcheck source=sw-github-graphql.sh
@@ -99,6 +108,54 @@ format_duration() {
99
108
  fi
100
109
  }
101
110
 
111
+ _pipeline_compact_goal() {
112
+ local goal="$1"
113
+ local plan_file="${2:-}"
114
+ local design_file="${3:-}"
115
+ local compact="$goal"
116
+
117
+ # Include plan summary (first 20 lines only)
118
+ if [[ -n "$plan_file" && -f "$plan_file" ]]; then
119
+ compact="${compact}
120
+
121
+ ## Plan Summary
122
+ $(head -20 "$plan_file" 2>/dev/null || true)
123
+ [... full plan in .claude/pipeline-artifacts/plan.md]"
124
+ fi
125
+
126
+ # Include design key decisions only (grep for headers)
127
+ if [[ -n "$design_file" && -f "$design_file" ]]; then
128
+ compact="${compact}
129
+
130
+ ## Key Design Decisions
131
+ $(grep -E '^#{1,3} ' "$design_file" 2>/dev/null | head -10 || true)
132
+ [... full design in .claude/pipeline-artifacts/design.md]"
133
+ fi
134
+
135
+ echo "$compact"
136
+ }
137
+
138
+ load_composed_pipeline() {
139
+ local spec_file="$1"
140
+ [[ ! -f "$spec_file" ]] && return 1
141
+
142
+ # Read enabled stages from composed spec
143
+ local composed_stages
144
+ composed_stages=$(jq -r '.stages // [] | .[] | .id' "$spec_file" 2>/dev/null) || return 1
145
+ [[ -z "$composed_stages" ]] && return 1
146
+
147
+ # Override enabled stages
148
+ COMPOSED_STAGES="$composed_stages"
149
+
150
+ # Override per-stage settings
151
+ local build_max
152
+ build_max=$(jq -r '.stages[] | select(.id=="build") | .max_iterations // ""' "$spec_file" 2>/dev/null) || true
153
+ [[ -n "$build_max" && "$build_max" != "null" ]] && COMPOSED_BUILD_ITERATIONS="$build_max"
154
+
155
+ emit_event "pipeline.composed_loaded" "stages=$(echo "$composed_stages" | wc -l | tr -d ' ')"
156
+ return 0
157
+ }
158
+
102
159
  # ─── Structured Event Log ──────────────────────────────────────────────────
103
160
  # Appends JSON events to ~/.shipwright/events.jsonl for metrics/traceability
104
161
 
@@ -159,6 +216,8 @@ DRY_RUN=false
159
216
  IGNORE_BUDGET=false
160
217
  COMPLETED_STAGES=""
161
218
  MAX_ITERATIONS_OVERRIDE=""
219
+ MAX_RESTARTS_OVERRIDE=""
220
+ FAST_TEST_CMD_OVERRIDE=""
162
221
  PR_NUMBER=""
163
222
  AUTO_WORKTREE=false
164
223
  WORKTREE_NAME=""
@@ -222,6 +281,8 @@ show_help() {
222
281
  echo -e " ${DIM}--slack-webhook <url>${RESET} Send notifications to Slack"
223
282
  echo -e " ${DIM}--self-heal <n>${RESET} Build→test retry cycles on failure (default: 2)"
224
283
  echo -e " ${DIM}--max-iterations <n>${RESET} Override max build loop iterations"
284
+ echo -e " ${DIM}--max-restarts <n>${RESET} Max session restarts in build loop"
285
+ echo -e " ${DIM}--fast-test-cmd <cmd>${RESET} Fast/subset test for build loop"
225
286
  echo -e " ${DIM}--completed-stages \"a,b\"${RESET} Skip these stages (CI resume)"
226
287
  echo ""
227
288
  echo -e "${BOLD}STAGES${RESET} ${DIM}(configurable per pipeline template)${RESET}"
@@ -304,6 +365,15 @@ parse_args() {
304
365
  --dry-run) DRY_RUN=true; shift ;;
305
366
  --slack-webhook) SLACK_WEBHOOK="$2"; shift 2 ;;
306
367
  --self-heal) BUILD_TEST_RETRIES="${2:-3}"; shift 2 ;;
368
+ --max-restarts)
369
+ MAX_RESTARTS_OVERRIDE="$2"
370
+ if ! [[ "$MAX_RESTARTS_OVERRIDE" =~ ^[0-9]+$ ]]; then
371
+ error "--max-restarts must be numeric (got: $MAX_RESTARTS_OVERRIDE)"
372
+ exit 1
373
+ fi
374
+ shift 2 ;;
375
+
376
+ --fast-test-cmd) FAST_TEST_CMD_OVERRIDE="$2"; shift 2 ;;
307
377
  --help|-h) show_help; exit 0 ;;
308
378
  *)
309
379
  if [[ -z "$PIPELINE_NAME_ARG" ]]; then
@@ -476,6 +546,9 @@ cleanup_on_exit() {
476
546
  git stash pop --quiet 2>/dev/null || true
477
547
  fi
478
548
 
549
+ # Cancel lingering in_progress GitHub Check Runs
550
+ pipeline_cancel_check_runs 2>/dev/null || true
551
+
479
552
  # Update GitHub
480
553
  if [[ -n "${ISSUE_NUMBER:-}" && "${GH_AVAILABLE:-false}" == "true" ]]; then
481
554
  gh_comment_issue "$ISSUE_NUMBER" "⏸️ **Pipeline interrupted** at stage: ${CURRENT_STAGE_ID:-unknown}" 2>/dev/null || true
@@ -1039,6 +1112,7 @@ LOG_ENTRIES=""
1039
1112
 
1040
1113
  save_artifact() {
1041
1114
  local name="$1" content="$2"
1115
+ mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
1042
1116
  echo "$content" > "$ARTIFACTS_DIR/$name"
1043
1117
  }
1044
1118
 
@@ -1196,6 +1270,80 @@ mark_stage_complete() {
1196
1270
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update &>/dev/null 2>&1; then
1197
1271
  gh_checks_stage_update "$stage_id" "completed" "success" "Stage $stage_id: ${timing}" 2>/dev/null || true
1198
1272
  fi
1273
+
1274
+ # Persist artifacts to feature branch after expensive stages
1275
+ case "$stage_id" in
1276
+ plan) persist_artifacts "plan" "plan.md" "dod.md" "context-bundle.md" ;;
1277
+ design) persist_artifacts "design" "design.md" ;;
1278
+ esac
1279
+ }
1280
+
1281
+ persist_artifacts() {
1282
+ # Commit and push pipeline artifacts to the feature branch mid-pipeline.
1283
+ # Only runs in CI — local runs skip. Non-fatal: logs failure but never crashes.
1284
+ [[ "${CI_MODE:-false}" != "true" ]] && return 0
1285
+ [[ -z "${ISSUE_NUMBER:-}" ]] && return 0
1286
+ [[ -z "${ARTIFACTS_DIR:-}" ]] && return 0
1287
+
1288
+ local stage="${1:-unknown}"
1289
+ shift
1290
+ local files=("$@")
1291
+
1292
+ # Collect files that actually exist
1293
+ local to_add=()
1294
+ for f in "${files[@]}"; do
1295
+ local path="${ARTIFACTS_DIR}/${f}"
1296
+ if [[ -f "$path" && -s "$path" ]]; then
1297
+ to_add+=("$path")
1298
+ fi
1299
+ done
1300
+
1301
+ if [[ ${#to_add[@]} -eq 0 ]]; then
1302
+ warn "persist_artifacts($stage): no artifact files found — skipping"
1303
+ return 0
1304
+ fi
1305
+
1306
+ info "Persisting ${#to_add[@]} artifact(s) after stage ${stage}..."
1307
+
1308
+ (
1309
+ git add "${to_add[@]}" 2>/dev/null || true
1310
+ if ! git diff --cached --quiet 2>/dev/null; then
1311
+ git commit -m "chore: persist ${stage} artifacts for #${ISSUE_NUMBER} [skip ci]" --no-verify 2>/dev/null || true
1312
+ local branch="shipwright/issue-${ISSUE_NUMBER}"
1313
+ git push origin "HEAD:refs/heads/$branch" --force 2>/dev/null || true
1314
+ emit_event "artifacts.persisted" "issue=${ISSUE_NUMBER}" "stage=$stage" "file_count=${#to_add[@]}"
1315
+ fi
1316
+ ) 2>/dev/null || {
1317
+ warn "persist_artifacts($stage): push failed — non-fatal, continuing"
1318
+ emit_event "artifacts.persist_failed" "issue=${ISSUE_NUMBER}" "stage=$stage"
1319
+ }
1320
+
1321
+ return 0
1322
+ }
1323
+
1324
+ verify_stage_artifacts() {
1325
+ # Check that required artifacts exist and are non-empty for a given stage.
1326
+ # Returns 0 if all artifacts are present, 1 if any are missing.
1327
+ local stage_id="$1"
1328
+ [[ -z "${ARTIFACTS_DIR:-}" ]] && return 0
1329
+
1330
+ local required=()
1331
+ case "$stage_id" in
1332
+ plan) required=("plan.md") ;;
1333
+ design) required=("design.md" "plan.md") ;;
1334
+ *) return 0 ;; # No artifact check needed
1335
+ esac
1336
+
1337
+ local missing=0
1338
+ for f in "${required[@]}"; do
1339
+ local path="${ARTIFACTS_DIR}/${f}"
1340
+ if [[ ! -f "$path" || ! -s "$path" ]]; then
1341
+ warn "verify_stage_artifacts($stage_id): missing or empty: $f"
1342
+ missing=1
1343
+ fi
1344
+ done
1345
+
1346
+ return "$missing"
1199
1347
  }
1200
1348
 
1201
1349
  mark_stage_failed() {
@@ -1261,6 +1409,7 @@ initialize_state() {
1261
1409
 
1262
1410
  write_state() {
1263
1411
  [[ -z "${STATE_FILE:-}" || -z "${ARTIFACTS_DIR:-}" ]] && return 0
1412
+ mkdir -p "$(dirname "$STATE_FILE")" 2>/dev/null || true
1264
1413
  local stages_yaml=""
1265
1414
  while IFS=: read -r sid sstatus; do
1266
1415
  [[ -z "$sid" ]] && continue
@@ -1283,28 +1432,31 @@ write_state() {
1283
1432
  stage_progress=$(build_stage_progress)
1284
1433
  fi
1285
1434
 
1286
- cat > "$STATE_FILE" <<EOF
1435
+ cat > "$STATE_FILE" <<'_SW_STATE_END_'
1287
1436
  ---
1288
- pipeline: $PIPELINE_NAME
1289
- goal: "$GOAL"
1290
- status: $PIPELINE_STATUS
1291
- issue: "${GITHUB_ISSUE:-}"
1292
- branch: "${GIT_BRANCH:-}"
1293
- template: "${TASK_TYPE:+$(template_for_type "$TASK_TYPE")}"
1294
- current_stage: $CURRENT_STAGE
1295
- current_stage_description: "${cur_stage_desc}"
1296
- stage_progress: "${stage_progress}"
1297
- started_at: ${STARTED_AT:-$(now_iso)}
1298
- updated_at: $(now_iso)
1299
- elapsed: ${total_dur:-0s}
1300
- pr_number: ${PR_NUMBER:-}
1301
- progress_comment_id: ${PROGRESS_COMMENT_ID:-}
1302
- stages:
1303
- ${stages_yaml}---
1304
-
1305
- ## Log
1306
- $LOG_ENTRIES
1307
- EOF
1437
+ _SW_STATE_END_
1438
+ # Write state with printf to avoid heredoc delimiter injection
1439
+ {
1440
+ printf 'pipeline: %s\n' "$PIPELINE_NAME"
1441
+ printf 'goal: "%s"\n' "$GOAL"
1442
+ printf 'status: %s\n' "$PIPELINE_STATUS"
1443
+ printf 'issue: "%s"\n' "${GITHUB_ISSUE:-}"
1444
+ printf 'branch: "%s"\n' "${GIT_BRANCH:-}"
1445
+ printf 'template: "%s"\n' "${TASK_TYPE:+$(template_for_type "$TASK_TYPE")}"
1446
+ printf 'current_stage: %s\n' "$CURRENT_STAGE"
1447
+ printf 'current_stage_description: "%s"\n' "${cur_stage_desc}"
1448
+ printf 'stage_progress: "%s"\n' "${stage_progress}"
1449
+ printf 'started_at: %s\n' "${STARTED_AT:-$(now_iso)}"
1450
+ printf 'updated_at: %s\n' "$(now_iso)"
1451
+ printf 'elapsed: %s\n' "${total_dur:-0s}"
1452
+ printf 'pr_number: %s\n' "${PR_NUMBER:-}"
1453
+ printf 'progress_comment_id: %s\n' "${PROGRESS_COMMENT_ID:-}"
1454
+ printf 'stages:\n'
1455
+ printf '%s' "${stages_yaml}"
1456
+ printf -- '---\n\n'
1457
+ printf '## Log\n'
1458
+ printf '%s\n' "$LOG_ENTRIES"
1459
+ } >> "$STATE_FILE"
1308
1460
  }
1309
1461
 
1310
1462
  resume_state() {
@@ -1576,6 +1728,12 @@ stage_plan() {
1576
1728
 
1577
1729
  info "Generating implementation plan..."
1578
1730
 
1731
+ # ── Gather context bundle (if context engine available) ──
1732
+ local context_script="${SCRIPT_DIR}/sw-context.sh"
1733
+ if [[ -x "$context_script" ]]; then
1734
+ "$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
1735
+ fi
1736
+
1579
1737
  # Build rich prompt with all available context
1580
1738
  local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.
1581
1739
 
@@ -1591,6 +1749,19 @@ ${ISSUE_BODY}
1591
1749
  "
1592
1750
  fi
1593
1751
 
1752
+ # Inject context bundle from context engine (if available)
1753
+ local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
1754
+ if [[ -f "$_context_bundle" ]]; then
1755
+ local _cb_content
1756
+ _cb_content=$(cat "$_context_bundle" 2>/dev/null | head -100 || true)
1757
+ if [[ -n "$_cb_content" ]]; then
1758
+ plan_prompt="${plan_prompt}
1759
+ ## Pipeline Context
1760
+ ${_cb_content}
1761
+ "
1762
+ fi
1763
+ fi
1764
+
1594
1765
  # Inject intelligence memory context for similar past plans
1595
1766
  if type intelligence_search_memory &>/dev/null 2>&1; then
1596
1767
  local plan_memory
@@ -1693,12 +1864,24 @@ Checklist of completion criteria.
1693
1864
  parse_claude_tokens "$_token_log"
1694
1865
 
1695
1866
  if [[ ! -s "$plan_file" ]]; then
1696
- error "Plan generation failed"
1867
+ error "Plan generation failed — empty output"
1868
+ return 1
1869
+ fi
1870
+
1871
+ # Validate plan content — detect API/CLI errors masquerading as plans
1872
+ local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
1873
+ _plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
1874
+ if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
1875
+ error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
1697
1876
  return 1
1698
1877
  fi
1699
1878
 
1700
1879
  local line_count
1701
1880
  line_count=$(wc -l < "$plan_file" | xargs)
1881
+ if [[ "$line_count" -lt 3 ]]; then
1882
+ error "Plan too short (${line_count} lines) — likely an error, not a real plan"
1883
+ return 1
1884
+ fi
1702
1885
  info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"
1703
1886
 
1704
1887
  # Extract task checklist for GitHub issue and task tracking
@@ -2071,12 +2254,24 @@ Be concrete and specific. Reference actual file paths in the codebase. Consider
2071
2254
  parse_claude_tokens "$_token_log"
2072
2255
 
2073
2256
  if [[ ! -s "$design_file" ]]; then
2074
- error "Design generation failed"
2257
+ error "Design generation failed — empty output"
2258
+ return 1
2259
+ fi
2260
+
2261
+ # Validate design content — detect API/CLI errors masquerading as designs
2262
+ local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
2263
+ _design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
2264
+ if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
2265
+ error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
2075
2266
  return 1
2076
2267
  fi
2077
2268
 
2078
2269
  local line_count
2079
2270
  line_count=$(wc -l < "$design_file" | xargs)
2271
+ if [[ "$line_count" -lt 3 ]]; then
2272
+ error "Design too short (${line_count} lines) — likely an error, not a real design"
2273
+ return 1
2274
+ fi
2080
2275
  info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"
2081
2276
 
2082
2277
  # Extract file lists for build stage awareness
@@ -2127,22 +2322,9 @@ stage_build() {
2127
2322
  memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "build" 2>/dev/null) || true
2128
2323
  fi
2129
2324
 
2130
- # Build enriched goal with full context
2131
- local enriched_goal="$GOAL"
2132
- if [[ -s "$plan_file" ]]; then
2133
- enriched_goal="$GOAL
2134
-
2135
- Implementation plan (follow this exactly):
2136
- $(cat "$plan_file")"
2137
- fi
2138
-
2139
- # Inject approved design document
2140
- if [[ -s "$design_file" ]]; then
2141
- enriched_goal="${enriched_goal}
2142
-
2143
- Follow the approved design document:
2144
- $(cat "$design_file")"
2145
- fi
2325
+ # Build enriched goal with compact context (avoids prompt bloat)
2326
+ local enriched_goal
2327
+ enriched_goal=$(_pipeline_compact_goal "$GOAL" "$plan_file" "$design_file")
2146
2328
 
2147
2329
  # Inject memory context
2148
2330
  if [[ -n "$memory_context" ]]; then
@@ -2263,6 +2445,11 @@ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
2263
2445
  [[ "$quality" == "true" ]] && loop_args+=(--quality-gates)
2264
2446
  fi
2265
2447
 
2448
+ # Session restart capability
2449
+ [[ -n "${MAX_RESTARTS_OVERRIDE:-}" ]] && loop_args+=(--max-restarts "$MAX_RESTARTS_OVERRIDE")
2450
+ # Fast test mode
2451
+ [[ -n "${FAST_TEST_CMD_OVERRIDE:-}" ]] && loop_args+=(--fast-test-cmd "$FAST_TEST_CMD_OVERRIDE")
2452
+
2266
2453
  # Definition of Done: use plan-extracted DoD if available
2267
2454
  [[ -s "$dod_file" ]] && loop_args+=(--definition-of-done "$dod_file")
2268
2455
 
@@ -2279,7 +2466,23 @@ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
2279
2466
  local _token_log="${ARTIFACTS_DIR}/.claude-tokens-build.log"
2280
2467
  export PIPELINE_JOB_ID="${PIPELINE_NAME:-pipeline-$$}"
2281
2468
  sw loop "${loop_args[@]}" < /dev/null 2>"$_token_log" || {
2469
+ local _loop_exit=$?
2282
2470
  parse_claude_tokens "$_token_log"
2471
+
2472
+ # Detect context exhaustion from progress file
2473
+ local _progress_file="${PWD}/.claude/loop-logs/progress.md"
2474
+ if [[ -f "$_progress_file" ]]; then
2475
+ local _prog_tests
2476
+ _prog_tests=$(grep -oE 'Tests passing: (true|false)' "$_progress_file" 2>/dev/null | awk '{print $NF}' || echo "unknown")
2477
+ if [[ "$_prog_tests" != "true" ]]; then
2478
+ warn "Build loop exhausted with failing tests (context exhaustion)"
2479
+ emit_event "pipeline.context_exhaustion" "issue=${ISSUE_NUMBER:-0}" "stage=build"
2480
+ # Write flag for daemon retry logic
2481
+ mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
2482
+ echo "context_exhaustion" > "$ARTIFACTS_DIR/failure-reason.txt" 2>/dev/null || true
2483
+ fi
2484
+ fi
2485
+
2283
2486
  error "Build loop failed"
2284
2487
  return 1
2285
2488
  }
@@ -2392,7 +2595,7 @@ ${log_excerpt}
2392
2595
  # Post test results to GitHub
2393
2596
  if [[ -n "$ISSUE_NUMBER" ]]; then
2394
2597
  local test_summary
2395
- test_summary=$(tail -10 "$test_log")
2598
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
2396
2599
  local cov_line=""
2397
2600
  [[ -n "$coverage" ]] && cov_line="
2398
2601
  **Coverage:** ${coverage}%"
@@ -2406,6 +2609,16 @@ ${test_summary}
2406
2609
  </details>"
2407
2610
  fi
2408
2611
 
2612
+ # Write coverage summary for pre-deploy gate
2613
+ local _cov_pct=0
2614
+ if [[ -f "$ARTIFACTS_DIR/test-results.log" ]]; then
2615
+ _cov_pct=$(grep -oE '[0-9]+%' "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -1 | tr -d '%' || true)
2616
+ _cov_pct="${_cov_pct:-0}"
2617
+ fi
2618
+ local _cov_tmp
2619
+ _cov_tmp=$(mktemp "${ARTIFACTS_DIR}/test-coverage.json.tmp.XXXXXX")
2620
+ printf '{"coverage_pct":%d}' "${_cov_pct:-0}" > "$_cov_tmp" && mv "$_cov_tmp" "$ARTIFACTS_DIR/test-coverage.json" || rm -f "$_cov_tmp"
2621
+
2409
2622
  log_stage "test" "Tests passed${coverage:+ (coverage: ${coverage}%)}"
2410
2623
  }
2411
2624
 
@@ -2788,6 +3001,19 @@ stage_pr() {
2788
3001
  fi
2789
3002
  fi
2790
3003
 
3004
+ # Pre-PR diff gate — verify meaningful code changes exist (not just bookkeeping)
3005
+ local real_changes
3006
+ real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
3007
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
3008
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
3009
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
3010
+ if [[ "${real_changes:-0}" -eq 0 ]]; then
3011
+ error "No meaningful code changes detected — only bookkeeping files modified"
3012
+ error "Refusing to create PR with zero real changes"
3013
+ return 1
3014
+ fi
3015
+ info "Pre-PR diff check: ${real_changes} real files changed"
3016
+
2791
3017
  # Build PR title — prefer GOAL over plan file first line
2792
3018
  # (plan file first line often contains Claude analysis text, not a clean title)
2793
3019
  local pr_title=""
@@ -2799,6 +3025,12 @@ stage_pr() {
2799
3025
  fi
2800
3026
  [[ -z "$pr_title" ]] && pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
2801
3027
 
3028
+ # Sanitize: reject PR titles that look like error messages
3029
+ if echo "$pr_title" | grep -qiE 'Invalid API|API key|authentication_error|rate_limit|CLI error|no useful output'; then
3030
+ warn "PR title looks like an error message: $pr_title"
3031
+ pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
3032
+ fi
3033
+
2802
3034
  # Build comprehensive PR body
2803
3035
  local plan_summary=""
2804
3036
  if [[ -s "$plan_file" ]]; then
@@ -2807,7 +3039,7 @@ stage_pr() {
2807
3039
 
2808
3040
  local test_summary=""
2809
3041
  if [[ -s "$test_log" ]]; then
2810
- test_summary=$(tail -10 "$test_log")
3042
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
2811
3043
  fi
2812
3044
 
2813
3045
  local review_summary=""
@@ -3229,41 +3461,157 @@ stage_deploy() {
3229
3461
  fi
3230
3462
  fi
3231
3463
 
3232
- # Post deploy start to GitHub
3233
- if [[ -n "$ISSUE_NUMBER" ]]; then
3234
- gh_comment_issue "$ISSUE_NUMBER" "🚀 **Deploy started**"
3464
+ # ── Pre-deploy gates ──
3465
+ local pre_deploy_ci
3466
+ pre_deploy_ci=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_ci_status) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true
3467
+
3468
+ if [[ "${pre_deploy_ci:-true}" == "true" && "${NO_GITHUB:-false}" != "true" && -n "${REPO_OWNER:-}" && -n "${REPO_NAME:-}" ]]; then
3469
+ info "Pre-deploy gate: checking CI status..."
3470
+ local ci_failures
3471
+ ci_failures=$(gh api "repos/${REPO_OWNER}/${REPO_NAME}/commits/${GIT_BRANCH:-HEAD}/check-runs" \
3472
+ --jq '[.check_runs[] | select(.conclusion != null and .conclusion != "success" and .conclusion != "skipped")] | length' 2>/dev/null || echo "0")
3473
+ if [[ "${ci_failures:-0}" -gt 0 ]]; then
3474
+ error "Pre-deploy gate FAILED: ${ci_failures} CI check(s) not passing"
3475
+ [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: ${ci_failures} CI checks failing" 2>/dev/null || true
3476
+ return 1
3477
+ fi
3478
+ success "Pre-deploy gate: all CI checks passing"
3235
3479
  fi
3236
3480
 
3237
- if [[ -n "$staging_cmd" ]]; then
3238
- info "Deploying to staging..."
3239
- bash -c "$staging_cmd" > "$ARTIFACTS_DIR/deploy-staging.log" 2>&1 || {
3240
- error "Staging deploy failed"
3241
- [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "❌ Staging deploy failed"
3242
- # Mark GitHub deployment as failed
3243
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
3244
- gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
3245
- fi
3481
+ local pre_deploy_min_cov
3482
+ pre_deploy_min_cov=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_min_coverage) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
3483
+ if [[ -n "${pre_deploy_min_cov:-}" && "${pre_deploy_min_cov}" != "null" && -f "$ARTIFACTS_DIR/test-coverage.json" ]]; then
3484
+ local actual_cov
3485
+ actual_cov=$(jq -r '.coverage_pct // 0' "$ARTIFACTS_DIR/test-coverage.json" 2>/dev/null || echo "0")
3486
+ if [[ "${actual_cov:-0}" -lt "$pre_deploy_min_cov" ]]; then
3487
+ error "Pre-deploy gate FAILED: coverage ${actual_cov}% < required ${pre_deploy_min_cov}%"
3488
+ [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: coverage ${actual_cov}% below minimum ${pre_deploy_min_cov}%" 2>/dev/null || true
3246
3489
  return 1
3247
- }
3248
- success "Staging deploy complete"
3490
+ fi
3491
+ success "Pre-deploy gate: coverage ${actual_cov}% >= ${pre_deploy_min_cov}%"
3249
3492
  fi
3250
3493
 
3251
- if [[ -n "$prod_cmd" ]]; then
3252
- info "Deploying to production..."
3253
- bash -c "$prod_cmd" > "$ARTIFACTS_DIR/deploy-prod.log" 2>&1 || {
3254
- error "Production deploy failed"
3255
- if [[ -n "$rollback_cmd" ]]; then
3256
- warn "Rolling back..."
3257
- bash -c "$rollback_cmd" 2>&1 || error "Rollback also failed!"
3494
+ # Post deploy start to GitHub
3495
+ if [[ -n "$ISSUE_NUMBER" ]]; then
3496
+ gh_comment_issue "$ISSUE_NUMBER" "Deploy started"
3497
+ fi
3498
+
3499
+ # ── Deploy strategy ──
3500
+ local deploy_strategy
3501
+ deploy_strategy=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.deploy_strategy) // "direct"' "$PIPELINE_CONFIG" 2>/dev/null) || true
3502
+ [[ "$deploy_strategy" == "null" ]] && deploy_strategy="direct"
3503
+
3504
+ local canary_cmd promote_cmd switch_cmd health_url deploy_log
3505
+ canary_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.canary_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
3506
+ [[ "$canary_cmd" == "null" ]] && canary_cmd=""
3507
+ promote_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.promote_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
3508
+ [[ "$promote_cmd" == "null" ]] && promote_cmd=""
3509
+ switch_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.switch_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
3510
+ [[ "$switch_cmd" == "null" ]] && switch_cmd=""
3511
+ health_url=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
3512
+ [[ "$health_url" == "null" ]] && health_url=""
3513
+ deploy_log="$ARTIFACTS_DIR/deploy.log"
3514
+
3515
+ case "$deploy_strategy" in
3516
+ canary)
3517
+ info "Canary deployment strategy..."
3518
+ if [[ -z "$canary_cmd" ]]; then
3519
+ warn "No canary_cmd configured — falling back to direct"
3520
+ deploy_strategy="direct"
3521
+ else
3522
+ info "Deploying canary..."
3523
+ bash -c "$canary_cmd" >> "$deploy_log" 2>&1 || { error "Canary deploy failed"; return 1; }
3524
+
3525
+ if [[ -n "$health_url" ]]; then
3526
+ local canary_healthy=0
3527
+ local _chk
3528
+ for _chk in 1 2 3; do
3529
+ sleep 10
3530
+ local _status
3531
+ _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
3532
+ if [[ "$_status" -ge 200 && "$_status" -lt 400 ]]; then
3533
+ canary_healthy=$((canary_healthy + 1))
3534
+ fi
3535
+ done
3536
+ if [[ "$canary_healthy" -lt 2 ]]; then
3537
+ error "Canary health check failed ($canary_healthy/3 passed) — rolling back"
3538
+ [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" 2>/dev/null || true
3539
+ return 1
3540
+ fi
3541
+ success "Canary healthy ($canary_healthy/3 checks passed)"
3542
+ fi
3543
+
3544
+ info "Promoting canary to full deployment..."
3545
+ if [[ -n "$promote_cmd" ]]; then
3546
+ bash -c "$promote_cmd" >> "$deploy_log" 2>&1 || { error "Promote failed"; return 1; }
3547
+ fi
3548
+ success "Canary promoted"
3258
3549
  fi
3259
- [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "❌ Production deploy failed — rollback ${rollback_cmd:+attempted}"
3260
- # Mark GitHub deployment as failed
3261
- if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
3262
- gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
3550
+ ;;
3551
+ blue-green)
3552
+ info "Blue-green deployment strategy..."
3553
+ if [[ -z "$staging_cmd" || -z "$switch_cmd" ]]; then
3554
+ warn "Blue-green requires staging_cmd + switch_cmd — falling back to direct"
3555
+ deploy_strategy="direct"
3556
+ else
3557
+ info "Deploying to inactive environment..."
3558
+ bash -c "$staging_cmd" >> "$deploy_log" 2>&1 || { error "Blue-green staging failed"; return 1; }
3559
+
3560
+ if [[ -n "$health_url" ]]; then
3561
+ local bg_healthy=0
3562
+ local _chk
3563
+ for _chk in 1 2 3; do
3564
+ sleep 5
3565
+ local _status
3566
+ _status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
3567
+ [[ "$_status" -ge 200 && "$_status" -lt 400 ]] && bg_healthy=$((bg_healthy + 1))
3568
+ done
3569
+ if [[ "$bg_healthy" -lt 2 ]]; then
3570
+ error "Blue-green health check failed — not switching"
3571
+ return 1
3572
+ fi
3573
+ fi
3574
+
3575
+ info "Switching traffic..."
3576
+ bash -c "$switch_cmd" >> "$deploy_log" 2>&1 || { error "Traffic switch failed"; return 1; }
3577
+ success "Blue-green switch complete"
3263
3578
  fi
3264
- return 1
3265
- }
3266
- success "Production deploy complete"
3579
+ ;;
3580
+ esac
3581
+
3582
+ # ── Direct deployment (default or fallback) ──
3583
+ if [[ "$deploy_strategy" == "direct" ]]; then
3584
+ if [[ -n "$staging_cmd" ]]; then
3585
+ info "Deploying to staging..."
3586
+ bash -c "$staging_cmd" > "$ARTIFACTS_DIR/deploy-staging.log" 2>&1 || {
3587
+ error "Staging deploy failed"
3588
+ [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Staging deploy failed"
3589
+ # Mark GitHub deployment as failed
3590
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
3591
+ gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
3592
+ fi
3593
+ return 1
3594
+ }
3595
+ success "Staging deploy complete"
3596
+ fi
3597
+
3598
+ if [[ -n "$prod_cmd" ]]; then
3599
+ info "Deploying to production..."
3600
+ bash -c "$prod_cmd" > "$ARTIFACTS_DIR/deploy-prod.log" 2>&1 || {
3601
+ error "Production deploy failed"
3602
+ if [[ -n "$rollback_cmd" ]]; then
3603
+ warn "Rolling back..."
3604
+ bash -c "$rollback_cmd" 2>&1 || error "Rollback also failed!"
3605
+ fi
3606
+ [[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Production deploy failed — rollback ${rollback_cmd:+attempted}"
3607
+ # Mark GitHub deployment as failed
3608
+ if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
3609
+ gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
3610
+ fi
3611
+ return 1
3612
+ }
3613
+ success "Production deploy complete"
3614
+ fi
3267
3615
  fi
3268
3616
 
3269
3617
  if [[ -n "$ISSUE_NUMBER" ]]; then
@@ -4449,127 +4797,1512 @@ run_dod_audit() {
4449
4797
  return 0
4450
4798
  }
4451
4799
 
4452
- compound_rebuild_with_feedback() {
4453
- local feedback_file="$ARTIFACTS_DIR/quality-feedback.md"
4800
+ # ─── Intelligent Pipeline Orchestration ──────────────────────────────────────
4801
+ # AGI-like decision making: skip, classify, adapt, reassess, backtrack
4802
+
4803
+ # Global state for intelligence features
4804
+ PIPELINE_BACKTRACK_COUNT="${PIPELINE_BACKTRACK_COUNT:-0}"
4805
+ PIPELINE_MAX_BACKTRACKS=2
4806
+ PIPELINE_ADAPTIVE_COMPLEXITY=""
4807
+
4808
+ # ──────────────────────────────────────────────────────────────────────────────
4809
+ # 1. Intelligent Stage Skipping
4810
+ # Evaluates whether a stage should be skipped based on triage score, complexity,
4811
+ # issue labels, and diff size. Called before each stage in run_pipeline().
4812
+ # Returns 0 if the stage SHOULD be skipped, 1 if it should run.
4813
+ # ──────────────────────────────────────────────────────────────────────────────
4814
+ pipeline_should_skip_stage() {
4815
+ local stage_id="$1"
4816
+ local reason=""
4454
4817
 
4455
- # Collect all findings
4456
- {
4457
- echo "# Quality Feedback — Issues to Fix"
4458
- echo ""
4459
- if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
4460
- echo "## Adversarial Review Findings"
4461
- cat "$ARTIFACTS_DIR/adversarial-review.md"
4462
- echo ""
4463
- fi
4464
- if [[ -f "$ARTIFACTS_DIR/negative-review.md" ]]; then
4465
- echo "## Negative Prompting Concerns"
4466
- cat "$ARTIFACTS_DIR/negative-review.md"
4467
- echo ""
4468
- fi
4469
- if [[ -f "$ARTIFACTS_DIR/dod-audit.md" ]]; then
4470
- echo "## DoD Audit Failures"
4471
- grep "❌" "$ARTIFACTS_DIR/dod-audit.md" 2>/dev/null || true
4472
- echo ""
4473
- fi
4474
- if [[ -f "$ARTIFACTS_DIR/security-audit.log" ]] && grep -qiE 'critical|high' "$ARTIFACTS_DIR/security-audit.log" 2>/dev/null; then
4475
- echo "## Security Audit Findings"
4476
- cat "$ARTIFACTS_DIR/security-audit.log"
4477
- echo ""
4478
- fi
4479
- if [[ -f "$ARTIFACTS_DIR/api-compat.log" ]] && grep -qi 'BREAKING' "$ARTIFACTS_DIR/api-compat.log" 2>/dev/null; then
4480
- echo "## API Breaking Changes"
4481
- cat "$ARTIFACTS_DIR/api-compat.log"
4482
- echo ""
4483
- fi
4484
- } > "$feedback_file"
4818
+ # Never skip intake or build — they're always required
4819
+ case "$stage_id" in
4820
+ intake|build|test|pr|merge) return 1 ;;
4821
+ esac
4485
4822
 
4486
- # Validate feedback file has actual content
4487
- if [[ ! -s "$feedback_file" ]]; then
4488
- warn "No quality feedback collected skipping rebuild"
4489
- return 1
4823
+ # ── Signal 1: Triage score (from intelligence analysis) ──
4824
+ local triage_score="${INTELLIGENCE_COMPLEXITY:-0}"
4825
+ # Convert: high triage score (simple issue) means skip more stages
4826
+ # INTELLIGENCE_COMPLEXITY is 1-10 (1=simple, 10=complex)
4827
+ # Score >= 70 in daemon means simple → complexity 1-3
4828
+ local complexity="${INTELLIGENCE_COMPLEXITY:-5}"
4829
+
4830
+ # ── Signal 2: Issue labels ──
4831
+ local labels="${ISSUE_LABELS:-}"
4832
+
4833
+ # Documentation issues: skip test, review, compound_quality
4834
+ if echo ",$labels," | grep -qiE ',documentation,|,docs,|,typo,'; then
4835
+ case "$stage_id" in
4836
+ test|review|compound_quality)
4837
+ reason="label:documentation"
4838
+ ;;
4839
+ esac
4490
4840
  fi
4491
4841
 
4492
- # Reset build/test stages
4493
- set_stage_status "build" "pending"
4494
- set_stage_status "test" "pending"
4495
- set_stage_status "review" "pending"
4842
+ # Hotfix issues: skip plan, design, compound_quality
4843
+ if echo ",$labels," | grep -qiE ',hotfix,|,urgent,|,p0,'; then
4844
+ case "$stage_id" in
4845
+ plan|design|compound_quality)
4846
+ reason="label:hotfix"
4847
+ ;;
4848
+ esac
4849
+ fi
4496
4850
 
4497
- # Augment GOAL with quality feedback
4498
- local original_goal="$GOAL"
4499
- local feedback_content
4500
- feedback_content=$(cat "$feedback_file")
4501
- GOAL="$GOAL
4851
+ # ── Signal 3: Intelligence complexity ──
4852
+ if [[ -z "$reason" && "$complexity" -gt 0 ]]; then
4853
+ # Complexity 1-2: very simple → skip design, compound_quality, review
4854
+ if [[ "$complexity" -le 2 ]]; then
4855
+ case "$stage_id" in
4856
+ design|compound_quality|review)
4857
+ reason="complexity:${complexity}/10"
4858
+ ;;
4859
+ esac
4860
+ # Complexity 1-3: simple → skip design
4861
+ elif [[ "$complexity" -le 3 ]]; then
4862
+ case "$stage_id" in
4863
+ design)
4864
+ reason="complexity:${complexity}/10"
4865
+ ;;
4866
+ esac
4867
+ fi
4868
+ fi
4502
4869
 
4503
- IMPORTANT Compound quality review found issues. Fix ALL of these:
4504
- $feedback_content
4870
+ # ── Signal 4: Diff size (after build) ──
4871
+ if [[ -z "$reason" && "$stage_id" == "compound_quality" ]]; then
4872
+ local diff_lines=0
4873
+ local _skip_stat
4874
+ _skip_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
4875
+ if [[ -n "${_skip_stat:-}" ]]; then
4876
+ local _s_ins _s_del
4877
+ _s_ins=$(echo "$_skip_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
4878
+ _s_del=$(echo "$_skip_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
4879
+ diff_lines=$(( ${_s_ins:-0} + ${_s_del:-0} ))
4880
+ fi
4881
+ diff_lines="${diff_lines:-0}"
4882
+ if [[ "$diff_lines" -gt 0 && "$diff_lines" -lt 20 ]]; then
4883
+ reason="diff_size:${diff_lines}_lines"
4884
+ fi
4885
+ fi
4505
4886
 
4506
- Fix every issue listed above while keeping all existing functionality working."
4887
+ # ── Signal 5: Mid-pipeline reassessment override ──
4888
+ if [[ -z "$reason" && -f "$ARTIFACTS_DIR/reassessment.json" ]]; then
4889
+ local skip_stages
4890
+ skip_stages=$(jq -r '.skip_stages // [] | .[]' "$ARTIFACTS_DIR/reassessment.json" 2>/dev/null || true)
4891
+ if echo "$skip_stages" | grep -qx "$stage_id" 2>/dev/null; then
4892
+ reason="reassessment:simpler_than_expected"
4893
+ fi
4894
+ fi
4507
4895
 
4508
- # Re-run self-healing build→test
4509
- info "Rebuilding with quality feedback..."
4510
- if self_healing_build_test; then
4511
- GOAL="$original_goal"
4896
+ if [[ -n "$reason" ]]; then
4897
+ emit_event "intelligence.stage_skipped" \
4898
+ "issue=${ISSUE_NUMBER:-0}" \
4899
+ "stage=$stage_id" \
4900
+ "reason=$reason" \
4901
+ "complexity=${complexity}" \
4902
+ "labels=${labels}"
4903
+ echo "$reason"
4512
4904
  return 0
4513
- else
4514
- GOAL="$original_goal"
4515
- return 1
4516
4905
  fi
4906
+
4907
+ return 1
4517
4908
  }
4518
4909
 
4519
- stage_compound_quality() {
4520
- CURRENT_STAGE_ID="compound_quality"
4910
+ # ──────────────────────────────────────────────────────────────────────────────
4911
+ # 2. Smart Finding Classification & Routing
4912
+ # Parses compound quality findings and classifies each as:
4913
+ # architecture, security, correctness, style
4914
+ # Returns JSON with classified findings and routing recommendations.
4915
+ # ──────────────────────────────────────────────────────────────────────────────
4916
+ classify_quality_findings() {
4917
+ local findings_dir="$ARTIFACTS_DIR"
4918
+ local result_file="$ARTIFACTS_DIR/classified-findings.json"
4521
4919
 
4522
- # Read config
4523
- local max_cycles adversarial_enabled negative_enabled e2e_enabled dod_enabled strict_quality
4524
- max_cycles=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.max_cycles) // 3' "$PIPELINE_CONFIG" 2>/dev/null) || true
4525
- [[ -z "$max_cycles" || "$max_cycles" == "null" ]] && max_cycles=3
4526
- adversarial_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.adversarial) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
4527
- negative_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.negative) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
4528
- e2e_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.e2e) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
4529
- dod_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.dod_audit) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
4530
- strict_quality=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.strict_quality) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
4531
- [[ -z "$strict_quality" || "$strict_quality" == "null" ]] && strict_quality="false"
4920
+ # Initialize counters
4921
+ local arch_count=0 security_count=0 correctness_count=0 performance_count=0 testing_count=0 style_count=0
4532
4922
 
4533
- # Convergence tracking
4534
- local prev_issue_count=-1
4923
+ # Start building JSON array
4924
+ local findings_json="[]"
4535
4925
 
4536
- local cycle=0
4537
- while [[ "$cycle" -lt "$max_cycles" ]]; do
4538
- cycle=$((cycle + 1))
4539
- local all_passed=true
4926
+ # ── Parse adversarial review ──
4927
+ if [[ -f "$findings_dir/adversarial-review.md" ]]; then
4928
+ local adv_content
4929
+ adv_content=$(cat "$findings_dir/adversarial-review.md" 2>/dev/null || true)
4540
4930
 
4541
- echo ""
4542
- echo -e "${PURPLE}${BOLD}━━━ Compound Quality — Cycle ${cycle}/${max_cycles} ━━━${RESET}"
4931
+ # Architecture findings: dependency violations, layer breaches, circular refs
4932
+ local arch_findings
4933
+ arch_findings=$(echo "$adv_content" | grep -ciE 'architect|layer.*violation|circular.*depend|coupling|abstraction|design.*flaw|separation.*concern' 2>/dev/null || true)
4934
+ arch_count=$((arch_count + ${arch_findings:-0}))
4543
4935
 
4544
- if [[ -n "$ISSUE_NUMBER" ]]; then
4545
- gh_comment_issue "$ISSUE_NUMBER" "🔬 **Compound quality** — cycle ${cycle}/${max_cycles}" 2>/dev/null || true
4936
+ # Security findings
4937
+ local sec_findings
4938
+ sec_findings=$(echo "$adv_content" | grep -ciE 'security|vulnerab|injection|XSS|CSRF|auth.*bypass|privilege|sanitiz|escap' 2>/dev/null || true)
4939
+ security_count=$((security_count + ${sec_findings:-0}))
4940
+
4941
+ # Correctness findings: bugs, logic errors, edge cases
4942
+ local corr_findings
4943
+ corr_findings=$(echo "$adv_content" | grep -ciE '\*\*\[?(Critical|Bug|Error|critical|high)\]?\*\*|race.*condition|null.*pointer|off.*by.*one|edge.*case|undefined.*behav' 2>/dev/null || true)
4944
+ correctness_count=$((correctness_count + ${corr_findings:-0}))
4945
+
4946
+ # Performance findings
4947
+ local perf_findings
4948
+ perf_findings=$(echo "$adv_content" | grep -ciE 'latency|slow|memory leak|O\(n|N\+1|cache miss|performance|bottleneck|throughput' 2>/dev/null || true)
4949
+ performance_count=$((performance_count + ${perf_findings:-0}))
4950
+
4951
+ # Testing findings
4952
+ local test_findings
4953
+ test_findings=$(echo "$adv_content" | grep -ciE 'untested|missing test|no coverage|flaky|test gap|test missing|coverage gap' 2>/dev/null || true)
4954
+ testing_count=$((testing_count + ${test_findings:-0}))
4955
+
4956
+ # Style findings
4957
+ local style_findings
4958
+ style_findings=$(echo "$adv_content" | grep -ciE 'naming|convention|format|style|readabil|inconsisten|whitespace|comment' 2>/dev/null || true)
4959
+ style_count=$((style_count + ${style_findings:-0}))
4960
+ fi
4961
+
4962
+ # ── Parse architecture validation ──
4963
+ if [[ -f "$findings_dir/compound-architecture-validation.json" ]]; then
4964
+ local arch_json_count
4965
+ arch_json_count=$(jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' "$findings_dir/compound-architecture-validation.json" 2>/dev/null || echo "0")
4966
+ arch_count=$((arch_count + ${arch_json_count:-0}))
4967
+ fi
4968
+
4969
+ # ── Parse security audit ──
4970
+ if [[ -f "$findings_dir/security-audit.log" ]]; then
4971
+ local sec_audit
4972
+ sec_audit=$(grep -ciE 'critical|high' "$findings_dir/security-audit.log" 2>/dev/null || true)
4973
+ security_count=$((security_count + ${sec_audit:-0}))
4974
+ fi
4975
+
4976
+ # ── Parse negative review ──
4977
+ if [[ -f "$findings_dir/negative-review.md" ]]; then
4978
+ local neg_corr
4979
+ neg_corr=$(grep -ciE '\[Critical\]|\[High\]' "$findings_dir/negative-review.md" 2>/dev/null || true)
4980
+ correctness_count=$((correctness_count + ${neg_corr:-0}))
4981
+ fi
4982
+
4983
+ # ── Determine routing ──
4984
+ # Priority order: security > architecture > correctness > performance > testing > style
4985
+ local route="correctness" # default
4986
+ local needs_backtrack=false
4987
+ local priority_findings=""
4988
+
4989
+ if [[ "$security_count" -gt 0 ]]; then
4990
+ route="security"
4991
+ priority_findings="security:${security_count}"
4992
+ fi
4993
+
4994
+ if [[ "$arch_count" -gt 0 ]]; then
4995
+ if [[ "$route" == "correctness" ]]; then
4996
+ route="architecture"
4997
+ needs_backtrack=true
4998
+ fi
4999
+ priority_findings="${priority_findings:+${priority_findings},}architecture:${arch_count}"
5000
+ fi
5001
+
5002
+ if [[ "$correctness_count" -gt 0 ]]; then
5003
+ priority_findings="${priority_findings:+${priority_findings},}correctness:${correctness_count}"
5004
+ fi
5005
+
5006
+ if [[ "$performance_count" -gt 0 ]]; then
5007
+ if [[ "$route" == "correctness" && "$correctness_count" -eq 0 ]]; then
5008
+ route="performance"
5009
+ fi
5010
+ priority_findings="${priority_findings:+${priority_findings},}performance:${performance_count}"
5011
+ fi
5012
+
5013
+ if [[ "$testing_count" -gt 0 ]]; then
5014
+ if [[ "$route" == "correctness" && "$correctness_count" -eq 0 && "$performance_count" -eq 0 ]]; then
5015
+ route="testing"
4546
5016
  fi
5017
+ priority_findings="${priority_findings:+${priority_findings},}testing:${testing_count}"
5018
+ fi
4547
5019
 
4548
- # 1. Adversarial Review
4549
- if [[ "$adversarial_enabled" == "true" ]]; then
4550
- echo ""
4551
- info "Running adversarial review..."
4552
- if ! run_adversarial_review; then
4553
- all_passed=false
5020
+ # Style findings don't affect routing or count toward failure threshold
5021
+ local total_blocking=$((arch_count + security_count + correctness_count + performance_count + testing_count))
5022
+
5023
+ # Write classified findings
5024
+ local tmp_findings
5025
+ tmp_findings="$(mktemp)"
5026
+ jq -n \
5027
+ --argjson arch "$arch_count" \
5028
+ --argjson security "$security_count" \
5029
+ --argjson correctness "$correctness_count" \
5030
+ --argjson performance "$performance_count" \
5031
+ --argjson testing "$testing_count" \
5032
+ --argjson style "$style_count" \
5033
+ --argjson total_blocking "$total_blocking" \
5034
+ --arg route "$route" \
5035
+ --argjson needs_backtrack "$needs_backtrack" \
5036
+ --arg priority "$priority_findings" \
5037
+ '{
5038
+ architecture: $arch,
5039
+ security: $security,
5040
+ correctness: $correctness,
5041
+ performance: $performance,
5042
+ testing: $testing,
5043
+ style: $style,
5044
+ total_blocking: $total_blocking,
5045
+ route: $route,
5046
+ needs_backtrack: $needs_backtrack,
5047
+ priority_findings: $priority
5048
+ }' > "$tmp_findings" 2>/dev/null && mv "$tmp_findings" "$result_file" || rm -f "$tmp_findings"
5049
+
5050
+ emit_event "intelligence.findings_classified" \
5051
+ "issue=${ISSUE_NUMBER:-0}" \
5052
+ "architecture=$arch_count" \
5053
+ "security=$security_count" \
5054
+ "correctness=$correctness_count" \
5055
+ "performance=$performance_count" \
5056
+ "testing=$testing_count" \
5057
+ "style=$style_count" \
5058
+ "route=$route" \
5059
+ "needs_backtrack=$needs_backtrack"
5060
+
5061
+ echo "$route"
5062
+ }
5063
+
5064
+ # ──────────────────────────────────────────────────────────────────────────────
5065
+ # 3. Adaptive Cycle Limits
5066
+ # Replaces hardcoded max_cycles with convergence-driven limits.
5067
+ # Takes the base limit, returns an adjusted limit based on:
5068
+ # - Learned iteration model
5069
+ # - Convergence/divergence signals
5070
+ # - Budget constraints
5071
+ # - Hard ceiling (2x template max)
5072
+ # ──────────────────────────────────────────────────────────────────────────────
5073
+ pipeline_adaptive_cycles() {
5074
+ local base_limit="$1"
5075
+ local context="${2:-compound_quality}" # compound_quality or build_test
5076
+ local current_issue_count="${3:-0}"
5077
+ local prev_issue_count="${4:--1}"
5078
+
5079
+ local adjusted="$base_limit"
5080
+ local hard_ceiling=$((base_limit * 2))
5081
+
5082
+ # ── Learned iteration model ──
5083
+ local model_file="${HOME}/.shipwright/optimization/iteration-model.json"
5084
+ if [[ -f "$model_file" ]]; then
5085
+ local learned
5086
+ learned=$(jq -r --arg ctx "$context" '.[$ctx].recommended_cycles // 0' "$model_file" 2>/dev/null || echo "0")
5087
+ if [[ "$learned" -gt 0 && "$learned" -le "$hard_ceiling" ]]; then
5088
+ adjusted="$learned"
5089
+ fi
5090
+ fi
5091
+
5092
+ # ── Convergence acceleration ──
5093
+ # If issue count drops >50% per cycle, extend limit by 1 (we're making progress)
5094
+ if [[ "$prev_issue_count" -gt 0 && "$current_issue_count" -ge 0 ]]; then
5095
+ local half_prev=$((prev_issue_count / 2))
5096
+ if [[ "$current_issue_count" -le "$half_prev" && "$current_issue_count" -gt 0 ]]; then
5097
+ # Rapid convergence — extend by 1
5098
+ local new_limit=$((adjusted + 1))
5099
+ if [[ "$new_limit" -le "$hard_ceiling" ]]; then
5100
+ adjusted="$new_limit"
5101
+ emit_event "intelligence.convergence_acceleration" \
5102
+ "issue=${ISSUE_NUMBER:-0}" \
5103
+ "context=$context" \
5104
+ "prev_issues=$prev_issue_count" \
5105
+ "current_issues=$current_issue_count" \
5106
+ "new_limit=$adjusted"
4554
5107
  fi
4555
5108
  fi
4556
5109
 
4557
- # 2. Negative Prompting
4558
- if [[ "$negative_enabled" == "true" ]]; then
4559
- echo ""
4560
- info "Running negative prompting..."
4561
- if ! run_negative_prompting; then
4562
- all_passed=false
5110
+ # ── Divergence detection ──
5111
+ # If issue count increases, reduce remaining cycles
5112
+ if [[ "$current_issue_count" -gt "$prev_issue_count" ]]; then
5113
+ local reduced=$((adjusted - 1))
5114
+ if [[ "$reduced" -ge 1 ]]; then
5115
+ adjusted="$reduced"
5116
+ emit_event "intelligence.divergence_detected" \
5117
+ "issue=${ISSUE_NUMBER:-0}" \
5118
+ "context=$context" \
5119
+ "prev_issues=$prev_issue_count" \
5120
+ "current_issues=$current_issue_count" \
5121
+ "new_limit=$adjusted"
4563
5122
  fi
4564
5123
  fi
5124
+ fi
4565
5125
 
4566
- # 3. Developer Simulation (intelligence module)
4567
- if type simulation_review &>/dev/null 2>&1; then
4568
- local sim_enabled
4569
- sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
4570
- local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
4571
- if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
4572
- sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
5126
+ # ── Budget gate ──
5127
+ if [[ "$IGNORE_BUDGET" != "true" ]] && [[ -x "$SCRIPT_DIR/sw-cost.sh" ]]; then
5128
+ local budget_rc=0
5129
+ bash "$SCRIPT_DIR/sw-cost.sh" check-budget 2>/dev/null || budget_rc=$?
5130
+ if [[ "$budget_rc" -eq 2 ]]; then
5131
+ # Budget exhausted cap at current cycle
5132
+ adjusted=0
5133
+ emit_event "intelligence.budget_cap" \
5134
+ "issue=${ISSUE_NUMBER:-0}" \
5135
+ "context=$context"
5136
+ fi
5137
+ fi
5138
+
5139
+ # ── Enforce hard ceiling ──
5140
+ if [[ "$adjusted" -gt "$hard_ceiling" ]]; then
5141
+ adjusted="$hard_ceiling"
5142
+ fi
5143
+
5144
+ echo "$adjusted"
5145
+ }
5146
+
5147
+ # ──────────────────────────────────────────────────────────────────────────────
5148
+ # 5. Intelligent Audit Selection
5149
+ # AI-driven audit selection — all audits enabled, intensity varies.
5150
+ # ──────────────────────────────────────────────────────────────────────────────
5151
+ pipeline_select_audits() {
5152
+ local audit_intensity
5153
+ audit_intensity=$(jq -r --arg id "compound_quality" \
5154
+ '(.stages[] | select(.id == $id) | .config.audit_intensity) // "auto"' \
5155
+ "$PIPELINE_CONFIG" 2>/dev/null) || true
5156
+ [[ -z "$audit_intensity" || "$audit_intensity" == "null" ]] && audit_intensity="auto"
5157
+
5158
+ # Short-circuit for explicit overrides
5159
+ case "$audit_intensity" in
5160
+ off)
5161
+ echo '{"adversarial":"off","architecture":"off","simulation":"off","security":"off","dod":"off"}'
5162
+ return 0
5163
+ ;;
5164
+ full|lightweight)
5165
+ jq -n --arg i "$audit_intensity" \
5166
+ '{adversarial:$i,architecture:$i,simulation:$i,security:$i,dod:$i}'
5167
+ return 0
5168
+ ;;
5169
+ esac
5170
+
5171
+ # ── Auto mode: data-driven intensity ──
5172
+ local default_intensity="targeted"
5173
+ local security_intensity="targeted"
5174
+
5175
+ # Read last 5 quality scores for this repo
5176
+ local quality_scores_file="${HOME}/.shipwright/optimization/quality-scores.jsonl"
5177
+ local repo_name
5178
+ repo_name=$(basename "${PROJECT_ROOT:-.}") || true
5179
+ if [[ -f "$quality_scores_file" ]]; then
5180
+ local recent_scores
5181
+ recent_scores=$(grep "\"repo\":\"${repo_name}\"" "$quality_scores_file" 2>/dev/null | tail -5) || true
5182
+ if [[ -n "$recent_scores" ]]; then
5183
+ # Check for critical findings in recent history
5184
+ local has_critical
5185
+ has_critical=$(echo "$recent_scores" | jq -s '[.[].findings.critical // 0] | add' 2>/dev/null || echo "0")
5186
+ has_critical="${has_critical:-0}"
5187
+ if [[ "$has_critical" -gt 0 ]]; then
5188
+ security_intensity="full"
5189
+ fi
5190
+
5191
+ # Compute average quality score
5192
+ local avg_score
5193
+ avg_score=$(echo "$recent_scores" | jq -s 'if length > 0 then ([.[].quality_score] | add / length | floor) else 70 end' 2>/dev/null || echo "70")
5194
+ avg_score="${avg_score:-70}"
5195
+
5196
+ if [[ "$avg_score" -lt 60 ]]; then
5197
+ default_intensity="full"
5198
+ security_intensity="full"
5199
+ elif [[ "$avg_score" -gt 80 ]]; then
5200
+ default_intensity="lightweight"
5201
+ [[ "$security_intensity" != "full" ]] && security_intensity="lightweight"
5202
+ fi
5203
+ fi
5204
+ fi
5205
+
5206
+ # Intelligence cache: upgrade targeted→full for complex changes
5207
+ local intel_cache="${PROJECT_ROOT}/.claude/intelligence-cache.json"
5208
+ if [[ -f "$intel_cache" && "$default_intensity" == "targeted" ]]; then
5209
+ local complexity
5210
+ complexity=$(jq -r '.complexity // "medium"' "$intel_cache" 2>/dev/null || echo "medium")
5211
+ if [[ "$complexity" == "high" || "$complexity" == "very_high" ]]; then
5212
+ default_intensity="full"
5213
+ security_intensity="full"
5214
+ fi
5215
+ fi
5216
+
5217
+ emit_event "pipeline.audit_selection" \
5218
+ "issue=${ISSUE_NUMBER:-0}" \
5219
+ "default_intensity=$default_intensity" \
5220
+ "security_intensity=$security_intensity" \
5221
+ "repo=$repo_name"
5222
+
5223
+ jq -n \
5224
+ --arg adv "$default_intensity" \
5225
+ --arg arch "$default_intensity" \
5226
+ --arg sim "$default_intensity" \
5227
+ --arg sec "$security_intensity" \
5228
+ --arg dod "$default_intensity" \
5229
+ '{adversarial:$adv,architecture:$arch,simulation:$sim,security:$sec,dod:$dod}'
5230
+ }
5231
+
5232
+ # ──────────────────────────────────────────────────────────────────────────────
5233
+ # 6. Definition of Done Verification
5234
+ # Strict DoD enforcement after compound quality completes.
5235
+ # ──────────────────────────────────────────────────────────────────────────────
5236
+ pipeline_verify_dod() {
5237
+ local artifacts_dir="${1:-$ARTIFACTS_DIR}"
5238
+ local checks_total=0 checks_passed=0
5239
+ local results=""
5240
+
5241
+ # 1. Test coverage: verify changed source files have test counterparts
5242
+ local changed_files
5243
+ changed_files=$(git diff --name-only "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
5244
+ local missing_tests=""
5245
+ local files_checked=0
5246
+
5247
+ if [[ -n "$changed_files" ]]; then
5248
+ while IFS= read -r src_file; do
5249
+ [[ -z "$src_file" ]] && continue
5250
+ # Only check source code files
5251
+ case "$src_file" in
5252
+ *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.sh)
5253
+ # Skip test files themselves and config files
5254
+ case "$src_file" in
5255
+ *test*|*spec*|*__tests__*|*.config.*|*.d.ts) continue ;;
5256
+ esac
5257
+ files_checked=$((files_checked + 1))
5258
+ checks_total=$((checks_total + 1))
5259
+ # Check for corresponding test file
5260
+ local base_name dir_name ext
5261
+ base_name=$(basename "$src_file")
5262
+ dir_name=$(dirname "$src_file")
5263
+ ext="${base_name##*.}"
5264
+ local stem="${base_name%.*}"
5265
+ local test_found=false
5266
+ # Common test file patterns
5267
+ for pattern in \
5268
+ "${dir_name}/${stem}.test.${ext}" \
5269
+ "${dir_name}/${stem}.spec.${ext}" \
5270
+ "${dir_name}/__tests__/${stem}.test.${ext}" \
5271
+ "${dir_name}/${stem}-test.${ext}" \
5272
+ "${dir_name}/test_${stem}.${ext}" \
5273
+ "${dir_name}/${stem}_test.${ext}"; do
5274
+ if [[ -f "$pattern" ]]; then
5275
+ test_found=true
5276
+ break
5277
+ fi
5278
+ done
5279
+ if $test_found; then
5280
+ checks_passed=$((checks_passed + 1))
5281
+ else
5282
+ missing_tests="${missing_tests}${src_file}\n"
5283
+ fi
5284
+ ;;
5285
+ esac
5286
+ done <<EOF
5287
+ $changed_files
5288
+ EOF
5289
+ fi
5290
+
5291
+ # 2. Test-added verification: if significant logic added, ensure tests were also added
5292
+ local logic_lines=0 test_lines=0
5293
+ if [[ -n "$changed_files" ]]; then
5294
+ local full_diff
5295
+ full_diff=$(git diff "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
5296
+ if [[ -n "$full_diff" ]]; then
5297
+ # Count added lines matching source patterns (rough heuristic)
5298
+ logic_lines=$(echo "$full_diff" | grep -cE '^\+.*(function |class |if |for |while |return |export )' 2>/dev/null || true)
5299
+ logic_lines="${logic_lines:-0}"
5300
+ # Count added lines in test files
5301
+ test_lines=$(echo "$full_diff" | grep -cE '^\+.*(it\(|test\(|describe\(|expect\(|assert|def test_|func Test)' 2>/dev/null || true)
5302
+ test_lines="${test_lines:-0}"
5303
+ fi
5304
+ fi
5305
+ checks_total=$((checks_total + 1))
5306
+ local test_ratio_passed=true
5307
+ if [[ "$logic_lines" -gt 20 && "$test_lines" -eq 0 ]]; then
5308
+ test_ratio_passed=false
5309
+ warn "DoD verification: ${logic_lines} logic lines added but no test lines detected"
5310
+ else
5311
+ checks_passed=$((checks_passed + 1))
5312
+ fi
5313
+
5314
+ # 3. Behavioral verification: check DoD audit artifacts for evidence
5315
+ local dod_audit_file="$artifacts_dir/dod-audit.md"
5316
+ local dod_verified=0 dod_total_items=0
5317
+ if [[ -f "$dod_audit_file" ]]; then
5318
+ # Count items marked as passing
5319
+ dod_total_items=$(grep -cE '^\s*-\s*\[x\]' "$dod_audit_file" 2>/dev/null || true)
5320
+ dod_total_items="${dod_total_items:-0}"
5321
+ local dod_failing
5322
+ dod_failing=$(grep -cE '^\s*-\s*\[\s\]' "$dod_audit_file" 2>/dev/null || true)
5323
+ dod_failing="${dod_failing:-0}"
5324
+ dod_verified=$dod_total_items
5325
+ checks_total=$((checks_total + dod_total_items + ${dod_failing:-0}))
5326
+ checks_passed=$((checks_passed + dod_total_items))
5327
+ fi
5328
+
5329
+ # Compute pass rate
5330
+ local pass_rate=100
5331
+ if [[ "$checks_total" -gt 0 ]]; then
5332
+ pass_rate=$(( (checks_passed * 100) / checks_total ))
5333
+ fi
5334
+
5335
+ # Write results
5336
+ local tmp_result
5337
+ tmp_result=$(mktemp)
5338
+ jq -n \
5339
+ --argjson checks_total "$checks_total" \
5340
+ --argjson checks_passed "$checks_passed" \
5341
+ --argjson pass_rate "$pass_rate" \
5342
+ --argjson files_checked "$files_checked" \
5343
+ --arg missing_tests "$(echo -e "$missing_tests" | head -20)" \
5344
+ --argjson logic_lines "$logic_lines" \
5345
+ --argjson test_lines "$test_lines" \
5346
+ --argjson test_ratio_passed "$test_ratio_passed" \
5347
+ --argjson dod_verified "$dod_verified" \
5348
+ '{
5349
+ checks_total: $checks_total,
5350
+ checks_passed: $checks_passed,
5351
+ pass_rate: $pass_rate,
5352
+ files_checked: $files_checked,
5353
+ missing_tests: ($missing_tests | split("\n") | map(select(. != ""))),
5354
+ logic_lines: $logic_lines,
5355
+ test_lines: $test_lines,
5356
+ test_ratio_passed: $test_ratio_passed,
5357
+ dod_verified: $dod_verified
5358
+ }' > "$tmp_result" 2>/dev/null
5359
+ mv "$tmp_result" "$artifacts_dir/dod-verification.json"
5360
+
5361
+ emit_event "pipeline.dod_verification" \
5362
+ "issue=${ISSUE_NUMBER:-0}" \
5363
+ "checks_total=$checks_total" \
5364
+ "checks_passed=$checks_passed" \
5365
+ "pass_rate=$pass_rate"
5366
+
5367
+ # Fail if pass rate < 70%
5368
+ if [[ "$pass_rate" -lt 70 ]]; then
5369
+ warn "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
5370
+ return 1
5371
+ fi
5372
+
5373
+ success "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
5374
+ return 0
5375
+ }
5376
+
5377
+ # ──────────────────────────────────────────────────────────────────────────────
5378
+ # 7. Source Code Security Scan
5379
+ # Grep-based vulnerability pattern matching on changed files.
5380
+ # ──────────────────────────────────────────────────────────────────────────────
5381
+ pipeline_security_source_scan() {
5382
+ local base_branch="${1:-${BASE_BRANCH:-main}}"
5383
+ local findings="[]"
5384
+ local finding_count=0
5385
+
5386
+ local changed_files
5387
+ changed_files=$(git diff --name-only "${base_branch}...HEAD" 2>/dev/null || true)
5388
+ [[ -z "$changed_files" ]] && { echo "[]"; return 0; }
5389
+
5390
+ local tmp_findings
5391
+ tmp_findings=$(mktemp)
5392
+ echo "[]" > "$tmp_findings"
5393
+
5394
+ while IFS= read -r file; do
5395
+ [[ -z "$file" || ! -f "$file" ]] && continue
5396
+ # Only scan code files
5397
+ case "$file" in
5398
+ *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.java|*.rb|*.php|*.sh) ;;
5399
+ *) continue ;;
5400
+ esac
5401
+
5402
+ # SQL injection patterns
5403
+ local sql_matches
5404
+ sql_matches=$(grep -nE '(query|execute|sql)\s*\(?\s*[`"'"'"']\s*.*\$\{|\.query\s*\(\s*[`"'"'"'].*\+' "$file" 2>/dev/null || true)
5405
+ if [[ -n "$sql_matches" ]]; then
5406
+ while IFS= read -r match; do
5407
+ [[ -z "$match" ]] && continue
5408
+ local line_num="${match%%:*}"
5409
+ finding_count=$((finding_count + 1))
5410
+ local current
5411
+ current=$(cat "$tmp_findings")
5412
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "sql_injection" \
5413
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential SQL injection via string concatenation"}]' \
5414
+ > "$tmp_findings" 2>/dev/null || true
5415
+ done <<SQLEOF
5416
+ $sql_matches
5417
+ SQLEOF
5418
+ fi
5419
+
5420
+ # XSS patterns
5421
+ local xss_matches
5422
+ xss_matches=$(grep -nE 'innerHTML\s*=|document\.write\s*\(|dangerouslySetInnerHTML' "$file" 2>/dev/null || true)
5423
+ if [[ -n "$xss_matches" ]]; then
5424
+ while IFS= read -r match; do
5425
+ [[ -z "$match" ]] && continue
5426
+ local line_num="${match%%:*}"
5427
+ finding_count=$((finding_count + 1))
5428
+ local current
5429
+ current=$(cat "$tmp_findings")
5430
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "xss" \
5431
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential XSS via unsafe DOM manipulation"}]' \
5432
+ > "$tmp_findings" 2>/dev/null || true
5433
+ done <<XSSEOF
5434
+ $xss_matches
5435
+ XSSEOF
5436
+ fi
5437
+
5438
+ # Command injection patterns
5439
+ local cmd_matches
5440
+ cmd_matches=$(grep -nE 'eval\s*\(|child_process|os\.system\s*\(|subprocess\.(call|run|Popen)\s*\(' "$file" 2>/dev/null || true)
5441
+ if [[ -n "$cmd_matches" ]]; then
5442
+ while IFS= read -r match; do
5443
+ [[ -z "$match" ]] && continue
5444
+ local line_num="${match%%:*}"
5445
+ finding_count=$((finding_count + 1))
5446
+ local current
5447
+ current=$(cat "$tmp_findings")
5448
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "command_injection" \
5449
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential command injection via unsafe execution"}]' \
5450
+ > "$tmp_findings" 2>/dev/null || true
5451
+ done <<CMDEOF
5452
+ $cmd_matches
5453
+ CMDEOF
5454
+ fi
5455
+
5456
+ # Hardcoded secrets patterns
5457
+ local secret_matches
5458
+ secret_matches=$(grep -nEi '(password|api_key|secret|token)\s*=\s*['"'"'"][A-Za-z0-9+/=]{8,}['"'"'"]' "$file" 2>/dev/null || true)
5459
+ if [[ -n "$secret_matches" ]]; then
5460
+ while IFS= read -r match; do
5461
+ [[ -z "$match" ]] && continue
5462
+ local line_num="${match%%:*}"
5463
+ finding_count=$((finding_count + 1))
5464
+ local current
5465
+ current=$(cat "$tmp_findings")
5466
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "hardcoded_secret" \
5467
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential hardcoded secret or credential"}]' \
5468
+ > "$tmp_findings" 2>/dev/null || true
5469
+ done <<SECEOF
5470
+ $secret_matches
5471
+ SECEOF
5472
+ fi
5473
+
5474
+ # Insecure crypto patterns
5475
+ local crypto_matches
5476
+ crypto_matches=$(grep -nE '(md5|MD5|sha1|SHA1)\s*\(' "$file" 2>/dev/null || true)
5477
+ if [[ -n "$crypto_matches" ]]; then
5478
+ while IFS= read -r match; do
5479
+ [[ -z "$match" ]] && continue
5480
+ local line_num="${match%%:*}"
5481
+ finding_count=$((finding_count + 1))
5482
+ local current
5483
+ current=$(cat "$tmp_findings")
5484
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "insecure_crypto" \
5485
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"major","description":"Weak cryptographic function (consider SHA-256+)"}]' \
5486
+ > "$tmp_findings" 2>/dev/null || true
5487
+ done <<CRYEOF
5488
+ $crypto_matches
5489
+ CRYEOF
5490
+ fi
5491
+ done <<FILESEOF
5492
+ $changed_files
5493
+ FILESEOF
5494
+
5495
+ # Write to artifacts and output
5496
+ findings=$(cat "$tmp_findings")
5497
+ rm -f "$tmp_findings"
5498
+
5499
+ if [[ -n "${ARTIFACTS_DIR:-}" ]]; then
5500
+ local tmp_scan
5501
+ tmp_scan=$(mktemp)
5502
+ echo "$findings" > "$tmp_scan"
5503
+ mv "$tmp_scan" "$ARTIFACTS_DIR/security-source-scan.json"
5504
+ fi
5505
+
5506
+ emit_event "pipeline.security_source_scan" \
5507
+ "issue=${ISSUE_NUMBER:-0}" \
5508
+ "findings=$finding_count"
5509
+
5510
+ echo "$finding_count"
5511
+ }
5512
+
5513
# ──────────────────────────────────────────────────────────────────────────────
# 8. Quality Score Recording
# Appends one JSONL record per pipeline run to ~/.shipwright/optimization so
# future runs can learn from historical quality outcomes.
# Args: $1 score, $2 critical, $3 major, $4 minor, $5 dod pass rate,
#       $6 comma-separated audit list. All optional (default 0 / empty).
# ──────────────────────────────────────────────────────────────────────────────
pipeline_record_quality_score() {
  local score="${1:-0}" crit="${2:-0}" maj="${3:-0}" min="${4:-0}"
  local dod_rate="${5:-0}" audit_csv="${6:-}"

  local out_dir="${HOME}/.shipwright/optimization"
  local out_file="${out_dir}/quality-scores.jsonl"
  mkdir -p "$out_dir"

  # Repo label is best-effort; never let a basename failure abort the pipeline.
  local repo_label
  repo_label=$(basename "${PROJECT_ROOT:-.}") || true

  # Build the record into a temp file first, then append — avoids emitting a
  # partial line into the JSONL log if jq fails mid-write.
  local record_tmp
  record_tmp=$(mktemp)
  jq -n \
    --arg repo "$repo_label" \
    --arg issue "${ISSUE_NUMBER:-0}" \
    --arg ts "$(now_iso)" \
    --argjson score "$score" \
    --argjson critical "$crit" \
    --argjson major "$maj" \
    --argjson minor "$min" \
    --argjson dod "$dod_rate" \
    --arg template "${PIPELINE_NAME:-standard}" \
    --arg audits "$audit_csv" \
    '{
      repo: $repo,
      issue: ($issue | tonumber),
      timestamp: $ts,
      quality_score: $score,
      findings: {critical: $critical, major: $major, minor: $minor},
      dod_pass_rate: $dod,
      template: $template,
      audits_run: ($audits | split(",") | map(select(. != "")))
    }' > "$record_tmp" 2>/dev/null

  cat "$record_tmp" >> "$out_file"
  rm -f "$record_tmp"

  emit_event "pipeline.quality_score_recorded" \
    "issue=${ISSUE_NUMBER:-0}" \
    "quality_score=$score" \
    "critical=$crit" \
    "major=$maj" \
    "minor=$min"
}
5566
+
5567
# ──────────────────────────────────────────────────────────────────────────────
# 4. Mid-Pipeline Complexity Re-evaluation
# After build+test completes, compares actual effort (diff size, self-heal
# cycles) to the initial complexity estimate. Updates skip recommendations
# and model routing for the remaining stages, and records actuals for learning.
# Outputs the assessment label on stdout.
# ──────────────────────────────────────────────────────────────────────────────
pipeline_reassess_complexity() {
  local initial_complexity="${INTELLIGENCE_COMPLEXITY:-5}"
  local reassessment_file="$ARTIFACTS_DIR/reassessment.json"

  # ── Gather actual metrics ──
  local files_changed=0 lines_changed=0 first_try_pass=false self_heal_cycles=0

  files_changed=$(git diff "${BASE_BRANCH:-main}...HEAD" --name-only 2>/dev/null | wc -l | tr -d ' ') || files_changed=0
  files_changed="${files_changed:-0}"

  # Count lines changed (insertions + deletions) without pipefail issues
  lines_changed=0
  local _diff_stat
  _diff_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
  if [[ -n "${_diff_stat:-}" ]]; then
    local _ins _del
    _ins=$(echo "$_diff_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
    _del=$(echo "$_diff_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
    lines_changed=$(( ${_ins:-0} + ${_del:-0} ))
  fi

  self_heal_cycles="${SELF_HEAL_COUNT:-0}"
  if [[ "$self_heal_cycles" -eq 0 ]]; then
    first_try_pass=true
  fi

  # ── Compare to expectations ──
  # BUGFIX: the most-specific thresholds must be tested FIRST. Previously the
  # broader "simpler" (<50 lines) and "harder" (>500 lines) branches were
  # checked before "much_simpler" (<20) and "much_harder" (>1000), which made
  # those two branches unreachable.
  local actual_complexity="$initial_complexity"
  local assessment="as_expected"
  local skip_stages="[]"

  if [[ "$lines_changed" -lt 20 && "$first_try_pass" == "true" && "$files_changed" -lt 3 ]]; then
    # Much simpler: tiny diff, very few files, tests passed first try
    actual_complexity=1
    assessment="much_simpler"
    skip_stages='["compound_quality","review"]'
  elif [[ "$lines_changed" -lt 50 && "$first_try_pass" == "true" && "$files_changed" -lt 5 ]]; then
    # Simpler than expected: small diff, tests passed first try
    actual_complexity=$((initial_complexity > 2 ? initial_complexity - 2 : 1))
    assessment="simpler_than_expected"
    # Mark compound_quality as skippable, simplify review
    skip_stages='["compound_quality"]'
  elif [[ "$lines_changed" -gt 1000 || "$self_heal_cycles" -gt 4 ]]; then
    # Much harder: very large diff or many self-heal cycles
    actual_complexity=10
    assessment="much_harder"
    skip_stages='[]'
  elif [[ "$lines_changed" -gt 500 || "$self_heal_cycles" -gt 2 ]]; then
    # Harder than expected: ensure compound_quality runs, possibly upgrade model
    actual_complexity=$((initial_complexity < 9 ? initial_complexity + 2 : 10))
    assessment="harder_than_expected"
    skip_stages='[]'
  fi

  # ── Write reassessment artifact (atomic: tmp + mv) ──
  local tmp_reassess
  tmp_reassess="$(mktemp)"
  jq -n \
    --argjson initial "$initial_complexity" \
    --argjson actual "$actual_complexity" \
    --arg assessment "$assessment" \
    --argjson files_changed "$files_changed" \
    --argjson lines_changed "$lines_changed" \
    --argjson self_heal_cycles "$self_heal_cycles" \
    --argjson first_try "$first_try_pass" \
    --argjson skip_stages "$skip_stages" \
    '{
      initial_complexity: $initial,
      actual_complexity: $actual,
      assessment: $assessment,
      files_changed: $files_changed,
      lines_changed: $lines_changed,
      self_heal_cycles: $self_heal_cycles,
      first_try_pass: $first_try,
      skip_stages: $skip_stages
    }' > "$tmp_reassess" 2>/dev/null && mv "$tmp_reassess" "$reassessment_file" || rm -f "$tmp_reassess"

  # Update global complexity for downstream stages
  PIPELINE_ADAPTIVE_COMPLEXITY="$actual_complexity"

  emit_event "intelligence.reassessment" \
    "issue=${ISSUE_NUMBER:-0}" \
    "initial=$initial_complexity" \
    "actual=$actual_complexity" \
    "assessment=$assessment" \
    "files=$files_changed" \
    "lines=$lines_changed" \
    "self_heals=$self_heal_cycles"

  # ── Store actuals for learning (best-effort) ──
  local learning_file="${HOME}/.shipwright/optimization/complexity-actuals.jsonl"
  mkdir -p "${HOME}/.shipwright/optimization" 2>/dev/null || true
  echo "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"initial\":$initial_complexity,\"actual\":$actual_complexity,\"files\":$files_changed,\"lines\":$lines_changed,\"ts\":\"$(now_iso)\"}" \
    >> "$learning_file" 2>/dev/null || true

  echo "$assessment"
}
5670
+
5671
# ──────────────────────────────────────────────────────────────────────────────
# 5. Backtracking Support
# When compound_quality detects architecture-level problems, backtracks to
# the design stage instead of just feeding findings to the build loop.
# Limited to 1 backtrack per pipeline run to prevent infinite loops.
# Args: $1 target stage id, $2 reason (default: architecture_violation).
# Returns 0 when redesign + rebuild succeed, 1 otherwise.
# ──────────────────────────────────────────────────────────────────────────────
pipeline_backtrack_to_stage() {
  local target_stage="$1"
  local reason="${2:-architecture_violation}"

  # Guard: refuse once the per-run backtrack budget is exhausted.
  if [[ "$PIPELINE_BACKTRACK_COUNT" -ge "$PIPELINE_MAX_BACKTRACKS" ]]; then
    warn "Max backtracks ($PIPELINE_MAX_BACKTRACKS) reached — cannot backtrack to $target_stage"
    emit_event "intelligence.backtrack_blocked" \
      "issue=${ISSUE_NUMBER:-0}" \
      "target=$target_stage" \
      "reason=max_backtracks_reached" \
      "count=$PIPELINE_BACKTRACK_COUNT"
    return 1
  fi

  PIPELINE_BACKTRACK_COUNT=$((PIPELINE_BACKTRACK_COUNT + 1))

  info "Backtracking to ${BOLD}${target_stage}${RESET} stage (reason: ${reason})"

  emit_event "intelligence.backtrack" \
    "issue=${ISSUE_NUMBER:-0}" \
    "target=$target_stage" \
    "reason=$reason"

  # Pull architecture findings out of earlier artifacts to seed the redesign.
  local arch_notes=""
  local validation_json="$ARTIFACTS_DIR/compound-architecture-validation.json"
  if [[ -f "$validation_json" ]]; then
    arch_notes=$(jq -r '[.[] | select(.severity == "critical" or .severity == "high") | .message // .description // ""] | join("\n")' \
      "$validation_json" 2>/dev/null || true)
  fi
  local adversarial_md="$ARTIFACTS_DIR/adversarial-review.md"
  if [[ -f "$adversarial_md" ]]; then
    local design_hits
    design_hits=$(grep -iE 'architect|layer.*violation|circular.*depend|coupling|design.*flaw' \
      "$adversarial_md" 2>/dev/null || true)
    if [[ -n "$design_hits" ]]; then
      arch_notes="${arch_notes}
${design_hits}"
    fi
  fi

  # Re-open the target stage plus everything downstream of it.
  set_stage_status "$target_stage" "pending"
  set_stage_status "build" "pending"
  set_stage_status "test" "pending"

  # Temporarily enrich the goal with the detected violations for the re-run;
  # the original goal is restored on every exit path below.
  local saved_goal="$GOAL"
  if [[ -n "$arch_notes" ]]; then
    GOAL="$GOAL

IMPORTANT — Architecture violations were detected during quality review. Redesign to fix:
$arch_notes

Update the design to address these violations, then rebuild."
  fi

  # Re-run the target stage with the augmented goal.
  info "Re-running ${BOLD}${target_stage}${RESET} with architecture context..."
  if "stage_${target_stage}" 2>/dev/null; then
    mark_stage_complete "$target_stage"
    success "Backtrack: ${target_stage} re-run complete"
  else
    GOAL="$saved_goal"
    error "Backtrack: ${target_stage} re-run failed"
    return 1
  fi

  # Re-run build+test against the redesigned plan.
  info "Re-running build→test after backtracked ${target_stage}..."
  if self_healing_build_test; then
    success "Backtrack: build→test passed after ${target_stage} redesign"
    GOAL="$saved_goal"
    return 0
  fi
  GOAL="$saved_goal"
  error "Backtrack: build→test failed after ${target_stage} redesign"
  return 1
}
5756
+
5757
# Collects all quality findings into a prioritized feedback document, then
# re-runs the self-healing build→test loop with the goal augmented by that
# feedback. Architecture-routed findings trigger a design backtrack instead.
# Returns 0 when the rebuild passes, 1 otherwise.
compound_rebuild_with_feedback() {
  local feedback_file="$ARTIFACTS_DIR/quality-feedback.md"

  # ── Intelligence: decide which remediation route the findings suggest ──
  local route="correctness"
  route=$(classify_quality_findings 2>/dev/null) || route="correctness"

  # ── Emit structured findings JSON alongside the markdown feedback ──
  local s_total_critical=0 s_total_major=0 s_total_minor=0
  local classified="$ARTIFACTS_DIR/classified-findings.json"
  if [[ -f "$classified" ]]; then
    s_total_critical=$(jq -r '.security // 0' "$classified" 2>/dev/null || echo "0")
    s_total_major=$(jq -r '.correctness // 0' "$classified" 2>/dev/null || echo "0")
    s_total_minor=$(jq -r '.style // 0' "$classified" 2>/dev/null || echo "0")
  fi

  local qf_tmp
  qf_tmp="$(mktemp)"
  jq -n \
    --arg route "$route" \
    --argjson total_critical "$s_total_critical" \
    --argjson total_major "$s_total_major" \
    --argjson total_minor "$s_total_minor" \
    '{route: $route, total_critical: $total_critical, total_major: $total_major, total_minor: $total_minor}' \
    > "$qf_tmp" 2>/dev/null && mv "$qf_tmp" "$ARTIFACTS_DIR/quality-findings.json" || rm -f "$qf_tmp"

  # ── Architecture route prefers a design backtrack over a plain rebuild ──
  if [[ "$route" == "architecture" ]]; then
    info "Architecture-level findings detected — attempting backtrack to design"
    if pipeline_backtrack_to_stage "design" "architecture_violation" 2>/dev/null; then
      return 0
    fi
    # Backtrack failed or budget spent — fall through to the standard rebuild.
    warn "Backtrack unavailable — falling through to standard rebuild"
  fi

  # ── Assemble the prioritized feedback document ──
  {
    echo "# Quality Feedback — Issues to Fix"
    echo ""

    # Security first: these block everything else.
    if [[ "$route" == "security" || -f "$ARTIFACTS_DIR/security-audit.log" ]] && grep -qiE 'critical|high' "$ARTIFACTS_DIR/security-audit.log" 2>/dev/null; then
      echo "## 🔴 PRIORITY: Security Findings (fix these first)"
      cat "$ARTIFACTS_DIR/security-audit.log"
      echo ""
      echo "Security issues MUST be resolved before any other changes."
      echo ""
    fi

    # Correctness-oriented artifacts.
    if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
      echo "## Adversarial Review Findings"
      cat "$ARTIFACTS_DIR/adversarial-review.md"
      echo ""
    fi
    if [[ -f "$ARTIFACTS_DIR/negative-review.md" ]]; then
      echo "## Negative Prompting Concerns"
      cat "$ARTIFACTS_DIR/negative-review.md"
      echo ""
    fi
    if [[ -f "$ARTIFACTS_DIR/dod-audit.md" ]]; then
      echo "## DoD Audit Failures"
      grep "❌" "$ARTIFACTS_DIR/dod-audit.md" 2>/dev/null || true
      echo ""
    fi
    if [[ -f "$ARTIFACTS_DIR/api-compat.log" ]] && grep -qi 'BREAKING' "$ARTIFACTS_DIR/api-compat.log" 2>/dev/null; then
      echo "## API Breaking Changes"
      cat "$ARTIFACTS_DIR/api-compat.log"
      echo ""
    fi

    # Style notes last — informational only, never blocking.
    if [[ -f "$classified" ]]; then
      local style_count
      style_count=$(jq -r '.style // 0' "$classified" 2>/dev/null || echo "0")
      if [[ "$style_count" -gt 0 ]]; then
        echo "## Style Notes (non-blocking, address if time permits)"
        echo "${style_count} style suggestions found. These do not block the build."
        echo ""
      fi
    fi
  } > "$feedback_file"

  # Nothing collected → nothing to rebuild against.
  if [[ ! -s "$feedback_file" ]]; then
    warn "No quality feedback collected — skipping rebuild"
    return 1
  fi

  # Re-open build/test/review for another pass.
  set_stage_status "build" "pending"
  set_stage_status "test" "pending"
  set_stage_status "review" "pending"

  # Temporarily augment GOAL with the feedback plus a route-specific directive;
  # restored on every exit path.
  local saved_goal="$GOAL"
  local feedback_content
  feedback_content=$(cat "$feedback_file")

  local route_instruction=""
  case "$route" in
    security)
      route_instruction="SECURITY PRIORITY: Fix all security vulnerabilities FIRST, then address other issues. Security issues are BLOCKING."
      ;;
    performance)
      route_instruction="PERFORMANCE PRIORITY: Address performance regressions and optimizations. Check for N+1 queries, memory leaks, and algorithmic complexity."
      ;;
    testing)
      route_instruction="TESTING PRIORITY: Add missing test coverage and fix flaky tests before addressing other issues."
      ;;
    architecture)
      route_instruction="ARCHITECTURE: Fix structural issues. Check dependency direction, layer boundaries, and separation of concerns."
      ;;
    correctness|*)
      # Default and explicit "correctness" share the same instruction.
      route_instruction="Fix every issue listed above while keeping all existing functionality working."
      ;;
  esac

  GOAL="$GOAL

IMPORTANT — Compound quality review found issues (route: ${route}). Fix ALL of these:
$feedback_content

${route_instruction}"

  # Re-run the self-healing build→test loop with the augmented goal.
  info "Rebuilding with quality feedback (route: ${route})..."
  if self_healing_build_test; then
    GOAL="$saved_goal"
    return 0
  fi
  GOAL="$saved_goal"
  return 1
}
5897
+
5898
# ──────────────────────────────────────────────────────────────────────────────
# Bash 3.2 Compatibility Check
# Scans modified .sh files for common bash 3.2 incompatibilities.
# stdout: count of violations and nothing else — callers parse this number
#         via command substitution, so all diagnostics go to stderr.
# ──────────────────────────────────────────────────────────────────────────────
run_bash_compat_check() {
  local violations=0
  local violation_details=""

  # Modified .sh files relative to the base branch
  local changed_files
  changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" -- '*.sh' 2>/dev/null || echo "")

  if [[ -z "$changed_files" ]]; then
    echo "0"
    return 0
  fi

  # Check each file for bash 3.2 incompatibilities
  while IFS= read -r filepath; do
    [[ -z "$filepath" ]] && continue

    # declare -A (associative arrays, bash 4+).
    # BUGFIX: the previous pattern `-[aA]` also flagged `declare -a`, but
    # indexed arrays ARE supported in bash 3.2 — only associative `-A` is not.
    local declare_a_count
    declare_a_count=$(grep -Ec 'declare[[:space:]]+-[a-zA-Z]*A' "$filepath" 2>/dev/null) || declare_a_count=0
    declare_a_count="${declare_a_count:-0}"
    if [[ "$declare_a_count" -gt 0 ]]; then
      violations=$((violations + declare_a_count))
      violation_details="${violation_details}${filepath}: declare -A (${declare_a_count} occurrences)
"
    fi

    # readarray or mapfile (bash 4+)
    local readarray_count
    readarray_count=$(grep -c 'readarray\|mapfile' "$filepath" 2>/dev/null) || readarray_count=0
    readarray_count="${readarray_count:-0}"
    if [[ "$readarray_count" -gt 0 ]]; then
      violations=$((violations + readarray_count))
      violation_details="${violation_details}${filepath}: readarray/mapfile (${readarray_count} occurrences)
"
    fi

    # ${var,,} or ${var^^} case conversion (bash 4+)
    local case_conv_count
    case_conv_count=$(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*,,' "$filepath" 2>/dev/null) || case_conv_count=0
    case_conv_count="${case_conv_count:-0}"
    local case_upper_count
    case_upper_count=$(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*\^\^' "$filepath" 2>/dev/null) || case_upper_count=0
    case_conv_count=$((case_conv_count + ${case_upper_count:-0}))
    if [[ "$case_conv_count" -gt 0 ]]; then
      violations=$((violations + case_conv_count))
      violation_details="${violation_details}${filepath}: case conversion \$\{var,,\} or \$\{var\^\^\} (${case_conv_count} occurrences)
"
    fi

    # |& (pipe stderr+stdout shorthand, bash 4+)
    local pipe_ampersand_count
    pipe_ampersand_count=$(grep -c '|&' "$filepath" 2>/dev/null) || pipe_ampersand_count=0
    pipe_ampersand_count="${pipe_ampersand_count:-0}"
    if [[ "$pipe_ampersand_count" -gt 0 ]]; then
      violations=$((violations + pipe_ampersand_count))
      violation_details="${violation_details}${filepath}: |& operator (${pipe_ampersand_count} occurrences)
"
    fi

    # ;& or ;;& case fallthrough (bash 4+); counts matching lines
    local advanced_case_count
    advanced_case_count=$(grep -c ';&\|;;&' "$filepath" 2>/dev/null) || advanced_case_count=0
    advanced_case_count="${advanced_case_count:-0}"
    if [[ "$advanced_case_count" -gt 0 ]]; then
      violations=$((violations + advanced_case_count))
      violation_details="${violation_details}${filepath}: advanced case ;& or ;;& (${advanced_case_count} occurrences)
"
    fi

  done <<< "$changed_files"

  # Diagnostics to stderr only — stdout must stay a clean integer.
  # BUGFIX: details were previously echoed to stdout, corrupting the count
  # captured by callers via $(run_bash_compat_check).
  if [[ "$violations" -gt 0 ]]; then
    warn "Bash 3.2 compatibility check: ${violations} violation(s) found:"
    echo "$violation_details" | sed 's/^/  /' >&2
  fi

  echo "$violations"
}
5976
+
5977
# ──────────────────────────────────────────────────────────────────────────────
# Test Coverage Check
# Runs the configured test command (TEST_CMD) and extracts a coverage
# percentage from its output.
# stdout: integer 0-100, or "skip" when no test command is configured.
# ──────────────────────────────────────────────────────────────────────────────
run_test_coverage_check() {
  local cmd="${TEST_CMD:-}"
  if [[ -z "$cmd" ]]; then
    echo "skip"
    return 0
  fi

  info "Running test coverage check..."

  # Run the suite, folding stderr into stdout for pattern matching.
  local cmd_output cmd_status=0
  cmd_output=$(eval "$cmd" 2>&1) || cmd_status=$?

  if [[ "$cmd_status" -ne 0 ]]; then
    warn "Test command failed (exit code: $cmd_status) — cannot extract coverage"
    echo "0"
    return 0
  fi

  # Primary form: "NN% coverage" / "NN% lines" / "NN% stmts" / "NN% statements"
  local pct
  pct=$(echo "$cmd_output" | grep -oE '[0-9]{1,3}%[[:space:]]*(coverage|lines|stmts|statements)' | grep -oE '^[0-9]{1,3}' | head -1 || true)

  # Fallback form without the percent sign: "coverage: NN" / "coverage NN"
  if [[ -z "$pct" ]]; then
    pct=$(echo "$cmd_output" | grep -oE 'coverage[:]?[[:space:]]*[0-9]{1,3}' | grep -oE '[0-9]{1,3}' | head -1 || true)
  fi

  if [[ -z "$pct" ]]; then
    warn "Could not extract coverage percentage from test output"
    echo "0"
    return 0
  fi

  # Clamp anything non-numeric or above 100 down to 0.
  if [[ ! "$pct" =~ ^[0-9]{1,3}$ ]] || [[ "$pct" -gt 100 ]]; then
    pct=0
  fi

  success "Test coverage: ${pct}%"
  echo "$pct"
}
6026
+
6027
# ──────────────────────────────────────────────────────────────────────────────
# Atomic Write Violations Check
# Scans modified state/config-style files for direct `echo … >` writes that
# should use the tmp-file + mv pattern instead.
# stdout: count of violations and nothing else — callers parse this number,
#         so all diagnostics go to stderr.
# ──────────────────────────────────────────────────────────────────────────────
run_atomic_write_check() {
  local violations=0
  local violation_details=""

  # All modified files (not just .sh — state/config files matter here)
  local changed_files
  changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || echo "")

  if [[ -z "$changed_files" ]]; then
    echo "0"
    return 0
  fi

  while IFS= read -r filepath; do
    [[ -z "$filepath" ]] && continue

    # Only files that look like state/config/artifact stores
    if [[ ! "$filepath" =~ (state|config|artifact|cache|db|json)$ ]]; then
      continue
    fi

    # Count direct redirection writes in the file as it exists on disk.
    # BUGFIX: the previous version piped `git show HEAD:$filepath` into a grep
    # that was ALSO given a filename argument — grep ignores stdin when given
    # a file, so the piped content was dead code. Scan the worktree file.
    local bad_writes
    bad_writes=$(grep -c 'echo.*>' "$filepath" 2>/dev/null) || bad_writes=0
    bad_writes="${bad_writes:-0}"

    if [[ "$bad_writes" -gt 0 ]]; then
      violations=$((violations + bad_writes))
      violation_details="${violation_details}${filepath}: ${bad_writes} direct write(s) (should use tmp+mv)
"
    fi
  done <<< "$changed_files"

  # Diagnostics to stderr only — stdout must stay a clean integer.
  if [[ "$violations" -gt 0 ]]; then
    warn "Atomic write violations: ${violations} found (should use tmp file + mv pattern):"
    echo "$violation_details" | sed 's/^/  /' >&2
  fi

  echo "$violations"
}
6073
+
6074
# ──────────────────────────────────────────────────────────────────────────────
# New Function Test Detection
# Heuristic: if the diff adds shell function definitions but touches no
# test-looking files, flag the new functions as untested.
# stdout: count of untested new functions (0 when none or no diff).
# ──────────────────────────────────────────────────────────────────────────────
run_new_function_test_check() {
  local untested=0
  local summary=""

  # Full diff against the base branch
  local diff_body
  diff_body=$(git diff "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)

  if [[ -z "$diff_body" ]]; then
    echo "0"
    return 0
  fi

  # Added lines that look like function definitions: +name()
  local added_funcs
  added_funcs=$(echo "$diff_body" | grep -E '^\+[a-zA-Z_][a-zA-Z0-9_]*\(\)' | sed 's/^\+//' | sed 's/()//' || true)

  if [[ -z "$added_funcs" ]]; then
    echo "0"
    return 0
  fi

  # Did the same diff touch anything that looks like a test file?
  local touched_tests=0
  touched_tests=$(echo "$diff_body" | grep -c '\-\-\-.*test\|\.test\.\|_test\.' || true)

  # Heuristic: new functions + zero test-file changes → all flagged untested.
  if [[ "$touched_tests" -eq 0 ]]; then
    local added_count
    added_count=$(echo "$added_funcs" | wc -l | xargs)
    untested="$added_count"
    summary="Added ${added_count} new function(s) but no test file modifications detected"
  fi

  if [[ "$untested" -gt 0 ]]; then
    warn "New functions without tests: ${summary}"
  fi

  echo "$untested"
}
6119
+
6120
+ stage_compound_quality() {
6121
+ CURRENT_STAGE_ID="compound_quality"
6122
+
6123
+ # Pre-check: verify meaningful changes exist before running expensive quality checks
6124
+ local _cq_real_changes
6125
+ _cq_real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
6126
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
6127
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
6128
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
6129
+ if [[ "${_cq_real_changes:-0}" -eq 0 ]]; then
6130
+ error "Compound quality: no meaningful code changes found — failing quality gate"
6131
+ return 1
6132
+ fi
6133
+
6134
+ # Read config
6135
+ local max_cycles adversarial_enabled negative_enabled e2e_enabled dod_enabled strict_quality
6136
+ max_cycles=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.max_cycles) // 3' "$PIPELINE_CONFIG" 2>/dev/null) || true
6137
+ [[ -z "$max_cycles" || "$max_cycles" == "null" ]] && max_cycles=3
6138
+ adversarial_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.adversarial) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
6139
+ negative_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.negative) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
6140
+ e2e_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.e2e) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
6141
+ dod_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.dod_audit) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
6142
+ strict_quality=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.strict_quality) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
6143
+ [[ -z "$strict_quality" || "$strict_quality" == "null" ]] && strict_quality="false"
6144
+
6145
+ # Intelligent audit selection
6146
+ local audit_plan='{"adversarial":"targeted","architecture":"targeted","simulation":"targeted","security":"targeted","dod":"targeted"}'
6147
+ if type pipeline_select_audits &>/dev/null 2>&1; then
6148
+ local _selected
6149
+ _selected=$(pipeline_select_audits 2>/dev/null) || true
6150
+ if [[ -n "$_selected" && "$_selected" != "null" ]]; then
6151
+ audit_plan="$_selected"
6152
+ info "Audit plan: $(echo "$audit_plan" | jq -c '.' 2>/dev/null || echo "$audit_plan")"
6153
+ fi
6154
+ fi
6155
+
6156
+ # Track findings for quality score
6157
+ local total_critical=0 total_major=0 total_minor=0
6158
+ local audits_run_list=""
6159
+
6160
+ # ── HARDENED QUALITY GATES (RUN BEFORE CYCLES) ──
6161
+ # These checks must pass before we even start the audit cycles
6162
+ echo ""
6163
+ info "Running hardened quality gate checks..."
6164
+
6165
+ # 1. Bash 3.2 compatibility check
6166
+ local bash_violations=0
6167
+ bash_violations=$(run_bash_compat_check 2>/dev/null) || bash_violations=0
6168
+ bash_violations="${bash_violations:-0}"
6169
+
6170
+ if [[ "$strict_quality" == "true" && "$bash_violations" -gt 0 ]]; then
6171
+ error "STRICT QUALITY: Bash 3.2 incompatibilities found — blocking"
6172
+ emit_event "quality.bash_compat_failed" \
6173
+ "issue=${ISSUE_NUMBER:-0}" \
6174
+ "violations=$bash_violations"
6175
+ return 1
6176
+ fi
6177
+
6178
+ if [[ "$bash_violations" -gt 0 ]]; then
6179
+ warn "Bash 3.2 incompatibilities detected: ${bash_violations} (will impact quality score)"
6180
+ total_minor=$((total_minor + bash_violations))
6181
+ else
6182
+ success "Bash 3.2 compatibility: clean"
6183
+ fi
6184
+
6185
+ # 2. Test coverage check
6186
+ local coverage_pct=0
6187
+ coverage_pct=$(run_test_coverage_check 2>/dev/null) || coverage_pct=0
6188
+ coverage_pct="${coverage_pct:-0}"
6189
+
6190
+ if [[ "$coverage_pct" != "skip" ]]; then
6191
+ if [[ "$coverage_pct" -lt 60 ]]; then
6192
+ if [[ "$strict_quality" == "true" ]]; then
6193
+ error "STRICT QUALITY: Test coverage below 60% (${coverage_pct}%) — blocking"
6194
+ emit_event "quality.coverage_failed" \
6195
+ "issue=${ISSUE_NUMBER:-0}" \
6196
+ "coverage=$coverage_pct"
6197
+ return 1
6198
+ else
6199
+ warn "Test coverage below 60% threshold (${coverage_pct}%) — quality penalty applied"
6200
+ total_major=$((total_major + 2))
6201
+ fi
6202
+ fi
6203
+ fi
6204
+
6205
+ # 3. New functions without tests check
6206
+ local untested_functions=0
6207
+ untested_functions=$(run_new_function_test_check 2>/dev/null) || untested_functions=0
6208
+ untested_functions="${untested_functions:-0}"
6209
+
6210
+ if [[ "$untested_functions" -gt 0 ]]; then
6211
+ if [[ "$strict_quality" == "true" ]]; then
6212
+ error "STRICT QUALITY: ${untested_functions} new function(s) without tests — blocking"
6213
+ emit_event "quality.untested_functions" \
6214
+ "issue=${ISSUE_NUMBER:-0}" \
6215
+ "count=$untested_functions"
6216
+ return 1
6217
+ else
6218
+ warn "New functions without corresponding tests: ${untested_functions}"
6219
+ total_major=$((total_major + untested_functions))
6220
+ fi
6221
+ fi
6222
+
6223
+ # 4. Atomic write violations (optional, informational in most modes)
6224
+ local atomic_violations=0
6225
+ atomic_violations=$(run_atomic_write_check 2>/dev/null) || atomic_violations=0
6226
+ atomic_violations="${atomic_violations:-0}"
6227
+
6228
+ if [[ "$atomic_violations" -gt 0 ]]; then
6229
+ warn "Atomic write violations: ${atomic_violations} (state/config file patterns)"
6230
+ total_minor=$((total_minor + atomic_violations))
6231
+ fi
6232
+
6233
+ # Vitals-driven adaptive cycle limit (preferred)
6234
+ local base_max_cycles="$max_cycles"
6235
+ if type pipeline_adaptive_limit &>/dev/null 2>&1; then
6236
+ local _cq_vitals=""
6237
+ if type pipeline_compute_vitals &>/dev/null 2>&1; then
6238
+ _cq_vitals=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
6239
+ fi
6240
+ local vitals_cq_limit
6241
+ vitals_cq_limit=$(pipeline_adaptive_limit "compound_quality" "$_cq_vitals" 2>/dev/null) || true
6242
+ if [[ -n "$vitals_cq_limit" && "$vitals_cq_limit" =~ ^[0-9]+$ && "$vitals_cq_limit" -gt 0 ]]; then
6243
+ max_cycles="$vitals_cq_limit"
6244
+ if [[ "$max_cycles" != "$base_max_cycles" ]]; then
6245
+ info "Vitals-driven cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
6246
+ fi
6247
+ fi
6248
+ else
6249
+ # Fallback: adaptive cycle limits from optimization data
6250
+ local _cq_iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
6251
+ if [[ -f "$_cq_iter_model" ]]; then
6252
+ local adaptive_limit
6253
+ adaptive_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "0" "-1" 2>/dev/null) || true
6254
+ if [[ -n "$adaptive_limit" && "$adaptive_limit" =~ ^[0-9]+$ && "$adaptive_limit" -gt 0 ]]; then
6255
+ max_cycles="$adaptive_limit"
6256
+ if [[ "$max_cycles" != "$base_max_cycles" ]]; then
6257
+ info "Adaptive cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
6258
+ fi
6259
+ fi
6260
+ fi
6261
+ fi
6262
+
6263
+ # Convergence tracking
6264
+ local prev_issue_count=-1
6265
+
6266
+ local cycle=0
6267
+ while [[ "$cycle" -lt "$max_cycles" ]]; do
6268
+ cycle=$((cycle + 1))
6269
+ local all_passed=true
6270
+
6271
+ echo ""
6272
+ echo -e "${PURPLE}${BOLD}━━━ Compound Quality — Cycle ${cycle}/${max_cycles} ━━━${RESET}"
6273
+
6274
+ if [[ -n "$ISSUE_NUMBER" ]]; then
6275
+ gh_comment_issue "$ISSUE_NUMBER" "🔬 **Compound quality** — cycle ${cycle}/${max_cycles}" 2>/dev/null || true
6276
+ fi
6277
+
6278
+ # 1. Adversarial Review
6279
+ local _adv_intensity
6280
+ _adv_intensity=$(echo "$audit_plan" | jq -r '.adversarial // "targeted"' 2>/dev/null || echo "targeted")
6281
+ if [[ "$adversarial_enabled" == "true" && "$_adv_intensity" != "off" ]]; then
6282
+ echo ""
6283
+ info "Running adversarial review (${_adv_intensity})..."
6284
+ audits_run_list="${audits_run_list:+${audits_run_list},}adversarial"
6285
+ if ! run_adversarial_review; then
6286
+ all_passed=false
6287
+ fi
6288
+ fi
6289
+
6290
+ # 2. Negative Prompting
6291
+ if [[ "$negative_enabled" == "true" ]]; then
6292
+ echo ""
6293
+ info "Running negative prompting..."
6294
+ if ! run_negative_prompting; then
6295
+ all_passed=false
6296
+ fi
6297
+ fi
6298
+
6299
+ # 3. Developer Simulation (intelligence module)
6300
+ if type simulation_review &>/dev/null 2>&1; then
6301
+ local sim_enabled
6302
+ sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
6303
+ local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
6304
+ if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
6305
+ sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
4573
6306
  fi
4574
6307
  if [[ "$sim_enabled" == "true" ]]; then
4575
6308
  echo ""
@@ -4653,14 +6386,36 @@ stage_compound_quality() {
4653
6386
  fi
4654
6387
 
4655
6388
  # 6. DoD Audit
4656
- if [[ "$dod_enabled" == "true" ]]; then
6389
+ local _dod_intensity
6390
+ _dod_intensity=$(echo "$audit_plan" | jq -r '.dod // "targeted"' 2>/dev/null || echo "targeted")
6391
+ if [[ "$dod_enabled" == "true" && "$_dod_intensity" != "off" ]]; then
4657
6392
  echo ""
4658
- info "Running Definition of Done audit..."
6393
+ info "Running Definition of Done audit (${_dod_intensity})..."
6394
+ audits_run_list="${audits_run_list:+${audits_run_list},}dod"
4659
6395
  if ! run_dod_audit; then
4660
6396
  all_passed=false
4661
6397
  fi
4662
6398
  fi
4663
6399
 
6400
+ # 6b. Security Source Scan
6401
+ local _sec_intensity
6402
+ _sec_intensity=$(echo "$audit_plan" | jq -r '.security // "targeted"' 2>/dev/null || echo "targeted")
6403
+ if [[ "$_sec_intensity" != "off" ]]; then
6404
+ echo ""
6405
+ info "Running security source scan (${_sec_intensity})..."
6406
+ audits_run_list="${audits_run_list:+${audits_run_list},}security"
6407
+ local sec_finding_count=0
6408
+ sec_finding_count=$(pipeline_security_source_scan 2>/dev/null) || true
6409
+ sec_finding_count="${sec_finding_count:-0}"
6410
+ if [[ "$sec_finding_count" -gt 0 ]]; then
6411
+ warn "Security source scan: ${sec_finding_count} finding(s)"
6412
+ total_critical=$((total_critical + sec_finding_count))
6413
+ all_passed=false
6414
+ else
6415
+ success "Security source scan: clean"
6416
+ fi
6417
+ fi
6418
+
4664
6419
  # 7. Multi-dimensional quality checks
4665
6420
  echo ""
4666
6421
  info "Running multi-dimensional quality checks..."
@@ -4743,6 +6498,17 @@ All quality checks clean:
4743
6498
  fi
4744
6499
 
4745
6500
  log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
6501
+
6502
+ # DoD verification on successful pass
6503
+ local _dod_pass_rate=100
6504
+ if type pipeline_verify_dod &>/dev/null 2>&1; then
6505
+ pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
6506
+ if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
6507
+ _dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
6508
+ fi
6509
+ fi
6510
+
6511
+ pipeline_record_quality_score 100 0 0 0 "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
4746
6512
  return 0
4747
6513
  fi
4748
6514
 
@@ -4754,6 +6520,17 @@ All quality checks clean:
4754
6520
  fi
4755
6521
 
4756
6522
  log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
6523
+
6524
+ # DoD verification on successful pass
6525
+ local _dod_pass_rate=100
6526
+ if type pipeline_verify_dod &>/dev/null 2>&1; then
6527
+ pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
6528
+ if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
6529
+ _dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
6530
+ fi
6531
+ fi
6532
+
6533
+ pipeline_record_quality_score 95 0 "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
4757
6534
  return 0
4758
6535
  fi
4759
6536
 
@@ -4776,6 +6553,16 @@ All quality checks clean:
4776
6553
 
4777
6554
  info "Convergence: ${current_issue_count} critical/high issues remaining"
4778
6555
 
6556
+ # Intelligence: re-evaluate adaptive cycle limit based on convergence (only after first cycle)
6557
+ if [[ "$prev_issue_count" -ge 0 ]]; then
6558
+ local updated_limit
6559
+ updated_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "$current_issue_count" "$prev_issue_count" 2>/dev/null) || true
6560
+ if [[ -n "$updated_limit" && "$updated_limit" =~ ^[0-9]+$ && "$updated_limit" -gt 0 && "$updated_limit" != "$max_cycles" ]]; then
6561
+ info "Adaptive cycles: ${max_cycles} → ${updated_limit} (convergence signal)"
6562
+ max_cycles="$updated_limit"
6563
+ fi
6564
+ fi
6565
+
4779
6566
  # Not all passed — rebuild if we have cycles left
4780
6567
  if [[ "$cycle" -lt "$max_cycles" ]]; then
4781
6568
  warn "Quality checks failed — rebuilding with feedback (cycle $((cycle + 1))/${max_cycles})"
@@ -4792,8 +6579,146 @@ All quality checks clean:
4792
6579
  fi
4793
6580
  done
4794
6581
 
4795
- # Exhausted all cycles
4796
- error "Compound quality exhausted after ${max_cycles} cycles"
6582
+ # ── Quality Score Computation ──
6583
+ # Starting score: 100, deductions based on findings
6584
+ local quality_score=100
6585
+
6586
+ # Count findings from artifact files
6587
+ if [[ -f "$ARTIFACTS_DIR/security-source-scan.json" ]]; then
6588
+ local _sec_critical
6589
+ _sec_critical=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
6590
+ local _sec_major
6591
+ _sec_major=$(jq '[.[] | select(.severity == "major")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
6592
+ total_critical=$((total_critical + ${_sec_critical:-0}))
6593
+ total_major=$((total_major + ${_sec_major:-0}))
6594
+ fi
6595
+ if [[ -f "$ARTIFACTS_DIR/adversarial-review.json" ]]; then
6596
+ local _adv_crit
6597
+ _adv_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
6598
+ local _adv_major
6599
+ _adv_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
6600
+ local _adv_minor
6601
+ _adv_minor=$(jq '[.[] | select(.severity == "low" or .severity == "minor")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
6602
+ total_critical=$((total_critical + ${_adv_crit:-0}))
6603
+ total_major=$((total_major + ${_adv_major:-0}))
6604
+ total_minor=$((total_minor + ${_adv_minor:-0}))
6605
+ fi
6606
+ if [[ -f "$ARTIFACTS_DIR/compound-architecture-validation.json" ]]; then
6607
+ local _arch_crit
6608
+ _arch_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
6609
+ local _arch_major
6610
+ _arch_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
6611
+ total_major=$((total_major + ${_arch_crit:-0} + ${_arch_major:-0}))
6612
+ fi
6613
+
6614
+ # Apply deductions
6615
+ quality_score=$((quality_score - (total_critical * 20) - (total_major * 10) - (total_minor * 2)))
6616
+ [[ "$quality_score" -lt 0 ]] && quality_score=0
6617
+
6618
+ # DoD verification
6619
+ local _dod_pass_rate=0
6620
+ if type pipeline_verify_dod &>/dev/null 2>&1; then
6621
+ pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
6622
+ if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
6623
+ _dod_pass_rate=$(jq -r '.pass_rate // 0' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "0")
6624
+ fi
6625
+ fi
6626
+
6627
+ # Record quality score
6628
+ pipeline_record_quality_score "$quality_score" "$total_critical" "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
6629
+
6630
+ # ── Quality Gate (HARDENED) ──
6631
+ local compound_quality_blocking
6632
+ compound_quality_blocking=$(jq -r --arg id "compound_quality" \
6633
+ '(.stages[] | select(.id == $id) | .config.compound_quality_blocking) // true' \
6634
+ "$PIPELINE_CONFIG" 2>/dev/null) || true
6635
+ [[ -z "$compound_quality_blocking" || "$compound_quality_blocking" == "null" ]] && compound_quality_blocking="true"
6636
+
6637
+ # HARDENED THRESHOLD: quality_score must be >= 60 to pass
6638
+ # In strict mode, higher requirements apply per the hardened checks above
6639
+ local min_threshold=60
6640
+ if [[ "$strict_quality" == "true" ]]; then
6641
+ # Strict mode: require score >= 70 and ZERO critical issues
6642
+ if [[ "$total_critical" -gt 0 ]]; then
6643
+ error "STRICT QUALITY: ${total_critical} critical issue(s) found — BLOCKING (strict mode)"
6644
+ emit_event "pipeline.quality_gate_failed_strict" \
6645
+ "issue=${ISSUE_NUMBER:-0}" \
6646
+ "reason=critical_issues" \
6647
+ "critical=$total_critical"
6648
+ log_stage "compound_quality" "Quality gate failed (strict mode): critical issues"
6649
+ return 1
6650
+ fi
6651
+ min_threshold=70
6652
+ fi
6653
+
6654
+ # Hard floor: score must be >= 40, regardless of other settings
6655
+ if [[ "$quality_score" -lt 40 ]]; then
6656
+ error "HARDENED GATE: Quality score ${quality_score}/100 below hard floor (40) — BLOCKING"
6657
+ emit_event "quality.hard_floor_failed" \
6658
+ "issue=${ISSUE_NUMBER:-0}" \
6659
+ "quality_score=$quality_score"
6660
+ log_stage "compound_quality" "Quality gate failed: score below hard floor (40)"
6661
+ return 1
6662
+ fi
6663
+
6664
+ if [[ "$quality_score" -lt "$min_threshold" && "$compound_quality_blocking" == "true" ]]; then
6665
+ emit_event "pipeline.quality_gate_failed" \
6666
+ "issue=${ISSUE_NUMBER:-0}" \
6667
+ "quality_score=$quality_score" \
6668
+ "threshold=$min_threshold" \
6669
+ "critical=$total_critical" \
6670
+ "major=$total_major"
6671
+
6672
+ error "Quality gate FAILED: score ${quality_score}/100 (threshold: ${min_threshold}/100, critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
6673
+
6674
+ if [[ -n "$ISSUE_NUMBER" ]]; then
6675
+ gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/${min_threshold}
6676
+
6677
+ | Finding Type | Count | Deduction |
6678
+ |---|---|---|
6679
+ | Critical | ${total_critical} | -$((total_critical * 20)) |
6680
+ | Major | ${total_major} | -$((total_major * 10)) |
6681
+ | Minor | ${total_minor} | -$((total_minor * 2)) |
6682
+
6683
+ DoD pass rate: ${_dod_pass_rate}%
6684
+ Quality issues remain after ${max_cycles} cycles. Check artifacts for details." 2>/dev/null || true
6685
+ fi
6686
+
6687
+ log_stage "compound_quality" "Quality gate failed: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6688
+ return 1
6689
+ fi
6690
+
6691
+ # Exhausted all cycles but quality score is at or above threshold
6692
+ if [[ "$quality_score" -ge "$min_threshold" ]]; then
6693
+ if [[ "$quality_score" -eq 100 ]]; then
6694
+ success "Compound quality PERFECT: 100/100"
6695
+ elif [[ "$quality_score" -ge 80 ]]; then
6696
+ success "Compound quality EXCELLENT: ${quality_score}/100"
6697
+ elif [[ "$quality_score" -ge 70 ]]; then
6698
+ success "Compound quality GOOD: ${quality_score}/100"
6699
+ else
6700
+ warn "Compound quality ACCEPTABLE: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6701
+ fi
6702
+
6703
+ if [[ -n "$ISSUE_NUMBER" ]]; then
6704
+ local quality_emoji="✅"
6705
+ [[ "$quality_score" -lt 70 ]] && quality_emoji="⚠️"
6706
+ gh_comment_issue "$ISSUE_NUMBER" "${quality_emoji} **Compound quality passed** — score ${quality_score}/${min_threshold} after ${max_cycles} cycles
6707
+
6708
+ | Finding Type | Count |
6709
+ |---|---|
6710
+ | Critical | ${total_critical} |
6711
+ | Major | ${total_major} |
6712
+ | Minor | ${total_minor} |
6713
+
6714
+ DoD pass rate: ${_dod_pass_rate}%" 2>/dev/null || true
6715
+ fi
6716
+
6717
+ log_stage "compound_quality" "Passed with score ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6718
+ return 0
6719
+ fi
6720
+
6721
+ error "Compound quality exhausted after ${max_cycles} cycles with insufficient score"
4797
6722
 
4798
6723
  if [[ -n "$ISSUE_NUMBER" ]]; then
4799
6724
  gh_comment_issue "$ISSUE_NUMBER" "❌ **Compound quality failed** after ${max_cycles} cycles
@@ -4982,8 +6907,25 @@ self_healing_build_test() {
4982
6907
  local prev_error_sig="" consecutive_same_error=0
4983
6908
  local prev_fail_count=0 zero_convergence_streak=0
4984
6909
 
4985
- # Intelligence: adaptive iteration limit
4986
- if type composer_estimate_iterations &>/dev/null 2>&1; then
6910
+ # Vitals-driven adaptive limit (preferred over static BUILD_TEST_RETRIES)
6911
+ if type pipeline_adaptive_limit &>/dev/null 2>&1; then
6912
+ local _vitals_json=""
6913
+ if type pipeline_compute_vitals &>/dev/null 2>&1; then
6914
+ _vitals_json=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
6915
+ fi
6916
+ local vitals_limit
6917
+ vitals_limit=$(pipeline_adaptive_limit "build_test" "$_vitals_json" 2>/dev/null) || true
6918
+ if [[ -n "$vitals_limit" && "$vitals_limit" =~ ^[0-9]+$ && "$vitals_limit" -gt 0 ]]; then
6919
+ info "Vitals-driven build-test limit: ${max_cycles} → ${vitals_limit}"
6920
+ max_cycles="$vitals_limit"
6921
+ emit_event "vitals.adaptive_limit" \
6922
+ "issue=${ISSUE_NUMBER:-0}" \
6923
+ "context=build_test" \
6924
+ "original=$BUILD_TEST_RETRIES" \
6925
+ "vitals_limit=$vitals_limit"
6926
+ fi
6927
+ # Fallback: intelligence-based adaptive limits
6928
+ elif type composer_estimate_iterations &>/dev/null 2>&1; then
4987
6929
  local estimated
4988
6930
  estimated=$(composer_estimate_iterations \
4989
6931
  "${INTELLIGENCE_ANALYSIS:-{}}" \
@@ -4997,6 +6939,19 @@ self_healing_build_test() {
4997
6939
  fi
4998
6940
  fi
4999
6941
 
6942
+ # Fallback: adaptive cycle limits from optimization data
6943
+ if [[ "$max_cycles" == "$BUILD_TEST_RETRIES" ]]; then
6944
+ local _iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
6945
+ if [[ -f "$_iter_model" ]]; then
6946
+ local adaptive_bt_limit
6947
+ adaptive_bt_limit=$(pipeline_adaptive_cycles "$max_cycles" "build_test" "0" "-1" 2>/dev/null) || true
6948
+ if [[ -n "$adaptive_bt_limit" && "$adaptive_bt_limit" =~ ^[0-9]+$ && "$adaptive_bt_limit" -gt 0 && "$adaptive_bt_limit" != "$max_cycles" ]]; then
6949
+ info "Adaptive build-test cycles: ${max_cycles} → ${adaptive_bt_limit}"
6950
+ max_cycles="$adaptive_bt_limit"
6951
+ fi
6952
+ fi
6953
+ fi
6954
+
5000
6955
  while [[ "$cycle" -le "$max_cycles" ]]; do
5001
6956
  cycle=$((cycle + 1))
5002
6957
 
@@ -5022,11 +6977,27 @@ self_healing_build_test() {
5022
6977
 
5023
6978
  # Inject error context on retry cycles
5024
6979
  if [[ "$cycle" -gt 1 && -n "$last_test_error" ]]; then
6980
+ # Query memory for known fixes
6981
+ local _memory_fix=""
6982
+ if type memory_closed_loop_inject &>/dev/null 2>&1; then
6983
+ local _error_sig_short
6984
+ _error_sig_short=$(echo "$last_test_error" | head -3 || echo "")
6985
+ _memory_fix=$(memory_closed_loop_inject "$_error_sig_short" 2>/dev/null) || true
6986
+ fi
6987
+
6988
+ local memory_prefix=""
6989
+ if [[ -n "$_memory_fix" ]]; then
6990
+ info "Memory suggests fix: $(echo "$_memory_fix" | head -1)"
6991
+ memory_prefix="KNOWN FIX (from past success): ${_memory_fix}
6992
+
6993
+ "
6994
+ fi
6995
+
5025
6996
  # Temporarily augment the goal with error context
5026
6997
  local original_goal="$GOAL"
5027
6998
  GOAL="$GOAL
5028
6999
 
5029
- IMPORTANT — Previous build attempt failed tests. Fix these errors:
7000
+ ${memory_prefix}IMPORTANT — Previous build attempt failed tests. Fix these errors:
5030
7001
  $last_test_error
5031
7002
 
5032
7003
  Focus on fixing the failing tests while keeping all passing tests working."
@@ -5039,6 +7010,16 @@ Focus on fixing the failing tests while keeping all passing tests working."
5039
7010
  local timing
5040
7011
  timing=$(get_stage_timing "build")
5041
7012
  success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
7013
+ if type pipeline_emit_progress_snapshot &>/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
7014
+ local _diff_count
7015
+ _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
7016
+ local _snap_files _snap_error
7017
+ _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
7018
+ _snap_files="${_snap_files:-0}"
7019
+ _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
7020
+ _snap_error="${_snap_error:-}"
7021
+ pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
7022
+ fi
5042
7023
  else
5043
7024
  mark_stage_failed "build"
5044
7025
  GOAL="$original_goal"
@@ -5054,6 +7035,16 @@ Focus on fixing the failing tests while keeping all passing tests working."
5054
7035
  local timing
5055
7036
  timing=$(get_stage_timing "build")
5056
7037
  success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
7038
+ if type pipeline_emit_progress_snapshot &>/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
7039
+ local _diff_count
7040
+ _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
7041
+ local _snap_files _snap_error
7042
+ _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
7043
+ _snap_files="${_snap_files:-0}"
7044
+ _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
7045
+ _snap_error="${_snap_error:-}"
7046
+ pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
7047
+ fi
5057
7048
  else
5058
7049
  mark_stage_failed "build"
5059
7050
  return 1
@@ -5075,6 +7066,16 @@ Focus on fixing the failing tests while keeping all passing tests working."
5075
7066
  emit_event "convergence.tests_passed" \
5076
7067
  "issue=${ISSUE_NUMBER:-0}" \
5077
7068
  "cycle=$cycle"
7069
+ if type pipeline_emit_progress_snapshot &>/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
7070
+ local _diff_count
7071
+ _diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
7072
+ local _snap_files _snap_error
7073
+ _snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
7074
+ _snap_files="${_snap_files:-0}"
7075
+ _snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
7076
+ _snap_error="${_snap_error:-}"
7077
+ pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-test}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
7078
+ fi
5078
7079
  return 0 # Tests passed!
5079
7080
  fi
5080
7081
 
@@ -5243,6 +7244,16 @@ run_pipeline() {
5243
7244
  continue
5244
7245
  fi
5245
7246
 
7247
+ # Intelligence: evaluate whether to skip this stage
7248
+ local skip_reason=""
7249
+ skip_reason=$(pipeline_should_skip_stage "$id" 2>/dev/null) || true
7250
+ if [[ -n "$skip_reason" ]]; then
7251
+ echo -e " ${DIM}○ ${id} — skipped (intelligence: ${skip_reason})${RESET}"
7252
+ set_stage_status "$id" "complete"
7253
+ completed=$((completed + 1))
7254
+ continue
7255
+ fi
7256
+
5246
7257
  local stage_status
5247
7258
  stage_status=$(get_stage_status "$id")
5248
7259
  if [[ "$stage_status" == "complete" ]]; then
@@ -5253,11 +7264,17 @@ run_pipeline() {
5253
7264
 
5254
7265
  # CI resume: skip stages marked as completed from previous run
5255
7266
  if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "$id"; then
5256
- echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}skipped (CI resume)${RESET}"
5257
- set_stage_status "$id" "complete"
5258
- completed=$((completed + 1))
5259
- emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
5260
- continue
7267
+ # Verify artifacts survived the merge regenerate if missing
7268
+ if verify_stage_artifacts "$id"; then
7269
+ echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— skipped (CI resume)${RESET}"
7270
+ set_stage_status "$id" "complete"
7271
+ completed=$((completed + 1))
7272
+ emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
7273
+ continue
7274
+ else
7275
+ warn "Stage $id marked complete but artifacts missing — regenerating"
7276
+ emit_event "stage.artifact_miss" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
7277
+ fi
5261
7278
  fi
5262
7279
 
5263
7280
  # Self-healing build→test loop: when we hit build, run both together
@@ -5278,6 +7295,13 @@ run_pipeline() {
5278
7295
 
5279
7296
  if self_healing_build_test; then
5280
7297
  completed=$((completed + 2)) # Both build and test
7298
+
7299
+ # Intelligence: reassess complexity after build+test
7300
+ local reassessment
7301
+ reassessment=$(pipeline_reassess_complexity 2>/dev/null) || true
7302
+ if [[ -n "$reassessment" && "$reassessment" != "as_expected" ]]; then
7303
+ info "Complexity reassessment: ${reassessment}"
7304
+ fi
5281
7305
  else
5282
7306
  update_status "failed" "test"
5283
7307
  error "Pipeline failed: build→test self-healing exhausted"
@@ -5418,6 +7442,8 @@ run_pipeline() {
5418
7442
  emit_event "stage.failed" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "duration_s=$stage_dur_s"
5419
7443
  # Log model used for prediction feedback
5420
7444
  echo "${id}|${stage_model_used}|false" >> "${ARTIFACTS_DIR}/model-routing.log"
7445
+ # Cancel any remaining in_progress check runs
7446
+ pipeline_cancel_check_runs 2>/dev/null || true
5421
7447
  return 1
5422
7448
  fi
5423
7449
  done 3<<< "$stages"
@@ -5456,6 +7482,81 @@ run_pipeline() {
5456
7482
  body=$(gh_build_progress_body)
5457
7483
  gh_update_progress "$body"
5458
7484
  fi
7485
+
7486
+ # Post-completion cleanup
7487
+ pipeline_post_completion_cleanup
7488
+ }
7489
+
7490
# ─── Post-Completion Cleanup ──────────────────────────────────────────────
# Cleans up transient artifacts after a successful pipeline run:
#   1. per-stage checkpoint files (only needed to resume an aborted run),
#   2. per-run intelligence artifacts,
#   3. resets the state file's status to "idle" so the next run starts clean.
# Globals: ARTIFACTS_DIR (read), STATE_FILE (read/write), ISSUE_NUMBER (read)
# Emits a "pipeline.cleanup" event when anything was removed.
pipeline_post_completion_cleanup() {
  local cleaned=0

  # 1. Clear checkpoints (they only matter for resume; pipeline is done)
  if [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
    local cp_file
    for cp_file in "${ARTIFACTS_DIR}/checkpoints"/*-checkpoint.json; do
      [[ -f "$cp_file" ]] || continue
      rm -f -- "$cp_file"
      cleaned=$((cleaned + 1))
    done
  fi

  # 2. Clear per-run intelligence artifacts (not needed after completion)
  local intel_files=(
    "${ARTIFACTS_DIR}/classified-findings.json"
    "${ARTIFACTS_DIR}/reassessment.json"
    "${ARTIFACTS_DIR}/skip-stage.txt"
    "${ARTIFACTS_DIR}/human-message.txt"
  )
  local f
  for f in "${intel_files[@]}"; do
    if [[ -f "$f" ]]; then
      rm -f -- "$f"
      cleaned=$((cleaned + 1))
    fi
  done

  # 3. Clear stale pipeline state (mark as idle so next run starts clean).
  # BUG FIX: the original ran `sed … || true` and then unconditionally moved
  # the temp file over STATE_FILE — a failing sed (or mktemp) would replace
  # the state file with an empty one. Only swap the file in when both steps
  # succeeded; otherwise leave STATE_FILE untouched.
  if [[ -f "$STATE_FILE" ]]; then
    local tmp_state
    if tmp_state=$(mktemp); then
      if sed 's/^status: .*/status: idle/' "$STATE_FILE" > "$tmp_state" 2>/dev/null; then
        mv -- "$tmp_state" "$STATE_FILE"
      else
        rm -f -- "$tmp_state"
      fi
    fi
  fi

  if [[ "$cleaned" -gt 0 ]]; then
    emit_event "pipeline.cleanup" \
      "issue=${ISSUE_NUMBER:-0}" \
      "cleaned=$cleaned" \
      "type=post_completion"
  fi
}
7541
+
7542
# Cancel any lingering in_progress GitHub Check Runs (called on abort/interrupt).
# Quietly does nothing when GitHub integration is disabled, when the
# gh_checks_stage_update helper is not loaded, or when no check-run-ids file
# was recorded for this run. Per-stage failures are ignored (best effort).
pipeline_cancel_check_runs() {
  # Bail out early when there is nothing to cancel or no way to cancel it.
  if [[ "${NO_GITHUB:-false}" == "true" ]] || ! type gh_checks_stage_update &>/dev/null 2>&1; then
    return
  fi

  local ids_file="${ARTIFACTS_DIR:-/dev/null}/check-run-ids.json"
  [[ -f "$ids_file" ]] || return

  # The ids file maps stage name -> check-run id; mark each stage cancelled.
  local stage_name
  while IFS= read -r stage_name; do
    [[ -n "$stage_name" ]] || continue
    gh_checks_stage_update "$stage_name" "completed" "cancelled" "Pipeline interrupted" 2>/dev/null || true
  done < <(jq -r 'keys[]' "$ids_file" 2>/dev/null || true)
}
5460
7561
 
5461
7562
  # ─── Worktree Isolation ───────────────────────────────────────────────────
@@ -5517,6 +7618,172 @@ pipeline_cleanup_worktree() {
5517
7618
  fi
5518
7619
  }
5519
7620
 
7621
# ─── Dry Run Mode ───────────────────────────────────────────────────────────
# Shows what would happen without executing: validates the pipeline config,
# prints the stage table (enabled / gate / model), checks required and
# optional tools, and prints a rough token/cost estimate.
# Globals (read): PIPELINE_CONFIG, MODEL, SKIP_GATES, COST_MODEL_RATES,
#                 ARTIFACTS_DIR, color variables (BLUE, BOLD, RESET, ...)
# Returns: 0 when validation passes; 1 when the config is missing/invalid or
#          a required tool is absent.
run_dry_run() {
  echo ""
  echo -e "${BLUE}${BOLD}━━━ Dry Run: Pipeline Validation ━━━${RESET}"
  echo ""

  # Validate pipeline config
  if [[ ! -f "$PIPELINE_CONFIG" ]]; then
    error "Pipeline config not found: $PIPELINE_CONFIG"
    return 1
  fi

  # Validate JSON structure. `jq empty` parses without printing — the
  # original captured `jq .` into a variable that was never used.
  if ! jq empty "$PIPELINE_CONFIG" 2>/dev/null; then
    error "Pipeline config is not valid JSON: $PIPELINE_CONFIG"
    return 1
  fi

  # Extract pipeline metadata
  local pipeline_name stages_count enabled_stages gated_stages
  pipeline_name=$(jq -r '.name // "unknown"' "$PIPELINE_CONFIG")
  stages_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
  enabled_stages=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
  gated_stages=$(jq '[.stages[] | select(.enabled == true and .gate == "approve")] | length' "$PIPELINE_CONFIG")

  # Build model (per-stage override or default)
  local default_model stage_model
  default_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")
  stage_model="$MODEL"
  if [[ -z "$stage_model" ]]; then
    stage_model="$default_model"
  fi

  echo -e " ${BOLD}Pipeline:${RESET} $pipeline_name"
  echo -e " ${BOLD}Stages:${RESET} $enabled_stages enabled of $stages_count total"
  if [[ "$SKIP_GATES" == "true" ]]; then
    echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
  else
    echo -e " ${BOLD}Gates:${RESET} $gated_stages approval gate(s)"
  fi
  echo -e " ${BOLD}Model:${RESET} $stage_model"
  echo ""

  # Table header
  echo -e "${CYAN}${BOLD}Stage Enabled Gate Model${RESET}"
  echo -e "${CYAN}────────────────────────────────────────${RESET}"

  # List all stages. `stage_json` is local — the original leaked it into the
  # caller's scope.
  local stage_json
  while IFS= read -r stage_json; do
    local stage_id stage_enabled stage_gate stage_config_model stage_model_display
    stage_id=$(echo "$stage_json" | jq -r '.id')
    stage_enabled=$(echo "$stage_json" | jq -r '.enabled')
    stage_gate=$(echo "$stage_json" | jq -r '.gate')

    # Determine stage model (config override or default)
    stage_config_model=$(echo "$stage_json" | jq -r '.config.model // ""')
    if [[ -n "$stage_config_model" && "$stage_config_model" != "null" ]]; then
      stage_model_display="$stage_config_model"
    else
      stage_model_display="$default_model"
    fi

    # Format enabled
    local enabled_str
    if [[ "$stage_enabled" == "true" ]]; then
      enabled_str="${GREEN}yes${RESET}"
    else
      enabled_str="${DIM}no${RESET}"
    fi

    # Format gate
    local gate_str
    if [[ "$stage_enabled" == "true" ]]; then
      if [[ "$stage_gate" == "approve" ]]; then
        gate_str="${YELLOW}approve${RESET}"
      else
        gate_str="${GREEN}auto${RESET}"
      fi
    else
      gate_str="${DIM}—${RESET}"
    fi

    printf "%-15s %s %s %s\n" "$stage_id" "$enabled_str" "$gate_str" "$stage_model_display"
  done < <(jq -c '.stages[]' "$PIPELINE_CONFIG")

  echo ""

  # Validate required tools
  echo -e "${BLUE}${BOLD}━━━ Tool Validation ━━━${RESET}"
  echo ""

  local tool_errors=0
  local required_tools=("git" "jq")
  local optional_tools=("gh" "claude" "bc")

  # `tool` is local — the original leaked the loop variable into the caller.
  local tool
  for tool in "${required_tools[@]}"; do
    if command -v "$tool" &>/dev/null; then
      echo -e " ${GREEN}✓${RESET} $tool"
    else
      echo -e " ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
      tool_errors=$((tool_errors + 1))
    fi
  done

  for tool in "${optional_tools[@]}"; do
    if command -v "$tool" &>/dev/null; then
      echo -e " ${GREEN}✓${RESET} $tool"
    else
      echo -e " ${DIM}○${RESET} $tool"
    fi
  done

  echo ""

  # Cost estimation (rough approximation)
  echo -e "${BLUE}${BOLD}━━━ Estimated Resource Usage ━━━${RESET}"
  echo ""

  # Very rough cost estimation: ~2000 input tokens per stage, ~3000 output tokens
  # Adjust based on pipeline complexity
  local input_tokens_estimate output_tokens_estimate
  input_tokens_estimate=$(( enabled_stages * 2000 ))
  output_tokens_estimate=$(( enabled_stages * 3000 ))

  # Calculate cost based on selected model. Pass the model name via --arg
  # instead of interpolating it into the jq program — a model name containing
  # jq syntax characters would previously break (or inject into) the filter.
  local input_rate output_rate input_cost output_cost total_cost
  input_rate=$(echo "$COST_MODEL_RATES" | jq -r --arg m "$stage_model" '.[$m].input // 3' 2>/dev/null || echo "3")
  output_rate=$(echo "$COST_MODEL_RATES" | jq -r --arg m "$stage_model" '.[$m].output // 15' 2>/dev/null || echo "15")

  # Cost calculation: tokens per million * rate
  input_cost=$(awk -v tokens="$input_tokens_estimate" -v rate="$input_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  output_cost=$(awk -v tokens="$output_tokens_estimate" -v rate="$output_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')

  echo -e " ${BOLD}Estimated Input Tokens:${RESET} ~$input_tokens_estimate"
  echo -e " ${BOLD}Estimated Output Tokens:${RESET} ~$output_tokens_estimate"
  echo -e " ${BOLD}Model Cost Rate:${RESET} $stage_model"
  echo -e " ${BOLD}Estimated Cost:${RESET} \$$total_cost USD (rough estimate)"
  echo ""

  # Validate composed pipeline if intelligence is enabled
  if [[ -f "$ARTIFACTS_DIR/composed-pipeline.json" ]] && type composer_validate_pipeline &>/dev/null; then
    echo -e "${BLUE}${BOLD}━━━ Intelligence-Composed Pipeline ━━━${RESET}"
    echo ""

    if composer_validate_pipeline "$(cat "$ARTIFACTS_DIR/composed-pipeline.json" 2>/dev/null || echo "")" 2>/dev/null; then
      echo -e " ${GREEN}✓${RESET} Composed pipeline is valid"
    else
      echo -e " ${YELLOW}⚠${RESET} Composed pipeline validation failed (will use template defaults)"
    fi
    echo ""
  fi

  # Final validation result
  if [[ "$tool_errors" -gt 0 ]]; then
    error "Dry run validation failed: $tool_errors required tool(s) missing"
    return 1
  fi

  success "Dry run validation passed"
  echo ""
  echo -e " To execute this pipeline: ${DIM}remove --dry-run flag${RESET}"
  echo ""
  return 0
}
+ }
7786
+
5520
7787
  # ─── Subcommands ────────────────────────────────────────────────────────────
5521
7788
 
5522
7789
  pipeline_start() {
@@ -5634,8 +7901,8 @@ pipeline_start() {
5634
7901
  echo ""
5635
7902
 
5636
7903
  if [[ "$DRY_RUN" == "true" ]]; then
5637
- info "Dry run — no stages will execute"
5638
- return 0
7904
+ run_dry_run
7905
+ return $?
5639
7906
  fi
5640
7907
 
5641
7908
  # Start background heartbeat writer
@@ -5744,6 +8011,10 @@ pipeline_start() {
5744
8011
  optimize_analyze_outcome "$STATE_FILE" 2>/dev/null || true
5745
8012
  fi
5746
8013
 
8014
+ if type memory_finalize_pipeline &>/dev/null 2>&1; then
8015
+ memory_finalize_pipeline "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
8016
+ fi
8017
+
5747
8018
  # Emit cost event
5748
8019
  local model_key="${MODEL:-sonnet}"
5749
8020
  local input_cost output_cost total_cost