shipwright-cli 1.10.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. package/README.md +221 -55
  2. package/completions/_shipwright +264 -32
  3. package/completions/shipwright.bash +118 -26
  4. package/completions/shipwright.fish +80 -2
  5. package/dashboard/server.ts +208 -0
  6. package/docs/strategy/01-market-research.md +619 -0
  7. package/docs/strategy/02-mission-and-brand.md +587 -0
  8. package/docs/strategy/03-gtm-and-roadmap.md +759 -0
  9. package/docs/strategy/QUICK-START.txt +289 -0
  10. package/docs/strategy/README.md +172 -0
  11. package/docs/tmux-research/TMUX-ARCHITECTURE.md +567 -0
  12. package/docs/tmux-research/TMUX-AUDIT.md +925 -0
  13. package/docs/tmux-research/TMUX-BEST-PRACTICES-2025-2026.md +829 -0
  14. package/docs/tmux-research/TMUX-QUICK-REFERENCE.md +543 -0
  15. package/docs/tmux-research/TMUX-RESEARCH-INDEX.md +438 -0
  16. package/package.json +4 -2
  17. package/scripts/lib/helpers.sh +7 -0
  18. package/scripts/sw +323 -2
  19. package/scripts/sw-activity.sh +500 -0
  20. package/scripts/sw-adaptive.sh +925 -0
  21. package/scripts/sw-adversarial.sh +1 -1
  22. package/scripts/sw-architecture-enforcer.sh +1 -1
  23. package/scripts/sw-auth.sh +613 -0
  24. package/scripts/sw-autonomous.sh +754 -0
  25. package/scripts/sw-changelog.sh +704 -0
  26. package/scripts/sw-checkpoint.sh +1 -1
  27. package/scripts/sw-ci.sh +602 -0
  28. package/scripts/sw-cleanup.sh +1 -1
  29. package/scripts/sw-code-review.sh +698 -0
  30. package/scripts/sw-connect.sh +1 -1
  31. package/scripts/sw-context.sh +605 -0
  32. package/scripts/sw-cost.sh +44 -3
  33. package/scripts/sw-daemon.sh +568 -138
  34. package/scripts/sw-dashboard.sh +1 -1
  35. package/scripts/sw-db.sh +1380 -0
  36. package/scripts/sw-decompose.sh +539 -0
  37. package/scripts/sw-deps.sh +551 -0
  38. package/scripts/sw-developer-simulation.sh +1 -1
  39. package/scripts/sw-discovery.sh +412 -0
  40. package/scripts/sw-docs-agent.sh +539 -0
  41. package/scripts/sw-docs.sh +1 -1
  42. package/scripts/sw-doctor.sh +107 -1
  43. package/scripts/sw-dora.sh +615 -0
  44. package/scripts/sw-durable.sh +710 -0
  45. package/scripts/sw-e2e-orchestrator.sh +535 -0
  46. package/scripts/sw-eventbus.sh +393 -0
  47. package/scripts/sw-feedback.sh +479 -0
  48. package/scripts/sw-fix.sh +1 -1
  49. package/scripts/sw-fleet-discover.sh +567 -0
  50. package/scripts/sw-fleet-viz.sh +404 -0
  51. package/scripts/sw-fleet.sh +8 -1
  52. package/scripts/sw-github-app.sh +596 -0
  53. package/scripts/sw-github-checks.sh +4 -4
  54. package/scripts/sw-github-deploy.sh +1 -1
  55. package/scripts/sw-github-graphql.sh +1 -1
  56. package/scripts/sw-guild.sh +569 -0
  57. package/scripts/sw-heartbeat.sh +1 -1
  58. package/scripts/sw-hygiene.sh +559 -0
  59. package/scripts/sw-incident.sh +656 -0
  60. package/scripts/sw-init.sh +237 -24
  61. package/scripts/sw-instrument.sh +699 -0
  62. package/scripts/sw-intelligence.sh +1 -1
  63. package/scripts/sw-jira.sh +1 -1
  64. package/scripts/sw-launchd.sh +363 -28
  65. package/scripts/sw-linear.sh +1 -1
  66. package/scripts/sw-logs.sh +1 -1
  67. package/scripts/sw-loop.sh +267 -21
  68. package/scripts/sw-memory.sh +18 -1
  69. package/scripts/sw-mission-control.sh +487 -0
  70. package/scripts/sw-model-router.sh +545 -0
  71. package/scripts/sw-otel.sh +596 -0
  72. package/scripts/sw-oversight.sh +764 -0
  73. package/scripts/sw-pipeline-composer.sh +1 -1
  74. package/scripts/sw-pipeline-vitals.sh +1 -1
  75. package/scripts/sw-pipeline.sh +947 -35
  76. package/scripts/sw-pm.sh +758 -0
  77. package/scripts/sw-pr-lifecycle.sh +522 -0
  78. package/scripts/sw-predictive.sh +8 -1
  79. package/scripts/sw-prep.sh +1 -1
  80. package/scripts/sw-ps.sh +1 -1
  81. package/scripts/sw-public-dashboard.sh +798 -0
  82. package/scripts/sw-quality.sh +595 -0
  83. package/scripts/sw-reaper.sh +1 -1
  84. package/scripts/sw-recruit.sh +2248 -0
  85. package/scripts/sw-regression.sh +642 -0
  86. package/scripts/sw-release-manager.sh +736 -0
  87. package/scripts/sw-release.sh +706 -0
  88. package/scripts/sw-remote.sh +1 -1
  89. package/scripts/sw-replay.sh +520 -0
  90. package/scripts/sw-retro.sh +691 -0
  91. package/scripts/sw-scale.sh +444 -0
  92. package/scripts/sw-security-audit.sh +505 -0
  93. package/scripts/sw-self-optimize.sh +1 -1
  94. package/scripts/sw-session.sh +1 -1
  95. package/scripts/sw-setup.sh +263 -127
  96. package/scripts/sw-standup.sh +712 -0
  97. package/scripts/sw-status.sh +44 -2
  98. package/scripts/sw-strategic.sh +806 -0
  99. package/scripts/sw-stream.sh +450 -0
  100. package/scripts/sw-swarm.sh +620 -0
  101. package/scripts/sw-team-stages.sh +511 -0
  102. package/scripts/sw-templates.sh +4 -4
  103. package/scripts/sw-testgen.sh +566 -0
  104. package/scripts/sw-tmux-pipeline.sh +554 -0
  105. package/scripts/sw-tmux-role-color.sh +58 -0
  106. package/scripts/sw-tmux-status.sh +128 -0
  107. package/scripts/sw-tmux.sh +1 -1
  108. package/scripts/sw-trace.sh +485 -0
  109. package/scripts/sw-tracker-github.sh +188 -0
  110. package/scripts/sw-tracker-jira.sh +172 -0
  111. package/scripts/sw-tracker-linear.sh +251 -0
  112. package/scripts/sw-tracker.sh +117 -2
  113. package/scripts/sw-triage.sh +627 -0
  114. package/scripts/sw-upgrade.sh +1 -1
  115. package/scripts/sw-ux.sh +677 -0
  116. package/scripts/sw-webhook.sh +627 -0
  117. package/scripts/sw-widgets.sh +530 -0
  118. package/scripts/sw-worktree.sh +1 -1
  119. package/templates/pipelines/autonomous.json +2 -2
  120. package/tmux/shipwright-overlay.conf +35 -17
  121. package/tmux/tmux.conf +23 -21
@@ -6,7 +6,12 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="1.10.0"
9
+ # Allow spawning Claude CLI from within a Claude Code session (daemon, fleet, etc.)
10
+ unset CLAUDECODE 2>/dev/null || true
11
+ # Ignore SIGHUP so tmux attach/detach doesn't kill long-running plan/design/review stages
12
+ trap '' HUP
13
+
14
+ VERSION="2.1.0"
10
15
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
16
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
17
 
@@ -197,6 +202,7 @@ PIPELINE_CONFIG=""
197
202
  TEST_CMD=""
198
203
  MODEL=""
199
204
  AGENTS=""
205
+ PIPELINE_AGENT_ID="${PIPELINE_AGENT_ID:-pipeline-$$}"
200
206
  SKIP_GATES=false
201
207
  GIT_BRANCH=""
202
208
  GITHUB_ISSUE=""
@@ -218,6 +224,7 @@ AUTO_WORKTREE=false
218
224
  WORKTREE_NAME=""
219
225
  CLEANUP_WORKTREE=false
220
226
  ORIGINAL_REPO_DIR=""
227
+ REPO_OVERRIDE=""
221
228
  _cleanup_done=""
222
229
 
223
230
  # GitHub metadata (populated during intake)
@@ -259,6 +266,8 @@ show_help() {
259
266
  echo -e "${BOLD}START OPTIONS${RESET}"
260
267
  echo -e " ${DIM}--goal \"description\"${RESET} What to build (required unless --issue)"
261
268
  echo -e " ${DIM}--issue <number>${RESET} Fetch goal from GitHub issue"
269
+ echo -e " ${DIM}--repo <path>${RESET} Change to directory before running (must be a git repo)"
270
+ echo -e " ${DIM}--local${RESET} Alias for --no-github --no-github-label (local-only mode)"
262
271
  echo -e " ${DIM}--pipeline <name>${RESET} Pipeline template (default: standard)"
263
272
  echo -e " ${DIM}--test-cmd \"command\"${RESET} Override test command (auto-detected if omitted)"
264
273
  echo -e " ${DIM}--model <model>${RESET} Override AI model (opus, sonnet, haiku)"
@@ -341,6 +350,8 @@ parse_args() {
341
350
  case "$1" in
342
351
  --goal) GOAL="$2"; shift 2 ;;
343
352
  --issue) ISSUE_NUMBER="$2"; shift 2 ;;
353
+ --repo) REPO_OVERRIDE="$2"; shift 2 ;;
354
+ --local) NO_GITHUB=true; NO_GITHUB_LABEL=true; shift ;;
344
355
  --pipeline|--template) PIPELINE_NAME="$2"; shift 2 ;;
345
356
  --test-cmd) TEST_CMD="$2"; shift 2 ;;
346
357
  --model) MODEL="$2"; shift 2 ;;
@@ -391,6 +402,7 @@ setup_dirs() {
391
402
  ARTIFACTS_DIR="$STATE_DIR/pipeline-artifacts"
392
403
  TASKS_FILE="$STATE_DIR/pipeline-tasks.md"
393
404
  mkdir -p "$STATE_DIR" "$ARTIFACTS_DIR"
405
+ export SHIPWRIGHT_PIPELINE_ID="pipeline-$$-${ISSUE_NUMBER:-0}"
394
406
  }
395
407
 
396
408
  # ─── Pipeline Config Loading ───────────────────────────────────────────────
@@ -1150,6 +1162,21 @@ get_stage_timing() {
1150
1162
  fi
1151
1163
  }
1152
1164
 
1165
# Raw seconds for a stage (for memory baseline updates).
#
# Args:    $1 - stage id
# Globals: STAGE_TIMINGS (read) — newline-separated records of the form
#          "<stage>_start:<epoch>" and "<stage>_end:<epoch>"
# Outputs: elapsed seconds between start and end; elapsed-so-far (via
#          now_epoch) when the stage started but has not finished; "0"
#          when the stage never started.
get_stage_timing_seconds() {
  local stage_id="$1"
  local start_e end_e
  # Here-strings avoid the extra `echo |` process; `tail -n 1` is the
  # POSIX-portable spelling (bare `tail -1` is obsolescent).
  start_e=$(grep "^${stage_id}_start:" <<<"$STAGE_TIMINGS" | cut -d: -f2 | tail -n 1 || true)
  end_e=$(grep "^${stage_id}_end:" <<<"$STAGE_TIMINGS" | cut -d: -f2 | tail -n 1 || true)
  if [[ -n "$start_e" && -n "$end_e" ]]; then
    echo $(( end_e - start_e ))
  elif [[ -n "$start_e" ]]; then
    echo $(( $(now_epoch) - start_e ))
  else
    echo "0"
  fi
}
1179
+
1153
1180
  get_stage_description() {
1154
1181
  local stage_id="$1"
1155
1182
 
@@ -1245,6 +1272,22 @@ mark_stage_complete() {
1245
1272
  log_stage "$stage_id" "complete (${timing})"
1246
1273
  write_state
1247
1274
 
1275
+ record_stage_effectiveness "$stage_id" "complete"
1276
+ # Update memory baselines and predictive baselines for stage durations
1277
+ if [[ "$stage_id" == "test" || "$stage_id" == "build" ]]; then
1278
+ local secs
1279
+ secs=$(get_stage_timing_seconds "$stage_id")
1280
+ if [[ -n "$secs" && "$secs" != "0" ]]; then
1281
+ [[ -x "$SCRIPT_DIR/sw-memory.sh" ]] && bash "$SCRIPT_DIR/sw-memory.sh" metric "${stage_id}_duration_s" "$secs" 2>/dev/null || true
1282
+ if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
1283
+ local anomaly_sev
1284
+ anomaly_sev=$(bash "$SCRIPT_DIR/sw-predictive.sh" anomaly "$stage_id" "duration_s" "$secs" 2>/dev/null || echo "normal")
1285
+ [[ "$anomaly_sev" == "critical" || "$anomaly_sev" == "warning" ]] && emit_event "pipeline.anomaly" "stage=$stage_id" "metric=duration_s" "value=$secs" "severity=$anomaly_sev" 2>/dev/null || true
1286
+ bash "$SCRIPT_DIR/sw-predictive.sh" baseline "$stage_id" "duration_s" "$secs" 2>/dev/null || true
1287
+ fi
1288
+ fi
1289
+ fi
1290
+
1248
1291
  # Update GitHub progress comment
1249
1292
  if [[ -n "$ISSUE_NUMBER" ]]; then
1250
1293
  local body
@@ -1265,11 +1308,116 @@ mark_stage_complete() {
1265
1308
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update &>/dev/null 2>&1; then
1266
1309
  gh_checks_stage_update "$stage_id" "completed" "success" "Stage $stage_id: ${timing}" 2>/dev/null || true
1267
1310
  fi
1311
+
1312
+ # Persist artifacts to feature branch after expensive stages
1313
+ case "$stage_id" in
1314
+ plan) persist_artifacts "plan" "plan.md" "dod.md" "context-bundle.md" ;;
1315
+ design) persist_artifacts "design" "design.md" ;;
1316
+ esac
1317
+ }
1318
+
1319
persist_artifacts() {
  # Commit and push pipeline artifacts to the feature branch mid-pipeline.
  # Only runs in CI — local runs skip. Non-fatal: logs failure but never crashes.
  #
  # Args:    $1   - stage name for the commit message (defaults to "unknown")
  #          $2.. - artifact filenames relative to $ARTIFACTS_DIR
  # Globals: CI_MODE, ISSUE_NUMBER, ARTIFACTS_DIR (read)
  # Returns: always 0
  [[ "${CI_MODE:-false}" != "true" ]] && return 0
  [[ -z "${ISSUE_NUMBER:-}" ]] && return 0
  [[ -z "${ARTIFACTS_DIR:-}" ]] && return 0

  local stage="${1:-unknown}"
  # Guard the shift: a bare `shift` with no arguments fails and, under
  # `set -e`, would abort the whole pipeline.
  if [[ $# -gt 0 ]]; then shift; fi

  # Collect files that actually exist and are non-empty.
  # Iterate "$@" directly: expanding an empty array under `set -u`
  # errors on bash < 4.4 (this project targets bash 3.2).
  local to_add=()
  local f path
  for f in "$@"; do
    path="${ARTIFACTS_DIR}/${f}"
    if [[ -f "$path" && -s "$path" ]]; then
      to_add+=("$path")
    fi
  done

  if [[ ${#to_add[@]} -eq 0 ]]; then
    warn "persist_artifacts($stage): no artifact files found — skipping"
    return 0
  fi

  info "Persisting ${#to_add[@]} artifact(s) after stage ${stage}..."

  # Subshell isolates git state/failures; every step is deliberately
  # best-effort (`|| true`) so artifact persistence can never crash a run.
  (
    git add "${to_add[@]}" 2>/dev/null || true
    if ! git diff --cached --quiet 2>/dev/null; then
      git commit -m "chore: persist ${stage} artifacts for #${ISSUE_NUMBER} [skip ci]" --no-verify 2>/dev/null || true
      local branch="shipwright/issue-${ISSUE_NUMBER}"
      git push origin "HEAD:refs/heads/$branch" --force 2>/dev/null || true
      emit_event "artifacts.persisted" "issue=${ISSUE_NUMBER}" "stage=$stage" "file_count=${#to_add[@]}"
    fi
  ) 2>/dev/null || {
    warn "persist_artifacts($stage): push failed — non-fatal, continuing"
    emit_event "artifacts.persist_failed" "issue=${ISSUE_NUMBER}" "stage=$stage"
  }

  return 0
}
1361
+
1362
verify_stage_artifacts() {
  # Check that required artifacts exist and are non-empty for a given stage.
  #
  # Args:    $1 - stage id (only "plan" and "design" have required artifacts)
  # Globals: ARTIFACTS_DIR (read) — check skipped entirely when unset/empty
  # Returns: 0 if all artifacts are present and non-empty, 1 if any are missing.
  local stage_id="$1"
  [[ -z "${ARTIFACTS_DIR:-}" ]] && return 0

  local required=()
  case "$stage_id" in
    plan) required=("plan.md") ;;
    design) required=("design.md" "plan.md") ;;
    *) return 0 ;; # No artifact check needed
  esac

  # `local f path` — the original leaked the loop variable `f` into the
  # caller's scope.
  local missing=0 f path
  for f in "${required[@]}"; do
    path="${ARTIFACTS_DIR}/${f}"
    if [[ ! -f "$path" || ! -s "$path" ]]; then
      warn "verify_stage_artifacts($stage_id): missing or empty: $f"
      missing=1
    fi
  done

  return "$missing"
}
1386
+
1387
# Self-aware pipeline: record stage effectiveness for meta-cognition
STAGE_EFFECTIVENESS_FILE="${HOME}/.shipwright/stage-effectiveness.jsonl"

# Append one JSONL record of a stage outcome and trim the log to its
# last 100 entries (best-effort; rotation failures are swallowed).
#
# Args:    $1 - stage id
#          $2 - outcome string (defaults to "failed")
# Globals: STAGE_EFFECTIVENESS_FILE (read/write)
record_stage_effectiveness() {
  local stage_id="$1" outcome="${2:-failed}"
  # Derive the directory from the file variable itself so an override of
  # STAGE_EFFECTIVENESS_FILE keeps both in sync (the original hard-coded
  # ~/.shipwright here).
  mkdir -p "$(dirname "${STAGE_EFFECTIVENESS_FILE}")"
  printf '{"stage":"%s","outcome":"%s","ts":"%s"}\n' "$stage_id" "$outcome" "$(now_iso)" >> "${STAGE_EFFECTIVENESS_FILE}"
  # Keep last 100 entries (`tail -n` is the POSIX-portable form)
  tail -n 100 "${STAGE_EFFECTIVENESS_FILE}" > "${STAGE_EFFECTIVENESS_FILE}.tmp" 2>/dev/null && mv "${STAGE_EFFECTIVENESS_FILE}.tmp" "${STAGE_EFFECTIVENESS_FILE}" 2>/dev/null || true
}
1396
# Emit an advisory hint when a stage has failed in >=50% of its last 10
# recorded runs (minimum 3 samples). Prints nothing when history is
# healthy or absent; always returns 0.
#
# Args:    $1 - stage id
# Globals: STAGE_EFFECTIVENESS_FILE (read)
get_stage_self_awareness_hint() {
  local stage_id="$1"
  [[ ! -f "$STAGE_EFFECTIVENESS_FILE" ]] && return 0
  local recent
  recent=$(grep "\"stage\":\"$stage_id\"" "$STAGE_EFFECTIVENESS_FILE" 2>/dev/null | tail -n 10 || true)
  [[ -z "$recent" ]] && return 0
  local failures=0 total=0 line
  while IFS= read -r line; do
    [[ -z "$line" ]] && continue
    total=$((total + 1))
    # In-shell substring match instead of spawning one grep process per line
    [[ "$line" == *'"outcome":"failed"'* ]] && failures=$((failures + 1)) || true
  done <<< "$recent"
  if [[ "$total" -ge 3 ]] && [[ $((failures * 100 / total)) -ge 50 ]]; then
    case "$stage_id" in
      plan) echo "Recent plan stage failures: consider adding more context or breaking the goal into smaller steps." ;;
      build) echo "Recent build stage failures: consider adding test expectations or simplifying the change." ;;
      *) echo "Recent $stage_id failures: review past logs and adjust approach." ;;
    esac
  fi
}
1269
1416
 
1270
1417
  mark_stage_failed() {
1271
1418
  local stage_id="$1"
1272
1419
  record_stage_end "$stage_id"
1420
+ record_stage_effectiveness "$stage_id" "failed"
1273
1421
  set_stage_status "$stage_id" "failed"
1274
1422
  local timing
1275
1423
  timing=$(get_stage_timing "$stage_id")
@@ -1649,6 +1797,12 @@ stage_plan() {
1649
1797
 
1650
1798
  info "Generating implementation plan..."
1651
1799
 
1800
+ # ── Gather context bundle (if context engine available) ──
1801
+ local context_script="${SCRIPT_DIR}/sw-context.sh"
1802
+ if [[ -x "$context_script" ]]; then
1803
+ "$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
1804
+ fi
1805
+
1652
1806
  # Build rich prompt with all available context
1653
1807
  local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.
1654
1808
 
@@ -1664,6 +1818,19 @@ ${ISSUE_BODY}
1664
1818
  "
1665
1819
  fi
1666
1820
 
1821
+ # Inject context bundle from context engine (if available)
1822
+ local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
1823
+ if [[ -f "$_context_bundle" ]]; then
1824
+ local _cb_content
1825
+ _cb_content=$(cat "$_context_bundle" 2>/dev/null | head -100 || true)
1826
+ if [[ -n "$_cb_content" ]]; then
1827
+ plan_prompt="${plan_prompt}
1828
+ ## Pipeline Context
1829
+ ${_cb_content}
1830
+ "
1831
+ fi
1832
+ fi
1833
+
1667
1834
  # Inject intelligence memory context for similar past plans
1668
1835
  if type intelligence_search_memory &>/dev/null 2>&1; then
1669
1836
  local plan_memory
@@ -1681,6 +1848,28 @@ ${memory_summary}
1681
1848
  fi
1682
1849
  fi
1683
1850
 
1851
+ # Self-aware pipeline: inject hint when plan stage has been failing recently
1852
+ local plan_hint
1853
+ plan_hint=$(get_stage_self_awareness_hint "plan" 2>/dev/null || true)
1854
+ if [[ -n "$plan_hint" ]]; then
1855
+ plan_prompt="${plan_prompt}
1856
+ ## Self-Assessment (recent plan stage performance)
1857
+ ${plan_hint}
1858
+ "
1859
+ fi
1860
+
1861
+ # Inject cross-pipeline discoveries (from other concurrent/similar pipelines)
1862
+ if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
1863
+ local plan_discoveries
1864
+ plan_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.json" 2>/dev/null | head -20 || true)
1865
+ if [[ -n "$plan_discoveries" ]]; then
1866
+ plan_prompt="${plan_prompt}
1867
+ ## Discoveries from Other Pipelines
1868
+ ${plan_discoveries}
1869
+ "
1870
+ fi
1871
+ fi
1872
+
1684
1873
  # Inject architecture patterns from intelligence layer
1685
1874
  local repo_hash_plan
1686
1875
  repo_hash_plan=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
@@ -1766,12 +1955,24 @@ Checklist of completion criteria.
1766
1955
  parse_claude_tokens "$_token_log"
1767
1956
 
1768
1957
  if [[ ! -s "$plan_file" ]]; then
1769
- error "Plan generation failed"
1958
+ error "Plan generation failed — empty output"
1959
+ return 1
1960
+ fi
1961
+
1962
+ # Validate plan content — detect API/CLI errors masquerading as plans
1963
+ local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
1964
+ _plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
1965
+ if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
1966
+ error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
1770
1967
  return 1
1771
1968
  fi
1772
1969
 
1773
1970
  local line_count
1774
1971
  line_count=$(wc -l < "$plan_file" | xargs)
1972
+ if [[ "$line_count" -lt 3 ]]; then
1973
+ error "Plan too short (${line_count} lines) — likely an error, not a real plan"
1974
+ return 1
1975
+ fi
1775
1976
  info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"
1776
1977
 
1777
1978
  # Extract task checklist for GitHub issue and task tracking
@@ -2041,6 +2242,12 @@ stage_design() {
2041
2242
  memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "design" 2>/dev/null) || true
2042
2243
  fi
2043
2244
 
2245
+ # Inject cross-pipeline discoveries for design stage
2246
+ local design_discoveries=""
2247
+ if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
2248
+ design_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "*.md,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
2249
+ fi
2250
+
2044
2251
  # Inject architecture model patterns if available
2045
2252
  local arch_context=""
2046
2253
  local repo_hash
@@ -2100,7 +2307,10 @@ ${memory_context}
2100
2307
  }${arch_context:+
2101
2308
  ## Architecture Model (from previous designs)
2102
2309
  ${arch_context}
2103
- }${design_antipatterns}
2310
+ }${design_antipatterns}${design_discoveries:+
2311
+ ## Discoveries from Other Pipelines
2312
+ ${design_discoveries}
2313
+ }
2104
2314
  ## Required Output — Architecture Decision Record
2105
2315
 
2106
2316
  Produce this EXACT format:
@@ -2144,12 +2354,24 @@ Be concrete and specific. Reference actual file paths in the codebase. Consider
2144
2354
  parse_claude_tokens "$_token_log"
2145
2355
 
2146
2356
  if [[ ! -s "$design_file" ]]; then
2147
- error "Design generation failed"
2357
+ error "Design generation failed — empty output"
2358
+ return 1
2359
+ fi
2360
+
2361
+ # Validate design content — detect API/CLI errors masquerading as designs
2362
+ local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
2363
+ _design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
2364
+ if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
2365
+ error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
2148
2366
  return 1
2149
2367
  fi
2150
2368
 
2151
2369
  local line_count
2152
2370
  line_count=$(wc -l < "$design_file" | xargs)
2371
+ if [[ "$line_count" -lt 3 ]]; then
2372
+ error "Design too short (${line_count} lines) — likely an error, not a real design"
2373
+ return 1
2374
+ fi
2153
2375
  info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"
2154
2376
 
2155
2377
  # Extract file lists for build stage awareness
@@ -2212,6 +2434,18 @@ Historical context (lessons from previous pipelines):
2212
2434
  ${memory_context}"
2213
2435
  fi
2214
2436
 
2437
+ # Inject cross-pipeline discoveries for build stage
2438
+ if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
2439
+ local build_discoveries
2440
+ build_discoveries=$("$SCRIPT_DIR/sw-discovery.sh" inject "src/*,*.ts,*.tsx,*.js" 2>/dev/null | head -20 || true)
2441
+ if [[ -n "$build_discoveries" ]]; then
2442
+ enriched_goal="${enriched_goal}
2443
+
2444
+ Discoveries from other pipelines:
2445
+ ${build_discoveries}"
2446
+ fi
2447
+ fi
2448
+
2215
2449
  # Add task list context
2216
2450
  if [[ -s "$TASKS_FILE" ]]; then
2217
2451
  enriched_goal="${enriched_goal}
@@ -2258,6 +2492,19 @@ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
2258
2492
  fi
2259
2493
  fi
2260
2494
 
2495
+ # Predictive: inject prevention hints when risk/memory patterns suggest build-stage failures
2496
+ if [[ -x "$SCRIPT_DIR/sw-predictive.sh" ]]; then
2497
+ local issue_json_build="{}"
2498
+ [[ -n "${ISSUE_NUMBER:-}" ]] && issue_json_build=$(jq -n --arg title "${GOAL:-}" --arg num "${ISSUE_NUMBER:-}" '{title: $title, number: $num}')
2499
+ local prevention_text
2500
+ prevention_text=$(bash "$SCRIPT_DIR/sw-predictive.sh" inject-prevention "build" "$issue_json_build" 2>/dev/null || true)
2501
+ if [[ -n "$prevention_text" ]]; then
2502
+ enriched_goal="${enriched_goal}
2503
+
2504
+ ${prevention_text}"
2505
+ fi
2506
+ fi
2507
+
2261
2508
  loop_args+=("$enriched_goal")
2262
2509
 
2263
2510
  # Build loop args from pipeline config + CLI overrides
@@ -2310,6 +2557,23 @@ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
2310
2557
  build_model="$CLAUDE_MODEL"
2311
2558
  fi
2312
2559
 
2560
+ # Recruit-powered model selection (when no explicit override)
2561
+ if [[ -z "$MODEL" ]] && [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
2562
+ local _recruit_goal="${GOAL:-}"
2563
+ if [[ -n "$_recruit_goal" ]]; then
2564
+ local _recruit_match
2565
+ _recruit_match=$(bash "$SCRIPT_DIR/sw-recruit.sh" match --json "$_recruit_goal" 2>/dev/null) || true
2566
+ if [[ -n "$_recruit_match" ]]; then
2567
+ local _recruit_model
2568
+ _recruit_model=$(echo "$_recruit_match" | jq -r '.model // ""' 2>/dev/null) || true
2569
+ if [[ -n "$_recruit_model" && "$_recruit_model" != "null" && "$_recruit_model" != "" ]]; then
2570
+ info "Recruit recommends model: ${CYAN}${_recruit_model}${RESET} for this task"
2571
+ build_model="$_recruit_model"
2572
+ fi
2573
+ fi
2574
+ fi
2575
+ fi
2576
+
2313
2577
  [[ -n "$test_cmd" && "$test_cmd" != "null" ]] && loop_args+=(--test-cmd "$test_cmd")
2314
2578
  loop_args+=(--max-iterations "$max_iter")
2315
2579
  loop_args+=(--model "$build_model")
@@ -2366,6 +2630,23 @@ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
2366
2630
  }
2367
2631
  parse_claude_tokens "$_token_log"
2368
2632
 
2633
+ # Read accumulated token counts from build loop (written by sw-loop.sh)
2634
+ local _loop_token_file="${PROJECT_ROOT}/.claude/loop-logs/loop-tokens.json"
2635
+ if [[ -f "$_loop_token_file" ]] && command -v jq &>/dev/null; then
2636
+ local _loop_in _loop_out _loop_cost
2637
+ _loop_in=$(jq -r '.input_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
2638
+ _loop_out=$(jq -r '.output_tokens // 0' "$_loop_token_file" 2>/dev/null || echo "0")
2639
+ _loop_cost=$(jq -r '.cost_usd // 0' "$_loop_token_file" 2>/dev/null || echo "0")
2640
+ TOTAL_INPUT_TOKENS=$(( TOTAL_INPUT_TOKENS + ${_loop_in:-0} ))
2641
+ TOTAL_OUTPUT_TOKENS=$(( TOTAL_OUTPUT_TOKENS + ${_loop_out:-0} ))
2642
+ if [[ -n "$_loop_cost" && "$_loop_cost" != "0" && "$_loop_cost" != "null" ]]; then
2643
+ TOTAL_COST_USD="${_loop_cost}"
2644
+ fi
2645
+ if [[ "${_loop_in:-0}" -gt 0 || "${_loop_out:-0}" -gt 0 ]]; then
2646
+ info "Build loop tokens: in=${_loop_in} out=${_loop_out} cost=\$${_loop_cost:-0}"
2647
+ fi
2648
+ fi
2649
+
2369
2650
  # Count commits made during build
2370
2651
  local commit_count
2371
2652
  commit_count=$(git log --oneline "${BASE_BRANCH}..HEAD" 2>/dev/null | wc -l | xargs)
@@ -2473,7 +2754,7 @@ ${log_excerpt}
2473
2754
  # Post test results to GitHub
2474
2755
  if [[ -n "$ISSUE_NUMBER" ]]; then
2475
2756
  local test_summary
2476
- test_summary=$(tail -10 "$test_log")
2757
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
2477
2758
  local cov_line=""
2478
2759
  [[ -n "$coverage" ]] && cov_line="
2479
2760
  **Coverage:** ${coverage}%"
@@ -2670,6 +2951,22 @@ $(cat "$diff_file")"
2670
2951
  success "Review clean"
2671
2952
  fi
2672
2953
 
2954
+ # ── Oversight gate: pipeline review/quality stages block on verdict ──
2955
+ if [[ -x "$SCRIPT_DIR/sw-oversight.sh" ]] && [[ "${SKIP_GATES:-false}" != "true" ]]; then
2956
+ local reject_reason=""
2957
+ local _sec_count
2958
+ _sec_count=$(grep -ciE '\*\*\[?Security\]?\*\*' "$review_file" 2>/dev/null || true)
2959
+ _sec_count="${_sec_count:-0}"
2960
+ local _blocking=$((critical_count + _sec_count))
2961
+ [[ "$_blocking" -gt 0 ]] && reject_reason="Review found ${_blocking} critical/security issue(s)"
2962
+ if ! bash "$SCRIPT_DIR/sw-oversight.sh" gate --diff "$diff_file" --description "${GOAL:-Pipeline review}" --reject-if "$reject_reason" >/dev/null 2>&1; then
2963
+ error "Oversight gate rejected — blocking pipeline"
2964
+ emit_event "review.oversight_blocked" "issue=${ISSUE_NUMBER:-0}"
2965
+ log_stage "review" "BLOCKED: oversight gate rejected"
2966
+ return 1
2967
+ fi
2968
+ fi
2969
+
2673
2970
  # ── Review Blocking Gate ──
2674
2971
  # Block pipeline on critical/security issues unless compound_quality handles them
2675
2972
  local security_count
@@ -2879,6 +3176,19 @@ stage_pr() {
2879
3176
  fi
2880
3177
  fi
2881
3178
 
3179
+ # Pre-PR diff gate — verify meaningful code changes exist (not just bookkeeping)
3180
+ local real_changes
3181
+ real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
3182
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
3183
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
3184
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
3185
+ if [[ "${real_changes:-0}" -eq 0 ]]; then
3186
+ error "No meaningful code changes detected — only bookkeeping files modified"
3187
+ error "Refusing to create PR with zero real changes"
3188
+ return 1
3189
+ fi
3190
+ info "Pre-PR diff check: ${real_changes} real files changed"
3191
+
2882
3192
  # Build PR title — prefer GOAL over plan file first line
2883
3193
  # (plan file first line often contains Claude analysis text, not a clean title)
2884
3194
  local pr_title=""
@@ -2890,6 +3200,12 @@ stage_pr() {
2890
3200
  fi
2891
3201
  [[ -z "$pr_title" ]] && pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
2892
3202
 
3203
+ # Sanitize: reject PR titles that look like error messages
3204
+ if echo "$pr_title" | grep -qiE 'Invalid API|API key|authentication_error|rate_limit|CLI error|no useful output'; then
3205
+ warn "PR title looks like an error message: $pr_title"
3206
+ pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
3207
+ fi
3208
+
2893
3209
  # Build comprehensive PR body
2894
3210
  local plan_summary=""
2895
3211
  if [[ -s "$plan_file" ]]; then
@@ -2898,7 +3214,7 @@ stage_pr() {
2898
3214
 
2899
3215
  local test_summary=""
2900
3216
  if [[ -s "$test_log" ]]; then
2901
- test_summary=$(tail -10 "$test_log")
3217
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
2902
3218
  fi
2903
3219
 
2904
3220
  local review_summary=""
@@ -3630,6 +3946,8 @@ stage_monitor() {
3630
3946
  fi
3631
3947
 
3632
3948
  local report_file="$ARTIFACTS_DIR/monitor-report.md"
3949
+ local deploy_log_file="$ARTIFACTS_DIR/deploy-logs.txt"
3950
+ : > "$deploy_log_file"
3633
3951
  local total_errors=0
3634
3952
  local poll_interval=30 # seconds between polls
3635
3953
  local total_polls=$(( (duration_minutes * 60) / poll_interval ))
@@ -3677,10 +3995,11 @@ stage_monitor() {
3677
3995
  fi
3678
3996
  fi
3679
3997
 
3680
- # Log command check
3998
+ # Log command check (accumulate deploy logs for feedback collect)
3681
3999
  if [[ -n "$log_cmd" ]]; then
3682
4000
  local log_output
3683
4001
  log_output=$(bash -c "$log_cmd" 2>/dev/null || true)
4002
+ [[ -n "$log_output" ]] && echo "$log_output" >> "$deploy_log_file"
3684
4003
  local error_count=0
3685
4004
  if [[ -n "$log_output" ]]; then
3686
4005
  error_count=$(echo "$log_output" | grep -cE "$log_pattern" 2>/dev/null || true)
@@ -3715,13 +4034,24 @@ stage_monitor() {
3715
4034
  "total_errors=$total_errors" \
3716
4035
  "threshold=$error_threshold"
3717
4036
 
3718
- # Auto-rollback if configured
3719
- if [[ "$auto_rollback" == "true" && -n "$rollback_cmd" ]]; then
4037
+ # Feedback loop: collect deploy logs and optionally create issue
4038
+ if [[ -f "$deploy_log_file" ]] && [[ -s "$deploy_log_file" ]] && [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
4039
+ (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" collect "$deploy_log_file" 2>/dev/null) || true
4040
+ (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" create-issue 2>/dev/null) || true
4041
+ fi
4042
+
4043
+ # Auto-rollback: feedback rollback (GitHub Deployments API) and/or config rollback_cmd
4044
+ if [[ "$auto_rollback" == "true" ]]; then
3720
4045
  warn "Auto-rolling back..."
3721
4046
  echo "" >> "$report_file"
3722
4047
  echo "## Rollback" >> "$report_file"
3723
4048
 
3724
- if bash -c "$rollback_cmd" >> "$report_file" 2>&1; then
4049
+ # Trigger feedback rollback (calls sw-github-deploy.sh rollback)
4050
+ if [[ -x "$SCRIPT_DIR/sw-feedback.sh" ]]; then
4051
+ (cd "$PROJECT_ROOT" && ARTIFACTS_DIR="$ARTIFACTS_DIR" bash "$SCRIPT_DIR/sw-feedback.sh" rollback production "Monitor threshold exceeded (${total_errors} errors)" >> "$report_file" 2>&1) || true
4052
+ fi
4053
+
4054
+ if [[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" >> "$report_file" 2>&1; then
3725
4055
  success "Rollback executed"
3726
4056
  echo "Rollback: ✅ success" >> "$report_file"
3727
4057
 
@@ -5754,9 +6084,242 @@ ${route_instruction}"
5754
6084
  fi
5755
6085
  }
5756
6086
 
6087
+ # ──────────────────────────────────────────────────────────────────────────────
6088
+ # Bash 3.2 Compatibility Check
6089
+ # Scans modified .sh files for common bash 3.2 incompatibilities
6090
+ # Returns: count of violations found
6091
+ # ──────────────────────────────────────────────────────────────────────────────
6092
+ run_bash_compat_check() {
6093
+ local violations=0
6094
+ local violation_details=""
6095
+
6096
+ # Get modified .sh files relative to base branch
6097
+ local changed_files
6098
+ changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" -- '*.sh' 2>/dev/null || echo "")
6099
+
6100
+ if [[ -z "$changed_files" ]]; then
6101
+ echo "0"
6102
+ return 0
6103
+ fi
6104
+
6105
+ # Check each file for bash 3.2 incompatibilities
6106
+ while IFS= read -r filepath; do
6107
+ [[ -z "$filepath" ]] && continue
6108
+
6109
+ # declare -A (associative arrays)
6110
+ local declare_a_count
6111
+ declare_a_count=$(grep -c 'declare[[:space:]]*-[aA]' "$filepath" 2>/dev/null || true)
6112
+ if [[ "$declare_a_count" -gt 0 ]]; then
6113
+ violations=$((violations + declare_a_count))
6114
+ violation_details="${violation_details}${filepath}: declare -A (${declare_a_count} occurrences)
6115
+ "
6116
+ fi
6117
+
6118
+ # readarray or mapfile
6119
+ local readarray_count
6120
+ readarray_count=$(grep -c 'readarray\|mapfile' "$filepath" 2>/dev/null || true)
6121
+ if [[ "$readarray_count" -gt 0 ]]; then
6122
+ violations=$((violations + readarray_count))
6123
+ violation_details="${violation_details}${filepath}: readarray/mapfile (${readarray_count} occurrences)
6124
+ "
6125
+ fi
6126
+
6127
+ # ${var,,} or ${var^^} (case conversion)
6128
+ local case_conv_count
6129
+ case_conv_count=$(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*,,' "$filepath" 2>/dev/null || true)
6130
+ case_conv_count=$((case_conv_count + $(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*\^\^' "$filepath" 2>/dev/null || true)))
6131
+ if [[ "$case_conv_count" -gt 0 ]]; then
6132
+ violations=$((violations + case_conv_count))
6133
+ violation_details="${violation_details}${filepath}: case conversion \$\{var,,\} or \$\{var\^\^\} (${case_conv_count} occurrences)
6134
+ "
6135
+ fi
6136
+
6137
+ # |& (pipe stderr to stdout in-place)
6138
+ local pipe_ampersand_count
6139
+ pipe_ampersand_count=$(grep -c '|&' "$filepath" 2>/dev/null || true)
6140
+ if [[ "$pipe_ampersand_count" -gt 0 ]]; then
6141
+ violations=$((violations + pipe_ampersand_count))
6142
+ violation_details="${violation_details}${filepath}: |& operator (${pipe_ampersand_count} occurrences)
6143
+ "
6144
+ fi
6145
+
6146
+ # ;& or ;;& in case statements (advanced fallthrough)
6147
+ local advanced_case_count
6148
+ advanced_case_count=$(grep -c ';&\|;;&' "$filepath" 2>/dev/null || true)
6149
+ if [[ "$advanced_case_count" -gt 0 ]]; then
6150
+ violations=$((violations + advanced_case_count))
6151
+ violation_details="${violation_details}${filepath}: advanced case ;& or ;;& (${advanced_case_count} occurrences)
6152
+ "
6153
+ fi
6154
+
6155
+ done <<< "$changed_files"
6156
+
6157
+ # Log details if violations found
6158
+ if [[ "$violations" -gt 0 ]]; then
6159
+ warn "Bash 3.2 compatibility check: ${violations} violation(s) found:"
6160
+ echo "$violation_details" | sed 's/^/ /'
6161
+ fi
6162
+
6163
+ echo "$violations"
6164
+ }
6165
+
6166
+ # ──────────────────────────────────────────────────────────────────────────────
6167
+ # Test Coverage Check
6168
+ # Runs configured test command and extracts coverage percentage
6169
+ # Returns: coverage percentage (0-100), or "skip" if no test command configured
6170
+ # ──────────────────────────────────────────────────────────────────────────────
6171
+ run_test_coverage_check() {
6172
+ local test_cmd="${TEST_CMD:-}"
6173
+ if [[ -z "$test_cmd" ]]; then
6174
+ echo "skip"
6175
+ return 0
6176
+ fi
6177
+
6178
+ info "Running test coverage check..."
6179
+
6180
+ # Run tests and capture output
6181
+ local test_output
6182
+ local test_rc=0
6183
+ test_output=$(eval "$test_cmd" 2>&1) || test_rc=$?
6184
+
6185
+ if [[ "$test_rc" -ne 0 ]]; then
6186
+ warn "Test command failed (exit code: $test_rc) — cannot extract coverage"
6187
+ echo "0"
6188
+ return 0
6189
+ fi
6190
+
6191
+ # Extract coverage percentage from various formats
6192
+ # Patterns: "XX% coverage", "Lines: XX%", "Stmts: XX%", "Coverage: XX%", "coverage XX%"
6193
+ local coverage_pct
6194
+ coverage_pct=$(echo "$test_output" | grep -oE '[0-9]{1,3}%[[:space:]]*(coverage|lines|stmts|statements)' | grep -oE '^[0-9]{1,3}' | head -1 || true)
6195
+
6196
+ if [[ -z "$coverage_pct" ]]; then
6197
+ # Try alternate patterns without units
6198
+ coverage_pct=$(echo "$test_output" | grep -oE 'coverage[:]?[[:space:]]*[0-9]{1,3}' | grep -oE '[0-9]{1,3}' | head -1 || true)
6199
+ fi
6200
+
6201
+ if [[ -z "$coverage_pct" ]]; then
6202
+ warn "Could not extract coverage percentage from test output"
6203
+ echo "0"
6204
+ return 0
6205
+ fi
6206
+
6207
+ # Ensure it's a valid percentage (0-100)
6208
+ if [[ ! "$coverage_pct" =~ ^[0-9]{1,3}$ ]] || [[ "$coverage_pct" -gt 100 ]]; then
6209
+ coverage_pct=0
6210
+ fi
6211
+
6212
+ success "Test coverage: ${coverage_pct}%"
6213
+ echo "$coverage_pct"
6214
+ }
6215
+
6216
+ # ──────────────────────────────────────────────────────────────────────────────
6217
+ # Atomic Write Violations Check
6218
+ # Scans modified files for anti-patterns: direct echo > file to state/config files
6219
+ # Returns: count of violations found
6220
+ # ──────────────────────────────────────────────────────────────────────────────
6221
+ run_atomic_write_check() {
6222
+ local violations=0
6223
+ local violation_details=""
6224
+
6225
+ # Get modified files (not just .sh — includes state/config files)
6226
+ local changed_files
6227
+ changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || echo "")
6228
+
6229
+ if [[ -z "$changed_files" ]]; then
6230
+ echo "0"
6231
+ return 0
6232
+ fi
6233
+
6234
+ # Check for direct writes to state/config files (patterns that should use tmp+mv)
6235
+ # Look for: echo "..." > state/config files
6236
+ while IFS= read -r filepath; do
6237
+ [[ -z "$filepath" ]] && continue
6238
+
6239
+ # Only check state/config/artifacts files
6240
+ if [[ ! "$filepath" =~ (state|config|artifact|cache|db|json)$ ]]; then
6241
+ continue
6242
+ fi
6243
+
6244
+ # Check for direct redirection writes (> file) in state/config paths
6245
+ local bad_writes
6246
+ bad_writes=$(git show "HEAD:$filepath" 2>/dev/null | grep -c 'echo.*>' "$filepath" 2>/dev/null || true)
6247
+
6248
+ if [[ "$bad_writes" -gt 0 ]]; then
6249
+ violations=$((violations + bad_writes))
6250
+ violation_details="${violation_details}${filepath}: ${bad_writes} direct write(s) (should use tmp+mv)
6251
+ "
6252
+ fi
6253
+ done <<< "$changed_files"
6254
+
6255
+ if [[ "$violations" -gt 0 ]]; then
6256
+ warn "Atomic write violations: ${violations} found (should use tmp file + mv pattern):"
6257
+ echo "$violation_details" | sed 's/^/ /'
6258
+ fi
6259
+
6260
+ echo "$violations"
6261
+ }
6262
+
6263
+ # ──────────────────────────────────────────────────────────────────────────────
6264
+ # New Function Test Detection
6265
+ # Detects new functions added in the diff but checks if corresponding tests exist
6266
+ # Returns: count of untested new functions
6267
+ # ──────────────────────────────────────────────────────────────────────────────
6268
+ run_new_function_test_check() {
6269
+ local untested_functions=0
6270
+ local details=""
6271
+
6272
+ # Get diff
6273
+ local diff_content
6274
+ diff_content=$(git diff "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
6275
+
6276
+ if [[ -z "$diff_content" ]]; then
6277
+ echo "0"
6278
+ return 0
6279
+ fi
6280
+
6281
+ # Extract newly added function definitions (lines starting with +functionname())
6282
+ local new_functions
6283
+ new_functions=$(echo "$diff_content" | grep -E '^\+[a-zA-Z_][a-zA-Z0-9_]*\(\)' | sed 's/^\+//' | sed 's/()//' || true)
6284
+
6285
+ if [[ -z "$new_functions" ]]; then
6286
+ echo "0"
6287
+ return 0
6288
+ fi
6289
+
6290
+ # For each new function, check if test files were modified
6291
+ local test_files_modified=0
6292
+ test_files_modified=$(echo "$diff_content" | grep -c '\-\-\-.*test\|\.test\.\|_test\.' || true)
6293
+
6294
+ # Simple heuristic: if we have new functions but no test file modifications, warn
6295
+ if [[ "$test_files_modified" -eq 0 ]]; then
6296
+ local func_count
6297
+ func_count=$(echo "$new_functions" | wc -l | xargs)
6298
+ untested_functions="$func_count"
6299
+ details="Added ${func_count} new function(s) but no test file modifications detected"
6300
+ fi
6301
+
6302
+ if [[ "$untested_functions" -gt 0 ]]; then
6303
+ warn "New functions without tests: ${details}"
6304
+ fi
6305
+
6306
+ echo "$untested_functions"
6307
+ }
6308
+
5757
6309
  stage_compound_quality() {
5758
6310
  CURRENT_STAGE_ID="compound_quality"
5759
6311
 
6312
+ # Pre-check: verify meaningful changes exist before running expensive quality checks
6313
+ local _cq_real_changes
6314
+ _cq_real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
6315
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
6316
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
6317
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
6318
+ if [[ "${_cq_real_changes:-0}" -eq 0 ]]; then
6319
+ error "Compound quality: no meaningful code changes found — failing quality gate"
6320
+ return 1
6321
+ fi
6322
+
5760
6323
  # Read config
5761
6324
  local max_cycles adversarial_enabled negative_enabled e2e_enabled dod_enabled strict_quality
5762
6325
  max_cycles=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.max_cycles) // 3' "$PIPELINE_CONFIG" 2>/dev/null) || true
@@ -5783,6 +6346,79 @@ stage_compound_quality() {
5783
6346
  local total_critical=0 total_major=0 total_minor=0
5784
6347
  local audits_run_list=""
5785
6348
 
6349
+ # ── HARDENED QUALITY GATES (RUN BEFORE CYCLES) ──
6350
+ # These checks must pass before we even start the audit cycles
6351
+ echo ""
6352
+ info "Running hardened quality gate checks..."
6353
+
6354
+ # 1. Bash 3.2 compatibility check
6355
+ local bash_violations=0
6356
+ bash_violations=$(run_bash_compat_check 2>/dev/null) || bash_violations=0
6357
+ bash_violations="${bash_violations:-0}"
6358
+
6359
+ if [[ "$strict_quality" == "true" && "$bash_violations" -gt 0 ]]; then
6360
+ error "STRICT QUALITY: Bash 3.2 incompatibilities found — blocking"
6361
+ emit_event "quality.bash_compat_failed" \
6362
+ "issue=${ISSUE_NUMBER:-0}" \
6363
+ "violations=$bash_violations"
6364
+ return 1
6365
+ fi
6366
+
6367
+ if [[ "$bash_violations" -gt 0 ]]; then
6368
+ warn "Bash 3.2 incompatibilities detected: ${bash_violations} (will impact quality score)"
6369
+ total_minor=$((total_minor + bash_violations))
6370
+ else
6371
+ success "Bash 3.2 compatibility: clean"
6372
+ fi
6373
+
6374
+ # 2. Test coverage check
6375
+ local coverage_pct=0
6376
+ coverage_pct=$(run_test_coverage_check 2>/dev/null) || coverage_pct=0
6377
+ coverage_pct="${coverage_pct:-0}"
6378
+
6379
+ if [[ "$coverage_pct" != "skip" ]]; then
6380
+ if [[ "$coverage_pct" -lt 60 ]]; then
6381
+ if [[ "$strict_quality" == "true" ]]; then
6382
+ error "STRICT QUALITY: Test coverage below 60% (${coverage_pct}%) — blocking"
6383
+ emit_event "quality.coverage_failed" \
6384
+ "issue=${ISSUE_NUMBER:-0}" \
6385
+ "coverage=$coverage_pct"
6386
+ return 1
6387
+ else
6388
+ warn "Test coverage below 60% threshold (${coverage_pct}%) — quality penalty applied"
6389
+ total_major=$((total_major + 2))
6390
+ fi
6391
+ fi
6392
+ fi
6393
+
6394
+ # 3. New functions without tests check
6395
+ local untested_functions=0
6396
+ untested_functions=$(run_new_function_test_check 2>/dev/null) || untested_functions=0
6397
+ untested_functions="${untested_functions:-0}"
6398
+
6399
+ if [[ "$untested_functions" -gt 0 ]]; then
6400
+ if [[ "$strict_quality" == "true" ]]; then
6401
+ error "STRICT QUALITY: ${untested_functions} new function(s) without tests — blocking"
6402
+ emit_event "quality.untested_functions" \
6403
+ "issue=${ISSUE_NUMBER:-0}" \
6404
+ "count=$untested_functions"
6405
+ return 1
6406
+ else
6407
+ warn "New functions without corresponding tests: ${untested_functions}"
6408
+ total_major=$((total_major + untested_functions))
6409
+ fi
6410
+ fi
6411
+
6412
+ # 4. Atomic write violations (optional, informational in most modes)
6413
+ local atomic_violations=0
6414
+ atomic_violations=$(run_atomic_write_check 2>/dev/null) || atomic_violations=0
6415
+ atomic_violations="${atomic_violations:-0}"
6416
+
6417
+ if [[ "$atomic_violations" -gt 0 ]]; then
6418
+ warn "Atomic write violations: ${atomic_violations} (state/config file patterns)"
6419
+ total_minor=$((total_minor + atomic_violations))
6420
+ fi
6421
+
5786
6422
  # Vitals-driven adaptive cycle limit (preferred)
5787
6423
  local base_max_cycles="$max_cycles"
5788
6424
  if type pipeline_adaptive_limit &>/dev/null 2>&1; then
@@ -6180,24 +6816,52 @@ All quality checks clean:
6180
6816
  # Record quality score
6181
6817
  pipeline_record_quality_score "$quality_score" "$total_critical" "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
6182
6818
 
6183
- # ── Quality Gate ──
6819
+ # ── Quality Gate (HARDENED) ──
6184
6820
  local compound_quality_blocking
6185
6821
  compound_quality_blocking=$(jq -r --arg id "compound_quality" \
6186
6822
  '(.stages[] | select(.id == $id) | .config.compound_quality_blocking) // true' \
6187
6823
  "$PIPELINE_CONFIG" 2>/dev/null) || true
6188
6824
  [[ -z "$compound_quality_blocking" || "$compound_quality_blocking" == "null" ]] && compound_quality_blocking="true"
6189
6825
 
6190
- if [[ "$quality_score" -lt 60 && "$compound_quality_blocking" == "true" ]]; then
6826
+ # HARDENED THRESHOLD: quality_score must be >= 60 to pass
6827
+ # In strict mode, higher requirements apply per the hardened checks above
6828
+ local min_threshold=60
6829
+ if [[ "$strict_quality" == "true" ]]; then
6830
+ # Strict mode: require score >= 70 and ZERO critical issues
6831
+ if [[ "$total_critical" -gt 0 ]]; then
6832
+ error "STRICT QUALITY: ${total_critical} critical issue(s) found — BLOCKING (strict mode)"
6833
+ emit_event "pipeline.quality_gate_failed_strict" \
6834
+ "issue=${ISSUE_NUMBER:-0}" \
6835
+ "reason=critical_issues" \
6836
+ "critical=$total_critical"
6837
+ log_stage "compound_quality" "Quality gate failed (strict mode): critical issues"
6838
+ return 1
6839
+ fi
6840
+ min_threshold=70
6841
+ fi
6842
+
6843
+ # Hard floor: score must be >= 40, regardless of other settings
6844
+ if [[ "$quality_score" -lt 40 ]]; then
6845
+ error "HARDENED GATE: Quality score ${quality_score}/100 below hard floor (40) — BLOCKING"
6846
+ emit_event "quality.hard_floor_failed" \
6847
+ "issue=${ISSUE_NUMBER:-0}" \
6848
+ "quality_score=$quality_score"
6849
+ log_stage "compound_quality" "Quality gate failed: score below hard floor (40)"
6850
+ return 1
6851
+ fi
6852
+
6853
+ if [[ "$quality_score" -lt "$min_threshold" && "$compound_quality_blocking" == "true" ]]; then
6191
6854
  emit_event "pipeline.quality_gate_failed" \
6192
6855
  "issue=${ISSUE_NUMBER:-0}" \
6193
6856
  "quality_score=$quality_score" \
6857
+ "threshold=$min_threshold" \
6194
6858
  "critical=$total_critical" \
6195
6859
  "major=$total_major"
6196
6860
 
6197
- error "Quality gate FAILED: score ${quality_score}/100 (critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
6861
+ error "Quality gate FAILED: score ${quality_score}/100 (threshold: ${min_threshold}/100, critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
6198
6862
 
6199
6863
  if [[ -n "$ISSUE_NUMBER" ]]; then
6200
- gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/100
6864
+ gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/${min_threshold}
6201
6865
 
6202
6866
  | Finding Type | Count | Deduction |
6203
6867
  |---|---|---|
@@ -6209,25 +6873,41 @@ DoD pass rate: ${_dod_pass_rate}%
6209
6873
  Quality issues remain after ${max_cycles} cycles. Check artifacts for details." 2>/dev/null || true
6210
6874
  fi
6211
6875
 
6212
- log_stage "compound_quality" "Quality gate failed: ${quality_score}/100 after ${max_cycles} cycles"
6876
+ log_stage "compound_quality" "Quality gate failed: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6213
6877
  return 1
6214
6878
  fi
6215
6879
 
6216
- # Exhausted all cycles but quality score is above threshold
6217
- if [[ "$quality_score" -ge 60 ]]; then
6218
- warn "Compound quality: score ${quality_score}/100 after ${max_cycles} cycles (above threshold, proceeding)"
6880
+ # Exhausted all cycles but quality score is at or above threshold
6881
+ if [[ "$quality_score" -ge "$min_threshold" ]]; then
6882
+ if [[ "$quality_score" -eq 100 ]]; then
6883
+ success "Compound quality PERFECT: 100/100"
6884
+ elif [[ "$quality_score" -ge 80 ]]; then
6885
+ success "Compound quality EXCELLENT: ${quality_score}/100"
6886
+ elif [[ "$quality_score" -ge 70 ]]; then
6887
+ success "Compound quality GOOD: ${quality_score}/100"
6888
+ else
6889
+ warn "Compound quality ACCEPTABLE: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6890
+ fi
6219
6891
 
6220
6892
  if [[ -n "$ISSUE_NUMBER" ]]; then
6221
- gh_comment_issue "$ISSUE_NUMBER" "⚠️ **Compound quality** — score ${quality_score}/100 after ${max_cycles} cycles
6893
+ local quality_emoji=""
6894
+ [[ "$quality_score" -lt 70 ]] && quality_emoji="⚠️"
6895
+ gh_comment_issue "$ISSUE_NUMBER" "${quality_emoji} **Compound quality passed** — score ${quality_score}/${min_threshold} after ${max_cycles} cycles
6896
+
6897
+ | Finding Type | Count |
6898
+ |---|---|
6899
+ | Critical | ${total_critical} |
6900
+ | Major | ${total_major} |
6901
+ | Minor | ${total_minor} |
6222
6902
 
6223
- Some issues remain but quality score is above threshold. Proceeding." 2>/dev/null || true
6903
+ DoD pass rate: ${_dod_pass_rate}%" 2>/dev/null || true
6224
6904
  fi
6225
6905
 
6226
- log_stage "compound_quality" "Passed with score ${quality_score}/100 after ${max_cycles} cycles"
6906
+ log_stage "compound_quality" "Passed with score ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6227
6907
  return 0
6228
6908
  fi
6229
6909
 
6230
- error "Compound quality exhausted after ${max_cycles} cycles"
6910
+ error "Compound quality exhausted after ${max_cycles} cycles with insufficient score"
6231
6911
 
6232
6912
  if [[ -n "$ISSUE_NUMBER" ]]; then
6233
6913
  gh_comment_issue "$ISSUE_NUMBER" "❌ **Compound quality failed** after ${max_cycles} cycles
@@ -6585,6 +7265,12 @@ Focus on fixing the failing tests while keeping all passing tests working."
6585
7265
  _snap_error="${_snap_error:-}"
6586
7266
  pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-test}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
6587
7267
  fi
7268
+ # Record fix outcome when tests pass after a retry with memory injection (pipeline path)
7269
+ if [[ "$cycle" -gt 1 && -n "${last_test_error:-}" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
7270
+ local _sig
7271
+ _sig=$(echo "$last_test_error" | head -3 | tr '\n' ' ' | sed 's/^ *//;s/ *$//')
7272
+ [[ -n "$_sig" ]] && bash "$SCRIPT_DIR/sw-memory.sh" fix-outcome "$_sig" "true" "true" 2>/dev/null || true
7273
+ fi
6588
7274
  return 0 # Tests passed!
6589
7275
  fi
6590
7276
 
@@ -6773,11 +7459,17 @@ run_pipeline() {
6773
7459
 
6774
7460
  # CI resume: skip stages marked as completed from previous run
6775
7461
  if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "$id"; then
6776
- echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}skipped (CI resume)${RESET}"
6777
- set_stage_status "$id" "complete"
6778
- completed=$((completed + 1))
6779
- emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
6780
- continue
7462
+ # Verify artifacts survived the merge regenerate if missing
7463
+ if verify_stage_artifacts "$id"; then
7464
+ echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— skipped (CI resume)${RESET}"
7465
+ set_stage_status "$id" "complete"
7466
+ completed=$((completed + 1))
7467
+ emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
7468
+ continue
7469
+ else
7470
+ warn "Stage $id marked complete but artifacts missing — regenerating"
7471
+ emit_event "stage.artifact_miss" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
7472
+ fi
6781
7473
  fi
6782
7474
 
6783
7475
  # Self-healing build→test loop: when we hit build, run both together
@@ -6929,11 +7621,29 @@ run_pipeline() {
6929
7621
  if run_stage_with_retry "$id"; then
6930
7622
  mark_stage_complete "$id"
6931
7623
  completed=$((completed + 1))
7624
+ # Capture project pattern after intake (for memory context in later stages)
7625
+ if [[ "$id" == "intake" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
7626
+ (cd "$REPO_DIR" && bash "$SCRIPT_DIR/sw-memory.sh" pattern "project" "{}" 2>/dev/null) || true
7627
+ fi
6932
7628
  local timing stage_dur_s
6933
7629
  timing=$(get_stage_timing "$id")
6934
7630
  stage_dur_s=$(( $(now_epoch) - stage_start_epoch ))
6935
7631
  success "Stage ${BOLD}$id${RESET} complete ${DIM}(${timing})${RESET}"
6936
7632
  emit_event "stage.completed" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "duration_s=$stage_dur_s"
7633
+ # Broadcast discovery for cross-pipeline learning
7634
+ if [[ -x "$SCRIPT_DIR/sw-discovery.sh" ]]; then
7635
+ local _disc_cat _disc_patterns _disc_text
7636
+ _disc_cat="$id"
7637
+ case "$id" in
7638
+ plan) _disc_patterns="*.md"; _disc_text="Plan completed: ${GOAL:-goal}" ;;
7639
+ design) _disc_patterns="*.md,*.ts,*.tsx,*.js"; _disc_text="Design completed for ${GOAL:-goal}" ;;
7640
+ build) _disc_patterns="src/*,*.ts,*.tsx,*.js"; _disc_text="Build completed" ;;
7641
+ test) _disc_patterns="*.test.*,*_test.*"; _disc_text="Tests passed" ;;
7642
+ review) _disc_patterns="*.md,*.ts,*.tsx"; _disc_text="Review completed" ;;
7643
+ *) _disc_patterns="*"; _disc_text="Stage $id completed" ;;
7644
+ esac
7645
+ bash "$SCRIPT_DIR/sw-discovery.sh" broadcast "$_disc_cat" "$_disc_patterns" "$_disc_text" "" 2>/dev/null || true
7646
+ fi
6937
7647
  # Log model used for prediction feedback
6938
7648
  echo "${id}|${stage_model_used}|true" >> "${ARTIFACTS_DIR}/model-routing.log"
6939
7649
  else
@@ -7121,9 +7831,193 @@ pipeline_cleanup_worktree() {
7121
7831
  fi
7122
7832
  }
7123
7833
 
7834
+ # ─── Dry Run Mode ───────────────────────────────────────────────────────────
7835
+ # Shows what would happen without executing
7836
+ run_dry_run() {
7837
+ echo ""
7838
+ echo -e "${BLUE}${BOLD}━━━ Dry Run: Pipeline Validation ━━━${RESET}"
7839
+ echo ""
7840
+
7841
+ # Validate pipeline config
7842
+ if [[ ! -f "$PIPELINE_CONFIG" ]]; then
7843
+ error "Pipeline config not found: $PIPELINE_CONFIG"
7844
+ return 1
7845
+ fi
7846
+
7847
+ # Validate JSON structure
7848
+ local validate_json
7849
+ validate_json=$(jq . "$PIPELINE_CONFIG" 2>/dev/null) || {
7850
+ error "Pipeline config is not valid JSON: $PIPELINE_CONFIG"
7851
+ return 1
7852
+ }
7853
+
7854
+ # Extract pipeline metadata
7855
+ local pipeline_name stages_count enabled_stages gated_stages
7856
+ pipeline_name=$(jq -r '.name // "unknown"' "$PIPELINE_CONFIG")
7857
+ stages_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
7858
+ enabled_stages=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
7859
+ gated_stages=$(jq '[.stages[] | select(.enabled == true and .gate == "approve")] | length' "$PIPELINE_CONFIG")
7860
+
7861
+ # Build model (per-stage override or default)
7862
+ local default_model stage_model
7863
+ default_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")
7864
+ stage_model="$MODEL"
7865
+ [[ -z "$stage_model" ]] && stage_model="$default_model"
7866
+
7867
+ echo -e " ${BOLD}Pipeline:${RESET} $pipeline_name"
7868
+ echo -e " ${BOLD}Stages:${RESET} $enabled_stages enabled of $stages_count total"
7869
+ if [[ "$SKIP_GATES" == "true" ]]; then
7870
+ echo -e " ${BOLD}Gates:${RESET} ${YELLOW}all auto (--skip-gates)${RESET}"
7871
+ else
7872
+ echo -e " ${BOLD}Gates:${RESET} $gated_stages approval gate(s)"
7873
+ fi
7874
+ echo -e " ${BOLD}Model:${RESET} $stage_model"
7875
+ echo ""
7876
+
7877
+ # Table header
7878
+ echo -e "${CYAN}${BOLD}Stage Enabled Gate Model${RESET}"
7879
+ echo -e "${CYAN}────────────────────────────────────────${RESET}"
7880
+
7881
+ # List all stages
7882
+ while IFS= read -r stage_json; do
7883
+ local stage_id stage_enabled stage_gate stage_config_model stage_model_display
7884
+ stage_id=$(echo "$stage_json" | jq -r '.id')
7885
+ stage_enabled=$(echo "$stage_json" | jq -r '.enabled')
7886
+ stage_gate=$(echo "$stage_json" | jq -r '.gate')
7887
+
7888
+ # Determine stage model (config override or default)
7889
+ stage_config_model=$(echo "$stage_json" | jq -r '.config.model // ""')
7890
+ if [[ -n "$stage_config_model" && "$stage_config_model" != "null" ]]; then
7891
+ stage_model_display="$stage_config_model"
7892
+ else
7893
+ stage_model_display="$default_model"
7894
+ fi
7895
+
7896
+ # Format enabled
7897
+ local enabled_str
7898
+ if [[ "$stage_enabled" == "true" ]]; then
7899
+ enabled_str="${GREEN}yes${RESET}"
7900
+ else
7901
+ enabled_str="${DIM}no${RESET}"
7902
+ fi
7903
+
7904
+ # Format gate
7905
+ local gate_str
7906
+ if [[ "$stage_enabled" == "true" ]]; then
7907
+ if [[ "$stage_gate" == "approve" ]]; then
7908
+ gate_str="${YELLOW}approve${RESET}"
7909
+ else
7910
+ gate_str="${GREEN}auto${RESET}"
7911
+ fi
7912
+ else
7913
+ gate_str="${DIM}—${RESET}"
7914
+ fi
7915
+
7916
+ printf "%-15s %s %s %s\n" "$stage_id" "$enabled_str" "$gate_str" "$stage_model_display"
7917
+ done < <(jq -c '.stages[]' "$PIPELINE_CONFIG")
7918
+
7919
+ echo ""
7920
+
7921
+ # Validate required tools
7922
+ echo -e "${BLUE}${BOLD}━━━ Tool Validation ━━━${RESET}"
7923
+ echo ""
7924
+
7925
+ local tool_errors=0
7926
+ local required_tools=("git" "jq")
7927
+ local optional_tools=("gh" "claude" "bc")
7928
+
7929
+ for tool in "${required_tools[@]}"; do
7930
+ if command -v "$tool" &>/dev/null; then
7931
+ echo -e " ${GREEN}✓${RESET} $tool"
7932
+ else
7933
+ echo -e " ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
7934
+ tool_errors=$((tool_errors + 1))
7935
+ fi
7936
+ done
7937
+
7938
+ for tool in "${optional_tools[@]}"; do
7939
+ if command -v "$tool" &>/dev/null; then
7940
+ echo -e " ${GREEN}✓${RESET} $tool"
7941
+ else
7942
+ echo -e " ${DIM}○${RESET} $tool"
7943
+ fi
7944
+ done
7945
+
7946
+ echo ""
7947
+
7948
+ # Cost estimation (rough approximation)
7949
+ echo -e "${BLUE}${BOLD}━━━ Estimated Resource Usage ━━━${RESET}"
7950
+ echo ""
7951
+
7952
+ # Very rough cost estimation: ~2000 input tokens per stage, ~3000 output tokens
7953
+ # Adjust based on pipeline complexity
7954
+ local input_tokens_estimate output_tokens_estimate
7955
+ input_tokens_estimate=$(( enabled_stages * 2000 ))
7956
+ output_tokens_estimate=$(( enabled_stages * 3000 ))
7957
+
7958
+ # Calculate cost based on selected model
7959
+ local input_rate output_rate input_cost output_cost total_cost
7960
+ input_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.input // 3" 2>/dev/null || echo "3")
7961
+ output_rate=$(echo "$COST_MODEL_RATES" | jq -r ".${stage_model}.output // 15" 2>/dev/null || echo "15")
7962
+
7963
+ # Cost calculation: tokens per million * rate
7964
+ input_cost=$(awk -v tokens="$input_tokens_estimate" -v rate="$input_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
7965
+ output_cost=$(awk -v tokens="$output_tokens_estimate" -v rate="$output_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
7966
+ total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
7967
+
7968
+ echo -e " ${BOLD}Estimated Input Tokens:${RESET} ~$input_tokens_estimate"
7969
+ echo -e " ${BOLD}Estimated Output Tokens:${RESET} ~$output_tokens_estimate"
7970
+ echo -e " ${BOLD}Model Cost Rate:${RESET} $stage_model"
7971
+ echo -e " ${BOLD}Estimated Cost:${RESET} \$$total_cost USD (rough estimate)"
7972
+ echo ""
7973
+
7974
+ # Validate composed pipeline if intelligence is enabled
7975
+ if [[ -f "$ARTIFACTS_DIR/composed-pipeline.json" ]] && type composer_validate_pipeline &>/dev/null; then
7976
+ echo -e "${BLUE}${BOLD}━━━ Intelligence-Composed Pipeline ━━━${RESET}"
7977
+ echo ""
7978
+
7979
+ if composer_validate_pipeline "$(cat "$ARTIFACTS_DIR/composed-pipeline.json" 2>/dev/null || echo "")" 2>/dev/null; then
7980
+ echo -e " ${GREEN}✓${RESET} Composed pipeline is valid"
7981
+ else
7982
+ echo -e " ${YELLOW}⚠${RESET} Composed pipeline validation failed (will use template defaults)"
7983
+ fi
7984
+ echo ""
7985
+ fi
7986
+
7987
+ # Final validation result
7988
+ if [[ "$tool_errors" -gt 0 ]]; then
7989
+ error "Dry run validation failed: $tool_errors required tool(s) missing"
7990
+ return 1
7991
+ fi
7992
+
7993
+ success "Dry run validation passed"
7994
+ echo ""
7995
+ echo -e " To execute this pipeline: ${DIM}remove --dry-run flag${RESET}"
7996
+ echo ""
7997
+ return 0
7998
+ }
7999
+
7124
8000
  # ─── Subcommands ────────────────────────────────────────────────────────────
7125
8001
 
7126
8002
  pipeline_start() {
8003
+ # Handle --repo flag: change to directory before running
8004
+ if [[ -n "$REPO_OVERRIDE" ]]; then
8005
+ if [[ ! -d "$REPO_OVERRIDE" ]]; then
8006
+ error "Directory does not exist: $REPO_OVERRIDE"
8007
+ exit 1
8008
+ fi
8009
+ if ! cd "$REPO_OVERRIDE" 2>/dev/null; then
8010
+ error "Cannot cd to: $REPO_OVERRIDE"
8011
+ exit 1
8012
+ fi
8013
+ if ! git rev-parse --show-toplevel >/dev/null 2>&1; then
8014
+ error "Not a git repository: $REPO_OVERRIDE"
8015
+ exit 1
8016
+ fi
8017
+ ORIGINAL_REPO_DIR="$(pwd)"
8018
+ info "Using repository: $ORIGINAL_REPO_DIR"
8019
+ fi
8020
+
7127
8021
  if [[ -z "$GOAL" && -z "$ISSUE_NUMBER" ]]; then
7128
8022
  error "Must provide --goal or --issue"
7129
8023
  echo -e " Example: ${DIM}shipwright pipeline start --goal \"Add JWT auth\"${RESET}"
@@ -7238,8 +8132,8 @@ pipeline_start() {
7238
8132
  echo ""
7239
8133
 
7240
8134
  if [[ "$DRY_RUN" == "true" ]]; then
7241
- info "Dry run — no stages will execute"
7242
- return 0
8135
+ run_dry_run
8136
+ return $?
7243
8137
  fi
7244
8138
 
7245
8139
  # Start background heartbeat writer
@@ -7283,9 +8177,15 @@ pipeline_start() {
7283
8177
  "result=success" \
7284
8178
  "duration_s=${total_dur_s:-0}" \
7285
8179
  "pr_url=${pr_url:-}" \
8180
+ "agent_id=${PIPELINE_AGENT_ID}" \
7286
8181
  "input_tokens=$TOTAL_INPUT_TOKENS" \
7287
8182
  "output_tokens=$TOTAL_OUTPUT_TOKENS" \
7288
8183
  "self_heal_count=$SELF_HEAL_COUNT"
8184
+
8185
+ # Auto-ingest pipeline outcome into recruit profiles
8186
+ if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
8187
+ bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
8188
+ fi
7289
8189
  else
7290
8190
  notify "Pipeline Failed" "Goal: ${GOAL}\nFailed at: ${CURRENT_STAGE_ID:-unknown}" "error"
7291
8191
  emit_event "pipeline.completed" \
@@ -7293,10 +8193,16 @@ pipeline_start() {
7293
8193
  "result=failure" \
7294
8194
  "duration_s=${total_dur_s:-0}" \
7295
8195
  "failed_stage=${CURRENT_STAGE_ID:-unknown}" \
8196
+ "agent_id=${PIPELINE_AGENT_ID}" \
7296
8197
  "input_tokens=$TOTAL_INPUT_TOKENS" \
7297
8198
  "output_tokens=$TOTAL_OUTPUT_TOKENS" \
7298
8199
  "self_heal_count=$SELF_HEAL_COUNT"
7299
8200
 
8201
+ # Auto-ingest pipeline outcome into recruit profiles
8202
+ if [[ -x "$SCRIPT_DIR/sw-recruit.sh" ]]; then
8203
+ bash "$SCRIPT_DIR/sw-recruit.sh" ingest-pipeline 1 2>/dev/null || true
8204
+ fi
8205
+
7300
8206
  # Capture failure learnings to memory
7301
8207
  if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
7302
8208
  bash "$SCRIPT_DIR/sw-memory.sh" capture "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
@@ -7352,18 +8258,24 @@ pipeline_start() {
7352
8258
  memory_finalize_pipeline "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
7353
8259
  fi
7354
8260
 
7355
- # Emit cost event
8261
+ # Emit cost event — prefer actual cost from Claude CLI when available
7356
8262
  local model_key="${MODEL:-sonnet}"
7357
- local input_cost output_cost total_cost
7358
- input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
7359
- output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
7360
- total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
8263
+ local total_cost
8264
+ if [[ -n "${TOTAL_COST_USD:-}" && "${TOTAL_COST_USD}" != "0" && "${TOTAL_COST_USD}" != "null" ]]; then
8265
+ total_cost="${TOTAL_COST_USD}"
8266
+ else
8267
+ # Fallback: estimate from token counts and model rates
8268
+ local input_cost output_cost
8269
+ input_cost=$(awk -v tokens="$TOTAL_INPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.input // 3")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
8270
+ output_cost=$(awk -v tokens="$TOTAL_OUTPUT_TOKENS" -v rate="$(echo "$COST_MODEL_RATES" | jq -r ".${model_key}.output // 15")" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
8271
+ total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')
8272
+ fi
7361
8273
 
7362
8274
  emit_event "pipeline.cost" \
7363
8275
  "input_tokens=$TOTAL_INPUT_TOKENS" \
7364
8276
  "output_tokens=$TOTAL_OUTPUT_TOKENS" \
7365
8277
  "model=$model_key" \
7366
- "estimated_cost_usd=$total_cost"
8278
+ "cost_usd=$total_cost"
7367
8279
 
7368
8280
  return $exit_code
7369
8281
  }