shipwright-cli 1.10.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. package/README.md +114 -36
  2. package/completions/_shipwright +212 -32
  3. package/completions/shipwright.bash +97 -25
  4. package/docs/strategy/01-market-research.md +619 -0
  5. package/docs/strategy/02-mission-and-brand.md +587 -0
  6. package/docs/strategy/03-gtm-and-roadmap.md +759 -0
  7. package/docs/strategy/QUICK-START.txt +289 -0
  8. package/docs/strategy/README.md +172 -0
  9. package/package.json +4 -2
  10. package/scripts/sw +208 -1
  11. package/scripts/sw-activity.sh +500 -0
  12. package/scripts/sw-adaptive.sh +925 -0
  13. package/scripts/sw-adversarial.sh +1 -1
  14. package/scripts/sw-architecture-enforcer.sh +1 -1
  15. package/scripts/sw-auth.sh +613 -0
  16. package/scripts/sw-autonomous.sh +664 -0
  17. package/scripts/sw-changelog.sh +704 -0
  18. package/scripts/sw-checkpoint.sh +1 -1
  19. package/scripts/sw-ci.sh +602 -0
  20. package/scripts/sw-cleanup.sh +1 -1
  21. package/scripts/sw-code-review.sh +637 -0
  22. package/scripts/sw-connect.sh +1 -1
  23. package/scripts/sw-context.sh +605 -0
  24. package/scripts/sw-cost.sh +1 -1
  25. package/scripts/sw-daemon.sh +432 -130
  26. package/scripts/sw-dashboard.sh +1 -1
  27. package/scripts/sw-db.sh +540 -0
  28. package/scripts/sw-decompose.sh +539 -0
  29. package/scripts/sw-deps.sh +551 -0
  30. package/scripts/sw-developer-simulation.sh +1 -1
  31. package/scripts/sw-discovery.sh +412 -0
  32. package/scripts/sw-docs-agent.sh +539 -0
  33. package/scripts/sw-docs.sh +1 -1
  34. package/scripts/sw-doctor.sh +59 -1
  35. package/scripts/sw-dora.sh +615 -0
  36. package/scripts/sw-durable.sh +710 -0
  37. package/scripts/sw-e2e-orchestrator.sh +535 -0
  38. package/scripts/sw-eventbus.sh +393 -0
  39. package/scripts/sw-feedback.sh +471 -0
  40. package/scripts/sw-fix.sh +1 -1
  41. package/scripts/sw-fleet-discover.sh +567 -0
  42. package/scripts/sw-fleet-viz.sh +404 -0
  43. package/scripts/sw-fleet.sh +8 -1
  44. package/scripts/sw-github-app.sh +596 -0
  45. package/scripts/sw-github-checks.sh +1 -1
  46. package/scripts/sw-github-deploy.sh +1 -1
  47. package/scripts/sw-github-graphql.sh +1 -1
  48. package/scripts/sw-guild.sh +569 -0
  49. package/scripts/sw-heartbeat.sh +1 -1
  50. package/scripts/sw-hygiene.sh +559 -0
  51. package/scripts/sw-incident.sh +617 -0
  52. package/scripts/sw-init.sh +88 -1
  53. package/scripts/sw-instrument.sh +699 -0
  54. package/scripts/sw-intelligence.sh +1 -1
  55. package/scripts/sw-jira.sh +1 -1
  56. package/scripts/sw-launchd.sh +363 -28
  57. package/scripts/sw-linear.sh +1 -1
  58. package/scripts/sw-logs.sh +1 -1
  59. package/scripts/sw-loop.sh +64 -3
  60. package/scripts/sw-memory.sh +1 -1
  61. package/scripts/sw-mission-control.sh +487 -0
  62. package/scripts/sw-model-router.sh +545 -0
  63. package/scripts/sw-otel.sh +596 -0
  64. package/scripts/sw-oversight.sh +689 -0
  65. package/scripts/sw-pipeline-composer.sh +1 -1
  66. package/scripts/sw-pipeline-vitals.sh +1 -1
  67. package/scripts/sw-pipeline.sh +687 -24
  68. package/scripts/sw-pm.sh +693 -0
  69. package/scripts/sw-pr-lifecycle.sh +522 -0
  70. package/scripts/sw-predictive.sh +1 -1
  71. package/scripts/sw-prep.sh +1 -1
  72. package/scripts/sw-ps.sh +1 -1
  73. package/scripts/sw-public-dashboard.sh +798 -0
  74. package/scripts/sw-quality.sh +595 -0
  75. package/scripts/sw-reaper.sh +1 -1
  76. package/scripts/sw-recruit.sh +573 -0
  77. package/scripts/sw-regression.sh +642 -0
  78. package/scripts/sw-release-manager.sh +736 -0
  79. package/scripts/sw-release.sh +706 -0
  80. package/scripts/sw-remote.sh +1 -1
  81. package/scripts/sw-replay.sh +520 -0
  82. package/scripts/sw-retro.sh +691 -0
  83. package/scripts/sw-scale.sh +444 -0
  84. package/scripts/sw-security-audit.sh +505 -0
  85. package/scripts/sw-self-optimize.sh +1 -1
  86. package/scripts/sw-session.sh +1 -1
  87. package/scripts/sw-setup.sh +1 -1
  88. package/scripts/sw-standup.sh +712 -0
  89. package/scripts/sw-status.sh +1 -1
  90. package/scripts/sw-strategic.sh +658 -0
  91. package/scripts/sw-stream.sh +450 -0
  92. package/scripts/sw-swarm.sh +583 -0
  93. package/scripts/sw-team-stages.sh +511 -0
  94. package/scripts/sw-templates.sh +1 -1
  95. package/scripts/sw-testgen.sh +515 -0
  96. package/scripts/sw-tmux-pipeline.sh +554 -0
  97. package/scripts/sw-tmux.sh +1 -1
  98. package/scripts/sw-trace.sh +485 -0
  99. package/scripts/sw-tracker-github.sh +188 -0
  100. package/scripts/sw-tracker-jira.sh +172 -0
  101. package/scripts/sw-tracker-linear.sh +251 -0
  102. package/scripts/sw-tracker.sh +117 -2
  103. package/scripts/sw-triage.sh +603 -0
  104. package/scripts/sw-upgrade.sh +1 -1
  105. package/scripts/sw-ux.sh +677 -0
  106. package/scripts/sw-webhook.sh +627 -0
  107. package/scripts/sw-widgets.sh +530 -0
  108. package/scripts/sw-worktree.sh +1 -1
@@ -6,7 +6,12 @@
6
6
  set -euo pipefail
7
7
  trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
8
8
 
9
- VERSION="1.10.0"
9
+ # Allow spawning Claude CLI from within a Claude Code session (daemon, fleet, etc.)
10
+ unset CLAUDECODE 2>/dev/null || true
11
+ # Ignore SIGHUP so tmux attach/detach doesn't kill long-running plan/design/review stages
12
+ trap '' HUP
13
+
14
+ VERSION="2.0.0"
10
15
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
11
16
  REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
12
17
 
@@ -1265,6 +1270,80 @@ mark_stage_complete() {
1265
1270
  if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_checks_stage_update &>/dev/null 2>&1; then
1266
1271
  gh_checks_stage_update "$stage_id" "completed" "success" "Stage $stage_id: ${timing}" 2>/dev/null || true
1267
1272
  fi
1273
+
1274
+ # Persist artifacts to feature branch after expensive stages
1275
+ case "$stage_id" in
1276
+ plan) persist_artifacts "plan" "plan.md" "dod.md" "context-bundle.md" ;;
1277
+ design) persist_artifacts "design" "design.md" ;;
1278
+ esac
1279
+ }
1280
+
1281
persist_artifacts() {
  # Commit and push pipeline artifacts to the feature branch mid-pipeline.
  # Only runs in CI — local runs skip. Non-fatal: logs failure but never crashes.
  #
  # Arguments:
  #   $1   - stage name (used in commit message and emitted events)
  #   $2.. - artifact file names, resolved relative to $ARTIFACTS_DIR
  # Globals read: CI_MODE, ISSUE_NUMBER, ARTIFACTS_DIR
  # Returns: always 0 (persistence is best-effort)
  [[ "${CI_MODE:-false}" != "true" ]] && return 0
  [[ -z "${ISSUE_NUMBER:-}" ]] && return 0
  [[ -z "${ARTIFACTS_DIR:-}" ]] && return 0

  local stage="${1:-unknown}"
  # `|| true`: a bare `shift` with no remaining args returns non-zero and
  # would kill the script under `set -e`.
  shift || true
  # Guard: no file names given. Also avoids expanding "${files[@]}" on an
  # empty array, which errors under `set -u` on bash < 4.4.
  [[ $# -eq 0 ]] && return 0
  local files=("$@")

  # Collect only artifacts that actually exist and are non-empty
  local to_add=()
  local f
  for f in "${files[@]}"; do
    local path="${ARTIFACTS_DIR}/${f}"
    if [[ -f "$path" && -s "$path" ]]; then
      to_add+=("$path")
    fi
  done

  if [[ ${#to_add[@]} -eq 0 ]]; then
    warn "persist_artifacts($stage): no artifact files found — skipping"
    return 0
  fi

  info "Persisting ${#to_add[@]} artifact(s) after stage ${stage}..."

  # NOTE: inside `( ... ) || handler`, `set -e` is suppressed, so each step
  # must fail explicitly via `|| exit 1` — blanket `|| true` on every command
  # (as before) made the failure handler below unreachable dead code.
  (
    git add "${to_add[@]}" 2>/dev/null || exit 1
    # Nothing newly staged → silently done (not a failure)
    git diff --cached --quiet 2>/dev/null && exit 0
    git commit -m "chore: persist ${stage} artifacts for #${ISSUE_NUMBER} [skip ci]" --no-verify 2>/dev/null || exit 1
    git push origin "HEAD:refs/heads/shipwright/issue-${ISSUE_NUMBER}" --force 2>/dev/null || exit 1
    emit_event "artifacts.persisted" "issue=${ISSUE_NUMBER}" "stage=$stage" "file_count=${#to_add[@]}" || true
  ) || {
    warn "persist_artifacts($stage): push failed — non-fatal, continuing"
    emit_event "artifacts.persist_failed" "issue=${ISSUE_NUMBER}" "stage=$stage"
  }

  return 0
}
1323
+
1324
verify_stage_artifacts() {
  # Verify that the artifacts a stage is expected to produce exist and are
  # non-empty under $ARTIFACTS_DIR.
  # Arguments: $1 - stage id (plan | design | anything else)
  # Returns: 0 when every required artifact is present, 1 otherwise.
  local stage_id="$1"
  [[ -z "${ARTIFACTS_DIR:-}" ]] && return 0

  # Map stage → required artifact file names; unknown stages require nothing.
  local required=()
  case "$stage_id" in
    plan)   required=("plan.md") ;;
    design) required=("design.md" "plan.md") ;;
    *)      return 0 ;;
  esac

  local rc=0
  local name
  for name in "${required[@]}"; do
    local candidate="${ARTIFACTS_DIR}/${name}"
    if [[ -f "$candidate" && -s "$candidate" ]]; then
      continue
    fi
    warn "verify_stage_artifacts($stage_id): missing or empty: $name"
    rc=1
  done

  return "$rc"
}
1269
1348
 
1270
1349
  mark_stage_failed() {
@@ -1649,6 +1728,12 @@ stage_plan() {
1649
1728
 
1650
1729
  info "Generating implementation plan..."
1651
1730
 
1731
+ # ── Gather context bundle (if context engine available) ──
1732
+ local context_script="${SCRIPT_DIR}/sw-context.sh"
1733
+ if [[ -x "$context_script" ]]; then
1734
+ "$context_script" gather --goal "$GOAL" --stage plan 2>/dev/null || true
1735
+ fi
1736
+
1652
1737
  # Build rich prompt with all available context
1653
1738
  local plan_prompt="You are an autonomous development agent. Analyze this codebase and create a detailed implementation plan.
1654
1739
 
@@ -1664,6 +1749,19 @@ ${ISSUE_BODY}
1664
1749
  "
1665
1750
  fi
1666
1751
 
1752
+ # Inject context bundle from context engine (if available)
1753
+ local _context_bundle="${ARTIFACTS_DIR}/context-bundle.md"
1754
+ if [[ -f "$_context_bundle" ]]; then
1755
+ local _cb_content
1756
+ _cb_content=$(cat "$_context_bundle" 2>/dev/null | head -100 || true)
1757
+ if [[ -n "$_cb_content" ]]; then
1758
+ plan_prompt="${plan_prompt}
1759
+ ## Pipeline Context
1760
+ ${_cb_content}
1761
+ "
1762
+ fi
1763
+ fi
1764
+
1667
1765
  # Inject intelligence memory context for similar past plans
1668
1766
  if type intelligence_search_memory &>/dev/null 2>&1; then
1669
1767
  local plan_memory
@@ -1766,12 +1864,24 @@ Checklist of completion criteria.
1766
1864
  parse_claude_tokens "$_token_log"
1767
1865
 
1768
1866
  if [[ ! -s "$plan_file" ]]; then
1769
- error "Plan generation failed"
1867
+ error "Plan generation failed — empty output"
1868
+ return 1
1869
+ fi
1870
+
1871
+ # Validate plan content — detect API/CLI errors masquerading as plans
1872
+ local _plan_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
1873
+ _plan_fatal="${_plan_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
1874
+ if grep -qiE "$_plan_fatal" "$plan_file" 2>/dev/null; then
1875
+ error "Plan stage produced API/CLI error instead of a plan: $(head -1 "$plan_file" | cut -c1-100)"
1770
1876
  return 1
1771
1877
  fi
1772
1878
 
1773
1879
  local line_count
1774
1880
  line_count=$(wc -l < "$plan_file" | xargs)
1881
+ if [[ "$line_count" -lt 3 ]]; then
1882
+ error "Plan too short (${line_count} lines) — likely an error, not a real plan"
1883
+ return 1
1884
+ fi
1775
1885
  info "Plan saved: ${DIM}$plan_file${RESET} (${line_count} lines)"
1776
1886
 
1777
1887
  # Extract task checklist for GitHub issue and task tracking
@@ -2144,12 +2254,24 @@ Be concrete and specific. Reference actual file paths in the codebase. Consider
2144
2254
  parse_claude_tokens "$_token_log"
2145
2255
 
2146
2256
  if [[ ! -s "$design_file" ]]; then
2147
- error "Design generation failed"
2257
+ error "Design generation failed — empty output"
2258
+ return 1
2259
+ fi
2260
+
2261
+ # Validate design content — detect API/CLI errors masquerading as designs
2262
+ local _design_fatal="Invalid API key|invalid_api_key|authentication_error|API key expired"
2263
+ _design_fatal="${_design_fatal}|rate_limit_error|overloaded_error|Could not resolve host|ANTHROPIC_API_KEY"
2264
+ if grep -qiE "$_design_fatal" "$design_file" 2>/dev/null; then
2265
+ error "Design stage produced API/CLI error instead of a design: $(head -1 "$design_file" | cut -c1-100)"
2148
2266
  return 1
2149
2267
  fi
2150
2268
 
2151
2269
  local line_count
2152
2270
  line_count=$(wc -l < "$design_file" | xargs)
2271
+ if [[ "$line_count" -lt 3 ]]; then
2272
+ error "Design too short (${line_count} lines) — likely an error, not a real design"
2273
+ return 1
2274
+ fi
2153
2275
  info "Design saved: ${DIM}$design_file${RESET} (${line_count} lines)"
2154
2276
 
2155
2277
  # Extract file lists for build stage awareness
@@ -2473,7 +2595,7 @@ ${log_excerpt}
2473
2595
  # Post test results to GitHub
2474
2596
  if [[ -n "$ISSUE_NUMBER" ]]; then
2475
2597
  local test_summary
2476
- test_summary=$(tail -10 "$test_log")
2598
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
2477
2599
  local cov_line=""
2478
2600
  [[ -n "$coverage" ]] && cov_line="
2479
2601
  **Coverage:** ${coverage}%"
@@ -2879,6 +3001,19 @@ stage_pr() {
2879
3001
  fi
2880
3002
  fi
2881
3003
 
3004
+ # Pre-PR diff gate — verify meaningful code changes exist (not just bookkeeping)
3005
+ local real_changes
3006
+ real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
3007
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
3008
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
3009
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
3010
+ if [[ "${real_changes:-0}" -eq 0 ]]; then
3011
+ error "No meaningful code changes detected — only bookkeeping files modified"
3012
+ error "Refusing to create PR with zero real changes"
3013
+ return 1
3014
+ fi
3015
+ info "Pre-PR diff check: ${real_changes} real files changed"
3016
+
2882
3017
  # Build PR title — prefer GOAL over plan file first line
2883
3018
  # (plan file first line often contains Claude analysis text, not a clean title)
2884
3019
  local pr_title=""
@@ -2890,6 +3025,12 @@ stage_pr() {
2890
3025
  fi
2891
3026
  [[ -z "$pr_title" ]] && pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
2892
3027
 
3028
+ # Sanitize: reject PR titles that look like error messages
3029
+ if echo "$pr_title" | grep -qiE 'Invalid API|API key|authentication_error|rate_limit|CLI error|no useful output'; then
3030
+ warn "PR title looks like an error message: $pr_title"
3031
+ pr_title="Pipeline changes for issue ${ISSUE_NUMBER:-unknown}"
3032
+ fi
3033
+
2893
3034
  # Build comprehensive PR body
2894
3035
  local plan_summary=""
2895
3036
  if [[ -s "$plan_file" ]]; then
@@ -2898,7 +3039,7 @@ stage_pr() {
2898
3039
 
2899
3040
  local test_summary=""
2900
3041
  if [[ -s "$test_log" ]]; then
2901
- test_summary=$(tail -10 "$test_log")
3042
+ test_summary=$(tail -10 "$test_log" | sed 's/\x1b\[[0-9;]*m//g')
2902
3043
  fi
2903
3044
 
2904
3045
  local review_summary=""
@@ -5754,9 +5895,242 @@ ${route_instruction}"
5754
5895
  fi
5755
5896
  }
5756
5897
 
5898
# ──────────────────────────────────────────────────────────────────────────────
# Bash 3.2 Compatibility Check
# Scans modified .sh files for common bash 3.2 incompatibilities.
# Stdout: the violation COUNT only — callers capture it via $(...), so all
# human-readable detail is routed to stderr.
# ──────────────────────────────────────────────────────────────────────────────
run_bash_compat_check() {
  local violations=0
  local details=""

  # Modified .sh files relative to the base branch (empty outside a repo)
  local changed_files
  changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" -- '*.sh' 2>/dev/null || echo "")

  if [[ -z "$changed_files" ]]; then
    echo "0"
    return 0
  fi

  # Incompatibility patterns (ERE) and their human-readable labels.
  # NB: plain `declare -a` (indexed arrays) IS valid in bash 3.2 — the old
  # `-[aA]` pattern false-positived on it; only `declare -A` is bash 4.0+.
  local labels=(
    "declare -A (associative arrays)"
    "readarray/mapfile"
    "case conversion \${var,,} / \${var^^}"
    "|& operator"
    "advanced case terminator ;& / ;;&"
  )
  local patterns=(
    'declare[[:space:]]+-A'
    'readarray|mapfile'
    '\$\{[A-Za-z_][A-Za-z0-9_]*(,,|\^\^)'
    '\|&'
    ';;?&'
  )

  local filepath i count
  while IFS= read -r filepath; do
    [[ -z "$filepath" ]] && continue
    [[ -f "$filepath" ]] || continue   # deleted/renamed in this diff

    for ((i = 0; i < ${#patterns[@]}; i++)); do
      # grep -c counts matching LINES; empty capture (unreadable file) acts as 0
      count=$(grep -cE -- "${patterns[i]}" "$filepath" 2>/dev/null || true)
      if [[ "${count:-0}" -gt 0 ]]; then
        violations=$((violations + count))
        details+="${filepath}: ${labels[i]} (${count} line(s))"$'\n'
      fi
    done
  done <<< "$changed_files"

  if [[ "$violations" -gt 0 ]]; then
    warn "Bash 3.2 compatibility check: ${violations} violation(s) found:"
    # stderr, NOT stdout: stdout must stay purely numeric for callers
    printf '%s' "$details" | sed 's/^/  /' >&2
  fi

  echo "$violations"
}
5976
+
5977
# ──────────────────────────────────────────────────────────────────────────────
# Test Coverage Check
# Runs the configured test command ($TEST_CMD) and extracts a coverage
# percentage from its output.
# Stdout: "skip" when no test command is configured, otherwise an int 0-100.
# NOTE(review): assumes info/warn/success log to stderr — callers capture
# stdout and rely on it carrying only the protocol value; confirm.
# ──────────────────────────────────────────────────────────────────────────────
run_test_coverage_check() {
  local test_cmd="${TEST_CMD:-}"
  if [[ -z "$test_cmd" ]]; then
    echo "skip"
    return 0
  fi

  info "Running test coverage check..."

  # TEST_CMD comes from pipeline config and may contain pipes/flags, so eval
  # is intentional here.
  local test_output
  local test_rc=0
  test_output=$(eval "$test_cmd" 2>&1) || test_rc=$?

  if [[ "$test_rc" -ne 0 ]]; then
    warn "Test command failed (exit code: $test_rc) — cannot extract coverage"
    echo "0"
    return 0
  fi

  # Extract a coverage percentage, case-insensitively. Handled formats:
  #   "NN% coverage", "NN% lines", "NN% stmts/statements"   (number first)
  # The old greps were case-sensitive, so documented forms like "Coverage: NN"
  # or "Lines: NN%" silently yielded 0.
  local coverage_pct
  coverage_pct=$(echo "$test_output" | grep -oiE '[0-9]{1,3}%[[:space:]]*(coverage|lines|stmts|statements)' | grep -oE '^[0-9]{1,3}' | head -1 || true)

  if [[ -z "$coverage_pct" ]]; then
    # Keyword-first formats: "Coverage: 87", "Lines: 92%", "Stmts: 80"
    coverage_pct=$(echo "$test_output" | grep -oiE '(coverage|lines|stmts|statements)[:]?[[:space:]]*[0-9]{1,3}' | grep -oE '[0-9]{1,3}' | head -1 || true)
  fi

  if [[ -z "$coverage_pct" ]]; then
    warn "Could not extract coverage percentage from test output"
    echo "0"
    return 0
  fi

  # Anything malformed or > 100 is treated as no coverage
  if [[ ! "$coverage_pct" =~ ^[0-9]{1,3}$ ]] || [[ "$coverage_pct" -gt 100 ]]; then
    coverage_pct=0
  fi

  success "Test coverage: ${coverage_pct}%"
  echo "$coverage_pct"
}
6026
+
6027
# ──────────────────────────────────────────────────────────────────────────────
# Atomic Write Violations Check
# Scans modified state/config-like files for direct `echo ... >` writes that
# should use the tmp-file + mv pattern instead.
# Stdout: the violation COUNT only; human-readable details go to stderr.
# ──────────────────────────────────────────────────────────────────────────────
run_atomic_write_check() {
  local violations=0
  local details=""

  # All modified files (not just .sh — state/config files matter here)
  local changed_files
  changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || echo "")

  if [[ -z "$changed_files" ]]; then
    echo "0"
    return 0
  fi

  local filepath
  while IFS= read -r filepath; do
    [[ -z "$filepath" ]] && continue

    # Only state/config/artifact-ish paths are held to the atomic-write rule
    if [[ ! "$filepath" =~ (state|config|artifact|cache|db|json)$ ]]; then
      continue
    fi
    [[ -f "$filepath" ]] || continue   # deleted in this diff

    # Count direct redirection writes in the working-tree file.
    # (The old code piped `git show HEAD:...` into grep while ALSO passing the
    # file operand — grep ignores stdin when given a file, so the pipe was
    # dead code. Grep the checked-out file directly.)
    local bad_writes
    bad_writes=$(grep -c 'echo.*>' "$filepath" 2>/dev/null || true)

    if [[ "${bad_writes:-0}" -gt 0 ]]; then
      violations=$((violations + bad_writes))
      details+="${filepath}: ${bad_writes} direct write(s) (should use tmp+mv)"$'\n'
    fi
  done <<< "$changed_files"

  if [[ "$violations" -gt 0 ]]; then
    warn "Atomic write violations: ${violations} found (should use tmp file + mv pattern):"
    # stderr, NOT stdout: callers capture stdout as the count
    printf '%s' "$details" | sed 's/^/  /' >&2
  fi

  echo "$violations"
}
6073
+
6074
# ──────────────────────────────────────────────────────────────────────────────
# New Function Test Detection
# Detects new functions added in the branch diff and checks whether any test
# files were touched alongside them (a coarse heuristic, not per-function).
# Stdout: count of new functions lacking test modifications (0 when covered).
# ──────────────────────────────────────────────────────────────────────────────
run_new_function_test_check() {
  # Diff against the base branch; empty means nothing to inspect
  local branch_diff
  branch_diff=$(git diff "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
  if [[ -z "$branch_diff" ]]; then
    echo "0"
    return 0
  fi

  # Added shell-style function definitions: diff lines like "+name() {"
  local added_funcs
  added_funcs=$(echo "$branch_diff" | grep -E '^\+[a-zA-Z_][a-zA-Z0-9_]*\(\)' | sed 's/^\+//' | sed 's/()//' || true)
  if [[ -z "$added_funcs" ]]; then
    echo "0"
    return 0
  fi

  # Heuristic: any test-looking path or filename touched in the diff counts
  # as test coverage for the whole change
  local touched_test_lines
  touched_test_lines=$(echo "$branch_diff" | grep -c '\-\-\-.*test\|\.test\.\|_test\.' || true)

  local untested=0
  if [[ "$touched_test_lines" -eq 0 ]]; then
    untested=$(echo "$added_funcs" | wc -l | xargs)
    warn "New functions without tests: Added ${untested} new function(s) but no test file modifications detected"
  fi

  echo "$untested"
}
6119
+
5757
6120
  stage_compound_quality() {
5758
6121
  CURRENT_STAGE_ID="compound_quality"
5759
6122
 
6123
+ # Pre-check: verify meaningful changes exist before running expensive quality checks
6124
+ local _cq_real_changes
6125
+ _cq_real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
6126
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
6127
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
6128
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
6129
+ if [[ "${_cq_real_changes:-0}" -eq 0 ]]; then
6130
+ error "Compound quality: no meaningful code changes found — failing quality gate"
6131
+ return 1
6132
+ fi
6133
+
5760
6134
  # Read config
5761
6135
  local max_cycles adversarial_enabled negative_enabled e2e_enabled dod_enabled strict_quality
5762
6136
  max_cycles=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.max_cycles) // 3' "$PIPELINE_CONFIG" 2>/dev/null) || true
@@ -5783,6 +6157,79 @@ stage_compound_quality() {
5783
6157
  local total_critical=0 total_major=0 total_minor=0
5784
6158
  local audits_run_list=""
5785
6159
 
6160
+ # ── HARDENED QUALITY GATES (RUN BEFORE CYCLES) ──
6161
+ # These checks must pass before we even start the audit cycles
6162
+ echo ""
6163
+ info "Running hardened quality gate checks..."
6164
+
6165
+ # 1. Bash 3.2 compatibility check
6166
+ local bash_violations=0
6167
+ bash_violations=$(run_bash_compat_check 2>/dev/null) || bash_violations=0
6168
+ bash_violations="${bash_violations:-0}"
6169
+
6170
+ if [[ "$strict_quality" == "true" && "$bash_violations" -gt 0 ]]; then
6171
+ error "STRICT QUALITY: Bash 3.2 incompatibilities found — blocking"
6172
+ emit_event "quality.bash_compat_failed" \
6173
+ "issue=${ISSUE_NUMBER:-0}" \
6174
+ "violations=$bash_violations"
6175
+ return 1
6176
+ fi
6177
+
6178
+ if [[ "$bash_violations" -gt 0 ]]; then
6179
+ warn "Bash 3.2 incompatibilities detected: ${bash_violations} (will impact quality score)"
6180
+ total_minor=$((total_minor + bash_violations))
6181
+ else
6182
+ success "Bash 3.2 compatibility: clean"
6183
+ fi
6184
+
6185
+ # 2. Test coverage check
6186
+ local coverage_pct=0
6187
+ coverage_pct=$(run_test_coverage_check 2>/dev/null) || coverage_pct=0
6188
+ coverage_pct="${coverage_pct:-0}"
6189
+
6190
+ if [[ "$coverage_pct" != "skip" ]]; then
6191
+ if [[ "$coverage_pct" -lt 60 ]]; then
6192
+ if [[ "$strict_quality" == "true" ]]; then
6193
+ error "STRICT QUALITY: Test coverage below 60% (${coverage_pct}%) — blocking"
6194
+ emit_event "quality.coverage_failed" \
6195
+ "issue=${ISSUE_NUMBER:-0}" \
6196
+ "coverage=$coverage_pct"
6197
+ return 1
6198
+ else
6199
+ warn "Test coverage below 60% threshold (${coverage_pct}%) — quality penalty applied"
6200
+ total_major=$((total_major + 2))
6201
+ fi
6202
+ fi
6203
+ fi
6204
+
6205
+ # 3. New functions without tests check
6206
+ local untested_functions=0
6207
+ untested_functions=$(run_new_function_test_check 2>/dev/null) || untested_functions=0
6208
+ untested_functions="${untested_functions:-0}"
6209
+
6210
+ if [[ "$untested_functions" -gt 0 ]]; then
6211
+ if [[ "$strict_quality" == "true" ]]; then
6212
+ error "STRICT QUALITY: ${untested_functions} new function(s) without tests — blocking"
6213
+ emit_event "quality.untested_functions" \
6214
+ "issue=${ISSUE_NUMBER:-0}" \
6215
+ "count=$untested_functions"
6216
+ return 1
6217
+ else
6218
+ warn "New functions without corresponding tests: ${untested_functions}"
6219
+ total_major=$((total_major + untested_functions))
6220
+ fi
6221
+ fi
6222
+
6223
+ # 4. Atomic write violations (optional, informational in most modes)
6224
+ local atomic_violations=0
6225
+ atomic_violations=$(run_atomic_write_check 2>/dev/null) || atomic_violations=0
6226
+ atomic_violations="${atomic_violations:-0}"
6227
+
6228
+ if [[ "$atomic_violations" -gt 0 ]]; then
6229
+ warn "Atomic write violations: ${atomic_violations} (state/config file patterns)"
6230
+ total_minor=$((total_minor + atomic_violations))
6231
+ fi
6232
+
5786
6233
  # Vitals-driven adaptive cycle limit (preferred)
5787
6234
  local base_max_cycles="$max_cycles"
5788
6235
  if type pipeline_adaptive_limit &>/dev/null 2>&1; then
@@ -6180,24 +6627,52 @@ All quality checks clean:
6180
6627
  # Record quality score
6181
6628
  pipeline_record_quality_score "$quality_score" "$total_critical" "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
6182
6629
 
6183
- # ── Quality Gate ──
6630
+ # ── Quality Gate (HARDENED) ──
6184
6631
  local compound_quality_blocking
6185
6632
  compound_quality_blocking=$(jq -r --arg id "compound_quality" \
6186
6633
  '(.stages[] | select(.id == $id) | .config.compound_quality_blocking) // true' \
6187
6634
  "$PIPELINE_CONFIG" 2>/dev/null) || true
6188
6635
  [[ -z "$compound_quality_blocking" || "$compound_quality_blocking" == "null" ]] && compound_quality_blocking="true"
6189
6636
 
6190
- if [[ "$quality_score" -lt 60 && "$compound_quality_blocking" == "true" ]]; then
6637
+ # HARDENED THRESHOLD: quality_score must be >= 60 to pass
6638
+ # In strict mode, higher requirements apply per the hardened checks above
6639
+ local min_threshold=60
6640
+ if [[ "$strict_quality" == "true" ]]; then
6641
+ # Strict mode: require score >= 70 and ZERO critical issues
6642
+ if [[ "$total_critical" -gt 0 ]]; then
6643
+ error "STRICT QUALITY: ${total_critical} critical issue(s) found — BLOCKING (strict mode)"
6644
+ emit_event "pipeline.quality_gate_failed_strict" \
6645
+ "issue=${ISSUE_NUMBER:-0}" \
6646
+ "reason=critical_issues" \
6647
+ "critical=$total_critical"
6648
+ log_stage "compound_quality" "Quality gate failed (strict mode): critical issues"
6649
+ return 1
6650
+ fi
6651
+ min_threshold=70
6652
+ fi
6653
+
6654
+ # Hard floor: score must be >= 40, regardless of other settings
6655
+ if [[ "$quality_score" -lt 40 ]]; then
6656
+ error "HARDENED GATE: Quality score ${quality_score}/100 below hard floor (40) — BLOCKING"
6657
+ emit_event "quality.hard_floor_failed" \
6658
+ "issue=${ISSUE_NUMBER:-0}" \
6659
+ "quality_score=$quality_score"
6660
+ log_stage "compound_quality" "Quality gate failed: score below hard floor (40)"
6661
+ return 1
6662
+ fi
6663
+
6664
+ if [[ "$quality_score" -lt "$min_threshold" && "$compound_quality_blocking" == "true" ]]; then
6191
6665
  emit_event "pipeline.quality_gate_failed" \
6192
6666
  "issue=${ISSUE_NUMBER:-0}" \
6193
6667
  "quality_score=$quality_score" \
6668
+ "threshold=$min_threshold" \
6194
6669
  "critical=$total_critical" \
6195
6670
  "major=$total_major"
6196
6671
 
6197
- error "Quality gate FAILED: score ${quality_score}/100 (critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
6672
+ error "Quality gate FAILED: score ${quality_score}/100 (threshold: ${min_threshold}/100, critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
6198
6673
 
6199
6674
  if [[ -n "$ISSUE_NUMBER" ]]; then
6200
- gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/100
6675
+ gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/${min_threshold}
6201
6676
 
6202
6677
  | Finding Type | Count | Deduction |
6203
6678
  |---|---|---|
@@ -6209,25 +6684,41 @@ DoD pass rate: ${_dod_pass_rate}%
6209
6684
  Quality issues remain after ${max_cycles} cycles. Check artifacts for details." 2>/dev/null || true
6210
6685
  fi
6211
6686
 
6212
- log_stage "compound_quality" "Quality gate failed: ${quality_score}/100 after ${max_cycles} cycles"
6687
+ log_stage "compound_quality" "Quality gate failed: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6213
6688
  return 1
6214
6689
  fi
6215
6690
 
6216
- # Exhausted all cycles but quality score is above threshold
6217
- if [[ "$quality_score" -ge 60 ]]; then
6218
- warn "Compound quality: score ${quality_score}/100 after ${max_cycles} cycles (above threshold, proceeding)"
6691
+ # Exhausted all cycles but quality score is at or above threshold
6692
+ if [[ "$quality_score" -ge "$min_threshold" ]]; then
6693
+ if [[ "$quality_score" -eq 100 ]]; then
6694
+ success "Compound quality PERFECT: 100/100"
6695
+ elif [[ "$quality_score" -ge 80 ]]; then
6696
+ success "Compound quality EXCELLENT: ${quality_score}/100"
6697
+ elif [[ "$quality_score" -ge 70 ]]; then
6698
+ success "Compound quality GOOD: ${quality_score}/100"
6699
+ else
6700
+ warn "Compound quality ACCEPTABLE: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6701
+ fi
6219
6702
 
6220
6703
  if [[ -n "$ISSUE_NUMBER" ]]; then
6221
- gh_comment_issue "$ISSUE_NUMBER" "⚠️ **Compound quality** — score ${quality_score}/100 after ${max_cycles} cycles
6704
+ local quality_emoji=""
6705
+ [[ "$quality_score" -lt 70 ]] && quality_emoji="⚠️"
6706
+ gh_comment_issue "$ISSUE_NUMBER" "${quality_emoji} **Compound quality passed** — score ${quality_score}/${min_threshold} after ${max_cycles} cycles
6707
+
6708
+ | Finding Type | Count |
6709
+ |---|---|
6710
+ | Critical | ${total_critical} |
6711
+ | Major | ${total_major} |
6712
+ | Minor | ${total_minor} |
6222
6713
 
6223
- Some issues remain but quality score is above threshold. Proceeding." 2>/dev/null || true
6714
+ DoD pass rate: ${_dod_pass_rate}%" 2>/dev/null || true
6224
6715
  fi
6225
6716
 
6226
- log_stage "compound_quality" "Passed with score ${quality_score}/100 after ${max_cycles} cycles"
6717
+ log_stage "compound_quality" "Passed with score ${quality_score}/${min_threshold} after ${max_cycles} cycles"
6227
6718
  return 0
6228
6719
  fi
6229
6720
 
6230
- error "Compound quality exhausted after ${max_cycles} cycles"
6721
+ error "Compound quality exhausted after ${max_cycles} cycles with insufficient score"
6231
6722
 
6232
6723
  if [[ -n "$ISSUE_NUMBER" ]]; then
6233
6724
  gh_comment_issue "$ISSUE_NUMBER" "❌ **Compound quality failed** after ${max_cycles} cycles
@@ -6773,11 +7264,17 @@ run_pipeline() {
6773
7264
 
6774
7265
  # CI resume: skip stages marked as completed from previous run
6775
7266
  if [[ -n "${COMPLETED_STAGES:-}" ]] && echo "$COMPLETED_STAGES" | tr ',' '\n' | grep -qx "$id"; then
6776
- echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}skipped (CI resume)${RESET}"
6777
- set_stage_status "$id" "complete"
6778
- completed=$((completed + 1))
6779
- emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
6780
- continue
7267
+ # Verify artifacts survived the merge regenerate if missing
7268
+ if verify_stage_artifacts "$id"; then
7269
+ echo -e " ${GREEN}✓ ${id}${RESET} ${DIM}— skipped (CI resume)${RESET}"
7270
+ set_stage_status "$id" "complete"
7271
+ completed=$((completed + 1))
7272
+ emit_event "stage.skipped" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "reason=ci_resume"
7273
+ continue
7274
+ else
7275
+ warn "Stage $id marked complete but artifacts missing — regenerating"
7276
+ emit_event "stage.artifact_miss" "issue=${ISSUE_NUMBER:-0}" "stage=$id"
7277
+ fi
6781
7278
  fi
6782
7279
 
6783
7280
  # Self-healing build→test loop: when we hit build, run both together
@@ -7121,6 +7618,172 @@ pipeline_cleanup_worktree() {
7121
7618
  fi
7122
7619
  }
7123
7620
 
7621
# ─── Dry Run Mode ───────────────────────────────────────────────────────────
# run_dry_run — validate the pipeline without executing any stages.
#
# Checks, in order: the pipeline config exists and is valid JSON, prints a
# per-stage summary table (enabled / gate / model), verifies required and
# optional CLI tools, prints a rough token/cost estimate for the selected
# model, and (when present) validates an intelligence-composed pipeline.
#
# Globals (read): PIPELINE_CONFIG, MODEL, SKIP_GATES, COST_MODEL_RATES,
#                 ARTIFACTS_DIR, and the color/style vars (BLUE, BOLD, …).
# Outputs: human-readable validation report on stdout.
# Returns: 0 when validation passes, 1 on missing/invalid config or when a
#          required tool is absent.
run_dry_run() {
  echo ""
  echo -e "${BLUE}${BOLD}━━━ Dry Run: Pipeline Validation ━━━${RESET}"
  echo ""

  # Config file must exist before anything else.
  if [[ ! -f "$PIPELINE_CONFIG" ]]; then
    error "Pipeline config not found: $PIPELINE_CONFIG"
    return 1
  fi

  # Validate JSON structure. `jq empty` parses the document and exits
  # non-zero on a syntax error; the previous version captured `jq .`
  # output into a variable that was never used, buffering the whole
  # pretty-printed config for nothing.
  if ! jq empty "$PIPELINE_CONFIG" 2>/dev/null; then
    error "Pipeline config is not valid JSON: $PIPELINE_CONFIG"
    return 1
  fi

  # Extract pipeline metadata.
  local pipeline_name stages_count enabled_stages gated_stages
  pipeline_name=$(jq -r '.name // "unknown"' "$PIPELINE_CONFIG")
  stages_count=$(jq '.stages | length' "$PIPELINE_CONFIG")
  enabled_stages=$(jq '[.stages[] | select(.enabled == true)] | length' "$PIPELINE_CONFIG")
  gated_stages=$(jq '[.stages[] | select(.enabled == true and .gate == "approve")] | length' "$PIPELINE_CONFIG")

  # Resolve the model: CLI override ($MODEL) wins, else the pipeline default.
  local default_model stage_model
  default_model=$(jq -r '.defaults.model // "opus"' "$PIPELINE_CONFIG")
  stage_model="$MODEL"
  [[ -z "$stage_model" ]] && stage_model="$default_model"

  echo -e "  ${BOLD}Pipeline:${RESET}  $pipeline_name"
  echo -e "  ${BOLD}Stages:${RESET}    $enabled_stages enabled of $stages_count total"
  if [[ "$SKIP_GATES" == "true" ]]; then
    echo -e "  ${BOLD}Gates:${RESET}     ${YELLOW}all auto (--skip-gates)${RESET}"
  else
    echo -e "  ${BOLD}Gates:${RESET}     $gated_stages approval gate(s)"
  fi
  echo -e "  ${BOLD}Model:${RESET}     $stage_model"
  echo ""

  # Table header.
  echo -e "${CYAN}${BOLD}Stage           Enabled  Gate     Model${RESET}"
  echo -e "${CYAN}────────────────────────────────────────${RESET}"

  # List all stages. jq -c emits one stage object per line.
  while IFS= read -r stage_json; do
    local stage_id stage_enabled stage_gate stage_config_model stage_model_display
    stage_id=$(jq -r '.id' <<<"$stage_json")
    stage_enabled=$(jq -r '.enabled' <<<"$stage_json")
    stage_gate=$(jq -r '.gate' <<<"$stage_json")

    # Per-stage model: config override wins, else the pipeline default.
    stage_config_model=$(jq -r '.config.model // ""' <<<"$stage_json")
    if [[ -n "$stage_config_model" && "$stage_config_model" != "null" ]]; then
      stage_model_display="$stage_config_model"
    else
      stage_model_display="$default_model"
    fi

    # Format enabled flag.
    local enabled_str
    if [[ "$stage_enabled" == "true" ]]; then
      enabled_str="${GREEN}yes${RESET}"
    else
      enabled_str="${DIM}no${RESET}"
    fi

    # Format gate (only meaningful for enabled stages).
    local gate_str
    if [[ "$stage_enabled" == "true" ]]; then
      if [[ "$stage_gate" == "approve" ]]; then
        gate_str="${YELLOW}approve${RESET}"
      else
        gate_str="${GREEN}auto${RESET}"
      fi
    else
      gate_str="${DIM}—${RESET}"
    fi

    # NOTE: only the stage id is width-padded; the other columns embed
    # ANSI escapes, so %-15s padding on them would misalign anyway.
    printf "%-15s %s       %s   %s\n" "$stage_id" "$enabled_str" "$gate_str" "$stage_model_display"
  done < <(jq -c '.stages[]' "$PIPELINE_CONFIG")

  echo ""

  # Validate required tools.
  echo -e "${BLUE}${BOLD}━━━ Tool Validation ━━━${RESET}"
  echo ""

  local tool_errors=0
  local required_tools=("git" "jq")
  local optional_tools=("gh" "claude" "bc")

  local tool
  for tool in "${required_tools[@]}"; do
    if command -v "$tool" &>/dev/null; then
      echo -e "  ${GREEN}✓${RESET} $tool"
    else
      echo -e "  ${RED}✗${RESET} $tool ${RED}(required)${RESET}"
      tool_errors=$((tool_errors + 1))
    fi
  done

  for tool in "${optional_tools[@]}"; do
    if command -v "$tool" &>/dev/null; then
      echo -e "  ${GREEN}✓${RESET} $tool"
    else
      echo -e "  ${DIM}○${RESET} $tool"
    fi
  done

  echo ""

  # Cost estimation (rough approximation).
  echo -e "${BLUE}${BOLD}━━━ Estimated Resource Usage ━━━${RESET}"
  echo ""

  # Very rough heuristic: ~2000 input tokens and ~3000 output tokens per
  # enabled stage.
  local input_tokens_estimate output_tokens_estimate
  input_tokens_estimate=$(( enabled_stages * 2000 ))
  output_tokens_estimate=$(( enabled_stages * 3000 ))

  # Look up the per-million-token rates for the selected model.
  # The model name is passed via --arg instead of being interpolated into
  # the jq program: `.${stage_model}.input` was a jq syntax error for any
  # model name containing '-' (e.g. "claude-3"), silently falling back to
  # the default rate. $COST_MODEL_RATES is presumably a JSON object keyed
  # by model name — TODO confirm against where it is defined.
  local input_rate output_rate input_cost output_cost total_cost
  input_rate=$(jq -r --arg m "$stage_model" '.[$m].input // 3' <<<"$COST_MODEL_RATES" 2>/dev/null || echo "3")
  output_rate=$(jq -r --arg m "$stage_model" '.[$m].output // 15' <<<"$COST_MODEL_RATES" 2>/dev/null || echo "15")

  # Cost = (tokens / 1M) * rate; awk handles the floating-point math.
  input_cost=$(awk -v tokens="$input_tokens_estimate" -v rate="$input_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  output_cost=$(awk -v tokens="$output_tokens_estimate" -v rate="$output_rate" 'BEGIN{printf "%.4f", (tokens / 1000000) * rate}')
  total_cost=$(awk -v i="$input_cost" -v o="$output_cost" 'BEGIN{printf "%.4f", i + o}')

  echo -e "  ${BOLD}Estimated Input Tokens:${RESET}  ~$input_tokens_estimate"
  echo -e "  ${BOLD}Estimated Output Tokens:${RESET} ~$output_tokens_estimate"
  echo -e "  ${BOLD}Model Cost Rate:${RESET}         $stage_model"
  echo -e "  ${BOLD}Estimated Cost:${RESET}          \$$total_cost USD (rough estimate)"
  echo ""

  # Validate composed pipeline if intelligence is enabled.
  if [[ -f "$ARTIFACTS_DIR/composed-pipeline.json" ]] && type composer_validate_pipeline &>/dev/null; then
    echo -e "${BLUE}${BOLD}━━━ Intelligence-Composed Pipeline ━━━${RESET}"
    echo ""

    if composer_validate_pipeline "$(cat "$ARTIFACTS_DIR/composed-pipeline.json" 2>/dev/null || echo "")" 2>/dev/null; then
      echo -e "  ${GREEN}✓${RESET} Composed pipeline is valid"
    else
      echo -e "  ${YELLOW}⚠${RESET} Composed pipeline validation failed (will use template defaults)"
    fi
    echo ""
  fi

  # Final validation result: only missing required tools are fatal at this
  # point (config problems returned earlier).
  if [[ "$tool_errors" -gt 0 ]]; then
    error "Dry run validation failed: $tool_errors required tool(s) missing"
    return 1
  fi

  success "Dry run validation passed"
  echo ""
  echo -e "  To execute this pipeline: ${DIM}remove --dry-run flag${RESET}"
  echo ""
  return 0
}
7786
+
7124
7787
  # ─── Subcommands ────────────────────────────────────────────────────────────
7125
7788
 
7126
7789
  pipeline_start() {
@@ -7238,8 +7901,8 @@ pipeline_start() {
7238
7901
  echo ""
7239
7902
 
7240
7903
  if [[ "$DRY_RUN" == "true" ]]; then
7241
- info "Dry run — no stages will execute"
7242
- return 0
7904
+ run_dry_run
7905
+ return $?
7243
7906
  fi
7244
7907
 
7245
7908
  # Start background heartbeat writer