shipwright-cli 2.2.0 → 2.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. package/README.md +15 -16
  2. package/config/policy.schema.json +104 -29
  3. package/docs/AGI-PLATFORM-PLAN.md +11 -7
  4. package/docs/AGI-WHATS-NEXT.md +26 -20
  5. package/docs/README.md +2 -0
  6. package/package.json +1 -1
  7. package/scripts/check-version-consistency.sh +72 -0
  8. package/scripts/lib/daemon-adaptive.sh +610 -0
  9. package/scripts/lib/daemon-dispatch.sh +489 -0
  10. package/scripts/lib/daemon-failure.sh +387 -0
  11. package/scripts/lib/daemon-patrol.sh +1113 -0
  12. package/scripts/lib/daemon-poll.sh +1202 -0
  13. package/scripts/lib/daemon-state.sh +550 -0
  14. package/scripts/lib/daemon-triage.sh +490 -0
  15. package/scripts/lib/helpers.sh +81 -1
  16. package/scripts/lib/pipeline-detection.sh +278 -0
  17. package/scripts/lib/pipeline-github.sh +196 -0
  18. package/scripts/lib/pipeline-intelligence.sh +1706 -0
  19. package/scripts/lib/pipeline-quality-checks.sh +1054 -0
  20. package/scripts/lib/pipeline-quality.sh +11 -0
  21. package/scripts/lib/pipeline-stages.sh +2508 -0
  22. package/scripts/lib/pipeline-state.sh +529 -0
  23. package/scripts/sw +26 -4
  24. package/scripts/sw-activity.sh +1 -1
  25. package/scripts/sw-adaptive.sh +2 -2
  26. package/scripts/sw-adversarial.sh +1 -1
  27. package/scripts/sw-architecture-enforcer.sh +1 -1
  28. package/scripts/sw-auth.sh +1 -1
  29. package/scripts/sw-autonomous.sh +1 -1
  30. package/scripts/sw-changelog.sh +1 -1
  31. package/scripts/sw-checkpoint.sh +1 -1
  32. package/scripts/sw-ci.sh +1 -1
  33. package/scripts/sw-cleanup.sh +1 -1
  34. package/scripts/sw-code-review.sh +1 -1
  35. package/scripts/sw-connect.sh +1 -1
  36. package/scripts/sw-context.sh +1 -1
  37. package/scripts/sw-cost.sh +1 -1
  38. package/scripts/sw-daemon.sh +52 -4816
  39. package/scripts/sw-dashboard.sh +1 -1
  40. package/scripts/sw-db.sh +1 -1
  41. package/scripts/sw-decompose.sh +1 -1
  42. package/scripts/sw-deps.sh +1 -1
  43. package/scripts/sw-developer-simulation.sh +1 -1
  44. package/scripts/sw-discovery.sh +1 -1
  45. package/scripts/sw-doc-fleet.sh +1 -1
  46. package/scripts/sw-docs-agent.sh +1 -1
  47. package/scripts/sw-docs.sh +1 -1
  48. package/scripts/sw-doctor.sh +42 -1
  49. package/scripts/sw-dora.sh +1 -1
  50. package/scripts/sw-durable.sh +1 -1
  51. package/scripts/sw-e2e-orchestrator.sh +1 -1
  52. package/scripts/sw-eventbus.sh +1 -1
  53. package/scripts/sw-feedback.sh +1 -1
  54. package/scripts/sw-fix.sh +1 -1
  55. package/scripts/sw-fleet-discover.sh +1 -1
  56. package/scripts/sw-fleet-viz.sh +3 -3
  57. package/scripts/sw-fleet.sh +1 -1
  58. package/scripts/sw-github-app.sh +1 -1
  59. package/scripts/sw-github-checks.sh +1 -1
  60. package/scripts/sw-github-deploy.sh +1 -1
  61. package/scripts/sw-github-graphql.sh +1 -1
  62. package/scripts/sw-guild.sh +1 -1
  63. package/scripts/sw-heartbeat.sh +1 -1
  64. package/scripts/sw-hygiene.sh +1 -1
  65. package/scripts/sw-incident.sh +1 -1
  66. package/scripts/sw-init.sh +1 -1
  67. package/scripts/sw-instrument.sh +1 -1
  68. package/scripts/sw-intelligence.sh +1 -1
  69. package/scripts/sw-jira.sh +1 -1
  70. package/scripts/sw-launchd.sh +1 -1
  71. package/scripts/sw-linear.sh +1 -1
  72. package/scripts/sw-logs.sh +1 -1
  73. package/scripts/sw-loop.sh +1 -1
  74. package/scripts/sw-memory.sh +1 -1
  75. package/scripts/sw-mission-control.sh +1 -1
  76. package/scripts/sw-model-router.sh +1 -1
  77. package/scripts/sw-otel.sh +4 -4
  78. package/scripts/sw-oversight.sh +1 -1
  79. package/scripts/sw-pipeline-composer.sh +1 -1
  80. package/scripts/sw-pipeline-vitals.sh +1 -1
  81. package/scripts/sw-pipeline.sh +23 -56
  82. package/scripts/sw-pipeline.sh.mock +7 -0
  83. package/scripts/sw-pm.sh +1 -1
  84. package/scripts/sw-pr-lifecycle.sh +1 -1
  85. package/scripts/sw-predictive.sh +1 -1
  86. package/scripts/sw-prep.sh +1 -1
  87. package/scripts/sw-ps.sh +1 -1
  88. package/scripts/sw-public-dashboard.sh +1 -1
  89. package/scripts/sw-quality.sh +1 -1
  90. package/scripts/sw-reaper.sh +1 -1
  91. package/scripts/sw-recruit.sh +9 -1
  92. package/scripts/sw-regression.sh +1 -1
  93. package/scripts/sw-release-manager.sh +1 -1
  94. package/scripts/sw-release.sh +1 -1
  95. package/scripts/sw-remote.sh +1 -1
  96. package/scripts/sw-replay.sh +1 -1
  97. package/scripts/sw-retro.sh +1 -1
  98. package/scripts/sw-scale.sh +8 -5
  99. package/scripts/sw-security-audit.sh +1 -1
  100. package/scripts/sw-self-optimize.sh +158 -7
  101. package/scripts/sw-session.sh +1 -1
  102. package/scripts/sw-setup.sh +1 -1
  103. package/scripts/sw-standup.sh +3 -3
  104. package/scripts/sw-status.sh +1 -1
  105. package/scripts/sw-strategic.sh +1 -1
  106. package/scripts/sw-stream.sh +8 -2
  107. package/scripts/sw-swarm.sh +7 -10
  108. package/scripts/sw-team-stages.sh +1 -1
  109. package/scripts/sw-templates.sh +1 -1
  110. package/scripts/sw-testgen.sh +1 -1
  111. package/scripts/sw-tmux-pipeline.sh +1 -1
  112. package/scripts/sw-tmux.sh +1 -1
  113. package/scripts/sw-trace.sh +1 -1
  114. package/scripts/sw-tracker.sh +24 -6
  115. package/scripts/sw-triage.sh +1 -1
  116. package/scripts/sw-upgrade.sh +1 -1
  117. package/scripts/sw-ux.sh +1 -1
  118. package/scripts/sw-webhook.sh +1 -1
  119. package/scripts/sw-widgets.sh +1 -1
  120. package/scripts/sw-worktree.sh +1 -1
@@ -0,0 +1,1054 @@
1
# pipeline-quality-checks.sh — Quality checks (security, bundle, perf, api,
# coverage, adversarial, dod, bash compat, etc.) for sw-pipeline.sh
# Source from sw-pipeline.sh. Requires pipeline-quality.sh, ARTIFACTS_DIR, SCRIPT_DIR.

# Include guard: when this library has already been sourced, stop immediately
# so function definitions are not re-evaluated.
if [[ -n "${_PIPELINE_QUALITY_CHECKS_LOADED:-}" ]]; then
  return 0
fi
_PIPELINE_QUALITY_CHECKS_LOADED=1
+
6
#######################################
# Run a dependency security audit with whichever ecosystem tool is present
# (npm audit, pip-audit, or cargo audit), log the raw output, and fail when
# critical-severity findings are reported.
# Globals:   ARTIFACTS_DIR (read), ISSUE_NUMBER (read, optional)
# Outputs:   writes $ARTIFACTS_DIR/security-audit.log
# Returns:   0 when clean or no tool available; 1 on critical findings
# NOTE(review): info/warn/success/emit_event are helpers provided by the
# sourcing script (helpers.sh / sw-pipeline.sh) — assumed logging/event fns.
#######################################
quality_check_security() {
  info "Security audit..."
  local audit_log="$ARTIFACTS_DIR/security-audit.log"
  # Captured so a failing audit does not abort under `set -e`; the value
  # itself is intentionally unused (severity is parsed from the log instead).
  local audit_exit=0
  local tool_found=false

  # Pick the first matching ecosystem: npm, then pip-audit, then cargo-audit.
  if [[ -f "package.json" ]] && command -v npm &>/dev/null; then
    tool_found=true
    npm audit --production 2>&1 | tee "$audit_log" || audit_exit=$?
  elif [[ -f "requirements.txt" || -f "pyproject.toml" ]] && command -v pip-audit &>/dev/null; then
    tool_found=true
    pip-audit 2>&1 | tee "$audit_log" || audit_exit=$?
  elif [[ -f "Cargo.toml" ]] && command -v cargo-audit &>/dev/null; then
    tool_found=true
    cargo audit 2>&1 | tee "$audit_log" || audit_exit=$?
  fi

  if [[ "$tool_found" != "true" ]]; then
    info "No security audit tool found — skipping"
    echo "No audit tool available" > "$audit_log"
    return 0
  fi

  # Count lines mentioning critical/high severity.
  # FIX: use -w (whole-word match) so package names such as "highlight.js"
  # and words like "higher" or "criticality" no longer inflate the counts.
  # This is still a line-count heuristic over tool output, not a structured
  # parse — counts are approximate by design.
  local critical_count high_count
  critical_count=$(grep -ciwE 'critical' "$audit_log" 2>/dev/null || true)
  critical_count="${critical_count:-0}"
  high_count=$(grep -ciwE 'high' "$audit_log" 2>/dev/null || true)
  high_count="${high_count:-0}"

  emit_event "quality.security" \
    "issue=${ISSUE_NUMBER:-0}" \
    "critical=$critical_count" \
    "high=$high_count"

  # Only critical findings block the pipeline; high severity is reported only.
  if [[ "$critical_count" -gt 0 ]]; then
    warn "Security audit: ${critical_count} critical, ${high_count} high"
    return 1
  fi

  success "Security audit: clean"
  return 0
}
52
+
53
#######################################
# Measure the build-output directory size and flag abnormal growth.
# Detection order: tsconfig.json outDir → package.json build-script output
# flag → common directory names (dist/build/out/.next/target).
# With >=3 historical samples, alerts when size exceeds mean + Nσ (N adaptive);
# otherwise falls back to a legacy sw-memory baseline with a 20% cap.
# Globals:   ARTIFACTS_DIR, PROJECT_ROOT, HOME, SCRIPT_DIR, ISSUE_NUMBER (read)
# Outputs:   $ARTIFACTS_DIR/bundle-metrics.log; rolling history under
#            ~/.shipwright/baselines/<repo-hash>/bundle-history.json
# Returns:   0 when acceptable or skipped; 1 on excessive growth
# NOTE(review): helpers info/warn/emit_event and (optionally)
# intelligence_search_memory come from the sourcing scripts.
#######################################
quality_check_bundle_size() {
  info "Bundle size check..."
  local metrics_log="$ARTIFACTS_DIR/bundle-metrics.log"
  local bundle_size=0
  local bundle_dir=""

  # Find build output directory — check config files first, then common dirs
  # Parse tsconfig.json outDir
  if [[ -z "$bundle_dir" && -f "tsconfig.json" ]]; then
    local ts_out
    ts_out=$(jq -r '.compilerOptions.outDir // empty' tsconfig.json 2>/dev/null || true)
    [[ -n "$ts_out" && -d "$ts_out" ]] && bundle_dir="$ts_out"
  fi
  # Parse package.json build script for output hints
  if [[ -z "$bundle_dir" && -f "package.json" ]]; then
    local build_script
    build_script=$(jq -r '.scripts.build // ""' package.json 2>/dev/null || true)
    if [[ -n "$build_script" ]]; then
      # Check for common output flags: --outDir, -o, --out-dir
      # (takes the last word of the first match; only used if it is a real dir)
      local parsed_out
      parsed_out=$(echo "$build_script" | grep -oE '(--outDir|--out-dir|-o)\s+[^ ]+' 2>/dev/null | awk '{print $NF}' | head -1 || true)
      [[ -n "$parsed_out" && -d "$parsed_out" ]] && bundle_dir="$parsed_out"
    fi
  fi
  # Fallback: check common directories
  if [[ -z "$bundle_dir" ]]; then
    for dir in dist build out .next target; do
      if [[ -d "$dir" ]]; then
        bundle_dir="$dir"
        break
      fi
    done
  fi

  if [[ -z "$bundle_dir" ]]; then
    info "No build output directory found — skipping bundle check"
    echo "No build directory" > "$metrics_log"
    return 0
  fi

  # du -sk → size in KB (first field); -sh for the human-readable label.
  bundle_size=$(du -sk "$bundle_dir" 2>/dev/null | cut -f1 || echo "0")
  local bundle_size_human
  bundle_size_human=$(du -sh "$bundle_dir" 2>/dev/null | cut -f1 || echo "unknown")

  echo "Bundle directory: $bundle_dir" > "$metrics_log"
  echo "Size: ${bundle_size}KB (${bundle_size_human})" >> "$metrics_log"

  emit_event "quality.bundle" \
    "issue=${ISSUE_NUMBER:-0}" \
    "size_kb=$bundle_size" \
    "directory=$bundle_dir"

  # Adaptive bundle size check: statistical deviation from historical mean.
  # History is keyed by a 12-char hash of the repo path so multiple repos
  # sharing $HOME do not mix baselines.
  local repo_hash_bundle
  repo_hash_bundle=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local bundle_baselines_dir="${HOME}/.shipwright/baselines/${repo_hash_bundle}"
  local bundle_history_file="${bundle_baselines_dir}/bundle-history.json"

  local bundle_history="[]"
  if [[ -f "$bundle_history_file" ]]; then
    bundle_history=$(jq '.sizes // []' "$bundle_history_file" 2>/dev/null || echo "[]")
  fi

  local bundle_hist_count
  bundle_hist_count=$(echo "$bundle_history" | jq 'length' 2>/dev/null || echo "0")

  if [[ "$bundle_hist_count" -ge 3 ]]; then
    # Statistical check: alert on growth > 2σ from historical mean
    local mean_size stddev_size
    mean_size=$(echo "$bundle_history" | jq 'add / length' 2>/dev/null || echo "0")
    # Population standard deviation computed entirely in jq.
    stddev_size=$(echo "$bundle_history" | jq '
      (add / length) as $mean |
      (map(. - $mean | . * .) | add / length | sqrt)
    ' 2>/dev/null || echo "0")

    # Adaptive tolerance: small repos (<1MB mean) get wider tolerance (3σ), large repos get 2σ
    local sigma_mult
    sigma_mult=$(awk -v mean="$mean_size" 'BEGIN{ print (mean < 1024 ? 3 : 2) }')
    # Threshold is mean + Nσ, but never below mean * 1.1 (10% headroom floor).
    local adaptive_max
    adaptive_max=$(awk -v mean="$mean_size" -v sd="$stddev_size" -v mult="$sigma_mult" \
      'BEGIN{ t = mean + mult*sd; min_t = mean * 1.1; printf "%.0f", (t > min_t ? t : min_t) }')

    echo "History: ${bundle_hist_count} runs | Mean: ${mean_size}KB | StdDev: ${stddev_size}KB | Max: ${adaptive_max}KB (${sigma_mult}σ)" >> "$metrics_log"

    # 2>/dev/null: tolerate non-integer operands (e.g. jq failure strings).
    if [[ "$bundle_size" -gt "$adaptive_max" ]] 2>/dev/null; then
      local growth_pct
      growth_pct=$(awk -v cur="$bundle_size" -v mean="$mean_size" 'BEGIN{printf "%d", ((cur - mean) / mean) * 100}')
      warn "Bundle size ${growth_pct}% above average (${mean_size}KB → ${bundle_size}KB, ${sigma_mult}σ threshold: ${adaptive_max}KB)"
      return 1
    fi
  else
    # Fallback: legacy memory baseline with hardcoded 20% (not enough history)
    local baseline_size=""
    if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
      baseline_size=$(bash "$SCRIPT_DIR/sw-memory.sh" get "bundle_size_kb" 2>/dev/null) || true
    fi
    if [[ -n "$baseline_size" && "$baseline_size" -gt 0 ]] 2>/dev/null; then
      local growth_pct
      growth_pct=$(awk -v cur="$bundle_size" -v base="$baseline_size" 'BEGIN{printf "%d", ((cur - base) / base) * 100}')
      echo "Baseline: ${baseline_size}KB | Growth: ${growth_pct}%" >> "$metrics_log"
      if [[ "$growth_pct" -gt 20 ]]; then
        warn "Bundle size grew ${growth_pct}% (${baseline_size}KB → ${bundle_size}KB)"
        return 1
      fi
    fi
  fi

  # Append current size to rolling history (keep last 10)
  mkdir -p "$bundle_baselines_dir"
  local updated_bundle_hist
  updated_bundle_hist=$(echo "$bundle_history" | jq --arg sz "$bundle_size" '
    . + [($sz | tonumber)] | .[-10:]
  ' 2>/dev/null || echo "[$bundle_size]")
  # Write-then-rename so a concurrent reader never sees a half-written file.
  local tmp_bundle_hist
  tmp_bundle_hist=$(mktemp "${bundle_baselines_dir}/bundle-history.json.XXXXXX")
  jq -n --argjson sizes "$updated_bundle_hist" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
    '{sizes: $sizes, updated: $updated}' > "$tmp_bundle_hist" 2>/dev/null
  mv "$tmp_bundle_hist" "$bundle_history_file" 2>/dev/null || true

  # Intelligence: identify top dependency bloaters
  # (gated on the intelligence module being loaded; the check itself only
  # inspects node_modules sizes — no model calls here)
  if type intelligence_search_memory &>/dev/null 2>&1 && [[ -f "package.json" ]] && command -v jq &>/dev/null; then
    local dep_sizes=""
    local deps
    deps=$(jq -r '.dependencies // {} | keys[]' package.json 2>/dev/null || true)
    if [[ -n "$deps" ]]; then
      # Build "SIZE NAME\n" records (the literal newline in the assignment is
      # intentional) so they can be numerically sorted below.
      while IFS= read -r dep; do
        [[ -z "$dep" ]] && continue
        local dep_dir="node_modules/${dep}"
        if [[ -d "$dep_dir" ]]; then
          local dep_size
          dep_size=$(du -sk "$dep_dir" 2>/dev/null | cut -f1 || echo "0")
          dep_sizes="${dep_sizes}${dep_size} ${dep}
"
        fi
      done <<< "$deps"
      if [[ -n "$dep_sizes" ]]; then
        local top_bloaters
        top_bloaters=$(echo "$dep_sizes" | sort -rn | head -3)
        if [[ -n "$top_bloaters" ]]; then
          echo "" >> "$metrics_log"
          echo "Top 3 dependency sizes:" >> "$metrics_log"
          echo "$top_bloaters" | while IFS=' ' read -r sz nm; do
            [[ -z "$nm" ]] && continue
            echo " ${nm}: ${sz}KB" >> "$metrics_log"
          done
          info "Top bloaters: $(echo "$top_bloaters" | head -1 | awk '{print $2 ": " $1 "KB"}')"
        fi
      fi
    fi
  fi

  # NOTE(review): bundle_hist_count is always set here, so the :+ expansion
  # also prints "(0 historical samples)" on the first run.
  info "Bundle size: ${bundle_size_human}${bundle_hist_count:+ (${bundle_hist_count} historical samples)}"
  return 0
}
207
+
208
#######################################
# Detect test-suite slowdowns by comparing the current run's duration against
# a per-repo rolling history (mean + 2σ with >=3 samples) or, lacking history,
# a legacy sw-memory baseline with a hardcoded 30% cap.
# Globals:   ARTIFACTS_DIR, PROJECT_ROOT, HOME, SCRIPT_DIR, ISSUE_NUMBER (read)
# Inputs:    $ARTIFACTS_DIR/test-results.log (produced by the test stage)
# Outputs:   $ARTIFACTS_DIR/perf-metrics.log; rolling history under
#            ~/.shipwright/baselines/<repo-hash>/perf-history.json
# Returns:   0 when acceptable or skipped; 1 on regression
#######################################
quality_check_perf_regression() {
  info "Performance regression check..."
  local metrics_log="$ARTIFACTS_DIR/perf-metrics.log"
  local test_log="$ARTIFACTS_DIR/test-results.log"

  if [[ ! -f "$test_log" ]]; then
    info "No test results — skipping perf check"
    echo "No test results available" > "$metrics_log"
    return 0
  fi

  # Extract test suite duration — multi-framework patterns
  # NOTE(review): despite the name, duration_ms holds SECONDS throughout
  # (every pattern strips a trailing "s"); the emitted field is duration_s.
  local duration_ms=""
  # Jest/Vitest: "Time: 12.34 s" or "Duration 12.34s"
  duration_ms=$(grep -oE 'Time:\s*[0-9.]+\s*s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  [[ -z "$duration_ms" ]] && duration_ms=$(grep -oE 'Duration\s+[0-9.]+\s*s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  # pytest: "passed in 12.34s" or "====== 5 passed in 12.34 seconds ======"
  [[ -z "$duration_ms" ]] && duration_ms=$(grep -oE 'passed in [0-9.]+s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  # Go test: "ok pkg 12.345s"
  [[ -z "$duration_ms" ]] && duration_ms=$(grep -oE '^ok\s+\S+\s+[0-9.]+s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+s' | grep -oE '[0-9.]+' | tail -1 || true)
  # Cargo test: "test result: ok. ... finished in 12.34s"
  [[ -z "$duration_ms" ]] && duration_ms=$(grep -oE 'finished in [0-9.]+s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  # Generic: "12.34 seconds" or "12.34s" — last-resort pattern; may pick up
  # unrelated numbers followed by "s", so it runs only after all others fail.
  [[ -z "$duration_ms" ]] && duration_ms=$(grep -oE '[0-9.]+ ?s(econds?)?' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)

  # Claude fallback: parse test output when no pattern matches.
  # Gated on .intelligence.enabled in the daemon config AND the claude CLI.
  if [[ -z "$duration_ms" ]]; then
    local intel_enabled="false"
    local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
    if [[ -f "$daemon_cfg" ]]; then
      intel_enabled=$(jq -r '.intelligence.enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
    fi
    if [[ "$intel_enabled" == "true" ]] && command -v claude &>/dev/null; then
      local tail_output
      tail_output=$(tail -30 "$test_log" 2>/dev/null || true)
      if [[ -n "$tail_output" ]]; then
        # grep '^[0-9.]+$' keeps only a bare-number reply, rejecting prose.
        duration_ms=$(claude --print -p "Extract ONLY the total test suite duration in seconds from this output. Reply with ONLY a number (e.g. 12.34). If no duration found, reply NONE.

$tail_output" < /dev/null 2>/dev/null | grep -oE '^[0-9.]+$' | head -1 || true)
        [[ "$duration_ms" == "NONE" ]] && duration_ms=""
      fi
    fi
  fi

  if [[ -z "$duration_ms" ]]; then
    info "Could not extract test duration — skipping perf check"
    echo "Duration not parseable" > "$metrics_log"
    return 0
  fi

  echo "Test duration: ${duration_ms}s" > "$metrics_log"

  emit_event "quality.perf" \
    "issue=${ISSUE_NUMBER:-0}" \
    "duration_s=$duration_ms"

  # Adaptive performance check: 2σ from rolling 10-run average.
  # History keyed by a 12-char hash of the repo path (one baseline per repo).
  local repo_hash_perf
  repo_hash_perf=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local perf_baselines_dir="${HOME}/.shipwright/baselines/${repo_hash_perf}"
  local perf_history_file="${perf_baselines_dir}/perf-history.json"

  # Read historical durations (rolling window of last 10 runs)
  local history_json="[]"
  if [[ -f "$perf_history_file" ]]; then
    history_json=$(jq '.durations // []' "$perf_history_file" 2>/dev/null || echo "[]")
  fi

  local history_count
  history_count=$(echo "$history_json" | jq 'length' 2>/dev/null || echo "0")

  if [[ "$history_count" -ge 3 ]]; then
    # Calculate mean and standard deviation from history
    local mean_dur stddev_dur
    mean_dur=$(echo "$history_json" | jq 'add / length' 2>/dev/null || echo "0")
    # Population standard deviation, computed in jq.
    stddev_dur=$(echo "$history_json" | jq '
      (add / length) as $mean |
      (map(. - $mean | . * .) | add / length | sqrt)
    ' 2>/dev/null || echo "0")

    # Threshold: mean + 2σ (but at least 10% above mean)
    local adaptive_threshold
    adaptive_threshold=$(awk -v mean="$mean_dur" -v sd="$stddev_dur" \
      'BEGIN{ t = mean + 2*sd; min_t = mean * 1.1; printf "%.2f", (t > min_t ? t : min_t) }')

    echo "History: ${history_count} runs | Mean: ${mean_dur}s | StdDev: ${stddev_dur}s | Threshold: ${adaptive_threshold}s" >> "$metrics_log"

    # awk handles the float comparison ([[ -gt ]] is integer-only).
    if awk -v cur="$duration_ms" -v thresh="$adaptive_threshold" 'BEGIN{exit !(cur > thresh)}' 2>/dev/null; then
      local slowdown_pct
      slowdown_pct=$(awk -v cur="$duration_ms" -v mean="$mean_dur" 'BEGIN{printf "%d", ((cur - mean) / mean) * 100}')
      warn "Tests ${slowdown_pct}% slower than rolling average (${mean_dur}s → ${duration_ms}s, threshold: ${adaptive_threshold}s)"
      return 1
    fi
  else
    # Fallback: legacy memory baseline with hardcoded 30% (not enough history)
    local baseline_dur=""
    if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
      baseline_dur=$(bash "$SCRIPT_DIR/sw-memory.sh" get "test_duration_s" 2>/dev/null) || true
    fi
    # Second awk clause guards against a zero/garbage baseline.
    if [[ -n "$baseline_dur" ]] && awk -v cur="$duration_ms" -v base="$baseline_dur" 'BEGIN{exit !(base > 0)}' 2>/dev/null; then
      local slowdown_pct
      slowdown_pct=$(awk -v cur="$duration_ms" -v base="$baseline_dur" 'BEGIN{printf "%d", ((cur - base) / base) * 100}')
      echo "Baseline: ${baseline_dur}s | Slowdown: ${slowdown_pct}%" >> "$metrics_log"
      if [[ "$slowdown_pct" -gt 30 ]]; then
        warn "Tests ${slowdown_pct}% slower (${baseline_dur}s → ${duration_ms}s)"
        return 1
      fi
    fi
  fi

  # Append current duration to rolling history (keep last 10)
  mkdir -p "$perf_baselines_dir"
  local updated_history
  updated_history=$(echo "$history_json" | jq --arg dur "$duration_ms" '
    . + [($dur | tonumber)] | .[-10:]
  ' 2>/dev/null || echo "[$duration_ms]")
  # Write-then-rename keeps the history file atomic for concurrent readers.
  local tmp_perf_hist
  tmp_perf_hist=$(mktemp "${perf_baselines_dir}/perf-history.json.XXXXXX")
  jq -n --argjson durations "$updated_history" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
    '{durations: $durations, updated: $updated}' > "$tmp_perf_hist" 2>/dev/null
  mv "$tmp_perf_hist" "$perf_history_file" 2>/dev/null || true

  # NOTE(review): history_count is always set here, so the :+ expansion also
  # prints "(0 historical samples)" on the first run.
  info "Test duration: ${duration_ms}s${history_count:+ (${history_count} historical samples)}"
  return 0
}
333
+
334
#######################################
# Detect breaking API changes in an OpenAPI/Swagger spec relative to
# BASE_BRANCH: removed endpoints and removed parameters on surviving
# endpoints (JSON specs only, via jq), plus an optional Claude semantic diff
# when the intelligence module is loaded.
# Globals:   ARTIFACTS_DIR, BASE_BRANCH, ISSUE_NUMBER? (no), GOAL? (no) — read
# Outputs:   $ARTIFACTS_DIR/api-compat.log
# Returns:   0 when no spec / unchanged / no breaking changes; 1 otherwise
# NOTE(review): YAML specs are only discovered, never diffed structurally —
# only *.json specs get the jq path/parameter comparison below.
#######################################
quality_check_api_compat() {
  info "API compatibility check..."
  local compat_log="$ARTIFACTS_DIR/api-compat.log"

  # Look for OpenAPI/Swagger specs — common locations first...
  local spec_file=""
  for candidate in openapi.json openapi.yaml swagger.json swagger.yaml api/openapi.json docs/openapi.yaml; do
    if [[ -f "$candidate" ]]; then
      spec_file="$candidate"
      break
    fi
  done
  # ...then a broader, depth-limited filesystem search.
  if [[ -z "$spec_file" ]]; then
    spec_file=$(find . -maxdepth 4 \( -name "openapi*.json" -o -name "openapi*.yaml" -o -name "openapi*.yml" -o -name "swagger*.json" -o -name "swagger*.yaml" -o -name "swagger*.yml" \) -type f 2>/dev/null | head -1 || true)
  fi

  if [[ -z "$spec_file" ]]; then
    info "No OpenAPI/Swagger spec found — skipping API compat check"
    echo "No API spec found" > "$compat_log"
    return 0
  fi

  # Fast path: nothing to do when the spec is untouched on this branch.
  # FIX: -F matches the basename as a literal string — previously the dots in
  # e.g. "openapi.json" acted as regex wildcards (matching "openapiXjson"),
  # and -- guards against filenames starting with "-".
  local spec_changed
  spec_changed=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null | grep -cF -- "$(basename "$spec_file")" || true)
  spec_changed="${spec_changed:-0}"

  if [[ "$spec_changed" -eq 0 ]]; then
    info "API spec unchanged"
    echo "Spec unchanged" > "$compat_log"
    return 0
  fi

  # Baseline version of the spec from the base branch.
  local old_spec
  old_spec=$(git show "${BASE_BRANCH}:${spec_file}" 2>/dev/null || true)

  if [[ -z "$old_spec" ]]; then
    info "New API spec — no baseline to compare"
    echo "New spec, no baseline" > "$compat_log"
    return 0
  fi

  # Breaking change #1: endpoints present on base but missing here
  # (comm -23 = lines only in the old sorted path list).
  local removed_endpoints=""
  if command -v jq &>/dev/null && [[ "$spec_file" == *.json ]]; then
    local old_paths new_paths
    old_paths=$(echo "$old_spec" | jq -r '.paths | keys[]' 2>/dev/null | sort || true)
    new_paths=$(jq -r '.paths | keys[]' "$spec_file" 2>/dev/null | sort || true)
    removed_endpoints=$(comm -23 <(echo "$old_paths") <(echo "$new_paths") 2>/dev/null || true)
  fi

  # Breaking change #2: parameters removed from endpoints that still exist.
  local param_changes=""
  if command -v jq &>/dev/null && [[ "$spec_file" == *.json ]]; then
    local common_paths
    common_paths=$(comm -12 <(echo "$old_spec" | jq -r '.paths | keys[]' 2>/dev/null | sort) <(jq -r '.paths | keys[]' "$spec_file" 2>/dev/null | sort) 2>/dev/null || true)
    if [[ -n "$common_paths" ]]; then
      while IFS= read -r path; do
        [[ -z "$path" ]] && continue
        local old_params new_params
        old_params=$(echo "$old_spec" | jq -r --arg p "$path" '.paths[$p] | to_entries[] | .value.parameters // [] | .[].name' 2>/dev/null | sort || true)
        new_params=$(jq -r --arg p "$path" '.paths[$p] | to_entries[] | .value.parameters // [] | .[].name' "$spec_file" 2>/dev/null | sort || true)
        local removed_params
        removed_params=$(comm -23 <(echo "$old_params") <(echo "$new_params") 2>/dev/null || true)
        # One record per affected path (the embedded newline is intentional).
        [[ -n "$removed_params" ]] && param_changes="${param_changes}${path}: removed params: ${removed_params}
"
      done <<< "$common_paths"
    fi
  fi

  # Intelligence: semantic API diff for complex changes (advisory only — its
  # output is logged but does not affect the pass/fail decision below).
  local semantic_diff=""
  if type intelligence_search_memory &>/dev/null 2>&1 && command -v claude &>/dev/null; then
    local spec_git_diff
    spec_git_diff=$(git diff "${BASE_BRANCH}...HEAD" -- "$spec_file" 2>/dev/null | head -200 || true)
    if [[ -n "$spec_git_diff" ]]; then
      semantic_diff=$(claude --print --output-format text -p "Analyze this API spec diff for breaking changes. List: removed endpoints, changed parameters, altered response schemas, auth changes. Be concise.

${spec_git_diff}" --model haiku < /dev/null 2>/dev/null || true)
    fi
  fi

  # Write the full report in one redirected group.
  {
    echo "Spec: $spec_file"
    echo "Changed: yes"
    if [[ -n "$removed_endpoints" ]]; then
      echo "BREAKING — Removed endpoints:"
      echo "$removed_endpoints"
    fi
    if [[ -n "$param_changes" ]]; then
      echo "BREAKING — Parameter changes:"
      echo "$param_changes"
    fi
    if [[ -n "$semantic_diff" ]]; then
      echo ""
      echo "Semantic analysis:"
      echo "$semantic_diff"
    fi
    if [[ -z "$removed_endpoints" && -z "$param_changes" ]]; then
      echo "No breaking changes detected"
    fi
  } > "$compat_log"

  if [[ -n "$removed_endpoints" || -n "$param_changes" ]]; then
    # Issue count = removed endpoints (line count) + param-change records.
    local issue_count=0
    [[ -n "$removed_endpoints" ]] && issue_count=$((issue_count + $(echo "$removed_endpoints" | wc -l | xargs)))
    [[ -n "$param_changes" ]] && issue_count=$((issue_count + $(echo "$param_changes" | grep -c '.' || true)))
    warn "API breaking changes: ${issue_count} issue(s) found"
    return 1
  fi

  success "API compatibility: no breaking changes"
  return 0
}
452
+
453
#######################################
# Coverage gate: extract the overall coverage % from the test log, emit an
# event, then enforce (a) the pipeline-config minimum for the "test" stage and
# (b) a no-regression rule of baseline - 2 points against a per-repo persisted
# baseline. The baseline is updated on first run or on improvement.
# Globals:   ARTIFACTS_DIR, PROJECT_ROOT, PIPELINE_CONFIG, SCRIPT_DIR, HOME,
#            ISSUE_NUMBER — read
# Outputs:   baseline under ~/.shipwright/baselines/<repo-hash>/coverage.json
# Returns:   0 when acceptable or coverage undeterminable; 1 on violation
# NOTE(review): parse_coverage_from_output is a shared helper from the
# sourcing script — assumed to print a bare percentage or nothing.
#######################################
quality_check_coverage() {
  info "Coverage analysis..."
  local test_log="$ARTIFACTS_DIR/test-results.log"

  if [[ ! -f "$test_log" ]]; then
    info "No test results — skipping coverage check"
    return 0
  fi

  # Extract coverage percentage using shared parser
  local coverage=""
  coverage=$(parse_coverage_from_output "$test_log")

  # Claude fallback: parse test output when no pattern matches
  # (gated on .intelligence.enabled in the daemon config and the claude CLI).
  if [[ -z "$coverage" ]]; then
    local intel_enabled_cov="false"
    local daemon_cfg_cov="${PROJECT_ROOT}/.claude/daemon-config.json"
    if [[ -f "$daemon_cfg_cov" ]]; then
      intel_enabled_cov=$(jq -r '.intelligence.enabled // false' "$daemon_cfg_cov" 2>/dev/null || echo "false")
    fi
    if [[ "$intel_enabled_cov" == "true" ]] && command -v claude &>/dev/null; then
      local tail_cov_output
      tail_cov_output=$(tail -40 "$test_log" 2>/dev/null || true)
      if [[ -n "$tail_cov_output" ]]; then
        # grep '^[0-9.]+$' keeps only a bare-number reply, rejecting prose.
        coverage=$(claude --print -p "Extract ONLY the overall code coverage percentage from this test output. Reply with ONLY a number (e.g. 85.5). If no coverage found, reply NONE.

$tail_cov_output" < /dev/null 2>/dev/null | grep -oE '^[0-9.]+$' | head -1 || true)
        [[ "$coverage" == "NONE" ]] && coverage=""
      fi
    fi
  fi

  if [[ -z "$coverage" ]]; then
    info "Could not extract coverage — skipping"
    return 0
  fi

  emit_event "quality.coverage" \
    "issue=${ISSUE_NUMBER:-0}" \
    "coverage=$coverage"

  # Configured minimum from the "test" stage of the pipeline config (0 = off).
  local coverage_min
  coverage_min=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.coverage_min) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
  [[ -z "$coverage_min" || "$coverage_min" == "null" ]] && coverage_min=0

  # Adaptive baseline: read from baselines file, enforce no-regression (>= baseline - 2%)
  local repo_hash_cov
  repo_hash_cov=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local baselines_dir="${HOME}/.shipwright/baselines/${repo_hash_cov}"
  local coverage_baseline_file="${baselines_dir}/coverage.json"

  local baseline_coverage=""
  if [[ -f "$coverage_baseline_file" ]]; then
    baseline_coverage=$(jq -r '.baseline // empty' "$coverage_baseline_file" 2>/dev/null) || true
  fi
  # Fallback: try legacy memory baseline
  if [[ -z "$baseline_coverage" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
    baseline_coverage=$(bash "$SCRIPT_DIR/sw-memory.sh" get "coverage_pct" 2>/dev/null) || true
  fi

  local dropped=false
  if [[ -n "$baseline_coverage" && "$baseline_coverage" != "0" ]] && awk -v cur="$coverage" -v base="$baseline_coverage" 'BEGIN{exit !(base > 0)}' 2>/dev/null; then
    # Adaptive: allow 2% regression tolerance from baseline
    local min_allowed
    min_allowed=$(awk -v base="$baseline_coverage" 'BEGIN{printf "%d", base - 2}')
    if awk -v cur="$coverage" -v min="$min_allowed" 'BEGIN{exit !(cur < min)}' 2>/dev/null; then
      warn "Coverage regression: ${baseline_coverage}% → ${coverage}% (adaptive min: ${min_allowed}%)"
      dropped=true
    fi
  fi

  # FIX: compare coverage_min numerically via awk. The previous integer test
  # ([[ "$coverage_min" -gt 0 ]]) errored out — silently, because of the
  # 2>/dev/null — on fractional minimums such as "80.5", which disabled the
  # configured-minimum gate entirely.
  if awk -v min="$coverage_min" 'BEGIN{exit !(min > 0)}' 2>/dev/null && awk -v cov="$coverage" -v min="$coverage_min" 'BEGIN{exit !(cov < min)}' 2>/dev/null; then
    warn "Coverage ${coverage}% below minimum ${coverage_min}%"
    return 1
  fi

  if $dropped; then
    return 1
  fi

  # Update baseline on success (first run or improvement)
  if [[ -z "$baseline_coverage" ]] || awk -v cur="$coverage" -v base="$baseline_coverage" 'BEGIN{exit !(cur >= base)}' 2>/dev/null; then
    mkdir -p "$baselines_dir"
    # Write-then-rename keeps the baseline atomic for concurrent readers.
    local tmp_cov_baseline
    tmp_cov_baseline=$(mktemp "${baselines_dir}/coverage.json.XXXXXX")
    jq -n --arg baseline "$coverage" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
      '{baseline: ($baseline | tonumber), updated: $updated}' > "$tmp_cov_baseline" 2>/dev/null
    mv "$tmp_cov_baseline" "$coverage_baseline_file" 2>/dev/null || true
  fi

  info "Coverage: ${coverage}%${baseline_coverage:+ (baseline: ${baseline_coverage}%)}"
  return 0
}
547
+
548
+ # ─── Compound Quality Checks ──────────────────────────────────────────────
549
+ # Adversarial review, negative prompting, E2E validation, and DoD audit.
550
+ # Feeds findings back into a self-healing rebuild loop for automatic fixes.
551
+
552
# Hostile ("adversarial") review of the branch diff against BASE_BRANCH.
# Preferred path: delegate to the sourced adversarial_review() module (JSON
# findings via jq); fallback path: inline `claude --print` call with a
# hand-built prompt, optionally primed with findings remembered from earlier
# reviews.
# Globals:  BASE_BRANCH, GOAL (read, optional), ISSUE_NUMBER (read, optional),
#           ARTIFACTS_DIR (written: adversarial-review.{json,md}, token log)
# Returns:  0 when the review is clean, 1 when critical/high (module path) or
#           critical/bug (fallback path) findings are reported
# NOTE(review): info/warn/success/emit_event/parse_claude_tokens are helpers
# defined elsewhere (helpers.sh) — presumably logging to stderr; confirm.
run_adversarial_review() {
  local diff_content
  # Triple-dot diff: changes on HEAD since the merge-base with BASE_BRANCH.
  diff_content=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)

  if [[ -z "$diff_content" ]]; then
    info "No diff to review"
    return 0
  fi

  # Delegate to sw-adversarial.sh module when available (uses intelligence cache)
  # (`&>/dev/null 2>&1` — the second redirect is redundant but harmless)
  if type adversarial_review &>/dev/null 2>&1; then
    info "Using intelligence-backed adversarial review..."
    local json_result
    # Module returns a JSON array of findings; default to "[]" on failure.
    json_result=$(adversarial_review "$diff_content" "${GOAL:-}" 2>/dev/null || echo "[]")

    # Save raw JSON result
    echo "$json_result" > "$ARTIFACTS_DIR/adversarial-review.json"

    # Convert JSON findings to markdown for compatibility with compound_rebuild_with_feedback
    local critical_count high_count
    critical_count=$(echo "$json_result" | jq '[.[] | select(.severity == "critical")] | length' 2>/dev/null || echo "0")
    high_count=$(echo "$json_result" | jq '[.[] | select(.severity == "high")] | length' 2>/dev/null || echo "0")
    local total_findings
    total_findings=$(echo "$json_result" | jq 'length' 2>/dev/null || echo "0")

    # Generate markdown report from JSON (one bullet per finding; missing
    # fields fall back via jq's // alternative operator)
    {
      echo "# Adversarial Review (Intelligence-backed)"
      echo ""
      echo "Total findings: ${total_findings} (${critical_count} critical, ${high_count} high)"
      echo ""
      echo "$json_result" | jq -r '.[] | "- **[\(.severity // "unknown")]** \(.location // "unknown") — \(.description // .concern // "no description")"' 2>/dev/null || true
    } > "$ARTIFACTS_DIR/adversarial-review.md"

    emit_event "adversarial.delegated" \
      "issue=${ISSUE_NUMBER:-0}" \
      "findings=$total_findings" \
      "critical=$critical_count" \
      "high=$high_count"

    # Any critical or high finding fails the check (drives the rebuild loop).
    if [[ "$critical_count" -gt 0 ]]; then
      warn "Adversarial review: ${critical_count} critical, ${high_count} high"
      return 1
    elif [[ "$high_count" -gt 0 ]]; then
      warn "Adversarial review: ${high_count} high-severity issues"
      return 1
    fi

    success "Adversarial review: clean"
    return 0
  fi

  # Fallback: inline Claude call when module not loaded

  # Inject previous adversarial findings from memory
  local adv_memory=""
  if type intelligence_search_memory &>/dev/null 2>&1; then
    adv_memory=$(intelligence_search_memory "adversarial review security findings for: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
  fi

  # ${adv_memory:+...} splices the "Known Security Issues" section into the
  # prompt only when memory returned something.
  local prompt="You are a hostile code reviewer. Your job is to find EVERY possible issue in this diff.
Look for:
- Bugs (logic errors, off-by-one, null/undefined access, race conditions)
- Security vulnerabilities (injection, XSS, CSRF, auth bypass, secrets in code)
- Edge cases that aren't handled
- Error handling gaps
- Performance issues (N+1 queries, memory leaks, blocking calls)
- API contract violations
- Data validation gaps

Be thorough and adversarial. List every issue with severity [Critical/Bug/Warning].
Format: **[Severity]** file:line — description
${adv_memory:+
## Known Security Issues from Previous Reviews
These security issues have been found in past reviews. Check if any recur:
${adv_memory}
}
Diff:
$diff_content"

  # stdin closed so claude cannot block waiting for input; stderr captured
  # for token accounting.
  local review_output
  review_output=$(claude --print "$prompt" < /dev/null 2>"${ARTIFACTS_DIR}/.claude-tokens-adversarial.log" || true)
  parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-adversarial.log"

  echo "$review_output" > "$ARTIFACTS_DIR/adversarial-review.md"

  # Count issues by severity (grep -c counts matching LINES; -i tolerates
  # case drift, \[? makes the brackets optional in the model's output)
  local critical_count bug_count
  critical_count=$(grep -ciE '\*\*\[?Critical\]?\*\*' "$ARTIFACTS_DIR/adversarial-review.md" 2>/dev/null || true)
  critical_count="${critical_count:-0}"
  bug_count=$(grep -ciE '\*\*\[?Bug\]?\*\*' "$ARTIFACTS_DIR/adversarial-review.md" 2>/dev/null || true)
  bug_count="${bug_count:-0}"

  if [[ "$critical_count" -gt 0 ]]; then
    warn "Adversarial review: ${critical_count} critical, ${bug_count} bugs"
    return 1
  elif [[ "$bug_count" -gt 0 ]]; then
    warn "Adversarial review: ${bug_count} bugs found"
    return 1
  fi

  success "Adversarial review: clean"
  return 0
}
656
+
657
# Pessimistic "what will break in production" review of the changed files.
# Feeds the first 200 lines of every changed file to Claude together with a
# negative-prompting questionnaire, optionally primed with concerns remembered
# from earlier reviews.
# Globals:  BASE_BRANCH, GOAL (read, optional),
#           ARTIFACTS_DIR (written: negative-review.md, token log)
# Returns:  0 when no [Critical] concerns are reported, 1 otherwise
run_negative_prompting() {
  local changed_files
  changed_files=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null || true)

  if [[ -z "$changed_files" ]]; then
    info "No changed files to analyze"
    return 0
  fi

  # Read contents of changed files — head -200 bounds the prompt size;
  # deleted files are skipped by the -f test.
  local file_contents=""
  while IFS= read -r file; do
    if [[ -f "$file" ]]; then
      file_contents+="
--- $file ---
$(head -200 "$file" 2>/dev/null || true)
"
    fi
  done <<< "$changed_files"

  # Inject previous negative prompting findings from memory
  local neg_memory=""
  if type intelligence_search_memory &>/dev/null 2>&1; then
    neg_memory=$(intelligence_search_memory "negative prompting findings common concerns for: ${GOAL:-}" "${HOME}/.shipwright/memory" 5 2>/dev/null) || true
  fi

  # ${neg_memory:+...} splices the "Known Concerns" section in only when the
  # memory search returned something.
  local prompt="You are a pessimistic engineer who assumes everything will break.
Review these changes and answer:
1. What could go wrong in production?
2. What did the developer miss?
3. What's fragile and will break when requirements change?
4. What assumptions are being made that might not hold?
5. What happens under load/stress?
6. What happens with malicious input?
7. Are there any implicit dependencies that could break?
${neg_memory:+
## Known Concerns from Previous Reviews
These issues have been found in past reviews of this codebase. Check if any apply to the current changes:
${neg_memory}
}
Be specific. Reference actual code. Categorize each concern as [Critical/Concern/Minor].

Files changed: $changed_files

$file_contents"

  # stdin closed so claude cannot block; stderr goes to the token log which
  # parse_claude_tokens (defined elsewhere) consumes.
  local review_output
  review_output=$(claude --print "$prompt" < /dev/null 2>"${ARTIFACTS_DIR}/.claude-tokens-negative.log" || true)
  parse_claude_tokens "${ARTIFACTS_DIR}/.claude-tokens-negative.log"

  echo "$review_output" > "$ARTIFACTS_DIR/negative-review.md"

  # grep -c counts matching lines; ${var:-0} guards the empty-output case
  local critical_count
  critical_count=$(grep -ciE '\[Critical\]' "$ARTIFACTS_DIR/negative-review.md" 2>/dev/null || true)
  critical_count="${critical_count:-0}"

  if [[ "$critical_count" -gt 0 ]]; then
    warn "Negative prompting: ${critical_count} critical concerns"
    return 1
  fi

  success "Negative prompting: no critical concerns"
  return 0
}
721
+
722
# End-to-end validation: run the configured (or auto-detected) test command.
# Globals:  TEST_CMD (read, optional), ARTIFACTS_DIR (written: e2e-validation.log)
# Returns:  0 on pass or when no test command exists, 1 on test failure
run_e2e_validation() {
  # ${TEST_CMD:-} (not bare ${TEST_CMD}) so the function is safe under
  # `set -u` when no test command is configured — consistent with
  # run_test_coverage_check, which already uses the :- default.
  local test_cmd="${TEST_CMD:-}"
  if [[ -z "$test_cmd" ]]; then
    test_cmd=$(detect_test_cmd)
  fi

  if [[ -z "$test_cmd" ]]; then
    warn "No test command configured — skipping E2E validation"
    return 0
  fi

  info "Running E2E validation: $test_cmd"
  # Run in a child bash so a compound command string executes cleanly;
  # combined stdout+stderr is kept for post-mortem in the artifacts dir.
  if bash -c "$test_cmd" > "$ARTIFACTS_DIR/e2e-validation.log" 2>&1; then
    success "E2E validation passed"
    return 0
  else
    error "E2E validation failed"
    return 1
  fi
}
742
+
743
# Audits unchecked items ("- [ ] ...") in the project's Definition-of-Done
# checklist, auto-verifying the few item kinds we can check from artifacts
# and defaulting everything else to pass.
# Globals:  PROJECT_ROOT, HOME, BASE_BRANCH (read),
#           ARTIFACTS_DIR (read: test-results.log, lint.log; written: dod-audit.md)
# Returns:  0 when all (auto-verifiable) items pass or no DoD file exists,
#           1 when any item fails
run_dod_audit() {
  local dod_file="$PROJECT_ROOT/.claude/DEFINITION-OF-DONE.md"

  if [[ ! -f "$dod_file" ]]; then
    # Check for alternative locations
    for alt in "$PROJECT_ROOT/DEFINITION-OF-DONE.md" "$HOME/.shipwright/templates/definition-of-done.example.md"; do
      if [[ -f "$alt" ]]; then
        dod_file="$alt"
        break
      fi
    done
  fi

  if [[ ! -f "$dod_file" ]]; then
    info "No definition-of-done found — skipping DoD audit"
    return 0
  fi

  info "Auditing Definition of Done..."

  local total=0 passed=0 failed=0
  # audit_output accumulates literal "\n" sequences; they are expanded later
  # by the single `echo -e` that writes the report.
  local audit_output="# DoD Audit Results\n\n"

  while IFS= read -r line; do
    # Match unchecked markdown checkboxes: "- [ ] item"
    if [[ "$line" =~ ^[[:space:]]*-[[:space:]]*\[[[:space:]]\] ]]; then
      total=$((total + 1))
      # Strip everything through the first "] " to get the item text
      local item="${line#*] }"

      # Try to verify common items
      local item_passed=false
      case "$item" in
        *"tests pass"*|*"test pass"*)
          # Pass only if a test log exists and contains no fail/error lines
          if [[ -f "$ARTIFACTS_DIR/test-results.log" ]] && ! grep -qi "fail\|error" "$ARTIFACTS_DIR/test-results.log" 2>/dev/null; then
            item_passed=true
          fi
          ;;
        *"lint"*|*"Lint"*)
          if [[ -f "$ARTIFACTS_DIR/lint.log" ]] && ! grep -qi "error" "$ARTIFACTS_DIR/lint.log" 2>/dev/null; then
            item_passed=true
          fi
          ;;
        *"console.log"*|*"print("*)
          # Pass only if the diff adds no console.log / print( debug lines
          local debug_count
          debug_count=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null | grep -c "^+.*console\.log\|^+.*print(" 2>/dev/null || true)
          debug_count="${debug_count:-0}"
          if [[ "$debug_count" -eq 0 ]]; then
            item_passed=true
          fi
          ;;
        *"coverage"*)
          item_passed=true # Trust test stage coverage check
          ;;
        *)
          item_passed=true # Default pass for items we can't auto-verify
          ;;
      esac

      if $item_passed; then
        passed=$((passed + 1))
        audit_output+="- [x] $item\n"
      else
        failed=$((failed + 1))
        audit_output+="- [ ] $item ❌\n"
      fi
    fi
  done < "$dod_file"

  # echo -e expands the accumulated \n sequences into the final report
  echo -e "$audit_output\n\n**Score: ${passed}/${total} passed**" > "$ARTIFACTS_DIR/dod-audit.md"

  if [[ "$failed" -gt 0 ]]; then
    warn "DoD audit: ${passed}/${total} passed, ${failed} failed"
    return 1
  fi

  success "DoD audit: ${passed}/${total} passed"
  return 0
}
820
+
821
+ # ─── Intelligent Pipeline Orchestration ──────────────────────────────────────
822
+ # AGI-like decision making: skip, classify, adapt, reassess, backtrack
823
+
824
+ # Global state for intelligence features
825
+ PIPELINE_BACKTRACK_COUNT="${PIPELINE_BACKTRACK_COUNT:-0}"
826
+ PIPELINE_MAX_BACKTRACKS=2
827
+ PIPELINE_ADAPTIVE_COMPLEXITY=""
828
+
829
# ──────────────────────────────────────────────────────────────────────────────
# 1. Intelligent Stage Skipping
# Evaluates whether a stage should be skipped based on triage score, complexity,
# issue labels, and diff size. Called before each stage in run_pipeline().
# Returns 0 if the stage SHOULD be skipped, 1 if it should run.
# NOTE(review): the function this header describes does not directly follow —
# the next definition is run_bash_compat_check. Confirm the skip helper still
# exists, or move this header next to its implementation.
# ──────────────────────────────────────────────────────────────────────────────
835
+
836
# ──────────────────────────────────────────────────────────────────────────────
# Scans modified .sh files for common bash 3.2 incompatibilities
# (bash-4+ features that must not be introduced, e.g. for macOS's /bin/bash).
# Outputs: violation count on stdout. Human-readable details go to STDERR so
#          callers capturing the count with $(run_bash_compat_check) get a
#          clean number — previously the details polluted stdout.
# Returns: 0 always (the emitted count is the signal)
# ──────────────────────────────────────────────────────────────────────────────
run_bash_compat_check() {
  local violations=0
  local violation_details=""

  # Get modified .sh files relative to base branch
  local changed_files
  changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" -- '*.sh' 2>/dev/null || echo "")

  if [[ -z "$changed_files" ]]; then
    echo "0"
    return 0
  fi

  # Check each file for bash 3.2 incompatibilities.
  # All grep counts are guarded with ${var:-0}: a file deleted/renamed by the
  # diff makes grep emit nothing, and an empty count must not be used in
  # arithmetic or [[ -gt ]] comparisons.
  while IFS= read -r filepath; do
    [[ -z "$filepath" ]] && continue

    # declare -A (associative arrays, bash 4+). Match only a capital A in the
    # flag cluster: plain `declare -a` (indexed arrays) is valid in bash 3.2
    # and was previously a false positive.
    local declare_a_count
    declare_a_count=$(grep -cE 'declare[[:space:]]+-[a-zA-Z]*A' "$filepath" 2>/dev/null || true)
    declare_a_count="${declare_a_count:-0}"
    if [[ "$declare_a_count" -gt 0 ]]; then
      violations=$((violations + declare_a_count))
      violation_details="${violation_details}${filepath}: declare -A (${declare_a_count} occurrences)
"
    fi

    # readarray or mapfile (bash 4+)
    local readarray_count
    readarray_count=$(grep -c 'readarray\|mapfile' "$filepath" 2>/dev/null || true)
    readarray_count="${readarray_count:-0}"
    if [[ "$readarray_count" -gt 0 ]]; then
      violations=$((violations + readarray_count))
      violation_details="${violation_details}${filepath}: readarray/mapfile (${readarray_count} occurrences)
"
    fi

    # ${var,,} or ${var^^} (case conversion, bash 4+). Count the two patterns
    # separately, then add — the old inline $(( x + $(grep ...) )) form was a
    # syntax error whenever the inner substitution came back empty.
    local case_conv_count upper_conv_count
    case_conv_count=$(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*,,' "$filepath" 2>/dev/null || true)
    case_conv_count="${case_conv_count:-0}"
    upper_conv_count=$(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*\^\^' "$filepath" 2>/dev/null || true)
    upper_conv_count="${upper_conv_count:-0}"
    case_conv_count=$((case_conv_count + upper_conv_count))
    if [[ "$case_conv_count" -gt 0 ]]; then
      violations=$((violations + case_conv_count))
      violation_details="${violation_details}${filepath}: case conversion \$\{var,,\} or \$\{var\^\^\} (${case_conv_count} occurrences)
"
    fi

    # |& (pipe stderr+stdout shorthand, bash 4+)
    local pipe_ampersand_count
    pipe_ampersand_count=$(grep -c '|&' "$filepath" 2>/dev/null || true)
    pipe_ampersand_count="${pipe_ampersand_count:-0}"
    if [[ "$pipe_ampersand_count" -gt 0 ]]; then
      violations=$((violations + pipe_ampersand_count))
      violation_details="${violation_details}${filepath}: |& operator (${pipe_ampersand_count} occurrences)
"
    fi

    # ;& or ;;& in case statements (bash 4+ fallthrough)
    local advanced_case_count
    advanced_case_count=$(grep -c ';&\|;;&' "$filepath" 2>/dev/null || true)
    advanced_case_count="${advanced_case_count:-0}"
    if [[ "$advanced_case_count" -gt 0 ]]; then
      violations=$((violations + advanced_case_count))
      violation_details="${violation_details}${filepath}: advanced case ;& or ;;& (${advanced_case_count} occurrences)
"
    fi

  done <<< "$changed_files"

  # Log details to stderr if violations found (stdout carries only the count)
  if [[ "$violations" -gt 0 ]]; then
    warn "Bash 3.2 compatibility check: ${violations} violation(s) found:"
    echo "$violation_details" | sed 's/^/  /' >&2
  fi

  echo "$violations"
}
912
+
913
+ # ──────────────────────────────────────────────────────────────────────────────
914
+ # Test Coverage Check
915
+ # Runs configured test command and extracts coverage percentage
916
+ # Returns: coverage percentage (0-100), or "skip" if no test command configured
917
+ # ──────────────────────────────────────────────────────────────────────────────
918
+ run_test_coverage_check() {
919
+ local test_cmd="${TEST_CMD:-}"
920
+ if [[ -z "$test_cmd" ]]; then
921
+ echo "skip"
922
+ return 0
923
+ fi
924
+
925
+ info "Running test coverage check..."
926
+
927
+ # Run tests and capture output
928
+ local test_output
929
+ local test_rc=0
930
+ test_output=$(eval "$test_cmd" 2>&1) || test_rc=$?
931
+
932
+ if [[ "$test_rc" -ne 0 ]]; then
933
+ warn "Test command failed (exit code: $test_rc) — cannot extract coverage"
934
+ echo "0"
935
+ return 0
936
+ fi
937
+
938
+ # Extract coverage percentage from various formats
939
+ # Patterns: "XX% coverage", "Lines: XX%", "Stmts: XX%", "Coverage: XX%", "coverage XX%"
940
+ local coverage_pct
941
+ coverage_pct=$(echo "$test_output" | grep -oE '[0-9]{1,3}%[[:space:]]*(coverage|lines|stmts|statements)' | grep -oE '^[0-9]{1,3}' | head -1 || true)
942
+
943
+ if [[ -z "$coverage_pct" ]]; then
944
+ # Try alternate patterns without units
945
+ coverage_pct=$(echo "$test_output" | grep -oE 'coverage[:]?[[:space:]]*[0-9]{1,3}' | grep -oE '[0-9]{1,3}' | head -1 || true)
946
+ fi
947
+
948
+ if [[ -z "$coverage_pct" ]]; then
949
+ warn "Could not extract coverage percentage from test output"
950
+ echo "0"
951
+ return 0
952
+ fi
953
+
954
+ # Ensure it's a valid percentage (0-100)
955
+ if [[ ! "$coverage_pct" =~ ^[0-9]{1,3}$ ]] || [[ "$coverage_pct" -gt 100 ]]; then
956
+ coverage_pct=0
957
+ fi
958
+
959
+ success "Test coverage: ${coverage_pct}%"
960
+ echo "$coverage_pct"
961
+ }
962
+
963
# ──────────────────────────────────────────────────────────────────────────────
# Atomic Write Violations Check
# Scans modified state/config-like files for direct `echo ... >` writes that
# should instead use the tmp-file + mv pattern.
# Outputs: violation count on stdout. Details go to STDERR so callers
#          capturing the count with $(run_atomic_write_check) get a clean
#          number — previously the details polluted stdout.
# Returns: 0 always (the emitted count is the signal)
# ──────────────────────────────────────────────────────────────────────────────
run_atomic_write_check() {
  local violations=0
  local violation_details=""

  # Get modified files (not just .sh — includes state/config files)
  local changed_files
  changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || echo "")

  if [[ -z "$changed_files" ]]; then
    echo "0"
    return 0
  fi

  # Check for direct writes to state/config files (patterns that should use tmp+mv)
  while IFS= read -r filepath; do
    [[ -z "$filepath" ]] && continue

    # Only check state/config/artifacts files
    if [[ ! "$filepath" =~ (state|config|artifact|cache|db|json)$ ]]; then
      continue
    fi

    # Count direct redirection writes in the working-tree file. The previous
    # form piped `git show HEAD:file` into a grep that was ALSO given a file
    # operand — grep ignores stdin when a file is named, so the pipe was dead
    # code. Grep the file directly.
    local bad_writes
    bad_writes=$(grep -c 'echo.*>' "$filepath" 2>/dev/null || true)
    bad_writes="${bad_writes:-0}"   # empty when the file is gone → treat as 0

    if [[ "$bad_writes" -gt 0 ]]; then
      violations=$((violations + bad_writes))
      violation_details="${violation_details}${filepath}: ${bad_writes} direct write(s) (should use tmp+mv)
"
    fi
  done <<< "$changed_files"

  # Details to stderr so the stdout count stays machine-readable
  if [[ "$violations" -gt 0 ]]; then
    warn "Atomic write violations: ${violations} found (should use tmp file + mv pattern):"
    echo "$violation_details" | sed 's/^/  /' >&2
  fi

  echo "$violations"
}
1009
+
1010
# ──────────────────────────────────────────────────────────────────────────────
# New Function Test Detection
# Flags newly added shell functions when the same diff touches no test files.
# Heuristic only: it checks for test-file activity in the diff, not for a
# test per function.
# Outputs: count of untested new functions on stdout ("0" when clean)
# ──────────────────────────────────────────────────────────────────────────────
run_new_function_test_check() {
  local flagged=0
  local note=""

  # Diff against the remote base branch; nothing to do if it is empty.
  local diff_text
  diff_text=$(git diff "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
  if [[ -z "$diff_text" ]]; then
    echo "0"
    return 0
  fi

  # Added function definitions: "+name()" lines with the +/() stripped off.
  local added_funcs
  added_funcs=$(grep -E '^\+[a-zA-Z_][a-zA-Z0-9_]*\(\)' <<< "$diff_text" | sed 's/^\+//' | sed 's/()//' || true)
  if [[ -z "$added_funcs" ]]; then
    echo "0"
    return 0
  fi

  # Did the diff touch any test-looking file headers or paths?
  local test_touch_count=0
  test_touch_count=$(grep -c '\-\-\-.*test\|\.test\.\|_test\.' <<< "$diff_text" || true)

  # New functions with zero test-file activity → flag every one of them.
  if [[ "$test_touch_count" -eq 0 ]]; then
    local fn_total
    fn_total=$(wc -l <<< "$added_funcs" | xargs)
    flagged="$fn_total"
    note="Added ${fn_total} new function(s) but no test file modifications detected"
  fi

  if [[ "$flagged" -gt 0 ]]; then
    warn "New functions without tests: ${note}"
  fi

  echo "$flagged"
}