shipwright-cli 2.1.2 → 2.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (136)
  1. package/.claude/agents/devops-engineer.md +14 -12
  2. package/.claude/agents/doc-fleet-agent.md +99 -0
  3. package/.claude/agents/test-specialist.md +5 -3
  4. package/README.md +48 -27
  5. package/claude-code/CLAUDE.md.shipwright +2 -2
  6. package/config/policy.json +73 -0
  7. package/config/policy.schema.json +150 -0
  8. package/docs/AGI-PLATFORM-PLAN.md +126 -0
  9. package/docs/AGI-WHATS-NEXT.md +72 -0
  10. package/docs/KNOWN-ISSUES.md +1 -23
  11. package/docs/PLATFORM-TODO-BACKLOG.md +41 -0
  12. package/docs/PLATFORM-TODO-TRIAGE.md +56 -0
  13. package/docs/README.md +83 -0
  14. package/docs/TIPS.md +39 -2
  15. package/docs/config-policy.md +40 -0
  16. package/docs/definition-of-done.example.md +2 -0
  17. package/docs/patterns/README.md +5 -0
  18. package/docs/strategy/02-mission-and-brand.md +3 -3
  19. package/docs/strategy/README.md +4 -3
  20. package/docs/tmux-research/TMUX-AUDIT.md +2 -0
  21. package/docs/tmux-research/TMUX-RESEARCH-INDEX.md +17 -0
  22. package/package.json +3 -2
  23. package/scripts/lib/daemon-health.sh +32 -0
  24. package/scripts/lib/helpers.sh +30 -1
  25. package/scripts/lib/pipeline-detection.sh +278 -0
  26. package/scripts/lib/pipeline-github.sh +196 -0
  27. package/scripts/lib/pipeline-intelligence.sh +1712 -0
  28. package/scripts/lib/pipeline-quality-checks.sh +1052 -0
  29. package/scripts/lib/pipeline-quality.sh +34 -0
  30. package/scripts/lib/pipeline-stages.sh +2488 -0
  31. package/scripts/lib/pipeline-state.sh +529 -0
  32. package/scripts/lib/policy.sh +32 -0
  33. package/scripts/sw +5 -1
  34. package/scripts/sw-activity.sh +35 -46
  35. package/scripts/sw-adaptive.sh +30 -39
  36. package/scripts/sw-adversarial.sh +30 -36
  37. package/scripts/sw-architecture-enforcer.sh +30 -33
  38. package/scripts/sw-auth.sh +30 -42
  39. package/scripts/sw-autonomous.sh +60 -40
  40. package/scripts/sw-changelog.sh +29 -30
  41. package/scripts/sw-checkpoint.sh +30 -18
  42. package/scripts/sw-ci.sh +30 -42
  43. package/scripts/sw-cleanup.sh +32 -15
  44. package/scripts/sw-code-review.sh +26 -32
  45. package/scripts/sw-connect.sh +30 -19
  46. package/scripts/sw-context.sh +30 -19
  47. package/scripts/sw-cost.sh +30 -40
  48. package/scripts/sw-daemon.sh +66 -36
  49. package/scripts/sw-dashboard.sh +31 -40
  50. package/scripts/sw-db.sh +30 -20
  51. package/scripts/sw-decompose.sh +30 -38
  52. package/scripts/sw-deps.sh +30 -41
  53. package/scripts/sw-developer-simulation.sh +30 -36
  54. package/scripts/sw-discovery.sh +36 -19
  55. package/scripts/sw-doc-fleet.sh +822 -0
  56. package/scripts/sw-docs-agent.sh +30 -36
  57. package/scripts/sw-docs.sh +29 -31
  58. package/scripts/sw-doctor.sh +52 -20
  59. package/scripts/sw-dora.sh +29 -34
  60. package/scripts/sw-durable.sh +30 -20
  61. package/scripts/sw-e2e-orchestrator.sh +36 -21
  62. package/scripts/sw-eventbus.sh +30 -17
  63. package/scripts/sw-feedback.sh +30 -41
  64. package/scripts/sw-fix.sh +30 -40
  65. package/scripts/sw-fleet-discover.sh +30 -41
  66. package/scripts/sw-fleet-viz.sh +30 -20
  67. package/scripts/sw-fleet.sh +30 -40
  68. package/scripts/sw-github-app.sh +30 -41
  69. package/scripts/sw-github-checks.sh +30 -41
  70. package/scripts/sw-github-deploy.sh +30 -41
  71. package/scripts/sw-github-graphql.sh +30 -38
  72. package/scripts/sw-guild.sh +30 -37
  73. package/scripts/sw-heartbeat.sh +30 -19
  74. package/scripts/sw-hygiene.sh +134 -42
  75. package/scripts/sw-incident.sh +30 -39
  76. package/scripts/sw-init.sh +31 -14
  77. package/scripts/sw-instrument.sh +30 -41
  78. package/scripts/sw-intelligence.sh +39 -44
  79. package/scripts/sw-jira.sh +31 -41
  80. package/scripts/sw-launchd.sh +30 -17
  81. package/scripts/sw-linear.sh +31 -41
  82. package/scripts/sw-logs.sh +32 -17
  83. package/scripts/sw-loop.sh +32 -19
  84. package/scripts/sw-memory.sh +32 -43
  85. package/scripts/sw-mission-control.sh +31 -40
  86. package/scripts/sw-model-router.sh +30 -20
  87. package/scripts/sw-otel.sh +30 -20
  88. package/scripts/sw-oversight.sh +30 -36
  89. package/scripts/sw-patrol-meta.sh +31 -0
  90. package/scripts/sw-pipeline-composer.sh +30 -39
  91. package/scripts/sw-pipeline-vitals.sh +30 -44
  92. package/scripts/sw-pipeline.sh +277 -6383
  93. package/scripts/sw-pm.sh +31 -41
  94. package/scripts/sw-pr-lifecycle.sh +30 -42
  95. package/scripts/sw-predictive.sh +32 -34
  96. package/scripts/sw-prep.sh +30 -19
  97. package/scripts/sw-ps.sh +32 -17
  98. package/scripts/sw-public-dashboard.sh +30 -40
  99. package/scripts/sw-quality.sh +42 -40
  100. package/scripts/sw-reaper.sh +32 -15
  101. package/scripts/sw-recruit.sh +428 -48
  102. package/scripts/sw-regression.sh +30 -38
  103. package/scripts/sw-release-manager.sh +30 -38
  104. package/scripts/sw-release.sh +29 -31
  105. package/scripts/sw-remote.sh +31 -40
  106. package/scripts/sw-replay.sh +30 -18
  107. package/scripts/sw-retro.sh +33 -42
  108. package/scripts/sw-scale.sh +41 -24
  109. package/scripts/sw-security-audit.sh +30 -20
  110. package/scripts/sw-self-optimize.sh +33 -37
  111. package/scripts/sw-session.sh +31 -15
  112. package/scripts/sw-setup.sh +30 -16
  113. package/scripts/sw-standup.sh +30 -20
  114. package/scripts/sw-status.sh +33 -13
  115. package/scripts/sw-strategic.sh +55 -43
  116. package/scripts/sw-stream.sh +33 -37
  117. package/scripts/sw-swarm.sh +30 -21
  118. package/scripts/sw-team-stages.sh +30 -38
  119. package/scripts/sw-templates.sh +31 -16
  120. package/scripts/sw-testgen.sh +30 -31
  121. package/scripts/sw-tmux-pipeline.sh +29 -31
  122. package/scripts/sw-tmux-role-color.sh +31 -0
  123. package/scripts/sw-tmux-status.sh +31 -0
  124. package/scripts/sw-tmux.sh +31 -15
  125. package/scripts/sw-trace.sh +30 -19
  126. package/scripts/sw-tracker-github.sh +31 -0
  127. package/scripts/sw-tracker-jira.sh +31 -0
  128. package/scripts/sw-tracker-linear.sh +31 -0
  129. package/scripts/sw-tracker.sh +30 -40
  130. package/scripts/sw-triage.sh +68 -61
  131. package/scripts/sw-upgrade.sh +30 -16
  132. package/scripts/sw-ux.sh +30 -35
  133. package/scripts/sw-webhook.sh +30 -25
  134. package/scripts/sw-widgets.sh +30 -19
  135. package/scripts/sw-worktree.sh +32 -15
  136. package/tmux/templates/doc-fleet.json +43 -0
@@ -0,0 +1,1712 @@
1
+ # pipeline-intelligence.sh — Skip/adaptive/audits/DoD/security/compound_quality for sw-pipeline.sh
2
+ # Source from sw-pipeline.sh. Requires pipeline-quality-checks, state, ARTIFACTS_DIR, PIPELINE_CONFIG.
3
+ [[ -n "${_PIPELINE_INTELLIGENCE_LOADED:-}" ]] && return 0
4
+ _PIPELINE_INTELLIGENCE_LOADED=1
5
+
6
#######################################
# Decide whether a pipeline stage may be skipped for the current issue.
# Globals (read): INTELLIGENCE_COMPLEXITY, ISSUE_LABELS, BASE_BRANCH,
#                 ARTIFACTS_DIR, ISSUE_NUMBER
# Arguments:      $1 - stage id (e.g. plan, design, review, compound_quality)
# Outputs:        on skip, prints the machine-readable reason to stdout and
#                 emits an intelligence.stage_skipped event
# Returns:        0 if the stage should be skipped, 1 if it must run
#######################################
pipeline_should_skip_stage() {
  local stage_id="$1"
  local reason=""

  # Never skip the core stages — they're always required
  case "$stage_id" in
    intake|build|test|pr|merge) return 1 ;;
  esac

  # ── Signal 1: Intelligence complexity (1=simple … 10=complex, default 5) ──
  local complexity="${INTELLIGENCE_COMPLEXITY:-5}"

  # ── Signal 2: Issue labels ──
  local labels="${ISSUE_LABELS:-}"

  # Documentation issues: skip test, review, compound_quality.
  # Wrapping in commas makes the match exact for first/last labels too.
  if echo ",$labels," | grep -qiE ',documentation,|,docs,|,typo,'; then
    case "$stage_id" in
      test|review|compound_quality)
        reason="label:documentation"
        ;;
    esac
  fi

  # Hotfix issues: skip plan, design, compound_quality
  if echo ",$labels," | grep -qiE ',hotfix,|,urgent,|,p0,'; then
    case "$stage_id" in
      plan|design|compound_quality)
        reason="label:hotfix"
        ;;
    esac
  fi

  # ── Signal 3: Complexity thresholds ──
  if [[ -z "$reason" && "$complexity" -gt 0 ]]; then
    if [[ "$complexity" -le 2 ]]; then
      # Complexity 1-2: very simple → skip design, compound_quality, review
      case "$stage_id" in
        design|compound_quality|review)
          reason="complexity:${complexity}/10"
          ;;
      esac
    elif [[ "$complexity" -le 3 ]]; then
      # Complexity 3: still simple → skip design only
      case "$stage_id" in
        design)
          reason="complexity:${complexity}/10"
          ;;
      esac
    fi
  fi

  # ── Signal 4: Diff size (after build) ──
  # A tiny change (<20 lines) doesn't warrant the full compound quality pass.
  if [[ -z "$reason" && "$stage_id" == "compound_quality" ]]; then
    local diff_lines=0
    local _skip_stat
    _skip_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
    if [[ -n "${_skip_stat:-}" ]]; then
      local _s_ins _s_del
      _s_ins=$(echo "$_skip_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
      _s_del=$(echo "$_skip_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
      diff_lines=$(( ${_s_ins:-0} + ${_s_del:-0} ))
    fi
    if [[ "$diff_lines" -gt 0 && "$diff_lines" -lt 20 ]]; then
      reason="diff_size:${diff_lines}_lines"
    fi
  fi

  # ── Signal 5: Mid-pipeline reassessment override ──
  # ${ARTIFACTS_DIR:-} keeps the test safe when the caller runs under set -u.
  if [[ -z "$reason" && -f "${ARTIFACTS_DIR:-}/reassessment.json" ]]; then
    local skip_stages
    skip_stages=$(jq -r '.skip_stages // [] | .[]' "$ARTIFACTS_DIR/reassessment.json" 2>/dev/null || true)
    if echo "$skip_stages" | grep -qx "$stage_id" 2>/dev/null; then
      reason="reassessment:simpler_than_expected"
    fi
  fi

  if [[ -n "$reason" ]]; then
    emit_event "intelligence.stage_skipped" \
      "issue=${ISSUE_NUMBER:-0}" \
      "stage=$stage_id" \
      "reason=$reason" \
      "complexity=${complexity}" \
      "labels=${labels}"
    echo "$reason"
    return 0
  fi

  return 1
}
101
+
102
# ──────────────────────────────────────────────────────────────────────────────
# 2. Smart Finding Classification & Routing
# Parses compound quality findings and classifies each as:
#   architecture, security, correctness, performance, testing, style
# Writes $ARTIFACTS_DIR/classified-findings.json, emits an event, and prints
# the routing target (security|architecture|correctness|performance|testing)
# to stdout. Counts are grep-based heuristics over artifact files; each
# `|| true` keeps a zero-match grep (exit 1) from aborting under set -e.
# ──────────────────────────────────────────────────────────────────────────────
classify_quality_findings() {
  local findings_dir="$ARTIFACTS_DIR"
  local result_file="$ARTIFACTS_DIR/classified-findings.json"

  # Initialize per-category counters
  local arch_count=0 security_count=0 correctness_count=0 performance_count=0 testing_count=0 style_count=0

  # ── Parse adversarial review ──
  if [[ -f "$findings_dir/adversarial-review.md" ]]; then
    local adv_content
    adv_content=$(cat "$findings_dir/adversarial-review.md" 2>/dev/null || true)

    # Architecture findings: dependency violations, layer breaches, circular refs
    local arch_findings
    arch_findings=$(echo "$adv_content" | grep -ciE 'architect|layer.*violation|circular.*depend|coupling|abstraction|design.*flaw|separation.*concern' 2>/dev/null || true)
    arch_count=$((arch_count + ${arch_findings:-0}))

    # Security findings
    local sec_findings
    sec_findings=$(echo "$adv_content" | grep -ciE 'security|vulnerab|injection|XSS|CSRF|auth.*bypass|privilege|sanitiz|escap' 2>/dev/null || true)
    security_count=$((security_count + ${sec_findings:-0}))

    # Correctness findings: bugs, logic errors, edge cases
    local corr_findings
    corr_findings=$(echo "$adv_content" | grep -ciE '\*\*\[?(Critical|Bug|Error|critical|high)\]?\*\*|race.*condition|null.*pointer|off.*by.*one|edge.*case|undefined.*behav' 2>/dev/null || true)
    correctness_count=$((correctness_count + ${corr_findings:-0}))

    # Performance findings
    local perf_findings
    perf_findings=$(echo "$adv_content" | grep -ciE 'latency|slow|memory leak|O\(n|N\+1|cache miss|performance|bottleneck|throughput' 2>/dev/null || true)
    performance_count=$((performance_count + ${perf_findings:-0}))

    # Testing findings
    local test_findings
    test_findings=$(echo "$adv_content" | grep -ciE 'untested|missing test|no coverage|flaky|test gap|test missing|coverage gap' 2>/dev/null || true)
    testing_count=$((testing_count + ${test_findings:-0}))

    # Style findings
    local style_findings
    style_findings=$(echo "$adv_content" | grep -ciE 'naming|convention|format|style|readabil|inconsisten|whitespace|comment' 2>/dev/null || true)
    style_count=$((style_count + ${style_findings:-0}))
  fi

  # ── Parse architecture validation ──
  if [[ -f "$findings_dir/compound-architecture-validation.json" ]]; then
    local arch_json_count
    arch_json_count=$(jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' "$findings_dir/compound-architecture-validation.json" 2>/dev/null || echo "0")
    arch_count=$((arch_count + ${arch_json_count:-0}))
  fi

  # ── Parse security audit ──
  if [[ -f "$findings_dir/security-audit.log" ]]; then
    local sec_audit
    sec_audit=$(grep -ciE 'critical|high' "$findings_dir/security-audit.log" 2>/dev/null || true)
    security_count=$((security_count + ${sec_audit:-0}))
  fi

  # ── Parse negative review ──
  if [[ -f "$findings_dir/negative-review.md" ]]; then
    local neg_corr
    neg_corr=$(grep -ciE '\[Critical\]|\[High\]' "$findings_dir/negative-review.md" 2>/dev/null || true)
    correctness_count=$((correctness_count + ${neg_corr:-0}))
  fi

  # ── Determine routing ──
  # Priority order: security > architecture > correctness > performance > testing > style
  local route="correctness" # default
  local needs_backtrack=false
  local priority_findings=""

  if [[ "$security_count" -gt 0 ]]; then
    route="security"
    priority_findings="security:${security_count}"
  fi

  if [[ "$arch_count" -gt 0 ]]; then
    if [[ "$route" == "correctness" ]]; then
      route="architecture"
      needs_backtrack=true
    fi
    priority_findings="${priority_findings:+${priority_findings},}architecture:${arch_count}"
  fi

  if [[ "$correctness_count" -gt 0 ]]; then
    priority_findings="${priority_findings:+${priority_findings},}correctness:${correctness_count}"
  fi

  if [[ "$performance_count" -gt 0 ]]; then
    if [[ "$route" == "correctness" && "$correctness_count" -eq 0 ]]; then
      route="performance"
    fi
    priority_findings="${priority_findings:+${priority_findings},}performance:${performance_count}"
  fi

  if [[ "$testing_count" -gt 0 ]]; then
    if [[ "$route" == "correctness" && "$correctness_count" -eq 0 && "$performance_count" -eq 0 ]]; then
      route="testing"
    fi
    priority_findings="${priority_findings:+${priority_findings},}testing:${testing_count}"
  fi

  # Style findings don't affect routing or count toward failure threshold
  local total_blocking=$((arch_count + security_count + correctness_count + performance_count + testing_count))

  # Write classified findings atomically: build in a temp file, then mv.
  local tmp_findings
  tmp_findings="$(mktemp)"
  jq -n \
    --argjson arch "$arch_count" \
    --argjson security "$security_count" \
    --argjson correctness "$correctness_count" \
    --argjson performance "$performance_count" \
    --argjson testing "$testing_count" \
    --argjson style "$style_count" \
    --argjson total_blocking "$total_blocking" \
    --arg route "$route" \
    --argjson needs_backtrack "$needs_backtrack" \
    --arg priority "$priority_findings" \
    '{
      architecture: $arch,
      security: $security,
      correctness: $correctness,
      performance: $performance,
      testing: $testing,
      style: $style,
      total_blocking: $total_blocking,
      route: $route,
      needs_backtrack: $needs_backtrack,
      priority_findings: $priority
    }' > "$tmp_findings" 2>/dev/null && mv "$tmp_findings" "$result_file" || rm -f "$tmp_findings"

  emit_event "intelligence.findings_classified" \
    "issue=${ISSUE_NUMBER:-0}" \
    "architecture=$arch_count" \
    "security=$security_count" \
    "correctness=$correctness_count" \
    "performance=$performance_count" \
    "testing=$testing_count" \
    "style=$style_count" \
    "route=$route" \
    "needs_backtrack=$needs_backtrack"

  echo "$route"
}
255
+
256
# ──────────────────────────────────────────────────────────────────────────────
# 3. Adaptive Cycle Limits
# Replaces hardcoded max_cycles with convergence-driven limits.
# Globals (read): HOME, IGNORE_BUDGET, SCRIPT_DIR, ISSUE_NUMBER
# Arguments:
#   $1 - base cycle limit from the template
#   $2 - context name (compound_quality | build_test), default compound_quality
#   $3 - issue count observed in the current cycle (default 0)
#   $4 - issue count from the previous cycle (default -1 = no prior data)
# Outputs: prints the adjusted limit to stdout (0 means budget exhausted).
# Hard ceiling is always 2x the base limit.
# NOTE: divergence is only detected when prev_issue_count > 0; a jump from
# zero findings to many is treated as no prior data.
# ──────────────────────────────────────────────────────────────────────────────
pipeline_adaptive_cycles() {
  local base_limit="$1"
  local context="${2:-compound_quality}" # compound_quality or build_test
  local current_issue_count="${3:-0}"
  local prev_issue_count="${4:--1}"

  local adjusted="$base_limit"
  local hard_ceiling=$((base_limit * 2))

  # ── Learned iteration model (written by sw-self-optimize) ──
  local model_file="${HOME}/.shipwright/optimization/iteration-model.json"
  if [[ -f "$model_file" ]]; then
    local learned
    learned=$(jq -r --arg ctx "$context" '.[$ctx].recommended_cycles // 0' "$model_file" 2>/dev/null || echo "0")
    if [[ "$learned" -gt 0 && "$learned" -le "$hard_ceiling" ]]; then
      adjusted="$learned"
    fi
  fi

  # ── Convergence acceleration ──
  # If issue count drops >50% per cycle, extend limit by 1 (we're making progress)
  if [[ "$prev_issue_count" -gt 0 && "$current_issue_count" -ge 0 ]]; then
    local half_prev=$((prev_issue_count / 2))
    if [[ "$current_issue_count" -le "$half_prev" && "$current_issue_count" -gt 0 ]]; then
      # Rapid convergence — extend by 1, but never past the hard ceiling
      local new_limit=$((adjusted + 1))
      if [[ "$new_limit" -le "$hard_ceiling" ]]; then
        adjusted="$new_limit"
        emit_event "intelligence.convergence_acceleration" \
          "issue=${ISSUE_NUMBER:-0}" \
          "context=$context" \
          "prev_issues=$prev_issue_count" \
          "current_issues=$current_issue_count" \
          "new_limit=$adjusted"
      fi
    fi

    # ── Divergence detection ──
    # If issue count increases, reduce remaining cycles (floor of 1)
    if [[ "$current_issue_count" -gt "$prev_issue_count" ]]; then
      local reduced=$((adjusted - 1))
      if [[ "$reduced" -ge 1 ]]; then
        adjusted="$reduced"
        emit_event "intelligence.divergence_detected" \
          "issue=${ISSUE_NUMBER:-0}" \
          "context=$context" \
          "prev_issues=$prev_issue_count" \
          "current_issues=$current_issue_count" \
          "new_limit=$adjusted"
      fi
    fi
  fi

  # ── Budget gate ──
  # :- defaults keep this safe when the caller runs under set -u.
  if [[ "${IGNORE_BUDGET:-}" != "true" ]] && [[ -x "${SCRIPT_DIR:-}/sw-cost.sh" ]]; then
    local budget_rc=0
    bash "$SCRIPT_DIR/sw-cost.sh" check-budget 2>/dev/null || budget_rc=$?
    if [[ "$budget_rc" -eq 2 ]]; then
      # Budget exhausted — cap at current cycle
      adjusted=0
      emit_event "intelligence.budget_cap" \
        "issue=${ISSUE_NUMBER:-0}" \
        "context=$context"
    fi
  fi

  # ── Enforce hard ceiling ──
  if [[ "$adjusted" -gt "$hard_ceiling" ]]; then
    adjusted="$hard_ceiling"
  fi

  echo "$adjusted"
}
338
+
339
# ──────────────────────────────────────────────────────────────────────────────
# 5. Intelligent Audit Selection
# AI-driven audit selection — all audits enabled, intensity varies.
# Globals (read): PIPELINE_CONFIG, HOME, PROJECT_ROOT, ISSUE_NUMBER
# Outputs: a JSON object {adversarial,architecture,simulation,security,dod}
#          whose values are off|lightweight|targeted|full, to stdout.
# ──────────────────────────────────────────────────────────────────────────────
pipeline_select_audits() {
  # Explicit per-stage override from the pipeline config, default "auto".
  # ${PIPELINE_CONFIG:-} keeps the expansion safe under set -u.
  local audit_intensity
  audit_intensity=$(jq -r --arg id "compound_quality" \
    '(.stages[] | select(.id == $id) | .config.audit_intensity) // "auto"' \
    "${PIPELINE_CONFIG:-}" 2>/dev/null) || true
  [[ -z "$audit_intensity" || "$audit_intensity" == "null" ]] && audit_intensity="auto"

  # Short-circuit for explicit overrides
  case "$audit_intensity" in
    off)
      echo '{"adversarial":"off","architecture":"off","simulation":"off","security":"off","dod":"off"}'
      return 0
      ;;
    full|lightweight)
      jq -n --arg i "$audit_intensity" \
        '{adversarial:$i,architecture:$i,simulation:$i,security:$i,dod:$i}'
      return 0
      ;;
  esac

  # ── Auto mode: data-driven intensity ──
  local default_intensity="targeted"
  local security_intensity="targeted"

  # Read last 5 quality scores for this repo
  local quality_scores_file="${HOME}/.shipwright/optimization/quality-scores.jsonl"
  local repo_name
  repo_name=$(basename "${PROJECT_ROOT:-.}") || true
  if [[ -f "$quality_scores_file" ]]; then
    local recent_scores
    # -F: repo_name is a literal, not a regex — dots etc. in a directory
    # name must not act as metacharacters.
    recent_scores=$(grep -F -- "\"repo\":\"${repo_name}\"" "$quality_scores_file" 2>/dev/null | tail -5) || true
    if [[ -n "$recent_scores" ]]; then
      # Any critical findings in recent history force a full security audit
      local has_critical
      has_critical=$(echo "$recent_scores" | jq -s '[.[].findings.critical // 0] | add' 2>/dev/null || echo "0")
      has_critical="${has_critical:-0}"
      if [[ "$has_critical" -gt 0 ]]; then
        security_intensity="full"
      fi

      # Compute average quality score (defaults to 70 when empty)
      local avg_score
      avg_score=$(echo "$recent_scores" | jq -s 'if length > 0 then ([.[].quality_score] | add / length | floor) else 70 end' 2>/dev/null || echo "70")
      avg_score="${avg_score:-70}"

      if [[ "$avg_score" -lt 60 ]]; then
        default_intensity="full"
        security_intensity="full"
      elif [[ "$avg_score" -gt 80 ]]; then
        default_intensity="lightweight"
        [[ "$security_intensity" != "full" ]] && security_intensity="lightweight"
      fi
    fi
  fi

  # Intelligence cache: upgrade targeted→full for complex changes
  local intel_cache="${PROJECT_ROOT:-.}/.claude/intelligence-cache.json"
  if [[ -f "$intel_cache" && "$default_intensity" == "targeted" ]]; then
    local complexity
    complexity=$(jq -r '.complexity // "medium"' "$intel_cache" 2>/dev/null || echo "medium")
    if [[ "$complexity" == "high" || "$complexity" == "very_high" ]]; then
      default_intensity="full"
      security_intensity="full"
    fi
  fi

  emit_event "pipeline.audit_selection" \
    "issue=${ISSUE_NUMBER:-0}" \
    "default_intensity=$default_intensity" \
    "security_intensity=$security_intensity" \
    "repo=$repo_name"

  jq -n \
    --arg adv "$default_intensity" \
    --arg arch "$default_intensity" \
    --arg sim "$default_intensity" \
    --arg sec "$security_intensity" \
    --arg dod "$default_intensity" \
    '{adversarial:$adv,architecture:$arch,simulation:$sim,security:$sec,dod:$dod}'
}
423
+
424
# ──────────────────────────────────────────────────────────────────────────────
# 6. Definition of Done Verification
# Strict DoD enforcement after compound quality completes.
# Globals (read): ARTIFACTS_DIR, BASE_BRANCH, ISSUE_NUMBER
# Arguments:      $1 - artifacts directory (defaults to $ARTIFACTS_DIR)
# Outputs:        writes $artifacts_dir/dod-verification.json; emits an event;
#                 warn/success messages for the operator
# Returns:        0 when the aggregate pass rate is >= 70%, 1 otherwise
# ──────────────────────────────────────────────────────────────────────────────
pipeline_verify_dod() {
  local artifacts_dir="${1:-$ARTIFACTS_DIR}"
  local checks_total=0 checks_passed=0
  # NOTE(review): 'results' is never used below — candidate for removal.
  local results=""

  # 1. Test coverage: verify changed source files have test counterparts
  local changed_files
  changed_files=$(git diff --name-only "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
  local missing_tests=""
  local files_checked=0

  if [[ -n "$changed_files" ]]; then
    while IFS= read -r src_file; do
      [[ -z "$src_file" ]] && continue
      # Only check source code files
      case "$src_file" in
        *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.sh)
          # Skip test files themselves and config files
          case "$src_file" in
            *test*|*spec*|*__tests__*|*.config.*|*.d.ts) continue ;;
          esac
          files_checked=$((files_checked + 1))
          checks_total=$((checks_total + 1))
          # Check for a corresponding test file next to the source
          local base_name dir_name ext
          base_name=$(basename "$src_file")
          dir_name=$(dirname "$src_file")
          ext="${base_name##*.}"
          local stem="${base_name%.*}"
          local test_found=false
          # Common test file patterns (JS/TS dot-suffix, Python test_ prefix,
          # Go _test suffix, etc.)
          for pattern in \
            "${dir_name}/${stem}.test.${ext}" \
            "${dir_name}/${stem}.spec.${ext}" \
            "${dir_name}/__tests__/${stem}.test.${ext}" \
            "${dir_name}/${stem}-test.${ext}" \
            "${dir_name}/test_${stem}.${ext}" \
            "${dir_name}/${stem}_test.${ext}"; do
            if [[ -f "$pattern" ]]; then
              test_found=true
              break
            fi
          done
          if $test_found; then
            checks_passed=$((checks_passed + 1))
          else
            # '\n' stays literal here; expanded later by `echo -e` below
            missing_tests="${missing_tests}${src_file}\n"
          fi
          ;;
      esac
    # Here-doc (not a pipe) so the counter updates survive the loop
    done <<EOF
$changed_files
EOF
  fi

  # 2. Test-added verification: if significant logic added, ensure tests were also added
  local logic_lines=0 test_lines=0
  if [[ -n "$changed_files" ]]; then
    local full_diff
    full_diff=$(git diff "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
    if [[ -n "$full_diff" ]]; then
      # Count added lines matching source patterns (rough heuristic)
      logic_lines=$(echo "$full_diff" | grep -cE '^\+.*(function |class |if |for |while |return |export )' 2>/dev/null || true)
      logic_lines="${logic_lines:-0}"
      # Count added lines in test files
      test_lines=$(echo "$full_diff" | grep -cE '^\+.*(it\(|test\(|describe\(|expect\(|assert|def test_|func Test)' 2>/dev/null || true)
      test_lines="${test_lines:-0}"
    fi
  fi
  checks_total=$((checks_total + 1))
  local test_ratio_passed=true
  # >20 logic lines with zero test lines fails this single check
  if [[ "$logic_lines" -gt 20 && "$test_lines" -eq 0 ]]; then
    test_ratio_passed=false
    warn "DoD verification: ${logic_lines} logic lines added but no test lines detected"
  else
    checks_passed=$((checks_passed + 1))
  fi

  # 3. Behavioral verification: check DoD audit artifacts for evidence
  local dod_audit_file="$artifacts_dir/dod-audit.md"
  local dod_verified=0 dod_total_items=0
  if [[ -f "$dod_audit_file" ]]; then
    # Count checklist items marked as passing ("- [x]")
    dod_total_items=$(grep -cE '^\s*-\s*\[x\]' "$dod_audit_file" 2>/dev/null || true)
    dod_total_items="${dod_total_items:-0}"
    local dod_failing
    # Unchecked items ("- [ ]") count toward the total but not passes
    dod_failing=$(grep -cE '^\s*-\s*\[\s\]' "$dod_audit_file" 2>/dev/null || true)
    dod_failing="${dod_failing:-0}"
    dod_verified=$dod_total_items
    checks_total=$((checks_total + dod_total_items + ${dod_failing:-0}))
    checks_passed=$((checks_passed + dod_total_items))
  fi

  # Compute pass rate (integer percent; 100 when nothing was checked)
  local pass_rate=100
  if [[ "$checks_total" -gt 0 ]]; then
    pass_rate=$(( (checks_passed * 100) / checks_total ))
  fi

  # Write results (temp file then mv for atomic replace)
  local tmp_result
  tmp_result=$(mktemp)
  jq -n \
    --argjson checks_total "$checks_total" \
    --argjson checks_passed "$checks_passed" \
    --argjson pass_rate "$pass_rate" \
    --argjson files_checked "$files_checked" \
    --arg missing_tests "$(echo -e "$missing_tests" | head -20)" \
    --argjson logic_lines "$logic_lines" \
    --argjson test_lines "$test_lines" \
    --argjson test_ratio_passed "$test_ratio_passed" \
    --argjson dod_verified "$dod_verified" \
    '{
      checks_total: $checks_total,
      checks_passed: $checks_passed,
      pass_rate: $pass_rate,
      files_checked: $files_checked,
      missing_tests: ($missing_tests | split("\n") | map(select(. != ""))),
      logic_lines: $logic_lines,
      test_lines: $test_lines,
      test_ratio_passed: $test_ratio_passed,
      dod_verified: $dod_verified
    }' > "$tmp_result" 2>/dev/null
  mv "$tmp_result" "$artifacts_dir/dod-verification.json"

  emit_event "pipeline.dod_verification" \
    "issue=${ISSUE_NUMBER:-0}" \
    "checks_total=$checks_total" \
    "checks_passed=$checks_passed" \
    "pass_rate=$pass_rate"

  # Fail if pass rate < 70%
  if [[ "$pass_rate" -lt 70 ]]; then
    warn "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
    return 1
  fi

  success "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
  return 0
}
568
+
569
+ # ──────────────────────────────────────────────────────────────────────────────
570
+ # 7. Source Code Security Scan
571
+ # Grep-based vulnerability pattern matching on changed files.
572
+ # ──────────────────────────────────────────────────────────────────────────────
573
+ pipeline_security_source_scan() {
574
+ local base_branch="${1:-${BASE_BRANCH:-main}}"
575
+ local findings="[]"
576
+ local finding_count=0
577
+
578
+ local changed_files
579
+ changed_files=$(git diff --name-only "${base_branch}...HEAD" 2>/dev/null || true)
580
+ [[ -z "$changed_files" ]] && { echo "[]"; return 0; }
581
+
582
+ local tmp_findings
583
+ tmp_findings=$(mktemp)
584
+ echo "[]" > "$tmp_findings"
585
+
586
+ while IFS= read -r file; do
587
+ [[ -z "$file" || ! -f "$file" ]] && continue
588
+ # Only scan code files
589
+ case "$file" in
590
+ *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.java|*.rb|*.php|*.sh) ;;
591
+ *) continue ;;
592
+ esac
593
+
594
+ # SQL injection patterns
595
+ local sql_matches
596
+ sql_matches=$(grep -nE '(query|execute|sql)\s*\(?\s*[`"'"'"']\s*.*\$\{|\.query\s*\(\s*[`"'"'"'].*\+' "$file" 2>/dev/null || true)
597
+ if [[ -n "$sql_matches" ]]; then
598
+ while IFS= read -r match; do
599
+ [[ -z "$match" ]] && continue
600
+ local line_num="${match%%:*}"
601
+ finding_count=$((finding_count + 1))
602
+ local current
603
+ current=$(cat "$tmp_findings")
604
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "sql_injection" \
605
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential SQL injection via string concatenation"}]' \
606
+ > "$tmp_findings" 2>/dev/null || true
607
+ done <<SQLEOF
608
+ $sql_matches
609
+ SQLEOF
610
+ fi
611
+
612
+ # XSS patterns
613
+ local xss_matches
614
+ xss_matches=$(grep -nE 'innerHTML\s*=|document\.write\s*\(|dangerouslySetInnerHTML' "$file" 2>/dev/null || true)
615
+ if [[ -n "$xss_matches" ]]; then
616
+ while IFS= read -r match; do
617
+ [[ -z "$match" ]] && continue
618
+ local line_num="${match%%:*}"
619
+ finding_count=$((finding_count + 1))
620
+ local current
621
+ current=$(cat "$tmp_findings")
622
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "xss" \
623
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential XSS via unsafe DOM manipulation"}]' \
624
+ > "$tmp_findings" 2>/dev/null || true
625
+ done <<XSSEOF
626
+ $xss_matches
627
+ XSSEOF
628
+ fi
629
+
630
+ # Command injection patterns
631
+ local cmd_matches
632
+ cmd_matches=$(grep -nE 'eval\s*\(|child_process|os\.system\s*\(|subprocess\.(call|run|Popen)\s*\(' "$file" 2>/dev/null || true)
633
+ if [[ -n "$cmd_matches" ]]; then
634
+ while IFS= read -r match; do
635
+ [[ -z "$match" ]] && continue
636
+ local line_num="${match%%:*}"
637
+ finding_count=$((finding_count + 1))
638
+ local current
639
+ current=$(cat "$tmp_findings")
640
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "command_injection" \
641
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential command injection via unsafe execution"}]' \
642
+ > "$tmp_findings" 2>/dev/null || true
643
+ done <<CMDEOF
644
+ $cmd_matches
645
+ CMDEOF
646
+ fi
647
+
648
+ # Hardcoded secrets patterns
649
+ local secret_matches
650
+ secret_matches=$(grep -nEi '(password|api_key|secret|token)\s*=\s*['"'"'"][A-Za-z0-9+/=]{8,}['"'"'"]' "$file" 2>/dev/null || true)
651
+ if [[ -n "$secret_matches" ]]; then
652
+ while IFS= read -r match; do
653
+ [[ -z "$match" ]] && continue
654
+ local line_num="${match%%:*}"
655
+ finding_count=$((finding_count + 1))
656
+ local current
657
+ current=$(cat "$tmp_findings")
658
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "hardcoded_secret" \
659
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"critical","description":"Potential hardcoded secret or credential"}]' \
660
+ > "$tmp_findings" 2>/dev/null || true
661
+ done <<SECEOF
662
+ $secret_matches
663
+ SECEOF
664
+ fi
665
+
666
+ # Insecure crypto patterns
667
+ local crypto_matches
668
+ crypto_matches=$(grep -nE '(md5|MD5|sha1|SHA1)\s*\(' "$file" 2>/dev/null || true)
669
+ if [[ -n "$crypto_matches" ]]; then
670
+ while IFS= read -r match; do
671
+ [[ -z "$match" ]] && continue
672
+ local line_num="${match%%:*}"
673
+ finding_count=$((finding_count + 1))
674
+ local current
675
+ current=$(cat "$tmp_findings")
676
+ echo "$current" | jq --arg f "$file" --arg l "$line_num" --arg p "insecure_crypto" \
677
+ '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":"major","description":"Weak cryptographic function (consider SHA-256+)"}]' \
678
+ > "$tmp_findings" 2>/dev/null || true
679
+ done <<CRYEOF
680
+ $crypto_matches
681
+ CRYEOF
682
+ fi
683
+ done <<FILESEOF
684
+ $changed_files
685
+ FILESEOF
686
+
687
+ # Write to artifacts and output
688
+ findings=$(cat "$tmp_findings")
689
+ rm -f "$tmp_findings"
690
+
691
+ if [[ -n "${ARTIFACTS_DIR:-}" ]]; then
692
+ local tmp_scan
693
+ tmp_scan=$(mktemp)
694
+ echo "$findings" > "$tmp_scan"
695
+ mv "$tmp_scan" "$ARTIFACTS_DIR/security-source-scan.json"
696
+ fi
697
+
698
+ emit_event "pipeline.security_source_scan" \
699
+ "issue=${ISSUE_NUMBER:-0}" \
700
+ "findings=$finding_count"
701
+
702
+ echo "$finding_count"
703
+ }
704
+
705
+ # ──────────────────────────────────────────────────────────────────────────────
706
+ # 8. Quality Score Recording
707
+ # Writes quality scores to JSONL for learning.
708
+ # ──────────────────────────────────────────────────────────────────────────────
709
+ pipeline_record_quality_score() {
710
+ local quality_score="${1:-0}"
711
+ local critical="${2:-0}"
712
+ local major="${3:-0}"
713
+ local minor="${4:-0}"
714
+ local dod_pass_rate="${5:-0}"
715
+ local audits_run="${6:-}"
716
+
717
+ local scores_dir="${HOME}/.shipwright/optimization"
718
+ local scores_file="${scores_dir}/quality-scores.jsonl"
719
+ mkdir -p "$scores_dir"
720
+
721
+ local repo_name
722
+ repo_name=$(basename "${PROJECT_ROOT:-.}") || true
723
+
724
+ local tmp_score
725
+ tmp_score=$(mktemp)
726
+ jq -n \
727
+ --arg repo "$repo_name" \
728
+ --arg issue "${ISSUE_NUMBER:-0}" \
729
+ --arg ts "$(now_iso)" \
730
+ --argjson score "$quality_score" \
731
+ --argjson critical "$critical" \
732
+ --argjson major "$major" \
733
+ --argjson minor "$minor" \
734
+ --argjson dod "$dod_pass_rate" \
735
+ --arg template "${PIPELINE_NAME:-standard}" \
736
+ --arg audits "$audits_run" \
737
+ '{
738
+ repo: $repo,
739
+ issue: ($issue | tonumber),
740
+ timestamp: $ts,
741
+ quality_score: $score,
742
+ findings: {critical: $critical, major: $major, minor: $minor},
743
+ dod_pass_rate: $dod,
744
+ template: $template,
745
+ audits_run: ($audits | split(",") | map(select(. != "")))
746
+ }' > "$tmp_score" 2>/dev/null
747
+
748
+ cat "$tmp_score" >> "$scores_file"
749
+ rm -f "$tmp_score"
750
+
751
+ # Rotate quality scores file to prevent unbounded growth
752
+ type rotate_jsonl &>/dev/null 2>&1 && rotate_jsonl "$scores_file" 5000
753
+
754
+ emit_event "pipeline.quality_score_recorded" \
755
+ "issue=${ISSUE_NUMBER:-0}" \
756
+ "quality_score=$quality_score" \
757
+ "critical=$critical" \
758
+ "major=$major" \
759
+ "minor=$minor"
760
+ }
761
+
762
+ # ──────────────────────────────────────────────────────────────────────────────
763
+ # 4. Mid-Pipeline Complexity Re-evaluation
764
+ # After build+test completes, compares actual effort to initial estimate.
765
+ # Updates skip recommendations and model routing for remaining stages.
766
+ # ──────────────────────────────────────────────────────────────────────────────
767
+ pipeline_reassess_complexity() {
768
+ local initial_complexity="${INTELLIGENCE_COMPLEXITY:-5}"
769
+ local reassessment_file="$ARTIFACTS_DIR/reassessment.json"
770
+
771
+ # ── Gather actual metrics ──
772
+ local files_changed=0 lines_changed=0 first_try_pass=false self_heal_cycles=0
773
+
774
+ files_changed=$(git diff "${BASE_BRANCH:-main}...HEAD" --name-only 2>/dev/null | wc -l | tr -d ' ') || files_changed=0
775
+ files_changed="${files_changed:-0}"
776
+
777
+ # Count lines changed (insertions + deletions) without pipefail issues
778
+ lines_changed=0
779
+ local _diff_stat
780
+ _diff_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
781
+ if [[ -n "${_diff_stat:-}" ]]; then
782
+ local _ins _del
783
+ _ins=$(echo "$_diff_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
784
+ _del=$(echo "$_diff_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
785
+ lines_changed=$(( ${_ins:-0} + ${_del:-0} ))
786
+ fi
787
+
788
+ self_heal_cycles="${SELF_HEAL_COUNT:-0}"
789
+ if [[ "$self_heal_cycles" -eq 0 ]]; then
790
+ first_try_pass=true
791
+ fi
792
+
793
+ # ── Compare to expectations ──
794
+ local actual_complexity="$initial_complexity"
795
+ local assessment="as_expected"
796
+ local skip_stages="[]"
797
+
798
+ # Simpler than expected: small diff, tests passed first try
799
+ if [[ "$lines_changed" -lt 50 && "$first_try_pass" == "true" && "$files_changed" -lt 5 ]]; then
800
+ actual_complexity=$((initial_complexity > 2 ? initial_complexity - 2 : 1))
801
+ assessment="simpler_than_expected"
802
+ # Mark compound_quality as skippable, simplify review
803
+ skip_stages='["compound_quality"]'
804
+ # Much simpler
805
+ elif [[ "$lines_changed" -lt 20 && "$first_try_pass" == "true" && "$files_changed" -lt 3 ]]; then
806
+ actual_complexity=1
807
+ assessment="much_simpler"
808
+ skip_stages='["compound_quality","review"]'
809
+ # Harder than expected: large diff, multiple self-heal cycles
810
+ elif [[ "$lines_changed" -gt 500 || "$self_heal_cycles" -gt 2 ]]; then
811
+ actual_complexity=$((initial_complexity < 9 ? initial_complexity + 2 : 10))
812
+ assessment="harder_than_expected"
813
+ # Ensure compound_quality runs, possibly upgrade model
814
+ skip_stages='[]'
815
+ # Much harder
816
+ elif [[ "$lines_changed" -gt 1000 || "$self_heal_cycles" -gt 4 ]]; then
817
+ actual_complexity=10
818
+ assessment="much_harder"
819
+ skip_stages='[]'
820
+ fi
821
+
822
+ # ── Write reassessment ──
823
+ local tmp_reassess
824
+ tmp_reassess="$(mktemp)"
825
+ jq -n \
826
+ --argjson initial "$initial_complexity" \
827
+ --argjson actual "$actual_complexity" \
828
+ --arg assessment "$assessment" \
829
+ --argjson files_changed "$files_changed" \
830
+ --argjson lines_changed "$lines_changed" \
831
+ --argjson self_heal_cycles "$self_heal_cycles" \
832
+ --argjson first_try "$first_try_pass" \
833
+ --argjson skip_stages "$skip_stages" \
834
+ '{
835
+ initial_complexity: $initial,
836
+ actual_complexity: $actual,
837
+ assessment: $assessment,
838
+ files_changed: $files_changed,
839
+ lines_changed: $lines_changed,
840
+ self_heal_cycles: $self_heal_cycles,
841
+ first_try_pass: $first_try,
842
+ skip_stages: $skip_stages
843
+ }' > "$tmp_reassess" 2>/dev/null && mv "$tmp_reassess" "$reassessment_file" || rm -f "$tmp_reassess"
844
+
845
+ # Update global complexity for downstream stages
846
+ PIPELINE_ADAPTIVE_COMPLEXITY="$actual_complexity"
847
+
848
+ emit_event "intelligence.reassessment" \
849
+ "issue=${ISSUE_NUMBER:-0}" \
850
+ "initial=$initial_complexity" \
851
+ "actual=$actual_complexity" \
852
+ "assessment=$assessment" \
853
+ "files=$files_changed" \
854
+ "lines=$lines_changed" \
855
+ "self_heals=$self_heal_cycles"
856
+
857
+ # ── Store for learning ──
858
+ local learning_file="${HOME}/.shipwright/optimization/complexity-actuals.jsonl"
859
+ mkdir -p "${HOME}/.shipwright/optimization" 2>/dev/null || true
860
+ echo "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"initial\":$initial_complexity,\"actual\":$actual_complexity,\"files\":$files_changed,\"lines\":$lines_changed,\"ts\":\"$(now_iso)\"}" \
861
+ >> "$learning_file" 2>/dev/null || true
862
+
863
+ echo "$assessment"
864
+ }
865
+
866
+ # ──────────────────────────────────────────────────────────────────────────────
867
+ # 5. Backtracking Support
868
+ # When compound_quality detects architecture-level problems, backtracks to
869
+ # the design stage instead of just feeding findings to the build loop.
870
+ # Limited to 1 backtrack per pipeline run to prevent infinite loops.
871
+ # ──────────────────────────────────────────────────────────────────────────────
872
+ pipeline_backtrack_to_stage() {
873
+ local target_stage="$1"
874
+ local reason="${2:-architecture_violation}"
875
+
876
+ # Prevent infinite backtracking
877
+ if [[ "$PIPELINE_BACKTRACK_COUNT" -ge "$PIPELINE_MAX_BACKTRACKS" ]]; then
878
+ warn "Max backtracks ($PIPELINE_MAX_BACKTRACKS) reached — cannot backtrack to $target_stage"
879
+ emit_event "intelligence.backtrack_blocked" \
880
+ "issue=${ISSUE_NUMBER:-0}" \
881
+ "target=$target_stage" \
882
+ "reason=max_backtracks_reached" \
883
+ "count=$PIPELINE_BACKTRACK_COUNT"
884
+ return 1
885
+ fi
886
+
887
+ PIPELINE_BACKTRACK_COUNT=$((PIPELINE_BACKTRACK_COUNT + 1))
888
+
889
+ info "Backtracking to ${BOLD}${target_stage}${RESET} stage (reason: ${reason})"
890
+
891
+ emit_event "intelligence.backtrack" \
892
+ "issue=${ISSUE_NUMBER:-0}" \
893
+ "target=$target_stage" \
894
+ "reason=$reason"
895
+
896
+ # Gather architecture context from findings
897
+ local arch_context=""
898
+ if [[ -f "$ARTIFACTS_DIR/compound-architecture-validation.json" ]]; then
899
+ arch_context=$(jq -r '[.[] | select(.severity == "critical" or .severity == "high") | .message // .description // ""] | join("\n")' \
900
+ "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || true)
901
+ fi
902
+ if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
903
+ local arch_lines
904
+ arch_lines=$(grep -iE 'architect|layer.*violation|circular.*depend|coupling|design.*flaw' \
905
+ "$ARTIFACTS_DIR/adversarial-review.md" 2>/dev/null || true)
906
+ if [[ -n "$arch_lines" ]]; then
907
+ arch_context="${arch_context}
908
+ ${arch_lines}"
909
+ fi
910
+ fi
911
+
912
+ # Reset stages from target onward
913
+ set_stage_status "$target_stage" "pending"
914
+ set_stage_status "build" "pending"
915
+ set_stage_status "test" "pending"
916
+
917
+ # Augment goal with architecture context for re-run
918
+ local original_goal="$GOAL"
919
+ if [[ -n "$arch_context" ]]; then
920
+ GOAL="$GOAL
921
+
922
+ IMPORTANT — Architecture violations were detected during quality review. Redesign to fix:
923
+ $arch_context
924
+
925
+ Update the design to address these violations, then rebuild."
926
+ fi
927
+
928
+ # Re-run design stage
929
+ info "Re-running ${BOLD}${target_stage}${RESET} with architecture context..."
930
+ if "stage_${target_stage}" 2>/dev/null; then
931
+ mark_stage_complete "$target_stage"
932
+ success "Backtrack: ${target_stage} re-run complete"
933
+ else
934
+ GOAL="$original_goal"
935
+ error "Backtrack: ${target_stage} re-run failed"
936
+ return 1
937
+ fi
938
+
939
+ # Re-run build+test
940
+ info "Re-running build→test after backtracked ${target_stage}..."
941
+ if self_healing_build_test; then
942
+ success "Backtrack: build→test passed after ${target_stage} redesign"
943
+ GOAL="$original_goal"
944
+ return 0
945
+ else
946
+ GOAL="$original_goal"
947
+ error "Backtrack: build→test failed after ${target_stage} redesign"
948
+ return 1
949
+ fi
950
+ }
951
+
952
+ compound_rebuild_with_feedback() {
953
+ local feedback_file="$ARTIFACTS_DIR/quality-feedback.md"
954
+
955
+ # ── Intelligence: classify findings and determine routing ──
956
+ local route="correctness"
957
+ route=$(classify_quality_findings 2>/dev/null) || route="correctness"
958
+
959
+ # ── Build structured findings JSON alongside markdown ──
960
+ local structured_findings="[]"
961
+ local s_total_critical=0 s_total_major=0 s_total_minor=0
962
+
963
+ if [[ -f "$ARTIFACTS_DIR/classified-findings.json" ]]; then
964
+ s_total_critical=$(jq -r '.security // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
965
+ s_total_major=$(jq -r '.correctness // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
966
+ s_total_minor=$(jq -r '.style // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
967
+ fi
968
+
969
+ local tmp_qf
970
+ tmp_qf="$(mktemp)"
971
+ jq -n \
972
+ --arg route "$route" \
973
+ --argjson total_critical "$s_total_critical" \
974
+ --argjson total_major "$s_total_major" \
975
+ --argjson total_minor "$s_total_minor" \
976
+ '{route: $route, total_critical: $total_critical, total_major: $total_major, total_minor: $total_minor}' \
977
+ > "$tmp_qf" 2>/dev/null && mv "$tmp_qf" "$ARTIFACTS_DIR/quality-findings.json" || rm -f "$tmp_qf"
978
+
979
+ # ── Architecture route: backtrack to design instead of rebuild ──
980
+ if [[ "$route" == "architecture" ]]; then
981
+ info "Architecture-level findings detected — attempting backtrack to design"
982
+ if pipeline_backtrack_to_stage "design" "architecture_violation" 2>/dev/null; then
983
+ return 0
984
+ fi
985
+ # Backtrack failed or already used — fall through to standard rebuild
986
+ warn "Backtrack unavailable — falling through to standard rebuild"
987
+ fi
988
+
989
+ # Collect all findings (prioritized by classification)
990
+ {
991
+ echo "# Quality Feedback — Issues to Fix"
992
+ echo ""
993
+
994
+ # Security findings first (highest priority)
995
+ if [[ "$route" == "security" || -f "$ARTIFACTS_DIR/security-audit.log" ]] && grep -qiE 'critical|high' "$ARTIFACTS_DIR/security-audit.log" 2>/dev/null; then
996
+ echo "## 🔴 PRIORITY: Security Findings (fix these first)"
997
+ cat "$ARTIFACTS_DIR/security-audit.log"
998
+ echo ""
999
+ echo "Security issues MUST be resolved before any other changes."
1000
+ echo ""
1001
+ fi
1002
+
1003
+ # Correctness findings
1004
+ if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
1005
+ echo "## Adversarial Review Findings"
1006
+ cat "$ARTIFACTS_DIR/adversarial-review.md"
1007
+ echo ""
1008
+ fi
1009
+ if [[ -f "$ARTIFACTS_DIR/negative-review.md" ]]; then
1010
+ echo "## Negative Prompting Concerns"
1011
+ cat "$ARTIFACTS_DIR/negative-review.md"
1012
+ echo ""
1013
+ fi
1014
+ if [[ -f "$ARTIFACTS_DIR/dod-audit.md" ]]; then
1015
+ echo "## DoD Audit Failures"
1016
+ grep "❌" "$ARTIFACTS_DIR/dod-audit.md" 2>/dev/null || true
1017
+ echo ""
1018
+ fi
1019
+ if [[ -f "$ARTIFACTS_DIR/api-compat.log" ]] && grep -qi 'BREAKING' "$ARTIFACTS_DIR/api-compat.log" 2>/dev/null; then
1020
+ echo "## API Breaking Changes"
1021
+ cat "$ARTIFACTS_DIR/api-compat.log"
1022
+ echo ""
1023
+ fi
1024
+
1025
+ # Style findings last (deprioritized, informational)
1026
+ if [[ -f "$ARTIFACTS_DIR/classified-findings.json" ]]; then
1027
+ local style_count
1028
+ style_count=$(jq -r '.style // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
1029
+ if [[ "$style_count" -gt 0 ]]; then
1030
+ echo "## Style Notes (non-blocking, address if time permits)"
1031
+ echo "${style_count} style suggestions found. These do not block the build."
1032
+ echo ""
1033
+ fi
1034
+ fi
1035
+ } > "$feedback_file"
1036
+
1037
+ # Validate feedback file has actual content
1038
+ if [[ ! -s "$feedback_file" ]]; then
1039
+ warn "No quality feedback collected — skipping rebuild"
1040
+ return 1
1041
+ fi
1042
+
1043
+ # Reset build/test stages
1044
+ set_stage_status "build" "pending"
1045
+ set_stage_status "test" "pending"
1046
+ set_stage_status "review" "pending"
1047
+
1048
+ # Augment GOAL with quality feedback (route-specific instructions)
1049
+ local original_goal="$GOAL"
1050
+ local feedback_content
1051
+ feedback_content=$(cat "$feedback_file")
1052
+
1053
+ local route_instruction=""
1054
+ case "$route" in
1055
+ security)
1056
+ route_instruction="SECURITY PRIORITY: Fix all security vulnerabilities FIRST, then address other issues. Security issues are BLOCKING."
1057
+ ;;
1058
+ performance)
1059
+ route_instruction="PERFORMANCE PRIORITY: Address performance regressions and optimizations. Check for N+1 queries, memory leaks, and algorithmic complexity."
1060
+ ;;
1061
+ testing)
1062
+ route_instruction="TESTING PRIORITY: Add missing test coverage and fix flaky tests before addressing other issues."
1063
+ ;;
1064
+ correctness)
1065
+ route_instruction="Fix every issue listed above while keeping all existing functionality working."
1066
+ ;;
1067
+ architecture)
1068
+ route_instruction="ARCHITECTURE: Fix structural issues. Check dependency direction, layer boundaries, and separation of concerns."
1069
+ ;;
1070
+ *)
1071
+ route_instruction="Fix every issue listed above while keeping all existing functionality working."
1072
+ ;;
1073
+ esac
1074
+
1075
+ GOAL="$GOAL
1076
+
1077
+ IMPORTANT — Compound quality review found issues (route: ${route}). Fix ALL of these:
1078
+ $feedback_content
1079
+
1080
+ ${route_instruction}"
1081
+
1082
+ # Re-run self-healing build→test
1083
+ info "Rebuilding with quality feedback (route: ${route})..."
1084
+ if self_healing_build_test; then
1085
+ GOAL="$original_goal"
1086
+ return 0
1087
+ else
1088
+ GOAL="$original_goal"
1089
+ return 1
1090
+ fi
1091
+ }
1092
+
1093
+ # ──────────────────────────────────────────────────────────────────────────────
1094
+ # Bash 3.2 Compatibility Check
1095
+
1096
+ echo "$untested_functions"
1097
+ }
1098
+
1099
+ stage_compound_quality() {
1100
+ CURRENT_STAGE_ID="compound_quality"
1101
+
1102
+ # Pre-check: verify meaningful changes exist before running expensive quality checks
1103
+ local _cq_real_changes
1104
+ _cq_real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
1105
+ -- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
1106
+ ':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
1107
+ ':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
1108
+ if [[ "${_cq_real_changes:-0}" -eq 0 ]]; then
1109
+ error "Compound quality: no meaningful code changes found — failing quality gate"
1110
+ return 1
1111
+ fi
1112
+
1113
+ # Read config
1114
+ local max_cycles adversarial_enabled negative_enabled e2e_enabled dod_enabled strict_quality
1115
+ max_cycles=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.max_cycles) // 3' "$PIPELINE_CONFIG" 2>/dev/null) || true
1116
+ [[ -z "$max_cycles" || "$max_cycles" == "null" ]] && max_cycles=3
1117
+ adversarial_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.adversarial) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
1118
+ negative_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.negative) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
1119
+ e2e_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.e2e) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
1120
+ dod_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.dod_audit) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
1121
+ strict_quality=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.strict_quality) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
1122
+ [[ -z "$strict_quality" || "$strict_quality" == "null" ]] && strict_quality="false"
1123
+
1124
+ # Intelligent audit selection
1125
+ local audit_plan='{"adversarial":"targeted","architecture":"targeted","simulation":"targeted","security":"targeted","dod":"targeted"}'
1126
+ if type pipeline_select_audits &>/dev/null 2>&1; then
1127
+ local _selected
1128
+ _selected=$(pipeline_select_audits 2>/dev/null) || true
1129
+ if [[ -n "$_selected" && "$_selected" != "null" ]]; then
1130
+ audit_plan="$_selected"
1131
+ info "Audit plan: $(echo "$audit_plan" | jq -c '.' 2>/dev/null || echo "$audit_plan")"
1132
+ fi
1133
+ fi
1134
+
1135
+ # Track findings for quality score
1136
+ local total_critical=0 total_major=0 total_minor=0
1137
+ local audits_run_list=""
1138
+
1139
+ # ── HARDENED QUALITY GATES (RUN BEFORE CYCLES) ──
1140
+ # These checks must pass before we even start the audit cycles
1141
+ echo ""
1142
+ info "Running hardened quality gate checks..."
1143
+
1144
+ # 1. Bash 3.2 compatibility check
1145
+ local bash_violations=0
1146
+ bash_violations=$(run_bash_compat_check 2>/dev/null) || bash_violations=0
1147
+ bash_violations="${bash_violations:-0}"
1148
+
1149
+ if [[ "$strict_quality" == "true" && "$bash_violations" -gt 0 ]]; then
1150
+ error "STRICT QUALITY: Bash 3.2 incompatibilities found — blocking"
1151
+ emit_event "quality.bash_compat_failed" \
1152
+ "issue=${ISSUE_NUMBER:-0}" \
1153
+ "violations=$bash_violations"
1154
+ return 1
1155
+ fi
1156
+
1157
+ if [[ "$bash_violations" -gt 0 ]]; then
1158
+ warn "Bash 3.2 incompatibilities detected: ${bash_violations} (will impact quality score)"
1159
+ total_minor=$((total_minor + bash_violations))
1160
+ else
1161
+ success "Bash 3.2 compatibility: clean"
1162
+ fi
1163
+
1164
+ # 2. Test coverage check
1165
+ local coverage_pct=0
1166
+ coverage_pct=$(run_test_coverage_check 2>/dev/null) || coverage_pct=0
1167
+ coverage_pct="${coverage_pct:-0}"
1168
+
1169
+ if [[ "$coverage_pct" != "skip" ]]; then
1170
+ if [[ "$coverage_pct" -lt "${PIPELINE_COVERAGE_THRESHOLD:-60}" ]]; then
1171
+ if [[ "$strict_quality" == "true" ]]; then
1172
+ error "STRICT QUALITY: Test coverage below ${PIPELINE_COVERAGE_THRESHOLD:-60}% (${coverage_pct}%) — blocking"
1173
+ emit_event "quality.coverage_failed" \
1174
+ "issue=${ISSUE_NUMBER:-0}" \
1175
+ "coverage=$coverage_pct"
1176
+ return 1
1177
+ else
1178
+ warn "Test coverage below ${PIPELINE_COVERAGE_THRESHOLD:-60}% threshold (${coverage_pct}%) — quality penalty applied"
1179
+ total_major=$((total_major + 2))
1180
+ fi
1181
+ fi
1182
+ fi
1183
+
1184
+ # 3. New functions without tests check
1185
+ local untested_functions=0
1186
+ untested_functions=$(run_new_function_test_check 2>/dev/null) || untested_functions=0
1187
+ untested_functions="${untested_functions:-0}"
1188
+
1189
+ if [[ "$untested_functions" -gt 0 ]]; then
1190
+ if [[ "$strict_quality" == "true" ]]; then
1191
+ error "STRICT QUALITY: ${untested_functions} new function(s) without tests — blocking"
1192
+ emit_event "quality.untested_functions" \
1193
+ "issue=${ISSUE_NUMBER:-0}" \
1194
+ "count=$untested_functions"
1195
+ return 1
1196
+ else
1197
+ warn "New functions without corresponding tests: ${untested_functions}"
1198
+ total_major=$((total_major + untested_functions))
1199
+ fi
1200
+ fi
1201
+
1202
+ # 4. Atomic write violations (optional, informational in most modes)
1203
+ local atomic_violations=0
1204
+ atomic_violations=$(run_atomic_write_check 2>/dev/null) || atomic_violations=0
1205
+ atomic_violations="${atomic_violations:-0}"
1206
+
1207
+ if [[ "$atomic_violations" -gt 0 ]]; then
1208
+ warn "Atomic write violations: ${atomic_violations} (state/config file patterns)"
1209
+ total_minor=$((total_minor + atomic_violations))
1210
+ fi
1211
+
1212
+ # Vitals-driven adaptive cycle limit (preferred)
1213
+ local base_max_cycles="$max_cycles"
1214
+ if type pipeline_adaptive_limit &>/dev/null 2>&1; then
1215
+ local _cq_vitals=""
1216
+ if type pipeline_compute_vitals &>/dev/null 2>&1; then
1217
+ _cq_vitals=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
1218
+ fi
1219
+ local vitals_cq_limit
1220
+ vitals_cq_limit=$(pipeline_adaptive_limit "compound_quality" "$_cq_vitals" 2>/dev/null) || true
1221
+ if [[ -n "$vitals_cq_limit" && "$vitals_cq_limit" =~ ^[0-9]+$ && "$vitals_cq_limit" -gt 0 ]]; then
1222
+ max_cycles="$vitals_cq_limit"
1223
+ if [[ "$max_cycles" != "$base_max_cycles" ]]; then
1224
+ info "Vitals-driven cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
1225
+ fi
1226
+ fi
1227
+ else
1228
+ # Fallback: adaptive cycle limits from optimization data
1229
+ local _cq_iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
1230
+ if [[ -f "$_cq_iter_model" ]]; then
1231
+ local adaptive_limit
1232
+ adaptive_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "0" "-1" 2>/dev/null) || true
1233
+ if [[ -n "$adaptive_limit" && "$adaptive_limit" =~ ^[0-9]+$ && "$adaptive_limit" -gt 0 ]]; then
1234
+ max_cycles="$adaptive_limit"
1235
+ if [[ "$max_cycles" != "$base_max_cycles" ]]; then
1236
+ info "Adaptive cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
1237
+ fi
1238
+ fi
1239
+ fi
1240
+ fi
1241
+
1242
+ # Convergence tracking
1243
+ local prev_issue_count=-1
1244
+
1245
+ local cycle=0
1246
+ while [[ "$cycle" -lt "$max_cycles" ]]; do
1247
+ cycle=$((cycle + 1))
1248
+ local all_passed=true
1249
+
1250
+ echo ""
1251
+ echo -e "${PURPLE}${BOLD}━━━ Compound Quality — Cycle ${cycle}/${max_cycles} ━━━${RESET}"
1252
+
1253
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1254
+ gh_comment_issue "$ISSUE_NUMBER" "🔬 **Compound quality** — cycle ${cycle}/${max_cycles}" 2>/dev/null || true
1255
+ fi
1256
+
1257
+ # 1. Adversarial Review
1258
+ local _adv_intensity
1259
+ _adv_intensity=$(echo "$audit_plan" | jq -r '.adversarial // "targeted"' 2>/dev/null || echo "targeted")
1260
+ if [[ "$adversarial_enabled" == "true" && "$_adv_intensity" != "off" ]]; then
1261
+ echo ""
1262
+ info "Running adversarial review (${_adv_intensity})..."
1263
+ audits_run_list="${audits_run_list:+${audits_run_list},}adversarial"
1264
+ if ! run_adversarial_review; then
1265
+ all_passed=false
1266
+ fi
1267
+ fi
1268
+
1269
+ # 2. Negative Prompting
1270
+ if [[ "$negative_enabled" == "true" ]]; then
1271
+ echo ""
1272
+ info "Running negative prompting..."
1273
+ if ! run_negative_prompting; then
1274
+ all_passed=false
1275
+ fi
1276
+ fi
1277
+
1278
+ # 3. Developer Simulation (intelligence module)
1279
+ if type simulation_review &>/dev/null 2>&1; then
1280
+ local sim_enabled
1281
+ sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1282
+ local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
1283
+ if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
1284
+ sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
1285
+ fi
1286
+ if [[ "$sim_enabled" == "true" ]]; then
1287
+ echo ""
1288
+ info "Running developer simulation review..."
1289
+ local sim_diff
1290
+ sim_diff=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
1291
+ if [[ -n "$sim_diff" ]]; then
1292
+ local sim_result
1293
+ sim_result=$(simulation_review "$sim_diff" "${GOAL:-}" 2>/dev/null || echo "[]")
1294
+ if [[ -n "$sim_result" && "$sim_result" != "[]" && "$sim_result" != *'"error"'* ]]; then
1295
+ echo "$sim_result" > "$ARTIFACTS_DIR/compound-simulation-review.json"
1296
+ local sim_critical
1297
+ sim_critical=$(echo "$sim_result" | jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
1298
+ local sim_total
1299
+ sim_total=$(echo "$sim_result" | jq 'length' 2>/dev/null || echo "0")
1300
+ if [[ "$sim_critical" -gt 0 ]]; then
1301
+ warn "Developer simulation: ${sim_critical} critical/high concerns (${sim_total} total)"
1302
+ all_passed=false
1303
+ else
1304
+ success "Developer simulation: ${sim_total} concerns (none critical/high)"
1305
+ fi
1306
+ emit_event "compound.simulation" \
1307
+ "issue=${ISSUE_NUMBER:-0}" \
1308
+ "cycle=$cycle" \
1309
+ "total=$sim_total" \
1310
+ "critical=$sim_critical"
1311
+ else
1312
+ success "Developer simulation: no concerns"
1313
+ fi
1314
+ fi
1315
+ fi
1316
+ fi
1317
+
1318
+ # 4. Architecture Enforcer (intelligence module)
1319
+ if type architecture_validate_changes &>/dev/null 2>&1; then
1320
+ local arch_enabled
1321
+ arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
1322
+ local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
1323
+ if [[ "$arch_enabled" != "true" && -f "$daemon_cfg" ]]; then
1324
+ arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
1325
+ fi
1326
+ if [[ "$arch_enabled" == "true" ]]; then
1327
+ echo ""
1328
+ info "Running architecture validation..."
1329
+ local arch_diff
1330
+ arch_diff=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
1331
+ if [[ -n "$arch_diff" ]]; then
1332
+ local arch_result
1333
+ arch_result=$(architecture_validate_changes "$arch_diff" "" 2>/dev/null || echo "[]")
1334
+ if [[ -n "$arch_result" && "$arch_result" != "[]" && "$arch_result" != *'"error"'* ]]; then
1335
+ echo "$arch_result" > "$ARTIFACTS_DIR/compound-architecture-validation.json"
1336
+ local arch_violations
1337
+ arch_violations=$(echo "$arch_result" | jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
1338
+ local arch_total
1339
+ arch_total=$(echo "$arch_result" | jq 'length' 2>/dev/null || echo "0")
1340
+ if [[ "$arch_violations" -gt 0 ]]; then
1341
+ warn "Architecture validation: ${arch_violations} critical/high violations (${arch_total} total)"
1342
+ all_passed=false
1343
+ else
1344
+ success "Architecture validation: ${arch_total} violations (none critical/high)"
1345
+ fi
1346
+ emit_event "compound.architecture" \
1347
+ "issue=${ISSUE_NUMBER:-0}" \
1348
+ "cycle=$cycle" \
1349
+ "total=$arch_total" \
1350
+ "violations=$arch_violations"
1351
+ else
1352
+ success "Architecture validation: no violations"
1353
+ fi
1354
+ fi
1355
+ fi
1356
+ fi
1357
+
1358
+ # 5. E2E Validation
1359
+ if [[ "$e2e_enabled" == "true" ]]; then
1360
+ echo ""
1361
+ info "Running E2E validation..."
1362
+ if ! run_e2e_validation; then
1363
+ all_passed=false
1364
+ fi
1365
+ fi
1366
+
1367
+ # 6. DoD Audit
1368
+ local _dod_intensity
1369
+ _dod_intensity=$(echo "$audit_plan" | jq -r '.dod // "targeted"' 2>/dev/null || echo "targeted")
1370
+ if [[ "$dod_enabled" == "true" && "$_dod_intensity" != "off" ]]; then
1371
+ echo ""
1372
+ info "Running Definition of Done audit (${_dod_intensity})..."
1373
+ audits_run_list="${audits_run_list:+${audits_run_list},}dod"
1374
+ if ! run_dod_audit; then
1375
+ all_passed=false
1376
+ fi
1377
+ fi
1378
+
1379
+ # 6b. Security Source Scan
1380
+ local _sec_intensity
1381
+ _sec_intensity=$(echo "$audit_plan" | jq -r '.security // "targeted"' 2>/dev/null || echo "targeted")
1382
+ if [[ "$_sec_intensity" != "off" ]]; then
1383
+ echo ""
1384
+ info "Running security source scan (${_sec_intensity})..."
1385
+ audits_run_list="${audits_run_list:+${audits_run_list},}security"
1386
+ local sec_finding_count=0
1387
+ sec_finding_count=$(pipeline_security_source_scan 2>/dev/null) || true
1388
+ sec_finding_count="${sec_finding_count:-0}"
1389
+ if [[ "$sec_finding_count" -gt 0 ]]; then
1390
+ warn "Security source scan: ${sec_finding_count} finding(s)"
1391
+ total_critical=$((total_critical + sec_finding_count))
1392
+ all_passed=false
1393
+ else
1394
+ success "Security source scan: clean"
1395
+ fi
1396
+ fi
1397
+
1398
+ # 7. Multi-dimensional quality checks
1399
+ echo ""
1400
+ info "Running multi-dimensional quality checks..."
1401
+ local quality_failures=0
1402
+
1403
+ if ! quality_check_security; then
1404
+ quality_failures=$((quality_failures + 1))
1405
+ fi
1406
+ if ! quality_check_coverage; then
1407
+ quality_failures=$((quality_failures + 1))
1408
+ fi
1409
+ if ! quality_check_perf_regression; then
1410
+ quality_failures=$((quality_failures + 1))
1411
+ fi
1412
+ if ! quality_check_bundle_size; then
1413
+ quality_failures=$((quality_failures + 1))
1414
+ fi
1415
+ if ! quality_check_api_compat; then
1416
+ quality_failures=$((quality_failures + 1))
1417
+ fi
1418
+
1419
+ if [[ "$quality_failures" -gt 0 ]]; then
1420
+ if [[ "$strict_quality" == "true" ]]; then
1421
+ warn "Multi-dimensional quality: ${quality_failures} check(s) failed (strict mode — blocking)"
1422
+ all_passed=false
1423
+ else
1424
+ warn "Multi-dimensional quality: ${quality_failures} check(s) failed (non-blocking)"
1425
+ fi
1426
+ else
1427
+ success "Multi-dimensional quality: all checks passed"
1428
+ fi
1429
+
1430
+ # ── Convergence Detection ──
1431
+ # Count critical/high issues from all review artifacts
1432
+ local current_issue_count=0
1433
+ if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
1434
+ local adv_issues
1435
+ adv_issues=$(grep -ciE '\*\*\[?(Critical|Bug|critical|high)\]?\*\*' "$ARTIFACTS_DIR/adversarial-review.md" 2>/dev/null || true)
1436
+ current_issue_count=$((current_issue_count + ${adv_issues:-0}))
1437
+ fi
1438
+ if [[ -f "$ARTIFACTS_DIR/adversarial-review.json" ]]; then
1439
+ local adv_json_issues
1440
+ adv_json_issues=$(jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
1441
+ current_issue_count=$((current_issue_count + ${adv_json_issues:-0}))
1442
+ fi
1443
+ if [[ -f "$ARTIFACTS_DIR/negative-review.md" ]]; then
1444
+ local neg_issues
1445
+ neg_issues=$(grep -ciE '\[Critical\]' "$ARTIFACTS_DIR/negative-review.md" 2>/dev/null || true)
1446
+ current_issue_count=$((current_issue_count + ${neg_issues:-0}))
1447
+ fi
1448
+ current_issue_count=$((current_issue_count + quality_failures))
1449
+
1450
+ emit_event "compound.cycle" \
1451
+ "issue=${ISSUE_NUMBER:-0}" \
1452
+ "cycle=$cycle" \
1453
+ "max_cycles=$max_cycles" \
1454
+ "passed=$all_passed" \
1455
+ "critical_issues=$current_issue_count" \
1456
+ "self_heal_count=$SELF_HEAL_COUNT"
1457
+
1458
+ # Early exit: zero critical/high issues
1459
+ if [[ "$current_issue_count" -eq 0 ]] && $all_passed; then
1460
+ success "Compound quality passed on cycle ${cycle} — zero critical/high issues"
1461
+
1462
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1463
+ gh_comment_issue "$ISSUE_NUMBER" "✅ **Compound quality passed** — cycle ${cycle}/${max_cycles}
1464
+
1465
+ All quality checks clean:
1466
+ - Adversarial review: ✅
1467
+ - Negative prompting: ✅
1468
+ - Developer simulation: ✅
1469
+ - Architecture validation: ✅
1470
+ - E2E validation: ✅
1471
+ - DoD audit: ✅
1472
+ - Security audit: ✅
1473
+ - Coverage: ✅
1474
+ - Performance: ✅
1475
+ - Bundle size: ✅
1476
+ - API compat: ✅" 2>/dev/null || true
1477
+ fi
1478
+
1479
+ log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
1480
+
1481
+ # DoD verification on successful pass
1482
+ local _dod_pass_rate=100
1483
+ if type pipeline_verify_dod &>/dev/null 2>&1; then
1484
+ pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
1485
+ if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
1486
+ _dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
1487
+ fi
1488
+ fi
1489
+
1490
+ pipeline_record_quality_score 100 0 0 0 "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
1491
+ return 0
1492
+ fi
1493
+
1494
+ if $all_passed; then
1495
+ success "Compound quality passed on cycle ${cycle}"
1496
+
1497
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1498
+ gh_comment_issue "$ISSUE_NUMBER" "✅ **Compound quality passed** — cycle ${cycle}/${max_cycles}" 2>/dev/null || true
1499
+ fi
1500
+
1501
+ log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
1502
+
1503
+ # DoD verification on successful pass
1504
+ local _dod_pass_rate=100
1505
+ if type pipeline_verify_dod &>/dev/null 2>&1; then
1506
+ pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
1507
+ if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
1508
+ _dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
1509
+ fi
1510
+ fi
1511
+
1512
+ pipeline_record_quality_score 95 0 "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
1513
+ return 0
1514
+ fi
1515
+
1516
+ # Check for plateau: issue count unchanged between cycles
1517
+ if [[ "$prev_issue_count" -ge 0 && "$current_issue_count" -eq "$prev_issue_count" && "$cycle" -gt 1 ]]; then
1518
+ warn "Convergence: quality plateau — ${current_issue_count} issues unchanged between cycles"
1519
+ emit_event "compound.plateau" \
1520
+ "issue=${ISSUE_NUMBER:-0}" \
1521
+ "cycle=$cycle" \
1522
+ "issue_count=$current_issue_count"
1523
+
1524
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1525
+ gh_comment_issue "$ISSUE_NUMBER" "⚠️ **Compound quality plateau** — ${current_issue_count} issues unchanged after cycle ${cycle}. Stopping early." 2>/dev/null || true
1526
+ fi
1527
+
1528
+ log_stage "compound_quality" "Plateau at cycle ${cycle}/${max_cycles} (${current_issue_count} issues)"
1529
+ return 1
1530
+ fi
1531
+ prev_issue_count="$current_issue_count"
1532
+
1533
+ info "Convergence: ${current_issue_count} critical/high issues remaining"
1534
+
1535
+ # Intelligence: re-evaluate adaptive cycle limit based on convergence (only after first cycle)
1536
+ if [[ "$prev_issue_count" -ge 0 ]]; then
1537
+ local updated_limit
1538
+ updated_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "$current_issue_count" "$prev_issue_count" 2>/dev/null) || true
1539
+ if [[ -n "$updated_limit" && "$updated_limit" =~ ^[0-9]+$ && "$updated_limit" -gt 0 && "$updated_limit" != "$max_cycles" ]]; then
1540
+ info "Adaptive cycles: ${max_cycles} → ${updated_limit} (convergence signal)"
1541
+ max_cycles="$updated_limit"
1542
+ fi
1543
+ fi
1544
+
1545
+ # Not all passed — rebuild if we have cycles left
1546
+ if [[ "$cycle" -lt "$max_cycles" ]]; then
1547
+ warn "Quality checks failed — rebuilding with feedback (cycle $((cycle + 1))/${max_cycles})"
1548
+
1549
+ if ! compound_rebuild_with_feedback; then
1550
+ error "Rebuild with feedback failed"
1551
+ log_stage "compound_quality" "Rebuild failed on cycle ${cycle}"
1552
+ return 1
1553
+ fi
1554
+
1555
+ # Re-run review stage too (since code changed)
1556
+ info "Re-running review after rebuild..."
1557
+ stage_review 2>/dev/null || true
1558
+ fi
1559
+ done
1560
+
1561
+ # ── Quality Score Computation ──
1562
+ # Starting score: 100, deductions based on findings
1563
+ local quality_score=100
1564
+
1565
+ # Count findings from artifact files
1566
+ if [[ -f "$ARTIFACTS_DIR/security-source-scan.json" ]]; then
1567
+ local _sec_critical
1568
+ _sec_critical=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
1569
+ local _sec_major
1570
+ _sec_major=$(jq '[.[] | select(.severity == "major")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
1571
+ total_critical=$((total_critical + ${_sec_critical:-0}))
1572
+ total_major=$((total_major + ${_sec_major:-0}))
1573
+ fi
1574
+ if [[ -f "$ARTIFACTS_DIR/adversarial-review.json" ]]; then
1575
+ local _adv_crit
1576
+ _adv_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
1577
+ local _adv_major
1578
+ _adv_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
1579
+ local _adv_minor
1580
+ _adv_minor=$(jq '[.[] | select(.severity == "low" or .severity == "minor")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
1581
+ total_critical=$((total_critical + ${_adv_crit:-0}))
1582
+ total_major=$((total_major + ${_adv_major:-0}))
1583
+ total_minor=$((total_minor + ${_adv_minor:-0}))
1584
+ fi
1585
+ if [[ -f "$ARTIFACTS_DIR/compound-architecture-validation.json" ]]; then
1586
+ local _arch_crit
1587
+ _arch_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
1588
+ local _arch_major
1589
+ _arch_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
1590
+ total_major=$((total_major + ${_arch_crit:-0} + ${_arch_major:-0}))
1591
+ fi
1592
+
1593
+ # Apply deductions
1594
+ quality_score=$((quality_score - (total_critical * 20) - (total_major * 10) - (total_minor * 2)))
1595
+ [[ "$quality_score" -lt 0 ]] && quality_score=0
1596
+
1597
+ # DoD verification
1598
+ local _dod_pass_rate=0
1599
+ if type pipeline_verify_dod &>/dev/null 2>&1; then
1600
+ pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
1601
+ if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
1602
+ _dod_pass_rate=$(jq -r '.pass_rate // 0' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "0")
1603
+ fi
1604
+ fi
1605
+
1606
+ # Record quality score
1607
+ pipeline_record_quality_score "$quality_score" "$total_critical" "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
1608
+
1609
+ # ── Quality Gate (HARDENED) ──
1610
+ local compound_quality_blocking
1611
+ compound_quality_blocking=$(jq -r --arg id "compound_quality" \
1612
+ '(.stages[] | select(.id == $id) | .config.compound_quality_blocking) // true' \
1613
+ "$PIPELINE_CONFIG" 2>/dev/null) || true
1614
+ [[ -z "$compound_quality_blocking" || "$compound_quality_blocking" == "null" ]] && compound_quality_blocking="true"
1615
+
1616
+ # HARDENED THRESHOLD: quality_score must be >= 60 (non-strict) or policy threshold (strict) to pass
1617
+ local min_threshold=60
1618
+ if [[ "$strict_quality" == "true" ]]; then
1619
+ min_threshold="${PIPELINE_QUALITY_GATE_THRESHOLD:-70}"
1620
+ # Strict mode: require score >= threshold and ZERO critical issues
1621
+ if [[ "$total_critical" -gt 0 ]]; then
1622
+ error "STRICT QUALITY: ${total_critical} critical issue(s) found — BLOCKING (strict mode)"
1623
+ emit_event "pipeline.quality_gate_failed_strict" \
1624
+ "issue=${ISSUE_NUMBER:-0}" \
1625
+ "reason=critical_issues" \
1626
+ "critical=$total_critical"
1627
+ log_stage "compound_quality" "Quality gate failed (strict mode): critical issues"
1628
+ return 1
1629
+ fi
1630
+ min_threshold=70
1631
+ fi
1632
+
1633
+ # Hard floor: score must be >= 40, regardless of other settings
1634
+ if [[ "$quality_score" -lt 40 ]]; then
1635
+ error "HARDENED GATE: Quality score ${quality_score}/100 below hard floor (40) — BLOCKING"
1636
+ emit_event "quality.hard_floor_failed" \
1637
+ "issue=${ISSUE_NUMBER:-0}" \
1638
+ "quality_score=$quality_score"
1639
+ log_stage "compound_quality" "Quality gate failed: score below hard floor (40)"
1640
+ return 1
1641
+ fi
1642
+
1643
+ if [[ "$quality_score" -lt "$min_threshold" && "$compound_quality_blocking" == "true" ]]; then
1644
+ emit_event "pipeline.quality_gate_failed" \
1645
+ "issue=${ISSUE_NUMBER:-0}" \
1646
+ "quality_score=$quality_score" \
1647
+ "threshold=$min_threshold" \
1648
+ "critical=$total_critical" \
1649
+ "major=$total_major"
1650
+
1651
+ error "Quality gate FAILED: score ${quality_score}/100 (threshold: ${min_threshold}/100, critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
1652
+
1653
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1654
+ gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/${min_threshold}
1655
+
1656
+ | Finding Type | Count | Deduction |
1657
+ |---|---|---|
1658
+ | Critical | ${total_critical} | -$((total_critical * 20)) |
1659
+ | Major | ${total_major} | -$((total_major * 10)) |
1660
+ | Minor | ${total_minor} | -$((total_minor * 2)) |
1661
+
1662
+ DoD pass rate: ${_dod_pass_rate}%
1663
+ Quality issues remain after ${max_cycles} cycles. Check artifacts for details." 2>/dev/null || true
1664
+ fi
1665
+
1666
+ log_stage "compound_quality" "Quality gate failed: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
1667
+ return 1
1668
+ fi
1669
+
1670
+ # Exhausted all cycles but quality score is at or above threshold
1671
+ if [[ "$quality_score" -ge "$min_threshold" ]]; then
1672
+ if [[ "$quality_score" -eq 100 ]]; then
1673
+ success "Compound quality PERFECT: 100/100"
1674
+ elif [[ "$quality_score" -ge 80 ]]; then
1675
+ success "Compound quality EXCELLENT: ${quality_score}/100"
1676
+ elif [[ "$quality_score" -ge 70 ]]; then
1677
+ success "Compound quality GOOD: ${quality_score}/100"
1678
+ else
1679
+ warn "Compound quality ACCEPTABLE: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
1680
+ fi
1681
+
1682
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1683
+ local quality_emoji="✅"
1684
+ [[ "$quality_score" -lt 70 ]] && quality_emoji="⚠️"
1685
+ gh_comment_issue "$ISSUE_NUMBER" "${quality_emoji} **Compound quality passed** — score ${quality_score}/${min_threshold} after ${max_cycles} cycles
1686
+
1687
+ | Finding Type | Count |
1688
+ |---|---|
1689
+ | Critical | ${total_critical} |
1690
+ | Major | ${total_major} |
1691
+ | Minor | ${total_minor} |
1692
+
1693
+ DoD pass rate: ${_dod_pass_rate}%" 2>/dev/null || true
1694
+ fi
1695
+
1696
+ log_stage "compound_quality" "Passed with score ${quality_score}/${min_threshold} after ${max_cycles} cycles"
1697
+ return 0
1698
+ fi
1699
+
1700
+ error "Compound quality exhausted after ${max_cycles} cycles with insufficient score"
1701
+
1702
+ if [[ -n "$ISSUE_NUMBER" ]]; then
1703
+ gh_comment_issue "$ISSUE_NUMBER" "❌ **Compound quality failed** after ${max_cycles} cycles
1704
+
1705
+ Quality issues remain. Check artifacts for details." 2>/dev/null || true
1706
+ fi
1707
+
1708
+ log_stage "compound_quality" "Failed after ${max_cycles} cycles"
1709
+ return 1
1710
+ }
1711
+
1712
+ # ─── Error Classification ──────────────────────────────────────────────────