shipwright-cli 2.2.0 → 2.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +15 -16
- package/config/policy.schema.json +104 -29
- package/docs/AGI-PLATFORM-PLAN.md +11 -7
- package/docs/AGI-WHATS-NEXT.md +26 -20
- package/docs/README.md +2 -0
- package/package.json +1 -1
- package/scripts/check-version-consistency.sh +72 -0
- package/scripts/lib/daemon-adaptive.sh +610 -0
- package/scripts/lib/daemon-dispatch.sh +489 -0
- package/scripts/lib/daemon-failure.sh +387 -0
- package/scripts/lib/daemon-patrol.sh +1113 -0
- package/scripts/lib/daemon-poll.sh +1202 -0
- package/scripts/lib/daemon-state.sh +550 -0
- package/scripts/lib/daemon-triage.sh +490 -0
- package/scripts/lib/helpers.sh +81 -1
- package/scripts/lib/pipeline-detection.sh +278 -0
- package/scripts/lib/pipeline-github.sh +196 -0
- package/scripts/lib/pipeline-intelligence.sh +1706 -0
- package/scripts/lib/pipeline-quality-checks.sh +1054 -0
- package/scripts/lib/pipeline-quality.sh +11 -0
- package/scripts/lib/pipeline-stages.sh +2508 -0
- package/scripts/lib/pipeline-state.sh +529 -0
- package/scripts/sw +26 -4
- package/scripts/sw-activity.sh +1 -1
- package/scripts/sw-adaptive.sh +2 -2
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-auth.sh +1 -1
- package/scripts/sw-autonomous.sh +1 -1
- package/scripts/sw-changelog.sh +1 -1
- package/scripts/sw-checkpoint.sh +1 -1
- package/scripts/sw-ci.sh +1 -1
- package/scripts/sw-cleanup.sh +1 -1
- package/scripts/sw-code-review.sh +1 -1
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-context.sh +1 -1
- package/scripts/sw-cost.sh +1 -1
- package/scripts/sw-daemon.sh +52 -4816
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-db.sh +1 -1
- package/scripts/sw-decompose.sh +1 -1
- package/scripts/sw-deps.sh +1 -1
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-discovery.sh +1 -1
- package/scripts/sw-doc-fleet.sh +1 -1
- package/scripts/sw-docs-agent.sh +1 -1
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +42 -1
- package/scripts/sw-dora.sh +1 -1
- package/scripts/sw-durable.sh +1 -1
- package/scripts/sw-e2e-orchestrator.sh +1 -1
- package/scripts/sw-eventbus.sh +1 -1
- package/scripts/sw-feedback.sh +1 -1
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet-discover.sh +1 -1
- package/scripts/sw-fleet-viz.sh +3 -3
- package/scripts/sw-fleet.sh +1 -1
- package/scripts/sw-github-app.sh +1 -1
- package/scripts/sw-github-checks.sh +1 -1
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-guild.sh +1 -1
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-hygiene.sh +1 -1
- package/scripts/sw-incident.sh +1 -1
- package/scripts/sw-init.sh +1 -1
- package/scripts/sw-instrument.sh +1 -1
- package/scripts/sw-intelligence.sh +1 -1
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +1 -1
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +1 -1
- package/scripts/sw-memory.sh +1 -1
- package/scripts/sw-mission-control.sh +1 -1
- package/scripts/sw-model-router.sh +1 -1
- package/scripts/sw-otel.sh +4 -4
- package/scripts/sw-oversight.sh +1 -1
- package/scripts/sw-pipeline-composer.sh +1 -1
- package/scripts/sw-pipeline-vitals.sh +1 -1
- package/scripts/sw-pipeline.sh +23 -56
- package/scripts/sw-pipeline.sh.mock +7 -0
- package/scripts/sw-pm.sh +1 -1
- package/scripts/sw-pr-lifecycle.sh +1 -1
- package/scripts/sw-predictive.sh +1 -1
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +1 -1
- package/scripts/sw-public-dashboard.sh +1 -1
- package/scripts/sw-quality.sh +1 -1
- package/scripts/sw-reaper.sh +1 -1
- package/scripts/sw-recruit.sh +9 -1
- package/scripts/sw-regression.sh +1 -1
- package/scripts/sw-release-manager.sh +1 -1
- package/scripts/sw-release.sh +1 -1
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-replay.sh +1 -1
- package/scripts/sw-retro.sh +1 -1
- package/scripts/sw-scale.sh +8 -5
- package/scripts/sw-security-audit.sh +1 -1
- package/scripts/sw-self-optimize.sh +158 -7
- package/scripts/sw-session.sh +1 -1
- package/scripts/sw-setup.sh +1 -1
- package/scripts/sw-standup.sh +3 -3
- package/scripts/sw-status.sh +1 -1
- package/scripts/sw-strategic.sh +1 -1
- package/scripts/sw-stream.sh +8 -2
- package/scripts/sw-swarm.sh +7 -10
- package/scripts/sw-team-stages.sh +1 -1
- package/scripts/sw-templates.sh +1 -1
- package/scripts/sw-testgen.sh +1 -1
- package/scripts/sw-tmux-pipeline.sh +1 -1
- package/scripts/sw-tmux.sh +1 -1
- package/scripts/sw-trace.sh +1 -1
- package/scripts/sw-tracker.sh +24 -6
- package/scripts/sw-triage.sh +1 -1
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-ux.sh +1 -1
- package/scripts/sw-webhook.sh +1 -1
- package/scripts/sw-widgets.sh +1 -1
- package/scripts/sw-worktree.sh +1 -1
|
@@ -0,0 +1,1706 @@
|
|
|
1
|
+
# pipeline-intelligence.sh — Skip/adaptive/audits/DoD/security/compound_quality for sw-pipeline.sh
# Source from sw-pipeline.sh. Requires pipeline-quality-checks, state, ARTIFACTS_DIR, PIPELINE_CONFIG.
#
# Idempotent source guard: when this library has already been loaded into the
# current shell, bail out immediately instead of redefining every function.
if [[ -n "${_PIPELINE_INTELLIGENCE_LOADED:-}" ]]; then
  return 0
fi
_PIPELINE_INTELLIGENCE_LOADED=1
|
|
5
|
+
|
|
6
|
+
#######################################
# Decide whether a pipeline stage can be skipped for the current issue.
# Globals (read):
#   INTELLIGENCE_COMPLEXITY - 1-10 complexity score (1=simple, 10=complex)
#   ISSUE_LABELS            - comma-separated issue labels
#   BASE_BRANCH, ARTIFACTS_DIR, ISSUE_NUMBER
# Arguments:
#   $1 - stage id (intake, build, test, plan, design, review,
#        compound_quality, pr, merge, ...)
# Outputs:
#   Prints the skip reason to stdout when the stage should be skipped.
# Returns:
#   0 when the stage can be skipped (reason printed); 1 when it must run.
#######################################
pipeline_should_skip_stage() {
  local stage_id="$1"
  local reason=""

  # Never skip intake/build/test/pr/merge — they're always required.
  case "$stage_id" in
    intake|build|test|pr|merge) return 1 ;;
  esac

  # INTELLIGENCE_COMPLEXITY is 1-10 (1=simple, 10=complex); default to the
  # midpoint when the intelligence analysis did not run.
  # (Fix: removed the dead local `triage_score`, which duplicated this value
  # and was never read.)
  local complexity="${INTELLIGENCE_COMPLEXITY:-5}"

  # ── Signal 1: Issue labels ──
  local labels="${ISSUE_LABELS:-}"

  # Documentation issues: skip review/compound_quality. (The `test` case here
  # is unreachable — it is already excluded by the never-skip list above.)
  if echo ",$labels," | grep -qiE ',documentation,|,docs,|,typo,'; then
    case "$stage_id" in
      test|review|compound_quality)
        reason="label:documentation"
        ;;
    esac
  fi

  # Hotfix issues: skip plan, design, compound_quality
  if echo ",$labels," | grep -qiE ',hotfix,|,urgent,|,p0,'; then
    case "$stage_id" in
      plan|design|compound_quality)
        reason="label:hotfix"
        ;;
    esac
  fi

  # ── Signal 2: Intelligence complexity ──
  if [[ -z "$reason" && "$complexity" -gt 0 ]]; then
    if [[ "$complexity" -le 2 ]]; then
      # Complexity 1-2: very simple → skip design, compound_quality, review
      case "$stage_id" in
        design|compound_quality|review)
          reason="complexity:${complexity}/10"
          ;;
      esac
    elif [[ "$complexity" -le 3 ]]; then
      # Complexity 3: simple → skip design only
      case "$stage_id" in
        design)
          reason="complexity:${complexity}/10"
          ;;
      esac
    fi
  fi

  # ── Signal 3: Diff size (only meaningful after build has produced a diff) ──
  if [[ -z "$reason" && "$stage_id" == "compound_quality" ]]; then
    local diff_lines=0
    local _skip_stat
    _skip_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
    if [[ -n "${_skip_stat:-}" ]]; then
      # Pull "N insertions"/"N deletions" counts out of the --stat summary.
      local _s_ins _s_del
      _s_ins=$(echo "$_skip_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
      _s_del=$(echo "$_skip_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
      diff_lines=$(( ${_s_ins:-0} + ${_s_del:-0} ))
    fi
    diff_lines="${diff_lines:-0}"
    # Tiny diffs (<20 changed lines) don't warrant the full quality gauntlet.
    if [[ "$diff_lines" -gt 0 && "$diff_lines" -lt 20 ]]; then
      reason="diff_size:${diff_lines}_lines"
    fi
  fi

  # ── Signal 4: Mid-pipeline reassessment override ──
  # (Fix: ARTIFACTS_DIR expanded with :- so the check is safe under `set -u`.)
  if [[ -z "$reason" && -f "${ARTIFACTS_DIR:-}/reassessment.json" ]]; then
    local skip_stages
    skip_stages=$(jq -r '.skip_stages // [] | .[]' "${ARTIFACTS_DIR:-}/reassessment.json" 2>/dev/null || true)
    if echo "$skip_stages" | grep -qx "$stage_id" 2>/dev/null; then
      reason="reassessment:simpler_than_expected"
    fi
  fi

  if [[ -n "$reason" ]]; then
    emit_event "intelligence.stage_skipped" \
      "issue=${ISSUE_NUMBER:-0}" \
      "stage=$stage_id" \
      "reason=$reason" \
      "complexity=${complexity}" \
      "labels=${labels}"
    echo "$reason"
    return 0
  fi

  return 1
}
|
|
101
|
+
|
|
102
|
+
# ──────────────────────────────────────────────────────────────────────────────
# 2. Smart Finding Classification & Routing
# Parses compound quality findings and classifies each as:
#   architecture, security, correctness, style
# Returns JSON with classified findings and routing recommendations.
# ──────────────────────────────────────────────────────────────────────────────
#######################################
# Classify compound-quality findings by category and pick a fix route.
# Counts category keyword matches across the audit artifacts in
# $ARTIFACTS_DIR (adversarial-review.md, compound-architecture-validation.json,
# security-audit.log, negative-review.md), writes a summary JSON to
# $ARTIFACTS_DIR/classified-findings.json, and prints the chosen route.
# NOTE(review): counts are grep -c line counts, i.e. keyword-matching lines,
# not distinct findings — treat them as a heuristic signal.
# Globals (read): ARTIFACTS_DIR, ISSUE_NUMBER
# Outputs: route name on stdout (security|architecture|correctness|performance|testing)
#######################################
classify_quality_findings() {
  local findings_dir="$ARTIFACTS_DIR"
  local result_file="$ARTIFACTS_DIR/classified-findings.json"

  # Initialize counters
  local arch_count=0 security_count=0 correctness_count=0 performance_count=0 testing_count=0 style_count=0

  # Start building JSON array
  # NOTE(review): findings_json is assigned but never used below — presumably
  # a leftover from an earlier per-finding JSON design; confirm before removal.
  local findings_json="[]"

  # ── Parse adversarial review ──
  if [[ -f "$findings_dir/adversarial-review.md" ]]; then
    local adv_content
    adv_content=$(cat "$findings_dir/adversarial-review.md" 2>/dev/null || true)

    # Architecture findings: dependency violations, layer breaches, circular refs
    local arch_findings
    arch_findings=$(echo "$adv_content" | grep -ciE 'architect|layer.*violation|circular.*depend|coupling|abstraction|design.*flaw|separation.*concern' 2>/dev/null || true)
    arch_count=$((arch_count + ${arch_findings:-0}))

    # Security findings
    local sec_findings
    sec_findings=$(echo "$adv_content" | grep -ciE 'security|vulnerab|injection|XSS|CSRF|auth.*bypass|privilege|sanitiz|escap' 2>/dev/null || true)
    security_count=$((security_count + ${sec_findings:-0}))

    # Correctness findings: bugs, logic errors, edge cases
    local corr_findings
    corr_findings=$(echo "$adv_content" | grep -ciE '\*\*\[?(Critical|Bug|Error|critical|high)\]?\*\*|race.*condition|null.*pointer|off.*by.*one|edge.*case|undefined.*behav' 2>/dev/null || true)
    correctness_count=$((correctness_count + ${corr_findings:-0}))

    # Performance findings
    local perf_findings
    perf_findings=$(echo "$adv_content" | grep -ciE 'latency|slow|memory leak|O\(n|N\+1|cache miss|performance|bottleneck|throughput' 2>/dev/null || true)
    performance_count=$((performance_count + ${perf_findings:-0}))

    # Testing findings
    local test_findings
    test_findings=$(echo "$adv_content" | grep -ciE 'untested|missing test|no coverage|flaky|test gap|test missing|coverage gap' 2>/dev/null || true)
    testing_count=$((testing_count + ${test_findings:-0}))

    # Style findings
    local style_findings
    style_findings=$(echo "$adv_content" | grep -ciE 'naming|convention|format|style|readabil|inconsisten|whitespace|comment' 2>/dev/null || true)
    style_count=$((style_count + ${style_findings:-0}))
  fi

  # ── Parse architecture validation ──
  # Only critical/high entries count toward the architecture total.
  if [[ -f "$findings_dir/compound-architecture-validation.json" ]]; then
    local arch_json_count
    arch_json_count=$(jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' "$findings_dir/compound-architecture-validation.json" 2>/dev/null || echo "0")
    arch_count=$((arch_count + ${arch_json_count:-0}))
  fi

  # ── Parse security audit ──
  if [[ -f "$findings_dir/security-audit.log" ]]; then
    local sec_audit
    sec_audit=$(grep -ciE 'critical|high' "$findings_dir/security-audit.log" 2>/dev/null || true)
    security_count=$((security_count + ${sec_audit:-0}))
  fi

  # ── Parse negative review ──
  if [[ -f "$findings_dir/negative-review.md" ]]; then
    local neg_corr
    neg_corr=$(grep -ciE '\[Critical\]|\[High\]' "$findings_dir/negative-review.md" 2>/dev/null || true)
    correctness_count=$((correctness_count + ${neg_corr:-0}))
  fi

  # ── Determine routing ──
  # Priority order: security > architecture > correctness > performance > testing > style
  local route="correctness" # default
  local needs_backtrack=false
  local priority_findings=""

  if [[ "$security_count" -gt 0 ]]; then
    route="security"
    priority_findings="security:${security_count}"
  fi

  if [[ "$arch_count" -gt 0 ]]; then
    # Architecture only takes the route if security hasn't claimed it; an
    # architecture route also signals that earlier stages should be revisited.
    if [[ "$route" == "correctness" ]]; then
      route="architecture"
      needs_backtrack=true
    fi
    priority_findings="${priority_findings:+${priority_findings},}architecture:${arch_count}"
  fi

  if [[ "$correctness_count" -gt 0 ]]; then
    priority_findings="${priority_findings:+${priority_findings},}correctness:${correctness_count}"
  fi

  if [[ "$performance_count" -gt 0 ]]; then
    # Performance routes only when nothing higher-priority has any findings.
    if [[ "$route" == "correctness" && "$correctness_count" -eq 0 ]]; then
      route="performance"
    fi
    priority_findings="${priority_findings:+${priority_findings},}performance:${performance_count}"
  fi

  if [[ "$testing_count" -gt 0 ]]; then
    if [[ "$route" == "correctness" && "$correctness_count" -eq 0 && "$performance_count" -eq 0 ]]; then
      route="testing"
    fi
    priority_findings="${priority_findings:+${priority_findings},}testing:${testing_count}"
  fi

  # Style findings don't affect routing or count toward failure threshold
  local total_blocking=$((arch_count + security_count + correctness_count + performance_count + testing_count))

  # Write classified findings (write to a temp file, then move atomically;
  # on jq failure the temp file is discarded and the old result kept).
  local tmp_findings
  tmp_findings="$(mktemp)"
  jq -n \
    --argjson arch "$arch_count" \
    --argjson security "$security_count" \
    --argjson correctness "$correctness_count" \
    --argjson performance "$performance_count" \
    --argjson testing "$testing_count" \
    --argjson style "$style_count" \
    --argjson total_blocking "$total_blocking" \
    --arg route "$route" \
    --argjson needs_backtrack "$needs_backtrack" \
    --arg priority "$priority_findings" \
    '{
      architecture: $arch,
      security: $security,
      correctness: $correctness,
      performance: $performance,
      testing: $testing,
      style: $style,
      total_blocking: $total_blocking,
      route: $route,
      needs_backtrack: $needs_backtrack,
      priority_findings: $priority
    }' > "$tmp_findings" 2>/dev/null && mv "$tmp_findings" "$result_file" || rm -f "$tmp_findings"

  emit_event "intelligence.findings_classified" \
    "issue=${ISSUE_NUMBER:-0}" \
    "architecture=$arch_count" \
    "security=$security_count" \
    "correctness=$correctness_count" \
    "performance=$performance_count" \
    "testing=$testing_count" \
    "style=$style_count" \
    "route=$route" \
    "needs_backtrack=$needs_backtrack"

  echo "$route"
}
|
|
255
|
+
|
|
256
|
+
# ──────────────────────────────────────────────────────────────────────────────
# 3. Adaptive Cycle Limits
# Replaces hardcoded max_cycles with convergence-driven limits.
# Takes the base limit, returns an adjusted limit based on:
#   - Learned iteration model
#   - Convergence/divergence signals
#   - Budget constraints
#   - Hard ceiling (2x template max)
# ──────────────────────────────────────────────────────────────────────────────
#######################################
# Compute an adjusted cycle limit for an iterative pipeline phase.
# Globals (read): HOME, ISSUE_NUMBER, IGNORE_BUDGET, SCRIPT_DIR
# Arguments:
#   $1 - base cycle limit from the template
#   $2 - context label: compound_quality (default) or build_test
#   $3 - issue count observed in the current cycle (default 0)
#   $4 - issue count from the previous cycle (default -1 = no data yet)
# Outputs: the adjusted limit on stdout (0 means "stop: budget exhausted").
#######################################
pipeline_adaptive_cycles() {
  local base_limit="$1"
  local context="${2:-compound_quality}" # compound_quality or build_test
  local current_issue_count="${3:-0}"
  local prev_issue_count="${4:--1}"

  local adjusted="$base_limit"
  local hard_ceiling=$((base_limit * 2))

  # ── Learned iteration model ──
  local model_file="${HOME}/.shipwright/optimization/iteration-model.json"
  if [[ -f "$model_file" ]]; then
    local learned
    learned=$(jq -r --arg ctx "$context" '.[$ctx].recommended_cycles // 0' "$model_file" 2>/dev/null || echo "0")
    if [[ "$learned" -gt 0 && "$learned" -le "$hard_ceiling" ]]; then
      adjusted="$learned"
    fi
  fi

  # ── Convergence acceleration / divergence detection ──
  # Both need a real previous data point; prev defaults to -1 on first call.
  if [[ "$prev_issue_count" -gt 0 && "$current_issue_count" -ge 0 ]]; then
    local half_prev=$((prev_issue_count / 2))
    if [[ "$current_issue_count" -le "$half_prev" && "$current_issue_count" -gt 0 ]]; then
      # Issue count dropped >50% per cycle — rapid convergence, extend by 1.
      local new_limit=$((adjusted + 1))
      if [[ "$new_limit" -le "$hard_ceiling" ]]; then
        adjusted="$new_limit"
        emit_event "intelligence.convergence_acceleration" \
          "issue=${ISSUE_NUMBER:-0}" \
          "context=$context" \
          "prev_issues=$prev_issue_count" \
          "current_issues=$current_issue_count" \
          "new_limit=$adjusted"
      fi
    fi

    # If issue count increases, reduce remaining cycles (never below 1).
    if [[ "$current_issue_count" -gt "$prev_issue_count" ]]; then
      local reduced=$((adjusted - 1))
      if [[ "$reduced" -ge 1 ]]; then
        adjusted="$reduced"
        emit_event "intelligence.divergence_detected" \
          "issue=${ISSUE_NUMBER:-0}" \
          "context=$context" \
          "prev_issues=$prev_issue_count" \
          "current_issues=$current_issue_count" \
          "new_limit=$adjusted"
      fi
    fi
  fi

  # ── Budget gate ──
  # Fix: IGNORE_BUDGET and SCRIPT_DIR are expanded with :- defaults so this
  # function no longer aborts under `set -u` when the caller never set them.
  if [[ "${IGNORE_BUDGET:-}" != "true" ]] && [[ -x "${SCRIPT_DIR:-}/sw-cost.sh" ]]; then
    local budget_rc=0
    bash "${SCRIPT_DIR:-}/sw-cost.sh" check-budget 2>/dev/null || budget_rc=$?
    # Exit code 2 from check-budget means the budget is exhausted.
    if [[ "$budget_rc" -eq 2 ]]; then
      # Budget exhausted — cap at current cycle
      adjusted=0
      emit_event "intelligence.budget_cap" \
        "issue=${ISSUE_NUMBER:-0}" \
        "context=$context"
    fi
  fi

  # ── Enforce hard ceiling ──
  if [[ "$adjusted" -gt "$hard_ceiling" ]]; then
    adjusted="$hard_ceiling"
  fi

  echo "$adjusted"
}
|
|
338
|
+
|
|
339
|
+
# ──────────────────────────────────────────────────────────────────────────────
# 5. Intelligent Audit Selection
# AI-driven audit selection — all audits enabled, intensity varies.
# ──────────────────────────────────────────────────────────────────────────────
#######################################
# Choose an intensity (off|lightweight|targeted|full) for each audit type.
# Reads compound_quality.config.audit_intensity from $PIPELINE_CONFIG; "auto"
# (the default) derives intensities from recent quality-score history and the
# intelligence cache.
# Globals (read): PIPELINE_CONFIG, HOME, PROJECT_ROOT, ISSUE_NUMBER
# Outputs: JSON object {adversarial, architecture, simulation, security, dod}
#######################################
pipeline_select_audits() {
  local audit_intensity
  # Fix: PIPELINE_CONFIG guarded with :- so an unset config path degrades to
  # the "auto" default instead of aborting under `set -u`.
  audit_intensity=$(jq -r --arg id "compound_quality" \
    '(.stages[] | select(.id == $id) | .config.audit_intensity) // "auto"' \
    "${PIPELINE_CONFIG:-}" 2>/dev/null) || true
  [[ -z "$audit_intensity" || "$audit_intensity" == "null" ]] && audit_intensity="auto"

  # Short-circuit for explicit overrides
  case "$audit_intensity" in
    off)
      echo '{"adversarial":"off","architecture":"off","simulation":"off","security":"off","dod":"off"}'
      return 0
      ;;
    full|lightweight)
      jq -n --arg i "$audit_intensity" \
        '{adversarial:$i,architecture:$i,simulation:$i,security:$i,dod:$i}'
      return 0
      ;;
  esac

  # ── Auto mode: data-driven intensity ──
  local default_intensity="targeted"
  local security_intensity="targeted"

  # Read last 5 quality scores for this repo
  local quality_scores_file="${HOME}/.shipwright/optimization/quality-scores.jsonl"
  local repo_name
  repo_name=$(basename "${PROJECT_ROOT:-.}") || true
  if [[ -f "$quality_scores_file" ]]; then
    local recent_scores
    # Fix: -F matches the repo name as a literal string; with -E-style default
    # matching, regex metacharacters in a repo directory name could break or
    # broaden the match.
    recent_scores=$(grep -F "\"repo\":\"${repo_name}\"" "$quality_scores_file" 2>/dev/null | tail -5) || true
    if [[ -n "$recent_scores" ]]; then
      # Check for critical findings in recent history
      local has_critical
      has_critical=$(echo "$recent_scores" | jq -s '[.[].findings.critical // 0] | add' 2>/dev/null || echo "0")
      has_critical="${has_critical:-0}"
      if [[ "$has_critical" -gt 0 ]]; then
        security_intensity="full"
      fi

      # Compute average quality score
      local avg_score
      avg_score=$(echo "$recent_scores" | jq -s 'if length > 0 then ([.[].quality_score] | add / length | floor) else 70 end' 2>/dev/null || echo "70")
      avg_score="${avg_score:-70}"

      # Poor history → audit harder; strong history → lighten up (but never
      # downgrade a security intensity already escalated to full).
      if [[ "$avg_score" -lt 60 ]]; then
        default_intensity="full"
        security_intensity="full"
      elif [[ "$avg_score" -gt 80 ]]; then
        default_intensity="lightweight"
        [[ "$security_intensity" != "full" ]] && security_intensity="lightweight"
      fi
    fi
  fi

  # Intelligence cache: upgrade targeted→full for complex changes
  # (Fix: PROJECT_ROOT guarded with :- for `set -u` safety.)
  local intel_cache="${PROJECT_ROOT:-.}/.claude/intelligence-cache.json"
  if [[ -f "$intel_cache" && "$default_intensity" == "targeted" ]]; then
    local complexity
    complexity=$(jq -r '.complexity // "medium"' "$intel_cache" 2>/dev/null || echo "medium")
    if [[ "$complexity" == "high" || "$complexity" == "very_high" ]]; then
      default_intensity="full"
      security_intensity="full"
    fi
  fi

  emit_event "pipeline.audit_selection" \
    "issue=${ISSUE_NUMBER:-0}" \
    "default_intensity=$default_intensity" \
    "security_intensity=$security_intensity" \
    "repo=$repo_name"

  jq -n \
    --arg adv "$default_intensity" \
    --arg arch "$default_intensity" \
    --arg sim "$default_intensity" \
    --arg sec "$security_intensity" \
    --arg dod "$default_intensity" \
    '{adversarial:$adv,architecture:$arch,simulation:$sim,security:$sec,dod:$dod}'
}
|
|
423
|
+
|
|
424
|
+
# ──────────────────────────────────────────────────────────────────────────────
# 6. Definition of Done Verification
# Strict DoD enforcement after compound quality completes.
# ──────────────────────────────────────────────────────────────────────────────
#######################################
# Verify Definition-of-Done signals for the current branch's diff.
# Checks: (1) each changed source file has a matching test file,
# (2) added logic lines are accompanied by added test lines,
# (3) checkbox state in the dod-audit.md artifact.
# Writes a JSON report to <artifacts_dir>/dod-verification.json.
# Globals (read): ARTIFACTS_DIR, BASE_BRANCH, ISSUE_NUMBER
# Arguments: $1 - artifacts dir (defaults to $ARTIFACTS_DIR)
# Returns: 0 when the aggregate pass rate is >= 70%, 1 otherwise.
#######################################
pipeline_verify_dod() {
  local artifacts_dir="${1:-$ARTIFACTS_DIR}"
  local checks_total=0 checks_passed=0
  # NOTE(review): `results` is declared but never used below — confirm before
  # removal.
  local results=""

  # 1. Test coverage: verify changed source files have test counterparts
  local changed_files
  changed_files=$(git diff --name-only "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
  local missing_tests=""
  local files_checked=0

  if [[ -n "$changed_files" ]]; then
    while IFS= read -r src_file; do
      [[ -z "$src_file" ]] && continue
      # Only check source code files
      case "$src_file" in
        *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.sh)
          # Skip test files themselves and config files
          case "$src_file" in
            *test*|*spec*|*__tests__*|*.config.*|*.d.ts) continue ;;
          esac
          files_checked=$((files_checked + 1))
          checks_total=$((checks_total + 1))
          # Check for corresponding test file
          local base_name dir_name ext
          base_name=$(basename "$src_file")
          dir_name=$(dirname "$src_file")
          ext="${base_name##*.}"
          local stem="${base_name%.*}"
          local test_found=false
          # Common test file patterns across JS/TS, Python, Go, etc.
          for pattern in \
            "${dir_name}/${stem}.test.${ext}" \
            "${dir_name}/${stem}.spec.${ext}" \
            "${dir_name}/__tests__/${stem}.test.${ext}" \
            "${dir_name}/${stem}-test.${ext}" \
            "${dir_name}/test_${stem}.${ext}" \
            "${dir_name}/${stem}_test.${ext}"; do
            if [[ -f "$pattern" ]]; then
              test_found=true
              break
            fi
          done
          if $test_found; then
            checks_passed=$((checks_passed + 1))
          else
            # Collected as a literal "\n"-joined list; expanded later via
            # `echo -e` when embedding into the JSON report.
            missing_tests="${missing_tests}${src_file}\n"
          fi
          ;;
      esac
    done <<EOF
$changed_files
EOF
  fi

  # 2. Test-added verification: if significant logic added, ensure tests were also added
  local logic_lines=0 test_lines=0
  if [[ -n "$changed_files" ]]; then
    local full_diff
    full_diff=$(git diff "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
    if [[ -n "$full_diff" ]]; then
      # Count added lines matching source patterns (rough heuristic)
      logic_lines=$(echo "$full_diff" | grep -cE '^\+.*(function |class |if |for |while |return |export )' 2>/dev/null || true)
      logic_lines="${logic_lines:-0}"
      # Count added lines in test files
      test_lines=$(echo "$full_diff" | grep -cE '^\+.*(it\(|test\(|describe\(|expect\(|assert|def test_|func Test)' 2>/dev/null || true)
      test_lines="${test_lines:-0}"
    fi
  fi
  checks_total=$((checks_total + 1))
  local test_ratio_passed=true
  # Fail this check only for substantial logic additions (>20 lines) with no
  # detectable test additions at all.
  if [[ "$logic_lines" -gt 20 && "$test_lines" -eq 0 ]]; then
    test_ratio_passed=false
    warn "DoD verification: ${logic_lines} logic lines added but no test lines detected"
  else
    checks_passed=$((checks_passed + 1))
  fi

  # 3. Behavioral verification: check DoD audit artifacts for evidence
  local dod_audit_file="$artifacts_dir/dod-audit.md"
  local dod_verified=0 dod_total_items=0
  if [[ -f "$dod_audit_file" ]]; then
    # Count checked ("- [x]") vs unchecked ("- [ ]") checklist items.
    dod_total_items=$(grep -cE '^\s*-\s*\[x\]' "$dod_audit_file" 2>/dev/null || true)
    dod_total_items="${dod_total_items:-0}"
    local dod_failing
    dod_failing=$(grep -cE '^\s*-\s*\[\s\]' "$dod_audit_file" 2>/dev/null || true)
    dod_failing="${dod_failing:-0}"
    dod_verified=$dod_total_items
    checks_total=$((checks_total + dod_total_items + ${dod_failing:-0}))
    checks_passed=$((checks_passed + dod_total_items))
  fi

  # Compute pass rate (integer percent; 100 when nothing was checked)
  local pass_rate=100
  if [[ "$checks_total" -gt 0 ]]; then
    pass_rate=$(( (checks_passed * 100) / checks_total ))
  fi

  # Write results
  local tmp_result
  tmp_result=$(mktemp)
  jq -n \
    --argjson checks_total "$checks_total" \
    --argjson checks_passed "$checks_passed" \
    --argjson pass_rate "$pass_rate" \
    --argjson files_checked "$files_checked" \
    --arg missing_tests "$(echo -e "$missing_tests" | head -20)" \
    --argjson logic_lines "$logic_lines" \
    --argjson test_lines "$test_lines" \
    --argjson test_ratio_passed "$test_ratio_passed" \
    --argjson dod_verified "$dod_verified" \
    '{
      checks_total: $checks_total,
      checks_passed: $checks_passed,
      pass_rate: $pass_rate,
      files_checked: $files_checked,
      missing_tests: ($missing_tests | split("\n") | map(select(. != ""))),
      logic_lines: $logic_lines,
      test_lines: $test_lines,
      test_ratio_passed: $test_ratio_passed,
      dod_verified: $dod_verified
    }' > "$tmp_result" 2>/dev/null
  mv "$tmp_result" "$artifacts_dir/dod-verification.json"

  emit_event "pipeline.dod_verification" \
    "issue=${ISSUE_NUMBER:-0}" \
    "checks_total=$checks_total" \
    "checks_passed=$checks_passed" \
    "pass_rate=$pass_rate"

  # Fail if pass rate < 70%
  if [[ "$pass_rate" -lt 70 ]]; then
    warn "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
    return 1
  fi

  success "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
  return 0
}
|
|
568
|
+
|
|
569
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
570
|
+
# 7. Source Code Security Scan
|
|
571
|
+
# Grep-based vulnerability pattern matching on changed files.
|
|
572
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
573
|
+
pipeline_security_source_scan() {
  # Grep-based vulnerability scan over files changed since base branch.
  # Arguments: $1 - base branch (default $BASE_BRANCH or "main")
  # Outputs:   finding count on stdout; findings JSON array written to
  #            "$ARTIFACTS_DIR/security-source-scan.json" when ARTIFACTS_DIR set.
  # Returns:   0 always (informational scan — findings do not fail the stage).
  local base_branch="${1:-${BASE_BRANCH:-main}}"
  local finding_count=0

  local changed_files
  changed_files=$(git diff --name-only "${base_branch}...HEAD" 2>/dev/null || true)
  # No diff → nothing to scan. Emit a zero count so the output contract is
  # consistently numeric (the original printed "[]" here but a count below,
  # which broke callers doing arithmetic on the result).
  [[ -z "$changed_files" ]] && { echo "0"; return 0; }

  local tmp_findings
  tmp_findings=$(mktemp)
  echo "[]" > "$tmp_findings"

  # _sec_scan_append FILE PATTERN_ID SEVERITY DESCRIPTION MATCHES
  # Appends one JSON finding per "line:text" grep match to $tmp_findings and
  # bumps $finding_count. Writes through a second temp file so a failed jq can
  # never truncate the accumulated array (the original read-then-overwrite
  # pattern clobbered the file before jq produced output).
  _sec_scan_append() {
    local file="$1" pattern_id="$2" severity="$3" description="$4" matches="$5"
    [[ -z "$matches" ]] && return 0
    local match line_num tmp_next
    while IFS= read -r match; do
      [[ -z "$match" ]] && continue
      line_num="${match%%:*}"
      finding_count=$((finding_count + 1))
      tmp_next=$(mktemp)
      if jq --arg f "$file" --arg l "$line_num" --arg p "$pattern_id" \
            --arg s "$severity" --arg d "$description" \
            '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":$s,"description":$d}]' \
            "$tmp_findings" > "$tmp_next" 2>/dev/null; then
        mv "$tmp_next" "$tmp_findings"
      else
        rm -f "$tmp_next"
      fi
    done <<< "$matches"
    return 0
  }

  local file matches
  while IFS= read -r file; do
    [[ -z "$file" || ! -f "$file" ]] && continue
    # Only scan recognized code files.
    case "$file" in
      *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.java|*.rb|*.php|*.sh) ;;
      *) continue ;;
    esac

    # SQL injection: query/execute with template interpolation or string concat.
    matches=$(grep -nE '(query|execute|sql)\s*\(?\s*[`"'"'"']\s*.*\$\{|\.query\s*\(\s*[`"'"'"'].*\+' "$file" 2>/dev/null || true)
    _sec_scan_append "$file" "sql_injection" "critical" \
      "Potential SQL injection via string concatenation" "$matches"

    # XSS: unsafe DOM sinks.
    matches=$(grep -nE 'innerHTML\s*=|document\.write\s*\(|dangerouslySetInnerHTML' "$file" 2>/dev/null || true)
    _sec_scan_append "$file" "xss" "critical" \
      "Potential XSS via unsafe DOM manipulation" "$matches"

    # Command injection: eval / shell-out primitives.
    matches=$(grep -nE 'eval\s*\(|child_process|os\.system\s*\(|subprocess\.(call|run|Popen)\s*\(' "$file" 2>/dev/null || true)
    _sec_scan_append "$file" "command_injection" "critical" \
      "Potential command injection via unsafe execution" "$matches"

    # Hardcoded secrets: credential-looking quoted assignments (8+ base64-ish chars).
    matches=$(grep -nEi '(password|api_key|secret|token)\s*=\s*['"'"'"][A-Za-z0-9+/=]{8,}['"'"'"]' "$file" 2>/dev/null || true)
    _sec_scan_append "$file" "hardcoded_secret" "critical" \
      "Potential hardcoded secret or credential" "$matches"

    # Weak cryptographic hash functions.
    matches=$(grep -nE '(md5|MD5|sha1|SHA1)\s*\(' "$file" 2>/dev/null || true)
    _sec_scan_append "$file" "insecure_crypto" "major" \
      "Weak cryptographic function (consider SHA-256+)" "$matches"
  done <<< "$changed_files"

  # Persist findings for downstream stages (atomic move into artifacts dir).
  if [[ -n "${ARTIFACTS_DIR:-}" ]]; then
    local tmp_scan
    tmp_scan=$(mktemp)
    cat "$tmp_findings" > "$tmp_scan"
    mv "$tmp_scan" "$ARTIFACTS_DIR/security-source-scan.json"
  fi
  rm -f "$tmp_findings"

  emit_event "pipeline.security_source_scan" \
    "issue=${ISSUE_NUMBER:-0}" \
    "findings=$finding_count"

  echo "$finding_count"
}
|
|
704
|
+
|
|
705
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
706
|
+
# 8. Quality Score Recording
|
|
707
|
+
# Writes quality scores to JSONL for learning.
|
|
708
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
709
|
+
#######################################
# Append one quality-score record to the JSONL learning log and emit a
# telemetry event.
# Globals:   HOME, PROJECT_ROOT, ISSUE_NUMBER, PIPELINE_NAME (read)
# Arguments: $1 quality score, $2 critical count, $3 major count,
#            $4 minor count, $5 DoD pass rate, $6 comma-separated audit list
#            (all optional; numeric args default to 0)
# Outputs:   appends to ~/.shipwright/optimization/quality-scores.jsonl
#######################################
pipeline_record_quality_score() {
  local score="${1:-0}" crit="${2:-0}" maj="${3:-0}" min="${4:-0}"
  local dod_rate="${5:-0}" audit_list="${6:-}"

  local out_dir="${HOME}/.shipwright/optimization"
  local out_file="${out_dir}/quality-scores.jsonl"
  mkdir -p "$out_dir"

  local repo
  repo=$(basename "${PROJECT_ROOT:-.}") || true

  # Build the record in a temp file so a failed jq never appends a partial line.
  local record_tmp
  record_tmp=$(mktemp)
  jq -n \
    --arg repo "$repo" \
    --arg issue "${ISSUE_NUMBER:-0}" \
    --arg ts "$(now_iso)" \
    --argjson score "$score" \
    --argjson critical "$crit" \
    --argjson major "$maj" \
    --argjson minor "$min" \
    --argjson dod "$dod_rate" \
    --arg template "${PIPELINE_NAME:-standard}" \
    --arg audits "$audit_list" \
    '{repo: $repo,
      issue: ($issue | tonumber),
      timestamp: $ts,
      quality_score: $score,
      findings: {critical: $critical, major: $major, minor: $minor},
      dod_pass_rate: $dod,
      template: $template,
      audits_run: ($audits | split(",") | map(select(. != "")))}' \
    > "$record_tmp" 2>/dev/null

  cat "$record_tmp" >> "$out_file"
  rm -f "$record_tmp"

  # Cap file growth; rotate_jsonl may not be loaded in every entry point.
  type rotate_jsonl &>/dev/null 2>&1 && rotate_jsonl "$out_file" 5000

  emit_event "pipeline.quality_score_recorded" \
    "issue=${ISSUE_NUMBER:-0}" \
    "quality_score=$score" \
    "critical=$crit" \
    "major=$maj" \
    "minor=$min"
}
|
|
761
|
+
|
|
762
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
763
|
+
# 4. Mid-Pipeline Complexity Re-evaluation
|
|
764
|
+
# After build+test completes, compares actual effort to initial estimate.
|
|
765
|
+
# Updates skip recommendations and model routing for remaining stages.
|
|
766
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
767
|
+
pipeline_reassess_complexity() {
  # Mid-pipeline complexity re-evaluation: compares actual effort (diff size,
  # self-heal cycles) against the initial estimate, updates the adaptive
  # complexity for downstream stages, and records the actuals for learning.
  # Globals:   INTELLIGENCE_COMPLEXITY, SELF_HEAL_COUNT, BASE_BRANCH,
  #            ARTIFACTS_DIR, ISSUE_NUMBER (read); PIPELINE_ADAPTIVE_COMPLEXITY (written)
  # Outputs:   assessment label on stdout; reassessment.json in artifacts dir
  local initial_complexity="${INTELLIGENCE_COMPLEXITY:-5}"
  local reassessment_file="$ARTIFACTS_DIR/reassessment.json"

  # ── Gather actual metrics ──
  local files_changed=0 lines_changed=0 first_try_pass=false self_heal_cycles=0

  files_changed=$(git diff "${BASE_BRANCH:-main}...HEAD" --name-only 2>/dev/null | wc -l | tr -d ' ') || files_changed=0
  files_changed="${files_changed:-0}"

  # Count lines changed (insertions + deletions) without pipefail issues
  lines_changed=0
  local _diff_stat
  _diff_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
  if [[ -n "${_diff_stat:-}" ]]; then
    local _ins _del
    _ins=$(echo "$_diff_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
    _del=$(echo "$_diff_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
    lines_changed=$(( ${_ins:-0} + ${_del:-0} ))
  fi

  self_heal_cycles="${SELF_HEAL_COUNT:-0}"
  if [[ "$self_heal_cycles" -eq 0 ]]; then
    first_try_pass=true
  fi

  # ── Compare to expectations ──
  # NOTE: most-specific conditions are tested FIRST. The original checked the
  # broad buckets (<50 lines / >500 lines) before the narrow ones (<20 / >1000),
  # which made the "much_simpler" and "much_harder" branches unreachable.
  local actual_complexity="$initial_complexity"
  local assessment="as_expected"
  local skip_stages="[]"

  # Much simpler: tiny diff, tests passed first try
  if [[ "$lines_changed" -lt 20 && "$first_try_pass" == "true" && "$files_changed" -lt 3 ]]; then
    actual_complexity=1
    assessment="much_simpler"
    skip_stages='["compound_quality","review"]'
  # Simpler than expected: small diff, tests passed first try
  elif [[ "$lines_changed" -lt 50 && "$first_try_pass" == "true" && "$files_changed" -lt 5 ]]; then
    actual_complexity=$((initial_complexity > 2 ? initial_complexity - 2 : 1))
    assessment="simpler_than_expected"
    # Mark compound_quality as skippable, simplify review
    skip_stages='["compound_quality"]'
  # Much harder: very large diff or many self-heal cycles
  elif [[ "$lines_changed" -gt 1000 || "$self_heal_cycles" -gt 4 ]]; then
    actual_complexity=10
    assessment="much_harder"
    skip_stages='[]'
  # Harder than expected: large diff or multiple self-heal cycles
  elif [[ "$lines_changed" -gt 500 || "$self_heal_cycles" -gt 2 ]]; then
    actual_complexity=$((initial_complexity < 9 ? initial_complexity + 2 : 10))
    assessment="harder_than_expected"
    # Ensure compound_quality runs, possibly upgrade model
    skip_stages='[]'
  fi

  # ── Write reassessment (build in temp file, rename atomically) ──
  local tmp_reassess
  tmp_reassess="$(mktemp)"
  jq -n \
    --argjson initial "$initial_complexity" \
    --argjson actual "$actual_complexity" \
    --arg assessment "$assessment" \
    --argjson files_changed "$files_changed" \
    --argjson lines_changed "$lines_changed" \
    --argjson self_heal_cycles "$self_heal_cycles" \
    --argjson first_try "$first_try_pass" \
    --argjson skip_stages "$skip_stages" \
    '{
      initial_complexity: $initial,
      actual_complexity: $actual,
      assessment: $assessment,
      files_changed: $files_changed,
      lines_changed: $lines_changed,
      self_heal_cycles: $self_heal_cycles,
      first_try_pass: $first_try,
      skip_stages: $skip_stages
    }' > "$tmp_reassess" 2>/dev/null && mv "$tmp_reassess" "$reassessment_file" || rm -f "$tmp_reassess"

  # Update global complexity for downstream stages
  PIPELINE_ADAPTIVE_COMPLEXITY="$actual_complexity"

  emit_event "intelligence.reassessment" \
    "issue=${ISSUE_NUMBER:-0}" \
    "initial=$initial_complexity" \
    "actual=$actual_complexity" \
    "assessment=$assessment" \
    "files=$files_changed" \
    "lines=$lines_changed" \
    "self_heals=$self_heal_cycles"

  # ── Store actuals for the learning loop (best-effort) ──
  local learning_file="${HOME}/.shipwright/optimization/complexity-actuals.jsonl"
  mkdir -p "${HOME}/.shipwright/optimization" 2>/dev/null || true
  echo "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"initial\":$initial_complexity,\"actual\":$actual_complexity,\"files\":$files_changed,\"lines\":$lines_changed,\"ts\":\"$(now_iso)\"}" \
    >> "$learning_file" 2>/dev/null || true

  echo "$assessment"
}
|
|
865
|
+
|
|
866
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
867
|
+
# 5. Backtracking Support
|
|
868
|
+
# When compound_quality detects architecture-level problems, backtracks to
|
|
869
|
+
# the design stage instead of just feeding findings to the build loop.
|
|
870
|
+
# Limited to 1 backtrack per pipeline run to prevent infinite loops.
|
|
871
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
872
|
+
#######################################
# Backtrack the pipeline to an earlier stage (typically "design") when
# quality review finds architecture-level problems, then re-run that stage
# and the build→test loop with architecture findings appended to the goal.
# Globals:   PIPELINE_BACKTRACK_COUNT (read/written), PIPELINE_MAX_BACKTRACKS,
#            ARTIFACTS_DIR, GOAL (temporarily mutated, restored on exit paths),
#            ISSUE_NUMBER, BOLD, RESET (read)
# Arguments: $1 - target stage id (a matching stage_<id> function must exist)
#            $2 - reason label for telemetry (default: architecture_violation)
# Returns:   0 if the re-run stage and build→test both pass; 1 otherwise
#######################################
pipeline_backtrack_to_stage() {
  local target_stage="$1"
  local reason="${2:-architecture_violation}"

  # Prevent infinite backtracking
  if [[ "$PIPELINE_BACKTRACK_COUNT" -ge "$PIPELINE_MAX_BACKTRACKS" ]]; then
    warn "Max backtracks ($PIPELINE_MAX_BACKTRACKS) reached — cannot backtrack to $target_stage"
    emit_event "intelligence.backtrack_blocked" \
      "issue=${ISSUE_NUMBER:-0}" \
      "target=$target_stage" \
      "reason=max_backtracks_reached" \
      "count=$PIPELINE_BACKTRACK_COUNT"
    return 1
  fi

  PIPELINE_BACKTRACK_COUNT=$((PIPELINE_BACKTRACK_COUNT + 1))

  info "Backtracking to ${BOLD}${target_stage}${RESET} stage (reason: ${reason})"

  emit_event "intelligence.backtrack" \
    "issue=${ISSUE_NUMBER:-0}" \
    "target=$target_stage" \
    "reason=$reason"

  # Gather architecture context from findings: critical/high messages from the
  # structured validation artifact, plus architecture-flavored lines grepped
  # out of the adversarial review markdown.
  local arch_context=""
  if [[ -f "$ARTIFACTS_DIR/compound-architecture-validation.json" ]]; then
    arch_context=$(jq -r '[.[] | select(.severity == "critical" or .severity == "high") | .message // .description // ""] | join("\n")' \
      "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || true)
  fi
  if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
    local arch_lines
    arch_lines=$(grep -iE 'architect|layer.*violation|circular.*depend|coupling|design.*flaw' \
      "$ARTIFACTS_DIR/adversarial-review.md" 2>/dev/null || true)
    if [[ -n "$arch_lines" ]]; then
      # Embedded newline is intentional: keep one finding per line.
      arch_context="${arch_context}
${arch_lines}"
    fi
  fi

  # Reset stages from target onward
  set_stage_status "$target_stage" "pending"
  set_stage_status "build" "pending"
  set_stage_status "test" "pending"

  # Augment goal with architecture context for re-run
  local original_goal="$GOAL"
  if [[ -n "$arch_context" ]]; then
    GOAL="$GOAL

IMPORTANT — Architecture violations were detected during quality review. Redesign to fix:
$arch_context

Update the design to address these violations, then rebuild."
  fi

  # Re-run the target stage via dynamic dispatch ("stage_<id>" function).
  # NOTE(review): stderr is suppressed here — stage failures surface only
  # through the exit status.
  info "Re-running ${BOLD}${target_stage}${RESET} with architecture context..."
  if "stage_${target_stage}" 2>/dev/null; then
    mark_stage_complete "$target_stage"
    success "Backtrack: ${target_stage} re-run complete"
  else
    GOAL="$original_goal"
    error "Backtrack: ${target_stage} re-run failed"
    return 1
  fi

  # Re-run build+test; GOAL stays augmented until this finishes so the build
  # loop sees the architecture context, then is restored on both paths.
  info "Re-running build→test after backtracked ${target_stage}..."
  if self_healing_build_test; then
    success "Backtrack: build→test passed after ${target_stage} redesign"
    GOAL="$original_goal"
    return 0
  else
    GOAL="$original_goal"
    error "Backtrack: build→test failed after ${target_stage} redesign"
    return 1
  fi
}
|
|
951
|
+
|
|
952
|
+
#######################################
# Rebuild after compound quality review: classify findings into a route,
# either backtrack to design (architecture route) or assemble a prioritized
# feedback report, append it to GOAL, and re-run the self-healing build→test.
# Globals:   ARTIFACTS_DIR (read), GOAL (temporarily mutated, restored),
#            ISSUE_NUMBER (read, via callees)
# Returns:   0 if backtrack or rebuild succeeds; 1 otherwise
#######################################
compound_rebuild_with_feedback() {
  local feedback_file="$ARTIFACTS_DIR/quality-feedback.md"

  # ── Intelligence: classify findings and determine routing ──
  # Falls back to "correctness" when the classifier is unavailable or fails.
  local route="correctness"
  route=$(classify_quality_findings 2>/dev/null) || route="correctness"

  # ── Build structured findings JSON alongside markdown ──
  # NOTE(review): structured_findings is declared but never used below.
  local structured_findings="[]"
  local s_total_critical=0 s_total_major=0 s_total_minor=0

  if [[ -f "$ARTIFACTS_DIR/classified-findings.json" ]]; then
    # Mapping: security→critical, correctness→major, style→minor totals.
    s_total_critical=$(jq -r '.security // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
    s_total_major=$(jq -r '.correctness // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
    s_total_minor=$(jq -r '.style // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
  fi

  # Write quality-findings.json atomically (temp file + rename).
  local tmp_qf
  tmp_qf="$(mktemp)"
  jq -n \
    --arg route "$route" \
    --argjson total_critical "$s_total_critical" \
    --argjson total_major "$s_total_major" \
    --argjson total_minor "$s_total_minor" \
    '{route: $route, total_critical: $total_critical, total_major: $total_major, total_minor: $total_minor}' \
    > "$tmp_qf" 2>/dev/null && mv "$tmp_qf" "$ARTIFACTS_DIR/quality-findings.json" || rm -f "$tmp_qf"

  # ── Architecture route: backtrack to design instead of rebuild ──
  if [[ "$route" == "architecture" ]]; then
    info "Architecture-level findings detected — attempting backtrack to design"
    if pipeline_backtrack_to_stage "design" "architecture_violation" 2>/dev/null; then
      return 0
    fi
    # Backtrack failed or already used — fall through to standard rebuild
    warn "Backtrack unavailable — falling through to standard rebuild"
  fi

  # Collect all findings (prioritized by classification) into one markdown
  # report; the grouped redirect writes the whole block to $feedback_file.
  {
    echo "# Quality Feedback — Issues to Fix"
    echo ""

    # Security findings first (highest priority)
    if [[ "$route" == "security" || -f "$ARTIFACTS_DIR/security-audit.log" ]] && grep -qiE 'critical|high' "$ARTIFACTS_DIR/security-audit.log" 2>/dev/null; then
      echo "## 🔴 PRIORITY: Security Findings (fix these first)"
      cat "$ARTIFACTS_DIR/security-audit.log"
      echo ""
      echo "Security issues MUST be resolved before any other changes."
      echo ""
    fi

    # Correctness findings
    if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
      echo "## Adversarial Review Findings"
      cat "$ARTIFACTS_DIR/adversarial-review.md"
      echo ""
    fi
    if [[ -f "$ARTIFACTS_DIR/negative-review.md" ]]; then
      echo "## Negative Prompting Concerns"
      cat "$ARTIFACTS_DIR/negative-review.md"
      echo ""
    fi
    if [[ -f "$ARTIFACTS_DIR/dod-audit.md" ]]; then
      echo "## DoD Audit Failures"
      # Only the failed (❌) lines from the audit.
      grep "❌" "$ARTIFACTS_DIR/dod-audit.md" 2>/dev/null || true
      echo ""
    fi
    if [[ -f "$ARTIFACTS_DIR/api-compat.log" ]] && grep -qi 'BREAKING' "$ARTIFACTS_DIR/api-compat.log" 2>/dev/null; then
      echo "## API Breaking Changes"
      cat "$ARTIFACTS_DIR/api-compat.log"
      echo ""
    fi

    # Style findings last (deprioritized, informational)
    if [[ -f "$ARTIFACTS_DIR/classified-findings.json" ]]; then
      local style_count
      style_count=$(jq -r '.style // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
      if [[ "$style_count" -gt 0 ]]; then
        echo "## Style Notes (non-blocking, address if time permits)"
        echo "${style_count} style suggestions found. These do not block the build."
        echo ""
      fi
    fi
  } > "$feedback_file"

  # Validate feedback file has actual content
  # NOTE(review): the header echoes above always produce bytes, so this -s
  # check can only fire if the redirect itself failed.
  if [[ ! -s "$feedback_file" ]]; then
    warn "No quality feedback collected — skipping rebuild"
    return 1
  fi

  # Reset build/test stages
  set_stage_status "build" "pending"
  set_stage_status "test" "pending"
  set_stage_status "review" "pending"

  # Augment GOAL with quality feedback (route-specific instructions)
  local original_goal="$GOAL"
  local feedback_content
  feedback_content=$(cat "$feedback_file")

  local route_instruction=""
  case "$route" in
    security)
      route_instruction="SECURITY PRIORITY: Fix all security vulnerabilities FIRST, then address other issues. Security issues are BLOCKING."
      ;;
    performance)
      route_instruction="PERFORMANCE PRIORITY: Address performance regressions and optimizations. Check for N+1 queries, memory leaks, and algorithmic complexity."
      ;;
    testing)
      route_instruction="TESTING PRIORITY: Add missing test coverage and fix flaky tests before addressing other issues."
      ;;
    correctness)
      route_instruction="Fix every issue listed above while keeping all existing functionality working."
      ;;
    architecture)
      # Reached only when the backtrack above was unavailable or failed.
      route_instruction="ARCHITECTURE: Fix structural issues. Check dependency direction, layer boundaries, and separation of concerns."
      ;;
    *)
      route_instruction="Fix every issue listed above while keeping all existing functionality working."
      ;;
  esac

  GOAL="$GOAL

IMPORTANT — Compound quality review found issues (route: ${route}). Fix ALL of these:
$feedback_content

${route_instruction}"

  # Re-run self-healing build→test; restore the original GOAL on both paths.
  info "Rebuilding with quality feedback (route: ${route})..."
  if self_healing_build_test; then
    GOAL="$original_goal"
    return 0
  else
    GOAL="$original_goal"
    return 1
  fi
}
|
|
1092
|
+
|
|
1093
|
+
stage_compound_quality() {
|
|
1094
|
+
CURRENT_STAGE_ID="compound_quality"
|
|
1095
|
+
|
|
1096
|
+
# Pre-check: verify meaningful changes exist before running expensive quality checks
|
|
1097
|
+
local _cq_real_changes
|
|
1098
|
+
_cq_real_changes=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" \
|
|
1099
|
+
-- . ':!.claude/loop-state.md' ':!.claude/pipeline-state.md' \
|
|
1100
|
+
':!.claude/pipeline-artifacts/*' ':!**/progress.md' \
|
|
1101
|
+
':!**/error-summary.json' 2>/dev/null | wc -l | xargs || echo "0")
|
|
1102
|
+
if [[ "${_cq_real_changes:-0}" -eq 0 ]]; then
|
|
1103
|
+
error "Compound quality: no meaningful code changes found — failing quality gate"
|
|
1104
|
+
return 1
|
|
1105
|
+
fi
|
|
1106
|
+
|
|
1107
|
+
# Read config
|
|
1108
|
+
local max_cycles adversarial_enabled negative_enabled e2e_enabled dod_enabled strict_quality
|
|
1109
|
+
max_cycles=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.max_cycles) // 3' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1110
|
+
[[ -z "$max_cycles" || "$max_cycles" == "null" ]] && max_cycles=3
|
|
1111
|
+
adversarial_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.adversarial) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1112
|
+
negative_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.negative) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1113
|
+
e2e_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.e2e) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1114
|
+
dod_enabled=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.dod_audit) // true' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1115
|
+
strict_quality=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.strict_quality) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1116
|
+
[[ -z "$strict_quality" || "$strict_quality" == "null" ]] && strict_quality="false"
|
|
1117
|
+
|
|
1118
|
+
# Intelligent audit selection
|
|
1119
|
+
local audit_plan='{"adversarial":"targeted","architecture":"targeted","simulation":"targeted","security":"targeted","dod":"targeted"}'
|
|
1120
|
+
if type pipeline_select_audits &>/dev/null 2>&1; then
|
|
1121
|
+
local _selected
|
|
1122
|
+
_selected=$(pipeline_select_audits 2>/dev/null) || true
|
|
1123
|
+
if [[ -n "$_selected" && "$_selected" != "null" ]]; then
|
|
1124
|
+
audit_plan="$_selected"
|
|
1125
|
+
info "Audit plan: $(echo "$audit_plan" | jq -c '.' 2>/dev/null || echo "$audit_plan")"
|
|
1126
|
+
fi
|
|
1127
|
+
fi
|
|
1128
|
+
|
|
1129
|
+
# Track findings for quality score
|
|
1130
|
+
local total_critical=0 total_major=0 total_minor=0
|
|
1131
|
+
local audits_run_list=""
|
|
1132
|
+
|
|
1133
|
+
# ── HARDENED QUALITY GATES (RUN BEFORE CYCLES) ──
|
|
1134
|
+
# These checks must pass before we even start the audit cycles
|
|
1135
|
+
echo ""
|
|
1136
|
+
info "Running hardened quality gate checks..."
|
|
1137
|
+
|
|
1138
|
+
# 1. Bash 3.2 compatibility check
|
|
1139
|
+
local bash_violations=0
|
|
1140
|
+
bash_violations=$(run_bash_compat_check 2>/dev/null) || bash_violations=0
|
|
1141
|
+
bash_violations="${bash_violations:-0}"
|
|
1142
|
+
|
|
1143
|
+
if [[ "$strict_quality" == "true" && "$bash_violations" -gt 0 ]]; then
|
|
1144
|
+
error "STRICT QUALITY: Bash 3.2 incompatibilities found — blocking"
|
|
1145
|
+
emit_event "quality.bash_compat_failed" \
|
|
1146
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1147
|
+
"violations=$bash_violations"
|
|
1148
|
+
return 1
|
|
1149
|
+
fi
|
|
1150
|
+
|
|
1151
|
+
if [[ "$bash_violations" -gt 0 ]]; then
|
|
1152
|
+
warn "Bash 3.2 incompatibilities detected: ${bash_violations} (will impact quality score)"
|
|
1153
|
+
total_minor=$((total_minor + bash_violations))
|
|
1154
|
+
else
|
|
1155
|
+
success "Bash 3.2 compatibility: clean"
|
|
1156
|
+
fi
|
|
1157
|
+
|
|
1158
|
+
# 2. Test coverage check
|
|
1159
|
+
local coverage_pct=0
|
|
1160
|
+
coverage_pct=$(run_test_coverage_check 2>/dev/null) || coverage_pct=0
|
|
1161
|
+
coverage_pct="${coverage_pct:-0}"
|
|
1162
|
+
|
|
1163
|
+
if [[ "$coverage_pct" != "skip" ]]; then
|
|
1164
|
+
if [[ "$coverage_pct" -lt "${PIPELINE_COVERAGE_THRESHOLD:-60}" ]]; then
|
|
1165
|
+
if [[ "$strict_quality" == "true" ]]; then
|
|
1166
|
+
error "STRICT QUALITY: Test coverage below ${PIPELINE_COVERAGE_THRESHOLD:-60}% (${coverage_pct}%) — blocking"
|
|
1167
|
+
emit_event "quality.coverage_failed" \
|
|
1168
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1169
|
+
"coverage=$coverage_pct"
|
|
1170
|
+
return 1
|
|
1171
|
+
else
|
|
1172
|
+
warn "Test coverage below ${PIPELINE_COVERAGE_THRESHOLD:-60}% threshold (${coverage_pct}%) — quality penalty applied"
|
|
1173
|
+
total_major=$((total_major + 2))
|
|
1174
|
+
fi
|
|
1175
|
+
fi
|
|
1176
|
+
fi
|
|
1177
|
+
|
|
1178
|
+
# 3. New functions without tests check
|
|
1179
|
+
local untested_functions=0
|
|
1180
|
+
untested_functions=$(run_new_function_test_check 2>/dev/null) || untested_functions=0
|
|
1181
|
+
untested_functions="${untested_functions:-0}"
|
|
1182
|
+
|
|
1183
|
+
if [[ "$untested_functions" -gt 0 ]]; then
|
|
1184
|
+
if [[ "$strict_quality" == "true" ]]; then
|
|
1185
|
+
error "STRICT QUALITY: ${untested_functions} new function(s) without tests — blocking"
|
|
1186
|
+
emit_event "quality.untested_functions" \
|
|
1187
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1188
|
+
"count=$untested_functions"
|
|
1189
|
+
return 1
|
|
1190
|
+
else
|
|
1191
|
+
warn "New functions without corresponding tests: ${untested_functions}"
|
|
1192
|
+
total_major=$((total_major + untested_functions))
|
|
1193
|
+
fi
|
|
1194
|
+
fi
|
|
1195
|
+
|
|
1196
|
+
# 4. Atomic write violations (optional, informational in most modes)
|
|
1197
|
+
local atomic_violations=0
|
|
1198
|
+
atomic_violations=$(run_atomic_write_check 2>/dev/null) || atomic_violations=0
|
|
1199
|
+
atomic_violations="${atomic_violations:-0}"
|
|
1200
|
+
|
|
1201
|
+
if [[ "$atomic_violations" -gt 0 ]]; then
|
|
1202
|
+
warn "Atomic write violations: ${atomic_violations} (state/config file patterns)"
|
|
1203
|
+
total_minor=$((total_minor + atomic_violations))
|
|
1204
|
+
fi
|
|
1205
|
+
|
|
1206
|
+
# Vitals-driven adaptive cycle limit (preferred)
|
|
1207
|
+
local base_max_cycles="$max_cycles"
|
|
1208
|
+
if type pipeline_adaptive_limit &>/dev/null 2>&1; then
|
|
1209
|
+
local _cq_vitals=""
|
|
1210
|
+
if type pipeline_compute_vitals &>/dev/null 2>&1; then
|
|
1211
|
+
_cq_vitals=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
|
|
1212
|
+
fi
|
|
1213
|
+
local vitals_cq_limit
|
|
1214
|
+
vitals_cq_limit=$(pipeline_adaptive_limit "compound_quality" "$_cq_vitals" 2>/dev/null) || true
|
|
1215
|
+
if [[ -n "$vitals_cq_limit" && "$vitals_cq_limit" =~ ^[0-9]+$ && "$vitals_cq_limit" -gt 0 ]]; then
|
|
1216
|
+
max_cycles="$vitals_cq_limit"
|
|
1217
|
+
if [[ "$max_cycles" != "$base_max_cycles" ]]; then
|
|
1218
|
+
info "Vitals-driven cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
|
|
1219
|
+
fi
|
|
1220
|
+
fi
|
|
1221
|
+
else
|
|
1222
|
+
# Fallback: adaptive cycle limits from optimization data
|
|
1223
|
+
local _cq_iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
|
|
1224
|
+
if [[ -f "$_cq_iter_model" ]]; then
|
|
1225
|
+
local adaptive_limit
|
|
1226
|
+
adaptive_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "0" "-1" 2>/dev/null) || true
|
|
1227
|
+
if [[ -n "$adaptive_limit" && "$adaptive_limit" =~ ^[0-9]+$ && "$adaptive_limit" -gt 0 ]]; then
|
|
1228
|
+
max_cycles="$adaptive_limit"
|
|
1229
|
+
if [[ "$max_cycles" != "$base_max_cycles" ]]; then
|
|
1230
|
+
info "Adaptive cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
|
|
1231
|
+
fi
|
|
1232
|
+
fi
|
|
1233
|
+
fi
|
|
1234
|
+
fi
|
|
1235
|
+
|
|
1236
|
+
# Convergence tracking
|
|
1237
|
+
local prev_issue_count=-1
|
|
1238
|
+
|
|
1239
|
+
local cycle=0
|
|
1240
|
+
while [[ "$cycle" -lt "$max_cycles" ]]; do
|
|
1241
|
+
cycle=$((cycle + 1))
|
|
1242
|
+
local all_passed=true
|
|
1243
|
+
|
|
1244
|
+
echo ""
|
|
1245
|
+
echo -e "${PURPLE}${BOLD}━━━ Compound Quality — Cycle ${cycle}/${max_cycles} ━━━${RESET}"
|
|
1246
|
+
|
|
1247
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1248
|
+
gh_comment_issue "$ISSUE_NUMBER" "🔬 **Compound quality** — cycle ${cycle}/${max_cycles}" 2>/dev/null || true
|
|
1249
|
+
fi
|
|
1250
|
+
|
|
1251
|
+
# 1. Adversarial Review
|
|
1252
|
+
local _adv_intensity
|
|
1253
|
+
_adv_intensity=$(echo "$audit_plan" | jq -r '.adversarial // "targeted"' 2>/dev/null || echo "targeted")
|
|
1254
|
+
if [[ "$adversarial_enabled" == "true" && "$_adv_intensity" != "off" ]]; then
|
|
1255
|
+
echo ""
|
|
1256
|
+
info "Running adversarial review (${_adv_intensity})..."
|
|
1257
|
+
audits_run_list="${audits_run_list:+${audits_run_list},}adversarial"
|
|
1258
|
+
if ! run_adversarial_review; then
|
|
1259
|
+
all_passed=false
|
|
1260
|
+
fi
|
|
1261
|
+
fi
|
|
1262
|
+
|
|
1263
|
+
# 2. Negative Prompting
|
|
1264
|
+
if [[ "$negative_enabled" == "true" ]]; then
|
|
1265
|
+
echo ""
|
|
1266
|
+
info "Running negative prompting..."
|
|
1267
|
+
if ! run_negative_prompting; then
|
|
1268
|
+
all_passed=false
|
|
1269
|
+
fi
|
|
1270
|
+
fi
|
|
1271
|
+
|
|
1272
|
+
# 3. Developer Simulation (intelligence module)
|
|
1273
|
+
if type simulation_review &>/dev/null 2>&1; then
|
|
1274
|
+
local sim_enabled
|
|
1275
|
+
sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
|
|
1276
|
+
local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
|
|
1277
|
+
if [[ "$sim_enabled" != "true" && -f "$daemon_cfg" ]]; then
|
|
1278
|
+
sim_enabled=$(jq -r '.intelligence.simulation_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
|
|
1279
|
+
fi
|
|
1280
|
+
if [[ "$sim_enabled" == "true" ]]; then
|
|
1281
|
+
echo ""
|
|
1282
|
+
info "Running developer simulation review..."
|
|
1283
|
+
local sim_diff
|
|
1284
|
+
sim_diff=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
|
|
1285
|
+
if [[ -n "$sim_diff" ]]; then
|
|
1286
|
+
local sim_result
|
|
1287
|
+
sim_result=$(simulation_review "$sim_diff" "${GOAL:-}" 2>/dev/null || echo "[]")
|
|
1288
|
+
if [[ -n "$sim_result" && "$sim_result" != "[]" && "$sim_result" != *'"error"'* ]]; then
|
|
1289
|
+
echo "$sim_result" > "$ARTIFACTS_DIR/compound-simulation-review.json"
|
|
1290
|
+
local sim_critical
|
|
1291
|
+
sim_critical=$(echo "$sim_result" | jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
|
|
1292
|
+
local sim_total
|
|
1293
|
+
sim_total=$(echo "$sim_result" | jq 'length' 2>/dev/null || echo "0")
|
|
1294
|
+
if [[ "$sim_critical" -gt 0 ]]; then
|
|
1295
|
+
warn "Developer simulation: ${sim_critical} critical/high concerns (${sim_total} total)"
|
|
1296
|
+
all_passed=false
|
|
1297
|
+
else
|
|
1298
|
+
success "Developer simulation: ${sim_total} concerns (none critical/high)"
|
|
1299
|
+
fi
|
|
1300
|
+
emit_event "compound.simulation" \
|
|
1301
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1302
|
+
"cycle=$cycle" \
|
|
1303
|
+
"total=$sim_total" \
|
|
1304
|
+
"critical=$sim_critical"
|
|
1305
|
+
else
|
|
1306
|
+
success "Developer simulation: no concerns"
|
|
1307
|
+
fi
|
|
1308
|
+
fi
|
|
1309
|
+
fi
|
|
1310
|
+
fi
|
|
1311
|
+
|
|
1312
|
+
# 4. Architecture Enforcer (intelligence module)
|
|
1313
|
+
if type architecture_validate_changes &>/dev/null 2>&1; then
|
|
1314
|
+
local arch_enabled
|
|
1315
|
+
arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$PIPELINE_CONFIG" 2>/dev/null || echo "false")
|
|
1316
|
+
local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
|
|
1317
|
+
if [[ "$arch_enabled" != "true" && -f "$daemon_cfg" ]]; then
|
|
1318
|
+
arch_enabled=$(jq -r '.intelligence.architecture_enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
|
|
1319
|
+
fi
|
|
1320
|
+
if [[ "$arch_enabled" == "true" ]]; then
|
|
1321
|
+
echo ""
|
|
1322
|
+
info "Running architecture validation..."
|
|
1323
|
+
local arch_diff
|
|
1324
|
+
arch_diff=$(git diff "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
|
|
1325
|
+
if [[ -n "$arch_diff" ]]; then
|
|
1326
|
+
local arch_result
|
|
1327
|
+
arch_result=$(architecture_validate_changes "$arch_diff" "" 2>/dev/null || echo "[]")
|
|
1328
|
+
if [[ -n "$arch_result" && "$arch_result" != "[]" && "$arch_result" != *'"error"'* ]]; then
|
|
1329
|
+
echo "$arch_result" > "$ARTIFACTS_DIR/compound-architecture-validation.json"
|
|
1330
|
+
local arch_violations
|
|
1331
|
+
arch_violations=$(echo "$arch_result" | jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' 2>/dev/null || echo "0")
|
|
1332
|
+
local arch_total
|
|
1333
|
+
arch_total=$(echo "$arch_result" | jq 'length' 2>/dev/null || echo "0")
|
|
1334
|
+
if [[ "$arch_violations" -gt 0 ]]; then
|
|
1335
|
+
warn "Architecture validation: ${arch_violations} critical/high violations (${arch_total} total)"
|
|
1336
|
+
all_passed=false
|
|
1337
|
+
else
|
|
1338
|
+
success "Architecture validation: ${arch_total} violations (none critical/high)"
|
|
1339
|
+
fi
|
|
1340
|
+
emit_event "compound.architecture" \
|
|
1341
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1342
|
+
"cycle=$cycle" \
|
|
1343
|
+
"total=$arch_total" \
|
|
1344
|
+
"violations=$arch_violations"
|
|
1345
|
+
else
|
|
1346
|
+
success "Architecture validation: no violations"
|
|
1347
|
+
fi
|
|
1348
|
+
fi
|
|
1349
|
+
fi
|
|
1350
|
+
fi
|
|
1351
|
+
|
|
1352
|
+
# 5. E2E Validation
|
|
1353
|
+
if [[ "$e2e_enabled" == "true" ]]; then
|
|
1354
|
+
echo ""
|
|
1355
|
+
info "Running E2E validation..."
|
|
1356
|
+
if ! run_e2e_validation; then
|
|
1357
|
+
all_passed=false
|
|
1358
|
+
fi
|
|
1359
|
+
fi
|
|
1360
|
+
|
|
1361
|
+
# 6. DoD Audit
|
|
1362
|
+
local _dod_intensity
|
|
1363
|
+
_dod_intensity=$(echo "$audit_plan" | jq -r '.dod // "targeted"' 2>/dev/null || echo "targeted")
|
|
1364
|
+
if [[ "$dod_enabled" == "true" && "$_dod_intensity" != "off" ]]; then
|
|
1365
|
+
echo ""
|
|
1366
|
+
info "Running Definition of Done audit (${_dod_intensity})..."
|
|
1367
|
+
audits_run_list="${audits_run_list:+${audits_run_list},}dod"
|
|
1368
|
+
if ! run_dod_audit; then
|
|
1369
|
+
all_passed=false
|
|
1370
|
+
fi
|
|
1371
|
+
fi
|
|
1372
|
+
|
|
1373
|
+
# 6b. Security Source Scan
|
|
1374
|
+
local _sec_intensity
|
|
1375
|
+
_sec_intensity=$(echo "$audit_plan" | jq -r '.security // "targeted"' 2>/dev/null || echo "targeted")
|
|
1376
|
+
if [[ "$_sec_intensity" != "off" ]]; then
|
|
1377
|
+
echo ""
|
|
1378
|
+
info "Running security source scan (${_sec_intensity})..."
|
|
1379
|
+
audits_run_list="${audits_run_list:+${audits_run_list},}security"
|
|
1380
|
+
local sec_finding_count=0
|
|
1381
|
+
sec_finding_count=$(pipeline_security_source_scan 2>/dev/null) || true
|
|
1382
|
+
sec_finding_count="${sec_finding_count:-0}"
|
|
1383
|
+
if [[ "$sec_finding_count" -gt 0 ]]; then
|
|
1384
|
+
warn "Security source scan: ${sec_finding_count} finding(s)"
|
|
1385
|
+
total_critical=$((total_critical + sec_finding_count))
|
|
1386
|
+
all_passed=false
|
|
1387
|
+
else
|
|
1388
|
+
success "Security source scan: clean"
|
|
1389
|
+
fi
|
|
1390
|
+
fi
|
|
1391
|
+
|
|
1392
|
+
# 7. Multi-dimensional quality checks
|
|
1393
|
+
echo ""
|
|
1394
|
+
info "Running multi-dimensional quality checks..."
|
|
1395
|
+
local quality_failures=0
|
|
1396
|
+
|
|
1397
|
+
if ! quality_check_security; then
|
|
1398
|
+
quality_failures=$((quality_failures + 1))
|
|
1399
|
+
fi
|
|
1400
|
+
if ! quality_check_coverage; then
|
|
1401
|
+
quality_failures=$((quality_failures + 1))
|
|
1402
|
+
fi
|
|
1403
|
+
if ! quality_check_perf_regression; then
|
|
1404
|
+
quality_failures=$((quality_failures + 1))
|
|
1405
|
+
fi
|
|
1406
|
+
if ! quality_check_bundle_size; then
|
|
1407
|
+
quality_failures=$((quality_failures + 1))
|
|
1408
|
+
fi
|
|
1409
|
+
if ! quality_check_api_compat; then
|
|
1410
|
+
quality_failures=$((quality_failures + 1))
|
|
1411
|
+
fi
|
|
1412
|
+
|
|
1413
|
+
if [[ "$quality_failures" -gt 0 ]]; then
|
|
1414
|
+
if [[ "$strict_quality" == "true" ]]; then
|
|
1415
|
+
warn "Multi-dimensional quality: ${quality_failures} check(s) failed (strict mode — blocking)"
|
|
1416
|
+
all_passed=false
|
|
1417
|
+
else
|
|
1418
|
+
warn "Multi-dimensional quality: ${quality_failures} check(s) failed (non-blocking)"
|
|
1419
|
+
fi
|
|
1420
|
+
else
|
|
1421
|
+
success "Multi-dimensional quality: all checks passed"
|
|
1422
|
+
fi
|
|
1423
|
+
|
|
1424
|
+
# ── Convergence Detection ──
|
|
1425
|
+
# Count critical/high issues from all review artifacts
|
|
1426
|
+
local current_issue_count=0
|
|
1427
|
+
if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
|
|
1428
|
+
local adv_issues
|
|
1429
|
+
adv_issues=$(grep -ciE '\*\*\[?(Critical|Bug|critical|high)\]?\*\*' "$ARTIFACTS_DIR/adversarial-review.md" 2>/dev/null || true)
|
|
1430
|
+
current_issue_count=$((current_issue_count + ${adv_issues:-0}))
|
|
1431
|
+
fi
|
|
1432
|
+
if [[ -f "$ARTIFACTS_DIR/adversarial-review.json" ]]; then
|
|
1433
|
+
local adv_json_issues
|
|
1434
|
+
adv_json_issues=$(jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
|
|
1435
|
+
current_issue_count=$((current_issue_count + ${adv_json_issues:-0}))
|
|
1436
|
+
fi
|
|
1437
|
+
if [[ -f "$ARTIFACTS_DIR/negative-review.md" ]]; then
|
|
1438
|
+
local neg_issues
|
|
1439
|
+
neg_issues=$(grep -ciE '\[Critical\]' "$ARTIFACTS_DIR/negative-review.md" 2>/dev/null || true)
|
|
1440
|
+
current_issue_count=$((current_issue_count + ${neg_issues:-0}))
|
|
1441
|
+
fi
|
|
1442
|
+
current_issue_count=$((current_issue_count + quality_failures))
|
|
1443
|
+
|
|
1444
|
+
emit_event "compound.cycle" \
|
|
1445
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1446
|
+
"cycle=$cycle" \
|
|
1447
|
+
"max_cycles=$max_cycles" \
|
|
1448
|
+
"passed=$all_passed" \
|
|
1449
|
+
"critical_issues=$current_issue_count" \
|
|
1450
|
+
"self_heal_count=$SELF_HEAL_COUNT"
|
|
1451
|
+
|
|
1452
|
+
# Early exit: zero critical/high issues
|
|
1453
|
+
if [[ "$current_issue_count" -eq 0 ]] && $all_passed; then
|
|
1454
|
+
success "Compound quality passed on cycle ${cycle} — zero critical/high issues"
|
|
1455
|
+
|
|
1456
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1457
|
+
gh_comment_issue "$ISSUE_NUMBER" "✅ **Compound quality passed** — cycle ${cycle}/${max_cycles}
|
|
1458
|
+
|
|
1459
|
+
All quality checks clean:
|
|
1460
|
+
- Adversarial review: ✅
|
|
1461
|
+
- Negative prompting: ✅
|
|
1462
|
+
- Developer simulation: ✅
|
|
1463
|
+
- Architecture validation: ✅
|
|
1464
|
+
- E2E validation: ✅
|
|
1465
|
+
- DoD audit: ✅
|
|
1466
|
+
- Security audit: ✅
|
|
1467
|
+
- Coverage: ✅
|
|
1468
|
+
- Performance: ✅
|
|
1469
|
+
- Bundle size: ✅
|
|
1470
|
+
- API compat: ✅" 2>/dev/null || true
|
|
1471
|
+
fi
|
|
1472
|
+
|
|
1473
|
+
log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
|
|
1474
|
+
|
|
1475
|
+
# DoD verification on successful pass
|
|
1476
|
+
local _dod_pass_rate=100
|
|
1477
|
+
if type pipeline_verify_dod &>/dev/null 2>&1; then
|
|
1478
|
+
pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
1479
|
+
if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
|
|
1480
|
+
_dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
|
|
1481
|
+
fi
|
|
1482
|
+
fi
|
|
1483
|
+
|
|
1484
|
+
pipeline_record_quality_score 100 0 0 0 "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
|
|
1485
|
+
return 0
|
|
1486
|
+
fi
|
|
1487
|
+
|
|
1488
|
+
if $all_passed; then
|
|
1489
|
+
success "Compound quality passed on cycle ${cycle}"
|
|
1490
|
+
|
|
1491
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1492
|
+
gh_comment_issue "$ISSUE_NUMBER" "✅ **Compound quality passed** — cycle ${cycle}/${max_cycles}" 2>/dev/null || true
|
|
1493
|
+
fi
|
|
1494
|
+
|
|
1495
|
+
log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
|
|
1496
|
+
|
|
1497
|
+
# DoD verification on successful pass
|
|
1498
|
+
local _dod_pass_rate=100
|
|
1499
|
+
if type pipeline_verify_dod &>/dev/null 2>&1; then
|
|
1500
|
+
pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
1501
|
+
if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
|
|
1502
|
+
_dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
|
|
1503
|
+
fi
|
|
1504
|
+
fi
|
|
1505
|
+
|
|
1506
|
+
pipeline_record_quality_score 95 0 "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
|
|
1507
|
+
return 0
|
|
1508
|
+
fi
|
|
1509
|
+
|
|
1510
|
+
# Check for plateau: issue count unchanged between cycles
|
|
1511
|
+
if [[ "$prev_issue_count" -ge 0 && "$current_issue_count" -eq "$prev_issue_count" && "$cycle" -gt 1 ]]; then
|
|
1512
|
+
warn "Convergence: quality plateau — ${current_issue_count} issues unchanged between cycles"
|
|
1513
|
+
emit_event "compound.plateau" \
|
|
1514
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1515
|
+
"cycle=$cycle" \
|
|
1516
|
+
"issue_count=$current_issue_count"
|
|
1517
|
+
|
|
1518
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1519
|
+
gh_comment_issue "$ISSUE_NUMBER" "⚠️ **Compound quality plateau** — ${current_issue_count} issues unchanged after cycle ${cycle}. Stopping early." 2>/dev/null || true
|
|
1520
|
+
fi
|
|
1521
|
+
|
|
1522
|
+
log_stage "compound_quality" "Plateau at cycle ${cycle}/${max_cycles} (${current_issue_count} issues)"
|
|
1523
|
+
return 1
|
|
1524
|
+
fi
|
|
1525
|
+
prev_issue_count="$current_issue_count"
|
|
1526
|
+
|
|
1527
|
+
info "Convergence: ${current_issue_count} critical/high issues remaining"
|
|
1528
|
+
|
|
1529
|
+
# Intelligence: re-evaluate adaptive cycle limit based on convergence (only after first cycle)
|
|
1530
|
+
if [[ "$prev_issue_count" -ge 0 ]]; then
|
|
1531
|
+
local updated_limit
|
|
1532
|
+
updated_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "$current_issue_count" "$prev_issue_count" 2>/dev/null) || true
|
|
1533
|
+
if [[ -n "$updated_limit" && "$updated_limit" =~ ^[0-9]+$ && "$updated_limit" -gt 0 && "$updated_limit" != "$max_cycles" ]]; then
|
|
1534
|
+
info "Adaptive cycles: ${max_cycles} → ${updated_limit} (convergence signal)"
|
|
1535
|
+
max_cycles="$updated_limit"
|
|
1536
|
+
fi
|
|
1537
|
+
fi
|
|
1538
|
+
|
|
1539
|
+
# Not all passed — rebuild if we have cycles left
|
|
1540
|
+
if [[ "$cycle" -lt "$max_cycles" ]]; then
|
|
1541
|
+
warn "Quality checks failed — rebuilding with feedback (cycle $((cycle + 1))/${max_cycles})"
|
|
1542
|
+
|
|
1543
|
+
if ! compound_rebuild_with_feedback; then
|
|
1544
|
+
error "Rebuild with feedback failed"
|
|
1545
|
+
log_stage "compound_quality" "Rebuild failed on cycle ${cycle}"
|
|
1546
|
+
return 1
|
|
1547
|
+
fi
|
|
1548
|
+
|
|
1549
|
+
# Re-run review stage too (since code changed)
|
|
1550
|
+
info "Re-running review after rebuild..."
|
|
1551
|
+
stage_review 2>/dev/null || true
|
|
1552
|
+
fi
|
|
1553
|
+
done
|
|
1554
|
+
|
|
1555
|
+
# ── Quality Score Computation ──
|
|
1556
|
+
# Starting score: 100, deductions based on findings
|
|
1557
|
+
local quality_score=100
|
|
1558
|
+
|
|
1559
|
+
# Count findings from artifact files
|
|
1560
|
+
if [[ -f "$ARTIFACTS_DIR/security-source-scan.json" ]]; then
|
|
1561
|
+
local _sec_critical
|
|
1562
|
+
_sec_critical=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
|
|
1563
|
+
local _sec_major
|
|
1564
|
+
_sec_major=$(jq '[.[] | select(.severity == "major")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
|
|
1565
|
+
total_critical=$((total_critical + ${_sec_critical:-0}))
|
|
1566
|
+
total_major=$((total_major + ${_sec_major:-0}))
|
|
1567
|
+
fi
|
|
1568
|
+
if [[ -f "$ARTIFACTS_DIR/adversarial-review.json" ]]; then
|
|
1569
|
+
local _adv_crit
|
|
1570
|
+
_adv_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
|
|
1571
|
+
local _adv_major
|
|
1572
|
+
_adv_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
|
|
1573
|
+
local _adv_minor
|
|
1574
|
+
_adv_minor=$(jq '[.[] | select(.severity == "low" or .severity == "minor")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
|
|
1575
|
+
total_critical=$((total_critical + ${_adv_crit:-0}))
|
|
1576
|
+
total_major=$((total_major + ${_adv_major:-0}))
|
|
1577
|
+
total_minor=$((total_minor + ${_adv_minor:-0}))
|
|
1578
|
+
fi
|
|
1579
|
+
if [[ -f "$ARTIFACTS_DIR/compound-architecture-validation.json" ]]; then
|
|
1580
|
+
local _arch_crit
|
|
1581
|
+
_arch_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
|
|
1582
|
+
local _arch_major
|
|
1583
|
+
_arch_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
|
|
1584
|
+
total_major=$((total_major + ${_arch_crit:-0} + ${_arch_major:-0}))
|
|
1585
|
+
fi
|
|
1586
|
+
|
|
1587
|
+
# Apply deductions
|
|
1588
|
+
quality_score=$((quality_score - (total_critical * 20) - (total_major * 10) - (total_minor * 2)))
|
|
1589
|
+
[[ "$quality_score" -lt 0 ]] && quality_score=0
|
|
1590
|
+
|
|
1591
|
+
# DoD verification
|
|
1592
|
+
local _dod_pass_rate=0
|
|
1593
|
+
if type pipeline_verify_dod &>/dev/null 2>&1; then
|
|
1594
|
+
pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
1595
|
+
if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
|
|
1596
|
+
_dod_pass_rate=$(jq -r '.pass_rate // 0' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "0")
|
|
1597
|
+
fi
|
|
1598
|
+
fi
|
|
1599
|
+
|
|
1600
|
+
# Record quality score
|
|
1601
|
+
pipeline_record_quality_score "$quality_score" "$total_critical" "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
|
|
1602
|
+
|
|
1603
|
+
# ── Quality Gate (HARDENED) ──
|
|
1604
|
+
local compound_quality_blocking
|
|
1605
|
+
compound_quality_blocking=$(jq -r --arg id "compound_quality" \
|
|
1606
|
+
'(.stages[] | select(.id == $id) | .config.compound_quality_blocking) // true' \
|
|
1607
|
+
"$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
1608
|
+
[[ -z "$compound_quality_blocking" || "$compound_quality_blocking" == "null" ]] && compound_quality_blocking="true"
|
|
1609
|
+
|
|
1610
|
+
# HARDENED THRESHOLD: quality_score must be >= 60 (non-strict) or policy threshold (strict) to pass
|
|
1611
|
+
local min_threshold=60
|
|
1612
|
+
if [[ "$strict_quality" == "true" ]]; then
|
|
1613
|
+
min_threshold="${PIPELINE_QUALITY_GATE_THRESHOLD:-70}"
|
|
1614
|
+
# Strict mode: require score >= threshold and ZERO critical issues
|
|
1615
|
+
if [[ "$total_critical" -gt 0 ]]; then
|
|
1616
|
+
error "STRICT QUALITY: ${total_critical} critical issue(s) found — BLOCKING (strict mode)"
|
|
1617
|
+
emit_event "pipeline.quality_gate_failed_strict" \
|
|
1618
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1619
|
+
"reason=critical_issues" \
|
|
1620
|
+
"critical=$total_critical"
|
|
1621
|
+
log_stage "compound_quality" "Quality gate failed (strict mode): critical issues"
|
|
1622
|
+
return 1
|
|
1623
|
+
fi
|
|
1624
|
+
min_threshold=70
|
|
1625
|
+
fi
|
|
1626
|
+
|
|
1627
|
+
# Hard floor: score must be >= 40, regardless of other settings
|
|
1628
|
+
if [[ "$quality_score" -lt 40 ]]; then
|
|
1629
|
+
error "HARDENED GATE: Quality score ${quality_score}/100 below hard floor (40) — BLOCKING"
|
|
1630
|
+
emit_event "quality.hard_floor_failed" \
|
|
1631
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1632
|
+
"quality_score=$quality_score"
|
|
1633
|
+
log_stage "compound_quality" "Quality gate failed: score below hard floor (40)"
|
|
1634
|
+
return 1
|
|
1635
|
+
fi
|
|
1636
|
+
|
|
1637
|
+
if [[ "$quality_score" -lt "$min_threshold" && "$compound_quality_blocking" == "true" ]]; then
|
|
1638
|
+
emit_event "pipeline.quality_gate_failed" \
|
|
1639
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
1640
|
+
"quality_score=$quality_score" \
|
|
1641
|
+
"threshold=$min_threshold" \
|
|
1642
|
+
"critical=$total_critical" \
|
|
1643
|
+
"major=$total_major"
|
|
1644
|
+
|
|
1645
|
+
error "Quality gate FAILED: score ${quality_score}/100 (threshold: ${min_threshold}/100, critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
|
|
1646
|
+
|
|
1647
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1648
|
+
gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/${min_threshold}
|
|
1649
|
+
|
|
1650
|
+
| Finding Type | Count | Deduction |
|
|
1651
|
+
|---|---|---|
|
|
1652
|
+
| Critical | ${total_critical} | -$((total_critical * 20)) |
|
|
1653
|
+
| Major | ${total_major} | -$((total_major * 10)) |
|
|
1654
|
+
| Minor | ${total_minor} | -$((total_minor * 2)) |
|
|
1655
|
+
|
|
1656
|
+
DoD pass rate: ${_dod_pass_rate}%
|
|
1657
|
+
Quality issues remain after ${max_cycles} cycles. Check artifacts for details." 2>/dev/null || true
|
|
1658
|
+
fi
|
|
1659
|
+
|
|
1660
|
+
log_stage "compound_quality" "Quality gate failed: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
|
|
1661
|
+
return 1
|
|
1662
|
+
fi
|
|
1663
|
+
|
|
1664
|
+
# Exhausted all cycles but quality score is at or above threshold
|
|
1665
|
+
if [[ "$quality_score" -ge "$min_threshold" ]]; then
|
|
1666
|
+
if [[ "$quality_score" -eq 100 ]]; then
|
|
1667
|
+
success "Compound quality PERFECT: 100/100"
|
|
1668
|
+
elif [[ "$quality_score" -ge 80 ]]; then
|
|
1669
|
+
success "Compound quality EXCELLENT: ${quality_score}/100"
|
|
1670
|
+
elif [[ "$quality_score" -ge 70 ]]; then
|
|
1671
|
+
success "Compound quality GOOD: ${quality_score}/100"
|
|
1672
|
+
else
|
|
1673
|
+
warn "Compound quality ACCEPTABLE: ${quality_score}/${min_threshold} after ${max_cycles} cycles"
|
|
1674
|
+
fi
|
|
1675
|
+
|
|
1676
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1677
|
+
local quality_emoji="✅"
|
|
1678
|
+
[[ "$quality_score" -lt 70 ]] && quality_emoji="⚠️"
|
|
1679
|
+
gh_comment_issue "$ISSUE_NUMBER" "${quality_emoji} **Compound quality passed** — score ${quality_score}/${min_threshold} after ${max_cycles} cycles
|
|
1680
|
+
|
|
1681
|
+
| Finding Type | Count |
|
|
1682
|
+
|---|---|
|
|
1683
|
+
| Critical | ${total_critical} |
|
|
1684
|
+
| Major | ${total_major} |
|
|
1685
|
+
| Minor | ${total_minor} |
|
|
1686
|
+
|
|
1687
|
+
DoD pass rate: ${_dod_pass_rate}%" 2>/dev/null || true
|
|
1688
|
+
fi
|
|
1689
|
+
|
|
1690
|
+
log_stage "compound_quality" "Passed with score ${quality_score}/${min_threshold} after ${max_cycles} cycles"
|
|
1691
|
+
return 0
|
|
1692
|
+
fi
|
|
1693
|
+
|
|
1694
|
+
error "Compound quality exhausted after ${max_cycles} cycles with insufficient score"
|
|
1695
|
+
|
|
1696
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
1697
|
+
gh_comment_issue "$ISSUE_NUMBER" "❌ **Compound quality failed** after ${max_cycles} cycles
|
|
1698
|
+
|
|
1699
|
+
Quality issues remain. Check artifacts for details." 2>/dev/null || true
|
|
1700
|
+
fi
|
|
1701
|
+
|
|
1702
|
+
log_stage "compound_quality" "Failed after ${max_cycles} cycles"
|
|
1703
|
+
return 1
|
|
1704
|
+
}
|
|
1705
|
+
|
|
1706
|
+
# ─── Error Classification ──────────────────────────────────────────────────
|