shipwright-cli 1.9.0 → 1.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/hooks/post-tool-use.sh +12 -5
- package/package.json +2 -2
- package/scripts/sw +9 -1
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-checkpoint.sh +79 -1
- package/scripts/sw-cleanup.sh +192 -7
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-cost.sh +1 -1
- package/scripts/sw-daemon.sh +409 -37
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +1 -1
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet.sh +1 -1
- package/scripts/sw-github-checks.sh +1 -1
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-init.sh +1 -1
- package/scripts/sw-intelligence.sh +1 -1
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +4 -4
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +444 -49
- package/scripts/sw-memory.sh +198 -3
- package/scripts/sw-pipeline-composer.sh +8 -8
- package/scripts/sw-pipeline-vitals.sh +1096 -0
- package/scripts/sw-pipeline.sh +1692 -84
- package/scripts/sw-predictive.sh +1 -1
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +4 -3
- package/scripts/sw-reaper.sh +5 -3
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-self-optimize.sh +109 -8
- package/scripts/sw-session.sh +31 -9
- package/scripts/sw-setup.sh +1 -1
- package/scripts/sw-status.sh +192 -1
- package/scripts/sw-templates.sh +1 -1
- package/scripts/sw-tmux.sh +1 -1
- package/scripts/sw-tracker.sh +1 -1
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-worktree.sh +1 -1
- package/templates/pipelines/autonomous.json +8 -1
- package/templates/pipelines/cost-aware.json +21 -0
- package/templates/pipelines/deployed.json +40 -6
- package/templates/pipelines/enterprise.json +16 -2
- package/templates/pipelines/fast.json +19 -0
- package/templates/pipelines/full.json +16 -2
- package/templates/pipelines/hotfix.json +19 -0
- package/templates/pipelines/standard.json +19 -0
package/scripts/sw-pipeline.sh
CHANGED
|
@@ -6,7 +6,7 @@
|
|
|
6
6
|
set -euo pipefail
|
|
7
7
|
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR
|
|
8
8
|
|
|
9
|
-
VERSION="1.
|
|
9
|
+
VERSION="1.10.0"
|
|
10
10
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
11
11
|
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
|
12
12
|
|
|
@@ -46,6 +46,10 @@ fi
|
|
|
46
46
|
if [[ -f "$SCRIPT_DIR/sw-adversarial.sh" ]]; then
|
|
47
47
|
source "$SCRIPT_DIR/sw-adversarial.sh"
|
|
48
48
|
fi
|
|
49
|
+
# shellcheck source=sw-pipeline-vitals.sh
|
|
50
|
+
if [[ -f "$SCRIPT_DIR/sw-pipeline-vitals.sh" ]]; then
|
|
51
|
+
source "$SCRIPT_DIR/sw-pipeline-vitals.sh"
|
|
52
|
+
fi
|
|
49
53
|
|
|
50
54
|
# ─── GitHub API Modules (optional) ─────────────────────────────────────────
|
|
51
55
|
# shellcheck source=sw-github-graphql.sh
|
|
@@ -99,6 +103,54 @@ format_duration() {
|
|
|
99
103
|
fi
|
|
100
104
|
}
|
|
101
105
|
|
|
106
|
+
_pipeline_compact_goal() {
|
|
107
|
+
local goal="$1"
|
|
108
|
+
local plan_file="${2:-}"
|
|
109
|
+
local design_file="${3:-}"
|
|
110
|
+
local compact="$goal"
|
|
111
|
+
|
|
112
|
+
# Include plan summary (first 20 lines only)
|
|
113
|
+
if [[ -n "$plan_file" && -f "$plan_file" ]]; then
|
|
114
|
+
compact="${compact}
|
|
115
|
+
|
|
116
|
+
## Plan Summary
|
|
117
|
+
$(head -20 "$plan_file" 2>/dev/null || true)
|
|
118
|
+
[... full plan in .claude/pipeline-artifacts/plan.md]"
|
|
119
|
+
fi
|
|
120
|
+
|
|
121
|
+
# Include design key decisions only (grep for headers)
|
|
122
|
+
if [[ -n "$design_file" && -f "$design_file" ]]; then
|
|
123
|
+
compact="${compact}
|
|
124
|
+
|
|
125
|
+
## Key Design Decisions
|
|
126
|
+
$(grep -E '^#{1,3} ' "$design_file" 2>/dev/null | head -10 || true)
|
|
127
|
+
[... full design in .claude/pipeline-artifacts/design.md]"
|
|
128
|
+
fi
|
|
129
|
+
|
|
130
|
+
echo "$compact"
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
load_composed_pipeline() {
|
|
134
|
+
local spec_file="$1"
|
|
135
|
+
[[ ! -f "$spec_file" ]] && return 1
|
|
136
|
+
|
|
137
|
+
# Read enabled stages from composed spec
|
|
138
|
+
local composed_stages
|
|
139
|
+
composed_stages=$(jq -r '.stages // [] | .[] | .id' "$spec_file" 2>/dev/null) || return 1
|
|
140
|
+
[[ -z "$composed_stages" ]] && return 1
|
|
141
|
+
|
|
142
|
+
# Override enabled stages
|
|
143
|
+
COMPOSED_STAGES="$composed_stages"
|
|
144
|
+
|
|
145
|
+
# Override per-stage settings
|
|
146
|
+
local build_max
|
|
147
|
+
build_max=$(jq -r '.stages[] | select(.id=="build") | .max_iterations // ""' "$spec_file" 2>/dev/null) || true
|
|
148
|
+
[[ -n "$build_max" && "$build_max" != "null" ]] && COMPOSED_BUILD_ITERATIONS="$build_max"
|
|
149
|
+
|
|
150
|
+
emit_event "pipeline.composed_loaded" "stages=$(echo "$composed_stages" | wc -l | tr -d ' ')"
|
|
151
|
+
return 0
|
|
152
|
+
}
|
|
153
|
+
|
|
102
154
|
# ─── Structured Event Log ──────────────────────────────────────────────────
|
|
103
155
|
# Appends JSON events to ~/.shipwright/events.jsonl for metrics/traceability
|
|
104
156
|
|
|
@@ -159,6 +211,8 @@ DRY_RUN=false
|
|
|
159
211
|
IGNORE_BUDGET=false
|
|
160
212
|
COMPLETED_STAGES=""
|
|
161
213
|
MAX_ITERATIONS_OVERRIDE=""
|
|
214
|
+
MAX_RESTARTS_OVERRIDE=""
|
|
215
|
+
FAST_TEST_CMD_OVERRIDE=""
|
|
162
216
|
PR_NUMBER=""
|
|
163
217
|
AUTO_WORKTREE=false
|
|
164
218
|
WORKTREE_NAME=""
|
|
@@ -222,6 +276,8 @@ show_help() {
|
|
|
222
276
|
echo -e " ${DIM}--slack-webhook <url>${RESET} Send notifications to Slack"
|
|
223
277
|
echo -e " ${DIM}--self-heal <n>${RESET} Build→test retry cycles on failure (default: 2)"
|
|
224
278
|
echo -e " ${DIM}--max-iterations <n>${RESET} Override max build loop iterations"
|
|
279
|
+
echo -e " ${DIM}--max-restarts <n>${RESET} Max session restarts in build loop"
|
|
280
|
+
echo -e " ${DIM}--fast-test-cmd <cmd>${RESET} Fast/subset test for build loop"
|
|
225
281
|
echo -e " ${DIM}--completed-stages \"a,b\"${RESET} Skip these stages (CI resume)"
|
|
226
282
|
echo ""
|
|
227
283
|
echo -e "${BOLD}STAGES${RESET} ${DIM}(configurable per pipeline template)${RESET}"
|
|
@@ -304,6 +360,15 @@ parse_args() {
|
|
|
304
360
|
--dry-run) DRY_RUN=true; shift ;;
|
|
305
361
|
--slack-webhook) SLACK_WEBHOOK="$2"; shift 2 ;;
|
|
306
362
|
--self-heal) BUILD_TEST_RETRIES="${2:-3}"; shift 2 ;;
|
|
363
|
+
--max-restarts)
|
|
364
|
+
MAX_RESTARTS_OVERRIDE="$2"
|
|
365
|
+
if ! [[ "$MAX_RESTARTS_OVERRIDE" =~ ^[0-9]+$ ]]; then
|
|
366
|
+
error "--max-restarts must be numeric (got: $MAX_RESTARTS_OVERRIDE)"
|
|
367
|
+
exit 1
|
|
368
|
+
fi
|
|
369
|
+
shift 2 ;;
|
|
370
|
+
|
|
371
|
+
--fast-test-cmd) FAST_TEST_CMD_OVERRIDE="$2"; shift 2 ;;
|
|
307
372
|
--help|-h) show_help; exit 0 ;;
|
|
308
373
|
*)
|
|
309
374
|
if [[ -z "$PIPELINE_NAME_ARG" ]]; then
|
|
@@ -476,6 +541,9 @@ cleanup_on_exit() {
|
|
|
476
541
|
git stash pop --quiet 2>/dev/null || true
|
|
477
542
|
fi
|
|
478
543
|
|
|
544
|
+
# Cancel lingering in_progress GitHub Check Runs
|
|
545
|
+
pipeline_cancel_check_runs 2>/dev/null || true
|
|
546
|
+
|
|
479
547
|
# Update GitHub
|
|
480
548
|
if [[ -n "${ISSUE_NUMBER:-}" && "${GH_AVAILABLE:-false}" == "true" ]]; then
|
|
481
549
|
gh_comment_issue "$ISSUE_NUMBER" "⏸️ **Pipeline interrupted** at stage: ${CURRENT_STAGE_ID:-unknown}" 2>/dev/null || true
|
|
@@ -1039,6 +1107,7 @@ LOG_ENTRIES=""
|
|
|
1039
1107
|
|
|
1040
1108
|
save_artifact() {
|
|
1041
1109
|
local name="$1" content="$2"
|
|
1110
|
+
mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
1042
1111
|
echo "$content" > "$ARTIFACTS_DIR/$name"
|
|
1043
1112
|
}
|
|
1044
1113
|
|
|
@@ -1261,6 +1330,7 @@ initialize_state() {
|
|
|
1261
1330
|
|
|
1262
1331
|
write_state() {
|
|
1263
1332
|
[[ -z "${STATE_FILE:-}" || -z "${ARTIFACTS_DIR:-}" ]] && return 0
|
|
1333
|
+
mkdir -p "$(dirname "$STATE_FILE")" 2>/dev/null || true
|
|
1264
1334
|
local stages_yaml=""
|
|
1265
1335
|
while IFS=: read -r sid sstatus; do
|
|
1266
1336
|
[[ -z "$sid" ]] && continue
|
|
@@ -1283,28 +1353,31 @@ write_state() {
|
|
|
1283
1353
|
stage_progress=$(build_stage_progress)
|
|
1284
1354
|
fi
|
|
1285
1355
|
|
|
1286
|
-
cat > "$STATE_FILE" <<
|
|
1356
|
+
cat > "$STATE_FILE" <<'_SW_STATE_END_'
|
|
1287
1357
|
---
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
${
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
$
|
|
1307
|
-
|
|
1358
|
+
_SW_STATE_END_
|
|
1359
|
+
# Write state with printf to avoid heredoc delimiter injection
|
|
1360
|
+
{
|
|
1361
|
+
printf 'pipeline: %s\n' "$PIPELINE_NAME"
|
|
1362
|
+
printf 'goal: "%s"\n' "$GOAL"
|
|
1363
|
+
printf 'status: %s\n' "$PIPELINE_STATUS"
|
|
1364
|
+
printf 'issue: "%s"\n' "${GITHUB_ISSUE:-}"
|
|
1365
|
+
printf 'branch: "%s"\n' "${GIT_BRANCH:-}"
|
|
1366
|
+
printf 'template: "%s"\n' "${TASK_TYPE:+$(template_for_type "$TASK_TYPE")}"
|
|
1367
|
+
printf 'current_stage: %s\n' "$CURRENT_STAGE"
|
|
1368
|
+
printf 'current_stage_description: "%s"\n' "${cur_stage_desc}"
|
|
1369
|
+
printf 'stage_progress: "%s"\n' "${stage_progress}"
|
|
1370
|
+
printf 'started_at: %s\n' "${STARTED_AT:-$(now_iso)}"
|
|
1371
|
+
printf 'updated_at: %s\n' "$(now_iso)"
|
|
1372
|
+
printf 'elapsed: %s\n' "${total_dur:-0s}"
|
|
1373
|
+
printf 'pr_number: %s\n' "${PR_NUMBER:-}"
|
|
1374
|
+
printf 'progress_comment_id: %s\n' "${PROGRESS_COMMENT_ID:-}"
|
|
1375
|
+
printf 'stages:\n'
|
|
1376
|
+
printf '%s' "${stages_yaml}"
|
|
1377
|
+
printf -- '---\n\n'
|
|
1378
|
+
printf '## Log\n'
|
|
1379
|
+
printf '%s\n' "$LOG_ENTRIES"
|
|
1380
|
+
} >> "$STATE_FILE"
|
|
1308
1381
|
}
|
|
1309
1382
|
|
|
1310
1383
|
resume_state() {
|
|
@@ -2127,22 +2200,9 @@ stage_build() {
|
|
|
2127
2200
|
memory_context=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "build" 2>/dev/null) || true
|
|
2128
2201
|
fi
|
|
2129
2202
|
|
|
2130
|
-
# Build enriched goal with
|
|
2131
|
-
local enriched_goal
|
|
2132
|
-
|
|
2133
|
-
enriched_goal="$GOAL
|
|
2134
|
-
|
|
2135
|
-
Implementation plan (follow this exactly):
|
|
2136
|
-
$(cat "$plan_file")"
|
|
2137
|
-
fi
|
|
2138
|
-
|
|
2139
|
-
# Inject approved design document
|
|
2140
|
-
if [[ -s "$design_file" ]]; then
|
|
2141
|
-
enriched_goal="${enriched_goal}
|
|
2142
|
-
|
|
2143
|
-
Follow the approved design document:
|
|
2144
|
-
$(cat "$design_file")"
|
|
2145
|
-
fi
|
|
2203
|
+
# Build enriched goal with compact context (avoids prompt bloat)
|
|
2204
|
+
local enriched_goal
|
|
2205
|
+
enriched_goal=$(_pipeline_compact_goal "$GOAL" "$plan_file" "$design_file")
|
|
2146
2206
|
|
|
2147
2207
|
# Inject memory context
|
|
2148
2208
|
if [[ -n "$memory_context" ]]; then
|
|
@@ -2263,6 +2323,11 @@ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
|
|
|
2263
2323
|
[[ "$quality" == "true" ]] && loop_args+=(--quality-gates)
|
|
2264
2324
|
fi
|
|
2265
2325
|
|
|
2326
|
+
# Session restart capability
|
|
2327
|
+
[[ -n "${MAX_RESTARTS_OVERRIDE:-}" ]] && loop_args+=(--max-restarts "$MAX_RESTARTS_OVERRIDE")
|
|
2328
|
+
# Fast test mode
|
|
2329
|
+
[[ -n "${FAST_TEST_CMD_OVERRIDE:-}" ]] && loop_args+=(--fast-test-cmd "$FAST_TEST_CMD_OVERRIDE")
|
|
2330
|
+
|
|
2266
2331
|
# Definition of Done: use plan-extracted DoD if available
|
|
2267
2332
|
[[ -s "$dod_file" ]] && loop_args+=(--definition-of-done "$dod_file")
|
|
2268
2333
|
|
|
@@ -2279,7 +2344,23 @@ Coverage baseline: ${coverage_baseline}% — do not decrease coverage."
|
|
|
2279
2344
|
local _token_log="${ARTIFACTS_DIR}/.claude-tokens-build.log"
|
|
2280
2345
|
export PIPELINE_JOB_ID="${PIPELINE_NAME:-pipeline-$$}"
|
|
2281
2346
|
sw loop "${loop_args[@]}" < /dev/null 2>"$_token_log" || {
|
|
2347
|
+
local _loop_exit=$?
|
|
2282
2348
|
parse_claude_tokens "$_token_log"
|
|
2349
|
+
|
|
2350
|
+
# Detect context exhaustion from progress file
|
|
2351
|
+
local _progress_file="${PWD}/.claude/loop-logs/progress.md"
|
|
2352
|
+
if [[ -f "$_progress_file" ]]; then
|
|
2353
|
+
local _prog_tests
|
|
2354
|
+
_prog_tests=$(grep -oE 'Tests passing: (true|false)' "$_progress_file" 2>/dev/null | awk '{print $NF}' || echo "unknown")
|
|
2355
|
+
if [[ "$_prog_tests" != "true" ]]; then
|
|
2356
|
+
warn "Build loop exhausted with failing tests (context exhaustion)"
|
|
2357
|
+
emit_event "pipeline.context_exhaustion" "issue=${ISSUE_NUMBER:-0}" "stage=build"
|
|
2358
|
+
# Write flag for daemon retry logic
|
|
2359
|
+
mkdir -p "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
2360
|
+
echo "context_exhaustion" > "$ARTIFACTS_DIR/failure-reason.txt" 2>/dev/null || true
|
|
2361
|
+
fi
|
|
2362
|
+
fi
|
|
2363
|
+
|
|
2283
2364
|
error "Build loop failed"
|
|
2284
2365
|
return 1
|
|
2285
2366
|
}
|
|
@@ -2406,6 +2487,16 @@ ${test_summary}
|
|
|
2406
2487
|
</details>"
|
|
2407
2488
|
fi
|
|
2408
2489
|
|
|
2490
|
+
# Write coverage summary for pre-deploy gate
|
|
2491
|
+
local _cov_pct=0
|
|
2492
|
+
if [[ -f "$ARTIFACTS_DIR/test-results.log" ]]; then
|
|
2493
|
+
_cov_pct=$(grep -oE '[0-9]+%' "$ARTIFACTS_DIR/test-results.log" 2>/dev/null | head -1 | tr -d '%' || true)
|
|
2494
|
+
_cov_pct="${_cov_pct:-0}"
|
|
2495
|
+
fi
|
|
2496
|
+
local _cov_tmp
|
|
2497
|
+
_cov_tmp=$(mktemp "${ARTIFACTS_DIR}/test-coverage.json.tmp.XXXXXX")
|
|
2498
|
+
printf '{"coverage_pct":%d}' "${_cov_pct:-0}" > "$_cov_tmp" && mv "$_cov_tmp" "$ARTIFACTS_DIR/test-coverage.json" || rm -f "$_cov_tmp"
|
|
2499
|
+
|
|
2409
2500
|
log_stage "test" "Tests passed${coverage:+ (coverage: ${coverage}%)}"
|
|
2410
2501
|
}
|
|
2411
2502
|
|
|
@@ -3229,41 +3320,157 @@ stage_deploy() {
|
|
|
3229
3320
|
fi
|
|
3230
3321
|
fi
|
|
3231
3322
|
|
|
3232
|
-
#
|
|
3233
|
-
|
|
3234
|
-
|
|
3323
|
+
# ── Pre-deploy gates ──
|
|
3324
|
+
local pre_deploy_ci
|
|
3325
|
+
pre_deploy_ci=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_ci_status) // "true"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
3326
|
+
|
|
3327
|
+
if [[ "${pre_deploy_ci:-true}" == "true" && "${NO_GITHUB:-false}" != "true" && -n "${REPO_OWNER:-}" && -n "${REPO_NAME:-}" ]]; then
|
|
3328
|
+
info "Pre-deploy gate: checking CI status..."
|
|
3329
|
+
local ci_failures
|
|
3330
|
+
ci_failures=$(gh api "repos/${REPO_OWNER}/${REPO_NAME}/commits/${GIT_BRANCH:-HEAD}/check-runs" \
|
|
3331
|
+
--jq '[.check_runs[] | select(.conclusion != null and .conclusion != "success" and .conclusion != "skipped")] | length' 2>/dev/null || echo "0")
|
|
3332
|
+
if [[ "${ci_failures:-0}" -gt 0 ]]; then
|
|
3333
|
+
error "Pre-deploy gate FAILED: ${ci_failures} CI check(s) not passing"
|
|
3334
|
+
[[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: ${ci_failures} CI checks failing" 2>/dev/null || true
|
|
3335
|
+
return 1
|
|
3336
|
+
fi
|
|
3337
|
+
success "Pre-deploy gate: all CI checks passing"
|
|
3235
3338
|
fi
|
|
3236
3339
|
|
|
3237
|
-
|
|
3238
|
-
|
|
3239
|
-
|
|
3240
|
-
|
|
3241
|
-
|
|
3242
|
-
|
|
3243
|
-
|
|
3244
|
-
|
|
3245
|
-
fi
|
|
3340
|
+
local pre_deploy_min_cov
|
|
3341
|
+
pre_deploy_min_cov=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.pre_deploy_min_coverage) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
3342
|
+
if [[ -n "${pre_deploy_min_cov:-}" && "${pre_deploy_min_cov}" != "null" && -f "$ARTIFACTS_DIR/test-coverage.json" ]]; then
|
|
3343
|
+
local actual_cov
|
|
3344
|
+
actual_cov=$(jq -r '.coverage_pct // 0' "$ARTIFACTS_DIR/test-coverage.json" 2>/dev/null || echo "0")
|
|
3345
|
+
if [[ "${actual_cov:-0}" -lt "$pre_deploy_min_cov" ]]; then
|
|
3346
|
+
error "Pre-deploy gate FAILED: coverage ${actual_cov}% < required ${pre_deploy_min_cov}%"
|
|
3347
|
+
[[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Pre-deploy gate: coverage ${actual_cov}% below minimum ${pre_deploy_min_cov}%" 2>/dev/null || true
|
|
3246
3348
|
return 1
|
|
3247
|
-
|
|
3248
|
-
success "
|
|
3349
|
+
fi
|
|
3350
|
+
success "Pre-deploy gate: coverage ${actual_cov}% >= ${pre_deploy_min_cov}%"
|
|
3249
3351
|
fi
|
|
3250
3352
|
|
|
3251
|
-
|
|
3252
|
-
|
|
3253
|
-
|
|
3254
|
-
|
|
3255
|
-
|
|
3256
|
-
|
|
3257
|
-
|
|
3353
|
+
# Post deploy start to GitHub
|
|
3354
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
3355
|
+
gh_comment_issue "$ISSUE_NUMBER" "Deploy started"
|
|
3356
|
+
fi
|
|
3357
|
+
|
|
3358
|
+
# ── Deploy strategy ──
|
|
3359
|
+
local deploy_strategy
|
|
3360
|
+
deploy_strategy=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.deploy_strategy) // "direct"' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
3361
|
+
[[ "$deploy_strategy" == "null" ]] && deploy_strategy="direct"
|
|
3362
|
+
|
|
3363
|
+
local canary_cmd promote_cmd switch_cmd health_url deploy_log
|
|
3364
|
+
canary_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.canary_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
3365
|
+
[[ "$canary_cmd" == "null" ]] && canary_cmd=""
|
|
3366
|
+
promote_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.promote_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
3367
|
+
[[ "$promote_cmd" == "null" ]] && promote_cmd=""
|
|
3368
|
+
switch_cmd=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.switch_cmd) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
3369
|
+
[[ "$switch_cmd" == "null" ]] && switch_cmd=""
|
|
3370
|
+
health_url=$(jq -r --arg id "deploy" '(.stages[] | select(.id == $id) | .config.health_url) // ""' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
3371
|
+
[[ "$health_url" == "null" ]] && health_url=""
|
|
3372
|
+
deploy_log="$ARTIFACTS_DIR/deploy.log"
|
|
3373
|
+
|
|
3374
|
+
case "$deploy_strategy" in
|
|
3375
|
+
canary)
|
|
3376
|
+
info "Canary deployment strategy..."
|
|
3377
|
+
if [[ -z "$canary_cmd" ]]; then
|
|
3378
|
+
warn "No canary_cmd configured — falling back to direct"
|
|
3379
|
+
deploy_strategy="direct"
|
|
3380
|
+
else
|
|
3381
|
+
info "Deploying canary..."
|
|
3382
|
+
bash -c "$canary_cmd" >> "$deploy_log" 2>&1 || { error "Canary deploy failed"; return 1; }
|
|
3383
|
+
|
|
3384
|
+
if [[ -n "$health_url" ]]; then
|
|
3385
|
+
local canary_healthy=0
|
|
3386
|
+
local _chk
|
|
3387
|
+
for _chk in 1 2 3; do
|
|
3388
|
+
sleep 10
|
|
3389
|
+
local _status
|
|
3390
|
+
_status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
|
|
3391
|
+
if [[ "$_status" -ge 200 && "$_status" -lt 400 ]]; then
|
|
3392
|
+
canary_healthy=$((canary_healthy + 1))
|
|
3393
|
+
fi
|
|
3394
|
+
done
|
|
3395
|
+
if [[ "$canary_healthy" -lt 2 ]]; then
|
|
3396
|
+
error "Canary health check failed ($canary_healthy/3 passed) — rolling back"
|
|
3397
|
+
[[ -n "$rollback_cmd" ]] && bash -c "$rollback_cmd" 2>/dev/null || true
|
|
3398
|
+
return 1
|
|
3399
|
+
fi
|
|
3400
|
+
success "Canary healthy ($canary_healthy/3 checks passed)"
|
|
3401
|
+
fi
|
|
3402
|
+
|
|
3403
|
+
info "Promoting canary to full deployment..."
|
|
3404
|
+
if [[ -n "$promote_cmd" ]]; then
|
|
3405
|
+
bash -c "$promote_cmd" >> "$deploy_log" 2>&1 || { error "Promote failed"; return 1; }
|
|
3406
|
+
fi
|
|
3407
|
+
success "Canary promoted"
|
|
3258
3408
|
fi
|
|
3259
|
-
|
|
3260
|
-
|
|
3261
|
-
|
|
3262
|
-
|
|
3409
|
+
;;
|
|
3410
|
+
blue-green)
|
|
3411
|
+
info "Blue-green deployment strategy..."
|
|
3412
|
+
if [[ -z "$staging_cmd" || -z "$switch_cmd" ]]; then
|
|
3413
|
+
warn "Blue-green requires staging_cmd + switch_cmd — falling back to direct"
|
|
3414
|
+
deploy_strategy="direct"
|
|
3415
|
+
else
|
|
3416
|
+
info "Deploying to inactive environment..."
|
|
3417
|
+
bash -c "$staging_cmd" >> "$deploy_log" 2>&1 || { error "Blue-green staging failed"; return 1; }
|
|
3418
|
+
|
|
3419
|
+
if [[ -n "$health_url" ]]; then
|
|
3420
|
+
local bg_healthy=0
|
|
3421
|
+
local _chk
|
|
3422
|
+
for _chk in 1 2 3; do
|
|
3423
|
+
sleep 5
|
|
3424
|
+
local _status
|
|
3425
|
+
_status=$(curl -s -o /dev/null -w "%{http_code}" "$health_url" 2>/dev/null || echo "0")
|
|
3426
|
+
[[ "$_status" -ge 200 && "$_status" -lt 400 ]] && bg_healthy=$((bg_healthy + 1))
|
|
3427
|
+
done
|
|
3428
|
+
if [[ "$bg_healthy" -lt 2 ]]; then
|
|
3429
|
+
error "Blue-green health check failed — not switching"
|
|
3430
|
+
return 1
|
|
3431
|
+
fi
|
|
3432
|
+
fi
|
|
3433
|
+
|
|
3434
|
+
info "Switching traffic..."
|
|
3435
|
+
bash -c "$switch_cmd" >> "$deploy_log" 2>&1 || { error "Traffic switch failed"; return 1; }
|
|
3436
|
+
success "Blue-green switch complete"
|
|
3263
3437
|
fi
|
|
3264
|
-
|
|
3265
|
-
|
|
3266
|
-
|
|
3438
|
+
;;
|
|
3439
|
+
esac
|
|
3440
|
+
|
|
3441
|
+
# ── Direct deployment (default or fallback) ──
|
|
3442
|
+
if [[ "$deploy_strategy" == "direct" ]]; then
|
|
3443
|
+
if [[ -n "$staging_cmd" ]]; then
|
|
3444
|
+
info "Deploying to staging..."
|
|
3445
|
+
bash -c "$staging_cmd" > "$ARTIFACTS_DIR/deploy-staging.log" 2>&1 || {
|
|
3446
|
+
error "Staging deploy failed"
|
|
3447
|
+
[[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Staging deploy failed"
|
|
3448
|
+
# Mark GitHub deployment as failed
|
|
3449
|
+
if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
|
|
3450
|
+
gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Staging deploy failed" 2>/dev/null || true
|
|
3451
|
+
fi
|
|
3452
|
+
return 1
|
|
3453
|
+
}
|
|
3454
|
+
success "Staging deploy complete"
|
|
3455
|
+
fi
|
|
3456
|
+
|
|
3457
|
+
if [[ -n "$prod_cmd" ]]; then
|
|
3458
|
+
info "Deploying to production..."
|
|
3459
|
+
bash -c "$prod_cmd" > "$ARTIFACTS_DIR/deploy-prod.log" 2>&1 || {
|
|
3460
|
+
error "Production deploy failed"
|
|
3461
|
+
if [[ -n "$rollback_cmd" ]]; then
|
|
3462
|
+
warn "Rolling back..."
|
|
3463
|
+
bash -c "$rollback_cmd" 2>&1 || error "Rollback also failed!"
|
|
3464
|
+
fi
|
|
3465
|
+
[[ -n "$ISSUE_NUMBER" ]] && gh_comment_issue "$ISSUE_NUMBER" "Production deploy failed — rollback ${rollback_cmd:+attempted}"
|
|
3466
|
+
# Mark GitHub deployment as failed
|
|
3467
|
+
if [[ "${NO_GITHUB:-false}" != "true" ]] && type gh_deploy_pipeline_complete &>/dev/null 2>&1; then
|
|
3468
|
+
gh_deploy_pipeline_complete "$REPO_OWNER" "$REPO_NAME" "$gh_deploy_env" false "Production deploy failed" 2>/dev/null || true
|
|
3469
|
+
fi
|
|
3470
|
+
return 1
|
|
3471
|
+
}
|
|
3472
|
+
success "Production deploy complete"
|
|
3473
|
+
fi
|
|
3267
3474
|
fi
|
|
3268
3475
|
|
|
3269
3476
|
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
@@ -4449,13 +4656,1015 @@ run_dod_audit() {
|
|
|
4449
4656
|
return 0
|
|
4450
4657
|
}
|
|
4451
4658
|
|
|
4659
|
+
# ─── Intelligent Pipeline Orchestration ──────────────────────────────────────
|
|
4660
|
+
# AGI-like decision making: skip, classify, adapt, reassess, backtrack
|
|
4661
|
+
|
|
4662
|
+
# Global state for intelligence features
|
|
4663
|
+
PIPELINE_BACKTRACK_COUNT="${PIPELINE_BACKTRACK_COUNT:-0}"
|
|
4664
|
+
PIPELINE_MAX_BACKTRACKS=2
|
|
4665
|
+
PIPELINE_ADAPTIVE_COMPLEXITY=""
|
|
4666
|
+
|
|
4667
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
4668
|
+
# 1. Intelligent Stage Skipping
|
|
4669
|
+
# Evaluates whether a stage should be skipped based on triage score, complexity,
|
|
4670
|
+
# issue labels, and diff size. Called before each stage in run_pipeline().
|
|
4671
|
+
# Returns 0 if the stage SHOULD be skipped, 1 if it should run.
|
|
4672
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
4673
|
+
pipeline_should_skip_stage() {
|
|
4674
|
+
local stage_id="$1"
|
|
4675
|
+
local reason=""
|
|
4676
|
+
|
|
4677
|
+
# Never skip intake or build — they're always required
|
|
4678
|
+
case "$stage_id" in
|
|
4679
|
+
intake|build|test|pr|merge) return 1 ;;
|
|
4680
|
+
esac
|
|
4681
|
+
|
|
4682
|
+
# ── Signal 1: Triage score (from intelligence analysis) ──
|
|
4683
|
+
local triage_score="${INTELLIGENCE_COMPLEXITY:-0}"
|
|
4684
|
+
# Convert: high triage score (simple issue) means skip more stages
|
|
4685
|
+
# INTELLIGENCE_COMPLEXITY is 1-10 (1=simple, 10=complex)
|
|
4686
|
+
# Score >= 70 in daemon means simple → complexity 1-3
|
|
4687
|
+
local complexity="${INTELLIGENCE_COMPLEXITY:-5}"
|
|
4688
|
+
|
|
4689
|
+
# ── Signal 2: Issue labels ──
|
|
4690
|
+
local labels="${ISSUE_LABELS:-}"
|
|
4691
|
+
|
|
4692
|
+
# Documentation issues: skip test, review, compound_quality
|
|
4693
|
+
if echo ",$labels," | grep -qiE ',documentation,|,docs,|,typo,'; then
|
|
4694
|
+
case "$stage_id" in
|
|
4695
|
+
test|review|compound_quality)
|
|
4696
|
+
reason="label:documentation"
|
|
4697
|
+
;;
|
|
4698
|
+
esac
|
|
4699
|
+
fi
|
|
4700
|
+
|
|
4701
|
+
# Hotfix issues: skip plan, design, compound_quality
|
|
4702
|
+
if echo ",$labels," | grep -qiE ',hotfix,|,urgent,|,p0,'; then
|
|
4703
|
+
case "$stage_id" in
|
|
4704
|
+
plan|design|compound_quality)
|
|
4705
|
+
reason="label:hotfix"
|
|
4706
|
+
;;
|
|
4707
|
+
esac
|
|
4708
|
+
fi
|
|
4709
|
+
|
|
4710
|
+
# ── Signal 3: Intelligence complexity ──
|
|
4711
|
+
if [[ -z "$reason" && "$complexity" -gt 0 ]]; then
|
|
4712
|
+
# Complexity 1-2: very simple → skip design, compound_quality, review
|
|
4713
|
+
if [[ "$complexity" -le 2 ]]; then
|
|
4714
|
+
case "$stage_id" in
|
|
4715
|
+
design|compound_quality|review)
|
|
4716
|
+
reason="complexity:${complexity}/10"
|
|
4717
|
+
;;
|
|
4718
|
+
esac
|
|
4719
|
+
# Complexity 1-3: simple → skip design
|
|
4720
|
+
elif [[ "$complexity" -le 3 ]]; then
|
|
4721
|
+
case "$stage_id" in
|
|
4722
|
+
design)
|
|
4723
|
+
reason="complexity:${complexity}/10"
|
|
4724
|
+
;;
|
|
4725
|
+
esac
|
|
4726
|
+
fi
|
|
4727
|
+
fi
|
|
4728
|
+
|
|
4729
|
+
# ── Signal 4: Diff size (after build) ──
|
|
4730
|
+
if [[ -z "$reason" && "$stage_id" == "compound_quality" ]]; then
|
|
4731
|
+
local diff_lines=0
|
|
4732
|
+
local _skip_stat
|
|
4733
|
+
_skip_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
|
|
4734
|
+
if [[ -n "${_skip_stat:-}" ]]; then
|
|
4735
|
+
local _s_ins _s_del
|
|
4736
|
+
_s_ins=$(echo "$_skip_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
|
|
4737
|
+
_s_del=$(echo "$_skip_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
|
|
4738
|
+
diff_lines=$(( ${_s_ins:-0} + ${_s_del:-0} ))
|
|
4739
|
+
fi
|
|
4740
|
+
diff_lines="${diff_lines:-0}"
|
|
4741
|
+
if [[ "$diff_lines" -gt 0 && "$diff_lines" -lt 20 ]]; then
|
|
4742
|
+
reason="diff_size:${diff_lines}_lines"
|
|
4743
|
+
fi
|
|
4744
|
+
fi
|
|
4745
|
+
|
|
4746
|
+
# ── Signal 5: Mid-pipeline reassessment override ──
|
|
4747
|
+
if [[ -z "$reason" && -f "$ARTIFACTS_DIR/reassessment.json" ]]; then
|
|
4748
|
+
local skip_stages
|
|
4749
|
+
skip_stages=$(jq -r '.skip_stages // [] | .[]' "$ARTIFACTS_DIR/reassessment.json" 2>/dev/null || true)
|
|
4750
|
+
if echo "$skip_stages" | grep -qx "$stage_id" 2>/dev/null; then
|
|
4751
|
+
reason="reassessment:simpler_than_expected"
|
|
4752
|
+
fi
|
|
4753
|
+
fi
|
|
4754
|
+
|
|
4755
|
+
if [[ -n "$reason" ]]; then
|
|
4756
|
+
emit_event "intelligence.stage_skipped" \
|
|
4757
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
4758
|
+
"stage=$stage_id" \
|
|
4759
|
+
"reason=$reason" \
|
|
4760
|
+
"complexity=${complexity}" \
|
|
4761
|
+
"labels=${labels}"
|
|
4762
|
+
echo "$reason"
|
|
4763
|
+
return 0
|
|
4764
|
+
fi
|
|
4765
|
+
|
|
4766
|
+
return 1
|
|
4767
|
+
}
|
|
4768
|
+
|
|
4769
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
4770
|
+
# 2. Smart Finding Classification & Routing
|
|
4771
|
+
# Parses compound quality findings and classifies each as:
|
|
4772
|
+
# architecture, security, correctness, style
|
|
4773
|
+
# Returns JSON with classified findings and routing recommendations.
|
|
4774
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
4775
|
+
classify_quality_findings() {
|
|
4776
|
+
local findings_dir="$ARTIFACTS_DIR"
|
|
4777
|
+
local result_file="$ARTIFACTS_DIR/classified-findings.json"
|
|
4778
|
+
|
|
4779
|
+
# Initialize counters
|
|
4780
|
+
local arch_count=0 security_count=0 correctness_count=0 performance_count=0 testing_count=0 style_count=0
|
|
4781
|
+
|
|
4782
|
+
# Start building JSON array
|
|
4783
|
+
local findings_json="[]"
|
|
4784
|
+
|
|
4785
|
+
# ── Parse adversarial review ──
|
|
4786
|
+
if [[ -f "$findings_dir/adversarial-review.md" ]]; then
|
|
4787
|
+
local adv_content
|
|
4788
|
+
adv_content=$(cat "$findings_dir/adversarial-review.md" 2>/dev/null || true)
|
|
4789
|
+
|
|
4790
|
+
# Architecture findings: dependency violations, layer breaches, circular refs
|
|
4791
|
+
local arch_findings
|
|
4792
|
+
arch_findings=$(echo "$adv_content" | grep -ciE 'architect|layer.*violation|circular.*depend|coupling|abstraction|design.*flaw|separation.*concern' 2>/dev/null || true)
|
|
4793
|
+
arch_count=$((arch_count + ${arch_findings:-0}))
|
|
4794
|
+
|
|
4795
|
+
# Security findings
|
|
4796
|
+
local sec_findings
|
|
4797
|
+
sec_findings=$(echo "$adv_content" | grep -ciE 'security|vulnerab|injection|XSS|CSRF|auth.*bypass|privilege|sanitiz|escap' 2>/dev/null || true)
|
|
4798
|
+
security_count=$((security_count + ${sec_findings:-0}))
|
|
4799
|
+
|
|
4800
|
+
# Correctness findings: bugs, logic errors, edge cases
|
|
4801
|
+
local corr_findings
|
|
4802
|
+
corr_findings=$(echo "$adv_content" | grep -ciE '\*\*\[?(Critical|Bug|Error|critical|high)\]?\*\*|race.*condition|null.*pointer|off.*by.*one|edge.*case|undefined.*behav' 2>/dev/null || true)
|
|
4803
|
+
correctness_count=$((correctness_count + ${corr_findings:-0}))
|
|
4804
|
+
|
|
4805
|
+
# Performance findings
|
|
4806
|
+
local perf_findings
|
|
4807
|
+
perf_findings=$(echo "$adv_content" | grep -ciE 'latency|slow|memory leak|O\(n|N\+1|cache miss|performance|bottleneck|throughput' 2>/dev/null || true)
|
|
4808
|
+
performance_count=$((performance_count + ${perf_findings:-0}))
|
|
4809
|
+
|
|
4810
|
+
# Testing findings
|
|
4811
|
+
local test_findings
|
|
4812
|
+
test_findings=$(echo "$adv_content" | grep -ciE 'untested|missing test|no coverage|flaky|test gap|test missing|coverage gap' 2>/dev/null || true)
|
|
4813
|
+
testing_count=$((testing_count + ${test_findings:-0}))
|
|
4814
|
+
|
|
4815
|
+
# Style findings
|
|
4816
|
+
local style_findings
|
|
4817
|
+
style_findings=$(echo "$adv_content" | grep -ciE 'naming|convention|format|style|readabil|inconsisten|whitespace|comment' 2>/dev/null || true)
|
|
4818
|
+
style_count=$((style_count + ${style_findings:-0}))
|
|
4819
|
+
fi
|
|
4820
|
+
|
|
4821
|
+
# ── Parse architecture validation ──
|
|
4822
|
+
if [[ -f "$findings_dir/compound-architecture-validation.json" ]]; then
|
|
4823
|
+
local arch_json_count
|
|
4824
|
+
arch_json_count=$(jq '[.[] | select(.severity == "critical" or .severity == "high")] | length' "$findings_dir/compound-architecture-validation.json" 2>/dev/null || echo "0")
|
|
4825
|
+
arch_count=$((arch_count + ${arch_json_count:-0}))
|
|
4826
|
+
fi
|
|
4827
|
+
|
|
4828
|
+
# ── Parse security audit ──
|
|
4829
|
+
if [[ -f "$findings_dir/security-audit.log" ]]; then
|
|
4830
|
+
local sec_audit
|
|
4831
|
+
sec_audit=$(grep -ciE 'critical|high' "$findings_dir/security-audit.log" 2>/dev/null || true)
|
|
4832
|
+
security_count=$((security_count + ${sec_audit:-0}))
|
|
4833
|
+
fi
|
|
4834
|
+
|
|
4835
|
+
# ── Parse negative review ──
|
|
4836
|
+
if [[ -f "$findings_dir/negative-review.md" ]]; then
|
|
4837
|
+
local neg_corr
|
|
4838
|
+
neg_corr=$(grep -ciE '\[Critical\]|\[High\]' "$findings_dir/negative-review.md" 2>/dev/null || true)
|
|
4839
|
+
correctness_count=$((correctness_count + ${neg_corr:-0}))
|
|
4840
|
+
fi
|
|
4841
|
+
|
|
4842
|
+
# ── Determine routing ──
|
|
4843
|
+
# Priority order: security > architecture > correctness > performance > testing > style
|
|
4844
|
+
local route="correctness" # default
|
|
4845
|
+
local needs_backtrack=false
|
|
4846
|
+
local priority_findings=""
|
|
4847
|
+
|
|
4848
|
+
if [[ "$security_count" -gt 0 ]]; then
|
|
4849
|
+
route="security"
|
|
4850
|
+
priority_findings="security:${security_count}"
|
|
4851
|
+
fi
|
|
4852
|
+
|
|
4853
|
+
if [[ "$arch_count" -gt 0 ]]; then
|
|
4854
|
+
if [[ "$route" == "correctness" ]]; then
|
|
4855
|
+
route="architecture"
|
|
4856
|
+
needs_backtrack=true
|
|
4857
|
+
fi
|
|
4858
|
+
priority_findings="${priority_findings:+${priority_findings},}architecture:${arch_count}"
|
|
4859
|
+
fi
|
|
4860
|
+
|
|
4861
|
+
if [[ "$correctness_count" -gt 0 ]]; then
|
|
4862
|
+
priority_findings="${priority_findings:+${priority_findings},}correctness:${correctness_count}"
|
|
4863
|
+
fi
|
|
4864
|
+
|
|
4865
|
+
if [[ "$performance_count" -gt 0 ]]; then
|
|
4866
|
+
if [[ "$route" == "correctness" && "$correctness_count" -eq 0 ]]; then
|
|
4867
|
+
route="performance"
|
|
4868
|
+
fi
|
|
4869
|
+
priority_findings="${priority_findings:+${priority_findings},}performance:${performance_count}"
|
|
4870
|
+
fi
|
|
4871
|
+
|
|
4872
|
+
if [[ "$testing_count" -gt 0 ]]; then
|
|
4873
|
+
if [[ "$route" == "correctness" && "$correctness_count" -eq 0 && "$performance_count" -eq 0 ]]; then
|
|
4874
|
+
route="testing"
|
|
4875
|
+
fi
|
|
4876
|
+
priority_findings="${priority_findings:+${priority_findings},}testing:${testing_count}"
|
|
4877
|
+
fi
|
|
4878
|
+
|
|
4879
|
+
# Style findings don't affect routing or count toward failure threshold
|
|
4880
|
+
local total_blocking=$((arch_count + security_count + correctness_count + performance_count + testing_count))
|
|
4881
|
+
|
|
4882
|
+
# Write classified findings
|
|
4883
|
+
local tmp_findings
|
|
4884
|
+
tmp_findings="$(mktemp)"
|
|
4885
|
+
jq -n \
|
|
4886
|
+
--argjson arch "$arch_count" \
|
|
4887
|
+
--argjson security "$security_count" \
|
|
4888
|
+
--argjson correctness "$correctness_count" \
|
|
4889
|
+
--argjson performance "$performance_count" \
|
|
4890
|
+
--argjson testing "$testing_count" \
|
|
4891
|
+
--argjson style "$style_count" \
|
|
4892
|
+
--argjson total_blocking "$total_blocking" \
|
|
4893
|
+
--arg route "$route" \
|
|
4894
|
+
--argjson needs_backtrack "$needs_backtrack" \
|
|
4895
|
+
--arg priority "$priority_findings" \
|
|
4896
|
+
'{
|
|
4897
|
+
architecture: $arch,
|
|
4898
|
+
security: $security,
|
|
4899
|
+
correctness: $correctness,
|
|
4900
|
+
performance: $performance,
|
|
4901
|
+
testing: $testing,
|
|
4902
|
+
style: $style,
|
|
4903
|
+
total_blocking: $total_blocking,
|
|
4904
|
+
route: $route,
|
|
4905
|
+
needs_backtrack: $needs_backtrack,
|
|
4906
|
+
priority_findings: $priority
|
|
4907
|
+
}' > "$tmp_findings" 2>/dev/null && mv "$tmp_findings" "$result_file" || rm -f "$tmp_findings"
|
|
4908
|
+
|
|
4909
|
+
emit_event "intelligence.findings_classified" \
|
|
4910
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
4911
|
+
"architecture=$arch_count" \
|
|
4912
|
+
"security=$security_count" \
|
|
4913
|
+
"correctness=$correctness_count" \
|
|
4914
|
+
"performance=$performance_count" \
|
|
4915
|
+
"testing=$testing_count" \
|
|
4916
|
+
"style=$style_count" \
|
|
4917
|
+
"route=$route" \
|
|
4918
|
+
"needs_backtrack=$needs_backtrack"
|
|
4919
|
+
|
|
4920
|
+
echo "$route"
|
|
4921
|
+
}
|
|
4922
|
+
|
|
4923
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
4924
|
+
# 3. Adaptive Cycle Limits
|
|
4925
|
+
# Replaces hardcoded max_cycles with convergence-driven limits.
|
|
4926
|
+
# Takes the base limit, returns an adjusted limit based on:
|
|
4927
|
+
# - Learned iteration model
|
|
4928
|
+
# - Convergence/divergence signals
|
|
4929
|
+
# - Budget constraints
|
|
4930
|
+
# - Hard ceiling (2x template max)
|
|
4931
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
4932
|
+
pipeline_adaptive_cycles() {
  # Compute an adaptive cycle limit for an iterative pipeline stage.
  #
  # Arguments:
  #   $1 - base_limit: template-provided max cycles (hard ceiling = 2x this)
  #   $2 - context: model key, "compound_quality" (default) or "build_test"
  #   $3 - current_issue_count: issues found this cycle (default 0)
  #   $4 - prev_issue_count: issues found last cycle (-1 = no previous cycle)
  # Outputs:
  #   The adjusted cycle limit on stdout (0 means "stop now": budget exhausted).
  local base_limit="$1"
  local context="${2:-compound_quality}" # compound_quality or build_test
  local current_issue_count="${3:-0}"
  local prev_issue_count="${4:--1}"

  local adjusted="$base_limit"
  local hard_ceiling=$((base_limit * 2))

  # ── Learned iteration model ──
  # Prefer a cycle count learned from prior runs, bounded by the hard ceiling.
  local model_file="${HOME}/.shipwright/optimization/iteration-model.json"
  if [[ -f "$model_file" ]]; then
    local learned
    learned=$(jq -r --arg ctx "$context" '.[$ctx].recommended_cycles // 0' "$model_file" 2>/dev/null || echo "0")
    # Validate the model value is a non-negative integer before arithmetic
    # comparison: a float or garbage value would make [[ -gt ]] error out.
    if [[ "$learned" =~ ^[0-9]+$ && "$learned" -gt 0 && "$learned" -le "$hard_ceiling" ]]; then
      adjusted="$learned"
    fi
  fi

  # ── Convergence acceleration ──
  # If issue count dropped by at least half since the last cycle, extend the
  # limit by 1 (we're making progress). Only meaningful when a previous cycle
  # exists (prev_issue_count > 0).
  if [[ "$prev_issue_count" -gt 0 && "$current_issue_count" -ge 0 ]]; then
    local half_prev=$((prev_issue_count / 2))
    if [[ "$current_issue_count" -le "$half_prev" && "$current_issue_count" -gt 0 ]]; then
      # Rapid convergence — extend by 1 (never beyond the hard ceiling)
      local new_limit=$((adjusted + 1))
      if [[ "$new_limit" -le "$hard_ceiling" ]]; then
        adjusted="$new_limit"
        emit_event "intelligence.convergence_acceleration" \
          "issue=${ISSUE_NUMBER:-0}" \
          "context=$context" \
          "prev_issues=$prev_issue_count" \
          "current_issues=$current_issue_count" \
          "new_limit=$adjusted"
      fi
    fi

    # ── Divergence detection ──
    # If issue count increased, reduce remaining cycles (floor of 1).
    if [[ "$current_issue_count" -gt "$prev_issue_count" ]]; then
      local reduced=$((adjusted - 1))
      if [[ "$reduced" -ge 1 ]]; then
        adjusted="$reduced"
        emit_event "intelligence.divergence_detected" \
          "issue=${ISSUE_NUMBER:-0}" \
          "context=$context" \
          "prev_issues=$prev_issue_count" \
          "current_issues=$current_issue_count" \
          "new_limit=$adjusted"
      fi
    fi
  fi

  # ── Budget gate ──
  # Guarded expansions: IGNORE_BUDGET / SCRIPT_DIR may be unset when this is
  # sourced standalone; the bare "$IGNORE_BUDGET" form aborts under `set -u`.
  if [[ "${IGNORE_BUDGET:-}" != "true" ]] && [[ -x "${SCRIPT_DIR:-}/sw-cost.sh" ]]; then
    local budget_rc=0
    bash "${SCRIPT_DIR:-}/sw-cost.sh" check-budget 2>/dev/null || budget_rc=$?
    # Exit code 2 from check-budget means the budget is exhausted
    if [[ "$budget_rc" -eq 2 ]]; then
      # Budget exhausted — cap at current cycle
      adjusted=0
      emit_event "intelligence.budget_cap" \
        "issue=${ISSUE_NUMBER:-0}" \
        "context=$context"
    fi
  fi

  # ── Enforce hard ceiling ──
  if [[ "$adjusted" -gt "$hard_ceiling" ]]; then
    adjusted="$hard_ceiling"
  fi

  echo "$adjusted"
}
|
|
5005
|
+
|
|
5006
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5007
|
+
# 5. Intelligent Audit Selection
|
|
5008
|
+
# AI-driven audit selection — all audits enabled, intensity varies.
|
|
5009
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5010
|
+
pipeline_select_audits() {
  # AI-driven audit intensity selection for the compound_quality stage.
  #
  # Reads .config.audit_intensity from $PIPELINE_CONFIG; "off", "full" and
  # "lightweight" are explicit overrides, anything else falls through to
  # "auto" mode, which derives per-audit intensity from recent quality-score
  # history and the intelligence cache.
  # Outputs: a JSON object {adversarial, architecture, simulation, security, dod}
  #          on stdout, each value an intensity string.
  local audit_intensity
  # ${PIPELINE_CONFIG:-}: tolerate unset config under `set -u`; jq on an empty
  # path fails quietly and we fall back to "auto" below.
  audit_intensity=$(jq -r --arg id "compound_quality" \
    '(.stages[] | select(.id == $id) | .config.audit_intensity) // "auto"' \
    "${PIPELINE_CONFIG:-}" 2>/dev/null) || true
  [[ -z "$audit_intensity" || "$audit_intensity" == "null" ]] && audit_intensity="auto"

  # Short-circuit for explicit overrides
  case "$audit_intensity" in
    off)
      echo '{"adversarial":"off","architecture":"off","simulation":"off","security":"off","dod":"off"}'
      return 0
      ;;
    full|lightweight)
      jq -n --arg i "$audit_intensity" \
        '{adversarial:$i,architecture:$i,simulation:$i,security:$i,dod:$i}'
      return 0
      ;;
  esac

  # ── Auto mode: data-driven intensity ──
  local default_intensity="targeted"
  local security_intensity="targeted"

  # Read last 5 quality scores for this repo
  local quality_scores_file="${HOME}/.shipwright/optimization/quality-scores.jsonl"
  local repo_name
  repo_name=$(basename "${PROJECT_ROOT:-.}") || true
  if [[ -f "$quality_scores_file" ]]; then
    local recent_scores
    # -F --: match the repo name as a fixed string. The previous BRE match let
    # metacharacters in repo names (e.g. ".") cross-match other repos' lines.
    recent_scores=$(grep -F -- "\"repo\":\"${repo_name}\"" "$quality_scores_file" 2>/dev/null | tail -5) || true
    if [[ -n "$recent_scores" ]]; then
      # Any critical findings in recent history force a full security audit
      local has_critical
      has_critical=$(echo "$recent_scores" | jq -s '[.[].findings.critical // 0] | add' 2>/dev/null || echo "0")
      has_critical="${has_critical:-0}"
      if [[ "$has_critical" -gt 0 ]]; then
        security_intensity="full"
      fi

      # Average quality score drives the default intensity:
      # <60 → full everywhere, >80 → lightweight (security never downgraded
      # below a "full" earned from critical findings above)
      local avg_score
      avg_score=$(echo "$recent_scores" | jq -s 'if length > 0 then ([.[].quality_score] | add / length | floor) else 70 end' 2>/dev/null || echo "70")
      avg_score="${avg_score:-70}"

      if [[ "$avg_score" -lt 60 ]]; then
        default_intensity="full"
        security_intensity="full"
      elif [[ "$avg_score" -gt 80 ]]; then
        default_intensity="lightweight"
        [[ "$security_intensity" != "full" ]] && security_intensity="lightweight"
      fi
    fi
  fi

  # Intelligence cache: upgrade targeted→full for complex changes.
  # ${PROJECT_ROOT:-.} for consistency with repo_name above (set -u safety).
  local intel_cache="${PROJECT_ROOT:-.}/.claude/intelligence-cache.json"
  if [[ -f "$intel_cache" && "$default_intensity" == "targeted" ]]; then
    local complexity
    complexity=$(jq -r '.complexity // "medium"' "$intel_cache" 2>/dev/null || echo "medium")
    if [[ "$complexity" == "high" || "$complexity" == "very_high" ]]; then
      default_intensity="full"
      security_intensity="full"
    fi
  fi

  emit_event "pipeline.audit_selection" \
    "issue=${ISSUE_NUMBER:-0}" \
    "default_intensity=$default_intensity" \
    "security_intensity=$security_intensity" \
    "repo=$repo_name"

  jq -n \
    --arg adv "$default_intensity" \
    --arg arch "$default_intensity" \
    --arg sim "$default_intensity" \
    --arg sec "$security_intensity" \
    --arg dod "$default_intensity" \
    '{adversarial:$adv,architecture:$arch,simulation:$sim,security:$sec,dod:$dod}'
}
|
|
5090
|
+
|
|
5091
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5092
|
+
# 6. Definition of Done Verification
|
|
5093
|
+
# Strict DoD enforcement after compound quality completes.
|
|
5094
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5095
|
+
# Definition-of-Done verification against the diff from BASE_BRANCH to HEAD.
# Arguments: $1 - artifacts dir (default $ARTIFACTS_DIR)
# Writes:    $artifacts_dir/dod-verification.json
# Returns:   0 when pass rate >= 70%, 1 otherwise.
pipeline_verify_dod() {
  local artifacts_dir="${1:-$ARTIFACTS_DIR}"
  local checks_total=0 checks_passed=0
  local results=""  # NOTE(review): declared but never used below

  # 1. Test coverage: verify changed source files have test counterparts
  local changed_files
  changed_files=$(git diff --name-only "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
  local missing_tests=""
  local files_checked=0

  if [[ -n "$changed_files" ]]; then
    while IFS= read -r src_file; do
      [[ -z "$src_file" ]] && continue
      # Only check source code files
      case "$src_file" in
        *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.sh)
          # Skip test files themselves and config files
          case "$src_file" in
            *test*|*spec*|*__tests__*|*.config.*|*.d.ts) continue ;;
          esac
          # Each non-test source file is one check; it passes when a sibling
          # test file matching a conventional name exists on disk.
          files_checked=$((files_checked + 1))
          checks_total=$((checks_total + 1))
          # Check for corresponding test file
          local base_name dir_name ext
          base_name=$(basename "$src_file")
          dir_name=$(dirname "$src_file")
          ext="${base_name##*.}"
          local stem="${base_name%.*}"
          local test_found=false
          # Common test file patterns (JS/TS, Python, Go naming conventions)
          for pattern in \
            "${dir_name}/${stem}.test.${ext}" \
            "${dir_name}/${stem}.spec.${ext}" \
            "${dir_name}/__tests__/${stem}.test.${ext}" \
            "${dir_name}/${stem}-test.${ext}" \
            "${dir_name}/test_${stem}.${ext}" \
            "${dir_name}/${stem}_test.${ext}"; do
            if [[ -f "$pattern" ]]; then
              test_found=true
              break
            fi
          done
          if $test_found; then
            checks_passed=$((checks_passed + 1))
          else
            # Literal "\n" separator; expanded later via `echo -e`
            missing_tests="${missing_tests}${src_file}\n"
          fi
          ;;
      esac
    done <<EOF
$changed_files
EOF
  fi

  # 2. Test-added verification: if significant logic added, ensure tests were also added
  local logic_lines=0 test_lines=0
  if [[ -n "$changed_files" ]]; then
    local full_diff
    full_diff=$(git diff "${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
    if [[ -n "$full_diff" ]]; then
      # Count added lines matching source patterns (rough heuristic)
      logic_lines=$(echo "$full_diff" | grep -cE '^\+.*(function |class |if |for |while |return |export )' 2>/dev/null || true)
      logic_lines="${logic_lines:-0}"
      # Count added lines in test files
      test_lines=$(echo "$full_diff" | grep -cE '^\+.*(it\(|test\(|describe\(|expect\(|assert|def test_|func Test)' 2>/dev/null || true)
      test_lines="${test_lines:-0}"
    fi
  fi
  # This heuristic is always exactly one check: fails only when >20 logic
  # lines were added with zero detected test lines.
  checks_total=$((checks_total + 1))
  local test_ratio_passed=true
  if [[ "$logic_lines" -gt 20 && "$test_lines" -eq 0 ]]; then
    test_ratio_passed=false
    warn "DoD verification: ${logic_lines} logic lines added but no test lines detected"
  else
    checks_passed=$((checks_passed + 1))
  fi

  # 3. Behavioral verification: check DoD audit artifacts for evidence
  local dod_audit_file="$artifacts_dir/dod-audit.md"
  local dod_verified=0 dod_total_items=0
  if [[ -f "$dod_audit_file" ]]; then
    # Count items marked as passing ("- [x]" checkboxes).
    # NOTE(review): `\s` in ERE is a GNU grep extension — confirm behavior on
    # BSD grep if macOS support matters here.
    dod_total_items=$(grep -cE '^\s*-\s*\[x\]' "$dod_audit_file" 2>/dev/null || true)
    dod_total_items="${dod_total_items:-0}"
    local dod_failing
    # Unchecked items ("- [ ]") count toward the total but not the passes
    dod_failing=$(grep -cE '^\s*-\s*\[\s\]' "$dod_audit_file" 2>/dev/null || true)
    dod_failing="${dod_failing:-0}"
    dod_verified=$dod_total_items
    checks_total=$((checks_total + dod_total_items + ${dod_failing:-0}))
    checks_passed=$((checks_passed + dod_total_items))
  fi

  # Compute pass rate (integer percent; 100 when nothing was checked)
  local pass_rate=100
  if [[ "$checks_total" -gt 0 ]]; then
    pass_rate=$(( (checks_passed * 100) / checks_total ))
  fi

  # Write results atomically via mktemp + mv
  local tmp_result
  tmp_result=$(mktemp)
  jq -n \
    --argjson checks_total "$checks_total" \
    --argjson checks_passed "$checks_passed" \
    --argjson pass_rate "$pass_rate" \
    --argjson files_checked "$files_checked" \
    --arg missing_tests "$(echo -e "$missing_tests" | head -20)" \
    --argjson logic_lines "$logic_lines" \
    --argjson test_lines "$test_lines" \
    --argjson test_ratio_passed "$test_ratio_passed" \
    --argjson dod_verified "$dod_verified" \
    '{
      checks_total: $checks_total,
      checks_passed: $checks_passed,
      pass_rate: $pass_rate,
      files_checked: $files_checked,
      missing_tests: ($missing_tests | split("\n") | map(select(. != ""))),
      logic_lines: $logic_lines,
      test_lines: $test_lines,
      test_ratio_passed: $test_ratio_passed,
      dod_verified: $dod_verified
    }' > "$tmp_result" 2>/dev/null
  mv "$tmp_result" "$artifacts_dir/dod-verification.json"

  emit_event "pipeline.dod_verification" \
    "issue=${ISSUE_NUMBER:-0}" \
    "checks_total=$checks_total" \
    "checks_passed=$checks_passed" \
    "pass_rate=$pass_rate"

  # Fail if pass rate < 70%
  if [[ "$pass_rate" -lt 70 ]]; then
    warn "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
    return 1
  fi

  success "DoD verification: ${pass_rate}% pass rate (${checks_passed}/${checks_total} checks)"
  return 0
}
|
|
5235
|
+
|
|
5236
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5237
|
+
# 7. Source Code Security Scan
|
|
5238
|
+
# Grep-based vulnerability pattern matching on changed files.
|
|
5239
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5240
|
+
# Helper for pipeline_security_source_scan: append one finding per `grep -n`
# match line to the JSON array stored in $1, then echo the number of findings
# actually recorded.
# Arguments: $1 findings file, $2 scanned source file, $3 pattern id,
#            $4 severity, $5 description, $6 grep -n output (may be empty)
_sec_scan_append() {
  local findings_file="$1" src="$2" pattern_id="$3" severity="$4" description="$5" matches="$6"
  local added=0
  if [[ -z "$matches" ]]; then
    echo 0
    return 0
  fi
  local match line_num staged
  while IFS= read -r match; do
    [[ -z "$match" ]] && continue
    line_num="${match%%:*}"
    # Stage jq output in a separate temp file and mv only on success: the
    # previous implementation redirected jq straight onto the accumulator
    # file, so any jq failure truncated it and lost all earlier findings.
    staged=$(mktemp)
    if jq --arg f "$src" --arg l "$line_num" --arg p "$pattern_id" --arg s "$severity" --arg d "$description" \
        '. + [{"file":$f,"line":($l|tonumber),"pattern":$p,"severity":$s,"description":$d}]' \
        "$findings_file" > "$staged" 2>/dev/null; then
      mv "$staged" "$findings_file"
      added=$((added + 1))
    else
      rm -f "$staged"
    fi
  done <<<"$matches"
  echo "$added"
}

# Grep-based vulnerability pattern scan over files changed vs $1 (default
# BASE_BRANCH/main). Writes $ARTIFACTS_DIR/security-source-scan.json when
# ARTIFACTS_DIR is set and echoes the finding count.
# NOTE(review): the empty-diff path echoes "[]" while the normal path echoes a
# numeric count — preserved as-is; confirm which form callers expect.
pipeline_security_source_scan() {
  local base_branch="${1:-${BASE_BRANCH:-main}}"
  local findings="[]"
  local finding_count=0

  local changed_files
  changed_files=$(git diff --name-only "${base_branch}...HEAD" 2>/dev/null || true)
  [[ -z "$changed_files" ]] && { echo "[]"; return 0; }

  local tmp_findings
  tmp_findings=$(mktemp)
  echo "[]" > "$tmp_findings"

  local file matches n
  while IFS= read -r file; do
    [[ -z "$file" || ! -f "$file" ]] && continue
    # Only scan code files
    case "$file" in
      *.ts|*.js|*.tsx|*.jsx|*.py|*.go|*.rs|*.java|*.rb|*.php|*.sh) ;;
      *) continue ;;
    esac

    # SQL injection: interpolation/concatenation inside query-like calls
    matches=$(grep -nE '(query|execute|sql)\s*\(?\s*[`"'"'"']\s*.*\$\{|\.query\s*\(\s*[`"'"'"'].*\+' "$file" 2>/dev/null || true)
    n=$(_sec_scan_append "$tmp_findings" "$file" "sql_injection" "critical" "Potential SQL injection via string concatenation" "$matches")
    finding_count=$((finding_count + ${n:-0}))

    # XSS: unsafe DOM sinks
    matches=$(grep -nE 'innerHTML\s*=|document\.write\s*\(|dangerouslySetInnerHTML' "$file" 2>/dev/null || true)
    n=$(_sec_scan_append "$tmp_findings" "$file" "xss" "critical" "Potential XSS via unsafe DOM manipulation" "$matches")
    finding_count=$((finding_count + ${n:-0}))

    # Command injection: eval / shell-out primitives
    matches=$(grep -nE 'eval\s*\(|child_process|os\.system\s*\(|subprocess\.(call|run|Popen)\s*\(' "$file" 2>/dev/null || true)
    n=$(_sec_scan_append "$tmp_findings" "$file" "command_injection" "critical" "Potential command injection via unsafe execution" "$matches")
    finding_count=$((finding_count + ${n:-0}))

    # Hardcoded secrets: credential-looking assignments of literal strings
    matches=$(grep -nEi '(password|api_key|secret|token)\s*=\s*['"'"'"][A-Za-z0-9+/=]{8,}['"'"'"]' "$file" 2>/dev/null || true)
    n=$(_sec_scan_append "$tmp_findings" "$file" "hardcoded_secret" "critical" "Potential hardcoded secret or credential" "$matches")
    finding_count=$((finding_count + ${n:-0}))

    # Insecure crypto: MD5/SHA1 usage
    matches=$(grep -nE '(md5|MD5|sha1|SHA1)\s*\(' "$file" 2>/dev/null || true)
    n=$(_sec_scan_append "$tmp_findings" "$file" "insecure_crypto" "major" "Weak cryptographic function (consider SHA-256+)" "$matches")
    finding_count=$((finding_count + ${n:-0}))
  done <<<"$changed_files"

  # Write to artifacts and output
  findings=$(cat "$tmp_findings")
  rm -f "$tmp_findings"

  if [[ -n "${ARTIFACTS_DIR:-}" ]]; then
    local tmp_scan
    tmp_scan=$(mktemp)
    echo "$findings" > "$tmp_scan"
    mv "$tmp_scan" "$ARTIFACTS_DIR/security-source-scan.json"
  fi

  emit_event "pipeline.security_source_scan" \
    "issue=${ISSUE_NUMBER:-0}" \
    "findings=$finding_count"

  echo "$finding_count"
}
|
|
5371
|
+
|
|
5372
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5373
|
+
# 8. Quality Score Recording
|
|
5374
|
+
# Writes quality scores to JSONL for learning.
|
|
5375
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5376
|
+
# Append one quality-score record to the cross-run learning log
# (~/.shipwright/optimization/quality-scores.jsonl).
# Arguments:
#   $1 quality score, $2 critical count, $3 major count, $4 minor count,
#   $5 DoD pass rate, $6 comma-separated list of audits run (all optional)
pipeline_record_quality_score() {
  local score="${1:-0}"
  local crit_count="${2:-0}"
  local major_count="${3:-0}"
  local minor_count="${4:-0}"
  local dod_rate="${5:-0}"
  local audits_csv="${6:-}"

  local store_dir="${HOME}/.shipwright/optimization"
  local store_file="${store_dir}/quality-scores.jsonl"
  mkdir -p "$store_dir"

  local project_name
  project_name=$(basename "${PROJECT_ROOT:-.}") || true

  # Build the record in a staging file, then append, so a failed jq run
  # appends nothing rather than a partial line.
  local staged_record
  staged_record=$(mktemp)
  jq -n \
    --arg repo "$project_name" \
    --arg issue "${ISSUE_NUMBER:-0}" \
    --arg ts "$(now_iso)" \
    --argjson score "$score" \
    --argjson critical "$crit_count" \
    --argjson major "$major_count" \
    --argjson minor "$minor_count" \
    --argjson dod "$dod_rate" \
    --arg template "${PIPELINE_NAME:-standard}" \
    --arg audits "$audits_csv" \
    '{
      repo: $repo,
      issue: ($issue | tonumber),
      timestamp: $ts,
      quality_score: $score,
      findings: {critical: $critical, major: $major, minor: $minor},
      dod_pass_rate: $dod,
      template: $template,
      audits_run: ($audits | split(",") | map(select(. != "")))
    }' > "$staged_record" 2>/dev/null

  cat "$staged_record" >> "$store_file"
  rm -f "$staged_record"

  emit_event "pipeline.quality_score_recorded" \
    "issue=${ISSUE_NUMBER:-0}" \
    "quality_score=$score" \
    "critical=$crit_count" \
    "major=$major_count" \
    "minor=$minor_count"
}
|
|
5425
|
+
|
|
5426
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5427
|
+
# 4. Mid-Pipeline Complexity Re-evaluation
|
|
5428
|
+
# After build+test completes, compares actual effort to initial estimate.
|
|
5429
|
+
# Updates skip recommendations and model routing for remaining stages.
|
|
5430
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5431
|
+
#######################################
# Mid-pipeline complexity re-evaluation: after build+test, compare actual
# effort (diff size, self-heal cycles) against the initial estimate and
# derive skip recommendations for the remaining stages.
# Globals:
#   INTELLIGENCE_COMPLEXITY, BASE_BRANCH, SELF_HEAL_COUNT, ARTIFACTS_DIR,
#   ISSUE_NUMBER, HOME (read); PIPELINE_ADAPTIVE_COMPLEXITY (written)
# Outputs:
#   Writes $ARTIFACTS_DIR/reassessment.json, appends to the learning JSONL,
#   emits intelligence.reassessment, and prints the assessment to stdout.
# Returns:
#   0
#######################################
pipeline_reassess_complexity() {
  local initial_complexity="${INTELLIGENCE_COMPLEXITY:-5}"
  local reassessment_file="$ARTIFACTS_DIR/reassessment.json"

  # ── Gather actual metrics ──
  local files_changed=0 lines_changed=0 first_try_pass=false self_heal_cycles=0

  files_changed=$(git diff "${BASE_BRANCH:-main}...HEAD" --name-only 2>/dev/null | wc -l | tr -d ' ') || files_changed=0
  files_changed="${files_changed:-0}"

  # Count lines changed (insertions + deletions) without pipefail issues
  lines_changed=0
  local _diff_stat
  _diff_stat=$(git diff "${BASE_BRANCH:-main}...HEAD" --stat 2>/dev/null | tail -1) || true
  if [[ -n "${_diff_stat:-}" ]]; then
    local _ins _del
    _ins=$(echo "$_diff_stat" | grep -oE '[0-9]+ insertion' | grep -oE '[0-9]+') || true
    _del=$(echo "$_diff_stat" | grep -oE '[0-9]+ deletion' | grep -oE '[0-9]+') || true
    lines_changed=$(( ${_ins:-0} + ${_del:-0} ))
  fi

  self_heal_cycles="${SELF_HEAL_COUNT:-0}"
  if [[ "$self_heal_cycles" -eq 0 ]]; then
    first_try_pass=true
  fi

  # ── Compare to expectations ──
  # BUGFIX: the most extreme conditions must be tested FIRST. Previously the
  # broad "<50 lines" branch preceded the narrow "<20 lines" branch (and
  # ">500" preceded ">1000"), making much_simpler/much_harder unreachable.
  local actual_complexity="$initial_complexity"
  local assessment="as_expected"
  local skip_stages="[]"

  # Much simpler than expected: tiny diff, tests passed first try
  if [[ "$lines_changed" -lt 20 && "$first_try_pass" == "true" && "$files_changed" -lt 3 ]]; then
    actual_complexity=1
    assessment="much_simpler"
    skip_stages='["compound_quality","review"]'
  # Simpler than expected: small diff, tests passed first try
  elif [[ "$lines_changed" -lt 50 && "$first_try_pass" == "true" && "$files_changed" -lt 5 ]]; then
    actual_complexity=$((initial_complexity > 2 ? initial_complexity - 2 : 1))
    assessment="simpler_than_expected"
    # Mark compound_quality as skippable, simplify review
    skip_stages='["compound_quality"]'
  # Much harder: very large diff or many self-heal cycles
  elif [[ "$lines_changed" -gt 1000 || "$self_heal_cycles" -gt 4 ]]; then
    actual_complexity=10
    assessment="much_harder"
    skip_stages='[]'
  # Harder than expected: large diff, multiple self-heal cycles
  elif [[ "$lines_changed" -gt 500 || "$self_heal_cycles" -gt 2 ]]; then
    actual_complexity=$((initial_complexity < 9 ? initial_complexity + 2 : 10))
    assessment="harder_than_expected"
    # Ensure compound_quality runs, possibly upgrade model
    skip_stages='[]'
  fi

  # ── Write reassessment ──
  local tmp_reassess
  tmp_reassess="$(mktemp)"
  jq -n \
    --argjson initial "$initial_complexity" \
    --argjson actual "$actual_complexity" \
    --arg assessment "$assessment" \
    --argjson files_changed "$files_changed" \
    --argjson lines_changed "$lines_changed" \
    --argjson self_heal_cycles "$self_heal_cycles" \
    --argjson first_try "$first_try_pass" \
    --argjson skip_stages "$skip_stages" \
    '{
      initial_complexity: $initial,
      actual_complexity: $actual,
      assessment: $assessment,
      files_changed: $files_changed,
      lines_changed: $lines_changed,
      self_heal_cycles: $self_heal_cycles,
      first_try_pass: $first_try,
      skip_stages: $skip_stages
    }' > "$tmp_reassess" 2>/dev/null && mv "$tmp_reassess" "$reassessment_file" || rm -f "$tmp_reassess"

  # Update global complexity for downstream stages
  PIPELINE_ADAPTIVE_COMPLEXITY="$actual_complexity"

  emit_event "intelligence.reassessment" \
    "issue=${ISSUE_NUMBER:-0}" \
    "initial=$initial_complexity" \
    "actual=$actual_complexity" \
    "assessment=$assessment" \
    "files=$files_changed" \
    "lines=$lines_changed" \
    "self_heals=$self_heal_cycles"

  # ── Store for learning ──
  local learning_file="${HOME}/.shipwright/optimization/complexity-actuals.jsonl"
  mkdir -p "${HOME}/.shipwright/optimization" 2>/dev/null || true
  echo "{\"issue\":\"${ISSUE_NUMBER:-0}\",\"initial\":$initial_complexity,\"actual\":$actual_complexity,\"files\":$files_changed,\"lines\":$lines_changed,\"ts\":\"$(now_iso)\"}" \
    >> "$learning_file" 2>/dev/null || true

  echo "$assessment"
}
|
|
5529
|
+
|
|
5530
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5531
|
+
# 5. Backtracking Support
|
|
5532
|
+
# When compound_quality detects architecture-level problems, backtracks to
|
|
5533
|
+
# the design stage instead of just feeding findings to the build loop.
|
|
5534
|
+
# Limited to 1 backtrack per pipeline run to prevent infinite loops.
|
|
5535
|
+
# ──────────────────────────────────────────────────────────────────────────────
|
|
5536
|
+
#######################################
# Backtrack the pipeline to an earlier stage (typically design) when
# architecture-level problems are detected, then re-run build→test.
# Capped at PIPELINE_MAX_BACKTRACKS per run to prevent infinite loops.
# Globals:
#   PIPELINE_BACKTRACK_COUNT (read/written), PIPELINE_MAX_BACKTRACKS,
#   ARTIFACTS_DIR, ISSUE_NUMBER, BOLD, RESET (read); GOAL (temporarily
#   augmented, restored on every exit path)
# Arguments:
#   $1 - stage id to backtrack to
#   $2 - reason label (default: architecture_violation)
# Returns:
#   0 when the re-run and subsequent build→test succeed, 1 otherwise
#######################################
pipeline_backtrack_to_stage() {
  local target_stage="$1"
  local reason="${2:-architecture_violation}"

  # Guard clause: refuse once the per-run backtrack budget is exhausted.
  if [[ "$PIPELINE_BACKTRACK_COUNT" -ge "$PIPELINE_MAX_BACKTRACKS" ]]; then
    warn "Max backtracks ($PIPELINE_MAX_BACKTRACKS) reached — cannot backtrack to $target_stage"
    emit_event "intelligence.backtrack_blocked" \
      "issue=${ISSUE_NUMBER:-0}" \
      "target=$target_stage" \
      "reason=max_backtracks_reached" \
      "count=$PIPELINE_BACKTRACK_COUNT"
    return 1
  fi

  PIPELINE_BACKTRACK_COUNT=$((PIPELINE_BACKTRACK_COUNT + 1))

  info "Backtracking to ${BOLD}${target_stage}${RESET} stage (reason: ${reason})"

  emit_event "intelligence.backtrack" \
    "issue=${ISSUE_NUMBER:-0}" \
    "target=$target_stage" \
    "reason=$reason"

  # Assemble architecture context from prior audit artifacts so the
  # redesign prompt knows exactly what to fix.
  local violations=""
  local arch_report="$ARTIFACTS_DIR/compound-architecture-validation.json"
  if [[ -f "$arch_report" ]]; then
    violations=$(jq -r '[.[] | select(.severity == "critical" or .severity == "high") | .message // .description // ""] | join("\n")' \
      "$arch_report" 2>/dev/null || true)
  fi
  local adv_report="$ARTIFACTS_DIR/adversarial-review.md"
  if [[ -f "$adv_report" ]]; then
    local adv_hits
    adv_hits=$(grep -iE 'architect|layer.*violation|circular.*depend|coupling|design.*flaw' \
      "$adv_report" 2>/dev/null || true)
    if [[ -n "$adv_hits" ]]; then
      violations="${violations}
${adv_hits}"
    fi
  fi

  # Reset the target stage plus the build/test stages that follow it.
  local st
  for st in "$target_stage" "build" "test"; do
    set_stage_status "$st" "pending"
  done

  # Temporarily fold the violations into GOAL for the re-run.
  local saved_goal="$GOAL"
  if [[ -n "$violations" ]]; then
    GOAL="$GOAL

IMPORTANT — Architecture violations were detected during quality review. Redesign to fix:
$violations

Update the design to address these violations, then rebuild."
  fi

  # Re-run the target stage with the augmented goal.
  info "Re-running ${BOLD}${target_stage}${RESET} with architecture context..."
  if ! "stage_${target_stage}" 2>/dev/null; then
    GOAL="$saved_goal"
    error "Backtrack: ${target_stage} re-run failed"
    return 1
  fi
  mark_stage_complete "$target_stage"
  success "Backtrack: ${target_stage} re-run complete"

  # Re-run build→test; GOAL is restored regardless of outcome.
  info "Re-running build→test after backtracked ${target_stage}..."
  local rc=0
  if self_healing_build_test; then
    success "Backtrack: build→test passed after ${target_stage} redesign"
  else
    error "Backtrack: build→test failed after ${target_stage} redesign"
    rc=1
  fi
  GOAL="$saved_goal"
  return "$rc"
}
|
|
5615
|
+
|
|
4452
5616
|
compound_rebuild_with_feedback() {
|
|
4453
5617
|
local feedback_file="$ARTIFACTS_DIR/quality-feedback.md"
|
|
4454
5618
|
|
|
4455
|
-
#
|
|
5619
|
+
# ── Intelligence: classify findings and determine routing ──
|
|
5620
|
+
local route="correctness"
|
|
5621
|
+
route=$(classify_quality_findings 2>/dev/null) || route="correctness"
|
|
5622
|
+
|
|
5623
|
+
# ── Build structured findings JSON alongside markdown ──
|
|
5624
|
+
local structured_findings="[]"
|
|
5625
|
+
local s_total_critical=0 s_total_major=0 s_total_minor=0
|
|
5626
|
+
|
|
5627
|
+
if [[ -f "$ARTIFACTS_DIR/classified-findings.json" ]]; then
|
|
5628
|
+
s_total_critical=$(jq -r '.security // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
|
|
5629
|
+
s_total_major=$(jq -r '.correctness // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
|
|
5630
|
+
s_total_minor=$(jq -r '.style // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
|
|
5631
|
+
fi
|
|
5632
|
+
|
|
5633
|
+
local tmp_qf
|
|
5634
|
+
tmp_qf="$(mktemp)"
|
|
5635
|
+
jq -n \
|
|
5636
|
+
--arg route "$route" \
|
|
5637
|
+
--argjson total_critical "$s_total_critical" \
|
|
5638
|
+
--argjson total_major "$s_total_major" \
|
|
5639
|
+
--argjson total_minor "$s_total_minor" \
|
|
5640
|
+
'{route: $route, total_critical: $total_critical, total_major: $total_major, total_minor: $total_minor}' \
|
|
5641
|
+
> "$tmp_qf" 2>/dev/null && mv "$tmp_qf" "$ARTIFACTS_DIR/quality-findings.json" || rm -f "$tmp_qf"
|
|
5642
|
+
|
|
5643
|
+
# ── Architecture route: backtrack to design instead of rebuild ──
|
|
5644
|
+
if [[ "$route" == "architecture" ]]; then
|
|
5645
|
+
info "Architecture-level findings detected — attempting backtrack to design"
|
|
5646
|
+
if pipeline_backtrack_to_stage "design" "architecture_violation" 2>/dev/null; then
|
|
5647
|
+
return 0
|
|
5648
|
+
fi
|
|
5649
|
+
# Backtrack failed or already used — fall through to standard rebuild
|
|
5650
|
+
warn "Backtrack unavailable — falling through to standard rebuild"
|
|
5651
|
+
fi
|
|
5652
|
+
|
|
5653
|
+
# Collect all findings (prioritized by classification)
|
|
4456
5654
|
{
|
|
4457
5655
|
echo "# Quality Feedback — Issues to Fix"
|
|
4458
5656
|
echo ""
|
|
5657
|
+
|
|
5658
|
+
# Security findings first (highest priority)
|
|
5659
|
+
if [[ "$route" == "security" || -f "$ARTIFACTS_DIR/security-audit.log" ]] && grep -qiE 'critical|high' "$ARTIFACTS_DIR/security-audit.log" 2>/dev/null; then
|
|
5660
|
+
echo "## 🔴 PRIORITY: Security Findings (fix these first)"
|
|
5661
|
+
cat "$ARTIFACTS_DIR/security-audit.log"
|
|
5662
|
+
echo ""
|
|
5663
|
+
echo "Security issues MUST be resolved before any other changes."
|
|
5664
|
+
echo ""
|
|
5665
|
+
fi
|
|
5666
|
+
|
|
5667
|
+
# Correctness findings
|
|
4459
5668
|
if [[ -f "$ARTIFACTS_DIR/adversarial-review.md" ]]; then
|
|
4460
5669
|
echo "## Adversarial Review Findings"
|
|
4461
5670
|
cat "$ARTIFACTS_DIR/adversarial-review.md"
|
|
@@ -4471,16 +5680,22 @@ compound_rebuild_with_feedback() {
|
|
|
4471
5680
|
grep "❌" "$ARTIFACTS_DIR/dod-audit.md" 2>/dev/null || true
|
|
4472
5681
|
echo ""
|
|
4473
5682
|
fi
|
|
4474
|
-
if [[ -f "$ARTIFACTS_DIR/security-audit.log" ]] && grep -qiE 'critical|high' "$ARTIFACTS_DIR/security-audit.log" 2>/dev/null; then
|
|
4475
|
-
echo "## Security Audit Findings"
|
|
4476
|
-
cat "$ARTIFACTS_DIR/security-audit.log"
|
|
4477
|
-
echo ""
|
|
4478
|
-
fi
|
|
4479
5683
|
if [[ -f "$ARTIFACTS_DIR/api-compat.log" ]] && grep -qi 'BREAKING' "$ARTIFACTS_DIR/api-compat.log" 2>/dev/null; then
|
|
4480
5684
|
echo "## API Breaking Changes"
|
|
4481
5685
|
cat "$ARTIFACTS_DIR/api-compat.log"
|
|
4482
5686
|
echo ""
|
|
4483
5687
|
fi
|
|
5688
|
+
|
|
5689
|
+
# Style findings last (deprioritized, informational)
|
|
5690
|
+
if [[ -f "$ARTIFACTS_DIR/classified-findings.json" ]]; then
|
|
5691
|
+
local style_count
|
|
5692
|
+
style_count=$(jq -r '.style // 0' "$ARTIFACTS_DIR/classified-findings.json" 2>/dev/null || echo "0")
|
|
5693
|
+
if [[ "$style_count" -gt 0 ]]; then
|
|
5694
|
+
echo "## Style Notes (non-blocking, address if time permits)"
|
|
5695
|
+
echo "${style_count} style suggestions found. These do not block the build."
|
|
5696
|
+
echo ""
|
|
5697
|
+
fi
|
|
5698
|
+
fi
|
|
4484
5699
|
} > "$feedback_file"
|
|
4485
5700
|
|
|
4486
5701
|
# Validate feedback file has actual content
|
|
@@ -4494,19 +5709,42 @@ compound_rebuild_with_feedback() {
|
|
|
4494
5709
|
set_stage_status "test" "pending"
|
|
4495
5710
|
set_stage_status "review" "pending"
|
|
4496
5711
|
|
|
4497
|
-
# Augment GOAL with quality feedback
|
|
5712
|
+
# Augment GOAL with quality feedback (route-specific instructions)
|
|
4498
5713
|
local original_goal="$GOAL"
|
|
4499
5714
|
local feedback_content
|
|
4500
5715
|
feedback_content=$(cat "$feedback_file")
|
|
5716
|
+
|
|
5717
|
+
local route_instruction=""
|
|
5718
|
+
case "$route" in
|
|
5719
|
+
security)
|
|
5720
|
+
route_instruction="SECURITY PRIORITY: Fix all security vulnerabilities FIRST, then address other issues. Security issues are BLOCKING."
|
|
5721
|
+
;;
|
|
5722
|
+
performance)
|
|
5723
|
+
route_instruction="PERFORMANCE PRIORITY: Address performance regressions and optimizations. Check for N+1 queries, memory leaks, and algorithmic complexity."
|
|
5724
|
+
;;
|
|
5725
|
+
testing)
|
|
5726
|
+
route_instruction="TESTING PRIORITY: Add missing test coverage and fix flaky tests before addressing other issues."
|
|
5727
|
+
;;
|
|
5728
|
+
correctness)
|
|
5729
|
+
route_instruction="Fix every issue listed above while keeping all existing functionality working."
|
|
5730
|
+
;;
|
|
5731
|
+
architecture)
|
|
5732
|
+
route_instruction="ARCHITECTURE: Fix structural issues. Check dependency direction, layer boundaries, and separation of concerns."
|
|
5733
|
+
;;
|
|
5734
|
+
*)
|
|
5735
|
+
route_instruction="Fix every issue listed above while keeping all existing functionality working."
|
|
5736
|
+
;;
|
|
5737
|
+
esac
|
|
5738
|
+
|
|
4501
5739
|
GOAL="$GOAL
|
|
4502
5740
|
|
|
4503
|
-
IMPORTANT — Compound quality review found issues. Fix ALL of these:
|
|
5741
|
+
IMPORTANT — Compound quality review found issues (route: ${route}). Fix ALL of these:
|
|
4504
5742
|
$feedback_content
|
|
4505
5743
|
|
|
4506
|
-
|
|
5744
|
+
${route_instruction}"
|
|
4507
5745
|
|
|
4508
5746
|
# Re-run self-healing build→test
|
|
4509
|
-
info "Rebuilding with quality feedback..."
|
|
5747
|
+
info "Rebuilding with quality feedback (route: ${route})..."
|
|
4510
5748
|
if self_healing_build_test; then
|
|
4511
5749
|
GOAL="$original_goal"
|
|
4512
5750
|
return 0
|
|
@@ -4530,6 +5768,51 @@ stage_compound_quality() {
|
|
|
4530
5768
|
strict_quality=$(jq -r --arg id "compound_quality" '(.stages[] | select(.id == $id) | .config.strict_quality) // false' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
4531
5769
|
[[ -z "$strict_quality" || "$strict_quality" == "null" ]] && strict_quality="false"
|
|
4532
5770
|
|
|
5771
|
+
# Intelligent audit selection
|
|
5772
|
+
local audit_plan='{"adversarial":"targeted","architecture":"targeted","simulation":"targeted","security":"targeted","dod":"targeted"}'
|
|
5773
|
+
if type pipeline_select_audits &>/dev/null 2>&1; then
|
|
5774
|
+
local _selected
|
|
5775
|
+
_selected=$(pipeline_select_audits 2>/dev/null) || true
|
|
5776
|
+
if [[ -n "$_selected" && "$_selected" != "null" ]]; then
|
|
5777
|
+
audit_plan="$_selected"
|
|
5778
|
+
info "Audit plan: $(echo "$audit_plan" | jq -c '.' 2>/dev/null || echo "$audit_plan")"
|
|
5779
|
+
fi
|
|
5780
|
+
fi
|
|
5781
|
+
|
|
5782
|
+
# Track findings for quality score
|
|
5783
|
+
local total_critical=0 total_major=0 total_minor=0
|
|
5784
|
+
local audits_run_list=""
|
|
5785
|
+
|
|
5786
|
+
# Vitals-driven adaptive cycle limit (preferred)
|
|
5787
|
+
local base_max_cycles="$max_cycles"
|
|
5788
|
+
if type pipeline_adaptive_limit &>/dev/null 2>&1; then
|
|
5789
|
+
local _cq_vitals=""
|
|
5790
|
+
if type pipeline_compute_vitals &>/dev/null 2>&1; then
|
|
5791
|
+
_cq_vitals=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
|
|
5792
|
+
fi
|
|
5793
|
+
local vitals_cq_limit
|
|
5794
|
+
vitals_cq_limit=$(pipeline_adaptive_limit "compound_quality" "$_cq_vitals" 2>/dev/null) || true
|
|
5795
|
+
if [[ -n "$vitals_cq_limit" && "$vitals_cq_limit" =~ ^[0-9]+$ && "$vitals_cq_limit" -gt 0 ]]; then
|
|
5796
|
+
max_cycles="$vitals_cq_limit"
|
|
5797
|
+
if [[ "$max_cycles" != "$base_max_cycles" ]]; then
|
|
5798
|
+
info "Vitals-driven cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
|
|
5799
|
+
fi
|
|
5800
|
+
fi
|
|
5801
|
+
else
|
|
5802
|
+
# Fallback: adaptive cycle limits from optimization data
|
|
5803
|
+
local _cq_iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
|
|
5804
|
+
if [[ -f "$_cq_iter_model" ]]; then
|
|
5805
|
+
local adaptive_limit
|
|
5806
|
+
adaptive_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "0" "-1" 2>/dev/null) || true
|
|
5807
|
+
if [[ -n "$adaptive_limit" && "$adaptive_limit" =~ ^[0-9]+$ && "$adaptive_limit" -gt 0 ]]; then
|
|
5808
|
+
max_cycles="$adaptive_limit"
|
|
5809
|
+
if [[ "$max_cycles" != "$base_max_cycles" ]]; then
|
|
5810
|
+
info "Adaptive cycles: ${base_max_cycles} → ${max_cycles} (compound_quality)"
|
|
5811
|
+
fi
|
|
5812
|
+
fi
|
|
5813
|
+
fi
|
|
5814
|
+
fi
|
|
5815
|
+
|
|
4533
5816
|
# Convergence tracking
|
|
4534
5817
|
local prev_issue_count=-1
|
|
4535
5818
|
|
|
@@ -4546,9 +5829,12 @@ stage_compound_quality() {
|
|
|
4546
5829
|
fi
|
|
4547
5830
|
|
|
4548
5831
|
# 1. Adversarial Review
|
|
4549
|
-
|
|
5832
|
+
local _adv_intensity
|
|
5833
|
+
_adv_intensity=$(echo "$audit_plan" | jq -r '.adversarial // "targeted"' 2>/dev/null || echo "targeted")
|
|
5834
|
+
if [[ "$adversarial_enabled" == "true" && "$_adv_intensity" != "off" ]]; then
|
|
4550
5835
|
echo ""
|
|
4551
|
-
info "Running adversarial review..."
|
|
5836
|
+
info "Running adversarial review (${_adv_intensity})..."
|
|
5837
|
+
audits_run_list="${audits_run_list:+${audits_run_list},}adversarial"
|
|
4552
5838
|
if ! run_adversarial_review; then
|
|
4553
5839
|
all_passed=false
|
|
4554
5840
|
fi
|
|
@@ -4653,14 +5939,36 @@ stage_compound_quality() {
|
|
|
4653
5939
|
fi
|
|
4654
5940
|
|
|
4655
5941
|
# 6. DoD Audit
|
|
4656
|
-
|
|
5942
|
+
local _dod_intensity
|
|
5943
|
+
_dod_intensity=$(echo "$audit_plan" | jq -r '.dod // "targeted"' 2>/dev/null || echo "targeted")
|
|
5944
|
+
if [[ "$dod_enabled" == "true" && "$_dod_intensity" != "off" ]]; then
|
|
4657
5945
|
echo ""
|
|
4658
|
-
info "Running Definition of Done audit..."
|
|
5946
|
+
info "Running Definition of Done audit (${_dod_intensity})..."
|
|
5947
|
+
audits_run_list="${audits_run_list:+${audits_run_list},}dod"
|
|
4659
5948
|
if ! run_dod_audit; then
|
|
4660
5949
|
all_passed=false
|
|
4661
5950
|
fi
|
|
4662
5951
|
fi
|
|
4663
5952
|
|
|
5953
|
+
# 6b. Security Source Scan
|
|
5954
|
+
local _sec_intensity
|
|
5955
|
+
_sec_intensity=$(echo "$audit_plan" | jq -r '.security // "targeted"' 2>/dev/null || echo "targeted")
|
|
5956
|
+
if [[ "$_sec_intensity" != "off" ]]; then
|
|
5957
|
+
echo ""
|
|
5958
|
+
info "Running security source scan (${_sec_intensity})..."
|
|
5959
|
+
audits_run_list="${audits_run_list:+${audits_run_list},}security"
|
|
5960
|
+
local sec_finding_count=0
|
|
5961
|
+
sec_finding_count=$(pipeline_security_source_scan 2>/dev/null) || true
|
|
5962
|
+
sec_finding_count="${sec_finding_count:-0}"
|
|
5963
|
+
if [[ "$sec_finding_count" -gt 0 ]]; then
|
|
5964
|
+
warn "Security source scan: ${sec_finding_count} finding(s)"
|
|
5965
|
+
total_critical=$((total_critical + sec_finding_count))
|
|
5966
|
+
all_passed=false
|
|
5967
|
+
else
|
|
5968
|
+
success "Security source scan: clean"
|
|
5969
|
+
fi
|
|
5970
|
+
fi
|
|
5971
|
+
|
|
4664
5972
|
# 7. Multi-dimensional quality checks
|
|
4665
5973
|
echo ""
|
|
4666
5974
|
info "Running multi-dimensional quality checks..."
|
|
@@ -4743,6 +6051,17 @@ All quality checks clean:
|
|
|
4743
6051
|
fi
|
|
4744
6052
|
|
|
4745
6053
|
log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
|
|
6054
|
+
|
|
6055
|
+
# DoD verification on successful pass
|
|
6056
|
+
local _dod_pass_rate=100
|
|
6057
|
+
if type pipeline_verify_dod &>/dev/null 2>&1; then
|
|
6058
|
+
pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
6059
|
+
if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
|
|
6060
|
+
_dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
|
|
6061
|
+
fi
|
|
6062
|
+
fi
|
|
6063
|
+
|
|
6064
|
+
pipeline_record_quality_score 100 0 0 0 "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
|
|
4746
6065
|
return 0
|
|
4747
6066
|
fi
|
|
4748
6067
|
|
|
@@ -4754,6 +6073,17 @@ All quality checks clean:
|
|
|
4754
6073
|
fi
|
|
4755
6074
|
|
|
4756
6075
|
log_stage "compound_quality" "Passed on cycle ${cycle}/${max_cycles}"
|
|
6076
|
+
|
|
6077
|
+
# DoD verification on successful pass
|
|
6078
|
+
local _dod_pass_rate=100
|
|
6079
|
+
if type pipeline_verify_dod &>/dev/null 2>&1; then
|
|
6080
|
+
pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
6081
|
+
if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
|
|
6082
|
+
_dod_pass_rate=$(jq -r '.pass_rate // 100' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "100")
|
|
6083
|
+
fi
|
|
6084
|
+
fi
|
|
6085
|
+
|
|
6086
|
+
pipeline_record_quality_score 95 0 "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
|
|
4757
6087
|
return 0
|
|
4758
6088
|
fi
|
|
4759
6089
|
|
|
@@ -4776,6 +6106,16 @@ All quality checks clean:
|
|
|
4776
6106
|
|
|
4777
6107
|
info "Convergence: ${current_issue_count} critical/high issues remaining"
|
|
4778
6108
|
|
|
6109
|
+
# Intelligence: re-evaluate adaptive cycle limit based on convergence (only after first cycle)
|
|
6110
|
+
if [[ "$prev_issue_count" -ge 0 ]]; then
|
|
6111
|
+
local updated_limit
|
|
6112
|
+
updated_limit=$(pipeline_adaptive_cycles "$max_cycles" "compound_quality" "$current_issue_count" "$prev_issue_count" 2>/dev/null) || true
|
|
6113
|
+
if [[ -n "$updated_limit" && "$updated_limit" =~ ^[0-9]+$ && "$updated_limit" -gt 0 && "$updated_limit" != "$max_cycles" ]]; then
|
|
6114
|
+
info "Adaptive cycles: ${max_cycles} → ${updated_limit} (convergence signal)"
|
|
6115
|
+
max_cycles="$updated_limit"
|
|
6116
|
+
fi
|
|
6117
|
+
fi
|
|
6118
|
+
|
|
4779
6119
|
# Not all passed — rebuild if we have cycles left
|
|
4780
6120
|
if [[ "$cycle" -lt "$max_cycles" ]]; then
|
|
4781
6121
|
warn "Quality checks failed — rebuilding with feedback (cycle $((cycle + 1))/${max_cycles})"
|
|
@@ -4792,7 +6132,101 @@ All quality checks clean:
|
|
|
4792
6132
|
fi
|
|
4793
6133
|
done
|
|
4794
6134
|
|
|
4795
|
-
#
|
|
6135
|
+
# ── Quality Score Computation ──
|
|
6136
|
+
# Starting score: 100, deductions based on findings
|
|
6137
|
+
local quality_score=100
|
|
6138
|
+
|
|
6139
|
+
# Count findings from artifact files
|
|
6140
|
+
if [[ -f "$ARTIFACTS_DIR/security-source-scan.json" ]]; then
|
|
6141
|
+
local _sec_critical
|
|
6142
|
+
_sec_critical=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
|
|
6143
|
+
local _sec_major
|
|
6144
|
+
_sec_major=$(jq '[.[] | select(.severity == "major")] | length' "$ARTIFACTS_DIR/security-source-scan.json" 2>/dev/null || echo "0")
|
|
6145
|
+
total_critical=$((total_critical + ${_sec_critical:-0}))
|
|
6146
|
+
total_major=$((total_major + ${_sec_major:-0}))
|
|
6147
|
+
fi
|
|
6148
|
+
if [[ -f "$ARTIFACTS_DIR/adversarial-review.json" ]]; then
|
|
6149
|
+
local _adv_crit
|
|
6150
|
+
_adv_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
|
|
6151
|
+
local _adv_major
|
|
6152
|
+
_adv_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
|
|
6153
|
+
local _adv_minor
|
|
6154
|
+
_adv_minor=$(jq '[.[] | select(.severity == "low" or .severity == "minor")] | length' "$ARTIFACTS_DIR/adversarial-review.json" 2>/dev/null || echo "0")
|
|
6155
|
+
total_critical=$((total_critical + ${_adv_crit:-0}))
|
|
6156
|
+
total_major=$((total_major + ${_adv_major:-0}))
|
|
6157
|
+
total_minor=$((total_minor + ${_adv_minor:-0}))
|
|
6158
|
+
fi
|
|
6159
|
+
if [[ -f "$ARTIFACTS_DIR/compound-architecture-validation.json" ]]; then
|
|
6160
|
+
local _arch_crit
|
|
6161
|
+
_arch_crit=$(jq '[.[] | select(.severity == "critical")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
|
|
6162
|
+
local _arch_major
|
|
6163
|
+
_arch_major=$(jq '[.[] | select(.severity == "high" or .severity == "major")] | length' "$ARTIFACTS_DIR/compound-architecture-validation.json" 2>/dev/null || echo "0")
|
|
6164
|
+
total_major=$((total_major + ${_arch_crit:-0} + ${_arch_major:-0}))
|
|
6165
|
+
fi
|
|
6166
|
+
|
|
6167
|
+
# Apply deductions
|
|
6168
|
+
quality_score=$((quality_score - (total_critical * 20) - (total_major * 10) - (total_minor * 2)))
|
|
6169
|
+
[[ "$quality_score" -lt 0 ]] && quality_score=0
|
|
6170
|
+
|
|
6171
|
+
# DoD verification
|
|
6172
|
+
local _dod_pass_rate=0
|
|
6173
|
+
if type pipeline_verify_dod &>/dev/null 2>&1; then
|
|
6174
|
+
pipeline_verify_dod "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
6175
|
+
if [[ -f "$ARTIFACTS_DIR/dod-verification.json" ]]; then
|
|
6176
|
+
_dod_pass_rate=$(jq -r '.pass_rate // 0' "$ARTIFACTS_DIR/dod-verification.json" 2>/dev/null || echo "0")
|
|
6177
|
+
fi
|
|
6178
|
+
fi
|
|
6179
|
+
|
|
6180
|
+
# Record quality score
|
|
6181
|
+
pipeline_record_quality_score "$quality_score" "$total_critical" "$total_major" "$total_minor" "$_dod_pass_rate" "$audits_run_list" 2>/dev/null || true
|
|
6182
|
+
|
|
6183
|
+
# ── Quality Gate ──
|
|
6184
|
+
local compound_quality_blocking
|
|
6185
|
+
compound_quality_blocking=$(jq -r --arg id "compound_quality" \
|
|
6186
|
+
'(.stages[] | select(.id == $id) | .config.compound_quality_blocking) // true' \
|
|
6187
|
+
"$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
6188
|
+
[[ -z "$compound_quality_blocking" || "$compound_quality_blocking" == "null" ]] && compound_quality_blocking="true"
|
|
6189
|
+
|
|
6190
|
+
if [[ "$quality_score" -lt 60 && "$compound_quality_blocking" == "true" ]]; then
|
|
6191
|
+
emit_event "pipeline.quality_gate_failed" \
|
|
6192
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
6193
|
+
"quality_score=$quality_score" \
|
|
6194
|
+
"critical=$total_critical" \
|
|
6195
|
+
"major=$total_major"
|
|
6196
|
+
|
|
6197
|
+
error "Quality gate FAILED: score ${quality_score}/100 (critical: ${total_critical}, major: ${total_major}, minor: ${total_minor})"
|
|
6198
|
+
|
|
6199
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
6200
|
+
gh_comment_issue "$ISSUE_NUMBER" "❌ **Quality gate failed** — score ${quality_score}/100
|
|
6201
|
+
|
|
6202
|
+
| Finding Type | Count | Deduction |
|
|
6203
|
+
|---|---|---|
|
|
6204
|
+
| Critical | ${total_critical} | -$((total_critical * 20)) |
|
|
6205
|
+
| Major | ${total_major} | -$((total_major * 10)) |
|
|
6206
|
+
| Minor | ${total_minor} | -$((total_minor * 2)) |
|
|
6207
|
+
|
|
6208
|
+
DoD pass rate: ${_dod_pass_rate}%
|
|
6209
|
+
Quality issues remain after ${max_cycles} cycles. Check artifacts for details." 2>/dev/null || true
|
|
6210
|
+
fi
|
|
6211
|
+
|
|
6212
|
+
log_stage "compound_quality" "Quality gate failed: ${quality_score}/100 after ${max_cycles} cycles"
|
|
6213
|
+
return 1
|
|
6214
|
+
fi
|
|
6215
|
+
|
|
6216
|
+
# Exhausted all cycles but quality score is above threshold
|
|
6217
|
+
if [[ "$quality_score" -ge 60 ]]; then
|
|
6218
|
+
warn "Compound quality: score ${quality_score}/100 after ${max_cycles} cycles (above threshold, proceeding)"
|
|
6219
|
+
|
|
6220
|
+
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
6221
|
+
gh_comment_issue "$ISSUE_NUMBER" "⚠️ **Compound quality** — score ${quality_score}/100 after ${max_cycles} cycles
|
|
6222
|
+
|
|
6223
|
+
Some issues remain but quality score is above threshold. Proceeding." 2>/dev/null || true
|
|
6224
|
+
fi
|
|
6225
|
+
|
|
6226
|
+
log_stage "compound_quality" "Passed with score ${quality_score}/100 after ${max_cycles} cycles"
|
|
6227
|
+
return 0
|
|
6228
|
+
fi
|
|
6229
|
+
|
|
4796
6230
|
error "Compound quality exhausted after ${max_cycles} cycles"
|
|
4797
6231
|
|
|
4798
6232
|
if [[ -n "$ISSUE_NUMBER" ]]; then
|
|
@@ -4982,8 +6416,25 @@ self_healing_build_test() {
|
|
|
4982
6416
|
local prev_error_sig="" consecutive_same_error=0
|
|
4983
6417
|
local prev_fail_count=0 zero_convergence_streak=0
|
|
4984
6418
|
|
|
4985
|
-
#
|
|
4986
|
-
if type
|
|
6419
|
+
# Vitals-driven adaptive limit (preferred over static BUILD_TEST_RETRIES)
|
|
6420
|
+
if type pipeline_adaptive_limit &>/dev/null 2>&1; then
|
|
6421
|
+
local _vitals_json=""
|
|
6422
|
+
if type pipeline_compute_vitals &>/dev/null 2>&1; then
|
|
6423
|
+
_vitals_json=$(pipeline_compute_vitals "$STATE_FILE" "$ARTIFACTS_DIR" "${ISSUE_NUMBER:-}" 2>/dev/null) || true
|
|
6424
|
+
fi
|
|
6425
|
+
local vitals_limit
|
|
6426
|
+
vitals_limit=$(pipeline_adaptive_limit "build_test" "$_vitals_json" 2>/dev/null) || true
|
|
6427
|
+
if [[ -n "$vitals_limit" && "$vitals_limit" =~ ^[0-9]+$ && "$vitals_limit" -gt 0 ]]; then
|
|
6428
|
+
info "Vitals-driven build-test limit: ${max_cycles} → ${vitals_limit}"
|
|
6429
|
+
max_cycles="$vitals_limit"
|
|
6430
|
+
emit_event "vitals.adaptive_limit" \
|
|
6431
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
6432
|
+
"context=build_test" \
|
|
6433
|
+
"original=$BUILD_TEST_RETRIES" \
|
|
6434
|
+
"vitals_limit=$vitals_limit"
|
|
6435
|
+
fi
|
|
6436
|
+
# Fallback: intelligence-based adaptive limits
|
|
6437
|
+
elif type composer_estimate_iterations &>/dev/null 2>&1; then
|
|
4987
6438
|
local estimated
|
|
4988
6439
|
estimated=$(composer_estimate_iterations \
|
|
4989
6440
|
"${INTELLIGENCE_ANALYSIS:-{}}" \
|
|
@@ -4997,6 +6448,19 @@ self_healing_build_test() {
|
|
|
4997
6448
|
fi
|
|
4998
6449
|
fi
|
|
4999
6450
|
|
|
6451
|
+
# Fallback: adaptive cycle limits from optimization data
|
|
6452
|
+
if [[ "$max_cycles" == "$BUILD_TEST_RETRIES" ]]; then
|
|
6453
|
+
local _iter_model="${HOME}/.shipwright/optimization/iteration-model.json"
|
|
6454
|
+
if [[ -f "$_iter_model" ]]; then
|
|
6455
|
+
local adaptive_bt_limit
|
|
6456
|
+
adaptive_bt_limit=$(pipeline_adaptive_cycles "$max_cycles" "build_test" "0" "-1" 2>/dev/null) || true
|
|
6457
|
+
if [[ -n "$adaptive_bt_limit" && "$adaptive_bt_limit" =~ ^[0-9]+$ && "$adaptive_bt_limit" -gt 0 && "$adaptive_bt_limit" != "$max_cycles" ]]; then
|
|
6458
|
+
info "Adaptive build-test cycles: ${max_cycles} → ${adaptive_bt_limit}"
|
|
6459
|
+
max_cycles="$adaptive_bt_limit"
|
|
6460
|
+
fi
|
|
6461
|
+
fi
|
|
6462
|
+
fi
|
|
6463
|
+
|
|
5000
6464
|
while [[ "$cycle" -le "$max_cycles" ]]; do
|
|
5001
6465
|
cycle=$((cycle + 1))
|
|
5002
6466
|
|
|
@@ -5022,11 +6486,27 @@ self_healing_build_test() {
|
|
|
5022
6486
|
|
|
5023
6487
|
# Inject error context on retry cycles
|
|
5024
6488
|
if [[ "$cycle" -gt 1 && -n "$last_test_error" ]]; then
|
|
6489
|
+
# Query memory for known fixes
|
|
6490
|
+
local _memory_fix=""
|
|
6491
|
+
if type memory_closed_loop_inject &>/dev/null 2>&1; then
|
|
6492
|
+
local _error_sig_short
|
|
6493
|
+
_error_sig_short=$(echo "$last_test_error" | head -3 || echo "")
|
|
6494
|
+
_memory_fix=$(memory_closed_loop_inject "$_error_sig_short" 2>/dev/null) || true
|
|
6495
|
+
fi
|
|
6496
|
+
|
|
6497
|
+
local memory_prefix=""
|
|
6498
|
+
if [[ -n "$_memory_fix" ]]; then
|
|
6499
|
+
info "Memory suggests fix: $(echo "$_memory_fix" | head -1)"
|
|
6500
|
+
memory_prefix="KNOWN FIX (from past success): ${_memory_fix}
|
|
6501
|
+
|
|
6502
|
+
"
|
|
6503
|
+
fi
|
|
6504
|
+
|
|
5025
6505
|
# Temporarily augment the goal with error context
|
|
5026
6506
|
local original_goal="$GOAL"
|
|
5027
6507
|
GOAL="$GOAL
|
|
5028
6508
|
|
|
5029
|
-
IMPORTANT — Previous build attempt failed tests. Fix these errors:
|
|
6509
|
+
${memory_prefix}IMPORTANT — Previous build attempt failed tests. Fix these errors:
|
|
5030
6510
|
$last_test_error
|
|
5031
6511
|
|
|
5032
6512
|
Focus on fixing the failing tests while keeping all passing tests working."
|
|
@@ -5039,6 +6519,16 @@ Focus on fixing the failing tests while keeping all passing tests working."
|
|
|
5039
6519
|
local timing
|
|
5040
6520
|
timing=$(get_stage_timing "build")
|
|
5041
6521
|
success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
|
|
6522
|
+
if type pipeline_emit_progress_snapshot &>/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
6523
|
+
local _diff_count
|
|
6524
|
+
_diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
|
|
6525
|
+
local _snap_files _snap_error
|
|
6526
|
+
_snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
|
|
6527
|
+
_snap_files="${_snap_files:-0}"
|
|
6528
|
+
_snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
|
|
6529
|
+
_snap_error="${_snap_error:-}"
|
|
6530
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
|
|
6531
|
+
fi
|
|
5042
6532
|
else
|
|
5043
6533
|
mark_stage_failed "build"
|
|
5044
6534
|
GOAL="$original_goal"
|
|
@@ -5054,6 +6544,16 @@ Focus on fixing the failing tests while keeping all passing tests working."
|
|
|
5054
6544
|
local timing
|
|
5055
6545
|
timing=$(get_stage_timing "build")
|
|
5056
6546
|
success "Stage ${BOLD}build${RESET} complete ${DIM}(${timing})${RESET}"
|
|
6547
|
+
if type pipeline_emit_progress_snapshot &>/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
6548
|
+
local _diff_count
|
|
6549
|
+
_diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
|
|
6550
|
+
local _snap_files _snap_error
|
|
6551
|
+
_snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
|
|
6552
|
+
_snap_files="${_snap_files:-0}"
|
|
6553
|
+
_snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
|
|
6554
|
+
_snap_error="${_snap_error:-}"
|
|
6555
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-build}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
|
|
6556
|
+
fi
|
|
5057
6557
|
else
|
|
5058
6558
|
mark_stage_failed "build"
|
|
5059
6559
|
return 1
|
|
@@ -5075,6 +6575,16 @@ Focus on fixing the failing tests while keeping all passing tests working."
|
|
|
5075
6575
|
emit_event "convergence.tests_passed" \
|
|
5076
6576
|
"issue=${ISSUE_NUMBER:-0}" \
|
|
5077
6577
|
"cycle=$cycle"
|
|
6578
|
+
if type pipeline_emit_progress_snapshot &>/dev/null 2>&1 && [[ -n "${ISSUE_NUMBER:-}" ]]; then
|
|
6579
|
+
local _diff_count
|
|
6580
|
+
_diff_count=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1) || true
|
|
6581
|
+
local _snap_files _snap_error
|
|
6582
|
+
_snap_files=$(git diff --stat HEAD~1 2>/dev/null | tail -1 | grep -oE '[0-9]+' | head -1 || true)
|
|
6583
|
+
_snap_files="${_snap_files:-0}"
|
|
6584
|
+
_snap_error=$(tail -1 "$ARTIFACTS_DIR/error-log.jsonl" 2>/dev/null | jq -r '.error // ""' 2>/dev/null || true)
|
|
6585
|
+
_snap_error="${_snap_error:-}"
|
|
6586
|
+
pipeline_emit_progress_snapshot "${ISSUE_NUMBER}" "${CURRENT_STAGE_ID:-test}" "${cycle:-0}" "${_diff_count:-0}" "${_snap_files}" "${_snap_error}" 2>/dev/null || true
|
|
6587
|
+
fi
|
|
5078
6588
|
return 0 # Tests passed!
|
|
5079
6589
|
fi
|
|
5080
6590
|
|
|
@@ -5243,6 +6753,16 @@ run_pipeline() {
|
|
|
5243
6753
|
continue
|
|
5244
6754
|
fi
|
|
5245
6755
|
|
|
6756
|
+
# Intelligence: evaluate whether to skip this stage
|
|
6757
|
+
local skip_reason=""
|
|
6758
|
+
skip_reason=$(pipeline_should_skip_stage "$id" 2>/dev/null) || true
|
|
6759
|
+
if [[ -n "$skip_reason" ]]; then
|
|
6760
|
+
echo -e " ${DIM}○ ${id} — skipped (intelligence: ${skip_reason})${RESET}"
|
|
6761
|
+
set_stage_status "$id" "complete"
|
|
6762
|
+
completed=$((completed + 1))
|
|
6763
|
+
continue
|
|
6764
|
+
fi
|
|
6765
|
+
|
|
5246
6766
|
local stage_status
|
|
5247
6767
|
stage_status=$(get_stage_status "$id")
|
|
5248
6768
|
if [[ "$stage_status" == "complete" ]]; then
|
|
@@ -5278,6 +6798,13 @@ run_pipeline() {
|
|
|
5278
6798
|
|
|
5279
6799
|
if self_healing_build_test; then
|
|
5280
6800
|
completed=$((completed + 2)) # Both build and test
|
|
6801
|
+
|
|
6802
|
+
# Intelligence: reassess complexity after build+test
|
|
6803
|
+
local reassessment
|
|
6804
|
+
reassessment=$(pipeline_reassess_complexity 2>/dev/null) || true
|
|
6805
|
+
if [[ -n "$reassessment" && "$reassessment" != "as_expected" ]]; then
|
|
6806
|
+
info "Complexity reassessment: ${reassessment}"
|
|
6807
|
+
fi
|
|
5281
6808
|
else
|
|
5282
6809
|
update_status "failed" "test"
|
|
5283
6810
|
error "Pipeline failed: build→test self-healing exhausted"
|
|
@@ -5418,6 +6945,8 @@ run_pipeline() {
|
|
|
5418
6945
|
emit_event "stage.failed" "issue=${ISSUE_NUMBER:-0}" "stage=$id" "duration_s=$stage_dur_s"
|
|
5419
6946
|
# Log model used for prediction feedback
|
|
5420
6947
|
echo "${id}|${stage_model_used}|false" >> "${ARTIFACTS_DIR}/model-routing.log"
|
|
6948
|
+
# Cancel any remaining in_progress check runs
|
|
6949
|
+
pipeline_cancel_check_runs 2>/dev/null || true
|
|
5421
6950
|
return 1
|
|
5422
6951
|
fi
|
|
5423
6952
|
done 3<<< "$stages"
|
|
@@ -5456,6 +6985,81 @@ run_pipeline() {
|
|
|
5456
6985
|
body=$(gh_build_progress_body)
|
|
5457
6986
|
gh_update_progress "$body"
|
|
5458
6987
|
fi
|
|
6988
|
+
|
|
6989
|
+
# Post-completion cleanup
|
|
6990
|
+
pipeline_post_completion_cleanup
|
|
6991
|
+
}
|
|
6992
|
+
|
|
6993
|
+
# ─── Post-Completion Cleanup ──────────────────────────────────────────────
|
|
6994
|
+
# Cleans up transient artifacts after a successful pipeline run.
|
|
6995
|
+
|
|
6996
|
+
pipeline_post_completion_cleanup() {
|
|
6997
|
+
local cleaned=0
|
|
6998
|
+
|
|
6999
|
+
# 1. Clear checkpoints (they only matter for resume; pipeline is done)
|
|
7000
|
+
if [[ -d "${ARTIFACTS_DIR}/checkpoints" ]]; then
|
|
7001
|
+
local cp_count=0
|
|
7002
|
+
local cp_file
|
|
7003
|
+
for cp_file in "${ARTIFACTS_DIR}/checkpoints"/*-checkpoint.json; do
|
|
7004
|
+
[[ -f "$cp_file" ]] || continue
|
|
7005
|
+
rm -f "$cp_file"
|
|
7006
|
+
cp_count=$((cp_count + 1))
|
|
7007
|
+
done
|
|
7008
|
+
if [[ "$cp_count" -gt 0 ]]; then
|
|
7009
|
+
cleaned=$((cleaned + cp_count))
|
|
7010
|
+
fi
|
|
7011
|
+
fi
|
|
7012
|
+
|
|
7013
|
+
# 2. Clear per-run intelligence artifacts (not needed after completion)
|
|
7014
|
+
local intel_files=(
|
|
7015
|
+
"${ARTIFACTS_DIR}/classified-findings.json"
|
|
7016
|
+
"${ARTIFACTS_DIR}/reassessment.json"
|
|
7017
|
+
"${ARTIFACTS_DIR}/skip-stage.txt"
|
|
7018
|
+
"${ARTIFACTS_DIR}/human-message.txt"
|
|
7019
|
+
)
|
|
7020
|
+
local f
|
|
7021
|
+
for f in "${intel_files[@]}"; do
|
|
7022
|
+
if [[ -f "$f" ]]; then
|
|
7023
|
+
rm -f "$f"
|
|
7024
|
+
cleaned=$((cleaned + 1))
|
|
7025
|
+
fi
|
|
7026
|
+
done
|
|
7027
|
+
|
|
7028
|
+
# 3. Clear stale pipeline state (mark as idle so next run starts clean)
|
|
7029
|
+
if [[ -f "$STATE_FILE" ]]; then
|
|
7030
|
+
# Reset status to idle (preserves the file for reference but unblocks new runs)
|
|
7031
|
+
local tmp_state
|
|
7032
|
+
tmp_state=$(mktemp)
|
|
7033
|
+
sed 's/^status: .*/status: idle/' "$STATE_FILE" > "$tmp_state" 2>/dev/null || true
|
|
7034
|
+
mv "$tmp_state" "$STATE_FILE"
|
|
7035
|
+
fi
|
|
7036
|
+
|
|
7037
|
+
if [[ "$cleaned" -gt 0 ]]; then
|
|
7038
|
+
emit_event "pipeline.cleanup" \
|
|
7039
|
+
"issue=${ISSUE_NUMBER:-0}" \
|
|
7040
|
+
"cleaned=$cleaned" \
|
|
7041
|
+
"type=post_completion"
|
|
7042
|
+
fi
|
|
7043
|
+
}
|
|
7044
|
+
|
|
7045
|
+
# Cancel any lingering in_progress GitHub Check Runs (called on abort/interrupt)
|
|
7046
|
+
pipeline_cancel_check_runs() {
|
|
7047
|
+
if [[ "${NO_GITHUB:-false}" == "true" ]]; then
|
|
7048
|
+
return
|
|
7049
|
+
fi
|
|
7050
|
+
|
|
7051
|
+
if ! type gh_checks_stage_update &>/dev/null 2>&1; then
|
|
7052
|
+
return
|
|
7053
|
+
fi
|
|
7054
|
+
|
|
7055
|
+
local ids_file="${ARTIFACTS_DIR:-/dev/null}/check-run-ids.json"
|
|
7056
|
+
[[ -f "$ids_file" ]] || return
|
|
7057
|
+
|
|
7058
|
+
local stage
|
|
7059
|
+
while IFS= read -r stage; do
|
|
7060
|
+
[[ -z "$stage" ]] && continue
|
|
7061
|
+
gh_checks_stage_update "$stage" "completed" "cancelled" "Pipeline interrupted" 2>/dev/null || true
|
|
7062
|
+
done < <(jq -r 'keys[]' "$ids_file" 2>/dev/null || true)
|
|
5459
7063
|
}
|
|
5460
7064
|
|
|
5461
7065
|
# ─── Worktree Isolation ───────────────────────────────────────────────────
|
|
@@ -5744,6 +7348,10 @@ pipeline_start() {
|
|
|
5744
7348
|
optimize_analyze_outcome "$STATE_FILE" 2>/dev/null || true
|
|
5745
7349
|
fi
|
|
5746
7350
|
|
|
7351
|
+
if type memory_finalize_pipeline &>/dev/null 2>&1; then
|
|
7352
|
+
memory_finalize_pipeline "$STATE_FILE" "$ARTIFACTS_DIR" 2>/dev/null || true
|
|
7353
|
+
fi
|
|
7354
|
+
|
|
5747
7355
|
# Emit cost event
|
|
5748
7356
|
local model_key="${MODEL:-sonnet}"
|
|
5749
7357
|
local input_cost output_cost total_cost
|