shipwright-cli 3.1.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/agents/code-reviewer.md +2 -0
- package/.claude/agents/devops-engineer.md +2 -0
- package/.claude/agents/doc-fleet-agent.md +2 -0
- package/.claude/agents/pipeline-agent.md +2 -0
- package/.claude/agents/shell-script-specialist.md +2 -0
- package/.claude/agents/test-specialist.md +2 -0
- package/.claude/hooks/agent-crash-capture.sh +32 -0
- package/.claude/hooks/post-tool-use.sh +3 -2
- package/.claude/hooks/pre-tool-use.sh +35 -3
- package/README.md +22 -8
- package/claude-code/hooks/config-change.sh +18 -0
- package/claude-code/hooks/instructions-reloaded.sh +7 -0
- package/claude-code/hooks/worktree-create.sh +25 -0
- package/claude-code/hooks/worktree-remove.sh +20 -0
- package/config/code-constitution.json +130 -0
- package/config/defaults.json +25 -2
- package/config/policy.json +1 -1
- package/dashboard/middleware/auth.ts +134 -0
- package/dashboard/middleware/constants.ts +21 -0
- package/dashboard/public/index.html +8 -6
- package/dashboard/public/styles.css +176 -97
- package/dashboard/routes/auth.ts +38 -0
- package/dashboard/server.ts +117 -25
- package/dashboard/services/config.ts +26 -0
- package/dashboard/services/db.ts +118 -0
- package/dashboard/src/canvas/pixel-agent.ts +298 -0
- package/dashboard/src/canvas/pixel-sprites.ts +440 -0
- package/dashboard/src/canvas/shipyard-effects.ts +367 -0
- package/dashboard/src/canvas/shipyard-scene.ts +616 -0
- package/dashboard/src/canvas/submarine-layout.ts +267 -0
- package/dashboard/src/components/header.ts +8 -7
- package/dashboard/src/core/api.ts +5 -0
- package/dashboard/src/core/router.ts +1 -0
- package/dashboard/src/design/submarine-theme.ts +253 -0
- package/dashboard/src/main.ts +2 -0
- package/dashboard/src/types/api.ts +12 -1
- package/dashboard/src/views/activity.ts +2 -1
- package/dashboard/src/views/metrics.ts +69 -1
- package/dashboard/src/views/shipyard.ts +39 -0
- package/dashboard/types/index.ts +166 -0
- package/docs/plans/2026-02-28-compound-audit-and-shipyard-design.md +186 -0
- package/docs/plans/2026-02-28-skipper-shipwright-implementation-plan.md +1182 -0
- package/docs/plans/2026-02-28-skipper-shipwright-integration-design.md +531 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-design.md +298 -0
- package/docs/plans/2026-03-01-ai-powered-skill-injection-plan.md +1109 -0
- package/docs/plans/2026-03-01-capabilities-cleanup-plan.md +658 -0
- package/docs/plans/2026-03-01-clean-architecture-plan.md +924 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-design.md +191 -0
- package/docs/plans/2026-03-01-compound-audit-cascade-plan.md +921 -0
- package/docs/plans/2026-03-01-deep-integration-plan.md +851 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-design.md +145 -0
- package/docs/plans/2026-03-01-pipeline-audit-trail-plan.md +770 -0
- package/docs/plans/2026-03-01-refined-depths-brand-design.md +382 -0
- package/docs/plans/2026-03-01-refined-depths-implementation.md +599 -0
- package/docs/plans/2026-03-01-skipper-kernel-integration-design.md +203 -0
- package/docs/plans/2026-03-01-unified-platform-design.md +272 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-design.md +189 -0
- package/docs/plans/2026-03-07-claude-code-feature-integration-plan.md +1165 -0
- package/docs/research/BACKLOG_QUICK_REFERENCE.md +352 -0
- package/docs/research/CUTTING_EDGE_RESEARCH_2026.md +546 -0
- package/docs/research/RESEARCH_INDEX.md +439 -0
- package/docs/research/RESEARCH_SOURCES.md +440 -0
- package/docs/research/RESEARCH_SUMMARY.txt +275 -0
- package/docs/superpowers/specs/2026-03-10-pipeline-quality-revolution-design.md +341 -0
- package/package.json +2 -2
- package/scripts/lib/adaptive-model.sh +427 -0
- package/scripts/lib/adaptive-timeout.sh +316 -0
- package/scripts/lib/audit-trail.sh +309 -0
- package/scripts/lib/auto-recovery.sh +471 -0
- package/scripts/lib/bandit-selector.sh +431 -0
- package/scripts/lib/bootstrap.sh +104 -2
- package/scripts/lib/causal-graph.sh +455 -0
- package/scripts/lib/compat.sh +126 -0
- package/scripts/lib/compound-audit.sh +337 -0
- package/scripts/lib/constitutional.sh +454 -0
- package/scripts/lib/context-budget.sh +359 -0
- package/scripts/lib/convergence.sh +594 -0
- package/scripts/lib/cost-optimizer.sh +634 -0
- package/scripts/lib/daemon-adaptive.sh +14 -2
- package/scripts/lib/daemon-dispatch.sh +106 -17
- package/scripts/lib/daemon-failure.sh +34 -4
- package/scripts/lib/daemon-patrol.sh +25 -4
- package/scripts/lib/daemon-poll-github.sh +361 -0
- package/scripts/lib/daemon-poll-health.sh +299 -0
- package/scripts/lib/daemon-poll.sh +27 -611
- package/scripts/lib/daemon-state.sh +119 -66
- package/scripts/lib/daemon-triage.sh +10 -0
- package/scripts/lib/dod-scorecard.sh +442 -0
- package/scripts/lib/error-actionability.sh +300 -0
- package/scripts/lib/formal-spec.sh +461 -0
- package/scripts/lib/helpers.sh +180 -5
- package/scripts/lib/intent-analysis.sh +409 -0
- package/scripts/lib/loop-convergence.sh +350 -0
- package/scripts/lib/loop-iteration.sh +682 -0
- package/scripts/lib/loop-progress.sh +48 -0
- package/scripts/lib/loop-restart.sh +185 -0
- package/scripts/lib/memory-effectiveness.sh +506 -0
- package/scripts/lib/mutation-executor.sh +352 -0
- package/scripts/lib/outcome-feedback.sh +521 -0
- package/scripts/lib/pipeline-cli.sh +336 -0
- package/scripts/lib/pipeline-commands.sh +1216 -0
- package/scripts/lib/pipeline-detection.sh +101 -3
- package/scripts/lib/pipeline-execution.sh +897 -0
- package/scripts/lib/pipeline-github.sh +28 -3
- package/scripts/lib/pipeline-intelligence-compound.sh +431 -0
- package/scripts/lib/pipeline-intelligence-scoring.sh +407 -0
- package/scripts/lib/pipeline-intelligence-skip.sh +181 -0
- package/scripts/lib/pipeline-intelligence.sh +104 -1138
- package/scripts/lib/pipeline-quality-bash-compat.sh +182 -0
- package/scripts/lib/pipeline-quality-checks.sh +17 -711
- package/scripts/lib/pipeline-quality-gates.sh +563 -0
- package/scripts/lib/pipeline-stages-build.sh +730 -0
- package/scripts/lib/pipeline-stages-delivery.sh +965 -0
- package/scripts/lib/pipeline-stages-intake.sh +1133 -0
- package/scripts/lib/pipeline-stages-monitor.sh +407 -0
- package/scripts/lib/pipeline-stages-review.sh +1022 -0
- package/scripts/lib/pipeline-stages.sh +161 -2901
- package/scripts/lib/pipeline-state.sh +36 -5
- package/scripts/lib/pipeline-util.sh +487 -0
- package/scripts/lib/policy-learner.sh +438 -0
- package/scripts/lib/process-reward.sh +493 -0
- package/scripts/lib/project-detect.sh +649 -0
- package/scripts/lib/quality-profile.sh +334 -0
- package/scripts/lib/recruit-commands.sh +885 -0
- package/scripts/lib/recruit-learning.sh +739 -0
- package/scripts/lib/recruit-roles.sh +648 -0
- package/scripts/lib/reward-aggregator.sh +458 -0
- package/scripts/lib/rl-optimizer.sh +362 -0
- package/scripts/lib/root-cause.sh +427 -0
- package/scripts/lib/scope-enforcement.sh +445 -0
- package/scripts/lib/session-restart.sh +493 -0
- package/scripts/lib/skill-memory.sh +300 -0
- package/scripts/lib/skill-registry.sh +775 -0
- package/scripts/lib/spec-driven.sh +476 -0
- package/scripts/lib/test-helpers.sh +18 -7
- package/scripts/lib/test-holdout.sh +429 -0
- package/scripts/lib/test-optimizer.sh +511 -0
- package/scripts/shipwright-file-suggest.sh +45 -0
- package/scripts/skills/adversarial-quality.md +61 -0
- package/scripts/skills/api-design.md +44 -0
- package/scripts/skills/architecture-design.md +50 -0
- package/scripts/skills/brainstorming.md +43 -0
- package/scripts/skills/data-pipeline.md +44 -0
- package/scripts/skills/deploy-safety.md +64 -0
- package/scripts/skills/documentation.md +38 -0
- package/scripts/skills/frontend-design.md +45 -0
- package/scripts/skills/generated/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/.gitkeep +0 -0
- package/scripts/skills/generated/_refinements/adversarial-quality.patch.md +3 -0
- package/scripts/skills/generated/_refinements/architecture-design.patch.md +3 -0
- package/scripts/skills/generated/_refinements/brainstorming.patch.md +3 -0
- package/scripts/skills/generated/cli-version-management.md +29 -0
- package/scripts/skills/generated/collection-system-validation.md +99 -0
- package/scripts/skills/generated/large-scale-c-refactoring-coordination.md +97 -0
- package/scripts/skills/generated/pattern-matching-similarity-scoring.md +195 -0
- package/scripts/skills/generated/test-parallelization-detection.md +65 -0
- package/scripts/skills/observability.md +79 -0
- package/scripts/skills/performance.md +48 -0
- package/scripts/skills/pr-quality.md +49 -0
- package/scripts/skills/product-thinking.md +43 -0
- package/scripts/skills/security-audit.md +49 -0
- package/scripts/skills/systematic-debugging.md +40 -0
- package/scripts/skills/testing-strategy.md +47 -0
- package/scripts/skills/two-stage-review.md +52 -0
- package/scripts/skills/validation-thoroughness.md +55 -0
- package/scripts/sw +9 -3
- package/scripts/sw-activity.sh +9 -8
- package/scripts/sw-adaptive.sh +8 -7
- package/scripts/sw-adversarial.sh +2 -1
- package/scripts/sw-architecture-enforcer.sh +3 -1
- package/scripts/sw-auth.sh +12 -2
- package/scripts/sw-autonomous.sh +5 -1
- package/scripts/sw-changelog.sh +4 -1
- package/scripts/sw-checkpoint.sh +2 -1
- package/scripts/sw-ci.sh +15 -6
- package/scripts/sw-cleanup.sh +4 -26
- package/scripts/sw-code-review.sh +45 -20
- package/scripts/sw-connect.sh +2 -1
- package/scripts/sw-context.sh +2 -1
- package/scripts/sw-cost.sh +107 -5
- package/scripts/sw-daemon.sh +71 -11
- package/scripts/sw-dashboard.sh +3 -1
- package/scripts/sw-db.sh +71 -20
- package/scripts/sw-decide.sh +8 -2
- package/scripts/sw-decompose.sh +360 -17
- package/scripts/sw-deps.sh +4 -1
- package/scripts/sw-developer-simulation.sh +4 -1
- package/scripts/sw-discovery.sh +378 -5
- package/scripts/sw-doc-fleet.sh +4 -1
- package/scripts/sw-docs-agent.sh +3 -1
- package/scripts/sw-docs.sh +2 -1
- package/scripts/sw-doctor.sh +453 -2
- package/scripts/sw-dora.sh +4 -1
- package/scripts/sw-durable.sh +12 -7
- package/scripts/sw-e2e-orchestrator.sh +17 -16
- package/scripts/sw-eventbus.sh +13 -4
- package/scripts/sw-evidence.sh +364 -12
- package/scripts/sw-feedback.sh +550 -9
- package/scripts/sw-fix.sh +20 -1
- package/scripts/sw-fleet-discover.sh +6 -2
- package/scripts/sw-fleet-viz.sh +9 -4
- package/scripts/sw-fleet.sh +5 -1
- package/scripts/sw-github-app.sh +18 -4
- package/scripts/sw-github-checks.sh +3 -2
- package/scripts/sw-github-deploy.sh +3 -2
- package/scripts/sw-github-graphql.sh +18 -7
- package/scripts/sw-guild.sh +5 -1
- package/scripts/sw-heartbeat.sh +5 -30
- package/scripts/sw-hello.sh +67 -0
- package/scripts/sw-hygiene.sh +10 -3
- package/scripts/sw-incident.sh +273 -5
- package/scripts/sw-init.sh +18 -2
- package/scripts/sw-instrument.sh +10 -2
- package/scripts/sw-intelligence.sh +44 -7
- package/scripts/sw-jira.sh +5 -1
- package/scripts/sw-launchd.sh +2 -1
- package/scripts/sw-linear.sh +4 -1
- package/scripts/sw-logs.sh +4 -1
- package/scripts/sw-loop.sh +436 -1076
- package/scripts/sw-memory.sh +357 -3
- package/scripts/sw-mission-control.sh +6 -1
- package/scripts/sw-model-router.sh +483 -27
- package/scripts/sw-otel.sh +15 -4
- package/scripts/sw-oversight.sh +14 -5
- package/scripts/sw-patrol-meta.sh +334 -0
- package/scripts/sw-pipeline-composer.sh +7 -1
- package/scripts/sw-pipeline-vitals.sh +12 -6
- package/scripts/sw-pipeline.sh +54 -2653
- package/scripts/sw-pm.sh +16 -8
- package/scripts/sw-pr-lifecycle.sh +2 -1
- package/scripts/sw-predictive.sh +17 -5
- package/scripts/sw-prep.sh +185 -2
- package/scripts/sw-ps.sh +5 -25
- package/scripts/sw-public-dashboard.sh +17 -4
- package/scripts/sw-quality.sh +14 -6
- package/scripts/sw-reaper.sh +8 -25
- package/scripts/sw-recruit.sh +156 -2303
- package/scripts/sw-regression.sh +19 -12
- package/scripts/sw-release-manager.sh +3 -1
- package/scripts/sw-release.sh +4 -1
- package/scripts/sw-remote.sh +3 -1
- package/scripts/sw-replay.sh +7 -1
- package/scripts/sw-retro.sh +158 -1
- package/scripts/sw-review-rerun.sh +3 -1
- package/scripts/sw-scale.sh +14 -5
- package/scripts/sw-security-audit.sh +6 -1
- package/scripts/sw-self-optimize.sh +173 -6
- package/scripts/sw-session.sh +9 -3
- package/scripts/sw-setup.sh +3 -1
- package/scripts/sw-stall-detector.sh +406 -0
- package/scripts/sw-standup.sh +15 -7
- package/scripts/sw-status.sh +3 -1
- package/scripts/sw-strategic.sh +14 -6
- package/scripts/sw-stream.sh +13 -4
- package/scripts/sw-swarm.sh +20 -7
- package/scripts/sw-team-stages.sh +13 -6
- package/scripts/sw-templates.sh +7 -31
- package/scripts/sw-testgen.sh +17 -6
- package/scripts/sw-tmux-pipeline.sh +4 -1
- package/scripts/sw-tmux-role-color.sh +2 -0
- package/scripts/sw-tmux-status.sh +1 -1
- package/scripts/sw-tmux.sh +37 -1
- package/scripts/sw-trace.sh +3 -1
- package/scripts/sw-tracker-github.sh +3 -0
- package/scripts/sw-tracker-jira.sh +3 -0
- package/scripts/sw-tracker-linear.sh +3 -0
- package/scripts/sw-tracker.sh +3 -1
- package/scripts/sw-triage.sh +3 -2
- package/scripts/sw-upgrade.sh +3 -1
- package/scripts/sw-ux.sh +5 -2
- package/scripts/sw-webhook.sh +5 -2
- package/scripts/sw-widgets.sh +9 -4
- package/scripts/sw-worktree.sh +15 -3
- package/scripts/test-skill-injection.sh +1233 -0
- package/templates/pipelines/autonomous.json +27 -3
- package/templates/pipelines/cost-aware.json +34 -8
- package/templates/pipelines/deployed.json +12 -0
- package/templates/pipelines/enterprise.json +12 -0
- package/templates/pipelines/fast.json +6 -0
- package/templates/pipelines/full.json +27 -3
- package/templates/pipelines/hotfix.json +6 -0
- package/templates/pipelines/standard.json +12 -0
- package/templates/pipelines/tdd.json +12 -0
|
@@ -3,551 +3,21 @@
|
|
|
3
3
|
[[ -n "${_PIPELINE_QUALITY_CHECKS_LOADED:-}" ]] && return 0
|
|
4
4
|
_PIPELINE_QUALITY_CHECKS_LOADED=1
|
|
5
5
|
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
elif [[ -f "Cargo.toml" ]] && command -v cargo-audit >/dev/null 2>&1; then
|
|
22
|
-
tool_found=true
|
|
23
|
-
cargo audit 2>&1 | tee "$audit_log" || audit_exit=$?
|
|
24
|
-
fi
|
|
25
|
-
|
|
26
|
-
if [[ "$tool_found" != "true" ]]; then
|
|
27
|
-
info "No security audit tool found — skipping"
|
|
28
|
-
echo "No audit tool available" > "$audit_log"
|
|
29
|
-
return 0
|
|
30
|
-
fi
|
|
31
|
-
|
|
32
|
-
# Parse results for critical/high severity
|
|
33
|
-
local critical_count high_count
|
|
34
|
-
critical_count=$(grep -ciE 'critical' "$audit_log" 2>/dev/null || true)
|
|
35
|
-
critical_count="${critical_count:-0}"
|
|
36
|
-
high_count=$(grep -ciE 'high' "$audit_log" 2>/dev/null || true)
|
|
37
|
-
high_count="${high_count:-0}"
|
|
38
|
-
|
|
39
|
-
emit_event "quality.security" \
|
|
40
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
41
|
-
"critical=$critical_count" \
|
|
42
|
-
"high=$high_count"
|
|
43
|
-
|
|
44
|
-
if [[ "$critical_count" -gt 0 ]]; then
|
|
45
|
-
warn "Security audit: ${critical_count} critical, ${high_count} high"
|
|
46
|
-
return 1
|
|
47
|
-
fi
|
|
48
|
-
|
|
49
|
-
success "Security audit: clean"
|
|
50
|
-
return 0
|
|
51
|
-
}
|
|
52
|
-
|
|
53
|
-
#######################################
# Check build-output ("bundle") size against an adaptive historical baseline.
# Locates the build directory (tsconfig outDir, package.json build flags, or
# common dirs), records its size, and fails when growth exceeds a statistical
# threshold derived from the last 10 runs (2σ, or 3σ for small bundles).
# Globals:   ARTIFACTS_DIR, PROJECT_ROOT, SCRIPT_DIR, ISSUE_NUMBER, HOME (read)
# Outputs:   writes $ARTIFACTS_DIR/bundle-metrics.log; emits quality.bundle
#            event; updates ~/.shipwright/baselines/<hash>/bundle-history.json
# Returns:   0 on pass/skip, 1 when size exceeds the adaptive threshold
#######################################
quality_check_bundle_size() {
  info "Bundle size check..."
  local metrics_log="$ARTIFACTS_DIR/bundle-metrics.log"
  local bundle_size=0
  local bundle_dir=""

  # Find build output directory — check config files first, then common dirs.
  # 1) tsconfig.json compilerOptions.outDir
  if [[ -z "$bundle_dir" && -f "tsconfig.json" ]]; then
    local ts_out
    ts_out=$(jq -r '.compilerOptions.outDir // empty' tsconfig.json 2>/dev/null || true)
    [[ -n "$ts_out" && -d "$ts_out" ]] && bundle_dir="$ts_out"
  fi
  # 2) package.json build script output flags: --outDir, -o, --out-dir
  if [[ -z "$bundle_dir" && -f "package.json" ]]; then
    local build_script
    build_script=$(jq -r '.scripts.build // ""' package.json 2>/dev/null || true)
    if [[ -n "$build_script" ]]; then
      local parsed_out
      parsed_out=$(echo "$build_script" | grep -oE '(--outDir|--out-dir|-o)\s+[^ ]+' 2>/dev/null | awk '{print $NF}' | head -1 || true)
      [[ -n "$parsed_out" && -d "$parsed_out" ]] && bundle_dir="$parsed_out"
    fi
  fi
  # 3) Fallback: check common directories
  if [[ -z "$bundle_dir" ]]; then
    local dir
    for dir in dist build out .next target; do
      if [[ -d "$dir" ]]; then
        bundle_dir="$dir"
        break
      fi
    done
  fi

  if [[ -z "$bundle_dir" ]]; then
    info "No build output directory found — skipping bundle check"
    echo "No build directory" > "$metrics_log"
    return 0
  fi

  # FIX: the original `du ... | cut ... || echo "0"` guarded cut (which
  # succeeds even when du fails), so bundle_size could end up empty and break
  # the numeric comparisons below. Default both values explicitly.
  bundle_size=$(du -sk "$bundle_dir" 2>/dev/null | cut -f1 || echo "0")
  bundle_size="${bundle_size:-0}"
  local bundle_size_human
  bundle_size_human=$(du -sh "$bundle_dir" 2>/dev/null | cut -f1 || echo "unknown")
  bundle_size_human="${bundle_size_human:-unknown}"

  echo "Bundle directory: $bundle_dir" > "$metrics_log"
  echo "Size: ${bundle_size}KB (${bundle_size_human})" >> "$metrics_log"

  emit_event "quality.bundle" \
    "issue=${ISSUE_NUMBER:-0}" \
    "size_kb=$bundle_size" \
    "directory=$bundle_dir"

  # Adaptive bundle size check: statistical deviation from historical mean.
  # Baselines are keyed by a short hash of the repo path so multiple repos
  # sharing one $HOME do not pollute each other's history.
  local repo_hash_bundle
  repo_hash_bundle=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local bundle_baselines_dir="${HOME}/.shipwright/baselines/${repo_hash_bundle}"
  local bundle_history_file="${bundle_baselines_dir}/bundle-history.json"

  local bundle_history="[]"
  if [[ -f "$bundle_history_file" ]]; then
    bundle_history=$(jq '.sizes // []' "$bundle_history_file" 2>/dev/null || echo "[]")
  fi

  local bundle_hist_count
  bundle_hist_count=$(echo "$bundle_history" | jq 'length' 2>/dev/null || echo "0")

  if [[ "$bundle_hist_count" -ge 3 ]]; then
    # Statistical check: alert on growth beyond Nσ from the historical mean
    local mean_size stddev_size
    mean_size=$(echo "$bundle_history" | jq 'add / length' 2>/dev/null || echo "0")
    stddev_size=$(echo "$bundle_history" | jq '
      (add / length) as $mean |
      (map(. - $mean | . * .) | add / length | sqrt)
    ' 2>/dev/null || echo "0")

    # Adaptive tolerance: small repos (<1MB mean) get wider tolerance (3σ),
    # large repos get 2σ
    local sigma_mult
    sigma_mult=$(awk -v mean="$mean_size" 'BEGIN{ print (mean < 1024 ? 3 : 2) }')
    # Threshold: mean + Nσ, but never less than 10% above the mean so tiny
    # stddevs don't produce hair-trigger failures.
    local adaptive_max
    adaptive_max=$(awk -v mean="$mean_size" -v sd="$stddev_size" -v mult="$sigma_mult" \
      'BEGIN{ t = mean + mult*sd; min_t = mean * 1.1; printf "%.0f", (t > min_t ? t : min_t) }')

    echo "History: ${bundle_hist_count} runs | Mean: ${mean_size}KB | StdDev: ${stddev_size}KB | Max: ${adaptive_max}KB (${sigma_mult}σ)" >> "$metrics_log"

    if [[ "$bundle_size" -gt "$adaptive_max" ]] 2>/dev/null; then
      local growth_pct
      growth_pct=$(awk -v cur="$bundle_size" -v mean="$mean_size" 'BEGIN{printf "%d", ((cur - mean) / mean) * 100}')
      warn "Bundle size ${growth_pct}% above average (${mean_size}KB → ${bundle_size}KB, ${sigma_mult}σ threshold: ${adaptive_max}KB)"
      return 1
    fi
  else
    # Fallback: legacy memory baseline with hardcoded 20% (not enough history)
    local baseline_size=""
    if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
      baseline_size=$(bash "$SCRIPT_DIR/sw-memory.sh" get "bundle_size_kb" 2>/dev/null) || true
    fi
    if [[ -n "$baseline_size" && "$baseline_size" -gt 0 ]] 2>/dev/null; then
      local growth_pct
      growth_pct=$(awk -v cur="$bundle_size" -v base="$baseline_size" 'BEGIN{printf "%d", ((cur - base) / base) * 100}')
      echo "Baseline: ${baseline_size}KB | Growth: ${growth_pct}%" >> "$metrics_log"
      if [[ "$growth_pct" -gt 20 ]]; then
        warn "Bundle size grew ${growth_pct}% (${baseline_size}KB → ${bundle_size}KB)"
        return 1
      fi
    fi
  fi

  # Append current size to rolling history (keep last 10)
  mkdir -p "$bundle_baselines_dir"
  local updated_bundle_hist
  updated_bundle_hist=$(echo "$bundle_history" | jq --arg sz "$bundle_size" '
    . + [($sz | tonumber)] | .[-10:]
  ' 2>/dev/null || echo "[$bundle_size]")
  local tmp_bundle_hist
  tmp_bundle_hist=$(mktemp "${bundle_baselines_dir}/bundle-history.json.XXXXXX")
  # FIX: only replace the history file when jq succeeded; the original moved
  # an empty temp file into place on jq failure (corrupting the rolling
  # history) and leaked the temp file when mv failed.
  if jq -n --argjson sizes "$updated_bundle_hist" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
      '{sizes: $sizes, updated: $updated}' > "$tmp_bundle_hist" 2>/dev/null; then
    mv "$tmp_bundle_hist" "$bundle_history_file" 2>/dev/null || rm -f -- "$tmp_bundle_hist"
  else
    rm -f -- "$tmp_bundle_hist"
  fi

  # Intelligence: identify top dependency bloaters (best-effort, log-only)
  if type intelligence_search_memory >/dev/null 2>&1 && [[ -f "package.json" ]] && command -v jq >/dev/null 2>&1; then
    local dep_sizes=""
    local deps
    deps=$(jq -r '.dependencies // {} | keys[]' package.json 2>/dev/null || true)
    if [[ -n "$deps" ]]; then
      local dep
      while IFS= read -r dep; do
        [[ -z "$dep" ]] && continue
        local dep_dir="node_modules/${dep}"
        if [[ -d "$dep_dir" ]]; then
          local dep_size
          dep_size=$(du -sk "$dep_dir" 2>/dev/null | cut -f1 || echo "0")
          # "<size> <name>" per line so a numeric sort ranks by size
          dep_sizes="${dep_sizes}${dep_size} ${dep}
"
        fi
      done <<< "$deps"
      if [[ -n "$dep_sizes" ]]; then
        local top_bloaters
        top_bloaters=$(echo "$dep_sizes" | sort -rn | head -3)
        if [[ -n "$top_bloaters" ]]; then
          echo "" >> "$metrics_log"
          echo "Top 3 dependency sizes:" >> "$metrics_log"
          local sz nm
          echo "$top_bloaters" | while IFS=' ' read -r sz nm; do
            [[ -z "$nm" ]] && continue
            echo " ${nm}: ${sz}KB" >> "$metrics_log"
          done
          info "Top bloaters: $(echo "$top_bloaters" | head -1 | awk '{print $2 ": " $1 "KB"}')"
        fi
      fi
    fi
  fi

  info "Bundle size: ${bundle_size_human}${bundle_hist_count:+ (${bundle_hist_count} historical samples)}"
  return 0
}
|
|
207
|
-
|
|
208
|
-
#######################################
# Detect test-suite performance regressions against a rolling baseline.
# Parses the test duration (seconds) out of the test log using per-framework
# patterns (Jest/Vitest, pytest, Go, Cargo, generic), with an optional Claude
# fallback, then compares it to a mean+2σ threshold over the last 10 runs.
# Globals:   ARTIFACTS_DIR, PROJECT_ROOT, SCRIPT_DIR, ISSUE_NUMBER, HOME (read)
# Outputs:   writes $ARTIFACTS_DIR/perf-metrics.log; emits quality.perf event;
#            updates ~/.shipwright/baselines/<hash>/perf-history.json
# Returns:   0 on pass/skip, 1 when duration exceeds the adaptive threshold
#######################################
quality_check_perf_regression() {
  info "Performance regression check..."
  local metrics_log="$ARTIFACTS_DIR/perf-metrics.log"
  local test_log="$ARTIFACTS_DIR/test-results.log"

  if [[ ! -f "$test_log" ]]; then
    info "No test results — skipping perf check"
    echo "No test results available" > "$metrics_log"
    return 0
  fi

  # Extract test suite duration — multi-framework patterns.
  # FIX: renamed local duration_ms -> duration_s: the value is in SECONDS
  # (it is even emitted as duration_s below); the old name was misleading.
  local duration_s=""
  # Jest/Vitest: "Time: 12.34 s" or "Duration 12.34s"
  duration_s=$(grep -oE 'Time:\s*[0-9.]+\s*s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  [[ -z "$duration_s" ]] && duration_s=$(grep -oE 'Duration\s+[0-9.]+\s*s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  # pytest: "passed in 12.34s" or "====== 5 passed in 12.34 seconds ======"
  [[ -z "$duration_s" ]] && duration_s=$(grep -oE 'passed in [0-9.]+s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  # Go test: "ok pkg 12.345s"
  [[ -z "$duration_s" ]] && duration_s=$(grep -oE '^ok\s+\S+\s+[0-9.]+s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+s' | grep -oE '[0-9.]+' | tail -1 || true)
  # Cargo test: "test result: ok. ... finished in 12.34s"
  [[ -z "$duration_s" ]] && duration_s=$(grep -oE 'finished in [0-9.]+s' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)
  # Generic: "12.34 seconds" or "12.34s"
  [[ -z "$duration_s" ]] && duration_s=$(grep -oE '[0-9.]+ ?s(econds?)?' "$test_log" 2>/dev/null | grep -oE '[0-9.]+' | tail -1 || true)

  # Claude fallback: parse test output when no pattern matches
  if [[ -z "$duration_s" ]]; then
    local intel_enabled="false"
    local daemon_cfg="${PROJECT_ROOT}/.claude/daemon-config.json"
    if [[ -f "$daemon_cfg" ]]; then
      intel_enabled=$(jq -r '.intelligence.enabled // false' "$daemon_cfg" 2>/dev/null || echo "false")
    fi
    if [[ "$intel_enabled" == "true" ]] && command -v claude >/dev/null 2>&1; then
      local tail_output
      tail_output=$(tail -30 "$test_log" 2>/dev/null || true)
      if [[ -n "$tail_output" ]]; then
        # The grep below only admits a bare number, so a "NONE" reply is
        # discarded here. FIX: dropped the original's follow-up NONE check,
        # which could never match after this grep (dead code).
        duration_s=$(claude --print -p "Extract ONLY the total test suite duration in seconds from this output. Reply with ONLY a number (e.g. 12.34). If no duration found, reply NONE.

$tail_output" < /dev/null 2>/dev/null | grep -oE '^[0-9.]+$' | head -1 || true)
      fi
    fi
  fi

  if [[ -z "$duration_s" ]]; then
    info "Could not extract test duration — skipping perf check"
    echo "Duration not parseable" > "$metrics_log"
    return 0
  fi

  echo "Test duration: ${duration_s}s" > "$metrics_log"

  emit_event "quality.perf" \
    "issue=${ISSUE_NUMBER:-0}" \
    "duration_s=$duration_s"

  # Adaptive performance check: 2σ from rolling 10-run average, keyed by a
  # short hash of the repo path so repos don't share baselines.
  local repo_hash_perf
  repo_hash_perf=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
  local perf_baselines_dir="${HOME}/.shipwright/baselines/${repo_hash_perf}"
  local perf_history_file="${perf_baselines_dir}/perf-history.json"

  # Read historical durations (rolling window of last 10 runs)
  local history_json="[]"
  if [[ -f "$perf_history_file" ]]; then
    history_json=$(jq '.durations // []' "$perf_history_file" 2>/dev/null || echo "[]")
  fi

  local history_count
  history_count=$(echo "$history_json" | jq 'length' 2>/dev/null || echo "0")

  if [[ "$history_count" -ge 3 ]]; then
    # Calculate mean and standard deviation from history
    local mean_dur stddev_dur
    mean_dur=$(echo "$history_json" | jq 'add / length' 2>/dev/null || echo "0")
    stddev_dur=$(echo "$history_json" | jq '
      (add / length) as $mean |
      (map(. - $mean | . * .) | add / length | sqrt)
    ' 2>/dev/null || echo "0")

    # Threshold: mean + 2σ (but at least 10% above mean, so a near-zero
    # stddev can't make the check hair-trigger)
    local adaptive_threshold
    adaptive_threshold=$(awk -v mean="$mean_dur" -v sd="$stddev_dur" \
      'BEGIN{ t = mean + 2*sd; min_t = mean * 1.1; printf "%.2f", (t > min_t ? t : min_t) }')

    echo "History: ${history_count} runs | Mean: ${mean_dur}s | StdDev: ${stddev_dur}s | Threshold: ${adaptive_threshold}s" >> "$metrics_log"

    # awk handles the float comparison ([[ -gt ]] is integer-only)
    if awk -v cur="$duration_s" -v thresh="$adaptive_threshold" 'BEGIN{exit !(cur > thresh)}' 2>/dev/null; then
      local slowdown_pct
      slowdown_pct=$(awk -v cur="$duration_s" -v mean="$mean_dur" 'BEGIN{printf "%d", ((cur - mean) / mean) * 100}')
      warn "Tests ${slowdown_pct}% slower than rolling average (${mean_dur}s → ${duration_s}s, threshold: ${adaptive_threshold}s)"
      return 1
    fi
  else
    # Fallback: legacy memory baseline with hardcoded 30% (not enough history)
    local baseline_dur=""
    if [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
      baseline_dur=$(bash "$SCRIPT_DIR/sw-memory.sh" get "test_duration_s" 2>/dev/null) || true
    fi
    if [[ -n "$baseline_dur" ]] && awk -v base="$baseline_dur" 'BEGIN{exit !(base > 0)}' 2>/dev/null; then
      local slowdown_pct
      slowdown_pct=$(awk -v cur="$duration_s" -v base="$baseline_dur" 'BEGIN{printf "%d", ((cur - base) / base) * 100}')
      echo "Baseline: ${baseline_dur}s | Slowdown: ${slowdown_pct}%" >> "$metrics_log"
      if [[ "$slowdown_pct" -gt 30 ]]; then
        warn "Tests ${slowdown_pct}% slower (${baseline_dur}s → ${duration_s}s)"
        return 1
      fi
    fi
  fi

  # Append current duration to rolling history (keep last 10)
  mkdir -p "$perf_baselines_dir"
  local updated_history
  updated_history=$(echo "$history_json" | jq --arg dur "$duration_s" '
    . + [($dur | tonumber)] | .[-10:]
  ' 2>/dev/null || echo "[$duration_s]")
  local tmp_perf_hist
  tmp_perf_hist=$(mktemp "${perf_baselines_dir}/perf-history.json.XXXXXX")
  # FIX: only replace the history file when jq succeeded; the original moved
  # an empty temp file into place on jq failure (corrupting the rolling
  # history) and leaked the temp file when mv failed.
  if jq -n --argjson durations "$updated_history" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
      '{durations: $durations, updated: $updated}' > "$tmp_perf_hist" 2>/dev/null; then
    mv "$tmp_perf_hist" "$perf_history_file" 2>/dev/null || rm -f -- "$tmp_perf_hist"
  else
    rm -f -- "$tmp_perf_hist"
  fi

  info "Test duration: ${duration_s}s${history_count:+ (${history_count} historical samples)}"
  return 0
}
|
|
333
|
-
|
|
334
|
-
quality_check_api_compat() {
|
|
335
|
-
info "API compatibility check..."
|
|
336
|
-
local compat_log="$ARTIFACTS_DIR/api-compat.log"
|
|
337
|
-
|
|
338
|
-
# Look for OpenAPI/Swagger specs — search beyond hardcoded paths
|
|
339
|
-
local spec_file=""
|
|
340
|
-
for candidate in openapi.json openapi.yaml swagger.json swagger.yaml api/openapi.json docs/openapi.yaml; do
|
|
341
|
-
if [[ -f "$candidate" ]]; then
|
|
342
|
-
spec_file="$candidate"
|
|
343
|
-
break
|
|
344
|
-
fi
|
|
345
|
-
done
|
|
346
|
-
# Broader search if nothing found at common paths
|
|
347
|
-
if [[ -z "$spec_file" ]]; then
|
|
348
|
-
spec_file=$(find . -maxdepth 4 \( -name "openapi*.json" -o -name "openapi*.yaml" -o -name "openapi*.yml" -o -name "swagger*.json" -o -name "swagger*.yaml" -o -name "swagger*.yml" \) -type f 2>/dev/null | head -1 || true)
|
|
349
|
-
fi
|
|
350
|
-
|
|
351
|
-
if [[ -z "$spec_file" ]]; then
|
|
352
|
-
info "No OpenAPI/Swagger spec found — skipping API compat check"
|
|
353
|
-
echo "No API spec found" > "$compat_log"
|
|
354
|
-
return 0
|
|
355
|
-
fi
|
|
356
|
-
|
|
357
|
-
# Check if spec was modified in this branch
|
|
358
|
-
local spec_changed
|
|
359
|
-
spec_changed=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null | grep -c "$(basename "$spec_file")" || true)
|
|
360
|
-
spec_changed="${spec_changed:-0}"
|
|
361
|
-
|
|
362
|
-
if [[ "$spec_changed" -eq 0 ]]; then
|
|
363
|
-
info "API spec unchanged"
|
|
364
|
-
echo "Spec unchanged" > "$compat_log"
|
|
365
|
-
return 0
|
|
366
|
-
fi
|
|
367
|
-
|
|
368
|
-
# Diff the spec against base branch
|
|
369
|
-
local old_spec new_spec
|
|
370
|
-
old_spec=$(git show "${BASE_BRANCH}:${spec_file}" 2>/dev/null || true)
|
|
371
|
-
new_spec=$(cat "$spec_file" 2>/dev/null || true)
|
|
372
|
-
|
|
373
|
-
if [[ -z "$old_spec" ]]; then
|
|
374
|
-
info "New API spec — no baseline to compare"
|
|
375
|
-
echo "New spec, no baseline" > "$compat_log"
|
|
376
|
-
return 0
|
|
377
|
-
fi
|
|
378
|
-
|
|
379
|
-
# Check for breaking changes: removed endpoints, changed methods
|
|
380
|
-
local removed_endpoints=""
|
|
381
|
-
if command -v jq >/dev/null 2>&1 && [[ "$spec_file" == *.json ]]; then
|
|
382
|
-
local old_paths new_paths
|
|
383
|
-
old_paths=$(echo "$old_spec" | jq -r '.paths | keys[]' 2>/dev/null | sort || true)
|
|
384
|
-
new_paths=$(jq -r '.paths | keys[]' "$spec_file" 2>/dev/null | sort || true)
|
|
385
|
-
removed_endpoints=$(comm -23 <(echo "$old_paths") <(echo "$new_paths") 2>/dev/null || true)
|
|
386
|
-
fi
|
|
387
|
-
|
|
388
|
-
# Enhanced schema diff: parameter changes, response schema, auth changes
|
|
389
|
-
local param_changes="" schema_changes=""
|
|
390
|
-
if command -v jq >/dev/null 2>&1 && [[ "$spec_file" == *.json ]]; then
|
|
391
|
-
# Detect parameter changes on existing endpoints
|
|
392
|
-
local common_paths
|
|
393
|
-
common_paths=$(comm -12 <(echo "$old_spec" | jq -r '.paths | keys[]' 2>/dev/null | sort) <(jq -r '.paths | keys[]' "$spec_file" 2>/dev/null | sort) 2>/dev/null || true)
|
|
394
|
-
if [[ -n "$common_paths" ]]; then
|
|
395
|
-
while IFS= read -r path; do
|
|
396
|
-
[[ -z "$path" ]] && continue
|
|
397
|
-
local old_params new_params
|
|
398
|
-
old_params=$(echo "$old_spec" | jq -r --arg p "$path" '.paths[$p] | to_entries[] | .value.parameters // [] | .[].name' 2>/dev/null | sort || true)
|
|
399
|
-
new_params=$(jq -r --arg p "$path" '.paths[$p] | to_entries[] | .value.parameters // [] | .[].name' "$spec_file" 2>/dev/null | sort || true)
|
|
400
|
-
local removed_params
|
|
401
|
-
removed_params=$(comm -23 <(echo "$old_params") <(echo "$new_params") 2>/dev/null || true)
|
|
402
|
-
[[ -n "$removed_params" ]] && param_changes="${param_changes}${path}: removed params: ${removed_params}
|
|
403
|
-
"
|
|
404
|
-
done <<< "$common_paths"
|
|
405
|
-
fi
|
|
406
|
-
fi
|
|
407
|
-
|
|
408
|
-
# Intelligence: semantic API diff for complex changes
|
|
409
|
-
local semantic_diff=""
|
|
410
|
-
if type intelligence_search_memory >/dev/null 2>&1 && command -v claude >/dev/null 2>&1; then
|
|
411
|
-
local spec_git_diff
|
|
412
|
-
spec_git_diff=$(git diff "${BASE_BRANCH}...HEAD" -- "$spec_file" 2>/dev/null | head -200 || true)
|
|
413
|
-
if [[ -n "$spec_git_diff" ]]; then
|
|
414
|
-
semantic_diff=$(claude --print --output-format text -p "Analyze this API spec diff for breaking changes. List: removed endpoints, changed parameters, altered response schemas, auth changes. Be concise.
|
|
415
|
-
|
|
416
|
-
${spec_git_diff}" --model haiku < /dev/null 2>/dev/null || true)
|
|
417
|
-
fi
|
|
418
|
-
fi
|
|
419
|
-
|
|
420
|
-
{
|
|
421
|
-
echo "Spec: $spec_file"
|
|
422
|
-
echo "Changed: yes"
|
|
423
|
-
if [[ -n "$removed_endpoints" ]]; then
|
|
424
|
-
echo "BREAKING — Removed endpoints:"
|
|
425
|
-
echo "$removed_endpoints"
|
|
426
|
-
fi
|
|
427
|
-
if [[ -n "$param_changes" ]]; then
|
|
428
|
-
echo "BREAKING — Parameter changes:"
|
|
429
|
-
echo "$param_changes"
|
|
430
|
-
fi
|
|
431
|
-
if [[ -n "$semantic_diff" ]]; then
|
|
432
|
-
echo ""
|
|
433
|
-
echo "Semantic analysis:"
|
|
434
|
-
echo "$semantic_diff"
|
|
435
|
-
fi
|
|
436
|
-
if [[ -z "$removed_endpoints" && -z "$param_changes" ]]; then
|
|
437
|
-
echo "No breaking changes detected"
|
|
438
|
-
fi
|
|
439
|
-
} > "$compat_log"
|
|
440
|
-
|
|
441
|
-
if [[ -n "$removed_endpoints" || -n "$param_changes" ]]; then
|
|
442
|
-
local issue_count=0
|
|
443
|
-
[[ -n "$removed_endpoints" ]] && issue_count=$((issue_count + $(echo "$removed_endpoints" | wc -l | xargs)))
|
|
444
|
-
[[ -n "$param_changes" ]] && issue_count=$((issue_count + $(echo "$param_changes" | grep -c '.' 2>/dev/null || true)))
|
|
445
|
-
warn "API breaking changes: ${issue_count} issue(s) found"
|
|
446
|
-
return 1
|
|
447
|
-
fi
|
|
448
|
-
|
|
449
|
-
success "API compatibility: no breaking changes"
|
|
450
|
-
return 0
|
|
451
|
-
}
|
|
452
|
-
|
|
453
|
-
quality_check_coverage() {
|
|
454
|
-
info "Coverage analysis..."
|
|
455
|
-
local test_log="$ARTIFACTS_DIR/test-results.log"
|
|
456
|
-
|
|
457
|
-
if [[ ! -f "$test_log" ]]; then
|
|
458
|
-
info "No test results — skipping coverage check"
|
|
459
|
-
return 0
|
|
460
|
-
fi
|
|
461
|
-
|
|
462
|
-
# Extract coverage percentage using shared parser
|
|
463
|
-
local coverage=""
|
|
464
|
-
coverage=$(parse_coverage_from_output "$test_log")
|
|
465
|
-
|
|
466
|
-
# Claude fallback: parse test output when no pattern matches
|
|
467
|
-
if [[ -z "$coverage" ]]; then
|
|
468
|
-
local intel_enabled_cov="false"
|
|
469
|
-
local daemon_cfg_cov="${PROJECT_ROOT}/.claude/daemon-config.json"
|
|
470
|
-
if [[ -f "$daemon_cfg_cov" ]]; then
|
|
471
|
-
intel_enabled_cov=$(jq -r '.intelligence.enabled // false' "$daemon_cfg_cov" 2>/dev/null || echo "false")
|
|
472
|
-
fi
|
|
473
|
-
if [[ "$intel_enabled_cov" == "true" ]] && command -v claude >/dev/null 2>&1; then
|
|
474
|
-
local tail_cov_output
|
|
475
|
-
tail_cov_output=$(tail -40 "$test_log" 2>/dev/null || true)
|
|
476
|
-
if [[ -n "$tail_cov_output" ]]; then
|
|
477
|
-
coverage=$(claude --print -p "Extract ONLY the overall code coverage percentage from this test output. Reply with ONLY a number (e.g. 85.5). If no coverage found, reply NONE.
|
|
478
|
-
|
|
479
|
-
$tail_cov_output" < /dev/null 2>/dev/null | grep -oE '^[0-9.]+$' | head -1 || true)
|
|
480
|
-
[[ "$coverage" == "NONE" ]] && coverage=""
|
|
481
|
-
fi
|
|
482
|
-
fi
|
|
483
|
-
fi
|
|
484
|
-
|
|
485
|
-
if [[ -z "$coverage" ]]; then
|
|
486
|
-
info "Could not extract coverage — skipping"
|
|
487
|
-
return 0
|
|
488
|
-
fi
|
|
489
|
-
|
|
490
|
-
emit_event "quality.coverage" \
|
|
491
|
-
"issue=${ISSUE_NUMBER:-0}" \
|
|
492
|
-
"coverage=$coverage"
|
|
493
|
-
|
|
494
|
-
# Check against pipeline config minimum
|
|
495
|
-
local coverage_min
|
|
496
|
-
coverage_min=$(jq -r --arg id "test" '(.stages[] | select(.id == $id) | .config.coverage_min) // 0' "$PIPELINE_CONFIG" 2>/dev/null) || true
|
|
497
|
-
[[ -z "$coverage_min" || "$coverage_min" == "null" ]] && coverage_min=0
|
|
498
|
-
|
|
499
|
-
# Adaptive baseline: read from baselines file, enforce no-regression (>= baseline - 2%)
|
|
500
|
-
local repo_hash_cov
|
|
501
|
-
repo_hash_cov=$(echo -n "$PROJECT_ROOT" | shasum -a 256 2>/dev/null | cut -c1-12 || echo "unknown")
|
|
502
|
-
local baselines_dir="${HOME}/.shipwright/baselines/${repo_hash_cov}"
|
|
503
|
-
local coverage_baseline_file="${baselines_dir}/coverage.json"
|
|
504
|
-
|
|
505
|
-
local baseline_coverage=""
|
|
506
|
-
if [[ -f "$coverage_baseline_file" ]]; then
|
|
507
|
-
baseline_coverage=$(jq -r '.baseline // empty' "$coverage_baseline_file" 2>/dev/null) || true
|
|
508
|
-
fi
|
|
509
|
-
# Fallback: try legacy memory baseline
|
|
510
|
-
if [[ -z "$baseline_coverage" ]] && [[ -x "$SCRIPT_DIR/sw-memory.sh" ]]; then
|
|
511
|
-
baseline_coverage=$(bash "$SCRIPT_DIR/sw-memory.sh" get "coverage_pct" 2>/dev/null) || true
|
|
512
|
-
fi
|
|
513
|
-
|
|
514
|
-
local dropped=false
|
|
515
|
-
if [[ -n "$baseline_coverage" && "$baseline_coverage" != "0" ]] && awk -v cur="$coverage" -v base="$baseline_coverage" 'BEGIN{exit !(base > 0)}' 2>/dev/null; then
|
|
516
|
-
# Adaptive: allow 2% regression tolerance from baseline
|
|
517
|
-
local min_allowed
|
|
518
|
-
min_allowed=$(awk -v base="$baseline_coverage" 'BEGIN{printf "%d", base - 2}')
|
|
519
|
-
if awk -v cur="$coverage" -v min="$min_allowed" 'BEGIN{exit !(cur < min)}' 2>/dev/null; then
|
|
520
|
-
warn "Coverage regression: ${baseline_coverage}% → ${coverage}% (adaptive min: ${min_allowed}%)"
|
|
521
|
-
dropped=true
|
|
522
|
-
fi
|
|
523
|
-
fi
|
|
524
|
-
|
|
525
|
-
if [[ "$coverage_min" -gt 0 ]] 2>/dev/null && awk -v cov="$coverage" -v min="$coverage_min" 'BEGIN{exit !(cov < min)}' 2>/dev/null; then
|
|
526
|
-
warn "Coverage ${coverage}% below minimum ${coverage_min}%"
|
|
527
|
-
return 1
|
|
528
|
-
fi
|
|
529
|
-
|
|
530
|
-
if $dropped; then
|
|
531
|
-
return 1
|
|
532
|
-
fi
|
|
533
|
-
|
|
534
|
-
# Update baseline on success (first run or improvement)
|
|
535
|
-
if [[ -z "$baseline_coverage" ]] || awk -v cur="$coverage" -v base="$baseline_coverage" 'BEGIN{exit !(cur >= base)}' 2>/dev/null; then
|
|
536
|
-
mkdir -p "$baselines_dir"
|
|
537
|
-
local tmp_cov_baseline
|
|
538
|
-
tmp_cov_baseline=$(mktemp "${baselines_dir}/coverage.json.XXXXXX")
|
|
539
|
-
jq -n --arg baseline "$coverage" --arg updated "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
|
|
540
|
-
'{baseline: ($baseline | tonumber), updated: $updated}' > "$tmp_cov_baseline" 2>/dev/null
|
|
541
|
-
mv "$tmp_cov_baseline" "$coverage_baseline_file" 2>/dev/null || true
|
|
542
|
-
fi
|
|
543
|
-
|
|
544
|
-
info "Coverage: ${coverage}%${baseline_coverage:+ (baseline: ${baseline_coverage}%)}"
|
|
545
|
-
return 0
|
|
546
|
-
}
|
|
547
|
-
|
|
548
|
-
# ─── Compound Quality Checks ──────────────────────────────────────────────
|
|
549
|
-
# Adversarial review, negative prompting, E2E validation, and DoD audit.
|
|
550
|
-
# Feeds findings back into a self-healing rebuild loop for automatic fixes.
|
|
6
|
+
# Defaults for variables normally set by sw-pipeline.sh (safe under set -u).
|
|
7
|
+
ARTIFACTS_DIR="${ARTIFACTS_DIR:-.claude/pipeline-artifacts}"
|
|
8
|
+
SCRIPT_DIR="${SCRIPT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
|
|
9
|
+
PROJECT_ROOT="${PROJECT_ROOT:-$(pwd)}"
|
|
10
|
+
BASE_BRANCH="${BASE_BRANCH:-main}"
|
|
11
|
+
PIPELINE_CONFIG="${PIPELINE_CONFIG:-}"
|
|
12
|
+
TEST_CMD="${TEST_CMD:-}"
|
|
13
|
+
|
|
14
|
+
# Source sub-modules
|
|
15
|
+
if [[ -f "${SCRIPT_DIR}/lib/pipeline-quality-gates.sh" ]]; then
|
|
16
|
+
source "${SCRIPT_DIR}/lib/pipeline-quality-gates.sh"
|
|
17
|
+
fi
|
|
18
|
+
if [[ -f "${SCRIPT_DIR}/lib/pipeline-quality-bash-compat.sh" ]]; then
|
|
19
|
+
source "${SCRIPT_DIR}/lib/pipeline-quality-bash-compat.sh"
|
|
20
|
+
fi
|
|
551
21
|
|
|
552
22
|
run_adversarial_review() {
|
|
553
23
|
local diff_content
|
|
@@ -654,6 +124,7 @@ $diff_content"
|
|
|
654
124
|
return 0
|
|
655
125
|
}
|
|
656
126
|
|
|
127
|
+
|
|
657
128
|
run_negative_prompting() {
|
|
658
129
|
local changed_files
|
|
659
130
|
changed_files=$(git diff --name-only "${BASE_BRANCH}...HEAD" 2>/dev/null || true)
|
|
@@ -740,6 +211,7 @@ run_e2e_validation() {
|
|
|
740
211
|
fi
|
|
741
212
|
}
|
|
742
213
|
|
|
214
|
+
|
|
743
215
|
run_dod_audit() {
|
|
744
216
|
local dod_file="$PROJECT_ROOT/.claude/DEFINITION-OF-DONE.md"
|
|
745
217
|
|
|
@@ -836,85 +308,7 @@ PIPELINE_ADAPTIVE_COMPLEXITY=""
|
|
|
836
308
|
# Scans modified .sh files for common bash 3.2 incompatibilities
|
|
837
309
|
# Returns: count of violations found
|
|
838
310
|
# ──────────────────────────────────────────────────────────────────────────────
|
|
839
|
-
run_bash_compat_check() {
|
|
840
|
-
local violations=0
|
|
841
|
-
local violation_details=""
|
|
842
|
-
|
|
843
|
-
# Get modified .sh files relative to base branch
|
|
844
|
-
local changed_files
|
|
845
|
-
changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" -- '*.sh' 2>/dev/null || echo "")
|
|
846
|
-
|
|
847
|
-
if [[ -z "$changed_files" ]]; then
|
|
848
|
-
echo "0"
|
|
849
|
-
return 0
|
|
850
|
-
fi
|
|
851
|
-
|
|
852
|
-
# Check each file for bash 3.2 incompatibilities
|
|
853
|
-
while IFS= read -r filepath; do
|
|
854
|
-
[[ -z "$filepath" ]] && continue
|
|
855
|
-
|
|
856
|
-
# declare -A (associative arrays; declare -a is bash 3.2 compatible)
|
|
857
|
-
local declare_a_count
|
|
858
|
-
declare_a_count=$(grep -c 'declare[[:space:]]*-A' "$filepath" 2>/dev/null || true)
|
|
859
|
-
if [[ "$declare_a_count" -gt 0 ]]; then
|
|
860
|
-
violations=$((violations + declare_a_count))
|
|
861
|
-
violation_details="${violation_details}${filepath}: declare -A (${declare_a_count} occurrences)
|
|
862
|
-
"
|
|
863
|
-
fi
|
|
864
|
-
|
|
865
|
-
# readarray or mapfile
|
|
866
|
-
local readarray_count
|
|
867
|
-
readarray_count=$(grep -c 'readarray\|mapfile' "$filepath" 2>/dev/null || true)
|
|
868
|
-
if [[ "$readarray_count" -gt 0 ]]; then
|
|
869
|
-
violations=$((violations + readarray_count))
|
|
870
|
-
violation_details="${violation_details}${filepath}: readarray/mapfile (${readarray_count} occurrences)
|
|
871
|
-
"
|
|
872
|
-
fi
|
|
873
311
|
|
|
874
|
-
# ${var,,} or ${var^^} (case conversion)
|
|
875
|
-
local case_conv_count
|
|
876
|
-
case_conv_count=$(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*,,' "$filepath" 2>/dev/null || true)
|
|
877
|
-
case_conv_count=$((case_conv_count + $(grep -c '\$\{[a-zA-Z_][a-zA-Z0-9_]*\^\^' "$filepath" 2>/dev/null || true)))
|
|
878
|
-
if [[ "$case_conv_count" -gt 0 ]]; then
|
|
879
|
-
violations=$((violations + case_conv_count))
|
|
880
|
-
violation_details="${violation_details}${filepath}: case conversion \$\{var,,\} or \$\{var\^\^\} (${case_conv_count} occurrences)
|
|
881
|
-
"
|
|
882
|
-
fi
|
|
883
|
-
|
|
884
|
-
# |& (pipe stderr to stdout in-place)
|
|
885
|
-
local pipe_ampersand_count
|
|
886
|
-
pipe_ampersand_count=$(grep -c '|&' "$filepath" 2>/dev/null || true)
|
|
887
|
-
if [[ "$pipe_ampersand_count" -gt 0 ]]; then
|
|
888
|
-
violations=$((violations + pipe_ampersand_count))
|
|
889
|
-
violation_details="${violation_details}${filepath}: |& operator (${pipe_ampersand_count} occurrences)
|
|
890
|
-
"
|
|
891
|
-
fi
|
|
892
|
-
|
|
893
|
-
# ;& or ;;& in case statements (advanced fallthrough)
|
|
894
|
-
local advanced_case_count
|
|
895
|
-
advanced_case_count=$(grep -c ';&\|;;&' "$filepath" 2>/dev/null || true)
|
|
896
|
-
if [[ "$advanced_case_count" -gt 0 ]]; then
|
|
897
|
-
violations=$((violations + advanced_case_count))
|
|
898
|
-
violation_details="${violation_details}${filepath}: advanced case ;& or ;;& (${advanced_case_count} occurrences)
|
|
899
|
-
"
|
|
900
|
-
fi
|
|
901
|
-
|
|
902
|
-
done <<< "$changed_files"
|
|
903
|
-
|
|
904
|
-
# Log details if violations found
|
|
905
|
-
if [[ "$violations" -gt 0 ]]; then
|
|
906
|
-
warn "Bash 3.2 compatibility check: ${violations} violation(s) found:"
|
|
907
|
-
echo "$violation_details" | sed 's/^/ /'
|
|
908
|
-
fi
|
|
909
|
-
|
|
910
|
-
echo "$violations"
|
|
911
|
-
}
|
|
912
|
-
|
|
913
|
-
# ──────────────────────────────────────────────────────────────────────────────
|
|
914
|
-
# Test Coverage Check
|
|
915
|
-
# Runs configured test command and extracts coverage percentage
|
|
916
|
-
# Returns: coverage percentage (0-100), or "skip" if no test command configured
|
|
917
|
-
# ──────────────────────────────────────────────────────────────────────────────
|
|
918
312
|
run_test_coverage_check() {
|
|
919
313
|
local test_cmd="${TEST_CMD:-}"
|
|
920
314
|
if [[ -z "$test_cmd" ]]; then
|
|
@@ -965,91 +359,3 @@ run_test_coverage_check() {
|
|
|
965
359
|
# Scans modified files for anti-patterns: direct echo > file to state/config files
|
|
966
360
|
# Returns: count of violations found
|
|
967
361
|
# ──────────────────────────────────────────────────────────────────────────────
|
|
968
|
-
run_atomic_write_check() {
|
|
969
|
-
local violations=0
|
|
970
|
-
local violation_details=""
|
|
971
|
-
|
|
972
|
-
# Get modified files (not just .sh — includes state/config files)
|
|
973
|
-
local changed_files
|
|
974
|
-
changed_files=$(git diff --name-only "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || echo "")
|
|
975
|
-
|
|
976
|
-
if [[ -z "$changed_files" ]]; then
|
|
977
|
-
echo "0"
|
|
978
|
-
return 0
|
|
979
|
-
fi
|
|
980
|
-
|
|
981
|
-
# Check for direct writes to state/config files (patterns that should use tmp+mv)
|
|
982
|
-
# Look for: echo "..." > state/config files
|
|
983
|
-
while IFS= read -r filepath; do
|
|
984
|
-
[[ -z "$filepath" ]] && continue
|
|
985
|
-
|
|
986
|
-
# Only check state/config/artifacts files
|
|
987
|
-
if [[ ! "$filepath" =~ (state|config|artifact|cache|db|json)$ ]]; then
|
|
988
|
-
continue
|
|
989
|
-
fi
|
|
990
|
-
|
|
991
|
-
# Check for direct redirection writes (> file) in state/config paths
|
|
992
|
-
local bad_writes
|
|
993
|
-
bad_writes=$(git show "HEAD:$filepath" 2>/dev/null | grep -c 'echo.*>' 2>/dev/null || true)
|
|
994
|
-
bad_writes="${bad_writes:-0}"
|
|
995
|
-
|
|
996
|
-
if [[ "$bad_writes" -gt 0 ]]; then
|
|
997
|
-
violations=$((violations + bad_writes))
|
|
998
|
-
violation_details="${violation_details}${filepath}: ${bad_writes} direct write(s) (should use tmp+mv)
|
|
999
|
-
"
|
|
1000
|
-
fi
|
|
1001
|
-
done <<< "$changed_files"
|
|
1002
|
-
|
|
1003
|
-
if [[ "$violations" -gt 0 ]]; then
|
|
1004
|
-
warn "Atomic write violations: ${violations} found (should use tmp file + mv pattern):"
|
|
1005
|
-
echo "$violation_details" | sed 's/^/ /'
|
|
1006
|
-
fi
|
|
1007
|
-
|
|
1008
|
-
echo "$violations"
|
|
1009
|
-
}
|
|
1010
|
-
|
|
1011
|
-
# ──────────────────────────────────────────────────────────────────────────────
|
|
1012
|
-
# New Function Test Detection
|
|
1013
|
-
# Detects new functions added in the diff but checks if corresponding tests exist
|
|
1014
|
-
# Returns: count of untested new functions
|
|
1015
|
-
# ──────────────────────────────────────────────────────────────────────────────
|
|
1016
|
-
run_new_function_test_check() {
|
|
1017
|
-
local untested_functions=0
|
|
1018
|
-
local details=""
|
|
1019
|
-
|
|
1020
|
-
# Get diff
|
|
1021
|
-
local diff_content
|
|
1022
|
-
diff_content=$(git diff "origin/${BASE_BRANCH:-main}...HEAD" 2>/dev/null || true)
|
|
1023
|
-
|
|
1024
|
-
if [[ -z "$diff_content" ]]; then
|
|
1025
|
-
echo "0"
|
|
1026
|
-
return 0
|
|
1027
|
-
fi
|
|
1028
|
-
|
|
1029
|
-
# Extract newly added function definitions (lines starting with +functionname())
|
|
1030
|
-
local new_functions
|
|
1031
|
-
new_functions=$(echo "$diff_content" | grep -E '^\+[a-zA-Z_][a-zA-Z0-9_]*\(\)' | sed 's/^\+//' | sed 's/()//' || true)
|
|
1032
|
-
|
|
1033
|
-
if [[ -z "$new_functions" ]]; then
|
|
1034
|
-
echo "0"
|
|
1035
|
-
return 0
|
|
1036
|
-
fi
|
|
1037
|
-
|
|
1038
|
-
# For each new function, check if test files were modified
|
|
1039
|
-
local test_files_modified=0
|
|
1040
|
-
test_files_modified=$(echo "$diff_content" | grep -c '\-\-\-.*test\|\.test\.\|_test\.' || true)
|
|
1041
|
-
|
|
1042
|
-
# Simple heuristic: if we have new functions but no test file modifications, warn
|
|
1043
|
-
if [[ "$test_files_modified" -eq 0 ]]; then
|
|
1044
|
-
local func_count
|
|
1045
|
-
func_count=$(echo "$new_functions" | wc -l | xargs)
|
|
1046
|
-
untested_functions="$func_count"
|
|
1047
|
-
details="Added ${func_count} new function(s) but no test file modifications detected"
|
|
1048
|
-
fi
|
|
1049
|
-
|
|
1050
|
-
if [[ "$untested_functions" -gt 0 ]]; then
|
|
1051
|
-
warn "New functions without tests: ${details}"
|
|
1052
|
-
fi
|
|
1053
|
-
|
|
1054
|
-
echo "$untested_functions"
|
|
1055
|
-
}
|