shipwright-cli 2.2.0 → 2.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +15 -16
- package/config/policy.schema.json +104 -29
- package/docs/AGI-PLATFORM-PLAN.md +11 -7
- package/docs/AGI-WHATS-NEXT.md +26 -20
- package/docs/README.md +2 -0
- package/package.json +1 -1
- package/scripts/check-version-consistency.sh +72 -0
- package/scripts/lib/daemon-adaptive.sh +610 -0
- package/scripts/lib/daemon-dispatch.sh +489 -0
- package/scripts/lib/daemon-failure.sh +387 -0
- package/scripts/lib/daemon-patrol.sh +1113 -0
- package/scripts/lib/daemon-poll.sh +1202 -0
- package/scripts/lib/daemon-state.sh +550 -0
- package/scripts/lib/daemon-triage.sh +490 -0
- package/scripts/lib/helpers.sh +81 -1
- package/scripts/lib/pipeline-detection.sh +278 -0
- package/scripts/lib/pipeline-github.sh +196 -0
- package/scripts/lib/pipeline-intelligence.sh +1706 -0
- package/scripts/lib/pipeline-quality-checks.sh +1054 -0
- package/scripts/lib/pipeline-quality.sh +11 -0
- package/scripts/lib/pipeline-stages.sh +2508 -0
- package/scripts/lib/pipeline-state.sh +529 -0
- package/scripts/sw +26 -4
- package/scripts/sw-activity.sh +1 -1
- package/scripts/sw-adaptive.sh +2 -2
- package/scripts/sw-adversarial.sh +1 -1
- package/scripts/sw-architecture-enforcer.sh +1 -1
- package/scripts/sw-auth.sh +1 -1
- package/scripts/sw-autonomous.sh +1 -1
- package/scripts/sw-changelog.sh +1 -1
- package/scripts/sw-checkpoint.sh +1 -1
- package/scripts/sw-ci.sh +1 -1
- package/scripts/sw-cleanup.sh +1 -1
- package/scripts/sw-code-review.sh +1 -1
- package/scripts/sw-connect.sh +1 -1
- package/scripts/sw-context.sh +1 -1
- package/scripts/sw-cost.sh +1 -1
- package/scripts/sw-daemon.sh +52 -4816
- package/scripts/sw-dashboard.sh +1 -1
- package/scripts/sw-db.sh +1 -1
- package/scripts/sw-decompose.sh +1 -1
- package/scripts/sw-deps.sh +1 -1
- package/scripts/sw-developer-simulation.sh +1 -1
- package/scripts/sw-discovery.sh +1 -1
- package/scripts/sw-doc-fleet.sh +1 -1
- package/scripts/sw-docs-agent.sh +1 -1
- package/scripts/sw-docs.sh +1 -1
- package/scripts/sw-doctor.sh +42 -1
- package/scripts/sw-dora.sh +1 -1
- package/scripts/sw-durable.sh +1 -1
- package/scripts/sw-e2e-orchestrator.sh +1 -1
- package/scripts/sw-eventbus.sh +1 -1
- package/scripts/sw-feedback.sh +1 -1
- package/scripts/sw-fix.sh +1 -1
- package/scripts/sw-fleet-discover.sh +1 -1
- package/scripts/sw-fleet-viz.sh +3 -3
- package/scripts/sw-fleet.sh +1 -1
- package/scripts/sw-github-app.sh +1 -1
- package/scripts/sw-github-checks.sh +1 -1
- package/scripts/sw-github-deploy.sh +1 -1
- package/scripts/sw-github-graphql.sh +1 -1
- package/scripts/sw-guild.sh +1 -1
- package/scripts/sw-heartbeat.sh +1 -1
- package/scripts/sw-hygiene.sh +1 -1
- package/scripts/sw-incident.sh +1 -1
- package/scripts/sw-init.sh +1 -1
- package/scripts/sw-instrument.sh +1 -1
- package/scripts/sw-intelligence.sh +1 -1
- package/scripts/sw-jira.sh +1 -1
- package/scripts/sw-launchd.sh +1 -1
- package/scripts/sw-linear.sh +1 -1
- package/scripts/sw-logs.sh +1 -1
- package/scripts/sw-loop.sh +1 -1
- package/scripts/sw-memory.sh +1 -1
- package/scripts/sw-mission-control.sh +1 -1
- package/scripts/sw-model-router.sh +1 -1
- package/scripts/sw-otel.sh +4 -4
- package/scripts/sw-oversight.sh +1 -1
- package/scripts/sw-pipeline-composer.sh +1 -1
- package/scripts/sw-pipeline-vitals.sh +1 -1
- package/scripts/sw-pipeline.sh +23 -56
- package/scripts/sw-pipeline.sh.mock +7 -0
- package/scripts/sw-pm.sh +1 -1
- package/scripts/sw-pr-lifecycle.sh +1 -1
- package/scripts/sw-predictive.sh +1 -1
- package/scripts/sw-prep.sh +1 -1
- package/scripts/sw-ps.sh +1 -1
- package/scripts/sw-public-dashboard.sh +1 -1
- package/scripts/sw-quality.sh +1 -1
- package/scripts/sw-reaper.sh +1 -1
- package/scripts/sw-recruit.sh +9 -1
- package/scripts/sw-regression.sh +1 -1
- package/scripts/sw-release-manager.sh +1 -1
- package/scripts/sw-release.sh +1 -1
- package/scripts/sw-remote.sh +1 -1
- package/scripts/sw-replay.sh +1 -1
- package/scripts/sw-retro.sh +1 -1
- package/scripts/sw-scale.sh +8 -5
- package/scripts/sw-security-audit.sh +1 -1
- package/scripts/sw-self-optimize.sh +158 -7
- package/scripts/sw-session.sh +1 -1
- package/scripts/sw-setup.sh +1 -1
- package/scripts/sw-standup.sh +3 -3
- package/scripts/sw-status.sh +1 -1
- package/scripts/sw-strategic.sh +1 -1
- package/scripts/sw-stream.sh +8 -2
- package/scripts/sw-swarm.sh +7 -10
- package/scripts/sw-team-stages.sh +1 -1
- package/scripts/sw-templates.sh +1 -1
- package/scripts/sw-testgen.sh +1 -1
- package/scripts/sw-tmux-pipeline.sh +1 -1
- package/scripts/sw-tmux.sh +1 -1
- package/scripts/sw-trace.sh +1 -1
- package/scripts/sw-tracker.sh +24 -6
- package/scripts/sw-triage.sh +1 -1
- package/scripts/sw-upgrade.sh +1 -1
- package/scripts/sw-ux.sh +1 -1
- package/scripts/sw-webhook.sh +1 -1
- package/scripts/sw-widgets.sh +1 -1
- package/scripts/sw-worktree.sh +1 -1
|
@@ -0,0 +1,1113 @@
|
|
|
1
|
+
# daemon-patrol.sh — Patrol and patrol_* (for sw-daemon.sh)
|
|
2
|
+
# Source from sw-daemon.sh. Requires state, helpers.
|
|
3
|
+
[[ -n "${_DAEMON_PATROL_LOADED:-}" ]] && return 0
|
|
4
|
+
_DAEMON_PATROL_LOADED=1
|
|
5
|
+
|
|
6
|
+
# Build the comma-separated label list for a patrol-created GitHub issue.
# Globals:   PATROL_LABEL (read) - base label applied to every patrol issue
#            PATROL_AUTO_WATCH (read) - "true" to also attach the watch label
#            WATCH_LABEL (read, may be unset) - label the daemon watches for
# Arguments: $1 - check-specific label (e.g. "security", "dependencies")
# Outputs:   writes "<patrol>,<check>[,<watch>]" to stdout
# Returns:   0 always
patrol_build_labels() {
  local check_label="$1"
  local labels="${PATROL_LABEL},${check_label}"
  # Append the watch label so the daemon's watch loop picks the issue up,
  # but only when auto-watch is on and a watch label is actually configured.
  if [[ "$PATROL_AUTO_WATCH" == "true" && -n "${WATCH_LABEL:-}" ]]; then
    labels="${labels},${WATCH_LABEL}"
  fi
  # printf instead of echo: safe even if a configured label starts with "-"
  # or contains backslash sequences.
  printf '%s\n' "$labels"
}
|
|
14
|
+
|
|
15
|
+
# ─── Proactive Patrol Mode ───────────────────────────────────────────────────
|
|
16
|
+
|
|
17
|
+
daemon_patrol() {
|
|
18
|
+
local once=false
|
|
19
|
+
local dry_run="$PATROL_DRY_RUN"
|
|
20
|
+
|
|
21
|
+
while [[ $# -gt 0 ]]; do
|
|
22
|
+
case "$1" in
|
|
23
|
+
--once) once=true; shift ;;
|
|
24
|
+
--dry-run) dry_run=true; shift ;;
|
|
25
|
+
*) shift ;;
|
|
26
|
+
esac
|
|
27
|
+
done
|
|
28
|
+
|
|
29
|
+
echo -e "${PURPLE}${BOLD}━━━ Codebase Patrol ━━━${RESET}"
|
|
30
|
+
echo ""
|
|
31
|
+
|
|
32
|
+
if [[ "$dry_run" == "true" ]]; then
|
|
33
|
+
echo -e " ${YELLOW}DRY RUN${RESET} — findings will be reported but no issues created"
|
|
34
|
+
echo ""
|
|
35
|
+
fi
|
|
36
|
+
|
|
37
|
+
emit_event "patrol.started" "dry_run=$dry_run"
|
|
38
|
+
|
|
39
|
+
local total_findings=0
|
|
40
|
+
local issues_created=0
|
|
41
|
+
|
|
42
|
+
# ── 1. Dependency Security Audit ──
|
|
43
|
+
# Check 1: dependency security audit.
# Runs npm/pip/cargo audits when the matching manifest + tool exist, enriches
# the count with GitHub security/Dependabot alerts, and (unless dry-run or
# NO_GITHUB) files one GitHub issue per critical/high npm vulnerability.
# Reads enclosing daemon_patrol() scope: dry_run, issues_created,
# total_findings (the latter two are incremented here).
patrol_security_audit() {
  daemon_log INFO "Patrol: running dependency security audit"
  local findings=0

  # npm audit — the only scanner whose findings are itemized into issues.
  if [[ -f "package.json" ]] && command -v npm &>/dev/null; then
    local audit_json
    # npm audit exits non-zero when vulnerabilities exist; "|| true" keeps
    # the JSON while tolerating that.
    audit_json=$(npm audit --json 2>/dev/null || true)
    if [[ -n "$audit_json" ]]; then
      # Process substitution (not a pipe) so findings/issues_created
      # increments survive the loop.
      while IFS= read -r vuln; do
        local severity name advisory_url title
        severity=$(echo "$vuln" | jq -r '.severity // "unknown"')
        name=$(echo "$vuln" | jq -r '.name // "unknown"')
        advisory_url=$(echo "$vuln" | jq -r '.url // ""')
        title=$(echo "$vuln" | jq -r '.title // "vulnerability"')

        # Only report critical/high
        if [[ "$severity" != "critical" ]] && [[ "$severity" != "high" ]]; then
          continue
        fi

        findings=$((findings + 1))
        emit_event "patrol.finding" "check=security" "severity=$severity" "package=$name"

        # Check if issue already exists (dedup by title search + labels).
        if [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
          local existing
          existing=$(gh issue list --label "$PATROL_LABEL" --label "security" \
            --search "Security: $name" --json number -q 'length' 2>/dev/null || echo "0")
          # Respect the per-patrol issue-creation cap (PATROL_MAX_ISSUES).
          if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
            gh issue create \
              --title "Security: ${title} in ${name}" \
              --body "## Dependency Security Finding

| Field | Value |
|-------|-------|
| Package | \`${name}\` |
| Severity | **${severity}** |
| Advisory | ${advisory_url} |
| Found by | Shipwright patrol |
| Date | $(now_iso) |

Auto-detected by \`shipwright daemon patrol\`." \
              --label "$(patrol_build_labels "security")" 2>/dev/null || true
            issues_created=$((issues_created + 1))
            emit_event "patrol.issue_created" "check=security" "package=$name"
          fi
        else
          # Dry-run / no-GitHub: print the finding instead of filing it.
          echo -e " ${RED}●${RESET} ${BOLD}${severity}${RESET}: ${title} in ${CYAN}${name}${RESET}"
        fi
      done < <(echo "$audit_json" | jq -c '.vulnerabilities | to_entries[] | .value' 2>/dev/null)
    fi
  fi

  # pip-audit — count-only; no per-vulnerability issues are filed.
  if [[ -f "requirements.txt" ]] && command -v pip-audit &>/dev/null; then
    local pip_json
    pip_json=$(pip-audit --format=json 2>/dev/null || true)
    if [[ -n "$pip_json" ]]; then
      local vuln_count
      vuln_count=$(echo "$pip_json" | jq '[.dependencies[] | select(.vulns | length > 0)] | length' 2>/dev/null || echo "0")
      findings=$((findings + ${vuln_count:-0}))
    fi
  fi

  # cargo audit — count-only, same as pip-audit.
  if [[ -f "Cargo.toml" ]] && command -v cargo-audit &>/dev/null; then
    local cargo_json
    cargo_json=$(cargo audit --json 2>/dev/null || true)
    if [[ -n "$cargo_json" ]]; then
      local vuln_count
      vuln_count=$(echo "$cargo_json" | jq '.vulnerabilities.found' 2>/dev/null || echo "0")
      findings=$((findings + ${vuln_count:-0}))
    fi
  fi

  # Enrich with GitHub security alerts (helper presumably defined in
  # pipeline-github.sh — confirm; 'type' guards make it optional).
  if type gh_security_alerts &>/dev/null 2>&1 && [[ "${NO_GITHUB:-false}" != "true" ]]; then
    if type _gh_detect_repo &>/dev/null 2>&1; then
      # Best-effort: populates GH_OWNER/GH_REPO from the current repo.
      _gh_detect_repo 2>/dev/null || true
    fi
    local gh_owner="${GH_OWNER:-}" gh_repo="${GH_REPO:-}"
    if [[ -n "$gh_owner" && -n "$gh_repo" ]]; then
      local gh_alerts
      gh_alerts=$(gh_security_alerts "$gh_owner" "$gh_repo" 2>/dev/null || echo "[]")
      local gh_alert_count
      gh_alert_count=$(echo "$gh_alerts" | jq 'length' 2>/dev/null || echo "0")
      if [[ "${gh_alert_count:-0}" -gt 0 ]]; then
        daemon_log WARN "Patrol: $gh_alert_count GitHub security alert(s) found"
        findings=$((findings + gh_alert_count))
      fi
    fi
  fi

  # Enrich with GitHub Dependabot alerts (relies on GH_OWNER/GH_REPO already
  # detected above; no re-detection here).
  if type gh_dependabot_alerts &>/dev/null 2>&1 && [[ "${NO_GITHUB:-false}" != "true" ]]; then
    local gh_owner="${GH_OWNER:-}" gh_repo="${GH_REPO:-}"
    if [[ -n "$gh_owner" && -n "$gh_repo" ]]; then
      local dep_alerts
      dep_alerts=$(gh_dependabot_alerts "$gh_owner" "$gh_repo" 2>/dev/null || echo "[]")
      local dep_alert_count
      dep_alert_count=$(echo "$dep_alerts" | jq 'length' 2>/dev/null || echo "0")
      if [[ "${dep_alert_count:-0}" -gt 0 ]]; then
        daemon_log WARN "Patrol: $dep_alert_count Dependabot alert(s) found"
        findings=$((findings + dep_alert_count))
      fi
    fi
  fi

  # Roll this check's findings into the patrol-wide total.
  total_findings=$((total_findings + findings))
  if [[ "$findings" -gt 0 ]]; then
    daemon_log INFO "Patrol: found ${findings} security vulnerability(ies)"
  else
    daemon_log INFO "Patrol: no security vulnerabilities found"
  fi
}
|
|
159
|
+
|
|
160
|
+
# ── 2. Stale Dependency Check ──
|
|
161
|
+
# Check 2: stale npm dependencies.
# Flags packages 2+ major versions behind (npm projects only) and files a
# single aggregate GitHub issue for all of them.
# Reads enclosing daemon_patrol() scope: dry_run, issues_created,
# total_findings (increments the latter two).
patrol_stale_dependencies() {
  daemon_log INFO "Patrol: checking for stale dependencies"
  local findings=0

  if [[ -f "package.json" ]] && command -v npm &>/dev/null; then
    local outdated_json
    # npm outdated exits 1 when anything is outdated; keep the JSON anyway.
    outdated_json=$(npm outdated --json 2>/dev/null || true)
    if [[ -n "$outdated_json" ]] && [[ "$outdated_json" != "{}" ]]; then
      local stale_packages=""
      # Process substitution so findings/stale_packages persist after loop.
      while IFS= read -r pkg; do
        local name current latest current_major latest_major
        name=$(echo "$pkg" | jq -r '.key')
        current=$(echo "$pkg" | jq -r '.value.current // "0.0.0"')
        latest=$(echo "$pkg" | jq -r '.value.latest // "0.0.0"')
        # Major version = everything before the first dot.
        current_major="${current%%.*}"
        latest_major="${latest%%.*}"

        # Only flag if > 2 major versions behind
        # (regex guards skip non-numeric majors, e.g. "linked" entries).
        if [[ "$latest_major" =~ ^[0-9]+$ ]] && [[ "$current_major" =~ ^[0-9]+$ ]]; then
          local diff=$((latest_major - current_major))
          if [[ "$diff" -ge 2 ]]; then
            findings=$((findings + 1))
            # "\n" kept literal here; rendered later via echo -e.
            stale_packages="${stale_packages}\n- \`${name}\`: ${current} → ${latest} (${diff} major versions behind)"
            emit_event "patrol.finding" "check=stale_dependency" "package=$name" "current=$current" "latest=$latest"

            if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
              echo -e " ${YELLOW}●${RESET} ${CYAN}${name}${RESET}: ${current} → ${latest} (${diff} major versions behind)"
            fi
          fi
        fi
      done < <(echo "$outdated_json" | jq -c 'to_entries[]' 2>/dev/null)

      # Create a single issue for all stale deps
      if [[ "$findings" -gt 0 ]] && [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
        local existing
        existing=$(gh issue list --label "$PATROL_LABEL" --label "dependencies" \
          --search "Stale dependencies" --json number -q 'length' 2>/dev/null || echo "0")
        if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
          gh issue create \
            --title "Update ${findings} stale dependencies" \
            --body "## Stale Dependencies

The following packages are 2+ major versions behind:
$(echo -e "$stale_packages")

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
            --label "$(patrol_build_labels "dependencies")" 2>/dev/null || true
          issues_created=$((issues_created + 1))
          emit_event "patrol.issue_created" "check=stale_dependency" "count=$findings"
        fi
      fi
    fi
  fi

  total_findings=$((total_findings + findings))
  daemon_log INFO "Patrol: found ${findings} stale dependency(ies)"
}
|
|
218
|
+
|
|
219
|
+
# ── 3. Dead Code Detection ──
|
|
220
|
+
# Check 3: dead-code candidates (JS/TS projects only).
# Heuristic: a source file under src/lib/app whose basename never appears in
# any import/require string elsewhere is flagged. One aggregate issue filed.
# Reads enclosing daemon_patrol() scope: dry_run, issues_created,
# total_findings (increments the latter two).
patrol_dead_code() {
  daemon_log INFO "Patrol: scanning for dead code"
  local findings=0
  local dead_files=""

  # For JS/TS projects: find exported files not imported anywhere
  if [[ -f "package.json" ]] || [[ -f "tsconfig.json" ]]; then
    local src_dirs=("src" "lib" "app")
    for dir in "${src_dirs[@]}"; do
      [[ -d "$dir" ]] || continue
      while IFS= read -r file; do
        local basename_no_ext
        # NOTE(review): BRE alternation "\|" is a GNU sed extension — may not
        # strip the extension under BSD sed; confirm on macOS.
        basename_no_ext=$(basename "$file" | sed 's/\.\(ts\|js\|tsx\|jsx\)$//')
        # Skip index files and test files
        [[ "$basename_no_ext" == "index" ]] && continue
        [[ "$basename_no_ext" =~ \.(test|spec)$ ]] && continue

        # Check if this file is imported anywhere
        # (grep -l lists importing files; the second grep excludes the file
        # itself — "$file" acts as a regex there, so dots match loosely).
        local import_count
        import_count=$(grep -rlE "(from|require).*['\"].*${basename_no_ext}['\"]" \
          --include="*.ts" --include="*.js" --include="*.tsx" --include="*.jsx" \
          . 2>/dev/null | grep -cv "$file" || true)
        import_count=${import_count:-0}

        if [[ "$import_count" -eq 0 ]]; then
          findings=$((findings + 1))
          # "\n" kept literal; rendered later via echo -e.
          dead_files="${dead_files}\n- \`${file}\`"
          if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
            echo -e " ${DIM}●${RESET} ${file} ${DIM}(not imported)${RESET}"
          fi
        fi
      done < <(find "$dir" -type f \( -name "*.ts" -o -name "*.js" -o -name "*.tsx" -o -name "*.jsx" \) \
        ! -name "*.test.*" ! -name "*.spec.*" ! -name "*.d.ts" 2>/dev/null)
    done
  fi

  # File one aggregate issue (deduped, capped by PATROL_MAX_ISSUES).
  if [[ "$findings" -gt 0 ]] && [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
    local existing
    existing=$(gh issue list --label "$PATROL_LABEL" --label "tech-debt" \
      --search "Dead code candidates" --json number -q 'length' 2>/dev/null || echo "0")
    if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
      gh issue create \
        --title "Dead code candidates (${findings} files)" \
        --body "## Dead Code Detection

These files appear to have no importers — they may be unused:
$(echo -e "$dead_files")

> **Note:** Some files may be entry points or dynamically loaded. Verify before removing.

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
        --label "$(patrol_build_labels "tech-debt")" 2>/dev/null || true
      issues_created=$((issues_created + 1))
      emit_event "patrol.issue_created" "check=dead_code" "count=$findings"
    fi
  fi

  total_findings=$((total_findings + findings))
  daemon_log INFO "Patrol: found ${findings} dead code candidate(s)"
}
|
|
280
|
+
|
|
281
|
+
# ── 4. Test Coverage Gaps ──
|
|
282
|
+
# Check 4: test-coverage gaps.
# Reads a coverage-summary.json left by a prior pipeline run, flags files
# below 50% line coverage, and files one aggregate issue.
# Reads enclosing daemon_patrol() scope: dry_run, issues_created,
# total_findings (increments the latter two).
patrol_coverage_gaps() {
  daemon_log INFO "Patrol: checking test coverage gaps"
  local findings=0
  local low_cov_files=""

  # Look for coverage reports from last pipeline run
  local coverage_file=""
  for candidate in \
    ".claude/pipeline-artifacts/coverage/coverage-summary.json" \
    "coverage/coverage-summary.json" \
    ".coverage/coverage-summary.json"; do
    if [[ -f "$candidate" ]]; then
      coverage_file="$candidate"
      break
    fi
  done

  if [[ -z "$coverage_file" ]]; then
    daemon_log INFO "Patrol: no coverage report found — skipping"
    return
  fi

  while IFS= read -r entry; do
    local file_path line_pct
    file_path=$(echo "$entry" | jq -r '.key')
    # Missing pct defaults to 100 so the file is treated as covered.
    line_pct=$(echo "$entry" | jq -r '.value.lines.pct // 100')

    # Skip total and well-covered files
    # (awk handles the float comparison bash can't; line_pct is interpolated
    # into the awk program text — value comes from jq, so numeric in practice).
    [[ "$file_path" == "total" ]] && continue
    if awk "BEGIN{exit !($line_pct >= 50)}" 2>/dev/null; then continue; fi

    findings=$((findings + 1))
    # "\n" kept literal; rendered later via echo -e.
    low_cov_files="${low_cov_files}\n- \`${file_path}\`: ${line_pct}% line coverage"

    if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
      echo -e " ${YELLOW}●${RESET} ${file_path}: ${line_pct}% coverage"
    fi
  done < <(jq -c 'to_entries[]' "$coverage_file" 2>/dev/null)

  # File one aggregate issue (deduped, capped by PATROL_MAX_ISSUES).
  if [[ "$findings" -gt 0 ]] && [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
    local existing
    existing=$(gh issue list --label "$PATROL_LABEL" --label "testing" \
      --search "Test coverage gaps" --json number -q 'length' 2>/dev/null || echo "0")
    if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
      gh issue create \
        --title "Improve test coverage for ${findings} file(s)" \
        --body "## Test Coverage Gaps

These files have < 50% line coverage:
$(echo -e "$low_cov_files")

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
        --label "$(patrol_build_labels "testing")" 2>/dev/null || true
      issues_created=$((issues_created + 1))
      emit_event "patrol.issue_created" "check=coverage" "count=$findings"
    fi
  fi

  total_findings=$((total_findings + findings))
  daemon_log INFO "Patrol: found ${findings} low-coverage file(s)"
}
|
|
343
|
+
|
|
344
|
+
# ── 5. Documentation Staleness ──
|
|
345
|
+
# Check 5: documentation staleness.
# Compares git commit timestamps of README.md / CHANGELOG.md / CLAUDE.md
# against source-file commits, optionally auto-syncs AUTO sections via
# sw-docs.sh (and commits the result), and files one aggregate issue.
# Reads enclosing daemon_patrol() scope: dry_run, issues_created,
# total_findings (increments the latter two).
patrol_doc_staleness() {
  daemon_log INFO "Patrol: checking documentation staleness"
  local findings=0
  local stale_docs=""

  # Check if README is older than recent source changes
  if [[ -f "README.md" ]]; then
    local readme_epoch src_epoch
    # Last-commit epoch of README vs. last commit touching any source file.
    readme_epoch=$(git log -1 --format=%ct -- README.md 2>/dev/null || echo "0")
    src_epoch=$(git log -1 --format=%ct -- "*.ts" "*.js" "*.py" "*.go" "*.rs" "*.sh" 2>/dev/null || echo "0")

    if [[ "$src_epoch" -gt 0 ]] && [[ "$readme_epoch" -gt 0 ]]; then
      local drift=$((src_epoch - readme_epoch))
      # Flag if README is > 30 days behind source (2592000 s = 30 days)
      if [[ "$drift" -gt 2592000 ]]; then
        findings=$((findings + 1))
        local days_behind=$((drift / 86400))
        # "\n" kept literal; rendered later via echo -e.
        stale_docs="${stale_docs}\n- \`README.md\`: ${days_behind} days behind source code"
        if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
          echo -e " ${YELLOW}●${RESET} README.md is ${days_behind} days behind source code"
        fi
      fi
    fi
  fi

  # Check if CHANGELOG is behind latest tag
  if [[ -f "CHANGELOG.md" ]]; then
    local latest_tag changelog_epoch tag_epoch
    latest_tag=$(git describe --tags --abbrev=0 2>/dev/null || true)
    if [[ -n "$latest_tag" ]]; then
      changelog_epoch=$(git log -1 --format=%ct -- CHANGELOG.md 2>/dev/null || echo "0")
      tag_epoch=$(git log -1 --format=%ct "$latest_tag" 2>/dev/null || echo "0")
      if [[ "$tag_epoch" -gt "$changelog_epoch" ]] && [[ "$changelog_epoch" -gt 0 ]]; then
        findings=$((findings + 1))
        stale_docs="${stale_docs}\n- \`CHANGELOG.md\`: not updated since tag \`${latest_tag}\`"
        if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
          echo -e " ${YELLOW}●${RESET} CHANGELOG.md not updated since ${latest_tag}"
        fi
      fi
    fi
  fi

  # Check CLAUDE.md staleness (same pattern as README)
  if [[ -f ".claude/CLAUDE.md" ]]; then
    local claudemd_epoch claudemd_src_epoch
    claudemd_src_epoch=$(git log -1 --format=%ct -- "*.ts" "*.js" "*.py" "*.go" "*.rs" "*.sh" 2>/dev/null || echo "0")
    claudemd_epoch=$(git log -1 --format=%ct -- ".claude/CLAUDE.md" 2>/dev/null || echo "0")
    if [[ "$claudemd_src_epoch" -gt 0 ]] && [[ "$claudemd_epoch" -gt 0 ]]; then
      local claude_drift=$((claudemd_src_epoch - claudemd_epoch))
      if [[ "$claude_drift" -gt 2592000 ]]; then
        findings=$((findings + 1))
        local claude_days_behind=$((claude_drift / 86400))
        stale_docs="${stale_docs}\n- \`.claude/CLAUDE.md\`: ${claude_days_behind} days behind source code"
        if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
          echo -e " ${YELLOW}●${RESET} CLAUDE.md is ${claude_days_behind} days behind source code"
        fi
      fi
    fi
  fi

  # Check AUTO section freshness (if sw-docs.sh available)
  # (SCRIPT_DIR is expected from the sourcing sw-daemon.sh — confirm.)
  if [[ -x "$SCRIPT_DIR/sw-docs.sh" ]]; then
    local docs_stale=false
    # "sw-docs.sh check" exits non-zero when AUTO sections are out of date.
    bash "$SCRIPT_DIR/sw-docs.sh" check >/dev/null 2>&1 || docs_stale=true
    if [[ "$docs_stale" == "true" ]]; then
      findings=$((findings + 1))
      stale_docs="${stale_docs}\n- AUTO sections: some documentation sections are stale"
      if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
        echo -e " ${YELLOW}●${RESET} AUTO documentation sections are stale"
      fi
      # Auto-sync if not dry run — this is the only patrol check that
      # mutates the working tree (sync + git add/commit of *.md).
      if [[ "$dry_run" != "true" ]] && [[ "$NO_GITHUB" != "true" ]]; then
        daemon_log INFO "Auto-syncing stale documentation sections"
        bash "$SCRIPT_DIR/sw-docs.sh" sync 2>/dev/null || true
        if ! git diff --quiet -- '*.md' 2>/dev/null; then
          git add -A '*.md' 2>/dev/null || true
          git commit -m "docs: auto-sync stale documentation sections" 2>/dev/null || true
        fi
      fi
    fi
  fi

  # File one aggregate issue (deduped, capped by PATROL_MAX_ISSUES).
  if [[ "$findings" -gt 0 ]] && [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
    local existing
    existing=$(gh issue list --label "$PATROL_LABEL" --label "documentation" \
      --search "Stale documentation" --json number -q 'length' 2>/dev/null || echo "0")
    if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
      gh issue create \
        --title "Stale documentation detected" \
        --body "## Documentation Staleness

The following docs may need updating:
$(echo -e "$stale_docs")

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
        --label "$(patrol_build_labels "documentation")" 2>/dev/null || true
      issues_created=$((issues_created + 1))
      emit_event "patrol.issue_created" "check=documentation" "count=$findings"
    fi
  fi

  total_findings=$((total_findings + findings))
  daemon_log INFO "Patrol: found ${findings} stale documentation item(s)"
}
|
|
449
|
+
|
|
450
|
+
# ── 6. Performance Baseline ──
|
|
451
|
+
# Check 6: test-suite performance regression vs. a stored baseline.
# Pulls the latest "stage.completed" test duration from the daemon events
# log; if it exceeds the saved baseline by >30%, records a finding (and
# possibly an issue); otherwise the baseline file is refreshed.
# Reads enclosing daemon_patrol() scope: dry_run, issues_created,
# total_findings. EVENTS_FILE / DAEMON_DIR come from daemon state.
patrol_performance_baseline() {
  daemon_log INFO "Patrol: checking performance baseline"

  # Look for test timing in recent pipeline events
  if [[ ! -f "$EVENTS_FILE" ]]; then
    daemon_log INFO "Patrol: no events file — skipping performance check"
    return
  fi

  local baseline_file="$DAEMON_DIR/patrol-perf-baseline.json"
  local recent_test_dur
  # Last test-stage duration among the most recent 500 events ("null" if none).
  recent_test_dur=$(tail -500 "$EVENTS_FILE" | \
    jq -s '[.[] | select(.type == "stage.completed" and .stage == "test") | .duration_s] | if length > 0 then .[-1] else null end' \
    2>/dev/null || echo "null")

  if [[ "$recent_test_dur" == "null" ]] || [[ -z "$recent_test_dur" ]]; then
    daemon_log INFO "Patrol: no recent test duration found — skipping"
    return
  fi

  if [[ -f "$baseline_file" ]]; then
    local baseline_dur
    baseline_dur=$(jq -r '.test_duration_s // 0' "$baseline_file" 2>/dev/null || echo "0")
    # NOTE(review): [[ -gt ]] needs integers — a fractional duration_s from
    # jq would make these comparisons error out; confirm emitters use ints.
    if [[ "$baseline_dur" -gt 0 ]]; then
      local threshold=$(( baseline_dur * 130 / 100 )) # 30% slower
      if [[ "$recent_test_dur" -gt "$threshold" ]]; then
        total_findings=$((total_findings + 1))
        local pct_slower=$(( (recent_test_dur - baseline_dur) * 100 / baseline_dur ))
        emit_event "patrol.finding" "check=performance" "baseline=${baseline_dur}s" "current=${recent_test_dur}s" "regression=${pct_slower}%"

        if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
          echo -e " ${RED}●${RESET} Test suite ${pct_slower}% slower than baseline (${baseline_dur}s → ${recent_test_dur}s)"
        elif [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
          local existing
          existing=$(gh issue list --label "$PATROL_LABEL" --label "performance" \
            --search "Test suite performance regression" --json number -q 'length' 2>/dev/null || echo "0")
          if [[ "${existing:-0}" -eq 0 ]]; then
            gh issue create \
              --title "Test suite performance regression (${pct_slower}% slower)" \
              --body "## Performance Regression

| Metric | Value |
|--------|-------|
| Baseline | ${baseline_dur}s |
| Current | ${recent_test_dur}s |
| Regression | ${pct_slower}% |

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
              --label "$(patrol_build_labels "performance")" 2>/dev/null || true
            issues_created=$((issues_created + 1))
            emit_event "patrol.issue_created" "check=performance"
          fi
        fi

        daemon_log WARN "Patrol: test suite ${pct_slower}% slower than baseline"
        # On regression the baseline is deliberately NOT overwritten.
        return
      fi
    fi
  fi

  # Save/update baseline
  jq -n --argjson dur "$recent_test_dur" --arg ts "$(now_iso)" \
    '{test_duration_s: $dur, updated_at: $ts}' > "$baseline_file"
  daemon_log INFO "Patrol: performance baseline updated (${recent_test_dur}s)"
}
|
|
516
|
+
|
|
517
|
+
# ── 7. Recurring Failure Patterns ──
|
|
518
|
+
# Check 7: recurring failure patterns from the memory subsystem.
# Sources sw-memory.sh inside a subshell, asks it for failures seen at least
# PATROL_FAILURES_THRESHOLD times, and files one issue per pattern.
# Reads enclosing daemon_patrol() scope: dry_run, issues_created,
# total_findings (increments the latter two). No-op if threshold <= 0.
patrol_recurring_failures() {
  if [[ "$PATROL_FAILURES_THRESHOLD" -le 0 ]]; then return; fi
  daemon_log INFO "Patrol: checking recurring failure patterns"
  local findings=0

  # Source memory functions if available
  local memory_script="$SCRIPT_DIR/sw-memory.sh"
  if [[ ! -f "$memory_script" ]]; then
    daemon_log INFO "Patrol: memory script not found — skipping recurring failures"
    return
  fi

  # Get actionable failures from memory
  # Note: sw-memory.sh runs its CLI router on source, so we must redirect
  # the source's stdout to /dev/null and only capture the function's output
  # (the extra ( ) subshell also keeps sourced names out of this shell).
  local failures_json
  failures_json=$(
    (
      source "$memory_script" > /dev/null 2>&1 || true
      if command -v memory_get_actionable_failures &>/dev/null; then
        memory_get_actionable_failures "$PATROL_FAILURES_THRESHOLD"
      else
        echo "[]"
      fi
    )
  )

  local count
  count=$(echo "$failures_json" | jq 'length' 2>/dev/null || echo "0")
  if [[ "${count:-0}" -eq 0 ]]; then
    daemon_log INFO "Patrol: no recurring failures above threshold ($PATROL_FAILURES_THRESHOLD)"
    return
  fi

  # Process substitution so findings/issues_created persist after the loop.
  while IFS= read -r failure; do
    local pattern stage seen_count last_seen root_cause
    pattern=$(echo "$failure" | jq -r '.pattern // "unknown"')
    stage=$(echo "$failure" | jq -r '.stage // "unknown"')
    seen_count=$(echo "$failure" | jq -r '.seen_count // 0')
    last_seen=$(echo "$failure" | jq -r '.last_seen // "unknown"')
    root_cause=$(echo "$failure" | jq -r '.root_cause // "Not yet identified"')

    # Truncate pattern for title (first 60 chars)
    local short_pattern
    short_pattern=$(echo "$pattern" | cut -c1-60)

    findings=$((findings + 1))
    emit_event "patrol.finding" "check=recurring_failure" "pattern=$short_pattern" "seen_count=$seen_count"

    if [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
      # Deduplicate by title search + labels.
      local existing
      existing=$(gh issue list --label "$PATROL_LABEL" --label "recurring-failure" \
        --search "Fix recurring: ${short_pattern}" --json number -q 'length' 2>/dev/null || echo "0")
      if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
        gh issue create \
          --title "Fix recurring: ${short_pattern}" \
          --body "## Recurring Failure Pattern

| Field | Value |
|-------|-------|
| Stage | \`${stage}\` |
| Pattern | \`${pattern}\` |
| Seen count | **${seen_count}** |
| Last seen | ${last_seen} |
| Root cause | ${root_cause} |
| Found by | Shipwright patrol |
| Date | $(now_iso) |

### Suggested Actions
- Investigate the root cause in the \`${stage}\` stage
- Check if recent changes introduced the failure
- Add a targeted test to prevent regression

Auto-detected by \`shipwright daemon patrol\`." \
          --label "$(patrol_build_labels "recurring-failure")" 2>/dev/null || true
        issues_created=$((issues_created + 1))
        emit_event "patrol.issue_created" "check=recurring_failure" "pattern=$short_pattern"
      fi
    else
      echo -e " ${RED}●${RESET} ${BOLD}recurring${RESET}: ${short_pattern} (${seen_count}x in ${CYAN}${stage}${RESET})"
    fi
  done < <(echo "$failures_json" | jq -c '.[]' 2>/dev/null)

  total_findings=$((total_findings + findings))
  daemon_log INFO "Patrol: found ${findings} recurring failure pattern(s)"
}
|
|
605
|
+
|
|
606
|
+
# ── 8. DORA Metric Degradation ──
|
|
607
|
+
patrol_dora_degradation() {
  # Purpose: compare DORA-style metrics (deploy frequency, change failure
  # rate, cycle time) between the previous 7-day window and the current
  # 7-day window of the event log; when any metric's grade drops, record a
  # patrol finding and (unless dry-run / GitHub disabled) open one
  # de-duplicated GitHub issue.
  #
  # Reads (outer scope): PATROL_DORA_ENABLED, EVENTS_FILE, NO_GITHUB,
  #   dry_run, issues_created, PATROL_LABEL, PATROL_MAX_ISSUES, color vars.
  # Writes (outer scope): total_findings, issues_created.
  # Returns: 0 in all paths (early-returns when disabled, when the events
  #   file is missing, or when either window has fewer than 3 pipeline runs).
  if [[ "$PATROL_DORA_ENABLED" != "true" ]]; then return; fi
  daemon_log INFO "Patrol: checking DORA metric degradation"

  if [[ ! -f "$EVENTS_FILE" ]]; then
    daemon_log INFO "Patrol: no events file — skipping DORA check"
    return
  fi

  local now_e
  now_e=$(now_epoch)

  # Current 7-day window (604800 s = 7 days)
  local current_start=$((now_e - 604800))
  # Previous 7-day window (starts 14 days back, ends where current starts)
  local prev_start=$((now_e - 1209600))
  local prev_end=$current_start

  # Get events for both windows (jq -s slurps the JSONL event stream)
  local current_events prev_events
  current_events=$(jq -s --argjson start "$current_start" \
    '[.[] | select(.ts_epoch >= $start)]' "$EVENTS_FILE" 2>/dev/null || echo "[]")
  prev_events=$(jq -s --argjson start "$prev_start" --argjson end "$prev_end" \
    '[.[] | select(.ts_epoch >= $start and .ts_epoch < $end)]' "$EVENTS_FILE" 2>/dev/null || echo "[]")

  # Helper: calculate DORA metrics from an event set.
  # NOTE(review): nested shell functions are still global — calc_dora,
  # local_dora_grade and grade_rank leak into (and are redefined in) the
  # global namespace on every call; harmless but worth knowing.
  calc_dora() {
    local events="$1"
    local total successes failures
    total=$(echo "$events" | jq '[.[] | select(.type == "pipeline.completed")] | length' 2>/dev/null || echo "0")
    successes=$(echo "$events" | jq '[.[] | select(.type == "pipeline.completed" and .result == "success")] | length' 2>/dev/null || echo "0")
    failures=$(echo "$events" | jq '[.[] | select(.type == "pipeline.completed" and .result == "failure")] | length' 2>/dev/null || echo "0")

    # Deploys per week. NOTE(review): $1 / ($2 / 7) with $2 fixed at 7
    # reduces to just $successes — over a 7-day window the two coincide,
    # but the expression reads as if the window length were variable.
    local deploy_freq="0"
    [[ "$total" -gt 0 ]] && deploy_freq=$(echo "$successes 7" | awk '{printf "%.1f", $1 / ($2 / 7)}')

    # Change failure rate as a percentage of completed pipelines
    local cfr="0"
    [[ "$total" -gt 0 ]] && cfr=$(echo "$failures $total" | awk '{printf "%.1f", ($1 / $2) * 100}')

    # Median duration (seconds) of successful pipelines; 0 when none
    local cycle_time="0"
    cycle_time=$(echo "$events" | jq '[.[] | select(.type == "pipeline.completed" and .result == "success") | .duration_s] | sort | if length > 0 then .[length/2 | floor] else 0 end' 2>/dev/null || echo "0")

    echo "{\"deploy_freq\":$deploy_freq,\"cfr\":$cfr,\"cycle_time\":$cycle_time,\"total\":$total}"
  }

  local current_metrics prev_metrics
  current_metrics=$(calc_dora "$current_events")
  prev_metrics=$(calc_dora "$prev_events")

  local prev_total
  prev_total=$(echo "$prev_metrics" | jq '.total' 2>/dev/null || echo "0")
  local current_total
  current_total=$(echo "$current_metrics" | jq '.total' 2>/dev/null || echo "0")

  # Need data in both windows to compare (at least 3 completed runs each)
  if [[ "${prev_total:-0}" -lt 3 ]] || [[ "${current_total:-0}" -lt 3 ]]; then
    daemon_log INFO "Patrol: insufficient data for DORA comparison (prev=$prev_total, current=$current_total)"
    return
  fi

  # Grade each metric using dora_grade (defined in daemon_metrics, redefined here inline).
  # awk is used for float comparison since bash [[ ]] is integer-only;
  # `exit !(cond)` makes awk's exit status usable directly in `if`.
  local_dora_grade() {
    local metric="$1" value="$2"
    case "$metric" in
      deploy_freq)
        if awk "BEGIN{exit !($value >= 7)}" 2>/dev/null; then echo "Elite"; return; fi
        if awk "BEGIN{exit !($value >= 1)}" 2>/dev/null; then echo "High"; return; fi
        if awk "BEGIN{exit !($value >= 0.25)}" 2>/dev/null; then echo "Medium"; return; fi
        echo "Low" ;;
      cfr)
        if awk "BEGIN{exit !($value < 5)}" 2>/dev/null; then echo "Elite"; return; fi
        if awk "BEGIN{exit !($value < 10)}" 2>/dev/null; then echo "High"; return; fi
        if awk "BEGIN{exit !($value < 15)}" 2>/dev/null; then echo "Medium"; return; fi
        echo "Low" ;;
      cycle_time)
        # cycle_time is whole seconds, so integer [[ -lt ]] is sufficient
        [[ "$value" -lt 3600 ]] && echo "Elite" && return
        [[ "$value" -lt 86400 ]] && echo "High" && return
        [[ "$value" -lt 604800 ]] && echo "Medium" && return
        echo "Low" ;;
    esac
  }

  # Map grade names onto comparable ranks (higher = better)
  grade_rank() {
    case "$1" in
      Elite) echo 4 ;; High) echo 3 ;; Medium) echo 2 ;; Low) echo 1 ;; *) echo 0 ;;
    esac
  }

  local degraded_metrics=""
  local degradation_details=""

  # Check deploy frequency — degraded when current grade ranks below previous
  local prev_df curr_df
  prev_df=$(echo "$prev_metrics" | jq -r '.deploy_freq')
  curr_df=$(echo "$current_metrics" | jq -r '.deploy_freq')
  local prev_df_grade curr_df_grade
  prev_df_grade=$(local_dora_grade deploy_freq "$prev_df")
  curr_df_grade=$(local_dora_grade deploy_freq "$curr_df")
  if [[ "$(grade_rank "$curr_df_grade")" -lt "$(grade_rank "$prev_df_grade")" ]]; then
    degraded_metrics="${degraded_metrics}deploy_freq "
    degradation_details="${degradation_details}\n| Deploy Frequency | ${prev_df_grade} (${prev_df}/wk) | ${curr_df_grade} (${curr_df}/wk) | Check for blocked PRs, increase automation |"
  fi

  # Check CFR
  local prev_cfr curr_cfr
  prev_cfr=$(echo "$prev_metrics" | jq -r '.cfr')
  curr_cfr=$(echo "$current_metrics" | jq -r '.cfr')
  local prev_cfr_grade curr_cfr_grade
  prev_cfr_grade=$(local_dora_grade cfr "$prev_cfr")
  curr_cfr_grade=$(local_dora_grade cfr "$curr_cfr")
  if [[ "$(grade_rank "$curr_cfr_grade")" -lt "$(grade_rank "$prev_cfr_grade")" ]]; then
    degraded_metrics="${degraded_metrics}cfr "
    degradation_details="${degradation_details}\n| Change Failure Rate | ${prev_cfr_grade} (${prev_cfr}%) | ${curr_cfr_grade} (${curr_cfr}%) | Investigate recent failures, improve test coverage |"
  fi

  # Check Cycle Time
  local prev_ct curr_ct
  prev_ct=$(echo "$prev_metrics" | jq -r '.cycle_time')
  curr_ct=$(echo "$current_metrics" | jq -r '.cycle_time')
  local prev_ct_grade curr_ct_grade
  prev_ct_grade=$(local_dora_grade cycle_time "$prev_ct")
  curr_ct_grade=$(local_dora_grade cycle_time "$curr_ct")
  if [[ "$(grade_rank "$curr_ct_grade")" -lt "$(grade_rank "$prev_ct_grade")" ]]; then
    degraded_metrics="${degraded_metrics}cycle_time "
    degradation_details="${degradation_details}\n| Cycle Time | ${prev_ct_grade} (${prev_ct}s) | ${curr_ct_grade} (${curr_ct}s) | Profile slow stages, check for new slow tests |"
  fi

  if [[ -z "$degraded_metrics" ]]; then
    daemon_log INFO "Patrol: no DORA degradation detected"
    return
  fi

  # NOTE(review): the `findings=0` is immediately overwritten; a DORA
  # regression always counts as exactly one finding regardless of how
  # many individual metrics degraded.
  local findings=0
  findings=1
  total_findings=$((total_findings + findings))
  emit_event "patrol.finding" "check=dora_regression" "metrics=$degraded_metrics"

  if [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
    # Comma-join the degraded metric names for the issue title
    local trimmed
    trimmed=$(echo "$degraded_metrics" | sed 's/ *$//' | tr ' ' ',')
    # Deduplicate: skip if an open dora-regression patrol issue already exists
    local existing
    existing=$(gh issue list --label "$PATROL_LABEL" --label "dora-regression" \
      --search "DORA regression" --json number -q 'length' 2>/dev/null || echo "0")
    if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
      gh issue create \
        --title "DORA regression: ${trimmed}" \
        --body "## DORA Metric Degradation

| Metric | Previous (7d) | Current (7d) | Suggested Action |
|--------|---------------|--------------|------------------|$(echo -e "$degradation_details")

> Compared: previous 7-day window vs current 7-day window.

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
        --label "$(patrol_build_labels "dora-regression")" 2>/dev/null || true
      issues_created=$((issues_created + 1))
      emit_event "patrol.issue_created" "check=dora_regression" "metrics=$trimmed"
    fi
  else
    # Dry-run / GitHub-disabled path: print the finding instead of filing it
    local trimmed
    trimmed=$(echo "$degraded_metrics" | sed 's/ *$//')
    echo -e " ${RED}●${RESET} ${BOLD}DORA regression${RESET}: ${trimmed}"
  fi

  daemon_log INFO "Patrol: DORA degradation detected in: ${degraded_metrics}"
}
|
|
773
|
+
|
|
774
|
+
# ── 9. Untested Scripts ──
|
|
775
|
+
patrol_untested_scripts() {
  # Purpose: flag sw-*.sh scripts that lack a matching sw-NAME-test.sh file
  # and, unless suppressed, open a single de-duplicated GitHub issue listing
  # the ten most-referenced offenders (sorted by cross-script usage count).
  #
  # Reads (outer scope): PATROL_UNTESTED_ENABLED, SCRIPT_DIR, NO_GITHUB,
  #   dry_run, issues_created, PATROL_LABEL, PATROL_MAX_ISSUES, color vars.
  # Writes (outer scope): total_findings, issues_created.
  if [[ "$PATROL_UNTESTED_ENABLED" != "true" ]]; then return; fi
  daemon_log INFO "Patrol: checking for untested scripts"
  local findings=0
  local untested_list=""

  local scripts_dir="$SCRIPT_DIR"
  if [[ ! -d "$scripts_dir" ]]; then
    daemon_log INFO "Patrol: scripts directory not found — skipping"
    return
  fi

  # Collect untested scripts as "usage|basename|lines" records (one per line)
  local untested_entries=""
  while IFS= read -r script; do
    local basename
    basename=$(basename "$script")
    # Skip test scripts themselves
    [[ "$basename" == *-test.sh ]] && continue
    # Skip the main CLI router
    [[ "$basename" == "sw" ]] && continue

    # Extract the name part (sw-NAME.sh -> NAME)
    local name
    name=$(echo "$basename" | sed 's/^sw-//' | sed 's/\.sh$//')

    # Check if a test file exists
    if [[ ! -f "$scripts_dir/sw-${name}-test.sh" ]]; then
      # Count how many OTHER scripts reference this one.
      # FIX: `grep -c` prints its count ("0" included) even when it exits
      # non-zero, so the previous `|| echo "0"` fallback produced "0\n0" —
      # an embedded newline that injected a bogus extra record into
      # untested_entries. Guard `set -e` with `|| true` instead, and use
      # -F/-- so the "." in the basename is matched literally and a
      # leading dash can't be parsed as an option.
      local usage_count
      usage_count=$(grep -rl "sw-${name}" "$scripts_dir"/sw-*.sh 2>/dev/null | grep -Fcv -- "$basename" || true)
      usage_count=${usage_count:-0}

      local line_count
      line_count=$(wc -l < "$script" 2>/dev/null | tr -d ' ' || echo "0")
      line_count=${line_count:-0}

      untested_entries="${untested_entries}${usage_count}|${basename}|${line_count}\n"
      findings=$((findings + 1))
    fi
  done < <(find "$scripts_dir" -maxdepth 1 -name "sw-*.sh" -type f 2>/dev/null | sort)

  if [[ "$findings" -eq 0 ]]; then
    daemon_log INFO "Patrol: all scripts have test files"
    return
  fi

  # Sort by usage count descending; report only the top 10
  local sorted_entries
  sorted_entries=$(echo -e "$untested_entries" | sort -t'|' -k1 -rn | head -10)

  while IFS='|' read -r usage_count basename line_count; do
    [[ -z "$basename" ]] && continue
    untested_list="${untested_list}\n- \`${basename}\` (${line_count} lines, referenced by ${usage_count} scripts)"
    emit_event "patrol.finding" "check=untested_script" "script=$basename" "lines=$line_count" "usage=$usage_count"

    if [[ "$dry_run" == "true" ]] || [[ "$NO_GITHUB" == "true" ]]; then
      echo -e " ${YELLOW}●${RESET} ${CYAN}${basename}${RESET} (${line_count} lines, ${usage_count} refs)"
    fi
  done <<< "$sorted_entries"

  total_findings=$((total_findings + findings))

  if [[ "$NO_GITHUB" != "true" ]] && [[ "$dry_run" != "true" ]]; then
    # Deduplicate: one aggregate issue at a time
    local existing
    existing=$(gh issue list --label "$PATROL_LABEL" --label "test-coverage" \
      --search "Add tests for untested scripts" --json number -q 'length' 2>/dev/null || echo "0")
    if [[ "${existing:-0}" -eq 0 ]] && [[ "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
      gh issue create \
        --title "Add tests for ${findings} untested script(s)" \
        --body "## Untested Scripts

The following scripts have no corresponding test file (\`sw-*-test.sh\`):
$(echo -e "$untested_list")

### How to Add Tests
Each test file should follow the pattern in existing test scripts (e.g., \`sw-daemon-test.sh\`):
- Mock environment with TEMP_DIR
- PASS/FAIL counters
- \`run_test\` harness
- Register in \`package.json\` test script

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
        --label "$(patrol_build_labels "test-coverage")" 2>/dev/null || true
      issues_created=$((issues_created + 1))
      emit_event "patrol.issue_created" "check=untested_scripts" "count=$findings"
    fi
  fi

  daemon_log INFO "Patrol: found ${findings} untested script(s)"
}
|
|
866
|
+
|
|
867
|
+
# ── 10. Retry Exhaustion Patterns ──
|
|
868
|
+
patrol_retry_exhaustion() {
  # Purpose: scan the event log for daemon.retry_exhausted events in the
  # trailing 7 days; once the count reaches PATROL_RETRY_THRESHOLD, record
  # one finding and (unless dry-run / GitHub disabled) open a single
  # de-duplicated GitHub issue summarizing the pattern.
  #
  # Reads (outer scope): PATROL_RETRY_ENABLED, PATROL_RETRY_THRESHOLD,
  #   EVENTS_FILE, NO_GITHUB, dry_run, issues_created, PATROL_LABEL,
  #   PATROL_MAX_ISSUES, color vars.
  # Writes (outer scope): total_findings, issues_created.
  [[ "$PATROL_RETRY_ENABLED" == "true" ]] || return 0
  daemon_log INFO "Patrol: checking retry exhaustion patterns"

  if [[ ! -f "$EVENTS_FILE" ]]; then
    daemon_log INFO "Patrol: no events file — skipping retry check"
    return 0
  fi

  # Window start: now minus 7 days (604800 s)
  local window_start
  window_start=$(($(now_epoch) - 604800))

  # All retry-exhaustion events inside the window, as one JSON array
  local exhausted_json
  exhausted_json=$(jq -s --argjson since "$window_start" \
    '[.[] | select(.type == "daemon.retry_exhausted" and (.ts_epoch // 0) >= $since)]' \
    "$EVENTS_FILE" 2>/dev/null || echo "[]")

  local exhausted_count
  exhausted_count=$(echo "$exhausted_json" | jq 'length' 2>/dev/null || echo "0")

  if [[ "${exhausted_count:-0}" -lt "$PATROL_RETRY_THRESHOLD" ]]; then
    daemon_log INFO "Patrol: retry exhaustions ($exhausted_count) below threshold ($PATROL_RETRY_THRESHOLD)"
    return 0
  fi

  # A retry-exhaustion pattern counts as exactly one finding
  total_findings=$((total_findings + 1))

  # Distinct affected issues plus first/last timestamps for the report
  local issue_list first_ts last_ts
  issue_list=$(echo "$exhausted_json" | jq -r '[.[] | .issue // "unknown"] | unique | join(", ")' 2>/dev/null || echo "unknown")
  first_ts=$(echo "$exhausted_json" | jq -r '[.[] | .ts] | sort | first // "unknown"' 2>/dev/null || echo "unknown")
  last_ts=$(echo "$exhausted_json" | jq -r '[.[] | .ts] | sort | last // "unknown"' 2>/dev/null || echo "unknown")

  emit_event "patrol.finding" "check=retry_exhaustion" "count=$exhausted_count" "issues=$issue_list"

  if [[ "$NO_GITHUB" == "true" || "$dry_run" == "true" ]]; then
    # Reporting-only path: print the finding instead of filing an issue
    echo -e " ${RED}●${RESET} ${BOLD}retry exhaustion${RESET}: ${exhausted_count} exhaustions in 7 days (issues: ${issue_list})"
  else
    # Deduplicate: skip if an open reliability patrol issue already exists
    local existing
    existing=$(gh issue list --label "$PATROL_LABEL" --label "reliability" \
      --search "Retry exhaustion pattern" --json number -q 'length' 2>/dev/null || echo "0")
    if [[ "${existing:-0}" -eq 0 && "$issues_created" -lt "$PATROL_MAX_ISSUES" ]]; then
      gh issue create \
        --title "Retry exhaustion pattern (${exhausted_count} in 7 days)" \
        --body "## Retry Exhaustion Pattern

| Field | Value |
|-------|-------|
| Exhaustions (7d) | **${exhausted_count}** |
| Threshold | ${PATROL_RETRY_THRESHOLD} |
| Affected issues | ${issue_list} |
| First occurrence | ${first_ts} |
| Latest occurrence | ${last_ts} |

### Investigation Steps
1. Check the affected issues for common patterns
2. Review pipeline logs for root cause
3. Consider if max_retries needs adjustment
4. Investigate if an external dependency is flaky

Auto-detected by \`shipwright daemon patrol\` on $(now_iso)." \
        --label "$(patrol_build_labels "reliability")" 2>/dev/null || true
      issues_created=$((issues_created + 1))
      emit_event "patrol.issue_created" "check=retry_exhaustion" "count=$exhausted_count"
    fi
  fi

  daemon_log INFO "Patrol: found retry exhaustion pattern (${exhausted_count} in 7 days)"
}
|
|
942
|
+
|
|
943
|
+
# ── Stage 1: Run all grep-based patrol checks (fast pre-filter) ──
# NOTE(review): this statement run uses `local`, so it sits inside an
# enclosing function whose header is above this chunk — presumably the main
# patrol entry point; confirm against the full file.
# Each stanza snapshots total_findings, runs one check, and appends a
# "name: N finding(s); " fragment to the summary when the check added any.
local patrol_findings_summary=""
local pre_check_findings=0

echo -e " ${BOLD}Security Audit${RESET}"
pre_check_findings=$total_findings
patrol_security_audit
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}security: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Stale Dependencies${RESET}"
pre_check_findings=$total_findings
patrol_stale_dependencies
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}stale_deps: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Dead Code Detection${RESET}"
pre_check_findings=$total_findings
patrol_dead_code
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}dead_code: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Test Coverage Gaps${RESET}"
pre_check_findings=$total_findings
patrol_coverage_gaps
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}coverage: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Documentation Staleness${RESET}"
pre_check_findings=$total_findings
patrol_doc_staleness
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}docs: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Performance Baseline${RESET}"
pre_check_findings=$total_findings
patrol_performance_baseline
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}performance: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Recurring Failures${RESET}"
pre_check_findings=$total_findings
patrol_recurring_failures
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}recurring_failures: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}DORA Degradation${RESET}"
pre_check_findings=$total_findings
patrol_dora_degradation
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}dora: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Untested Scripts${RESET}"
pre_check_findings=$total_findings
patrol_untested_scripts
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}untested: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

echo -e " ${BOLD}Retry Exhaustion${RESET}"
pre_check_findings=$total_findings
patrol_retry_exhaustion
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}retry_exhaustion: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

# Dead-pane reaping only makes sense inside a tmux session and needs the
# reaper helper script to be present and executable.
echo -e " ${BOLD}Dead Pane Reaping${RESET}"
pre_check_findings=$total_findings
if [[ -x "$SCRIPT_DIR/sw-reaper.sh" ]] && [[ -n "${TMUX:-}" ]]; then
  local reaper_output
  reaper_output=$(bash "$SCRIPT_DIR/sw-reaper.sh" --once 2>/dev/null) || true
  local reaped_count=0
  # grep -c prints 0 (and exits 1) when nothing matched; || true guards set -e
  reaped_count=$(echo "$reaper_output" | grep -c "Reaped" 2>/dev/null || true)
  if [[ "${reaped_count:-0}" -gt 0 ]]; then
    total_findings=$((total_findings + reaped_count))
    echo -e " ${CYAN}●${RESET} Reaped ${reaped_count} dead agent pane(s)"
  else
    echo -e " ${GREEN}●${RESET} No dead panes found"
  fi
else
  echo -e " ${DIM}●${RESET} Skipped (no tmux session or reaper not found)"
fi
if [[ "$total_findings" -gt "$pre_check_findings" ]]; then
  patrol_findings_summary="${patrol_findings_summary}reaper: $((total_findings - pre_check_findings)) finding(s); "
fi
echo ""

# ── Stage 2: AI-Powered Confirmation (if enabled) ──
# Requires prediction to be enabled AND the patrol_ai_analyze function to be
# in scope (presumably sourced from the intelligence library — confirm).
if [[ "${PREDICTION_ENABLED:-false}" == "true" ]] && type patrol_ai_analyze &>/dev/null 2>&1; then
  daemon_log INFO "Intelligence: using AI patrol analysis (prediction enabled)"
  echo -e " ${BOLD}AI Deep Analysis${RESET}"
  # Sample recent source files for AI analysis
  local sample_files=""
  local git_log_recent=""
  sample_files=$(git diff --name-only HEAD~5 2>/dev/null | head -10 | tr '\n' ',' || echo "")
  git_log_recent=$(git log --oneline -10 2>/dev/null || echo "")
  # Include grep-based findings summary as context for AI confirmation
  if [[ -n "$patrol_findings_summary" ]]; then
    git_log_recent="${git_log_recent}

Patrol pre-filter findings to confirm: ${patrol_findings_summary}"
    daemon_log INFO "Patrol: passing ${total_findings} grep findings to AI for confirmation"
  fi
  if [[ -n "$sample_files" ]]; then
    local ai_findings
    ai_findings=$(patrol_ai_analyze "$sample_files" "$git_log_recent" 2>/dev/null || echo "[]")
    if [[ -n "$ai_findings" && "$ai_findings" != "[]" ]]; then
      local ai_count
      ai_count=$(echo "$ai_findings" | jq 'length' 2>/dev/null || echo "0")
      ai_count=${ai_count:-0}
      total_findings=$((total_findings + ai_count))
      echo -e " ${CYAN}●${RESET} AI confirmed findings + found ${ai_count} additional issue(s)"
      emit_event "patrol.ai_analysis" "findings=$ai_count" "grep_findings=${patrol_findings_summary:-none}"
    else
      echo -e " ${GREEN}●${RESET} AI analysis: grep findings confirmed, no additional issues"
    fi
  fi
  echo ""
else
  daemon_log INFO "Intelligence: using grep-only patrol (prediction disabled, enable with intelligence.prediction_enabled=true)"
fi

# ── Meta Self-Improvement Patrol ──
if [[ -f "$SCRIPT_DIR/sw-patrol-meta.sh" ]]; then
  # shellcheck source=sw-patrol-meta.sh
  source "$SCRIPT_DIR/sw-patrol-meta.sh"
  patrol_meta_run
fi

# ── Strategic Intelligence Patrol (requires CLAUDE_CODE_OAUTH_TOKEN) ──
if [[ -f "$SCRIPT_DIR/sw-strategic.sh" ]] && [[ -n "${CLAUDE_CODE_OAUTH_TOKEN:-}" ]]; then
  # shellcheck source=sw-strategic.sh
  source "$SCRIPT_DIR/sw-strategic.sh"
  strategic_patrol_run || true
fi

# ── Summary ──
emit_event "patrol.completed" "findings=$total_findings" "issues_created=$issues_created" "dry_run=$dry_run"

echo -e "${PURPLE}${BOLD}━━━ Patrol Summary ━━━${RESET}"
echo -e " Findings: ${total_findings}"
echo -e " Issues created: ${issues_created}"
if [[ "$dry_run" == "true" ]]; then
  echo -e " ${DIM}(dry run — no issues were created)${RESET}"
fi
echo ""

daemon_log INFO "Patrol complete: ${total_findings} findings, ${issues_created} issues created"

# Adapt patrol limits based on hit rate
adapt_patrol_limits "$total_findings" "$PATROL_MAX_ISSUES"
|
|
1112
|
+
}
|
|
1113
|
+
|