shipwright-cli 1.7.1 → 1.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. package/.claude/agents/code-reviewer.md +90 -0
  2. package/.claude/agents/devops-engineer.md +142 -0
  3. package/.claude/agents/pipeline-agent.md +80 -0
  4. package/.claude/agents/shell-script-specialist.md +150 -0
  5. package/.claude/agents/test-specialist.md +196 -0
  6. package/.claude/hooks/post-tool-use.sh +45 -0
  7. package/.claude/hooks/pre-tool-use.sh +25 -0
  8. package/.claude/hooks/session-started.sh +37 -0
  9. package/README.md +212 -814
  10. package/claude-code/CLAUDE.md.shipwright +54 -0
  11. package/claude-code/hooks/notify-idle.sh +2 -2
  12. package/claude-code/hooks/session-start.sh +24 -0
  13. package/claude-code/hooks/task-completed.sh +6 -2
  14. package/claude-code/settings.json.template +12 -0
  15. package/dashboard/public/app.js +4422 -0
  16. package/dashboard/public/index.html +816 -0
  17. package/dashboard/public/styles.css +4755 -0
  18. package/dashboard/server.ts +4315 -0
  19. package/docs/KNOWN-ISSUES.md +18 -10
  20. package/docs/TIPS.md +38 -26
  21. package/docs/patterns/README.md +33 -23
  22. package/package.json +9 -5
  23. package/scripts/adapters/iterm2-adapter.sh +1 -1
  24. package/scripts/adapters/tmux-adapter.sh +52 -23
  25. package/scripts/adapters/wezterm-adapter.sh +26 -14
  26. package/scripts/lib/compat.sh +200 -0
  27. package/scripts/lib/helpers.sh +72 -0
  28. package/scripts/postinstall.mjs +72 -13
  29. package/scripts/{cct → sw} +118 -22
  30. package/scripts/sw-adversarial.sh +274 -0
  31. package/scripts/sw-architecture-enforcer.sh +330 -0
  32. package/scripts/sw-checkpoint.sh +468 -0
  33. package/scripts/sw-cleanup.sh +359 -0
  34. package/scripts/sw-connect.sh +619 -0
  35. package/scripts/{cct-cost.sh → sw-cost.sh} +368 -34
  36. package/scripts/sw-daemon.sh +5574 -0
  37. package/scripts/sw-dashboard.sh +477 -0
  38. package/scripts/sw-developer-simulation.sh +252 -0
  39. package/scripts/sw-docs.sh +635 -0
  40. package/scripts/sw-doctor.sh +907 -0
  41. package/scripts/{cct-fix.sh → sw-fix.sh} +10 -6
  42. package/scripts/{cct-fleet.sh → sw-fleet.sh} +498 -22
  43. package/scripts/sw-github-checks.sh +521 -0
  44. package/scripts/sw-github-deploy.sh +533 -0
  45. package/scripts/sw-github-graphql.sh +972 -0
  46. package/scripts/sw-heartbeat.sh +293 -0
  47. package/scripts/{cct-init.sh → sw-init.sh} +144 -11
  48. package/scripts/sw-intelligence.sh +1196 -0
  49. package/scripts/sw-jira.sh +643 -0
  50. package/scripts/sw-launchd.sh +364 -0
  51. package/scripts/sw-linear.sh +648 -0
  52. package/scripts/{cct-logs.sh → sw-logs.sh} +72 -2
  53. package/scripts/sw-loop.sh +2217 -0
  54. package/scripts/{cct-memory.sh → sw-memory.sh} +514 -36
  55. package/scripts/sw-patrol-meta.sh +417 -0
  56. package/scripts/sw-pipeline-composer.sh +455 -0
  57. package/scripts/sw-pipeline-vitals.sh +1096 -0
  58. package/scripts/sw-pipeline.sh +7593 -0
  59. package/scripts/sw-predictive.sh +820 -0
  60. package/scripts/{cct-prep.sh → sw-prep.sh} +339 -49
  61. package/scripts/{cct-ps.sh → sw-ps.sh} +9 -6
  62. package/scripts/{cct-reaper.sh → sw-reaper.sh} +10 -6
  63. package/scripts/sw-remote.sh +687 -0
  64. package/scripts/sw-self-optimize.sh +1048 -0
  65. package/scripts/sw-session.sh +541 -0
  66. package/scripts/sw-setup.sh +234 -0
  67. package/scripts/sw-status.sh +796 -0
  68. package/scripts/{cct-templates.sh → sw-templates.sh} +9 -4
  69. package/scripts/sw-tmux.sh +591 -0
  70. package/scripts/sw-tracker-jira.sh +277 -0
  71. package/scripts/sw-tracker-linear.sh +292 -0
  72. package/scripts/sw-tracker.sh +409 -0
  73. package/scripts/{cct-upgrade.sh → sw-upgrade.sh} +103 -46
  74. package/scripts/{cct-worktree.sh → sw-worktree.sh} +3 -0
  75. package/templates/pipelines/autonomous.json +35 -6
  76. package/templates/pipelines/cost-aware.json +21 -0
  77. package/templates/pipelines/deployed.json +40 -6
  78. package/templates/pipelines/enterprise.json +16 -2
  79. package/templates/pipelines/fast.json +19 -0
  80. package/templates/pipelines/full.json +28 -2
  81. package/templates/pipelines/hotfix.json +19 -0
  82. package/templates/pipelines/standard.json +31 -0
  83. package/tmux/{claude-teams-overlay.conf → shipwright-overlay.conf} +27 -9
  84. package/tmux/templates/accessibility.json +34 -0
  85. package/tmux/templates/api-design.json +35 -0
  86. package/tmux/templates/architecture.json +1 -0
  87. package/tmux/templates/bug-fix.json +9 -0
  88. package/tmux/templates/code-review.json +1 -0
  89. package/tmux/templates/compliance.json +36 -0
  90. package/tmux/templates/data-pipeline.json +36 -0
  91. package/tmux/templates/debt-paydown.json +34 -0
  92. package/tmux/templates/devops.json +1 -0
  93. package/tmux/templates/documentation.json +1 -0
  94. package/tmux/templates/exploration.json +1 -0
  95. package/tmux/templates/feature-dev.json +1 -0
  96. package/tmux/templates/full-stack.json +8 -0
  97. package/tmux/templates/i18n.json +34 -0
  98. package/tmux/templates/incident-response.json +36 -0
  99. package/tmux/templates/migration.json +1 -0
  100. package/tmux/templates/observability.json +35 -0
  101. package/tmux/templates/onboarding.json +33 -0
  102. package/tmux/templates/performance.json +35 -0
  103. package/tmux/templates/refactor.json +1 -0
  104. package/tmux/templates/release.json +35 -0
  105. package/tmux/templates/security-audit.json +8 -0
  106. package/tmux/templates/spike.json +34 -0
  107. package/tmux/templates/testing.json +1 -0
  108. package/tmux/tmux.conf +98 -9
  109. package/scripts/cct-cleanup.sh +0 -172
  110. package/scripts/cct-daemon.sh +0 -3189
  111. package/scripts/cct-doctor.sh +0 -414
  112. package/scripts/cct-loop.sh +0 -1332
  113. package/scripts/cct-pipeline.sh +0 -3844
  114. package/scripts/cct-session.sh +0 -284
  115. package/scripts/cct-status.sh +0 -169
@@ -0,0 +1,820 @@
1
#!/usr/bin/env bash
# ╔═══════════════════════════════════════════════════════════════════════════╗
# ║ shipwright predictive — Predictive & Proactive Intelligence               ║
# ║ Risk assessment · Anomaly detection · AI patrol · Failure prevention      ║
# ╚═══════════════════════════════════════════════════════════════════════════╝
set -euo pipefail
# Report the exact file:line of any command that fails under `set -e`.
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR

VERSION="1.10.0"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

# ─── Colors (matches Seth's tmux theme) ─────────────────────────────────────
CYAN='\033[38;2;0;212;255m'    # #00d4ff — primary accent
PURPLE='\033[38;2;124;58;237m' # #7c3aed — secondary
BLUE='\033[38;2;0;102;255m'    # #0066ff — tertiary
GREEN='\033[38;2;74;222;128m'  # success
YELLOW='\033[38;2;250;204;21m' # warning
RED='\033[38;2;248;113;113m'   # error
DIM='\033[2m'
BOLD='\033[1m'
RESET='\033[0m'

# ─── Cross-platform compatibility ──────────────────────────────────────────
# shellcheck source=lib/compat.sh
[[ -f "$SCRIPT_DIR/lib/compat.sh" ]] && source "$SCRIPT_DIR/lib/compat.sh"

# Logging helpers — status glyph plus message; error() writes to stderr.
info() { echo -e "${CYAN}${BOLD}▸${RESET} $*"; }
success() { echo -e "${GREEN}${BOLD}✓${RESET} $*"; }
warn() { echo -e "${YELLOW}${BOLD}⚠${RESET} $*"; }
error() { echo -e "${RED}${BOLD}✗${RESET} $*" >&2; }

# Timestamp helpers used throughout the event log.
now_iso() { date -u +"%Y-%m-%dT%H:%M:%SZ"; }
now_epoch() { date +%s; }
35
+
36
# ─── Structured Event Log ──────────────────────────────────────────────────
# Append-only JSONL stream consumed by other shipwright tools.
EVENTS_FILE="${HOME}/.shipwright/events.jsonl"

# emit_event <type> [key=value ...]
# Appends one JSON object to $EVENTS_FILE. Values that look like numbers
# are emitted as JSON numbers; everything else is emitted as a JSON string
# with embedded double quotes escaped.
emit_event() {
  local event_type="$1"; shift
  local json_fields=""
  local kv key val
  for kv in "$@"; do
    key="${kv%%=*}"; val="${kv#*=}"
    # Strict numeric check: integer or decimal with digits after the point.
    # The previous pattern (\.?[0-9]*) also accepted values like "5.",
    # which produced invalid JSON output.
    if [[ "$val" =~ ^-?[0-9]+(\.[0-9]+)?$ ]]; then
      json_fields="${json_fields},\"${key}\":${val}"
    else
      val="${val//\"/\\\"}"; json_fields="${json_fields},\"${key}\":\"${val}\""
    fi
  done
  # Derive the directory from EVENTS_FILE so overrides are honored.
  mkdir -p "$(dirname "$EVENTS_FILE")"
  echo "{\"ts\":\"$(now_iso)\",\"ts_epoch\":$(now_epoch),\"type\":\"${event_type}\"${json_fields}}" >> "$EVENTS_FILE"
}
53
+
54
# ─── Intelligence Engine (optional) ────────────────────────────────────────
# When sw-intelligence.sh sits alongside this script, source it to enable
# AI-backed analysis; otherwise every caller falls back to heuristics.
INTELLIGENCE_AVAILABLE=false
if [[ -f "$SCRIPT_DIR/sw-intelligence.sh" ]]; then
  source "$SCRIPT_DIR/sw-intelligence.sh"
  INTELLIGENCE_AVAILABLE=true
fi

# ─── Storage ───────────────────────────────────────────────────────────────
# Per-repo baselines and learned optimization data live under ~/.shipwright.
BASELINES_DIR="${HOME}/.shipwright/baselines"
OPTIMIZATION_DIR="${HOME}/.shipwright/optimization"
DEFAULT_ANOMALY_THRESHOLD=3.0
DEFAULT_WARNING_MULTIPLIER=2.0
DEFAULT_EMA_ALPHA=0.1
# Environment override wins; otherwise use the default critical multiplier.
ANOMALY_THRESHOLD="${ANOMALY_THRESHOLD:-$DEFAULT_ANOMALY_THRESHOLD}"
69
# ─── Adaptive Threshold Helpers ───────────────────────────────────────────

# _predictive_get_repo_hash
# Emits a short hash identifying the current repository, used to keep
# per-repo configuration isolated. Falls back to hashing $REPO_DIR itself
# when the directory is not inside a git work tree.
_predictive_get_repo_hash() {
  local root
  root=$(git -C "$REPO_DIR" rev-parse --show-toplevel 2>/dev/null || echo "$REPO_DIR")
  compute_md5 --string "$root"
}
78
+
79
# _predictive_get_anomaly_threshold <metric_name>
# Prints the critical-severity multiplier for <metric_name> from the
# per-repo adaptive config, falling back to $DEFAULT_ANOMALY_THRESHOLD.
_predictive_get_anomaly_threshold() {
  local metric="${1:-}"
  local cfg
  cfg="${BASELINES_DIR}/$(_predictive_get_repo_hash)/anomaly-thresholds.json"

  if [[ -n "$metric" && -f "$cfg" ]]; then
    local t
    t=$(jq -r --arg m "$metric" '.[$m].critical_multiplier // empty' "$cfg" 2>/dev/null || true)
    if [[ -n "$t" && "$t" != "null" ]]; then
      echo "$t"
      return 0
    fi
  fi
  echo "$DEFAULT_ANOMALY_THRESHOLD"
}
97
+
98
# _predictive_get_warning_multiplier <metric_name>
# Prints the warning-severity multiplier for <metric_name> from the
# per-repo adaptive config, falling back to $DEFAULT_WARNING_MULTIPLIER.
_predictive_get_warning_multiplier() {
  local metric="${1:-}"
  local cfg
  cfg="${BASELINES_DIR}/$(_predictive_get_repo_hash)/anomaly-thresholds.json"

  if [[ -n "$metric" && -f "$cfg" ]]; then
    local m
    m=$(jq -r --arg m "$metric" '.[$m].warning_multiplier // empty' "$cfg" 2>/dev/null || true)
    if [[ -n "$m" && "$m" != "null" ]]; then
      echo "$m"
      return 0
    fi
  fi
  echo "$DEFAULT_WARNING_MULTIPLIER"
}
116
+
117
# _predictive_get_ema_alpha
# Prints the EMA smoothing factor from the per-repo config, or the default.
_predictive_get_ema_alpha() {
  local cfg
  cfg="${BASELINES_DIR}/$(_predictive_get_repo_hash)/ema-config.json"

  if [[ -f "$cfg" ]]; then
    local a
    a=$(jq -r '.alpha // empty' "$cfg" 2>/dev/null || true)
    if [[ -n "$a" && "$a" != "null" ]]; then
      echo "$a"
      return 0
    fi
  fi
  echo "$DEFAULT_EMA_ALPHA"
}
134
+
135
# _predictive_get_risk_keywords
# Prints the learned keyword→weight JSON object, or an empty string when
# no valid config exists.
_predictive_get_risk_keywords() {
  local f="${OPTIMIZATION_DIR}/risk-keywords.json"
  if [[ -f "$f" ]]; then
    local body
    body=$(jq '.' "$f" 2>/dev/null || true)
    if [[ -n "$body" && "$body" != "null" ]]; then
      echo "$body"
      return 0
    fi
  fi
  echo ""
}
149
+
150
# _predictive_record_anomaly <stage> <metric_name> <severity> <value> <baseline>
# Appends one unconfirmed anomaly record (confirmed: null) to the per-repo
# tracking file so false alarms can be analyzed later.
_predictive_record_anomaly() {
  local stage="${1:-}" metric="${2:-}" severity="${3:-}"
  local value="${4:-0}" baseline="${5:-0}"

  local dir
  dir="${BASELINES_DIR}/$(_predictive_get_repo_hash)"
  mkdir -p "$dir"

  local record
  record=$(jq -c -n \
    --arg ts "$(now_iso)" \
    --argjson epoch "$(now_epoch)" \
    --arg stage "$stage" \
    --arg metric "$metric" \
    --arg severity "$severity" \
    --argjson value "$value" \
    --argjson baseline "$baseline" \
    '{ts: $ts, ts_epoch: $epoch, stage: $stage, metric: $metric, severity: $severity, value: $value, baseline: $baseline, confirmed: null}')
  echo "$record" >> "${dir}/anomaly-tracking.jsonl"
}
176
+
177
# predictive_confirm_anomaly <stage> <metric_name> <was_real_failure>
# After a pipeline completes, confirm whether the most recent unconfirmed
# anomaly for this stage+metric predicted a real failure. The previous
# implementation scanned forward and marked the FIRST (oldest) unconfirmed
# entry, contradicting its own "most recent" intent; this version locates
# the last matching entry first, then rewrites only that line.
predictive_confirm_anomaly() {
  local stage="${1:-}"
  local metric_name="${2:-}"
  local was_real="${3:-false}"

  local repo_hash
  repo_hash=$(_predictive_get_repo_hash)
  local tracking_file="${BASELINES_DIR}/${repo_hash}/anomaly-tracking.jsonl"

  [[ -f "$tracking_file" ]] || return 0

  # Pass 1: find the line number of the MOST RECENT unconfirmed anomaly.
  local target=0 lineno=0 line
  while IFS= read -r line; do
    lineno=$((lineno + 1))
    local line_stage line_metric line_confirmed
    line_stage=$(echo "$line" | jq -r '.stage // ""' 2>/dev/null || true)
    line_metric=$(echo "$line" | jq -r '.metric // ""' 2>/dev/null || true)
    line_confirmed=$(echo "$line" | jq -r '.confirmed // "null"' 2>/dev/null || true)
    if [[ "$line_stage" == "$stage" && "$line_metric" == "$metric_name" && "$line_confirmed" == "null" ]]; then
      target=$lineno
    fi
  done < "$tracking_file"

  # Pass 2: rewrite the file, updating only the targeted entry.
  if [[ "$target" -gt 0 ]]; then
    local tmp_file
    tmp_file=$(mktemp "${TMPDIR:-/tmp}/sw-anomaly-confirm.XXXXXX")
    lineno=0
    while IFS= read -r line; do
      lineno=$((lineno + 1))
      if [[ "$lineno" -eq "$target" ]]; then
        echo "$line" | jq -c --arg c "$was_real" '.confirmed = ($c == "true")' >> "$tmp_file"
      else
        echo "$line" >> "$tmp_file"
      fi
    done < "$tracking_file"
    mv "$tmp_file" "$tracking_file"
  fi

  # Update false-alarm rate and adjust thresholds
  _predictive_update_alarm_rates "$metric_name"
}
220
+
221
+ # _predictive_update_alarm_rates <metric_name>
222
+ # Recalculates false-alarm rate for a metric and adjusts thresholds
223
+ _predictive_update_alarm_rates() {
224
+ local metric_name="${1:-}"
225
+ [[ -z "$metric_name" ]] && return 0
226
+
227
+ local repo_hash
228
+ repo_hash=$(_predictive_get_repo_hash)
229
+ local tracking_file="${BASELINES_DIR}/${repo_hash}/anomaly-tracking.jsonl"
230
+ local thresholds_file="${BASELINES_DIR}/${repo_hash}/anomaly-thresholds.json"
231
+
232
+ [[ -f "$tracking_file" ]] || return 0
233
+
234
+ # Count confirmed entries for this metric
235
+ local total_confirmed=0
236
+ local true_positives=0
237
+ local false_positives=0
238
+
239
+ while IFS= read -r line; do
240
+ local line_metric line_confirmed
241
+ line_metric=$(echo "$line" | jq -r '.metric // ""' 2>/dev/null || true)
242
+ line_confirmed=$(echo "$line" | jq -r '.confirmed // "null"' 2>/dev/null || true)
243
+
244
+ [[ "$line_metric" != "$metric_name" ]] && continue
245
+ [[ "$line_confirmed" == "null" ]] && continue
246
+
247
+ total_confirmed=$((total_confirmed + 1))
248
+ if [[ "$line_confirmed" == "true" ]]; then
249
+ true_positives=$((true_positives + 1))
250
+ else
251
+ false_positives=$((false_positives + 1))
252
+ fi
253
+ done < "$tracking_file"
254
+
255
+ # Need at least 5 confirmed anomalies to adjust
256
+ [[ "$total_confirmed" -lt 5 ]] && return 0
257
+
258
+ local precision
259
+ precision=$(awk -v tp="$true_positives" -v total="$total_confirmed" 'BEGIN { printf "%.2f", (tp / total) * 100 }')
260
+
261
+ # Initialize thresholds file if missing
262
+ mkdir -p "${BASELINES_DIR}/${repo_hash}"
263
+ if [[ ! -f "$thresholds_file" ]]; then
264
+ echo '{}' > "$thresholds_file"
265
+ fi
266
+
267
+ # Adjust thresholds to maintain 90%+ precision
268
+ local current_critical current_warning
269
+ current_critical=$(jq -r --arg m "$metric_name" '.[$m].critical_multiplier // 3.0' "$thresholds_file" 2>/dev/null || echo "3.0")
270
+ current_warning=$(jq -r --arg m "$metric_name" '.[$m].warning_multiplier // 2.0' "$thresholds_file" 2>/dev/null || echo "2.0")
271
+
272
+ local new_critical="$current_critical"
273
+ local new_warning="$current_warning"
274
+
275
+ if awk -v p="$precision" 'BEGIN { exit !(p < 90) }' 2>/dev/null; then
276
+ # Too many false alarms — loosen thresholds (increase multipliers)
277
+ new_critical=$(awk -v c="$current_critical" 'BEGIN { v = c * 1.1; if (v > 10.0) v = 10.0; printf "%.2f", v }')
278
+ new_warning=$(awk -v w="$current_warning" 'BEGIN { v = w * 1.1; if (v > 8.0) v = 8.0; printf "%.2f", v }')
279
+ elif awk -v p="$precision" 'BEGIN { exit !(p > 95) }' 2>/dev/null; then
280
+ # Very high precision — can tighten slightly (decrease multipliers)
281
+ new_critical=$(awk -v c="$current_critical" 'BEGIN { v = c * 0.95; if (v < 1.5) v = 1.5; printf "%.2f", v }')
282
+ new_warning=$(awk -v w="$current_warning" 'BEGIN { v = w * 0.95; if (v < 1.2) v = 1.2; printf "%.2f", v }')
283
+ fi
284
+
285
+ # Atomic write
286
+ local tmp_file
287
+ tmp_file=$(mktemp "${TMPDIR:-/tmp}/sw-anomaly-thresh.XXXXXX")
288
+ jq --arg m "$metric_name" \
289
+ --argjson crit "$new_critical" \
290
+ --argjson warn "$new_warning" \
291
+ --argjson precision "$precision" \
292
+ --argjson tp "$true_positives" \
293
+ --argjson fp "$false_positives" \
294
+ --arg ts "$(now_iso)" \
295
+ '.[$m] = {critical_multiplier: $crit, warning_multiplier: $warn, precision: $precision, true_positives: $tp, false_positives: $fp, updated: $ts}' \
296
+ "$thresholds_file" > "$tmp_file" && mv "$tmp_file" "$thresholds_file" || rm -f "$tmp_file"
297
+
298
+ emit_event "predictive.threshold_adjusted" \
299
+ "metric=$metric_name" \
300
+ "precision=$precision" \
301
+ "critical=$new_critical" \
302
+ "warning=$new_warning"
303
+ }
304
+
305
# ─── GitHub Risk Factors ──────────────────────────────────────────────────

# _predictive_github_risk_factors <issue_json>
# Scores GitHub-derived risk signals: open security alerts, recurring
# similar issues, and low contributor count (bus factor). Prints a JSON
# object; every factor is 0 when the GitHub helper functions are not
# loaded or repo detection fails.
_predictive_github_risk_factors() {
  local issue_json="$1"
  local zeroes='{"security_risk": 0, "churn_risk": 0, "contributor_risk": 0, "recurrence_risk": 0}'

  type _gh_detect_repo &>/dev/null 2>&1 || { echo "$zeroes"; return 0; }
  _gh_detect_repo 2>/dev/null || { echo "$zeroes"; return 0; }

  local owner="${GH_OWNER:-}" repo="${GH_REPO:-}"
  [[ -z "$owner" || -z "$repo" ]] && { echo "$zeroes"; return 0; }

  # Security: scale with the number of active alerts.
  local sec=0
  if type gh_security_alerts &>/dev/null 2>&1; then
    local alerts
    alerts=$(gh_security_alerts "$owner" "$repo" 2>/dev/null | jq 'length' 2>/dev/null || echo "0")
    if [[ "${alerts:-0}" -gt 10 ]]; then
      sec=30
    elif [[ "${alerts:-0}" -gt 5 ]]; then
      sec=20
    elif [[ "${alerts:-0}" -gt 0 ]]; then
      sec=10
    fi
  fi

  # Recurrence: similar historical issues suggest a repeat failure mode.
  local rec=0
  if type gh_similar_issues &>/dev/null 2>&1; then
    local title
    title=$(echo "$issue_json" | jq -r '.title // ""' 2>/dev/null | head -c 100)
    if [[ -n "$title" ]]; then
      local similar
      similar=$(gh_similar_issues "$owner" "$repo" "$title" 5 2>/dev/null | jq 'length' 2>/dev/null || echo "0")
      if [[ "${similar:-0}" -gt 3 ]]; then
        rec=25
      elif [[ "${similar:-0}" -gt 0 ]]; then
        rec=10
      fi
    fi
  fi

  # Bus factor: a single-maintainer repo is riskier to change.
  local cont=0
  if type gh_contributors &>/dev/null 2>&1; then
    local contributors
    contributors=$(gh_contributors "$owner" "$repo" 2>/dev/null | jq 'length' 2>/dev/null || echo "0")
    if [[ "${contributors:-0}" -lt 2 ]]; then
      cont=15
    fi
  fi

  jq -n --argjson sec "$sec" --argjson rec "$rec" --argjson cont "$cont" \
    '{security_risk: $sec, churn_risk: 0, contributor_risk: $cont, recurrence_risk: $rec}'
}
360
+
361
# ═══════════════════════════════════════════════════════════════════════════════
# RISK ASSESSMENT
# ═══════════════════════════════════════════════════════════════════════════════

# predict_pipeline_risk <issue_json> [repo_context]
# Pre-pipeline risk assessment. Prints JSON with overall_risk (0-100),
# failure_stages, and preventative_actions. Prefers an AI assessment when
# the intelligence engine is available; otherwise scores heuristically from
# learned (or hardcoded) risk keywords plus GitHub risk factors.
predict_pipeline_risk() {
  local issue_json="${1:-"{}"}"
  local repo_context="${2:-}"

  if [[ "$INTELLIGENCE_AVAILABLE" == "true" ]] && command -v _intelligence_call_claude &>/dev/null; then
    local prompt
    prompt="Analyze this issue for pipeline risk. Return ONLY valid JSON.

Issue: ${issue_json}
Repo context: ${repo_context:-none}

Return JSON format:
{\"overall_risk\": <0-100>, \"failure_stages\": [{\"stage\": \"<name>\", \"risk\": <0-100>, \"reason\": \"<why>\"}], \"preventative_actions\": [\"<action>\"]}"

    local result
    result=$(_intelligence_call_claude "$prompt" 2>/dev/null || echo "")

    if [[ -n "$result" ]] && echo "$result" | jq -e '.overall_risk' &>/dev/null; then
      local risk
      risk=$(echo "$result" | jq '.overall_risk')
      # Validate: must be a plain integer in [0, 100]. The previous code
      # used [[ -ge ]] directly on the raw model output, which emits
      # arithmetic errors (and misbehaves) on floats or junk values.
      if [[ "$risk" =~ ^[0-9]+$ ]] && [[ "$risk" -ge 0 && "$risk" -le 100 ]]; then
        emit_event "prediction.risk_assessed" "risk=${risk}" "source=ai"
        echo "$result"
        return 0
      fi
    fi
  fi

  # Fallback: heuristic risk assessment
  local risk=50
  local reason="Default medium risk — no AI analysis available"

  # Check for learned keyword weights first, fall back to hardcoded
  local keywords_json
  keywords_json=$(_predictive_get_risk_keywords)

  if [[ -n "$keywords_json" ]]; then
    # Use learned keyword→weight mapping
    local total_weight=0
    local matched_keywords=""
    local issue_lower
    issue_lower=$(echo "$issue_json" | tr '[:upper:]' '[:lower:]')

    while IFS= read -r keyword; do
      [[ -z "$keyword" ]] && continue
      local kw_lower
      kw_lower=$(echo "$keyword" | tr '[:upper:]' '[:lower:]')
      if echo "$issue_lower" | grep -q "$kw_lower" 2>/dev/null; then
        local weight
        weight=$(echo "$keywords_json" | jq -r --arg k "$keyword" '.[$k] // 0' 2>/dev/null || echo "0")
        total_weight=$(awk -v tw="$total_weight" -v w="$weight" 'BEGIN { printf "%.0f", tw + w }')
        matched_keywords="${matched_keywords}${keyword}, "
      fi
    done < <(echo "$keywords_json" | jq -r 'keys[]' 2>/dev/null || true)

    if [[ "$total_weight" -gt 0 ]]; then
      # Clamp risk to 0-100
      risk=$(awk -v base=50 -v tw="$total_weight" 'BEGIN { v = base + tw; if (v > 100) v = 100; if (v < 0) v = 0; printf "%.0f", v }')
      reason="Learned keyword weights: ${matched_keywords%%, }"
    fi
  else
    # Default hardcoded keyword check
    if echo "$issue_json" | grep -qiE "refactor|migration|breaking|security|deploy"; then
      risk=70
      reason="Keywords suggest elevated complexity"
    fi
  fi

  # Add GitHub risk factors if available
  local gh_factors
  gh_factors=$(_predictive_github_risk_factors "$issue_json" 2>/dev/null || echo '{"security_risk": 0, "churn_risk": 0, "contributor_risk": 0, "recurrence_risk": 0}')
  local gh_sec gh_rec gh_cont
  gh_sec=$(echo "$gh_factors" | jq -r '.security_risk // 0' 2>/dev/null || echo "0")
  gh_rec=$(echo "$gh_factors" | jq -r '.recurrence_risk // 0' 2>/dev/null || echo "0")
  gh_cont=$(echo "$gh_factors" | jq -r '.contributor_risk // 0' 2>/dev/null || echo "0")
  local gh_total=$((gh_sec + gh_rec + gh_cont))
  if [[ "$gh_total" -gt 0 ]]; then
    risk=$(awk -v r="$risk" -v g="$gh_total" 'BEGIN { v = r + g; if (v > 100) v = 100; printf "%.0f", v }')
    info "Risk scoring: GitHub factors — security=$gh_sec, recurrence=$gh_rec, contributor=$gh_cont"
  fi

  local result_json
  result_json=$(jq -n \
    --argjson risk "$risk" \
    --arg reason "$reason" \
    --argjson gh_factors "$gh_factors" \
    '{
      overall_risk: $risk,
      failure_stages: [{stage: "build", risk: $risk, reason: $reason}],
      preventative_actions: ["Review scope before starting", "Ensure test coverage"],
      github_risk_factors: $gh_factors
    }')

  emit_event "prediction.risk_assessed" "risk=${risk}" "source=heuristic"
  echo "$result_json"
}
464
+
465
# ═══════════════════════════════════════════════════════════════════════════════
# AI PATROL ANALYSIS
# ═══════════════════════════════════════════════════════════════════════════════

# patrol_ai_analyze <sample_files_list> [recent_git_log]
# Feeds up to 5 sampled source files (first 100 lines each) plus recent git
# history to Claude and prints a JSON array of high/critical findings.
# Prints [] when there is nothing to analyze, the intelligence engine is
# unavailable, or the model response is not a JSON array.
patrol_ai_analyze() {
  local sample_files="${1:-}"
  local git_log="${2:-}"

  if [[ -z "$sample_files" ]]; then
    echo '[]'
    return 0
  fi

  # Gather file contents: cap at 5 files, 100 lines each.
  local file_contents=""
  local file_count=0
  local saved_ifs="$IFS"
  IFS=$'\n'
  local file_path
  for file_path in $sample_files; do
    IFS="$saved_ifs"
    [[ "$file_count" -ge 5 ]] && break
    if [[ -f "$file_path" ]]; then
      file_contents="${file_contents}
--- ${file_path} ---
$(head -100 "$file_path" 2>/dev/null || true)
"
      file_count=$((file_count + 1))
    fi
  done
  IFS="$saved_ifs"

  if [[ -z "$file_contents" ]]; then
    echo '[]'
    return 0
  fi

  if [[ "$INTELLIGENCE_AVAILABLE" != "true" ]] || ! command -v _intelligence_call_claude &>/dev/null; then
    echo '[]'
    return 0
  fi

  local prompt
  prompt="Analyze these source files for issues. Return ONLY a JSON array.
Focus on high/critical severity only. Categories: security, performance, architecture, testing.

Files:
${file_contents}

Recent git log:
${git_log:-none}

Return format: [{\"severity\": \"high\", \"category\": \"security\", \"finding\": \"...\", \"recommendation\": \"...\"}]
Only return findings with severity 'high' or 'critical'. Return [] if nothing significant found."

  local result
  result=$(_intelligence_call_claude "$prompt" 2>/dev/null || echo "")

  if [[ -n "$result" ]] && echo "$result" | jq -e 'type == "array"' &>/dev/null; then
    # Keep only high/critical findings and log each one.
    local filtered
    filtered=$(echo "$result" | jq '[.[] | select(.severity == "high" or .severity == "critical")]')

    local total
    total=$(echo "$filtered" | jq 'length')

    local idx=0
    while [[ "$idx" -lt "$total" ]]; do
      local sev cat finding
      sev=$(echo "$filtered" | jq -r ".[$idx].severity")
      cat=$(echo "$filtered" | jq -r ".[$idx].category")
      finding=$(echo "$filtered" | jq -r ".[$idx].finding" | cut -c1-80)
      emit_event "patrol.ai_finding" "severity=${sev}" "category=${cat}" "finding=${finding}"
      idx=$((idx + 1))
    done

    echo "$filtered"
    return 0
  fi

  # Model returned nothing usable.
  emit_event "patrol.ai_dismissed" "reason=invalid_response"
  echo '[]'
}
553
+
554
# ═══════════════════════════════════════════════════════════════════════════════
# ANOMALY DETECTION
# ═══════════════════════════════════════════════════════════════════════════════

# predict_detect_anomaly <stage> <metric_name> <current_value> [baseline_file]
# Compares a metric sample against its stored baseline and prints one of
# "critical", "warning", or "normal". A missing or zero baseline is treated
# as normal. Non-normal results are logged and recorded for false-alarm
# tracking.
predict_detect_anomaly() {
  local stage="${1:-}"
  local metric="${2:-}"
  local current="${3:-0}"
  local baseline_file="${4:-}"

  if [[ -z "$stage" || -z "$metric" ]]; then
    error "Usage: predict_detect_anomaly <stage> <metric_name> <current_value> [baseline_file]"
    return 1
  fi

  # Fall back to the shared default baseline store.
  if [[ -z "$baseline_file" ]]; then
    mkdir -p "$BASELINES_DIR"
    baseline_file="${BASELINES_DIR}/default.json"
  fi

  # Baselines are keyed by "<stage>.<metric>".
  local key="${stage}.${metric}"
  local baseline=0
  if [[ -f "$baseline_file" ]]; then
    baseline=$(jq -r --arg key "$key" '.[$key].value // 0' "$baseline_file" 2>/dev/null || echo "0")
  fi

  # Nothing learned yet for this stage+metric — assume normal.
  if [[ "$baseline" == "0" || "$baseline" == "null" ]]; then
    echo "normal"
    return 0
  fi

  # Per-metric multipliers (adaptive when configured, defaults otherwise).
  local crit_mult warn_mult
  crit_mult=$(_predictive_get_anomaly_threshold "$metric")
  warn_mult=$(_predictive_get_warning_multiplier "$metric")

  # Floating-point threshold math via awk.
  local crit_at warn_at
  crit_at=$(awk -v bv="$baseline" -v m="$crit_mult" 'BEGIN{printf "%.2f", bv * m}')
  warn_at=$(awk -v bv="$baseline" -v m="$warn_mult" 'BEGIN{printf "%.2f", bv * m}')

  local severity="normal"
  if awk -v cv="$current" -v ct="$crit_at" 'BEGIN{exit !(cv > ct)}' 2>/dev/null; then
    severity="critical"
  elif awk -v cv="$current" -v wt="$warn_at" 'BEGIN{exit !(cv > wt)}' 2>/dev/null; then
    severity="warning"
  fi

  if [[ "$severity" != "normal" ]]; then
    emit_event "prediction.anomaly" \
      "stage=${stage}" \
      "metric=${metric}" \
      "value=${current}" \
      "baseline=${baseline}" \
      "severity=${severity}" \
      "critical_mult=${crit_mult}" \
      "warning_mult=${warn_mult}"

    # Log the hit for later precision analysis / threshold adaptation.
    _predictive_record_anomaly "$stage" "$metric" "$severity" "$current" "$baseline"
  fi

  echo "$severity"
}
625
+
626
# ═══════════════════════════════════════════════════════════════════════════════
# PREVENTATIVE INJECTION
# ═══════════════════════════════════════════════════════════════════════════════

# predict_inject_prevention <stage> <issue_json> [memory_context]
# Prints a prevention warning built from memory patterns relevant to the
# given stage, or nothing when no relevant patterns are found.
predict_inject_prevention() {
  local stage="${1:-}"
  local issue_json="${2:-"{}"}"
  local memory_context="${3:-}"

  if [[ -z "$stage" ]]; then
    return 0
  fi

  # Prefer a caller-supplied memory context when present.
  if [[ -n "$memory_context" ]]; then
    # Failure patterns are tagged with the stage in square brackets.
    local failures
    failures=$(echo "$memory_context" | grep -i "\\[$stage\\]" 2>/dev/null || true)

    if [[ -n "$failures" ]]; then
      local text="WARNING: Previous similar issues failed at ${stage} stage."
      text="${text}
Known patterns:"

      # Include at most 5 matching pattern lines.
      local n=0
      local row
      while IFS= read -r row; do
        [[ "$n" -ge 5 ]] && break
        text="${text}
- ${row}"
        n=$((n + 1))
      done <<< "${failures}"
      text="${text}
Recommended: Review these patterns before proceeding."

      emit_event "prediction.prevented" "stage=${stage}" "patterns=${n}"
      echo "$text"
      return 0
    fi
  fi

  # Otherwise ask the memory subsystem directly, if it is installed.
  if [[ -f "$SCRIPT_DIR/sw-memory.sh" ]]; then
    local mem
    mem=$(bash "$SCRIPT_DIR/sw-memory.sh" inject "$stage" 2>/dev/null || true)

    if [[ -n "$mem" ]] && echo "$mem" | grep -qi "failure\|pattern\|avoid"; then
      local hits
      hits=$(echo "$mem" | grep -iE "^\s*-\s*\[" | head -3 || true)

      if [[ -n "$hits" ]]; then
        local text="WARNING: Memory system flagged relevant patterns for ${stage}:
${hits}
Recommended: Apply known fixes proactively."

        emit_event "prediction.prevented" "stage=${stage}" "source=memory"
        echo "$text"
        return 0
      fi
    fi
  fi

  # No relevant patterns found
  echo ""
}
698
+
699
+ # ═══════════════════════════════════════════════════════════════════════════════
700
+ # BASELINE MANAGEMENT
701
+ # ═══════════════════════════════════════════════════════════════════════════════
702
+
703
# predict_update_baseline <stage> <metric_name> <value> [baseline_file]
# Update a running per-stage metric baseline using an exponential moving
# average: new = (1 - alpha) * old + alpha * current (alpha from
# _predictive_get_ema_alpha; historically 0.1, i.e. 0.9*old + 0.1*current).
# Globals: BASELINES_DIR (default store location when no file is given).
# Returns: 0 on success, 1 on bad arguments or a failed jq rewrite.
predict_update_baseline() {
  local stage="${1:-}"
  local metric_name="${2:-}"
  local value="${3:-0}"
  local baseline_file="${4:-}"

  if [[ -z "$stage" || -z "$metric_name" ]]; then
    error "Usage: predict_update_baseline <stage> <metric_name> <value> [baseline_file]"
    return 1
  fi

  # Reject non-numeric values early: they would corrupt the EMA math and
  # make the --argjson below fail after the awk step silently coerced to 0.
  if ! [[ "$value" =~ ^-?[0-9]+(\.[0-9]+)?$ ]]; then
    error "predict_update_baseline: value must be numeric, got: ${value}"
    return 1
  fi

  # Default baseline file lives under the shared baselines directory.
  if [[ -z "$baseline_file" ]]; then
    mkdir -p "$BASELINES_DIR"
    baseline_file="${BASELINES_DIR}/default.json"
  fi

  local key="${stage}.${metric_name}"

  # Initialize the store on first use.
  if [[ ! -f "$baseline_file" ]]; then
    mkdir -p "$(dirname "$baseline_file")"
    echo '{}' > "$baseline_file"
  fi

  # Read the current baseline; 0 / null / absent all mean "no data yet".
  local old_value old_count
  old_value=$(jq -r --arg key "$key" '.[$key].value // 0' "$baseline_file" 2>/dev/null || echo "0")
  old_count=$(jq -r --arg key "$key" '.[$key].count // 0' "$baseline_file" 2>/dev/null || echo "0")

  local new_value new_count
  new_count=$((old_count + 1))

  if [[ "$old_value" == "0" || "$old_value" == "null" ]]; then
    # First data point — seed the baseline with the raw value.
    new_value="$value"
  else
    # Adaptive EMA alpha (learned or default).
    local alpha
    alpha=$(_predictive_get_ema_alpha)
    # Compute (1 - alpha) inside awk instead of pre-rounding it through
    # printf %.4f in the shell — one fewer lossy rounding step.
    new_value=$(awk -v a="$alpha" -v ov="$old_value" -v nv="$value" \
      'BEGIN{printf "%.2f", (1.0 - a) * ov + a * nv}')
  fi

  local updated_at
  updated_at="$(now_iso)"

  # Atomic write: build the updated JSON next to the target (a same-directory
  # mktemp keeps the final mv an atomic same-filesystem rename), and remove
  # the temp file if jq fails so we never leak temp files.
  local tmp_file
  tmp_file=$(mktemp "${baseline_file}.XXXXXX") || return 1
  if jq --arg key "$key" \
        --argjson val "$new_value" \
        --argjson cnt "$new_count" \
        --arg ts "$updated_at" \
        '.[$key] = {value: $val, count: $cnt, updated: $ts}' \
        "$baseline_file" > "$tmp_file"; then
    mv "$tmp_file" "$baseline_file"
  else
    rm -f "$tmp_file"
    return 1
  fi
}
767
+
768
+ # ═══════════════════════════════════════════════════════════════════════════════
769
+ # HELP
770
+ # ═══════════════════════════════════════════════════════════════════════════════
771
+
772
# show_help
# Print the usage summary for the predictive subcommands.
# Globals (read): CYAN, BOLD, RESET — terminal styling sequences.
show_help() {
  # One printf with a repeated '%b\n' format prints each argument on its own
  # line; %b interprets escape sequences in the expanded text, matching the
  # behavior of `echo -e`.
  printf '%b\n' \
    "" \
    "${CYAN}${BOLD}━━━ shipwright predictive ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" \
    "" \
    " ${BOLD}Risk Assessment${RESET}" \
    " ${CYAN}shipwright predictive risk${RESET} <issue_json> [repo_context]" \
    " Pre-pipeline risk scoring with AI analysis" \
    "" \
    " ${BOLD}Anomaly Detection${RESET}" \
    " ${CYAN}shipwright predictive anomaly${RESET} <stage> <metric> <value> [baseline_file]" \
    " Compare metrics against running baselines" \
    "" \
    " ${BOLD}AI Patrol${RESET}" \
    " ${CYAN}shipwright predictive patrol${RESET} <files_list> [git_log]" \
    " AI-driven code analysis for high-severity issues" \
    "" \
    " ${BOLD}Baseline Management${RESET}" \
    " ${CYAN}shipwright predictive baseline${RESET} <stage> <metric> <value> [baseline_file]" \
    " Update running metric baselines (adaptive EMA)" \
    "" \
    " ${BOLD}False-Alarm Tracking${RESET}" \
    " ${CYAN}shipwright predictive confirm-anomaly${RESET} <stage> <metric> <was_real>" \
    " Confirm whether detected anomaly predicted a real failure" \
    "" \
    "${CYAN}${BOLD}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}" \
    ""
}
799
+
800
+ # ═══════════════════════════════════════════════════════════════════════════════
801
+ # MAIN
802
+ # ═══════════════════════════════════════════════════════════════════════════════
803
+
804
# main <command> [args...]
# Dispatch entry point for the predictive CLI; defaults to the help screen.
main() {
  local action="${1:-help}"
  # Drop the subcommand itself; everything left goes to the handler.
  # shift with no positional args fails — tolerate that silently.
  shift 2>/dev/null || true

  case "$action" in
    risk)               predict_pipeline_risk "$@" ;;
    anomaly)            predict_detect_anomaly "$@" ;;
    confirm-anomaly)    predictive_confirm_anomaly "$@" ;;
    patrol)             patrol_ai_analyze "$@" ;;
    baseline)           predict_update_baseline "$@" ;;
    help | --help | -h) show_help ;;
    *)
      error "Unknown command: $action"
      exit 1
      ;;
  esac
}
817
+
818
# Run main only when this script is executed directly; when sourced as a
# library (e.g. by other sw-*.sh scripts), just expose the functions with
# no side effects.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
  main "$@"
fi