shipwright-cli 1.7.1 → 1.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/.claude/agents/code-reviewer.md +90 -0
  2. package/.claude/agents/devops-engineer.md +142 -0
  3. package/.claude/agents/pipeline-agent.md +80 -0
  4. package/.claude/agents/shell-script-specialist.md +150 -0
  5. package/.claude/agents/test-specialist.md +196 -0
  6. package/.claude/hooks/post-tool-use.sh +38 -0
  7. package/.claude/hooks/pre-tool-use.sh +25 -0
  8. package/.claude/hooks/session-started.sh +37 -0
  9. package/README.md +212 -814
  10. package/claude-code/CLAUDE.md.shipwright +54 -0
  11. package/claude-code/hooks/notify-idle.sh +2 -2
  12. package/claude-code/hooks/session-start.sh +24 -0
  13. package/claude-code/hooks/task-completed.sh +6 -2
  14. package/claude-code/settings.json.template +12 -0
  15. package/dashboard/public/app.js +4422 -0
  16. package/dashboard/public/index.html +816 -0
  17. package/dashboard/public/styles.css +4755 -0
  18. package/dashboard/server.ts +4315 -0
  19. package/docs/KNOWN-ISSUES.md +18 -10
  20. package/docs/TIPS.md +38 -26
  21. package/docs/patterns/README.md +33 -23
  22. package/package.json +9 -5
  23. package/scripts/adapters/iterm2-adapter.sh +1 -1
  24. package/scripts/adapters/tmux-adapter.sh +52 -23
  25. package/scripts/adapters/wezterm-adapter.sh +26 -14
  26. package/scripts/lib/compat.sh +200 -0
  27. package/scripts/lib/helpers.sh +72 -0
  28. package/scripts/postinstall.mjs +72 -13
  29. package/scripts/{cct → sw} +109 -21
  30. package/scripts/sw-adversarial.sh +274 -0
  31. package/scripts/sw-architecture-enforcer.sh +330 -0
  32. package/scripts/sw-checkpoint.sh +390 -0
  33. package/scripts/{cct-cleanup.sh → sw-cleanup.sh} +3 -1
  34. package/scripts/sw-connect.sh +619 -0
  35. package/scripts/{cct-cost.sh → sw-cost.sh} +368 -34
  36. package/scripts/{cct-daemon.sh → sw-daemon.sh} +2217 -204
  37. package/scripts/sw-dashboard.sh +477 -0
  38. package/scripts/sw-developer-simulation.sh +252 -0
  39. package/scripts/sw-docs.sh +635 -0
  40. package/scripts/sw-doctor.sh +907 -0
  41. package/scripts/{cct-fix.sh → sw-fix.sh} +10 -6
  42. package/scripts/{cct-fleet.sh → sw-fleet.sh} +498 -22
  43. package/scripts/sw-github-checks.sh +521 -0
  44. package/scripts/sw-github-deploy.sh +533 -0
  45. package/scripts/sw-github-graphql.sh +972 -0
  46. package/scripts/sw-heartbeat.sh +293 -0
  47. package/scripts/{cct-init.sh → sw-init.sh} +144 -11
  48. package/scripts/sw-intelligence.sh +1196 -0
  49. package/scripts/sw-jira.sh +643 -0
  50. package/scripts/sw-launchd.sh +364 -0
  51. package/scripts/sw-linear.sh +648 -0
  52. package/scripts/{cct-logs.sh → sw-logs.sh} +72 -2
  53. package/scripts/{cct-loop.sh → sw-loop.sh} +534 -44
  54. package/scripts/{cct-memory.sh → sw-memory.sh} +321 -38
  55. package/scripts/sw-patrol-meta.sh +417 -0
  56. package/scripts/sw-pipeline-composer.sh +455 -0
  57. package/scripts/{cct-pipeline.sh → sw-pipeline.sh} +2319 -178
  58. package/scripts/sw-predictive.sh +820 -0
  59. package/scripts/{cct-prep.sh → sw-prep.sh} +339 -49
  60. package/scripts/{cct-ps.sh → sw-ps.sh} +6 -4
  61. package/scripts/{cct-reaper.sh → sw-reaper.sh} +6 -4
  62. package/scripts/sw-remote.sh +687 -0
  63. package/scripts/sw-self-optimize.sh +947 -0
  64. package/scripts/sw-session.sh +519 -0
  65. package/scripts/sw-setup.sh +234 -0
  66. package/scripts/sw-status.sh +605 -0
  67. package/scripts/{cct-templates.sh → sw-templates.sh} +9 -4
  68. package/scripts/sw-tmux.sh +591 -0
  69. package/scripts/sw-tracker-jira.sh +277 -0
  70. package/scripts/sw-tracker-linear.sh +292 -0
  71. package/scripts/sw-tracker.sh +409 -0
  72. package/scripts/{cct-upgrade.sh → sw-upgrade.sh} +103 -46
  73. package/scripts/{cct-worktree.sh → sw-worktree.sh} +3 -0
  74. package/templates/pipelines/autonomous.json +27 -5
  75. package/templates/pipelines/full.json +12 -0
  76. package/templates/pipelines/standard.json +12 -0
  77. package/tmux/{claude-teams-overlay.conf → shipwright-overlay.conf} +27 -9
  78. package/tmux/templates/accessibility.json +34 -0
  79. package/tmux/templates/api-design.json +35 -0
  80. package/tmux/templates/architecture.json +1 -0
  81. package/tmux/templates/bug-fix.json +9 -0
  82. package/tmux/templates/code-review.json +1 -0
  83. package/tmux/templates/compliance.json +36 -0
  84. package/tmux/templates/data-pipeline.json +36 -0
  85. package/tmux/templates/debt-paydown.json +34 -0
  86. package/tmux/templates/devops.json +1 -0
  87. package/tmux/templates/documentation.json +1 -0
  88. package/tmux/templates/exploration.json +1 -0
  89. package/tmux/templates/feature-dev.json +1 -0
  90. package/tmux/templates/full-stack.json +8 -0
  91. package/tmux/templates/i18n.json +34 -0
  92. package/tmux/templates/incident-response.json +36 -0
  93. package/tmux/templates/migration.json +1 -0
  94. package/tmux/templates/observability.json +35 -0
  95. package/tmux/templates/onboarding.json +33 -0
  96. package/tmux/templates/performance.json +35 -0
  97. package/tmux/templates/refactor.json +1 -0
  98. package/tmux/templates/release.json +35 -0
  99. package/tmux/templates/security-audit.json +8 -0
  100. package/tmux/templates/spike.json +34 -0
  101. package/tmux/templates/testing.json +1 -0
  102. package/tmux/tmux.conf +98 -9
  103. package/scripts/cct-doctor.sh +0 -414
  104. package/scripts/cct-session.sh +0 -284
  105. package/scripts/cct-status.sh +0 -169
@@ -0,0 +1,1196 @@
1
#!/usr/bin/env bash
#
# shipwright intelligence — AI-powered analysis & decision engine.
# Semantic issue analysis · pipeline composition · cost prediction.
#
set -euo pipefail
trap 'echo "ERROR: $BASH_SOURCE:$LINENO exited with status $?" >&2' ERR

VERSION="1.9.0"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

# ─── Colors (truecolor escapes, matches Seth's tmux theme) ──────────────────
CYAN='\033[38;2;0;212;255m'    # #00d4ff — primary accent
PURPLE='\033[38;2;124;58;237m' # #7c3aed — secondary
BLUE='\033[38;2;0;102;255m'    # #0066ff — tertiary
GREEN='\033[38;2;74;222;128m'  # success
YELLOW='\033[38;2;250;204;21m' # warning
RED='\033[38;2;248;113;113m'   # error
DIM='\033[2m'
BOLD='\033[1m'
RESET='\033[0m'

# ─── Cross-platform compatibility (provides compute_md5, …) ────────────────
# shellcheck source=lib/compat.sh
[[ -f "$SCRIPT_DIR/lib/compat.sh" ]] && source "$SCRIPT_DIR/lib/compat.sh"
27
+
28
# ─── Output helpers ─────────────────────────────────────────────────────────
# Colored status glyph followed by the caller's message. error() goes to
# stderr; everything else goes to stdout. %b interprets the escape
# sequences stored in the color variables (same effect as `echo -e`).
info()    { printf '%b\n' "${CYAN}${BOLD}▸${RESET} $*"; }
success() { printf '%b\n' "${GREEN}${BOLD}✓${RESET} $*"; }
warn()    { printf '%b\n' "${YELLOW}${BOLD}⚠${RESET} $*"; }
error()   { printf '%b\n' "${RED}${BOLD}✗${RESET} $*" >&2; }

# Timestamps: ISO-8601 UTC for JSON records, epoch seconds for arithmetic.
now_iso()   { date -u +"%Y-%m-%dT%H:%M:%SZ"; }
now_epoch() { date +%s; }
36
+
37
# ─── Structured Event Log ──────────────────────────────────────────────────
# Append-only JSONL stream shared by all shipwright tools.
EVENTS_FILE="${HOME}/.shipwright/events.jsonl"

# emit_event TYPE [key=value ...]
# Appends one JSON object per call to $EVENTS_FILE. Values that parse as
# numbers are emitted as JSON numbers; everything else becomes a JSON
# string with backslashes, quotes and newlines escaped.
emit_event() {
  local event_type="$1"
  shift
  local json_fields=""
  for kv in "$@"; do
    local key="${kv%%=*}"
    local val="${kv#*=}"
    # Strict numeric check: optional sign, digits, and an optional
    # fractional part that must contain digits. The previous pattern
    # (\.?[0-9]*) accepted values like "5.", which is not valid JSON.
    if [[ "$val" =~ ^-?[0-9]+(\.[0-9]+)?$ ]]; then
      json_fields="${json_fields},\"${key}\":${val}"
    else
      # Escape backslashes first (otherwise the quote-escaping below would
      # itself get double-escaped), then quotes, then fold newlines so the
      # record stays a single, valid JSONL line.
      val="${val//\\/\\\\}"
      val="${val//\"/\\\"}"
      val="${val//$'\n'/\\n}"
      json_fields="${json_fields},\"${key}\":\"${val}\""
    fi
  done
  mkdir -p "${HOME}/.shipwright"
  echo "{\"ts\":\"$(now_iso)\",\"ts_epoch\":$(now_epoch),\"type\":\"${event_type}\"${json_fields}}" >> "$EVENTS_FILE"
}
57
+
58
# ─── Intelligence Configuration ─────────────────────────────────────────────
INTELLIGENCE_CACHE="${REPO_DIR}/.claude/intelligence-cache.json"
INTELLIGENCE_CONFIG_DIR="${HOME}/.shipwright/optimization"
CACHE_TTL_CONFIG="${INTELLIGENCE_CONFIG_DIR}/cache-ttl.json"
CACHE_STATS_FILE="${INTELLIGENCE_CONFIG_DIR}/cache-stats.json"
DEFAULT_CACHE_TTL=3600 # 1 hour (fallback)

# Resolve the cache TTL in seconds: prefer the adaptively-tuned value in
# $CACHE_TTL_CONFIG, falling back to $DEFAULT_CACHE_TTL when the config is
# missing, unreadable, or not a positive integer.
_intelligence_get_cache_ttl() {
  if [[ -f "$CACHE_TTL_CONFIG" ]]; then
    local ttl
    ttl=$(jq -r '.ttl // empty' "$CACHE_TTL_CONFIG" 2>/dev/null || true)
    # Validate explicitly as a positive integer. The previous check used
    # `[[ "$ttl" -gt 0 ]] 2>/dev/null`, relying on a suppressed [[ error
    # to reject non-numeric values — fragile and hides real failures.
    if [[ "$ttl" =~ ^[0-9]+$ ]] && (( ttl > 0 )); then
      echo "$ttl"
      return 0
    fi
  fi
  echo "$DEFAULT_CACHE_TTL"
}
77
+
78
# Record one cache lookup result ("hit" or "miss", default miss) in
# $CACHE_STATS_FILE, and once per 20-access window re-evaluate the
# adaptive TTL via _intelligence_adjust_cache_ttl.
_intelligence_track_cache_access() {
  local outcome="${1:-miss}"

  mkdir -p "$INTELLIGENCE_CONFIG_DIR"
  [[ -f "$CACHE_STATS_FILE" ]] || echo '{"hits":0,"misses":0,"total":0}' > "$CACHE_STATS_FILE"

  # Pick the counter to bump, then rewrite the stats file atomically.
  local filter='.misses += 1 | .total += 1'
  [[ "$outcome" == "hit" ]] && filter='.hits += 1 | .total += 1'

  local scratch
  scratch=$(mktemp "${TMPDIR:-/tmp}/sw-cache-stats.XXXXXX")
  jq "$filter" "$CACHE_STATS_FILE" > "$scratch" && mv "$scratch" "$CACHE_STATS_FILE" || rm -f "$scratch"

  # Re-tune the TTL every 20th access.
  local total
  total=$(jq '.total // 0' "$CACHE_STATS_FILE" 2>/dev/null || echo "0")
  if (( total > 0 && total % 20 == 0 )); then
    _intelligence_adjust_cache_ttl
  fi
}
102
+
103
+ # Adjust cache TTL based on hit/miss rates
104
+ _intelligence_adjust_cache_ttl() {
105
+ [[ -f "$CACHE_STATS_FILE" ]] || return 0
106
+
107
+ local hits misses total
108
+ hits=$(jq '.hits // 0' "$CACHE_STATS_FILE" 2>/dev/null || echo "0")
109
+ misses=$(jq '.misses // 0' "$CACHE_STATS_FILE" 2>/dev/null || echo "0")
110
+ total=$(jq '.total // 0' "$CACHE_STATS_FILE" 2>/dev/null || echo "0")
111
+
112
+ [[ "$total" -lt 10 ]] && return 0
113
+
114
+ local miss_rate
115
+ miss_rate=$(awk -v m="$misses" -v t="$total" 'BEGIN { printf "%.0f", (m / t) * 100 }')
116
+
117
+ local current_ttl
118
+ current_ttl=$(_intelligence_get_cache_ttl)
119
+ local new_ttl="$current_ttl"
120
+
121
+ if [[ "$miss_rate" -gt 30 ]]; then
122
+ # High miss rate — reduce TTL (data getting stale too often)
123
+ new_ttl=$(awk -v ttl="$current_ttl" 'BEGIN { v = int(ttl * 0.75); if (v < 300) v = 300; print v }')
124
+ elif [[ "$miss_rate" -lt 5 ]]; then
125
+ # Very low miss rate — increase TTL (cache is very effective)
126
+ new_ttl=$(awk -v ttl="$current_ttl" 'BEGIN { v = int(ttl * 1.25); if (v > 14400) v = 14400; print v }')
127
+ fi
128
+
129
+ if [[ "$new_ttl" != "$current_ttl" ]]; then
130
+ local tmp_file
131
+ tmp_file=$(mktemp "${TMPDIR:-/tmp}/sw-cache-ttl.XXXXXX")
132
+ jq -n \
133
+ --argjson ttl "$new_ttl" \
134
+ --argjson miss_rate "$miss_rate" \
135
+ --arg updated "$(now_iso)" \
136
+ '{ttl: $ttl, miss_rate_pct: $miss_rate, updated: $updated}' \
137
+ > "$tmp_file" && mv "$tmp_file" "$CACHE_TTL_CONFIG" || rm -f "$tmp_file"
138
+
139
+ emit_event "intelligence.cache_ttl_adjusted" \
140
+ "old_ttl=$current_ttl" \
141
+ "new_ttl=$new_ttl" \
142
+ "miss_rate=$miss_rate"
143
+ fi
144
+
145
+ # Reset stats for next window
146
+ local tmp_reset
147
+ tmp_reset=$(mktemp "${TMPDIR:-/tmp}/sw-cache-reset.XXXXXX")
148
+ echo '{"hits":0,"misses":0,"total":0}' > "$tmp_reset" && mv "$tmp_reset" "$CACHE_STATS_FILE" || rm -f "$tmp_reset"
149
+ }
150
+
151
# ─── Feature Flag ───────────────────────────────────────────────────────────

# True (exit 0) only when .claude/daemon-config.json exists and sets
# .intelligence.enabled to true; disabled in every other situation.
_intelligence_enabled() {
  local config="${REPO_DIR}/.claude/daemon-config.json"
  [[ -f "$config" ]] || return 1
  local flag
  flag=$(jq -r '.intelligence.enabled // false' "$config" 2>/dev/null || echo "false")
  [[ "$flag" == "true" ]]
}
163
+
164
# MD5 hashing: uses compute_md5 from lib/compat.sh

# ─── Cache Operations ───────────────────────────────────────────────────────

# Ensure the intelligence cache file exists with an empty entries map;
# leaves an existing cache untouched.
_intelligence_cache_init() {
  mkdir -p "$(dirname "$INTELLIGENCE_CACHE")"
  [[ -f "$INTELLIGENCE_CACHE" ]] || echo '{"entries":{}}' > "$INTELLIGENCE_CACHE"
}
176
+
177
# Look up a cached result by logical key.
# $1 = cache key; $2 = TTL override in seconds (defaults to adaptive TTL).
# Prints the cached result and returns 0 on a fresh hit; returns 1 on a
# miss or when the entry is older than the TTL. Every lookup is reported
# to _intelligence_track_cache_access for adaptive tuning.
_intelligence_cache_get() {
  local cache_key="$1"
  local default_ttl
  default_ttl=$(_intelligence_get_cache_ttl)
  local ttl="${2:-$default_ttl}"

  _intelligence_cache_init

  local key_hash entry
  key_hash=$(compute_md5 --string "$cache_key")
  entry=$(jq -r --arg h "$key_hash" '.entries[$h] // empty' "$INTELLIGENCE_CACHE" 2>/dev/null || true)

  if [[ -z "$entry" ]]; then
    _intelligence_track_cache_access "miss"
    return 1
  fi

  # Treat entries older than the TTL as misses.
  local stored_at age
  stored_at=$(echo "$entry" | jq -r '.timestamp // 0')
  age=$(( $(now_epoch) - stored_at ))
  if [[ "$age" -gt "$ttl" ]]; then
    _intelligence_track_cache_access "miss"
    return 1
  fi

  _intelligence_track_cache_access "hit"
  echo "$entry" | jq -r '.result'
  return 0
}
211
+
212
# Store a result under a logical cache key.
# $1 = cache key; $2 = result (must be valid JSON — jq rejects it
# otherwise and the cache is left unchanged); $3 = TTL seconds recorded on
# the entry (defaults to DEFAULT_CACHE_TTL). The cache file is rewritten
# atomically via a temp file.
_intelligence_cache_set() {
  local cache_key="$1"
  local result="$2"
  local ttl="${3:-$DEFAULT_CACHE_TTL}"

  _intelligence_cache_init

  local key_hash stamp scratch
  key_hash=$(compute_md5 --string "$cache_key")
  stamp=$(now_epoch)
  scratch=$(mktemp "${TMPDIR:-/tmp}/sw-intel-cache.XXXXXX")
  jq --arg h "$key_hash" \
    --argjson result "$result" \
    --argjson ts "$stamp" \
    --argjson ttl "$ttl" \
    '.entries[$h] = {"result": $result, "timestamp": $ts, "ttl": $ttl}' \
    "$INTELLIGENCE_CACHE" > "$scratch" 2>/dev/null && mv "$scratch" "$INTELLIGENCE_CACHE" || rm -f "$scratch"
}
233
+
234
# ─── Core Claude Call ────────────────────────────────────────────────────────

# Run a prompt through the claude CLI with result caching.
# $1 = prompt; $2 = cache key (empty disables caching); $3 = TTL seconds.
# Prints a JSON result: the model's JSON if parseable (raw, inside a
# fenced markdown block, or embedded between braces), otherwise
# {"raw_response": …}. Returns 1 — with a JSON error object on stdout —
# when the CLI is missing or the call fails.
_intelligence_call_claude() {
  local prompt="$1"
  local cache_key="${2:-}"
  local ttl="${3:-$DEFAULT_CACHE_TTL}"

  # Check cache first — but only with a real key. Previously an empty key
  # was still hashed, so every empty-key caller shared ONE cache slot and
  # could be served an unrelated caller's result.
  local cached
  if [[ -n "$cache_key" ]] && cached=$(_intelligence_cache_get "$cache_key" "$ttl"); then
    emit_event "intelligence.cache_hit" "cache_key=$cache_key"
    echo "$cached"
    return 0
  fi

  # Verify claude CLI is available
  if ! command -v claude >/dev/null 2>&1; then
    error "claude CLI not found"
    echo '{"error":"claude_cli_not_found"}'
    return 1
  fi

  # Call Claude (--print mode returns raw text response)
  local response
  if ! response=$(claude -p "$prompt" 2>/dev/null); then
    error "Claude call failed"
    echo '{"error":"claude_call_failed"}'
    return 1
  fi

  # Extract JSON from the response, most-structured form first.
  local result
  if echo "$response" | jq '.' >/dev/null 2>&1; then
    # Raw response is already valid JSON.
    result=$(echo "$response" | jq '.')
  else
    # Try a fenced markdown code block (```json ... ```).
    local extracted
    extracted=$(echo "$response" | sed -n '/^```/,/^```$/p' | sed '1d;$d' || true)
    if [[ -n "$extracted" ]] && echo "$extracted" | jq '.' >/dev/null 2>&1; then
      result="$extracted"
    else
      # Take everything from the first line containing "{" through the
      # LAST line containing "}". The previous `sed -n '/{/,/}/p'` range
      # closed at the FIRST "}", truncating any multi-line nested object.
      local braced
      braced=$(printf '%s\n' "$response" \
        | awk 'f || /\{/ {f=1; lines[++n]=$0} /\}/ {last=n} END {for (i = 1; i <= last; i++) print lines[i]}' || true)
      if [[ -n "$braced" ]] && echo "$braced" | jq '.' >/dev/null 2>&1; then
        result="$braced"
      else
        # Last resort: wrap the raw text so callers always get JSON.
        result=$(jq -n --arg text "$response" '{"raw_response": $text}')
      fi
    fi
  fi

  # Cache the result (only under a real key — see above).
  [[ -n "$cache_key" ]] && _intelligence_cache_set "$cache_key" "$result" "$ttl"

  echo "$result"
  return 0
}
294
+
295
+ # ═══════════════════════════════════════════════════════════════════════════════
296
+ # PUBLIC FUNCTIONS
297
+ # ═══════════════════════════════════════════════════════════════════════════════
298
+
299
# ─── Analyze Issue ───────────────────────────────────────────────────────────

# Semantic analysis of a GitHub issue. $1 is the issue as JSON (title,
# body, labels). Prints a JSON object with complexity (1-10), risk_level,
# success_probability, recommended_template, key_risks and
# implementation_hints. Degrades to neutral defaults when intelligence is
# disabled or the model response is unusable; always exits 0.
intelligence_analyze_issue() {
  local issue_json="${1:-"{}"}"

  if ! _intelligence_enabled; then
    echo '{"error":"intelligence_disabled","complexity":5,"risk_level":"medium","success_probability":50,"recommended_template":"standard","key_risks":[],"implementation_hints":[]}'
    return 0
  fi

  # Fields fed into the prompt; each degrades independently on jq failure.
  local issue_title issue_body issue_labels
  issue_title=$(echo "$issue_json" | jq -r '.title // "untitled"' 2>/dev/null || echo "untitled")
  issue_body=$(echo "$issue_json" | jq -r '.body // ""' 2>/dev/null || echo "")
  issue_labels=$(echo "$issue_json" | jq -r '(.labels // []) | join(", ")' 2>/dev/null || echo "")

  local prompt
  prompt="Analyze this GitHub issue for a software project and return ONLY a JSON object (no markdown, no explanation).

Issue title: ${issue_title}
Issue body: ${issue_body}
Labels: ${issue_labels}

Return JSON with exactly these fields:
{
\"complexity\": <number 1-10>,
\"risk_level\": \"<low|medium|high|critical>\",
\"success_probability\": <number 0-100>,
\"recommended_template\": \"<fast|standard|full|hotfix|autonomous|enterprise|cost-aware>\",
\"key_risks\": [\"risk1\", \"risk2\"],
\"implementation_hints\": [\"hint1\", \"hint2\"]
}"

  local cache_key
  cache_key="analyze_issue_$(compute_md5 --string "${issue_title}${issue_body}")"

  local result
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key"); then
    echo '{"error":"analysis_failed","complexity":5,"risk_level":"medium","success_probability":50,"recommended_template":"standard","key_risks":["analysis_failed"],"implementation_hints":[]}'
    return 0
  fi

  # Schema check: substitute a neutral object if required fields are missing.
  local valid
  valid=$(echo "$result" | jq 'has("complexity") and has("risk_level") and has("success_probability") and has("recommended_template")' 2>/dev/null || echo "false")
  if [[ "$valid" != "true" ]]; then
    warn "Intelligence response missing required fields, using fallback"
    result='{"complexity":5,"risk_level":"medium","success_probability":50,"recommended_template":"standard","key_risks":["analysis_incomplete"],"implementation_hints":[]}'
  fi

  emit_event "intelligence.analysis" \
    "complexity=$(echo "$result" | jq -r '.complexity')" \
    "risk_level=$(echo "$result" | jq -r '.risk_level')" \
    "success_probability=$(echo "$result" | jq -r '.success_probability')" \
    "recommended_template=$(echo "$result" | jq -r '.recommended_template')"

  # Enrich with GitHub data if available.
  result=$(intelligence_github_enrich "$result")

  echo "$result"
  return 0
}
361
+
362
# ─── Compose Pipeline ───────────────────────────────────────────────────────

# Compose an optimal pipeline from an issue analysis ($1, JSON), repository
# context ($2, JSON) and remaining budget in USD ($3). Prints JSON with a
# "stages" array and a "rationale"; falls back to a minimal four-stage
# pipeline when the model answer is unusable. Always exits 0.
intelligence_compose_pipeline() {
  local analysis="${1:-"{}"}"
  local repo_ctx="${2:-"{}"}"
  local budget="${3:-0}"

  if ! _intelligence_enabled; then
    echo '{"error":"intelligence_disabled","stages":[]}'
    return 0
  fi

  local cx risk
  cx=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
  risk=$(echo "$analysis" | jq -r '.risk_level // "medium"' 2>/dev/null || echo "medium")

  local prompt
  prompt="You are a CI/CD pipeline composer. Given the issue analysis and constraints, compose an optimal pipeline.

Issue complexity: ${cx}/10
Risk level: ${risk}
Budget remaining: \$${budget} USD
Repository context: ${repo_ctx}

Available stages: intake, plan, design, build, test, review, compound_quality, pr, merge, deploy, validate, monitor
Available models: opus (most capable, expensive), sonnet (balanced), haiku (fast, cheap)

Return ONLY a JSON object (no markdown):
{
\"stages\": [
{\"id\": \"stage_name\", \"enabled\": true, \"model\": \"sonnet\", \"config\": {}}
],
\"rationale\": \"brief explanation\"
}"

  local cache_key
  cache_key="compose_pipeline_$(compute_md5 --string "${analysis}${budget}")"

  local result
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key"); then
    echo '{"error":"composition_failed","stages":[]}'
    return 0
  fi

  # Guarantee callers a stages array even on a malformed model answer.
  local has_stages
  has_stages=$(echo "$result" | jq 'has("stages") and (.stages | type == "array")' 2>/dev/null || echo "false")
  if [[ "$has_stages" != "true" ]]; then
    warn "Pipeline composition missing stages array, using fallback"
    result='{"stages":[{"id":"intake","enabled":true,"model":"sonnet","config":{}},{"id":"build","enabled":true,"model":"sonnet","config":{}},{"id":"test","enabled":true,"model":"sonnet","config":{}},{"id":"pr","enabled":true,"model":"sonnet","config":{}}],"rationale":"fallback pipeline"}'
  fi

  emit_event "intelligence.compose" \
    "stage_count=$(echo "$result" | jq '.stages | length')" \
    "complexity=$cx"

  echo "$result"
  return 0
}
421
+
422
# ─── Predict Cost ────────────────────────────────────────────────────────────

# Predict cost/effort for a pipeline run from the issue analysis ($1) and
# historical run data ($2), both JSON. Prints JSON with estimated cost,
# iterations, tokens, likely failure stage and confidence. Always exits 0.
intelligence_predict_cost() {
  local analysis="${1:-"{}"}"
  local history="${2:-"{}"}"

  if ! _intelligence_enabled; then
    echo '{"error":"intelligence_disabled","estimated_cost_usd":0,"estimated_iterations":0,"likely_failure_stage":"unknown"}'
    return 0
  fi

  local cx
  cx=$(echo "$analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")

  local prompt
  prompt="Estimate the cost and effort for a CI pipeline run. Return ONLY JSON (no markdown).

Issue analysis: ${analysis}
Historical data from past runs: ${history}

Based on similar complexity (${cx}/10) issues, estimate:
{
\"estimated_cost_usd\": <number>,
\"estimated_iterations\": <number of build-test cycles>,
\"estimated_tokens\": <total token count>,
\"likely_failure_stage\": \"<stage name or 'none'>\",
\"confidence\": <0-100>
}"

  local cache_key
  cache_key="predict_cost_$(compute_md5 --string "${analysis}${history}")"

  local result
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key"); then
    echo '{"error":"prediction_failed","estimated_cost_usd":0,"estimated_iterations":0,"likely_failure_stage":"unknown"}'
    return 0
  fi

  # Replace malformed answers with a conservative fixed estimate.
  local valid
  valid=$(echo "$result" | jq 'has("estimated_cost_usd") and has("estimated_iterations")' 2>/dev/null || echo "false")
  if [[ "$valid" != "true" ]]; then
    warn "Cost prediction missing required fields, using fallback"
    result='{"estimated_cost_usd":5.0,"estimated_iterations":3,"estimated_tokens":500000,"likely_failure_stage":"test","confidence":30}'
  fi

  emit_event "intelligence.prediction" \
    "estimated_cost=$(echo "$result" | jq -r '.estimated_cost_usd')" \
    "estimated_iterations=$(echo "$result" | jq -r '.estimated_iterations')" \
    "complexity=$cx"

  echo "$result"
  return 0
}
476
+
477
# ─── Synthesize Findings ─────────────────────────────────────────────────────

# Merge heterogeneous findings ($1, JSON array: patrol findings, test
# results, review comments) into one unified fix strategy. Prints JSON
# with priority_fixes, root_causes, recommended_approach and
# estimated_effort. Always exits 0.
intelligence_synthesize_findings() {
  local findings="${1:-"[]"}"

  if ! _intelligence_enabled; then
    echo '{"error":"intelligence_disabled","priority_fixes":[],"root_causes":[],"recommended_approach":""}'
    return 0
  fi

  local prompt
  prompt="Synthesize multiple signal sources (patrol findings, test results, review comments) into a unified fix strategy. Return ONLY JSON (no markdown).

Findings: ${findings}

Return:
{
\"priority_fixes\": [\"fix1\", \"fix2\"],
\"root_causes\": [\"cause1\", \"cause2\"],
\"recommended_approach\": \"description of unified strategy\",
\"estimated_effort\": \"<low|medium|high>\"
}"

  local cache_key
  cache_key="synthesize_$(compute_md5 --string "$findings")"

  local result
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key"); then
    echo '{"error":"synthesis_failed","priority_fixes":[],"root_causes":[],"recommended_approach":""}'
    return 0
  fi

  # Malformed model answers degrade to a "manual review" strategy.
  local valid
  valid=$(echo "$result" | jq 'has("priority_fixes") and has("root_causes")' 2>/dev/null || echo "false")
  if [[ "$valid" != "true" ]]; then
    result='{"priority_fixes":[],"root_causes":["analysis_incomplete"],"recommended_approach":"manual review needed","estimated_effort":"medium"}'
  fi

  emit_event "intelligence.synthesize" \
    "fix_count=$(echo "$result" | jq '.priority_fixes | length')" \
    "cause_count=$(echo "$result" | jq '.root_causes | length')"

  echo "$result"
  return 0
}
523
+
524
# ─── Search Memory ───────────────────────────────────────────────────────────

# Rank persisted memory entries by relevance to a context string.
# $1 = context; $2 = memory directory (default ~/.shipwright/memory);
# $3 = how many entries to return (default 5). Prints JSON with a
# "results" array; empty results (with a message) when the directory or
# files are missing. Always exits 0.
intelligence_search_memory() {
  local context="${1:-""}"
  local memory_dir="${2:-"${HOME}/.shipwright/memory"}"
  local top_n="${3:-5}"

  if ! _intelligence_enabled; then
    echo '{"error":"intelligence_disabled","results":[]}'
    return 0
  fi

  if [[ ! -d "$memory_dir" ]]; then
    echo '{"results":[],"message":"memory directory not found"}'
    return 0
  fi

  # Concatenate the first 100 lines of up to 20 memory files, each
  # preceded by a "--- filename ---" delimiter, for the ranking prompt.
  local gathered=""
  local n_files=0
  local path
  while IFS= read -r path; do
    [[ -z "$path" ]] && continue
    local snippet
    snippet=$(head -100 "$path" 2>/dev/null || true)
    [[ -z "$snippet" ]] && continue
    gathered="${gathered}
--- $(basename "$path") ---
${snippet}
"
    n_files=$((n_files + 1))
  done < <(find "$memory_dir" -name "*.json" -o -name "*.md" 2>/dev/null | head -20 || true)

  if [[ "$n_files" -eq 0 ]]; then
    echo '{"results":[],"message":"no memory files found"}'
    return 0
  fi

  local prompt
  prompt="Rank the following memory entries by relevance to this context. Return ONLY JSON (no markdown).

Context: ${context}

Memory entries:
${gathered}

Return the top ${top_n} most relevant entries:
{
\"results\": [
{\"file\": \"filename\", \"relevance\": <0-100>, \"summary\": \"why this is relevant\"}
]
}"

  local cache_key
  cache_key="search_memory_$(compute_md5 --string "${context}${memory_dir}")"

  local result
  if ! result=$(_intelligence_call_claude "$prompt" "$cache_key" 1800); then
    echo '{"error":"memory_search_failed","results":[]}'
    return 0
  fi

  # Callers always get a results array, even on a malformed answer.
  local valid
  valid=$(echo "$result" | jq 'has("results") and (.results | type == "array")' 2>/dev/null || echo "false")
  if [[ "$valid" != "true" ]]; then
    result='{"results":[]}'
  fi

  echo "$result"
  return 0
}
600
+
601
# ─── Estimate Iterations ─────────────────────────────────────────────────────

# Estimate how many build-test iterations an issue will need; prints an
# integer in [1, 50]. Three strategies, in order:
#   1. Claude estimation (only when intelligence is enabled)
#   2. Historical per-bucket averages from the iteration model
#   3. A complexity-based heuristic fallback
# $1 = issue analysis JSON; $2 = optional historical iteration data.
intelligence_estimate_iterations() {
  local issue_analysis="${1:-"{}"}"
  local historical_data="${2:-""}"

  local iteration_model="${HOME}/.shipwright/optimization/iteration-model.json"

  # Extract complexity (1-10) and sanitize to an integer. Previously a
  # non-numeric value made every [[ -le ]] below error out silently,
  # dumping the issue into the "high" bucket by accident.
  local complexity
  complexity=$(echo "$issue_analysis" | jq -r '.complexity // 5' 2>/dev/null || echo "5")
  [[ "$complexity" =~ ^[0-9]+$ ]] || complexity=5

  # Map numeric complexity to bucket: 1-3=low, 4-6=medium, 7-10=high
  local bucket
  if [[ "$complexity" -le 3 ]]; then
    bucket="low"
  elif [[ "$complexity" -le 6 ]]; then
    bucket="medium"
  else
    bucket="high"
  fi

  # Strategy 1: Intelligence-enabled — call Claude for fine-grained estimation
  if _intelligence_enabled; then
    local prompt
    prompt="Estimate the number of build-test iterations needed for this issue. Return ONLY a JSON object.

Issue analysis: ${issue_analysis}
Historical iteration data: ${historical_data:-none}

Consider:
- Complexity: ${complexity}/10 (${bucket})
- Higher complexity usually needs more iterations
- Well-understood patterns need fewer iterations

Return JSON: {\"estimated_iterations\": <integer 1-50>}"

    local cache_key
    cache_key="estimate_iterations_$(compute_md5 --string "${issue_analysis}")"

    local result
    if result=$(_intelligence_call_claude "$prompt" "$cache_key" 1800); then
      # Accept several field spellings, then a bare number in raw text.
      local estimate
      estimate=$(echo "$result" | jq -r '.estimated_iterations // .iterations // .estimate // empty' 2>/dev/null || true)
      if [[ -z "$estimate" ]]; then
        estimate=$(echo "$result" | jq -r '.raw_response // empty' 2>/dev/null | tr -dc '0-9' | head -c 3 || true)
      fi
      if [[ -n "$estimate" ]] && [[ "$estimate" =~ ^[0-9]+$ ]] && \
         [[ "$estimate" -ge 1 ]] && [[ "$estimate" -le 50 ]]; then
        emit_event "intelligence.estimate_iterations" \
          "estimate=$estimate" \
          "complexity=$complexity" \
          "source=claude"
        echo "$estimate"
        return 0
      fi
    fi
    # Fall through to historical data if Claude call fails
  fi

  # Strategy 2: Use historical averages from iteration model
  if [[ -f "$iteration_model" ]]; then
    local mean samples
    mean=$(jq -r --arg b "$bucket" '.[$b].mean // 0' "$iteration_model" 2>/dev/null || echo "0")
    samples=$(jq -r --arg b "$bucket" '.[$b].samples // 0' "$iteration_model" 2>/dev/null || echo "0")

    if [[ "$samples" -gt 0 ]] && [[ "$mean" != "0" ]]; then
      # Round to nearest integer, clamp to [1, 50]. Pass the mean via
      # `awk -v` instead of interpolating it into the program text, which
      # was a syntax/injection hazard for unexpected file contents.
      local estimate
      estimate=$(awk -v m="$mean" 'BEGIN{v=int(m + 0.5); if(v<1)v=1; if(v>50)v=50; print v}')
      emit_event "intelligence.estimate_iterations" \
        "estimate=$estimate" \
        "complexity=$complexity" \
        "source=historical" \
        "samples=$samples"
      echo "$estimate"
      return 0
    fi
  fi

  # Strategy 3: Heuristic fallback based on numeric complexity
  local estimate
  if [[ "$complexity" -le 2 ]]; then
    estimate=5
  elif [[ "$complexity" -le 4 ]]; then
    estimate=10
  elif [[ "$complexity" -le 6 ]]; then
    estimate=15
  elif [[ "$complexity" -le 8 ]]; then
    estimate=25
  else
    estimate=35
  fi

  emit_event "intelligence.estimate_iterations" \
    "estimate=$estimate" \
    "complexity=$complexity" \
    "source=heuristic"

  echo "$estimate"
  return 0
}
705
+
706
+ # ─── Recommend Model ─────────────────────────────────────────────────────────
707
+
708
# intelligence_recommend_model [stage] [complexity] [budget_remaining]
#   stage            - pipeline stage name (default "build")
#   complexity       - issue complexity on a 1-10 scale (default 5)
#   budget_remaining - dollars left (default 100); "" disables budget checks
# Picks a Claude model (haiku/sonnet/opus) for a pipeline stage.
# Strategy 1 uses per-stage historical success rates from
# ~/.shipwright/optimization/model-routing.json, with thresholds optionally
# overridden by model-routing-thresholds.json. Strategy 2 is a pure heuristic
# on complexity + stage. In both cases the function prints a JSON object
# {model, reason, stage, complexity, source} to stdout, emits an
# intelligence.model event, and returns 0.
intelligence_recommend_model() {
  local stage="${1:-"build"}"
  local complexity="${2:-5}"
  local budget_remaining="${3:-100}"

  # Defaults used when neither strategy overrides them.
  local model="sonnet"
  local reason="default balanced choice"

  # Strategy 1: Check historical model routing data
  local routing_file="${HOME}/.shipwright/optimization/model-routing.json"
  if [[ -f "$routing_file" ]]; then
    local stage_data
    stage_data=$(jq -r --arg s "$stage" '.[$s] // empty' "$routing_file" 2>/dev/null || true)

    if [[ -n "$stage_data" && "$stage_data" != "null" ]]; then
      local recommended sonnet_rate sonnet_samples opus_rate opus_samples
      recommended=$(echo "$stage_data" | jq -r '.recommended // empty' 2>/dev/null || true)
      sonnet_rate=$(echo "$stage_data" | jq -r '.sonnet_rate // 0' 2>/dev/null || echo "0")
      sonnet_samples=$(echo "$stage_data" | jq -r '.sonnet_samples // 0' 2>/dev/null || echo "0")
      opus_rate=$(echo "$stage_data" | jq -r '.opus_rate // 0' 2>/dev/null || echo "0")
      opus_samples=$(echo "$stage_data" | jq -r '.opus_samples // 0' 2>/dev/null || echo "0")

      # Load adaptive routing thresholds from config or use defaults
      local routing_config="${HOME}/.shipwright/optimization/model-routing-thresholds.json"
      local min_samples=3          # minimum sonnet samples before trusting its rate
      local success_threshold=90   # sonnet success % required to prefer it
      local complexity_upgrade=8   # complexity at which critical stages upgrade to opus

      if [[ -f "$routing_config" ]]; then
        local cfg_min cfg_success cfg_complexity
        cfg_min=$(jq -r '.min_samples // empty' "$routing_config" 2>/dev/null || true)
        cfg_success=$(jq -r '.success_threshold // empty' "$routing_config" 2>/dev/null || true)
        cfg_complexity=$(jq -r '.complexity_upgrade // empty' "$routing_config" 2>/dev/null || true)
        [[ -n "$cfg_min" && "$cfg_min" != "null" ]] && min_samples="$cfg_min"
        [[ -n "$cfg_success" && "$cfg_success" != "null" ]] && success_threshold="$cfg_success"
        [[ -n "$cfg_complexity" && "$cfg_complexity" != "null" ]] && complexity_upgrade="$cfg_complexity"
      fi

      # SPRT-inspired evidence check: if enough data, use log-likelihood ratio
      local use_sonnet=false
      local total_samples=$((sonnet_samples + opus_samples))

      if [[ "$total_samples" -ge 10 ]]; then
        # With 10+ data points, use evidence ratio
        # Log-likelihood ratio: ln(P(data|sonnet_good) / P(data|sonnet_bad))
        # Simplified: if sonnet_rate / opus_rate > 0.95, switch to sonnet
        local rate_ratio
        # awk's exit status encodes the float comparison: exit 0 (shell true)
        # when opus_rate > 0, so division below cannot divide by zero.
        if awk -v or="$opus_rate" 'BEGIN { exit !(or > 0) }' 2>/dev/null; then
          rate_ratio=$(awk -v sr="$sonnet_rate" -v or="$opus_rate" 'BEGIN { printf "%.3f", sr / or }')
        else
          rate_ratio="1.0"   # no opus data: treat the two models as equivalent
        fi

        if [[ "$sonnet_samples" -ge "$min_samples" ]] && \
           awk -v sr="$sonnet_rate" -v st="$success_threshold" 'BEGIN { exit !(sr >= st) }' 2>/dev/null && \
           awk -v rr="$rate_ratio" 'BEGIN { exit !(rr >= 0.95) }' 2>/dev/null; then
          use_sonnet=true
        fi
      elif [[ "$sonnet_samples" -ge "$min_samples" ]] && \
           awk -v sr="$sonnet_rate" -v st="$success_threshold" 'BEGIN { exit !(sr >= st) }' 2>/dev/null; then
        # Fewer data points — fall back to simple threshold check
        use_sonnet=true
      fi

      if [[ "$use_sonnet" == "true" ]]; then
        # NOTE(review): budget math uses bc while the rest of the function
        # uses awk; if bc is absent the `|| echo "0"` fallback silently treats
        # the budget as unconstrained — confirm bc is a hard dependency.
        if [[ "$budget_remaining" != "" ]] && [[ "$(echo "$budget_remaining < 5" | bc 2>/dev/null || echo "0")" == "1" ]]; then
          model="haiku"
          reason="sonnet viable (${sonnet_rate}% success) but budget constrained"
        else
          model="sonnet"
          reason="evidence-based: ${sonnet_rate}% success on ${stage} (${sonnet_samples} samples, SPRT)"
        fi
      elif [[ -n "$recommended" && "$recommended" != "null" ]]; then
        model="$recommended"
        reason="historical routing recommendation for ${stage}"
      fi

      # Override: high complexity + critical stage → upgrade to opus if budget allows
      if [[ "$complexity" -ge "$complexity_upgrade" ]]; then
        case "$stage" in
          plan|design|review|compound_quality)
            if [[ "$model" != "opus" ]]; then
              # Upgrade only when budget is unknown ("") or at least $10 remains
              # (bc-missing fallback "1" errs on the side of upgrading).
              if [[ "$budget_remaining" == "" ]] || [[ "$(echo "$budget_remaining >= 10" | bc 2>/dev/null || echo "1")" == "1" ]]; then
                model="opus"
                reason="high complexity (${complexity}/10) overrides historical for critical stage (${stage})"
              fi
            fi
            ;;
        esac
      fi

      local result
      result=$(jq -n --arg model "$model" --arg reason "$reason" --arg stage "$stage" --argjson complexity "$complexity" \
        '{"model": $model, "reason": $reason, "stage": $stage, "complexity": $complexity, "source": "historical"}')

      emit_event "intelligence.model" \
        "stage=$stage" \
        "complexity=$complexity" \
        "model=$model" \
        "source=historical"

      echo "$result"
      return 0
    fi
  fi

  # Strategy 2: Heuristic fallback (no historical data available)
  # Budget-constrained: use haiku
  if [[ "$budget_remaining" != "" ]] && [[ "$(echo "$budget_remaining < 5" | bc 2>/dev/null || echo "0")" == "1" ]]; then
    model="haiku"
    reason="budget constrained (< \$5 remaining)"
  # High complexity + critical stages: use opus
  elif [[ "$complexity" -ge 8 ]]; then
    case "$stage" in
      plan|design|review|compound_quality)
        model="opus"
        reason="high complexity (${complexity}/10) + critical stage (${stage})"
        ;;
      build|test)
        model="sonnet"
        reason="high complexity but execution stage — sonnet is sufficient"
        ;;
      *)
        model="sonnet"
        reason="high complexity, non-critical stage"
        ;;
    esac
  # Low complexity: use haiku for simple stages
  elif [[ "$complexity" -le 3 ]]; then
    case "$stage" in
      intake|pr|merge)
        model="haiku"
        reason="low complexity (${complexity}/10), simple stage (${stage})"
        ;;
      build|test)
        model="sonnet"
        reason="low complexity but code execution stage"
        ;;
      *)
        model="haiku"
        reason="low complexity, standard stage"
        ;;
    esac
  fi

  local result
  result=$(jq -n --arg model "$model" --arg reason "$reason" --arg stage "$stage" --argjson complexity "$complexity" \
    '{"model": $model, "reason": $reason, "stage": $stage, "complexity": $complexity, "source": "heuristic"}')

  emit_event "intelligence.model" \
    "stage=$stage" \
    "complexity=$complexity" \
    "model=$model" \
    "source=heuristic"

  echo "$result"
  return 0
}
866
+
867
+ # ─── Prediction Validation ─────────────────────────────────────────────────
868
+
869
# intelligence_validate_prediction <issue_id> <predicted_complexity> <actual_iterations> <actual_success>
# Compares predicted complexity to actual outcome for feedback learning.
# Side effects: emits an intelligence.prediction_error event, warns when the
# prediction is off by more than 3 points, and appends a JSONL record to
# ~/.shipwright/optimization/prediction-validation.jsonl.
# Output: JSON summary {issue, predicted_complexity, actual_complexity, delta,
# accuracy, actual_success} on stdout. Returns 1 when issue_id is missing.
intelligence_validate_prediction() {
  local issue_id="${1:-}"
  local predicted_complexity="${2:-0}"
  local actual_iterations="${3:-0}"
  local actual_success="${4:-false}"

  if [[ -z "$issue_id" ]]; then
    error "Usage: intelligence_validate_prediction <issue_id> <predicted> <actual_iterations> <actual_success>"
    return 1
  fi

  # Robustness: coerce malformed numeric args to 0 so the integer comparisons
  # and `jq --argjson` below cannot fail on garbage input.
  [[ "$predicted_complexity" =~ ^-?[0-9]+$ ]] || predicted_complexity=0
  [[ "$actual_iterations" =~ ^-?[0-9]+$ ]] || actual_iterations=0

  # Infer actual complexity from iterations (heuristic: map iterations to 1-10 scale)
  local actual_complexity
  if [[ "$actual_iterations" -le 5 ]]; then
    actual_complexity=2
  elif [[ "$actual_iterations" -le 10 ]]; then
    actual_complexity=4
  elif [[ "$actual_iterations" -le 15 ]]; then
    actual_complexity=6
  elif [[ "$actual_iterations" -le 25 ]]; then
    actual_complexity=8
  else
    actual_complexity=10
  fi

  # Calculate prediction error (signed delta; abs value via '-' prefix strip)
  local delta=$(( predicted_complexity - actual_complexity ))
  local abs_delta="${delta#-}"

  # Emit prediction error event
  emit_event "intelligence.prediction_error" \
    "issue_id=$issue_id" \
    "predicted=$predicted_complexity" \
    "actual=$actual_complexity" \
    "delta=$delta" \
    "actual_iterations=$actual_iterations" \
    "actual_success=$actual_success"

  # Warn if prediction was significantly off
  if [[ "$abs_delta" -gt 3 ]]; then
    warn "Prediction error for issue #${issue_id}: predicted complexity=${predicted_complexity}, actual~=${actual_complexity} (delta=${delta})"
  fi

  # Ensure the cache/optimization directories exist, then append the outcome
  # record (JSONL, one line per validation) for future learning.
  _intelligence_cache_init

  local validation_file="${HOME}/.shipwright/optimization/prediction-validation.jsonl"
  mkdir -p "${HOME}/.shipwright/optimization"

  local record
  record=$(jq -c -n \
    --arg ts "$(now_iso)" \
    --arg issue "$issue_id" \
    --argjson predicted "$predicted_complexity" \
    --argjson actual "$actual_complexity" \
    --argjson delta "$delta" \
    --argjson iterations "$actual_iterations" \
    --arg success "$actual_success" \
    '{
      ts: $ts,
      issue: $issue,
      predicted_complexity: $predicted,
      actual_complexity: $actual,
      delta: $delta,
      actual_iterations: $iterations,
      actual_success: $success
    }')

  echo "$record" >> "$validation_file"

  # Output summary with a coarse accuracy label: good (<=1), fair (2-3), poor (>3)
  local accuracy_label="good"
  if [[ "$abs_delta" -gt 3 ]]; then
    accuracy_label="poor"
  elif [[ "$abs_delta" -gt 1 ]]; then
    accuracy_label="fair"
  fi

  jq -n \
    --arg issue "$issue_id" \
    --argjson predicted "$predicted_complexity" \
    --argjson actual "$actual_complexity" \
    --argjson delta "$delta" \
    --arg accuracy "$accuracy_label" \
    --arg success "$actual_success" \
    '{
      issue: $issue,
      predicted_complexity: $predicted,
      actual_complexity: $actual,
      delta: $delta,
      accuracy: $accuracy,
      actual_success: $success
    }'
}
965
+
966
+ # ─── GitHub Enrichment ─────────────────────────────────────────────────────
967
+
968
# intelligence_github_enrich <analysis_json>
# Augments an analysis JSON blob with GitHub repo context plus code-scanning
# and Dependabot alert counts. Degrades gracefully: when the GraphQL helpers
# are unavailable, the repo cannot be detected, or the final merge fails, the
# input JSON is echoed back unchanged. Always returns 0.
intelligence_github_enrich() {
  local analysis="$1"

  # Bail out early (echoing the input untouched) unless the GitHub helper
  # layer is loaded and a repo can be detected.
  if ! type _gh_detect_repo &>/dev/null 2>&1; then
    echo "$analysis"
    return 0
  fi
  if ! _gh_detect_repo 2>/dev/null; then
    echo "$analysis"
    return 0
  fi

  local gh_owner="${GH_OWNER:-}"
  local gh_repo="${GH_REPO:-}"
  if [[ -z "$gh_owner" || -z "$gh_repo" ]]; then
    echo "$analysis"
    return 0
  fi

  # Repo-level context — empty object when the helper is absent or fails.
  local ctx_json="{}"
  if type gh_repo_context &>/dev/null 2>&1; then
    ctx_json=$(gh_repo_context "$gh_owner" "$gh_repo" 2>/dev/null || echo "{}")
  fi

  # Count open code-scanning alerts.
  local n_security=0
  if type gh_security_alerts &>/dev/null 2>&1; then
    local alert_list
    alert_list=$(gh_security_alerts "$gh_owner" "$gh_repo" 2>/dev/null || echo "[]")
    n_security=$(printf '%s' "$alert_list" | jq 'length' 2>/dev/null || echo "0")
  fi

  # Count open Dependabot alerts.
  local n_dependabot=0
  if type gh_dependabot_alerts &>/dev/null 2>&1; then
    local dep_list
    dep_list=$(gh_dependabot_alerts "$gh_owner" "$gh_repo" 2>/dev/null || echo "[]")
    n_dependabot=$(printf '%s' "$dep_list" | jq 'length' 2>/dev/null || echo "0")
  fi

  # Merge the GitHub signals into the analysis; on jq failure, fall back to
  # the original analysis rather than emitting broken JSON.
  printf '%s' "$analysis" | jq --arg ctx "$ctx_json" \
    --argjson sec "${n_security:-0}" \
    --argjson dep "${n_dependabot:-0}" \
    '. + {
      github_context: ($ctx | fromjson? // {}),
      security_alert_count: $sec,
      dependabot_alert_count: $dep
    }' 2>/dev/null || echo "$analysis"
}
1010
+
1011
# intelligence_file_risk_score <file_path>
# Prints a 0-100 risk score for a file built from three GitHub signals:
# recent churn, open security alerts located in the file, and contributor
# count. Prints "0" whenever the GitHub helpers are unavailable or the repo
# cannot be detected. Always returns 0.
intelligence_file_risk_score() {
  local target="$1"
  local score=0

  # Without the helper layer and a detected repo there is nothing to score.
  if ! type _gh_detect_repo &>/dev/null 2>&1 || ! _gh_detect_repo 2>/dev/null; then
    echo "0"
    return 0
  fi

  local gh_owner="${GH_OWNER:-}" gh_repo="${GH_REPO:-}"
  if [[ -z "$gh_owner" || -z "$gh_repo" ]]; then
    echo "0"
    return 0
  fi

  # Factor 1: churn — files changed often in the last 30 days carry more risk.
  local churn=0
  if type gh_file_change_frequency &>/dev/null 2>&1; then
    churn=$(gh_file_change_frequency "$gh_owner" "$gh_repo" "$target" 30 2>/dev/null || echo "0")
  fi
  if [[ "${churn:-0}" -gt 20 ]]; then
    score=$((score + 30))
  elif [[ "${churn:-0}" -gt 10 ]]; then
    score=$((score + 15))
  elif [[ "${churn:-0}" -gt 5 ]]; then
    score=$((score + 5))
  fi

  # Factor 2: any open security alert located in this file is a large penalty.
  if type gh_security_alerts &>/dev/null 2>&1; then
    local hits
    hits=$(gh_security_alerts "$gh_owner" "$gh_repo" 2>/dev/null | \
      jq --arg path "$target" '[.[] | select(.most_recent_instance.location.path == $path)] | length' 2>/dev/null || echo "0")
    [[ "${hits:-0}" -gt 0 ]] && score=$((score + 40))
  fi

  # Factor 3: more than five distinct authors means coordination risk.
  if type gh_blame_data &>/dev/null 2>&1; then
    local authors
    authors=$(gh_blame_data "$gh_owner" "$gh_repo" "$target" 2>/dev/null | jq 'length' 2>/dev/null || echo "0")
    [[ "${authors:-0}" -gt 5 ]] && score=$((score + 10))
  fi

  # Clamp to the 0-100 scale.
  [[ "$score" -gt 100 ]] && score=100
  echo "$score"
}
1053
+
1054
# intelligence_contributor_expertise <file_path>
# Prints a JSON array of blame/authorship data for the given file, or "[]"
# when the GitHub helpers are unavailable, the repo cannot be detected, or
# the blame lookup fails. Always returns 0.
intelligence_contributor_expertise() {
  local target="$1"

  if ! type _gh_detect_repo &>/dev/null 2>&1 || ! _gh_detect_repo 2>/dev/null; then
    echo "[]"
    return 0
  fi

  local gh_owner="${GH_OWNER:-}" gh_repo="${GH_REPO:-}"
  if [[ -z "$gh_owner" || -z "$gh_repo" ]]; then
    echo "[]"
    return 0
  fi

  if type gh_blame_data &>/dev/null 2>&1; then
    gh_blame_data "$gh_owner" "$gh_repo" "$target" 2>/dev/null || echo "[]"
  else
    echo "[]"
  fi
}
1069
+
1070
+ # ═══════════════════════════════════════════════════════════════════════════════
1071
+ # CLI INTERFACE
1072
+ # ═══════════════════════════════════════════════════════════════════════════════
1073
+
1074
# show_help
# Prints the CLI usage/help text for the intelligence subcommand.
# Fix: the compose line previously contained an unescaped "$$" inside double
# quotes, which expanded to the shell's PID in the help output; it is now
# escaped so the literal "[$$]" placeholder is printed.
show_help() {
  echo ""
  echo -e "${CYAN}${BOLD}shipwright intelligence${RESET} — AI-Powered Analysis & Decision Engine"
  echo ""
  echo -e "${BOLD}USAGE${RESET}"
  echo -e " shipwright intelligence <command> [options]"
  echo ""
  echo -e "${BOLD}COMMANDS${RESET}"
  echo -e " ${CYAN}analyze${RESET} <issue_json> Analyze an issue semantically"
  echo -e " ${CYAN}compose${RESET} <analysis> [ctx] [\$\$] Compose optimal pipeline"
  echo -e " ${CYAN}predict${RESET} <analysis> [history] Predict cost and effort"
  echo -e " ${CYAN}synthesize${RESET} <findings_json> Synthesize findings into strategy"
  echo -e " ${CYAN}search-memory${RESET} <context> [dir] Search memory by relevance"
  echo -e " ${CYAN}estimate-iterations${RESET} <analysis> Estimate build iterations"
  echo -e " ${CYAN}recommend-model${RESET} <stage> [cplx] Recommend model for stage"
  echo -e " ${CYAN}cache-stats${RESET} Show cache statistics"
  echo -e " ${CYAN}validate-prediction${RESET} <id> <pred> <iters> <success> Validate prediction accuracy"
  echo -e " ${CYAN}cache-clear${RESET} Clear intelligence cache"
  echo -e " ${CYAN}help${RESET} Show this help"
  echo ""
  echo -e "${BOLD}CONFIGURATION${RESET}"
  echo -e " Enable in ${DIM}.claude/daemon-config.json${RESET}:"
  echo -e " ${DIM}{\"intelligence\": {\"enabled\": true}}${RESET}"
  echo ""
  echo -e "${DIM}Version ${VERSION}${RESET}"
}
1100
+
1101
# cmd_cache_stats
# Prints a human-readable summary of the intelligence cache: total entries,
# file size, location, and (when non-empty) how many entries are still within
# their TTL versus expired.
# Fix: the previous implementation ran two jq processes per cache entry inside
# a while-loop; the active/expired split is now computed in a single jq pass.
cmd_cache_stats() {
  _intelligence_cache_init

  local entry_count
  entry_count=$(jq '.entries | length' "$INTELLIGENCE_CACHE" 2>/dev/null || echo "0")
  local cache_size
  cache_size=$(wc -c < "$INTELLIGENCE_CACHE" 2>/dev/null | tr -d ' ' || echo "0")

  echo ""
  echo -e "${BOLD}Intelligence Cache${RESET}"
  echo -e " Entries: ${CYAN}${entry_count}${RESET}"
  echo -e " Size: ${DIM}${cache_size} bytes${RESET}"
  echo -e " Location: ${DIM}${INTELLIGENCE_CACHE}${RESET}"

  if [[ "$entry_count" -gt 0 ]]; then
    local now
    now=$(now_epoch)
    # Single jq pass: an entry is active while its age (now - timestamp) is
    # within its TTL (default 3600s); everything else is expired.
    local active expired
    active=$(jq --argjson now "$now" \
      '[.entries[] | select(($now - (.timestamp // 0)) <= (.ttl // 3600))] | length' \
      "$INTELLIGENCE_CACHE" 2>/dev/null || echo "0")
    expired=$(( entry_count - active ))
    echo -e " Active: ${GREEN}${active}${RESET}"
    echo -e " Expired: ${DIM}${expired}${RESET}"
  fi
  echo ""
}
1137
+
1138
# cmd_cache_clear
# Resets the intelligence cache to an empty entry map; reports when no cache
# file exists yet.
cmd_cache_clear() {
  if [[ ! -f "$INTELLIGENCE_CACHE" ]]; then
    info "No cache file found"
    return 0
  fi
  echo '{"entries":{}}' > "$INTELLIGENCE_CACHE"
  success "Intelligence cache cleared"
}
1146
+
1147
# main <command> [args...]
# CLI dispatcher for the intelligence subcommands. Unknown commands print an
# error plus the help text and exit 1.
main() {
  local command="${1:-help}"
  # Drop the command name; `|| true` keeps zero-arg invocations alive.
  shift 2>/dev/null || true

  case "$command" in
    analyze)             intelligence_analyze_issue "$@" ;;
    compose)             intelligence_compose_pipeline "$@" ;;
    predict)             intelligence_predict_cost "$@" ;;
    synthesize)          intelligence_synthesize_findings "$@" ;;
    search-memory)       intelligence_search_memory "$@" ;;
    estimate-iterations) intelligence_estimate_iterations "$@" ;;
    recommend-model)     intelligence_recommend_model "$@" ;;
    validate-prediction) intelligence_validate_prediction "$@" ;;
    cache-stats)         cmd_cache_stats ;;
    cache-clear)         cmd_cache_clear ;;
    help|--help|-h)      show_help ;;
    *)
      error "Unknown command: $command"
      show_help
      exit 1
      ;;
  esac
}
1192
+
1193
# Dispatch the CLI only when this file is executed directly; when sourced as
# a library the functions are exposed without running main.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
  main "$@"
fi